hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
175f61b9d802a91e4de9d9247955ef5ce23d1af4
| 35
|
py
|
Python
|
tf_semseg/model/pretrained/openmmlab/__init__.py
|
fferflo/tf-semseg
|
b392cac2e8cca5389e7a099e8f7a87d72f4a70fc
|
[
"MIT"
] | null | null | null |
tf_semseg/model/pretrained/openmmlab/__init__.py
|
fferflo/tf-semseg
|
b392cac2e8cca5389e7a099e8f7a87d72f4a70fc
|
[
"MIT"
] | null | null | null |
tf_semseg/model/pretrained/openmmlab/__init__.py
|
fferflo/tf-semseg
|
b392cac2e8cca5389e7a099e8f7a87d72f4a70fc
|
[
"MIT"
] | null | null | null |
from . import upernet_vitb_ade20k
| 17.5
| 34
| 0.828571
| 5
| 35
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0.142857
| 35
| 1
| 35
| 35
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
17822a1b24fd1c362503fd797eef56d974a5838a
| 366,229
|
py
|
Python
|
Colab_notebook/psp_colab_notebook.py
|
Ramprasad-Group/PSP
|
6f0df0a052898c8f44a58ef9ab0578731455c4a3
|
[
"MIT"
] | 11
|
2022-02-24T00:29:46.000Z
|
2022-03-30T08:07:00.000Z
|
Colab_notebook/psp_colab_notebook.py
|
Ramprasad-Group/PSP
|
6f0df0a052898c8f44a58ef9ab0578731455c4a3
|
[
"MIT"
] | null | null | null |
Colab_notebook/psp_colab_notebook.py
|
Ramprasad-Group/PSP
|
6f0df0a052898c8f44a58ef9ab0578731455c4a3
|
[
"MIT"
] | 1
|
2022-03-05T01:49:48.000Z
|
2022-03-05T01:49:48.000Z
|
# -*- coding: utf-8 -*-
"""psp_Colab_notebook.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1CuAuTpxlR6f1CT_Qmm6LJ8GzP7GyNkDR
# Polymer Structure Predictor (PSP)
Polymer Structure Predictor (PSP) is an autonomous model generator that builds a hierarchy of polymer models from SMILES strings, ranging from monomers, linear and loop oligomers, infinite polymer chains, to crystal and amorphous structures. The flexibility to define parameters allows for the construction of desired models at a reasonable computational cost. Individual output models can be visualized and used to perform *ab initio* and molecular dynamics simulations.
In this tutorial, we will cover:
- Instation of PSP and dependencies
- Examples for building polymer models
- $MoleculeBuilder$ (Linear and circular oligomers)
- $ChainBuilder$ (Finite and infinite polymer chains [*periodic*])
- $CrystalBuilder$ (Crystal models for small molecules and polymers)
- $AmorphousBuilder$ (Amorphous structures)
> The user can save a copy of this notebook to Google Drive or download it using the **File** tab (top left) for later use.
# Install PSP and dependencies
- Miniconda
- Scipy (v1.7.3)
- Open Babel (v3.1.1)
- RDKit (v2020.09.1.0)
- Packmol (v20.3.1)
- PySIMM (v1.1)
- py3Dmol (v1.7.0)
- Ambertools (v3.1.1)
- PSP (v0.1.0)
## Install Miniconda
Miniconda is a free minimal installer for conda. For more details visit https://docs.conda.io/en/latest/miniconda.html.
### Download the installer script
The following code downloads the suitable Miniconda installer script.
"""
!wget https://repo.anaconda.com/miniconda/Miniconda3-py37_4.10.3-Linux-x86_64.sh
!chmod +x Miniconda3-py37_4.10.3-Linux-x86_64.sh
"""### Unset the *PYTHONPATH* variable
It is recommended that you clear the PYTHONPATH environment variable prior to installing Miniconda. Otherwise, it may cause problems if the packages installed and accessible via the PYTHONPATH directories are incompatible with the Python version included with Miniconda.
"""
# Commented out IPython magic to ensure Python compatibility.
# %env PYTHONPATH=
"""### Install Miniconda
The code below will install Miniconda into */usr/local*.
"""
!./Miniconda3-py37_4.10.3-Linux-x86_64.sh -b -f -p /usr/local
"""### Appending to the sys.path
You must include the directory where *conda* will install packages in the list of directories that Python will search for modules to import.
"""
import sys
_ = (sys.path.append("/usr/local/lib/python3.7/site-packages"))
sys.path
"""## Install Scipy (v1.7.3)
SciPy provides algorithms for optimization, integration, interpolation, eigenvalue problems, algebraic equations, differential equations, statistics, and many other classes of problems. Additional information can be found at: https://scipy.org. It will be installed as follows:
"""
!conda install -y -c anaconda scipy=1.7
"""## Install RDKit (v2020.09.1.0)
RDKit is a cheminformatics toolkit for reading SMILES strings and writing and optimizing 3D molecular structures, among other things. Additional information can be found at: https://www.rdkit.org/. It will be installed as follows:
"""
!conda install -y -c rdkit rdkit
"""## Install Open Babel (v3.1.1)
Openbabel is used to read, write, and convert chemical file formats, obtain information about atomic connectivity, and perform constrained optimization. For additional information, see: https://open-babel.readthedocs.io/en/latest/ReleaseNotes/ob310.html. It will be installed using the following code:
"""
!conda install -y -c conda-forge openbabel
"""## Install Packmol (v20.3.1)
Packmol generates initial configurations for amorphous structures by utilizing 3D geometries of individual molecules. It packs all molecules into a predefined simulation box with a specified minimum intermolecular distance cutoff. For additional information, see: http://leandro.iqm.unicamp.br/m3g/packmol/home.shtml. The following codes will download the PACKMOL to /usr/local/ and install it.
"""
!git clone https://github.com/m3g/packmol.git /usr/local/packmol
cd /usr/local/packmol
!make
"""The code snippet below will create a variable for the PACKMOL executable."""
# Commented out IPython magic to ensure Python compatibility.
import os
os.environ['PACKMOL_EXEC'] = '/usr/local/packmol/packmol'
# %cd ..
"""## Install PySIMM (v1.1)
PySIMM is an open-source, object-oriented Python package for molecular simulations. Additional information can be found at: https://pysimm.org. This package is used for generating GAFF2 FF parameter files. The following code will install it and append it to the *sys.path*.
"""
!git clone -b 1.1 --single-branch https://github.com/polysimtools/pysimm
sys.path.insert(0,'/usr/local/pysimm')
"""##Install py3Dmol (v1.7.0)
Py3DMol is a python package for molecular visualization in iPython notebooks. For more details, visit https://github.com/avirshup/py3dmol. It is used to visualize 3D models generated by PSP. It will be installed as follows:
"""
!pip install py3Dmol
"""Function **visualize3D** shows 3D structure of molecules. It accepts **pdb**, **xyz** and **vasp** files as input."""
import py3Dmol
def visualize3D(input_file, supercell=[1,1,1]):
if input_file.split(".")[-1] == 'vasp':
!obabel -iposcar $input_file -ocif > model_wCell.cif
input_file = "model_wCell.cif"
with open(input_file) as ifile:
print(input_file)
system = "".join([x for x in ifile])
view = py3Dmol.view(width=400, height=300)
view.addModelsAsFrames(system)
view.setStyle({"stick":{}}) # 'colorscheme':'greenCarbon'
view.addUnitCell()
view.replicateUnitCell(supercell[0],supercell[1],supercell[2])
view.zoomTo()
view.show()
"""## Install Polymer Structure Predictor (PSP)
The code snippet below will download the PSP package from the Ramprasad GitHub account and install into /usr/local.
*Currently, PSP is a private package. To download the package, please use your GitHub login credentials.*
"""
# Commented out IPython magic to ensure Python compatibility.
# %cd /usr/local
!git clone https://github.com/Ramprasad-Group/PSP.git
# %cd PSP
!python setup.py install
"""## Verify that PSP and dependencies are installed properly
If all packages are properly installed, PSP is capable of building a hierarchy of polymer models using SMILES, starting from monomer, oligomers, infinite polymer chains, crystal, and amorphous structures. It can also generate GAFF2 FF parameter files for molecules and amorphous structures.
"""
!pip show PolymerStructurePredictor
from rdkit import Chem
from openbabel import openbabel as ob
import psp.AmorphousBuilder as am
"""**Note**: LAMMPS and LigParGen packages are not installed. So, it doesn't perform MD simulations or generates an OPLS FF parameter file. *If you encounter an error during the package installation, contact hsahu3@gatech.edu.*
## Install Ambertools (v3.1.1)
AmberTools is a set of programs for biomolecular simulation and analysis. Additional information can be found at: https://ambermd.org/AmberTools.php. This package is used for generating GAFF2 FF parameter files.
"""
!conda install -y -c conda-forge ambertools
"""The code snippet below will create a variable for the *antechamber* executable."""
import os
os.environ['ANTECHAMBER_EXEC'] = '/usr/local/bin/antechamber'
"""## Clean /content/ directory and move test files to /content/polymer_models/
The following code snippet will remove unnecessary files and move sample INPUT files to the /content directory.
"""
# Commented out IPython magic to ensure Python compatibility.
# %cd /content/
!rm Miniconda3-py37_4.10.3-Linux-x86_64.sh
!rm -rf sample_data/
!mkdir /content/polymer_models
!cp -r /usr/local/PSP/test/* /content/polymer_models/
"""# PSP Examples
There are four modules in PSP, each of which is designed to build a specific type of polymer model.
- $MoleculeBuilder$ (Linear and circular oligomers)
- $ChainBuilder$ (Finite and infinite polymer chains [*periodic*])
- $CrystalBuilder$ (Crystal models for small molecules and polymers)
- $AmorphousBuilder$ (Amorphous structures)
Note that $ChainBuilder$ accepts only polymer SMILES, whereas $MoleculeBuilder$ and $AmorphousBuilder$ accept both polymer and molecule SMILES strings.
>**Polymer SMILES**: Two linking atoms are indicated with asterisks (\[\*]).
For example, a SMILES string for polypropylene can be expressed as C(C(C)\[\*])\[\*]. For more details, please visit our *Polymer Genome (PG)* website at: https://www.polymergenome.org. To obtain a polymer SMILES string, use the $Draw Polymer$ tool of the PG platform.
>**Model visualization**: The function $visualize3D(input\_file, supercell=[1,1,1])$ can be used to visualize models in XYZ, PDB, and POSCAR ($.vasp$) file formats. Users must use the POSCAR file to see the boundary box or supercell structures. Supercell (default value: supercell=[1,1,1]): Users need to define the size of the supercell as [x,y,z] where x, y, z define replication of the unit cell along X, Y, and Z directions, respectively. Users can rotate and zoom in/out the molecule using the mouse pointer.
>**Download models** : Go to the **Files** tab (on the notebook's left side) and navigate to the relevant directory; then, simply select and download. If you don't see directories or models, refresh it.
In the */content/polymer_models* directory, we provided test samples for all model builders available in PSP. Let's go to the respective directories and start building models.
## MoleculeBuilder
$MoleculeBuilder$ is designed to build 3D structures of linear and circular oligomers. It accepts SMILES strings for both molecules and polymers as input, and output models are saved in a variety of formats, including xyz, pdb, poscar, and lammps data files, as well as GAFF2/OPLS parameter files. Several examples are provided here to show how we can quickly build models with $MoleculeBuilder$.
Let's go to the *polymer_models/MoleculeBuilder* directory:
"""
cd /content/polymer_models/MoleculeBuilder
"""### **Example 1**: Linear oligomers with endcap units
Now, we intend to construct a model for linear oligomers with endcap units. To construct such a model, users must define SMILES strings for the repeating/endcap units and the length of an oligomer. Note that the endcap SMILES include just one [\*\], as there is only one connecting atom. Let's build a model for **Pentaethylene** end-capped with **CF$_3$** and **CCl$_3$**.
As shown below, SMILES strings for the repeating unit of polyethylene and endcap units (**CF$_3$** and **CCl$_3$**) are provided. The oligomer is defined to have a length of 5 units. Let's run the code below to build a pentamer model.
"""
# Input SMILES strings and length of the oligomer
repeating_unit = '[*]CC[*]'
leftcap_unit = 'C(Cl)(Cl)(Cl)[*]'
rightcap_unit = 'C(F)(F)(F)[*]'
oligo_length = 5
# Create a pandas DataFrame
import pandas as pd
input_data = [['PE',repeating_unit,leftcap_unit,rightcap_unit]]
df_smiles = pd.DataFrame(input_data, columns = ['ID', 'smiles', 'LeftCap', 'RightCap'])
# Build models
import pandas as pd
import psp.MoleculeBuilder as mb
mol = mb.Builder(df_smiles, Length=[oligo_length], GAFF2=True)
results = mol.Build()
"""PSP terminated normally. All models are stored in *molecules/* directory. As mentioned before, models are stored in different file formats. The output files are named according to the following convention: \$*ID*_N\$*Length*_C\$*NumConf*.\$*file_format*."""
ls molecules/
"""Let's visualize a model built by $MoleculeBuilder$."""
visualize3D("molecules/PE_N5_C1.xyz")
"""###**Example 2**: Loop oligomers
$MoleculeBuilder$ also builds loop models of a polymer. Required inputs are SMILES of the repeating unit and length (in terms of the number of monomer units) of the loop oligomer. Below, we have shown an example of a loop model for deca(vinyl chloride). As can be seen, we have provided the SMILES string for poly(vinyl chloride) and defined the length as 10 units.
"""
# Input SMILES string and length of the oligomer
repeating_unit = 'C(C([*])Cl)[*]'
oligo_length = 10
# Create a pandas DataFrame
import pandas as pd
input_data = [['PVC',repeating_unit]]
df_smiles = pd.DataFrame(input_data, columns = ['ID', 'smiles'])
# Build models
import pandas as pd
import psp.MoleculeBuilder as mb
mol = mb.Builder(df_smiles, Length=[oligo_length], Loop=True)
results = mol.Build()
"""PSP normally terminated. All PSP output files are stored in *molecules/* directory. Let's visualize the loop model for deca(vinyl chloride)."""
visualize3D("molecules/PVC_N10_C1.vasp")
"""### Additional information
$MoleculeBuilder$ allows fine-tuning of atomic models by adjusting a variety of parameters or keywords. Users may also construct several models simultaneously by defining all the input SMILES strings in a comma-separated values (CSV) file and supplying them as a pandas DataFrame to $MoleculeBuilder$.
$MoleculeBuilder$ supports the following **parameters** or **keywords**:
- $\verb=Dataframe=$ **:** A *pandas Dataframe* that contains polymer IDs and SMILES strings representing repeating and endcap units.
- $\verb=ID_col=$ [*default option=`ID'*] **:** Column name for IDs of polymers/molecules in the Dataframe.
- $\verb=SMILES_col=$ [*default option=`smiles'*] **:** Column name for the polymer SMILES string (repeating unit) in the Dataframe.
- $\verb=LeftCap=$ [*default option=`LeftCap'*] **:** Column name for the SMILES string of left cap molecular fragment in the Dataframe.
- $\verb=RightCap=$ [*default option=`RightCap'*] **:** Column name for the SMILES string of right cap molecular fragment in the Dataframe.
- $\verb=Inter_Mol_Dis=$ [*default value=6$Å$*] **:** When periodic boundary conditions are applied (POSCAR format), it determines the minimum distance between two molecules in x, y, and z directions.
- $\verb=Length=$ [*default option=[1]*] **:** Monomer and oligomers can be generated by setting $\verb=Length=$ as desired. For example, if $\verb=Length=$=[1,2,5], monomer, dimer, and pentamer chains will be constructed.
- $\verb=NumConf=$ [*default value=1*] **:** PSP can create *N* number of conformers if $\verb=NumConf=$=*N* is assigned, where *N* is an integer.
- $\verb=Loop=$ [*default option=False*] **:** It specifies whether to produce a linear or circular oligomer model. For circular one, set $\verb=loop=$=True.
- $\verb=IrrStruc=$ [*default option=False*] **:** If $\verb=IrrStruc=$=True, a short MD simulation will be performed to generate a more relevant structure. *This feature is not available here.*
- $\verb=OPLS=$ [*default option=False*] **:** If $\verb=OPLS=$=True, an OPLS FF parameter file will be generated for each model.
- $\verb=GAFF2=$ [*default option=False*] **:** If $\verb=GAFF2=$=True, an GAFF2 FF parameter file will be generated for each model.
- $\verb=GAFF2_atom_typing=$ [*default option=`pysimm'*] **:** Use *pysimm* or *antechamber* for atom typing.
- $\verb=OutDir=$ [*default option=`molecules'*] **:** The output directory, which contains output models of all molecules in XYZ ($\textit{.xyz}$), POSCAR ($\textit{.vasp}$), and PDB ($\textit{.pdb}$) formats, including FF parameter files.
**NOTE**: In the colab notebook, $\verb=IrrStruc=$ and $\verb=OPLS=$ do not work. $\verb=IrrStruc=$ necessitates the installation of LAMMPS.
$\verb=OPLS=$ requires the BOSS executable and the LigParGen package. Please contact Prof. William L. Jorgensen for the BOSS executable, and to install LigparGen, follow the instructions at http://zarbi.chem.yale.edu/ligpargen.
As previously stated, users can provide a CSV file to build multiple models at the same time. An example CSV file containing SMILES strings of one molecule and two repeating polymer units, including endcap units, is provided below. Note that endcap units for **Mol1** are not provided, and the corresponding columns are left blank.
"""
cat linear_oligomer_with_endcaps.csv
"""Below is an example of a python code snippet required for $MoleculeBuilder$. The definitions of keywords and parameters are provided above."""
cat molecule_model.py
"""###**Example 3**: Build multiple models using a CSV file
Now, we'll build models for all the SMILES provided in the CSV file (*linear_oligomer_with_endcaps.csv*). Also, we will simultaneously build monomer and pentamer models for each case, which is defined by $\verb=Length=$. GAFF2 parameter files will be generated, which can then be used directly in LAMMPS MD simulations. All output models will be stored in */models* directory.
"""
df_smiles = pd.read_csv("linear_oligomer_with_endcaps.csv")
mol = mb.Builder(
df_smiles,
ID_col="ID",
SMILES_col="smiles",
LeftCap = "LeftCap",
RightCap ='RightCap',
OutDir='models',
Inter_Mol_Dis=6,
Length=[1,5],#16
NumConf=1,
Loop=False,
NCores=1,
IrrStruc=False,
OPLS=False,
GAFF2=True,
GAFF2_atom_typing='antechamber'
)
results = mol.Build()
"""PSP terminated normally. Users can visualize models stored in */models* directory using the $\verb=visualize3D()=$ function. **Note:** it accepts *.pbd*, *.xyz* and *.vasp* file formats."""
ls models/
"""Users can modify the supplied CSV file or upload their own to create the desired models.
## ChainBuilder
$ChainBuilder$ builds periodic 3D structures of finite and infinite polymer chains (periodic) using polymer SMILES strings. Notably, all generated models are aligned along the Z-axis. Infinite polymer chains are stored in POSCAR format ($.vasp$), while oligomers are stored in POSCAR and XYZ formats. Several examples are provided to show how quickly infinite chain models can be built using $ChainBuilder$.
Let's go to the *polymer_models/ChainBuilder* directory:
"""
cd /content/polymer_models/ChainBuilder/
"""### **Example 1**: Models for infinite polymer chains
The minimum input required to construct a periodic chain model is the repeating unit SMILES string. Here's an example of creating an infinite polymer chain model for poly(vinyl chloride).
"""
# Input SMILES string
repeating_unit = 'C(C(CC([*])Cl)Cl)[*]'
# Create a pandas DataFrame
import pandas as pd
input_data = [['PVC',repeating_unit]]
df_smiles = pd.DataFrame(input_data, columns = ['ID', 'smiles'])
# Build models
import psp.ChainBuilder as ChB
chain_builder = ChB.Builder(Dataframe=df_smiles)
results = chain_builder.BuildChain()
"""PSP normally terminated. The polymer model is saved in the directory: *chains/PVC/*. Let's see how the model looks."""
visualize3D("chains/PVC/PVC.vasp")
"""### Additional information
$ChainBuilder$ allows fine-tuning of chain models by adjusting a variety of parameters or keywords. Users can also build multiple models simultaneously by defining all of the input SMILES strings in a CSV file and supplying them to $ChainBuilder$ as a pandas DataFrame.
$ChainBuilder$ allows altering the following **parameters** or **keywords**:
- $\verb=Dataframe=$ **:** A pandas Dataframe having polymer IDs and SMILES strings.
- $\verb=ID_col=$ [*default option=`ID'*] **:** Column name for the polymer IDs in the Dataframe.
- $\verb=SMILE_col=$ [*default option=`smiles'*] **:** Column name for the polymer SMILES strings (repeating unit) in the Dataframe.
- $\verb=Length=$ [*default option=`n'*] **:** It defines the lengths of oligomers. For example, if $\verb=Length=$=[1,2,5,*`n'*], monomer, dimer, pentamer, and infinite polymer chain will be created. Note that *`n'* stands for infinite polymer chains.
- $\verb=Method=$ [*default option=`SA'*] **:** The default option for $\verb=method=$ is *`SA'*, which means that the simulated annealing (SA) method will be used. If method=*`Dimer'*, then the $ChainBuilder$ will create dimers by rotating one monomer unit while keeping the other one fixed. This can quickly create polymer chains; however, the quality of chains may not be good, and the number of atoms in a unit cell will be high.
- $\verb=Steps=$ [*default value=20*] **:** The computational cost of $ChainBuilder$ mainly depends on how many steps and substeps are used in the SA method. \verb=Steps= is the outer loop of the SA, and we recommend to set a large value ($\verb=Steps=$=100 or more) if there are many rotatable single bonds in a molecule. To avoid unnecessary calculations, if the SA doesn't find a better geometry after three steps, it is terminated.
- $\verb=Substeps=$ [*default value=10*] **:** $\verb=Substeps=$ is the inner loop of the SA, and $\verb=Substeps=$=20 should be sufficient for most of the cases.
- $\verb=IntraChainCorr=$ [*default option=True*] **:** $ChainBuilder$ sometimes generates a twisted polymer (often helical) backbone. If $\verb=IntraChainCorr=$=True, $ChainBuilder$ makes the backbone straight by gradually moving two sets of the dummy and linking atoms towards opposite directions on the $z$-axis.
- $\verb=Tol_ChainCorr=$ [*default value=50*] **:** While straightening a monomer, we apply a cutoff for a sudden energy bump (in KJ/mol) defined by $\verb=Tol_ChainCorr=$. Once it is reached, $ChainBuilder$ doesn't stretch the molecule further.
- $\verb=OutDir=$ [*default option=`chains'*] **:** This key allows users to define a directory where they want to store output files.
As previously stated, users can provide a CSV file to build multiple chain models at the same time. An example CSV file containing repeating unit SMILES strings for 6 different polymers is provided below.
"""
cat input_chain.csv
"""The python code snippet required for $ChainBuilder$ is shown below.
The definitions of keywords and parameters are provided above.
"""
cat chain_model.py
"""###**Example 2**: Build multiple chain models
Using the given CSV file (*input_chain.csv*), let's build infinite chain and pentamer models for 6 polymers simultaneously.
"""
import pandas as pd
import psp.ChainBuilder as ChB
df_smiles = pd.read_csv("input_chain.csv")
chain_builder = ChB.Builder(
Dataframe=df_smiles,
ID_col="PID",
SMILES_col="smiles_polymer",
NumConf=1,
Length=["n", 5],
Steps=20,
Substeps=20,
MonomerAng="intense",
DimerAng="intense",
Method="SA",
NCores=0,
OutDir='chains',
Tol_ChainCorr=50,
)
results = chain_builder.BuildChain()
"""PSP terminated normally. All infinite and pentamer chain models are stored in *chains/* directory. The output files are named according to the following convention: *\$ID_1.\$file_format* (infinite chain) and *\$ID_N\$Length.\$file_format* (oligomers)."""
!ls chains
!ls chains/ABPBO
"""Let's visualize a pentamer chain model generated using $ChainBuilder$:"""
visualize3D("chains/ABPBO/ABPBO_N5.vasp")
"""To generate your desired chain models, modify the CSV file or upload your own and adjust the parameters as needed.
## CrystalBuilder
$CrystalBuilder$ generates crystal models for small molecules and infinite polymer chains. It accepts only POSCAR files as INPUT that can be generated using $ChainBuilder$. Users can also provide their own POSCAR files as INPUT. Note that all input chain models are expected to be aligned along the Z-axis. Examples of crystal models are given below, starting with the construction of a chain model, which is then used to build crystal models.
Let's go to the *polymer_models/CrystalBuilder* directory
"""
# Commented out IPython magic to ensure Python compatibility.
# %cd /content/polymer_models/CrystalBuilder/
"""###**Example 1**: Crystal structures for an infinite polymer chain
First, we'll use $ChainBuilder$ to create an infinite chain model for poly(vinyl chloride). See the $ChainBuilder$ section for more information. Second, using the generated chain model, several crystal models will be built by placing two chains in a simulation box and translating and rotating one while fixing the other. The SMILES string of a polymer repeating unit is the minimum requirement for building crystal models.The following is an example of crystal models for poly(vinyl chloride).
"""
# Input SMILES string
repeating_unit = 'C(C(CC([*])Cl)Cl)[*]'
# Create a pandas DataFrame
import pandas as pd
input_data = [['PVC',repeating_unit]]
df_smiles = pd.DataFrame(input_data, columns = ['ID', 'smiles'])
# Build chain models
import psp.ChainBuilder as ChB
chain_builder = ChB.Builder(Dataframe=df_smiles, Inter_Chain_Dis=0.0)
results = chain_builder.BuildChain()
# Build crystal models
import psp.CrystalBuilder as CrB
vasp_input_list = ['chains/PVC/PVC.vasp']
crystal_builder = CrB.Builder(VaspInp_list=vasp_input_list, NSamples=3, Polymer=True)
results = crystal_builder.BuildCrystal()
"""PSP terminated normally. We created 27 crystal models for Poly(vinyl chloride). Models are stored in *crystals/PVC/* directory and follow the following naming convention: *cryst_out-$N.vasp*, where *N* is an integer, indicating the model's serial number.
**NOTE** : While rotating and translating polymer chains, identical crystal structures can be generated. Before proceeding with downstream ab-initio simulations, it is strongly advised to filter out identical models and keep only unique ones.
"""
ls crystals/PVC
"""Let's visualize a crystal model for poly(vinyl chloride):"""
visualize3D("crystals/PVC/cryst_out-06.vasp")
"""###**Example 2**: User defined configurations
$CrystalBuilder$ allows us to construct a specific set of desired crystal models by providing translational distance and rotational angles. Let's build a T-shape crystal model for penta(vinyl chloride), including a few models close to it. To get a T-shape model, we need to rotate molecule B around its Y-axis (See Figure below).


First, the user will generate a pentamer model using $ChainBuilder$. Second, two pentamer chains will be placed in a simulation box and translated/rotated as specified by the user. As INPUT, the user must provide the repeating unit SMILES, length of an oligomer, and transitional distance or rotational angles in a list of lists format, as shown below.
"""
# Input SMILES string
repeating_unit = 'C(C([*])Cl)[*]'
length_oligomer = 5
samples = [[0],[0],[0],[0],[60,80,90,100,120],[0],[0],[0]]
# Create a pandas DataFrame
import pandas as pd
input_data = [['pvc',repeating_unit]]
df_smiles = pd.DataFrame(input_data, columns = ['ID', 'smiles'])
# Build chain models
import psp.ChainBuilder as ChB
chain_builder = ChB.Builder(Dataframe=df_smiles, Length=[length_oligomer], Inter_Chain_Dis=0.0)
results = chain_builder.BuildChain()
# Build crystal models
import psp.CrystalBuilder as CrB
vasp_input_list = ['chains/pvc/pvc_N5.vasp']
crystal_builder = CrB.Builder(
VaspInp_list=vasp_input_list,
NSamples=samples,
Polymer=False,
OutDir='selective_models'
)
results = crystal_builder.BuildCrystal()
"""Crystal models are stored at *selective_models/PVC_N5/*. As can be seen, there are only five models generated, including a T-shape model (*cryst_out-3.vasp*)."""
!ls selective_models/pvc_N5/
visualize3D("selective_models/pvc_N5/cryst_out-3.vasp")
"""### Additional information
$CrystalBuilder$ allows users to modify a number of parameters/keywords in order to create desired crystal models. It can build crystal models of multiple molecules/polymers at the same time by providing $CrystalBuilder$ with a list of chain models.
$CrystalBuilder$ supports the following **parameters** or **keywords**:
- $\verb=VASPInp_list=$ **:** A list of names of oligomer or polymer chains with their PATH. Make sure that $CrystalBuilder$ can find them in the specified locations.
- $\verb=NSamples=$ [*default value=5*] **:** It determines the number of crystal models to be generated. It can be an integer or a list of lists. If it is an integer, it will be considered as the number of samples for each degree of freedom. For polymer and small molecules, the total number of crystal models will be $\verb=NSamples=^3$ and $\verb=NSamples=^8$, respectively. A specific set of crystal models can be generated by specifying translation distance and rotation angles in a list of lists.
- $\verb=InputRadius=$ [*default option=`auto'*] **:** This key defines the distance between $z$-axes of two chains, which is used to rotate one chain with respect to another fixed one. If $\verb=InputRadius=$=`auto', $CrystalBuilder$ will calculate an approximate distance by considering $x$ and $y$ coordinates of the input geometry and the minimum atomic distance defined by $\verb=MinAtomicDis=$. To define your own value, change this to a float or an integer (in $Å$).
- $\verb=MinAtomicDis=$ [*default value=2.0*] **:** This key defines the minimum distance between any two atoms of two chains in $Å$. Note that this key works if $\verb=InputRadius=$=`auto'.
- $\verb=Polymer=$ [*default option=`True'*]: In the case of polymers, fixing the position of the first polymer, the second one is moved in three distinct ways, i.e., (1) translated along the $z$-axis, (2) rotated around its own axis, (3) rotated around the first polymer chain. However, for oligomers, more degrees of freedom must be considered to capture every possible crystal models, which can be activated by setting $\verb=Polymer=$=`False'.
- $\verb=Optimize=$ [*default option=`False'*] **:** PSP generated crystal models can be further optimized using UFF and conjugate gradient method by setting $\verb=Optimize=$=`True'.
- $\verb=NumCandidate=$ [*default value=50*] **:** This keyword is activated when $\verb=Optimize=$=`True', determining the number of crystal models selected based on the total energy computed by UFF. Note that high-energy models are discarded.
- $\verb=OutDir=$ [*default option=`crystals'*]: This key allows users to define a directory where they want to store output files.
The code snippet below demonstrates how to build crystal models from a set of repeated unit SMILES strings (provided as a CSV file).
"""
cat crystal_model.py
"""## AmorphousBuilder
$AmorphousBuilder$ builds models of amorphous organic materials. It accepts SMILES strings for both molecules and polymers. In this section, we'll show you how to build a variety of amorphous structures using SMILES strings as INPUT.
Let's go to the *polymer_models/AmorphousBuilder* directory.
"""
# Commented out IPython magic to ensure Python compatibility.
# %cd /content/polymer_models/AmorphousBuilder/
"""###**Example 1**: Amorphous structure of a polymer
Users can build an amorphous model for polymers by providing repeated unit SMILES, length of an oligomer, and number molecules to be included in the simulation box. The below example shows a toy amorphous model for polyethylene, where 10 pentaethylene molecules are packed in a cubic box.
"""
# Input SMILES string
repeating_unit_PE = '[*]CC[*]'
Length_PE = 5
Number_molecule_PE = 10
# Create a pandas DataFrame
import pandas as pd
input_data = [['PE', repeating_unit_PE, Length_PE, Number_molecule_PE, 1, False]]
input_df = pd.DataFrame(input_data, columns = ['ID', 'smiles', 'Len', 'Num', 'NumConf', 'Loop'])
import psp.AmorphousBuilder as ab
amor = ab.Builder(input_df, density=0.65, box_type='c', OutDir='amor_model')
amor.Build()
"""PSP terminated normally. All models are stored in *amor_model/* directory. Output files are in POSCAR ($.vasp$) and LAMMPS data ($.data$) formats. Individual models are stored in *amor_model/molecules/* and PACKMOL output files are saved in *amor_model/packmol/* directory."""
ls amor_model
"""Let's take a look at the amorphous model."""
visualize3D("amor_model/amor_model.vasp")
"""#### Force field parameter file
PSP also allows generating a GAFF2 FF parameter file for the amorphous model. It can use either AmberTools (antechamber) or PySIMM for atom typing.
"""
amor.get_gaff2(output_fname='amor_gaff2.lmps', atom_typing='antechamber')
"""For the amorphous model, a GAFF2 parameter file is now generated: *amor_gaff2.lmps*"""
ls amor_model
"""###**Example 2**: Build amorphous models with O2 gas
Next, we'll learn how to incorporate multiple types of oligomers/molecules into an amorphous model. For example, oxygen molecules can be incorporated into the polyethylene system. As shown in the code snippet below, we must supply the SMILES strings and the number of molecules for both polyethylene and oxygen to be included in an amorphous model.
"""
# Input SMILES string
repeating_unit_PE = '[*]CC[*]'
Length_PE = 5
Number_molecule_PE = 4
Oxygen = 'O=O'
Number_molecule_Oxy = 10
# Create a pandas DataFrame
import pandas as pd
input_data = [['PE', repeating_unit_PE, Length_PE, Number_molecule_PE, 1, False], ['O2', Oxygen, 1, Number_molecule_Oxy, 1, False]]
input_df = pd.DataFrame(input_data, columns = ['ID', 'smiles', 'Len', 'Num', 'NumConf', 'Loop'])
import psp.AmorphousBuilder as ab
amor = ab.Builder(input_df, density=0.85, box_type='c', OutDir='amor_model2')
amor.Build()
"""PSP normally terminated. Let's take a look at the amorphous model we created."""
visualize3D("amor_model2/amor_model.vasp")
"""### Additional information
$AmorphousBuilder$ allows altering keywords or parameters to fine-tune an amorphous model. A CSV file may be used to supply SMILES strings and other information for all polymers/molecules that will be included in an amorphous model.
$AmorphousBuilder$ supports the following **parameters** or **keywords**:
- $\verb=Dataframe=$ **:** A pandas Dataframe that contains polymer IDs, SMILES strings representing repeating and endcap units, length of oligomers, number of oligomers/molecules, chain type (linear or loop), etc.
- $\verb=ID_col=$ [*default option=`ID'*] **:** Column name for molecule/oligomer IDs in the Dataframe.
- $\verb=SMILES_col=$ [*default option=`smiles'*] **:** Column name for polymer SMILES strings (repeating unit) in the Dataframe.
- $\verb=NumMole=$ [*default option=`Num'*] **:** Column name for the number of each molecule in the Dataframe.
- $\verb=Length=$ [*default option=`Len'*] **:** Column name for the length of each oligomer in the Dataframe.
- $\verb=LeftCap=$ [*default option=`LeftCap'*] **:** Column name for the SMILES string of left cap molecular fragment in the Dataframe.
- $\verb=RightCap=$ [*default option=`RightCap'*] **:** Column name for the SMILES string of right cap molecular fragment in the Dataframe.
- $\verb=Loop=$ [*default option=`Loop'*] **:** Column name for selecting linear or circular chains.
- $\verb=OutFile=$ [*default option=`amor_model'*]: The names of the output files exported in POSCAR and LAMMPS data formats.
- $\verb=OutDir=$ [*default option=`amorphous_models'*] **:** The output directory, which contains all files, including POSCAR and LAMMPS data files.
- $\verb=density=$ [*default value=0.65 g/cm$^3$*] **:** The density of a simulation box.
- $\verb=tol_dis=$ [*default value=2.0*] **:** The minimum distance between any two molecules in a simulation box.
- $\verb=box_type=$ [*default option=`c'*] **:** The shape of a simulation box. Here, *`c'* and *`r'* denote cubic and rectangular, respectively.
- $\verb=incr_per=$ [*default value=0.4*] **:** If the shape of a box is rectangular, $\verb=incr_per=$ determines the length of z axis (l$_{z}$). l$_{z}$ = Volume$^{1/3}$ + $\verb=incr_per=$ $\times$ Volume$^{1/3}$.
- $\verb=box_size=$ [No default value]: It is a list of six numbers that offers an alternative method of explicitly defining the box size. [*xmin, xmax, ymin, ymax, zmin, zmax*]
A sample CSV file containing data for three oligomers is provided below. Users can add more molecules/oligomers to the amorphous model by appending new raws to the list.
"""
cat input_amor.csv
"""Below is an example of a code snippet required for $AmorphousBuilder$. The definitions of keywords and parameters are provided above."""
cat amor_model.py
"""###**Example 3**: Build an amorphous model that includes multiple polymers/molecules
The code snippet below will read all of the relevant information about molecules/polymers from a CSV file (*input_amor.csv*) and then generate an amorphous model.
"""
import pandas as pd
import psp.AmorphousBuilder as ab
input_df = pd.read_csv("input_amor.csv")
amor = ab.Builder(
input_df,
ID_col="ID",
SMILES_col="smiles",
OutDir='amor_model',
Length='Len',
NumConf='NumConf',
NumModel=1,
LeftCap = "LeftCap",
RightCap = "RightCap",
Loop='Loop',
density=0.85,
box_type='c',
)
amor.Build()
"""Let's take a look at the amorphous model."""
visualize3D("amor_model/amor_model.vasp")
"""# References and further reading
- Harikrishna Sahu, Huan Tran, Kuan-Hsuan Shen, Joseph H. Montoya, Rampi Ramprasad, *PSP: A python toolkit for predicting 3D models of polymers*, J. Chem. Theory Comput., **2022**. DOI: 10.1021/acs.jctc.2c00022
- User Manual: https://github.com/Ramprasad-Group/PSP/blob/master/documentation/PSP_user_manual.pdf
- https://github.com/Ramprasad-Group/PSP
"""
| 567.796899
| 249,487
| 0.945075
| 16,008
| 366,229
| 21.60576
| 0.680722
| 0.001203
| 0.000489
| 0.000625
| 0.024677
| 0.020147
| 0.017822
| 0.015639
| 0.0152
| 0.013479
| 0
| 0.134519
| 0.014283
| 366,229
| 645
| 249,488
| 567.796899
| 0.823561
| 0.002889
| 0
| 0.318627
| 1
| 0
| 0.135387
| 0.052436
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.142157
| null | null | 0.004902
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bd6e8b67b15f5ca5c21f230302daaa4e6c227964
| 59
|
py
|
Python
|
python3/__init__.py
|
deanelzinga/google-diff-match-patch
|
90823d875839edcd690e88fe785a936bc87f2fde
|
[
"Apache-2.0"
] | 5,211
|
2018-01-30T10:31:38.000Z
|
2022-03-31T12:38:38.000Z
|
python3/__init__.py
|
deanelzinga/google-diff-match-patch
|
90823d875839edcd690e88fe785a936bc87f2fde
|
[
"Apache-2.0"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
python3/__init__.py
|
deanelzinga/google-diff-match-patch
|
90823d875839edcd690e88fe785a936bc87f2fde
|
[
"Apache-2.0"
] | 1,008
|
2018-02-12T17:08:53.000Z
|
2022-03-30T11:24:31.000Z
|
from .diff_match_patch import diff_match_patch, patch_obj
| 19.666667
| 57
| 0.864407
| 10
| 59
| 4.6
| 0.6
| 0.391304
| 0.608696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 59
| 2
| 58
| 29.5
| 0.867925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
bdade5aa91ae469459270b10546366003a412bd3
| 43
|
py
|
Python
|
837477/app/models/__init__.py
|
837477/Django-Study
|
a55006f1788080ba4f57b46bc7f0aaa5246c7b6e
|
[
"MIT"
] | null | null | null |
837477/app/models/__init__.py
|
837477/Django-Study
|
a55006f1788080ba4f57b46bc7f0aaa5246c7b6e
|
[
"MIT"
] | null | null | null |
837477/app/models/__init__.py
|
837477/Django-Study
|
a55006f1788080ba4f57b46bc7f0aaa5246c7b6e
|
[
"MIT"
] | null | null | null |
from .sample import Sample, SampleReference
| 43
| 43
| 0.860465
| 5
| 43
| 7.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bdc0c9c198cea4c9aeca3bd89f8df8fb7c56a9d7
| 1,643
|
py
|
Python
|
testapp/render/views.py
|
Bit03/django-text2img
|
8da3af93a691ed26cefcc670d50a0f1d605d3657
|
[
"MIT"
] | 2
|
2022-01-24T04:23:25.000Z
|
2022-01-24T07:16:43.000Z
|
testapp/render/views.py
|
Bit03/django-text2img
|
8da3af93a691ed26cefcc670d50a0f1d605d3657
|
[
"MIT"
] | 6
|
2021-04-08T21:45:54.000Z
|
2022-02-10T14:06:09.000Z
|
testapp/render/views.py
|
Bit03/django-text2img
|
8da3af93a691ed26cefcc670d50a0f1d605d3657
|
[
"MIT"
] | 2
|
2018-05-27T16:49:26.000Z
|
2019-05-15T04:34:49.000Z
|
import json
from django.views.generic import View
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from text2img.render_text import RenderText
class RenderTextView(View):
def get(self, request, *args, **kwargs):
return HttpResponse("need post json data")
def post(self, request, *args, **kwargs):
json_data = json.loads(request.body)
r = RenderText(**json_data)
content = r.draw_image_output()
return HttpResponse(content=content, content_type="image/jpeg")
@csrf_exempt
def dispatch(self, request, *args, **kwargs):
if request.method.lower() in self.http_method_names:
handler = getattr(
self, request.method.lower(), self.http_method_not_allowed
)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
class RenderListTextView(View):
def get(self, request, *args, **kwargs):
return HttpResponse("need post json data")
def post(self, request, *args, **kwargs):
json_data = json.loads(request.body)
r = RenderText(**json_data)
content = r.draw_24h_image()
return HttpResponse(content=content, content_type="image/jpeg")
@csrf_exempt
def dispatch(self, request, *args, **kwargs):
if request.method.lower() in self.http_method_names:
handler = getattr(
self, request.method.lower(), self.http_method_not_allowed
)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
| 34.229167
| 74
| 0.652465
| 195
| 1,643
| 5.34359
| 0.25641
| 0.084453
| 0.130518
| 0.120921
| 0.790787
| 0.790787
| 0.790787
| 0.790787
| 0.790787
| 0.790787
| 0
| 0.00241
| 0.24224
| 1,643
| 47
| 75
| 34.957447
| 0.834538
| 0
| 0
| 0.717949
| 0
| 0
| 0.035301
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.128205
| 0.051282
| 0.487179
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bdc3a6e726cd5f261f14f9b8efd8aa58b600c4ef
| 208
|
py
|
Python
|
server/expenses/admin.py
|
cristicismas/top-budget
|
d61db578287b2f77c12032045fca21e58c9ae1eb
|
[
"MIT"
] | null | null | null |
server/expenses/admin.py
|
cristicismas/top-budget
|
d61db578287b2f77c12032045fca21e58c9ae1eb
|
[
"MIT"
] | 11
|
2019-12-05T15:21:40.000Z
|
2021-10-05T22:08:17.000Z
|
server/expenses/admin.py
|
cristicismas/top-budget
|
d61db578287b2f77c12032045fca21e58c9ae1eb
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Expense, Category, Location, Source
admin.site.register(Expense)
admin.site.register(Category)
admin.site.register(Location)
admin.site.register(Source)
| 23.111111
| 55
| 0.817308
| 28
| 208
| 6.071429
| 0.428571
| 0.211765
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081731
| 208
| 8
| 56
| 26
| 0.890052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
bde9fafe024c4d30c30bdea43d026d8256d6b927
| 135
|
py
|
Python
|
sympde/__init__.py
|
pyccel/sympde
|
69e6a6634b2045d46286a0453cb66deab5f2e76c
|
[
"MIT"
] | 12
|
2018-09-02T17:50:19.000Z
|
2022-03-08T01:26:47.000Z
|
sympde/__init__.py
|
pyccel/sympde
|
69e6a6634b2045d46286a0453cb66deab5f2e76c
|
[
"MIT"
] | 51
|
2019-08-18T14:50:01.000Z
|
2021-09-16T13:54:58.000Z
|
sympde/__init__.py
|
pyccel/sympde
|
69e6a6634b2045d46286a0453cb66deab5f2e76c
|
[
"MIT"
] | 3
|
2019-10-04T18:05:48.000Z
|
2020-10-17T02:15:10.000Z
|
from .version import __version__
from .core import *
from .topology import *
from .exterior import *
from .printing import *
| 22.5
| 34
| 0.711111
| 16
| 135
| 5.75
| 0.4375
| 0.326087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 135
| 5
| 35
| 27
| 0.87619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.2
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
da14b7a4c7b8d7555d4d86eff8cfe40d36bc0d3b
| 21,707
|
py
|
Python
|
explore/zhou.py
|
sjshao09/KaggleRH
|
da22d6b42ecf4172d8c35313c35334ee8cc28c85
|
[
"MIT"
] | null | null | null |
explore/zhou.py
|
sjshao09/KaggleRH
|
da22d6b42ecf4172d8c35313c35334ee8cc28c85
|
[
"MIT"
] | null | null | null |
explore/zhou.py
|
sjshao09/KaggleRH
|
da22d6b42ecf4172d8c35313c35334ee8cc28c85
|
[
"MIT"
] | null | null | null |
# Mostly a lot of silliness at this point:
# Main contribution (50%) is based on Reynaldo's script with a linear transformation of y_train
# that happens to fit the public test data well
# and may also fit the private test data well
# if it reflects a macro effect
# but almost certainly won't generalize to later data
# Second contribution (20%) is based on Bruno do Amaral's very early entry but
# with an outlier that I deleted early in the competition
# Third contribution (30%) is based on a legitimate data cleaning,
# probably by gunja agarwal (or actually by Jason Benner, it seems,
# but there's also a small transformation applied ot the predictions,
# so also probably not generalizable),
# This combo being run by Andy Harless on June 4
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
#import seaborn as sns
from sklearn import model_selection, preprocessing
import xgboost as xgb
import datetime
########################## Gunja Model ############################
# ----------------- Settings ----------------- #
EN_CROSSVALIDATION = True
######################### Train for Investment Data ############################
DEFAULT_TRAIN_ROUNDS = 432
#load files
df = pd.read_csv('input/train.csv', parse_dates=['timestamp'])
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
macro = pd.read_csv('input/macro.csv', parse_dates=['timestamp'])
# ----------------- Data Cleaning ----------------- #
# Training Set
df.loc[df.id==13549, 'life_sq'] = 74
df.loc[df.id==10092, 'build_year'] = 2007
df.loc[df.id==10092, 'state'] = 3
df.loc[df.id==13120, 'build_year'] = 1970
df.loc[df.id==25943, 'max_floor'] = 17
# Clean - Full Sq
df = df[(df.full_sq>1)|(df.life_sq>1)]
df.loc[(df.full_sq<10) & (df.life_sq>1), 'full_sq'] = df.life_sq
df = df[df.full_sq<400]
# Clean - Life Sq
df.loc[df.life_sq > df.full_sq*4, 'life_sq'] = df.life_sq/10
df.loc[df.life_sq > df.full_sq, 'life_sq'] = np.nan
df.loc[df.life_sq < 5, 'life_sq'] = np.nan
df.loc[df.life_sq < df.full_sq * 0.3, 'life_sq'] = np.nan
df = df[df.life_sq<300]
# Clean - Kitch Sq
df.loc[df.kitch_sq < 2, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.full_sq * 0.5, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
df.loc[df.build_year<1000, 'build_year'] = np.nan
df.loc[df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
df.loc[df.num_room<1, 'num_room'] = np.nan
df.loc[(df.num_room>4) & (df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
df.loc[df.floor==0, 'floor'] = np.nan
df.loc[df.max_floor==0, 'max_floor'] = np.nan
df.loc[(df.max_floor==1) & (df.floor>1), 'max_floor'] = np.nan
df.loc[df.max_floor>50, 'max_floor'] = np.nan
df.loc[df.floor>df.max_floor, 'floor'] = np.nan
# Test Set
test_df.loc[test_df.id==30938, 'full_sq'] = 37.8
test_df.loc[test_df.id==35857, 'full_sq'] = 42.07
test_df.loc[test_df.id==35108, 'full_sq'] = 40.3
test_df.loc[test_df.id==33648, 'num_room'] = 1
# Clean - Full Sq
test_df.loc[(test_df.full_sq<10) & (test_df.life_sq>1), 'full_sq'] = test_df.life_sq
# Clean - Life Sq
test_df.loc[test_df.life_sq>test_df.full_sq*2, 'life_sq'] = test_df.life_sq/10
test_df.loc[test_df.life_sq > test_df.full_sq, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < 5, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < test_df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
test_df.loc[test_df.kitch_sq < 2, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.full_sq * 0.5, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
test_df.loc[test_df.build_year<1000, 'build_year'] = np.nan
test_df.loc[test_df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
test_df.loc[test_df.num_room<1, 'num_room'] = np.nan
test_df.loc[(test_df.num_room>4) & (test_df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
test_df.loc[test_df.floor==0, 'floor'] = np.nan
test_df.loc[test_df.max_floor==0, 'max_floor'] = np.nan
test_df.loc[(test_df.max_floor==1) & (test_df.floor>1), 'max_floor'] = np.nan
test_df.loc[test_df.max_floor>50, 'max_floor'] = np.nan
test_df.loc[test_df.floor>test_df.max_floor, 'floor'] = np.nan
# ----------------- New Features ----------------- #
# month_year_cnt
month_year = (df.timestamp.dt.month + df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
df['month_year_cnt'] = month_year.map(month_year_cnt_map)
month_year = (test_df.timestamp.dt.month + test_df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
test_df['month_year_cnt'] = month_year.map(month_year_cnt_map)
# week_year_cnt
week_year = (df.timestamp.dt.weekofyear + df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
df['week_year_cnt'] = week_year.map(week_year_cnt_map)
week_year = (test_df.timestamp.dt.weekofyear + test_df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
test_df['week_year_cnt'] = week_year.map(week_year_cnt_map)
# month
df['month'] = df.timestamp.dt.month
test_df['month'] = test_df.timestamp.dt.month
# day of week
df['dow'] = df.timestamp.dt.dayofweek
test_df['dow'] = test_df.timestamp.dt.dayofweek
# floor/max_floor
df['floor/max_floor'] = df['floor'] / df['max_floor'].astype(float)
test_df['floor/max_floor'] = test_df['floor'] / test_df['max_floor'].astype(float)
# kitch_sq/full_sq
df["kitch_sq/full_sq"] = df["kitch_sq"] / df["full_sq"].astype(float)
test_df["kitch_sq/full_sq"] = test_df["kitch_sq"] / test_df["full_sq"].astype(float)
# Avg Room Size
df['avg_room_size'] = df['life_sq'] / df['num_room'].astype(float)
test_df['avg_room_size'] = test_df['life_sq'] / test_df['num_room'].astype(float)
# Apartment Name
df['apartment_name'] = df['sub_area'] + df['metro_km_avto'].astype(str)
test_df['apartment_name'] = test_df['sub_area'] + test_df['metro_km_avto'].astype(str)
# ----------------- Train for Investment Data ----------------- #
df = df[df.product_type=="Investment"]
#df = df[df.price_doc>1000000]
df = df[df.price_doc/df.full_sq <= np.exp(13.05)]
#df = df[df.price_doc/df.full_sq >= np.exp(9)]
test_df.product_type = "Investment"
y_train = df["price_doc"] * 0.97
x_train = df.drop(["id", "timestamp", "price_doc"], axis=1)
x_test = test_df.drop(["id", "timestamp"], axis=1)
x_all = pd.concat([x_train, x_test])
# Feature Encoding
for c in x_all.columns:
if x_all[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_all[c].values))
x_all[c] = lbl.transform(list(x_all[c].values))
# Separate Training and Test Data
num_train = len(x_train)
x_train = x_all[:num_train]
x_test = x_all[num_train:]
dtrain = xgb.DMatrix(x_train, y_train)
dtest = xgb.DMatrix(x_test)
# ----------------- Cross Validation ----------------- #
xgb_params = {
'eta': 0.03,
'max_depth': 5,
'subsample': 0.7,
'colsample_bytree': 1,
'objective': 'reg:linear',
'eval_metric': 'rmse',
'silent': 1,
'seed': 0
}
if EN_CROSSVALIDATION:
print "[INFO] Cross Validation..."
cv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=20,
verbose_eval=20, show_stdv=True)
DEFAULT_TRAIN_ROUNDS = len(cv_output)
print "[INFO] Optimal Training Rounds =", DEFAULT_TRAIN_ROUNDS
# ----------------- Training ----------------- #
print "[INFO] Training for", DEFAULT_TRAIN_ROUNDS, "rounds..."
model = xgb.train(xgb_params, dtrain, num_boost_round=DEFAULT_TRAIN_ROUNDS,
evals=[(dtrain, 'train')], verbose_eval=50)
y_predict = model.predict(dtest)
gunja_invest = pd.DataFrame({'id': test_df.id, 'price_doc': y_predict})
print gunja_invest.head()
########################## Train for OwnerOccupier Data #########################
# ----------------- Settings ----------------- #
DEFAULT_TRAIN_ROUNDS = 704
#load files
df = pd.read_csv('input/train.csv', parse_dates=['timestamp'])
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
macro = pd.read_csv('input/macro.csv', parse_dates=['timestamp'])
# ----------------- Data Cleaning ----------------- #
# Training Set
df.loc[df.id==13549, 'life_sq'] = 74
df.loc[df.id==10092, 'build_year'] = 2007
df.loc[df.id==10092, 'state'] = 3
df.loc[df.id==13120, 'build_year'] = 1970
df.loc[df.id==25943, 'max_floor'] = 17
# Clean - Full Sq
df = df[(df.full_sq>1)|(df.life_sq>1)]
df.loc[(df.full_sq<10) & (df.life_sq>1), 'full_sq'] = df.life_sq
df = df[df.full_sq<400]
# Clean - Life Sq
df.loc[df.life_sq > df.full_sq*4, 'life_sq'] = df.life_sq/10
df.loc[df.life_sq > df.full_sq, 'life_sq'] = np.nan
df.loc[df.life_sq < 5, 'life_sq'] = np.nan
df.loc[df.life_sq < df.full_sq * 0.3, 'life_sq'] = np.nan
df = df[df.life_sq<300]
# Clean - Kitch Sq
df.loc[df.kitch_sq < 2, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.full_sq * 0.5, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
df.loc[df.build_year<1000, 'build_year'] = np.nan
df.loc[df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
df.loc[df.num_room<1, 'num_room'] = np.nan
df.loc[(df.num_room>4) & (df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
df.loc[df.floor==0, 'floor'] = np.nan
df.loc[df.max_floor==0, 'max_floor'] = np.nan
df.loc[(df.max_floor==1) & (df.floor>1), 'max_floor'] = np.nan
df.loc[df.max_floor>50, 'max_floor'] = np.nan
df.loc[df.floor>df.max_floor, 'floor'] = np.nan
# Test Set
test_df.loc[test_df.id==30938, 'full_sq'] = 37.8
test_df.loc[test_df.id==35857, 'full_sq'] = 42.07
test_df.loc[test_df.id==35108, 'full_sq'] = 40.3
test_df.loc[test_df.id==33648, 'num_room'] = 1
# Clean - Full Sq
test_df.loc[(test_df.full_sq<10) & (test_df.life_sq>1), 'full_sq'] = test_df.life_sq
# Clean - Life Sq
test_df.loc[test_df.life_sq>test_df.full_sq*2, 'life_sq'] = test_df.life_sq/10
test_df.loc[test_df.life_sq > test_df.full_sq, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < 5, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < test_df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
test_df.loc[test_df.kitch_sq < 2, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.full_sq * 0.5, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
test_df.loc[test_df.build_year<1000, 'build_year'] = np.nan
test_df.loc[test_df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
test_df.loc[test_df.num_room<1, 'num_room'] = np.nan
test_df.loc[(test_df.num_room>4) & (test_df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
test_df.loc[test_df.floor==0, 'floor'] = np.nan
test_df.loc[test_df.max_floor==0, 'max_floor'] = np.nan
test_df.loc[(test_df.max_floor==1) & (test_df.floor>1), 'max_floor'] = np.nan
test_df.loc[test_df.max_floor>50, 'max_floor'] = np.nan
test_df.loc[test_df.floor>test_df.max_floor, 'floor'] = np.nan
# ----------------- New Features ----------------- #
# month_year_cnt
month_year = (df.timestamp.dt.month + df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
df['month_year_cnt'] = month_year.map(month_year_cnt_map)
month_year = (test_df.timestamp.dt.month + test_df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
test_df['month_year_cnt'] = month_year.map(month_year_cnt_map)
# week_year_cnt
week_year = (df.timestamp.dt.weekofyear + df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
df['week_year_cnt'] = week_year.map(week_year_cnt_map)
week_year = (test_df.timestamp.dt.weekofyear + test_df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
test_df['week_year_cnt'] = week_year.map(week_year_cnt_map)
# month
df['month'] = df.timestamp.dt.month
test_df['month'] = test_df.timestamp.dt.month
# day of week
df['dow'] = df.timestamp.dt.dayofweek
test_df['dow'] = test_df.timestamp.dt.dayofweek
# floor/max_floor
df['floor/max_floor'] = df['floor'] / df['max_floor'].astype(float)
test_df['floor/max_floor'] = test_df['floor'] / test_df['max_floor'].astype(float)
# kitch_sq/full_sq
df["kitch_sq/full_sq"] = df["kitch_sq"] / df["full_sq"].astype(float)
test_df["kitch_sq/full_sq"] = test_df["kitch_sq"] / test_df["full_sq"].astype(float)
# Avg Room Size
df['avg_room_size'] = df['life_sq'] / df['num_room'].astype(float)
test_df['avg_room_size'] = test_df['life_sq'] / test_df['num_room'].astype(float)
# Apartment Name
df['apartment_name'] = df['sub_area'] + df['metro_km_avto'].astype(str)
test_df['apartment_name'] = test_df['sub_area'] + test_df['metro_km_avto'].astype(str)
# ----------------- Train for OwnerOccupier Data ----------------- #
df = df[df.product_type=="OwnerOccupier"]
df = df[df.price_doc/df.full_sq <= np.exp(13.15)]
df = df[df.price_doc/df.full_sq >= np.exp(10.4)]
test_df.product_type = "OwnerOccupier"
y_train = df["price_doc"]
x_train = df.drop(["id", "timestamp", "price_doc"], axis=1)
x_test = test_df.drop(["id", "timestamp"], axis=1)
x_all = pd.concat([x_train, x_test])
# Feature Encoding
for c in x_all.columns:
if x_all[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_all[c].values))
x_all[c] = lbl.transform(list(x_all[c].values))
# Separate Training and Test Data
num_train = len(x_train)
x_train = x_all[:num_train]
x_test = x_all[num_train:]
dtrain = xgb.DMatrix(x_train, y_train)
dtest = xgb.DMatrix(x_test)
# ----------------- Cross Validation ----------------- #
xgb_params = {
'eta': 0.03,
'max_depth': 5,
'subsample': 0.7,
'colsample_bytree': 1,
'objective': 'reg:linear',
'eval_metric': 'rmse',
'silent': 1,
'seed': 0
}
if EN_CROSSVALIDATION:
print "[INFO] Cross Validation..."
cv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=10,
verbose_eval=20, show_stdv=True)
DEFAULT_TRAIN_ROUNDS = len(cv_output)
print "[INFO] Optimal Training Rounds =", DEFAULT_TRAIN_ROUNDS
# ----------------- Training ----------------- #
print "[INFO] Training for", DEFAULT_TRAIN_ROUNDS, "rounds..."
model = xgb.train(xgb_params, dtrain, num_boost_round=DEFAULT_TRAIN_ROUNDS,
evals=[(dtrain, 'train')], verbose_eval=50)
y_predict = model.predict(dtest)
gunja_owner = pd.DataFrame({'id': test_df.id, 'price_doc': y_predict})
print gunja_owner.head()
############################## Merge #############################
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
test_df['price_doc'] = gunja_invest['price_doc']
test_df.loc[test_df.product_type=="OwnerOccupier", 'price_doc'] = gunja_owner['price_doc']
gunja_output = test_df[["id", "price_doc"]]
print gunja_output.head()
print "[INFO] Average Price =", gunja_output['price_doc'].mean()
################# Louis Model #####################
train = pd.read_csv('input/train.csv')
test = pd.read_csv('input/test.csv')
id_test = test.id
mult = .969
y_train = train["price_doc"] * mult + 10
x_train = train.drop(["id", "timestamp", "price_doc"], axis=1)
x_test = test.drop(["id", "timestamp"], axis=1)
for c in x_train.columns:
if x_train[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_train[c].values))
x_train[c] = lbl.transform(list(x_train[c].values))
for c in x_test.columns:
if x_test[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_test[c].values))
x_test[c] = lbl.transform(list(x_test[c].values))
xgb_params = {
'eta': 0.05,
'max_depth': 5,
'subsample': 0.7,
'colsample_bytree': 0.7,
'objective': 'reg:linear',
'eval_metric': 'rmse',
'seed': 0,
'silent': 1
}
dtrain = xgb.DMatrix(x_train, y_train)
dtest = xgb.DMatrix(x_test)
# cv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=20, verbose_eval=25, show_stdv=False)
# print('best num_boost_rounds = ', len(cv_output))
# num_boost_rounds = len(cv_output) # 382
num_boost_rounds = 384 # This was the CV output, as earlier version shows
model = xgb.train(xgb_params, dtrain, num_boost_round= num_boost_rounds)
y_predict = model.predict(dtest)
output = pd.DataFrame({'id': id_test, 'price_doc': y_predict})
print "[INFO] Louis Model Average Price =", output['price_doc'].mean()
# output.drop('average_q_price', axis=1, inplace=True)
# output.head()
######################## Bruno Model #########################
# Any results you write to the current directory are saved as output.
df_train = pd.read_csv("input/train.csv", parse_dates=['timestamp'])
df_test = pd.read_csv("input/test.csv", parse_dates=['timestamp'])
df_macro = pd.read_csv("input/macro.csv", parse_dates=['timestamp'])
df_train.drop(df_train[df_train["life_sq"] > 7000].index, inplace=True)
mult = 0.969
y_train = df_train['price_doc'].values * mult + 10
id_test = df_test['id']
df_train.drop(['id', 'price_doc'], axis=1, inplace=True)
df_test.drop(['id'], axis=1, inplace=True)
num_train = len(df_train)
df_all = pd.concat([df_train, df_test])
# Next line just adds a lot of NA columns (becuase "join" only works on indexes)
# but somewhow it seems to affect the result
df_all = df_all.join(df_macro, on='timestamp', rsuffix='_macro')
print(df_all.shape)
# Add month-year
month_year = (df_all.timestamp.dt.month + df_all.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
df_all['month_year_cnt'] = month_year.map(month_year_cnt_map)
# Add week-year count
week_year = (df_all.timestamp.dt.weekofyear + df_all.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
df_all['week_year_cnt'] = week_year.map(week_year_cnt_map)
# Add month and day-of-week
df_all['month'] = df_all.timestamp.dt.month
df_all['dow'] = df_all.timestamp.dt.dayofweek
# Other feature engineering
df_all['rel_floor'] = df_all['floor'] / df_all['max_floor'].astype(float)
df_all['rel_kitch_sq'] = df_all['kitch_sq'] / df_all['full_sq'].astype(float)
train['building_name'] = pd.factorize(train.sub_area + train['metro_km_avto'].astype(str))[0]
test['building_name'] = pd.factorize(test.sub_area + test['metro_km_avto'].astype(str))[0]
def add_time_features(col):
    """Add per-period frequency features for `col` to the global `train` frame.

    For the given column, factorizes its string value combined with the global
    month_year / week_year period codes, then maps each combined code to its
    occurrence count, producing '<col>_month_year_cnt' and '<col>_week_year_cnt'.
    """
    col_as_str = train[col].astype(str)
    monthly_codes = pd.Series(pd.factorize(col_as_str + month_year.astype(str))[0])
    train[col + '_month_year_cnt'] = monthly_codes.map(monthly_codes.value_counts())
    weekly_codes = pd.Series(pd.factorize(col_as_str + week_year.astype(str))[0])
    train[col + '_week_year_cnt'] = weekly_codes.map(weekly_codes.value_counts())
# Apply the time-count features to the train frame.
add_time_features('building_name')
add_time_features('sub_area')
# NOTE(review): this intentionally re-defines (shadows) add_time_features so the
# same two calls below operate on the global `test` frame instead of `train`.
# A single function taking the frame as a parameter would be cleaner.
def add_time_features(col):
    """Add month-year and week-year count features for `col` to the global `test` frame."""
    col_month_year = pd.Series(pd.factorize(test[col].astype(str) + month_year.astype(str))[0])
    test[col + '_month_year_cnt'] = col_month_year.map(col_month_year.value_counts())
    col_week_year = pd.Series(pd.factorize(test[col].astype(str) + week_year.astype(str))[0])
    test[col + '_week_year_cnt'] = col_week_year.map(col_week_year.value_counts())
add_time_features('building_name')
add_time_features('sub_area')
# Remove timestamp column (may overfit the model in train)
df_all.drop(['timestamp', 'timestamp_macro'], axis=1, inplace=True)
factorize = lambda t: pd.factorize(t[1])[0]
df_obj = df_all.select_dtypes(include=['object'])
# NOTE(review): this X_all (and the X_train/X_test slices just below) is dead
# code — it is recomputed from df_values a few lines down and this first result
# is discarded. Also, DataFrame.iteritems() is deprecated in modern pandas.
X_all = np.c_[
    df_all.select_dtypes(exclude=['object']).values,
    np.array(list(map(factorize, df_obj.iteritems()))).T
]
print(X_all.shape)
X_train = X_all[:num_train]
X_test = X_all[num_train:]
# Deal with categorical values
df_numeric = df_all.select_dtypes(exclude=['object'])
df_obj = df_all.select_dtypes(include=['object']).copy()
# Label-encode every object-dtype column in place.
for c in df_obj:
    df_obj[c] = pd.factorize(df_obj[c])[0]
df_values = pd.concat([df_numeric, df_obj], axis=1)
# Convert to numpy values
X_all = df_values.values
print(X_all.shape)
# Split the stacked matrix back into train/test rows.
X_train = X_all[:num_train]
X_test = X_all[num_train:]
df_columns = df_values.columns
# XGBoost hyper-parameters for the Bruno model.
xgb_params = {
    'eta': 0.05,
    'max_depth': 5,
    'subsample': 0.7,
    'colsample_bytree': 0.7,
    'objective': 'reg:linear',
    'eval_metric': 'rmse',
    'seed': 0,
    'silent': 1
}
dtrain = xgb.DMatrix(X_train, y_train, feature_names=df_columns)
dtest = xgb.DMatrix(X_test, feature_names=df_columns)
# cv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=20, verbose_eval=25, show_stdv=False)
# print('best num_boost_rounds = ', len(cv_output))
# num_boost_rounds = len(cv_output) #
num_boost_rounds = 420 # From Bruno's original CV, I think
model = xgb.train(xgb_params, dtrain, num_boost_round=num_boost_rounds)
y_pred = model.predict(dtest)
df_sub = pd.DataFrame({'id': id_test, 'price_doc': y_pred})
# Python 2 print statement.
print "[INFO] Bruno Model Average Price =", df_sub['price_doc'].mean()
df_sub.head()
# Blend Louis and Bruno predictions as a weighted geometric mean.
first_result = output.merge(df_sub, on="id", suffixes=['_louis','_bruno'])
first_result["price_doc"] = np.exp( .714*np.log(first_result.price_doc_louis) +
                                    .286*np.log(first_result.price_doc_bruno) ) # multiplies out to .5 & .2
# Fold in the Gunja model (gunja_output is built earlier in the script) with a
# second weighted geometric blend.
result = first_result.merge(gunja_output, on="id", suffixes=['_follow','_gunja'])
result["price_doc"] = np.exp( .78*np.log(result.price_doc_follow) +
                              .22*np.log(result.price_doc_gunja) )
result.drop(["price_doc_louis","price_doc_bruno","price_doc_follow","price_doc_gunja"],axis=1,inplace=True)
result.head()
# Python 2 print statement.
print "[INFO] Ensemble Average Price =", result['price_doc'].mean()
result.to_csv('sub-silly-fixed-price-changed-local.csv', index=False)
| 37.620451
| 122
| 0.680518
| 3,672
| 21,707
| 3.753813
| 0.096678
| 0.071387
| 0.022345
| 0.040554
| 0.782429
| 0.75341
| 0.737377
| 0.728743
| 0.714234
| 0.702554
| 0
| 0.024801
| 0.126964
| 21,707
| 576
| 123
| 37.685764
| 0.702549
| 0.158244
| 0
| 0.688889
| 0
| 0
| 0.168039
| 0.002185
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.013889
| null | null | 0.044444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
da6f71630a22c262b4073a0c51a77d513715b121
| 425
|
py
|
Python
|
appJar/__init__.py
|
AndreaMordenti/spotydowny
|
2c0630b1abb6f1fed1aeda46e81a303f9b3057ec
|
[
"MIT"
] | null | null | null |
appJar/__init__.py
|
AndreaMordenti/spotydowny
|
2c0630b1abb6f1fed1aeda46e81a303f9b3057ec
|
[
"MIT"
] | null | null | null |
appJar/__init__.py
|
AndreaMordenti/spotydowny
|
2c0630b1abb6f1fed1aeda46e81a303f9b3057ec
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from appJar.appjar import gui
from appJar.appjar import SelectableLabel
from appJar.appjar import AutoCompleteEntry
from appJar.appjar import ajScale
from appJar.appjar import AjText, AjScrolledText
from appJar.appjar import Meter
from appJar.appjar import Properties
from appJar.appjar import Link
from appJar.appjar import Separator
from appJar.appjar import Grip
from appJar.appjar import PieChart
| 32.692308
| 48
| 0.832941
| 59
| 425
| 6
| 0.305085
| 0.310734
| 0.497175
| 0.683616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002667
| 0.117647
| 425
| 12
| 49
| 35.416667
| 0.941333
| 0.049412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e538de8873ebefefd24ac4ce0c1e5d7dcb552650
| 124
|
py
|
Python
|
core/__init__.py
|
wmww/instigate
|
bc4f77d2cc40a57a0757ca4f460d5a9bc8a076ae
|
[
"MIT"
] | null | null | null |
core/__init__.py
|
wmww/instigate
|
bc4f77d2cc40a57a0757ca4f460d5a9bc8a076ae
|
[
"MIT"
] | null | null | null |
core/__init__.py
|
wmww/instigate
|
bc4f77d2cc40a57a0757ca4f460d5a9bc8a076ae
|
[
"MIT"
] | null | null | null |
from . import text
from . import result
from . import context
from . import task
from . import mission
from . import runner
| 17.714286
| 21
| 0.758065
| 18
| 124
| 5.222222
| 0.444444
| 0.638298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193548
| 124
| 6
| 22
| 20.666667
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e57305d58af2a255c91b8b82a61a357fa4e8287c
| 21
|
py
|
Python
|
scadasim/fluids/__init__.py
|
sintax1/scadasim
|
8d8beaecf081b81ca5cb0a84b19cb9fe84b7cfa7
|
[
"MIT"
] | 6
|
2017-10-09T20:02:01.000Z
|
2021-06-02T11:43:46.000Z
|
sim/scadasim/fluids/__init__.py
|
PMaynard/ndn-water-treatment-testbed
|
926db68237b06f43f6e736f035201ed71fc153bc
|
[
"MIT"
] | 1
|
2017-03-24T03:31:35.000Z
|
2017-03-24T03:31:35.000Z
|
sim/scadasim/fluids/__init__.py
|
PMaynard/ndn-water-treatment-testbed
|
926db68237b06f43f6e736f035201ed71fc153bc
|
[
"MIT"
] | 3
|
2018-05-19T07:47:47.000Z
|
2019-04-09T15:57:57.000Z
|
from fluids import *
| 10.5
| 20
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f92bb9642c37c93c60e01ee1eac6056d6c684fe1
| 301
|
py
|
Python
|
venv/Lib/site-packages/pybrain/datasets/__init__.py
|
ishatserka/MachineLearningAndDataAnalysisCoursera
|
e82e772df2f4aec162cb34ac6127df10d14a625a
|
[
"MIT"
] | 4
|
2015-01-01T14:57:38.000Z
|
2018-07-12T04:21:36.000Z
|
pybrain/datasets/__init__.py
|
abhishekgahlot/pybrain
|
c54661f13857d5bcb0095ba2fb12f5a403a4a70f
|
[
"BSD-3-Clause"
] | null | null | null |
pybrain/datasets/__init__.py
|
abhishekgahlot/pybrain
|
c54661f13857d5bcb0095ba2fb12f5a403a4a70f
|
[
"BSD-3-Clause"
] | 2
|
2015-01-23T09:23:58.000Z
|
2019-02-22T05:42:29.000Z
|
# $Id$
from sequential import SequentialDataSet
from supervised import SupervisedDataSet
from unsupervised import UnsupervisedDataSet
from importance import ImportanceDataSet
from reinforcement import ReinforcementDataSet
from classification import ClassificationDataSet, SequenceClassificationDataSet
| 43
| 79
| 0.900332
| 26
| 301
| 10.423077
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086379
| 301
| 7
| 79
| 43
| 0.985455
| 0.013289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00a24a7c834a962e5087dd20d0c9645c710fc243
| 35
|
py
|
Python
|
jdaviz/components/file_loader/__init__.py
|
mariobuikhuizen/jdaviz
|
1757f49b2ea8c679c23f5925d754798a1deb0c96
|
[
"BSD-3-Clause"
] | null | null | null |
jdaviz/components/file_loader/__init__.py
|
mariobuikhuizen/jdaviz
|
1757f49b2ea8c679c23f5925d754798a1deb0c96
|
[
"BSD-3-Clause"
] | null | null | null |
jdaviz/components/file_loader/__init__.py
|
mariobuikhuizen/jdaviz
|
1757f49b2ea8c679c23f5925d754798a1deb0c96
|
[
"BSD-3-Clause"
] | null | null | null |
from .file_loader import FileLoader
| 35
| 35
| 0.885714
| 5
| 35
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 35
| 1
| 35
| 35
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00b824bbfaff68674d84422b6eb362cb02c5a447
| 120
|
py
|
Python
|
tests/conda_env/__init__.py
|
jack-pappas/conda
|
c53a3a44acd3f4194e8c065cfa31a2eb9bc0cd77
|
[
"BSD-3-Clause"
] | 6
|
2017-09-09T04:47:49.000Z
|
2020-09-30T11:20:46.000Z
|
tests/conda_env/__init__.py
|
jack-pappas/conda
|
c53a3a44acd3f4194e8c065cfa31a2eb9bc0cd77
|
[
"BSD-3-Clause"
] | 2
|
2015-11-08T05:03:41.000Z
|
2016-08-27T17:58:48.000Z
|
tests/conda_env/__init__.py
|
jack-pappas/conda
|
c53a3a44acd3f4194e8c065cfa31a2eb9bc0cd77
|
[
"BSD-3-Clause"
] | 4
|
2015-11-08T04:46:01.000Z
|
2016-05-27T15:48:37.000Z
|
from os.path import dirname, join
def support_file(filename):
    """Return the path of *filename* inside this test package's support/ directory."""
    support_dir = join(dirname(__file__), 'support')
    return join(support_dir, filename)
| 20
| 55
| 0.75
| 16
| 120
| 5.3125
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141667
| 120
| 5
| 56
| 24
| 0.825243
| 0
| 0
| 0
| 0
| 0
| 0.058333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
00d93841bbcb2760d3fb2ef11fed4aedff75748e
| 47
|
py
|
Python
|
app/rooms/models/__init__.py
|
mandarhan/mandarhan
|
9ce38d10e536e0d3e2f907c3b5c560d66ccf8e40
|
[
"MIT"
] | null | null | null |
app/rooms/models/__init__.py
|
mandarhan/mandarhan
|
9ce38d10e536e0d3e2f907c3b5c560d66ccf8e40
|
[
"MIT"
] | 6
|
2020-02-18T03:49:09.000Z
|
2022-03-12T00:10:05.000Z
|
app/rooms/models/__init__.py
|
mandarhan/mandarhan
|
9ce38d10e536e0d3e2f907c3b5c560d66ccf8e40
|
[
"MIT"
] | 1
|
2020-03-25T10:25:43.000Z
|
2020-03-25T10:25:43.000Z
|
from .categories import *
from .rooms import *
| 15.666667
| 25
| 0.744681
| 6
| 47
| 5.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 47
| 2
| 26
| 23.5
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00e58020cddc3e823098e7657cd1bc61ff2364e1
| 142
|
py
|
Python
|
geopayment/providers/bog/installment/form.py
|
Lh4cKg/tbcpay
|
481ef6148defc9897643919f7c47ce78d149acbf
|
[
"MIT"
] | null | null | null |
geopayment/providers/bog/installment/form.py
|
Lh4cKg/tbcpay
|
481ef6148defc9897643919f7c47ce78d149acbf
|
[
"MIT"
] | null | null | null |
geopayment/providers/bog/installment/form.py
|
Lh4cKg/tbcpay
|
481ef6148defc9897643919f7c47ce78d149acbf
|
[
"MIT"
] | null | null | null |
class InstallmentForm(object):
    """Placeholder form for Bank of Georgia installment payments.

    All hooks are currently no-ops and return None.
    """

    def __init__(self):
        """No state to initialize yet."""
        pass

    def media(self):
        """Media-assets hook (not implemented)."""
        pass

    def static(self):
        """Static-assets hook (not implemented)."""
        pass
| 10.923077
| 30
| 0.549296
| 15
| 142
| 4.933333
| 0.6
| 0.324324
| 0.297297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.366197
| 142
| 12
| 31
| 11.833333
| 0.822222
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.428571
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
9767f117d5090e013ff3b0b1f7b56c6efb209143
| 73
|
py
|
Python
|
parqser/page/loaded_page.py
|
ARQtty/parqser
|
2f4f9505544d718dc818d1d9177ac1394bfbb352
|
[
"MIT"
] | null | null | null |
parqser/page/loaded_page.py
|
ARQtty/parqser
|
2f4f9505544d718dc818d1d9177ac1394bfbb352
|
[
"MIT"
] | null | null | null |
parqser/page/loaded_page.py
|
ARQtty/parqser
|
2f4f9505544d718dc818d1d9177ac1394bfbb352
|
[
"MIT"
] | null | null | null |
from parqser.page import BasePage
class LoadedPage(BasePage):
    """Marker subclass for a page that has been fetched; adds no behavior beyond BasePage."""
    pass
| 12.166667
| 33
| 0.767123
| 9
| 73
| 6.222222
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178082
| 73
| 5
| 34
| 14.6
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
976d4e5094f350a685ab32e681cd8d16f8fd1538
| 139,327
|
py
|
Python
|
AppDatabase.py
|
msimms/OpenWorkoutWeb
|
21e506b7d5cfaf30875c3f584cfae796ba112d49
|
[
"MIT"
] | 1
|
2021-06-23T03:59:19.000Z
|
2021-06-23T03:59:19.000Z
|
AppDatabase.py
|
msimms/OpenWorkoutWeb
|
21e506b7d5cfaf30875c3f584cfae796ba112d49
|
[
"MIT"
] | 10
|
2021-06-26T20:37:01.000Z
|
2022-03-22T16:42:22.000Z
|
AppDatabase.py
|
msimms/OpenWorkoutWeb
|
21e506b7d5cfaf30875c3f584cfae796ba112d49
|
[
"MIT"
] | 1
|
2021-06-17T01:51:27.000Z
|
2021-06-17T01:51:27.000Z
|
# -*- coding: utf-8 -*-
#
# # MIT License
#
# Copyright (c) 2017 Mike Simms
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Database implementation"""
import json
import re
import sys
import traceback
import uuid
from bson.objectid import ObjectId
if sys.version_info[0] < 3:
from bson import Binary
import pymongo
import time
import Database
import InputChecker
import Keys
import Perf
import Workout
def retrieve_time_from_location(location):
    """Sort key: pull the 'time' field out of a location record."""
    timestamp = location['time']
    return timestamp
def retrieve_time_from_time_value_pair(value):
    """Sort key: return the first key of a single-entry {time: value} dict."""
    keys = value.keys()
    if sys.version_info[0] < 3:
        # Python 2: dict.keys() is already a list.
        return keys[0]
    # Python 3: keys() is a view and must be materialized before indexing.
    return list(keys)[0]
class Device(object):
    """Simple value object describing a recording device."""

    def __init__(self):
        # Start as an empty, unidentified device; callers fill these in.
        self.id = 0
        self.name = ""
        self.description = ""
        super(Device, self).__init__()
class MongoDatabase(Database.Database):
    """Mongo DB implementation of the application database."""

    # Class-level defaults; connect() rebinds these on the instance.
    conn = None
    database = None
    users_collection = None
    activities_collection = None
    workouts_collection = None
    tasks_collectoin = None  # NOTE(review): typo — connect() assigns self.tasks_collection, so this default is never read under the correct name.
    uploads_collection = None

    def __init__(self):
        # Delegate common setup (e.g. logging) to the abstract base.
        Database.Database.__init__(self)
def connect(self):
    """Connect to MongoDB on localhost:27017 and bind the collection handles.

    Returns True on success, False if the server is unreachable.
    """
    try:
        self.conn = pymongo.MongoClient('localhost:27017')
        # Database. Try the old name, if not found then create or open it with the new name.
        db_names = self.conn.list_database_names()
        if 'straendb' in db_names:
            self.database = self.conn['straendb']
        else:
            self.database = self.conn['openworkoutdb']
        # Handles to the various collections.
        self.users_collection = self.database['users']
        self.activities_collection = self.database['activities']
        # NOTE(review): records_collection has no class-level default, unlike the others.
        self.records_collection = self.database['records']
        self.workouts_collection = self.database['workouts']
        self.tasks_collection = self.database['tasks']
        self.uploads_collection = self.database['uploads']
        # Create indexes.
        self.activities_collection.create_index(Keys.ACTIVITY_ID_KEY)
        return True
    except pymongo.errors.ConnectionFailure as e:
        self.log_error("Could not connect to MongoDB: %s" % e)
        return False
def total_users_count(self):
    """Returns the number of users in the database (0 on any error)."""
    try:
        # NOTE(review): Collection.count() is deprecated/removed in newer pymongo;
        # count_documents({}) is the modern equivalent — confirm driver version.
        return self.users_collection.count()
    except:
        self.log_error(MongoDatabase.total_users_count.__name__ + ": Exception")
        return 0
def total_activities_count(self):
    """Returns the number of activities in the database (0 on any error)."""
    try:
        # NOTE(review): Collection.count() is deprecated/removed in newer pymongo.
        return self.activities_collection.count()
    except:
        self.log_error(MongoDatabase.total_activities_count.__name__ + ": Exception")
        return 0
def list_excluded_user_keys(self):
    """This is the list of stuff we don't need to return when we're building a friends list. Helps with efficiency and privacy by not exposing more than we need."""
    # Mongo projection dict: mapping a key to False excludes it from results.
    exclude_keys = {}
    exclude_keys[Keys.HASH_KEY] = False
    exclude_keys[Keys.DEVICES_KEY] = False
    exclude_keys[Keys.FRIEND_REQUESTS_KEY] = False
    exclude_keys[Keys.FRIENDS_KEY] = False
    exclude_keys[Keys.PR_KEY] = False
    exclude_keys[Keys.DEFAULT_PRIVACY_KEY] = False
    exclude_keys[Keys.USER_PREFERRED_UNITS_KEY] = False
    return exclude_keys
def list_excluded_activity_keys_activity_lists(self):
    """This is the list of stuff we don't need to return when we're summarizing activities."""
    # Mongo projection dict: excludes the bulky time-series sensor arrays.
    exclude_keys = {}
    exclude_keys[Keys.APP_LOCATIONS_KEY] = False
    exclude_keys[Keys.APP_ACCELEROMETER_KEY] = False
    exclude_keys[Keys.APP_CURRENT_SPEED_KEY] = False
    exclude_keys[Keys.APP_HEART_RATE_KEY] = False
    exclude_keys[Keys.APP_CADENCE_KEY] = False
    exclude_keys[Keys.APP_POWER_KEY] = False
    return exclude_keys
#
# User management methods
#
def create_user(self, username, realname, passhash):
    """Insert a new user document.

    Returns True on success, False on invalid input or DB error.
    """
    if username is None:
        self.log_error(MongoDatabase.create_user.__name__ + ": Unexpected empty object: username")
        return False
    if realname is None:
        self.log_error(MongoDatabase.create_user.__name__ + ": Unexpected empty object: realname")
        return False
    if passhash is None:
        self.log_error(MongoDatabase.create_user.__name__ + ": Unexpected empty object: passhash")
        return False
    if len(username) == 0:
        self.log_error(MongoDatabase.create_user.__name__ + ": username too short")
        return False
    if len(realname) == 0:
        self.log_error(MongoDatabase.create_user.__name__ + ": realname too short")
        return False
    if len(passhash) == 0:
        self.log_error(MongoDatabase.create_user.__name__ + ": hash too short")
        return False
    try:
        # New users start with no devices/friends and public activity visibility.
        post = { Keys.USERNAME_KEY: username, Keys.REALNAME_KEY: realname, Keys.HASH_KEY: passhash, Keys.DEVICES_KEY: [], Keys.FRIENDS_KEY: [], Keys.DEFAULT_PRIVACY_KEY: Keys.ACTIVITY_VISIBILITY_PUBLIC }
        # NOTE(review): Collection.insert() is deprecated in newer pymongo (use insert_one).
        self.users_collection.insert(post)
        return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_user_details(self, username):
    """Return the full user document for *username*, or None on bad input or DB error."""
    if username is None:
        self.log_error(MongoDatabase.retrieve_user_details.__name__ + ": Unexpected empty object: username")
        return None
    if len(username) == 0:
        self.log_error(MongoDatabase.retrieve_user_details.__name__ + ": username is empty")
        return None
    try:
        return self.users_collection.find_one({ Keys.USERNAME_KEY: username })
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None
def retrieve_user(self, username):
    """Return (user_id, password_hash, realname) for *username*.

    All three are None when the user is not found, input is invalid,
    or a database error occurs.
    """
    if username is None:
        self.log_error(MongoDatabase.retrieve_user.__name__ + ": Unexpected empty object: username")
        return None, None, None
    if len(username) == 0:
        self.log_error(MongoDatabase.retrieve_user.__name__ + ": username is empty")
        return None, None, None
    try:
        # Find the user, projecting only the three fields we need.
        result_keys = { Keys.DATABASE_ID_KEY: 1, Keys.HASH_KEY: 1, Keys.REALNAME_KEY: 1 }
        user = self.users_collection.find_one({ Keys.USERNAME_KEY: username }, result_keys)
        # If the user was found.
        if user is not None:
            return str(user[Keys.DATABASE_ID_KEY]), user[Keys.HASH_KEY], str(user[Keys.REALNAME_KEY])
        return None, None, None
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None, None, None
def retrieve_user_from_id(self, user_id):
    """Return (username, realname) for the given user id; (None, None) if not found or on error."""
    if user_id is None:
        self.log_error(MongoDatabase.retrieve_user_from_id.__name__ + ": Unexpected empty object: user_id")
        return None, None
    try:
        # Find the user by ObjectId, projecting only the two name fields.
        user_id_obj = ObjectId(str(user_id))
        result_keys = { Keys.USERNAME_KEY: 1, Keys.REALNAME_KEY: 1 }
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj }, result_keys)
        # If the user was found.
        if user is not None:
            return user[Keys.USERNAME_KEY], user[Keys.REALNAME_KEY]
        return None, None
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None, None
def retrieve_user_from_api_key(self, api_key):
    """Look up a user by one of their API keys.

    Returns (user_id, password_hash, realname, rate_limit). When the key is
    unknown the first three are None but the default rate is still returned;
    on invalid input or a database error all four are None.
    """
    if api_key is None:
        # Bug fix: this guard previously logged the wrong method name
        # (retrieve_user_from_id) and returned a 3-tuple, while every other
        # exit of this method returns 4 values — callers unpacking four
        # values would raise ValueError on this path.
        self.log_error(MongoDatabase.retrieve_user_from_api_key.__name__ + ": Unexpected empty object: api_key")
        return None, None, None, None
    try:
        # Find the user.
        rate = 100  # NOTE(review): rate limit is hard-coded — confirm this is intended.
        query = { Keys.API_KEYS: { Keys.API_KEY: str(api_key), Keys.API_KEY_RATE : rate } }
        result_keys = { Keys.DATABASE_ID_KEY: 1, Keys.HASH_KEY: 1, Keys.REALNAME_KEY: 1 }
        user = self.users_collection.find_one(query, result_keys)
        # If the user was found.
        if user is not None:
            return str(user[Keys.DATABASE_ID_KEY]), user[Keys.HASH_KEY], user[Keys.REALNAME_KEY], rate
        return None, None, None, rate
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None, None, None, None
def update_user(self, user_id, username, realname, passhash):
    """Update a user's name fields and, if provided, password hash.

    passhash=None leaves the stored hash unchanged. Returns True on
    success, False on invalid input, missing user, or DB error.
    """
    if user_id is None:
        self.log_error(MongoDatabase.update_user.__name__ + ": Unexpected empty object: user_id")
        return False
    if username is None:
        self.log_error(MongoDatabase.update_user.__name__ + ": Unexpected empty object: username")
        return False
    if realname is None:
        self.log_error(MongoDatabase.update_user.__name__ + ": Unexpected empty object: realname")
        return False
    if len(username) == 0:
        self.log_error(MongoDatabase.update_user.__name__ + ": username too short")
        return False
    if len(realname) == 0:
        self.log_error(MongoDatabase.update_user.__name__ + ": realname too short")
        return False
    try:
        # Find the user.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # If the user was found.
        if user is not None:
            user[Keys.USERNAME_KEY] = username
            user[Keys.REALNAME_KEY] = realname
            if passhash is not None:
                user[Keys.HASH_KEY] = passhash
            # NOTE(review): Collection.save() was removed in pymongo 4.x (use replace_one).
            self.users_collection.save(user)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def delete_user(self, user_id):
    """Delete the user document with the given id.

    Returns True when the delete call completed, False on bad input or DB error.
    """
    if user_id is None:
        self.log_error(MongoDatabase.delete_user.__name__ + ": Unexpected empty object: user_id")
        return False
    try:
        user_id_obj = ObjectId(str(user_id))
        deleted_result = self.users_collection.delete_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # NOTE(review): delete_one returns a result object even when nothing
        # matched, so this reports True regardless of deleted_count — confirm intended.
        if deleted_result is not None:
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_matched_users(self, username):
    """Returns a list of user names for users that match the specified regex.

    Matches the pattern against both usernames and real names; real-name
    matches contribute the corresponding username, de-duplicated.
    """
    user_list = []
    if username is None:
        self.log_error(MongoDatabase.retrieve_matched_users.__name__ + ": Unexpected empty object: username")
        return user_list
    if len(username) == 0:
        self.log_error(MongoDatabase.retrieve_matched_users.__name__ + ": username is empty")
        return user_list
    try:
        matched_usernames = self.users_collection.find({ Keys.USERNAME_KEY: { "$regex": username } })
        if matched_usernames is not None:
            for matched_user in matched_usernames:
                user_list.append(matched_user[Keys.USERNAME_KEY])
        matched_realnames = self.users_collection.find({ Keys.REALNAME_KEY: { "$regex": username } })
        if matched_realnames is not None:
            for matched_user in matched_realnames:
                username = matched_user[Keys.USERNAME_KEY]
                # Avoid duplicates when both username and realname matched.
                if username not in user_list:
                    user_list.append(username)
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return user_list
#
# Device management methods
#
def create_user_device(self, user_id, device_str):
    """Append *device_str* to the user's device list (no-op if already present).

    NOTE(review): returns True even when the try block raised and was only
    logged (e.g. when the user is not found and `user[...]` fails) — confirm
    whether callers rely on this best-effort behavior.
    """
    if user_id is None:
        self.log_error(MongoDatabase.create_user_device.__name__ + ": Unexpected empty object: user_id")
        return False
    if device_str is None:
        self.log_error(MongoDatabase.create_user_device.__name__ + ": Unexpected empty object: device_str")
        return False
    try:
        # Find the user.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # Read the devices list.
        devices = []
        if user is not None and Keys.DEVICES_KEY in user:
            devices = user[Keys.DEVICES_KEY]
        # Append the device to the devices list, if it is not already there.
        if device_str not in devices:
            devices.append(device_str)
            user[Keys.DEVICES_KEY] = devices
            self.users_collection.save(user)
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return True
def retrieve_user_devices(self, user_id):
    """Return the list of device strings registered to the user, or None if absent/on error."""
    if user_id is None:
        self.log_error(MongoDatabase.retrieve_user_devices.__name__ + ": Unexpected empty object: user_id")
        return None
    try:
        # Find the user, projecting only the devices field.
        user_id_obj = ObjectId(str(user_id))
        result_keys = { Keys.DEVICES_KEY: 1 }
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj }, result_keys)
        # Read the devices list.
        if user is not None and Keys.DEVICES_KEY in user:
            return user[Keys.DEVICES_KEY]
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None
def retrieve_user_from_device(self, device_str):
    """Return the full user document owning *device_str*, or None on bad input/no match/error."""
    if device_str is None:
        self.log_error(MongoDatabase.retrieve_user_from_device.__name__ + ": Unexpected empty object: device_str")
        return None
    if len(device_str) == 0:
        self.log_error(MongoDatabase.retrieve_user_from_device.__name__ + ": Device string not provided")
        return None
    try:
        return self.users_collection.find_one({ Keys.DEVICES_KEY: device_str })
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None
def delete_user_device(self, device_str):
    """Remove all activity documents recorded by *device_str*.

    NOTE(review): despite the name, this only deletes from the activities
    collection — the device string is not removed from any user document.
    """
    if device_str is None:
        self.log_error(MongoDatabase.delete_user_device.__name__ + ": Unexpected empty object: device_str")
        return False
    try:
        self.activities_collection.remove({ Keys.ACTIVITY_DEVICE_STR_KEY: device_str })
        return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
#
# Friend management methods
#
def create_pending_friend_request(self, user_id, target_id):
    """Record that *user_id* has requested friendship with *target_id*.

    The requester's id is appended to the TARGET user's pending-requests
    list. Returns True on success, False on invalid input or DB error.
    """
    if user_id is None:
        self.log_error(MongoDatabase.create_pending_friend_request.__name__ + ": Unexpected empty object: user_id")
        return False
    if target_id is None:
        self.log_error(MongoDatabase.create_pending_friend_request.__name__ + ": Unexpected empty object: target_id")
        return False
    try:
        # Find the user whose friendship is being requested.
        user_id_obj = ObjectId(str(target_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # If the user was found then add the target user to the pending friends list.
        if user is not None:
            pending_friends_list = []
            if Keys.FRIEND_REQUESTS_KEY in user:
                pending_friends_list = user[Keys.FRIEND_REQUESTS_KEY]
            if user_id not in pending_friends_list:
                pending_friends_list.append(user_id)
                user[Keys.FRIEND_REQUESTS_KEY] = pending_friends_list
                self.users_collection.save(user)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_pending_friends(self, user_id):
    """Return unconfirmed friend relationships for *user_id*.

    Despite the original wording, each element is a (projection-filtered)
    user document — not a bare id — with the requesting-user field set to
    "self" for outgoing requests or the requester's id for incoming ones.
    Returns [] on DB error, None on invalid input.
    """
    if user_id is None:
        self.log_error(MongoDatabase.retrieve_pending_friends.__name__ + ": Unexpected empty object: user_id")
        return None
    try:
        # Things we don't need.
        exclude_keys = self.list_excluded_user_keys()
        # Find the users whose friendship we have requested.
        pending_friends_list = []
        pending_friends = self.users_collection.find({ Keys.FRIEND_REQUESTS_KEY: user_id }, exclude_keys)
        for pending_friend in pending_friends:
            pending_friend[Keys.DATABASE_ID_KEY] = str(pending_friend[Keys.DATABASE_ID_KEY])
            pending_friend[Keys.REQUESTING_USER_KEY] = "self"
            pending_friends_list.append(pending_friend)
        # Find the users who have requested our friendship.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # If we found ourselves.
        if user is not None:
            temp_friend_id_list = []
            if Keys.FRIEND_REQUESTS_KEY in user:
                temp_friend_id_list = user[Keys.FRIEND_REQUESTS_KEY]
            for temp_friend_id in temp_friend_id_list:
                temp_friend_id_obj = ObjectId(str(temp_friend_id))
                pending_friend = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: temp_friend_id_obj }, exclude_keys)
                if pending_friend is not None:
                    pending_friend[Keys.DATABASE_ID_KEY] = str(pending_friend[Keys.DATABASE_ID_KEY])
                    pending_friend[Keys.REQUESTING_USER_KEY] = str(pending_friend[Keys.DATABASE_ID_KEY])
                    pending_friends_list.append(pending_friend)
        return pending_friends_list
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return []
def delete_pending_friend_request(self, user_id, target_id):
    """Remove *target_id* from *user_id*'s pending friend-requests list.

    Returns True on success, False on invalid input or DB error.
    """
    if user_id is None:
        self.log_error(MongoDatabase.delete_pending_friend_request.__name__ + ": Unexpected empty object: user_id")
        return False
    if target_id is None:
        self.log_error(MongoDatabase.delete_pending_friend_request.__name__ + ": Unexpected empty object: target_id")
        return False
    try:
        # Find the user whose pending-requests list is being edited.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # If the user was found then remove the target from the pending friends list.
        if user is not None:
            pending_friends_list = []
            if Keys.FRIEND_REQUESTS_KEY in user:
                pending_friends_list = user[Keys.FRIEND_REQUESTS_KEY]
            if target_id in pending_friends_list:
                pending_friends_list.remove(target_id)
                user[Keys.FRIEND_REQUESTS_KEY] = pending_friends_list
                self.users_collection.save(user)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def create_friend(self, user_id, target_id):
    """Make two users friends by adding each id to the other's friends list.

    Returns True when both documents were updated, False on invalid input,
    missing user, or DB error.
    """
    if user_id is None:
        self.log_error(MongoDatabase.create_friend.__name__ + ": Unexpected empty object: user_id")
        return False
    if target_id is None:
        self.log_error(MongoDatabase.create_friend.__name__ + ": Unexpected empty object: target_id")
        return False
    try:
        # Find the user.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # Find the target user.
        target_user_id_obj = ObjectId(str(target_id))
        target_user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: target_user_id_obj })
        # If the users were found then add each other to their friends lists.
        if user is not None and target_user is not None:
            # Update the user's friends list.
            friends_list = []
            if Keys.FRIENDS_KEY in user:
                friends_list = user[Keys.FRIENDS_KEY]
            if target_id not in friends_list:
                friends_list.append(target_id)
                user[Keys.FRIENDS_KEY] = friends_list
                self.users_collection.save(user)
            # Update the target user's friends list.
            friends_list = []
            if Keys.FRIENDS_KEY in target_user:
                friends_list = target_user[Keys.FRIENDS_KEY]
            if user_id not in friends_list:
                friends_list.append(user_id)
                target_user[Keys.FRIENDS_KEY] = friends_list
                self.users_collection.save(target_user)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_friends(self, user_id):
"""Returns the user ids for all users that are friends with the user who has the specified id."""
if user_id is None:
self.log_error(MongoDatabase.retrieve_friends.__name__ + ": Unexpected empty object: user_id")
return None
try:
# Things we don't need.
exclude_keys = self.list_excluded_user_keys()
# Find the user's friends list.
friends_list = []
friends = self.users_collection.find({ Keys.FRIENDS_KEY: user_id }, exclude_keys)
for friend in friends:
friend[Keys.DATABASE_ID_KEY] = str(friend[Keys.DATABASE_ID_KEY])
friends_list.append(friend)
return friends_list
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return []
def delete_friend(self, user_id, target_id):
"""Removes the users from each other's friends lists."""
if user_id is None:
self.log_error(MongoDatabase.delete_friend.__name__ + ": Unexpected empty object: user_id")
return False
if target_id is None:
self.log_error(MongoDatabase.delete_friend.__name__ + ": Unexpected empty object: target_id")
return False
try:
# Find the user.
user_id_obj = ObjectId(str(user_id))
user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
# Find the target user.
target_user_id_obj = ObjectId(str(target_id))
target_user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: target_user_id_obj })
# If the users were found then add each other to their friends lists.
if user is not None and target_user is not None:
# Update the user's friends list.
friends_list = []
if Keys.FRIENDS_KEY in user:
friends_list = user[Keys.FRIENDS_KEY]
if target_id in friends_list:
friends_list.remove(target_id)
user[Keys.FRIENDS_KEY] = friends_list
self.users_collection.save(user)
# Update the target user's friends list.
friends_list = []
if Keys.FRIENDS_KEY in target_user:
friends_list = target_user[Keys.FRIENDS_KEY]
if user_id in friends_list:
friends_list.remove(user_id)
target_user[Keys.FRIENDS_KEY] = friends_list
self.users_collection.save(target_user)
return True
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
#
# User settings methods
#
    def update_user_setting(self, user_id, key, value, update_time):
        """Create/update method for user preferences.

        Writes 'value' onto the user's document under 'key' and stamps the write
        with 'update_time'. A write is rejected (returns False) when a newer value
        for the same key has already been stored. Returns True on success."""
        if user_id is None:
            self.log_error(MongoDatabase.update_user_setting.__name__ + ": Unexpected empty object: user_id")
            return False
        if key is None:
            self.log_error(MongoDatabase.update_user_setting.__name__ + ": Unexpected empty object: key")
            return False
        if value is None:
            self.log_error(MongoDatabase.update_user_setting.__name__ + ": Unexpected empty object: value")
            return False
        if update_time is None:
            self.log_error(MongoDatabase.update_user_setting.__name__ + ": Unexpected empty object: update_time")
            return False
        try:
            # Find the user.
            user_id_obj = ObjectId(str(user_id))
            user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
            # If the user was found.
            if user is not None:
                # Do not replace a newer value with an older value. The per-key
                # timestamps live in a dict stored under USER_SETTINGS_LAST_UPDATED_KEY;
                # create that dict on first use.
                if Keys.USER_SETTINGS_LAST_UPDATED_KEY not in user:
                    user[Keys.USER_SETTINGS_LAST_UPDATED_KEY] = {}
                elif key in user[Keys.USER_SETTINGS_LAST_UPDATED_KEY] and user[Keys.USER_SETTINGS_LAST_UPDATED_KEY][key] > update_time:
                    return False
                # Update both the timestamp and the value, then persist the document.
                user[Keys.USER_SETTINGS_LAST_UPDATED_KEY][key] = update_time
                user[key] = value
                self.users_collection.save(user)
                return True
        except:
            self.log_error(traceback.format_exc())
            self.log_error(sys.exc_info()[0])
        return False
def retrieve_user_setting(self, user_id, key):
"""Retrieve method for user preferences."""
if user_id is None:
self.log_error(MongoDatabase.retrieve_user_setting.__name__ + ": Unexpected empty object: user_id")
return None
if key is None:
self.log_error(MongoDatabase.retrieve_user_setting.__name__ + ": Unexpected empty object: key")
return None
try:
# Find the user.
user_id_obj = ObjectId(str(user_id))
user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
# Find the setting.
if user is not None and key in user and key in Keys.USER_SETTINGS:
return user[key]
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return None
def retrieve_user_settings(self, user_id, keys):
"""Retrieve method for user preferences."""
if user_id is None:
self.log_error(MongoDatabase.retrieve_user_settings.__name__ + ": Unexpected empty object: user_id")
return None
if keys is None:
self.log_error(MongoDatabase.retrieve_user_settings.__name__ + ": Unexpected empty object: keys")
return None
try:
# Find the user.
user_id_obj = ObjectId(str(user_id))
user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
# Find the settings.
results = []
if user is not None:
for key in keys:
if key in user and key in Keys.USER_SETTINGS:
results.append({key: user[key]})
return results
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return None
#
# Personal record management methods
#
def create_user_personal_records(self, user_id, records):
"""Create method for a user's personal record."""
if user_id is None:
self.log_error(MongoDatabase.create_user_personal_records.__name__ + ": Unexpected empty object: user_id")
return False
if records is None:
self.log_error(MongoDatabase.create_user_personal_records.__name__ + ": Unexpected empty object: records")
return False
try:
# Find the user's records collection.
user_id_str = str(user_id)
user_records = self.records_collection.find_one({ Keys.RECORDS_USER_ID: user_id_str })
# If the collection was found.
if user_records is None:
post = { Keys.RECORDS_USER_ID: user_id_str, Keys.PERSONAL_RECORDS_KEY: records }
self.records_collection.insert(post)
return True
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
def update_user_personal_records(self, user_id, records):
"""Create method for a user's personal record. These are the bests across all activities. Activity records are the bests for individual activities."""
if user_id is None:
self.log_error(MongoDatabase.update_user_personal_records.__name__ + ": Unexpected empty object: user_id")
return False
if records is None or len(records) == 0:
self.log_error(MongoDatabase.update_user_personal_records.__name__ + ": Unexpected empty object: records")
return False
try:
# Find the user's records collection.
user_id_str = str(user_id)
user_records = self.records_collection.find_one({ Keys.RECORDS_USER_ID: user_id_str })
# If the collection was found.
if user_records is not None:
user_records[Keys.PERSONAL_RECORDS_KEY] = records
self.records_collection.save(user_records)
return True
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
    def delete_all_user_personal_records(self, user_id):
        """Delete method for a user's personal record. Deletes the entire personal record cache.

        Returns True when the delete call completes, False on error."""
        if user_id is None:
            self.log_error(MongoDatabase.delete_all_user_personal_records.__name__ + ": Unexpected empty object: user_id")
            return False
        try:
            # Delete the user's records collection.
            user_id_str = str(user_id)
            # NOTE(review): delete_one returns a DeleteResult object even when no
            # document matched, so this returns True whenever the call itself
            # succeeds — confirm that "nothing to delete" should count as success.
            deleted_result = self.records_collection.delete_one({ Keys.RECORDS_USER_ID: user_id_str })
            if deleted_result is not None:
                return True
        except:
            self.log_error(traceback.format_exc())
            self.log_error(sys.exc_info()[0])
        return False
#
# Activity bests management methods
#
def create_activity_bests(self, user_id, activity_id, activity_type, activity_time, bests):
"""Create method for a user's personal records for a given activity."""
if user_id is None:
self.log_error(MongoDatabase.create_activity_bests.__name__ + ": Unexpected empty object: user_id")
return False
if activity_id is None:
self.log_error(MongoDatabase.create_activity_bests.__name__ + ": Unexpected empty object: activity_id")
return False
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.create_activity_bests.__name__ + ": Invalid object: activity_id")
return False
if activity_type is None:
self.log_error(MongoDatabase.create_activity_bests.__name__ + ": Unexpected empty object: activity_type")
return False
if activity_time is None:
self.log_error(MongoDatabase.create_activity_bests.__name__ + ": Unexpected empty object: activity_time")
return False
if bests is None:
self.log_error(MongoDatabase.create_activity_bests.__name__ + ": Unexpected empty object: bests")
return False
try:
# Find the user's records collection.
user_records = self.records_collection.find_one({ Keys.RECORDS_USER_ID: user_id })
if user_records is not None:
bests[Keys.ACTIVITY_TYPE_KEY] = activity_type
bests[Keys.ACTIVITY_START_TIME_KEY] = activity_time
user_records[activity_id] = bests
self.records_collection.save(user_records)
return True
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
def retrieve_recent_activity_bests_for_user(self, user_id, cutoff_time):
"""Retrieve method for a user's activity records. Only activities more recent than the specified cutoff time will be returned."""
if user_id is None:
self.log_error(MongoDatabase.retrieve_recent_activity_bests_for_user.__name__ + ": Unexpected empty object: user_id")
return {}
try:
user_records = self.records_collection.find_one({ Keys.RECORDS_USER_ID: user_id })
if user_records is not None:
bests = {}
for activity_id in user_records:
if InputChecker.is_uuid(activity_id):
activity_bests = user_records[activity_id]
if (cutoff_time is None) or (activity_bests[Keys.ACTIVITY_START_TIME_KEY] > cutoff_time):
bests[activity_id] = activity_bests
return bests
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return {}
def retrieve_bounded_activity_bests_for_user(self, user_id, cutoff_time_lower, cutoff_time_higher):
"""Retrieve method for a user's activity records. Only activities more recent than the specified cutoff time will be returned."""
if user_id is None:
self.log_error(MongoDatabase.retrieve_bounded_activity_bests_for_user.__name__ + ": Unexpected empty object: user_id")
return {}
if cutoff_time_lower is None:
self.log_error(MongoDatabase.retrieve_bounded_activity_bests_for_user.__name__ + ": Unexpected empty object: cutoff_time_lower")
return {}
if cutoff_time_higher is None:
self.log_error(MongoDatabase.retrieve_bounded_activity_bests_for_user.__name__ + ": Unexpected empty object: cutoff_time_higher")
return {}
try:
user_records = self.records_collection.find_one({ Keys.RECORDS_USER_ID: user_id })
if user_records is not None:
bests = {}
for activity_id in user_records:
if InputChecker.is_uuid(activity_id):
activity_bests = user_records[activity_id]
activity_time = activity_bests[Keys.ACTIVITY_START_TIME_KEY]
if activity_time >= cutoff_time_lower and activity_time < cutoff_time_higher:
bests[activity_id] = activity_bests
return bests
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return {}
def delete_activity_best_for_user(self, user_id, activity_id):
"""Delete method for a user's personal records for a given activity."""
if user_id is None:
self.log_error(MongoDatabase.delete_activity_best_for_user.__name__ + ": Unexpected empty object: user_id")
return False
if activity_id is None:
self.log_error(MongoDatabase.delete_activity_best_for_user.__name__ + ": Unexpected empty object: activity_id")
return False
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.delete_activity_best_for_user.__name__ + ": Invalid object: activity_id")
return False
try:
user_records = self.records_collection.find_one({ Keys.RECORDS_USER_ID: user_id })
if user_records is not None:
user_records.pop(activity_id, None)
self.records_collection.save(user_records)
return True
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
#
# Activity management methods
#
    @Perf.statistics
    def retrieve_user_activity_list(self, user_id, start_time, end_time, return_all_data):
        """Retrieves the list of activities associated with the specified user."""
        """If return_all_data is False then only metadata is returned."""
        if user_id is None:
            self.log_error(MongoDatabase.retrieve_user_activity_list.__name__ + ": Unexpected empty object: user_id")
            return []
        try:
            # Things we don't need: exclude the bulky keys when only metadata
            # was requested, to keep the result size down.
            if return_all_data:
                exclude_keys = None
            else:
                exclude_keys = self.list_excluded_activity_keys_activity_lists()
            # With no time bounds, return every activity owned by the user.
            if start_time is None or end_time is None:
                return list(self.activities_collection.find({ "$and": [ { Keys.ACTIVITY_USER_ID_KEY: { '$eq': user_id } } ]}, exclude_keys))
            # Otherwise restrict to activities whose start time falls strictly
            # between start_time and end_time (both bounds exclusive).
            return list(self.activities_collection.find({ "$and": [ { Keys.ACTIVITY_USER_ID_KEY: { '$eq': user_id }}, { Keys.ACTIVITY_START_TIME_KEY: { '$gt': start_time } }, { Keys.ACTIVITY_START_TIME_KEY: { '$lt': end_time } } ]}, exclude_keys))
        except:
            self.log_error(traceback.format_exc())
            self.log_error(sys.exc_info()[0])
        return []
@Perf.statistics
def retrieve_each_user_activity(self, context, user_id, callback_func, return_all_data):
"""Retrieves each user activity and calls the callback function for each one."""
"""Returns TRUE on success, FALSE if an error was encountered."""
"""If return_all_data is False then only metadata is returned."""
try:
# Things we don't need.
if return_all_data:
exclude_keys = None
else:
exclude_keys = self.list_excluded_activity_keys_activity_lists()
activities_cursor = self.activities_collection.find({ Keys.ACTIVITY_USER_ID_KEY: user_id }, exclude_keys)
if activities_cursor is not None:
while activities_cursor.alive:
activity = activities_cursor.next()
callback_func(context, activity, user_id)
return True
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
    @Perf.statistics
    def retrieve_devices_activity_list(self, devices, start_time, end_time, return_all_data):
        """Retrieves the list of activities associated with the specified devices.

        'devices' is an iterable of device id strings; entries that are not valid
        UUIDs are silently skipped. If return_all_data is False then only metadata
        is returned. Returns a list of activity documents, [] on error."""
        if devices is None:
            self.log_error(MongoDatabase.retrieve_devices_activity_list.__name__ + ": Unexpected empty object: devices")
            return []
        try:
            # Things we don't need: exclude the bulky keys when only metadata was requested.
            if return_all_data:
                exclude_keys = None
            else:
                exclude_keys = self.list_excluded_activity_keys_activity_lists()
            # Build part of the expression while sanity checking the input.
            # NOTE(review): if no device string is a valid UUID, device_list is empty
            # and the "$or" query presumably fails into the except handler — confirm.
            device_list = []
            for device_str in devices:
                if InputChecker.is_uuid(device_str):
                    device_list.append( { Keys.ACTIVITY_DEVICE_STR_KEY: {'$eq': device_str} } )
            # With no time bounds, return every activity for any of the devices;
            # otherwise bound the start time strictly between start_time and end_time.
            if start_time is None or end_time is None:
                return list(self.activities_collection.find({ "$or": device_list }, exclude_keys))
            return list(self.activities_collection.find({ "$and": [ { "$or": device_list }, { Keys.ACTIVITY_START_TIME_KEY: { '$gt': start_time } }, { Keys.ACTIVITY_START_TIME_KEY: { '$lt': end_time } } ] }, exclude_keys))
        except:
            self.log_error(traceback.format_exc())
            self.log_error(sys.exc_info()[0])
        return []
@Perf.statistics
def retrieve_each_device_activity(self, context, user_id, device_str, callback_func, return_all_data):
"""Retrieves each device activity and calls the callback function for each one."""
"""If return_all_data is False then only metadata is returned."""
if user_id is None:
self.log_error(MongoDatabase.retrieve_each_device_activity.__name__ + ": Unexpected empty object: device_str")
return None
if device_str is None:
self.log_error(MongoDatabase.retrieve_each_device_activity.__name__ + ": Unexpected empty object: device_str")
return None
if callback_func is None:
self.log_error(MongoDatabase.retrieve_each_device_activity.__name__ + ": Unexpected empty object: device_str")
return None
try:
# Things we don't need.
if return_all_data:
exclude_keys = None
else:
exclude_keys = self.list_excluded_activity_keys_activity_lists()
activities_cursor = self.activities_collection.find({ Keys.ACTIVITY_DEVICE_STR_KEY: device_str }, exclude_keys)
if activities_cursor is not None:
try:
while activities_cursor.alive:
activity = activities_cursor.next()
callback_func(context, activity, user_id)
except StopIteration:
pass
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return None
@Perf.statistics
def retrieve_most_recent_activity_for_device(self, device_str, return_all_data):
"""Retrieves the ID for the most recent activity to be associated with the specified device."""
if device_str is None:
self.log_error(MongoDatabase.retrieve_most_recent_activity_for_device.__name__ + ": Unexpected empty object: device_str")
return None
try:
# Things we don't need.
if return_all_data:
exclude_keys = None
else:
exclude_keys = self.list_excluded_activity_keys_activity_lists()
activity = self.activities_collection.find_one({ Keys.ACTIVITY_DEVICE_STR_KEY: device_str }, exclude_keys, sort=[( '_id', pymongo.DESCENDING )])
return activity
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return None
def create_activity(self, activity_id, activty_name, date_time, device_str):
"""Create method for an activity."""
if activity_id is None:
self.log_error(MongoDatabase.create_activity.__name__ + ": Unexpected empty object: activity_id")
return False
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.create_activity.__name__ + ": Invalid object: activity_id")
return False
if activty_name is None:
self.log_error(MongoDatabase.create_activity.__name__ + ": Unexpected empty object: activty_name")
return False
if date_time is None:
self.log_error(MongoDatabase.create_activity.__name__ + ": Unexpected empty object: date_time")
return False
if device_str is None:
self.log_error(MongoDatabase.create_activity.__name__ + ": Unexpected empty object: device_str")
return False
try:
# Make sure the activity name is a string.
activty_name = str(activty_name)
# Create the activity.
post = { Keys.ACTIVITY_ID_KEY: activity_id, Keys.ACTIVITY_NAME_KEY: activty_name, Keys.ACTIVITY_START_TIME_KEY: date_time, Keys.ACTIVITY_DEVICE_STR_KEY: device_str, Keys.ACTIVITY_VISIBILITY_KEY: "public", Keys.ACTIVITY_LOCATIONS_KEY: [] }
self.activities_collection.insert(post)
return True
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
@Perf.statistics
def retrieve_activity(self, activity_id):
"""Retrieve method for an activity, specified by the activity ID."""
if activity_id is None:
self.log_error(MongoDatabase.retrieve_activity.__name__ + ": Unexpected empty object: activity_id")
return None
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.retrieve_activity.__name__ + ": Invalid object: activity_id")
return None
try:
# Find the activity.
return self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: re.compile(activity_id, re.IGNORECASE) })
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return None
@Perf.statistics
def retrieve_activity_small(self, activity_id):
"""Retrieve method for an activity, specified by the activity ID."""
if activity_id is None:
self.log_error(MongoDatabase.retrieve_activity_small.__name__ + ": Unexpected empty object: activity_id")
return None
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.retrieve_activity_small.__name__ + ": Invalid object: activity_id")
return None
try:
# Things we don't need.
exclude_keys = self.list_excluded_activity_keys_activity_lists()
# Find the activity.
return self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: re.compile(activity_id, re.IGNORECASE) }, exclude_keys)
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return None
    def update_activity(self, device_str, activity_id, locations, sensor_readings_dict, metadata_list_dict):
        """Updates locations, sensor readings, and metadata associated with a moving activity. Provided as a performance improvement over making several database updates.

        'locations' is a list of location arrays (order defined in Api.parse_json_loc_obj);
        'sensor_readings_dict' and 'metadata_list_dict' map a type name to a list
        of (time, value) pairs. Creates the activity if it does not exist yet.
        Returns True on success."""
        if device_str is None:
            self.log_error(MongoDatabase.update_activity.__name__ + ": Unexpected empty object: device_str")
            return False
        if activity_id is None:
            self.log_error(MongoDatabase.update_activity.__name__ + ": Unexpected empty object: activity_id")
            return False
        if not InputChecker.is_uuid(activity_id):
            self.log_error(MongoDatabase.update_activity.__name__ + ": Invalid object: activity_id")
            return False
        if not locations:
            self.log_error(MongoDatabase.update_activity.__name__ + ": Unexpected empty object: locations")
            return False
        try:
            # Find the activity.
            activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id, Keys.ACTIVITY_DEVICE_STR_KEY: device_str })
            # If the activity was not found then create it, using the first
            # location's timestamp (milliseconds -> seconds) as the start time.
            if activity is None:
                first_location = locations[0]
                if self.create_activity(activity_id, "", first_location[0] / 1000, device_str):
                    activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id, Keys.ACTIVITY_DEVICE_STR_KEY: device_str })
            # If the activity was found.
            if activity is not None:
                # Update the locations. Location data is an array, the order is defined in Api.parse_json_loc_obj.
                if len(locations) > 0:
                    # Find any existing location data.
                    old_locations = []
                    if Keys.ACTIVITY_LOCATIONS_KEY in activity:
                        old_locations = activity[Keys.ACTIVITY_LOCATIONS_KEY]
                    # Append the new locations, converting each array into a keyed document.
                    for location in locations:
                        value = { Keys.LOCATION_TIME_KEY: location[0], Keys.LOCATION_LAT_KEY: location[1], Keys.LOCATION_LON_KEY: location[2], Keys.LOCATION_ALT_KEY: location[3], Keys.LOCATION_HORIZONTAL_ACCURACY_KEY: location[4], Keys.LOCATION_VERTICAL_ACCURACY_KEY: location[5] }
                        old_locations.append(value)
                    # Make sure everything is in the right order, no guarantee we got the updates in the correct order.
                    old_locations.sort(key=retrieve_time_from_location)
                    # Update the database.
                    activity[Keys.ACTIVITY_LOCATIONS_KEY] = old_locations
                # Update the sensor readings: merge the new (time, value) pairs
                # into each sensor type's existing list, keeping it time-sorted.
                if sensor_readings_dict:
                    for sensor_type in sensor_readings_dict:
                        # Existing sensor values.
                        old_value_list = []
                        if sensor_type in activity:
                            old_value_list = activity[sensor_type]
                        # Append new values.
                        for value in sensor_readings_dict[sensor_type]:
                            time_value_pair = { str(value[0]): float(value[1]) }
                            old_value_list.append(time_value_pair)
                        # Sort and update.
                        old_value_list.sort(key=retrieve_time_from_time_value_pair)
                        activity[sensor_type] = old_value_list
                # Update the metadata readings, with the same merge-and-sort
                # treatment as the sensor readings above.
                if metadata_list_dict:
                    for metadata_type in metadata_list_dict:
                        # Existing metadata values.
                        old_value_list = []
                        if metadata_type in activity:
                            old_value_list = activity[metadata_type]
                        # Append new values.
                        for value in metadata_list_dict[metadata_type]:
                            time_value_pair = { str(value[0]): float(value[1]) }
                            old_value_list.append(time_value_pair)
                        # Sort and update.
                        old_value_list.sort(key=retrieve_time_from_time_value_pair)
                        activity[metadata_type] = old_value_list
                # Write out the changes in a single save.
                self.activities_collection.save(activity)
                return True
        except:
            self.log_error(traceback.format_exc())
            self.log_error(sys.exc_info()[0])
        return False
def delete_activity(self, activity_id):
"""Delete method for an activity, specified by the activity ID."""
if activity_id is None:
self.log_error(MongoDatabase.delete_activity.__name__ + ": Unexpected empty object: activity_id")
return False
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.delete_activity.__name__ + ": Invalid object: activity_id")
return False
try:
deleted_result = self.activities_collection.delete_one({ Keys.ACTIVITY_ID_KEY: activity_id })
if deleted_result is not None:
return True
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
def activity_exists(self, activity_id):
"""Determines whether or not there is a document corresonding to the activity ID."""
if activity_id is None:
self.log_error(MongoDatabase.activity_exists.__name__ + ": Unexpected empty object: activity_id")
return False
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.activity_exists.__name__ + ": Invalid object: activity_id")
return False
try:
return self.activities_collection.count_documents({ Keys.ACTIVITY_ID_KEY: activity_id }, limit = 1) != 0
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
def retrieve_activity_visibility(self, activity_id):
"""Returns the visibility setting for the specified activity."""
if activity_id is None:
self.log_error(MongoDatabase.retrieve_activity_visibility.__name__ + ": Unexpected empty object: activity_id")
return None
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.retrieve_activity_visibility.__name__ + ": Invalid object: activity_id")
return None
try:
# Find the activity.
activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
# If the activity was found and it has a visibility setting.
if activity is not None and Keys.ACTIVITY_VISIBILITY_KEY in activity:
visibility = activity[Keys.ACTIVITY_VISIBILITY_KEY]
return visibility
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return None
def update_activity_visibility(self, activity_id, visibility):
"""Changes the visibility setting for the specified activity."""
if activity_id is None:
self.log_error(MongoDatabase.update_activity_visibility.__name__ + ": Unexpected empty object: activity_id")
return False
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.update_activity_visibility.__name__ + ": Invalid object: activity_id")
return False
if visibility is None:
self.log_error(MongoDatabase.update_activity_visibility.__name__ + ": Unexpected empty object: visibility")
return False
try:
# Find the activity.
activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
# If the activity was found.
if activity is not None:
activity[Keys.ACTIVITY_VISIBILITY_KEY] = visibility
self.activities_collection.save(activity)
return True
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
    def create_activity_locations(self, device_str, activity_id, locations):
        """Adds several locations to the database. 'locations' is an array of arrays in the form [time, lat, lon, alt].

        Creates the activity if it does not exist yet. Returns True on success."""
        if device_str is None:
            self.log_error(MongoDatabase.create_activity_locations.__name__ + ": Unexpected empty object: device_str")
            return False
        if activity_id is None:
            self.log_error(MongoDatabase.create_activity_locations.__name__ + ": Unexpected empty object: activity_id")
            return False
        if not InputChecker.is_uuid(activity_id):
            self.log_error(MongoDatabase.create_activity_locations.__name__ + ": Invalid object: activity_id")
            return False
        if not locations:
            self.log_error(MongoDatabase.create_activity_locations.__name__ + ": Unexpected empty object: locations")
            return False
        try:
            # Find the activity.
            activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id, Keys.ACTIVITY_DEVICE_STR_KEY: device_str })
            # If the activity was not found then create it, using the first
            # location's timestamp (milliseconds -> seconds) as the start time.
            if activity is None:
                first_location = locations[0]
                if self.create_activity(activity_id, "", first_location[0] / 1000, device_str):
                    activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id, Keys.ACTIVITY_DEVICE_STR_KEY: device_str })
            # If the activity was found.
            if activity is not None:
                location_list = []
                # Get the existing list.
                if Keys.ACTIVITY_LOCATIONS_KEY in activity:
                    location_list = activity[Keys.ACTIVITY_LOCATIONS_KEY]
                # Append the new locations, converting each array into a keyed document.
                for location in locations:
                    value = { Keys.LOCATION_TIME_KEY: location[0], Keys.LOCATION_LAT_KEY: location[1], Keys.LOCATION_LON_KEY: location[2], Keys.LOCATION_ALT_KEY: location[3] }
                    location_list.append(value)
                # Make sure everything is in time order; updates may arrive out of order.
                location_list.sort(key=retrieve_time_from_location)
                # Save the changes.
                activity[Keys.ACTIVITY_LOCATIONS_KEY] = location_list
                self.activities_collection.save(activity)
                return True
        except:
            self.log_error(traceback.format_exc())
            self.log_error(sys.exc_info()[0])
        return False
def retrieve_activity_locations(self, activity_id):
"""Returns all the locations for the specified activity."""
if activity_id is None:
self.log_error(MongoDatabase.retrieve_activity_locations.__name__ + ": Unexpected empty object: activity_id")
return None
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.retrieve_activity_locations.__name__ + ": Invalid object: activity_id")
return None
try:
# Find the activity.
activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
# If the activity was found and it has location data.
if activity is not None and Keys.ACTIVITY_LOCATIONS_KEY in activity:
locations = activity[Keys.ACTIVITY_LOCATIONS_KEY]
locations.sort(key=retrieve_time_from_location)
return locations
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return None
def create_activity_sensor_reading(self, activity_id, date_time, sensor_type, value):
"""Create method for a piece of sensor data, such as a heart rate or power meter reading."""
if activity_id is None:
self.log_error(MongoDatabase.create_activity_sensor_reading.__name__ + ": Unexpected empty object: activity_id")
return False
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.create_activity_sensor_reading.__name__ + ": Invalid object: activity_id")
return False
if date_time is None:
self.log_error(MongoDatabase.create_activity_sensor_reading.__name__ + ": Unexpected empty object: date_time")
return False
if sensor_type is None:
self.log_error(MongoDatabase.create_activity_sensor_reading.__name__ + ": Unexpected empty object: sensor_type")
return False
if value is None:
self.log_error(MongoDatabase.create_activity_sensor_reading.__name__ + ": Unexpected empty object: value")
return False
try:
# Find the activity.
activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
# If the activity was found.
if activity is not None:
value_list = []
# Get the existing list.
if sensor_type in activity:
value_list = activity[sensor_type]
time_value_pair = { str(date_time): float(value) }
value_list.append(time_value_pair)
value_list.sort(key=retrieve_time_from_time_value_pair)
# Save the changes.
activity[sensor_type] = value_list
self.activities_collection.save(activity)
return True
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
def create_activity_sensor_readings(self, activity_id, sensor_type, values):
"""Create method for several pieces of sensor data, such as a heart rate or power meter reading."""
if activity_id is None:
self.log_error(MongoDatabase.create_activity_sensor_readings.__name__ + ": Unexpected empty object: activity_id")
return False
if not InputChecker.is_uuid(activity_id):
self.log_error(MongoDatabase.create_activity_sensor_readings.__name__ + ": Invalid object: activity_id")
return False
if sensor_type is None:
self.log_error(MongoDatabase.create_activity_sensor_readings.__name__ + ": Unexpected empty object: sensor_type")
return False
if values is None:
self.log_error(MongoDatabase.create_activity_sensor_readings.__name__ + ": Unexpected empty object: values")
return False
try:
# Find the activity.
activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
# If the activity was found.
if activity is not None:
value_list = []
# Get the existing list.
if sensor_type in activity:
value_list = activity[sensor_type]
for value in values:
time_value_pair = { str(value[0]): float(value[1]) }
value_list.append(time_value_pair)
value_list.sort(key=retrieve_time_from_time_value_pair)
# Save the changes.
activity[sensor_type] = value_list
self.activities_collection.save(activity)
return True
except:
self.log_error(traceback.format_exc())
self.log_error(sys.exc_info()[0])
return False
def retrieve_activity_sensor_readings(self, sensor_type, activity_id):
    """Returns all the sensor data for the specified sensor for the given activity.

    The readings are returned sorted by time. Returns None if the activity is
    not found, has no data for the sensor, or an error occurs.
    """
    if sensor_type is None:
        self.log_error(MongoDatabase.retrieve_activity_sensor_readings.__name__ + ": Unexpected empty object: sensor_type")
        return None
    if activity_id is None:
        self.log_error(MongoDatabase.retrieve_activity_sensor_readings.__name__ + ": Unexpected empty object: activity_id")
        return None
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.retrieve_activity_sensor_readings.__name__ + ": Invalid object: activity_id")
        return None
    try:
        # Look up the activity document and read the requested sensor list off it.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        if activity is not None and sensor_type in activity:
            # Return a time-ordered copy of the stored readings.
            return sorted(activity[sensor_type], key=retrieve_time_from_time_value_pair)
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None
def create_activity_event(self, activity_id, event):
    """Inherited from ActivityWriter. Appends a single event dictionary to the activity's event list.

    Returns True on success, None otherwise.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.create_activity_event.__name__ + ": Unexpected empty object: activity_id")
        return None
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.create_activity_event.__name__ + ": Invalid object: activity_id")
        return None
    if event is None:
        self.log_error(MongoDatabase.create_activity_event.__name__ + ": Unexpected empty object: event")
        return None
    try:
        # Fetch the activity document.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        if activity is not None:
            # Append the event to whatever event list already exists and persist it.
            event_list = activity.get(Keys.APP_EVENTS_KEY, [])
            event_list.append(event)
            activity[Keys.APP_EVENTS_KEY] = event_list
            self.activities_collection.save(activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None
def create_activity_events(self, activity_id, events):
    """Inherited from ActivityWriter. 'events' is an array of dictionaries in which each dictionary describes an event.

    All events are appended to the activity's event list. Returns True on
    success, None otherwise.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.create_activity_events.__name__ + ": Unexpected empty object: activity_id")
        return None
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.create_activity_events.__name__ + ": Invalid object: activity_id")
        return None
    if events is None:
        self.log_error(MongoDatabase.create_activity_events.__name__ + ": Unexpected empty object: events")
        return None
    try:
        # Fetch the activity document.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        if activity is not None:
            # Append all the new events to whatever event list already exists.
            event_list = activity.get(Keys.APP_EVENTS_KEY, [])
            event_list.extend(events)
            activity[Keys.APP_EVENTS_KEY] = event_list
            self.activities_collection.save(activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None
def create_activity_metadata(self, activity_id, date_time, key, value, create_list):
    """Create method for a piece of metadata. When dealing with a list, will append values.

    When create_list is True the value is appended to a time-ordered list of
    { str(date_time): value } pairs stored under 'key'. Otherwise 'key' is
    treated as a scalar and only written when the value actually changed.
    Returns True on success (including the no-op case where the scalar is
    already up to date), False otherwise.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.create_activity_metadata.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.create_activity_metadata.__name__ + ": Invalid object: activity_id")
        return False
    if date_time is None:
        self.log_error(MongoDatabase.create_activity_metadata.__name__ + ": Unexpected empty object: date_time")
        return False
    if key is None:
        self.log_error(MongoDatabase.create_activity_metadata.__name__ + ": Unexpected empty object: key")
        return False
    if value is None:
        self.log_error(MongoDatabase.create_activity_metadata.__name__ + ": Unexpected empty object: value")
        return False
    if create_list is None:
        self.log_error(MongoDatabase.create_activity_metadata.__name__ + ": Unexpected empty object: create_list")
        return False
    try:
        # Find the activity.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        # If the activity was found.
        if activity is not None:
            # Coerce to a number unless the key is one of the known string-valued keys.
            try:
                if key not in [ Keys.ACTIVITY_NAME_KEY, Keys.ACTIVITY_TYPE_KEY, Keys.ACTIVITY_DESCRIPTION_KEY ]:
                    value = float(value)
            except ValueError:
                pass
            # The metadata is a time series: append and keep sorted by time.
            if create_list is True:
                value_list = activity.get(key, [])
                value_list.append({ str(date_time): value })
                value_list.sort(key=retrieve_time_from_time_value_pair)
                activity[key] = value_list
                self.activities_collection.save(activity)
            # The metadata is a scalar: only write when it changed or didn't exist.
            elif key not in activity or activity[key] != value:
                activity[key] = value
                self.activities_collection.save(activity)
            # In the remaining case the scalar already holds this value, which is
            # fine; the original code's trailing 'elif activity[key] == value' was
            # always true at that point, so the three branches collapse to this.
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def create_activity_metadata_list(self, activity_id, key, values):
    """Create method for a list of metadata values. Will overwrite existing data.

    'values' is an iterable of (time, value) pairs; each pair is stored as a
    { str(time): float(value) } dictionary and the list is kept sorted by time.
    Returns True on success, False otherwise.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.create_activity_metadata_list.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.create_activity_metadata_list.__name__ + ": Invalid object: activity_id")
        return False
    if key is None:
        self.log_error(MongoDatabase.create_activity_metadata_list.__name__ + ": Unexpected empty object: key")
        return False
    if values is None:
        self.log_error(MongoDatabase.create_activity_metadata_list.__name__ + ": Unexpected empty object: values")
        return False
    try:
        # Find the activity.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        # If the activity was found.
        if activity is not None:
            # Build a fresh time/value list; any list previously stored under
            # this key is replaced, not appended to.
            value_list = []
            for value in values:
                time_value_pair = { str(value[0]): float(value[1]) }
                value_list.append(time_value_pair)
            value_list.sort(key=retrieve_time_from_time_value_pair)
            activity[key] = value_list
            self.activities_collection.save(activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def create_activity_sets_and_reps_data(self, activity_id, sets):
    """Create method for an activity's sets and reps data.

    'sets' is stored as-is under the application sets key, replacing any
    previously stored value; its internal structure is defined by the caller.
    Returns True on success, False otherwise.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.create_activity_sets_and_reps_data.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.create_activity_sets_and_reps_data.__name__ + ": Invalid object: activity_id")
        return False
    if sets is None:
        self.log_error(MongoDatabase.create_activity_sets_and_reps_data.__name__ + ": Unexpected empty object: sets")
        return False
    try:
        # Find the activity.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        # If the activity was found.
        if activity is not None:
            activity[Keys.APP_SETS_KEY] = sets
            self.activities_collection.save(activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def create_activity_accelerometer_reading(self, device_str, activity_id, accels):
    """Adds several accelerometer readings to the database. 'accels' is an array of arrays in the form [time, x, y, z].

    If no activity exists for (activity_id, device_str), one is created first.
    Readings whose time is older than the last stored reading are logged and
    dropped rather than inserted. Returns True on success, False otherwise.
    """
    if device_str is None:
        self.log_error(MongoDatabase.create_activity_accelerometer_reading.__name__ + ": Unexpected empty object: device_str")
        return False
    if activity_id is None:
        self.log_error(MongoDatabase.create_activity_accelerometer_reading.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.create_activity_accelerometer_reading.__name__ + ": Invalid object: activity_id")
        return False
    if not accels:
        self.log_error(MongoDatabase.create_activity_accelerometer_reading.__name__ + ": Unexpected empty object: accels")
        return False
    try:
        # Find the activity.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id, Keys.ACTIVITY_DEVICE_STR_KEY: device_str })
        # If the activity was not found then create it.
        if activity is None:
            first_accel = accels[0]
            # NOTE(review): the first timestamp is divided by 1000 here, which suggests
            # accel times are in milliseconds while activity start times are in
            # seconds — confirm against the data producers.
            if self.create_activity(activity_id, "", first_accel[0] / 1000, device_str):
                activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id, Keys.ACTIVITY_DEVICE_STR_KEY: device_str })
        # If the activity was found.
        if activity is not None:
            accel_list = []
            # Get the existing list.
            if Keys.APP_ACCELEROMETER_KEY in activity:
                accel_list = activity[Keys.APP_ACCELEROMETER_KEY]
            for accel in accels:
                # Make sure time values are monotonically increasing; out-of-order
                # readings are rejected (logged), not inserted.
                if accel_list and int(accel_list[-1][Keys.ACCELEROMETER_TIME_KEY]) > accel[0]:
                    self.log_error(MongoDatabase.create_activity_accelerometer_reading.__name__ + ": Received out-of-order time value.")
                else:
                    value = { Keys.ACCELEROMETER_TIME_KEY: accel[0], Keys.ACCELEROMETER_AXIS_NAME_X: accel[1], Keys.ACCELEROMETER_AXIS_NAME_Y: accel[2], Keys.ACCELEROMETER_AXIS_NAME_Z: accel[3] }
                    accel_list.append(value)
            activity[Keys.APP_ACCELEROMETER_KEY] = accel_list
            self.activities_collection.save(activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def create_activity_summary(self, activity_id, summary_data):
    """Create method for activity summary data. Summary data is data computed from the raw data."""
    # Validate arguments before touching the database.
    if activity_id is None:
        self.log_error(MongoDatabase.create_activity_summary.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.create_activity_summary.__name__ + ": Invalid object: activity_id")
        return False
    if summary_data is None:
        self.log_error(MongoDatabase.create_activity_summary.__name__ + ": Unexpected empty object: summary_data")
        return False
    try:
        # Look up the activity document and, if present, attach the summary to it.
        found_activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        if found_activity is not None:
            found_activity[Keys.ACTIVITY_SUMMARY_KEY] = summary_data
            self.activities_collection.save(found_activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_activity_summary(self, activity_id):
    """Returns the activity summary data. Summary data is data computed from the raw data."""
    if activity_id is None:
        self.log_error(MongoDatabase.retrieve_activity_summary.__name__ + ": Unexpected empty object: activity_id")
        return None
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.retrieve_activity_summary.__name__ + ": Invalid object: activity_id")
        return None
    try:
        # Fetch the activity document and read its summary, if one is stored.
        doc = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        if doc is not None:
            return doc.get(Keys.ACTIVITY_SUMMARY_KEY, None)
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None
def delete_activity_summary(self, activity_id):
    """Delete method for activity summary data. Summary data is data computed from the raw data.

    NOTE: the actual deletion is deliberately commented out below for
    performance reasons, so this currently only validates the inputs,
    confirms the summary exists, and reports True without removing anything.
    Returns False when the activity or its summary cannot be found.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.delete_activity_summary.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.delete_activity_summary.__name__ + ": Invalid object: activity_id")
        return False
    try:
        # Find the activity.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        # If the activity was found.
        if activity is not None and Keys.ACTIVITY_SUMMARY_KEY in activity:
            # Currently left out for performance reasons.
            #activity[Keys.ACTIVITY_SUMMARY_KEY] = {}
            #self.activities_collection.save(activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
#
# Tag management methods
#
def create_tag(self, activity_id, tag):
    """Adds a tag to the specified activity."""
    if activity_id is None:
        self.log_error(MongoDatabase.create_tag.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.create_tag.__name__ + ": Invalid object: activity_id")
        return False
    if tag is None:
        self.log_error(MongoDatabase.create_tag.__name__ + ": Unexpected empty object: tag")
        return False
    try:
        # Fetch the activity document.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        if activity is not None:
            # Append the tag to whatever tag list already exists and persist it.
            tag_list = activity.get(Keys.ACTIVITY_TAGS_KEY, [])
            tag_list.append(tag)
            activity[Keys.ACTIVITY_TAGS_KEY] = tag_list
            self.activities_collection.save(activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def create_tags_on_activity(self, activity, tags):
    """Replaces the tag list on the given activity document and saves it.

    'activity' must be a document previously fetched from the activities
    collection. Returns True on success, False otherwise.
    """
    if activity is None:
        self.log_error(MongoDatabase.create_tags_on_activity.__name__ + ": Unexpected empty object: activity")
        return False
    if tags is None:
        self.log_error(MongoDatabase.create_tags_on_activity.__name__ + ": Unexpected empty object: tags")
        return False
    try:
        # Overwrite any existing tag list and persist the change.
        activity[Keys.ACTIVITY_TAGS_KEY] = tags
        self.activities_collection.save(activity)
        return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def create_tags_on_activity_by_id(self, activity_id, tags):
    """Replaces the tag list on the activity with the given ID.

    Returns True on success; False if the activity is not found or on error.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.create_tags_on_activity_by_id.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.create_tags_on_activity_by_id.__name__ + ": Invalid object: activity_id")
        return False
    if tags is None:
        self.log_error(MongoDatabase.create_tags_on_activity_by_id.__name__ + ": Unexpected empty object: tags")
        return False
    try:
        # Fetch the activity, then overwrite its tag list and persist the change.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        if activity is not None:
            activity[Keys.ACTIVITY_TAGS_KEY] = tags
            self.activities_collection.save(activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_tags(self, activity_id):
    """Retrieves all the tags for the specified activity.

    Returns an empty list when the activity is not found, has no tags, or an
    error occurs.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.retrieve_tags.__name__ + ": Unexpected empty object: activity_id")
        return []
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.retrieve_tags.__name__ + ": Invalid object: activity_id")
        return []
    try:
        # Read the tag list, if any, off the activity document.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        if activity is not None:
            return activity.get(Keys.ACTIVITY_TAGS_KEY, [])
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return []
def delete_tag(self, activity_id, tag):
    """Deletes the specified tag from the activity with the given ID.

    Returns True on success. Returns False if the activity is not found, has
    no tags, or the tag is not in its tag list.
    """
    # Fixed: these guards previously logged create_tag's name instead of delete_tag's.
    if activity_id is None:
        self.log_error(MongoDatabase.delete_tag.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.delete_tag.__name__ + ": Invalid object: activity_id")
        return False
    if tag is None:
        self.log_error(MongoDatabase.delete_tag.__name__ + ": Unexpected empty object: tag")
        return False
    try:
        # Find the activity.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        # If the activity was found and has tags.
        if activity is not None and Keys.ACTIVITY_TAGS_KEY in activity:
            data = activity[Keys.ACTIVITY_TAGS_KEY]
            # Only remove and save when the tag is actually present; previously a
            # missing tag made list.remove() raise, logging a spurious traceback.
            if tag in data:
                data.remove(tag)
                activity[Keys.ACTIVITY_TAGS_KEY] = data
                self.activities_collection.save(activity)
                return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
#
# Activity comment management methods
#
def create_activity_comment(self, activity_id, commenter_id, comment):
    """Create method for a comment on an activity.

    Comments are stored as JSON-encoded strings pairing the commenter's ID
    with the comment text. Returns True on success, False otherwise.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.create_activity_comment.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.create_activity_comment.__name__ + ": Invalid object: activity_id")
        return False
    if commenter_id is None:
        self.log_error(MongoDatabase.create_activity_comment.__name__ + ": Unexpected empty object: commenter_id")
        return False
    if comment is None:
        self.log_error(MongoDatabase.create_activity_comment.__name__ + ": Unexpected empty object: comment")
        return False
    try:
        # Fetch the activity document.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        if activity is not None:
            # Append the encoded comment to any existing comment list and persist.
            comments = activity.get(Keys.ACTIVITY_COMMENTS_KEY, [])
            comments.append(json.dumps({ Keys.ACTIVITY_COMMENTER_ID_KEY: commenter_id, Keys.ACTIVITY_COMMENT_KEY: comment }))
            activity[Keys.ACTIVITY_COMMENTS_KEY] = comments
            self.activities_collection.save(activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_activity_comments(self, activity_id):
    """Returns a list containing all of the comments on the specified activity.

    Comments are JSON-encoded strings (see create_activity_comment). Returns
    None when the activity is not found, has no comments, the input is
    invalid, or an error occurs.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.retrieve_activity_comments.__name__ + ": Unexpected empty object: activity_id")
        return None
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.retrieve_activity_comments.__name__ + ": Invalid object: activity_id")
        # Fixed: this path previously returned False while every other failure
        # path of this retrieve method returns None.
        return None
    try:
        # Find the activity.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        # If the activity was found and contains comments.
        if activity is not None and Keys.ACTIVITY_COMMENTS_KEY in activity:
            return activity[Keys.ACTIVITY_COMMENTS_KEY]
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None
#
# Activity photo management methods
#
def create_activity_photo(self, user_id, activity_id, photo_hash):
    """Create method for an activity photo.

    The photo's hash is appended to the activity's photo list; duplicates are
    removed while preserving insertion order. Returns True on success.
    """
    if user_id is None:
        self.log_error(MongoDatabase.create_activity_photo.__name__ + ": Unexpected empty object: user_id")
        return False
    if activity_id is None:
        self.log_error(MongoDatabase.create_activity_photo.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.create_activity_photo.__name__ + ": Invalid object: activity_id")
        return False
    if photo_hash is None:
        self.log_error(MongoDatabase.create_activity_photo.__name__ + ": Unexpected empty object: photo_hash")
        return False
    try:
        # Fetch the activity document.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        if activity is not None:
            # Add the hash, de-duplicate (dict.fromkeys keeps order), and persist.
            photo_list = activity.get(Keys.ACTIVITY_PHOTOS_KEY, [])
            photo_list.append(photo_hash)
            activity[Keys.ACTIVITY_PHOTOS_KEY] = list(dict.fromkeys(photo_list))
            self.activities_collection.save(activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def list_activity_photos(self, activity_id):
    """Lists all photos associated with an activity. Response is a list of identifiers.

    Returns an empty list when the activity is not found, has no photos, or an
    error occurs.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.list_activity_photos.__name__ + ": Unexpected empty object: activity_id")
        return []
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.list_activity_photos.__name__ + ": Invalid object: activity_id")
        return []
    try:
        # Exclude the bulky keys a listing doesn't need.
        exclude_keys = self.list_excluded_activity_keys_activity_lists()
        # Fetch the activity and return its photo list, if any.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id }, exclude_keys)
        if activity is not None:
            return activity.get(Keys.ACTIVITY_PHOTOS_KEY, [])
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return []
def delete_activity_photo(self, activity_id, photo_id):
    """Deletes the specified photo from the activity with the given ID.

    Returns True on success. If the photo is not in the activity's photo list,
    list.remove() raises, the bare except below logs it, and False is returned.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.delete_activity_photo.__name__ + ": Unexpected empty object: activity_id")
        return False
    if not InputChecker.is_uuid(activity_id):
        self.log_error(MongoDatabase.delete_activity_photo.__name__ + ": Invalid object: activity_id")
        return False
    if photo_id is None:
        self.log_error(MongoDatabase.delete_activity_photo.__name__ + ": Unexpected empty object: photo_id")
        return False
    try:
        # Find the activity.
        activity = self.activities_collection.find_one({ Keys.ACTIVITY_ID_KEY: activity_id })
        # If the activity was found and has photos.
        if activity is not None and Keys.ACTIVITY_PHOTOS_KEY in activity:
            photos = activity[Keys.ACTIVITY_PHOTOS_KEY]
            photos.remove(photo_id)
            activity[Keys.ACTIVITY_PHOTOS_KEY] = photos
            self.activities_collection.save(activity)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
#
# Planned workout management methods
#
def create_workout(self, user_id, workout_obj):
    """Create method for a workout.

    Appends workout_obj to the user's workouts document, creating the document
    (and a calendar ID) if it does not yet exist. Returns False when a workout
    with the same workout_id is already stored; True on success.
    """
    if user_id is None:
        self.log_error(MongoDatabase.create_workout.__name__ + ": Unexpected empty object: user_id")
        return False
    if workout_obj is None:
        self.log_error(MongoDatabase.create_workout.__name__ + ": Unexpected empty object: workout_obj")
        return False
    try:
        # Find the user's workouts document.
        workouts_doc = self.workouts_collection.find_one({ Keys.WORKOUT_PLAN_USER_ID_KEY: user_id })
        # If the workouts document was not found then create it.
        if workouts_doc is None:
            post = {}
            post[Keys.WORKOUT_PLAN_USER_ID_KEY] = user_id
            post[Keys.WORKOUT_PLAN_CALENDAR_ID_KEY] = str(uuid.uuid4()) # Create a calendar ID
            post[Keys.WORKOUT_LIST_KEY] = []
            self.workouts_collection.insert(post)
            workouts_doc = self.workouts_collection.find_one({ Keys.WORKOUT_PLAN_USER_ID_KEY: user_id })
        # If the workouts document was found (or created).
        if workouts_doc is not None and Keys.WORKOUT_LIST_KEY in workouts_doc:
            # Make sure we have a calendar ID (older documents may predate them).
            if Keys.WORKOUT_PLAN_CALENDAR_ID_KEY not in workouts_doc:
                workouts_doc[Keys.WORKOUT_PLAN_CALENDAR_ID_KEY] = str(uuid.uuid4()) # Create a calendar ID
            # Reject duplicates and, while scanning, track the latest scheduled
            # time among the workouts already stored.
            last_scheduled_workout = 0
            workouts_list = workouts_doc[Keys.WORKOUT_LIST_KEY]
            for workout in workouts_list:
                if Keys.WORKOUT_ID_KEY in workout and workout[Keys.WORKOUT_ID_KEY] == workout_obj.workout_id:
                    return False
                if Keys.WORKOUT_SCHEDULED_TIME_KEY in workout and workout[Keys.WORKOUT_SCHEDULED_TIME_KEY] > last_scheduled_workout:
                    last_scheduled_workout = workout[Keys.WORKOUT_SCHEDULED_TIME_KEY]
            # Add the workout to the list.
            workout = workout_obj.to_dict()
            workouts_list.append(workout)
            # Update and save the document.
            # NOTE(review): last_scheduled_workout reflects only the previously
            # stored workouts, not the one being added here — confirm intended.
            workouts_doc[Keys.WORKOUT_LIST_KEY] = workouts_list
            workouts_doc[Keys.WORKOUT_LAST_SCHEDULED_WORKOUT_TIME_KEY] = last_scheduled_workout
            self.workouts_collection.save(workouts_doc)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_planned_workout(self, user_id, workout_id):
    """Retrieve method for the workout with the specified user and ID.

    Returns a Workout object, or None if no matching workout is found.
    """
    if user_id is None:
        self.log_error(MongoDatabase.retrieve_planned_workout.__name__ + ": Unexpected empty object: user_id")
        return None
    if workout_id is None:
        self.log_error(MongoDatabase.retrieve_planned_workout.__name__ + ": Unexpected empty object: workout_id")
        return None
    try:
        # Load the user's workouts document.
        workouts_doc = self.workouts_collection.find_one({ Keys.WORKOUT_PLAN_USER_ID_KEY: user_id })
        if workouts_doc is not None and Keys.WORKOUT_LIST_KEY in workouts_doc:
            # Scan the stored list for the matching ID and inflate it into an object.
            for workout_dict in workouts_doc[Keys.WORKOUT_LIST_KEY]:
                if Keys.WORKOUT_ID_KEY in workout_dict and str(workout_dict[Keys.WORKOUT_ID_KEY]) == workout_id:
                    result = Workout.Workout(user_id)
                    result.from_dict(workout_dict)
                    return result
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None
def retrieve_planned_workouts_for_user(self, user_id, start_time, end_time):
    """Returns the user's planned workouts as Workout objects.

    Only workouts whose scheduled time falls in (start_time, end_time] are
    included; a bound of None or 0 disables that side of the filter. Returns
    an empty list when there are no matches or on error.
    """
    if user_id is None:
        self.log_error(MongoDatabase.retrieve_planned_workouts_for_user.__name__ + ": Unexpected empty object: user_id")
        return None
    workouts = []
    try:
        # Find the user's workouts document.
        workouts_doc = self.workouts_collection.find_one({ Keys.WORKOUT_PLAN_USER_ID_KEY: user_id })
        # If the workouts document was found.
        if workouts_doc is not None and Keys.WORKOUT_LIST_KEY in workouts_doc:
            workouts_list = workouts_doc[Keys.WORKOUT_LIST_KEY]
            # Create an object for each workout in the list.
            for workout in workouts_list:
                workout_obj = Workout.Workout(user_id)
                workout_obj.from_dict(workout)
                # Convert the scheduled datetime to a unix timestamp for comparison.
                scheduled_time = int(time.mktime(workout_obj.scheduled_time.timetuple()))
                if (start_time is None or start_time == 0 or scheduled_time > start_time) and (end_time is None or end_time == 0 or scheduled_time <= end_time):
                    workouts.append(workout_obj)
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return workouts
def retrieve_workouts_calendar_id_for_user(self, user_id):
    """Returns the ical calendar ID stored on the user's workouts document.

    Returns None when the user has no workouts document, the document has no
    calendar ID, or an error occurs.
    """
    if user_id is None:
        self.log_error(MongoDatabase.retrieve_workouts_calendar_id_for_user.__name__ + ": Unexpected empty object: user_id")
        return None
    try:
        # Find the user's workouts document.
        workouts_doc = self.workouts_collection.find_one({ Keys.WORKOUT_PLAN_USER_ID_KEY: user_id })
        # If the workouts document was found and it has a calendar ID.
        if workouts_doc is not None and Keys.WORKOUT_PLAN_CALENDAR_ID_KEY in workouts_doc:
            return workouts_doc[Keys.WORKOUT_PLAN_CALENDAR_ID_KEY]
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return None
def retrieve_workouts_by_calendar_id(self, calendar_id):
    """Returns the workouts stored on the document with the given calendar ID.

    Each stored dictionary is inflated into a Workout object owned by the
    document's user. Returns an empty list when nothing matches or on error;
    None when calendar_id is empty.
    """
    if calendar_id is None:
        self.log_error(MongoDatabase.retrieve_workouts_by_calendar_id.__name__ + ": Unexpected empty object: calendar_id")
        return None
    results = []
    try:
        # Locate the workouts document carrying this calendar ID.
        workouts_doc = self.workouts_collection.find_one({ Keys.WORKOUT_PLAN_CALENDAR_ID_KEY: calendar_id })
        if workouts_doc is not None and Keys.WORKOUT_LIST_KEY in workouts_doc and Keys.WORKOUT_PLAN_USER_ID_KEY in workouts_doc:
            owner_id = workouts_doc[Keys.WORKOUT_PLAN_USER_ID_KEY]
            # Inflate each stored dictionary into a Workout object.
            for workout_dict in workouts_doc[Keys.WORKOUT_LIST_KEY]:
                workout_obj = Workout.Workout(owner_id)
                workout_obj.from_dict(workout_dict)
                results.append(workout_obj)
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return results
def update_workouts_for_user(self, user_id, workout_objs):
    """Update method for a list of workout objects.

    Replaces the user's entire stored workout list with the serialized form of
    'workout_objs', creating the workouts document if needed. Returns True on
    success, False otherwise.
    """
    if user_id is None:
        self.log_error(MongoDatabase.update_workouts_for_user.__name__ + ": Unexpected empty object: user_id")
        return False
    if workout_objs is None:
        self.log_error(MongoDatabase.update_workouts_for_user.__name__ + ": Unexpected empty object: workout_objs")
        return False
    try:
        # Load the user's workouts document, creating an empty one if necessary.
        workouts_doc = self.workouts_collection.find_one({ Keys.WORKOUT_PLAN_USER_ID_KEY: user_id })
        if workouts_doc is None:
            new_doc = { Keys.WORKOUT_PLAN_USER_ID_KEY: user_id, Keys.WORKOUT_LIST_KEY: [] }
            self.workouts_collection.insert(new_doc)
            workouts_doc = self.workouts_collection.find_one({ Keys.WORKOUT_PLAN_USER_ID_KEY: user_id })
        # Replace the stored list with the serialized workouts and save.
        if workouts_doc is not None and Keys.WORKOUT_LIST_KEY in workouts_doc:
            workouts_doc[Keys.WORKOUT_LIST_KEY] = [ workout_obj.to_dict() for workout_obj in workout_objs ]
            self.workouts_collection.save(workouts_doc)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_users_without_scheduled_workouts(self):
    """Returns a list of user IDs for users who have workout plans that need to be re-run.

    A plan needs to be re-run when its last scheduled workout time is already
    in the past. Returns an empty list on error.
    """
    # Defined outside the try so the final return can never hit an unbound name.
    user_ids = []
    try:
        # Find workout documents whose last scheduled workout time has passed.
        workout_docs = self.workouts_collection.find({ Keys.WORKOUT_LAST_SCHEDULED_WORKOUT_TIME_KEY: { "$lt": time.time() } })
        for workout_doc in workout_docs:
            if Keys.WORKOUT_PLAN_USER_ID_KEY in workout_doc:
                user_ids.append(workout_doc[Keys.WORKOUT_PLAN_USER_ID_KEY])
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return user_ids
#
# Gear default management methods
#
def retrieve_gear_defaults(self, user_id):
    """Returns the user's list of default gear selections.

    Returns an empty list when none are stored or on error; None when user_id
    is empty.
    """
    if user_id is None:
        self.log_error(MongoDatabase.retrieve_gear_defaults.__name__ + ": Unexpected empty object: user_id")
        return None
    try:
        # Look up the user's document by database ID.
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: ObjectId(str(user_id)) })
        if user is not None:
            # An absent key just means no defaults have been stored yet.
            return user.get(Keys.GEAR_DEFAULTS_KEY, [])
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return []
def update_gear_defaults(self, user_id, activity_type, gear_name):
    """Sets the default gear name for the given activity type on the specified user.

    Any existing default entries for the same activity type are removed before
    the new one is appended. Returns True on success, False otherwise.
    """
    if user_id is None:
        self.log_error(MongoDatabase.update_gear_defaults.__name__ + ": Unexpected empty object: user_id")
        return False
    if activity_type is None:
        self.log_error(MongoDatabase.update_gear_defaults.__name__ + ": Unexpected empty object: activity_type")
        return False
    if gear_name is None:
        self.log_error(MongoDatabase.update_gear_defaults.__name__ + ": Unexpected empty object: gear_name")
        return False
    try:
        # Find the user's document.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # If the user's document was found.
        if user is not None:
            # Drop any stale defaults for this activity type. The previous code
            # popped from the list while iterating it with a manually tracked
            # index that kept incrementing after a pop, which could skip entries
            # or remove the wrong one; filtering into a new list fixes that.
            gear_defaults_list = [ gear for gear in user.get(Keys.GEAR_DEFAULTS_KEY, []) \
                if not (Keys.ACTIVITY_TYPE_KEY in gear and gear[Keys.ACTIVITY_TYPE_KEY] == activity_type) ]
            # Add the new default.
            gear = {}
            gear[Keys.ACTIVITY_TYPE_KEY] = activity_type
            gear[Keys.GEAR_NAME_KEY] = gear_name
            gear_defaults_list.append(gear)
            user[Keys.GEAR_DEFAULTS_KEY] = gear_defaults_list
            self.users_collection.save(user)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
#
# Gear management methods
#
def create_gear(self, user_id, gear_id, gear_type, gear_name, gear_description, gear_add_time, gear_retire_time):
    """Adds a new gear record to the specified user's document.

    Returns True on success, False if the user was not found, the gear ID already
    exists, or an error occurred.
    """
    if user_id is None:
        self.log_error(MongoDatabase.create_gear.__name__ + ": Unexpected empty object: user_id")
        return False
    if gear_id is None:
        self.log_error(MongoDatabase.create_gear.__name__ + ": Unexpected empty object: gear_id")
        return False
    if gear_type is None:
        self.log_error(MongoDatabase.create_gear.__name__ + ": Unexpected empty object: gear_type")
        return False
    if gear_name is None:
        self.log_error(MongoDatabase.create_gear.__name__ + ": Unexpected empty object: gear_name")
        return False
    try:
        # Look up the user's document.
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: ObjectId(str(user_id)) })
        if user is not None:
            gear_list = user.get(Keys.GEAR_KEY, [])
            gear_id_str = str(gear_id)
            # Reject duplicate gear IDs.
            if any(existing.get(Keys.GEAR_ID_KEY) == gear_id_str for existing in gear_list):
                return False
            # Append the new gear record and write the document back.
            new_gear = {
                Keys.GEAR_ID_KEY: gear_id_str,
                Keys.GEAR_TYPE_KEY: gear_type,
                Keys.GEAR_NAME_KEY: gear_name,
                Keys.GEAR_DESCRIPTION_KEY: gear_description,
                Keys.GEAR_ADD_TIME_KEY: int(gear_add_time),
                Keys.GEAR_RETIRE_TIME_KEY: int(gear_retire_time),
            }
            gear_list.append(new_gear)
            user[Keys.GEAR_KEY] = gear_list
            self.users_collection.save(user)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_gear(self, user_id):
    """Returns the list of gear records belonging to the specified user.

    Returns None if user_id is None, an empty list if the user was not found
    or an error occurred.
    """
    if user_id is None:
        self.log_error(MongoDatabase.retrieve_gear.__name__ + ": Unexpected empty object: user_id")
        return None
    try:
        # Look up the user's document and hand back its gear list, if any.
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: ObjectId(str(user_id)) })
        if user is not None:
            return user.get(Keys.GEAR_KEY, [])
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return []
def update_gear(self, user_id, gear_id, gear_type, gear_name, gear_description, gear_add_time, gear_retire_time):
    """Updates the gear with the specified ID; only non-None fields are changed.

    Returns True when the matching gear was found and saved, False otherwise.
    """
    if user_id is None:
        self.log_error(MongoDatabase.update_gear.__name__ + ": Unexpected empty object: user_id")
        return False
    if gear_id is None:
        self.log_error(MongoDatabase.update_gear.__name__ + ": Unexpected empty object: gear_id")
        return False
    try:
        # Find the user's document.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # If the user's document was found.
        if user is not None:
            gear_list = user.get(Keys.GEAR_KEY, [])
            gear_id_str = str(gear_id)
            for gear in gear_list:
                if gear.get(Keys.GEAR_ID_KEY) == gear_id_str:
                    # Update the matching record in place. The original popped the item
                    # and re-appended it, which needlessly moved it to the end of the
                    # stored list; in-place mutation already updates the document.
                    if gear_type is not None:
                        gear[Keys.GEAR_TYPE_KEY] = gear_type
                    if gear_name is not None:
                        gear[Keys.GEAR_NAME_KEY] = gear_name
                    if gear_description is not None:
                        gear[Keys.GEAR_DESCRIPTION_KEY] = gear_description
                    if gear_add_time is not None:
                        gear[Keys.GEAR_ADD_TIME_KEY] = int(gear_add_time)
                    if gear_retire_time is not None:
                        gear[Keys.GEAR_RETIRE_TIME_KEY] = int(gear_retire_time)
                    user[Keys.GEAR_KEY] = gear_list
                    self.users_collection.save(user)
                    return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def delete_gear(self, user_id, gear_id):
    """Removes the gear with the specified ID from the user's document.

    Returns True when a matching gear was removed, False otherwise.
    """
    if user_id is None:
        self.log_error(MongoDatabase.delete_gear.__name__ + ": Unexpected empty object: user_id")
        return False
    if gear_id is None:
        self.log_error(MongoDatabase.delete_gear.__name__ + ": Unexpected empty object: gear_id")
        return False
    try:
        # Look up the user's document.
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: ObjectId(str(user_id)) })
        if user is not None:
            gear_list = user.get(Keys.GEAR_KEY, [])
            gear_id_str = str(gear_id)
            # Remove the first gear entry whose ID matches, then persist.
            for index, gear in enumerate(gear_list):
                if gear.get(Keys.GEAR_ID_KEY) == gear_id_str:
                    gear_list.pop(index)
                    user[Keys.GEAR_KEY] = gear_list
                    self.users_collection.save(user)
                    return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def delete_all_gear(self, user_id):
    """Deletes all gear records belonging to the specified user.

    Returns True when the user's gear list was cleared, False otherwise.
    """
    if user_id is None:
        # BUG FIX: the original logged delete_gear's name here, misattributing the error.
        self.log_error(MongoDatabase.delete_all_gear.__name__ + ": Unexpected empty object: user_id")
        return False
    try:
        # Find the user's document.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # If the user's document was found.
        if user is not None:
            # Clear the gear list, if one exists.
            if Keys.GEAR_KEY in user:
                user[Keys.GEAR_KEY] = []
                self.users_collection.save(user)
                return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def create_service_record(self, user_id, gear_id, service_record_id, record_date, record_description):
    """Appends a service record to the service history of the specified gear.

    Returns True when the record was added, False if the user or gear was not
    found or an error occurred.
    """
    if user_id is None:
        self.log_error(MongoDatabase.create_service_record.__name__ + ": Unexpected empty object: user_id")
        return False
    if gear_id is None:
        self.log_error(MongoDatabase.create_service_record.__name__ + ": Unexpected empty object: gear_id")
        return False
    if service_record_id is None:
        self.log_error(MongoDatabase.create_service_record.__name__ + ": Unexpected empty object: service_record_id")
        return False
    if record_date is None:
        self.log_error(MongoDatabase.create_service_record.__name__ + ": Unexpected empty object: record_date")
        return False
    if record_description is None:
        self.log_error(MongoDatabase.create_service_record.__name__ + ": Unexpected empty object: record_description")
        return False
    try:
        # Look up the user's document.
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: ObjectId(str(user_id)) })
        if user is not None:
            gear_id_str = str(gear_id)
            # Locate the gear and append the new record to its history.
            for gear in user.get(Keys.GEAR_KEY, []):
                if gear.get(Keys.GEAR_ID_KEY) == gear_id_str:
                    new_record = {
                        Keys.SERVICE_RECORD_ID_KEY: str(service_record_id),
                        Keys.SERVICE_RECORD_DATE_KEY: int(record_date),
                        Keys.SERVICE_RECORD_DESCRIPTION_KEY: record_description,
                    }
                    service_history = gear.get(Keys.GEAR_SERVICE_HISTORY, [])
                    service_history.append(new_record)
                    gear[Keys.GEAR_SERVICE_HISTORY] = service_history
                    self.users_collection.save(user)
                    return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def delete_service_record(self, user_id, gear_id, service_record_id):
    """Delete method for the service record with the specified user and gear ID.

    Returns True when a matching record was removed, False otherwise.
    """
    if user_id is None:
        self.log_error(MongoDatabase.delete_service_record.__name__ + ": Unexpected empty object: user_id")
        return False
    if gear_id is None:
        self.log_error(MongoDatabase.delete_service_record.__name__ + ": Unexpected empty object: gear_id")
        return False
    if service_record_id is None:
        self.log_error(MongoDatabase.delete_service_record.__name__ + ": Unexpected empty object: service_record_id")
        return False
    try:
        # Find the user's document.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # If the user's document was found.
        if user is not None:
            gear_id_str = str(gear_id)
            # BUG FIX: coerce the record ID to str before comparing, matching how gear
            # IDs are compared everywhere else; the original compared the raw object,
            # which never matches when the caller passes a UUID/ObjectId.
            service_record_id_str = str(service_record_id)
            # Find the gear, then the record within its service history.
            for gear in user.get(Keys.GEAR_KEY, []):
                if gear.get(Keys.GEAR_ID_KEY) == gear_id_str:
                    service_history = gear.get(Keys.GEAR_SERVICE_HISTORY, [])
                    for record_index, service_record in enumerate(service_history):
                        if service_record.get(Keys.SERVICE_RECORD_ID_KEY) == service_record_id_str:
                            service_history.pop(record_index)
                            gear[Keys.GEAR_SERVICE_HISTORY] = service_history
                            self.users_collection.save(user)
                            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
#
# Pace plan management methods
#
def create_pace_plan(self, user_id, plan_id, plan_name, target_pace_min_km, target_distance_kms, display_units_pace, display_units_distance, splits, last_updated_time):
    """Adds a pace plan to the specified user's document.

    Returns True on success, False if the user was not found, the plan ID already
    exists, or an error occurred.
    """
    if user_id is None:
        self.log_error(MongoDatabase.create_pace_plan.__name__ + ": Unexpected empty object: user_id")
        return False
    if plan_id is None:
        self.log_error(MongoDatabase.create_pace_plan.__name__ + ": Unexpected empty object: plan_id")
        return False
    if plan_name is None:
        self.log_error(MongoDatabase.create_pace_plan.__name__ + ": Unexpected empty object: plan_name")
        return False
    if target_pace_min_km is None:
        self.log_error(MongoDatabase.create_pace_plan.__name__ + ": Unexpected empty object: target_pace_min_km")
        return False
    if target_distance_kms is None:
        self.log_error(MongoDatabase.create_pace_plan.__name__ + ": Unexpected empty object: target_distance_kms")
        return False
    if display_units_pace is None:
        self.log_error(MongoDatabase.create_pace_plan.__name__ + ": Unexpected empty object: display_units_pace")
        return False
    if display_units_distance is None:
        self.log_error(MongoDatabase.create_pace_plan.__name__ + ": Unexpected empty object: display_units_distance")
        return False
    if splits is None:
        self.log_error(MongoDatabase.create_pace_plan.__name__ + ": Unexpected empty object: splits")
        return False
    if last_updated_time is None:
        self.log_error(MongoDatabase.create_pace_plan.__name__ + ": Unexpected empty object: last_updated_time")
        return False
    try:
        # Look up the user's document.
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: ObjectId(str(user_id)) })
        if user is not None:
            pace_plan_list = user.get(Keys.PACE_PLANS_KEY, [])
            plan_id_str = str(plan_id)
            # Reject duplicate plan IDs.
            if any(plan.get(Keys.PACE_PLAN_ID_KEY) == plan_id_str for plan in pace_plan_list):
                return False
            # Append the new plan and write the document back.
            new_pace_plan = {
                Keys.PACE_PLAN_ID_KEY: plan_id_str,
                Keys.PACE_PLAN_NAME_KEY: plan_name,
                Keys.PACE_PLAN_TARGET_PACE_KEY: float(target_pace_min_km),
                Keys.PACE_PLAN_TARGET_DISTANCE_KEY: float(target_distance_kms),
                Keys.PACE_PLAN_DISPLAY_UNITS_PACE_KEY: int(float(display_units_pace)),
                Keys.PACE_PLAN_DISPLAY_UNITS_DISTANCE_KEY: int(float(display_units_distance)),
                Keys.PACE_PLAN_SPLITS_KEY: int(float(splits)),
                Keys.PACE_PLAN_LAST_UPDATED_KEY: int(last_updated_time),
            }
            pace_plan_list.append(new_pace_plan)
            user[Keys.PACE_PLANS_KEY] = pace_plan_list
            self.users_collection.save(user)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_pace_plans(self, user_id):
    """Retrieve method for pace plans associated with the specified user.

    Returns the user's pace plan list, False when user_id is None (kept for
    backward compatibility), or an empty list on error.
    """
    if user_id is None:
        # BUG FIX: the original referenced MongoDatabase.retrieve_pace_plan (no such
        # attribute), so this error path raised AttributeError instead of logging.
        self.log_error(MongoDatabase.retrieve_pace_plans.__name__ + ": Unexpected empty object: user_id")
        return False
    try:
        # Find the user's document.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # If the user's document was found.
        if user is not None:
            # Read the pace plans list.
            return user.get(Keys.PACE_PLANS_KEY, [])
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return []
def update_pace_plan(self, user_id, plan_id, plan_name, target_pace_min_km, target_distance_kms, display_units_pace, display_units_distance, splits, last_updated_time):
    """Update method for a pace plan associated with the specified user.

    Returns True when the matching plan was found and saved, False otherwise.
    """
    if user_id is None:
        self.log_error(MongoDatabase.update_pace_plan.__name__ + ": Unexpected empty object: user_id")
        return False
    if plan_id is None:
        self.log_error(MongoDatabase.update_pace_plan.__name__ + ": Unexpected empty object: plan_id")
        return False
    if plan_name is None:
        self.log_error(MongoDatabase.update_pace_plan.__name__ + ": Unexpected empty object: plan_name")
        return False
    if target_pace_min_km is None:
        self.log_error(MongoDatabase.update_pace_plan.__name__ + ": Unexpected empty object: target_pace_min_km")
        return False
    if target_distance_kms is None:
        # BUG FIX: this message previously named target_pace_min_km (copy/paste error).
        self.log_error(MongoDatabase.update_pace_plan.__name__ + ": Unexpected empty object: target_distance_kms")
        return False
    if display_units_pace is None:
        self.log_error(MongoDatabase.update_pace_plan.__name__ + ": Unexpected empty object: display_units_pace")
        return False
    if display_units_distance is None:
        self.log_error(MongoDatabase.update_pace_plan.__name__ + ": Unexpected empty object: display_units_distance")
        return False
    if splits is None:
        self.log_error(MongoDatabase.update_pace_plan.__name__ + ": Unexpected empty object: splits")
        return False
    if last_updated_time is None:
        # BUG FIX: this message previously named create_pace_plan as the source method.
        self.log_error(MongoDatabase.update_pace_plan.__name__ + ": Unexpected empty object: last_updated_time")
        return False
    try:
        # Find the user's document.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # If the user's document was found.
        if user is not None:
            pace_plan_list = user.get(Keys.PACE_PLANS_KEY, [])
            plan_id_str = str(plan_id)
            for pace_plan in pace_plan_list:
                if pace_plan.get(Keys.PACE_PLAN_ID_KEY) == plan_id_str:
                    # Update the matching plan in place. The original popped the item
                    # and re-appended it, needlessly moving it to the end of the list.
                    pace_plan[Keys.PACE_PLAN_NAME_KEY] = plan_name
                    pace_plan[Keys.PACE_PLAN_TARGET_PACE_KEY] = float(target_pace_min_km)
                    pace_plan[Keys.PACE_PLAN_TARGET_DISTANCE_KEY] = float(target_distance_kms)
                    pace_plan[Keys.PACE_PLAN_DISPLAY_UNITS_PACE_KEY] = int(float(display_units_pace))
                    pace_plan[Keys.PACE_PLAN_DISPLAY_UNITS_DISTANCE_KEY] = int(float(display_units_distance))
                    pace_plan[Keys.PACE_PLAN_SPLITS_KEY] = int(float(splits))
                    pace_plan[Keys.PACE_PLAN_LAST_UPDATED_KEY] = int(last_updated_time)
                    user[Keys.PACE_PLANS_KEY] = pace_plan_list
                    self.users_collection.save(user)
                    return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def delete_pace_plan(self, user_id, pace_plan_id):
    """Delete method for a pace plan associated with the specified user.

    Returns True when a matching plan was removed, False otherwise.
    """
    if user_id is None:
        self.log_error(MongoDatabase.delete_pace_plan.__name__ + ": Unexpected empty object: user_id")
        return False
    if pace_plan_id is None:
        # BUG FIX: this message previously named update_pace_plan as the source method.
        self.log_error(MongoDatabase.delete_pace_plan.__name__ + ": Unexpected empty object: pace_plan_id")
        return False
    try:
        # Find the user's document.
        user_id_obj = ObjectId(str(user_id))
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: user_id_obj })
        # If the user's document was found.
        if user is not None:
            pace_plan_list = user.get(Keys.PACE_PLANS_KEY, [])
            pace_plan_id_str = str(pace_plan_id)
            # Remove the first plan whose ID matches, then persist.
            for index, pace_plan in enumerate(pace_plan_list):
                if pace_plan.get(Keys.PACE_PLAN_ID_KEY) == pace_plan_id_str:
                    pace_plan_list.pop(index)
                    user[Keys.PACE_PLANS_KEY] = pace_plan_list
                    self.users_collection.save(user)
                    return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
#
# Deferred task management methods
#
def create_deferred_task(self, user_id, task_type, celery_task_id, internal_task_id, details, status):
    """Create method for tracking a deferred task, such as a file import or activity analysis.

    Returns True on success, False otherwise.
    """
    if user_id is None:
        self.log_error(MongoDatabase.create_deferred_task.__name__ + ": Unexpected empty object: user_id")
        return False
    if task_type is None:
        self.log_error(MongoDatabase.create_deferred_task.__name__ + ": Unexpected empty object: task_type")
        return False
    if celery_task_id is None:
        self.log_error(MongoDatabase.create_deferred_task.__name__ + ": Unexpected empty object: celery_task_id")
        return False
    if internal_task_id is None:
        self.log_error(MongoDatabase.create_deferred_task.__name__ + ": Unexpected empty object: internal_task_id")
        return False
    if status is None:
        self.log_error(MongoDatabase.create_deferred_task.__name__ + ": Unexpected empty object: status")
        return False
    try:
        # Make sure we're dealing with a string.
        user_id_str = str(user_id)
        # Find the user's tasks document.
        user_tasks = self.tasks_collection.find_one({ Keys.DEFERRED_TASKS_USER_ID: user_id_str })
        # If the user's tasks document was not found then create it.
        if user_tasks is None:
            # BUG FIX: insert the stringified ID. The original inserted the raw
            # user_id, so the follow-up lookup by string never found the freshly
            # created document when user_id was not already a str (e.g. ObjectId).
            post = { Keys.DEFERRED_TASKS_USER_ID: user_id_str }
            self.tasks_collection.insert(post)
            user_tasks = self.tasks_collection.find_one({ Keys.DEFERRED_TASKS_USER_ID: user_id_str })
        # If the user's tasks document was found.
        if user_tasks is not None:
            # Get the list of existing tasks.
            deferred_tasks = user_tasks.get(Keys.TASKS_KEY, [])
            # Create an entry for the new task.
            task = {
                Keys.TASK_CELERY_ID_KEY: str(celery_task_id),
                Keys.TASK_INTERNAL_ID_KEY: str(internal_task_id),
                Keys.TASK_TYPE_KEY: task_type,
                Keys.TASK_DETAILS_KEY: details,
                Keys.TASK_STATUS_KEY: status,
            }
            # Append it to the list and update the database.
            deferred_tasks.append(task)
            user_tasks[Keys.TASKS_KEY] = deferred_tasks
            self.tasks_collection.save(user_tasks)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_deferred_tasks(self, user_id):
    """Returns all deferred tasks recorded for the given user.

    Returns None when user_id is None; an empty list when there is no task
    document or an error occurs.
    """
    if user_id is None:
        self.log_error(MongoDatabase.retrieve_deferred_tasks.__name__ + ": Unexpected empty object: user_id")
        return None
    try:
        # Task documents are keyed by the stringified user ID.
        query = { Keys.DEFERRED_TASKS_USER_ID: str(user_id) }
        user_tasks = self.tasks_collection.find_one(query)
        if user_tasks is not None and Keys.TASKS_KEY in user_tasks:
            return user_tasks[Keys.TASKS_KEY]
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return []
def update_deferred_task(self, user_id, internal_task_id, activity_id, status):
    """Updates the activity ID and status of the deferred task with the given internal ID.

    Returns True when the user's task document was found and saved, False otherwise.
    """
    if user_id is None:
        self.log_error(MongoDatabase.update_deferred_task.__name__ + ": Unexpected empty object: user_id")
        return False
    if internal_task_id is None:
        self.log_error(MongoDatabase.update_deferred_task.__name__ + ": Unexpected empty object: internal_task_id")
        return False
    if status is None:
        self.log_error(MongoDatabase.update_deferred_task.__name__ + ": Unexpected empty object: status")
        return False
    try:
        # Task documents are keyed by the stringified user ID.
        user_id_str = str(user_id)
        internal_task_id_str = str(internal_task_id)
        user_tasks = self.tasks_collection.find_one({ Keys.DEFERRED_TASKS_USER_ID: user_id_str })
        if user_tasks is not None and Keys.TASKS_KEY in user_tasks:
            # Update the first matching task record, then persist the document.
            for task in user_tasks[Keys.TASKS_KEY]:
                if task.get(Keys.TASK_INTERNAL_ID_KEY) == internal_task_id_str:
                    task[Keys.TASK_ACTIVITY_ID_KEY] = activity_id
                    task[Keys.TASK_STATUS_KEY] = status
                    break
            self.tasks_collection.save(user_tasks)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def delete_finished_deferred_tasks(self):
    """Removes deferred tasks whose status is finished, across all users' task documents.

    Returns True on success, False on error.
    """
    try:
        # Walk every task document and strip out finished entries.
        for user_tasks in self.tasks_collection.find({}):
            if Keys.TASKS_KEY not in user_tasks:
                continue
            remaining = [task for task in user_tasks[Keys.TASKS_KEY] if task[Keys.TASK_STATUS_KEY] != Keys.TASK_STATUS_FINISHED]
            user_tasks[Keys.TASKS_KEY] = remaining
            self.tasks_collection.save(user_tasks)
        return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def create_uploaded_file(self, activity_id, file_data):
    """Stores the raw bytes of an uploaded activity file, keyed by activity ID.

    Returns True on success, False otherwise.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.create_uploaded_file.__name__ + ": Unexpected empty object: activity_id")
        return False
    if file_data is None:
        self.log_error(MongoDatabase.create_uploaded_file.__name__ + ": Unexpected empty object: file_data")
        return False
    try:
        # Python 2 needs the BSON Binary wrapper; Python 3 stores raw bytes.
        if sys.version_info[0] < 3:
            payload = Binary(file_data)
        else:
            payload = bytes(file_data)
        post = { Keys.ACTIVITY_ID_KEY: activity_id, Keys.UPLOADED_FILE_DATA_KEY: payload }
        self.uploads_collection.insert(post)
        return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def delete_uploaded_file(self, activity_id):
    """Delete method for an uploaded file associated with an activity.

    Returns True only when a document was actually removed.
    """
    if activity_id is None:
        self.log_error(MongoDatabase.delete_uploaded_file.__name__ + ": Unexpected empty object: activity_id")
        return False
    try:
        deleted_result = self.uploads_collection.delete_one({ Keys.ACTIVITY_ID_KEY: str(activity_id) })
        # BUG FIX: delete_one never returns None, so the original's None check always
        # reported success even when nothing matched; check deleted_count instead.
        if deleted_result is not None and deleted_result.deleted_count > 0:
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
#
# API key management methods
#
def create_api_key(self, user_id, key, rate):
    """Adds an API key (with its rate limit) to the specified user's document.

    Returns True on success, False otherwise.
    """
    if user_id is None:
        self.log_error(MongoDatabase.create_api_key.__name__ + ": Unexpected empty object: user_id")
        return False
    if key is None:
        self.log_error(MongoDatabase.create_api_key.__name__ + ": Unexpected empty object: key")
        return False
    if rate is None:
        self.log_error(MongoDatabase.create_api_key.__name__ + ": Unexpected empty object: rate")
        return False
    try:
        # Look up the user's document.
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: ObjectId(str(user_id)) })
        if user is not None:
            # Append the new key record and write the document back.
            key_list = user.get(Keys.API_KEYS, [])
            key_list.append({ Keys.API_KEY: str(key), Keys.API_KEY_RATE: int(rate) })
            user[Keys.API_KEYS] = key_list
            self.users_collection.save(user)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
def retrieve_api_keys(self, user_id):
    """Returns the list of API key records belonging to the specified user.

    Returns None when user_id is None; an empty list when the user has no keys
    or an error occurs.
    """
    if user_id is None:
        self.log_error(MongoDatabase.retrieve_api_keys.__name__ + ": Unexpected empty object: user_id")
        return None
    try:
        # Project only the API keys field out of the user's document.
        projection = { Keys.API_KEYS: 1 }
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: ObjectId(str(user_id)) }, projection)
        if user is not None and Keys.API_KEYS in user:
            return user[Keys.API_KEYS]
        return []
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return []
def delete_api_key(self, user_id, key):
    """Removes the given API key from the specified user's document.

    Returns True when the user's key list was found and saved (whether or not
    the key was present), False otherwise.
    """
    if user_id is None:
        self.log_error(MongoDatabase.delete_api_key.__name__ + ": Unexpected empty object: user_id")
        return False
    if key is None:
        self.log_error(MongoDatabase.delete_api_key.__name__ + ": Unexpected empty object: key")
        return False
    try:
        # Look up the user's document.
        user = self.users_collection.find_one({ Keys.DATABASE_ID_KEY: ObjectId(str(user_id)) })
        if user is not None and Keys.API_KEYS in user:
            # Compare against the stringified key, matching how keys are stored.
            key_str = str(key)
            key_list = user[Keys.API_KEYS]
            # Drop the first matching entry, if any, then persist.
            for entry in key_list:
                if entry[Keys.API_KEY] == key_str:
                    key_list.remove(entry)
                    break
            user[Keys.API_KEYS] = key_list
            self.users_collection.save(user)
            return True
    except:
        self.log_error(traceback.format_exc())
        self.log_error(sys.exc_info()[0])
    return False
| 45.031351
| 281
| 0.615444
| 16,796
| 139,327
| 4.785068
| 0.031496
| 0.037278
| 0.063904
| 0.074344
| 0.851761
| 0.815242
| 0.784298
| 0.755842
| 0.730559
| 0.707391
| 0
| 0.002084
| 0.311088
| 139,327
| 3,093
| 282
| 45.04591
| 0.835243
| 0.114895
| 0
| 0.669492
| 0
| 0
| 0.068796
| 0.00036
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045941
| false
| 0.00446
| 0.006244
| 0
| 0.252899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
976fc30ffc8fc3771f270d96bc8eaf0871a14ee4
| 21,311
|
py
|
Python
|
v1/api/statbotics/main.py
|
avgupta456/statbotics
|
8847cec161104ec54f4c501653cd4ec558d30379
|
[
"MIT"
] | 14
|
2020-05-28T21:54:45.000Z
|
2022-03-17T19:39:23.000Z
|
v1/api/statbotics/main.py
|
avgupta456/statbotics
|
8847cec161104ec54f4c501653cd4ec558d30379
|
[
"MIT"
] | 59
|
2020-05-28T21:39:45.000Z
|
2022-03-25T23:51:39.000Z
|
api/statbotics/main.py
|
statbotics/statbotics
|
37bb0e3730d5b3aff7b6a5ba6e78ef2eada950bc
|
[
"MIT"
] | 1
|
2020-07-04T07:30:40.000Z
|
2020-07-04T07:30:40.000Z
|
import datetime
import requests
from cachecontrol import CacheControl
from . import validate
class Statbotics:
"""
Main Object for interfacing with the Statbotics API
"""
def __init__(self):
    """Builds the cached HTTP session, logs in, and stores a CSRF token."""
    self.BASE_URL = "https://api.statbotics.io"
    raw_session = requests.Session()
    self.session = CacheControl(raw_session)
    # Authenticate with a freshly fetched token, then cache the current one.
    initial_token = self.get_token()
    self.login(initial_token)
    self.token = self.get_token()
def get_token(self, retries=0):
    """Fetches the CSRF token cookie from the admin endpoint.

    :param retries: internal recursion counter; callers should not pass it.
    :return: the csrftoken cookie value.
    :raises UserWarning: if the cookie is not obtained after 3 attempts.
    """
    if retries > 2:
        raise UserWarning("Could not connect to Statbotics.io")
    self.session.get(self.BASE_URL + "/admin/")
    if "csrftoken" not in self.session.cookies:
        # BUG FIX: the original recursed via self.getToken(), a method that does
        # not exist, so the retry path raised AttributeError instead of retrying.
        return self.get_token(retries + 1)
    return self.session.cookies["csrftoken"]
def login(self, token):
    """POSTs the CSRF token to the admin login endpoint to authenticate the session."""
    payload = {
        "csrfmiddlewaretoken": token,
        "next": self.BASE_URL + "/admin/",
    }
    headers = {"Referer": self.BASE_URL}
    self.session.post(self.BASE_URL + "/admin/login/", data=payload, headers=headers)
def _filter(self, data, fields):
if fields == ["all"]:
return data
for field in fields:
if field not in data[0]:
raise ValueError("Invalid field: " + str(field))
out = []
for entry in data:
new_entry = {}
for field in fields:
new_entry[field] = entry[field]
out.append(new_entry)
return out
def _get(self, url, fields, retry=0):
    """GETs *url* (relative to BASE_URL), retrying twice on non-200, and filters fields.

    :param retry: internal recursion counter; callers should not pass it.
    :raises UserWarning: on repeated request failure or an empty result set.
    """
    resp = self.session.get(self.BASE_URL + url)
    if resp.status_code != 200:
        if retry < 2:
            return self._get(url, fields, retry=retry + 1)
        raise UserWarning("Invalid query: " + url)
    data = resp.json()
    # Paginated responses wrap the payload in a "results" key.
    if "results" in data:
        data = data["results"]
    if len(data) == 0:
        # Typo fix in the user-facing message: "recieved" -> "received".
        raise UserWarning("Invalid inputs, no data received for " + url)
    return self._filter(data, fields)
def _negate(self, string):
if len(string) == 0:
return string
if string[0] == "-":
return string[1:]
return "-" + string
def get_team(self, team, fields=["all"]):
    """
    Function to retrieve information on an individual team\n
    :param team: Team Number, integer\n
    :param fields: List of fields to return. The default is ["all"]\n
    :return: a dictionary with the team's number, location (country, state, district), and Elo statistics (Current Elo, Recent Elo, Mean Elo, Max Elo)\n
    """
    validate.check_type(team, "int", "team")
    validate.check_type(fields, "list", "fields")
    url = "/v1/_teams?team=" + str(team)
    return self._get(url, fields)[0]
def get_teams(
    self,
    country=None,
    state=None,
    district=None,
    active=True,
    metric=None,
    limit=1000,
    offset=0,
    fields=["all"],
):
    """
    Function to retrieve information on multiple teams\n
    :param country: Restrict based on country (select countries included)\n
    :param state: US States and Canada provinces only. Can infer country.\n
    :param district: Use 2 or 3-letter key (ex: FIM, NE, etc)\n
    :param active: Restrict to active teams (played most recent season)\n
    :param metric: Order output. Default descending, add '-' for ascending. (Ex: "-elo", "team", etc)\n
    :param limit: Limits the output length to speed up queries. Max 10,000\n
    :param offset: Skips the first (offset) items when returning\n
    :param fields: List of fields to return. Default is ["all"]\n
    :return: A list of dictionaries, each dictionary including the team, location, and Elo statistics\n
    """
    validate.check_type(metric, "str", "metric")
    validate.check_type(limit, "int", "limit")
    validate.check_type(offset, "int", "offset")
    validate.check_type(fields, "list", "fields")
    if limit > 10000:
        raise ValueError(
            "Please reduce 'limit', consider breaking into multiple smaller queries"
        )
    # Assemble the query string piece by piece.
    url = "/v1/_teams?" + "limit={}&offset={}".format(limit, offset)
    url += validate.get_locations(country, state, district)
    if active:
        url += "&active=1"
    if metric:
        if metric not in validate.get_team_metrics():
            raise ValueError("Invalid metric")
        url += "&o=" + self._negate(metric)
    return self._get(url, fields)
def get_year(self, year, fields=["all"]):
    """
    Function to retrieve information for a specific year\n
    :param year: Year, integer\n
    :param fields: List of fields to return. The default is ["all"]\n
    :return: a dictionary with the year, match prediction statistics, and RP prediction statistics\n
    """
    validate.check_type(year, "int", "year")
    validate.check_type(fields, "list", "fields")
    url = "/v1/_years?year=" + str(year)
    return self._get(url, fields)[0]
def get_years(self, metric=None, limit=1000, offset=0, fields=["all"]):
    """
    Function to retrieve information on multiple years\n
    :param metric: Order output. Default descending, add '-' for ascending. (Ex: "elo_acc", "-opr_mse", etc)\n
    :param limit: Limits the output length to speed up queries. Max 10,000\n
    :param offset: Skips the first (offset) items when returning\n
    :param fields: List of fields to return. Default is ["all"]\n
    :return: A list of dictionaries, each dictionary including the year and match/RP prediction statistics\n
    """
    validate.check_type(metric, "str", "metric")
    validate.check_type(limit, "int", "limit")
    validate.check_type(offset, "int", "offset")
    validate.check_type(fields, "list", "fields")
    url = "/v1/_years?" + "limit={}&offset={}".format(limit, offset)
    if metric:
        url += "&o=" + self._negate(metric)
    return self._get(url, fields)
def get_team_year(self, team, year, fields=["all"]):
    """
    Function to retrieve information for a specific team's performance in a specific year\n
    :param team: Team number, integer\n
    :param year: Year, integer\n
    :param fields: List of fields to return. The default is ["all"]\n
    :return: a dictionary with the team, year, and Elo/OPR statistics\n
    """
    validate.check_type(team, "int", "team")
    validate.check_type(year, "int", "year")
    validate.check_type(fields, "list", "fields")
    url = "/v1/_team_years?team={}&year={}".format(team, year)
    return self._get(url, fields)[0]
def get_team_years(
    self,
    team=None,
    year=None,
    country=None,
    state=None,
    district=None,
    metric=None,
    limit=1000,
    offset=0,
    fields=None,
):
    """Retrieve information on multiple (team, year) pairs.

    :param team: Restrict based on a specific team number.
    :param year: Restrict based on a specific year.
    :param country: Restrict based on country (select countries included).
    :param state: US States and Canada provinces only. Can infer country.
    :param district: Use 2 or 3-letter key (ex: FIM, NE, etc).
    :param metric: Order output. Default descending, add '-' for ascending
        (ex: "elo_pre_champs", "-opr_auto", etc).
    :param limit: Limits the output length to speed up queries. Max 10,000.
    :param offset: Skips the first (offset) items when returning.
    :param fields: List of fields to return; defaults to ["all"].
    :return: A list of dictionaries, each including the team, year, and
        OPR/Elo statistics.
    :raises ValueError: If limit exceeds 10,000 or metric is invalid.
    :raises UserWarning: On conflicting or over-specific inputs.
    """
    # Avoid a shared mutable default argument.
    fields = ["all"] if fields is None else fields
    url = "/v1/_team_years"
    validate.check_type(team, "int", "team")
    validate.check_type(year, "int", "year")
    validate.check_type(metric, "str", "metric")
    validate.check_type(limit, "int", "limit")
    validate.check_type(offset, "int", "offset")
    validate.check_type(fields, "list", "fields")
    if limit > 10000:
        raise ValueError(
            "Please reduce 'limit', consider breaking into multiple smaller queries"
        )
    url += "?limit=" + str(limit) + "&offset=" + str(offset)
    if team and year:
        raise UserWarning("Use get_team_year() instead")
    if team and (country or state or district):
        raise UserWarning("Conflicting location input")
    if team:
        url += "&team=" + str(team)
    if year:
        url += "&year=" + str(year)
    url += validate.get_locations(country, state, district)
    if metric:
        if metric not in validate.get_team_year_metrics():
            raise ValueError("Invalid metric")
        url += "&o=" + self._negate(metric)
    return self._get(url, fields)
def get_event(self, event, fields=None):
    """Retrieve information for a specific event.

    :param event: Event key, string (ex: "2019cur").
    :param fields: List of fields to return; defaults to ["all"].
    :return: A dictionary with the event and Elo/OPR statistics.
    """
    # Avoid a shared mutable default argument.
    fields = ["all"] if fields is None else fields
    validate.check_type(event, "str", "event")
    validate.check_type(fields, "list", "fields")
    url = "/v1/_events?key=" + event
    return self._get(url, fields)[0]
def get_events(
    self,
    year=None,
    country=None,
    state=None,
    district=None,
    type=None,
    week=None,
    metric=None,
    limit=1000,
    offset=0,
    fields=None,
):
    """Retrieve information on multiple events.

    :param year: Restrict by specific year, integer.
    :param country: Restrict based on country (select countries included).
    :param state: US States and Canada provinces only. Can infer country.
    :param district: Use 2 or 3-letter key (ex: FIM, NE, etc).
    :param type: 0=regional, 1=district, 2=district champ, 3=champs,
        4=einstein. (Name kept for API compatibility although it shadows
        the builtin.)
    :param week: Week of play, generally between 0 and 8.
    :param metric: Order output. Default descending, add '-' for ascending
        (ex: "elo_pre_playoffs", "-opr_end", etc).
    :param limit: Limits the output length to speed up queries. Max 10,000.
    :param offset: Skips the first (offset) items when returning.
    :param fields: List of fields to return; defaults to ["all"].
    :return: A list of dictionaries, each including the team, event and
        Elo/OPR statistics.
    :raises ValueError: If limit exceeds 10,000 or metric is invalid.
    """
    # Avoid a shared mutable default argument.
    fields = ["all"] if fields is None else fields
    url = "/v1/_events"
    validate.check_type(year, "int", "year")
    validate.check_type(metric, "str", "metric")
    # Normalizes string/int event types to the numeric code (or None).
    type = validate.get_type(type)
    validate.check_type(week, "int", "week")
    validate.check_type(limit, "int", "limit")
    validate.check_type(offset, "int", "offset")
    validate.check_type(fields, "list", "fields")
    if limit > 10000:
        raise ValueError(
            "Please reduce 'limit', consider breaking into multiple smaller queries"
        )
    url += "?limit=" + str(limit) + "&offset=" + str(offset)
    if year:
        url += "&year=" + str(year)
    url += validate.get_locations(country, state, district)
    # Explicit None checks: 0 is a valid type (regional) and week.
    if type is not None:
        url += "&type=" + str(type)
    if week is not None:
        url += "&week=" + str(week)
    if metric:
        if metric not in validate.get_event_metrics():
            raise ValueError("Invalid metric")
        url += "&o=" + self._negate(metric)
    return self._get(url, fields)
def get_team_event(self, team, event, fields=None):
    """Retrieve information for a specific (team, event) pair.

    :param team: Team number, integer.
    :param event: Event key, string (ex: "2019cur").
    :param fields: List of fields to return; defaults to ["all"].
    :return: A dictionary with the event and Elo/OPR statistics.
    """
    # Avoid a shared mutable default argument.
    fields = ["all"] if fields is None else fields
    validate.check_type(team, "int", "team")
    validate.check_type(event, "str", "event")
    validate.check_type(fields, "list", "fields")
    url = "/v1/_team_events?team=" + str(team) + "&event=" + event
    return self._get(url, fields)[0]
def get_team_events(
    self,
    team=None,
    year=None,
    event=None,
    country=None,
    state=None,
    district=None,
    type=None,
    week=None,
    metric=None,
    limit=1000,
    offset=0,
    fields=None,
):
    """Retrieve information on multiple (team, event) pairs.

    :param team: Restrict by team number, integer.
    :param year: Restrict by specific year, integer.
    :param event: Restrict by specific event key, string.
    :param country: Restrict based on country (select countries included).
    :param state: US States and Canada provinces only. Can infer country.
    :param district: Use 2 or 3-letter key (ex: FIM, NE, etc).
    :param type: 0=regional, 1=district, 2=district champ, 3=champs,
        4=einstein.
    :param week: Week of play, generally between 0 and 8.
    :param metric: Order output. Default descending, add '-' for ascending
        (ex: "elo_pre_playoffs", "-opr_end", etc).
    :param limit: Limits the output length to speed up queries. Max 10,000.
    :param offset: Skips the first (offset) items when returning.
    :param fields: List of fields to return; defaults to ["all"].
    :return: A list of dictionaries, each including the team, event and
        Elo/OPR statistics.
    :raises ValueError: If limit exceeds 10,000 or metric is invalid.
    :raises UserWarning: On conflicting or over-specific inputs.
    """
    # Avoid a shared mutable default argument.
    fields = ["all"] if fields is None else fields
    url = "/v1/_team_events"
    validate.check_type(team, "int", "team")
    validate.check_type(event, "str", "event")
    type = validate.get_type(type)
    validate.check_type(week, "int", "week")
    validate.check_type(metric, "str", "metric")
    validate.check_type(limit, "int", "limit")
    validate.check_type(offset, "int", "offset")
    validate.check_type(fields, "list", "fields")
    if limit > 10000:
        raise ValueError(
            "Please reduce 'limit', consider breaking into multiple smaller queries"
        )
    url += "?limit=" + str(limit) + "&offset=" + str(offset)
    if team and event:
        raise UserWarning("Use get_team_event() instead")
    # Explicit None checks: type=0 (regional) and week=0 are valid values
    # and must also trigger the over-constraint warning with an event key.
    if event and (year or type is not None or week is not None):
        raise UserWarning("Overconstrained query")
    if (team or event) and (country or state or district):
        raise UserWarning("Conflicting location input")
    if team:
        url += "&team=" + str(team)
    if year:
        url += "&year=" + str(year)
    if event:
        url += "&event=" + event
    url += validate.get_locations(country, state, district)
    if type is not None:
        url += "&type=" + str(type)
    if week is not None:
        url += "&week=" + str(week)
    if metric:
        if metric not in validate.get_team_event_metrics():
            raise ValueError("Invalid metric")
        url += "&o=" + self._negate(metric)
    return self._get(url, fields)
def get_match(self, match, fields=None):
    """Retrieve information for a specific match.

    :param match: Match key, string (ex: "2019cur_qm1", "2019cmptx_f1m3").
    :param fields: List of fields to return; defaults to ["all"].
    :return: A dictionary with the match, score breakdowns, and predictions.
    """
    # Avoid a shared mutable default argument.
    fields = ["all"] if fields is None else fields
    validate.check_type(match, "str", "match")
    validate.check_type(fields, "list", "fields")
    return self._get("/v1/_matches?key=" + match, fields)[0]
def get_matches(
    self, year=None, event=None, elims=None, limit=1000, offset=0, fields=None
):
    """Retrieve information on multiple matches.

    :param year: Restrict by specific year, integer.
    :param event: Restrict by specific event key, string (required).
    :param elims: Restrict to only elimination matches, default False.
    :param limit: Limits the output length to speed up queries. Max 10,000.
    :param offset: Skips the first (offset) items when returning.
    :param fields: List of fields to return; defaults to ["all"].
    :return: A list of dictionaries, each including the match, score
        breakdowns, and predictions, ordered by time.
    :raises ValueError: If limit exceeds 10,000.
    :raises UserWarning: If event is missing, or both year and event given.
    """
    # Avoid a shared mutable default argument.
    fields = ["all"] if fields is None else fields
    url = "/v1/_matches"
    validate.check_type(year, "int", "year")
    validate.check_type(event, "str", "event")
    validate.check_type(elims, "bool", "elims")
    validate.check_type(limit, "int", "limit")
    validate.check_type(offset, "int", "offset")
    validate.check_type(fields, "list", "fields")
    if limit > 10000:
        raise ValueError(
            "Please reduce 'limit', consider breaking into multiple smaller queries"
        )
    url += "?limit=" + str(limit) + "&offset=" + str(offset)
    if not event:
        raise UserWarning("Query too large, be more specific (event)")
    # NOTE(review): this raises rather than merely warning, so year and
    # event can never actually be combined despite the message wording.
    if year and event:
        raise UserWarning("Year input will be ignored")
    if year:
        url += "&year=" + str(year)
    if event:
        url += "&event=" + event
    if elims:
        url += "&playoff=1"
    url += "&o=time"
    return self._get(url, fields)
def get_team_match(self, team, match, fields=None):
    """Retrieve information for a specific (team, match) pair.

    :param team: Team number, integer.
    :param match: Match key, string (ex: "2019cur_qm1", "2019cmptx_f1m3").
    :param fields: List of fields to return; defaults to ["all"].
    :return: A dictionary with the team, match, alliance, and then elo.
    """
    # Avoid a shared mutable default argument.
    fields = ["all"] if fields is None else fields
    validate.check_type(team, "int", "team")
    validate.check_type(match, "str", "match")
    validate.check_type(fields, "list", "fields")
    url = "/v1/_team_matches?team=" + str(team) + "&match=" + str(match)
    return self._get(url, fields)[0]
def get_team_matches(
    self,
    team=None,
    year=None,
    event=None,
    match=None,
    elims=None,
    limit=1000,
    offset=0,
    fields=None,
):
    """Retrieve information on multiple (team, match) pairs.

    :param team: Restrict by team number, integer.
    :param year: Restrict by specific year, integer.
    :param event: Restrict by specific event key, string.
    :param match: Restrict by specific match key, string.
    :param elims: Restrict to only elimination matches, default False.
    :param limit: Limits the output length to speed up queries. Max 10,000.
    :param offset: Skips the first (offset) items when returning.
    :param fields: List of fields to return; defaults to ["all"].
    :return: A list of dictionaries, each including the team, match,
        alliance, and then elo, ordered by time.
    :raises ValueError: If limit exceeds 10,000.
    :raises UserWarning: On missing or conflicting restriction inputs.
    """
    # Avoid a shared mutable default argument.
    fields = ["all"] if fields is None else fields
    url = "/v1/_team_matches"
    validate.check_type(team, "int", "team")
    validate.check_type(year, "int", "year")
    validate.check_type(event, "str", "event")
    validate.check_type(match, "str", "match")
    validate.check_type(elims, "bool", "elims")
    validate.check_type(limit, "int", "limit")
    validate.check_type(offset, "int", "offset")
    validate.check_type(fields, "list", "fields")
    if limit > 10000:
        raise ValueError(
            "Please reduce 'limit', consider breaking into multiple smaller queries"
        )
    url += "?limit=" + str(limit) + "&offset=" + str(offset)
    if not team and not event and not match:
        raise UserWarning(
            "Query too large, be more specific (team, event, or match)"
        )
    # year, event and match are mutually exclusive restrictions.
    if (year and event) or (year and match) or (event and match):
        raise UserWarning("Only specify one of (year, event, match)")
    if team:
        url += "&team=" + str(team)
    if year:
        url += "&year=" + str(year)
    if event:
        url += "&event=" + event
    if match:
        url += "&match=" + match
    if elims:
        url += "&playoff=1"
    url += "&o=time"
    return self._get(url, fields)
def get_event_sim(self, event, index=None, full=False, iterations=None):
    """Run an event simulation via the API.

    :param event: Event key, string (ex: "2019cur").
    :param index: Optional match index to simulate from — TODO confirm
        exact semantics against the API docs.
    :param full: If True, request the full simulation output; otherwise
        the "simple" endpoint is used.
    :param iterations: Number of simulation iterations, max 100; only
        meaningful with full output.
    :return: The simulation result as returned by the API.
    :raises ValueError: If iterations exceeds 100.
    """
    validate.check_type(event, "str", "event")
    validate.check_type(index, "int", "index")
    validate.check_type(full, "bool", "full")
    validate.check_type(iterations, "int", "iterations")
    url = "/v1/event_sim/event/" + event
    # Explicit None check so a valid index of 0 is not silently dropped.
    if index is not None:
        url += "/index/" + str(index)
    if full:
        url += "/full"
        if iterations:
            if iterations > 100:
                raise ValueError("Iterations must be <= 100")
            url += "/iterations/" + str(iterations)
    else:
        url += "/simple"
    return self._get(url, fields=["all"])
| 36.934142
| 156
| 0.58444
| 2,643
| 21,311
| 4.64283
| 0.089292
| 0.034716
| 0.085894
| 0.021677
| 0.800098
| 0.784288
| 0.777606
| 0.758047
| 0.735148
| 0.706136
| 0
| 0.013131
| 0.295997
| 21,311
| 576
| 157
| 36.998264
| 0.804772
| 0.31411
| 0
| 0.608187
| 0
| 0
| 0.158312
| 0.004894
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061404
| false
| 0
| 0.011696
| 0
| 0.146199
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
978789bc1d6bed55a57cc09abbb28c1c2dad01ec
| 21
|
py
|
Python
|
src/__init__.py
|
reticenceji/samplemod
|
77c90a9a72202687bbdf0ed3184e8f95f5557d7f
|
[
"BSD-2-Clause"
] | null | null | null |
src/__init__.py
|
reticenceji/samplemod
|
77c90a9a72202687bbdf0ed3184e8f95f5557d7f
|
[
"BSD-2-Clause"
] | null | null | null |
src/__init__.py
|
reticenceji/samplemod
|
77c90a9a72202687bbdf0ed3184e8f95f5557d7f
|
[
"BSD-2-Clause"
] | null | null | null |
from .core import add
| 21
| 21
| 0.809524
| 4
| 21
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
979fae5d7e238b3a6dca17482d2db31170e6e3d9
| 1,803
|
py
|
Python
|
pysixdesk/lib/dbtypedict.py
|
michuschenk/pysixdesk
|
151c8bcff0625cfb1dbb78228a85a1e31809df01
|
[
"MIT"
] | 3
|
2019-07-03T11:49:14.000Z
|
2020-03-03T05:49:38.000Z
|
pysixdesk/lib/dbtypedict.py
|
michuschenk/pysixdesk
|
151c8bcff0625cfb1dbb78228a85a1e31809df01
|
[
"MIT"
] | 27
|
2019-03-04T17:51:21.000Z
|
2020-08-24T15:47:34.000Z
|
pysixdesk/lib/dbtypedict.py
|
michuschenk/pysixdesk
|
151c8bcff0625cfb1dbb78228a85a1e31809df01
|
[
"MIT"
] | 4
|
2019-02-19T15:08:02.000Z
|
2019-07-02T12:20:33.000Z
|
def bigint_check(val):
    """Check whether `val` is, or contains, a value outside SQL INT range.

    Args:
        val (int, float, list): value or list of values to check.

    Returns:
        bool: True if `val` contains one or more BIGINT, False if not.
    """
    # Normalize scalars to a one-element list. (The original also tested
    # `isinstance(val, str)`, but a str is never a list, so that clause
    # was redundant.)
    if not isinstance(val, list):
        val = [val]
    # isinstance also matches bool, but a bool can never exceed the
    # 32-bit signed INT maximum, so the result is unchanged.
    return any(isinstance(v, int) and v > 2147483647 for v in val)
class SQLiteDict(object):
    """Maps Python type names to SQLite column type names."""

    def __init__(self):
        # Lookup table keyed by the Python type's __name__.
        self.db_type = {
            'NoneType': 'NULL',
            'int': 'INT',
            'float': 'DOUBLE',
            'str': 'TEXT',
            'bytes': 'BLOB',
            'tuple': 'TEXT',
        }

    def __getitem__(self, param):
        '''Get the corresponding sqlite data type'''
        # Lists are typed by their first element.
        sample = param[0] if isinstance(param, list) else param
        sql_type = self.db_type[type(sample).__name__]
        # Promote to BIGINT when any value exceeds the 32-bit INT range.
        if sql_type == 'INT' and bigint_check(param):
            return 'BIGINT'
        return sql_type
class MySQLDict(object):
    """Maps Python type names to MySQL column type names."""

    def __init__(self):
        # Lookup table keyed by the Python type's __name__.
        self.db_type = {
            'NoneType': 'NULL',
            'int': 'INT',
            'float': 'DOUBLE',
            'str': 'TEXT',
            'bytes': 'BLOB',
            'tuple': 'TEXT',
        }

    def __getitem__(self, param):
        '''Get the corresponding mysql data type'''
        # Lists are typed by their first element.
        sample = param[0] if isinstance(param, list) else param
        sql_type = self.db_type[type(sample).__name__]
        # Promote to BIGINT when any value exceeds the 32-bit INT range.
        if sql_type == 'INT' and bigint_check(param):
            return 'BIGINT'
        return sql_type
| 28.171875
| 70
| 0.547421
| 233
| 1,803
| 4
| 0.270386
| 0.103004
| 0.171674
| 0.060086
| 0.703863
| 0.703863
| 0.703863
| 0.703863
| 0.703863
| 0.703863
| 0
| 0.014718
| 0.321686
| 1,803
| 63
| 71
| 28.619048
| 0.747343
| 0.149196
| 0
| 0.863636
| 0
| 0
| 0.08445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
97c52685e87be4d31f352727755d75a4494ea8f9
| 12,754
|
py
|
Python
|
tests/k8s/test_scanning.py
|
pshchelo/kopf
|
a94648d18b3d0dc84e8d15caf1fcab7a84f7fdb8
|
[
"MIT"
] | null | null | null |
tests/k8s/test_scanning.py
|
pshchelo/kopf
|
a94648d18b3d0dc84e8d15caf1fcab7a84f7fdb8
|
[
"MIT"
] | null | null | null |
tests/k8s/test_scanning.py
|
pshchelo/kopf
|
a94648d18b3d0dc84e8d15caf1fcab7a84f7fdb8
|
[
"MIT"
] | null | null | null |
import aiohttp.web
import pytest
from kopf._cogs.clients.errors import APIError
from kopf._cogs.clients.scanning import scan_resources
async def test_no_resources_in_empty_apis(
        resp_mocker, aresponses, hostname):
    """Scanning yields nothing when /api and /apis list no versions/groups."""
    core_mock = resp_mocker(return_value=aiohttp.web.json_response({'versions': []}))
    apis_mock = resp_mocker(return_value=aiohttp.web.json_response({'groups': []}))
    aresponses.add(hostname, '/api', 'get', core_mock)
    aresponses.add(hostname, '/apis', 'get', apis_mock)
    resources = await scan_resources()
    assert len(resources) == 0
    # Both discovery endpoints are hit exactly once each.
    assert core_mock.call_count == 1
    assert apis_mock.call_count == 1
@pytest.mark.parametrize('namespaced', [True, False])
async def test_resources_in_old_apis(
        resp_mocker, aresponses, hostname, namespaced):
    """A resource served by core /api/v1 is scanned with all its attributes."""
    core_mock = resp_mocker(return_value=aiohttp.web.json_response({'versions': ['v1']}))
    apis_mock = resp_mocker(return_value=aiohttp.web.json_response({'groups': []}))
    scan_mock = resp_mocker(return_value=aiohttp.web.json_response({'resources': [
        {
            'kind': 'kind1',
            'name': 'plural1',
            'singularName': 'singular1',
            'namespaced': namespaced,
            'categories': ['category1', 'category2'],
            'shortNames': ['shortname1', 'shortname2'],
            'verbs': ['verb1', 'verb2'],
        },
    ]}))
    aresponses.add(hostname, '/api', 'get', core_mock)
    aresponses.add(hostname, '/apis', 'get', apis_mock)
    aresponses.add(hostname, '/api/v1', 'get', scan_mock)
    resources = await scan_resources()
    assert len(resources) == 1
    resource1 = list(resources)[0]
    # Core resources have an empty group and are always "preferred".
    assert resource1.group == ''
    assert resource1.version == 'v1'
    assert resource1.kind == 'kind1'
    assert resource1.plural == 'plural1'
    assert resource1.singular == 'singular1'
    assert resource1.preferred == True
    assert resource1.namespaced == namespaced
    assert resource1.subresources == set()
    # List-valued API fields are exposed as sets on the resource object.
    assert resource1.categories == {'category1', 'category2'}
    assert resource1.shortcuts == {'shortname1', 'shortname2'}
    assert resource1.verbs == {'verb1', 'verb2'}
    assert core_mock.call_count == 1
    assert apis_mock.call_count == 1
    assert scan_mock.call_count == 1
@pytest.mark.parametrize('namespaced', [True, False])
@pytest.mark.parametrize('preferred_version, expected_preferred', [
    ('version1', True),
    ('versionX', False),
])
async def test_resources_in_new_apis(
        resp_mocker, aresponses, hostname, namespaced,
        preferred_version, expected_preferred):
    """A grouped-API resource is scanned; preferredness follows preferredVersion."""
    core_mock = resp_mocker(return_value=aiohttp.web.json_response({'versions': []}))
    apis_mock = resp_mocker(return_value=aiohttp.web.json_response({'groups': [
        {
            'name': 'group1',
            'preferredVersion': {'version': preferred_version},
            'versions': [{'version': 'version1'}],
        },
    ]}))
    g1v1_mock = resp_mocker(return_value=aiohttp.web.json_response({'resources': [
        {
            'kind': 'kind1',
            'name': 'plural1',
            'singularName': 'singular1',
            'namespaced': namespaced,
            'categories': ['category1', 'category2'],
            'shortNames': ['shortname1', 'shortname2'],
            'verbs': ['verb1', 'verb2'],
        },
    ]}))
    aresponses.add(hostname, '/api', 'get', core_mock)
    aresponses.add(hostname, '/apis', 'get', apis_mock)
    aresponses.add(hostname, '/apis/group1/version1', 'get', g1v1_mock)
    resources = await scan_resources()
    assert len(resources) == 1
    resource1 = list(resources)[0]
    assert resource1.group == 'group1'
    assert resource1.version == 'version1'
    assert resource1.kind == 'kind1'
    assert resource1.plural == 'plural1'
    assert resource1.singular == 'singular1'
    # True only when the served version equals the group's preferredVersion.
    assert resource1.preferred == expected_preferred
    assert resource1.namespaced == namespaced
    assert resource1.subresources == set()
    assert resource1.categories == {'category1', 'category2'}
    assert resource1.shortcuts == {'shortname1', 'shortname2'}
    assert resource1.verbs == {'verb1', 'verb2'}
    assert core_mock.call_count == 1
    assert apis_mock.call_count == 1
    assert g1v1_mock.call_count == 1
async def test_subresources_in_old_apis(
        resp_mocker, aresponses, hostname):
    """Entries named 'plural/sub' are folded into the parent's subresources."""
    core_mock = resp_mocker(return_value=aiohttp.web.json_response({'versions': ['v1']}))
    apis_mock = resp_mocker(return_value=aiohttp.web.json_response({'groups': []}))
    v1v1_mock = resp_mocker(return_value=aiohttp.web.json_response({'resources': [
        {
            'kind': 'kind1',
            'name': 'plural1',
            'singularName': 'singular1',
            'namespaced': True,
            'categories': [],
            'shortNames': [],
            'verbs': [],
        },
        {
            'name': 'plural1/sub1',
        },
        {
            'name': 'plural1/sub2',
        },
        {
            # Belongs to a different (absent) parent — must be ignored.
            'name': 'pluralX/sub3',
        },
    ]}))
    aresponses.add(hostname, '/api', 'get', core_mock)
    aresponses.add(hostname, '/apis', 'get', apis_mock)
    aresponses.add(hostname, '/api/v1', 'get', v1v1_mock)
    resources = await scan_resources()
    assert len(resources) == 1
    resource1 = list(resources)[0]
    assert resource1.subresources == {'sub1', 'sub2'}
async def test_subresources_in_new_apis(
        resp_mocker, aresponses, hostname):
    """Same subresource folding, but via a named API group endpoint."""
    core_mock = resp_mocker(return_value=aiohttp.web.json_response({'versions': []}))
    apis_mock = resp_mocker(return_value=aiohttp.web.json_response({'groups': [
        {
            'name': 'group1',
            'preferredVersion': {'version': 'version1'},
            'versions': [{'version': 'version1'}],
        },
    ]}))
    g1v1_mock = resp_mocker(return_value=aiohttp.web.json_response({'resources': [
        {
            'kind': 'kind1',
            'name': 'plural1',
            'singularName': 'singular1',
            'namespaced': True,
            'categories': [],
            'shortNames': [],
            'verbs': [],
        },
        {
            'name': 'plural1/sub1',
        },
        {
            'name': 'plural1/sub2',
        },
        {
            # Belongs to a different (absent) parent — must be ignored.
            'name': 'pluralX/sub3',
        },
    ]}))
    aresponses.add(hostname, '/api', 'get', core_mock)
    aresponses.add(hostname, '/apis', 'get', apis_mock)
    aresponses.add(hostname, '/apis/group1/version1', 'get', g1v1_mock)
    resources = await scan_resources()
    assert len(resources) == 1
    resource1 = list(resources)[0]
    assert resource1.subresources == {'sub1', 'sub2'}
# Each param row: which groups to request, then the expected call count of
# each mocked endpoint ('' is the core group served by /api).
@pytest.mark.parametrize('group_filter, exp_core, exp_apis, exp_crv1, exp_g1v1, exp_g2v1', [
    pytest.param([''], 1, 0, 1, 0, 0, id='only-core'),
    pytest.param(['g1'], 0, 1, 0, 1, 0, id='only-g1'),
    pytest.param(['g2'], 0, 1, 0, 0, 1, id='only-g2'),
    pytest.param(['', 'g1'], 1, 1, 1, 1, 0, id='core-and-g1'),
    pytest.param(['', 'g2'], 1, 1, 1, 0, 1, id='core-and-g2'),
    pytest.param(['g1', 'g2'], 0, 1, 0, 1, 1, id='g1-and-g2'),
    pytest.param(['X'], 0, 1, 0, 0, 0, id='unexistent'),
    pytest.param([], 0, 0, 0, 0, 0, id='restrictive'),
    pytest.param(None, 1, 1, 1, 1, 1, id='unfiltered'),
])
async def test_group_filtering(
        resp_mocker, aresponses, hostname,
        group_filter, exp_core, exp_apis, exp_crv1, exp_g1v1, exp_g2v1):
    """The groups= filter limits which discovery endpoints are queried."""
    core_mock = resp_mocker(return_value=aiohttp.web.json_response({'versions': ['v1']}))
    apis_mock = resp_mocker(return_value=aiohttp.web.json_response({'groups': [
        {'name': 'g1', 'preferredVersion': {'version': ''}, 'versions': [{'version': 'g1v1'}]},
        {'name': 'g2', 'preferredVersion': {'version': ''}, 'versions': [{'version': 'g2v1'}]},
    ]}))
    crv1_mock = resp_mocker(return_value=aiohttp.web.json_response({'resources': []}))
    g1v1_mock = resp_mocker(return_value=aiohttp.web.json_response({'resources': []}))
    g2v1_mock = resp_mocker(return_value=aiohttp.web.json_response({'resources': []}))
    aresponses.add(hostname, '/api', 'get', core_mock)
    aresponses.add(hostname, '/api/v1', 'get', crv1_mock)
    aresponses.add(hostname, '/apis', 'get', apis_mock)
    aresponses.add(hostname, '/apis/g1/g1v1', 'get', g1v1_mock)
    aresponses.add(hostname, '/apis/g2/g2v1', 'get', g2v1_mock)
    await scan_resources(groups=group_filter)
    assert core_mock.call_count == exp_core
    assert apis_mock.call_count == exp_apis
    assert crv1_mock.call_count == exp_crv1
    assert g1v1_mock.call_count == exp_g1v1
    assert g2v1_mock.call_count == exp_g2v1
@pytest.mark.parametrize('status', [404])
async def test_http404_returns_no_resources_from_old_apis(
        resp_mocker, aresponses, hostname, status):
    """A 404 from core /api/v1 is tolerated and yields an empty result."""
    core_mock = resp_mocker(return_value=aiohttp.web.json_response({'versions': ['v1']}))
    apis_mock = resp_mocker(return_value=aiohttp.web.json_response({'groups': []}))
    status_mock = resp_mocker(return_value=aresponses.Response(status=status))
    aresponses.add(hostname, '/api', 'get', core_mock)
    aresponses.add(hostname, '/apis', 'get', apis_mock)
    aresponses.add(hostname, '/api/v1', 'get', status_mock)
    resources = await scan_resources()
    assert not resources
    assert status_mock.call_count == 1
@pytest.mark.parametrize('status', [404])
async def test_http404_returns_no_resources_from_new_apis(
        resp_mocker, aresponses, hostname, status):
    """A 404 from a group's version endpoint is tolerated; empty result."""
    core_mock = resp_mocker(return_value=aiohttp.web.json_response({'versions': []}))
    apis_mock = resp_mocker(return_value=aiohttp.web.json_response({'groups': [
        {'name': 'g1', 'preferredVersion': {'version': ''}, 'versions': [{'version': 'g1v1'}]},
    ]}))
    status_mock = resp_mocker(return_value=aresponses.Response(status=status))
    aresponses.add(hostname, '/api', 'get', core_mock)
    aresponses.add(hostname, '/apis', 'get', apis_mock)
    aresponses.add(hostname, '/apis/g1/g1v1', 'get', status_mock)
    resources = await scan_resources()
    assert not resources
    assert status_mock.call_count == 1
@pytest.mark.parametrize('status', [403, 500, 666])
async def test_unknown_api_statuses_escalate_from_old_apis(
        resp_mocker, aresponses, hostname, status):
    """Non-404 errors from core /api/v1 escalate as APIError."""
    core_mock = resp_mocker(return_value=aiohttp.web.json_response({'versions': ['v1']}))
    apis_mock = resp_mocker(return_value=aiohttp.web.json_response({'groups': []}))
    status_mock = resp_mocker(return_value=aresponses.Response(status=status))
    aresponses.add(hostname, '/api', 'get', core_mock)
    aresponses.add(hostname, '/apis', 'get', apis_mock)
    aresponses.add(hostname, '/api/v1', 'get', status_mock)
    with pytest.raises(APIError) as err:
        await scan_resources()
    assert err.value.status == status
    assert status_mock.call_count == 1
@pytest.mark.parametrize('status', [403, 500, 666])
async def test_unknown_api_statuses_escalate_from_new_apis(
        resp_mocker, aresponses, hostname, status):
    """Non-404 errors from a group's version endpoint escalate as APIError."""
    core_mock = resp_mocker(return_value=aiohttp.web.json_response({'versions': []}))
    apis_mock = resp_mocker(return_value=aiohttp.web.json_response({'groups': [
        {'name': 'g1', 'preferredVersion': {'version': ''}, 'versions': [{'version': 'g1v1'}]},
    ]}))
    status_mock = resp_mocker(return_value=aresponses.Response(status=status))
    aresponses.add(hostname, '/api', 'get', core_mock)
    aresponses.add(hostname, '/apis', 'get', apis_mock)
    aresponses.add(hostname, '/apis/g1/g1v1', 'get', status_mock)
    with pytest.raises(APIError) as err:
        await scan_resources()
    assert err.value.status == status
    assert status_mock.call_count == 1
async def test_empty_singulars_fall_back_to_kinds(
        resp_mocker, aresponses, hostname):
    """An empty singularName falls back to the lowercased kind."""
    # Only one endpoint is enough, core v1 is easier to mock:
    core_mock = resp_mocker(return_value=aiohttp.web.json_response({'versions': ['v1']}))
    apis_mock = resp_mocker(return_value=aiohttp.web.json_response({'groups': []}))
    scan_mock = resp_mocker(return_value=aiohttp.web.json_response({'resources': [
        {
            'kind': 'MultiWordKind',
            'name': '...',
            'singularName': '',  # as in K3s
            'namespaced': True,
            'categories': [],
            'shortNames': [],
            'verbs': [],
        },
    ]}))
    aresponses.add(hostname, '/api', 'get', core_mock)
    aresponses.add(hostname, '/apis', 'get', apis_mock)
    aresponses.add(hostname, '/api/v1', 'get', scan_mock)
    resources = await scan_resources(groups=[''])
    assert len(resources) == 1
    resource1 = list(resources)[0]
    assert resource1.singular == 'multiwordkind'
# TODO: LATER: test that the requests are done in parallel, and the total timing is the best possible.
| 37.511765
| 102
| 0.638466
| 1,460
| 12,754
| 5.367808
| 0.096575
| 0.05742
| 0.060738
| 0.086768
| 0.854919
| 0.832334
| 0.823785
| 0.807197
| 0.801455
| 0.801455
| 0
| 0.028552
| 0.200878
| 12,754
| 339
| 103
| 37.622419
| 0.740385
| 0.013016
| 0
| 0.671533
| 0
| 0
| 0.150509
| 0.003338
| 0
| 0
| 0
| 0.00295
| 0.189781
| 1
| 0
| false
| 0
| 0.014599
| 0
| 0.014599
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c1032838bbeb109ad9a4215c10b83d42db4b6591
| 48
|
py
|
Python
|
kaori/support/files/__init__.py
|
austinpray/kaori
|
b21c4146b9d0d27b87015cff0768138568a12e9c
|
[
"MIT"
] | 3
|
2020-05-04T03:43:20.000Z
|
2020-12-03T22:34:47.000Z
|
kaori/support/files/__init__.py
|
austinpray/kaori
|
b21c4146b9d0d27b87015cff0768138568a12e9c
|
[
"MIT"
] | 287
|
2020-04-21T02:39:47.000Z
|
2022-03-28T13:11:59.000Z
|
kaori/support/files/__init__.py
|
austinpray/kaori
|
b21c4146b9d0d27b87015cff0768138568a12e9c
|
[
"MIT"
] | 1
|
2020-10-22T00:20:43.000Z
|
2020-10-22T00:20:43.000Z
|
from .hash import file_digest, hashed_file_name
| 24
| 47
| 0.854167
| 8
| 48
| 4.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 48
| 1
| 48
| 48
| 0.883721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c159f1783f883d83bd7616672805dadd74c8ae35
| 828
|
py
|
Python
|
notebook/numpy_select_fancy_index.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 174
|
2018-05-30T21:14:50.000Z
|
2022-03-25T07:59:37.000Z
|
notebook/numpy_select_fancy_index.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 5
|
2019-08-10T03:22:02.000Z
|
2021-07-12T20:31:17.000Z
|
notebook/numpy_select_fancy_index.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 53
|
2018-04-27T05:26:35.000Z
|
2022-03-25T07:59:37.000Z
|
# Demonstration of NumPy fancy (integer-array) indexing.
# Each print is followed by its expected output as a comment.
import numpy as np

# --- 1-D array: select by list of indices (negative indices allowed) ---
a_1d = np.arange(4)
print(a_1d)
# [0 1 2 3]
print(a_1d[[0, 2]])
# [0 2]
print(a_1d[[0, 3, 2, 1, 2, -1, -2]])
# [0 3 2 1 2 3 2]
print(a_1d[np.array([0, 3, 2, 1, 2, -1, -2])])
# [0 3 2 1 2 3 2]

# --- 2-D array: row selection, column selection, and paired indexing ---
a_2d = np.arange(12).reshape(3, 4)
print(a_2d)
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [ 8  9 10 11]]
print(a_2d[[0, 2]])
# [[ 0  1  2  3]
#  [ 8  9 10 11]]
print(a_2d[:, [0, 2]])
# [[ 0  2]
#  [ 4  6]
#  [ 8 10]]
# Two index lists pick element-wise pairs, not a sub-grid.
print(a_2d[[0, 2], [0, 2]])
# [ 0 10]
# np.ix_ builds an open mesh so the cross product (sub-grid) is selected.
print(a_2d[np.ix_([0, 2], [0, 2])])
# [[ 0  2]
#  [ 8 10]]
print(a_2d[np.ix_([0, 2, 1, 1, -1, -1], [0, 2, 1, 3])])
# [[ 0  2  1  3]
#  [ 8 10  9 11]
#  [ 4  6  5  7]
#  [ 4  6  5  7]
#  [ 8 10  9 11]
#  [ 8 10  9 11]]

# --- List index keeps the dimension; scalar index drops it ---
print(a_2d[:, [1]])
# [[1]
#  [5]
#  [9]]
print(a_2d[:, [1]].shape)
# (3, 1)
print(a_2d[:, 1])
# [1 5 9]
print(a_2d[:, 1].shape)
# (3,)
| 14.033898
| 55
| 0.415459
| 194
| 828
| 1.680412
| 0.134021
| 0.257669
| 0.245399
| 0.06135
| 0.518405
| 0.493865
| 0.493865
| 0.453988
| 0.355828
| 0.355828
| 0
| 0.274744
| 0.292271
| 828
| 58
| 56
| 14.275862
| 0.28157
| 0.369565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.823529
| 0
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
c165b0e06ec07bef7ad3b094ec7223baafa8a02f
| 497
|
py
|
Python
|
utilities/exceptions.py
|
Hecate946/Candybot
|
e98a7acb73acb806aa5118fccb10fef428929bd4
|
[
"MIT"
] | 2
|
2022-01-28T12:56:18.000Z
|
2022-02-23T20:11:28.000Z
|
utilities/exceptions.py
|
Hecate946/Candybot
|
e98a7acb73acb806aa5118fccb10fef428929bd4
|
[
"MIT"
] | null | null | null |
utilities/exceptions.py
|
Hecate946/Candybot
|
e98a7acb73acb806aa5118fccb10fef428929bd4
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
class AlreadyConnectedToChannel(commands.CommandError):
    """Command error: already connected to a channel (name-based; confirm at raise sites)."""
    pass
class NoVoiceChannel(commands.CommandError):
    """Command error: no voice channel available (name-based; confirm at raise sites)."""
    pass
class QueueIsEmpty(commands.CommandError):
    """Command error: the queue holds no items (name-based; confirm at raise sites)."""
    pass
class NoTracksFound(commands.CommandError):
    """Command error: no tracks were found (name-based; confirm at raise sites)."""
    pass
class PlayerIsAlreadyPaused(commands.CommandError):
    """Command error: the player is already paused (name-based; confirm at raise sites)."""
    pass
class NoMoreTracks(commands.CommandError):
    """Command error: no further tracks available (name-based; confirm at raise sites)."""
    pass
class NoPreviousTracks(commands.CommandError):
    """Command error: no previous tracks available (name-based; confirm at raise sites)."""
    pass
class InvalidRepeatMode(commands.CommandError):
    """Command error: an invalid repeat mode was given (name-based; confirm at raise sites)."""
    pass
| 15.53125
| 55
| 0.784708
| 45
| 497
| 8.666667
| 0.355556
| 0.410256
| 0.492308
| 0.520513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148893
| 497
| 32
| 56
| 15.53125
| 0.921986
| 0
| 0
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.470588
| 0.058824
| 0
| 0.529412
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
c1f2d0a797b5e28daf184fe10d6586db4c86ef2b
| 165
|
py
|
Python
|
module/__init__.py
|
zxlzr/LegalPP
|
5039b3848adfd9a9962eac5ff3e86084a3fe8596
|
[
"MIT"
] | 4
|
2021-08-22T16:25:30.000Z
|
2022-01-05T13:34:53.000Z
|
module/__init__.py
|
Riroaki/Text-Graph-Reasoning
|
a87737f6bbc47dbb7a106756c6472453d5cb026d
|
[
"MIT"
] | null | null | null |
module/__init__.py
|
Riroaki/Text-Graph-Reasoning
|
a87737f6bbc47dbb7a106756c6472453d5cb026d
|
[
"MIT"
] | 1
|
2022-03-14T08:57:51.000Z
|
2022-03-14T08:57:51.000Z
|
from module.module_with_device import ModuleWithDevice
from module.graph_encoder import load_graph_encoder
from module.relation_decoder import load_relation_decoder
| 41.25
| 57
| 0.909091
| 23
| 165
| 6.173913
| 0.478261
| 0.211268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 165
| 3
| 58
| 55
| 0.928105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a9ffdf393208ecc2a8dc6469a58e4c5a588b047d
| 1,522
|
py
|
Python
|
problem_008.py
|
arboreus/project_euler
|
796173e8e72fcbfc15bbb3def7a36349639fafcf
|
[
"MIT"
] | null | null | null |
problem_008.py
|
arboreus/project_euler
|
796173e8e72fcbfc15bbb3def7a36349639fafcf
|
[
"MIT"
] | null | null | null |
problem_008.py
|
arboreus/project_euler
|
796173e8e72fcbfc15bbb3def7a36349639fafcf
|
[
"MIT"
] | null | null | null |
# 8) Largest product in a series
# The four adjacent digits in the 1000-digit number that have the greatest
# product are 9 x 9 x 8 x 9 = 5832.
# Find the thirteen adjacent digits in the 1000-digit number that have the
# greatest product. What is the value of this product?
num = """
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
""".replace('\n', '')


# Solution
def prod(x):
    """Return the product of the decimal digits in the string *x*."""
    product = 1
    for digit in x:
        product *= int(digit)
    return product


def largest_product(digits, window=13):
    """Return the greatest product of any *window* adjacent digits.

    Generalised from the original hard-coded window of 13 so the same code
    also answers the warm-up question (window=4 gives 5832).

    Raises:
        ValueError: if *window* is not between 1 and len(digits).
    """
    if not 1 <= window <= len(digits):
        raise ValueError("window must be between 1 and len(digits)")
    return max(prod(digits[i:i + window]) for i in range(len(digits) - window + 1))


# Bind the result to a name; the original bare `max(prods)` expression
# silently discarded the answer when run as a script.
answer = largest_product(num)
| 39.025641
| 126
| 0.87385
| 113
| 1,522
| 11.79646
| 0.584071
| 0.021005
| 0.024006
| 0.028507
| 0.090023
| 0.090023
| 0.090023
| 0.090023
| 0.090023
| 0.090023
| 0
| 0.734532
| 0.086728
| 1,522
| 39
| 127
| 39.025641
| 0.222302
| 0.177398
| 0
| 0
| 0
| 0
| 0.819055
| 0.800641
| 0
| 1
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0
| 0
| 0.064516
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e777d21769c1cd0304143ee438b6c6104208ba75
| 95
|
py
|
Python
|
sarepy/__init__.py
|
dtasev/sarepy
|
3179a63ac09aca791f3bccf13d61e2d47bee44c3
|
[
"Apache-2.0"
] | null | null | null |
sarepy/__init__.py
|
dtasev/sarepy
|
3179a63ac09aca791f3bccf13d61e2d47bee44c3
|
[
"Apache-2.0"
] | null | null | null |
sarepy/__init__.py
|
dtasev/sarepy
|
3179a63ac09aca791f3bccf13d61e2d47bee44c3
|
[
"Apache-2.0"
] | null | null | null |
from sarepy import prep
from sarepy import losa
from sarepy import post
from sarepy import reco
| 23.75
| 23
| 0.842105
| 16
| 95
| 5
| 0.4375
| 0.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 95
| 4
| 24
| 23.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
99af15ae175663d346120648e835c4d382b3d757
| 188
|
py
|
Python
|
tests/test_dummy.py
|
PedrosWits/pydummy
|
154f7f6e638ebe4effc335c92f650cf533c2a547
|
[
"MIT"
] | 1
|
2022-01-30T13:42:30.000Z
|
2022-01-30T13:42:30.000Z
|
tests/test_dummy.py
|
PedrosWits/pydummy
|
154f7f6e638ebe4effc335c92f650cf533c2a547
|
[
"MIT"
] | null | null | null |
tests/test_dummy.py
|
PedrosWits/pydummy
|
154f7f6e638ebe4effc335c92f650cf533c2a547
|
[
"MIT"
] | null | null | null |
import pytest
import dummy
def test_imports():
    """Smoke-test that the numpy dependency is importable."""
    import numpy  # noqa: F401
def test_dummy_True():
    """dummy_equals reports two equal arguments as equal."""
    result = dummy.dummy_equals(1, 1)
    assert result
def test_dummy_False():
    """dummy_equals reports two different arguments as not equal."""
    result = dummy.dummy_equals(1, 2)
    assert not result
| 15.666667
| 38
| 0.739362
| 30
| 188
| 4.4
| 0.466667
| 0.159091
| 0.181818
| 0.257576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025641
| 0.170213
| 188
| 11
| 39
| 17.090909
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.375
| true
| 0
| 0.5
| 0
| 0.875
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99cea7075d8d9d0714b550e3a13dab3b9873f5c1
| 43
|
py
|
Python
|
f2py_skel/frontend/__init__.py
|
HaoZeke/f2py_skel
|
240dd65456f1cb5dbcf4e312cec552fb35f72922
|
[
"MIT"
] | null | null | null |
f2py_skel/frontend/__init__.py
|
HaoZeke/f2py_skel
|
240dd65456f1cb5dbcf4e312cec552fb35f72922
|
[
"MIT"
] | 18
|
2021-11-24T19:01:13.000Z
|
2022-01-09T12:24:48.000Z
|
f2py_skel/frontend/__init__.py
|
HaoZeke/f2py_skel
|
240dd65456f1cb5dbcf4e312cec552fb35f72922
|
[
"MIT"
] | 1
|
2021-12-26T17:40:52.000Z
|
2021-12-26T17:40:52.000Z
|
from f2py_skel.frontend.f2py2e import main
| 21.5
| 42
| 0.860465
| 7
| 43
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.093023
| 43
| 1
| 43
| 43
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8209a8e8f549b8aaa726ef2fb3bb0783ebd606a2
| 44
|
py
|
Python
|
__init__.py
|
sanmik/brain-network-viz
|
9c881e49c14c94e3f7ef4b7776d98c930716ee91
|
[
"MIT"
] | 5
|
2017-09-01T14:05:03.000Z
|
2019-07-13T07:52:49.000Z
|
__init__.py
|
sanmik/brain-network-viz
|
9c881e49c14c94e3f7ef4b7776d98c930716ee91
|
[
"MIT"
] | null | null | null |
__init__.py
|
sanmik/brain-network-viz
|
9c881e49c14c94e3f7ef4b7776d98c930716ee91
|
[
"MIT"
] | 1
|
2017-09-01T14:05:03.000Z
|
2017-09-01T14:05:03.000Z
|
from source import networkviz as networkviz
| 22
| 43
| 0.863636
| 6
| 44
| 6.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 44
| 1
| 44
| 44
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
416f90dbc695f4173c084d7c4cb793e3eca82e64
| 1,786
|
py
|
Python
|
ftc/migrations/0013_auto_20210212_2215.py
|
drkane/find-that-charity
|
25f778cfa1429e465bc19a6465b09f0473cfe113
|
[
"MIT"
] | 14
|
2018-09-14T11:51:26.000Z
|
2021-02-28T22:00:29.000Z
|
ftc/migrations/0013_auto_20210212_2215.py
|
drkane/find-that-charity
|
25f778cfa1429e465bc19a6465b09f0473cfe113
|
[
"MIT"
] | 89
|
2018-01-26T22:20:43.000Z
|
2022-01-20T14:16:25.000Z
|
ftc/migrations/0013_auto_20210212_2215.py
|
drkane/find-that-charity
|
25f778cfa1429e465bc19a6465b09f0473cfe113
|
[
"MIT"
] | 7
|
2019-01-31T11:23:17.000Z
|
2022-03-09T07:42:08.000Z
|
# Generated by Django 3.1.1 on 2021-02-12 22:15
from django.db import migrations
class Migration(migrations.Migration):
    """Drop every denormalised geo_* column from the organisation model."""

    dependencies = [
        ("ftc", "0012_remove_organisation_location"),
    ]

    # One RemoveField per geo_* column; generated from a single tuple so the
    # field list is stated once.  (The tuple literal is the comprehension's
    # outermost iterable, so it is evaluated in class scope and this works
    # inside a class body.)
    operations = [
        migrations.RemoveField(model_name="organisation", name=field_name)
        for field_name in (
            "geo_ctry",
            "geo_cty",
            "geo_lat",
            "geo_laua",
            "geo_lep1",
            "geo_lep2",
            "geo_long",
            "geo_lsoa11",
            "geo_msoa11",
            "geo_oa11",
            "geo_pcon",
            "geo_rgn",
            "geo_ttwa",
            "geo_ward",
        )
    ]
| 25.514286
| 53
| 0.517357
| 140
| 1,786
| 6.378571
| 0.3
| 0.329227
| 0.407615
| 0.470325
| 0.768197
| 0.768197
| 0.768197
| 0
| 0
| 0
| 0
| 0.024172
| 0.37458
| 1,786
| 69
| 54
| 25.884058
| 0.775291
| 0.025196
| 0
| 0.666667
| 1
| 0
| 0.182289
| 0.018976
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015873
| 0
| 0.063492
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
418ca7469c2babd664df7570056a6315b995c662
| 90
|
py
|
Python
|
vesper/django/app/refresh_recording_audio_file_paths_form.py
|
RichardLitt/Vesper
|
5360844f42a06942e7684121c650b08cf8616285
|
[
"MIT"
] | 29
|
2017-07-10T14:49:15.000Z
|
2022-02-02T23:14:38.000Z
|
vesper/django/app/refresh_recording_audio_file_paths_form.py
|
Tubbz-alt/Vesper
|
76e5931ca0c7fbe070c53b1362ec246ec9007beb
|
[
"MIT"
] | 167
|
2015-03-17T14:45:22.000Z
|
2022-03-30T21:00:05.000Z
|
vesper/django/app/refresh_recording_audio_file_paths_form.py
|
Tubbz-alt/Vesper
|
76e5931ca0c7fbe070c53b1362ec246ec9007beb
|
[
"MIT"
] | 4
|
2015-02-06T03:30:27.000Z
|
2020-12-27T08:38:52.000Z
|
from django import forms
class RefreshRecordingAudioFilePathsForm(forms.Form):
    """Empty form: the refresh action takes no user input beyond submission."""
| 15
| 53
| 0.811111
| 9
| 90
| 8.111111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144444
| 90
| 5
| 54
| 18
| 0.948052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
418cef3b14d5a6c6f85f68c9486aa33bfb49651f
| 41
|
py
|
Python
|
src/apps/startposes/serializers/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 27
|
2020-05-03T11:01:27.000Z
|
2022-03-17T05:33:10.000Z
|
src/apps/startposes/serializers/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 54
|
2020-05-09T01:18:41.000Z
|
2022-01-22T10:31:15.000Z
|
src/apps/startposes/serializers/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 9
|
2020-09-29T11:31:32.000Z
|
2022-03-09T01:37:50.000Z
|
from .startpos import StartPosSerializer
| 20.5
| 40
| 0.878049
| 4
| 41
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
419fd38f818abbc14a74cb98f09c9b355f504fc0
| 31
|
py
|
Python
|
cred_loader/__init__.py
|
ntnunk/aws_credential_manager
|
c37cafd99472d608ce4fcb6b475a4ae2f22a25f5
|
[
"MIT"
] | null | null | null |
cred_loader/__init__.py
|
ntnunk/aws_credential_manager
|
c37cafd99472d608ce4fcb6b475a4ae2f22a25f5
|
[
"MIT"
] | null | null | null |
cred_loader/__init__.py
|
ntnunk/aws_credential_manager
|
c37cafd99472d608ce4fcb6b475a4ae2f22a25f5
|
[
"MIT"
] | null | null | null |
from cred_loader import loader
| 15.5
| 30
| 0.870968
| 5
| 31
| 5.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
41c6c4a37904a6bdb5c4fe28ebe5c2e5c3f5c4e0
| 2,412
|
py
|
Python
|
src/animator/rotateout.py
|
Z3t5upro/AC_Management
|
d9c64177312d8c3eebc5e05124fd372bfe42d000
|
[
"MIT"
] | 22
|
2020-01-05T19:30:13.000Z
|
2022-03-29T06:14:04.000Z
|
src/animator/rotateout.py
|
Z3t5upro/AC_Management
|
d9c64177312d8c3eebc5e05124fd372bfe42d000
|
[
"MIT"
] | 7
|
2019-12-07T09:51:11.000Z
|
2021-12-05T00:22:46.000Z
|
src/animator/rotateout.py
|
Z3t5upro/AC_Management
|
d9c64177312d8c3eebc5e05124fd372bfe42d000
|
[
"MIT"
] | 8
|
2019-11-17T07:16:47.000Z
|
2021-10-06T04:32:22.000Z
|
from kivy.animation import Animation
from functools import partial
from .base import Animator
__all__ = (
    "RotateOutAnimator",
    "RotateOutDownLeftAnimator",
    "RotateOutDownRightAnimator",
    "RotateOutUpLeftAnimator",
    "RotateOutUpRightAnimator",
)


# rotate out
def _start_rotate_out(animator, angle, pivot=None):
    """Animate ``animator.widget`` to ``angle`` degrees while fading it out.

    Shared implementation for every RotateOut* animator below (the original
    repeated this body verbatim in all five classes).  If *pivot* is given it
    is installed as the widget's rotation origin before the animation starts.
    """
    if pivot is not None:
        animator.widget.origin_ = pivot
    # d=duration, angle/opacity targets; equivalent to the original
    # **dict(zip(["angle", "opacity"], [angle, 0])) construction.
    anim = Animation(d=animator.duration, angle=angle, opacity=0)
    anim.cancel_all(animator.widget)
    anim.start(animator.widget)
    anim.bind(on_complete=partial(animator.anim_complete, animator))


class RotateOutAnimator(Animator):
    """Rotate the widget to -200 degrees about its default origin, fading out."""

    def start_(self, tmp=None):
        _start_rotate_out(self, -200)


class RotateOutDownLeftAnimator(Animator):
    """Rotate to -90 degrees about a pivot left of the widget, fading out."""

    def start_(self, tmp=None):
        pivot = (self.widget.x - self.widget.width / 2, self.widget.y)
        _start_rotate_out(self, -90, pivot)


class RotateOutDownRightAnimator(Animator):
    """Rotate to 90 degrees about a pivot right of the widget, fading out."""

    def start_(self, tmp=None):
        pivot = (self.widget.x + 3 * self.widget.width / 2, self.widget.y)
        _start_rotate_out(self, 90, pivot)


class RotateOutUpLeftAnimator(Animator):
    """Rotate to 90 degrees about a pivot left of the widget, fading out."""

    def start_(self, tmp=None):
        pivot = (self.widget.x - self.widget.width / 2, self.widget.y)
        _start_rotate_out(self, 90, pivot)


class RotateOutUpRightAnimator(Animator):
    """Rotate to -90 degrees about a pivot right of the widget, fading out."""

    def start_(self, tmp=None):
        pivot = (self.widget.x + 3 * self.widget.width / 2, self.widget.y)
        _start_rotate_out(self, -90, pivot)
| 29.777778
| 74
| 0.635987
| 288
| 2,412
| 5.229167
| 0.159722
| 0.172643
| 0.092961
| 0.066401
| 0.766932
| 0.766932
| 0.749004
| 0.749004
| 0.749004
| 0.749004
| 0
| 0.011796
| 0.226783
| 2,412
| 80
| 75
| 30.15
| 0.79571
| 0.004146
| 0
| 0.724138
| 0
| 0
| 0.072917
| 0.040833
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086207
| false
| 0
| 0.051724
| 0
| 0.224138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec097cabfa465d4801932fdd7fbc521e952d2c87
| 137
|
py
|
Python
|
anysma/datasets/__init__.py
|
saralajew/thesis_GTLVQ_experiments
|
4f6eb967a1f37917cd185fc20d6df7bbbd9288d1
|
[
"BSD-3-Clause"
] | 2
|
2019-10-09T07:51:42.000Z
|
2020-02-11T02:49:35.000Z
|
anysma/datasets/__init__.py
|
AnysmaForBlindReview/anysma
|
98445edfdbbe81f227ec807995ea75a5f6e08628
|
[
"BSD-3-Clause"
] | 4
|
2020-04-05T22:29:05.000Z
|
2022-02-09T23:36:01.000Z
|
anysma/datasets/__init__.py
|
AnysmaForBlindReview/anysma
|
98445edfdbbe81f227ec807995ea75a5f6e08628
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from keras.datasets import *
from . import tecator
from . import affnist
from . import smallnorb
| 19.571429
| 38
| 0.810219
| 18
| 137
| 5.888889
| 0.5
| 0.283019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153285
| 137
| 6
| 39
| 22.833333
| 0.913793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ec127137489977087bf4175c25340390eccba253
| 109
|
py
|
Python
|
commands/upgrader/commands/playsound.py
|
Red-Teapot/mc-commandblock-1.13-update
|
64106e1ecb5adca2aff1eeb3a1fcc11486940000
|
[
"MIT"
] | 1
|
2020-07-27T16:53:26.000Z
|
2020-07-27T16:53:26.000Z
|
commands/upgrader/commands/playsound.py
|
Red-Teapot/mc-commandblock-1.13-update
|
64106e1ecb5adca2aff1eeb3a1fcc11486940000
|
[
"MIT"
] | 5
|
2019-01-02T14:21:32.000Z
|
2019-07-07T05:39:39.000Z
|
commands/upgrader/commands/playsound.py
|
Red-Teapot/mc-commandblock-1.13-update
|
64106e1ecb5adca2aff1eeb3a1fcc11486940000
|
[
"MIT"
] | null | null | null |
# Nothing to do
# TODO Maybe process selectors properly?
def upgrade(command: str) -> str:
    """Return *command* unchanged; this command needs no syntax conversion."""
    return command
| 27.25
| 40
| 0.733945
| 15
| 109
| 5.333333
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183486
| 109
| 4
| 41
| 27.25
| 0.898876
| 0.477064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
ec1cf38700b934b57b1240453458b10bd2ff171e
| 38
|
py
|
Python
|
hmpa/__init__.py
|
awesome-archive/hmpa-pi
|
190a257395bc067ebb83a04e9abe7df9b0e5311f
|
[
"MIT"
] | 166
|
2019-01-24T10:18:43.000Z
|
2022-02-12T22:42:32.000Z
|
hmpa/__init__.py
|
ykbj/hmpa-pi
|
a60a1cff0cdf334240abdfaf6613a285608c6e08
|
[
"MIT"
] | null | null | null |
hmpa/__init__.py
|
ykbj/hmpa-pi
|
a60a1cff0cdf334240abdfaf6613a285608c6e08
|
[
"MIT"
] | 27
|
2019-01-24T11:02:41.000Z
|
2021-02-05T05:54:50.000Z
|
from . import oui
from . import tshark
| 19
| 20
| 0.763158
| 6
| 38
| 4.833333
| 0.666667
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 38
| 2
| 20
| 19
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6b8edf3af09bc8bac94c324feb1377373966fc60
| 1,361
|
py
|
Python
|
MaximumLikelihoodEstimation/PSET09-03.py
|
sum-coderepo/HadoopApp
|
0e8d48c5d541b5935c9054fb1335d829d67d7b59
|
[
"Apache-2.0"
] | 2
|
2020-05-26T23:58:32.000Z
|
2020-11-01T20:45:30.000Z
|
MaximumLikelihoodEstimation/PSET09-03.py
|
sum-coderepo/HadoopApp
|
0e8d48c5d541b5935c9054fb1335d829d67d7b59
|
[
"Apache-2.0"
] | null | null | null |
MaximumLikelihoodEstimation/PSET09-03.py
|
sum-coderepo/HadoopApp
|
0e8d48c5d541b5935c9054fb1335d829d67d7b59
|
[
"Apache-2.0"
] | null | null | null |
import numpy
import statistics as st
from scipy import stats
mu, sigma = 0, 1
samples1 = numpy.random.normal(mu, sigma, 1000)
X = numpy.array_split(samples1, 10)
print("The below stats is for {0} sets and {1} samples each".format(10,100))
for num,arr in enumerate(X, start=1):
mean = st.mean(arr)
stdev = st.stdev(arr)
mle = stats.norm(mean, stdev).pdf(mean)
print("The maximum likelihood of sample {0} and mean {1} and standard deviation {2} is {3}".format(num,mean,stdev,mle))
print('\n')
samples2 = numpy.random.normal(mu, sigma, 2000)
X = numpy.array_split(samples2, 10)
print("The below stats is for {0} sets and {1} samples each".format(10,200))
for num,arr in enumerate(X, start=1):
mean = st.mean(arr)
stdev = st.stdev(arr)
mle = stats.norm(mean, stdev).pdf(mean)
print("The maximum likelihood of sample {0} and mean {1} and standard deviation {2} is {3}".format(num,mean,stdev,mle))
#print('\n')
#samples2 = numpy.random.normal(mu, sigma, 500)
#X = numpy.array_split(samples2, 5)
#print("The below stats is for {0} sets and {1} samples each".format(5,100))
#for num,arr in enumerate(X, start=1):
# mean = st.mean(arr)
# stdev = st.stdev(arr)
# mle = stats.norm(mean, stdev).pdf(mean)
# print("The maximum likelihood of sample {0} and mean {1} and standard deviation {2} is {3}".format(num,mean,stdev,mle))
| 41.242424
| 124
| 0.683321
| 234
| 1,361
| 3.961538
| 0.235043
| 0.05178
| 0.055016
| 0.061489
| 0.883495
| 0.805825
| 0.805825
| 0.805825
| 0.805825
| 0.805825
| 0
| 0.051573
| 0.159442
| 1,361
| 32
| 125
| 42.53125
| 0.758741
| 0.306392
| 0
| 0.47619
| 0
| 0.095238
| 0.290909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.238095
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b9e2cfff591f7be08a404ad6e2a9a5179077eb0
| 17,414
|
py
|
Python
|
krake/krake/api/openstack.py
|
rak-n-rok/Krake
|
2f0d4a382b99639e2c1149ee8593a9bb589d2d3f
|
[
"Apache-2.0"
] | 1
|
2020-05-29T08:43:32.000Z
|
2020-05-29T08:43:32.000Z
|
krake/krake/api/openstack.py
|
rak-n-rok/Krake
|
2f0d4a382b99639e2c1149ee8593a9bb589d2d3f
|
[
"Apache-2.0"
] | null | null | null |
krake/krake/api/openstack.py
|
rak-n-rok/Krake
|
2f0d4a382b99639e2c1149ee8593a9bb589d2d3f
|
[
"Apache-2.0"
] | 1
|
2019-11-19T13:39:02.000Z
|
2019-11-19T13:39:02.000Z
|
import dataclasses
import json
import logging
from aiohttp import web
from uuid import uuid4
from webargs.aiohttpparser import use_kwargs
from krake import utils
from krake.api.auth import protected
from krake.api.database import EventType
from krake.api.helpers import (
load,
session,
Heartbeat,
use_schema,
HttpReason,
HttpReasonCode,
make_create_request_schema,
json_error,
ListQuery,
)
from krake.data.core import WatchEvent, WatchEventType, ListMetadata
from krake.data.openstack import (
ProjectList,
Project,
MagnumCluster,
MagnumClusterList,
MagnumClusterBinding,
)
logger = logging.getLogger(__name__)
class OpenStackApi(object):
"""Contains all handlers for the resources of the "openstack" API.
These handlers will be added to the Krake API components.
"""
routes = web.RouteTableDef()
    @routes.route("POST", "/openstack/namespaces/{namespace}/magnumclusters")
    @protected(api="openstack", resource="magnumclusters", verb="create")
    @use_schema("body", schema=make_create_request_schema(MagnumCluster))
    async def create_magnum_cluster(request, body):
        """Handle ``POST .../magnumclusters``: store a new MagnumCluster.

        Args:
            request: the incoming aiohttp request; its ``namespace`` match
                parameter selects where the resource is created.
            body: the MagnumCluster deserialized by ``@use_schema``.

        Raises:
            web.HTTPConflict (via json_error): if a MagnumCluster with the
                same name already exists in the namespace.

        Returns:
            A JSON response containing the serialized, stored resource.
        """
        kwargs = {"name": body.metadata.name}
        namespace = request.match_info.get("namespace")
        kwargs["namespace"] = namespace
        # Ensure that a resource with the same name does not already
        # exists.
        existing = await session(request).get(body.__class__, **kwargs)
        if existing is not None:
            message = (
                f"MagnumCluster {body.metadata.name!r} already "
                f"exists in namespace {namespace!r}"
            )
            reason = HttpReason(
                reason=message, code=HttpReasonCode.RESOURCE_ALREADY_EXISTS
            )
            raise json_error(web.HTTPConflict, reason.serialize())
        # Server-controlled metadata: fresh UID, identical created/modified
        # timestamps taken once so they match exactly.
        now = utils.now()
        body.metadata.namespace = namespace
        body.metadata.uid = str(uuid4())
        body.metadata.created = now
        body.metadata.modified = now
        # Initialize subresource fields
        for field in dataclasses.fields(body):
            if field.metadata.get("subresource", False):
                value = field.type()
                setattr(body, field.name, value)
        await session(request).put(body)
        logger.info(
            "Created %s %r (%s)", "MagnumCluster", body.metadata.name, body.metadata.uid
        )
        return web.json_response(body.serialize())
    @routes.route("DELETE", "/openstack/namespaces/{namespace}/magnumclusters/{name}")
    @protected(api="openstack", resource="magnumclusters", verb="delete")
    @load("entity", MagnumCluster)
    async def delete_magnum_cluster(request, entity):
        """Handle ``DELETE .../magnumclusters/{name}``: mark the cluster deleted.

        Deletion is soft: the resource only gets a ``deleted`` timestamp and
        a ``cascade_deletion`` finalizer; actual removal happens elsewhere
        (the comment below attributes it to the Garbage Collector).

        Returns:
            A JSON response with the (possibly already-deleting) resource.
        """
        # Resource is already deleting
        if entity.metadata.deleted:
            return web.json_response(entity.serialize())
        # TODO: Should be update "modified" here?
        # Resource marked as deletion, to be deleted by the Garbage Collector
        entity.metadata.deleted = utils.now()
        entity.metadata.finalizers.append("cascade_deletion")
        await session(request).put(entity)
        logger.info(
            "Deleting %s %r (%s)",
            "MagnumCluster",
            entity.metadata.name,
            entity.metadata.uid,
        )
        return web.json_response(entity.serialize())
    @routes.route("GET", "/openstack/magnumclusters")
    @routes.route("GET", "/openstack/namespaces/{namespace}/magnumclusters")
    @protected(api="openstack", resource="magnumclusters", verb="list")
    @use_kwargs(ListQuery.query, location="query")
    async def list_or_watch_magnum_clusters(request, heartbeat, watch, **query):
        """Handle ``GET`` on the MagnumCluster collection: list or watch.

        Without ``watch`` this returns a MagnumClusterList (all namespaces
        when no ``namespace`` match parameter is present).  With ``watch``
        it streams newline-delimited JSON WatchEvents until the client
        disconnects, with keep-alives sent by ``Heartbeat``.
        """
        resource_class = MagnumCluster
        # If the ListAll operation
        namespace = request.match_info.get("namespace", None)
        # Return the list of resources
        if not watch:
            if namespace is None:
                objs = [obj async for obj in session(request).all(resource_class)]
            else:
                objs = [
                    obj
                    async for obj in session(request).all(
                        resource_class, namespace=namespace
                    )
                ]
            body = MagnumClusterList(metadata=ListMetadata(), items=objs)
            return web.json_response(body.serialize())
        # Watching resources
        kwargs = {}
        if namespace is not None:
            kwargs["namespace"] = namespace
        async with session(request).watch(resource_class, **kwargs) as watcher:
            resp = web.StreamResponse(headers={"Content-Type": "application/x-ndjson"})
            resp.enable_chunked_encoding()
            await resp.prepare(request)
            async with Heartbeat(resp, interval=heartbeat):
                async for event, obj, rev in watcher:
                    # Key was deleted. Stop update stream
                    if event == EventType.PUT:
                        if rev.created == rev.modified:
                            event_type = WatchEventType.ADDED
                        else:
                            event_type = WatchEventType.MODIFIED
                    else:
                        event_type = WatchEventType.DELETED
                        # Deleted key: re-read the previous revision so the
                        # event can still carry the object's last state.
                        obj = await session(request).get_by_key(
                            resource_class, key=rev.key, revision=rev.modified - 1
                        )
                    watch_event = WatchEvent(type=event_type, object=obj.serialize())
                    await resp.write(json.dumps(watch_event.serialize()).encode())
                    await resp.write(b"\n")
@routes.route("GET", "/openstack/namespaces/{namespace}/magnumclusters/{name}")
@protected(api="openstack", resource="magnumclusters", verb="get")
@load("entity", MagnumCluster)
async def read_magnum_cluster(request, entity):
return web.json_response(entity.serialize())
    @routes.route("PUT", "/openstack/namespaces/{namespace}/magnumclusters/{name}")
    @protected(api="openstack", resource="magnumclusters", verb="update")
    @use_schema("body", schema=MagnumCluster.Schema)
    @load("entity", MagnumCluster)
    async def update_magnum_cluster(request, body, entity):
        """Handle ``PUT .../magnumclusters/{name}``: update a MagnumCluster.

        While deletion is in progress only finalizer removal is accepted
        (HTTPConflict otherwise).  A no-op body raises HTTPBadRequest.
        If the update empties the finalizer list of a deleting resource,
        the resource is removed from the database instead of updated.
        """
        # Once a resource is in the "deletion in progress" state, finalizers
        # can only be removed.
        if entity.metadata.deleted:
            if not set(body.metadata.finalizers) <= set(entity.metadata.finalizers):
                raise json_error(
                    web.HTTPConflict,
                    {
                        "metadata": {
                            "finalizers": [
                                "Finalizers can only be removed if "
                                "deletion is in progress."
                            ]
                        }
                    },
                )
        # FIXME: if a user updates an immutable field, (such as the created timestamp),
        # the request is accepted and the API returns 200. The `modified` timestamp
        # will also still be updated, even though no change from the request on
        # immutable fields will be applied.
        # Changes to immutable fields should be rejected, see Krake issue #410
        if body == entity:
            raise json_error(web.HTTPBadRequest, "The body contained no update.")
        entity.update(body)
        entity.metadata.modified = utils.now()
        # Resource is in "deletion in progress" state and all finalizers have
        # been removed. Delete the resource from database.
        if entity.metadata.deleted and not entity.metadata.finalizers:
            await session(request).delete(entity)
            logger.info(
                "Delete %s %r (%s)",
                "MagnumCluster",
                entity.metadata.name,
                entity.metadata.uid,
            )
        else:
            await session(request).put(entity)
            logger.info(
                "Update %s %r (%s)",
                "MagnumCluster",
                entity.metadata.name,
                entity.metadata.uid,
            )
        return web.json_response(entity.serialize())
    @routes.route(
        "PUT", "/openstack/namespaces/{namespace}/magnumclusters/{name}/binding"
    )
    @protected(api="openstack", resource="magnumclusters/binding", verb="update")
    @load("cluster", MagnumCluster)
    @use_schema("body", MagnumClusterBinding.Schema)
    async def update_magnum_cluster_binding(request, body, cluster):
        """Handle ``PUT .../{name}/binding``: bind a cluster to a project.

        Copies the binding's project and template into the cluster's status
        and records the project as an owner (once) before persisting.
        """
        cluster.status.project = body.project
        cluster.status.template = body.template
        if body.project not in cluster.metadata.owners:
            cluster.metadata.owners.append(body.project)
        await session(request).put(cluster)
        logger.info("Bind %r to %r", cluster, cluster.status.project)
        return web.json_response(cluster.serialize())
@routes.route(
"PUT", "/openstack/namespaces/{namespace}/magnumclusters/{name}/status"
)
@protected(api="openstack", resource="magnumclusters/status", verb="update")
@use_schema("body", MagnumCluster.Schema)
@load("entity", MagnumCluster)
async def update_magnum_cluster_status(request, body, entity):
source = getattr(body, "status")
dest = getattr(entity, "status")
dest.update(source)
await session(request).put(entity)
logger.info(
"Update %s of %s %r (%s)",
"Status",
"MagnumCluster",
entity.metadata.name,
entity.metadata.uid,
)
return web.json_response(entity.serialize())
    @routes.route("POST", "/openstack/namespaces/{namespace}/projects")
    @protected(api="openstack", resource="projects", verb="create")
    @use_schema("body", schema=make_create_request_schema(Project))
    async def create_project(request, body):
        """Handle ``POST .../projects``: store a new Project.

        Mirrors ``create_magnum_cluster`` for the Project resource.

        Raises:
            web.HTTPConflict (via json_error): if a Project with the same
                name already exists in the namespace.

        Returns:
            A JSON response containing the serialized, stored resource.
        """
        kwargs = {"name": body.metadata.name}
        namespace = request.match_info.get("namespace")
        kwargs["namespace"] = namespace
        # Ensure that a resource with the same name does not already
        # exists.
        existing = await session(request).get(body.__class__, **kwargs)
        if existing is not None:
            message = (
                f"Project {body.metadata.name!r} already "
                f"exists in namespace {namespace!r}"
            )
            reason = HttpReason(
                reason=message, code=HttpReasonCode.RESOURCE_ALREADY_EXISTS
            )
            raise json_error(web.HTTPConflict, reason.serialize())
        # Server-controlled metadata: fresh UID, identical created/modified
        # timestamps taken once so they match exactly.
        now = utils.now()
        body.metadata.namespace = namespace
        body.metadata.uid = str(uuid4())
        body.metadata.created = now
        body.metadata.modified = now
        # Initialize subresource fields
        for field in dataclasses.fields(body):
            if field.metadata.get("subresource", False):
                value = field.type()
                setattr(body, field.name, value)
        await session(request).put(body)
        logger.info(
            "Created %s %r (%s)", "Project", body.metadata.name, body.metadata.uid
        )
        return web.json_response(body.serialize())
    @routes.route("DELETE", "/openstack/namespaces/{namespace}/projects/{name}")
    @protected(api="openstack", resource="projects", verb="delete")
    @load("entity", Project)
    async def delete_project(request, entity):
        """Handle ``DELETE .../projects/{name}``: mark the project deleted.

        Soft delete, mirroring ``delete_magnum_cluster``: sets the
        ``deleted`` timestamp and the ``cascade_deletion`` finalizer.
        """
        # Resource is already deleting
        if entity.metadata.deleted:
            return web.json_response(entity.serialize())
        # TODO: Should be update "modified" here?
        # Resource marked as deletion, to be deleted by the Garbage Collector
        entity.metadata.deleted = utils.now()
        entity.metadata.finalizers.append("cascade_deletion")
        await session(request).put(entity)
        logger.info(
            "Deleting %s %r (%s)", "Project", entity.metadata.name, entity.metadata.uid
        )
        return web.json_response(entity.serialize())
    @routes.route("GET", "/openstack/projects")
    @routes.route("GET", "/openstack/namespaces/{namespace}/projects")
    @protected(api="openstack", resource="projects", verb="list")
    @use_kwargs(ListQuery.query, location="query")
    async def list_or_watch_projects(request, heartbeat, watch, **query):
        """Handle ``GET`` on the Project collection: list or watch.

        Mirrors ``list_or_watch_magnum_clusters`` for the Project resource:
        a plain GET returns a ProjectList, a watch streams newline-delimited
        JSON WatchEvents with ``Heartbeat`` keep-alives.
        """
        resource_class = Project
        # If the ListAll operation
        namespace = request.match_info.get("namespace", None)
        # Return the list of resources
        if not watch:
            if namespace is None:
                objs = [obj async for obj in session(request).all(resource_class)]
            else:
                objs = [
                    obj
                    async for obj in session(request).all(
                        resource_class, namespace=namespace
                    )
                ]
            body = ProjectList(metadata=ListMetadata(), items=objs)
            return web.json_response(body.serialize())
        # Watching resources
        kwargs = {}
        if namespace is not None:
            kwargs["namespace"] = namespace
        async with session(request).watch(resource_class, **kwargs) as watcher:
            resp = web.StreamResponse(headers={"Content-Type": "application/x-ndjson"})
            resp.enable_chunked_encoding()
            await resp.prepare(request)
            async with Heartbeat(resp, interval=heartbeat):
                async for event, obj, rev in watcher:
                    # Key was deleted. Stop update stream
                    if event == EventType.PUT:
                        if rev.created == rev.modified:
                            event_type = WatchEventType.ADDED
                        else:
                            event_type = WatchEventType.MODIFIED
                    else:
                        event_type = WatchEventType.DELETED
                        # Deleted key: re-read the previous revision so the
                        # event can still carry the object's last state.
                        obj = await session(request).get_by_key(
                            resource_class, key=rev.key, revision=rev.modified - 1
                        )
                    watch_event = WatchEvent(type=event_type, object=obj.serialize())
                    await resp.write(json.dumps(watch_event.serialize()).encode())
                    await resp.write(b"\n")
@routes.route("GET", "/openstack/namespaces/{namespace}/projects/{name}")
@protected(api="openstack", resource="projects", verb="get")
@load("entity", Project)
async def read_project(request, entity):
    """Return the requested Project serialized as JSON."""
    serialized = entity.serialize()
    return web.json_response(serialized)
@routes.route("PUT", "/openstack/namespaces/{namespace}/projects/{name}")
@protected(api="openstack", resource="projects", verb="update")
@use_schema("body", schema=Project.Schema)
@load("entity", Project)
async def update_project(request, body, entity):
    """Update a Project with the content of the request body.

    Raises 409 if the request adds finalizers while deletion is in
    progress, and 400 if the body contains no change. When the last
    finalizer of a deleting resource is removed, the resource is removed
    from the database instead of being updated.
    """
    # Once a resource is in the "deletion in progress" state, finalizers
    # can only be removed.
    if entity.metadata.deleted:
        if not set(body.metadata.finalizers) <= set(entity.metadata.finalizers):
            raise json_error(
                web.HTTPConflict,
                {
                    "metadata": {
                        "finalizers": [
                            "Finalizers can only be removed if "
                            "deletion is in progress."
                        ]
                    }
                },
            )
    # FIXME: if a user updates an immutable field, (such as the created timestamp),
    # the request is accepted and the API returns 200. The `modified` timestamp
    # will also still be updated, even though no change from the request on
    # immutable fields will be applied.
    # Changes to immutable fields should be rejected, see Krake issue #410
    if body == entity:
        raise json_error(web.HTTPBadRequest, "The body contained no update.")
    entity.update(body)
    entity.metadata.modified = utils.now()
    # Resource is in "deletion in progress" state and all finalizers have
    # been removed. Delete the resource from database.
    if entity.metadata.deleted and not entity.metadata.finalizers:
        await session(request).delete(entity)
        logger.info(
            "Delete %s %r (%s)",
            "Project",
            entity.metadata.name,
            entity.metadata.uid,
        )
    else:
        await session(request).put(entity)
        logger.info(
            "Update %s %r (%s)",
            "Project",
            entity.metadata.name,
            entity.metadata.uid,
        )
    return web.json_response(entity.serialize())
@routes.route("PUT", "/openstack/namespaces/{namespace}/projects/{name}/status")
@protected(api="openstack", resource="projects/status", verb="update")
@use_schema("body", Project.Schema)
@load("entity", Project)
async def update_project_status(request, body, entity):
    """Replace the status subresource of a Project with the request body's.

    Only the "status" field of the body is applied; every other field of
    the stored entity is left untouched.
    """
    entity.status.update(body.status)
    await session(request).put(entity)
    logger.info(
        "Update %s of %s %r (%s)",
        "Status",
        "Project",
        entity.metadata.name,
        entity.metadata.uid,
    )
    return web.json_response(entity.serialize())
| 38.021834
| 88
| 0.589181
| 1,778
| 17,414
| 5.702475
| 0.124859
| 0.044186
| 0.028109
| 0.031068
| 0.873755
| 0.853634
| 0.826511
| 0.82375
| 0.82375
| 0.806884
| 0
| 0.001408
| 0.306592
| 17,414
| 457
| 89
| 38.105033
| 0.838261
| 0.107155
| 0
| 0.622807
| 0
| 0
| 0.136504
| 0.050665
| 0
| 0
| 0
| 0.002188
| 0
| 1
| 0
| false
| 0
| 0.035088
| 0
| 0.084795
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6bedd385b0bf9c29210ea6006e37e60df05eeac5
| 32
|
py
|
Python
|
lantern/analysis/__init__.py
|
CameronLonsdale/cckrusher
|
3d9d99d27e56a78b0c4982c30f30818f13150e47
|
[
"MIT"
] | 19
|
2017-04-08T08:03:40.000Z
|
2022-02-23T03:06:33.000Z
|
lantern/analysis/__init__.py
|
CameronLonsdale/cckrusher
|
3d9d99d27e56a78b0c4982c30f30818f13150e47
|
[
"MIT"
] | 21
|
2017-04-23T12:42:22.000Z
|
2019-06-15T03:51:43.000Z
|
lantern/analysis/__init__.py
|
CameronLonsdale/cckrypto
|
3d9d99d27e56a78b0c4982c30f30818f13150e47
|
[
"MIT"
] | 4
|
2017-06-28T06:10:05.000Z
|
2019-11-19T04:23:46.000Z
|
from . import frequency, search
| 16
| 31
| 0.78125
| 4
| 32
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 1
| 32
| 32
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d43517d81e20b38dd612fdabcdaeb957f04ddc02
| 46
|
py
|
Python
|
infrastructure/cognito/__init__.py
|
maciejGolebio/aws-crud-lambda
|
5b4bdfc1cd2da60ebe99b6d9036fa06ea5ce861d
|
[
"MIT"
] | null | null | null |
infrastructure/cognito/__init__.py
|
maciejGolebio/aws-crud-lambda
|
5b4bdfc1cd2da60ebe99b6d9036fa06ea5ce861d
|
[
"MIT"
] | null | null | null |
infrastructure/cognito/__init__.py
|
maciejGolebio/aws-crud-lambda
|
5b4bdfc1cd2da60ebe99b6d9036fa06ea5ce861d
|
[
"MIT"
] | null | null | null |
from infrastructure.cognito.user_pool import *
| 46
| 46
| 0.869565
| 6
| 46
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 46
| 1
| 46
| 46
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d48a828a08ea0d083a01bd3c0890eb056000d5e1
| 94
|
py
|
Python
|
Lesson 5.py
|
Dmitrii388444/python_lesson_5
|
da6f9640b149ccece65ce751ea6de4bfcc186658
|
[
"MIT"
] | null | null | null |
Lesson 5.py
|
Dmitrii388444/python_lesson_5
|
da6f9640b149ccece65ce751ea6de4bfcc186658
|
[
"MIT"
] | null | null | null |
Lesson 5.py
|
Dmitrii388444/python_lesson_5
|
da6f9640b149ccece65ce751ea6de4bfcc186658
|
[
"MIT"
] | null | null | null |
import requests
import flask
print ('Hello world')
print ('Hello')
print('Hello', 'student')
| 13.428571
| 25
| 0.712766
| 12
| 94
| 5.583333
| 0.583333
| 0.447761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 94
| 6
| 26
| 15.666667
| 0.817073
| 0
| 0
| 0
| 0
| 0
| 0.301075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0.6
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
2e394fe2a9cc7481c7aa0986a0f6c43847204520
| 29
|
py
|
Python
|
geowall/__init__.py
|
kashev/geometric-wallpaper
|
7797f99278d05c6a4e57c089fa53dafbdb0b7c77
|
[
"MIT"
] | null | null | null |
geowall/__init__.py
|
kashev/geometric-wallpaper
|
7797f99278d05c6a4e57c089fa53dafbdb0b7c77
|
[
"MIT"
] | null | null | null |
geowall/__init__.py
|
kashev/geometric-wallpaper
|
7797f99278d05c6a4e57c089fa53dafbdb0b7c77
|
[
"MIT"
] | null | null | null |
from . import colors, shapes
| 14.5
| 28
| 0.758621
| 4
| 29
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 29
| 1
| 29
| 29
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2e74f51344b0451768609aff68a25e0cad80d2a4
| 394
|
py
|
Python
|
pyecog/__init__.py
|
mikailweston/pyecog
|
9a1674ec95b63ad9aa0a2d3aedc1a74be6441446
|
[
"MIT"
] | 10
|
2016-09-07T16:01:39.000Z
|
2019-03-26T11:14:28.000Z
|
pyecog/__init__.py
|
mikailweston/pyecog
|
9a1674ec95b63ad9aa0a2d3aedc1a74be6441446
|
[
"MIT"
] | 54
|
2016-11-21T14:41:52.000Z
|
2022-03-18T08:41:11.000Z
|
pyecog/__init__.py
|
jcornford/pyecog
|
356439bd5e3c50fd0cd74eef90a897bd41363920
|
[
"MIT"
] | 5
|
2016-10-11T14:14:44.000Z
|
2017-08-02T11:45:48.000Z
|
'''
from .ndf import H5File
from .ndf import NdfFile
from .ndf import plot
from .ndf import basic_plot
from .ndf import DataHandler
from .ndf import FeaturePreProcesser
from .ndf import Classifier
from .ndf import FeatureExtractor
from .ndf import load_classifier
from .ndf import make_hmm_model
from .visualisation import pyecog_main_gui
'''
from .ndf import *
from .visualisation import *
| 20.736842
| 42
| 0.80203
| 56
| 394
| 5.535714
| 0.339286
| 0.248387
| 0.46129
| 0.109677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002967
| 0.14467
| 394
| 18
| 43
| 21.888889
| 0.916914
| 0.847716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2e754e6532ceb633cb77f35a7cd979d84fe72a19
| 71
|
py
|
Python
|
sprocket/__init__.py
|
ontodev/sprocket
|
f0bf09b23f38513a693178f4ee960092bd7f5ee3
|
[
"BSD-3-Clause"
] | null | null | null |
sprocket/__init__.py
|
ontodev/sprocket
|
f0bf09b23f38513a693178f4ee960092bd7f5ee3
|
[
"BSD-3-Clause"
] | 29
|
2021-09-01T20:20:36.000Z
|
2021-12-15T18:43:55.000Z
|
sprocket/__init__.py
|
ontodev/sprocket
|
f0bf09b23f38513a693178f4ee960092bd7f5ee3
|
[
"BSD-3-Clause"
] | null | null | null |
from sprocket.lib import *
from sprocket.run import BLUEPRINT, prepare
| 23.666667
| 43
| 0.816901
| 10
| 71
| 5.8
| 0.7
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126761
| 71
| 2
| 44
| 35.5
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2e7e5c6fb4642fd70082ae7e8c936db5d7c3a2c2
| 116
|
py
|
Python
|
srcs/parser/tokens/square_open_bracket_token.py
|
pomponchik/computor_v2
|
742b3f3b47c8d46806b2f733b4ec07ae63a23f00
|
[
"MIT"
] | null | null | null |
srcs/parser/tokens/square_open_bracket_token.py
|
pomponchik/computor_v2
|
742b3f3b47c8d46806b2f733b4ec07ae63a23f00
|
[
"MIT"
] | null | null | null |
srcs/parser/tokens/square_open_bracket_token.py
|
pomponchik/computor_v2
|
742b3f3b47c8d46806b2f733b4ec07ae63a23f00
|
[
"MIT"
] | null | null | null |
from srcs.parser.tokens.abstract_token import AbstractToken
class SquareOpenBracketToken(AbstractToken):
pass
| 19.333333
| 59
| 0.836207
| 12
| 116
| 8
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112069
| 116
| 5
| 60
| 23.2
| 0.932039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
cf0bb91af6c75e0953044e49b7df6168e6a0e1f4
| 148
|
py
|
Python
|
geometri/square.py
|
enterprise-analytics-id/modularisasi
|
629edced07ca92f51826e89da9934b49ba13c301
|
[
"Unlicense"
] | null | null | null |
geometri/square.py
|
enterprise-analytics-id/modularisasi
|
629edced07ca92f51826e89da9934b49ba13c301
|
[
"Unlicense"
] | null | null | null |
geometri/square.py
|
enterprise-analytics-id/modularisasi
|
629edced07ca92f51826e89da9934b49ba13c301
|
[
"Unlicense"
] | null | null | null |
def info():
return 'Modul menghitung rumus tentang persegi panjang'
def hitung_luas_persegi_panjang(panjang, lebar):
return panjang * lebar
| 29.6
| 59
| 0.77027
| 19
| 148
| 5.842105
| 0.631579
| 0.252252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 148
| 5
| 60
| 29.6
| 0.895161
| 0
| 0
| 0
| 0
| 0
| 0.308725
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
cf3085d2aa372ef6a29aa73b06bbb72676cfaacf
| 1,777
|
py
|
Python
|
liaison/forms.py
|
hhdMrLion/DJANGO_CRM
|
1f8d2d7a025f9dc54b5bf498e7a577469f74c612
|
[
"Apache-2.0"
] | 1
|
2021-06-18T03:03:46.000Z
|
2021-06-18T03:03:46.000Z
|
liaison/forms.py
|
hhdMrLion/DJANGO_CRM
|
1f8d2d7a025f9dc54b5bf498e7a577469f74c612
|
[
"Apache-2.0"
] | null | null | null |
liaison/forms.py
|
hhdMrLion/DJANGO_CRM
|
1f8d2d7a025f9dc54b5bf498e7a577469f74c612
|
[
"Apache-2.0"
] | null | null | null |
import re
from django import forms
from customer.models import Customer
from liaison.models import Liaison
class LiaisonAddForm(forms.ModelForm):
"""联系人新增"""
def __init__(self, request, customer, *args, **kwargs):
super().__init__(*args, **kwargs)
self.request = request
self.fields['customer'].queryset = Customer.objects.filter(name=customer)
class Meta:
model = Liaison
fields = ['name', 'customer', 'phone', 'job', 'injob',
'wx', 'qq', 'email', 'hobby', 'birthday', 'remarks']
def clean_phone(self):
"""验证用户输入的手机号"""
phone = self.cleaned_data['phone']
# 判断用户名是否为手机号码
pattern = r'^1[0-9]{10}$'
if not re.search(pattern, phone):
raise forms.ValidationError('请输入正确的手机号码')
return phone
def save(self, commit=True):
obj = super().save(commit=False)
obj.user = self.request.user
obj.save()
class LiaisonEditForm(forms.ModelForm):
"""联系人修改"""
def __init__(self, request, *args, **kwargs):
super().__init__(*args, **kwargs)
self.request = request
class Meta:
model = Liaison
fields = ['name', 'customer', 'phone', 'job', 'injob',
'wx', 'qq', 'email', 'hobby', 'birthday', 'remarks']
def clean_phone(self):
"""验证用户输入的手机号"""
phone = self.cleaned_data['phone']
# 判断用户名是否为手机号码
pattern = r'^1[0-9]{10}$'
if not re.search(pattern, phone):
raise forms.ValidationError('请输入正确的手机号码')
return phone
def save(self, commit=True):
obj = super().save(commit=False)
obj.user = self.request.user
obj.save()
| 28.66129
| 82
| 0.556556
| 186
| 1,777
| 5.209677
| 0.327957
| 0.068111
| 0.022704
| 0.037152
| 0.72033
| 0.72033
| 0.72033
| 0.72033
| 0.72033
| 0.623323
| 0
| 0.008013
| 0.297693
| 1,777
| 61
| 83
| 29.131148
| 0.768429
| 0.033765
| 0
| 0.780488
| 0
| 0
| 0.104039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146341
| false
| 0
| 0.097561
| 0
| 0.390244
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cf380e3ade3fa8a4f697b4a49f7a1a4d12ea7986
| 47
|
py
|
Python
|
example_pkg/main.py
|
j5ew9/ExamplePackagePEP517
|
252eea2240e6bc90cdfe4f37f21b397d15f0c215
|
[
"MIT"
] | null | null | null |
example_pkg/main.py
|
j5ew9/ExamplePackagePEP517
|
252eea2240e6bc90cdfe4f37f21b397d15f0c215
|
[
"MIT"
] | null | null | null |
example_pkg/main.py
|
j5ew9/ExamplePackagePEP517
|
252eea2240e6bc90cdfe4f37f21b397d15f0c215
|
[
"MIT"
] | null | null | null |
def main():
print("just example package")
| 11.75
| 33
| 0.638298
| 6
| 47
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212766
| 47
| 3
| 34
| 15.666667
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
cf88ecaac421de66666443a4ca70c0e43911de3e
| 56
|
py
|
Python
|
Strings/Mutations.py
|
AbdallahHemdan/HackerRank-Python-Solutions
|
0e388b4f594a210426791246ca1278a6a872dd96
|
[
"MIT"
] | 3
|
2020-01-03T11:39:24.000Z
|
2021-03-13T13:35:23.000Z
|
Strings/Mutations.py
|
AbdallahHemdan/HackerRank-Python-Solutions
|
0e388b4f594a210426791246ca1278a6a872dd96
|
[
"MIT"
] | null | null | null |
Strings/Mutations.py
|
AbdallahHemdan/HackerRank-Python-Solutions
|
0e388b4f594a210426791246ca1278a6a872dd96
|
[
"MIT"
] | 1
|
2018-10-20T09:36:06.000Z
|
2018-10-20T09:36:06.000Z
|
def mutate_string(s, p, c):
return s[:p]+c+s[p+1:]
| 18.666667
| 27
| 0.553571
| 13
| 56
| 2.307692
| 0.615385
| 0.2
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 0.196429
| 56
| 2
| 28
| 28
| 0.644444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
d8742b5f3ac97ebe919d74d6f0f59ef3e54f4abf
| 750
|
py
|
Python
|
src/models/__init__.py
|
andrijaster/FAIR
|
cf4f3ced1f54c87d543ea1345f241ce338f7bea0
|
[
"MIT"
] | 2
|
2022-02-25T22:38:30.000Z
|
2022-02-25T22:38:43.000Z
|
src/models/__init__.py
|
andrijaster/FAIR
|
cf4f3ced1f54c87d543ea1345f241ce338f7bea0
|
[
"MIT"
] | null | null | null |
src/models/__init__.py
|
andrijaster/FAIR
|
cf4f3ced1f54c87d543ea1345f241ce338f7bea0
|
[
"MIT"
] | null | null | null |
from .FAD import FAD_class # Fair Adversial Discriminative model
from .FAIR_scalar import FAIR_scalar_class # Fair Adversarial Instance Re-weighting (not probabilistic)
from .FAIR_Bernoulli import FAIR_Bernoulli_class # Fair Adversarial Instance Re-weighting with Bernoulli distribution (reparametrization)
from .FAIR_betaSF import FAIR_betaSF_class # Fair Adversarial Instance Re-weighting with Beta distribution (score function)
from .FAIR_betaREP import FAIR_betaREP_class # Fair Adversarial Instance Re-weighting with Beta distribution (reparametrization)
from .LURMI import LURMI_class # Learning Unbiased Representations via Mutual Information backpropagation
from .CLFR import CLFR_class # Conditional Learning of Fair Representations
| 93.75
| 138
| 0.845333
| 94
| 750
| 6.585106
| 0.37234
| 0.072698
| 0.129241
| 0.180937
| 0.323102
| 0.323102
| 0.260097
| 0.19063
| 0.19063
| 0
| 0
| 0
| 0.12
| 750
| 7
| 139
| 107.142857
| 0.937879
| 0.613333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d87a547b078ff1e23939c724135d6a3ec6c04e4b
| 48
|
py
|
Python
|
deco/schedules/__init__.py
|
mfojtak/decor
|
203979351635a6794c91200fca4a14296ec9bc37
|
[
"MIT"
] | 1
|
2019-09-05T07:23:19.000Z
|
2019-09-05T07:23:19.000Z
|
deco/schedules/__init__.py
|
mfojtak/decor
|
203979351635a6794c91200fca4a14296ec9bc37
|
[
"MIT"
] | 2
|
2020-10-25T17:41:08.000Z
|
2020-10-26T16:48:19.000Z
|
deco/schedules/__init__.py
|
mfojtak/deco
|
203979351635a6794c91200fca4a14296ec9bc37
|
[
"MIT"
] | null | null | null |
from deco.schedules.warmup import WarmupSchedule
| 48
| 48
| 0.895833
| 6
| 48
| 7.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 48
| 1
| 48
| 48
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d87bac2629499a69b2f96733637d7208f1199610
| 26
|
py
|
Python
|
src/schctest/micro_enum/__init__.py
|
saguilarDevel/open_schc
|
ac7f2a84b6120964c8fdaabf9f5c8ca8ae39c289
|
[
"MIT"
] | 21
|
2018-11-05T06:48:32.000Z
|
2022-02-28T14:38:09.000Z
|
src/schctest/micro_enum/__init__.py
|
saguilarDevel/open_schc
|
ac7f2a84b6120964c8fdaabf9f5c8ca8ae39c289
|
[
"MIT"
] | 34
|
2019-01-28T01:32:41.000Z
|
2021-05-06T09:40:14.000Z
|
src/schctest/micro_enum/__init__.py
|
saguilarDevel/open_schc
|
ac7f2a84b6120964c8fdaabf9f5c8ca8ae39c289
|
[
"MIT"
] | 28
|
2018-10-31T22:21:26.000Z
|
2022-03-17T09:44:40.000Z
|
from .micro_enum import *
| 13
| 25
| 0.769231
| 4
| 26
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d891661bc917b1c04e57acf495dc06b6bf082b65
| 64
|
py
|
Python
|
acq4/drivers/MultiClamp/__init__.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 47
|
2015-01-05T16:18:10.000Z
|
2022-03-16T13:09:30.000Z
|
acq4/drivers/MultiClamp/__init__.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 48
|
2015-04-19T16:51:41.000Z
|
2022-03-31T14:48:16.000Z
|
acq4/drivers/MultiClamp/__init__.py
|
sensapex/acq4
|
9561ba73caff42c609bd02270527858433862ad8
|
[
"MIT"
] | 32
|
2015-01-15T14:11:49.000Z
|
2021-07-15T13:44:52.000Z
|
from __future__ import print_function
from .multiclamp import *
| 21.333333
| 37
| 0.84375
| 8
| 64
| 6.125
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 64
| 2
| 38
| 32
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
d89db256818771d01163fab0c0a583b36892b4ff
| 21
|
py
|
Python
|
example_project/some_modules/third_modules/a194.py
|
Yuriy-Leonov/cython_imports_limit_issue
|
2f9e7c02798fb52185dabfe6ce3811c439ca2839
|
[
"MIT"
] | null | null | null |
example_project/some_modules/third_modules/a194.py
|
Yuriy-Leonov/cython_imports_limit_issue
|
2f9e7c02798fb52185dabfe6ce3811c439ca2839
|
[
"MIT"
] | null | null | null |
example_project/some_modules/third_modules/a194.py
|
Yuriy-Leonov/cython_imports_limit_issue
|
2f9e7c02798fb52185dabfe6ce3811c439ca2839
|
[
"MIT"
] | null | null | null |
class A194:
pass
| 7
| 11
| 0.619048
| 3
| 21
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 0.333333
| 21
| 2
| 12
| 10.5
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
d8a18bbb8fdfcd40164a176a9330974e6c0c1a45
| 12,090
|
py
|
Python
|
tests/test_detection.py
|
theovincent/ruptures
|
2bd37c2655bf4d20ef5a0797a5b22bd7ea5fc494
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_detection.py
|
theovincent/ruptures
|
2bd37c2655bf4d20ef5a0797a5b22bd7ea5fc494
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_detection.py
|
theovincent/ruptures
|
2bd37c2655bf4d20ef5a0797a5b22bd7ea5fc494
|
[
"BSD-2-Clause"
] | null | null | null |
from copy import deepcopy
from itertools import product
import numpy as np
import pytest
from ruptures.costs import CostAR
from ruptures.datasets import pw_constant
from ruptures.detection import Binseg, BottomUp, Dynp, Pelt, Window, KernelCPD
from ruptures.exceptions import BadSegmentationParameters
@pytest.fixture(scope="module")
def signal_bkps_5D_n10():
signal, bkps = pw_constant(n_samples=10, n_features=5, noise_std=1)
return signal, bkps
@pytest.fixture(scope="module")
def signal_bkps_5D():
signal, bkps = pw_constant(n_features=5, noise_std=1)
return signal, bkps
@pytest.fixture(scope="module")
def signal_bkps_1D():
signal, bkps = pw_constant(noise_std=1)
return signal.astype(np.float32), bkps
@pytest.fixture(scope="module")
def signal_bkps_5D_no_noise():
signal, bkps = pw_constant(n_features=5, noise_std=0)
return signal, bkps
@pytest.fixture(scope="module")
def signal_bkps_1D_no_noise():
signal, bkps = pw_constant(noise_std=0)
return signal, bkps
@pytest.fixture(scope="module")
def signal_bkps_1D_constant():
signal, bkps = np.zeros(200), [200]
return signal, bkps
@pytest.mark.parametrize("algo", [Binseg, BottomUp, Dynp, Pelt, Window])
def test_empty(signal_bkps_1D, algo):
signal, _ = signal_bkps_1D
algo().fit(signal).predict(1)
algo().fit_predict(signal, 1)
@pytest.mark.parametrize(
"algo, model",
product(
[Binseg, BottomUp, Window],
["l1", "l2", "ar", "normal", "rbf", "rank", "mahalanobis"],
),
)
def test_model_1D(signal_bkps_1D, algo, model):
signal, _ = signal_bkps_1D
algo(model=model).fit_predict(signal, pen=1)
ret = algo(model=model).fit_predict(signal, n_bkps=1)
assert len(ret) == 2
assert ret[-1] == signal.shape[0]
algo(model=model).fit_predict(signal, epsilon=10)
@pytest.mark.parametrize(
"algo, model",
product([Dynp, Pelt], ["l1", "l2", "ar", "normal", "rbf", "rank", "mahalanobis"]),
)
def test_model_1D_bis(signal_bkps_1D, algo, model):
signal, _ = signal_bkps_1D
algo_t = algo(model=model)
ret = algo_t.fit_predict(signal, 1)
if isinstance(algo_t, Dynp):
assert len(ret) == 2
assert ret[-1] == signal.shape[0]
@pytest.mark.parametrize(
"algo, model",
product(
[Dynp, Binseg, BottomUp, Window, Pelt],
["l1", "l2", "ar", "normal", "rbf", "rank"],
),
)
def test_model_1D_constant(signal_bkps_1D_constant, algo, model):
signal, _ = signal_bkps_1D_constant
algo = algo(model=model)
if isinstance(algo, Dynp) or isinstance(algo, BottomUp) or isinstance(algo, Binseg):
ret = algo.fit_predict(signal=signal, n_bkps=1)
# Even with constant signals, return the specified number of
# change-points.
assert len(ret) == 2
if isinstance(algo, Window):
ret = algo.fit_predict(signal=signal, n_bkps=1)
# With constant signal, this search method returns 0 change-point.
assert len(ret) == 1
if isinstance(algo, Pelt):
ret = algo.fit_predict(signal=signal, pen=1)
# With constant signal, this search method returns 0 change-point.
assert len(ret) == 1
assert ret[-1] == signal.shape[0], "The last change-point is equal to"
" n_samples."
@pytest.mark.parametrize("algo", [Binseg, Window])
def test_costnormal_on_constant_old_behaviour(signal_bkps_1D_constant, algo):
signal, _ = signal_bkps_1D_constant
algo = algo(model="normal", params={"add_small_diag": False})
ret = algo.fit_predict(signal=signal, n_bkps=2)
# With constant signal, this search method returns 0 change-point.
assert len(ret) == 1
# The last change-point is equal to n_samples.
assert ret[-1] == signal.shape[0], "The last change-point is equal to"
" n_samples."
@pytest.mark.parametrize(
"algo, model",
product(
[Binseg, BottomUp, Window],
["l1", "l2", "linear", "normal", "rbf", "rank", "mahalanobis"],
),
)
def test_model_5D(signal_bkps_5D, algo, model):
signal, _ = signal_bkps_5D
algo(model=model).fit_predict(signal, pen=1)
ret = algo(model=model).fit_predict(signal, n_bkps=1)
assert len(ret) == 2
algo(model=model).fit_predict(signal, epsilon=10)
@pytest.mark.parametrize(
"algo, model",
product(
[Dynp, Pelt],
["l1", "l2", "linear", "normal", "rbf", "rank", "mahalanobis"],
),
)
def test_model_5D_bis(signal_bkps_5D, algo, model):
signal, _ = signal_bkps_5D
algo_t = algo(model=model)
ret = algo_t.fit_predict(signal, 1)
if isinstance(algo_t, Dynp):
assert len(ret) == 2
@pytest.mark.parametrize("algo", [Binseg, BottomUp, Window, Dynp, Pelt])
def test_custom_cost(signal_bkps_1D, algo):
signal, _ = signal_bkps_1D
c = CostAR(order=10)
algo_t = algo(custom_cost=c)
ret = algo_t.fit_predict(signal, 1)
if isinstance(algo_t, Pelt):
assert len(ret) >= 2
else:
assert len(ret) == 2
@pytest.mark.parametrize("algo", [Binseg, BottomUp, Window, Dynp, Pelt])
def test_pass_param_to_cost(signal_bkps_1D, algo):
signal, _ = signal_bkps_1D
algo_t = algo(model="ar", params={"order": 10})
ret = algo_t.fit_predict(signal, 1)
if isinstance(algo_t, Pelt):
assert len(ret) >= 2
else:
assert len(ret) == 2
@pytest.mark.parametrize(
"kernel, min_size",
product(["linear"], [2, 5]),
)
def test_kernelcpd_1D_linear(signal_bkps_1D, kernel, min_size):
signal, bkps = signal_bkps_1D
ret = (
KernelCPD(kernel=kernel, min_size=min_size, jump=1)
.fit(signal)
.predict(n_bkps=len(bkps) - 1)
)
assert len(ret) == len(bkps)
@pytest.mark.parametrize(
"kernel, min_size",
product(["linear"], [2, 5]),
)
def test_kernelcpd_5D_linear(signal_bkps_5D, kernel, min_size):
signal, bkps = signal_bkps_5D
ret = (
KernelCPD(kernel=kernel, min_size=min_size, jump=1)
.fit(signal)
.predict(n_bkps=len(bkps) - 1)
)
assert len(ret) == len(bkps)
@pytest.mark.parametrize(
"kernel, min_size",
product(["rbf"], [2, 5]),
)
def test_kernelcpd_1D_rbf(signal_bkps_1D, kernel, min_size):
signal, bkps = signal_bkps_1D
ret = (
KernelCPD(kernel=kernel, min_size=min_size, jump=1, params={"gamma": 1.5})
.fit(signal)
.predict(n_bkps=len(bkps) - 1)
)
assert len(ret) == len(bkps)
@pytest.mark.parametrize(
"kernel, min_size",
product(["rbf"], [2, 5]),
)
def test_kernelcpd_5D_rbf(signal_bkps_5D, kernel, min_size):
signal, bkps = signal_bkps_5D
ret = (
KernelCPD(kernel=kernel, min_size=min_size, jump=1, params={"gamma": 1.5})
.fit(signal)
.predict(n_bkps=len(bkps) - 1)
)
assert len(ret) == len(bkps)
@pytest.mark.parametrize(
"kernel, min_size",
product(["linear"], [2, 5]),
)
def test_kernelcpd_1D_no_noise_linear(signal_bkps_1D_no_noise, kernel, min_size):
signal, bkps = signal_bkps_1D_no_noise
res = (
KernelCPD(kernel=kernel, min_size=min_size, jump=1)
.fit(signal)
.predict(n_bkps=len(bkps) - 1)
)
assert res == bkps
@pytest.mark.parametrize(
"kernel, min_size",
product(["linear"], [2, 5]),
)
def test_kernelcpd_5D_no_noise_linear(signal_bkps_5D_no_noise, kernel, min_size):
signal, bkps = signal_bkps_5D_no_noise
res = (
KernelCPD(kernel=kernel, min_size=min_size, jump=1)
.fit(signal)
.predict(n_bkps=len(bkps) - 1)
)
assert res == bkps
@pytest.mark.parametrize(
"kernel, min_size",
product(["rbf"], [2, 5]),
)
def test_kernelcpd_1D_no_noise_rbf(signal_bkps_1D_no_noise, kernel, min_size):
signal, bkps = signal_bkps_1D_no_noise
res = (
KernelCPD(kernel=kernel, min_size=min_size, jump=1, params={"gamma": 1.5})
.fit(signal)
.predict(n_bkps=len(bkps) - 1)
)
assert res == bkps
@pytest.mark.parametrize(
"kernel, min_size",
product(["rbf"], [2, 5]),
)
def test_kernelcpd_5D_no_noise_rbf(signal_bkps_5D_no_noise, kernel, min_size):
signal, bkps = signal_bkps_5D_no_noise
res = (
KernelCPD(kernel=kernel, min_size=min_size, jump=1, params={"gamma": 1.5})
.fit(signal)
.predict(n_bkps=len(bkps) - 1)
)
assert res == bkps
# Exhaustive test of KernelCPD
@pytest.mark.parametrize("kernel", ["linear", "rbf", "cosine"])
def test_kernelcpd(signal_bkps_5D, kernel):
signal, bkps = signal_bkps_5D
# Test we do not compute if intermediary results exist
algo_temp = KernelCPD(kernel=kernel)
algo_temp.fit(signal).predict(n_bkps=len(bkps) - 1)
algo_temp.predict(n_bkps=1)
# Test penalized version
KernelCPD(kernel=kernel).fit(signal).predict(pen=0.2)
# Test fit_predict
KernelCPD(kernel=kernel).fit_predict(signal, pen=0.2)
@pytest.mark.parametrize("kernel", ["linear", "rbf", "cosine"])
def test_kernelcpd_small_signal(signal_bkps_5D_n10, kernel):
signal, _ = signal_bkps_5D_n10
algo_temp = KernelCPD(kernel=kernel)
with pytest.raises(BadSegmentationParameters):
KernelCPD(kernel=kernel, min_size=10, jump=2).fit_predict(signal, n_bkps=2)
with pytest.raises(AssertionError):
KernelCPD(kernel=kernel, min_size=10, jump=2).fit_predict(signal, n_bkps=0)
with pytest.raises(BadSegmentationParameters):
KernelCPD(kernel=kernel, min_size=10, jump=2).fit_predict(signal, pen=0.2)
assert (
len(KernelCPD(kernel=kernel, min_size=5, jump=2).fit_predict(signal, pen=0.2))
> 0
)
@pytest.mark.parametrize("kernel", ["linear", "rbf", "cosine"])
def test_kernelcpd_small_signal_same_result(signal_bkps_5D_n10, kernel):
signal, _ = signal_bkps_5D_n10
algo = KernelCPD(kernel=kernel)
list_of_segmentations = list()
n_iter = 100
for _ in range(n_iter):
bkps = algo.fit(signal=signal).predict(pen=1.0)
list_of_segmentations.append(bkps)
# test if all segmentations are equal
first_bkps = list_of_segmentations[0]
all_elements_are_equal = all(
first_bkps == other_bkps for other_bkps in list_of_segmentations[1:]
)
err_msg = "KernelCPD returns different segmentations on the same signal."
assert all_elements_are_equal, err_msg
@pytest.mark.parametrize(
"algo, model",
product(
[Binseg, BottomUp, Window],
["l1", "l2", "ar", "normal", "rbf", "rank", "mahalanobis"],
),
)
def test_model_small_signal(signal_bkps_5D_n10, algo, model):
signal, _ = signal_bkps_5D_n10
with pytest.raises(BadSegmentationParameters):
algo(model=model, min_size=5, jump=2).fit_predict(signal, n_bkps=2)
assert (
len(algo(model=model, min_size=5, jump=2).fit_predict(signal, pen=10**6)) > 0
)
assert (
len(algo(model=model, min_size=5, jump=2).fit_predict(signal, epsilon=10)) > 0
)
assert (
len(algo(model=model, min_size=9, jump=2).fit_predict(signal, pen=10**6)) > 0
)
@pytest.mark.parametrize(
    "model", ["l1", "l2", "ar", "normal", "rbf", "rank", "mahalanobis"]
)
def test_model_small_signal_dynp(signal_bkps_5D_n10, model):
    """Dynp on a short signal: every tested min_size makes two change points
    infeasible, so each call must raise BadSegmentationParameters."""
    signal, _ = signal_bkps_5D_n10
    for min_size in (5, 9, 11):
        with pytest.raises(BadSegmentationParameters):
            Dynp(model=model, min_size=min_size, jump=2).fit_predict(signal, 2)
@pytest.mark.parametrize(
    "model", ["l1", "l2", "ar", "normal", "rbf", "rank", "mahalanobis"]
)
def test_model_small_signal_pelt(signal_bkps_5D_n10, model):
    """Pelt on a short signal: min_size=11 is infeasible and must raise,
    while min_size=10 still yields a non-empty segmentation."""
    signal, _ = signal_bkps_5D_n10
    with pytest.raises(BadSegmentationParameters):
        Pelt(model=model, min_size=11, jump=2).fit_predict(signal, 2)
    result = Pelt(model=model, min_size=10, jump=2).fit_predict(signal, 1.0)
    assert len(result) > 0
def test_binseg_deepcopy():
    """Deep-copying a Binseg instance must duplicate its single_bkp helper
    rather than share one object between original and copy."""
    original = Binseg()
    clone = deepcopy(original)
    assert original.single_bkp is not clone.single_bkp
| 31.24031
| 88
| 0.669479
| 1,705
| 12,090
| 4.518475
| 0.090909
| 0.093458
| 0.060228
| 0.025312
| 0.807373
| 0.771937
| 0.748702
| 0.740395
| 0.704569
| 0.656931
| 0
| 0.028329
| 0.191232
| 12,090
| 386
| 89
| 31.321244
| 0.759562
| 0.038958
| 0
| 0.599359
| 0
| 0
| 0.06789
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 1
| 0.096154
| false
| 0.003205
| 0.025641
| 0
| 0.141026
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d8b76bc731be69bcc0d64a5c3c587dc9815b187f
| 34
|
py
|
Python
|
perceptual_similarity/__init__.py
|
wilhelmhb/PerceptualSimilarity
|
bdeac239ca24064c0ac2a1c62e8ee539e75a27ff
|
[
"BSD-2-Clause"
] | 1
|
2020-06-16T21:28:13.000Z
|
2020-06-16T21:28:13.000Z
|
perceptual_similarity/__init__.py
|
wilhelmhb/PerceptualSimilarity
|
bdeac239ca24064c0ac2a1c62e8ee539e75a27ff
|
[
"BSD-2-Clause"
] | null | null | null |
perceptual_similarity/__init__.py
|
wilhelmhb/PerceptualSimilarity
|
bdeac239ca24064c0ac2a1c62e8ee539e75a27ff
|
[
"BSD-2-Clause"
] | null | null | null |
from models import PerceptualLoss
| 17
| 33
| 0.882353
| 4
| 34
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d8cdf91c76ecf7e5ce4575fee510f91dcb0df964
| 50
|
py
|
Python
|
cw02/zad1.py
|
BartoszHolubowicz/projekt-psi
|
e1d753e543ed2676a21ba1d99191e36dbe484ae5
|
[
"bzip2-1.0.6"
] | null | null | null |
cw02/zad1.py
|
BartoszHolubowicz/projekt-psi
|
e1d753e543ed2676a21ba1d99191e36dbe484ae5
|
[
"bzip2-1.0.6"
] | null | null | null |
cw02/zad1.py
|
BartoszHolubowicz/projekt-psi
|
e1d753e543ed2676a21ba1d99191e36dbe484ae5
|
[
"bzip2-1.0.6"
] | null | null | null |
def fun(l1, l2):
    """Return a list of the even-indexed items of ``l1`` followed by the
    odd-indexed items of ``l2``."""
    evens = l1[::2]
    odds = l2[1::2]
    return list(evens) + list(odds)
| 16.666667
| 32
| 0.46
| 10
| 50
| 2.3
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 0.2
| 50
| 2
| 33
| 25
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
2b28eba4063c14d306ae2ae56ca5f5f158f336d0
| 33
|
py
|
Python
|
retro_star/utils/__init__.py
|
cthoyt/retro_star
|
280231eb2f5dffc0e14bed300d770977b323205a
|
[
"MIT"
] | 65
|
2020-06-27T04:28:21.000Z
|
2022-03-30T11:18:22.000Z
|
retro_star/utils/__init__.py
|
cthoyt/retro_star
|
280231eb2f5dffc0e14bed300d770977b323205a
|
[
"MIT"
] | 15
|
2020-07-07T13:17:05.000Z
|
2022-03-22T12:52:29.000Z
|
retro_star/utils/__init__.py
|
cthoyt/retro_star
|
280231eb2f5dffc0e14bed300d770977b323205a
|
[
"MIT"
] | 14
|
2020-06-30T09:22:13.000Z
|
2022-03-30T11:18:28.000Z
|
from .logger import setup_logger
| 16.5
| 32
| 0.848485
| 5
| 33
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2b47783d3fea518b7086f2dd472d28ecba960f49
| 153
|
py
|
Python
|
telemetry/telescope_devkit/filesystem.py
|
hmrc/telescope-devkit
|
86bcc7ef808ea42bc29f56bc48b4c5f1f0cb2b73
|
[
"Apache-2.0"
] | 1
|
2022-01-18T09:47:30.000Z
|
2022-01-18T09:47:30.000Z
|
telemetry/telescope_devkit/filesystem.py
|
hmrc/telescope-devkit
|
86bcc7ef808ea42bc29f56bc48b4c5f1f0cb2b73
|
[
"Apache-2.0"
] | 3
|
2021-11-18T13:59:06.000Z
|
2022-03-31T15:59:55.000Z
|
telemetry/telescope_devkit/filesystem.py
|
hmrc/telescope-devkit
|
86bcc7ef808ea42bc29f56bc48b4c5f1f0cb2b73
|
[
"Apache-2.0"
] | 1
|
2021-04-10T23:28:45.000Z
|
2021-04-10T23:28:45.000Z
|
import os
def get_repo_path() -> str:
    """Return the absolute, symlink-resolved path of the repository root,
    i.e. two directories above this module's location."""
    module_dir = os.path.dirname(os.path.realpath(__file__))
    repo_root = os.path.join(module_dir, "./../../")
    return os.path.realpath(repo_root)
| 19.125
| 77
| 0.601307
| 21
| 153
| 4.095238
| 0.571429
| 0.27907
| 0.325581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189542
| 153
| 7
| 78
| 21.857143
| 0.693548
| 0
| 0
| 0
| 0
| 0
| 0.052288
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.2
| 0.2
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
99507b799c6b0c9bdbd9faa162e34c5b1c490ad5
| 117
|
py
|
Python
|
examples/keyboard/drivers/drivers.py
|
Breq16/vibrance_ctrl
|
93247f686b4e5e42d5f36b9d592fe9cd8322069e
|
[
"MIT"
] | null | null | null |
examples/keyboard/drivers/drivers.py
|
Breq16/vibrance_ctrl
|
93247f686b4e5e42d5f36b9d592fe9cd8322069e
|
[
"MIT"
] | null | null | null |
examples/keyboard/drivers/drivers.py
|
Breq16/vibrance_ctrl
|
93247f686b4e5e42d5f36b9d592fe9cd8322069e
|
[
"MIT"
] | null | null | null |
import vibrance.driver.pygame_if

# Module-level registry of output drivers consumed by the keyboard example;
# currently holds a single PyGame-backed driver labelled "PyGame Demo".
drivers = []
drivers.append(vibrance.driver.pygame_if.PyGameDriver("PyGame Demo"))
| 23.4
| 69
| 0.803419
| 15
| 117
| 6.133333
| 0.6
| 0.304348
| 0.434783
| 0.478261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068376
| 117
| 4
| 70
| 29.25
| 0.844037
| 0
| 0
| 0
| 0
| 0
| 0.094017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
99a97f531695848aa026ba944f3bb991c6aef185
| 20,328
|
py
|
Python
|
tests/unit_tests/api/auth_methods/test_approle.py
|
reimerp/hvac
|
306fe62473664deb076c9e3f1ed54da370d1541e
|
[
"Apache-2.0"
] | 544
|
2018-07-23T08:17:41.000Z
|
2022-03-31T11:53:01.000Z
|
tests/unit_tests/api/auth_methods/test_approle.py
|
reimerp/hvac
|
306fe62473664deb076c9e3f1ed54da370d1541e
|
[
"Apache-2.0"
] | 469
|
2018-07-22T18:50:04.000Z
|
2022-03-30T08:56:06.000Z
|
tests/unit_tests/api/auth_methods/test_approle.py
|
harrinry/hvac
|
bf899c6504704ad6d5f85c1d74361c6f6e1b5dfb
|
[
"Apache-2.0"
] | 236
|
2018-07-24T16:59:47.000Z
|
2022-03-25T14:15:43.000Z
|
from unittest import TestCase
import requests_mock
from parameterized import parameterized
from hvac import exceptions
from hvac.adapters import JSONAdapter
from hvac.api.auth_methods import AppRole
from hvac.constants.approle import DEFAULT_MOUNT_POINT
class TestAppRole(TestCase):
    """Unit tests for the ``AppRole`` auth method API wrapper.

    Every test registers a mocked Vault HTTP endpoint with ``requests_mock``
    and checks that the corresponding ``AppRole`` method targets the expected
    URL and returns the mocked payload / status code, or raises
    ``ParamValidationError`` for invalid parameters.  Each case is
    parameterized over the default and a custom mount point.
    """

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT, "default", None),
            ("custom mount point", "approle-test", "default", None),
            (
                "bad token type",
                DEFAULT_MOUNT_POINT,
                "bad_token",
                exceptions.ParamValidationError,
            ),
        ]
    )
    @requests_mock.Mocker()
    def test_create_or_update_approle(
        self, test_label, mount_point, token_type, raises, requests_mocker
    ):
        # POST .../role/<role_name>; an unsupported token_type must raise
        # before any HTTP request is issued.
        expected_status_code = 204
        role_name = "testrole"
        mock_url = (
            "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}".format(
                mount_point=mount_point, role_name=role_name
            )
        )
        requests_mocker.register_uri(
            method="POST",
            url=mock_url,
            status_code=expected_status_code,
        )
        app_role = AppRole(adapter=JSONAdapter())
        if raises is not None:
            with self.assertRaises(raises) as cm:
                app_role.create_or_update_approle(
                    role_name=role_name,
                    token_policies=["default"],
                    token_type=token_type,
                    mount_point=mount_point,
                )
            self.assertIn(member="unsupported token_type", container=str(cm.exception))
        else:
            response = app_role.create_or_update_approle(
                role_name=role_name, token_policies=["default"], mount_point=mount_point
            )
            self.assertEqual(first=expected_status_code, second=response.status_code)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT),
            ("custom mount point", "approle-test"),
        ]
    )
    @requests_mock.Mocker()
    def test_list_roles(self, test_label, mount_point, requests_mocker):
        # LIST .../role returns the mocked key listing verbatim.
        expected_status_code = 200
        mock_response = {
            "auth": None,
            "data": {"keys": ["testrole"]},
            "lease_duration": 0,
            "lease_id": "",
            "renewable": False,
            "request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
            "warnings": None,
            "wrap_info": None,
        }
        mock_url = "http://localhost:8200/v1/auth/{mount_point}/role".format(
            mount_point=mount_point
        )
        requests_mocker.register_uri(
            method="LIST",
            url=mock_url,
            status_code=expected_status_code,
            json=mock_response,
        )
        app_role = AppRole(adapter=JSONAdapter())
        response = app_role.list_roles(mount_point=mount_point)
        self.assertEqual(first=mock_response, second=response)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT),
            ("custom mount point", "approle-test"),
        ]
    )
    @requests_mock.Mocker()
    def test_read_role(self, test_label, mount_point, requests_mocker):
        # GET .../role/<role_name> returns the mocked role configuration.
        expected_status_code = 200
        role_name = "testrole"
        mock_response = {
            "auth": None,
            "data": {
                "bind_secret_id": True,
                "local_secret_ids": False,
                "secret_id_bound_cidrs": None,
                "secret_id_num_uses": 0,
                "secret_id_ttl": 0,
                "token_bound_cidrs": None,
                "token_explicit_max_ttl": 0,
                "token_max_ttl": 0,
                # NOTE(review): "token_no_default_poolicy" looks like a typo of
                # Vault's "token_no_default_policy" field — harmless here since
                # the mock is only echoed back, but worth confirming.
                "token_no_default_poolicy": False,
                "token_num_uses": 0,
                "token_period": 14400,
                "token_policies": None,
                "token_ttl": 0,
                "token_type": "default",
            },
            "lease_duration": 0,
            "lease_id": "",
            "renewable": False,
            "request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
            "warnings": None,
            "wrap_info": None,
        }
        mock_url = (
            "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}".format(
                mount_point=mount_point, role_name=role_name
            )
        )
        requests_mocker.register_uri(
            method="GET",
            url=mock_url,
            status_code=expected_status_code,
            json=mock_response,
        )
        app_role = AppRole(adapter=JSONAdapter())
        response = app_role.read_role(role_name="testrole", mount_point=mount_point)
        self.assertEqual(first=mock_response, second=response)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT),
            ("custom mount point", "approle-test"),
        ]
    )
    @requests_mock.Mocker()
    def test_delete_role(self, test_label, mount_point, requests_mocker):
        # DELETE .../role/<role_name>; only the status code is checked.
        expected_status_code = 204
        role_name = "testrole"
        mock_url = (
            "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}".format(
                mount_point=mount_point, role_name=role_name
            )
        )
        requests_mocker.register_uri(
            method="DELETE",
            url=mock_url,
            status_code=expected_status_code,
        )
        app_role = AppRole(adapter=JSONAdapter())
        response = app_role.delete_role(role_name=role_name, mount_point=mount_point)
        self.assertEqual(first=expected_status_code, second=response.status_code)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT),
            ("custom mount point", "approle-test"),
        ]
    )
    @requests_mock.Mocker()
    def test_read_role_id(self, test_label, mount_point, requests_mocker):
        # GET .../role/<role_name>/role-id returns the mocked role_id payload.
        expected_status_code = 200
        role_name = "testrole"
        mock_response = {
            "auth": None,
            "data": {"role_id": "e5a7b66e-5d08-da9c-7075-71984634b882"},
            "lease_duration": 0,
            "lease_id": "",
            "renewable": False,
            "request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
            "warnings": None,
            "wrap_info": None,
        }
        mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/role-id".format(
            mount_point=mount_point, role_name=role_name
        )
        requests_mocker.register_uri(
            method="GET",
            url=mock_url,
            status_code=expected_status_code,
            json=mock_response,
        )
        app_role = AppRole(adapter=JSONAdapter())
        response = app_role.read_role_id(role_name=role_name, mount_point=mount_point)
        self.assertEqual(first=mock_response, second=response)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT),
            ("custom mount point", "approle-test"),
        ]
    )
    @requests_mock.Mocker()
    def test_update_role_id(self, test_label, mount_point, requests_mocker):
        # POST .../role/<role_name>/role-id pushes a caller-chosen role_id.
        expected_status_code = 200
        role_name = "testrole"
        role_id = "test_role_id"
        mock_response = {
            "auth": None,
            "data": {"role_id": role_id},
            "lease_duration": 0,
            "lease_id": "",
            "renewable": False,
            "request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
            "warnings": None,
            "wrap_info": None,
        }
        mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/role-id".format(
            mount_point=mount_point, role_name=role_name
        )
        requests_mocker.register_uri(
            method="POST",
            url=mock_url,
            status_code=expected_status_code,
            json=mock_response,
        )
        app_role = AppRole(adapter=JSONAdapter())
        response = app_role.update_role_id(
            role_name=role_name, role_id=role_id, mount_point=mount_point
        )
        self.assertEqual(first=mock_response, second=response)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT, None),
            ("custom mount point", "approle-test", exceptions.ParamValidationError),
        ]
    )
    @requests_mock.Mocker()
    def test_generate_secret_id(self, test_label, mount_point, raises, requests_mocker):
        # POST .../role/<role_name>/secret-id; a non-string metadata argument
        # must raise before any HTTP request is issued.
        expected_status_code = 200
        role_name = "testrole"
        mock_response = {
            "auth": None,
            "data": {
                "secret_id": "841771dc-11c9-bbc7-bcac-6a3945a69cd9",
                "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
            },
            "lease_duration": 0,
            "lease_id": "",
            "renewable": False,
            "request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
            "warnings": None,
            "wrap_info": None,
        }
        mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id".format(
            mount_point=mount_point, role_name=role_name
        )
        requests_mocker.register_uri(
            method="POST",
            url=mock_url,
            status_code=expected_status_code,
            json=mock_response,
        )
        app_role = AppRole(adapter=JSONAdapter())
        if raises is not None:
            with self.assertRaises(raises) as cm:
                app_role.generate_secret_id(
                    role_name=role_name,
                    metadata="metadata string",
                    mount_point=mount_point,
                )
            self.assertIn(
                member="unsupported metadata argument", container=str(cm.exception)
            )
        else:
            response = app_role.generate_secret_id(
                role_name=role_name, cidr_list=["127.0.0.1/32"], mount_point=mount_point
            )
            self.assertEqual(first=mock_response, second=response)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT, None),
            ("custom mount point", "approle-test", exceptions.ParamValidationError),
        ]
    )
    @requests_mock.Mocker()
    def test_create_custom_secret_id(
        self, test_label, mount_point, raises, requests_mocker
    ):
        # POST .../role/<role_name>/custom-secret-id with a caller-supplied
        # secret_id; invalid metadata must raise before the request is sent.
        expected_status_code = 200
        role_name = "testrole"
        secret_id = "custom_secret"
        mock_response = {
            "auth": None,
            "data": {
                "secret_id": secret_id,
                "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
            },
            "lease_duration": 0,
            "lease_id": "",
            "renewable": False,
            "request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
            "warnings": None,
            "wrap_info": None,
        }
        mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/custom-secret-id".format(
            mount_point=mount_point, role_name=role_name
        )
        requests_mocker.register_uri(
            method="POST",
            url=mock_url,
            status_code=expected_status_code,
            json=mock_response,
        )
        app_role = AppRole(adapter=JSONAdapter())
        if raises is not None:
            with self.assertRaises(raises) as cm:
                app_role.create_custom_secret_id(
                    role_name=role_name,
                    secret_id=secret_id,
                    cidr_list=["127.0.0.1/32"],
                    metadata="metadata string",
                    mount_point=mount_point,
                )
            self.assertIn(
                member="unsupported metadata argument", container=str(cm.exception)
            )
        else:
            response = app_role.create_custom_secret_id(
                role_name=role_name,
                secret_id=secret_id,
                cidr_list=["127.0.0.1/32"],
                mount_point=mount_point,
            )
            self.assertEqual(first=mock_response, second=response)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT),
            ("custom mount point", "approle-test"),
        ]
    )
    @requests_mock.Mocker()
    def test_read_secret_id(self, test_label, mount_point, requests_mocker):
        # POST .../role/<role_name>/secret-id/lookup returns the mocked entry.
        expected_status_code = 200
        role_name = "testrole"
        secret_id = "custom_secret"
        mock_response = {
            "auth": None,
            "data": {
                "secret_id": secret_id,
                "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
            },
            "lease_duration": 0,
            "lease_id": "",
            "renewable": False,
            "request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
            "warnings": None,
            "wrap_info": None,
        }
        mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id/lookup".format(
            mount_point=mount_point, role_name=role_name
        )
        requests_mocker.register_uri(
            method="POST",
            url=mock_url,
            status_code=expected_status_code,
            json=mock_response,
        )
        app_role = AppRole(adapter=JSONAdapter())
        response = app_role.read_secret_id(
            role_name=role_name, secret_id=secret_id, mount_point=mount_point
        )
        self.assertEqual(first=mock_response, second=response)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT),
            ("custom mount point", "approle-test"),
        ]
    )
    @requests_mock.Mocker()
    def test_destroy_secret_id(self, test_label, mount_point, requests_mocker):
        # POST .../role/<role_name>/secret-id/destroy; only status is checked.
        expected_status_code = 204
        role_name = "testrole"
        secret_id = "custom_secret"
        mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id/destroy".format(
            mount_point=mount_point, role_name=role_name
        )
        requests_mocker.register_uri(
            method="POST",
            url=mock_url,
            status_code=expected_status_code,
        )
        app_role = AppRole(adapter=JSONAdapter())
        response = app_role.destroy_secret_id(
            role_name=role_name, secret_id=secret_id, mount_point=mount_point
        )
        self.assertEqual(first=expected_status_code, second=response.status_code)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT),
            ("custom mount point", "approle-test"),
        ]
    )
    @requests_mock.Mocker()
    def test_list_secret_id_accessors(self, test_label, mount_point, requests_mocker):
        # LIST .../role/<role_name>/secret-id returns the mocked accessor keys.
        expected_status_code = 200
        role_name = "testrole"
        mock_response = {
            "auth": None,
            "data": {
                "keys": [
                    "ce102d2a-8253-c437-bf9a-aceed4241491",
                    "a1c8dee4-b869-e68d-3520-2040c1a0849a",
                    "be83b7e2-044c-7244-07e1-47560ca1c787",
                    "84896a0c-1347-aa90-a4f6-aca8b7558780",
                    "239b1328-6523-15e7-403a-a48038cdc45a",
                ]
            },
            "lease_duration": 0,
            "lease_id": "",
            "renewable": False,
            "request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
            "warnings": None,
            "wrap_info": None,
        }
        mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id".format(
            mount_point=mount_point, role_name=role_name
        )
        requests_mocker.register_uri(
            method="LIST",
            url=mock_url,
            status_code=expected_status_code,
            json=mock_response,
        )
        app_role = AppRole(adapter=JSONAdapter())
        response = app_role.list_secret_id_accessors(
            role_name=role_name, mount_point=mount_point
        )
        self.assertEqual(first=mock_response, second=response)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT),
            ("custom mount point", "approle-test"),
        ]
    )
    @requests_mock.Mocker()
    def test_read_secret_id_accessor(self, test_label, mount_point, requests_mocker):
        # POST .../role/<role_name>/secret-id-accessor/lookup by accessor.
        expected_status_code = 200
        role_name = "testrole"
        secret_id = "custom_secret"
        secret_id_accessor = "84896a0c-1347-aa90-a4f6-aca8b7558780"
        mock_response = {
            "auth": None,
            "data": {
                "secret_id": secret_id,
                "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
            },
            "lease_duration": 0,
            "lease_id": "",
            "renewable": False,
            "request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
            "warnings": None,
            "wrap_info": None,
        }
        mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id-accessor/lookup".format(
            mount_point=mount_point, role_name=role_name
        )
        requests_mocker.register_uri(
            method="POST",
            url=mock_url,
            status_code=expected_status_code,
            json=mock_response,
        )
        app_role = AppRole(adapter=JSONAdapter())
        response = app_role.read_secret_id_accessor(
            role_name=role_name,
            secret_id_accessor=secret_id_accessor,
            mount_point=mount_point,
        )
        self.assertEqual(first=mock_response, second=response)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT),
            ("custom mount point", "approle-test"),
        ]
    )
    @requests_mock.Mocker()
    def test_destroy_secret_id_accessor(self, test_label, mount_point, requests_mocker):
        # POST .../role/<role_name>/secret-id-accessor/destroy; status only.
        expected_status_code = 204
        role_name = "testrole"
        secret_id_accessor = "84896a0c-1347-aa90-a4f6-aca8b7558780"
        mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id-accessor/destroy".format(
            mount_point=mount_point, role_name=role_name
        )
        requests_mocker.register_uri(
            method="POST",
            url=mock_url,
            status_code=expected_status_code,
        )
        app_role = AppRole(adapter=JSONAdapter())
        response = app_role.destroy_secret_id_accessor(
            role_name=role_name,
            secret_id_accessor=secret_id_accessor,
            mount_point=mount_point,
        )
        self.assertEqual(first=expected_status_code, second=response.status_code)

    @parameterized.expand(
        [
            ("default mount point", DEFAULT_MOUNT_POINT),
            ("custom mount point", "approle-test"),
        ]
    )
    @requests_mock.Mocker()
    def test_login(self, test_label, mount_point, requests_mocker):
        # POST .../login with role_id + secret_id returns the mocked auth blob.
        expected_status_code = 200
        role_id = "test_role_id"
        secret_id = "custom_secret"
        mock_response = {
            "data": None,
            "auth": {
                "renewable": True,
                "lease_duration": 1200,
                "metadata": None,
                "token_policies": ["default"],
                "accessor": "fd6c9a00-d2dc-3b11-0be5-af7ae0e1d374",
                "client_token": "5b1a0318-679c-9c45-e5c6-d1b9a9035d49",
            },
            "lease_duration": 0,
            "lease_id": "",
            "renewable": False,
            "request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
            "warnings": None,
            "wrap_info": None,
        }
        mock_url = "http://localhost:8200/v1/auth/{mount_point}/login".format(
            mount_point=mount_point,
        )
        requests_mocker.register_uri(
            method="POST",
            url=mock_url,
            status_code=expected_status_code,
            json=mock_response,
        )
        app_role = AppRole(adapter=JSONAdapter())
        response = app_role.login(
            role_id=role_id, secret_id=secret_id, mount_point=mount_point
        )
        self.assertEqual(first=mock_response, second=response)
| 33.88
| 116
| 0.56951
| 2,086
| 20,328
| 5.232982
| 0.083893
| 0.122756
| 0.052767
| 0.056797
| 0.89978
| 0.891993
| 0.880542
| 0.876512
| 0.85993
| 0.8428
| 0
| 0.045991
| 0.325069
| 20,328
| 599
| 117
| 33.936561
| 0.749636
| 0
| 0
| 0.620818
| 0
| 0.009294
| 0.200905
| 0.04757
| 0
| 0
| 0
| 0
| 0.037175
| 1
| 0.026022
| false
| 0
| 0.013011
| 0
| 0.040892
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
41f187a29a6bf2c3e4eacdb4d15521d2e73bc273
| 399
|
py
|
Python
|
geotrek/core/tests/__init__.py
|
jmdecastel/GEOTADMIN
|
15547c0a99ae4c541ca517cdbc2cf17ab5c96f87
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/core/tests/__init__.py
|
jmdecastel/GEOTADMIN
|
15547c0a99ae4c541ca517cdbc2cf17ab5c96f87
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/core/tests/__init__.py
|
jmdecastel/GEOTADMIN
|
15547c0a99ae4c541ca517cdbc2cf17ab5c96f87
|
[
"BSD-2-Clause"
] | null | null | null |
from .test_triggers import * # NOQA
from .test_views import * # NOQA
from .test_factories import * # NOQA
from .test_path_filter import * # NOQA
from .test_topology import * # NOQA
from .test_path_split import * # NOQA
from .test_filters import * # NOQA
from .test_graph import * # NOQA
from .test_forms import * # NOQA
from .test_fields import * # NOQA
from .test_models import * # NOQA
| 33.25
| 39
| 0.724311
| 57
| 399
| 4.842105
| 0.280702
| 0.318841
| 0.507246
| 0.652174
| 0.15942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192982
| 399
| 11
| 40
| 36.272727
| 0.857143
| 0.135338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
41f78e3197b2675cacb8ee4688c1daf1da7756c4
| 28
|
py
|
Python
|
Section 6/6.5 Start/main.py
|
chenjianAgain/Full-Stack-Web-Development-with-Flask
|
e110b0b649c19a9fd11ae70311cbf03727640921
|
[
"MIT"
] | 14
|
2019-10-24T16:14:29.000Z
|
2021-12-29T16:44:21.000Z
|
Section 6/6.5 Start/main.py
|
chenjianAgain/Full-Stack-Web-Development-with-Flask
|
e110b0b649c19a9fd11ae70311cbf03727640921
|
[
"MIT"
] | 6
|
2019-10-03T08:03:00.000Z
|
2021-08-14T12:36:11.000Z
|
Section 6/6.5 Start/main.py
|
chenjianAgain/Full-Stack-Web-Development-with-Flask
|
e110b0b649c19a9fd11ae70311cbf03727640921
|
[
"MIT"
] | 29
|
2019-03-31T00:19:44.000Z
|
2022-02-07T11:12:24.000Z
|
from application import app
| 14
| 27
| 0.857143
| 4
| 28
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 2
| 27
| 14
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5126109ebb18c6329572ec7d7ade0aa493ab7e28
| 213
|
py
|
Python
|
tests/conftest.py
|
iamareebjamal/flask-combo-jsonapi
|
35a5e35de8c94fd5d9d24492c8b00f6cb2ea5e4a
|
[
"MIT"
] | 15
|
2020-05-02T05:05:03.000Z
|
2022-01-10T18:32:23.000Z
|
tests/conftest.py
|
iamareebjamal/flask-combo-jsonapi
|
35a5e35de8c94fd5d9d24492c8b00f6cb2ea5e4a
|
[
"MIT"
] | 48
|
2020-04-21T17:32:58.000Z
|
2022-03-02T20:55:19.000Z
|
tests/conftest.py
|
iamareebjamal/flask-combo-jsonapi
|
35a5e35de8c94fd5d9d24492c8b00f6cb2ea5e4a
|
[
"MIT"
] | 14
|
2020-05-17T11:08:14.000Z
|
2021-12-16T09:28:32.000Z
|
import pytest
from flask import Flask
@pytest.fixture(scope="session")
def app():
    """Session-wide Flask application instance used by the test suite."""
    return Flask(__name__)
@pytest.fixture(scope="session")
def client(app):
    """Flask test client bound to the session-wide ``app`` fixture.

    ``pytest.yield_fixture`` is deprecated (removed in pytest 6.2); the plain
    ``pytest.fixture`` decorator handles both return- and yield-style
    fixtures, and this fixture does not yield anyway.
    """
    return app.test_client()
| 14.2
| 38
| 0.70892
| 29
| 213
| 5
| 0.482759
| 0.165517
| 0.262069
| 0.303448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169014
| 213
| 14
| 39
| 15.214286
| 0.819209
| 0
| 0
| 0
| 0
| 0
| 0.065728
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.222222
| 0.111111
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
512e7af1fc1797b067b5be9983ccdd0ed9664b40
| 47
|
py
|
Python
|
jupyterlab_gitsync/jupyterlab_gitsync/handlers.py
|
angelakuo/jupyter-extensions
|
06efeb0240e7bbba59b8dc7b4941ab4301737471
|
[
"Apache-2.0"
] | 39
|
2020-06-04T14:42:21.000Z
|
2021-11-16T10:03:02.000Z
|
jupyterlab_gitsync/jupyterlab_gitsync/handlers.py
|
angelakuo/jupyter-extensions
|
06efeb0240e7bbba59b8dc7b4941ab4301737471
|
[
"Apache-2.0"
] | 161
|
2020-05-30T01:13:05.000Z
|
2022-02-06T10:04:14.000Z
|
jupyterlab_gitsync/jupyterlab_gitsync/handlers.py
|
mwiewior/jupyter-extensions
|
b928265ee22246ac2761a5439b8363e98ec735e6
|
[
"Apache-2.0"
] | 28
|
2020-06-05T20:38:09.000Z
|
2021-11-25T20:03:40.000Z
|
from jupyterlab_gitsync.git_handlers import *
| 23.5
| 46
| 0.851064
| 6
| 47
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 1
| 47
| 47
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a9cb4c86ad9db08bbbc6d5a2743d984f45c2e58
| 24
|
py
|
Python
|
apps/polls/views/__init__.py
|
Eduardo-RFarias/Django-docs-tutorial
|
b4af94baf946b3b6b823b1f971cb905c061c03ec
|
[
"MIT"
] | null | null | null |
apps/polls/views/__init__.py
|
Eduardo-RFarias/Django-docs-tutorial
|
b4af94baf946b3b6b823b1f971cb905c061c03ec
|
[
"MIT"
] | null | null | null |
apps/polls/views/__init__.py
|
Eduardo-RFarias/Django-docs-tutorial
|
b4af94baf946b3b6b823b1f971cb905c061c03ec
|
[
"MIT"
] | null | null | null |
from .PoolCRUD import *
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5ac2978767e59d2c9ef1a377dbbc09eccbe8d125
| 226
|
py
|
Python
|
django_mri/models/choices/__init__.py
|
GalBenZvi/django_mri
|
558e9187a6ba18b7ba69aced919a6f94d066a2fc
|
[
"Apache-2.0"
] | 4
|
2020-07-27T20:33:54.000Z
|
2022-01-11T20:24:03.000Z
|
django_mri/models/choices/__init__.py
|
GalBenZvi/django_mri
|
558e9187a6ba18b7ba69aced919a6f94d066a2fc
|
[
"Apache-2.0"
] | 107
|
2019-09-04T11:38:46.000Z
|
2022-03-04T13:59:51.000Z
|
django_mri/models/choices/__init__.py
|
GalBenZvi/django_mri
|
558e9187a6ba18b7ba69aced919a6f94d066a2fc
|
[
"Apache-2.0"
] | 2
|
2020-05-24T06:35:33.000Z
|
2020-06-14T13:15:32.000Z
|
"""
Choice ENUMs for easier maintenance of CharField's choice parameters.
"""
from django_mri.models.choices.scanning_sequence import ScanningSequence
from django_mri.models.choices.sequence_variant import SequenceVariant
| 25.111111
| 72
| 0.836283
| 28
| 226
| 6.607143
| 0.714286
| 0.108108
| 0.140541
| 0.205405
| 0.281081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10177
| 226
| 8
| 73
| 28.25
| 0.91133
| 0.30531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5ad1d1d129ee697a259d4b093b71075cc3d15fc8
| 157
|
py
|
Python
|
Controller/ZO/__init__.py
|
colindomoney-hardware/ZeroOne
|
3e02f7b7ba1957ab4c35abeba228e40a8a06e810
|
[
"MIT"
] | null | null | null |
Controller/ZO/__init__.py
|
colindomoney-hardware/ZeroOne
|
3e02f7b7ba1957ab4c35abeba228e40a8a06e810
|
[
"MIT"
] | 4
|
2021-06-08T19:58:45.000Z
|
2022-03-08T21:09:20.000Z
|
Controller/ZO/__init__.py
|
colindomoney/ZeroOne
|
3e02f7b7ba1957ab4c35abeba228e40a8a06e810
|
[
"MIT"
] | null | null | null |
print('__init__ in ZO')
# TODO : These * imports are going to clutter the root namespace ...
from ZO.zero_one import *
from ZO.Image import *
import ZO.ui
| 19.625
| 68
| 0.719745
| 26
| 157
| 4.153846
| 0.769231
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184713
| 157
| 7
| 69
| 22.428571
| 0.84375
| 0.420382
| 0
| 0
| 0
| 0
| 0.159091
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
852a00ffb3cf9a75fd2755fbf46022d7ef06a0e9
| 4,905
|
py
|
Python
|
functions/basic_functions.py
|
raghuvarapadma/streaming_movies_shows
|
6e1717c1042b07be95c61f955245564c2d781386
|
[
"MIT"
] | null | null | null |
functions/basic_functions.py
|
raghuvarapadma/streaming_movies_shows
|
6e1717c1042b07be95c61f955245564c2d781386
|
[
"MIT"
] | null | null | null |
functions/basic_functions.py
|
raghuvarapadma/streaming_movies_shows
|
6e1717c1042b07be95c61f955245564c2d781386
|
[
"MIT"
] | null | null | null |
# adds in Platform column into datatable which represents on what platforms the show or movie is availble on
def plat(row):
    """Return a comma-separated string of the streaming platforms on which
    the row's title is available.

    ``row`` is a mapping-like record (e.g. a pandas Series) with the columns
    'Netflix', 'Hulu', 'Prime Video' and 'Disney+', each 1 (available) or
    0 (not available).  Returns '' when the title is on no platform.

    The original hand-rolled if/else ladder (one near-identical branch per
    platform) is replaced by a single loop + join with identical output.
    """
    platforms = ('Netflix', 'Hulu', 'Prime Video', 'Disney+')
    return ','.join(name for name in platforms if row[name] == 1)
def taking_input_movies(user_arr, data):
    """Interactively collect movie search parameters from the user.

    user_arr: list that chosen parameter names are appended to (mutated in place).
    data:     object with a ``columns`` attribute (e.g. a pandas DataFrame);
              a parameter is accepted if it is one of the columns or 'Rating'.

    Returns user_arr once the user answers 'no' to searching for more.

    Fixes over the original: removed unreachable ``break`` statements after
    ``return``, removed the ineffective ``except TypeError`` around ``input()``
    (input() does not raise TypeError), and replaced unbounded recursion on
    'yes' with iteration.
    """
    while True:
        param = input('Which parameter would you like to search by? Search by Title, Year, Age, Rating, Directors, Genres, Country, Language, Runtime, Platform. ')
        # 'Rating' is a virtual parameter not present as a real column.
        if (param in data.columns) or (param == 'Rating'):
            user_arr.append(param)
            # Ask whether to keep collecting parameters.
            while True:
                repeat = input('Would you like to search for another parameter? ')
                if repeat.lower() == 'yes':
                    break  # back to the outer prompt for another parameter
                elif repeat.lower() == 'no':
                    return user_arr
                else:
                    print('Please enter a valid input!')
        else:
            print('Please enter a valid input!')
def taking_input_shows(user_arr, data):
    """Interactively collect TV-show search parameters from the user.

    user_arr: list that chosen parameter names are appended to (mutated in place).
    data:     object with a ``columns`` attribute (e.g. a pandas DataFrame);
              a parameter is accepted if it is one of the columns or 'Rating'.

    Returns user_arr once the user answers 'no' to searching for more.

    Fixes over the original: removed unreachable ``break`` statements after
    ``return``, removed the ineffective ``except TypeError`` around ``input()``,
    and replaced unbounded recursion on 'yes' with iteration.
    """
    while True:
        param = input('Which parameter would you like to search by? Search by Title, Year, Age, Rating, Platform. ')
        # 'Rating' is a virtual parameter not present as a real column.
        if (param in data.columns) or (param == 'Rating'):
            user_arr.append(param)
            # Ask whether to keep collecting parameters.
            while True:
                repeat = input('Would you like to search for another parameter? ')
                if repeat.lower() == 'yes':
                    break  # back to the outer prompt for another parameter
                elif repeat.lower() == 'no':
                    return user_arr
                else:
                    print('Please enter a valid input!')
        else:
            print('Please enter a valid input!')
def sorting_data_movies(data):
    """Prompt until the user names a valid movie column to sort by; return it.

    data: object with a ``columns`` attribute (e.g. a pandas DataFrame).

    Fixes over the original: removed the unreachable ``break`` after ``return``
    and the ineffective ``except TypeError`` around ``input()``.
    """
    while True:
        param = input('Which parameter would you like to sort the data by? Search by Title, Year, Age, IMDb, Rotten Tomatoes, Directors, Genres, Country, Language, Runtime, Platform. ')
        if param in data.columns:
            return param
        print('Please input a valid answer!')
def sorting_data_shows(data):
    """Prompt until the user names a valid show column to sort by; return it.

    data: object with a ``columns`` attribute (e.g. a pandas DataFrame).

    Fixes over the original: removed the unreachable ``break`` after ``return``
    and the ineffective ``except TypeError`` around ``input()``.
    """
    while True:
        param = input('Which parameter would you like to sort the data by? Search by Title, Year, Age, IMDb, Rotten Tomatoes, Platform. ')
        if param in data.columns:
            return param
        print('Please input a valid answer!')
def data_to_csv(data, filename='Movies and Streaming.csv'):
    """Ask the user whether to export *data*; on 'yes' write it as CSV.

    data:     object with a pandas-style ``to_csv`` method.
    filename: destination path. Defaults to the previously hard-coded name,
              so existing callers are unaffected.

    Fixes over the original: the bare ``except:`` (which swallowed even
    KeyboardInterrupt/SystemExit) is narrowed to ``except Exception``.
    """
    while True:
        try:
            copy = input('Do you want to copy to a .csv file? ')
        except Exception:
            print('Please enter a valid input!')
            continue
        if copy.lower() == 'yes':
            print('Here is your .csv file!')
            data.to_csv(filename)
            break
        elif copy.lower() == 'no':
            print('You did not want to convert to a .csv file!')
            break
        else:
            print('Please enter a valid input!')
| 36.604478
| 189
| 0.459939
| 480
| 4,905
| 4.647917
| 0.197917
| 0.069027
| 0.057373
| 0.037651
| 0.755267
| 0.755267
| 0.723442
| 0.723442
| 0.723442
| 0.70372
| 0
| 0.001508
| 0.459123
| 4,905
| 134
| 190
| 36.604478
| 0.839427
| 0.021611
| 0
| 0.742188
| 0
| 0.03125
| 0.254273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046875
| false
| 0
| 0
| 0
| 0.117188
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
518dfd66ec08f00dc11db6ca2c41b813e1e16729
| 157
|
py
|
Python
|
tests/basics/string_strip.py
|
mgglcode/micro-python
|
8c1bec4ae75d4d042a6fdeda00f7e1e495ed14e9
|
[
"MIT"
] | 1
|
2015-06-15T11:52:01.000Z
|
2015-06-15T11:52:01.000Z
|
tests/basics/string_strip.py
|
mgglcode/micro-python
|
8c1bec4ae75d4d042a6fdeda00f7e1e495ed14e9
|
[
"MIT"
] | null | null | null |
tests/basics/string_strip.py
|
mgglcode/micro-python
|
8c1bec4ae75d4d042a6fdeda00f7e1e495ed14e9
|
[
"MIT"
] | null | null | null |
print("".strip())
print(" \t\n\r\v\f".strip())
print(" T E S T".strip())
print("abcabc".strip("ce"))
print("aaa".strip("b"))
print("abc efg ".strip("g a"))
| 22.428571
| 31
| 0.566879
| 29
| 157
| 3.068966
| 0.586207
| 0.337079
| 0.247191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095541
| 157
| 6
| 32
| 26.166667
| 0.626761
| 0
| 0
| 0
| 0
| 0
| 0.273885
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
51f5c5835b32785597a5dc44ea3324c4980442ab
| 82
|
py
|
Python
|
kaolin/io/__init__.py
|
T0mt0mp/kaolin
|
57d1e1478eec8df49dc7cc492f25637cec40399f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-04-02T17:48:05.000Z
|
2021-04-02T17:48:05.000Z
|
kaolin/io/__init__.py
|
T0mt0mp/kaolin
|
57d1e1478eec8df49dc7cc492f25637cec40399f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kaolin/io/__init__.py
|
T0mt0mp/kaolin
|
57d1e1478eec8df49dc7cc492f25637cec40399f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from . import dataset
from . import materials
from . import obj
from . import usd
| 16.4
| 23
| 0.756098
| 12
| 82
| 5.166667
| 0.5
| 0.645161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 82
| 4
| 24
| 20.5
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a40045ac6795db5522019a74d5443924c91d5a2f
| 95
|
py
|
Python
|
app/utils/states.py
|
edementyev/aiogram-chatbot-template
|
a3c0b3b7c2f7f57e5c8ffee8e48e10aa6bf89101
|
[
"MIT"
] | 1
|
2020-10-07T12:09:21.000Z
|
2020-10-07T12:09:21.000Z
|
app/utils/states.py
|
edementyev/aiogram-chatbot-template
|
a3c0b3b7c2f7f57e5c8ffee8e48e10aa6bf89101
|
[
"MIT"
] | null | null | null |
app/utils/states.py
|
edementyev/aiogram-chatbot-template
|
a3c0b3b7c2f7f57e5c8ffee8e48e10aa6bf89101
|
[
"MIT"
] | null | null | null |
from aiogram.dispatcher.filters.state import StatesGroup
class States(StatesGroup):
    """Empty aiogram finite-state-machine states container.

    Placeholder for the chatbot template; add ``State()`` class attributes
    here as conversation steps are introduced.
    """
    pass
| 15.833333
| 56
| 0.8
| 11
| 95
| 6.909091
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136842
| 95
| 5
| 57
| 19
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
a4029434386a37498c80316b3b152a6337f4aa0f
| 42
|
py
|
Python
|
recipes/psutil/run_test.py
|
pelson/RaspberryPi-conda
|
757a94dc17d76fbe9203dcec45172e19b2c84e22
|
[
"BSD-3-Clause"
] | null | null | null |
recipes/psutil/run_test.py
|
pelson/RaspberryPi-conda
|
757a94dc17d76fbe9203dcec45172e19b2c84e22
|
[
"BSD-3-Clause"
] | null | null | null |
recipes/psutil/run_test.py
|
pelson/RaspberryPi-conda
|
757a94dc17d76fbe9203dcec45172e19b2c84e22
|
[
"BSD-3-Clause"
] | null | null | null |
# Smoke test for the psutil conda recipe: importing the package and querying
# CPU times proves the compiled extension loads and basic calls work.
import psutil
print(psutil.cpu_times())
| 8.4
| 25
| 0.761905
| 6
| 42
| 5.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 4
| 26
| 10.5
| 0.837838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
32076fb3c140795781d160cffa965995ee916664
| 13,867
|
py
|
Python
|
models/decoder.py
|
okwrtdsh/3D-ResNets-PyTorch
|
f36a32ea8b283524d1d102937c49689b1f475b5f
|
[
"MIT"
] | null | null | null |
models/decoder.py
|
okwrtdsh/3D-ResNets-PyTorch
|
f36a32ea8b283524d1d102937c49689b1f475b5f
|
[
"MIT"
] | null | null | null |
models/decoder.py
|
okwrtdsh/3D-ResNets-PyTorch
|
f36a32ea8b283524d1d102937c49689b1f475b5f
|
[
"MIT"
] | null | null | null |
# import torch
import math
import torch.nn as nn
import torch.nn.functional as F
# from torch.autograd import Variable
# from functools import partial
from .binarized_modules import Exposuref
from .pattern_conv_modules import PatternConv
__all__ = [
'STSRResNetExp'
]
##############################################################################
import torch
from torch import nn
def pixel_shuffle(input, upscale_factor):
    r"""Rearrange a tensor of shape :math:`(N, C, d_{1}, ..., d_{n})` into
    :math:`(N, C/(r^n), d_{1}*r, ..., d_{n}*r)`, where ``r`` is
    ``upscale_factor`` and ``n`` is the number of spatial dimensions.

    Generalizes :class:`~torch.nn.PixelShuffle` to 1D/2D/3D data. The channel
    dimension must be divisible by ``upscale_factor ** n``.

    Args:
        input (Tensor): tensor with at least 3 dimensions ``(N, C, ...)``.
        upscale_factor (int): factor to increase spatial resolution by.

    Examples::

        >>> pixel_shuffle(torch.Tensor(1, 4, 8), 2).size()
        torch.Size([1, 2, 16])
        >>> pixel_shuffle(torch.Tensor(1, 8, 16, 16, 16), 2).size()
        torch.Size([1, 1, 32, 32, 32])
    """
    shape = list(input.size())
    n_dims = len(shape) - 2
    # Fold r^n sub-pixel channels out of the channel dimension.
    shape[1] //= upscale_factor ** n_dims
    spatial = shape[2:]
    out_spatial = [upscale_factor * d for d in spatial]
    expanded = input.contiguous().view(
        shape[0], shape[1], *([upscale_factor] * n_dims + spatial)
    )
    # Interleave each upscale axis next to its spatial axis, then flatten.
    axes = list(range(2, 2 + 2 * n_dims))
    axes = axes[1::2] + axes[0::2]
    permuted = expanded.permute(0, 1, *axes[::-1]).contiguous()
    return permuted.view(shape[0], shape[1], *out_spatial)
class PixelShuffle(nn.Module):
    r"""Module wrapper around :func:`pixel_shuffle`.

    Rearranges a tensor of shape :math:`(N, C, d_{1}, ..., d_{n})` into
    :math:`(N, C/(r^n), d_{1}*r, ..., d_{n}*r)`, where ``r`` is the upscale
    factor and ``n`` the number of spatial dimensions (1D/2D/3D supported).
    Useful for efficient sub-pixel convolution with a stride of
    :math:`1/r`; see `Real-Time Single Image and Video Super-Resolution
    Using an Efficient Sub-Pixel Convolutional Neural Network`
    (Shi et al., 2016), https://arxiv.org/abs/1609.05158.

    Args:
        upscale_factor (int): factor to increase spatial resolution by.

    Examples::

        >>> ps = PixelShuffle(2)
        >>> ps(torch.Tensor(1, 4, 8)).size()
        torch.Size([1, 2, 16])
    """

    def __init__(self, upscale_factor):
        super().__init__()
        self.upscale_factor = upscale_factor

    def forward(self, input):
        # Delegate to the functional implementation defined above.
        return pixel_shuffle(input, self.upscale_factor)

    def extra_repr(self):
        return f'upscale_factor={self.upscale_factor}'
##############################################################################
class ResidualBlock(nn.Module):
    """Residual block of two 3x3 conv + instance-norm layers (2D).

    conv -> IN -> LeakyReLU -> conv -> IN, then the input is added back
    (identity skip). Channel count and spatial size are preserved.
    """

    def __init__(self, n_channels=64):
        super().__init__()
        # Submodules are declared in the same order as the original code so
        # parameter creation (and thus RNG-based init) is reproducible.
        self.conv1 = nn.Conv2d(in_channels=n_channels, out_channels=n_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.in1 = nn.InstanceNorm2d(n_channels, affine=True)
        self.relu = nn.LeakyReLU(0.2, inplace=True)
        self.conv2 = nn.Conv2d(in_channels=n_channels, out_channels=n_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.in2 = nn.InstanceNorm2d(n_channels, affine=True)

    def forward(self, x):
        skip = x
        h = self.relu(self.in1(self.conv1(x)))
        h = self.in2(self.conv2(h))
        return torch.add(h, skip)
class ResidualBlock3D(nn.Module):
    """Residual block of two 3x3x3 conv + instance-norm layers (3D).

    Identical structure to :class:`ResidualBlock` but with volumetric
    convolutions: conv -> IN -> LeakyReLU -> conv -> IN plus identity skip.
    """

    def __init__(self, n_channels=64):
        super().__init__()
        # Same declaration order as the original code for reproducible init.
        self.conv1 = nn.Conv3d(in_channels=n_channels, out_channels=n_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.in1 = nn.InstanceNorm3d(n_channels, affine=True)
        self.relu = nn.LeakyReLU(0.2, inplace=True)
        self.conv2 = nn.Conv3d(in_channels=n_channels, out_channels=n_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.in2 = nn.InstanceNorm3d(n_channels, affine=True)

    def forward(self, x):
        skip = x
        h = self.relu(self.in1(self.conv1(x)))
        h = self.in2(self.conv2(h))
        return torch.add(h, skip)
##############################################################################
class STSRResNetExp(nn.Module):
    """
    Spatio-temporal super-resolution ResNet for a pixel-wise coded exposure image.

    forward() returns a pair:
      * a reconstruction (sigmoid-activated) upsampled in space and time, and
      * log-softmax classification scores over ``n_classes``.
    """
    def __init__(self,
                 sample_size,          # input spatial size; only 112 is supported
                 sample_duration,      # number of frames encoded in one exposure
                 n_classes=101,        # number of classification classes
                 upscale=2,            # spatial upscale factor; only 2 is supported
                 n_features_base=256,  # channels of the 2D feature trunk
                 n_features_up=16,     # channels of the 3D upsampling branch
                 n_features_clf=1024   # channels of the classifier bottleneck
                 ):
        super().__init__()
        # The architecture hard-codes these sizes.
        assert sample_size == 112
        assert upscale == 2
        # NOTE(review): F.sigmoid is deprecated in newer torch; torch.sigmoid
        # is the equivalent replacement.
        self.activation = F.sigmoid
        self.n_classes = n_classes
        self.upscale = upscale
        self.duration = sample_duration
        self.n_features_base = n_features_base
        self.n_features_up = n_features_up
        self.n_features_clf = n_features_clf
        # Magic constant: presumably batches per epoch, used to schedule the
        # exposure pattern via pass_count -- TODO confirm against Exposuref.
        n_batchs = 1193
        # Learned pixel-wise coded exposure layer (project-defined).
        self.exp = Exposuref(t=sample_duration, c=1, s=8, block=sample_size//upscale//8, noise_count=0, pass_count=n_batchs*3)
        self.conv_input = nn.Conv2d(in_channels=1, out_channels=self.n_features_base, kernel_size=9, stride=1, padding=4, bias=False)
        self.relu = nn.LeakyReLU(0.2, inplace=True)
        # Shared 2D trunk: 16 residual blocks.
        self.residual = self.make_layer(ResidualBlock, 16, self.n_features_base)
        self.conv_mid = nn.Conv2d(in_channels=self.n_features_base, out_channels=self.n_features_base, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_mid = nn.InstanceNorm2d(self.n_features_base, affine=True)
        # Sub-pixel upsampling: conv expands channels by upscale**3, then
        # PixelShuffle(2) rearranges them into the three trailing axes.
        self.upscale2x = nn.Sequential(
            nn.Conv3d(in_channels=self.n_features_base//(self.duration//self.upscale), out_channels=self.n_features_up*self.upscale**3, kernel_size=3, stride=1, padding=1, bias=False),
            PixelShuffle(2),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.residual2 = self.make_layer(ResidualBlock3D, 3, self.n_features_up)
        self.conv_output = nn.Conv3d(in_channels=self.n_features_up, out_channels=1, kernel_size=9, stride=1, padding=4, bias=False)
        # Classification head.
        self.conv_clf = nn.Conv2d(in_channels=self.n_features_base, out_channels=self.n_features_clf, kernel_size=1, stride=1, padding=0, bias=False)
        # NOTE(review): bn_clf is created but never applied in forward() --
        # confirm whether this is intentional.
        self.bn_clf = nn.InstanceNorm2d(self.n_features_clf, affine=True)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(self.n_features_clf, self.n_classes)
        # Kaiming-style (fan-out) normal initialization for all conv layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()

    def make_layer(self, block, num_of_layer, *args):
        """Stack ``num_of_layer`` instances of ``block(*args)`` in a Sequential."""
        layers = []
        for _ in range(num_of_layer):
            layers.append(block(*args))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Simulated coded-exposure capture of the input.
        x = self.exp(x)
        out = self.relu(self.conv_input(x))
        residual = out
        out = self.residual(out)
        # Classification head: 1x1 conv -> global avg pool -> flatten -> fc.
        clf_out = torch.flatten(self.avgpool(self.conv_clf(out)), 1)
        # NOTE(review): dropout2d applied to a 2D (N, C) tensor -- confirm
        # this was intended over plain F.dropout.
        clf_out = F.dropout2d(clf_out, training=self.training)
        clf_out = self.fc(clf_out)
        # Reconstruction head: global residual, then fold channels into a
        # temporal axis and upsample space + time.
        out = self.bn_mid(self.conv_mid(out))
        out = torch.add(out,residual)
        shape = out.shape
        out = out.view(shape[0], shape[1]//(self.duration//self.upscale), self.duration//self.upscale, *shape[2:])
        out = self.upscale2x(out)
        out = self.residual2(out)
        out = self.conv_output(out)
        return self.activation(out), F.log_softmax(clf_out, dim=1)
class SVSTSRResNetExp(STSRResNetExp):
    """STSRResNetExp variant whose input convolution is a project-defined
    PatternConv instead of a plain Conv2d.

    Note: the replacement layer uses bias=True, unlike the bias-free Conv2d
    it replaces in the parent.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Swap the first convolution after the parent has built everything.
        self.conv_input = PatternConv(in_channels=1, out_channels=self.n_features_base, kernel_size=9, stride=1, padding=4, bias=True)
class TSRResNetExp(nn.Module):
    """
    Temporal super-resolution ResNet for a pixel-wise coded exposure image.

    Same two-headed structure as STSRResNetExp (reconstruction + log-softmax
    classification) but with upscale fixed to 1: only the temporal axis is
    reconstructed, so the upsampling branch has no PixelShuffle stage.
    """
    def __init__(self,
                 sample_size,          # input spatial size; only 112 is supported
                 sample_duration,      # number of frames encoded in one exposure
                 n_classes=101,        # number of classification classes
                 upscale=1,            # spatial upscale factor; only 1 is supported
                 n_features_base=128,  # channels of the 2D feature trunk
                 n_features_up=16,     # channels of the 3D branch
                 n_features_clf=1024   # channels of the classifier bottleneck
                 ):
        super().__init__()
        # The architecture hard-codes these sizes.
        assert sample_size == 112
        assert upscale == 1
        # NOTE(review): F.sigmoid is deprecated in newer torch; torch.sigmoid
        # is the equivalent replacement.
        self.activation = F.sigmoid
        self.n_classes = n_classes
        self.upscale = upscale
        self.duration = sample_duration
        self.n_features_base = n_features_base
        self.n_features_up = n_features_up
        self.n_features_clf = n_features_clf
        # Magic constant: presumably batches per epoch, used to schedule the
        # exposure pattern via pass_count -- TODO confirm against Exposuref.
        self.n_batchs = 1193
        # Learned pixel-wise coded exposure layer (project-defined).
        self.exp = Exposuref(t=sample_duration, c=1, s=8, block=sample_size//upscale//8, noise_count=0, pass_count=self.n_batchs*3)
        self.conv_input = nn.Conv2d(in_channels=1, out_channels=self.n_features_base, kernel_size=9, stride=1, padding=4, bias=False)
        self.relu = nn.LeakyReLU(0.2, inplace=True)
        # Shared 2D trunk: 16 residual blocks.
        self.residual = self.make_layer(ResidualBlock, 16, self.n_features_base)
        self.conv_mid = nn.Conv2d(in_channels=self.n_features_base, out_channels=self.n_features_base, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_mid = nn.InstanceNorm2d(self.n_features_base, affine=True)
        # With upscale == 1 this is a channel projection only (upscale**3 == 1);
        # no PixelShuffle is needed.
        self.upscale1x = nn.Sequential(
            nn.Conv3d(in_channels=self.n_features_base//(self.duration//self.upscale), out_channels=self.n_features_up*self.upscale**3, kernel_size=3, stride=1, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.residual2 = self.make_layer(ResidualBlock3D, 3, self.n_features_up)
        self.conv_output = nn.Conv3d(in_channels=self.n_features_up, out_channels=1, kernel_size=9, stride=1, padding=4, bias=False)
        # Classification head.
        self.conv_clf = nn.Conv2d(in_channels=self.n_features_base, out_channels=self.n_features_clf, kernel_size=1, stride=1, padding=0, bias=False)
        # NOTE(review): bn_clf is created but never applied in forward() --
        # confirm whether this is intentional.
        self.bn_clf = nn.InstanceNorm2d(self.n_features_clf, affine=True)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(self.n_features_clf, self.n_classes)
        # Kaiming-style (fan-out) normal initialization for all conv layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()

    def make_layer(self, block, num_of_layer, *args):
        """Stack ``num_of_layer`` instances of ``block(*args)`` in a Sequential."""
        layers = []
        for _ in range(num_of_layer):
            layers.append(block(*args))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Simulated coded-exposure capture of the input.
        x = self.exp(x)
        out = self.relu(self.conv_input(x))
        residual = out
        out = self.residual(out)
        # Classification head: 1x1 conv -> global avg pool -> flatten -> fc.
        clf_out = torch.flatten(self.avgpool(self.conv_clf(out)), 1)
        # NOTE(review): dropout2d applied to a 2D (N, C) tensor -- confirm
        # this was intended over plain F.dropout.
        clf_out = F.dropout2d(clf_out, training=self.training)
        clf_out = self.fc(clf_out)
        # Reconstruction head: global residual, then fold channels into a
        # temporal axis and reconstruct the frame sequence.
        out = self.bn_mid(self.conv_mid(out))
        out = torch.add(out,residual)
        shape = out.shape
        out = out.view(shape[0], shape[1]//(self.duration//self.upscale), self.duration//self.upscale, *shape[2:])
        out = self.upscale1x(out)
        out = self.residual2(out)
        out = self.conv_output(out)
        return self.activation(out), F.log_softmax(clf_out, dim=1)
class SVTSRResNetExp(TSRResNetExp):
    """TSRResNetExp variant whose input convolution is a project-defined
    PatternConv instead of a plain Conv2d.

    Note: the replacement layer uses bias=True, unlike the bias-free Conv2d
    it replaces in the parent.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Swap the first convolution after the parent has built everything.
        self.conv_input = PatternConv(in_channels=1, out_channels=self.n_features_base, kernel_size=9, stride=1, padding=4, bias=True)
| 43.065217
| 184
| 0.612822
| 1,936
| 13,867
| 4.193698
| 0.119318
| 0.050992
| 0.05444
| 0.037689
| 0.825225
| 0.822885
| 0.805887
| 0.797512
| 0.791969
| 0.764873
| 0
| 0.033153
| 0.24086
| 13,867
| 321
| 185
| 43.199377
| 0.738102
| 0.201053
| 0
| 0.692308
| 0
| 0
| 0.002843
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 1
| 0.076923
| false
| 0.009615
| 0.033654
| 0.009615
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5c7d618cdbf581afd8b16400b6fda4e1c7ed5436
| 441
|
py
|
Python
|
dashboard/lib/flanker/mime/message/headers/__init__.py
|
robertsimmons514/isthislegit
|
aa8f2b6cb2ac3de2b0fe03bb93dbceccc4c1f495
|
[
"BSD-3-Clause"
] | 929
|
2015-01-01T11:14:21.000Z
|
2022-03-28T23:47:40.000Z
|
dashboard/lib/flanker/mime/message/headers/__init__.py
|
robertsimmons514/isthislegit
|
aa8f2b6cb2ac3de2b0fe03bb93dbceccc4c1f495
|
[
"BSD-3-Clause"
] | 141
|
2015-01-10T19:02:03.000Z
|
2021-07-26T18:04:14.000Z
|
dashboard/lib/flanker/mime/message/headers/__init__.py
|
robertsimmons514/isthislegit
|
aa8f2b6cb2ac3de2b0fe03bb93dbceccc4c1f495
|
[
"BSD-3-Clause"
] | 179
|
2015-01-01T18:42:46.000Z
|
2022-02-16T21:57:14.000Z
|
from flanker.mime.message.headers.headers import MimeHeaders
from flanker.mime.message.headers.encodedword import mime_to_unicode
from flanker.mime.message.headers.parsing import normalize, is_empty, parse_header_value
from flanker.mime.message.headers.encoding import to_mime
from flanker.mime.message.headers.parametrized import is_parametrized
from flanker.mime.message.headers.wrappers import WithParams, ContentType, MessageId, Subject
| 63
| 93
| 0.868481
| 60
| 441
| 6.266667
| 0.4
| 0.175532
| 0.239362
| 0.351064
| 0.462766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06576
| 441
| 6
| 94
| 73.5
| 0.912621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5c8e9ce7437351d7d69328dc8914a2ec60532ac5
| 199
|
py
|
Python
|
projects/admin.py
|
stafazzoli/django_issuetracker
|
fd12605d53cfff9619abcb35e7e011d9d9c1da33
|
[
"MIT"
] | 1
|
2020-08-13T13:20:55.000Z
|
2020-08-13T13:20:55.000Z
|
projects/admin.py
|
stafazzoli/django_issuetracker
|
fd12605d53cfff9619abcb35e7e011d9d9c1da33
|
[
"MIT"
] | 2
|
2021-06-10T17:44:39.000Z
|
2022-02-10T10:49:21.000Z
|
projects/admin.py
|
stafazzoli/django_issuetracker
|
fd12605d53cfff9619abcb35e7e011d9d9c1da33
|
[
"MIT"
] | 2
|
2020-06-18T00:09:39.000Z
|
2020-07-03T19:18:14.000Z
|
from django.contrib import admin
from . import models
# Register your models here.
# Expose the issue-tracker models in the Django admin using the default
# ModelAdmin options.
admin.site.register(models.ProjectCategory)
admin.site.register(models.Project)
admin.site.register(models.Issue)
| 22.111111
| 43
| 0.81407
| 27
| 199
| 6
| 0.481481
| 0.166667
| 0.314815
| 0.425926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090452
| 199
| 8
| 44
| 24.875
| 0.895028
| 0.130653
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
7a41146d9d9f5b3170add4863afcb1b9d7b5f894
| 2,302
|
py
|
Python
|
epytope/Data/pssms/smm/mat/A_02_02_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smm/mat/A_02_02_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smm/mat/A_02_02_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
A_02_02_9 = {0: {'A': -0.145, 'C': 0.221, 'E': 0.72, 'D': 0.844, 'G': 0.058, 'F': -0.922, 'I': -0.135, 'H': 0.116, 'K': -0.195, 'M': -0.461, 'L': -0.138, 'N': 0.087, 'Q': 0.011, 'P': 0.503, 'S': -0.089, 'R': 0.099, 'T': 0.161, 'W': -0.221, 'V': 0.035, 'Y': -0.547}, 1: {'A': 0.108, 'C': 0.324, 'E': 0.89, 'D': 0.324, 'G': -0.085, 'F': -0.094, 'I': -0.572, 'H': 0.05, 'K': 0.233, 'M': -1.25, 'L': -1.345, 'N': 0.41, 'Q': -0.308, 'P': 1.043, 'S': -0.004, 'R': 0.877, 'T': -0.128, 'W': -0.272, 'V': -0.341, 'Y': 0.14}, 2: {'A': -0.513, 'C': 0.144, 'E': 0.353, 'D': 0.04, 'G': 0.163, 'F': -0.354, 'I': -0.132, 'H': 0.102, 'K': 0.352, 'M': -0.561, 'L': 0.233, 'N': -0.217, 'Q': 0.135, 'P': 0.1, 'S': -0.352, 'R': 0.425, 'T': 0.128, 'W': 0.149, 'V': -0.037, 'Y': -0.157}, 3: {'A': -0.172, 'C': -0.042, 'E': -0.216, 'D': -0.315, 'G': -0.157, 'F': 0.003, 'I': 0.129, 'H': 0.033, 'K': 0.103, 'M': 0.093, 'L': 0.145, 'N': 0.118, 'Q': 0.037, 'P': -0.045, 'S': -0.121, 'R': 0.226, 'T': 0.118, 'W': 0.026, 'V': 0.092, 'Y': -0.056}, 4: {'A': 0.035, 'C': -0.054, 'E': 0.023, 'D': 0.049, 'G': 0.109, 'F': -0.272, 'I': -0.3, 'H': -0.127, 'K': 0.131, 'M': 0.092, 'L': -0.107, 'N': 0.122, 'Q': 0.034, 'P': 0.264, 'S': 0.04, 'R': 0.161, 'T': 0.195, 'W': 0.052, 'V': -0.097, 'Y': -0.351}, 5: {'A': 0.099, 'C': -0.034, 'E': 0.087, 'D': 0.139, 'G': 0.167, 'F': -0.218, 'I': -0.196, 'H': 0.144, 'K': 0.449, 'M': -0.138, 'L': -0.265, 'N': -0.078, 'Q': -0.003, 'P': 0.028, 'S': -0.151, 'R': 0.218, 'T': -0.17, 'W': 0.112, 'V': -0.145, 'Y': -0.044}, 6: {'A': -0.116, 'C': 0.037, 'E': -0.098, 'D': -0.071, 'G': 0.241, 'F': -0.355, 'I': 0.156, 'H': -0.175, 'K': 0.554, 'M': -0.063, 'L': 0.183, 'N': -0.031, 'Q': 0.062, 'P': 0.19, 'S': -0.029, 'R': 0.47, 'T': -0.083, 'W': -0.39, 'V': -0.06, 'Y': -0.422}, 7: {'A': -0.048, 'C': 0.154, 'E': -0.175, 'D': 0.432, 'G': -0.001, 'F': -0.374, 'I': 0.173, 'H': 0.007, 'K': 0.243, 'M': 0.1, 'L': -0.233, 'N': -0.014, 'Q': -0.004, 'P': -0.08, 'S': -0.086, 'R': 0.077, 'T': 0.143, 'W': 
-0.157, 'V': 0.264, 'Y': -0.42}, 8: {'A': -0.423, 'C': 0.65, 'E': -0.065, 'D': -0.186, 'G': -0.273, 'F': 0.009, 'I': -0.619, 'H': 0.454, 'K': 0.779, 'M': -0.252, 'L': -0.945, 'N': -0.315, 'Q': 0.288, 'P': -0.101, 'S': 0.282, 'R': 0.578, 'T': 0.148, 'W': 0.44, 'V': -1.051, 'Y': 0.602}, -1: {'con': 4.16801}}
| 2,302
| 2,302
| 0.393136
| 557
| 2,302
| 1.61939
| 0.319569
| 0.019956
| 0.011086
| 0.013304
| 0.031042
| 0
| 0
| 0
| 0
| 0
| 0
| 0.372214
| 0.162033
| 2,302
| 1
| 2,302
| 2,302
| 0.095386
| 0
| 0
| 0
| 0
| 0
| 0.079462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7a704638f4ead736faef7491ca6fb950df8e7bc2
| 123
|
py
|
Python
|
actors/__init__.py
|
andriikobyakovskiy/simple_actors
|
d02017313626784ba8addbbcd1ce1f4bf9d94c3b
|
[
"MIT"
] | null | null | null |
actors/__init__.py
|
andriikobyakovskiy/simple_actors
|
d02017313626784ba8addbbcd1ce1f4bf9d94c3b
|
[
"MIT"
] | null | null | null |
actors/__init__.py
|
andriikobyakovskiy/simple_actors
|
d02017313626784ba8addbbcd1ce1f4bf9d94c3b
|
[
"MIT"
] | null | null | null |
from actors.message import Message, PoisonPill
from actors.actor import Actor, Props
from actors.system import ActorSystem
| 30.75
| 46
| 0.845528
| 17
| 123
| 6.117647
| 0.529412
| 0.288462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113821
| 123
| 3
| 47
| 41
| 0.954128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7a80c1b9fd0aeed94abc826cb34fecf2ee81d23b
| 173
|
py
|
Python
|
app/modules/streamer/__init__.py
|
Sean2525/VAWSR_backend
|
c3fcff8308648777b0a678debcb7b2989ae860b2
|
[
"MIT"
] | null | null | null |
app/modules/streamer/__init__.py
|
Sean2525/VAWSR_backend
|
c3fcff8308648777b0a678debcb7b2989ae860b2
|
[
"MIT"
] | null | null | null |
app/modules/streamer/__init__.py
|
Sean2525/VAWSR_backend
|
c3fcff8308648777b0a678debcb7b2989ae860b2
|
[
"MIT"
] | null | null | null |
from app.modules.streamer.websocketstream import WebsocketStream
from app.modules.streamer.google import GoogleStreamer
from app.modules.streamer.kaldi import KaldiStreamer
| 43.25
| 64
| 0.878613
| 21
| 173
| 7.238095
| 0.47619
| 0.138158
| 0.276316
| 0.434211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069364
| 173
| 3
| 65
| 57.666667
| 0.944099
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8f50319ef794198dd2cd134fcff2c181935e63d3
| 34
|
py
|
Python
|
part4/first.py
|
MADTeacher/python_basics
|
06ae43d8063c1c8426a4fbb53443b6d1ee727951
|
[
"MIT"
] | null | null | null |
part4/first.py
|
MADTeacher/python_basics
|
06ae43d8063c1c8426a4fbb53443b6d1ee727951
|
[
"MIT"
] | null | null | null |
part4/first.py
|
MADTeacher/python_basics
|
06ae43d8063c1c8426a4fbb53443b6d1ee727951
|
[
"MIT"
] | 4
|
2020-10-04T12:24:14.000Z
|
2022-01-16T17:01:59.000Z
|
def function():
    """Print the word 'first' to standard output."""
    message = 'first'
    print(message)
| 17
| 18
| 0.617647
| 4
| 34
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 34
| 2
| 18
| 17
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
56c6fa4e6f6850444b1df89bf9a14b406e92a780
| 604
|
py
|
Python
|
torchsupport/interacting/environments/environment.py
|
bobelly/torchsupport
|
5aa0a04f20c193ec99310f5d6a3375d2e95e740d
|
[
"MIT"
] | 18
|
2019-05-02T16:32:15.000Z
|
2021-04-16T09:33:54.000Z
|
torchsupport/interacting/environments/environment.py
|
bobelly/torchsupport
|
5aa0a04f20c193ec99310f5d6a3375d2e95e740d
|
[
"MIT"
] | 5
|
2019-10-14T13:46:49.000Z
|
2021-06-08T11:48:34.000Z
|
torchsupport/interacting/environments/environment.py
|
bobelly/torchsupport
|
5aa0a04f20c193ec99310f5d6a3375d2e95e740d
|
[
"MIT"
] | 12
|
2019-05-12T21:34:24.000Z
|
2021-07-15T14:14:16.000Z
|
from torchsupport.data.namedtuple import namedtuple
class Environment:
    """Abstract base for interacting environments.

    Subclasses implement the environment protocol below: methods raising
    NotImplementedError must be overridden; the ``*_changes`` hooks are
    optional no-ops by default.
    """

    # Record type produced per step: (state, action, rewards, done).
    data_type = namedtuple("Data", [
        "state", "action", "rewards", "done"
    ])

    def reset(self):
        """Reset the environment to an initial state. Must be overridden."""
        raise NotImplementedError

    def push_changes(self):
        """Optional hook to publish local state changes; default no-op."""
        pass

    def pull_changes(self):
        """Optional hook to ingest external state changes; default no-op."""
        pass

    def action_space(self):
        """Describe the action space. Must be overridden."""
        raise NotImplementedError

    def observation_space(self):
        """Describe the observation space. Must be overridden."""
        raise NotImplementedError

    def is_done(self):
        """Report whether the current episode has finished. Must be overridden."""
        raise NotImplementedError

    def observe(self):
        """Return the current observation. Must be overridden."""
        raise NotImplementedError

    def act(self, action):
        """Apply *action* to the environment. Must be overridden."""
        raise NotImplementedError

    def schema(self):
        """Describe the rollout data schema. Must be overridden."""
        raise NotImplementedError
| 18.30303
| 51
| 0.716887
| 65
| 604
| 6.569231
| 0.415385
| 0.393443
| 0.393443
| 0.362998
| 0.168618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.200331
| 604
| 32
| 52
| 18.875
| 0.884058
| 0
| 0
| 0.391304
| 0
| 0
| 0.043046
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.391304
| false
| 0.086957
| 0.043478
| 0
| 0.521739
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
7127277f1c785585c5d59116c38ea2722f1ef895
| 323,918
|
py
|
Python
|
projects/customer_segmentation_with_python.py
|
Jacquedelest/jacquedelest.github.io
|
7bd2434f85e6e2a7a12f23145182bbf756d88091
|
[
"Unlicense"
] | null | null | null |
projects/customer_segmentation_with_python.py
|
Jacquedelest/jacquedelest.github.io
|
7bd2434f85e6e2a7a12f23145182bbf756d88091
|
[
"Unlicense"
] | null | null | null |
projects/customer_segmentation_with_python.py
|
Jacquedelest/jacquedelest.github.io
|
7bd2434f85e6e2a7a12f23145182bbf756d88091
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Customer Segmentation with Python
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/Jacquedelest/Latihan-dengan-Python/blob/main/Customer_Segmentation_with_Python.ipynb
---
Title: "Data Science in Marketing: Customer Segmentation with Python"
Author: "Joseph Armando Carvallo"
Date: "25/03/2021"
---
# Prepare Libraries and Data

The data above has seven columns with the following explanation:
* `Customer_ID` is a customer code in mixed format CUST- text followed by a number.
* `Nama Pelanggan` is the name of the customer in text format of course.
* `Jenis Kelamin` is the gender of the customer, there are only two data content categories, namely Pria and Wanita.
* `Umur` is the age of the customer in numeric format.
* `Profesi` is the profession of the customer, also type of category text consisting of Wiraswasta, Pelajar, Professional, Ibu Rumah Tangga, and Mahasiswa.
* `Tipe Residen` is the type of residence of our customers, for this dataset there are only two categories, Cluster and Sector.
* `NilaiBelanjaSetahun` is the total expenditure that has been issued by the customer.
**Preparing Libraries**
* Pandas is used to perform data analysis processing
Matplotlib, also as a basis for visualizing data.
* Seaborn is used on top of matplotlib to do more interesting data visualization.
* ScikitLearn is used to prepare data before modeling.
* kmodes is used to perform modeling using K-Modes and K-Prototypes algorithms.
* Pickle is used to store the model to be made.
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from kmodes.kmodes import KModes
from kmodes.kprototypes import KPrototypes
import pickle
from pathlib import Path
"""**Reading Customer Data**
The first step that needs to be done is to read the data which was originally a textfile into a pandas dataframe.
"""
# Import dataset: tab-separated customer records hosted on Google Cloud Storage.
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/customer_segments.txt", sep="\t")
df.head()
"""**View Information from Data**
Next I need to see the information from the existing data. So that I can find out the number of rows and columns, column names, identify null values, and also know the data type easily.
"""
# Show row/column counts, dtypes and non-null counts in one call.
df.info()
"""After doing a data call and looking at the data information, I finally found out that:
* The data to be used consists of 50 rows and 7 columns
* No data dense *Null* values
* Two columns have numeric data type and five data type string
# Perform Data Exploration
In the previous step I have just prepared the data and seen the basic information of the data. Next I need to do data exploration to get to know the dataset that is going to be used. I am going to do some exploration for numeric data as well as categorical data.
**Numeric Data Exploration**
First I need to look at the data distribution of data of numeric type. Here I am using a boxplot and also a histogram to see the distribution of the data. To create the graph, I need to set up which column is the numeric column, then use the seaborn library to plot each of the numeric columns, namely `Umur` and `NilaiBelanjaSetahun`.
"""
sns.set(style='white')
plt.clf()

# Function to create plot
def observasi_num(features):
    """Plot a boxplot and a distribution plot for each numeric column.

    Parameters
    ----------
    features : list of str
        Names of numeric columns in the module-level dataframe ``df``.

    Side effects: draws a 2x2 grid (one row per column: boxplot on the
    left, histogram with KDE on the right) and shows the figure.
    """
    fig, axs = plt.subplots(2, 2, figsize=(10, 9))
    for i, kol in enumerate(features):
        # sns.distplot was deprecated in seaborn 0.11 and removed later;
        # histplot(kde=True) is the documented replacement. Keyword args
        # are used because positional data args are deprecated as well.
        sns.boxplot(x=df[kol], ax=axs[i][0])
        sns.histplot(df[kol], kde=True, ax=axs[i][1])
        axs[i][0].set_title('mean = %.2f\n median = %.2f\n std = %.2f'
                            % (df[kol].mean(), df[kol].median(), df[kol].std()))
    plt.setp(axs)
    plt.tight_layout()
    plt.show()

# Call function to create plot of numeric variables
kolom_numerik = ['Umur', 'NilaiBelanjaSetahun']
observasi_num(kolom_numerik)
"""From the results of the data exploration, I can get the following information:
* The average age of the customer is 37.5 years
* The average value of a customer's annual spending is 7,069,874.82
* Gender of customers is dominated by 41 women (82%) and 9 men (18%)
* Most professions are Entrepreneur (40%) followed by Professional (36%) and others (24%)
* Of all customers 64% of them live in clusters and 36% live in sectors
# Prepare Data Before Modeling
Each machine learning model has different characteristics. This makes me have to prepare the data that I have before it is used for modeling. So that it can adjust to the characteristics possessed by each model and get optimal results.
I am planning to do the modeling using an unsupervised clustering technique. The algorithm to be used is K-Prototypes. One of the main requirements of this algorithm is that the data must be on an equal scale across variables. In addition, I also need to encode the categorical columns I have into numeric, then combine the results of data processing into one data frame for use in modeling.
**Standardization of Numeric Columns**
In order to get optimal results in the application of the algorithm, I need to equalize the numerical data is on one scale. This can be done by standardizing the data. The goal is variables that have a large scale do not dominate how the cluster will be formed and also each variable will be considered equally important by the algorithm that will be used.
"""
from sklearn.preprocessing import StandardScaler
# Numeric columns to be standardized before clustering.
kolom_numerik = ['Umur','NilaiBelanjaSetahun']
# Statistics before standardization
print('Statistics Before Standardization')
print(df[kolom_numerik].describe().round(1))
# Standardize to zero mean / unit variance so that neither numeric
# column dominates the distance computation in K-Prototypes.
df_std = StandardScaler().fit_transform(df[kolom_numerik])
# fit_transform returns a bare ndarray; wrap it back into a DataFrame
# with the original index and column names.
df_std = pd.DataFrame(data=df_std, index=df.index, columns=df[kolom_numerik].columns)
# Display examples of data content and summary statistics
print('\nExample of standardization')
print(df_std.head())
print('\nStandardization of statistics')
print(df_std.describe().round(0))
"""**Categorical Conversion of Data with Encoder Labels**
Next I need to convert the categorical columns into numbers. I am using one of the functions from sklearn namely LabelEncoder. Basically this function will convert customer data from text to numeric. For example for the `Jenis Kelamin` column, the text `Pria` will be changed to the number `0` and the text `Wanita` will be changed to the number `1`. This change is required for all text variables before being used in the K-Prototype algorithm.
"""
# Categorical columns that must be integer-encoded for K-Prototypes.
kolom_kategorikal = ['Jenis Kelamin', 'Profesi', 'Tipe Residen']

# Work on a copy so the raw dataframe keeps its original text values.
df_encode = df[kolom_kategorikal].copy()

# Encode each categorical column to integer codes (LabelEncoder assigns
# codes in alphabetical order of the category values).
for kolom in kolom_kategorikal:
    df_encode[kolom] = LabelEncoder().fit_transform(df_encode[kolom])

# Print data
df_encode.head()
"""**Combine Data for Modeling**
After completing the previous two steps, this time I am combining the two processing results into one data frame. This data frame is going to be used for modeling.
"""
df_model = df_encode.merge(df_std, left_index = True, right_index=True, how = 'left')
df_model.head()
"""# Modelling
**Clustering** is the process of dividing objects into several groups or clusters based on the degree of similarity between one object and another. There are several algorithms to perform clustering. One of the popular ones is **k-means** which is usually only used for numeric data. As for the categorical only, use **k-modes**.
If the data contains composite variables, use **kprototype** algorithm which is a combination of **k-means** and **k-modes**. it can be called using the **k-modes** library which contains the **kprototype** module. To use the **kprototype** algorithm, I need to enter the number of clusters needed and also provide a column index for categorical columns.
**Looking for the Optimal Number of Clusters**
One of the important parameters that must be included in kprototype algorithm is the number of clusters needed. Therefore, I need to find the optimal number of clusters. One way to get the optimal value is to use the help of an *elbow plot* that can be made by visualizing the total distance of all data to the center of the cluster. Next, I am selecting the angled point of the formed pattern and making it my cluster count.
To do this I need to run kprototype algorithm with various number of clusters. Next I am also storing the value of `cost_` and visualizing it with line plot or point plot.
"""
# Import the original (already pre-processed) modeling data.
df_model = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/df-customer-segmentation.csv')

# Fit K-Prototypes for k = 2..9 and record the total cost of each fit;
# the "elbow" of the cost curve suggests the optimal cluster count.
cost = {}
for jumlah_cluster in range(2, 10):
    kproto = KPrototypes(n_clusters=jumlah_cluster, random_state=75)
    kproto.fit_predict(df_model, categorical=[0, 1, 2])
    cost[jumlah_cluster] = kproto.cost_

# Visualize elbow plot
sns.pointplot(x=list(cost.keys()), y=list(cost.values()))
plt.show()
"""From the results above, I can find out that the angle of the plot is at `k = 5`. So I decided to use 5 as the optimal number of clusters.
**Create a Model**
Then I can create a model with the number of clusters that have been obtained in the previous stage, namely 5 and save the result as a pickle file.
"""
# Fit the final model with the optimal k = 5 found from the elbow plot.
# Columns 0-2 of df_model are the categorical features.
kproto = KPrototypes(n_clusters=5, random_state=75)
kproto = kproto.fit(df_model, categorical=[0, 1, 2])

# Save model. Use a context manager so the file handle is always closed,
# even if pickling raises (the original left the handle open).
with open('cluster.pkl', 'wb') as model_file:
    pickle.dump(kproto, model_file)
"""**Use the Model**
The model that I have created can be used to determine which cluster each customer belongs to. This time I will use the model to determine the customer segments in the dataset.
"""
# Re-read the raw customer data (S3 mirror of the original dataset).
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/customer_segments.txt", sep="\t")
# Determine the segment of each customer; columns 0-2 of df_model are categorical.
clusters = kproto.predict(df_model, categorical=[0,1,2])
print('customer segment {}\n'.format(clusters))
# Combine initial data and customer segments (copy keeps df untouched).
df_final = df.copy()
df_final['cluster'] = clusters
df_final.head()
"""**Displaying Clusters of Each Customer**
After combining the initial data of customers and their clusters, I need to display and observe the results to help me in naming each cluster based on its characteristics.
"""
# Show the members of every cluster (0-4) to help characterise each segment.
for nomor_cluster in range(5):
    print('\nPelanggan cluster: {}\n'.format(nomor_cluster))
    print(df_final[df_final['cluster'] == nomor_cluster])
"""**Boxplot: Visualization of Clustering Results**
I am also making a visualization of the clustering result to make it easier for me to do naming in each cluster.
"""
# Numerical variables
kolom_numerik = ['Umur', 'NilaiBelanjaSetahun']

# One boxplot per numeric variable, grouped by the assigned cluster.
for nama_kolom in kolom_numerik:
    plt.figure(figsize=(6, 4))
    ax = sns.boxplot(x='cluster', y=nama_kolom, data=df_final)
    plt.title('\nBox Plot {}\n'.format(nama_kolom), fontsize=12)
    plt.show()
"""**Count Plot: Visualization of Clustering Results**
I am also making a visualization of the clustering results to make it easier for me to name each cluster.
"""
# Categorical data
kolom_categorical = ['Jenis Kelamin', 'Profesi', 'Tipe Residen']

# One count plot per categorical variable, split by cluster, with each
# bar's height annotated above it and the y-axis hidden for readability.
for nama_kolom in kolom_categorical:
    plt.figure(figsize=(6, 4))
    ax = sns.countplot(data=df_final, x='cluster', hue=nama_kolom)
    plt.title('\nCount Plot {}\n'.format(nama_kolom), fontsize=12)
    ax.legend(loc="upper center")
    for p in ax.patches:
        ax.annotate(format(p.get_height(), '.0f'),
                    (p.get_x() + p.get_width() / 2., p.get_height()),
                    ha='center',
                    va='center',
                    xytext=(0, 10),
                    textcoords='offset points')
    sns.despine(right=True, top=True, left=True)
    ax.axes.yaxis.set_visible(False)
    plt.show()
"""**Name the Clusters**
From the observations made, I want to give the name of the segment for each cluster number, namely:
* `Cluster 0` is Diamond Young Entrepreneur. The contents of this cluster are entrepreneurs who have average transaction value of close to 10 million and have ages of about 18 - 41 years with the average age is 29 years.
* `Cluster 1` is Diamond Senior Entrepreneur. The contents of this cluster are entrepreneurs who have average transaction value of close to 10 million and have ages of around 45 - 64 years with the average age is 55 years.
* `Cluster 2` is Silver Students. The contents of this cluster are students and college students whose average age is 16 years and the annual expenditure value is close to 3 million.
* `Cluster 3` is a Gold Young Member. The contents of this cluster are young professionals and housewives with age range of about 20 - 40 years and with the average age is 30 years, and their annual spending value is close to 6 million.
* `Cluster 4` is a Gold Senior Member. The contents of this cluster are elderly professionals and housewives with age range of 46 - 63 years and the average age is 53 years, and the annual expenditure value is close to 6 million.
"""
# Map each numeric cluster label to a human-readable segment name.
# NOTE: cluster 2 is named 'Silver Students' to match the cluster
# descriptions above and the menamakan_segmen() function below (the
# original used 'Silver Member' here, a naming inconsistency).
df_final['segmen'] = df_final['cluster'].map({
    0: 'Diamond Young Member',
    1: 'Diamond Senior Member',
    2: 'Silver Students',
    3: 'Gold Young Member',
    4: 'Gold Senior Member'
})
print(df_final.info())
df_final.head()
"""# Operate Model
The newly created model must be able to be used every day to predict new data. For that I need to prepare the data again and then make predictions with the parameters and models that have been made.
**Prepare New Data**
Here I am creating a new sample of data to be predicted with the model that has been created. I do this by creating a dataframe that contains customer information.
"""
# New data: a single customer record with the same raw columns as the
# training data, used to exercise the saved model.
data = [{
'Customer_ID': 'CUST-100' ,
'Nama Pelanggan': 'Joko' ,
'Jenis Kelamin': 'Pria',
'Umur': 45,
'Profesi': 'Wiraswasta',
'Tipe Residen': 'Cluster' ,
'NilaiBelanjaSetahun': 8230000
}]
# Create data frame
new_df = pd.DataFrame(data)
# Print new data
new_df
"""**Creating a Data Processing Function**
Next I need to create a function to perform data processing based on the same parameters when I do the modeling and call the new data.
From the previous process, the representation of each code and their meaning, namely:
`Jenis Kelamin`
* 0 = Pria (Male)
* 1 = Wanita (Female)
`Profesi`
* 0 = Ibu Rumah Tangga (Housewife)
* 1 = Mahasiswa (University Student)
* 2 = Pelajar (Pupil)
* 3 = Professional
* 4 = Wiraswasta (Enterpreneur)
`Tipe Residen`
* 0 = Cluster
* 1 = Sector
To standardize numerical data with the same variables when modeling, I need to use the average value and standard deviation of each variable when modeling.
`Umur`
* mean = 37.5
* standard deviation = 14.7
`NilaiBelanjaSetahun`
* mean = 7069874.8
* standard deviation = 2590619.0
From these values I can calculate the standardized value (`z`) using the formula `Z = (x - u)/s` where `x` is each value, `u` is the average and `s` is the standard deviation.
Finally, combining the results of the two previous processes into one data frame
"""
def data_preprocess(data):
    """Re-create the modelling features for new customer rows.

    Applies the same encodings and standardisation parameters used at
    training time: categorical columns are mapped to the integer codes
    the LabelEncoder produced (alphabetical order), and numeric columns
    are standardised with the training mean/std (Umur: 37.5 / 14.7,
    NilaiBelanjaSetahun: 7069874.8 / 2590619.0).

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain 'Jenis Kelamin', 'Profesi', 'Tipe Residen',
        'Umur' and 'NilaiBelanjaSetahun'.

    Returns
    -------
    pandas.DataFrame
        Encoded categorical columns followed by standardised numeric ones.
    """
    # Fixed code tables, matching the LabelEncoder output at training time.
    gender_codes = {'Pria': 0, 'Wanita': 1}
    profession_codes = {
        'Ibu Rumah Tangga': 0,
        'Mahasiswa': 1,
        'Pelajar': 2,
        'Professional': 3,
        'Wiraswasta': 4,
    }
    residence_codes = {'Cluster': 0, 'Sector': 1}

    # Convert categorical data on a copy so the input frame is untouched.
    kolom_kategorikal = ['Jenis Kelamin', 'Profesi', 'Tipe Residen']
    df_encode = data[kolom_kategorikal].copy()
    df_encode['Jenis Kelamin'] = df_encode['Jenis Kelamin'].map(gender_codes)
    df_encode['Profesi'] = df_encode['Profesi'].map(profession_codes)
    df_encode['Tipe Residen'] = df_encode['Tipe Residen'].map(residence_codes)

    # Standardise the numeric columns with the training-time parameters.
    kolom_numerik = ['Umur', 'NilaiBelanjaSetahun']
    df_std = data[kolom_numerik].copy()
    df_std['Umur'] = (df_std['Umur'] - 37.5) / 14.7
    df_std['NilaiBelanjaSetahun'] = (df_std['NilaiBelanjaSetahun'] - 7069874.8) / 2590619.0

    # Merge categorical and numeric features into one model-ready frame.
    return df_encode.merge(df_std, left_index=True, right_index=True, how='left')
# Run function
new_df_model = data_preprocess(new_df)
# Display the pre-processed row that will be fed to the model.
new_df_model
"""**Call Models and Make Predictions**
Once the data is ready to use, it's time to call the previously saved model and proceed with making predictions. To do this, I need to wrap the process in a single function called `modelling` that takes the new data as input.
"""
def modelling(data):
    """Load the persisted K-Prototypes model and predict cluster labels.

    Parameters
    ----------
    data : pandas.DataFrame
        Pre-processed features, as produced by data_preprocess().

    Returns
    -------
    array-like
        Cluster label for each row of ``data``.
    """
    # Use a context manager so the file handle is closed even if
    # unpickling raises (the original left the file open). The original
    # variable name 'kpoto' was also a typo for 'kproto'.
    with open('cluster.pkl', 'rb') as model_file:
        kproto = pickle.load(model_file)
    # The first three columns of the feature frame are categorical.
    return kproto.predict(data, categorical=[0, 1, 2])
# Run function
clusters = modelling(new_df_model)
# Show the predicted cluster label(s) for the new data.
print(clusters)
"""**Name the Segment**
Same as before, I need to create a function to do this process. The cluster names that have been obtained in the previous step need to be changed to segment names to make them easier to identify.
"""
def menamakan_segmen(data_asli, clusters):
    """Attach cluster labels and human-readable segment names to the data.

    Parameters
    ----------
    data_asli : pandas.DataFrame
        Original (un-encoded) customer rows.
    clusters : array-like of int
        Cluster label per row, as returned by the model.

    Returns
    -------
    pandas.DataFrame
        Copy of ``data_asli`` with added 'cluster' and 'segmen' columns.
    """
    nama_segmen = {
        0: 'Diamond Young Member',
        1: 'Diamond Senior Member',
        2: 'Silver Students',
        3: 'Gold Young Member',
        4: 'Gold Senior Member',
    }
    # Work on a copy so the caller's dataframe is left untouched.
    final_df = data_asli.copy()
    final_df['cluster'] = clusters
    final_df['segmen'] = final_df['cluster'].map(nama_segmen)
    return final_df
# Run function
new_final_df = menamakan_segmen(new_df,clusters)
# Display the new customer together with its cluster and segment name.
new_final_df
| 723.03125
| 306,309
| 0.951407
| 12,775
| 323,918
| 24.114286
| 0.801174
| 0.000357
| 0.000364
| 0.000276
| 0.006635
| 0.005577
| 0.004356
| 0.002746
| 0.002532
| 0.001961
| 0
| 0.153817
| 0.010759
| 323,918
| 448
| 306,310
| 723.03125
| 0.80757
| 0.953217
| 0
| 0.181208
| 1
| 0.006711
| 0.205691
| 0
| 0
| 1
| 0
| 0.002232
| 0
| 1
| 0.026846
| false
| 0
| 0.060403
| 0
| 0.107383
| 0.073826
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
857cabe01377edfef935c53e8f284384e0ed52cd
| 33
|
py
|
Python
|
endaq/batch/__init__.py
|
MideTechnology/endaq-python
|
a878efdd65f718c1324d92d467b19fd3b4142cd0
|
[
"MIT"
] | 5
|
2021-12-02T04:41:52.000Z
|
2022-02-01T19:44:41.000Z
|
endaq/batch/__init__.py
|
MideTechnology/endaq-python
|
a878efdd65f718c1324d92d467b19fd3b4142cd0
|
[
"MIT"
] | 136
|
2021-09-28T17:45:20.000Z
|
2022-03-30T11:35:15.000Z
|
endaq/batch/__init__.py
|
MideTechnology/endaq-python
|
a878efdd65f718c1324d92d467b19fd3b4142cd0
|
[
"MIT"
] | 2
|
2021-11-08T19:22:17.000Z
|
2021-12-15T20:25:04.000Z
|
from .core import GetDataBuilder
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
85984587412dd84a61dbf5ae4733b540c9c41a19
| 1,830
|
py
|
Python
|
tests/test_webargs.py
|
FrailLeon/sanic-pydantic
|
e65b14fc36ad4b0bf6134ca8a64e184bd7535511
|
[
"MIT"
] | null | null | null |
tests/test_webargs.py
|
FrailLeon/sanic-pydantic
|
e65b14fc36ad4b0bf6134ca8a64e184bd7535511
|
[
"MIT"
] | null | null | null |
tests/test_webargs.py
|
FrailLeon/sanic-pydantic
|
e65b14fc36ad4b0bf6134ca8a64e184bd7535511
|
[
"MIT"
] | null | null | null |
from examples.server import app
import json
def test_sanic_webargs_query():
    """GET with a query string: the endpoint echoes the query, no payload."""
    query = {"name": "ahmed"}
    request, response = app.test_client.get("/get-request", params=query)
    assert response.status == 200
    assert response.json == {"payload": None, "query": query}
def test_sanic_webargs_path():
    """GET with a path parameter: the id is parsed into the response."""
    request, response = app.test_client.get("/get/1")
    assert response.status == 200
    assert response.json == {"payload": None, "query": None, "id": 1}
def test_sanic_webargs_payload():
    """POST with both a JSON body and a query string: both are echoed."""
    body = {"age": 29}
    query = {"name": "ahmed"}
    request, response = app.test_client.post(
        "/post-request", params=query, data=json.dumps(body)
    )
    assert response.status == 200
    assert response.json == {"payload": body, "query": query}
def test_async_sanic_webargs_query():
    """Async GET handler: same echo behaviour as the sync query endpoint."""
    query = {"name": "ahmed"}
    request, response = app.test_client.get(
        "/async-get-request", params=query
    )
    assert response.status == 200
    assert response.json == {"payload": None, "query": query}
def test_async_sanic_webargs_payload():
    """Async POST handler: body and query string are both echoed."""
    body = {"age": 29}
    query = {"name": "ahmed"}
    request, response = app.test_client.post(
        "/async-post-request", params=query, data=json.dumps(body)
    )
    assert response.status == 200
    assert response.json == {"payload": body, "query": query}
def test_sanic_webargs_payload_invalid():
    """Unknown body and query parameters are rejected with HTTP 422."""
    body = {"invalid_body_param": 29}
    query = {"invalid_query_param": "ahmed"}
    request, response = app.test_client.post(
        "/post-request", params=query, data=json.dumps(body)
    )
    assert response.status == 422
| 31.016949
| 74
| 0.704918
| 234
| 1,830
| 5.337607
| 0.162393
| 0.123299
| 0.086469
| 0.105685
| 0.880705
| 0.880705
| 0.851882
| 0.82466
| 0.82466
| 0.82466
| 0
| 0.017299
| 0.178689
| 1,830
| 58
| 75
| 31.551724
| 0.813706
| 0
| 0
| 0.543478
| 0
| 0
| 0.057924
| 0
| 0
| 0
| 0
| 0
| 0.23913
| 1
| 0.130435
| false
| 0
| 0.043478
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a44a3703a199e2432a447693a6f3f73fed55eb2c
| 190
|
py
|
Python
|
1_joint_alignment/SE/Edge.py
|
BGU-CS-VIL/JA-POLS
|
0ee34ec0c8c7d7fdfc0c5b1c85b2bb6632cc3c41
|
[
"MIT"
] | 16
|
2020-03-16T08:52:09.000Z
|
2022-03-09T09:05:47.000Z
|
1_joint_alignment/SE/Edge.py
|
BGU-CS-VIL/JA-POLS
|
0ee34ec0c8c7d7fdfc0c5b1c85b2bb6632cc3c41
|
[
"MIT"
] | 1
|
2020-08-24T17:28:19.000Z
|
2020-08-24T17:28:19.000Z
|
1_joint_alignment/SE/Edge.py
|
BGU-CS-VIL/JA-POLS
|
0ee34ec0c8c7d7fdfc0c5b1c85b2bb6632cc3c41
|
[
"MIT"
] | 1
|
2022-02-04T20:54:24.000Z
|
2022-02-04T20:54:24.000Z
|
class Edge:
    """A directed edge from a source node to a destination node."""

    def __init__(self, src, dst):
        """Store the two endpoints of the edge.

        Parameters
        ----------
        src : object
            Source endpoint.
        dst : object
            Destination endpoint.
        """
        self.src = src
        self.dst = dst

    def get_src(self):
        """Return the source endpoint."""
        return self.src

    def get_dst(self):
        """Return the destination endpoint."""
        return self.dst

    def __repr__(self):
        # Added for debuggability; the original class had no repr, which
        # made collections of edges hard to inspect.
        return f"{type(self).__name__}({self.src!r}, {self.dst!r})"
| 14.615385
| 33
| 0.547368
| 27
| 190
| 3.62963
| 0.333333
| 0.214286
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.357895
| 190
| 12
| 34
| 15.833333
| 0.803279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
a469a30cf8dcc295148ca4429da22f75c4be3d97
| 118
|
py
|
Python
|
syn/tagmathon/b/__init__.py
|
mbodenhamer/syn
|
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
|
[
"MIT"
] | 1
|
2021-07-15T08:55:12.000Z
|
2021-07-15T08:55:12.000Z
|
syn/tagmathon/b/__init__.py
|
mbodenhamer/syn
|
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
|
[
"MIT"
] | 7
|
2021-01-07T23:51:57.000Z
|
2021-12-13T19:50:57.000Z
|
syn/tagmathon/b/__init__.py
|
mbodenhamer/syn
|
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
|
[
"MIT"
] | 2
|
2016-07-11T08:46:31.000Z
|
2017-12-13T13:30:51.000Z
|
from .base import *
from .function import *
from .builtin import *
from .interpreter import *
from .compiler import *
| 19.666667
| 26
| 0.745763
| 15
| 118
| 5.866667
| 0.466667
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 118
| 5
| 27
| 23.6
| 0.897959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a49d965a8320f1d32580a7596eb63761aa392a0a
| 35
|
py
|
Python
|
grapheap/__init__.py
|
practo/grapheap
|
f9f22290eb0e8cd191747f555c3d15dbbcfa7a3e
|
[
"MIT"
] | null | null | null |
grapheap/__init__.py
|
practo/grapheap
|
f9f22290eb0e8cd191747f555c3d15dbbcfa7a3e
|
[
"MIT"
] | null | null | null |
grapheap/__init__.py
|
practo/grapheap
|
f9f22290eb0e8cd191747f555c3d15dbbcfa7a3e
|
[
"MIT"
] | 1
|
2019-09-12T06:54:09.000Z
|
2019-09-12T06:54:09.000Z
|
from .src.grapheap import Grapheap
| 17.5
| 34
| 0.828571
| 5
| 35
| 5.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
74b36e2e62ebc6c302472884d8e45300784ae159
| 118
|
py
|
Python
|
descriptors/__init__.py
|
WPaczula/image-fragmentation
|
f5650ff384bf803b3cca8b21c621019ce25018e6
|
[
"MIT"
] | null | null | null |
descriptors/__init__.py
|
WPaczula/image-fragmentation
|
f5650ff384bf803b3cca8b21c621019ce25018e6
|
[
"MIT"
] | null | null | null |
descriptors/__init__.py
|
WPaczula/image-fragmentation
|
f5650ff384bf803b3cca8b21c621019ce25018e6
|
[
"MIT"
] | null | null | null |
from descriptors.haralick import get_haralicks
from descriptors.lbp import get_lbp
from descriptors.hog import get_hog
| 39.333333
| 46
| 0.881356
| 18
| 118
| 5.611111
| 0.444444
| 0.445545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09322
| 118
| 3
| 47
| 39.333333
| 0.943925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
74db8448252041399a8b471be0180831f6f74eef
| 40
|
py
|
Python
|
ansible_subprocess/__init__.py
|
blawesom/ansible-subprocess
|
4c04a265d8cf1bee15dd7dbc8166b1d81642f234
|
[
"MIT"
] | 3
|
2016-11-22T12:43:15.000Z
|
2021-01-14T15:58:21.000Z
|
ansible_subprocess/__init__.py
|
blawesom/ansible-subprocess
|
4c04a265d8cf1bee15dd7dbc8166b1d81642f234
|
[
"MIT"
] | 2
|
2020-01-01T05:28:56.000Z
|
2020-03-11T01:38:12.000Z
|
ansible_subprocess/__init__.py
|
blawesom/ansible-subprocess
|
4c04a265d8cf1bee15dd7dbc8166b1d81642f234
|
[
"MIT"
] | 3
|
2017-11-24T14:21:04.000Z
|
2019-11-18T20:06:46.000Z
|
from .main import run_playbook, run_ping
| 40
| 40
| 0.85
| 7
| 40
| 4.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
74f9fe4b46e57e51f27b84c8cf2c939bdd035db2
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/jedi/common/__init__.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/jedi/common/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/jedi/common/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/c2/12/a2/2453035eedfb8af7c8b0bf4b253e20ea83c33b19874f4ee1918f0c0113
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.416667
| 0
| 96
| 1
| 96
| 96
| 0.479167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2d1edbedece3b088563616e2bfd468e2a94b9f04
| 138
|
py
|
Python
|
web/forms.py
|
wabscale/flasq
|
ea5cba81c3259441f64e96a7c63d2234728100a3
|
[
"MIT"
] | 1
|
2022-02-25T02:12:10.000Z
|
2022-02-25T02:12:10.000Z
|
web/forms.py
|
wabscale/flasq
|
ea5cba81c3259441f64e96a7c63d2234728100a3
|
[
"MIT"
] | null | null | null |
web/forms.py
|
wabscale/flasq
|
ea5cba81c3259441f64e96a7c63d2234728100a3
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired, FileAllowed
from wtforms.validators import Required
| 23
| 63
| 0.855072
| 18
| 138
| 6.444444
| 0.666667
| 0.155172
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 138
| 5
| 64
| 27.6
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2d27f65d3810b3dbdaa183d729578e760beb9c75
| 24
|
py
|
Python
|
__init__.py
|
mputs/BayesCCal
|
49113ae969d4e30651c4aaa28c622d988447da60
|
[
"MIT"
] | 1
|
2021-06-02T17:59:37.000Z
|
2021-06-02T17:59:37.000Z
|
__init__.py
|
mputs/BayesCCal
|
49113ae969d4e30651c4aaa28c622d988447da60
|
[
"MIT"
] | null | null | null |
__init__.py
|
mputs/BayesCCal
|
49113ae969d4e30651c4aaa28c622d988447da60
|
[
"MIT"
] | null | null | null |
from BayesCCal import *
| 12
| 23
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 2
| 23
| 12
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2d344b6df986ef2087f2155abd8348966a3ef2ce
| 10,537
|
py
|
Python
|
iter8_analytics/api/v2/examples/examples_metrics.py
|
sriumcp/iter8-analytics
|
87704d0111328a718e6692d5f0aed946083732ba
|
[
"Apache-2.0"
] | 14
|
2019-11-14T01:30:32.000Z
|
2021-09-10T06:03:51.000Z
|
iter8_analytics/api/v2/examples/examples_metrics.py
|
sriumcp/iter8-analytics
|
87704d0111328a718e6692d5f0aed946083732ba
|
[
"Apache-2.0"
] | 120
|
2019-12-09T21:17:37.000Z
|
2021-07-21T00:21:17.000Z
|
iter8_analytics/api/v2/examples/examples_metrics.py
|
sriumcp/iter8-analytics
|
87704d0111328a718e6692d5f0aed946083732ba
|
[
"Apache-2.0"
] | 14
|
2020-04-01T15:40:39.000Z
|
2021-08-19T14:23:40.000Z
|
"""
Metric examples used in other examples.
"""
request_count = {
"name": "request-count",
"metricObj": {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "request-count"
},
"spec": {
"type": "Counter",
"params": [{
"name": "query",
"value": "sum(increase(revision_app_request_latencies_count{service_name=~'.*$name'}[${elapsedTime}s])) or on() vector(0)"
}],
"description": "Number of requests",
"provider": "prometheus",
"jqExpression": ".data.result[0].value[1] | tonumber",
"urlTemplate": "http://metrics-mock:8080/promcounter"
}
}
}
# Prometheus Gauge: mean latency in milliseconds; request-count is the
# sample-size metric.
mean_latency = {
"name": "mean-latency",
"metricObj": {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "mean-latency"
},
"spec": {
"description": "Mean latency",
"units": "milliseconds",
"params": [{
"name": "query",
"value": "(sum(increase(revision_app_request_latencies_sum{service_name=~'.*$name'}[${elapsedTime}s]))or on() vector(0)) / (sum(increase(revision_app_request_latencies_count{service_name=~'.*$name'}[${elapsedTime}s])) or on() vector(0))"
}],
"type": "Gauge",
"sampleSize": {
"name": "request-count"
},
"provider": "prometheus",
"jqExpression": ".data.result[0].value[1] | tonumber",
"urlTemplate": "http://metrics-mock:8080/promcounter"
}
}
}
# This yaml body is marshalled into the corresponding JSON body.
# body: |
# {
# "last": $elapsedTime,
# "sampling": 600,
# "filter": "kubernetes.node.name = 'n1' and service = '$name'",
# "metrics": [
# {
# "id": "cpu.cores.used",
# "aggregations": { "time": "avg", "group": "sum" }
# }
# ],
# "dataSourceType": "container",
# "paging": {
# "from": 0,
# "to": 99
# }
# Sysdig Gauge: CPU cores used, queried via POST with the JSON body
# shown (escaped) in "spec.body" below.
cpu_utilization = {
"name": "cpu-utilization",
"metricObj": {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "cpu-utilization"
},
"spec": {
"description": "CPU utilization",
"body": "{\n \"last\": $elapsedTime,\n \"sampling\": 600,\n \"filter\": \"kubernetes.node.name = 'n1' and service = '$name'\",\n \"metrics\": [\n {\n \"id\": \"cpu.cores.used\",\n \"aggregations\": { \"time\": \"avg\", \"group\": \"sum\" }\n }\n ],\n \"dataSourceType\": \"container\",\n \"paging\": {\n \"from\": 0,\n \"to\": 99\n }\n}\n",
"method": "POST",
"type": "Gauge",
"provider": "Sysdig",
"jqExpression": ".data[0].d[0] | tonumber",
"urlTemplate": "http://metrics-mock:8080/sysdig"
}
}
}
# Prometheus Gauge: business revenue per request, in dollars.
business_revenue = {
"name": "business-revenue",
"metricObj": {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "business-revenue"
},
"spec": {
"description": "Business Revenue Metric",
"units": "dollars",
"params": [{
"name": "query",
"value": "(sum(increase(business_revenue{service_name=~'.*$name'}[${elapsedTime}s]))or on() vector(0)) / (sum(increase(revision_app_request_latencies_count{service_name=~'.*$name'}[${elapsedTime}s])) or on() vector(0))"
}],
"type": "Gauge",
"sampleSize": {
"name": "request-count"
},
"provider": "prometheus",
"jqExpression": ".data.result[0].value[1] | tonumber",
"urlTemplate": "http://prometheus-operated.iter8-monitoring:9090/api/v1/query"
}
}
}
# New Relic Counter example with the (sample) API key embedded directly
# in a header template rather than read from a secret.
new_relic_embedded = {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "name-count"
},
"spec": {
"params": [
{
"name": "nrql",
"value": "SELECT count(appName) FROM PageView WHERE revisionName='${revision}' SINCE ${elapsedTime} seconds ago"
}
],
"description": "A New Relic example",
"type": "Counter",
"headerTemplates": [
{
"name": "X-Query-Key",
"value": "t0p-secret-api-key"
}
],
"provider": "newrelic",
"jqExpression": ".results[0].count | tonumber",
"urlTemplate": "https://insights-api.newrelic.com/v1/accounts/my_account_id"
}
}
# New Relic Counter example reading the API key from the Kubernetes
# secret "myns/nrcredentials" (header value "${mykey}" is templated).
new_relic_secret = {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "name-count"
},
"spec": {
"params": [
{
"name": "nrql",
"value": "SELECT count(appName) FROM PageView WHERE revisionName='${revision}' SINCE ${elapsedTime} seconds ago"
}
],
"description": "A New Relic example",
"type": "Counter",
"authType": "APIKey",
"secret": "myns/nrcredentials",
"headerTemplates": [
{
"name": "X-Query-Key",
"value": "${mykey}"
}
],
"provider": "newrelic",
"jqExpression": ".results[0].count | tonumber",
"urlTemplate": "https://insights-api.newrelic.com/v1/accounts/my_account_id"
}
}
# Fixture: Sysdig CPU-utilization metric with a bearer token embedded in the
# Authorization header (contrast with sysdig_secret below). The POST body is a
# JSON document carried as an escaped string template.
sysdig_embedded = {
    "apiVersion": "iter8.tools/v2alpha2",
    "kind": "Metric",
    "metadata": {
        "name": "cpu-utilization",
    },
    "spec": {
        "description": "A Sysdig example",
        "provider": "sysdig",
        "body": "{\n \"last\": ${elapsedTime},\n \"sampling\": 600,\n \"filter\": \"kubernetes.app.revision.name = '${revision}'\",\n \"metrics\": [\n {\n \"id\": \"cpu.cores.used\",\n \"aggregations\": { \"time\": \"avg\", \"group\": \"sum\" }\n }\n ],\n \"dataSourceType\": \"container\",\n \"paging\": {\n \"from\": 0,\n \"to\": 99\n }\n}",
        "method": "POST",
        "type": "Gauge",
        "headerTemplates": [
            {
                "name": "Accept",
                "value": "application/json",
            },
            {
                "name": "Authorization",
                "value": "Bearer 87654321-1234-1234-1234-123456789012",
            },
        ],
        "jqExpression": ".data[0].d[0] | tonumber",
        "urlTemplate": "https://secure.sysdig.com/api/data",
    },
}
# Fixture: Sysdig CPU-utilization metric whose bearer token comes from a
# Kubernetes secret ("myns/sdcredentials") via the ${token} placeholder.
sysdig_secret = {
    "apiVersion": "iter8.tools/v2alpha2",
    "kind": "Metric",
    "metadata": {
        "name": "cpu-utilization",
    },
    "spec": {
        "description": "A Sysdig example",
        "provider": "sysdig",
        "body": "{\n \"last\": ${elapsedTime},\n \"sampling\": 600,\n \"filter\": \"kubernetes.app.revision.name = '${revision}'\",\n \"metrics\": [\n {\n \"id\": \"cpu.cores.used\",\n \"aggregations\": { \"time\": \"avg\", \"group\": \"sum\" }\n }\n ],\n \"dataSourceType\": \"container\",\n \"paging\": {\n \"from\": 0,\n \"to\": 99\n }\n}",
        "method": "POST",
        "authType": "Bearer",
        "secret": "myns/sdcredentials",
        "type": "Gauge",
        "headerTemplates": [
            {
                "name": "Accept",
                "value": "application/json",
            },
            {
                "name": "Authorization",
                "value": "Bearer ${token}",
            },
        ],
        "jqExpression": ".data[0].d[0] | tonumber",
        "urlTemplate": "https://secure.sysdig.com/api/data",
    },
}
# Fixture: Elasticsearch aggregation metric (average sale price) with Basic
# auth resolved from the "myns/elasticcredentials" secret.
elastic_secret = {
    "apiVersion": "iter8.tools/v2alpha2",
    "kind": "Metric",
    "metadata": {
        "name": "average-sales",
    },
    "spec": {
        "description": "An elastic example",
        "provider": "elastic",
        "body": "{\n \"aggs\": {\n \"range\": {\n \"date_range\": {\n \"field\": \"date\",\n \"ranges\": [\n { \"from\": \"now-${elapsedTime}s/s\" } \n ]\n }\n },\n \"items_to_sell\": {\n \"filter\": { \"term\": { \"version\": \"${revision}\" } },\n \"aggs\": {\n \"avg_sales\": { \"avg\": { \"field\": \"sale_price\" } }\n }\n }\n }\n}",
        "method": "POST",
        "authType": "Basic",
        "secret": "myns/elasticcredentials",
        "type": "Gauge",
        "headerTemplates": [
            {
                "name": "Content-Type",
                "value": "application/json",
            },
        ],
        "jqExpression": ".aggregations.items_to_sell.avg_sales.value | tonumber",
        "urlTemplate": "https://secure.elastic.com/my/sales",
    },
}
# Fixture: request-count Counter with a "mock" section supplying canned levels
# per version, so tests can run without a live Prometheus.
mocked_request_count = {
    "name": "request-count",
    "metricObj": {
        "apiVersion": "iter8.tools/v2alpha2",
        "kind": "Metric",
        "metadata": {
            "name": "request-count",
        },
        "spec": {
            "type": "Counter",
            "params": [
                {
                    "name": "query",
                    "value": "sum(increase(revision_app_request_latencies_count{service_name=~'.*$name'}[${elapsedTime}s])) or on() vector(0)",
                },
            ],
            "description": "Number of requests",
            "provider": "prometheus",
            "jqExpression": ".data.result[0].value[1] | tonumber",
            "urlTemplate": "http://metrics-mock:8080/promcounter",
            "mock": [
                {
                    "name": "default",
                    "level": "0.001",
                },
                {
                    "name": "canary",
                    "level": "0.00002",
                },
            ],
        },
    },
}
# Fixture: mean-latency Gauge (latency sum / request count) with mocked levels
# per version and request-count as the sample-size metric.
mocked_mean_latency = {
    "name": "mean-latency",
    "metricObj": {
        "apiVersion": "iter8.tools/v2alpha2",
        "kind": "Metric",
        "metadata": {
            "name": "mean-latency",
        },
        "spec": {
            "description": "Mean latency",
            "units": "milliseconds",
            "params": [
                {
                    "name": "query",
                    "value": "(sum(increase(revision_app_request_latencies_sum{service_name=~'.*$name'}[${elapsedTime}s]))or on() vector(0)) / (sum(increase(revision_app_request_latencies_count{service_name=~'.*$name'}[${elapsedTime}s])) or on() vector(0))",
                },
            ],
            "type": "Gauge",
            "sampleSize": {
                "name": "request-count",
            },
            "provider": "prometheus",
            "jqExpression": ".data.result[0].value[1] | tonumber",
            "urlTemplate": "http://metrics-mock:8080/promcounter",
            "mock": [
                {
                    "name": "default",
                    "level": "20.0",
                },
                {
                    "name": "canary",
                    "level": "10.0",
                },
            ],
        },
    },
}
| 33.239748
| 410
| 0.480023
| 925
| 10,537
| 5.398919
| 0.182703
| 0.007609
| 0.044053
| 0.061674
| 0.800561
| 0.789347
| 0.762115
| 0.754505
| 0.74169
| 0.711254
| 0
| 0.022195
| 0.307298
| 10,537
| 317
| 411
| 33.239748
| 0.662008
| 0.042612
| 0
| 0.618881
| 0
| 0.017483
| 0.520815
| 0.09995
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
743b06c9b9babccd07fa5f3cb7217db6d4f93509
| 1,842
|
py
|
Python
|
tests/probes/test_slo_is_met.py
|
chaostoolkit-incubator/chaostoolkit-reliably
|
f7d7f1f262b9416f6caa66ade2082119d9718d50
|
[
"Apache-2.0"
] | null | null | null |
tests/probes/test_slo_is_met.py
|
chaostoolkit-incubator/chaostoolkit-reliably
|
f7d7f1f262b9416f6caa66ade2082119d9718d50
|
[
"Apache-2.0"
] | 4
|
2021-07-22T14:07:36.000Z
|
2022-01-28T12:50:22.000Z
|
tests/probes/test_slo_is_met.py
|
chaostoolkit-incubator/chaostoolkit-reliably
|
f7d7f1f262b9416f6caa66ade2082119d9718d50
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import MagicMock, patch
from chaosreliably.slo.probes import slo_is_met
@patch("chaosreliably.slo.probes.all_objective_results_ok")
@patch("chaosreliably.slo.probes.get_objective_results_by_labels")
def test_that_slo_is_met_correctly_calls_probe_and_tolerance_with_no_limit(
    mock_get_objective_results_by_labels: MagicMock,
    mock_all_objective_results_ok: MagicMock,
) -> None:
    """slo_is_met forwards labels and limit to the fetcher, feeds the fetched
    results into the tolerance check, and returns its verdict (True here)."""
    # Arrange: canned objective results and a passing tolerance check.
    labels = {"a-label": "a-label-value"}
    results = [{"objective_result": "a-result"}]
    mock_get_objective_results_by_labels.return_value = results
    mock_all_objective_results_ok.return_value = True

    # Act.
    outcome = slo_is_met(labels=labels, limit=1)

    # Assert: fetcher called once with the exact arguments, tolerance called
    # with the fetched results, verdict passed through unchanged.
    mock_get_objective_results_by_labels.assert_called_once_with(
        limit=1, labels=labels, configuration=None, secrets=None
    )
    mock_all_objective_results_ok.assert_called_once_with(results)
    assert outcome
@patch("chaosreliably.slo.probes.all_objective_results_ok")
@patch("chaosreliably.slo.probes.get_objective_results_by_labels")
def test_that_slo_is_met_correctly_calls_probe_and_tolerance_with_limit(
    mock_get_objective_results_by_labels: MagicMock,
    mock_all_objective_results_ok: MagicMock,
) -> None:
    """Mirror of the no-limit test with limit=10 and a failing tolerance
    check: slo_is_met must return the False verdict unchanged."""
    # Arrange: canned objective results and a failing tolerance check.
    labels = {"a-label": "a-label-value"}
    results = [{"objective_result": "a-result"}]
    mock_get_objective_results_by_labels.return_value = results
    mock_all_objective_results_ok.return_value = False

    # Act.
    outcome = slo_is_met(labels=labels, limit=10)

    # Assert: arguments forwarded verbatim and the failure propagated.
    mock_get_objective_results_by_labels.assert_called_once_with(
        limit=10, labels=labels, configuration=None, secrets=None
    )
    mock_all_objective_results_ok.assert_called_once_with(results)
    assert not outcome
| 40.043478
| 75
| 0.812704
| 260
| 1,842
| 5.223077
| 0.176923
| 0.188513
| 0.053019
| 0.123711
| 0.908689
| 0.908689
| 0.908689
| 0.908689
| 0.908689
| 0.908689
| 0
| 0.003656
| 0.109121
| 1,842
| 45
| 76
| 40.933333
| 0.823888
| 0
| 0
| 0.588235
| 0
| 0
| 0.161781
| 0.114007
| 0
| 0
| 0
| 0
| 0.176471
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
744221466b465fa0fb1e34b9a380394b822b599a
| 19,017
|
py
|
Python
|
src/MSI/cti_core/cti_combine.py
|
carlylagrotta/MSI
|
e958beb5df2a2d1018bbb2f96382b5c99b08c3ef
|
[
"MIT"
] | 1
|
2021-06-25T15:46:06.000Z
|
2021-06-25T15:46:06.000Z
|
src/MSI/cti_core/cti_combine.py
|
TheBurkeLab/MSI
|
e958beb5df2a2d1018bbb2f96382b5c99b08c3ef
|
[
"MIT"
] | null | null | null |
src/MSI/cti_core/cti_combine.py
|
TheBurkeLab/MSI
|
e958beb5df2a2d1018bbb2f96382b5c99b08c3ef
|
[
"MIT"
] | 2
|
2019-12-18T23:45:25.000Z
|
2021-06-10T20:37:20.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 03 14:32:24 2017
@author: Mark Barbet
"""
"""Active parameter CTI writer. Function takes a subset of reactions from an already modified cti file and
writes them to internal memory. It then reads an input from a portion of the code dealing with master equation simulation
and adds those reactions to create a complete internal mechanism
"""
import numpy as np
import cantera as ct
from ..utilities import soln2cti_py3 as ctiw
def cti_write2(x={},original_cti='',master_rxns='',master_index=[],MP={},working_directory='',file_name=''):
    """Build a Cantera model combining an original mechanism with master-equation reactions, and write it to a cti file.

    Parameters
    ----------
    x : dict
        Arrhenius perturbations for the retained original reactions, keyed
        'r<j>' with sub-keys 'A', 'n', 'Ea'. Empty dict = copy rates unchanged.
    original_cti : str
        Path of the original (already modified) cti mechanism file. Required.
    master_rxns : str
        Path of a cti fragment holding reactions treated by master equation.
    master_index : list
        1-based reaction numbers of original_cti handled via master equation.
    MP : dict
        Perturbations for the appended master-equation reactions, keyed like x.
    working_directory, file_name : str
        Forwarded to the soln2cti writer for the output location.

    Returns
    -------
    tuple
        (written file path, retained original reaction equations,
         master-equation reaction equations).

    NOTE(review): mutable defaults (x={}, master_index=[], MP={}) are shared
    across calls; they appear to be only read (master_index is rebound before
    mutation), but confirm before reusing this signature.
    """
    #print(MP)
    # Generator lambda that recursively flattens arbitrarily nested tuples/lists.
    flatten = lambda *n: (e for a in n
        for e in (flatten(*a) if isinstance(a, (tuple, list)) else (a,)))
    #flatten master index
    master_index = list(flatten(master_index))
    print(bool(x))
    if not original_cti:
        raise Exception('Please provide a name for the original mechanism file and try again.')
    if not master_rxns and np.any(master_index):
        raise Exception('Please provide a mechanism file for reactions analysed with master equation or leave master_index empty')
    if master_rxns and not np.any(master_index):
        raise Exception('Please provide master_index, a non-empty list of reaction numbers from original file which are analysed with master equation.')
    if not master_rxns and not master_index:
        # No master-equation file: keep every reaction of the original mechanism.
        master_index=np.ones(ct.Solution(original_cti).n_reactions,dtype=bool)
    elif master_rxns and np.any(master_index):
        # Boolean mask over the original reactions: False marks the (1-based)
        # reaction numbers that the master-equation mechanism replaces.
        temp=np.ones(ct.Solution(original_cti).n_reactions,dtype=bool)
        for j in np.arange(len(master_index)):
            temp[master_index[j]-1]=False
        master_index=temp
    lineList=[]
    with open(original_cti) as f:
        lineList=f.readlines()
    done=False
    count=0
    # Truncate the cti text just above its reaction-data banner.
    # NOTE(review): once truncated, count >= len(lineList) so the 'or' condition
    # goes False and the loop exits; but if no banner line exists at all,
    # lineList[count] raises IndexError — looks like 'and' may have been
    # intended. Confirm against inputs that lack a banner.
    while not done or count<len(lineList):
        if 'Reaction data' in lineList[count] or 'Reaction Data' in lineList[count] or 'reaction data' in lineList[count]:
            done=True
            lineList=lineList[0:count-1]
        else:count+=1
    #with open('tempcti.cti','w') as p:
    #p.writelines(lineList)
    #Attempt to get rid of temp cti files:
    NewModelSpecies=ct.Species.listFromFile(original_cti)
    # Fresh, reaction-less model sharing the original species set; reactions
    # are added one by one below.
    NewModel=ct.Solution(thermo='IdealGas',kinetics='GasKinetics',
        species=NewModelSpecies)
    #NewModel=ct.Solution('tempcti.cti')
    original_mechanism=ct.Solution(original_cti)
    original_rxn_count=0
    master_rxn_eqs=[]
    if master_rxns:
        # Append the master-equation reactions to the truncated header and load
        # the combined text as its own Solution via a temporary file.
        with open(master_rxns) as f:
            reactionsList=f.readlines()
        lineList=lineList+reactionsList
        with open('masterTemp.cti','w') as f:
            f.writelines(lineList)
        master_reactions=ct.Solution('masterTemp.cti')
        master_rxn_eqs=master_reactions.reaction_equations()
    original_rxn_eqs=[]
    # Copy over every original reaction not delegated to the master equation.
    for i in np.arange(original_mechanism.n_reactions):
        if master_index[i]:
            NewModel.add_reaction(original_mechanism.reaction(i))
            original_rxn_count+=1
            original_rxn_eqs.append(original_mechanism.reaction_equation(i))
#            if 'FalloffReaction' in str(type(original_mechanism.reaction(i))):
#                print(original_mechanism.reaction(i).high_rate)
#                print(original_mechanism.reaction(i).low_rate)
    if master_rxns:
        # Master-equation reactions go in after the originals, so their indices
        # start at original_rxn_count (used by the MP loop below).
        for i in np.arange(master_reactions.n_reactions):
            # print(master_reactions.reaction(i).rate)
            NewModel.add_reaction(master_reactions.reaction(i))
    #
    #print(master_reactions.reaction(0).rate)
    if x=={}:
        # No perturbations requested: every branch re-assigns a rate object to
        # itself. NOTE(review): these are self-assignments with no visible
        # effect in this file; presumably they force Cantera to refresh
        # internal state — confirm before simplifying.
        for j in np.arange(original_rxn_count-1):
            #if master_index[j]:
            #print(str(type(original_mechanism.reaction(j))),str(type(NewModel.reaction(j))))
            if 'ThreeBodyReaction' in str(type(NewModel.reaction(j))):
                NewModel.reaction(j).rate=NewModel.reaction(j).rate
            elif 'ElementaryReaction' in str(type(NewModel.reaction(j))):
                NewModel.reaction(j).rate=NewModel.reaction(j).rate
            elif 'FalloffReaction' in str(type(NewModel.reaction(j))):
                NewModel.reaction(j).high_rate=NewModel.reaction(j).high_rate
                NewModel.reaction(j).low_rate=NewModel.reaction(j).low_rate
                if NewModel.reaction(j).falloff.type=='Troe':
                    NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                if NewModel.reaction(j).falloff.type=='Sri':
                    NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
            elif 'ChemicallyActivatedReaction' in str(type(NewModel.reaction(j))):
                NewModel.reaction(j).high_rate=NewModel.reaction(j).high_rate
                NewModel.reaction(j).low_rate=NewModel.reaction(j).low_rate
                if NewModel.reaction(j).falloff.type=='Troe':
                    NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                if NewModel.reaction(j).falloff.type=='Sri':
                    NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
            elif 'PlogReaction' in str(type(NewModel.reaction(j))):
                NewModel.reaction(j).rates=NewModel.reaction(j).rates
            elif 'ChebyshevReaction' in str(type(NewModel.reaction(j))):
                NewModel.reaction(j).set_parameters(NewModel.reaction(j).Tmin,NewModel.reaction(j).Tmax,NewModel.reaction(j).Pmin,NewModel.reaction(j).Pmax,NewModel.reaction(j).coeffs)
    #Rinv = 1/R #cal/mol*K
    E = 1 #going test for energy
    #T = 4184
    #T= 4.186e3
    # Scale factor applied to Ea perturbations for the original reactions below.
    T=ct.gas_constant
    if x!={}:
        # Perturb the retained original reactions: A scaled by exp(x['rj']['A']),
        # n shifted by x['rj']['n'], Ea shifted by x['rj']['Ea']*T.
        # NOTE(review): the range stops at original_rxn_count-1, so the last
        # retained original reaction is never perturbed — confirm whether the
        # off-by-one is intentional.
        for j in np.arange(original_rxn_count-1):
            #if master_index[j]:
            try:
                if 'ThreeBodyReaction' in str(type(NewModel.reaction(j))):
                    A=NewModel.reaction(j).rate.pre_exponential_factor
                    n=NewModel.reaction(j).rate.temperature_exponent
                    Ea=NewModel.reaction(j).rate.activation_energy
                    NewModel.reaction(j).rate=ct.Arrhenius(A*np.exp(x['r'+str(j)]['A']),n+x['r'+str(j)]['n'],Ea+x['r'+str(j)]['Ea']*T)
                elif 'ElementaryReaction' in str(type(NewModel.reaction(j))):
                    A=NewModel.reaction(j).rate.pre_exponential_factor
                    n=NewModel.reaction(j).rate.temperature_exponent
                    Ea=NewModel.reaction(j).rate.activation_energy
                    NewModel.reaction(j).rate=ct.Arrhenius(A*np.exp(x['r'+str(j)]['A']),n+x['r'+str(j)]['n'],Ea+x['r'+str(j)]['Ea']*T)
                elif 'FalloffReaction' in str(type(NewModel.reaction(j))):
                    A=NewModel.reaction(j).high_rate.pre_exponential_factor
                    n=NewModel.reaction(j).high_rate.temperature_exponent
                    Ea=NewModel.reaction(j).high_rate.activation_energy
                    NewModel.reaction(j).high_rate=ct.Arrhenius(A*np.exp(x['r'+str(j)]['A']),n+x['r'+str(j)]['n'],Ea+x['r'+str(j)]['Ea']*T)
                    A=NewModel.reaction(j).low_rate.pre_exponential_factor
                    n=NewModel.reaction(j).low_rate.temperature_exponent
                    Ea=NewModel.reaction(j).low_rate.activation_energy
                    NewModel.reaction(j).low_rate=ct.Arrhenius(A*np.exp(x['r'+str(j)]['A']),n+x['r'+str(j)]['n'],Ea+x['r'+str(j)]['Ea']*T)
                    if NewModel.reaction(j).falloff.type=='Troe':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                    if NewModel.reaction(j).falloff.type=='Sri':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                elif 'ChemicallyActivatedReaction' in str(type(NewModel.reaction(j))):
                    A=NewModel.reaction(j).high_rate.pre_exponential_factor
                    n=NewModel.reaction(j).high_rate.temperature_exponent
                    Ea=NewModel.reaction(j).high_rate.activation_energy
                    NewModel.reaction(j).high_rate=ct.Arrhenius(A*np.exp(x['r'+str(j)]['A']),n+x['r'+str(j)]['n'],Ea+x['r'+str(j)]['Ea']*T)
                    A=NewModel.reaction(j).low_rate.pre_exponential_factor
                    n=NewModel.reaction(j).low_rate.temperature_exponent
                    Ea=NewModel.reaction(j).low_rate.activation_energy
                    NewModel.reaction(j).low_rate=ct.Arrhenius(A*np.exp(x['r'+str(j)]['A']),n+x['r'+str(j)]['n'],Ea+x['r'+str(j)]['Ea']*T)
                    if NewModel.reaction(j).falloff.type=='Troe':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                    if NewModel.reaction(j).falloff.type=='Sri':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                elif 'PlogReaction' in str(type(NewModel.reaction(j))):
                    # Rebuild the pressure-dependent rate list with each
                    # Arrhenius entry perturbed the same way.
                    temp_rate=[]
                    for number, reactions in enumerate(NewModel.reaction(j).rates):
                        A = NewModel.reaction(j).rates[number][1].pre_exponential_factor
                        n = NewModel.reaction(j).rates[number][1].temperature_exponent
                        Ea = NewModel.reaction(j).rates[number][1].activation_energy
                        pressure = NewModel.reaction(j).rates[number][0]
                        temp_rate.append((pressure, ct.Arrhenius(A*np.exp(x['r'+str(j)]['A']),n+x['r'+str(j)]['n'],Ea+x['r'+str(j)]['Ea']*T)))
                    NewModel.reaction(j).rates = temp_rate
                    #NewModel.reaction(j).rates=NewModel.reaction(j).rates
                elif 'ChebyshevReaction' in str(type(original_mechanism.reaction(j))):
                    # NOTE(review): this branch inspects original_mechanism while
                    # every sibling branch inspects NewModel — confirm intent.
                    NewModel.reaction(j).set_parameters(NewModel.reaction(j).Tmin,NewModel.reaction(j).Tmax,NewModel.reaction(j).Pmin,NewModel.reaction(j).Pmax,NewModel.reaction(j).coeffs)
            except:
                # Bare except: any failure above (e.g. missing 'r<j>' key in x)
                # silently falls back to the no-op re-assignments below.
                print ('we are in the except statment in marks code',j)
                if 'ThreeBodyReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).rate=NewModel.reaction(j).rate
                elif 'ElementaryReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).rate=NewModel.reaction(j).rate
                elif 'FalloffReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).high_rate=NewModel.reaction(j).high_rate
                    NewModel.reaction(j).low_rate=NewModel.reaction(j).low_rate
                    if NewModel.reaction(j).falloff.type=='Troe':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                    if NewModel.reaction(j).falloff.type=='Sri':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                elif 'ChemicallyActivatedReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).high_rate=NewModel.reaction(j).high_rate
                    NewModel.reaction(j).low_rate=NewModel.reaction(j).low_rate
                    if NewModel.reaction(j).falloff.type=='Troe':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                    if NewModel.reaction(j).falloff.type=='Sri':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                elif 'PlogReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).rates=NewModel.reaction(j).rates
                elif 'ChebyshevReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).set_parameters(NewModel.reaction(j).Tmin,NewModel.reaction(j).Tmax,NewModel.reaction(j).Pmin,NewModel.reaction(j).Pmax,NewModel.reaction(j).coeffs)
    if MP!={}:
        print('insdie the MP if statment')
        #print(MP)
        # Perturb the master-equation reactions appended after the originals.
        # Ea here is scaled by E (=1) instead of T.
        for j in np.arange(original_rxn_count,NewModel.n_reactions):
            try:
                if 'ThreeBodyReaction' in str(type(NewModel.reaction(j))):
                    A=NewModel.reaction(j).rate.pre_exponential_factor
                    n=NewModel.reaction(j).rate.temperature_exponent
                    Ea=NewModel.reaction(j).rate.activation_energy
                    NewModel.reaction(j).rate=ct.Arrhenius(A*np.exp(MP['r'+str(j)]['A']),n+MP['r'+str(j)]['n'],Ea+MP['r'+str(j)]['Ea']*E)
                elif 'ElementaryReaction' in str(type(NewModel.reaction(j))):
                    A=NewModel.reaction(j).rate.pre_exponential_factor
                    n=NewModel.reaction(j).rate.temperature_exponent
                    Ea=NewModel.reaction(j).rate.activation_energy
                    NewModel.reaction(j).rate=ct.Arrhenius(A*np.exp(MP['r'+str(j)]['A']),n+MP['r'+str(j)]['n'],Ea+MP['r'+str(j)]['Ea']*E)
                elif 'FalloffReaction' in str(type(NewModel.reaction(j))):
                    A=NewModel.reaction(j).high_rate.pre_exponential_factor
                    n=NewModel.reaction(j).high_rate.temperature_exponent
                    Ea=NewModel.reaction(j).high_rate.activation_energy
                    NewModel.reaction(j).high_rate=ct.Arrhenius(A*np.exp(MP['r'+str(j)]['A']),n+MP['r'+str(j)]['n'],Ea+MP['r'+str(j)]['Ea']*E)
                    A=NewModel.reaction(j).low_rate.pre_exponential_factor
                    n=NewModel.reaction(j).low_rate.temperature_exponent
                    Ea=NewModel.reaction(j).low_rate.activation_energy
                    NewModel.reaction(j).low_rate=ct.Arrhenius(A*np.exp(MP['r'+str(j)]['A']),n+MP['r'+str(j)]['n'],Ea+MP['r'+str(j)]['Ea']*E)
                    if NewModel.reaction(j).falloff.type=='Troe':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                    if NewModel.reaction(j).falloff.type=='Sri':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                elif 'ChemicallyActivatedReaction' in str(type(NewModel.reaction(j))):
                    A=NewModel.reaction(j).high_rate.pre_exponential_factor
                    n=NewModel.reaction(j).high_rate.temperature_exponent
                    Ea=NewModel.reaction(j).high_rate.activation_energy
                    NewModel.reaction(j).high_rate=ct.Arrhenius(A*np.exp(MP['r'+str(j)]['A']),n+MP['r'+str(j)]['n'],Ea+MP['r'+str(j)]['Ea']*E)
                    A=NewModel.reaction(j).low_rate.pre_exponential_factor
                    n=NewModel.reaction(j).low_rate.temperature_exponent
                    Ea=NewModel.reaction(j).low_rate.activation_energy
                    NewModel.reaction(j).low_rate=ct.Arrhenius(A*np.exp(MP['r'+str(j)]['A']),n+MP['r'+str(j)]['n'],Ea+MP['r'+str(j)]['Ea']*E)
                    if NewModel.reaction(j).falloff.type=='Troe':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                    if NewModel.reaction(j).falloff.type=='Sri':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                elif 'PlogReaction' in str(type(NewModel.reaction(j))):
                    for number, reactions in enumerate(NewModel.reaction(j).rates):
                        # NOTE(review): NewModel.reaction(j)[number][1] (no
                        # .rates) differs from the x-branch above and looks
                        # like it would raise, landing in the bare except —
                        # confirm whether '.rates' was dropped by mistake.
                        A = NewModel.reaction(j)[number][1].pre_exponential_factor
                        n = NewModel.reaction(j)[number][1].temperature_exponent
                        Ea = NewModel.reaction(j)[number][1].activation_energy
                        NewModel.reaction(j)[number][1] = ct.Arrhenius(A*np.exp(MP['r'+str(j)]['A']),n+MP['r'+str(j)]['n'],Ea+MP['r'+str(j)]['Ea']*E)
                    NewModel.reaction(j).rates=NewModel.reaction(j).rates
                elif 'ChebyshevReaction' in str(type(NewModel.reaction(j))):
                    # Chebyshev coefficients are shifted by MP['r<j>']/ln(10).
                    converted = MP['r'+str(j)]/np.log(10)
                    test = NewModel.reaction(j).coeffs +converted
                    NewModel.reaction(j).set_parameters(NewModel.reaction(j).Tmin,NewModel.reaction(j).Tmax,NewModel.reaction(j).Pmin,NewModel.reaction(j).Pmax,(test))
            except:
                # Bare except: failed master-equation perturbations are skipped
                # with the same no-op fallback as above.
                print ('we are in the except statment in marks code',j)
                if 'ThreeBodyReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).rate=NewModel.reaction(j).rate
                elif 'ElementaryReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).rate=NewModel.reaction(j).rate
                elif 'FalloffReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).high_rate=NewModel.reaction(j).high_rate
                    NewModel.reaction(j).low_rate=NewModel.reaction(j).low_rate
                    if NewModel.reaction(j).falloff.type=='Troe':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                    if NewModel.reaction(j).falloff.type=='Sri':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                elif 'ChemicallyActivatedReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).high_rate=NewModel.reaction(j).high_rate
                    NewModel.reaction(j).low_rate=NewModel.reaction(j).low_rate
                    if NewModel.reaction(j).falloff.type=='Troe':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                    if NewModel.reaction(j).falloff.type=='Sri':
                        NewModel.reaction(j).falloff=NewModel.reaction(j).falloff
                elif 'PlogReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).rates=NewModel.reaction(j).rates
                elif 'ChebyshevReaction' in str(type(NewModel.reaction(j))):
                    NewModel.reaction(j).set_parameters(NewModel.reaction(j).Tmin,NewModel.reaction(j).Tmax,NewModel.reaction(j).Pmin,NewModel.reaction(j).Pmax,NewModel.reaction(j).coeffs)
    # Serialize the assembled model back to a cti file and return its path.
    new_file=ctiw.write(NewModel, cwd=working_directory,file_name=file_name,original_cti=original_cti)
    #tab
    return new_file,original_rxn_eqs,master_rxn_eqs
| 61.944625
| 212
| 0.573014
| 2,269
| 19,017
| 4.709564
| 0.09167
| 0.191185
| 0.357945
| 0.134756
| 0.788415
| 0.764645
| 0.754726
| 0.750795
| 0.731518
| 0.723096
| 0
| 0.003503
| 0.294368
| 19,017
| 306
| 213
| 62.147059
| 0.792831
| 0.04191
| 0
| 0.670996
| 0
| 0.004329
| 0.066689
| 0.007547
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004329
| false
| 0
| 0.012987
| 0
| 0.021645
| 0.017316
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7794a404ef721f23086ce2ab23e2c4524ae9c056
| 2,950
|
py
|
Python
|
test_rm.py
|
ClaudeNovaMB/rightmove_webscraper.py
|
2077b5f46a0ccfb307426857e5ea9f86b6fb5d67
|
[
"MIT"
] | null | null | null |
test_rm.py
|
ClaudeNovaMB/rightmove_webscraper.py
|
2077b5f46a0ccfb307426857e5ea9f86b6fb5d67
|
[
"MIT"
] | null | null | null |
test_rm.py
|
ClaudeNovaMB/rightmove_webscraper.py
|
2077b5f46a0ccfb307426857e5ea9f86b6fb5d67
|
[
"MIT"
] | null | null | null |
import pandas as pd
import unittest
from rightmove_webscraper import RightmoveData
# Root of the live rightmove.co.uk site; the tests build search URLs from it.
base_url = "https://www.rightmove.co.uk/"
# Columns every results DataFrame is expected to expose, pre-sorted so they can
# be compared against sorted(df.columns) directly.
columns = sorted(["price", "type", "address", "url", "agent_url", "postcode", "number_bedrooms", "search_date"])
class RightmoveWebscraperTest(unittest.TestCase):
    """Unit tests for the `RightmoveData` class (live-site integration)."""

    def _check_search(self, url: str, rent_or_sale: str) -> None:
        """Shared assertions for one search-results scrape.

        Scrapes `url` and verifies every public property of `RightmoveData`,
        then the default `summary()` frame and a `summary(by=...)` frame for
        each expected column.
        """
        rmd = RightmoveData(url)
        self.assertIsInstance(rmd.average_price, float)
        self.assertIsInstance(rmd.get_results, pd.DataFrame)
        self.assertListEqual(sorted(rmd.get_results.columns), columns)
        self.assertGreater(len(rmd.get_results), 0)
        self.assertIsInstance(rmd.page_count, int)
        self.assertEqual(rmd.rent_or_sale, rent_or_sale)
        self.assertIsInstance(rmd.results_count, int)
        self.assertIsInstance(rmd.results_count_display, int)
        self.assertEqual(url, rmd.url)
        # Default summary groups by number of bedrooms.
        df = rmd.summary()
        self.assertIsInstance(df, pd.DataFrame)
        self.assertListEqual(sorted(["number_bedrooms", "count", "mean"]), sorted(df.columns))
        self.assertGreater(len(df), 0)
        # summary() must also work grouped by each known column.
        for c in columns:
            df = rmd.summary(by=c)
            self.assertIsInstance(df, pd.DataFrame)
            self.assertListEqual(sorted([c, "count", "mean"]), sorted(df.columns))
            self.assertGreater(len(df), 0)

    def test_sale(self):
        """Test a search on properties for sale."""
        url = f"{base_url}property-for-sale/find.html?searchType=SALE&locationIdentifier=REGION%5E94346&insId=1"
        self._check_search(url, "sale")

    def test_rent(self):
        """Test a search on properties to rent."""
        url = f"{base_url}property-to-rent/find.html?searchType=RENT&locationIdentifier=REGION%5E94346"
        self._check_search(url, "rent")

    def test_bad_url(self):
        """Test a bad URL raises a value error."""
        bad_url = "https://www.rightmove.co.uk/property"
        with self.assertRaises(ValueError):
            _ = RightmoveData(bad_url)
# Allow running this test module directly (python test_rm.py) as well as
# through a test runner.
if __name__ == "__main__":
    unittest.main()
| 42.142857
| 112
| 0.664068
| 352
| 2,950
| 5.440341
| 0.244318
| 0.146214
| 0.120104
| 0.093995
| 0.739426
| 0.739426
| 0.71436
| 0.71436
| 0.71436
| 0.71436
| 0
| 0.008109
| 0.205763
| 2,950
| 69
| 113
| 42.753623
| 0.809219
| 0.054237
| 0
| 0.666667
| 0
| 0.037037
| 0.140535
| 0.06539
| 0
| 0
| 0
| 0
| 0.574074
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.12963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
77e7c50c439da73f7a25ca67244d0128f870a03e
| 1,413
|
py
|
Python
|
data/train/python/77e7c50c439da73f7a25ca67244d0128f870a03e__init__.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/77e7c50c439da73f7a25ca67244d0128f870a03e__init__.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/77e7c50c439da73f7a25ca67244d0128f870a03e__init__.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
# -*- encoding: utf-8 -*-
"""Aggregate the xbus.broker model package: one shared SQLAlchemy MetaData
object plus re-exports of every table/helper defined in the sub-modules."""
__author__ = 'faide'
from sqlalchemy import MetaData
# Shared MetaData instance; presumably the sub-modules imported below register
# their tables on it — confirm in each sub-module.
metadata = MetaData()
# Authentication tables.
from xbus.broker.model.auth.main import user
from xbus.broker.model.auth.main import group
from xbus.broker.model.auth.main import permission
from xbus.broker.model.auth.main import user_group_table
from xbus.broker.model.auth.main import group_permission_table
from xbus.broker.model.auth.main import role
from xbus.broker.model.auth.main import emitter
# Password helpers.
from xbus.broker.model.auth.helpers import gen_password
from xbus.broker.model.auth.helpers import validate_password
# Application setup hook.
from xbus.broker.model.setupmodel import setup_app
# Service and event-graph tables.
from xbus.broker.model.service import service
from xbus.broker.model.event import event_type
from xbus.broker.model.event import event_node
from xbus.broker.model.event import event_node_rel
# Emission profiles.
from xbus.broker.model.emission import emission_profile
from xbus.broker.model.emission import emitter_profile
from xbus.broker.model.emission import emitter_profile_event_type_rel
# Input descriptors and logging tables.
from xbus.broker.model.input import input_descriptor
from xbus.broker.model.logging import envelope
from xbus.broker.model.logging import event
from xbus.broker.model.logging import event_error
from xbus.broker.model.logging import event_error_tracking
from xbus.broker.model.logging import event_tracking
from xbus.broker.model.logging import item
from xbus.broker.model.logging import event_consumer_inactive_rel
| 44.15625
| 69
| 0.847134
| 220
| 1,413
| 5.309091
| 0.195455
| 0.171233
| 0.299658
| 0.406678
| 0.781678
| 0.730308
| 0.674658
| 0.390411
| 0.086473
| 0
| 0
| 0.000772
| 0.082803
| 1,413
| 31
| 70
| 45.580645
| 0.900463
| 0.016277
| 0
| 0
| 0
| 0
| 0.003602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.071429
| 0.928571
| 0
| 0.928571
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.