code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
class MockTracer(Tracer):
def __init__(self, scope_manager: (ScopeManager | None)=...) ->None:
...
def register_propagator(self, format: str, propagator: Propagator) ->None:
...
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MockTracer(Tracer):
def __init__(self, scope_manager: (ScopeManager | None)=...) ->None:
...
def register_propagator(self, format: str, propagator: Propagator) ->None:
...
<|reserved_special_token_0|>
def reset(self) ->None:
...
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MockTracer(Tracer):
def __init__(self, scope_manager: (ScopeManager | None)=...) ->None:
...
def register_propagator(self, format: str, propagator: Propagator) ->None:
...
def finished_spans(self) ->list[Span]:
...
def reset(self) ->None:
...
<|reserved_special_token_1|>
from ..scope_manager import ScopeManager
from ..span import Span
from ..tracer import Tracer
from .propagator import Propagator
class MockTracer(Tracer):
def __init__(self, scope_manager: (ScopeManager | None)=...) ->None:
...
def register_propagator(self, format: str, propagator: Propagator) ->None:
...
def finished_spans(self) ->list[Span]:
...
def reset(self) ->None:
...
<|reserved_special_token_1|>
from ..scope_manager import ScopeManager
from ..span import Span
from ..tracer import Tracer
from .propagator import Propagator
class MockTracer(Tracer):
def __init__(self, scope_manager: ScopeManager | None = ...) -> None: ...
def register_propagator(self, format: str, propagator: Propagator) -> None: ...
def finished_spans(self) -> list[Span]: ...
def reset(self) -> None: ...
|
flexible
|
{
"blob_id": "76d2c80c673f9a0444e72721909a51479ff35521",
"index": 1785,
"step-1": "<mask token>\n\n\nclass MockTracer(Tracer):\n\n def __init__(self, scope_manager: (ScopeManager | None)=...) ->None:\n ...\n\n def register_propagator(self, format: str, propagator: Propagator) ->None:\n ...\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MockTracer(Tracer):\n\n def __init__(self, scope_manager: (ScopeManager | None)=...) ->None:\n ...\n\n def register_propagator(self, format: str, propagator: Propagator) ->None:\n ...\n <mask token>\n\n def reset(self) ->None:\n ...\n",
"step-3": "<mask token>\n\n\nclass MockTracer(Tracer):\n\n def __init__(self, scope_manager: (ScopeManager | None)=...) ->None:\n ...\n\n def register_propagator(self, format: str, propagator: Propagator) ->None:\n ...\n\n def finished_spans(self) ->list[Span]:\n ...\n\n def reset(self) ->None:\n ...\n",
"step-4": "from ..scope_manager import ScopeManager\nfrom ..span import Span\nfrom ..tracer import Tracer\nfrom .propagator import Propagator\n\n\nclass MockTracer(Tracer):\n\n def __init__(self, scope_manager: (ScopeManager | None)=...) ->None:\n ...\n\n def register_propagator(self, format: str, propagator: Propagator) ->None:\n ...\n\n def finished_spans(self) ->list[Span]:\n ...\n\n def reset(self) ->None:\n ...\n",
"step-5": "from ..scope_manager import ScopeManager\nfrom ..span import Span\nfrom ..tracer import Tracer\nfrom .propagator import Propagator\n\nclass MockTracer(Tracer):\n def __init__(self, scope_manager: ScopeManager | None = ...) -> None: ...\n def register_propagator(self, format: str, propagator: Propagator) -> None: ...\n def finished_spans(self) -> list[Span]: ...\n def reset(self) -> None: ...\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
i = 100
while i >= 100:
print(i)
i -= 1
print(i)
|
normal
|
{
"blob_id": "9527743802a0bb680ab3dcf325c0f7749a51afc6",
"index": 5949,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i >= 100:\n print(i)\ni -= 1\nprint(i)\n",
"step-3": "i = 100\nwhile i >= 100:\n print(i)\ni -= 1\nprint(i)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FosAppConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FosAppConfig(AppConfig):
name = 'fos_app'
<|reserved_special_token_1|>
from django.apps import AppConfig
class FosAppConfig(AppConfig):
name = 'fos_app'
|
flexible
|
{
"blob_id": "d83f2d9bb25a46bc7344b420ce65bf729165e6b9",
"index": 278,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass FosAppConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FosAppConfig(AppConfig):\n name = 'fos_app'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass FosAppConfig(AppConfig):\n name = 'fos_app'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""Plugin setup."""
import importlib
from qiime2.plugin import (
Plugin,
Str,
Choices,
Int,
Bool,
Range,
Float,
Metadata,
MetadataColumn,
Categorical,
Numeric,
Citations,
)
import q2_micom
from q2_micom._formats_and_types import (
SBML,
JSON,
Pickle,
SBMLFormat,
SBMLDirectory,
JSONFormat,
JSONDirectory,
CommunityModelFormat,
CommunityModelManifest,
CommunityModelDirectory,
GrowthRates,
Fluxes,
MicomResultsDirectory,
MicomMediumFile,
MicomMediumDirectory,
MetabolicModels,
CommunityModels,
MicomResults,
MicomMedium,
Global,
PerSample,
TradeoffResults,
TradeoffResultsDirectory,
REQ_FIELDS,
)
from q2_types.feature_data import FeatureData, Taxonomy
from q2_types.feature_table import FeatureTable, RelativeFrequency, Frequency
citations = Citations.load("citations.bib", package="q2_micom")
plugin = Plugin(
name="micom",
version=q2_micom.__version__,
website="https://github.com/micom-dev/q2-micom",
package="q2_micom",
description=(""),
short_description="Plugin for metabolic modeling of microbial communities.",
citations=[citations["micom"]],
)
plugin.register_formats(
SBMLFormat,
SBMLDirectory,
JSONFormat,
JSONDirectory,
CommunityModelFormat,
CommunityModelManifest,
CommunityModelDirectory,
GrowthRates,
Fluxes,
MicomResultsDirectory,
MicomMediumFile,
MicomMediumDirectory,
TradeoffResultsDirectory,
)
plugin.register_semantic_types(
MetabolicModels, CommunityModels, MicomResults, MicomMedium
)
plugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)
plugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)
plugin.register_semantic_type_to_format(
CommunityModels[Pickle], CommunityModelDirectory
)
plugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)
plugin.register_semantic_type_to_format(TradeoffResults, TradeoffResultsDirectory)
plugin.register_semantic_type_to_format(MicomMedium[Global], MicomMediumDirectory)
plugin.register_semantic_type_to_format(MicomMedium[PerSample], MicomMediumDirectory)
plugin.methods.register_function(
function=q2_micom.db,
inputs={},
parameters={
"meta": Metadata,
"rank": Str % Choices(q2_micom._build.RANKS),
"threads": Int % Range(1, None),
},
outputs=[("metabolic_models", MetabolicModels[JSON])],
input_descriptions={},
parameter_descriptions={
"meta": (
"Metadata for the individual metabolic models in `folder`. "
"Must contain the the following columns: %s." % ", ".join(REQ_FIELDS)
),
"rank": "The phylogenetic rank at which to summarize taxa.",
"threads": "The number of threads to use when constructing models.",
},
output_descriptions={"metabolic_models": "The metabolic model DB."},
name="Build a metabolic model database.",
description=(
"Constructs pan-genome models summarized to the specified rank "
"and bundles the models to be used by MICOM. "
"The chosen rank has to be the same you want as when building your "
"community models. "
"So you may not build genus-level community models with a species "
"level database. "
"You will only need to run this function if you want to build a "
"custom DB. For many use cases downloading the prebuilt AGORA DB "
"with the the preferred rank should be sufficient."
),
citations=[
citations["agora"],
citations["agora_reply"],
citations["micom"],
],
)
plugin.methods.register_function(
function=q2_micom.build,
inputs={
"abundance": FeatureTable[Frequency | RelativeFrequency],
"taxonomy": FeatureData[Taxonomy],
"models": MetabolicModels[JSON],
},
parameters={
"threads": Int % Range(1, None),
"cutoff": Float % Range(0.0, 1.0),
"strict": Bool,
"solver": Str % Choices("auto", "cplex", "osqp", "gurobi"),
},
outputs=[("community_models", CommunityModels[Pickle])],
input_descriptions={
"abundance": (
"The feature table containing the samples over which beta "
"diversity should be computed."
),
"taxonomy": "The taxonomy assignments for the ASVs in the table.",
"models": "The single taxon model database to use.",
},
parameter_descriptions={
"threads": "The number of threads to use when constructing models.",
"cutoff": "Taxa with a relative abundance smaller than this will "
"be dropped.",
"strict": (
"If true will collapse and match on all taxa ranks up to the "
"specified rank (so on all higher ranks as well). If false "
"(default) will match only on single taxa rank specified before. "
"If using the strict option make sure ranks are named the same as in "
"the used database."
),
"solver": (
"The quadratic and linear programming solver that will be used "
"in the models. Will pick an appropriate one by default. "
"`cplex` and `gurobi` are commercial solvers with free academic "
"licenses and have to be installed manually. See the docs for more info."
),
},
output_descriptions={"community_models": "The community models."},
name="Build community models.",
description=("Builds the metabolic community models for a set of samples."),
citations=[citations["micom"]],
)
plugin.methods.register_function(
function=q2_micom.minimal_medium,
inputs={"models": CommunityModels[Pickle]},
parameters={
"min_growth": Float % Range(0.0, None, inclusive_start=False),
"threads": Int % Range(1, None),
},
outputs=[("medium", MicomMedium[Global])],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
),
},
parameter_descriptions={
"min_growth": (
"The minimum achievable growth rate for each taxon. "
"The returned growth medium enables all taxa to growth "
"simultaneously with at least this rate."
),
"threads": "The number of threads to use when simulating.",
},
output_descriptions={"medium": "The resulting growth medium."},
name="Obtain a minimal growth medium for models.",
description=(
"Obtains a minimal growth medium for the community models. "
"Please note that this medium does not have any biological "
"feasibility. If you have any knowledge about metabolites present "
"in the environment we recommend you construct the medium by hand."
),
citations=[citations["micom"]],
)
plugin.methods.register_function(
function=q2_micom.grow,
inputs={
"models": CommunityModels[Pickle],
"medium": MicomMedium[Global | PerSample],
},
parameters={
"tradeoff": Float % Range(0.0, 1.0, inclusive_start=False, inclusive_end=True),
"strategy": Str % Choices("pFBA", "minimal uptake", "none"),
"threads": Int % Range(1, None),
},
outputs=[("results", MicomResults)],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
),
"medium": "The growth medium to use.",
},
parameter_descriptions={
"tradeoff": (
"The tradeoff parameter. This describes the balance "
"between maximizing biomass production of the entire "
"community and biomass production of individual taxa "
'(ergo "egoistic" growth). A value of 1.0 would yield '
"the best biomass production across the community but "
"will only allow a few taxa to grow. Smaller values will "
"allow more taxa to grow but will sacrifice overall "
"biomass. A value of 0.5 (the default) has been shown to "
"best reproduce growth rates in the human gut."
),
"strategy": (
"The strategy used when choosing the solution in the "
"optimal flux space. `minimal uptake` uses the fluxes "
"that result in the smallest total uptake from the environment."
"`pFBA` uses parsimonious Flux Balance Analysis and thus will choose "
"the fluxes with the lowest enzyme requirement for each taxon. "
"`none` will return an arbitrary solution from the optimal flux space."
),
"threads": "The number of threads to use when simulating.",
},
output_descriptions={
"results": "The resulting taxa-level growth rates and metabolic "
"exchange fluxes."
},
name="Simulate growth for community models.",
description=(
"Simulates growth for a set of samples. Note that those are "
'sample-specific or "personalized" simulations, so each taxon '
"may have different growth rates and metabolite usage in each sample."
),
citations=[citations["micom"]],
)
plugin.methods.register_function(
function=q2_micom.tradeoff,
inputs={
"models": CommunityModels[Pickle],
"medium": MicomMedium[Global | PerSample],
},
parameters={
"tradeoff_min": Float % Range(0.0, 1.0, inclusive_start=False),
"tradeoff_max": Float % Range(0.0, 1.0, inclusive_end=True),
"step": Float % Range(0.0, 1.0),
"threads": Int,
},
outputs=[("results", TradeoffResults)],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
),
"medium": "The growth medium to use.",
},
parameter_descriptions={
"tradeoff_min": "The minimum tradeoff parameter to test. This should "
"be larger than 0.0 and smaller than 1.0.",
"tradeoff_max": "The maximum tradeoff parameter to test. This should "
"be larger than 0.0 and smaller than 1.0 and also be"
"larger than `tradeoff_min`.",
"step": "The tradeoff value step size to use.",
"threads": "The number of threads to use when simulating.",
},
output_descriptions={
"results": "The resulting taxa-level growth rates for varying "
"tradeoff values."
},
name="Test a variety of tradeoff values.",
description=(
"Simulates growth for a set of samples while varying the tradeoff "
"between community and taxon biomass production. "
"This can be used to characterize a good tradeoff value for a "
"specific set of samples. Our study suggested that a good tradeoff "
"value is the largest value that allows the majority of taxa in the "
"sample to grow."
),
citations=[citations["micom"]],
)
plugin.methods.register_function(
function=q2_micom.filter_models,
inputs={"models": CommunityModels[Pickle]},
parameters={"metadata": Metadata, "query": Str, "exclude": Bool},
outputs=[("filtered_models", CommunityModels[Pickle])],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
)
},
parameter_descriptions={
"metadata": "The metadata for the samples to keep or to query.",
"query": (
"A pandas query expression to select samples from the metadata. "
"This will call `query` on the metadata DataFrame, so you can test "
"your query by loading our metadata into a pandas DataFrame."
),
"exclude": (
"If true will use all samples *except* the ones selected "
"by metadata and query."
),
},
output_descriptions={"filtered_models": "The filtered community models."},
name="Filters models for a chosen set of samples.",
description=(
"Select a subset of samples and their community models using a list "
"of samples or a pandas query expression."
),
citations=[citations["micom"]],
)
plugin.methods.register_function(
function=q2_micom.filter_results,
inputs={"results": MicomResults},
parameters={"metadata": Metadata, "query": Str, "exclude": Bool},
outputs=[("filtered_results", MicomResults)],
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted groath rates and exchange fluxes."
)
},
parameter_descriptions={
"metadata": "The metadata for the samples to keep or to query.",
"query": (
"A pandas query expression to select samples from the metadata. "
"This will call `query` on the metadata DataFrame, so you can test "
"your query by loading our metadata into a pandas DataFrame."
),
"exclude": (
"If true will use all samples *except* the ones selected "
"by metadata and query."
),
},
output_descriptions={"filtered_results": "The filtered simulation models."},
name="Filters results for a chosen set of samples.",
description=(
"Select a subset of samples and their simulation results using a list "
"of samples or a pandas query expression."
),
citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.plot_growth,
inputs={"results": MicomResults},
parameters={},
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted groath rates and exchange fluxes."
)
},
parameter_descriptions={},
name="Plot taxa growth rates.",
description=(
"Plot predicted growth rates for each taxon in each sample. "
"Only points with growing taxa are shown (growth rate sufficiently "
"larger than zero)."
),
citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.exchanges_per_sample,
inputs={"results": MicomResults},
parameters={
"direction": Str % Choices("import", "export"),
"cluster": Bool,
},
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted groath rates and exchange fluxes."
)
},
parameter_descriptions={
"direction": "The direction of the flux.",
"cluster": "Whether to perform clutering on samples and reactions.",
},
name="Plot gloabl exchange rates.",
description=(
"Plot predicted global exchange fluxes for each sample. "
"When plotting imports this corresponds to the consumption "
"fluxes for each metabolite that is available to the community. "
"When plotting export this corresponds to the production fluxes "
"for each metabolite."
),
citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.exchanges_per_taxon,
inputs={"results": MicomResults},
parameters={
"direction": Str % Choices("import", "export"),
"perplexity": Int % Range(2, None),
},
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted growth rates and exchange fluxes."
)
},
parameter_descriptions={
"direction": "The direction of the flux.",
"perplexity": "TSNE parameter. Relates to the number of neighbors used to "
"calculate distances. Smaller values preserve more local "
"structure and larger values preserve more global structure.",
},
name="Plot niche overlap.",
description=(
"Plot growth or production niches. "
"The entire set of import or export fluxes for each taxon in each "
"sample is reduced onto a single point on a 2D plane."
"Taxa that are close to each other either consume similar metabolites "
" (imports) or produce similar metabolites (exports)."
),
citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.plot_tradeoff,
inputs={"results": TradeoffResults},
parameters={},
input_descriptions={
"results": (
"A set of MICOM tradeoff analysis results. "
"Contains predicted growth rates for each tested tradeoff."
)
},
parameter_descriptions={},
name="Plot tradeoff results.",
description=(
"Plot predicted growth rate distributions for each tradeoff as "
"well as the fraction of growing taxa in each sample and tradeoff "
"value. For a good tradeoff value one usually tries to find the "
"largest tradeoff value that still aloows most taxa to grow."
),
citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.fit_phenotype,
inputs={"results": MicomResults},
parameters={
"metadata": MetadataColumn[Categorical | Numeric],
"variable_type": Str % Choices("binary", "continuous"),
"flux_type": Str % Choices("import", "production"),
"min_coef": Float % Range(0, None),
},
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted growth rates and exchange fluxes."
),
},
parameter_descriptions={
"metadata": "The metadata variable to use.",
"variable_type": "The type of the phenotype variable.",
"flux_type": "Which fluxes to use.",
"min_coef": (
"Only coefficient with absolute values larger than this " "will be shown."
),
},
name="Test for differential production",
description=(
"Test for overall metabolite production differences " "between two groups."
),
citations=[citations["micom"]],
)
importlib.import_module("q2_micom._transform")
|
normal
|
{
"blob_id": "9a6f159d9208ee9e337de7b717e2e25c7e7f9f06",
"index": 4277,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplugin.register_formats(SBMLFormat, SBMLDirectory, JSONFormat,\n JSONDirectory, CommunityModelFormat, CommunityModelManifest,\n CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory,\n MicomMediumFile, MicomMediumDirectory, TradeoffResultsDirectory)\nplugin.register_semantic_types(MetabolicModels, CommunityModels,\n MicomResults, MicomMedium)\nplugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)\nplugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)\nplugin.register_semantic_type_to_format(CommunityModels[Pickle],\n CommunityModelDirectory)\nplugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)\nplugin.register_semantic_type_to_format(TradeoffResults,\n TradeoffResultsDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[Global],\n MicomMediumDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[PerSample],\n MicomMediumDirectory)\nplugin.methods.register_function(function=q2_micom.db, inputs={},\n parameters={'meta': Metadata, 'rank': Str % Choices(q2_micom._build.\n RANKS), 'threads': Int % Range(1, None)}, outputs=[('metabolic_models',\n MetabolicModels[JSON])], input_descriptions={}, parameter_descriptions=\n {'meta': \n 'Metadata for the individual metabolic models in `folder`. Must contain the the following columns: %s.'\n % ', '.join(REQ_FIELDS), 'rank':\n 'The phylogenetic rank at which to summarize taxa.', 'threads':\n 'The number of threads to use when constructing models.'},\n output_descriptions={'metabolic_models': 'The metabolic model DB.'},\n name='Build a metabolic model database.', description=\n 'Constructs pan-genome models summarized to the specified rank and bundles the models to be used by MICOM. The chosen rank has to be the same you want as when building your community models. So you may not build genus-level community models with a species level database. 
You will only need to run this function if you want to build a custom DB. For many use cases downloading the prebuilt AGORA DB with the the preferred rank should be sufficient.'\n , citations=[citations['agora'], citations['agora_reply'], citations[\n 'micom']])\nplugin.methods.register_function(function=q2_micom.build, inputs={\n 'abundance': FeatureTable[Frequency | RelativeFrequency], 'taxonomy':\n FeatureData[Taxonomy], 'models': MetabolicModels[JSON]}, parameters={\n 'threads': Int % Range(1, None), 'cutoff': Float % Range(0.0, 1.0),\n 'strict': Bool, 'solver': Str % Choices('auto', 'cplex', 'osqp',\n 'gurobi')}, outputs=[('community_models', CommunityModels[Pickle])],\n input_descriptions={'abundance':\n 'The feature table containing the samples over which beta diversity should be computed.'\n , 'taxonomy': 'The taxonomy assignments for the ASVs in the table.',\n 'models': 'The single taxon model database to use.'},\n parameter_descriptions={'threads':\n 'The number of threads to use when constructing models.', 'cutoff':\n 'Taxa with a relative abundance smaller than this will be dropped.',\n 'strict':\n 'If true will collapse and match on all taxa ranks up to the specified rank (so on all higher ranks as well). If false (default) will match only on single taxa rank specified before. If using the strict option make sure ranks are named the same as in the used database.'\n , 'solver':\n 'The quadratic and linear programming solver that will be used in the models. Will pick an appropriate one by default. `cplex` and `gurobi` are commercial solvers with free academic licenses and have to be installed manually. 
See the docs for more info.'\n }, output_descriptions={'community_models': 'The community models.'},\n name='Build community models.', description=\n 'Builds the metabolic community models for a set of samples.',\n citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.minimal_medium, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'min_growth': Float %\n Range(0.0, None, inclusive_start=False), 'threads': Int % Range(1, None\n )}, outputs=[('medium', MicomMedium[Global])], input_descriptions={\n 'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n }, parameter_descriptions={'min_growth':\n 'The minimum achievable growth rate for each taxon. The returned growth medium enables all taxa to growth simultaneously with at least this rate.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'medium': 'The resulting growth medium.'}, name=\n 'Obtain a minimal growth medium for models.', description=\n 'Obtains a minimal growth medium for the community models. Please note that this medium does not have any biological feasibility. If you have any knowledge about metabolites present in the environment we recommend you construct the medium by hand.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.grow, inputs={'models':\n CommunityModels[Pickle], 'medium': MicomMedium[Global | PerSample]},\n parameters={'tradeoff': Float % Range(0.0, 1.0, inclusive_start=False,\n inclusive_end=True), 'strategy': Str % Choices('pFBA', 'minimal uptake',\n 'none'), 'threads': Int % Range(1, None)}, outputs=[('results',\n MicomResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff':\n 'The tradeoff parameter. 
This describes the balance between maximizing biomass production of the entire community and biomass production of individual taxa (ergo \"egoistic\" growth). A value of 1.0 would yield the best biomass production across the community but will only allow a few taxa to grow. Smaller values will allow more taxa to grow but will sacrifice overall biomass. A value of 0.5 (the default) has been shown to best reproduce growth rates in the human gut.'\n , 'strategy':\n 'The strategy used when choosing the solution in the optimal flux space. `minimal uptake` uses the fluxes that result in the smallest total uptake from the environment.`pFBA` uses parsimonious Flux Balance Analysis and thus will choose the fluxes with the lowest enzyme requirement for each taxon. `none` will return an arbitrary solution from the optimal flux space.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'results':\n 'The resulting taxa-level growth rates and metabolic exchange fluxes.'},\n name='Simulate growth for community models.', description=\n 'Simulates growth for a set of samples. Note that those are sample-specific or \"personalized\" simulations, so each taxon may have different growth rates and metabolite usage in each sample.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.tradeoff, inputs={\n 'models': CommunityModels[Pickle], 'medium': MicomMedium[Global |\n PerSample]}, parameters={'tradeoff_min': Float % Range(0.0, 1.0,\n inclusive_start=False), 'tradeoff_max': Float % Range(0.0, 1.0,\n inclusive_end=True), 'step': Float % Range(0.0, 1.0), 'threads': Int},\n outputs=[('results', TradeoffResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff_min':\n 'The minimum tradeoff parameter to test. 
This should be larger than 0.0 and smaller than 1.0.'\n , 'tradeoff_max':\n 'The maximum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0 and also belarger than `tradeoff_min`.'\n , 'step': 'The tradeoff value step size to use.', 'threads':\n 'The number of threads to use when simulating.'}, output_descriptions={\n 'results':\n 'The resulting taxa-level growth rates for varying tradeoff values.'},\n name='Test a variety of tradeoff values.', description=\n 'Simulates growth for a set of samples while varying the tradeoff between community and taxon biomass production. This can be used to characterize a good tradeoff value for a specific set of samples. Our study suggested that a good tradeoff value is the largest value that allows the majority of taxa in the sample to grow.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_models, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'metadata': Metadata,\n 'query': Str, 'exclude': Bool}, outputs=[('filtered_models',\n CommunityModels[Pickle])], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. 
This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_models':\n 'The filtered community models.'}, name=\n 'Filters models for a chosen set of samples.', description=\n 'Select a subset of samples and their community models using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_results, inputs={\n 'results': MicomResults}, parameters={'metadata': Metadata, 'query':\n Str, 'exclude': Bool}, outputs=[('filtered_results', MicomResults)],\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_results':\n 'The filtered simulation models.'}, name=\n 'Filters results for a chosen set of samples.', description=\n 'Select a subset of samples and their simulation results using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_growth, inputs=\n {'results': MicomResults}, parameters={}, input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={}, name='Plot taxa growth rates.',\n description=\n 'Plot predicted growth rates for each taxon in each sample. 
Only points with growing taxa are shown (growth rate sufficiently larger than zero).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_sample,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'cluster': Bool}, input_descriptions={\n 'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'cluster': 'Whether to perform clutering on samples and reactions.'},\n name='Plot gloabl exchange rates.', description=\n 'Plot predicted global exchange fluxes for each sample. When plotting imports this corresponds to the consumption fluxes for each metabolite that is available to the community. When plotting export this corresponds to the production fluxes for each metabolite.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_taxon,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'perplexity': Int % Range(2, None)},\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'perplexity':\n 'TSNE parameter. Relates to the number of neighbors used to calculate distances. Smaller values preserve more local structure and larger values preserve more global structure.'\n }, name='Plot niche overlap.', description=\n 'Plot growth or production niches. 
The entire set of import or export fluxes for each taxon in each sample is reduced onto a single point on a 2D plane.Taxa that are close to each other either consume similar metabolites (imports) or produce similar metabolites (exports).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_tradeoff,\n inputs={'results': TradeoffResults}, parameters={}, input_descriptions=\n {'results':\n 'A set of MICOM tradeoff analysis results. Contains predicted growth rates for each tested tradeoff.'\n }, parameter_descriptions={}, name='Plot tradeoff results.',\n description=\n 'Plot predicted growth rate distributions for each tradeoff as well as the fraction of growing taxa in each sample and tradeoff value. For a good tradeoff value one usually tries to find the largest tradeoff value that still aloows most taxa to grow.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.fit_phenotype,\n inputs={'results': MicomResults}, parameters={'metadata':\n MetadataColumn[Categorical | Numeric], 'variable_type': Str % Choices(\n 'binary', 'continuous'), 'flux_type': Str % Choices('import',\n 'production'), 'min_coef': Float % Range(0, None)}, input_descriptions=\n {'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'metadata': 'The metadata variable to use.',\n 'variable_type': 'The type of the phenotype variable.', 'flux_type':\n 'Which fluxes to use.', 'min_coef':\n 'Only coefficient with absolute values larger than this will be shown.'\n }, name='Test for differential production', description=\n 'Test for overall metabolite production differences between two groups.',\n citations=[citations['micom']])\nimportlib.import_module('q2_micom._transform')\n",
"step-3": "<mask token>\ncitations = Citations.load('citations.bib', package='q2_micom')\nplugin = Plugin(name='micom', version=q2_micom.__version__, website=\n 'https://github.com/micom-dev/q2-micom', package='q2_micom',\n description='', short_description=\n 'Plugin for metabolic modeling of microbial communities.', citations=[\n citations['micom']])\nplugin.register_formats(SBMLFormat, SBMLDirectory, JSONFormat,\n JSONDirectory, CommunityModelFormat, CommunityModelManifest,\n CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory,\n MicomMediumFile, MicomMediumDirectory, TradeoffResultsDirectory)\nplugin.register_semantic_types(MetabolicModels, CommunityModels,\n MicomResults, MicomMedium)\nplugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)\nplugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)\nplugin.register_semantic_type_to_format(CommunityModels[Pickle],\n CommunityModelDirectory)\nplugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)\nplugin.register_semantic_type_to_format(TradeoffResults,\n TradeoffResultsDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[Global],\n MicomMediumDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[PerSample],\n MicomMediumDirectory)\nplugin.methods.register_function(function=q2_micom.db, inputs={},\n parameters={'meta': Metadata, 'rank': Str % Choices(q2_micom._build.\n RANKS), 'threads': Int % Range(1, None)}, outputs=[('metabolic_models',\n MetabolicModels[JSON])], input_descriptions={}, parameter_descriptions=\n {'meta': \n 'Metadata for the individual metabolic models in `folder`. 
Must contain the the following columns: %s.'\n % ', '.join(REQ_FIELDS), 'rank':\n 'The phylogenetic rank at which to summarize taxa.', 'threads':\n 'The number of threads to use when constructing models.'},\n output_descriptions={'metabolic_models': 'The metabolic model DB.'},\n name='Build a metabolic model database.', description=\n 'Constructs pan-genome models summarized to the specified rank and bundles the models to be used by MICOM. The chosen rank has to be the same you want as when building your community models. So you may not build genus-level community models with a species level database. You will only need to run this function if you want to build a custom DB. For many use cases downloading the prebuilt AGORA DB with the the preferred rank should be sufficient.'\n , citations=[citations['agora'], citations['agora_reply'], citations[\n 'micom']])\nplugin.methods.register_function(function=q2_micom.build, inputs={\n 'abundance': FeatureTable[Frequency | RelativeFrequency], 'taxonomy':\n FeatureData[Taxonomy], 'models': MetabolicModels[JSON]}, parameters={\n 'threads': Int % Range(1, None), 'cutoff': Float % Range(0.0, 1.0),\n 'strict': Bool, 'solver': Str % Choices('auto', 'cplex', 'osqp',\n 'gurobi')}, outputs=[('community_models', CommunityModels[Pickle])],\n input_descriptions={'abundance':\n 'The feature table containing the samples over which beta diversity should be computed.'\n , 'taxonomy': 'The taxonomy assignments for the ASVs in the table.',\n 'models': 'The single taxon model database to use.'},\n parameter_descriptions={'threads':\n 'The number of threads to use when constructing models.', 'cutoff':\n 'Taxa with a relative abundance smaller than this will be dropped.',\n 'strict':\n 'If true will collapse and match on all taxa ranks up to the specified rank (so on all higher ranks as well). If false (default) will match only on single taxa rank specified before. 
If using the strict option make sure ranks are named the same as in the used database.'\n , 'solver':\n 'The quadratic and linear programming solver that will be used in the models. Will pick an appropriate one by default. `cplex` and `gurobi` are commercial solvers with free academic licenses and have to be installed manually. See the docs for more info.'\n }, output_descriptions={'community_models': 'The community models.'},\n name='Build community models.', description=\n 'Builds the metabolic community models for a set of samples.',\n citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.minimal_medium, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'min_growth': Float %\n Range(0.0, None, inclusive_start=False), 'threads': Int % Range(1, None\n )}, outputs=[('medium', MicomMedium[Global])], input_descriptions={\n 'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n }, parameter_descriptions={'min_growth':\n 'The minimum achievable growth rate for each taxon. The returned growth medium enables all taxa to growth simultaneously with at least this rate.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'medium': 'The resulting growth medium.'}, name=\n 'Obtain a minimal growth medium for models.', description=\n 'Obtains a minimal growth medium for the community models. Please note that this medium does not have any biological feasibility. 
If you have any knowledge about metabolites present in the environment we recommend you construct the medium by hand.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.grow, inputs={'models':\n CommunityModels[Pickle], 'medium': MicomMedium[Global | PerSample]},\n parameters={'tradeoff': Float % Range(0.0, 1.0, inclusive_start=False,\n inclusive_end=True), 'strategy': Str % Choices('pFBA', 'minimal uptake',\n 'none'), 'threads': Int % Range(1, None)}, outputs=[('results',\n MicomResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff':\n 'The tradeoff parameter. This describes the balance between maximizing biomass production of the entire community and biomass production of individual taxa (ergo \"egoistic\" growth). A value of 1.0 would yield the best biomass production across the community but will only allow a few taxa to grow. Smaller values will allow more taxa to grow but will sacrifice overall biomass. A value of 0.5 (the default) has been shown to best reproduce growth rates in the human gut.'\n , 'strategy':\n 'The strategy used when choosing the solution in the optimal flux space. `minimal uptake` uses the fluxes that result in the smallest total uptake from the environment.`pFBA` uses parsimonious Flux Balance Analysis and thus will choose the fluxes with the lowest enzyme requirement for each taxon. `none` will return an arbitrary solution from the optimal flux space.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'results':\n 'The resulting taxa-level growth rates and metabolic exchange fluxes.'},\n name='Simulate growth for community models.', description=\n 'Simulates growth for a set of samples. 
Note that those are sample-specific or \"personalized\" simulations, so each taxon may have different growth rates and metabolite usage in each sample.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.tradeoff, inputs={\n 'models': CommunityModels[Pickle], 'medium': MicomMedium[Global |\n PerSample]}, parameters={'tradeoff_min': Float % Range(0.0, 1.0,\n inclusive_start=False), 'tradeoff_max': Float % Range(0.0, 1.0,\n inclusive_end=True), 'step': Float % Range(0.0, 1.0), 'threads': Int},\n outputs=[('results', TradeoffResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff_min':\n 'The minimum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0.'\n , 'tradeoff_max':\n 'The maximum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0 and also belarger than `tradeoff_min`.'\n , 'step': 'The tradeoff value step size to use.', 'threads':\n 'The number of threads to use when simulating.'}, output_descriptions={\n 'results':\n 'The resulting taxa-level growth rates for varying tradeoff values.'},\n name='Test a variety of tradeoff values.', description=\n 'Simulates growth for a set of samples while varying the tradeoff between community and taxon biomass production. This can be used to characterize a good tradeoff value for a specific set of samples. Our study suggested that a good tradeoff value is the largest value that allows the majority of taxa in the sample to grow.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_models, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'metadata': Metadata,\n 'query': Str, 'exclude': Bool}, outputs=[('filtered_models',\n CommunityModels[Pickle])], input_descriptions={'models':\n 'A collection of metabolic community models. 
This should contain on model for each sample.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_models':\n 'The filtered community models.'}, name=\n 'Filters models for a chosen set of samples.', description=\n 'Select a subset of samples and their community models using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_results, inputs={\n 'results': MicomResults}, parameters={'metadata': Metadata, 'query':\n Str, 'exclude': Bool}, outputs=[('filtered_results', MicomResults)],\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_results':\n 'The filtered simulation models.'}, name=\n 'Filters results for a chosen set of samples.', description=\n 'Select a subset of samples and their simulation results using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_growth, inputs=\n {'results': MicomResults}, parameters={}, input_descriptions={'results':\n 'A set of MICOM analysis results. 
Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={}, name='Plot taxa growth rates.',\n description=\n 'Plot predicted growth rates for each taxon in each sample. Only points with growing taxa are shown (growth rate sufficiently larger than zero).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_sample,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'cluster': Bool}, input_descriptions={\n 'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'cluster': 'Whether to perform clutering on samples and reactions.'},\n name='Plot gloabl exchange rates.', description=\n 'Plot predicted global exchange fluxes for each sample. When plotting imports this corresponds to the consumption fluxes for each metabolite that is available to the community. When plotting export this corresponds to the production fluxes for each metabolite.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_taxon,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'perplexity': Int % Range(2, None)},\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'perplexity':\n 'TSNE parameter. Relates to the number of neighbors used to calculate distances. Smaller values preserve more local structure and larger values preserve more global structure.'\n }, name='Plot niche overlap.', description=\n 'Plot growth or production niches. 
The entire set of import or export fluxes for each taxon in each sample is reduced onto a single point on a 2D plane.Taxa that are close to each other either consume similar metabolites (imports) or produce similar metabolites (exports).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_tradeoff,\n inputs={'results': TradeoffResults}, parameters={}, input_descriptions=\n {'results':\n 'A set of MICOM tradeoff analysis results. Contains predicted growth rates for each tested tradeoff.'\n }, parameter_descriptions={}, name='Plot tradeoff results.',\n description=\n 'Plot predicted growth rate distributions for each tradeoff as well as the fraction of growing taxa in each sample and tradeoff value. For a good tradeoff value one usually tries to find the largest tradeoff value that still aloows most taxa to grow.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.fit_phenotype,\n inputs={'results': MicomResults}, parameters={'metadata':\n MetadataColumn[Categorical | Numeric], 'variable_type': Str % Choices(\n 'binary', 'continuous'), 'flux_type': Str % Choices('import',\n 'production'), 'min_coef': Float % Range(0, None)}, input_descriptions=\n {'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'metadata': 'The metadata variable to use.',\n 'variable_type': 'The type of the phenotype variable.', 'flux_type':\n 'Which fluxes to use.', 'min_coef':\n 'Only coefficient with absolute values larger than this will be shown.'\n }, name='Test for differential production', description=\n 'Test for overall metabolite production differences between two groups.',\n citations=[citations['micom']])\nimportlib.import_module('q2_micom._transform')\n",
"step-4": "<mask token>\nimport importlib\nfrom qiime2.plugin import Plugin, Str, Choices, Int, Bool, Range, Float, Metadata, MetadataColumn, Categorical, Numeric, Citations\nimport q2_micom\nfrom q2_micom._formats_and_types import SBML, JSON, Pickle, SBMLFormat, SBMLDirectory, JSONFormat, JSONDirectory, CommunityModelFormat, CommunityModelManifest, CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory, MicomMediumFile, MicomMediumDirectory, MetabolicModels, CommunityModels, MicomResults, MicomMedium, Global, PerSample, TradeoffResults, TradeoffResultsDirectory, REQ_FIELDS\nfrom q2_types.feature_data import FeatureData, Taxonomy\nfrom q2_types.feature_table import FeatureTable, RelativeFrequency, Frequency\ncitations = Citations.load('citations.bib', package='q2_micom')\nplugin = Plugin(name='micom', version=q2_micom.__version__, website=\n 'https://github.com/micom-dev/q2-micom', package='q2_micom',\n description='', short_description=\n 'Plugin for metabolic modeling of microbial communities.', citations=[\n citations['micom']])\nplugin.register_formats(SBMLFormat, SBMLDirectory, JSONFormat,\n JSONDirectory, CommunityModelFormat, CommunityModelManifest,\n CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory,\n MicomMediumFile, MicomMediumDirectory, TradeoffResultsDirectory)\nplugin.register_semantic_types(MetabolicModels, CommunityModels,\n MicomResults, MicomMedium)\nplugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)\nplugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)\nplugin.register_semantic_type_to_format(CommunityModels[Pickle],\n CommunityModelDirectory)\nplugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)\nplugin.register_semantic_type_to_format(TradeoffResults,\n TradeoffResultsDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[Global],\n MicomMediumDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[PerSample],\n 
MicomMediumDirectory)\nplugin.methods.register_function(function=q2_micom.db, inputs={},\n parameters={'meta': Metadata, 'rank': Str % Choices(q2_micom._build.\n RANKS), 'threads': Int % Range(1, None)}, outputs=[('metabolic_models',\n MetabolicModels[JSON])], input_descriptions={}, parameter_descriptions=\n {'meta': \n 'Metadata for the individual metabolic models in `folder`. Must contain the the following columns: %s.'\n % ', '.join(REQ_FIELDS), 'rank':\n 'The phylogenetic rank at which to summarize taxa.', 'threads':\n 'The number of threads to use when constructing models.'},\n output_descriptions={'metabolic_models': 'The metabolic model DB.'},\n name='Build a metabolic model database.', description=\n 'Constructs pan-genome models summarized to the specified rank and bundles the models to be used by MICOM. The chosen rank has to be the same you want as when building your community models. So you may not build genus-level community models with a species level database. You will only need to run this function if you want to build a custom DB. 
For many use cases downloading the prebuilt AGORA DB with the the preferred rank should be sufficient.'\n , citations=[citations['agora'], citations['agora_reply'], citations[\n 'micom']])\nplugin.methods.register_function(function=q2_micom.build, inputs={\n 'abundance': FeatureTable[Frequency | RelativeFrequency], 'taxonomy':\n FeatureData[Taxonomy], 'models': MetabolicModels[JSON]}, parameters={\n 'threads': Int % Range(1, None), 'cutoff': Float % Range(0.0, 1.0),\n 'strict': Bool, 'solver': Str % Choices('auto', 'cplex', 'osqp',\n 'gurobi')}, outputs=[('community_models', CommunityModels[Pickle])],\n input_descriptions={'abundance':\n 'The feature table containing the samples over which beta diversity should be computed.'\n , 'taxonomy': 'The taxonomy assignments for the ASVs in the table.',\n 'models': 'The single taxon model database to use.'},\n parameter_descriptions={'threads':\n 'The number of threads to use when constructing models.', 'cutoff':\n 'Taxa with a relative abundance smaller than this will be dropped.',\n 'strict':\n 'If true will collapse and match on all taxa ranks up to the specified rank (so on all higher ranks as well). If false (default) will match only on single taxa rank specified before. If using the strict option make sure ranks are named the same as in the used database.'\n , 'solver':\n 'The quadratic and linear programming solver that will be used in the models. Will pick an appropriate one by default. `cplex` and `gurobi` are commercial solvers with free academic licenses and have to be installed manually. 
See the docs for more info.'\n }, output_descriptions={'community_models': 'The community models.'},\n name='Build community models.', description=\n 'Builds the metabolic community models for a set of samples.',\n citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.minimal_medium, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'min_growth': Float %\n Range(0.0, None, inclusive_start=False), 'threads': Int % Range(1, None\n )}, outputs=[('medium', MicomMedium[Global])], input_descriptions={\n 'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n }, parameter_descriptions={'min_growth':\n 'The minimum achievable growth rate for each taxon. The returned growth medium enables all taxa to growth simultaneously with at least this rate.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'medium': 'The resulting growth medium.'}, name=\n 'Obtain a minimal growth medium for models.', description=\n 'Obtains a minimal growth medium for the community models. Please note that this medium does not have any biological feasibility. If you have any knowledge about metabolites present in the environment we recommend you construct the medium by hand.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.grow, inputs={'models':\n CommunityModels[Pickle], 'medium': MicomMedium[Global | PerSample]},\n parameters={'tradeoff': Float % Range(0.0, 1.0, inclusive_start=False,\n inclusive_end=True), 'strategy': Str % Choices('pFBA', 'minimal uptake',\n 'none'), 'threads': Int % Range(1, None)}, outputs=[('results',\n MicomResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff':\n 'The tradeoff parameter. 
This describes the balance between maximizing biomass production of the entire community and biomass production of individual taxa (ergo \"egoistic\" growth). A value of 1.0 would yield the best biomass production across the community but will only allow a few taxa to grow. Smaller values will allow more taxa to grow but will sacrifice overall biomass. A value of 0.5 (the default) has been shown to best reproduce growth rates in the human gut.'\n , 'strategy':\n 'The strategy used when choosing the solution in the optimal flux space. `minimal uptake` uses the fluxes that result in the smallest total uptake from the environment.`pFBA` uses parsimonious Flux Balance Analysis and thus will choose the fluxes with the lowest enzyme requirement for each taxon. `none` will return an arbitrary solution from the optimal flux space.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'results':\n 'The resulting taxa-level growth rates and metabolic exchange fluxes.'},\n name='Simulate growth for community models.', description=\n 'Simulates growth for a set of samples. Note that those are sample-specific or \"personalized\" simulations, so each taxon may have different growth rates and metabolite usage in each sample.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.tradeoff, inputs={\n 'models': CommunityModels[Pickle], 'medium': MicomMedium[Global |\n PerSample]}, parameters={'tradeoff_min': Float % Range(0.0, 1.0,\n inclusive_start=False), 'tradeoff_max': Float % Range(0.0, 1.0,\n inclusive_end=True), 'step': Float % Range(0.0, 1.0), 'threads': Int},\n outputs=[('results', TradeoffResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff_min':\n 'The minimum tradeoff parameter to test. 
This should be larger than 0.0 and smaller than 1.0.'\n , 'tradeoff_max':\n 'The maximum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0 and also belarger than `tradeoff_min`.'\n , 'step': 'The tradeoff value step size to use.', 'threads':\n 'The number of threads to use when simulating.'}, output_descriptions={\n 'results':\n 'The resulting taxa-level growth rates for varying tradeoff values.'},\n name='Test a variety of tradeoff values.', description=\n 'Simulates growth for a set of samples while varying the tradeoff between community and taxon biomass production. This can be used to characterize a good tradeoff value for a specific set of samples. Our study suggested that a good tradeoff value is the largest value that allows the majority of taxa in the sample to grow.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_models, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'metadata': Metadata,\n 'query': Str, 'exclude': Bool}, outputs=[('filtered_models',\n CommunityModels[Pickle])], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. 
This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_models':\n 'The filtered community models.'}, name=\n 'Filters models for a chosen set of samples.', description=\n 'Select a subset of samples and their community models using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_results, inputs={\n 'results': MicomResults}, parameters={'metadata': Metadata, 'query':\n Str, 'exclude': Bool}, outputs=[('filtered_results', MicomResults)],\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_results':\n 'The filtered simulation models.'}, name=\n 'Filters results for a chosen set of samples.', description=\n 'Select a subset of samples and their simulation results using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_growth, inputs=\n {'results': MicomResults}, parameters={}, input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={}, name='Plot taxa growth rates.',\n description=\n 'Plot predicted growth rates for each taxon in each sample. 
Only points with growing taxa are shown (growth rate sufficiently larger than zero).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_sample,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'cluster': Bool}, input_descriptions={\n 'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'cluster': 'Whether to perform clutering on samples and reactions.'},\n name='Plot gloabl exchange rates.', description=\n 'Plot predicted global exchange fluxes for each sample. When plotting imports this corresponds to the consumption fluxes for each metabolite that is available to the community. When plotting export this corresponds to the production fluxes for each metabolite.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_taxon,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'perplexity': Int % Range(2, None)},\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'perplexity':\n 'TSNE parameter. Relates to the number of neighbors used to calculate distances. Smaller values preserve more local structure and larger values preserve more global structure.'\n }, name='Plot niche overlap.', description=\n 'Plot growth or production niches. 
The entire set of import or export fluxes for each taxon in each sample is reduced onto a single point on a 2D plane.Taxa that are close to each other either consume similar metabolites (imports) or produce similar metabolites (exports).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_tradeoff,\n inputs={'results': TradeoffResults}, parameters={}, input_descriptions=\n {'results':\n 'A set of MICOM tradeoff analysis results. Contains predicted growth rates for each tested tradeoff.'\n }, parameter_descriptions={}, name='Plot tradeoff results.',\n description=\n 'Plot predicted growth rate distributions for each tradeoff as well as the fraction of growing taxa in each sample and tradeoff value. For a good tradeoff value one usually tries to find the largest tradeoff value that still aloows most taxa to grow.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.fit_phenotype,\n inputs={'results': MicomResults}, parameters={'metadata':\n MetadataColumn[Categorical | Numeric], 'variable_type': Str % Choices(\n 'binary', 'continuous'), 'flux_type': Str % Choices('import',\n 'production'), 'min_coef': Float % Range(0, None)}, input_descriptions=\n {'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'metadata': 'The metadata variable to use.',\n 'variable_type': 'The type of the phenotype variable.', 'flux_type':\n 'Which fluxes to use.', 'min_coef':\n 'Only coefficient with absolute values larger than this will be shown.'\n }, name='Test for differential production', description=\n 'Test for overall metabolite production differences between two groups.',\n citations=[citations['micom']])\nimportlib.import_module('q2_micom._transform')\n",
"step-5": "\"\"\"Plugin setup.\"\"\"\n\nimport importlib\nfrom qiime2.plugin import (\n Plugin,\n Str,\n Choices,\n Int,\n Bool,\n Range,\n Float,\n Metadata,\n MetadataColumn,\n Categorical,\n Numeric,\n Citations,\n)\n\nimport q2_micom\nfrom q2_micom._formats_and_types import (\n SBML,\n JSON,\n Pickle,\n SBMLFormat,\n SBMLDirectory,\n JSONFormat,\n JSONDirectory,\n CommunityModelFormat,\n CommunityModelManifest,\n CommunityModelDirectory,\n GrowthRates,\n Fluxes,\n MicomResultsDirectory,\n MicomMediumFile,\n MicomMediumDirectory,\n MetabolicModels,\n CommunityModels,\n MicomResults,\n MicomMedium,\n Global,\n PerSample,\n TradeoffResults,\n TradeoffResultsDirectory,\n REQ_FIELDS,\n)\nfrom q2_types.feature_data import FeatureData, Taxonomy\nfrom q2_types.feature_table import FeatureTable, RelativeFrequency, Frequency\n\ncitations = Citations.load(\"citations.bib\", package=\"q2_micom\")\n\n\nplugin = Plugin(\n name=\"micom\",\n version=q2_micom.__version__,\n website=\"https://github.com/micom-dev/q2-micom\",\n package=\"q2_micom\",\n description=(\"\"),\n short_description=\"Plugin for metabolic modeling of microbial communities.\",\n citations=[citations[\"micom\"]],\n)\n\nplugin.register_formats(\n SBMLFormat,\n SBMLDirectory,\n JSONFormat,\n JSONDirectory,\n CommunityModelFormat,\n CommunityModelManifest,\n CommunityModelDirectory,\n GrowthRates,\n Fluxes,\n MicomResultsDirectory,\n MicomMediumFile,\n MicomMediumDirectory,\n TradeoffResultsDirectory,\n)\nplugin.register_semantic_types(\n MetabolicModels, CommunityModels, MicomResults, MicomMedium\n)\nplugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)\nplugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)\nplugin.register_semantic_type_to_format(\n CommunityModels[Pickle], CommunityModelDirectory\n)\nplugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)\nplugin.register_semantic_type_to_format(TradeoffResults, 
TradeoffResultsDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[Global], MicomMediumDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[PerSample], MicomMediumDirectory)\n\nplugin.methods.register_function(\n function=q2_micom.db,\n inputs={},\n parameters={\n \"meta\": Metadata,\n \"rank\": Str % Choices(q2_micom._build.RANKS),\n \"threads\": Int % Range(1, None),\n },\n outputs=[(\"metabolic_models\", MetabolicModels[JSON])],\n input_descriptions={},\n parameter_descriptions={\n \"meta\": (\n \"Metadata for the individual metabolic models in `folder`. \"\n \"Must contain the the following columns: %s.\" % \", \".join(REQ_FIELDS)\n ),\n \"rank\": \"The phylogenetic rank at which to summarize taxa.\",\n \"threads\": \"The number of threads to use when constructing models.\",\n },\n output_descriptions={\"metabolic_models\": \"The metabolic model DB.\"},\n name=\"Build a metabolic model database.\",\n description=(\n \"Constructs pan-genome models summarized to the specified rank \"\n \"and bundles the models to be used by MICOM. \"\n \"The chosen rank has to be the same you want as when building your \"\n \"community models. \"\n \"So you may not build genus-level community models with a species \"\n \"level database. \"\n \"You will only need to run this function if you want to build a \"\n \"custom DB. 
For many use cases downloading the prebuilt AGORA DB \"\n \"with the the preferred rank should be sufficient.\"\n ),\n citations=[\n citations[\"agora\"],\n citations[\"agora_reply\"],\n citations[\"micom\"],\n ],\n)\n\nplugin.methods.register_function(\n function=q2_micom.build,\n inputs={\n \"abundance\": FeatureTable[Frequency | RelativeFrequency],\n \"taxonomy\": FeatureData[Taxonomy],\n \"models\": MetabolicModels[JSON],\n },\n parameters={\n \"threads\": Int % Range(1, None),\n \"cutoff\": Float % Range(0.0, 1.0),\n \"strict\": Bool,\n \"solver\": Str % Choices(\"auto\", \"cplex\", \"osqp\", \"gurobi\"),\n },\n outputs=[(\"community_models\", CommunityModels[Pickle])],\n input_descriptions={\n \"abundance\": (\n \"The feature table containing the samples over which beta \"\n \"diversity should be computed.\"\n ),\n \"taxonomy\": \"The taxonomy assignments for the ASVs in the table.\",\n \"models\": \"The single taxon model database to use.\",\n },\n parameter_descriptions={\n \"threads\": \"The number of threads to use when constructing models.\",\n \"cutoff\": \"Taxa with a relative abundance smaller than this will \"\n \"be dropped.\",\n \"strict\": (\n \"If true will collapse and match on all taxa ranks up to the \"\n \"specified rank (so on all higher ranks as well). If false \"\n \"(default) will match only on single taxa rank specified before. \"\n \"If using the strict option make sure ranks are named the same as in \"\n \"the used database.\"\n ),\n \"solver\": (\n \"The quadratic and linear programming solver that will be used \"\n \"in the models. Will pick an appropriate one by default. \"\n \"`cplex` and `gurobi` are commercial solvers with free academic \"\n \"licenses and have to be installed manually. 
See the docs for more info.\"\n ),\n },\n output_descriptions={\"community_models\": \"The community models.\"},\n name=\"Build community models.\",\n description=(\"Builds the metabolic community models for a set of samples.\"),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.minimal_medium,\n inputs={\"models\": CommunityModels[Pickle]},\n parameters={\n \"min_growth\": Float % Range(0.0, None, inclusive_start=False),\n \"threads\": Int % Range(1, None),\n },\n outputs=[(\"medium\", MicomMedium[Global])],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. \"\n \"This should contain on model for each sample.\"\n ),\n },\n parameter_descriptions={\n \"min_growth\": (\n \"The minimum achievable growth rate for each taxon. \"\n \"The returned growth medium enables all taxa to growth \"\n \"simultaneously with at least this rate.\"\n ),\n \"threads\": \"The number of threads to use when simulating.\",\n },\n output_descriptions={\"medium\": \"The resulting growth medium.\"},\n name=\"Obtain a minimal growth medium for models.\",\n description=(\n \"Obtains a minimal growth medium for the community models. \"\n \"Please note that this medium does not have any biological \"\n \"feasibility. If you have any knowledge about metabolites present \"\n \"in the environment we recommend you construct the medium by hand.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.grow,\n inputs={\n \"models\": CommunityModels[Pickle],\n \"medium\": MicomMedium[Global | PerSample],\n },\n parameters={\n \"tradeoff\": Float % Range(0.0, 1.0, inclusive_start=False, inclusive_end=True),\n \"strategy\": Str % Choices(\"pFBA\", \"minimal uptake\", \"none\"),\n \"threads\": Int % Range(1, None),\n },\n outputs=[(\"results\", MicomResults)],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. 
\"\n \"This should contain on model for each sample.\"\n ),\n \"medium\": \"The growth medium to use.\",\n },\n parameter_descriptions={\n \"tradeoff\": (\n \"The tradeoff parameter. This describes the balance \"\n \"between maximizing biomass production of the entire \"\n \"community and biomass production of individual taxa \"\n '(ergo \"egoistic\" growth). A value of 1.0 would yield '\n \"the best biomass production across the community but \"\n \"will only allow a few taxa to grow. Smaller values will \"\n \"allow more taxa to grow but will sacrifice overall \"\n \"biomass. A value of 0.5 (the default) has been shown to \"\n \"best reproduce growth rates in the human gut.\"\n ),\n \"strategy\": (\n \"The strategy used when choosing the solution in the \"\n \"optimal flux space. `minimal uptake` uses the fluxes \"\n \"that result in the smallest total uptake from the environment.\"\n \"`pFBA` uses parsimonious Flux Balance Analysis and thus will choose \"\n \"the fluxes with the lowest enzyme requirement for each taxon. \"\n \"`none` will return an arbitrary solution from the optimal flux space.\"\n ),\n \"threads\": \"The number of threads to use when simulating.\",\n },\n output_descriptions={\n \"results\": \"The resulting taxa-level growth rates and metabolic \"\n \"exchange fluxes.\"\n },\n name=\"Simulate growth for community models.\",\n description=(\n \"Simulates growth for a set of samples. 
Note that those are \"\n 'sample-specific or \"personalized\" simulations, so each taxon '\n \"may have different growth rates and metabolite usage in each sample.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.tradeoff,\n inputs={\n \"models\": CommunityModels[Pickle],\n \"medium\": MicomMedium[Global | PerSample],\n },\n parameters={\n \"tradeoff_min\": Float % Range(0.0, 1.0, inclusive_start=False),\n \"tradeoff_max\": Float % Range(0.0, 1.0, inclusive_end=True),\n \"step\": Float % Range(0.0, 1.0),\n \"threads\": Int,\n },\n outputs=[(\"results\", TradeoffResults)],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. \"\n \"This should contain on model for each sample.\"\n ),\n \"medium\": \"The growth medium to use.\",\n },\n parameter_descriptions={\n \"tradeoff_min\": \"The minimum tradeoff parameter to test. This should \"\n \"be larger than 0.0 and smaller than 1.0.\",\n \"tradeoff_max\": \"The maximum tradeoff parameter to test. This should \"\n \"be larger than 0.0 and smaller than 1.0 and also be\"\n \"larger than `tradeoff_min`.\",\n \"step\": \"The tradeoff value step size to use.\",\n \"threads\": \"The number of threads to use when simulating.\",\n },\n output_descriptions={\n \"results\": \"The resulting taxa-level growth rates for varying \"\n \"tradeoff values.\"\n },\n name=\"Test a variety of tradeoff values.\",\n description=(\n \"Simulates growth for a set of samples while varying the tradeoff \"\n \"between community and taxon biomass production. \"\n \"This can be used to characterize a good tradeoff value for a \"\n \"specific set of samples. 
Our study suggested that a good tradeoff \"\n \"value is the largest value that allows the majority of taxa in the \"\n \"sample to grow.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.filter_models,\n inputs={\"models\": CommunityModels[Pickle]},\n parameters={\"metadata\": Metadata, \"query\": Str, \"exclude\": Bool},\n outputs=[(\"filtered_models\", CommunityModels[Pickle])],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. \"\n \"This should contain on model for each sample.\"\n )\n },\n parameter_descriptions={\n \"metadata\": \"The metadata for the samples to keep or to query.\",\n \"query\": (\n \"A pandas query expression to select samples from the metadata. \"\n \"This will call `query` on the metadata DataFrame, so you can test \"\n \"your query by loading our metadata into a pandas DataFrame.\"\n ),\n \"exclude\": (\n \"If true will use all samples *except* the ones selected \"\n \"by metadata and query.\"\n ),\n },\n output_descriptions={\"filtered_models\": \"The filtered community models.\"},\n name=\"Filters models for a chosen set of samples.\",\n description=(\n \"Select a subset of samples and their community models using a list \"\n \"of samples or a pandas query expression.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.filter_results,\n inputs={\"results\": MicomResults},\n parameters={\"metadata\": Metadata, \"query\": Str, \"exclude\": Bool},\n outputs=[(\"filtered_results\", MicomResults)],\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. \"\n \"Contains predicted groath rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={\n \"metadata\": \"The metadata for the samples to keep or to query.\",\n \"query\": (\n \"A pandas query expression to select samples from the metadata. 
\"\n \"This will call `query` on the metadata DataFrame, so you can test \"\n \"your query by loading our metadata into a pandas DataFrame.\"\n ),\n \"exclude\": (\n \"If true will use all samples *except* the ones selected \"\n \"by metadata and query.\"\n ),\n },\n output_descriptions={\"filtered_results\": \"The filtered simulation models.\"},\n name=\"Filters results for a chosen set of samples.\",\n description=(\n \"Select a subset of samples and their simulation results using a list \"\n \"of samples or a pandas query expression.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.plot_growth,\n inputs={\"results\": MicomResults},\n parameters={},\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. \"\n \"Contains predicted groath rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={},\n name=\"Plot taxa growth rates.\",\n description=(\n \"Plot predicted growth rates for each taxon in each sample. \"\n \"Only points with growing taxa are shown (growth rate sufficiently \"\n \"larger than zero).\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.exchanges_per_sample,\n inputs={\"results\": MicomResults},\n parameters={\n \"direction\": Str % Choices(\"import\", \"export\"),\n \"cluster\": Bool,\n },\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. \"\n \"Contains predicted groath rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={\n \"direction\": \"The direction of the flux.\",\n \"cluster\": \"Whether to perform clutering on samples and reactions.\",\n },\n name=\"Plot gloabl exchange rates.\",\n description=(\n \"Plot predicted global exchange fluxes for each sample. \"\n \"When plotting imports this corresponds to the consumption \"\n \"fluxes for each metabolite that is available to the community. 
\"\n \"When plotting export this corresponds to the production fluxes \"\n \"for each metabolite.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\n\nplugin.visualizers.register_function(\n function=q2_micom.exchanges_per_taxon,\n inputs={\"results\": MicomResults},\n parameters={\n \"direction\": Str % Choices(\"import\", \"export\"),\n \"perplexity\": Int % Range(2, None),\n },\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. \"\n \"Contains predicted growth rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={\n \"direction\": \"The direction of the flux.\",\n \"perplexity\": \"TSNE parameter. Relates to the number of neighbors used to \"\n \"calculate distances. Smaller values preserve more local \"\n \"structure and larger values preserve more global structure.\",\n },\n name=\"Plot niche overlap.\",\n description=(\n \"Plot growth or production niches. \"\n \"The entire set of import or export fluxes for each taxon in each \"\n \"sample is reduced onto a single point on a 2D plane.\"\n \"Taxa that are close to each other either consume similar metabolites \"\n \" (imports) or produce similar metabolites (exports).\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.plot_tradeoff,\n inputs={\"results\": TradeoffResults},\n parameters={},\n input_descriptions={\n \"results\": (\n \"A set of MICOM tradeoff analysis results. \"\n \"Contains predicted growth rates for each tested tradeoff.\"\n )\n },\n parameter_descriptions={},\n name=\"Plot tradeoff results.\",\n description=(\n \"Plot predicted growth rate distributions for each tradeoff as \"\n \"well as the fraction of growing taxa in each sample and tradeoff \"\n \"value. 
For a good tradeoff value one usually tries to find the \"\n \"largest tradeoff value that still aloows most taxa to grow.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.fit_phenotype,\n inputs={\"results\": MicomResults},\n parameters={\n \"metadata\": MetadataColumn[Categorical | Numeric],\n \"variable_type\": Str % Choices(\"binary\", \"continuous\"),\n \"flux_type\": Str % Choices(\"import\", \"production\"),\n \"min_coef\": Float % Range(0, None),\n },\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. \"\n \"Contains predicted growth rates and exchange fluxes.\"\n ),\n },\n parameter_descriptions={\n \"metadata\": \"The metadata variable to use.\",\n \"variable_type\": \"The type of the phenotype variable.\",\n \"flux_type\": \"Which fluxes to use.\",\n \"min_coef\": (\n \"Only coefficient with absolute values larger than this \" \"will be shown.\"\n ),\n },\n name=\"Test for differential production\",\n description=(\n \"Test for overall metabolite production differences \" \"between two groups.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nimportlib.import_module(\"q2_micom._transform\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def add2nums(a, b):
    """Return the sum of ``a`` and ``b``."""
    total = a + b
    return total
|
normal
|
{
"blob_id": "6e2fb9d498294a580426ff408183f7beec135329",
"index": 5592,
"step-1": "<mask token>\n",
"step-2": "def add2nums(a, b):\n return a + b\n",
"step-3": "# function to add two numbers\ndef add2nums(a,b):\n return a+b\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import time
import datetime
import mx
from openerp.report import report_sxw
class course_form(report_sxw.rml_parse):
    """RML parser for the approved-courses HR training report.

    Exposes helpers to the RML template via ``localcontext``:
      * ``time``   -- the stdlib ``time`` module
      * ``time1``  -- the report year (see ``_get_time``)
      * ``course`` -- courses belonging to the selected training categories
      * ``line``   -- attendee rows for a single course
      * ``user``   -- current user's name, or the company logo for headers
    """

    def __init__(self, cr, uid, name, context):
        super(course_form, self).__init__(cr, uid, name, context)
        self.localcontext.update({
            'time': time,
            'time1': self._get_time,
            'course': self._get_course,
            'line': self._get_data,
            'user': self._get_user,
        })
        # Default report year; overridden in _get_data when the wizard
        # supplies a period start date.
        self.year = int(time.strftime('%Y'))

    def _get_user(self, data, header=False):
        """Return the company logo (``header=True``) or the current user's name."""
        if header:
            return self.pool.get('res.company').browse(
                self.cr, self.uid, data['form']['company_id'][0]).logo
        return self.pool.get('res.users').browse(self.cr, self.uid, self.uid).name

    def _get_course(self, data):
        """Return ``[{'course_id': .., 'course_name': ..}]`` for the selected categories."""
        training_category_obj = self.pool.get('hr.training.category')
        training_category_id = data['training_category_id']
        # NOTE(review): when no category is selected this falls back to
        # browse(.., []), an empty sequence, which makes the SQL "in ()"
        # clause invalid -- presumably the wizard always passes at least one
        # category; confirm against the caller.
        training_category_id = not training_category_id and training_category_obj.browse(self.cr, self.uid, []) or training_category_id
        self.cr.execute(
            "select distinct c.id as course_id, c.name as course_name "
            "from hr_training_course as c "
            "where c.training_category_id in %s",
            (tuple(training_category_id),))
        return self.cr.dictfetchall()

    def _get_data(self, data, course_id):
        """Return attendee rows for ``course_id`` within the wizard's period.

        ``data`` provides ``date_from``/``date_to`` (either may be empty) and
        ``type`` ('3' selects courses held inside the country, anything else
        outside).  The optional date bounds are appended to one base query
        instead of duplicating the whole statement per date combination.
        """
        date1 = data['date_from']
        date2 = data['date_to']
        side = data['type'] == '3' and 'inside' or 'outside'
        # Remember the report year taken from the period start, if given.
        self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1).year or self.year

        query = (
            "select distinct emp.marital as marital, "
            "t.end_date as end, "
            "t.start_date as start, "
            "c.name as country, "
            "t.course_type as type, "
            "t.location as location, "
            "res.name as name "
            "from hr_employee_training t "
            "left join hr_employee_training_line line on (line.training_employee_id=t.id) "
            "left join hr_employee emp on (emp.id=line.employee_id) "
            "left join hr_job jop on (jop.id=emp.job_id) "
            "left join resource_resource res on (res.id=emp.resource_id) "
            "left join hr_training_course cou on (cou.id=t.course_id) "
            "left join res_country c on (t.country_id=c.id) "
            "where t.course_id = %s "
            "and t.type = 'hr.approved.course' "
            "and t.training_place = %s")
        params = [tuple([course_id]), side]
        if date1:
            query += " and t.start_date >= %s"
            params.append(date1)
        if date2:
            query += " and t.end_date <= %s"
            params.append(date2)
        self.cr.execute(query, tuple(params))
        return self.cr.dictfetchall()

    def _get_time(self):
        """Return the year shown on the report header."""
        return self.year
report_sxw.report_sxw('report.course.outside', 'hr.employee.training', 'addons/hr_ntc_custom/report/training.rml' ,parser=course_form ,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
normal
|
{
"blob_id": "c4fcca61e560046c77046079fb305be8c883653b",
"index": 2077,
"step-1": "<mask token>\n\n\nclass course_form(report_sxw.rml_parse):\n <mask token>\n <mask token>\n\n def _get_course(self, data):\n training_category_obj = self.pool.get('hr.training.category')\n training_category_id = data['training_category_id']\n training_category_id = (not training_category_id and\n training_category_obj.browse(self.cr, self.uid, []) or\n training_category_id)\n self.cr.execute(\n ' select distinct c.id as course_id , c.name as course_name from hr_training_course as c where c.training_category_id in %s'\n , (tuple(training_category_id),))\n res = self.cr.dictfetchall()\n return res\n\n def _get_data(self, data, course_id):\n date1 = data['date_from']\n date2 = data['date_to']\n side = data['type'] == '3' and 'inside' or 'outside'\n self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1\n ).year or self.year\n res = []\n if date1 and date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date1, date2))\n elif date1 and not date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join 
hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s\"\n , (tuple([course_id]), side, date1))\n elif date2 and not date1:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date2))\n else:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s \"\n , (tuple([course_id]), side))\n res = self.cr.dictfetchall()\n return res\n\n def _get_time(self):\n return self.year\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass course_form(report_sxw.rml_parse):\n\n def __init__(self, cr, uid, name, context):\n super(course_form, self).__init__(cr, uid, name, context)\n self.localcontext.update({'time': time, 'time1': self._get_time,\n 'course': self._get_course, 'line': self._get_data, 'user':\n self._get_user})\n self.year = int(time.strftime('%Y'))\n\n def _get_user(self, data, header=False):\n if header:\n return self.pool.get('res.company').browse(self.cr, self.uid,\n data['form']['company_id'][0]).logo\n else:\n return self.pool.get('res.users').browse(self.cr, self.uid,\n self.uid).name\n\n def _get_course(self, data):\n training_category_obj = self.pool.get('hr.training.category')\n training_category_id = data['training_category_id']\n training_category_id = (not training_category_id and\n training_category_obj.browse(self.cr, self.uid, []) or\n training_category_id)\n self.cr.execute(\n ' select distinct c.id as course_id , c.name as course_name from hr_training_course as c where c.training_category_id in %s'\n , (tuple(training_category_id),))\n res = self.cr.dictfetchall()\n return res\n\n def _get_data(self, data, course_id):\n date1 = data['date_from']\n date2 = data['date_to']\n side = data['type'] == '3' and 'inside' or 'outside'\n self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1\n ).year or self.year\n res = []\n if date1 and date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type 
='hr.approved.course' and t.training_place = %s and t.start_date >= %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date1, date2))\n elif date1 and not date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s\"\n , (tuple([course_id]), side, date1))\n elif date2 and not date1:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date2))\n else:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join 
resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s \"\n , (tuple([course_id]), side))\n res = self.cr.dictfetchall()\n return res\n\n def _get_time(self):\n return self.year\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass course_form(report_sxw.rml_parse):\n\n def __init__(self, cr, uid, name, context):\n super(course_form, self).__init__(cr, uid, name, context)\n self.localcontext.update({'time': time, 'time1': self._get_time,\n 'course': self._get_course, 'line': self._get_data, 'user':\n self._get_user})\n self.year = int(time.strftime('%Y'))\n\n def _get_user(self, data, header=False):\n if header:\n return self.pool.get('res.company').browse(self.cr, self.uid,\n data['form']['company_id'][0]).logo\n else:\n return self.pool.get('res.users').browse(self.cr, self.uid,\n self.uid).name\n\n def _get_course(self, data):\n training_category_obj = self.pool.get('hr.training.category')\n training_category_id = data['training_category_id']\n training_category_id = (not training_category_id and\n training_category_obj.browse(self.cr, self.uid, []) or\n training_category_id)\n self.cr.execute(\n ' select distinct c.id as course_id , c.name as course_name from hr_training_course as c where c.training_category_id in %s'\n , (tuple(training_category_id),))\n res = self.cr.dictfetchall()\n return res\n\n def _get_data(self, data, course_id):\n date1 = data['date_from']\n date2 = data['date_to']\n side = data['type'] == '3' and 'inside' or 'outside'\n self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1\n ).year or self.year\n res = []\n if date1 and date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type 
='hr.approved.course' and t.training_place = %s and t.start_date >= %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date1, date2))\n elif date1 and not date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s\"\n , (tuple([course_id]), side, date1))\n elif date2 and not date1:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date2))\n else:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join 
resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s \"\n , (tuple([course_id]), side))\n res = self.cr.dictfetchall()\n return res\n\n def _get_time(self):\n return self.year\n\n\nreport_sxw.report_sxw('report.course.outside', 'hr.employee.training',\n 'addons/hr_ntc_custom/report/training.rml', parser=course_form, header=\n False)\n",
"step-4": "import time\nimport datetime\nimport mx\nfrom openerp.report import report_sxw\n\n\nclass course_form(report_sxw.rml_parse):\n\n def __init__(self, cr, uid, name, context):\n super(course_form, self).__init__(cr, uid, name, context)\n self.localcontext.update({'time': time, 'time1': self._get_time,\n 'course': self._get_course, 'line': self._get_data, 'user':\n self._get_user})\n self.year = int(time.strftime('%Y'))\n\n def _get_user(self, data, header=False):\n if header:\n return self.pool.get('res.company').browse(self.cr, self.uid,\n data['form']['company_id'][0]).logo\n else:\n return self.pool.get('res.users').browse(self.cr, self.uid,\n self.uid).name\n\n def _get_course(self, data):\n training_category_obj = self.pool.get('hr.training.category')\n training_category_id = data['training_category_id']\n training_category_id = (not training_category_id and\n training_category_obj.browse(self.cr, self.uid, []) or\n training_category_id)\n self.cr.execute(\n ' select distinct c.id as course_id , c.name as course_name from hr_training_course as c where c.training_category_id in %s'\n , (tuple(training_category_id),))\n res = self.cr.dictfetchall()\n return res\n\n def _get_data(self, data, course_id):\n date1 = data['date_from']\n date2 = data['date_to']\n side = data['type'] == '3' and 'inside' or 'outside'\n self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1\n ).year or self.year\n res = []\n if date1 and date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c 
on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date1, date2))\n elif date1 and not date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s\"\n , (tuple([course_id]), side, date1))\n elif date2 and not date1:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date2))\n else:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) 
left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s \"\n , (tuple([course_id]), side))\n res = self.cr.dictfetchall()\n return res\n\n def _get_time(self):\n return self.year\n\n\nreport_sxw.report_sxw('report.course.outside', 'hr.employee.training',\n 'addons/hr_ntc_custom/report/training.rml', parser=course_form, header=\n False)\n",
"step-5": "import time\nimport datetime\nimport mx\nfrom openerp.report import report_sxw\n\n\nclass course_form(report_sxw.rml_parse):\n def __init__(self, cr, uid, name, context):\n super(course_form, self).__init__(cr, uid, name, context)\n self.localcontext.update({\n 'time': time,\n 'time1': self._get_time,\n 'course':self._get_course,\n 'line':self._get_data,\n 'user':self._get_user,\n })\n self.year = int(time.strftime('%Y'))\n\n def _get_user(self,data, header=False):\n if header:\n return self.pool.get('res.company').browse(self.cr, self.uid, data['form']['company_id'][0]).logo\n else:\n return self.pool.get('res.users').browse(self.cr, self.uid, self.uid).name\n\n def _get_course(self,data):\n training_category_obj = self.pool.get('hr.training.category')\n training_category_id = data['training_category_id']\n training_category_id = not training_category_id and training_category_obj.browse(self.cr,self.uid,[]) or training_category_id\n self.cr.execute(\" select distinct c.id as course_id , c.name as course_name \"\\\n \"from hr_training_course as c \"\\\n \"where c.training_category_id in %s\",(tuple(training_category_id),))\n res = self.cr.dictfetchall()\n return res\n\n def _get_data(self, data,course_id):\n date1 = data['date_from']\n date2 = data['date_to']\n side = data['type'] == '3' and 'inside' or 'outside'\n self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1).year or self.year\n res=[]\n if date1 and date2:\n self.cr.execute(\" select distinct emp.marital as marital, \"\\\n \"t.end_date as end,\"\\\n \"t.start_date as start,\"\\\n \"c.name as country,\"\\\n \"t.course_type as type,\"\\\n \"t.location as location,\"\\\n \"res.name as name \" \\\n \"from hr_employee_training t \"\\\n \"left join hr_employee_training_line line on (line.training_employee_id=t.id) \"\\\n \"left join hr_employee emp on (emp.id=line.employee_id) \"\\\n \"left join hr_job jop on (jop.id=emp.job_id) \"\\\n \"left join resource_resource res on 
(res.id=emp.resource_id) \"\\\n \"left join hr_training_course cou on(cou.id=t.course_id) \"\\\n \"left join res_country c on(t.country_id=c.id) \"\\\n \"where t.course_id = %s and \"\\\n \"t.type ='hr.approved.course' and t.training_place = %s and \"\\\n \"t.start_date >= %s and t.end_date <= %s \",(tuple([course_id]),side,date1,date2))\n elif date1 and not date2:\n self.cr.execute(\" select distinct emp.marital as marital, \"\\\n \"t.end_date as end,\"\\\n \"t.start_date as start,\"\\\n \"c.name as country,\"\\\n \"t.course_type as type,\"\\\n \"t.location as location,\"\\\n \"res.name as name \" \\\n \"from hr_employee_training t \"\\\n \"left join hr_employee_training_line line on (line.training_employee_id=t.id) \"\\\n \"left join hr_employee emp on (emp.id=line.employee_id) \"\\\n \"left join hr_job jop on (jop.id=emp.job_id) \"\\\n \"left join resource_resource res on (res.id=emp.resource_id) \"\\\n \"left join hr_training_course cou on(cou.id=t.course_id) \"\\\n \"left join res_country c on(t.country_id=c.id) \"\\\n \"where t.course_id = %s and \"\\\n \"t.type ='hr.approved.course' and t.training_place = %s and \"\\\n \"t.start_date >= %s\",(tuple([course_id]),side,date1))\n elif date2 and not date1:\n self.cr.execute(\" select distinct emp.marital as marital, \"\\\n \"t.end_date as end,\"\\\n \"t.start_date as start,\"\\\n \"c.name as country,\"\\\n \"t.course_type as type,\"\\\n \"t.location as location,\"\\\n \"res.name as name \" \\\n \"from hr_employee_training t \"\\\n \"left join hr_employee_training_line line on (line.training_employee_id=t.id) \"\\\n \"left join hr_employee emp on (emp.id=line.employee_id) \"\\\n \"left join hr_job jop on (jop.id=emp.job_id) \"\\\n \"left join resource_resource res on (res.id=emp.resource_id) \"\\\n \"left join hr_training_course cou on(cou.id=t.course_id) \"\\\n \"left join res_country c on(t.country_id=c.id) \"\\\n \"where t.course_id = %s and \"\\\n \"t.type ='hr.approved.course' and t.training_place = %s and 
\"\\\n \"t.end_date <= %s \",(tuple([course_id]),side,date2))\n else:\n self.cr.execute(\" select distinct emp.marital as marital, \"\\\n \"t.end_date as end,\"\\\n \"t.start_date as start,\"\\\n \"c.name as country,\"\\\n \"t.course_type as type,\"\\\n \"t.location as location,\"\\\n \"res.name as name \" \\\n \"from hr_employee_training t \"\\\n \"left join hr_employee_training_line line on (line.training_employee_id=t.id) \"\\\n \"left join hr_employee emp on (emp.id=line.employee_id) \"\\\n \"left join hr_job jop on (jop.id=emp.job_id) \"\\\n \"left join resource_resource res on (res.id=emp.resource_id) \"\\\n \"left join hr_training_course cou on(cou.id=t.course_id) \"\\\n \"left join res_country c on(t.country_id=c.id) \"\\\n \"where t.course_id = %s and \"\\\n \"t.type ='hr.approved.course' and t.training_place = %s \",(tuple([course_id]),side))\n\n \n res=self.cr.dictfetchall()\n\n return res\n\n \n def _get_time(self):\n return self.year\n\nreport_sxw.report_sxw('report.course.outside', 'hr.employee.training', 'addons/hr_ntc_custom/report/training.rml' ,parser=course_form ,header=False)\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')
ax.plot(res[key]['M'], res[key]['smile'])
ax.text(0.99, 0.99, '$\\tau$ = ' + str(key), horizontalalignment=
'right', verticalalignment='top', transform=ax.transAxes)
axes.flatten()[0].set_ylabel('implied volatility')
axes.flatten()[4].set_ylabel('implied volatility')
axes.flatten()[4].set_xlabel('moneyness')
axes.flatten()[5].set_xlabel('moneyness')
axes.flatten()[6].set_xlabel('moneyness')
axes.flatten()[7].set_xlabel('moneyness')
plt.tight_layout()
fig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)
<|reserved_special_token_0|>
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['K'][::-1], res[key]['q'])
ax.text(0.99, 0.99, '$\\tau$ = ' + str(key), horizontalalignment=
'right', verticalalignment='top', transform=ax.transAxes)
ax.set_yticks([])
axes.flatten()[0].set_ylabel('risk neutral density')
axes.flatten()[4].set_ylabel('risk neutral density')
axes.flatten()[4].set_xlabel('spot price')
axes.flatten()[5].set_xlabel('spot price')
axes.flatten()[6].set_xlabel('spot price')
axes.flatten()[7].set_xlabel('spot price')
plt.tight_layout()
fig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)
<|reserved_special_token_0|>
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['M'], res[key]['smile'])
ax.plot(res[key]['M'], res[key]['first'])
ax.plot(res[key]['M'], res[key]['second'])
ax.text(0.99, 0.01, '$\\tau$ = ' + str(key), horizontalalignment=
'right', verticalalignment='bottom', transform=ax.transAxes)
ax.set_yticks([])
axes.flatten()[0].set_ylabel('implied volatility')
axes.flatten()[4].set_ylabel('implied volatility')
axes.flatten()[4].set_xlabel('moneyness')
axes.flatten()[5].set_xlabel('moneyness')
axes.flatten()[6].set_xlabel('moneyness')
axes.flatten()[7].set_xlabel('moneyness')
plt.tight_layout()
fig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)),
transparent=True)
for key in res:
s = res[key]
fig4, axes = plt.subplots(1, 3, figsize=(10, 4))
ax = axes[0]
ax.plot(s['df'].M, s['df'].iv, '.', c='r')
ax.plot(s['M'], s['smile'])
ax.set_xlabel('moneyness')
ax.set_ylabel('implied volatility')
ax = axes[1]
ax.plot(s['M'], s['smile'])
ax.plot(s['M'], s['first'])
ax.plot(s['M'], s['second'])
ax.set_xlabel('moneyness')
ax.set_ylabel('implied volatility')
ax = axes[2]
ax.plot(s['S'], s['q'])
ax.set_xlabel('spot price')
ax.set_ylabel('risk neutral density')
ax.set_yticks([])
plt.tight_layout()
fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)),
transparent=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',
'CrypOpt_RiskNeutralDensity')
data_path = os.path.join(cwd, 'data') + '/'
day = '2020-03-11'
res = pickle.load(open(data_path + 'results_{}.pkl'.format(day), 'rb'))
fig1, axes = plt.subplots(2, 4, figsize=(10, 7))
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')
ax.plot(res[key]['M'], res[key]['smile'])
ax.text(0.99, 0.99, '$\\tau$ = ' + str(key), horizontalalignment=
'right', verticalalignment='top', transform=ax.transAxes)
axes.flatten()[0].set_ylabel('implied volatility')
axes.flatten()[4].set_ylabel('implied volatility')
axes.flatten()[4].set_xlabel('moneyness')
axes.flatten()[5].set_xlabel('moneyness')
axes.flatten()[6].set_xlabel('moneyness')
axes.flatten()[7].set_xlabel('moneyness')
plt.tight_layout()
fig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)
fig2, axes = plt.subplots(2, 4, figsize=(10, 7))
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['K'][::-1], res[key]['q'])
ax.text(0.99, 0.99, '$\\tau$ = ' + str(key), horizontalalignment=
'right', verticalalignment='top', transform=ax.transAxes)
ax.set_yticks([])
axes.flatten()[0].set_ylabel('risk neutral density')
axes.flatten()[4].set_ylabel('risk neutral density')
axes.flatten()[4].set_xlabel('spot price')
axes.flatten()[5].set_xlabel('spot price')
axes.flatten()[6].set_xlabel('spot price')
axes.flatten()[7].set_xlabel('spot price')
plt.tight_layout()
fig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)
fig3, axes = plt.subplots(2, 4, figsize=(10, 7))
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['M'], res[key]['smile'])
ax.plot(res[key]['M'], res[key]['first'])
ax.plot(res[key]['M'], res[key]['second'])
ax.text(0.99, 0.01, '$\\tau$ = ' + str(key), horizontalalignment=
'right', verticalalignment='bottom', transform=ax.transAxes)
ax.set_yticks([])
axes.flatten()[0].set_ylabel('implied volatility')
axes.flatten()[4].set_ylabel('implied volatility')
axes.flatten()[4].set_xlabel('moneyness')
axes.flatten()[5].set_xlabel('moneyness')
axes.flatten()[6].set_xlabel('moneyness')
axes.flatten()[7].set_xlabel('moneyness')
plt.tight_layout()
fig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)),
transparent=True)
for key in res:
s = res[key]
fig4, axes = plt.subplots(1, 3, figsize=(10, 4))
ax = axes[0]
ax.plot(s['df'].M, s['df'].iv, '.', c='r')
ax.plot(s['M'], s['smile'])
ax.set_xlabel('moneyness')
ax.set_ylabel('implied volatility')
ax = axes[1]
ax.plot(s['M'], s['smile'])
ax.plot(s['M'], s['first'])
ax.plot(s['M'], s['second'])
ax.set_xlabel('moneyness')
ax.set_ylabel('implied volatility')
ax = axes[2]
ax.plot(s['S'], s['q'])
ax.set_xlabel('spot price')
ax.set_ylabel('risk neutral density')
ax.set_yticks([])
plt.tight_layout()
fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)),
transparent=True)
<|reserved_special_token_1|>
import os
import pickle
from matplotlib import pyplot as plt
cwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',
'CrypOpt_RiskNeutralDensity')
data_path = os.path.join(cwd, 'data') + '/'
day = '2020-03-11'
res = pickle.load(open(data_path + 'results_{}.pkl'.format(day), 'rb'))
fig1, axes = plt.subplots(2, 4, figsize=(10, 7))
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')
ax.plot(res[key]['M'], res[key]['smile'])
ax.text(0.99, 0.99, '$\\tau$ = ' + str(key), horizontalalignment=
'right', verticalalignment='top', transform=ax.transAxes)
axes.flatten()[0].set_ylabel('implied volatility')
axes.flatten()[4].set_ylabel('implied volatility')
axes.flatten()[4].set_xlabel('moneyness')
axes.flatten()[5].set_xlabel('moneyness')
axes.flatten()[6].set_xlabel('moneyness')
axes.flatten()[7].set_xlabel('moneyness')
plt.tight_layout()
fig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)
fig2, axes = plt.subplots(2, 4, figsize=(10, 7))
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['K'][::-1], res[key]['q'])
ax.text(0.99, 0.99, '$\\tau$ = ' + str(key), horizontalalignment=
'right', verticalalignment='top', transform=ax.transAxes)
ax.set_yticks([])
axes.flatten()[0].set_ylabel('risk neutral density')
axes.flatten()[4].set_ylabel('risk neutral density')
axes.flatten()[4].set_xlabel('spot price')
axes.flatten()[5].set_xlabel('spot price')
axes.flatten()[6].set_xlabel('spot price')
axes.flatten()[7].set_xlabel('spot price')
plt.tight_layout()
fig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)
fig3, axes = plt.subplots(2, 4, figsize=(10, 7))
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['M'], res[key]['smile'])
ax.plot(res[key]['M'], res[key]['first'])
ax.plot(res[key]['M'], res[key]['second'])
ax.text(0.99, 0.01, '$\\tau$ = ' + str(key), horizontalalignment=
'right', verticalalignment='bottom', transform=ax.transAxes)
ax.set_yticks([])
axes.flatten()[0].set_ylabel('implied volatility')
axes.flatten()[4].set_ylabel('implied volatility')
axes.flatten()[4].set_xlabel('moneyness')
axes.flatten()[5].set_xlabel('moneyness')
axes.flatten()[6].set_xlabel('moneyness')
axes.flatten()[7].set_xlabel('moneyness')
plt.tight_layout()
fig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)),
transparent=True)
for key in res:
s = res[key]
fig4, axes = plt.subplots(1, 3, figsize=(10, 4))
ax = axes[0]
ax.plot(s['df'].M, s['df'].iv, '.', c='r')
ax.plot(s['M'], s['smile'])
ax.set_xlabel('moneyness')
ax.set_ylabel('implied volatility')
ax = axes[1]
ax.plot(s['M'], s['smile'])
ax.plot(s['M'], s['first'])
ax.plot(s['M'], s['second'])
ax.set_xlabel('moneyness')
ax.set_ylabel('implied volatility')
ax = axes[2]
ax.plot(s['S'], s['q'])
ax.set_xlabel('spot price')
ax.set_ylabel('risk neutral density')
ax.set_yticks([])
plt.tight_layout()
fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)),
transparent=True)
<|reserved_special_token_1|>
import os
import pickle
from matplotlib import pyplot as plt

# Location of the RND/HD analysis code and the pickled estimation results.
cwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',
    'CrypOpt_RiskNeutralDensity')
data_path = os.path.join(cwd, 'data') + '/'

# Trading day whose fitted smiles / densities are plotted below.
day = '2020-03-11'

# Load the per-maturity results mapping {tau: {...}} produced by the
# estimation step.  Use a context manager so the file handle is closed
# deterministically (the original `pickle.load(open(...))` leaked it).
with open(data_path + 'results_{}.pkl'.format(day), 'rb') as _results_file:
    res = pickle.load(_results_file)
# --- Figure 1: observed IV points and the fitted smile, one panel per tau --
fig1, axes = plt.subplots(2, 4, figsize=(10, 7))
panels = axes.flatten()
for key, panel in zip(sorted(res), panels):
    print(key, panel)
    entry = res[key]
    panel.plot(entry['df'].M, entry['df'].iv, '.')
    panel.plot(entry['M'], entry['smile'])
    panel.text(0.99, 0.99, r'$\tau$ = ' + str(key),
               horizontalalignment='right',
               verticalalignment='top',
               transform=panel.transAxes)
# Label only the left column (y) and the bottom row (x).
for idx in (0, 4):
    panels[idx].set_ylabel('implied volatility')
for idx in (4, 5, 6, 7):
    panels[idx].set_xlabel('moneyness')
plt.tight_layout()
fig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)
# --- Figure 2: estimated risk-neutral density, one panel per tau -----------
fig2, axes = plt.subplots(2, 4, figsize=(10, 7))
panels = axes.flatten()
for key, panel in zip(sorted(res), panels):
    print(key, panel)
    panel.plot(res[key]['K'][::-1], res[key]['q'])
    panel.text(0.99, 0.99, r'$\tau$ = ' + str(key),
               horizontalalignment='right',
               verticalalignment='top',
               transform=panel.transAxes)
    panel.set_yticks([])
# Label only the left column (y) and the bottom row (x).
for idx in (0, 4):
    panels[idx].set_ylabel('risk neutral density')
for idx in (4, 5, 6, 7):
    panels[idx].set_xlabel('spot price')
plt.tight_layout()
fig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)
# --- Figure 3: fitted smile with its first and second derivatives ----------
fig3, axes = plt.subplots(2, 4, figsize=(10, 7))
panels = axes.flatten()
for key, panel in zip(sorted(res), panels):
    print(key, panel)
    curves = res[key]
    panel.plot(curves['M'], curves['smile'])
    panel.plot(curves['M'], curves['first'])
    panel.plot(curves['M'], curves['second'])
    panel.text(0.99, 0.01, r'$\tau$ = ' + str(key),
               horizontalalignment='right',
               verticalalignment='bottom',
               transform=panel.transAxes)
    panel.set_yticks([])
# Label only the left column (y) and the bottom row (x).
for idx in (0, 4):
    panels[idx].set_ylabel('implied volatility')
for idx in (4, 5, 6, 7):
    panels[idx].set_xlabel('moneyness')
plt.tight_layout()
fig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)), transparent=True)
# --- Per-maturity summary: smile fit, derivatives, and resulting RND -------
for key, s in res.items():
    fig4, (ax_fit, ax_deriv, ax_rnd) = plt.subplots(1, 3, figsize=(10, 4))

    # Left panel: observed IVs (red dots) against the fitted smile.
    ax_fit.plot(s['df'].M, s['df'].iv, '.', c='r')
    ax_fit.plot(s['M'], s['smile'])
    ax_fit.set_xlabel('moneyness')
    ax_fit.set_ylabel('implied volatility')

    # Middle panel: smile together with its first and second derivatives.
    ax_deriv.plot(s['M'], s['smile'])
    ax_deriv.plot(s['M'], s['first'])
    ax_deriv.plot(s['M'], s['second'])
    ax_deriv.set_xlabel('moneyness')
    ax_deriv.set_ylabel('implied volatility')

    # Right panel: the implied risk-neutral density over spot prices.
    ax_rnd.plot(s['S'], s['q'])
    ax_rnd.set_xlabel('spot price')
    ax_rnd.set_ylabel(r'risk neutral density')
    ax_rnd.set_yticks([])

    plt.tight_layout()
    fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)),
                 transparent=True)
|
flexible
|
{
"blob_id": "a01f812584e4cee14c9fe15e9fb6ede4ae3e937a",
"index": 4953,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)\n<mask token>\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['K'][::-1], res[key]['q'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('risk neutral density')\naxes.flatten()[4].set_ylabel('risk neutral density')\naxes.flatten()[4].set_xlabel('spot price')\naxes.flatten()[5].set_xlabel('spot price')\naxes.flatten()[6].set_xlabel('spot price')\naxes.flatten()[7].set_xlabel('spot price')\nplt.tight_layout()\nfig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)\n<mask token>\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.plot(res[key]['M'], res[key]['first'])\n ax.plot(res[key]['M'], res[key]['second'])\n ax.text(0.99, 0.01, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='bottom', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied 
volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)),\n transparent=True)\nfor key in res:\n s = res[key]\n fig4, axes = plt.subplots(1, 3, figsize=(10, 4))\n ax = axes[0]\n ax.plot(s['df'].M, s['df'].iv, '.', c='r')\n ax.plot(s['M'], s['smile'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[1]\n ax.plot(s['M'], s['smile'])\n ax.plot(s['M'], s['first'])\n ax.plot(s['M'], s['second'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[2]\n ax.plot(s['S'], s['q'])\n ax.set_xlabel('spot price')\n ax.set_ylabel('risk neutral density')\n ax.set_yticks([])\n plt.tight_layout()\n fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)),\n transparent=True)\n",
"step-3": "<mask token>\ncwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',\n 'CrypOpt_RiskNeutralDensity')\ndata_path = os.path.join(cwd, 'data') + '/'\nday = '2020-03-11'\nres = pickle.load(open(data_path + 'results_{}.pkl'.format(day), 'rb'))\nfig1, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)\nfig2, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['K'][::-1], res[key]['q'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('risk neutral density')\naxes.flatten()[4].set_ylabel('risk neutral density')\naxes.flatten()[4].set_xlabel('spot price')\naxes.flatten()[5].set_xlabel('spot price')\naxes.flatten()[6].set_xlabel('spot price')\naxes.flatten()[7].set_xlabel('spot price')\nplt.tight_layout()\nfig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)\nfig3, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.plot(res[key]['M'], res[key]['first'])\n ax.plot(res[key]['M'], res[key]['second'])\n ax.text(0.99, 0.01, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 
'right', verticalalignment='bottom', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)),\n transparent=True)\nfor key in res:\n s = res[key]\n fig4, axes = plt.subplots(1, 3, figsize=(10, 4))\n ax = axes[0]\n ax.plot(s['df'].M, s['df'].iv, '.', c='r')\n ax.plot(s['M'], s['smile'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[1]\n ax.plot(s['M'], s['smile'])\n ax.plot(s['M'], s['first'])\n ax.plot(s['M'], s['second'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[2]\n ax.plot(s['S'], s['q'])\n ax.set_xlabel('spot price')\n ax.set_ylabel('risk neutral density')\n ax.set_yticks([])\n plt.tight_layout()\n fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)),\n transparent=True)\n",
"step-4": "import os\nimport pickle\nfrom matplotlib import pyplot as plt\ncwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',\n 'CrypOpt_RiskNeutralDensity')\ndata_path = os.path.join(cwd, 'data') + '/'\nday = '2020-03-11'\nres = pickle.load(open(data_path + 'results_{}.pkl'.format(day), 'rb'))\nfig1, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)\nfig2, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['K'][::-1], res[key]['q'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('risk neutral density')\naxes.flatten()[4].set_ylabel('risk neutral density')\naxes.flatten()[4].set_xlabel('spot price')\naxes.flatten()[5].set_xlabel('spot price')\naxes.flatten()[6].set_xlabel('spot price')\naxes.flatten()[7].set_xlabel('spot price')\nplt.tight_layout()\nfig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)\nfig3, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.plot(res[key]['M'], res[key]['first'])\n ax.plot(res[key]['M'], res[key]['second'])\n ax.text(0.99, 0.01, 
'$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='bottom', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)),\n transparent=True)\nfor key in res:\n s = res[key]\n fig4, axes = plt.subplots(1, 3, figsize=(10, 4))\n ax = axes[0]\n ax.plot(s['df'].M, s['df'].iv, '.', c='r')\n ax.plot(s['M'], s['smile'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[1]\n ax.plot(s['M'], s['smile'])\n ax.plot(s['M'], s['first'])\n ax.plot(s['M'], s['second'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[2]\n ax.plot(s['S'], s['q'])\n ax.set_xlabel('spot price')\n ax.set_ylabel('risk neutral density')\n ax.set_yticks([])\n plt.tight_layout()\n fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)),\n transparent=True)\n",
"step-5": "import os\nimport pickle\nfrom matplotlib import pyplot as plt\n\ncwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',\n 'CrypOpt_RiskNeutralDensity')\ndata_path = os.path.join(cwd, 'data') + '/'\n\nday = '2020-03-11'\nres = pickle.load(open(data_path + 'results_{}.pkl'.format(day), 'rb'))\n\n\n# ---------------------------------------------------------------------- SMILES\nfig1, axes = plt.subplots(2,4, figsize=(10,7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.text(0.99, 0.99, r'$\\tau$ = ' + str(key),\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes)\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)\n\n\n# ------------------------------------------------------------------------ RNDs\nfig2, axes = plt.subplots(2,4, figsize=(10,7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['K'][::-1], res[key]['q'])\n ax.text(0.99, 0.99, r'$\\tau$ = ' + str(key),\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('risk neutral density')\naxes.flatten()[4].set_ylabel('risk neutral density')\naxes.flatten()[4].set_xlabel('spot price')\naxes.flatten()[5].set_xlabel('spot price')\naxes.flatten()[6].set_xlabel('spot price')\naxes.flatten()[7].set_xlabel('spot price')\nplt.tight_layout()\nfig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)\n\n\n# ----------------------------------------------------------------- DERIVATIVES\nfig3, axes = 
plt.subplots(2,4, figsize=(10,7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.plot(res[key]['M'], res[key]['first'])\n ax.plot(res[key]['M'], res[key]['second'])\n ax.text(0.99, 0.01, r'$\\tau$ = ' + str(key),\n horizontalalignment='right',\n verticalalignment='bottom',\n transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)), transparent=True)\n\n\n# ----------------------------------------------------------------- TAU PROCESS\nfor key in res:\n s = res[key]\n\n fig4, axes = plt.subplots(1,3, figsize=(10,4))\n ax = axes[0]\n ax.plot(s['df'].M, s['df'].iv, '.', c='r')\n ax.plot(s['M'], s['smile'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n\n ax = axes[1]\n ax.plot(s['M'], s['smile'])\n ax.plot(s['M'], s['first'])\n ax.plot(s['M'], s['second'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n\n ax = axes[2]\n ax.plot(s['S'], s['q'])\n ax.set_xlabel('spot price')\n ax.set_ylabel(r'risk neutral density')\n ax.set_yticks([])\n\n plt.tight_layout()\n\n fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)), transparent=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FileDataSource(UpdateProcessor):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FileDataSource(UpdateProcessor):
@classmethod
def factory(cls, **kwargs):
"""Provides a way to use local files as a source of feature flag state.
.. deprecated:: 6.8.0
This module and this implementation class are deprecated and may be changed or removed in the future.
Please use :func:`ldclient.integrations.Files.new_data_source()`.
The keyword arguments are the same as the arguments to :func:`ldclient.integrations.Files.new_data_source()`.
"""
return lambda config, store, ready: _FileDataSource(store, ready,
paths=kwargs.get('paths'), auto_update=kwargs.get('auto_update',
False), poll_interval=kwargs.get('poll_interval', 1),
force_polling=kwargs.get('force_polling', False))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from ldclient.impl.integrations.files.file_data_source import _FileDataSource
from ldclient.interfaces import UpdateProcessor
class FileDataSource(UpdateProcessor):
@classmethod
def factory(cls, **kwargs):
"""Provides a way to use local files as a source of feature flag state.
.. deprecated:: 6.8.0
This module and this implementation class are deprecated and may be changed or removed in the future.
Please use :func:`ldclient.integrations.Files.new_data_source()`.
The keyword arguments are the same as the arguments to :func:`ldclient.integrations.Files.new_data_source()`.
"""
return lambda config, store, ready: _FileDataSource(store, ready,
paths=kwargs.get('paths'), auto_update=kwargs.get('auto_update',
False), poll_interval=kwargs.get('poll_interval', 1),
force_polling=kwargs.get('force_polling', False))
<|reserved_special_token_1|>
"""
Deprecated entry point for a component that has been moved.
"""
# currently excluded from documentation - see docs/README.md
from ldclient.impl.integrations.files.file_data_source import _FileDataSource
from ldclient.interfaces import UpdateProcessor
class FileDataSource(UpdateProcessor):
    @classmethod
    def factory(cls, **kwargs):
        """Provides a way to use local files as a source of feature flag state.

        .. deprecated:: 6.8.0
          This module and this implementation class are deprecated and may be changed or removed in the future.
          Please use :func:`ldclient.integrations.Files.new_data_source()`.

        The keyword arguments are the same as the arguments to :func:`ldclient.integrations.Files.new_data_source()`.
        """
        def make_data_source(config, store, ready):
            # Forward the captured keyword configuration to the real
            # implementation, applying the documented defaults.
            return _FileDataSource(
                store,
                ready,
                paths=kwargs.get("paths"),
                auto_update=kwargs.get("auto_update", False),
                poll_interval=kwargs.get("poll_interval", 1),
                force_polling=kwargs.get("force_polling", False),
            )

        return make_data_source
|
flexible
|
{
"blob_id": "ee68ebe146f948f3497577f40741e59b7421e652",
"index": 8186,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass FileDataSource(UpdateProcessor):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FileDataSource(UpdateProcessor):\n\n @classmethod\n def factory(cls, **kwargs):\n \"\"\"Provides a way to use local files as a source of feature flag state.\n \n .. deprecated:: 6.8.0\n This module and this implementation class are deprecated and may be changed or removed in the future.\n Please use :func:`ldclient.integrations.Files.new_data_source()`.\n \n The keyword arguments are the same as the arguments to :func:`ldclient.integrations.Files.new_data_source()`.\n \"\"\"\n return lambda config, store, ready: _FileDataSource(store, ready,\n paths=kwargs.get('paths'), auto_update=kwargs.get('auto_update',\n False), poll_interval=kwargs.get('poll_interval', 1),\n force_polling=kwargs.get('force_polling', False))\n",
"step-4": "<mask token>\nfrom ldclient.impl.integrations.files.file_data_source import _FileDataSource\nfrom ldclient.interfaces import UpdateProcessor\n\n\nclass FileDataSource(UpdateProcessor):\n\n @classmethod\n def factory(cls, **kwargs):\n \"\"\"Provides a way to use local files as a source of feature flag state.\n \n .. deprecated:: 6.8.0\n This module and this implementation class are deprecated and may be changed or removed in the future.\n Please use :func:`ldclient.integrations.Files.new_data_source()`.\n \n The keyword arguments are the same as the arguments to :func:`ldclient.integrations.Files.new_data_source()`.\n \"\"\"\n return lambda config, store, ready: _FileDataSource(store, ready,\n paths=kwargs.get('paths'), auto_update=kwargs.get('auto_update',\n False), poll_interval=kwargs.get('poll_interval', 1),\n force_polling=kwargs.get('force_polling', False))\n",
"step-5": "\"\"\"\nDeprecated entry point for a component that has been moved.\n\"\"\"\n# currently excluded from documentation - see docs/README.md\n\nfrom ldclient.impl.integrations.files.file_data_source import _FileDataSource\nfrom ldclient.interfaces import UpdateProcessor\n\nclass FileDataSource(UpdateProcessor):\n @classmethod\n def factory(cls, **kwargs):\n \"\"\"Provides a way to use local files as a source of feature flag state.\n \n .. deprecated:: 6.8.0\n This module and this implementation class are deprecated and may be changed or removed in the future.\n Please use :func:`ldclient.integrations.Files.new_data_source()`.\n \n The keyword arguments are the same as the arguments to :func:`ldclient.integrations.Files.new_data_source()`.\n \"\"\"\n\n return lambda config, store, ready : _FileDataSource(store, ready,\n paths=kwargs.get(\"paths\"),\n auto_update=kwargs.get(\"auto_update\", False),\n poll_interval=kwargs.get(\"poll_interval\", 1),\n force_polling=kwargs.get(\"force_polling\", False))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Time now : ', now)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
now = datetime.datetime.now()
print('Time now : ', now)
<|reserved_special_token_1|>
import datetime
now = datetime.datetime.now()
print('Time now : ', now)
<|reserved_special_token_1|>
import datetime

# Capture the current local wall-clock time once.
now = datetime.datetime.now()

# Report it to the user.
print("Time now : ", now)
|
flexible
|
{
"blob_id": "0110d26e17a5402c22f519d0aeb2aacca3279d00",
"index": 7792,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Time now : ', now)\n",
"step-3": "<mask token>\nnow = datetime.datetime.now()\nprint('Time now : ', now)\n",
"step-4": "import datetime\nnow = datetime.datetime.now()\nprint('Time now : ', now)\n",
"step-5": "import datetime \r\n\r\nnow = datetime.datetime.now() \r\n \r\n# Printing value of now. \r\nprint (\"Time now : \", now) \r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Calculator:
<|reserved_special_token_0|>
def Subtract(self, num1, num2):
return num1 - num2
<|reserved_special_token_0|>
def Divide(self, num1, num2):
return num1 / num2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Calculator:
def Add(self, num1, num2):
return num1 + num2
def Subtract(self, num1, num2):
return num1 - num2
<|reserved_special_token_0|>
def Divide(self, num1, num2):
return num1 / num2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Calculator:
def Add(self, num1, num2):
return num1 + num2
def Subtract(self, num1, num2):
return num1 - num2
def Multiply(self, num1, num2):
return num1 * num2
def Divide(self, num1, num2):
return num1 / num2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Calculator:
def Add(self, num1, num2):
return num1 + num2
def Subtract(self, num1, num2):
return num1 - num2
def Multiply(self, num1, num2):
return num1 * num2
def Divide(self, num1, num2):
return num1 / num2
if __name__ == '__main__':
calc = Calculator()
print(calc.Add(1, 2))
print(calc.Subtract(1, 2))
print(calc.Multiply(1, 2))
print(calc.Divide(1, 2))
<|reserved_special_token_1|>
# This file is part of the functional_calculator_oop.py Task
# Create a class called Calculator
class Calculator:
    """Basic four-function arithmetic exposed as instance methods."""

    def Add(self, num1, num2):
        """Return the sum of num1 and num2."""
        total = num1 + num2
        return total

    def Subtract(self, num1, num2):
        """Return num1 minus num2."""
        difference = num1 - num2
        return difference

    def Multiply(self, num1, num2):
        """Return the product of num1 and num2."""
        product = num1 * num2
        return product

    def Divide(self, num1, num2):
        """Return num1 divided by num2 (true division)."""
        quotient = num1 / num2
        return quotient
# We need this conditional check so that the code doesn't run automatically when we import it on another file
if __name__ == "__main__":
    # Demo: apply each operation to the pair (1, 2) and print the results,
    # one per line, in Add / Subtract / Multiply / Divide order.
    calc = Calculator()
    for operation in (calc.Add, calc.Subtract, calc.Multiply, calc.Divide):
        print(operation(1, 2))
# Here we can see that __name__ is main when ran from here directly, but calculator_oop when imported on another file
# print(__name__)
|
flexible
|
{
"blob_id": "d2972fb7cff08e15957f9baeaa6fd9a6f5bbb006",
"index": 1127,
"step-1": "class Calculator:\n <mask token>\n\n def Subtract(self, num1, num2):\n return num1 - num2\n <mask token>\n\n def Divide(self, num1, num2):\n return num1 / num2\n\n\n<mask token>\n",
"step-2": "class Calculator:\n\n def Add(self, num1, num2):\n return num1 + num2\n\n def Subtract(self, num1, num2):\n return num1 - num2\n <mask token>\n\n def Divide(self, num1, num2):\n return num1 / num2\n\n\n<mask token>\n",
"step-3": "class Calculator:\n\n def Add(self, num1, num2):\n return num1 + num2\n\n def Subtract(self, num1, num2):\n return num1 - num2\n\n def Multiply(self, num1, num2):\n return num1 * num2\n\n def Divide(self, num1, num2):\n return num1 / num2\n\n\n<mask token>\n",
"step-4": "class Calculator:\n\n def Add(self, num1, num2):\n return num1 + num2\n\n def Subtract(self, num1, num2):\n return num1 - num2\n\n def Multiply(self, num1, num2):\n return num1 * num2\n\n def Divide(self, num1, num2):\n return num1 / num2\n\n\nif __name__ == '__main__':\n calc = Calculator()\n print(calc.Add(1, 2))\n print(calc.Subtract(1, 2))\n print(calc.Multiply(1, 2))\n print(calc.Divide(1, 2))\n",
"step-5": "# This file is part of the functional_calculator_oop.py Task\n# Create a class called Calculator\nclass Calculator:\n\n def Add(self, num1, num2):\n return num1 + num2\n\n def Subtract(self, num1, num2):\n return num1 - num2\n\n def Multiply(self, num1, num2):\n return num1 * num2\n\n def Divide(self, num1, num2):\n return num1 / num2\n\n\n# We need this conditional check so that the code doesn't run automatically when we import it on another file\nif __name__ == \"__main__\":\n # Create calculator object\n calc = Calculator()\n # Use object to call methods\n print(calc.Add(1, 2))\n print(calc.Subtract(1, 2))\n print(calc.Multiply(1, 2))\n print(calc.Divide(1, 2))\n\n# Here we can see that __name__ is main when ran from here directly, but calculator_oop when imported on another file\n# print(__name__)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from urllib import request, parse
import pandas as pd
import json
import os
class BusInfo:
    """Class-level facade for the ODPT ("Tokyo Challenge") open-data bus API,
    scoped to Yokohama Municipal buses.

    All state lives on the class (no instances are created).  Call init()
    once to install the API key and prime the lookup tables, then call
    update() to fetch a snapshot of live bus positions.
    """

    # API endpoints: live vehicle positions, bus-stop poles, route patterns.
    url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'
    url_busstop = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json'
    url_routes = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json'
    # Query parameters shared by all requests; init() adds the consumer key.
    params = {
        'odpt:operator': 'odpt.Operator:YokohamaMunicipal',
    }
    # Lookup tables (pandas DataFrames) filled by getBusStops()/getBusRoutes();
    # None until those methods have run.
    bus_stops = None
    bus_routes = None
    @staticmethod
    def init():
        """Read the API key from the BUS_TOKEN environment variable and
        pre-fetch the bus-stop and route lookup tables."""
        apiKey = os.getenv('BUS_TOKEN')  # None when unset; requests would then be rejected
        BusInfo.params['acl:consumerKey'] = apiKey
        BusInfo.getBusStops()
        BusInfo.getBusRoutes()
    @staticmethod
    def getBusRoutes():
        """Fetch all bus-route patterns and cache them in BusInfo.bus_routes
        as a DataFrame indexed by route_id with a route_name column."""
        # Debug stub left from development: uncomment to skip the network call.
        #BusInfo.bus_routes = pd.DataFrame()
        #return
        busroute_list=[]
        req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.urlencode(BusInfo.params)))
        with request.urlopen(req) as res:
            json_load = json.load(res)
            for v in json_load:
                try:
                    busstop = { 'route_id': v['owl:sameAs'],
                                'route_name': v['dc:title'],
                            }
                    busroute_list.append(busstop)
                except Exception:
                    # Records missing either key are silently skipped.
                    pass
        BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')
    @staticmethod
    def getBusStops():
        """Fetch all bus-stop poles and cache them in BusInfo.bus_stops
        as a DataFrame indexed by busstop_id with a pole_name column."""
        # Debug stub left from development: uncomment to skip the network call.
        #BusInfo.bus_stops = pd.DataFrame()
        #return
        busstop_list=[]
        req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.urlencode(BusInfo.params)))
        with request.urlopen(req) as res:
            json_load = json.load(res)
            for v in json_load:
                try:
                    busstop = { 'busstop_id': v['owl:sameAs'],
                                'pole_name': v['dc:title'],
                            }
                    busstop_list.append(busstop)
                except Exception:
                    # Records missing either key are silently skipped.
                    pass
        BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')
    @staticmethod
    def update():
        """Fetch current bus positions and return them as a DataFrame
        indexed by bus number.

        Each row carries position, route ids, previous/next stop ids, an
        occupancy label with a matching marker colour, and an icon URL.
        The result is left-joined against the cached stop and route tables
        (so init() must have run first); unmatched lookups become "-".
        NOTE(review): the two bus_stops merges share a pole_name column, so
        pandas' default _x/_y suffixes presumably apply — confirm downstream
        consumers expect that.
        """
        bus_list=[]
        req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(BusInfo.params)))
        with request.urlopen(req) as res:
            json_load = json.load(res)
            for v in json_load:
                try:
                    # Map the reported occupancy status to a Japanese label
                    # and a marker colour.
                    if v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:Empty':
                        occupancy = '空いている'
                        color='blue'
                    elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:ManySeatsAvailable':
                        occupancy = '空き座席多数'
                        color='blue'
                    elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:FewSeatsAvailable':
                        occupancy = '座席わすか'
                        color='yellow'
                    elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:StandingRoomOnly':
                        occupancy = '混雑'
                        color='red'
                    else:
                        # NOTE(review): only `color` is set on this branch.
                        # For the first bus with an unknown status, `occupancy`
                        # is unbound, the dict build below raises NameError,
                        # and the broad except drops the bus; for later buses
                        # the label from a previous iteration leaks in.
                        # Confirm this filtering/reuse is intentional.
                        color='gray'
                    bus = { 'bus_id': v['odpt:busNumber'],
                            'lat': v['geo:lat'],
                            'lng': v['geo:long'],
                            'route_num': v['odpt:busroute'][-3:],  # last 3 chars — presumably the route number; verify
                            'route_id': v['odpt:busroutePattern'],
                            'prevStop': v['odpt:fromBusstopPole'],
                            'nextStop': v['odpt:toBusstopPole'],
                            'occupancy' : occupancy,
                            'color' : color,
                            'azimuth' : v['odpt:azimuth'],
                            'img_url' : 'https://mxl00474.github.io/test_static/arrow_' + color + '.png'
                        }
                    bus_list.append(bus)
                except Exception:
                    # Buses missing any required field (or hitting the
                    # NameError noted above) are silently dropped.
                    pass
        # Enrich with stop names (previous and next) and the route name.
        df = pd.DataFrame(bus_list).set_index('bus_id')
        df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop', right_index=True, how='left')
        df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop', right_index=True, how='left')
        df = pd.merge(df, BusInfo.bus_routes, left_on='route_id', right_index=True, how='left')
        return df.fillna("-")
if __name__ == '__main__':
    # Smoke test: authenticate, then exercise each fetch and dump the result.
    BusInfo.init()

    print('=== Get stop info ===')
    BusInfo.getBusStops()  # re-fetches even though init() already did
    print(BusInfo.bus_stops)

    print('=== Get route info ===')
    BusInfo.getBusRoutes()  # re-fetches even though init() already did
    print(len(BusInfo.bus_routes))

    print('=== Get bus info ===')
    live_buses = BusInfo.update()
    print(live_buses)
    print(live_buses.columns)
|
normal
|
{
"blob_id": "7eefcfdb9682cb09ce2d85d11aafc04977016ba4",
"index": 8332,
"step-1": "<mask token>\n\n\nclass BusInfo:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': 
v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BusInfo:\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')\n url_routes = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')\n params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 
'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BusInfo:\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')\n url_routes = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')\n params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 
'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\nif __name__ == '__main__':\n BusInfo.init()\n print('=== Get stop info ===')\n BusInfo.getBusStops()\n print(BusInfo.bus_stops)\n print('=== Get route info ===')\n BusInfo.getBusRoutes()\n print(len(BusInfo.bus_routes))\n print('=== Get bus info ===')\n bus_list = BusInfo.update()\n print(bus_list)\n print(bus_list.columns)\n",
"step-4": "from urllib import request, parse\nimport pandas as pd\nimport json\nimport os\n\n\nclass BusInfo:\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')\n url_routes = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')\n params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif 
v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\nif __name__ == '__main__':\n BusInfo.init()\n print('=== Get stop info ===')\n BusInfo.getBusStops()\n print(BusInfo.bus_stops)\n print('=== Get route info ===')\n BusInfo.getBusRoutes()\n print(len(BusInfo.bus_routes))\n print('=== Get bus info ===')\n bus_list = BusInfo.update()\n print(bus_list)\n print(bus_list.columns)\n",
"step-5": "from urllib import request, parse\nimport pandas as pd\nimport json\nimport os\n\nclass BusInfo:\n\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json'\n url_routes = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json'\n \n params = {\n 'odpt:operator': 'odpt.Operator:YokohamaMunicipal',\n }\n\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n \n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n \n #BusInfo.bus_routes = pd.DataFrame()\n #return\n\n busroute_list=[]\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res) \n for v in json_load:\n try:\n busstop = { 'route_id': v['owl:sameAs'],\n 'route_name': v['dc:title'],\n }\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n\n #BusInfo.bus_stops = pd.DataFrame()\n #return\n\n busstop_list=[]\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res) \n for v in json_load:\n try:\n busstop = { 'busstop_id': v['owl:sameAs'],\n 'pole_name': v['dc:title'],\n }\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list=[]\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res) \n for v in json_load:\n try:\n\n if v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color='blue'\n elif 
v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color='blue'\n elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color='yellow'\n elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color='red'\n else:\n color='gray'\n\n bus = { 'bus_id': v['odpt:busNumber'],\n 'lat': v['geo:lat'],\n 'lng': v['geo:long'],\n 'route_num': v['odpt:busroute'][-3:],\n 'route_id': v['odpt:busroutePattern'],\n 'prevStop': v['odpt:fromBusstopPole'],\n 'nextStop': v['odpt:toBusstopPole'],\n 'occupancy' : occupancy,\n 'color' : color,\n 'azimuth' : v['odpt:azimuth'],\n 'img_url' : 'https://mxl00474.github.io/test_static/arrow_' + color + '.png'\n }\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop', right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop', right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id', right_index=True, how='left')\n return df.fillna(\"-\")\n\nif __name__ == '__main__':\n\n BusInfo.init()\n\n print('=== Get stop info ===')\n BusInfo.getBusStops()\n print(BusInfo.bus_stops)\n\n print('=== Get route info ===')\n BusInfo.getBusRoutes()\n #print(BusInfo.bus_routes)\n print(len(BusInfo.bus_routes))\n\n print('=== Get bus info ===')\n bus_list = BusInfo.update()\n print(bus_list)\n print(bus_list.columns)\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
"""
Configuration management.
Environment must be set before use.
Call .get() to obtain configuration variable. If the variable does not exist
in the set environment, then
"""
CONFIG_KEY = "config_class"
ENV = {}
class EMPTY:
    """Sentinel type meaning "no default value was supplied".

    Lookups that receive EMPTY as their default must raise an error when
    the requested attribute does not exist, instead of returning it.
    """
class Config:
    """
    Configuration management entity.

    Args:
        name (str): Name of config environment.
        fallback (bool): Indicate if configuration should fallback to base.

    Raises:
        AttributeError: if no config module named ``name`` exists next to
            this package's ``__init__``.
    """

    no_config_err = "No such config variable {}"

    def __init__(self, name, fallback):
        from importlib import import_module
        from os import listdir
        from os.path import dirname

        self.config_path = dirname(__file__)
        self.name = name
        self.fallback = fallback

        # List of config modules available.
        # BUG FIX: the previous code used i.strip(".py"), which removes the
        # *characters* '.', 'p' and 'y' from both ends (e.g. "proxy.py" ->
        # "prox"), corrupting module names; slice off the fixed suffix instead.
        self.config_modules = set([
            i[:-len(".py")]
            for i in listdir(self.config_path)
            if i.endswith(".py") and i != "__init__.py"
        ])

        if name not in self.config_modules:
            err = "Config environment {} does not exist".format(name)

            raise AttributeError(err)

        if self.fallback:
            # Fallback configuration module.
            self.base = import_module("illume.config.base")

        # Desired configuration module.
        self.module = import_module("illume.config.{}".format(self.name))

    def get(self, name, default):
        """Get config value, falling back to the base module when enabled.

        Raises:
            AttributeError: if the variable is unset everywhere and the
                default is the EMPTY sentinel.
        """
        # Identity checks against the sentinel (it is a unique class object,
        # so `is` is the robust comparison).
        value = getattr(self.module, name, default)

        if value is not EMPTY:
            return value
        if not self.fallback:
            raise AttributeError(self.no_config_err.format(name))

        # Missing from the chosen environment; consult the base module.
        value = getattr(self.base, name, default)

        if value is EMPTY:
            raise AttributeError(self.no_config_err.format(name))

        return value
def setenv(name, fallback=True):
    """Set configuration environment.

    May only be called once per process; a second call raises.
    """
    if CONFIG_KEY in ENV:
        raise AttributeError("Config environment already set.")

    ENV[CONFIG_KEY] = Config(name, fallback)
def get(name, default=EMPTY):
    """Get configuration variable.

    Requires setenv() to have been called first; otherwise raises.
    """
    config_class = ENV.get(CONFIG_KEY)

    if config_class is None:
        raise AttributeError("Config environment not set.")

    return config_class.get(name, default)
|
normal
|
{
"blob_id": "263d2fe43cf8747f20fd51897ba003c9c4cb4280",
"index": 9907,
"step-1": "<mask token>\n\n\nclass EMPTY:\n <mask token>\n pass\n\n\nclass Config:\n \"\"\"\n Configuration management entity.\n\n Args:\n name (str): Name of config environment.\n fallback (bool): Indicate if configuration should fallback to base.\n \"\"\"\n no_config_err = 'No such config variable {}'\n\n def __init__(self, name, fallback):\n from importlib import import_module\n from os import listdir\n from os.path import dirname\n self.config_path = dirname(__file__)\n self.name = name\n self.fallback = fallback\n self.config_modules = set([i.strip('.py') for i in listdir(self.\n config_path) if '.py' in i and i != '__init__.py'])\n if name not in self.config_modules:\n err = 'Config environment {} does not exist'.format(name)\n raise AttributeError(err)\n if self.fallback:\n self.base = import_module('illume.config.base')\n self.module = import_module('illume.config.{}'.format(self.name))\n\n def get(self, name, default):\n \"\"\"Get config value\"\"\"\n value = getattr(self.module, name, default)\n if value != EMPTY:\n return value\n elif value == EMPTY and not self.fallback:\n raise AttributeError(self.no_config_err.format(name))\n elif value == EMPTY and self.fallback:\n value = getattr(self.base, name, default)\n if value == EMPTY:\n raise AttributeError(self.no_config_err.format(name))\n return value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EMPTY:\n \"\"\"\n Signifies that a default value was not set. Should trigger an error if\n default is set to EMPTY and an attribute does not exist.\n \"\"\"\n pass\n\n\nclass Config:\n \"\"\"\n Configuration management entity.\n\n Args:\n name (str): Name of config environment.\n fallback (bool): Indicate if configuration should fallback to base.\n \"\"\"\n no_config_err = 'No such config variable {}'\n\n def __init__(self, name, fallback):\n from importlib import import_module\n from os import listdir\n from os.path import dirname\n self.config_path = dirname(__file__)\n self.name = name\n self.fallback = fallback\n self.config_modules = set([i.strip('.py') for i in listdir(self.\n config_path) if '.py' in i and i != '__init__.py'])\n if name not in self.config_modules:\n err = 'Config environment {} does not exist'.format(name)\n raise AttributeError(err)\n if self.fallback:\n self.base = import_module('illume.config.base')\n self.module = import_module('illume.config.{}'.format(self.name))\n\n def get(self, name, default):\n \"\"\"Get config value\"\"\"\n value = getattr(self.module, name, default)\n if value != EMPTY:\n return value\n elif value == EMPTY and not self.fallback:\n raise AttributeError(self.no_config_err.format(name))\n elif value == EMPTY and self.fallback:\n value = getattr(self.base, name, default)\n if value == EMPTY:\n raise AttributeError(self.no_config_err.format(name))\n return value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass EMPTY:\n \"\"\"\n Signifies that a default value was not set. Should trigger an error if\n default is set to EMPTY and an attribute does not exist.\n \"\"\"\n pass\n\n\nclass Config:\n \"\"\"\n Configuration management entity.\n\n Args:\n name (str): Name of config environment.\n fallback (bool): Indicate if configuration should fallback to base.\n \"\"\"\n no_config_err = 'No such config variable {}'\n\n def __init__(self, name, fallback):\n from importlib import import_module\n from os import listdir\n from os.path import dirname\n self.config_path = dirname(__file__)\n self.name = name\n self.fallback = fallback\n self.config_modules = set([i.strip('.py') for i in listdir(self.\n config_path) if '.py' in i and i != '__init__.py'])\n if name not in self.config_modules:\n err = 'Config environment {} does not exist'.format(name)\n raise AttributeError(err)\n if self.fallback:\n self.base = import_module('illume.config.base')\n self.module = import_module('illume.config.{}'.format(self.name))\n\n def get(self, name, default):\n \"\"\"Get config value\"\"\"\n value = getattr(self.module, name, default)\n if value != EMPTY:\n return value\n elif value == EMPTY and not self.fallback:\n raise AttributeError(self.no_config_err.format(name))\n elif value == EMPTY and self.fallback:\n value = getattr(self.base, name, default)\n if value == EMPTY:\n raise AttributeError(self.no_config_err.format(name))\n return value\n\n\n<mask token>\n\n\ndef get(name, default=EMPTY):\n \"\"\"Get configuration variable.\"\"\"\n config_class = ENV.get(CONFIG_KEY, None)\n if config_class is None:\n raise AttributeError('Config environment not set.')\n return config_class.get(name, default)\n",
"step-4": "<mask token>\n\n\nclass EMPTY:\n \"\"\"\n Signifies that a default value was not set. Should trigger an error if\n default is set to EMPTY and an attribute does not exist.\n \"\"\"\n pass\n\n\nclass Config:\n \"\"\"\n Configuration management entity.\n\n Args:\n name (str): Name of config environment.\n fallback (bool): Indicate if configuration should fallback to base.\n \"\"\"\n no_config_err = 'No such config variable {}'\n\n def __init__(self, name, fallback):\n from importlib import import_module\n from os import listdir\n from os.path import dirname\n self.config_path = dirname(__file__)\n self.name = name\n self.fallback = fallback\n self.config_modules = set([i.strip('.py') for i in listdir(self.\n config_path) if '.py' in i and i != '__init__.py'])\n if name not in self.config_modules:\n err = 'Config environment {} does not exist'.format(name)\n raise AttributeError(err)\n if self.fallback:\n self.base = import_module('illume.config.base')\n self.module = import_module('illume.config.{}'.format(self.name))\n\n def get(self, name, default):\n \"\"\"Get config value\"\"\"\n value = getattr(self.module, name, default)\n if value != EMPTY:\n return value\n elif value == EMPTY and not self.fallback:\n raise AttributeError(self.no_config_err.format(name))\n elif value == EMPTY and self.fallback:\n value = getattr(self.base, name, default)\n if value == EMPTY:\n raise AttributeError(self.no_config_err.format(name))\n return value\n\n\ndef setenv(name, fallback=True):\n \"\"\"Set configuration environment.\"\"\"\n if CONFIG_KEY in ENV:\n raise AttributeError('Config environment already set.')\n config_class = Config(name, fallback)\n ENV[CONFIG_KEY] = config_class\n\n\ndef get(name, default=EMPTY):\n \"\"\"Get configuration variable.\"\"\"\n config_class = ENV.get(CONFIG_KEY, None)\n if config_class is None:\n raise AttributeError('Config environment not set.')\n return config_class.get(name, default)\n",
"step-5": "\"\"\"\nConfiguration management.\n\nEnvironment must be set before use.\n\nCall .get() to obtain configuration variable. If the variable does not exist\nin the set environment, then\n\"\"\"\n\n\nCONFIG_KEY = \"config_class\"\nENV = {}\n\n\nclass EMPTY:\n\n \"\"\"\n Signifies that a default value was not set. Should trigger an error if\n default is set to EMPTY and an attribute does not exist.\n \"\"\"\n\n pass\n\n\nclass Config:\n\n \"\"\"\n Configuration management entity.\n\n Args:\n name (str): Name of config environment.\n fallback (bool): Indicate if configuration should fallback to base.\n \"\"\"\n\n no_config_err = \"No such config variable {}\"\n\n def __init__(self, name, fallback):\n from importlib import import_module\n from os import listdir\n from os.path import dirname\n\n self.config_path = dirname(__file__)\n self.name = name\n self.fallback = fallback\n\n # List of config modules available\n self.config_modules = set([\n i.strip(\".py\")\n for i in listdir(self.config_path)\n if \".py\" in i and i != \"__init__.py\"\n ])\n\n if name not in self.config_modules:\n err = \"Config environment {} does not exist\".format(name)\n\n raise AttributeError(err)\n\n if self.fallback:\n # Fallback configuration module.\n self.base = import_module(\"illume.config.base\")\n\n # Desired configuration module.\n self.module = import_module(\"illume.config.{}\".format(self.name))\n\n def get(self, name, default):\n \"\"\"Get config value\"\"\"\n value = getattr(self.module, name, default)\n\n if value != EMPTY:\n return value\n elif value == EMPTY and not self.fallback:\n raise AttributeError(self.no_config_err.format(name))\n elif value == EMPTY and self.fallback:\n value = getattr(self.base, name, default)\n\n if value == EMPTY:\n raise AttributeError(self.no_config_err.format(name))\n\n return value\n\n\ndef setenv(name, fallback=True):\n \"\"\"Set configuration environment.\"\"\"\n if CONFIG_KEY in ENV:\n raise AttributeError(\"Config environment 
already set.\")\n\n config_class = Config(name, fallback)\n\n ENV[CONFIG_KEY] = config_class\n\n\ndef get(name, default=EMPTY):\n \"\"\"Get configuration variable.\"\"\"\n config_class = ENV.get(CONFIG_KEY, None)\n\n if config_class is None:\n raise AttributeError(\"Config environment not set.\")\n\n return config_class.get(name, default)\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
from time import sleep
import pytest
import allure
from app.debug_api import DebugAPI
from app.check_api import HandlersAPI
from locators.movies_details_locators import MoviesDetailsPageLocators
from locators.movies_locators import MoviesPageLocators
from locators.shedule_locators import ShedulePageLocators
from screens.MoviesPage import MoviesPage
from screens.MoviesDetailsPage import MoviesDetailsPage
from screens.ShedulePage import ShedulePage
from utils.internet import enable_proxy
@pytest.mark.usefixtures('driver')
class Test_001_ShedulePage:
    """Schedule-page UI tests: featured movie -> details -> timetable."""

    @classmethod
    def setup_class(cls):
        # Locator bundles shared by every test in the class.
        cls.movies_locators = MoviesPageLocators()
        cls.shedule_locators = ShedulePageLocators()
        cls.event_detail_page_locators = MoviesDetailsPageLocators()

    @classmethod
    def teardown_class(cls):
        # BUG FIX: was declared @staticmethod while taking a `cls` argument;
        # pytest's xunit-style teardown_class hook is conventionally a
        # classmethod (matching setup_class above).
        enable_proxy(mode=False)

    def test_001_elements_exists(self, driver):
        """Tap the featured movie, open its timetable, and verify the
        back button, map button and search field are present."""
        with allure.step('MoviesPage'):
            self.movie_page = MoviesPage(driver)
            self.movie_page.set_custom_wait(10)
            self.movie_page.act.click_by_coords(50, 30)
        with allure.step('EventDetailsPage'):
            self.event_detail_page = MoviesDetailsPage(driver)
            self.event_detail_page.set_custom_wait(10)
            self.event_detail_page.click(*self.event_detail_page_locators.
                                         btn_view_timetable)
        with allure.step('ShedulePage'):
            self.shedule_page = ShedulePage(driver)
            self.shedule_page.set_custom_wait(10)
            self.shedule_page.find_element(*self.shedule_locators.btn_back)
            self.shedule_page.find_element(*self.shedule_locators.btn_map)
            self.shedule_page.find_element(*self.shedule_locators.search_field)

    def test_002_valid_filters(self, driver):
        """Tap the featured movie, open its timetable, then check that the
        on-screen filters match the server response, including their order."""
        dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
                               url_creations_movie_schedule_filter)
        try:
            with allure.step('MoviesPage'):
                self.movie_page = MoviesPage(driver)
                self.movie_page.set_custom_wait(10)
                sleep(5)
                self.movie_page.act.click_by_coords(50, 30)
            with allure.step('EventDetailsPage'):
                self.event_detail_page = MoviesDetailsPage(driver)
                self.event_detail_page.set_custom_wait(10)
                sleep(5)
                self.event_detail_page.click(*self.
                                             event_detail_page_locators.btn_view_timetable)
            with allure.step('ShedulePage'):
                self.shedule_page = ShedulePage(driver)
                self.shedule_page.set_custom_wait(10)
                sleep(5)
                self.shedule_page.check_rows_filters(dbg_api)
        finally:
            # Always stop the debug/capture process, even on failure.
            dbg_api.kill()

    def test_003_check_time_ticket_filter(self, driver):
        """Tap the featured movie, open its timetable, then check that ticket
        times match the currently selected filters."""
        dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
                               url_creations_movie_schedule_filter)
        try:
            with allure.step('MoviesPage'):
                self.movie_page = MoviesPage(driver)
                self.movie_page.set_custom_wait(10)
                sleep(10)
                self.movie_page.act.click_by_coords(50, 30)
            with allure.step('EventDetailsPage'):
                self.event_detail_page = MoviesDetailsPage(driver)
                self.event_detail_page.set_custom_wait(10)
                self.event_detail_page.click(*self.
                                             event_detail_page_locators.btn_view_timetable)
            with allure.step('ShedulePage'):
                self.shedule_page = ShedulePage(driver)
                self.shedule_page.set_custom_wait(10)
                sleep(2)
                self.shedule_page.compare_tickets_datetime_options_second_filter(
                    dbg_api)
        finally:
            # Always stop the debug/capture process, even on failure.
            dbg_api.kill()
|
normal
|
{
"blob_id": "c7c412fe4e2d53af1b4f2a55bd3453496767890d",
"index": 975,
"step-1": "<mask token>\n\n\n@pytest.mark.usefixtures('driver')\nclass Test_001_ShedulePage:\n <mask token>\n <mask token>\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на 
смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"step-2": "<mask token>\n\n\n@pytest.mark.usefixtures('driver')\nclass Test_001_ShedulePage:\n <mask token>\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def 
test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"step-3": "<mask token>\n\n\n@pytest.mark.usefixtures('driver')\nclass Test_001_ShedulePage:\n\n @classmethod\n def setup_class(cls):\n cls.movies_locators = MoviesPageLocators()\n cls.shedule_locators = ShedulePageLocators()\n cls.event_detail_page_locators = MoviesDetailsPageLocators()\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = 
ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"step-4": "from time import sleep\nimport pytest\nimport allure\nfrom app.debug_api import DebugAPI\nfrom app.check_api import HandlersAPI\nfrom locators.movies_details_locators import MoviesDetailsPageLocators\nfrom locators.movies_locators import MoviesPageLocators\nfrom locators.shedule_locators import ShedulePageLocators\nfrom screens.MoviesPage import MoviesPage\nfrom screens.MoviesDetailsPage import MoviesDetailsPage\nfrom screens.ShedulePage import ShedulePage\nfrom utils.internet import enable_proxy\n\n\n@pytest.mark.usefixtures('driver')\nclass Test_001_ShedulePage:\n\n @classmethod\n def setup_class(cls):\n cls.movies_locators = MoviesPageLocators()\n cls.shedule_locators = ShedulePageLocators()\n cls.event_detail_page_locators = MoviesDetailsPageLocators()\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n 
url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def run_timed_benchmark_time_series(series_name, tf_flags, do_training=True):
    """Train (or merely build) a Cromulon network on a benchmark time series
    and print training/evaluation wall-clock metrics.

    :param series_name: Name of the benchmark data source to load.
    :param tf_flags: Flags object holding hyperparameters and file paths.
    :param do_training: If False, skip training and only build the graph
        variables so evaluation can restore a previously saved checkpoint.
    """
    topology = load_default_topology(series_name, tf_flags)

    # Bin edges must be established from the full training set before batching.
    n_train_samples = np.minimum(tf_flags.n_training_samples_benchmark, 10000)
    bin_distribution = _create_bin_distribution(series_name, n_train_samples,
                                                topology)
    batch_size = tf_flags.batch_size
    save_path = io.build_check_point_filename(series_name, topology, tf_flags)

    @printtime(message='Training {} with do_train: {}'.format(series_name,
                                                              int(do_training)))
    def _do_training():
        execution_time = datetime.datetime.now()
        if do_training:
            data_provider = TrainDataProviderForDataSource(
                series_name, D_TYPE, n_train_samples, batch_size, True,
                bin_distribution.bin_edges)
            # FIX: removed dead code that fetched a batch and built
            # ``TrainDataProvider(train_x, train_y, ...)`` -- ``train_y`` was
            # never defined (NameError) and the result was never used.
            tensorflow_path = TensorflowPath(save_path,
                                             tf_flags.model_save_path)
            tensorboard_options = TensorboardOptions(
                tf_flags.tensorboard_log_path, tf_flags.learning_rate,
                batch_size, execution_time)
            crocubot_train.train(topology, data_provider, tensorflow_path,
                                 tensorboard_options, tf_flags)
        else:
            tf.reset_default_graph()
            model = CrocuBotModel(topology)
            model.build_layers_variables()

    train_time, _ = execute_and_get_duration(_do_training)
    print('Training complete.')

    eval_time, _ = execute_and_get_duration(evaluate_network, topology,
                                            series_name, batch_size,
                                            save_path, bin_distribution,
                                            tf_flags)
    print('Metrics:')
    print_time_info(train_time, eval_time)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_timed_benchmark_time_series(series_name, tf_flags, do_training=True):
    """Train (or merely build) a Cromulon network on a benchmark time series
    and print training/evaluation wall-clock metrics.

    :param series_name: Name of the benchmark data source to load.
    :param tf_flags: Flags object holding hyperparameters and file paths.
    :param do_training: If False, skip training and only build the graph
        variables so evaluation can restore a previously saved checkpoint.
    """
    topology = load_default_topology(series_name, tf_flags)

    # Bin edges must be established from the full training set before batching.
    n_train_samples = np.minimum(tf_flags.n_training_samples_benchmark, 10000)
    bin_distribution = _create_bin_distribution(series_name, n_train_samples,
                                                topology)
    batch_size = tf_flags.batch_size
    save_path = io.build_check_point_filename(series_name, topology, tf_flags)

    @printtime(message='Training {} with do_train: {}'.format(series_name,
                                                              int(do_training)))
    def _do_training():
        execution_time = datetime.datetime.now()
        if do_training:
            data_provider = TrainDataProviderForDataSource(
                series_name, D_TYPE, n_train_samples, batch_size, True,
                bin_distribution.bin_edges)
            # FIX: removed dead code that fetched a batch and built
            # ``TrainDataProvider(train_x, train_y, ...)`` -- ``train_y`` was
            # never defined (NameError) and the result was never used.
            tensorflow_path = TensorflowPath(save_path,
                                             tf_flags.model_save_path)
            tensorboard_options = TensorboardOptions(
                tf_flags.tensorboard_log_path, tf_flags.learning_rate,
                batch_size, execution_time)
            crocubot_train.train(topology, data_provider, tensorflow_path,
                                 tensorboard_options, tf_flags)
        else:
            tf.reset_default_graph()
            model = CrocuBotModel(topology)
            model.build_layers_variables()

    train_time, _ = execute_and_get_duration(_do_training)
    print('Training complete.')

    eval_time, _ = execute_and_get_duration(evaluate_network, topology,
                                            series_name, batch_size,
                                            save_path, bin_distribution,
                                            tf_flags)
    print('Metrics:')
    print_time_info(train_time, eval_time)
<|reserved_special_token_0|>
@printtime(message='Evaluation of Stocastic Series')
def evaluate_network(topology, series_name, batch_size, save_path, bin_dist,
                     tf_flags):
    """Restore the trained network from *save_path* and score its forecasts
    against a fresh test batch drawn from the same data source.
    """
    sample_count = 2 * batch_size
    provider = TrainDataProviderForDataSource(series_name, D_TYPE,
                                              sample_count, batch_size, False)
    features, labels = provider.get_batch(1)

    raw_outputs = crocubot_eval.eval_neural_net(features, topology, tf_flags,
                                                save_path)
    means, covariance = crocubot_eval.forecast_means_and_variance(
        raw_outputs, bin_dist, tf_flags)

    # Drop singleton dimensions from the labels before scoring.
    Metrics().evaluate_sample_performance(provider.data_source,
                                          np.squeeze(labels), means,
                                          covariance)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_timed_benchmark_time_series(series_name, tf_flags, do_training=True):
    """Train (or merely build) a Cromulon network on a benchmark time series
    and print training/evaluation wall-clock metrics.

    :param series_name: Name of the benchmark data source to load.
    :param tf_flags: Flags object holding hyperparameters and file paths.
    :param do_training: If False, skip training and only build the graph
        variables so evaluation can restore a previously saved checkpoint.
    """
    topology = load_default_topology(series_name, tf_flags)

    # Bin edges must be established from the full training set before batching.
    n_train_samples = np.minimum(tf_flags.n_training_samples_benchmark, 10000)
    bin_distribution = _create_bin_distribution(series_name, n_train_samples,
                                                topology)
    batch_size = tf_flags.batch_size
    save_path = io.build_check_point_filename(series_name, topology, tf_flags)

    @printtime(message='Training {} with do_train: {}'.format(series_name,
                                                              int(do_training)))
    def _do_training():
        execution_time = datetime.datetime.now()
        if do_training:
            data_provider = TrainDataProviderForDataSource(
                series_name, D_TYPE, n_train_samples, batch_size, True,
                bin_distribution.bin_edges)
            # FIX: removed dead code that fetched a batch and built
            # ``TrainDataProvider(train_x, train_y, ...)`` -- ``train_y`` was
            # never defined (NameError) and the result was never used.
            tensorflow_path = TensorflowPath(save_path,
                                             tf_flags.model_save_path)
            tensorboard_options = TensorboardOptions(
                tf_flags.tensorboard_log_path, tf_flags.learning_rate,
                batch_size, execution_time)
            crocubot_train.train(topology, data_provider, tensorflow_path,
                                 tensorboard_options, tf_flags)
        else:
            tf.reset_default_graph()
            model = CrocuBotModel(topology)
            model.build_layers_variables()

    train_time, _ = execute_and_get_duration(_do_training)
    print('Training complete.')

    eval_time, _ = execute_and_get_duration(evaluate_network, topology,
                                            series_name, batch_size,
                                            save_path, bin_distribution,
                                            tf_flags)
    print('Metrics:')
    print_time_info(train_time, eval_time)
def _create_bin_distribution(series_name, n_training_samples, topology):
    """Derive classification bin edges from one full pass over the
    training data (batch size equals the sample count, so a single batch
    covers everything)."""
    provider = TrainDataProviderForDataSource(series_name, D_TYPE,
                                              n_training_samples,
                                              n_training_samples, True)
    sample = provider.get_batch(0)
    return BinDistribution(sample.labels, topology.n_classification_bins)
@printtime(message='Evaluation of Stocastic Series')
def evaluate_network(topology, series_name, batch_size, save_path, bin_dist,
                     tf_flags):
    """Restore the trained network from *save_path* and score its forecasts
    against a fresh test batch drawn from the same data source.
    """
    sample_count = 2 * batch_size
    provider = TrainDataProviderForDataSource(series_name, D_TYPE,
                                              sample_count, batch_size, False)
    features, labels = provider.get_batch(1)

    raw_outputs = crocubot_eval.eval_neural_net(features, topology, tf_flags,
                                                save_path)
    means, covariance = crocubot_eval.forecast_means_and_variance(
        raw_outputs, bin_dist, tf_flags)

    # Drop singleton dimensions from the labels before scoring.
    Metrics().evaluate_sample_performance(provider.data_source,
                                          np.squeeze(labels), means,
                                          covariance)
<|reserved_special_token_1|>
import datetime
import numpy as np
import tensorflow as tf
from alphai_time_series.performance_trials.performance import Metrics
import alphai_cromulon_oracle.cromulon.evaluate as crocubot_eval
import alphai_cromulon_oracle.cromulon.train as crocubot_train
from alphai_cromulon_oracle.cromulon.helpers import TensorflowPath, TensorboardOptions
from alphai_cromulon_oracle.cromulon.model import CrocuBotModel
from alphai_feature_generation.classifier import BinDistribution
from alphai_cromulon_oracle.data.providers import TrainDataProviderForDataSource
from alphai_cromulon_oracle.helpers import printtime, execute_and_get_duration
import examples.iotools as io
from examples.benchmark.helpers import print_time_info
from examples.helpers import D_TYPE, load_default_topology
def run_timed_benchmark_time_series(series_name, tf_flags, do_training=True):
    """Train (or merely build) a Cromulon network on a benchmark time series
    and print training/evaluation wall-clock metrics.

    :param series_name: Name of the benchmark data source to load.
    :param tf_flags: Flags object holding hyperparameters and file paths.
    :param do_training: If False, skip training and only build the graph
        variables so evaluation can restore a previously saved checkpoint.
    """
    topology = load_default_topology(series_name, tf_flags)

    # Bin edges must be established from the full training set before batching.
    n_train_samples = np.minimum(tf_flags.n_training_samples_benchmark, 10000)
    bin_distribution = _create_bin_distribution(series_name, n_train_samples,
                                                topology)
    batch_size = tf_flags.batch_size
    save_path = io.build_check_point_filename(series_name, topology, tf_flags)

    @printtime(message='Training {} with do_train: {}'.format(series_name,
                                                              int(do_training)))
    def _do_training():
        execution_time = datetime.datetime.now()
        if do_training:
            data_provider = TrainDataProviderForDataSource(
                series_name, D_TYPE, n_train_samples, batch_size, True,
                bin_distribution.bin_edges)
            # FIX: removed dead code that fetched a batch and built
            # ``TrainDataProvider(train_x, train_y, ...)`` -- ``train_y`` was
            # never defined (NameError) and the result was never used.
            tensorflow_path = TensorflowPath(save_path,
                                             tf_flags.model_save_path)
            tensorboard_options = TensorboardOptions(
                tf_flags.tensorboard_log_path, tf_flags.learning_rate,
                batch_size, execution_time)
            crocubot_train.train(topology, data_provider, tensorflow_path,
                                 tensorboard_options, tf_flags)
        else:
            tf.reset_default_graph()
            model = CrocuBotModel(topology)
            model.build_layers_variables()

    train_time, _ = execute_and_get_duration(_do_training)
    print('Training complete.')

    eval_time, _ = execute_and_get_duration(evaluate_network, topology,
                                            series_name, batch_size,
                                            save_path, bin_distribution,
                                            tf_flags)
    print('Metrics:')
    print_time_info(train_time, eval_time)
def _create_bin_distribution(series_name, n_training_samples, topology):
    """Derive classification bin edges from one full pass over the
    training data (batch size equals the sample count, so a single batch
    covers everything)."""
    provider = TrainDataProviderForDataSource(series_name, D_TYPE,
                                              n_training_samples,
                                              n_training_samples, True)
    sample = provider.get_batch(0)
    return BinDistribution(sample.labels, topology.n_classification_bins)
@printtime(message='Evaluation of Stocastic Series')
def evaluate_network(topology, series_name, batch_size, save_path, bin_dist,
                     tf_flags):
    """Restore the trained network from *save_path* and score its forecasts
    against a fresh test batch drawn from the same data source.
    """
    sample_count = 2 * batch_size
    provider = TrainDataProviderForDataSource(series_name, D_TYPE,
                                              sample_count, batch_size, False)
    features, labels = provider.get_batch(1)

    raw_outputs = crocubot_eval.eval_neural_net(features, topology, tf_flags,
                                                save_path)
    means, covariance = crocubot_eval.forecast_means_and_variance(
        raw_outputs, bin_dist, tf_flags)

    # Drop singleton dimensions from the labels before scoring.
    Metrics().evaluate_sample_performance(provider.data_source,
                                          np.squeeze(labels), means,
                                          covariance)
<|reserved_special_token_1|>
import datetime
import numpy as np
import tensorflow as tf
from alphai_time_series.performance_trials.performance import Metrics
import alphai_cromulon_oracle.cromulon.evaluate as crocubot_eval
import alphai_cromulon_oracle.cromulon.train as crocubot_train
from alphai_cromulon_oracle.cromulon.helpers import TensorflowPath, TensorboardOptions
from alphai_cromulon_oracle.cromulon.model import CrocuBotModel
from alphai_feature_generation.classifier import BinDistribution
from alphai_cromulon_oracle.data.providers import TrainDataProviderForDataSource
from alphai_cromulon_oracle.helpers import printtime, execute_and_get_duration
import examples.iotools as io
from examples.benchmark.helpers import print_time_info
from examples.helpers import D_TYPE, load_default_topology
def run_timed_benchmark_time_series(series_name, tf_flags, do_training=True):
    """Train (or merely build) a Cromulon network on a benchmark time series
    and print training/evaluation wall-clock metrics.

    :param series_name: Name of the benchmark data source to load.
    :param tf_flags: Flags object holding hyperparameters and file paths.
    :param do_training: If False, skip training and only build the graph
        variables so evaluation can restore a previously saved checkpoint.
    """
    topology = load_default_topology(series_name, tf_flags)

    # First need to establish bin edges using full training set
    n_train_samples = np.minimum(tf_flags.n_training_samples_benchmark, 10000)
    bin_distribution = _create_bin_distribution(series_name, n_train_samples,
                                                topology)
    batch_size = tf_flags.batch_size
    save_path = io.build_check_point_filename(series_name, topology, tf_flags)

    @printtime(message="Training {} with do_train: {}".format(series_name,
                                                              int(do_training)))
    def _do_training():
        execution_time = datetime.datetime.now()
        if do_training:
            data_provider = TrainDataProviderForDataSource(
                series_name, D_TYPE, n_train_samples, batch_size, True,
                bin_distribution.bin_edges)
            # FIX: removed dead code that fetched a batch and built
            # ``TrainDataProvider(train_x, train_y, ...)`` -- ``train_y`` was
            # never defined (NameError) and the result was never used.
            tensorflow_path = TensorflowPath(save_path,
                                             tf_flags.model_save_path)
            tensorboard_options = TensorboardOptions(
                tf_flags.tensorboard_log_path, tf_flags.learning_rate,
                batch_size, execution_time)
            crocubot_train.train(topology, data_provider, tensorflow_path,
                                 tensorboard_options, tf_flags)
        else:
            tf.reset_default_graph()
            model = CrocuBotModel(topology)
            model.build_layers_variables()

    train_time, _ = execute_and_get_duration(_do_training)
    print("Training complete.")

    eval_time, _ = execute_and_get_duration(evaluate_network, topology,
                                            series_name, batch_size,
                                            save_path, bin_distribution,
                                            tf_flags)
    print('Metrics:')
    print_time_info(train_time, eval_time)
def _create_bin_distribution(series_name, n_training_samples, topology):
    """Derive classification bin edges from one full pass over the
    training data (batch size equals the sample count, so a single batch
    covers everything)."""
    provider = TrainDataProviderForDataSource(series_name, D_TYPE,
                                              n_training_samples,
                                              n_training_samples, True)
    sample = provider.get_batch(0)
    return BinDistribution(sample.labels, topology.n_classification_bins)
@printtime(message="Evaluation of Stocastic Series")
def evaluate_network(topology, series_name, batch_size, save_path, bin_dist,
                     tf_flags):
    """Restore the trained network from *save_path* and score its forecasts
    against a fresh test batch drawn from the same data source.
    """
    sample_count = 2 * batch_size
    provider = TrainDataProviderForDataSource(series_name, D_TYPE,
                                              sample_count, batch_size, False)
    features, labels = provider.get_batch(1)

    raw_outputs = crocubot_eval.eval_neural_net(features, topology, tf_flags,
                                                save_path)
    means, covariance = crocubot_eval.forecast_means_and_variance(
        raw_outputs, bin_dist, tf_flags)

    # Drop singleton dimensions from the labels before scoring.
    Metrics().evaluate_sample_performance(provider.data_source,
                                          np.squeeze(labels), means,
                                          covariance)
|
flexible
|
{
"blob_id": "bef16443f77b2c1e09db9950a4617703085d9f71",
"index": 7807,
"step-1": "<mask token>\n\n\ndef run_timed_benchmark_time_series(series_name, tf_flags, do_training=True):\n topology = load_default_topology(series_name, tf_flags)\n n_train_samples = np.minimum(tf_flags.n_training_samples_benchmark, 10000)\n bin_distribution = _create_bin_distribution(series_name,\n n_train_samples, topology)\n batch_size = tf_flags.batch_size\n save_path = io.build_check_point_filename(series_name, topology, tf_flags)\n\n @printtime(message='Training {} with do_train: {}'.format(series_name,\n int(do_training)))\n def _do_training():\n execution_time = datetime.datetime.now()\n if do_training:\n data_provider = TrainDataProviderForDataSource(series_name,\n D_TYPE, n_train_samples, batch_size, True, bin_distribution\n .bin_edges)\n train_x = data_provider.get_batch(0)\n raw_train_data = TrainDataProvider(train_x, train_y, tf_flags.\n batch_size)\n tensorflow_path = TensorflowPath(save_path, tf_flags.\n model_save_path)\n tensorboard_options = TensorboardOptions(tf_flags.\n tensorboard_log_path, tf_flags.learning_rate, batch_size,\n execution_time)\n crocubot_train.train(topology, data_provider, tensorflow_path,\n tensorboard_options, tf_flags)\n else:\n tf.reset_default_graph()\n model = CrocuBotModel(topology)\n model.build_layers_variables()\n train_time, _ = execute_and_get_duration(_do_training)\n print('Training complete.')\n eval_time, _ = execute_and_get_duration(evaluate_network, topology,\n series_name, batch_size, save_path, bin_distribution, tf_flags)\n print('Metrics:')\n print_time_info(train_time, eval_time)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_timed_benchmark_time_series(series_name, tf_flags, do_training=True):\n topology = load_default_topology(series_name, tf_flags)\n n_train_samples = np.minimum(tf_flags.n_training_samples_benchmark, 10000)\n bin_distribution = _create_bin_distribution(series_name,\n n_train_samples, topology)\n batch_size = tf_flags.batch_size\n save_path = io.build_check_point_filename(series_name, topology, tf_flags)\n\n @printtime(message='Training {} with do_train: {}'.format(series_name,\n int(do_training)))\n def _do_training():\n execution_time = datetime.datetime.now()\n if do_training:\n data_provider = TrainDataProviderForDataSource(series_name,\n D_TYPE, n_train_samples, batch_size, True, bin_distribution\n .bin_edges)\n train_x = data_provider.get_batch(0)\n raw_train_data = TrainDataProvider(train_x, train_y, tf_flags.\n batch_size)\n tensorflow_path = TensorflowPath(save_path, tf_flags.\n model_save_path)\n tensorboard_options = TensorboardOptions(tf_flags.\n tensorboard_log_path, tf_flags.learning_rate, batch_size,\n execution_time)\n crocubot_train.train(topology, data_provider, tensorflow_path,\n tensorboard_options, tf_flags)\n else:\n tf.reset_default_graph()\n model = CrocuBotModel(topology)\n model.build_layers_variables()\n train_time, _ = execute_and_get_duration(_do_training)\n print('Training complete.')\n eval_time, _ = execute_and_get_duration(evaluate_network, topology,\n series_name, batch_size, save_path, bin_distribution, tf_flags)\n print('Metrics:')\n print_time_info(train_time, eval_time)\n\n\n<mask token>\n\n\n@printtime(message='Evaluation of Stocastic Series')\ndef evaluate_network(topology, series_name, batch_size, save_path, bin_dist,\n tf_flags):\n n_training_samples = batch_size * 2\n data_provider = TrainDataProviderForDataSource(series_name, D_TYPE,\n n_training_samples, batch_size, False)\n test_features, test_labels = data_provider.get_batch(1)\n binned_outputs = 
crocubot_eval.eval_neural_net(test_features, topology,\n tf_flags, save_path)\n estimated_means, estimated_covariance = (crocubot_eval.\n forecast_means_and_variance(binned_outputs, bin_dist, tf_flags))\n test_labels = np.squeeze(test_labels)\n model_metrics = Metrics()\n model_metrics.evaluate_sample_performance(data_provider.data_source,\n test_labels, estimated_means, estimated_covariance)\n",
"step-3": "<mask token>\n\n\ndef run_timed_benchmark_time_series(series_name, tf_flags, do_training=True):\n topology = load_default_topology(series_name, tf_flags)\n n_train_samples = np.minimum(tf_flags.n_training_samples_benchmark, 10000)\n bin_distribution = _create_bin_distribution(series_name,\n n_train_samples, topology)\n batch_size = tf_flags.batch_size\n save_path = io.build_check_point_filename(series_name, topology, tf_flags)\n\n @printtime(message='Training {} with do_train: {}'.format(series_name,\n int(do_training)))\n def _do_training():\n execution_time = datetime.datetime.now()\n if do_training:\n data_provider = TrainDataProviderForDataSource(series_name,\n D_TYPE, n_train_samples, batch_size, True, bin_distribution\n .bin_edges)\n train_x = data_provider.get_batch(0)\n raw_train_data = TrainDataProvider(train_x, train_y, tf_flags.\n batch_size)\n tensorflow_path = TensorflowPath(save_path, tf_flags.\n model_save_path)\n tensorboard_options = TensorboardOptions(tf_flags.\n tensorboard_log_path, tf_flags.learning_rate, batch_size,\n execution_time)\n crocubot_train.train(topology, data_provider, tensorflow_path,\n tensorboard_options, tf_flags)\n else:\n tf.reset_default_graph()\n model = CrocuBotModel(topology)\n model.build_layers_variables()\n train_time, _ = execute_and_get_duration(_do_training)\n print('Training complete.')\n eval_time, _ = execute_and_get_duration(evaluate_network, topology,\n series_name, batch_size, save_path, bin_distribution, tf_flags)\n print('Metrics:')\n print_time_info(train_time, eval_time)\n\n\ndef _create_bin_distribution(series_name, n_training_samples, topology):\n data_provider = TrainDataProviderForDataSource(series_name, D_TYPE,\n n_training_samples, n_training_samples, True)\n train_data = data_provider.get_batch(0)\n return BinDistribution(train_data.labels, topology.n_classification_bins)\n\n\n@printtime(message='Evaluation of Stocastic Series')\ndef evaluate_network(topology, series_name, batch_size, 
save_path, bin_dist,\n tf_flags):\n n_training_samples = batch_size * 2\n data_provider = TrainDataProviderForDataSource(series_name, D_TYPE,\n n_training_samples, batch_size, False)\n test_features, test_labels = data_provider.get_batch(1)\n binned_outputs = crocubot_eval.eval_neural_net(test_features, topology,\n tf_flags, save_path)\n estimated_means, estimated_covariance = (crocubot_eval.\n forecast_means_and_variance(binned_outputs, bin_dist, tf_flags))\n test_labels = np.squeeze(test_labels)\n model_metrics = Metrics()\n model_metrics.evaluate_sample_performance(data_provider.data_source,\n test_labels, estimated_means, estimated_covariance)\n",
"step-4": "import datetime\nimport numpy as np\nimport tensorflow as tf\nfrom alphai_time_series.performance_trials.performance import Metrics\nimport alphai_cromulon_oracle.cromulon.evaluate as crocubot_eval\nimport alphai_cromulon_oracle.cromulon.train as crocubot_train\nfrom alphai_cromulon_oracle.cromulon.helpers import TensorflowPath, TensorboardOptions\nfrom alphai_cromulon_oracle.cromulon.model import CrocuBotModel\nfrom alphai_feature_generation.classifier import BinDistribution\nfrom alphai_cromulon_oracle.data.providers import TrainDataProviderForDataSource\nfrom alphai_cromulon_oracle.helpers import printtime, execute_and_get_duration\nimport examples.iotools as io\nfrom examples.benchmark.helpers import print_time_info\nfrom examples.helpers import D_TYPE, load_default_topology\n\n\ndef run_timed_benchmark_time_series(series_name, tf_flags, do_training=True):\n topology = load_default_topology(series_name, tf_flags)\n n_train_samples = np.minimum(tf_flags.n_training_samples_benchmark, 10000)\n bin_distribution = _create_bin_distribution(series_name,\n n_train_samples, topology)\n batch_size = tf_flags.batch_size\n save_path = io.build_check_point_filename(series_name, topology, tf_flags)\n\n @printtime(message='Training {} with do_train: {}'.format(series_name,\n int(do_training)))\n def _do_training():\n execution_time = datetime.datetime.now()\n if do_training:\n data_provider = TrainDataProviderForDataSource(series_name,\n D_TYPE, n_train_samples, batch_size, True, bin_distribution\n .bin_edges)\n train_x = data_provider.get_batch(0)\n raw_train_data = TrainDataProvider(train_x, train_y, tf_flags.\n batch_size)\n tensorflow_path = TensorflowPath(save_path, tf_flags.\n model_save_path)\n tensorboard_options = TensorboardOptions(tf_flags.\n tensorboard_log_path, tf_flags.learning_rate, batch_size,\n execution_time)\n crocubot_train.train(topology, data_provider, tensorflow_path,\n tensorboard_options, tf_flags)\n else:\n tf.reset_default_graph()\n 
model = CrocuBotModel(topology)\n model.build_layers_variables()\n train_time, _ = execute_and_get_duration(_do_training)\n print('Training complete.')\n eval_time, _ = execute_and_get_duration(evaluate_network, topology,\n series_name, batch_size, save_path, bin_distribution, tf_flags)\n print('Metrics:')\n print_time_info(train_time, eval_time)\n\n\ndef _create_bin_distribution(series_name, n_training_samples, topology):\n data_provider = TrainDataProviderForDataSource(series_name, D_TYPE,\n n_training_samples, n_training_samples, True)\n train_data = data_provider.get_batch(0)\n return BinDistribution(train_data.labels, topology.n_classification_bins)\n\n\n@printtime(message='Evaluation of Stocastic Series')\ndef evaluate_network(topology, series_name, batch_size, save_path, bin_dist,\n tf_flags):\n n_training_samples = batch_size * 2\n data_provider = TrainDataProviderForDataSource(series_name, D_TYPE,\n n_training_samples, batch_size, False)\n test_features, test_labels = data_provider.get_batch(1)\n binned_outputs = crocubot_eval.eval_neural_net(test_features, topology,\n tf_flags, save_path)\n estimated_means, estimated_covariance = (crocubot_eval.\n forecast_means_and_variance(binned_outputs, bin_dist, tf_flags))\n test_labels = np.squeeze(test_labels)\n model_metrics = Metrics()\n model_metrics.evaluate_sample_performance(data_provider.data_source,\n test_labels, estimated_means, estimated_covariance)\n",
"step-5": "import datetime\n\nimport numpy as np\nimport tensorflow as tf\nfrom alphai_time_series.performance_trials.performance import Metrics\n\nimport alphai_cromulon_oracle.cromulon.evaluate as crocubot_eval\nimport alphai_cromulon_oracle.cromulon.train as crocubot_train\n\nfrom alphai_cromulon_oracle.cromulon.helpers import TensorflowPath, TensorboardOptions\nfrom alphai_cromulon_oracle.cromulon.model import CrocuBotModel\nfrom alphai_feature_generation.classifier import BinDistribution\nfrom alphai_cromulon_oracle.data.providers import TrainDataProviderForDataSource\nfrom alphai_cromulon_oracle.helpers import printtime, execute_and_get_duration\n\nimport examples.iotools as io\nfrom examples.benchmark.helpers import print_time_info\nfrom examples.helpers import D_TYPE, load_default_topology\n\n\ndef run_timed_benchmark_time_series(series_name, tf_flags, do_training=True):\n\n topology = load_default_topology(series_name, tf_flags)\n\n # First need to establish bin edges using full training set\n n_train_samples = np.minimum(tf_flags.n_training_samples_benchmark, 10000)\n\n bin_distribution = _create_bin_distribution(series_name, n_train_samples, topology)\n batch_size = tf_flags.batch_size\n save_path = io.build_check_point_filename(series_name, topology, tf_flags)\n\n @printtime(message=\"Training {} with do_train: {}\".format(series_name, int(do_training)))\n def _do_training():\n execution_time = datetime.datetime.now()\n if do_training:\n\n data_provider = TrainDataProviderForDataSource(\n series_name,\n D_TYPE,\n n_train_samples,\n batch_size,\n True,\n bin_distribution.bin_edges\n )\n\n\n train_x = data_provider.get_batch(0)\n raw_train_data = TrainDataProvider(train_x, train_y, tf_flags.batch_size)\n\n tensorflow_path = TensorflowPath(save_path, tf_flags.model_save_path)\n tensorboard_options = TensorboardOptions(tf_flags.tensorboard_log_path,\n tf_flags.learning_rate,\n batch_size,\n execution_time\n )\n crocubot_train.train(topology,\n 
data_provider,\n tensorflow_path,\n tensorboard_options,\n tf_flags\n )\n else:\n tf.reset_default_graph()\n model = CrocuBotModel(topology)\n model.build_layers_variables()\n\n train_time, _ = execute_and_get_duration(_do_training)\n\n print(\"Training complete.\")\n\n eval_time, _ = execute_and_get_duration(evaluate_network, topology, series_name, batch_size,\n save_path, bin_distribution, tf_flags)\n\n print('Metrics:')\n print_time_info(train_time, eval_time)\n\n\ndef _create_bin_distribution(series_name, n_training_samples, topology):\n data_provider = TrainDataProviderForDataSource(series_name, D_TYPE, n_training_samples, n_training_samples, True)\n train_data = data_provider.get_batch(0)\n\n return BinDistribution(train_data.labels, topology.n_classification_bins)\n\n\n@printtime(message=\"Evaluation of Stocastic Series\")\ndef evaluate_network(topology, series_name, batch_size, save_path, bin_dist, tf_flags):\n\n n_training_samples = batch_size * 2\n data_provider = TrainDataProviderForDataSource(series_name, D_TYPE, n_training_samples, batch_size, False)\n\n test_features, test_labels = data_provider.get_batch(1)\n\n binned_outputs = crocubot_eval.eval_neural_net(test_features, topology, tf_flags, save_path)\n\n estimated_means, estimated_covariance = crocubot_eval.forecast_means_and_variance(\n binned_outputs, bin_dist, tf_flags)\n test_labels = np.squeeze(test_labels)\n\n model_metrics = Metrics()\n model_metrics.evaluate_sample_performance(\n data_provider.data_source,\n test_labels,\n estimated_means,\n estimated_covariance\n )\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import unittest
import brainfuck
import sys
from StringIO import StringIO
def run_program(program, input = None):
old_stdout = sys.stdout
old_stdin = sys.stdin
try:
out = StringIO()
sys.stdout = out
if input is not None:
input = StringIO(input)
sys.stdin = input
brainfuck.brainfuck(program)
finally:
sys.stdout = old_stdout
sys.stdin = old_stdin
return out.getvalue().strip()
class TestInterpreter(unittest.TestCase):
def setUp(self):
brainfuck.set_cell_size()
def test_HelloWorld(self):
result = run_program("""
++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.
+++++++..+++.>++.<<+++++++++++++++.>.+++.------.-
-------.>+.>.""")
self.assertEquals(result, "Hello World!")
def test_Squares(self):
result = run_program("""
++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>
-]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+
+>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<
-]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]""")
expected_result = "\n".join([str(x**2) for x in range(101)])
self.assertEquals(result, expected_result)
def test_ROT13(self):
result = run_program("""
-,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]
>>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++
++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[
-]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]""", "applesauce")
self.assertEquals(result, "nccyrfnhpr")
def test_Clean(self):
self.assertRaises(Exception, brainfuck.clean, "[[]")
self.assertRaises(Exception, brainfuck.clean, "][")
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "19ab44cec863560513aadd88b5fd4bb40f75e371",
"index": 2579,
"step-1": "<mask token>\n\n\nclass TestInterpreter(unittest.TestCase):\n <mask token>\n\n def test_HelloWorld(self):\n result = run_program(\n \"\"\"\n ++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.\n +++++++..+++.>++.<<+++++++++++++++.>.+++.------.-\n -------.>+.>.\"\"\"\n )\n self.assertEquals(result, 'Hello World!')\n <mask token>\n\n def test_ROT13(self):\n result = run_program(\n \"\"\"\n -,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]\n >>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++\n ++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[\n -]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]\"\"\"\n , 'applesauce')\n self.assertEquals(result, 'nccyrfnhpr')\n\n def test_Clean(self):\n self.assertRaises(Exception, brainfuck.clean, '[[]')\n self.assertRaises(Exception, brainfuck.clean, '][')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestInterpreter(unittest.TestCase):\n\n def setUp(self):\n brainfuck.set_cell_size()\n\n def test_HelloWorld(self):\n result = run_program(\n \"\"\"\n ++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.\n +++++++..+++.>++.<<+++++++++++++++.>.+++.------.-\n -------.>+.>.\"\"\"\n )\n self.assertEquals(result, 'Hello World!')\n\n def test_Squares(self):\n result = run_program(\n \"\"\"\n ++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>\n -]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+\n +>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<\n -]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]\"\"\"\n )\n expected_result = '\\n'.join([str(x ** 2) for x in range(101)])\n self.assertEquals(result, expected_result)\n\n def test_ROT13(self):\n result = run_program(\n \"\"\"\n -,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]\n >>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++\n ++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[\n -]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]\"\"\"\n , 'applesauce')\n self.assertEquals(result, 'nccyrfnhpr')\n\n def test_Clean(self):\n self.assertRaises(Exception, brainfuck.clean, '[[]')\n self.assertRaises(Exception, brainfuck.clean, '][')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run_program(program, input=None):\n old_stdout = sys.stdout\n old_stdin = sys.stdin\n try:\n out = StringIO()\n sys.stdout = out\n if input is not None:\n input = StringIO(input)\n sys.stdin = input\n brainfuck.brainfuck(program)\n finally:\n sys.stdout = old_stdout\n sys.stdin = old_stdin\n return out.getvalue().strip()\n\n\nclass TestInterpreter(unittest.TestCase):\n\n def setUp(self):\n brainfuck.set_cell_size()\n\n def test_HelloWorld(self):\n result = run_program(\n \"\"\"\n ++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.\n +++++++..+++.>++.<<+++++++++++++++.>.+++.------.-\n -------.>+.>.\"\"\"\n )\n self.assertEquals(result, 'Hello World!')\n\n def test_Squares(self):\n result = run_program(\n \"\"\"\n ++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>\n -]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+\n +>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<\n -]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]\"\"\"\n )\n expected_result = '\\n'.join([str(x ** 2) for x in range(101)])\n self.assertEquals(result, expected_result)\n\n def test_ROT13(self):\n result = run_program(\n \"\"\"\n -,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]\n >>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++\n ++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[\n -]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]\"\"\"\n , 'applesauce')\n self.assertEquals(result, 'nccyrfnhpr')\n\n def test_Clean(self):\n self.assertRaises(Exception, brainfuck.clean, '[[]')\n self.assertRaises(Exception, brainfuck.clean, '][')\n\n\n<mask token>\n",
"step-4": "import unittest\nimport brainfuck\nimport sys\nfrom StringIO import StringIO\n\n\ndef run_program(program, input=None):\n old_stdout = sys.stdout\n old_stdin = sys.stdin\n try:\n out = StringIO()\n sys.stdout = out\n if input is not None:\n input = StringIO(input)\n sys.stdin = input\n brainfuck.brainfuck(program)\n finally:\n sys.stdout = old_stdout\n sys.stdin = old_stdin\n return out.getvalue().strip()\n\n\nclass TestInterpreter(unittest.TestCase):\n\n def setUp(self):\n brainfuck.set_cell_size()\n\n def test_HelloWorld(self):\n result = run_program(\n \"\"\"\n ++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.\n +++++++..+++.>++.<<+++++++++++++++.>.+++.------.-\n -------.>+.>.\"\"\"\n )\n self.assertEquals(result, 'Hello World!')\n\n def test_Squares(self):\n result = run_program(\n \"\"\"\n ++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>\n -]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+\n +>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<\n -]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]\"\"\"\n )\n expected_result = '\\n'.join([str(x ** 2) for x in range(101)])\n self.assertEquals(result, expected_result)\n\n def test_ROT13(self):\n result = run_program(\n \"\"\"\n -,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]\n >>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++\n ++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[\n -]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]\"\"\"\n , 'applesauce')\n self.assertEquals(result, 'nccyrfnhpr')\n\n def test_Clean(self):\n self.assertRaises(Exception, brainfuck.clean, '[[]')\n self.assertRaises(Exception, brainfuck.clean, '][')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport brainfuck\nimport sys\nfrom StringIO import StringIO\n\n\ndef run_program(program, input = None):\n old_stdout = sys.stdout\n old_stdin = sys.stdin\n try:\n out = StringIO()\n sys.stdout = out\n if input is not None:\n input = StringIO(input) \n sys.stdin = input\n brainfuck.brainfuck(program)\n finally:\n sys.stdout = old_stdout\n sys.stdin = old_stdin\n\n return out.getvalue().strip()\n\nclass TestInterpreter(unittest.TestCase):\n def setUp(self):\n\n brainfuck.set_cell_size()\n\n def test_HelloWorld(self):\n result = run_program(\"\"\"\n ++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.\n +++++++..+++.>++.<<+++++++++++++++.>.+++.------.-\n -------.>+.>.\"\"\")\n self.assertEquals(result, \"Hello World!\")\n def test_Squares(self):\n result = run_program(\"\"\"\n ++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>\n -]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+\n +>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<\n -]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]\"\"\")\n expected_result = \"\\n\".join([str(x**2) for x in range(101)])\n self.assertEquals(result, expected_result)\n\n def test_ROT13(self):\n result = run_program(\"\"\"\n -,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]\n >>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++\n ++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[\n -]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]\"\"\", \"applesauce\")\n self.assertEquals(result, \"nccyrfnhpr\")\n \n def test_Clean(self):\n self.assertRaises(Exception, brainfuck.clean, \"[[]\")\n self.assertRaises(Exception, brainfuck.clean, \"][\")\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
6,
7,
9,
10
]
}
|
[
4,
6,
7,
9,
10
] |
import matplotlib.pyplot as plt
import numpy as np
from tti_explorer.contacts import he_infection_profile
plt.style.use('default')
loc = 0
# taken from He et al
gamma_params = {
'a': 2.11,
'loc': loc,
'scale': 1/0.69
}
t = 10
days = np.arange(t)
mass = he_infection_profile(t, gamma_params)
fig, ax = plt.subplots(1, figsize=(9*0.8, 5*0.8))
xaxis = np.linspace(-2, t, 1000)
ax.bar(
np.arange(5)+0.1,
[1/5, 1/5, 1/5, 1/5, 1/5],
label="Kucharski profile",
align="edge",
color="C1",
zorder=1,
alpha=0.6
)
ax.bar(days, mass, label="Discretised", align="edge", zorder=1)
ax.legend(loc="upper right")
ax.set_axis_on()
ax.set_ylabel('Secondary attack profile')
ax.set_xlabel('Days since start of infectious period')
ax.set_xticks(days)
plt.show()
# fig.savefig('./charts/inf_profile.pdf')
|
normal
|
{
"blob_id": "fa5cbbd03641d2937e4502ce459d64d20b5ee227",
"index": 8630,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.style.use('default')\n<mask token>\nax.bar(np.arange(5) + 0.1, [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5], label=\n 'Kucharski profile', align='edge', color='C1', zorder=1, alpha=0.6)\nax.bar(days, mass, label='Discretised', align='edge', zorder=1)\nax.legend(loc='upper right')\nax.set_axis_on()\nax.set_ylabel('Secondary attack profile')\nax.set_xlabel('Days since start of infectious period')\nax.set_xticks(days)\nplt.show()\n",
"step-3": "<mask token>\nplt.style.use('default')\nloc = 0\ngamma_params = {'a': 2.11, 'loc': loc, 'scale': 1 / 0.69}\nt = 10\ndays = np.arange(t)\nmass = he_infection_profile(t, gamma_params)\nfig, ax = plt.subplots(1, figsize=(9 * 0.8, 5 * 0.8))\nxaxis = np.linspace(-2, t, 1000)\nax.bar(np.arange(5) + 0.1, [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5], label=\n 'Kucharski profile', align='edge', color='C1', zorder=1, alpha=0.6)\nax.bar(days, mass, label='Discretised', align='edge', zorder=1)\nax.legend(loc='upper right')\nax.set_axis_on()\nax.set_ylabel('Secondary attack profile')\nax.set_xlabel('Days since start of infectious period')\nax.set_xticks(days)\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom tti_explorer.contacts import he_infection_profile\nplt.style.use('default')\nloc = 0\ngamma_params = {'a': 2.11, 'loc': loc, 'scale': 1 / 0.69}\nt = 10\ndays = np.arange(t)\nmass = he_infection_profile(t, gamma_params)\nfig, ax = plt.subplots(1, figsize=(9 * 0.8, 5 * 0.8))\nxaxis = np.linspace(-2, t, 1000)\nax.bar(np.arange(5) + 0.1, [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5], label=\n 'Kucharski profile', align='edge', color='C1', zorder=1, alpha=0.6)\nax.bar(days, mass, label='Discretised', align='edge', zorder=1)\nax.legend(loc='upper right')\nax.set_axis_on()\nax.set_ylabel('Secondary attack profile')\nax.set_xlabel('Days since start of infectious period')\nax.set_xticks(days)\nplt.show()\n",
"step-5": "\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom tti_explorer.contacts import he_infection_profile\n\nplt.style.use('default')\nloc = 0\n# taken from He et al\ngamma_params = {\n 'a': 2.11,\n 'loc': loc,\n 'scale': 1/0.69\n}\nt = 10\ndays = np.arange(t)\n\nmass = he_infection_profile(t, gamma_params)\n\nfig, ax = plt.subplots(1, figsize=(9*0.8, 5*0.8))\nxaxis = np.linspace(-2, t, 1000)\nax.bar(\n np.arange(5)+0.1,\n [1/5, 1/5, 1/5, 1/5, 1/5],\n label=\"Kucharski profile\",\n align=\"edge\",\n color=\"C1\",\n zorder=1,\n alpha=0.6\n)\nax.bar(days, mass, label=\"Discretised\", align=\"edge\", zorder=1)\nax.legend(loc=\"upper right\")\nax.set_axis_on()\nax.set_ylabel('Secondary attack profile')\nax.set_xlabel('Days since start of infectious period')\nax.set_xticks(days)\nplt.show()\n# fig.savefig('./charts/inf_profile.pdf')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
with expression [as var]
#...BODY...
#object is the result of the expression and must have __enter__ and __exit__ methods
#result of the expression must be context manager - implements context management protocol
#https://www.python.org/dev/peps/pep-0343/
# This PEP adds a new statement "with" to the Python language to make
# it possible to factor out standard uses of try/finally statements.
# In this PEP, context managers provide __enter__() and __exit__()
# methods that are invoked on entry to and exit from the body of the
# with statement.
|
normal
|
{
"blob_id": "e1787fd4be66d19ab83ece44eacfd96cb488b504",
"index": 722,
"step-1": "with expression [as var]\n\t#...BODY...\n\n#object is the result of the expression and must have __enter__ and __exit__ methods\n#result of the expression must be context manager - implements context management protocol\n\n#https://www.python.org/dev/peps/pep-0343/\n# This PEP adds a new statement \"with\" to the Python language to make\n# it possible to factor out standard uses of try/finally statements.\n\n# In this PEP, context managers provide __enter__() and __exit__()\n# methods that are invoked on entry to and exit from the body of the\n# with statement.",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='User', fields=[('user_id',
models.IntegerField(primary_key=True, serialize=False)), (
'username', models.CharField(max_length=45)), ('userlogin', models.
CharField(max_length=45)), ('avartar_url', models.CharField(blank=
True, max_length=150, null=True))], options={'db_table': 'user'}),
migrations.CreateModel(name='Repos', fields=[('repo_id', models.
IntegerField(primary_key=True, serialize=False)), ('reponame',
models.CharField(max_length=150)), ('owner', models.CharField(
max_length=45)), ('user', models.ForeignKey(on_delete=django.db.
models.deletion.DO_NOTHING, to='attendance.User'))], options={
'db_table': 'repos'})]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='User', fields=[('user_id',
models.IntegerField(primary_key=True, serialize=False)), (
'username', models.CharField(max_length=45)), ('userlogin', models.
CharField(max_length=45)), ('avartar_url', models.CharField(blank=
True, max_length=150, null=True))], options={'db_table': 'user'}),
migrations.CreateModel(name='Repos', fields=[('repo_id', models.
IntegerField(primary_key=True, serialize=False)), ('reponame',
models.CharField(max_length=150)), ('owner', models.CharField(
max_length=45)), ('user', models.ForeignKey(on_delete=django.db.
models.deletion.DO_NOTHING, to='attendance.User'))], options={
'db_table': 'repos'})]
<|reserved_special_token_1|>
# Generated by Django 2.2.6 on 2020-04-06 16:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('user_id', models.IntegerField(primary_key=True, serialize=False)),
('username', models.CharField(max_length=45)),
('userlogin', models.CharField(max_length=45)),
('avartar_url', models.CharField(blank=True, max_length=150, null=True)),
],
options={
'db_table': 'user',
},
),
migrations.CreateModel(
name='Repos',
fields=[
('repo_id', models.IntegerField(primary_key=True, serialize=False)),
('reponame', models.CharField(max_length=150)),
('owner', models.CharField(max_length=45)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='attendance.User')),
],
options={
'db_table': 'repos',
},
),
]
|
flexible
|
{
"blob_id": "1b71789ba7c2191b433a405723fe6c985c926610",
"index": 8620,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='User', fields=[('user_id',\n models.IntegerField(primary_key=True, serialize=False)), (\n 'username', models.CharField(max_length=45)), ('userlogin', models.\n CharField(max_length=45)), ('avartar_url', models.CharField(blank=\n True, max_length=150, null=True))], options={'db_table': 'user'}),\n migrations.CreateModel(name='Repos', fields=[('repo_id', models.\n IntegerField(primary_key=True, serialize=False)), ('reponame',\n models.CharField(max_length=150)), ('owner', models.CharField(\n max_length=45)), ('user', models.ForeignKey(on_delete=django.db.\n models.deletion.DO_NOTHING, to='attendance.User'))], options={\n 'db_table': 'repos'})]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='User', fields=[('user_id',\n models.IntegerField(primary_key=True, serialize=False)), (\n 'username', models.CharField(max_length=45)), ('userlogin', models.\n CharField(max_length=45)), ('avartar_url', models.CharField(blank=\n True, max_length=150, null=True))], options={'db_table': 'user'}),\n migrations.CreateModel(name='Repos', fields=[('repo_id', models.\n IntegerField(primary_key=True, serialize=False)), ('reponame',\n models.CharField(max_length=150)), ('owner', models.CharField(\n max_length=45)), ('user', models.ForeignKey(on_delete=django.db.\n models.deletion.DO_NOTHING, to='attendance.User'))], options={\n 'db_table': 'repos'})]\n",
"step-5": "# Generated by Django 2.2.6 on 2020-04-06 16:47\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='User',\r\n fields=[\r\n ('user_id', models.IntegerField(primary_key=True, serialize=False)),\r\n ('username', models.CharField(max_length=45)),\r\n ('userlogin', models.CharField(max_length=45)),\r\n ('avartar_url', models.CharField(blank=True, max_length=150, null=True)),\r\n ],\r\n options={\r\n 'db_table': 'user',\r\n },\r\n ),\r\n migrations.CreateModel(\r\n name='Repos',\r\n fields=[\r\n ('repo_id', models.IntegerField(primary_key=True, serialize=False)),\r\n ('reponame', models.CharField(max_length=150)),\r\n ('owner', models.CharField(max_length=45)),\r\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='attendance.User')),\r\n ],\r\n options={\r\n 'db_table': 'repos',\r\n },\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import glob
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
from pyproj import Proj
from osgeo import gdal, osr
from PyQt4.QtCore import QFile, QFileInfo
import os
from os import walk
#slika="c:\slike\Zito\DJI_0060.jpg"
#georef_slika="c:\Slike\Zito\Georeferencirana.tif"
radni_dir = 'c:/slike/Zito/testiranje/'
#-----------------Izvlaci LAT LONG----------------------------------------------------------------------------
def exif(img):
exif_data = {}
try:
i = Image.open(img)
tags = i._getexif()
for tag, value in tags.items():
decoded = TAGS.get(tag, tag)
exif_data[decoded] = value
except:
pass
return exif_data
def dms2dd(d, m, s, i):
sec = float((m * 60) + s)
dec = float(sec / 3600)
deg = float(d + dec)
if i.upper() == "W":
deg = deg * -1
elif i.upper() == "S":
deg = deg * -1
return float(deg)
def gps(exif):
lat = None
lon = None
if exif["GPSInfo"]:
# Lat
coords = exif["GPSInfo"]
i = coords[1]
d = coords[2][0][0]
m = coords[2][1][0]
s = coords[2][2][0]
lat = dms2dd(d, m ,s, i)
lat = float(str(d)+str(m)+str(s))/100000000
# Lon
i = coords[3]
d = coords[4][0][0]
m = coords[4][1][0]
s = coords[4][2][0]
lon = float(str(d)+str(m)+str(s))/100000000
return lat, lon
#------------------Pretvara LAT LONG u UTM----------------------------------------------------------------------------
def pretvarac(fotka):
Lat = gps(exif(fotka))[0]
Lon = gps(exif(fotka))[1]
print "Lon/Lat Koordinate slike: ", Lon, " ",Lat
ZoneNo = "34T" # rucno uneseno, a moze se izracunati unaprijed preko alt long
myProj = Proj("+proj=utm +zone="+ZoneNo+",+north +ellps=WGS84 +datum=WGS84 +units=m +no_defs") # north za sjevernu hemisferu
UTMx, UTMy = myProj(Lon, Lat)
round(UTMx, 2)
round(UTMy, 2)
print "UTM Koordinate slike: ", UTMx, " ",UTMy
global UTMx
global UTMy
return UTMx, UTMy
#--------------------Georeferenciranje----------------------------------------------------------------------------
def georeferenciranje(src_filename,dst_filename):
src_ds = gdal.Open(src_filename)
format = "GTiff"
driver = gdal.GetDriverByName(format)
dst_ds = driver.CreateCopy(dst_filename, src_ds, 0)
# Specify raster location through geotransform array
# (uperleftx, scalex, skewx, uperlefty, skewy, scaley)
# Scale = size of one pixel in units of raster projection
# this example below assumes 100x100
gt = [UTMx, 100, 0, UTMy, 0, -100]
dst_ds.SetGeoTransform(gt)
epsg = 3857
srs = osr.SpatialReference()
srs.ImportFromEPSG(epsg)
dest_wkt = srs.ExportToWkt()
dst_ds.SetProjection(dest_wkt)
dst_ds = None
src_ds = None
#-----------------Ubacivanje u QGIS----------------------------------------------------------------------------
def ubacivanje(fileName):
print "ubacujem raster"
#fileName = dst_filename
fileInfo = QFileInfo(fileName)
baseName = fileInfo.baseName()
rlayer = QgsRasterLayer(fileName, baseName)
iface.addRasterLayer(fileName, "Raster Layer Zito")
print "raster ubacen"
#----------------Folder loop------------------------------------------------------------------------------------
li = []
l = 0
os.chdir(radni_dir)
#Uzima listu sa imenima slika ( li )
for file in glob.glob("*.jpg"):
li.append(os.path.splitext(file)[0])
l+= 1
pretvarac(file)
gr = os.path.dirname(file)+str(l)+ '_georeferencirana'+'.tif'
georeferenciranje(file,gr)
ubacivanje(gr)
#pretvarac(slika)
#georeferenciranje(slika,georef_slika)
#ubacivanje(georef_slika)
|
normal
|
{
"blob_id": "e92d770f9d2176b4943653b09ac1069fa3301e46",
"index": 1931,
"step-1": "import glob\r\nfrom PIL import Image\r\nfrom PIL.ExifTags import TAGS, GPSTAGS\r\nfrom pyproj import Proj\r\nfrom osgeo import gdal, osr\r\nfrom PyQt4.QtCore import QFile, QFileInfo\r\nimport os\r\nfrom os import walk\r\n#slika=\"c:\\slike\\Zito\\DJI_0060.jpg\"\r\n#georef_slika=\"c:\\Slike\\Zito\\Georeferencirana.tif\"\r\nradni_dir = 'c:/slike/Zito/testiranje/'\r\n#-----------------Izvlaci LAT LONG----------------------------------------------------------------------------\r\ndef exif(img):\r\n exif_data = {}\r\n try: \r\n i = Image.open(img)\r\n tags = i._getexif()\r\n for tag, value in tags.items():\r\n decoded = TAGS.get(tag, tag)\r\n exif_data[decoded] = value\r\n except:\r\n pass\r\n return exif_data\r\n \r\ndef dms2dd(d, m, s, i):\r\n sec = float((m * 60) + s)\r\n dec = float(sec / 3600)\r\n deg = float(d + dec)\r\n if i.upper() == \"W\":\r\n deg = deg * -1\r\n elif i.upper() == \"S\":\r\n deg = deg * -1\r\n return float(deg)\r\n \r\ndef gps(exif):\r\n lat = None\r\n lon = None\r\n if exif[\"GPSInfo\"]: \r\n # Lat\r\n coords = exif[\"GPSInfo\"]\r\n i = coords[1]\r\n d = coords[2][0][0]\r\n m = coords[2][1][0]\r\n s = coords[2][2][0]\r\n lat = dms2dd(d, m ,s, i)\r\n lat = float(str(d)+str(m)+str(s))/100000000\r\n # Lon\r\n i = coords[3]\r\n d = coords[4][0][0]\r\n m = coords[4][1][0]\r\n s = coords[4][2][0]\r\n lon = float(str(d)+str(m)+str(s))/100000000\r\n return lat, lon\r\n\r\n#------------------Pretvara LAT LONG u UTM----------------------------------------------------------------------------\r\ndef pretvarac(fotka):\r\n Lat = gps(exif(fotka))[0]\r\n Lon = gps(exif(fotka))[1]\r\n print \"Lon/Lat Koordinate slike: \", Lon, \" \",Lat\r\n ZoneNo = \"34T\" # rucno uneseno, a moze se izracunati unaprijed preko alt long\r\n myProj = Proj(\"+proj=utm +zone=\"+ZoneNo+\",+north +ellps=WGS84 +datum=WGS84 +units=m +no_defs\") # north za sjevernu hemisferu\r\n UTMx, UTMy = myProj(Lon, Lat)\r\n round(UTMx, 2)\r\n round(UTMy, 2)\r\n print \"UTM Koordinate 
slike: \", UTMx, \" \",UTMy\r\n global UTMx\r\n global UTMy\r\n return UTMx, UTMy\r\n\r\n#--------------------Georeferenciranje----------------------------------------------------------------------------\r\ndef georeferenciranje(src_filename,dst_filename):\r\n src_ds = gdal.Open(src_filename)\r\n format = \"GTiff\"\r\n driver = gdal.GetDriverByName(format)\r\n dst_ds = driver.CreateCopy(dst_filename, src_ds, 0)\r\n\r\n # Specify raster location through geotransform array\r\n # (uperleftx, scalex, skewx, uperlefty, skewy, scaley)\r\n # Scale = size of one pixel in units of raster projection\r\n # this example below assumes 100x100\r\n gt = [UTMx, 100, 0, UTMy, 0, -100]\r\n\r\n dst_ds.SetGeoTransform(gt)\r\n epsg = 3857\r\n srs = osr.SpatialReference()\r\n srs.ImportFromEPSG(epsg)\r\n dest_wkt = srs.ExportToWkt()\r\n dst_ds.SetProjection(dest_wkt)\r\n dst_ds = None\r\n src_ds = None\r\n#-----------------Ubacivanje u QGIS----------------------------------------------------------------------------\r\ndef ubacivanje(fileName):\r\n print \"ubacujem raster\"\r\n #fileName = dst_filename\r\n fileInfo = QFileInfo(fileName)\r\n baseName = fileInfo.baseName()\r\n rlayer = QgsRasterLayer(fileName, baseName)\r\n iface.addRasterLayer(fileName, \"Raster Layer Zito\")\r\n print \"raster ubacen\"\r\n\r\n#----------------Folder loop------------------------------------------------------------------------------------\r\nli = []\r\nl = 0\r\nos.chdir(radni_dir)\r\n#Uzima listu sa imenima slika ( li )\r\nfor file in glob.glob(\"*.jpg\"):\r\n li.append(os.path.splitext(file)[0])\r\n l+= 1\r\n pretvarac(file)\r\n gr = os.path.dirname(file)+str(l)+ '_georeferencirana'+'.tif'\r\n georeferenciranje(file,gr)\r\n ubacivanje(gr)\r\n \r\n\r\n\r\n\r\n#pretvarac(slika)\r\n#georeferenciranje(slika,georef_slika)\r\n#ubacivanje(georef_slika)\r\n\r\n\r\n\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower(
) in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
if 'file' not in request.files:
print('No file attached in request')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
print('No file selected')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename
), filename)
return redirect(url_for('uploaded_file', filename=filename))
return render_template('index.html')
def process_file(path, filename):
check_encoding(path, filename)
def check_encoding(path, filename):
with open(path, 'rb') as rawdata:
result = chardet.detect(rawdata.read(10000))
df = pd.read_csv(path, encoding=result['encoding'])
GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(
'.', 1)[0] + '.xlsx')
df.to_excel(GFG, index=False, encoding='utf-8')
GFG.save()
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.
rsplit('.', 1)[0] + '.xlsx', as_attachment=True)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower(
) in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
if 'file' not in request.files:
print('No file attached in request')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
print('No file selected')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename
), filename)
return redirect(url_for('uploaded_file', filename=filename))
return render_template('index.html')
def process_file(path, filename):
check_encoding(path, filename)
def check_encoding(path, filename):
with open(path, 'rb') as rawdata:
result = chardet.detect(rawdata.read(10000))
df = pd.read_csv(path, encoding=result['encoding'])
GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(
'.', 1)[0] + '.xlsx')
df.to_excel(GFG, index=False, encoding='utf-8')
GFG.save()
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.
rsplit('.', 1)[0] + '.xlsx', as_attachment=True)
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
UPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/'
DOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/'
ALLOWED_EXTENSIONS = {'csv', 'txt'}
app = Flask(__name__, static_url_path='/static')
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower(
) in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
if 'file' not in request.files:
print('No file attached in request')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
print('No file selected')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename
), filename)
return redirect(url_for('uploaded_file', filename=filename))
return render_template('index.html')
def process_file(path, filename):
check_encoding(path, filename)
def check_encoding(path, filename):
with open(path, 'rb') as rawdata:
result = chardet.detect(rawdata.read(10000))
df = pd.read_csv(path, encoding=result['encoding'])
GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(
'.', 1)[0] + '.xlsx')
df.to_excel(GFG, index=False, encoding='utf-8')
GFG.save()
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.
rsplit('.', 1)[0] + '.xlsx', as_attachment=True)
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
<|reserved_special_token_1|>
import os
from flask import Flask, request, redirect, url_for, render_template, send_from_directory
from werkzeug.utils import secure_filename
import chardet as chardet
import pandas as pd
UPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/'
DOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/'
ALLOWED_EXTENSIONS = {'csv', 'txt'}
app = Flask(__name__, static_url_path='/static')
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower(
) in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
if 'file' not in request.files:
print('No file attached in request')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
print('No file selected')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename
), filename)
return redirect(url_for('uploaded_file', filename=filename))
return render_template('index.html')
def process_file(path, filename):
check_encoding(path, filename)
def check_encoding(path, filename):
with open(path, 'rb') as rawdata:
result = chardet.detect(rawdata.read(10000))
df = pd.read_csv(path, encoding=result['encoding'])
GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(
'.', 1)[0] + '.xlsx')
df.to_excel(GFG, index=False, encoding='utf-8')
GFG.save()
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.
rsplit('.', 1)[0] + '.xlsx', as_attachment=True)
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
<|reserved_special_token_1|>
import os
from flask import Flask, request, redirect, url_for, render_template, send_from_directory
from werkzeug.utils import secure_filename
import chardet as chardet
import pandas as pd
UPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/'
DOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/'
ALLOWED_EXTENSIONS = {'csv', 'txt'}
app = Flask(__name__, static_url_path="/static")
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER
# limit upload size upto 8mb
app.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
if 'file' not in request.files:
print('No file attached in request')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
print('No file selected')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename), filename)
return redirect(url_for('uploaded_file', filename=filename))
return render_template('index.html')
def process_file(path, filename):
check_encoding(path, filename)
# with open(path, 'a') as f:
# f.write("\nAdded processed content")
def check_encoding(path, filename):
with open(path, 'rb') as rawdata:
result = chardet.detect(rawdata.read(10000))
df = pd.read_csv(path, encoding=result['encoding'])
GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit('.', 1)[0] + '.xlsx')
df.to_excel(GFG, index=False, encoding='utf-8')
#output_stream = open(app.config['DOWNLOAD_FOLDER'] + 'output.xlsx', 'wb')
#GFG.write(output_stream)
GFG.save()
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.rsplit('.', 1)[0] + '.xlsx', as_attachment=True)
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
|
flexible
|
{
"blob_id": "eb17de8828a600832253c4cfeeb91503b6876dd7",
"index": 9963,
"step-1": "<mask token>\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower(\n ) in ALLOWED_EXTENSIONS\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n if 'file' not in request.files:\n print('No file attached in request')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n print('No file selected')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename\n ), filename)\n return redirect(url_for('uploaded_file', filename=filename))\n return render_template('index.html')\n\n\ndef process_file(path, filename):\n check_encoding(path, filename)\n\n\ndef check_encoding(path, filename):\n with open(path, 'rb') as rawdata:\n result = chardet.detect(rawdata.read(10000))\n df = pd.read_csv(path, encoding=result['encoding'])\n GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(\n '.', 1)[0] + '.xlsx')\n df.to_excel(GFG, index=False, encoding='utf-8')\n GFG.save()\n\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.\n rsplit('.', 1)[0] + '.xlsx', as_attachment=True)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower(\n ) in ALLOWED_EXTENSIONS\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n if 'file' not in request.files:\n print('No file attached in request')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n print('No file selected')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename\n ), filename)\n return redirect(url_for('uploaded_file', filename=filename))\n return render_template('index.html')\n\n\ndef process_file(path, filename):\n check_encoding(path, filename)\n\n\ndef check_encoding(path, filename):\n with open(path, 'rb') as rawdata:\n result = chardet.detect(rawdata.read(10000))\n df = pd.read_csv(path, encoding=result['encoding'])\n GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(\n '.', 1)[0] + '.xlsx')\n df.to_excel(GFG, index=False, encoding='utf-8')\n GFG.save()\n\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.\n rsplit('.', 1)[0] + '.xlsx', as_attachment=True)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n",
"step-3": "<mask token>\nUPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/'\nDOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/'\nALLOWED_EXTENSIONS = {'csv', 'txt'}\napp = Flask(__name__, static_url_path='/static')\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower(\n ) in ALLOWED_EXTENSIONS\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n if 'file' not in request.files:\n print('No file attached in request')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n print('No file selected')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename\n ), filename)\n return redirect(url_for('uploaded_file', filename=filename))\n return render_template('index.html')\n\n\ndef process_file(path, filename):\n check_encoding(path, filename)\n\n\ndef check_encoding(path, filename):\n with open(path, 'rb') as rawdata:\n result = chardet.detect(rawdata.read(10000))\n df = pd.read_csv(path, encoding=result['encoding'])\n GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(\n '.', 1)[0] + '.xlsx')\n df.to_excel(GFG, index=False, encoding='utf-8')\n GFG.save()\n\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.\n rsplit('.', 1)[0] + '.xlsx', as_attachment=True)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n",
"step-4": "import os\nfrom flask import Flask, request, redirect, url_for, render_template, send_from_directory\nfrom werkzeug.utils import secure_filename\nimport chardet as chardet\nimport pandas as pd\nUPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/'\nDOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/'\nALLOWED_EXTENSIONS = {'csv', 'txt'}\napp = Flask(__name__, static_url_path='/static')\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower(\n ) in ALLOWED_EXTENSIONS\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n if 'file' not in request.files:\n print('No file attached in request')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n print('No file selected')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename\n ), filename)\n return redirect(url_for('uploaded_file', filename=filename))\n return render_template('index.html')\n\n\ndef process_file(path, filename):\n check_encoding(path, filename)\n\n\ndef check_encoding(path, filename):\n with open(path, 'rb') as rawdata:\n result = chardet.detect(rawdata.read(10000))\n df = pd.read_csv(path, encoding=result['encoding'])\n GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(\n '.', 1)[0] + '.xlsx')\n df.to_excel(GFG, index=False, encoding='utf-8')\n GFG.save()\n\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.\n rsplit('.', 1)[0] + '.xlsx', 
as_attachment=True)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n",
"step-5": "import os\nfrom flask import Flask, request, redirect, url_for, render_template, send_from_directory\nfrom werkzeug.utils import secure_filename\nimport chardet as chardet\nimport pandas as pd\n\nUPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/'\nDOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/'\nALLOWED_EXTENSIONS = {'csv', 'txt'}\n\napp = Flask(__name__, static_url_path=\"/static\")\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER\n# limit upload size upto 8mb\napp.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n if 'file' not in request.files:\n print('No file attached in request')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n print('No file selected')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename), filename)\n return redirect(url_for('uploaded_file', filename=filename))\n return render_template('index.html')\n\n\ndef process_file(path, filename):\n check_encoding(path, filename)\n # with open(path, 'a') as f:\n # f.write(\"\\nAdded processed content\")\n\n\ndef check_encoding(path, filename):\n with open(path, 'rb') as rawdata:\n result = chardet.detect(rawdata.read(10000))\n df = pd.read_csv(path, encoding=result['encoding'])\n GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit('.', 1)[0] + '.xlsx')\n df.to_excel(GFG, index=False, encoding='utf-8')\n #output_stream = open(app.config['DOWNLOAD_FOLDER'] + 'output.xlsx', 'wb')\n 
#GFG.write(output_stream)\n GFG.save()\n\n \n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.rsplit('.', 1)[0] + '.xlsx', as_attachment=True)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host='0.0.0.0', port=port)",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.webdriver.common.action_chains import ActionChains
import time
import json
import re
import os
import datetime
###########################################################################
driver_path = "/home/arnab/Codes/00_Libs/chromedriver_linux64/chromedriver"
###########################################################################
def simplify_string(inp):
inp = inp.lower().strip()
inp = re.sub(r'[^A-Za-z0-9]', '_', inp)
return inp
def makeDirectory(path):
print("creating directory " + path)
try:
os.mkdir(path)
except FileExistsError:
pass
def initialize(url, browser=None):
if(browser == None):
print("creating browser for the first and last time")
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
# chrome_options.add_argument('--no-sandbox')
# chrome_options.add_argument('--disable-dev-shm-usage')
browser = webdriver.Chrome(driver_path, chrome_options=chrome_options)
browser.implicitly_wait(3)
browser.get(url)
browser.implicitly_wait(3)
return browser
def performClick(driver, element):
driver.execute_script("arguments[0].click();", element)
def getSoupFromElement(element):
html = element.get_attribute('innerHTML')
soup = BeautifulSoup(html, 'html.parser')
return soup
def processPageAnchor(anchorElem):
url = anchorElem['href']
text = anchorElem.find(text=True).strip()
return url, text
def getCastInfo(page_soup):
cast_table = page_soup.find("table", {"class": "cast_list"})
# print(" >>>>>>>>>>>>>>>>>>>>>>>>> ")
# print(cast_table.prettify())
cast_elem_arr = cast_table.findAll("tr", {"class": "odd"}) + cast_table.findAll("tr", {"class": "even"})
# print(len(cast_elem_arr))
# print(cast_elem_arr[0].prettify())
cast_and_character = []
for cast_elem in cast_elem_arr:
td_arr = cast_elem.findAll("td")
if(len(td_arr) < 4):
continue
# print(td_arr[1].prettify())
actor_elem = td_arr[1]
actor_anchor = actor_elem.find("a")
actor_url, actor_name = processPageAnchor(actor_anchor)
actor_info = {
"@type" : "Person",
"url" : actor_url,
"name" : actor_name
}
# print(actor_info)
# print(td_arr[3].prettify())
character_elem = td_arr[3]
character_info = []
character_anchor_arr = character_elem.findAll('a')
for character_anchor in character_anchor_arr:
character_url, character_name = processPageAnchor(character_anchor)
character_info.append({
"url" : character_url,
"name" : character_name
})
# print(character_info)
cast_and_character.append({
"actor" : actor_info,
"character_and_episodes" : character_info
})
# print(cast_and_character)
# print(len(cast_and_character))
return cast_and_character
def checkvalidtext(txt):
if(txt.isspace()):
return False
arr = ["|", "See more", "\u00bb", ","]
if txt in arr:
return False
if txt.strip() in arr:
return False
return True
def filter(arr):
ret = []
attr = "#"
for val in arr:
if(checkvalidtext(val) == False):
continue
if(val[-1] == ":"):
attr = val[0:-1]
continue
ret.append(val.strip())
return attr, ret
def parseDetailInfo(page_soup):
detail_elem = page_soup.find("div", {
'class': 'article',
'id': "titleDetails"
})
divs = detail_elem.findAll("div")
details = {}
for div in divs:
vrr = div.findAll()
attr, value = filter(div.findAll(text=True))
if(attr == "Official Sites" or attr == "#" or attr == "Color"):
continue
# print(attr, " >>>>>> ", value)
details[attr] = value
return details
def processOneMovie(movie_url, folder_path, driver, try_cnt = 0):
# if(True):
try:
if(try_cnt == 0):
driver = initialize(movie_url, driver)
page_html = driver.page_source
page_soup = BeautifulSoup(page_html, 'html.parser')
# print(page_soup.prettify())
query_result = page_soup.find("script", {"type": "application/ld+json"})
# print(query_result.string)
meta_data = json.loads(query_result.string)
try:
meta_data["cast_and_character"] = getCastInfo(page_soup)
except:
meta_data["cast_and_character"] = "Error loading cast information -- checked {}".format(datetime.datetime.now())
meta_data['details'] = parseDetailInfo(page_soup)
movie_id = meta_data["url"].split('/')[-2]
movie_name = meta_data["name"]
file_name = "{}__{}".format(movie_id, simplify_string(movie_name)) + ".json"
# print(file_name)
# print(meta_data)
with open(folder_path + "/" + file_name, "w") as f:
json.dump(meta_data, f)
print("saved movie < {} > to < {} >".format(movie_name, file_name))
return True
except:
if(try_cnt == 17):
print("Error loading movie -- skip this")
return False
print("maybe temporary internet connection problem. trying again < {} >".format(try_cnt + 1))
driver.refresh()
time.sleep(2)
return processOneMovie(movie_url, folder_path, driver, try_cnt+1)
#############################################################################################################
url_root = "https://www.imdb.com/"
save_path = "MOVIES"
summary_path = "IMDB_SUMMARY/SUMMARY_DATA"
frm = 1
rng = 250
limit = 600000 # set it to -1 for all processing
#############################################################################################################
makeDirectory(save_path)
summary_files = sorted(os.listdir(summary_path))
driver = initialize(url_root)
def loadFailCases():
try:
with open("fail_cases.json", "r") as f:
fail_cases = json.load(f)
except:
print("Could not find fail_cases.json -- initializing with empty folder")
fail_cases = []
return fail_cases
print(summary_files)
# for summary in summary_files:
while(True):
summary = "{} - {}.json".format(frm, frm+rng-1)
if(summary not in summary_files):
print("Could not fild summary file < {} >".format(summary))
break
print("Now processing < {} >".format(summary))
folder_name = summary.split('.')[0]
folder_path = save_path + "/" + folder_name
makeDirectory(folder_path)
with open(summary_path + "/" + summary) as f:
movie_arr = json.load(f)
# print(type(movie_arr))
# print(movie_arr)
process_cnt = 0
st = 0
# if(frm == 65251):
# st = 173
for idx in range(st, len(movie_arr)):
movie = movie_arr[idx]
# print(movie["link"])
movie_url = url_root + movie["link"]
success = processOneMovie(movie_url, folder_path, driver)
if(success == False):
fail_cases = loadFailCases()
fail_cases.append(movie)
with open("fail_cases.json", "w") as f:
json.dump(fail_cases, f)
process_cnt += 1
print(">>>>>>>>>>>>>>>>>>>>>>>>>> processed {} of {} --- of :: {}".format(st + process_cnt, len(movie_arr), summary))
frm += rng
if limit == -1:
continue
elif (frm > limit):
break
|
normal
|
{
"blob_id": "43b9d308bb8d2b38c5f539e8700f5c2d8fe2287d",
"index": 2157,
"step-1": "<mask token>\n\n\ndef simplify_string(inp):\n inp = inp.lower().strip()\n inp = re.sub('[^A-Za-z0-9]', '_', inp)\n return inp\n\n\ndef makeDirectory(path):\n print('creating directory ' + path)\n try:\n os.mkdir(path)\n except FileExistsError:\n pass\n\n\ndef initialize(url, browser=None):\n if browser == None:\n print('creating browser for the first and last time')\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--headless')\n browser = webdriver.Chrome(driver_path, chrome_options=chrome_options)\n browser.implicitly_wait(3)\n browser.get(url)\n browser.implicitly_wait(3)\n return browser\n\n\n<mask token>\n\n\ndef getCastInfo(page_soup):\n cast_table = page_soup.find('table', {'class': 'cast_list'})\n cast_elem_arr = cast_table.findAll('tr', {'class': 'odd'}\n ) + cast_table.findAll('tr', {'class': 'even'})\n cast_and_character = []\n for cast_elem in cast_elem_arr:\n td_arr = cast_elem.findAll('td')\n if len(td_arr) < 4:\n continue\n actor_elem = td_arr[1]\n actor_anchor = actor_elem.find('a')\n actor_url, actor_name = processPageAnchor(actor_anchor)\n actor_info = {'@type': 'Person', 'url': actor_url, 'name': actor_name}\n character_elem = td_arr[3]\n character_info = []\n character_anchor_arr = character_elem.findAll('a')\n for character_anchor in character_anchor_arr:\n character_url, character_name = processPageAnchor(character_anchor)\n character_info.append({'url': character_url, 'name':\n character_name})\n cast_and_character.append({'actor': actor_info,\n 'character_and_episodes': character_info})\n return cast_and_character\n\n\ndef checkvalidtext(txt):\n if txt.isspace():\n return False\n arr = ['|', 'See more', '»', ',']\n if txt in arr:\n return False\n if txt.strip() in arr:\n return False\n return True\n\n\ndef filter(arr):\n ret = []\n attr = '#'\n for val in arr:\n if checkvalidtext(val) == False:\n continue\n if val[-1] == ':':\n attr = val[0:-1]\n continue\n ret.append(val.strip())\n return attr, 
ret\n\n\ndef parseDetailInfo(page_soup):\n detail_elem = page_soup.find('div', {'class': 'article', 'id':\n 'titleDetails'})\n divs = detail_elem.findAll('div')\n details = {}\n for div in divs:\n vrr = div.findAll()\n attr, value = filter(div.findAll(text=True))\n if attr == 'Official Sites' or attr == '#' or attr == 'Color':\n continue\n details[attr] = value\n return details\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef simplify_string(inp):\n inp = inp.lower().strip()\n inp = re.sub('[^A-Za-z0-9]', '_', inp)\n return inp\n\n\ndef makeDirectory(path):\n print('creating directory ' + path)\n try:\n os.mkdir(path)\n except FileExistsError:\n pass\n\n\ndef initialize(url, browser=None):\n if browser == None:\n print('creating browser for the first and last time')\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--headless')\n browser = webdriver.Chrome(driver_path, chrome_options=chrome_options)\n browser.implicitly_wait(3)\n browser.get(url)\n browser.implicitly_wait(3)\n return browser\n\n\n<mask token>\n\n\ndef getSoupFromElement(element):\n html = element.get_attribute('innerHTML')\n soup = BeautifulSoup(html, 'html.parser')\n return soup\n\n\ndef processPageAnchor(anchorElem):\n url = anchorElem['href']\n text = anchorElem.find(text=True).strip()\n return url, text\n\n\ndef getCastInfo(page_soup):\n cast_table = page_soup.find('table', {'class': 'cast_list'})\n cast_elem_arr = cast_table.findAll('tr', {'class': 'odd'}\n ) + cast_table.findAll('tr', {'class': 'even'})\n cast_and_character = []\n for cast_elem in cast_elem_arr:\n td_arr = cast_elem.findAll('td')\n if len(td_arr) < 4:\n continue\n actor_elem = td_arr[1]\n actor_anchor = actor_elem.find('a')\n actor_url, actor_name = processPageAnchor(actor_anchor)\n actor_info = {'@type': 'Person', 'url': actor_url, 'name': actor_name}\n character_elem = td_arr[3]\n character_info = []\n character_anchor_arr = character_elem.findAll('a')\n for character_anchor in character_anchor_arr:\n character_url, character_name = processPageAnchor(character_anchor)\n character_info.append({'url': character_url, 'name':\n character_name})\n cast_and_character.append({'actor': actor_info,\n 'character_and_episodes': character_info})\n return cast_and_character\n\n\ndef checkvalidtext(txt):\n if txt.isspace():\n return False\n arr = ['|', 'See more', '»', ',']\n if txt in arr:\n return 
False\n if txt.strip() in arr:\n return False\n return True\n\n\ndef filter(arr):\n ret = []\n attr = '#'\n for val in arr:\n if checkvalidtext(val) == False:\n continue\n if val[-1] == ':':\n attr = val[0:-1]\n continue\n ret.append(val.strip())\n return attr, ret\n\n\ndef parseDetailInfo(page_soup):\n detail_elem = page_soup.find('div', {'class': 'article', 'id':\n 'titleDetails'})\n divs = detail_elem.findAll('div')\n details = {}\n for div in divs:\n vrr = div.findAll()\n attr, value = filter(div.findAll(text=True))\n if attr == 'Official Sites' or attr == '#' or attr == 'Color':\n continue\n details[attr] = value\n return details\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef simplify_string(inp):\n inp = inp.lower().strip()\n inp = re.sub('[^A-Za-z0-9]', '_', inp)\n return inp\n\n\ndef makeDirectory(path):\n print('creating directory ' + path)\n try:\n os.mkdir(path)\n except FileExistsError:\n pass\n\n\ndef initialize(url, browser=None):\n if browser == None:\n print('creating browser for the first and last time')\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--headless')\n browser = webdriver.Chrome(driver_path, chrome_options=chrome_options)\n browser.implicitly_wait(3)\n browser.get(url)\n browser.implicitly_wait(3)\n return browser\n\n\ndef performClick(driver, element):\n driver.execute_script('arguments[0].click();', element)\n\n\ndef getSoupFromElement(element):\n html = element.get_attribute('innerHTML')\n soup = BeautifulSoup(html, 'html.parser')\n return soup\n\n\ndef processPageAnchor(anchorElem):\n url = anchorElem['href']\n text = anchorElem.find(text=True).strip()\n return url, text\n\n\ndef getCastInfo(page_soup):\n cast_table = page_soup.find('table', {'class': 'cast_list'})\n cast_elem_arr = cast_table.findAll('tr', {'class': 'odd'}\n ) + cast_table.findAll('tr', {'class': 'even'})\n cast_and_character = []\n for cast_elem in cast_elem_arr:\n td_arr = cast_elem.findAll('td')\n if len(td_arr) < 4:\n continue\n actor_elem = td_arr[1]\n actor_anchor = actor_elem.find('a')\n actor_url, actor_name = processPageAnchor(actor_anchor)\n actor_info = {'@type': 'Person', 'url': actor_url, 'name': actor_name}\n character_elem = td_arr[3]\n character_info = []\n character_anchor_arr = character_elem.findAll('a')\n for character_anchor in character_anchor_arr:\n character_url, character_name = processPageAnchor(character_anchor)\n character_info.append({'url': character_url, 'name':\n character_name})\n cast_and_character.append({'actor': actor_info,\n 'character_and_episodes': character_info})\n return cast_and_character\n\n\ndef checkvalidtext(txt):\n if 
txt.isspace():\n return False\n arr = ['|', 'See more', '»', ',']\n if txt in arr:\n return False\n if txt.strip() in arr:\n return False\n return True\n\n\ndef filter(arr):\n ret = []\n attr = '#'\n for val in arr:\n if checkvalidtext(val) == False:\n continue\n if val[-1] == ':':\n attr = val[0:-1]\n continue\n ret.append(val.strip())\n return attr, ret\n\n\ndef parseDetailInfo(page_soup):\n detail_elem = page_soup.find('div', {'class': 'article', 'id':\n 'titleDetails'})\n divs = detail_elem.findAll('div')\n details = {}\n for div in divs:\n vrr = div.findAll()\n attr, value = filter(div.findAll(text=True))\n if attr == 'Official Sites' or attr == '#' or attr == 'Color':\n continue\n details[attr] = value\n return details\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef simplify_string(inp):\n inp = inp.lower().strip()\n inp = re.sub('[^A-Za-z0-9]', '_', inp)\n return inp\n\n\ndef makeDirectory(path):\n print('creating directory ' + path)\n try:\n os.mkdir(path)\n except FileExistsError:\n pass\n\n\ndef initialize(url, browser=None):\n if browser == None:\n print('creating browser for the first and last time')\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--headless')\n browser = webdriver.Chrome(driver_path, chrome_options=chrome_options)\n browser.implicitly_wait(3)\n browser.get(url)\n browser.implicitly_wait(3)\n return browser\n\n\ndef performClick(driver, element):\n driver.execute_script('arguments[0].click();', element)\n\n\ndef getSoupFromElement(element):\n html = element.get_attribute('innerHTML')\n soup = BeautifulSoup(html, 'html.parser')\n return soup\n\n\ndef processPageAnchor(anchorElem):\n url = anchorElem['href']\n text = anchorElem.find(text=True).strip()\n return url, text\n\n\ndef getCastInfo(page_soup):\n cast_table = page_soup.find('table', {'class': 'cast_list'})\n cast_elem_arr = cast_table.findAll('tr', {'class': 'odd'}\n ) + cast_table.findAll('tr', {'class': 'even'})\n cast_and_character = []\n for cast_elem in cast_elem_arr:\n td_arr = cast_elem.findAll('td')\n if len(td_arr) < 4:\n continue\n actor_elem = td_arr[1]\n actor_anchor = actor_elem.find('a')\n actor_url, actor_name = processPageAnchor(actor_anchor)\n actor_info = {'@type': 'Person', 'url': actor_url, 'name': actor_name}\n character_elem = td_arr[3]\n character_info = []\n character_anchor_arr = character_elem.findAll('a')\n for character_anchor in character_anchor_arr:\n character_url, character_name = processPageAnchor(character_anchor)\n character_info.append({'url': character_url, 'name':\n character_name})\n cast_and_character.append({'actor': actor_info,\n 'character_and_episodes': character_info})\n return cast_and_character\n\n\ndef checkvalidtext(txt):\n if 
txt.isspace():\n return False\n arr = ['|', 'See more', '»', ',']\n if txt in arr:\n return False\n if txt.strip() in arr:\n return False\n return True\n\n\ndef filter(arr):\n ret = []\n attr = '#'\n for val in arr:\n if checkvalidtext(val) == False:\n continue\n if val[-1] == ':':\n attr = val[0:-1]\n continue\n ret.append(val.strip())\n return attr, ret\n\n\ndef parseDetailInfo(page_soup):\n detail_elem = page_soup.find('div', {'class': 'article', 'id':\n 'titleDetails'})\n divs = detail_elem.findAll('div')\n details = {}\n for div in divs:\n vrr = div.findAll()\n attr, value = filter(div.findAll(text=True))\n if attr == 'Official Sites' or attr == '#' or attr == 'Color':\n continue\n details[attr] = value\n return details\n\n\ndef processOneMovie(movie_url, folder_path, driver, try_cnt=0):\n try:\n if try_cnt == 0:\n driver = initialize(movie_url, driver)\n page_html = driver.page_source\n page_soup = BeautifulSoup(page_html, 'html.parser')\n query_result = page_soup.find('script', {'type': 'application/ld+json'}\n )\n meta_data = json.loads(query_result.string)\n try:\n meta_data['cast_and_character'] = getCastInfo(page_soup)\n except:\n meta_data['cast_and_character'\n ] = 'Error loading cast information -- checked {}'.format(\n datetime.datetime.now())\n meta_data['details'] = parseDetailInfo(page_soup)\n movie_id = meta_data['url'].split('/')[-2]\n movie_name = meta_data['name']\n file_name = '{}__{}'.format(movie_id, simplify_string(movie_name)\n ) + '.json'\n with open(folder_path + '/' + file_name, 'w') as f:\n json.dump(meta_data, f)\n print('saved movie < {} > to < {} >'.format(movie_name, file_name))\n return True\n except:\n if try_cnt == 17:\n print('Error loading movie -- skip this')\n return False\n print(\n 'maybe temporary internet connection problem. 
trying again < {} >'\n .format(try_cnt + 1))\n driver.refresh()\n time.sleep(2)\n return processOneMovie(movie_url, folder_path, driver, try_cnt + 1)\n\n\n<mask token>\n\n\ndef loadFailCases():\n try:\n with open('fail_cases.json', 'r') as f:\n fail_cases = json.load(f)\n except:\n print(\n 'Could not find fail_cases.json -- initializing with empty folder')\n fail_cases = []\n return fail_cases\n\n\n<mask token>\n",
"step-5": "from selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nimport json\nimport re\nimport os\nimport datetime\n\n###########################################################################\ndriver_path = \"/home/arnab/Codes/00_Libs/chromedriver_linux64/chromedriver\"\n###########################################################################\n\ndef simplify_string(inp):\n inp = inp.lower().strip()\n inp = re.sub(r'[^A-Za-z0-9]', '_', inp)\n\n return inp\n\ndef makeDirectory(path):\n print(\"creating directory \" + path)\n try:\n os.mkdir(path)\n except FileExistsError:\n pass\n\ndef initialize(url, browser=None):\n if(browser == None):\n print(\"creating browser for the first and last time\")\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--headless')\n # chrome_options.add_argument('--no-sandbox')\n # chrome_options.add_argument('--disable-dev-shm-usage')\n\n browser = webdriver.Chrome(driver_path, chrome_options=chrome_options)\n browser.implicitly_wait(3)\n\n browser.get(url)\n browser.implicitly_wait(3)\n\n return browser\n\n\ndef performClick(driver, element):\n driver.execute_script(\"arguments[0].click();\", element)\n\n\ndef getSoupFromElement(element):\n html = element.get_attribute('innerHTML')\n soup = BeautifulSoup(html, 'html.parser')\n return soup\n\n\ndef processPageAnchor(anchorElem):\n url = anchorElem['href']\n text = anchorElem.find(text=True).strip()\n return url, text\n\ndef getCastInfo(page_soup):\n cast_table = page_soup.find(\"table\", {\"class\": \"cast_list\"})\n # print(\" >>>>>>>>>>>>>>>>>>>>>>>>> \")\n # print(cast_table.prettify())\n\n cast_elem_arr = cast_table.findAll(\"tr\", {\"class\": \"odd\"}) + cast_table.findAll(\"tr\", {\"class\": \"even\"})\n # print(len(cast_elem_arr))\n # print(cast_elem_arr[0].prettify())\n\n cast_and_character = []\n\n for cast_elem in cast_elem_arr:\n td_arr = 
cast_elem.findAll(\"td\")\n if(len(td_arr) < 4):\n continue\n \n # print(td_arr[1].prettify())\n actor_elem = td_arr[1]\n\n\n actor_anchor = actor_elem.find(\"a\")\n actor_url, actor_name = processPageAnchor(actor_anchor)\n actor_info = {\n \"@type\" : \"Person\",\n \"url\" : actor_url,\n \"name\" : actor_name\n }\n # print(actor_info)\n\n # print(td_arr[3].prettify())\n character_elem = td_arr[3]\n character_info = []\n character_anchor_arr = character_elem.findAll('a')\n for character_anchor in character_anchor_arr:\n character_url, character_name = processPageAnchor(character_anchor)\n character_info.append({\n \"url\" : character_url,\n \"name\" : character_name\n })\n\n # print(character_info)\n\n cast_and_character.append({\n \"actor\" : actor_info,\n \"character_and_episodes\" : character_info\n })\n\n\n # print(cast_and_character)\n # print(len(cast_and_character))\n return cast_and_character\n\ndef checkvalidtext(txt):\n if(txt.isspace()):\n return False\n arr = [\"|\", \"See more\", \"\\u00bb\", \",\"]\n if txt in arr:\n return False\n if txt.strip() in arr:\n return False\n return True\n\n\ndef filter(arr):\n ret = []\n attr = \"#\"\n for val in arr:\n if(checkvalidtext(val) == False):\n continue\n if(val[-1] == \":\"):\n attr = val[0:-1]\n continue\n ret.append(val.strip())\n return attr, ret \n\ndef parseDetailInfo(page_soup):\n detail_elem = page_soup.find(\"div\", {\n 'class': 'article',\n 'id': \"titleDetails\"\n })\n divs = detail_elem.findAll(\"div\")\n \n details = {}\n for div in divs:\n vrr = div.findAll()\n attr, value = filter(div.findAll(text=True))\n if(attr == \"Official Sites\" or attr == \"#\" or attr == \"Color\"):\n continue\n # print(attr, \" >>>>>> \", value)\n details[attr] = value\n\n return details\n\n\ndef processOneMovie(movie_url, folder_path, driver, try_cnt = 0):\n\n # if(True):\n try:\n if(try_cnt == 0):\n driver = initialize(movie_url, driver)\n\n page_html = driver.page_source\n page_soup = BeautifulSoup(page_html, 
'html.parser')\n\n # print(page_soup.prettify())\n\n query_result = page_soup.find(\"script\", {\"type\": \"application/ld+json\"})\n # print(query_result.string)\n meta_data = json.loads(query_result.string)\n try:\n meta_data[\"cast_and_character\"] = getCastInfo(page_soup)\n except:\n meta_data[\"cast_and_character\"] = \"Error loading cast information -- checked {}\".format(datetime.datetime.now())\n \n meta_data['details'] = parseDetailInfo(page_soup)\n\n movie_id = meta_data[\"url\"].split('/')[-2]\n movie_name = meta_data[\"name\"]\n\n file_name = \"{}__{}\".format(movie_id, simplify_string(movie_name)) + \".json\"\n # print(file_name)\n # print(meta_data)\n\n with open(folder_path + \"/\" + file_name, \"w\") as f:\n json.dump(meta_data, f)\n print(\"saved movie < {} > to < {} >\".format(movie_name, file_name))\n\n return True\n \n except:\n if(try_cnt == 17):\n print(\"Error loading movie -- skip this\")\n return False\n\n print(\"maybe temporary internet connection problem. trying again < {} >\".format(try_cnt + 1))\n driver.refresh()\n time.sleep(2)\n return processOneMovie(movie_url, folder_path, driver, try_cnt+1)\n\n\n\n#############################################################################################################\nurl_root = \"https://www.imdb.com/\"\nsave_path = \"MOVIES\"\nsummary_path = \"IMDB_SUMMARY/SUMMARY_DATA\"\nfrm = 1\nrng = 250\nlimit = 600000 # set it to -1 for all processing\n\n#############################################################################################################\n\nmakeDirectory(save_path)\nsummary_files = sorted(os.listdir(summary_path))\ndriver = initialize(url_root)\n\n\ndef loadFailCases():\n try:\n with open(\"fail_cases.json\", \"r\") as f:\n fail_cases = json.load(f)\n except:\n print(\"Could not find fail_cases.json -- initializing with empty folder\")\n fail_cases = []\n return fail_cases\n\nprint(summary_files)\n# for summary in summary_files:\nwhile(True):\n summary = \"{} - 
{}.json\".format(frm, frm+rng-1)\n\n if(summary not in summary_files):\n print(\"Could not fild summary file < {} >\".format(summary))\n break\n\n\n print(\"Now processing < {} >\".format(summary))\n\n folder_name = summary.split('.')[0]\n folder_path = save_path + \"/\" + folder_name\n makeDirectory(folder_path)\n\n with open(summary_path + \"/\" + summary) as f:\n movie_arr = json.load(f)\n # print(type(movie_arr))\n # print(movie_arr)\n process_cnt = 0\n\n st = 0\n # if(frm == 65251):\n # st = 173\n\n for idx in range(st, len(movie_arr)):\n movie = movie_arr[idx]\n # print(movie[\"link\"])\n movie_url = url_root + movie[\"link\"]\n success = processOneMovie(movie_url, folder_path, driver)\n\n if(success == False):\n fail_cases = loadFailCases()\n fail_cases.append(movie)\n with open(\"fail_cases.json\", \"w\") as f:\n json.dump(fail_cases, f)\n\n process_cnt += 1\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>> processed {} of {} --- of :: {}\".format(st + process_cnt, len(movie_arr), summary))\n \n frm += rng\n\n if limit == -1:\n continue\n elif (frm > limit):\n break\n ",
"step-ids": [
7,
9,
10,
12,
16
]
}
|
[
7,
9,
10,
12,
16
] |
# Generated by Django 3.0.3 on 2020-05-30 05:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax the site's password-reset email fields so they may be left blank."""

    # Follows directly on migration 0110 of the ``people`` app.
    dependencies = [('people', '0110_auto_20200530_0631')]

    # Both fields become optional CharFields with an empty-string default; a
    # fresh field instance is built per operation.
    operations = [
        migrations.AlterField(
            model_name='site',
            name=field_name,
            field=models.CharField(blank=True, default='', max_length=100),
        )
        for field_name in ('password_reset_email_from', 'password_reset_email_title')
    ]
|
normal
|
{
"blob_id": "795f936423965063c44b347705c53fd1c306692f",
"index": 4927,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('people', '0110_auto_20200530_0631')]\n operations = [migrations.AlterField(model_name='site', name=\n 'password_reset_email_from', field=models.CharField(blank=True,\n default='', max_length=100)), migrations.AlterField(model_name=\n 'site', name='password_reset_email_title', field=models.CharField(\n blank=True, default='', max_length=100))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('people', '0110_auto_20200530_0631')]\n operations = [migrations.AlterField(model_name='site', name=\n 'password_reset_email_from', field=models.CharField(blank=True,\n default='', max_length=100)), migrations.AlterField(model_name=\n 'site', name='password_reset_email_title', field=models.CharField(\n blank=True, default='', max_length=100))]\n",
"step-5": "# Generated by Django 3.0.3 on 2020-05-30 05:32\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('people', '0110_auto_20200530_0631'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='site',\n name='password_reset_email_from',\n field=models.CharField(blank=True, default='', max_length=100),\n ),\n migrations.AlterField(\n model_name='site',\n name='password_reset_email_title',\n field=models.CharField(blank=True, default='', max_length=100),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import asyncio
import sys
import aioredis
import msgpack
async def main(host: str, endpoint: str, message: str):
    """Pack an outbound-transport message and push it onto the Redis queue.

    :param host: Redis host URL to connect to.
    :param endpoint: Delivery endpoint recorded in the message envelope.
    :param message: Payload text; encoded as UTF-8 bytes before packing.
    """
    msg = msgpack.packb(
        {
            "endpoint": endpoint,
            "headers": {"Content-Type": "text/json"},
            "payload": message.encode("utf-8"),
        },
    )
    redis = await aioredis.create_redis_pool(host)
    try:
        await redis.rpush("acapy.outbound_transport", msg)
    finally:
        # Close the pool so the event loop can shut down cleanly; the original
        # leaked the connections, leaving pending tasks at interpreter exit.
        redis.close()
        await redis.wait_closed()
if __name__ == "__main__":
    # Validate the three required positional arguments before starting the loop.
    argv = sys.argv
    complaints = (
        "Pass redis host URL as the first parameter",
        "Pass endpoint as the second parameter",
        "Pass message contents as the third parameter",
    )
    for position, complaint in enumerate(complaints, start=1):
        if len(argv) <= position:
            raise SystemExit(complaint)
    asyncio.get_event_loop().run_until_complete(main(argv[1], argv[2], argv[3]))
|
normal
|
{
"blob_id": "e94d66732a172286814bc0b0051a52c1374a4de5",
"index": 3168,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nasync def main(host: str, endpoint: str, message: str):\n msg = msgpack.packb({'endpoint': endpoint, 'headers': {'Content-Type':\n 'text/json'}, 'payload': message.encode('utf-8')})\n redis = await aioredis.create_redis_pool(host)\n await redis.rpush('acapy.outbound_transport', msg)\n\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) <= 1:\n raise SystemExit('Pass redis host URL as the first parameter')\n if len(args) <= 2:\n raise SystemExit('Pass endpoint as the second parameter')\n if len(args) <= 3:\n raise SystemExit('Pass message contents as the third parameter')\n asyncio.get_event_loop().run_until_complete(main(args[1], args[2], args[3])\n )\n",
"step-3": "import asyncio\nimport sys\nimport aioredis\nimport msgpack\n\n\nasync def main(host: str, endpoint: str, message: str):\n msg = msgpack.packb({'endpoint': endpoint, 'headers': {'Content-Type':\n 'text/json'}, 'payload': message.encode('utf-8')})\n redis = await aioredis.create_redis_pool(host)\n await redis.rpush('acapy.outbound_transport', msg)\n\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) <= 1:\n raise SystemExit('Pass redis host URL as the first parameter')\n if len(args) <= 2:\n raise SystemExit('Pass endpoint as the second parameter')\n if len(args) <= 3:\n raise SystemExit('Pass message contents as the third parameter')\n asyncio.get_event_loop().run_until_complete(main(args[1], args[2], args[3])\n )\n",
"step-4": "import asyncio\nimport sys\n\nimport aioredis\nimport msgpack\n\n\nasync def main(host: str, endpoint: str, message: str):\n msg = msgpack.packb(\n {\n \"endpoint\": endpoint,\n \"headers\": {\"Content-Type\": \"text/json\"},\n \"payload\": message.encode(\"utf-8\"),\n },\n )\n redis = await aioredis.create_redis_pool(host)\n await redis.rpush(\"acapy.outbound_transport\", msg)\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n if len(args) <= 1:\n raise SystemExit(\"Pass redis host URL as the first parameter\")\n if len(args) <= 2:\n raise SystemExit(\"Pass endpoint as the second parameter\")\n if len(args) <= 3:\n raise SystemExit(\"Pass message contents as the third parameter\")\n asyncio.get_event_loop().run_until_complete(main(args[1], args[2], args[3]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(len(word)):
if word[i] in '.,?!' or word[i] == ' ':
pass
else:
new.append(word[i])
for i in range(len(new)):
o.append(new[i])
for i in range(len(new)):
r.append(new[-i - 1])
print(new)
print(o)
print(r)
<|reserved_special_token_0|>
for i in range(len(new)):
if o[i] == r[i]:
same_count += 1
else:
pass
if same_count == len(new):
print('Palindrome')
else:
print('Non Palindrome')
<|reserved_special_token_1|>
word = input('Word: ')
new = []
o = []
r = []
for i in range(len(word)):
if word[i] in '.,?!' or word[i] == ' ':
pass
else:
new.append(word[i])
for i in range(len(new)):
o.append(new[i])
for i in range(len(new)):
r.append(new[-i - 1])
print(new)
print(o)
print(r)
same_count = 0
for i in range(len(new)):
if o[i] == r[i]:
same_count += 1
else:
pass
if same_count == len(new):
print('Palindrome')
else:
print('Non Palindrome')
<|reserved_special_token_1|>
# Multiple-word palindrome checker (Ex 72 extended).
word = input("Word: ")

# Keep only the characters that matter: drop punctuation and spaces.
new = [ch for ch in word if ch not in ".,?!" and ch != ' ']

# Forward copy and reversed copy of the cleaned characters.
o = list(new)
r = new[::-1]

print(new)
print(o)
print(r)

# A palindrome reads the same forwards and backwards.
if o == r:
    print("Palindrome")
else:
    print("Non Palindrome")
|
flexible
|
{
"blob_id": "c6ab82d7f59faeee2a74e90a96c2348b046d0889",
"index": 7382,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(word)):\n if word[i] in '.,?!' or word[i] == ' ':\n pass\n else:\n new.append(word[i])\nfor i in range(len(new)):\n o.append(new[i])\nfor i in range(len(new)):\n r.append(new[-i - 1])\nprint(new)\nprint(o)\nprint(r)\n<mask token>\nfor i in range(len(new)):\n if o[i] == r[i]:\n same_count += 1\n else:\n pass\nif same_count == len(new):\n print('Palindrome')\nelse:\n print('Non Palindrome')\n",
"step-3": "word = input('Word: ')\nnew = []\no = []\nr = []\nfor i in range(len(word)):\n if word[i] in '.,?!' or word[i] == ' ':\n pass\n else:\n new.append(word[i])\nfor i in range(len(new)):\n o.append(new[i])\nfor i in range(len(new)):\n r.append(new[-i - 1])\nprint(new)\nprint(o)\nprint(r)\nsame_count = 0\nfor i in range(len(new)):\n if o[i] == r[i]:\n same_count += 1\n else:\n pass\nif same_count == len(new):\n print('Palindrome')\nelse:\n print('Non Palindrome')\n",
"step-4": "#Multiple Word Palindromes\n#Ex 72 extended\n\nword = input(\"Word: \")\nnew = []\no = []\nr = []\n#canceling out the spaces\nfor i in range(len(word)):\n if word[i] in \".,?!\" or word[i] == ' ':\n pass\n else:\n new.append(word[i])\n\n#original\nfor i in range(len(new)):\n o.append(new[i])\n#reverse\nfor i in range(len(new)):\n r.append(new[-i - 1])\n\nprint(new)\nprint(o)\nprint(r)\nsame_count = 0\nfor i in range(len(new)):\n if o[i] == r[i]:\n same_count += 1\n else:\n pass\n\nif same_count == len(new):\n print(\"Palindrome\")\nelse:\n print(\"Non Palindrome\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
os.chdir('/home/ec2-user/ML-Processed')
print(str(os.getcwd()))
for f in os.listdir(os.getcwd()):
print('looping in file')
file_name, file_ext = os.path.splitext(f)
if file_ext == '.jpg':
print('working with this file ' + f)
print('about to upload ' + str(datetime.datetime.now()) + '\r\n')
s3.meta.client.upload_file(f, 'netball-ml-processed', str(sys.
argv[1]) + '/' + f)
print('Uploaded to S3 ' + str(datetime.datetime.now()) + '\r\n')
shutil.move(f, '/home/ec2-user/ML-Processed/shifted_to_s3')
print('should of moved the file locally')
print('sleeping, ')
time.sleep(2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
session = boto3.Session(profile_name='default')
s3 = boto3.resource('s3')
bucket = s3.Bucket('netball-ml-processed')
while True:
os.chdir('/home/ec2-user/ML-Processed')
print(str(os.getcwd()))
for f in os.listdir(os.getcwd()):
print('looping in file')
file_name, file_ext = os.path.splitext(f)
if file_ext == '.jpg':
print('working with this file ' + f)
print('about to upload ' + str(datetime.datetime.now()) + '\r\n')
s3.meta.client.upload_file(f, 'netball-ml-processed', str(sys.
argv[1]) + '/' + f)
print('Uploaded to S3 ' + str(datetime.datetime.now()) + '\r\n')
shutil.move(f, '/home/ec2-user/ML-Processed/shifted_to_s3')
print('should of moved the file locally')
print('sleeping, ')
time.sleep(2)
<|reserved_special_token_1|>
import boto3, os, shutil, datetime, time, sys
session = boto3.Session(profile_name='default')
s3 = boto3.resource('s3')
bucket = s3.Bucket('netball-ml-processed')
while True:
os.chdir('/home/ec2-user/ML-Processed')
print(str(os.getcwd()))
for f in os.listdir(os.getcwd()):
print('looping in file')
file_name, file_ext = os.path.splitext(f)
if file_ext == '.jpg':
print('working with this file ' + f)
print('about to upload ' + str(datetime.datetime.now()) + '\r\n')
s3.meta.client.upload_file(f, 'netball-ml-processed', str(sys.
argv[1]) + '/' + f)
print('Uploaded to S3 ' + str(datetime.datetime.now()) + '\r\n')
shutil.move(f, '/home/ec2-user/ML-Processed/shifted_to_s3')
print('should of moved the file locally')
print('sleeping, ')
time.sleep(2)
<|reserved_special_token_1|>
import boto3, os, shutil, datetime, time, sys

# Continuously upload processed JPEGs to S3, then archive each file locally.
# NOTE(review): per the original notes this needs to be run with sudo.
session = boto3.Session(profile_name='default')
s3 = boto3.resource('s3')
bucket = s3.Bucket('netball-ml-processed')

while True:
    # Work out of the processing directory on every pass.
    os.chdir('/home/ec2-user/ML-Processed')
    print(str(os.getcwd()))

    for entry in os.listdir(os.getcwd()):
        print("looping in file")
        _, extension = os.path.splitext(entry)
        if extension != '.jpg':
            continue
        print("working with this file " + entry)
        print("about to upload " + str(datetime.datetime.now()) + "\r\n")
        # Key the object under the prefix supplied on the command line.
        s3.meta.client.upload_file(entry, 'netball-ml-processed', str(sys.argv[1]) + "/" + entry)
        print("Uploaded to S3 " + str(datetime.datetime.now()) + "\r\n")
        # Archive locally so the same file is not re-uploaded next pass.
        shutil.move(entry, '/home/ec2-user/ML-Processed/shifted_to_s3')
        print("should of moved the file locally")

    print("sleeping, ")
    time.sleep(2)
|
flexible
|
{
"blob_id": "ec0697d8d78fafe6bfd4630be2a1fb20eb9eb4cf",
"index": 2472,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n os.chdir('/home/ec2-user/ML-Processed')\n print(str(os.getcwd()))\n for f in os.listdir(os.getcwd()):\n print('looping in file')\n file_name, file_ext = os.path.splitext(f)\n if file_ext == '.jpg':\n print('working with this file ' + f)\n print('about to upload ' + str(datetime.datetime.now()) + '\\r\\n')\n s3.meta.client.upload_file(f, 'netball-ml-processed', str(sys.\n argv[1]) + '/' + f)\n print('Uploaded to S3 ' + str(datetime.datetime.now()) + '\\r\\n')\n shutil.move(f, '/home/ec2-user/ML-Processed/shifted_to_s3')\n print('should of moved the file locally')\n print('sleeping, ')\n time.sleep(2)\n",
"step-3": "<mask token>\nsession = boto3.Session(profile_name='default')\ns3 = boto3.resource('s3')\nbucket = s3.Bucket('netball-ml-processed')\nwhile True:\n os.chdir('/home/ec2-user/ML-Processed')\n print(str(os.getcwd()))\n for f in os.listdir(os.getcwd()):\n print('looping in file')\n file_name, file_ext = os.path.splitext(f)\n if file_ext == '.jpg':\n print('working with this file ' + f)\n print('about to upload ' + str(datetime.datetime.now()) + '\\r\\n')\n s3.meta.client.upload_file(f, 'netball-ml-processed', str(sys.\n argv[1]) + '/' + f)\n print('Uploaded to S3 ' + str(datetime.datetime.now()) + '\\r\\n')\n shutil.move(f, '/home/ec2-user/ML-Processed/shifted_to_s3')\n print('should of moved the file locally')\n print('sleeping, ')\n time.sleep(2)\n",
"step-4": "import boto3, os, shutil, datetime, time, sys\nsession = boto3.Session(profile_name='default')\ns3 = boto3.resource('s3')\nbucket = s3.Bucket('netball-ml-processed')\nwhile True:\n os.chdir('/home/ec2-user/ML-Processed')\n print(str(os.getcwd()))\n for f in os.listdir(os.getcwd()):\n print('looping in file')\n file_name, file_ext = os.path.splitext(f)\n if file_ext == '.jpg':\n print('working with this file ' + f)\n print('about to upload ' + str(datetime.datetime.now()) + '\\r\\n')\n s3.meta.client.upload_file(f, 'netball-ml-processed', str(sys.\n argv[1]) + '/' + f)\n print('Uploaded to S3 ' + str(datetime.datetime.now()) + '\\r\\n')\n shutil.move(f, '/home/ec2-user/ML-Processed/shifted_to_s3')\n print('should of moved the file locally')\n print('sleeping, ')\n time.sleep(2)\n",
"step-5": "import boto3, os, shutil, datetime, time, sys\n\nsession = boto3.Session(profile_name='default')\n\ns3 = boto3.resource('s3')\n\nbucket = s3.Bucket('netball-ml-processed')\n\n#print(bucket.objects)\n\n#needs to be run with *** sudo **** otherwise it won't work...\n\nwhile True:\n\n #change to the motion working Directory\n os.chdir('/home/ec2-user/ML-Processed')\n\n print (str(os.getcwd()))\n\n for f in os.listdir(os.getcwd()):\n print(\"looping in file\")\n\n file_name, file_ext = os.path.splitext(f)\n\n #need to check the file starts with 2 (as in the timestamp) and is a .jpg\n if file_ext == '.jpg':\n print(\"working with this file \" + f)\n\n print(\"about to upload \" + str(datetime.datetime.now()) + \"\\r\\n\")\n # s3.meta.client.upload_file('/Users/andrewhammond/s3_upload.jpg','netball-ml-processing', 's3_upload.jpg')\n s3.meta.client.upload_file(f, 'netball-ml-processed', str(sys.argv[1]) + \"/\" + f)\n\n print (\"Uploaded to S3 \" + str(datetime.datetime.now()) + \"\\r\\n\")\n\n # once pushed to s3 need to shift locally.\n shutil.move(f, '/home/ec2-user/ML-Processed/shifted_to_s3')\n\n print (\"should of moved the file locally\")\n\n print (\"sleeping, \")\n time.sleep(2)\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import math
class Solution:
    # @param {integer} n
    # @param {integer} k
    # @return {string}
    def getPermutation(self, n, k):
        """Return the k-th (1-indexed) permutation of the digits 1..n.

        Uses the factorial number system: with (n-1)! permutations per choice
        of leading digit, the digit index and the remaining rank fall out of a
        single divmod per position.
        """
        res = ''
        k -= 1  # switch to a 0-indexed rank
        nums = [str(i) for i in range(1, n + 1)]
        while n > 0:
            block = math.factorial(n - 1)  # permutations per leading digit
            # Floor division is correct on both Python 2 and 3; the original
            # '/' produced a float index under Python 3 and raised TypeError.
            idx, k = divmod(k, block)
            res += nums.pop(idx)
            n -= 1
        return res
# class Solution:
# def f(self,n,k):
# if n==1 :
# return [0]
# else:
# count=1
# for i in range(1,n):
# count*=i
# begin=(k-1)/count
# plus=k%count
# return [begin]+self.f(n-1,plus)
#
# # @return a string
# def getPermutation(self, n, k):
# res=self.f(n,k)
# print res
# lists=range(1,n+1)
# strs=''
# for i in range(n):
# strs+=str(lists[res[i]])
# lists.pop(res[i])
# return strs
if __name__=="__main__":
    # Ad-hoc smoke tests.  NOTE: Python 2 print statements -- this file
    # targets Python 2 and will not parse under Python 3.
    a=Solution()
    print a.getPermutation(3, 1),"123"  # expected output: 123 123
    print a.getPermutation(2,2)  # expected: 21
    print a.getPermutation(3,2)  # expected: 132
#https://leetcode.com/discuss/16064/an-iterative-solution-for-reference
#TLE
# class Solution:
# def f(self,lists):
# if lists==None:
# return None
# tmpres=[]
#
# for idx,item in enumerate(lists):
# tmp=[i for i in lists]
# tmp.pop(idx)
# res=self.f(tmp)
# if len(res)>0:
# for i in res:
# tmpres.append(str(item)+i)
# else:
# tmpres.append(str(item))
# return tmpres
#
# # @return a string
# def getPermutation(self, n, k):
# if n==1:
# return '1'
# count=1
# begin=0
# plus=0
# for i in range(1,n):
# count*=i
# begin+=k/count
# plus=k%count
#
# tmp=[i for i in range(1,n+1)]
# if begin>0:
# tmp.pop(begin-1)
#
# tmp=self.f(tmp)
# if begin>0:
# return str(begin)+tmp[plus-1]
# else:
# return tmp[plus-1]
# TLE
# # class Solution:
# # def f(self,lists):
# # if lists==None:
# # return None
# # tmpres=[]
# #
# # for idx,item in enumerate(lists):
# # tmp=[i for i in lists]
# # tmp.pop(idx)
# # res=self.f(tmp)
# # if len(res)>0:
# # for i in res:
# # tmpres.append(str(item)+i)
# # else:
# # tmpres.append(str(item))
# # return tmpres
# #
# # # @return a string
# # def getPermutation(self, n, k):
# # tmp=self.f(range(1,n+1))
# # return tmp[k-1]
# #
|
normal
|
{
"blob_id": "d267bf82aee2eca29628fcd1d874a337adc1ae09",
"index": 8859,
"step-1": "import math\n\nclass Solution:\n # @param {integer} n\n # @param {integer} k\n # @return {string}\n def getPermutation(self, n, k):\n res = ''\n k -= 1\n nums = [str(i) for i in range(1, n+1)]\n while n > 0:\n tmp = math.factorial(n-1)\n res += nums[k/tmp]\n del nums[k/tmp]\n k %= tmp\n n -= 1\n return res\n\n\n\n# class Solution:\n# def f(self,n,k):\n# if n==1 :\n# return [0]\n# else:\n# count=1\n# for i in range(1,n):\n# count*=i\n# begin=(k-1)/count\n# plus=k%count\n# return [begin]+self.f(n-1,plus)\n#\n# # @return a string\n# def getPermutation(self, n, k):\n# res=self.f(n,k)\n# print res\n# lists=range(1,n+1)\n# strs=''\n# for i in range(n):\n# strs+=str(lists[res[i]])\n# lists.pop(res[i])\n# return strs\n\nif __name__==\"__main__\":\n a=Solution()\n print a.getPermutation(3, 1),\"123\"\n print a.getPermutation(2,2)\n print a.getPermutation(3,2)\n#https://leetcode.com/discuss/16064/an-iterative-solution-for-reference\n\n#TLE\n# class Solution:\n# def f(self,lists):\n# if lists==None:\n# return None\n# tmpres=[]\n# \n# for idx,item in enumerate(lists):\n# tmp=[i for i in lists]\n# tmp.pop(idx)\n# res=self.f(tmp)\n# if len(res)>0:\n# for i in res:\n# tmpres.append(str(item)+i)\n# else:\n# tmpres.append(str(item))\n# return tmpres\n# \n# # @return a string\n# def getPermutation(self, n, k):\n# if n==1:\n# return '1'\n# count=1\n# begin=0\n# plus=0\n# for i in range(1,n):\n# count*=i\n# begin+=k/count\n# plus=k%count\n# \n# tmp=[i for i in range(1,n+1)]\n# if begin>0:\n# tmp.pop(begin-1)\n# \n# tmp=self.f(tmp)\n# if begin>0:\n# return str(begin)+tmp[plus-1]\n# else:\n# return tmp[plus-1]\n# TLE\n# # class Solution:\n# # def f(self,lists):\n# # if lists==None:\n# # return None\n# # tmpres=[]\n# # \n# # for idx,item in enumerate(lists):\n# # tmp=[i for i in lists]\n# # tmp.pop(idx)\n# # res=self.f(tmp)\n# # if len(res)>0:\n# # for i in res:\n# # tmpres.append(str(item)+i)\n# # else:\n# # tmpres.append(str(item))\n# # return tmpres\n# # \n# # # @return a 
string\n# # def getPermutation(self, n, k):\n# # tmp=self.f(range(1,n+1))\n# # return tmp[k-1]\n# # \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from helper.logger_helper import Log
from helper.mail_helper import MailHelper
import spider.spider as spider
from configuration.configuration_handler import Configuration
from configuration.products_handler import ProductsHandler
if __name__ == "__main__":
    # Wire up logging, configuration and the persisted product list.
    logger = Log()
    conf = Configuration('configuration/configuration.yaml').load_configuration()
    products_handler = ProductsHandler(conf["products_path"])
    logger.info("Configuration loaded")

    products = products_handler.load_products()
    logger.info("Products loaded from {}".format(conf["products_path"]))

    # Crawl; report by mail only when the spider found something new.
    updated, message = spider.Spider(products, conf).crawl()
    if len(updated) > 0:
        logger.info("Products to report")
        mailer = MailHelper()
        mailer.send_mail('', message, "New prices lower")
        logger.info("Mail sent")
        mailer.close_connection()
    else:
        logger.info("Nothing to report")

    # Persist any price updates the crawl recorded on the products.
    products_handler.save_products(products)
    logger.info("Configuration saved")
else:
    print("Exec this file as the main entrypoint! -> python3 init.py")
|
normal
|
{
"blob_id": "2e140d1174e0b2d8a97df880b1bffdf84dc0d236",
"index": 1029,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n logger = Log()\n conf = Configuration('configuration/configuration.yaml'\n ).load_configuration()\n ph = ProductsHandler(conf['products_path'])\n logger.info('Configuration loaded')\n products = ph.load_products()\n logger.info('Products loaded from {}'.format(conf['products_path']))\n update, msg = spider.Spider(products, conf).crawl()\n if len(update) > 0:\n logger.info('Products to report')\n mail_helper = MailHelper()\n mail_helper.send_mail('', msg, 'New prices lower')\n logger.info('Mail sent')\n mail_helper.close_connection()\n else:\n logger.info('Nothing to report')\n ph.save_products(products)\n logger.info('Configuration saved')\nelse:\n print('Exec this file as the main entrypoint! -> python3 init.py')\n",
"step-3": "from helper.logger_helper import Log\nfrom helper.mail_helper import MailHelper\nimport spider.spider as spider\nfrom configuration.configuration_handler import Configuration\nfrom configuration.products_handler import ProductsHandler\nif __name__ == '__main__':\n logger = Log()\n conf = Configuration('configuration/configuration.yaml'\n ).load_configuration()\n ph = ProductsHandler(conf['products_path'])\n logger.info('Configuration loaded')\n products = ph.load_products()\n logger.info('Products loaded from {}'.format(conf['products_path']))\n update, msg = spider.Spider(products, conf).crawl()\n if len(update) > 0:\n logger.info('Products to report')\n mail_helper = MailHelper()\n mail_helper.send_mail('', msg, 'New prices lower')\n logger.info('Mail sent')\n mail_helper.close_connection()\n else:\n logger.info('Nothing to report')\n ph.save_products(products)\n logger.info('Configuration saved')\nelse:\n print('Exec this file as the main entrypoint! -> python3 init.py')\n",
"step-4": "from helper.logger_helper import Log\nfrom helper.mail_helper import MailHelper\nimport spider.spider as spider\nfrom configuration.configuration_handler import Configuration\nfrom configuration.products_handler import ProductsHandler\n\nif __name__ == \"__main__\":\n logger = Log()\n conf = Configuration('configuration/configuration.yaml').load_configuration()\n ph = ProductsHandler(conf[\"products_path\"]) \n logger.info(\"Configuration loaded\")\n products = ph.load_products()\n logger.info(\"Products loaded from {}\".format(conf[\"products_path\"]))\n\n update, msg = spider.Spider(products, conf).crawl()\n if len(update) > 0:\n logger.info(\"Products to report\")\n mail_helper = MailHelper()\n mail_helper.send_mail('', msg, \"New prices lower\")\n \n logger.info(\"Mail sent\")\n mail_helper.close_connection()\n\n else:\n logger.info(\"Nothing to report\")\n \n ph.save_products(products)\n logger.info(\"Configuration saved\")\nelse:\n print(\"Exec this file as the main entrypoint! -> python3 init.py\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import typing
import torch.nn as nn
from .torch_utils import get_activation, BatchNorm1d
from dna.models.torch_modules.torch_utils import PyTorchRandomStateContext
class Submodule(nn.Module):
def __init__(self, layer_sizes: typing.List[int], activation_name: str,
use_batch_norm: bool, use_skip: bool=False, dropout: float=0.0, *,
device: str='cuda:0', seed: int=0):
super().__init__()
with PyTorchRandomStateContext(seed):
n_layers = len(layer_sizes) - 1
activation = get_activation(activation_name)
layers = []
for i in range(n_layers):
if i > 0:
layers.append(activation())
if dropout > 0.0:
layers.append(nn.Dropout(p=dropout))
if use_batch_norm:
layers.append(BatchNorm1d(layer_sizes[i]))
layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
self.net = nn.Sequential(*layers)
self.net.to(device=device)
if use_skip:
if layer_sizes[0] == layer_sizes[-1]:
self.skip = nn.Sequential()
else:
self.skip = nn.Linear(layer_sizes[0], layer_sizes[-1])
self.skip.to(device=device)
else:
self.skip = None
def forward(self, x):
if self.skip is None:
return self.net(x)
else:
return self.net(x) + self.skip(x)
|
normal
|
{
"blob_id": "950b2906853c37cdeaa8ed1076fff79dbe99b6f8",
"index": 8327,
"step-1": "<mask token>\n\n\nclass Submodule(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Submodule(nn.Module):\n\n def __init__(self, layer_sizes: typing.List[int], activation_name: str,\n use_batch_norm: bool, use_skip: bool=False, dropout: float=0.0, *,\n device: str='cuda:0', seed: int=0):\n super().__init__()\n with PyTorchRandomStateContext(seed):\n n_layers = len(layer_sizes) - 1\n activation = get_activation(activation_name)\n layers = []\n for i in range(n_layers):\n if i > 0:\n layers.append(activation())\n if dropout > 0.0:\n layers.append(nn.Dropout(p=dropout))\n if use_batch_norm:\n layers.append(BatchNorm1d(layer_sizes[i]))\n layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))\n self.net = nn.Sequential(*layers)\n self.net.to(device=device)\n if use_skip:\n if layer_sizes[0] == layer_sizes[-1]:\n self.skip = nn.Sequential()\n else:\n self.skip = nn.Linear(layer_sizes[0], layer_sizes[-1])\n self.skip.to(device=device)\n else:\n self.skip = None\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Submodule(nn.Module):\n\n def __init__(self, layer_sizes: typing.List[int], activation_name: str,\n use_batch_norm: bool, use_skip: bool=False, dropout: float=0.0, *,\n device: str='cuda:0', seed: int=0):\n super().__init__()\n with PyTorchRandomStateContext(seed):\n n_layers = len(layer_sizes) - 1\n activation = get_activation(activation_name)\n layers = []\n for i in range(n_layers):\n if i > 0:\n layers.append(activation())\n if dropout > 0.0:\n layers.append(nn.Dropout(p=dropout))\n if use_batch_norm:\n layers.append(BatchNorm1d(layer_sizes[i]))\n layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))\n self.net = nn.Sequential(*layers)\n self.net.to(device=device)\n if use_skip:\n if layer_sizes[0] == layer_sizes[-1]:\n self.skip = nn.Sequential()\n else:\n self.skip = nn.Linear(layer_sizes[0], layer_sizes[-1])\n self.skip.to(device=device)\n else:\n self.skip = None\n\n def forward(self, x):\n if self.skip is None:\n return self.net(x)\n else:\n return self.net(x) + self.skip(x)\n",
"step-4": "import typing\nimport torch.nn as nn\nfrom .torch_utils import get_activation, BatchNorm1d\nfrom dna.models.torch_modules.torch_utils import PyTorchRandomStateContext\n\n\nclass Submodule(nn.Module):\n\n def __init__(self, layer_sizes: typing.List[int], activation_name: str,\n use_batch_norm: bool, use_skip: bool=False, dropout: float=0.0, *,\n device: str='cuda:0', seed: int=0):\n super().__init__()\n with PyTorchRandomStateContext(seed):\n n_layers = len(layer_sizes) - 1\n activation = get_activation(activation_name)\n layers = []\n for i in range(n_layers):\n if i > 0:\n layers.append(activation())\n if dropout > 0.0:\n layers.append(nn.Dropout(p=dropout))\n if use_batch_norm:\n layers.append(BatchNorm1d(layer_sizes[i]))\n layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))\n self.net = nn.Sequential(*layers)\n self.net.to(device=device)\n if use_skip:\n if layer_sizes[0] == layer_sizes[-1]:\n self.skip = nn.Sequential()\n else:\n self.skip = nn.Linear(layer_sizes[0], layer_sizes[-1])\n self.skip.to(device=device)\n else:\n self.skip = None\n\n def forward(self, x):\n if self.skip is None:\n return self.net(x)\n else:\n return self.net(x) + self.skip(x)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_input():
return sys.stdin.read(1)
def exit(orig_settings):
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def init():
orig_settings = termios.tcgetattr(sys.stdin)
tty.setcbreak(sys.stdin)
return orig_settings
def get_input():
return sys.stdin.read(1)
def exit(orig_settings):
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def init():
orig_settings = termios.tcgetattr(sys.stdin)
tty.setcbreak(sys.stdin)
return orig_settings
def get_input():
return sys.stdin.read(1)
def exit(orig_settings):
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)
if __name__ == '__main__':
settings = init()
key = 0
while key != chr(27):
key = get_input()
print("'" + str(key) + "'")
exit(settings)
<|reserved_special_token_1|>
import tty
import sys
import termios
def init():
orig_settings = termios.tcgetattr(sys.stdin)
tty.setcbreak(sys.stdin)
return orig_settings
def get_input():
return sys.stdin.read(1)
def exit(orig_settings):
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)
if __name__ == '__main__':
settings = init()
key = 0
while key != chr(27):
key = get_input()
print("'" + str(key) + "'")
exit(settings)
<|reserved_special_token_1|>
import tty
import sys
import termios
def init():
orig_settings = termios.tcgetattr(sys.stdin)
tty.setcbreak(sys.stdin)
return orig_settings
def get_input():
return sys.stdin.read(1)
def exit(orig_settings):
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)
if __name__ == "__main__":
settings = init()
key = 0
while key != chr(27): # esc
key = get_input()
print("'" + str(key) + "'")
exit(settings)
|
flexible
|
{
"blob_id": "c64e41609a19a20f59446399a2e864ff8834c3f0",
"index": 4322,
"step-1": "<mask token>\n\n\ndef get_input():\n return sys.stdin.read(1)\n\n\ndef exit(orig_settings):\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init():\n orig_settings = termios.tcgetattr(sys.stdin)\n tty.setcbreak(sys.stdin)\n return orig_settings\n\n\ndef get_input():\n return sys.stdin.read(1)\n\n\ndef exit(orig_settings):\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef init():\n orig_settings = termios.tcgetattr(sys.stdin)\n tty.setcbreak(sys.stdin)\n return orig_settings\n\n\ndef get_input():\n return sys.stdin.read(1)\n\n\ndef exit(orig_settings):\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)\n\n\nif __name__ == '__main__':\n settings = init()\n key = 0\n while key != chr(27):\n key = get_input()\n print(\"'\" + str(key) + \"'\")\n exit(settings)\n",
"step-4": "import tty\nimport sys\nimport termios\n\n\ndef init():\n orig_settings = termios.tcgetattr(sys.stdin)\n tty.setcbreak(sys.stdin)\n return orig_settings\n\n\ndef get_input():\n return sys.stdin.read(1)\n\n\ndef exit(orig_settings):\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)\n\n\nif __name__ == '__main__':\n settings = init()\n key = 0\n while key != chr(27):\n key = get_input()\n print(\"'\" + str(key) + \"'\")\n exit(settings)\n",
"step-5": "import tty\nimport sys\nimport termios\n\n\ndef init():\n orig_settings = termios.tcgetattr(sys.stdin)\n tty.setcbreak(sys.stdin)\n return orig_settings\n\ndef get_input():\n return sys.stdin.read(1)\n\ndef exit(orig_settings):\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings) \n\n\n\nif __name__ == \"__main__\":\n settings = init()\n key = 0\n while key != chr(27): # esc\n key = get_input()\n print(\"'\" + str(key) + \"'\")\n exit(settings)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
number = int(input("Enter a number, and I'll tell you if it's even or odd: "))
if number % 2 == 0:
print(f"{number} is an even number.")
else:
print(f"{number} is an odd number.")
|
normal
|
{
"blob_id": "b147a22d6bd12a954c0d85c11e578a67f0a51332",
"index": 3025,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif number % 2 == 0:\n print(f'{number} is an even number.')\nelse:\n print(f'{number} is an odd number.')\n",
"step-3": "number = int(input(\"Enter a number, and I'll tell you if it's even or odd: \"))\nif number % 2 == 0:\n print(f'{number} is an even number.')\nelse:\n print(f'{number} is an odd number.')\n",
"step-4": "number = int(input(\"Enter a number, and I'll tell you if it's even or odd: \"))\n\nif number % 2 == 0:\n print(f\"{number} is an even number.\")\nelse:\n print(f\"{number} is an odd number.\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def cmd_create(nexus_client, **kwargs):
"""Performs ``nexus3 cleanup_policy create``"""
policy = cleanup_policy.CleanupPolicy(None, **kwargs)
nexus_client.cleanup_policies.create_or_update(policy)
return exception.CliReturnCode.SUCCESS.value
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def cmd_list(nexus_client):
"""Performs ``nexus3 cleanup_policy list``"""
policies = nexus_client.cleanup_policies.list()
if len(policies) == 0:
return exception.CliReturnCode.POLICY_NOT_FOUND.value
table = Texttable(max_width=constants.TTY_MAX_WIDTH)
table.add_row(['Name', 'Format', 'Downloaded', 'Updated', 'Regex'])
table.set_deco(Texttable.HEADER)
for policy in policies:
p = policy.configuration
table.add_row([p['name'], p['format'], p['criteria'].get(
'lastDownloaded', 'null'), p['criteria'].get('lastBlobUpdated',
'null'), p['criteria'].get('regex', 'null')])
print(table.draw())
return exception.CliReturnCode.SUCCESS.value
def cmd_create(nexus_client, **kwargs):
"""Performs ``nexus3 cleanup_policy create``"""
policy = cleanup_policy.CleanupPolicy(None, **kwargs)
nexus_client.cleanup_policies.create_or_update(policy)
return exception.CliReturnCode.SUCCESS.value
<|reserved_special_token_1|>
from texttable import Texttable
from nexuscli import exception
from nexuscli.api import cleanup_policy
from nexuscli.cli import constants
def cmd_list(nexus_client):
"""Performs ``nexus3 cleanup_policy list``"""
policies = nexus_client.cleanup_policies.list()
if len(policies) == 0:
return exception.CliReturnCode.POLICY_NOT_FOUND.value
table = Texttable(max_width=constants.TTY_MAX_WIDTH)
table.add_row(['Name', 'Format', 'Downloaded', 'Updated', 'Regex'])
table.set_deco(Texttable.HEADER)
for policy in policies:
p = policy.configuration
table.add_row([p['name'], p['format'], p['criteria'].get(
'lastDownloaded', 'null'), p['criteria'].get('lastBlobUpdated',
'null'), p['criteria'].get('regex', 'null')])
print(table.draw())
return exception.CliReturnCode.SUCCESS.value
def cmd_create(nexus_client, **kwargs):
"""Performs ``nexus3 cleanup_policy create``"""
policy = cleanup_policy.CleanupPolicy(None, **kwargs)
nexus_client.cleanup_policies.create_or_update(policy)
return exception.CliReturnCode.SUCCESS.value
<|reserved_special_token_1|>
from texttable import Texttable
from nexuscli import exception
from nexuscli.api import cleanup_policy
from nexuscli.cli import constants
def cmd_list(nexus_client):
"""Performs ``nexus3 cleanup_policy list``"""
policies = nexus_client.cleanup_policies.list()
if len(policies) == 0:
return exception.CliReturnCode.POLICY_NOT_FOUND.value
table = Texttable(max_width=constants.TTY_MAX_WIDTH)
table.add_row(
['Name', 'Format', 'Downloaded', 'Updated', 'Regex'])
table.set_deco(Texttable.HEADER)
for policy in policies:
p = policy.configuration
table.add_row([
p['name'], p['format'],
p['criteria'].get('lastDownloaded', 'null'),
p['criteria'].get('lastBlobUpdated', 'null'),
p['criteria'].get('regex', 'null')],
)
print(table.draw())
return exception.CliReturnCode.SUCCESS.value
def cmd_create(nexus_client, **kwargs):
"""Performs ``nexus3 cleanup_policy create``"""
policy = cleanup_policy.CleanupPolicy(None, **kwargs)
nexus_client.cleanup_policies.create_or_update(policy)
return exception.CliReturnCode.SUCCESS.value
|
flexible
|
{
"blob_id": "521b90ffb4bace4cbd50d08ed4be278d4f259822",
"index": 7049,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef cmd_create(nexus_client, **kwargs):\n \"\"\"Performs ``nexus3 cleanup_policy create``\"\"\"\n policy = cleanup_policy.CleanupPolicy(None, **kwargs)\n nexus_client.cleanup_policies.create_or_update(policy)\n return exception.CliReturnCode.SUCCESS.value\n",
"step-3": "<mask token>\n\n\ndef cmd_list(nexus_client):\n \"\"\"Performs ``nexus3 cleanup_policy list``\"\"\"\n policies = nexus_client.cleanup_policies.list()\n if len(policies) == 0:\n return exception.CliReturnCode.POLICY_NOT_FOUND.value\n table = Texttable(max_width=constants.TTY_MAX_WIDTH)\n table.add_row(['Name', 'Format', 'Downloaded', 'Updated', 'Regex'])\n table.set_deco(Texttable.HEADER)\n for policy in policies:\n p = policy.configuration\n table.add_row([p['name'], p['format'], p['criteria'].get(\n 'lastDownloaded', 'null'), p['criteria'].get('lastBlobUpdated',\n 'null'), p['criteria'].get('regex', 'null')])\n print(table.draw())\n return exception.CliReturnCode.SUCCESS.value\n\n\ndef cmd_create(nexus_client, **kwargs):\n \"\"\"Performs ``nexus3 cleanup_policy create``\"\"\"\n policy = cleanup_policy.CleanupPolicy(None, **kwargs)\n nexus_client.cleanup_policies.create_or_update(policy)\n return exception.CliReturnCode.SUCCESS.value\n",
"step-4": "from texttable import Texttable\nfrom nexuscli import exception\nfrom nexuscli.api import cleanup_policy\nfrom nexuscli.cli import constants\n\n\ndef cmd_list(nexus_client):\n \"\"\"Performs ``nexus3 cleanup_policy list``\"\"\"\n policies = nexus_client.cleanup_policies.list()\n if len(policies) == 0:\n return exception.CliReturnCode.POLICY_NOT_FOUND.value\n table = Texttable(max_width=constants.TTY_MAX_WIDTH)\n table.add_row(['Name', 'Format', 'Downloaded', 'Updated', 'Regex'])\n table.set_deco(Texttable.HEADER)\n for policy in policies:\n p = policy.configuration\n table.add_row([p['name'], p['format'], p['criteria'].get(\n 'lastDownloaded', 'null'), p['criteria'].get('lastBlobUpdated',\n 'null'), p['criteria'].get('regex', 'null')])\n print(table.draw())\n return exception.CliReturnCode.SUCCESS.value\n\n\ndef cmd_create(nexus_client, **kwargs):\n \"\"\"Performs ``nexus3 cleanup_policy create``\"\"\"\n policy = cleanup_policy.CleanupPolicy(None, **kwargs)\n nexus_client.cleanup_policies.create_or_update(policy)\n return exception.CliReturnCode.SUCCESS.value\n",
"step-5": "from texttable import Texttable\n\nfrom nexuscli import exception\nfrom nexuscli.api import cleanup_policy\nfrom nexuscli.cli import constants\n\n\ndef cmd_list(nexus_client):\n \"\"\"Performs ``nexus3 cleanup_policy list``\"\"\"\n policies = nexus_client.cleanup_policies.list()\n if len(policies) == 0:\n return exception.CliReturnCode.POLICY_NOT_FOUND.value\n\n table = Texttable(max_width=constants.TTY_MAX_WIDTH)\n table.add_row(\n ['Name', 'Format', 'Downloaded', 'Updated', 'Regex'])\n table.set_deco(Texttable.HEADER)\n for policy in policies:\n p = policy.configuration\n table.add_row([\n p['name'], p['format'],\n p['criteria'].get('lastDownloaded', 'null'),\n p['criteria'].get('lastBlobUpdated', 'null'),\n p['criteria'].get('regex', 'null')],\n )\n\n print(table.draw())\n return exception.CliReturnCode.SUCCESS.value\n\n\ndef cmd_create(nexus_client, **kwargs):\n \"\"\"Performs ``nexus3 cleanup_policy create``\"\"\"\n policy = cleanup_policy.CleanupPolicy(None, **kwargs)\n nexus_client.cleanup_policies.create_or_update(policy)\n\n return exception.CliReturnCode.SUCCESS.value\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class PageIndex(BasePageIndex, Indexable):
template = CharField(model_attr='template')
template_title = CharField(model_attr='get_template_display')
get_template_display = CharField(model_attr='get_template_display')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasePageIndex(SearchIndex):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_model(self):
return swapper.load_model('varlet', 'page')
class PageIndex(BasePageIndex, Indexable):
template = CharField(model_attr='template')
template_title = CharField(model_attr='get_template_display')
get_template_display = CharField(model_attr='get_template_display')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasePageIndex(SearchIndex):
text = CharField(document=True, use_template=True, template_name=
'search/indexes/varlet/page_text.txt')
url = CharField(model_attr='url')
get_absolute_url = CharField(model_attr='get_absolute_url')
created = DateTimeField(model_attr='created')
modified = DateTimeField(model_attr='modified')
def get_model(self):
return swapper.load_model('varlet', 'page')
class PageIndex(BasePageIndex, Indexable):
template = CharField(model_attr='template')
template_title = CharField(model_attr='get_template_display')
get_template_display = CharField(model_attr='get_template_display')
<|reserved_special_token_1|>
from __future__ import absolute_import, unicode_literals
import swapper
from haystack.constants import Indexable
from haystack.fields import CharField, DateTimeField
from haystack.indexes import SearchIndex
class BasePageIndex(SearchIndex):
text = CharField(document=True, use_template=True, template_name=
'search/indexes/varlet/page_text.txt')
url = CharField(model_attr='url')
get_absolute_url = CharField(model_attr='get_absolute_url')
created = DateTimeField(model_attr='created')
modified = DateTimeField(model_attr='modified')
def get_model(self):
return swapper.load_model('varlet', 'page')
class PageIndex(BasePageIndex, Indexable):
template = CharField(model_attr='template')
template_title = CharField(model_attr='get_template_display')
get_template_display = CharField(model_attr='get_template_display')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import swapper
from haystack.constants import Indexable
from haystack.fields import CharField, DateTimeField
from haystack.indexes import SearchIndex
class BasePageIndex(SearchIndex):
text = CharField(document=True, use_template=True, template_name='search/indexes/varlet/page_text.txt')
url = CharField(model_attr='url')
get_absolute_url = CharField(model_attr='get_absolute_url')
created = DateTimeField(model_attr='created')
modified = DateTimeField(model_attr='modified')
def get_model(self):
return swapper.load_model('varlet', 'page')
class PageIndex(BasePageIndex, Indexable):
template = CharField(model_attr='template')
template_title = CharField(model_attr='get_template_display')
get_template_display = CharField(model_attr='get_template_display')
|
flexible
|
{
"blob_id": "8e1eef3c5a9ca3ea504bbc269b48446527637626",
"index": 1323,
"step-1": "<mask token>\n\n\nclass PageIndex(BasePageIndex, Indexable):\n template = CharField(model_attr='template')\n template_title = CharField(model_attr='get_template_display')\n get_template_display = CharField(model_attr='get_template_display')\n",
"step-2": "<mask token>\n\n\nclass BasePageIndex(SearchIndex):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_model(self):\n return swapper.load_model('varlet', 'page')\n\n\nclass PageIndex(BasePageIndex, Indexable):\n template = CharField(model_attr='template')\n template_title = CharField(model_attr='get_template_display')\n get_template_display = CharField(model_attr='get_template_display')\n",
"step-3": "<mask token>\n\n\nclass BasePageIndex(SearchIndex):\n text = CharField(document=True, use_template=True, template_name=\n 'search/indexes/varlet/page_text.txt')\n url = CharField(model_attr='url')\n get_absolute_url = CharField(model_attr='get_absolute_url')\n created = DateTimeField(model_attr='created')\n modified = DateTimeField(model_attr='modified')\n\n def get_model(self):\n return swapper.load_model('varlet', 'page')\n\n\nclass PageIndex(BasePageIndex, Indexable):\n template = CharField(model_attr='template')\n template_title = CharField(model_attr='get_template_display')\n get_template_display = CharField(model_attr='get_template_display')\n",
"step-4": "from __future__ import absolute_import, unicode_literals\nimport swapper\nfrom haystack.constants import Indexable\nfrom haystack.fields import CharField, DateTimeField\nfrom haystack.indexes import SearchIndex\n\n\nclass BasePageIndex(SearchIndex):\n text = CharField(document=True, use_template=True, template_name=\n 'search/indexes/varlet/page_text.txt')\n url = CharField(model_attr='url')\n get_absolute_url = CharField(model_attr='get_absolute_url')\n created = DateTimeField(model_attr='created')\n modified = DateTimeField(model_attr='modified')\n\n def get_model(self):\n return swapper.load_model('varlet', 'page')\n\n\nclass PageIndex(BasePageIndex, Indexable):\n template = CharField(model_attr='template')\n template_title = CharField(model_attr='get_template_display')\n get_template_display = CharField(model_attr='get_template_display')\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nimport swapper\nfrom haystack.constants import Indexable\nfrom haystack.fields import CharField, DateTimeField\nfrom haystack.indexes import SearchIndex\n\n\nclass BasePageIndex(SearchIndex):\n text = CharField(document=True, use_template=True, template_name='search/indexes/varlet/page_text.txt')\n url = CharField(model_attr='url')\n get_absolute_url = CharField(model_attr='get_absolute_url')\n created = DateTimeField(model_attr='created')\n modified = DateTimeField(model_attr='modified')\n\n def get_model(self):\n return swapper.load_model('varlet', 'page')\n\nclass PageIndex(BasePageIndex, Indexable):\n template = CharField(model_attr='template')\n template_title = CharField(model_attr='get_template_display')\n get_template_display = CharField(model_attr='get_template_display')\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import tensorflow as tf
import tensorflow_probability as tfp
import pytest
import numpy as np
from estimators import NormalizingFlowNetwork
tfd = tfp.distributions
tf.random.set_seed(22)
np.random.seed(22)
@pytest.mark.slow
def test_x_noise_reg():
x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train), scale_diag=abs(x_train))
y_train = noise.sample().numpy()
too_much_noise = NormalizingFlowNetwork(
1,
n_flows=2,
hidden_sizes=(16, 16),
noise_reg=("fixed_rate", 3.0),
trainable_base_dist=True,
)
too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)
x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test), scale_diag=abs(x_test))
y_test = noise.sample().numpy()
out1 = too_much_noise.pdf(x_test, y_test).numpy()
out2 = too_much_noise.pdf(x_test, y_test).numpy()
# making sure that the noise regularisation is deactivated in testing mode
assert all(out1 == out2)
little_noise = NormalizingFlowNetwork(
1,
n_flows=2,
hidden_sizes=(16, 16),
noise_reg=("rule_of_thumb", 0.1),
trainable_base_dist=True,
)
little_noise.fit(x_train, y_train, epochs=700, verbose=0)
little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)) / 700.0
too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)) / 700.0
assert little_noise_score > too_much_noise_score
def test_y_noise_reg():
x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape((10, 3))
y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape((10, 3))
noise = NormalizingFlowNetwork(
3,
n_flows=3,
hidden_sizes=(16, 16),
trainable_base_dist=True,
noise_reg=("fixed_rate", 1.0),
)
noise.fit(x_train, y_train, epochs=10, verbose=0)
input_model = noise._get_input_model()
# y_input should not include randomness during evaluation
y1 = input_model(y_train, training=False).numpy()
y2 = input_model(y_train, training=False).numpy()
assert np.all(y1 == y2)
# loss should include randomness during learning
y1 = input_model(y_train, training=True).numpy()
y2 = input_model(y_train, training=True).numpy()
assert not np.all(y1 == y2)
|
normal
|
{
"blob_id": "303a8609cb21c60a416160264c3d3da805674920",
"index": 777,
"step-1": "<mask token>\n\n\n@pytest.mark.slow\ndef test_x_noise_reg():\n x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))\n noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train),\n scale_diag=abs(x_train))\n y_train = noise.sample().numpy()\n too_much_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,\n 16), noise_reg=('fixed_rate', 3.0), trainable_base_dist=True)\n too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)\n x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))\n noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test),\n scale_diag=abs(x_test))\n y_test = noise.sample().numpy()\n out1 = too_much_noise.pdf(x_test, y_test).numpy()\n out2 = too_much_noise.pdf(x_test, y_test).numpy()\n assert all(out1 == out2)\n little_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16, \n 16), noise_reg=('rule_of_thumb', 0.1), trainable_base_dist=True)\n little_noise.fit(x_train, y_train, epochs=700, verbose=0)\n little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)\n ) / 700.0\n too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)\n ) / 700.0\n assert little_noise_score > too_much_noise_score\n\n\ndef test_y_noise_reg():\n x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(\n (10, 3))\n y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(\n (10, 3))\n noise = NormalizingFlowNetwork(3, n_flows=3, hidden_sizes=(16, 16),\n trainable_base_dist=True, noise_reg=('fixed_rate', 1.0))\n noise.fit(x_train, y_train, epochs=10, verbose=0)\n input_model = noise._get_input_model()\n y1 = input_model(y_train, training=False).numpy()\n y2 = input_model(y_train, training=False).numpy()\n assert np.all(y1 == y2)\n y1 = input_model(y_train, training=True).numpy()\n y2 = input_model(y_train, training=True).numpy()\n assert not np.all(y1 == y2)\n",
"step-2": "<mask token>\ntf.random.set_seed(22)\nnp.random.seed(22)\n\n\n@pytest.mark.slow\ndef test_x_noise_reg():\n x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))\n noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train),\n scale_diag=abs(x_train))\n y_train = noise.sample().numpy()\n too_much_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,\n 16), noise_reg=('fixed_rate', 3.0), trainable_base_dist=True)\n too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)\n x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))\n noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test),\n scale_diag=abs(x_test))\n y_test = noise.sample().numpy()\n out1 = too_much_noise.pdf(x_test, y_test).numpy()\n out2 = too_much_noise.pdf(x_test, y_test).numpy()\n assert all(out1 == out2)\n little_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16, \n 16), noise_reg=('rule_of_thumb', 0.1), trainable_base_dist=True)\n little_noise.fit(x_train, y_train, epochs=700, verbose=0)\n little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)\n ) / 700.0\n too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)\n ) / 700.0\n assert little_noise_score > too_much_noise_score\n\n\ndef test_y_noise_reg():\n x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(\n (10, 3))\n y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(\n (10, 3))\n noise = NormalizingFlowNetwork(3, n_flows=3, hidden_sizes=(16, 16),\n trainable_base_dist=True, noise_reg=('fixed_rate', 1.0))\n noise.fit(x_train, y_train, epochs=10, verbose=0)\n input_model = noise._get_input_model()\n y1 = input_model(y_train, training=False).numpy()\n y2 = input_model(y_train, training=False).numpy()\n assert np.all(y1 == y2)\n y1 = input_model(y_train, training=True).numpy()\n y2 = input_model(y_train, training=True).numpy()\n assert not np.all(y1 == y2)\n",
"step-3": "<mask token>\ntfd = tfp.distributions\ntf.random.set_seed(22)\nnp.random.seed(22)\n\n\n@pytest.mark.slow\ndef test_x_noise_reg():\n x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))\n noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train),\n scale_diag=abs(x_train))\n y_train = noise.sample().numpy()\n too_much_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,\n 16), noise_reg=('fixed_rate', 3.0), trainable_base_dist=True)\n too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)\n x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))\n noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test),\n scale_diag=abs(x_test))\n y_test = noise.sample().numpy()\n out1 = too_much_noise.pdf(x_test, y_test).numpy()\n out2 = too_much_noise.pdf(x_test, y_test).numpy()\n assert all(out1 == out2)\n little_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16, \n 16), noise_reg=('rule_of_thumb', 0.1), trainable_base_dist=True)\n little_noise.fit(x_train, y_train, epochs=700, verbose=0)\n little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)\n ) / 700.0\n too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)\n ) / 700.0\n assert little_noise_score > too_much_noise_score\n\n\ndef test_y_noise_reg():\n x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(\n (10, 3))\n y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(\n (10, 3))\n noise = NormalizingFlowNetwork(3, n_flows=3, hidden_sizes=(16, 16),\n trainable_base_dist=True, noise_reg=('fixed_rate', 1.0))\n noise.fit(x_train, y_train, epochs=10, verbose=0)\n input_model = noise._get_input_model()\n y1 = input_model(y_train, training=False).numpy()\n y2 = input_model(y_train, training=False).numpy()\n assert np.all(y1 == y2)\n y1 = input_model(y_train, training=True).numpy()\n y2 = input_model(y_train, training=True).numpy()\n assert not np.all(y1 == y2)\n",
"step-4": "import tensorflow as tf\nimport tensorflow_probability as tfp\nimport pytest\nimport numpy as np\nfrom estimators import NormalizingFlowNetwork\ntfd = tfp.distributions\ntf.random.set_seed(22)\nnp.random.seed(22)\n\n\n@pytest.mark.slow\ndef test_x_noise_reg():\n x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))\n noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train),\n scale_diag=abs(x_train))\n y_train = noise.sample().numpy()\n too_much_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,\n 16), noise_reg=('fixed_rate', 3.0), trainable_base_dist=True)\n too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)\n x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))\n noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test),\n scale_diag=abs(x_test))\n y_test = noise.sample().numpy()\n out1 = too_much_noise.pdf(x_test, y_test).numpy()\n out2 = too_much_noise.pdf(x_test, y_test).numpy()\n assert all(out1 == out2)\n little_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16, \n 16), noise_reg=('rule_of_thumb', 0.1), trainable_base_dist=True)\n little_noise.fit(x_train, y_train, epochs=700, verbose=0)\n little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)\n ) / 700.0\n too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)\n ) / 700.0\n assert little_noise_score > too_much_noise_score\n\n\ndef test_y_noise_reg():\n x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(\n (10, 3))\n y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(\n (10, 3))\n noise = NormalizingFlowNetwork(3, n_flows=3, hidden_sizes=(16, 16),\n trainable_base_dist=True, noise_reg=('fixed_rate', 1.0))\n noise.fit(x_train, y_train, epochs=10, verbose=0)\n input_model = noise._get_input_model()\n y1 = input_model(y_train, training=False).numpy()\n y2 = input_model(y_train, training=False).numpy()\n assert np.all(y1 == y2)\n y1 = 
input_model(y_train, training=True).numpy()\n y2 = input_model(y_train, training=True).numpy()\n assert not np.all(y1 == y2)\n",
"step-5": "import tensorflow as tf\nimport tensorflow_probability as tfp\nimport pytest\nimport numpy as np\nfrom estimators import NormalizingFlowNetwork\n\ntfd = tfp.distributions\ntf.random.set_seed(22)\nnp.random.seed(22)\n\n\n@pytest.mark.slow\ndef test_x_noise_reg():\n x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))\n noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train), scale_diag=abs(x_train))\n y_train = noise.sample().numpy()\n\n too_much_noise = NormalizingFlowNetwork(\n 1,\n n_flows=2,\n hidden_sizes=(16, 16),\n noise_reg=(\"fixed_rate\", 3.0),\n trainable_base_dist=True,\n )\n\n too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)\n\n x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))\n noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test), scale_diag=abs(x_test))\n y_test = noise.sample().numpy()\n out1 = too_much_noise.pdf(x_test, y_test).numpy()\n out2 = too_much_noise.pdf(x_test, y_test).numpy()\n # making sure that the noise regularisation is deactivated in testing mode\n assert all(out1 == out2)\n\n little_noise = NormalizingFlowNetwork(\n 1,\n n_flows=2,\n hidden_sizes=(16, 16),\n noise_reg=(\"rule_of_thumb\", 0.1),\n trainable_base_dist=True,\n )\n little_noise.fit(x_train, y_train, epochs=700, verbose=0)\n\n little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)) / 700.0\n too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)) / 700.0\n assert little_noise_score > too_much_noise_score\n\n\ndef test_y_noise_reg():\n x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape((10, 3))\n y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape((10, 3))\n\n noise = NormalizingFlowNetwork(\n 3,\n n_flows=3,\n hidden_sizes=(16, 16),\n trainable_base_dist=True,\n noise_reg=(\"fixed_rate\", 1.0),\n )\n noise.fit(x_train, y_train, epochs=10, verbose=0)\n\n input_model = noise._get_input_model()\n # y_input should 
not include randomness during evaluation\n y1 = input_model(y_train, training=False).numpy()\n y2 = input_model(y_train, training=False).numpy()\n assert np.all(y1 == y2)\n\n # loss should include randomness during learning\n y1 = input_model(y_train, training=True).numpy()\n y2 = input_model(y_train, training=True).numpy()\n assert not np.all(y1 == y2)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os, random, string
from django.conf import settings
from django.template.loader import render_to_string
from django.core.mail import send_mail
def generate_temp_password(length=7):
    """Return a random alphanumeric temporary password.

    :param length: number of characters to generate (default 7, matching
        the previous hard-coded behavior).
    :return: string drawn from ASCII letters and digits.
    """
    chars = string.ascii_letters + string.digits
    # SystemRandom draws from the OS entropy pool, which is appropriate
    # for password material (plain random.choice would not be).
    rnd = random.SystemRandom()
    return ''.join(rnd.choice(chars) for _ in range(length))
def send_confirmation_email(user):
    """Send the address-confirmation email for ``user``.

    Works around a simple_email_confirmation bug where accessing
    ``confirmation_key`` can raise before an unconfirmed address exists:
    https://github.com/mfogel/django-simple-email-confirmation/issues/22

    :param user: user object exposing ``email``, ``confirmation_key`` and
        ``add_unconfirmed_email`` (simple_email_confirmation mixin).
    :return: number of messages sent, as reported by Django's send_mail.
    """
    try:
        confirmation_key = user.confirmation_key
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed. No key yet for this address -- register the
        # address to obtain one.
        confirmation_key = user.add_unconfirmed_email(user.email)
    # Build the template context once instead of duplicating it per template.
    context = {'SITE_URL': settings.SITE_URL, 'user': user.email,
               'key': confirmation_key}
    msg_txt = render_to_string('email/confirmation.txt', context)
    msg_html = render_to_string('email/confirmation.html', context)
    return send_mail('Confirmation email', msg_txt,
                     'daniyar.yeralin@gmail.com', [user.email],
                     html_message=msg_html)
|
normal
|
{
"blob_id": "822fc2941099cb9d7791580678cfb2a89a987175",
"index": 4685,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef send_confirmation_email(user):\n try:\n confirmation_key = user.confirmation_key\n except:\n confirmation_key = user.add_unconfirmed_email(user.email)\n msg_txt = render_to_string('email/confirmation.txt', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n msg_html = render_to_string('email/confirmation.html', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n return send_mail('Confirmation email', msg_txt,\n 'daniyar.yeralin@gmail.com', [user.email], html_message=msg_html)\n",
"step-3": "<mask token>\n\n\ndef generate_temp_password():\n length = 7\n chars = string.ascii_letters + string.digits\n rnd = random.SystemRandom()\n return ''.join(rnd.choice(chars) for i in range(length))\n\n\ndef send_confirmation_email(user):\n try:\n confirmation_key = user.confirmation_key\n except:\n confirmation_key = user.add_unconfirmed_email(user.email)\n msg_txt = render_to_string('email/confirmation.txt', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n msg_html = render_to_string('email/confirmation.html', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n return send_mail('Confirmation email', msg_txt,\n 'daniyar.yeralin@gmail.com', [user.email], html_message=msg_html)\n",
"step-4": "import os, random, string\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.core.mail import send_mail\n\n\ndef generate_temp_password():\n length = 7\n chars = string.ascii_letters + string.digits\n rnd = random.SystemRandom()\n return ''.join(rnd.choice(chars) for i in range(length))\n\n\ndef send_confirmation_email(user):\n try:\n confirmation_key = user.confirmation_key\n except:\n confirmation_key = user.add_unconfirmed_email(user.email)\n msg_txt = render_to_string('email/confirmation.txt', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n msg_html = render_to_string('email/confirmation.html', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n return send_mail('Confirmation email', msg_txt,\n 'daniyar.yeralin@gmail.com', [user.email], html_message=msg_html)\n",
"step-5": "import os, random, string\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.core.mail import send_mail\n\ndef generate_temp_password(): \n length = 7\n chars = string.ascii_letters + string.digits\n rnd = random.SystemRandom()\n return ''.join(rnd.choice(chars) for i in range(length))\n\ndef send_confirmation_email(user):\n #Bug in simple_email_confirmation: refer to https://github.com/mfogel/django-simple-email-confirmation/issues/22\n try: \n confirmation_key = user.confirmation_key\n except:\n confirmation_key = user.add_unconfirmed_email(user.email)\n msg_txt=render_to_string('email/confirmation.txt', {'SITE_URL': settings.SITE_URL, 'user': user.email, 'key' : confirmation_key})\n msg_html = render_to_string('email/confirmation.html', {'SITE_URL': settings.SITE_URL, 'user': user.email, 'key' : confirmation_key})\n return send_mail('Confirmation email',msg_txt,'daniyar.yeralin@gmail.com',[user.email],html_message=msg_html,)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Styled(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return str(self._styles)
def __repr__(self):
cls = self.__class__.__name__
items = pprint.pformat(self._styles)
nature = self.nature
return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,
nature=nature)
@property
def styles(self):
""" Dictionary of styles: key-value pairs. """
return self._styles
@styles.setter
def styles(self, styles):
""" Setup the dictionary of styles (shallow copy of the items). """
self._styles = {} if styles is None else styles.copy()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Styled(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, styles, nature):
"""
Construct a styled object from a dictionary of styles.
:type styles: typing.Dict[str, str]
:param styles:
Dictionary of key-value pairs, where *keys* are the style names.
:type nature: str
:ivar nature:
Cell *nature* used to distinguish the body cells, from the header and the footer.
Table *nature* used to store a value similar to HTML ``@class`` attribute.
"""
self.styles = styles
self.nature = nature
def __str__(self):
return str(self._styles)
def __repr__(self):
cls = self.__class__.__name__
items = pprint.pformat(self._styles)
nature = self.nature
return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,
nature=nature)
@property
def styles(self):
""" Dictionary of styles: key-value pairs. """
return self._styles
@styles.setter
def styles(self, styles):
""" Setup the dictionary of styles (shallow copy of the items). """
self._styles = {} if styles is None else styles.copy()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Styled(object):
"""
Styled object, like Table, Row, Column, or Cell objects.
A styled object stores user-defined styles: a dictionary of key-value pairs.
This values are useful to store some HTML-like styles (border-style,
border-width, border-color, vertical-align, text-align, etc.).
Of course, we are not tied to the HTML-like styles, you can use your
own list of styles.
.. note::
The style dictionary is always copied: in other words, key-value pairs
are copied but a shallow copy is done for the values (in general, it
is not a problem if you use non-mutable values like :class:`str`).
A styled object stores a nature: a way to distinguish the body cells,
from the header and the footer. The default value is ``None``, but you can
use "body", "header", "footer" or whatever is suitable for your needs.
This kind of information is in general not stored in the styles,
even if it is similar.
Tables can also have a *nature*, similar to HTML ``@class`` attribute,
you can use it do identify the styles to apply to your table.
.. note::
In a :class:`~benker.grid.Grid`, the :ref:`merging <benker__grid__merging>`
of two natures is done by keeping the first nature and
dropping the second one. In other words, the resulting nature is
the group of the most top-left nature of the merged cells.
"""
__slots__ = '_styles', 'nature'
def __init__(self, styles, nature):
"""
Construct a styled object from a dictionary of styles.
:type styles: typing.Dict[str, str]
:param styles:
Dictionary of key-value pairs, where *keys* are the style names.
:type nature: str
:ivar nature:
Cell *nature* used to distinguish the body cells, from the header and the footer.
Table *nature* used to store a value similar to HTML ``@class`` attribute.
"""
self.styles = styles
self.nature = nature
def __str__(self):
return str(self._styles)
def __repr__(self):
cls = self.__class__.__name__
items = pprint.pformat(self._styles)
nature = self.nature
return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,
nature=nature)
@property
def styles(self):
""" Dictionary of styles: key-value pairs. """
return self._styles
@styles.setter
def styles(self, styles):
""" Setup the dictionary of styles (shallow copy of the items). """
self._styles = {} if styles is None else styles.copy()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pprint
class Styled(object):
"""
Styled object, like Table, Row, Column, or Cell objects.
A styled object stores user-defined styles: a dictionary of key-value pairs.
This values are useful to store some HTML-like styles (border-style,
border-width, border-color, vertical-align, text-align, etc.).
Of course, we are not tied to the HTML-like styles, you can use your
own list of styles.
.. note::
The style dictionary is always copied: in other words, key-value pairs
are copied but a shallow copy is done for the values (in general, it
is not a problem if you use non-mutable values like :class:`str`).
A styled object stores a nature: a way to distinguish the body cells,
from the header and the footer. The default value is ``None``, but you can
use "body", "header", "footer" or whatever is suitable for your needs.
This kind of information is in general not stored in the styles,
even if it is similar.
Tables can also have a *nature*, similar to HTML ``@class`` attribute,
you can use it do identify the styles to apply to your table.
.. note::
In a :class:`~benker.grid.Grid`, the :ref:`merging <benker__grid__merging>`
of two natures is done by keeping the first nature and
dropping the second one. In other words, the resulting nature is
the group of the most top-left nature of the merged cells.
"""
__slots__ = '_styles', 'nature'
def __init__(self, styles, nature):
"""
Construct a styled object from a dictionary of styles.
:type styles: typing.Dict[str, str]
:param styles:
Dictionary of key-value pairs, where *keys* are the style names.
:type nature: str
:ivar nature:
Cell *nature* used to distinguish the body cells, from the header and the footer.
Table *nature* used to store a value similar to HTML ``@class`` attribute.
"""
self.styles = styles
self.nature = nature
def __str__(self):
return str(self._styles)
def __repr__(self):
cls = self.__class__.__name__
items = pprint.pformat(self._styles)
nature = self.nature
return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,
nature=nature)
@property
def styles(self):
""" Dictionary of styles: key-value pairs. """
return self._styles
@styles.setter
def styles(self, styles):
""" Setup the dictionary of styles (shallow copy of the items). """
self._styles = {} if styles is None else styles.copy()
<|reserved_special_token_1|>
# coding: utf-8
"""
Styled object
=============
A :class:`~benker.styled.Styled` object contains a dictionary of styles.
It is mainly used for :class:`~benker.table.Table`, :class:`~benker.table.RowView`,
:class:`~benker.table.ColView`, and :class:`~benker.cell.Cell`.
"""
import pprint
class Styled(object):
    """
    Base class for objects that carry user-defined styles (Table, Row,
    Column, Cell).

    Instances hold two pieces of state:

    * ``styles`` -- a dictionary of key-value pairs, typically HTML-like
      style names (border-style, border-width, vertical-align, ...) but
      any caller-chosen vocabulary works.  The mapping handed in is
      shallow-copied, so an instance never shares its dictionary with
      the caller (values themselves are not copied -- fine for
      immutable values such as :class:`str`).
    * ``nature`` -- a free-form tag kept outside the styles proper.
      For cells it distinguishes "body", "header", "footer", etc.;
      for tables it plays the role of an HTML ``@class`` attribute.
      ``None`` means untagged.

    .. note::

        When two cells are merged in a :class:`~benker.grid.Grid`
        (:ref:`merging <benker__grid__merging>`), only the first nature
        survives: the result carries the nature of the most top-left
        merged cell.
    """

    __slots__ = ("_styles", "nature")

    def __init__(self, styles, nature):
        """
        Build a styled object.

        :type styles: typing.Dict[str, str]
        :param styles: mapping of style names to values (shallow-copied
            via the ``styles`` property setter).

        :type nature: str
        :param nature: tag distinguishing body/header/footer cells, or an
            HTML ``@class``-like label for tables.
        """
        # Assignment goes through the property setter below, which copies.
        self.styles = styles
        self.nature = nature

    def __str__(self):
        return str(self._styles)

    def __repr__(self):
        return "<{0}({1}, {2!r})>".format(
            type(self).__name__, pprint.pformat(self._styles), self.nature)

    @property
    def styles(self):
        """ Dictionary of styles: key-value pairs. """
        return self._styles

    @styles.setter
    def styles(self, styles):
        """ Setup the dictionary of styles (shallow copy of the items). """
        if styles is None:
            self._styles = {}
        else:
            # Each instance owns its own copy of the styles.
            self._styles = styles.copy()
|
flexible
|
{
"blob_id": "8fa58791aae1352109b3bf7410d68bf5ae1d8cb7",
"index": 9559,
"step-1": "<mask token>\n\n\nclass Styled(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return str(self._styles)\n\n def __repr__(self):\n cls = self.__class__.__name__\n items = pprint.pformat(self._styles)\n nature = self.nature\n return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,\n nature=nature)\n\n @property\n def styles(self):\n \"\"\" Dictionary of styles: key-value pairs. \"\"\"\n return self._styles\n\n @styles.setter\n def styles(self, styles):\n \"\"\" Setup the dictionary of styles (shallow copy of the items). \"\"\"\n self._styles = {} if styles is None else styles.copy()\n",
"step-2": "<mask token>\n\n\nclass Styled(object):\n <mask token>\n <mask token>\n\n def __init__(self, styles, nature):\n \"\"\"\n Construct a styled object from a dictionary of styles.\n\n :type styles: typing.Dict[str, str]\n :param styles:\n Dictionary of key-value pairs, where *keys* are the style names.\n\n :type nature: str\n :ivar nature:\n Cell *nature* used to distinguish the body cells, from the header and the footer.\n\n Table *nature* used to store a value similar to HTML ``@class`` attribute.\n \"\"\"\n self.styles = styles\n self.nature = nature\n\n def __str__(self):\n return str(self._styles)\n\n def __repr__(self):\n cls = self.__class__.__name__\n items = pprint.pformat(self._styles)\n nature = self.nature\n return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,\n nature=nature)\n\n @property\n def styles(self):\n \"\"\" Dictionary of styles: key-value pairs. \"\"\"\n return self._styles\n\n @styles.setter\n def styles(self, styles):\n \"\"\" Setup the dictionary of styles (shallow copy of the items). \"\"\"\n self._styles = {} if styles is None else styles.copy()\n",
"step-3": "<mask token>\n\n\nclass Styled(object):\n \"\"\"\n Styled object, like Table, Row, Column, or Cell objects.\n\n A styled object stores user-defined styles: a dictionary of key-value pairs.\n This values are useful to store some HTML-like styles (border-style,\n border-width, border-color, vertical-align, text-align, etc.).\n Of course, we are not tied to the HTML-like styles, you can use your\n own list of styles.\n\n .. note::\n\n The style dictionary is always copied: in other words, key-value pairs\n are copied but a shallow copy is done for the values (in general, it\n is not a problem if you use non-mutable values like :class:`str`).\n\n A styled object stores a nature: a way to distinguish the body cells,\n from the header and the footer. The default value is ``None``, but you can\n use \"body\", \"header\", \"footer\" or whatever is suitable for your needs.\n This kind of information is in general not stored in the styles,\n even if it is similar.\n\n Tables can also have a *nature*, similar to HTML ``@class`` attribute,\n you can use it do identify the styles to apply to your table.\n\n .. note::\n\n In a :class:`~benker.grid.Grid`, the :ref:`merging <benker__grid__merging>`\n of two natures is done by keeping the first nature and\n dropping the second one. 
In other words, the resulting nature is\n the group of the most top-left nature of the merged cells.\n\n \"\"\"\n __slots__ = '_styles', 'nature'\n\n def __init__(self, styles, nature):\n \"\"\"\n Construct a styled object from a dictionary of styles.\n\n :type styles: typing.Dict[str, str]\n :param styles:\n Dictionary of key-value pairs, where *keys* are the style names.\n\n :type nature: str\n :ivar nature:\n Cell *nature* used to distinguish the body cells, from the header and the footer.\n\n Table *nature* used to store a value similar to HTML ``@class`` attribute.\n \"\"\"\n self.styles = styles\n self.nature = nature\n\n def __str__(self):\n return str(self._styles)\n\n def __repr__(self):\n cls = self.__class__.__name__\n items = pprint.pformat(self._styles)\n nature = self.nature\n return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,\n nature=nature)\n\n @property\n def styles(self):\n \"\"\" Dictionary of styles: key-value pairs. \"\"\"\n return self._styles\n\n @styles.setter\n def styles(self, styles):\n \"\"\" Setup the dictionary of styles (shallow copy of the items). \"\"\"\n self._styles = {} if styles is None else styles.copy()\n",
"step-4": "<mask token>\nimport pprint\n\n\nclass Styled(object):\n \"\"\"\n Styled object, like Table, Row, Column, or Cell objects.\n\n A styled object stores user-defined styles: a dictionary of key-value pairs.\n This values are useful to store some HTML-like styles (border-style,\n border-width, border-color, vertical-align, text-align, etc.).\n Of course, we are not tied to the HTML-like styles, you can use your\n own list of styles.\n\n .. note::\n\n The style dictionary is always copied: in other words, key-value pairs\n are copied but a shallow copy is done for the values (in general, it\n is not a problem if you use non-mutable values like :class:`str`).\n\n A styled object stores a nature: a way to distinguish the body cells,\n from the header and the footer. The default value is ``None``, but you can\n use \"body\", \"header\", \"footer\" or whatever is suitable for your needs.\n This kind of information is in general not stored in the styles,\n even if it is similar.\n\n Tables can also have a *nature*, similar to HTML ``@class`` attribute,\n you can use it do identify the styles to apply to your table.\n\n .. note::\n\n In a :class:`~benker.grid.Grid`, the :ref:`merging <benker__grid__merging>`\n of two natures is done by keeping the first nature and\n dropping the second one. 
In other words, the resulting nature is\n the group of the most top-left nature of the merged cells.\n\n \"\"\"\n __slots__ = '_styles', 'nature'\n\n def __init__(self, styles, nature):\n \"\"\"\n Construct a styled object from a dictionary of styles.\n\n :type styles: typing.Dict[str, str]\n :param styles:\n Dictionary of key-value pairs, where *keys* are the style names.\n\n :type nature: str\n :ivar nature:\n Cell *nature* used to distinguish the body cells, from the header and the footer.\n\n Table *nature* used to store a value similar to HTML ``@class`` attribute.\n \"\"\"\n self.styles = styles\n self.nature = nature\n\n def __str__(self):\n return str(self._styles)\n\n def __repr__(self):\n cls = self.__class__.__name__\n items = pprint.pformat(self._styles)\n nature = self.nature\n return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,\n nature=nature)\n\n @property\n def styles(self):\n \"\"\" Dictionary of styles: key-value pairs. \"\"\"\n return self._styles\n\n @styles.setter\n def styles(self, styles):\n \"\"\" Setup the dictionary of styles (shallow copy of the items). \"\"\"\n self._styles = {} if styles is None else styles.copy()\n",
"step-5": "# coding: utf-8\n\"\"\"\nStyled object\n=============\n\nA :class:`~benker.styled.Styled` object contains a dictionary of styles.\n\nIt is mainly used for :class:`~benker.table.Table`, :class:`~benker.table.RowView`,\n:class:`~benker.table.ColView`, and :class:`~benker.cell.Cell`.\n\n\"\"\"\nimport pprint\n\n\nclass Styled(object):\n \"\"\"\n Styled object, like Table, Row, Column, or Cell objects.\n\n A styled object stores user-defined styles: a dictionary of key-value pairs.\n This values are useful to store some HTML-like styles (border-style,\n border-width, border-color, vertical-align, text-align, etc.).\n Of course, we are not tied to the HTML-like styles, you can use your\n own list of styles.\n\n .. note::\n\n The style dictionary is always copied: in other words, key-value pairs\n are copied but a shallow copy is done for the values (in general, it\n is not a problem if you use non-mutable values like :class:`str`).\n\n A styled object stores a nature: a way to distinguish the body cells,\n from the header and the footer. The default value is ``None``, but you can\n use \"body\", \"header\", \"footer\" or whatever is suitable for your needs.\n This kind of information is in general not stored in the styles,\n even if it is similar.\n\n Tables can also have a *nature*, similar to HTML ``@class`` attribute,\n you can use it do identify the styles to apply to your table.\n\n .. note::\n\n In a :class:`~benker.grid.Grid`, the :ref:`merging <benker__grid__merging>`\n of two natures is done by keeping the first nature and\n dropping the second one. 
In other words, the resulting nature is\n the group of the most top-left nature of the merged cells.\n\n \"\"\"\n __slots__ = ('_styles', 'nature')\n\n def __init__(self, styles, nature):\n \"\"\"\n Construct a styled object from a dictionary of styles.\n\n :type styles: typing.Dict[str, str]\n :param styles:\n Dictionary of key-value pairs, where *keys* are the style names.\n\n :type nature: str\n :ivar nature:\n Cell *nature* used to distinguish the body cells, from the header and the footer.\n\n Table *nature* used to store a value similar to HTML ``@class`` attribute.\n \"\"\"\n #: Dictionary of key-value pairs, where *keys* are the style names.\n self.styles = styles\n\n #: Cell *nature* used to distinguish the body cells, from the header and the footer.\n self.nature = nature\n\n def __str__(self):\n return str(self._styles)\n\n def __repr__(self):\n cls = self.__class__.__name__\n items = pprint.pformat(self._styles)\n nature = self.nature\n return \"<{cls}({items}, {nature!r})>\".format(cls=cls, items=items, nature=nature)\n\n @property\n def styles(self):\n \"\"\" Dictionary of styles: key-value pairs. \"\"\"\n return self._styles\n\n @styles.setter\n def styles(self, styles):\n \"\"\" Setup the dictionary of styles (shallow copy of the items). \"\"\"\n # each cell owns it's own copy of the styles\n self._styles = {} if styles is None else styles.copy()\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
<|reserved_special_token_0|>
class ArrayReader:
def __init__(self, arr):
self.arr = arr
def get(self, index):
if index > len(self.arr):
return math.inf
return self.arr[index]
<|reserved_special_token_0|>
def binary_search_array(reader, key, low, high):
while low <= high:
mid = (low + high) // 2
if key == reader.get(mid):
return mid
if key > reader.get(mid):
low = mid + 1
else:
high = mid - 1
return -1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ArrayReader:
def __init__(self, arr):
self.arr = arr
def get(self, index):
if index > len(self.arr):
return math.inf
return self.arr[index]
def search_in_infinite_array(reader, key):
low = 0
high = 1
while reader.get(high) < key:
new_low = high + 1
high = (high - low + 1) * 2
low = new_low
return binary_search_array(reader, key, low, high)
def binary_search_array(reader, key, low, high):
while low <= high:
mid = (low + high) // 2
if key == reader.get(mid):
return mid
if key > reader.get(mid):
low = mid + 1
else:
high = mid - 1
return -1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ArrayReader:
def __init__(self, arr):
self.arr = arr
def get(self, index):
if index > len(self.arr):
return math.inf
return self.arr[index]
def search_in_infinite_array(reader, key):
low = 0
high = 1
while reader.get(high) < key:
new_low = high + 1
high = (high - low + 1) * 2
low = new_low
return binary_search_array(reader, key, low, high)
def binary_search_array(reader, key, low, high):
while low <= high:
mid = (low + high) // 2
if key == reader.get(mid):
return mid
if key > reader.get(mid):
low = mid + 1
else:
high = mid - 1
return -1
reader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
print(search_in_infinite_array(reader, 16))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import math
class ArrayReader:
def __init__(self, arr):
self.arr = arr
def get(self, index):
if index > len(self.arr):
return math.inf
return self.arr[index]
def search_in_infinite_array(reader, key):
low = 0
high = 1
while reader.get(high) < key:
new_low = high + 1
high = (high - low + 1) * 2
low = new_low
return binary_search_array(reader, key, low, high)
def binary_search_array(reader, key, low, high):
while low <= high:
mid = (low + high) // 2
if key == reader.get(mid):
return mid
if key > reader.get(mid):
low = mid + 1
else:
high = mid - 1
return -1
reader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
print(search_in_infinite_array(reader, 16))
<|reserved_special_token_1|>
'''
Given an infinite sorted array (or an array with unknown size), find if a given number ‘key’ is present in the array. Write a function to return the index of the ‘key’ if it is present in the array, otherwise return -1.
Since it is not possible to define an array with infinite (unknown) size, you will be provided with an interface ArrayReader to read elements of the array. ArrayReader.get(index) will return the number at index; if the array’s size is smaller than the index, it will return Integer.MAX_VALUE.
Example 1:
Input: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], key = 16
Output: 6
Explanation: The key is present at index '6' in the array.
Example 2:
Input: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], key = 11
Output: -1
Explanation: The key is not present in the array.
Example 3:
Input: [1, 3, 8, 10, 15], key = 15
Output: 4
Explanation: The key is present at index '4' in the array.
Example 4:
Input: [1, 3, 8, 10, 15], key = 200
Output: -1
Explanation: The key is not present in the array.
'''
import math
class ArrayReader:
    """Reader that simulates a sorted array of unknown ("infinite") size.

    ``get`` returns the stored element for an in-range index and
    ``math.inf`` for any index past the end, mirroring an interface
    whose out-of-range reads yield Integer.MAX_VALUE.
    """

    def __init__(self, arr):
        # Backing list for the simulated infinite array.
        self.arr = arr

    def get(self, index):
        """Return ``arr[index]``, or ``math.inf`` when out of range."""
        # Fix: use >= so that index == len(arr) counts as out of range
        # instead of raising IndexError (off-by-one in the original).
        if index >= len(self.arr):
            return math.inf
        return self.arr[index]
def search_in_infinite_array(reader, key):
    """Locate ``key`` in a sorted array of unknown size behind ``reader``.

    First grows a ``[low, high]`` window exponentially (doubling its
    width each round) until ``reader.get(high)`` is at least ``key``,
    then binary-searches inside that window.

    :return: index of ``key``, or -1 if it is absent.
    """
    low, high = 0, 1
    while reader.get(high) < key:
        # Next window starts just past the current upper bound and is
        # twice as wide; both right-hand sides use the old low/high.
        low, high = high + 1, (high - low + 1) * 2
    return binary_search_array(reader, key, low, high)
def binary_search_array(reader, key, low, high):
    """Binary search for ``key`` over ``reader.get`` within [low, high].

    :return: index of ``key``, or -1 if it is not present.
    """
    while low <= high:
        # Equivalent to (low + high) // 2, written overflow-safe style.
        mid = low + (high - low) // 2
        if reader.get(mid) == key:
            return mid
        if reader.get(mid) < key:
            low = mid + 1
        else:
            high = mid - 1
    return -1
# Demo: 16 sits at index 6 of the backing array, so this prints 6.
reader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
print(search_in_infinite_array(reader, 16))
|
flexible
|
{
"blob_id": "a9efa258c223460b2b79861acdde89161706ad9a",
"index": 8770,
"step-1": "<mask token>\n\n\nclass ArrayReader:\n\n def __init__(self, arr):\n self.arr = arr\n\n def get(self, index):\n if index > len(self.arr):\n return math.inf\n return self.arr[index]\n\n\n<mask token>\n\n\ndef binary_search_array(reader, key, low, high):\n while low <= high:\n mid = (low + high) // 2\n if key == reader.get(mid):\n return mid\n if key > reader.get(mid):\n low = mid + 1\n else:\n high = mid - 1\n return -1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ArrayReader:\n\n def __init__(self, arr):\n self.arr = arr\n\n def get(self, index):\n if index > len(self.arr):\n return math.inf\n return self.arr[index]\n\n\ndef search_in_infinite_array(reader, key):\n low = 0\n high = 1\n while reader.get(high) < key:\n new_low = high + 1\n high = (high - low + 1) * 2\n low = new_low\n return binary_search_array(reader, key, low, high)\n\n\ndef binary_search_array(reader, key, low, high):\n while low <= high:\n mid = (low + high) // 2\n if key == reader.get(mid):\n return mid\n if key > reader.get(mid):\n low = mid + 1\n else:\n high = mid - 1\n return -1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ArrayReader:\n\n def __init__(self, arr):\n self.arr = arr\n\n def get(self, index):\n if index > len(self.arr):\n return math.inf\n return self.arr[index]\n\n\ndef search_in_infinite_array(reader, key):\n low = 0\n high = 1\n while reader.get(high) < key:\n new_low = high + 1\n high = (high - low + 1) * 2\n low = new_low\n return binary_search_array(reader, key, low, high)\n\n\ndef binary_search_array(reader, key, low, high):\n while low <= high:\n mid = (low + high) // 2\n if key == reader.get(mid):\n return mid\n if key > reader.get(mid):\n low = mid + 1\n else:\n high = mid - 1\n return -1\n\n\nreader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])\nprint(search_in_infinite_array(reader, 16))\n",
"step-4": "<mask token>\nimport math\n\n\nclass ArrayReader:\n\n def __init__(self, arr):\n self.arr = arr\n\n def get(self, index):\n if index > len(self.arr):\n return math.inf\n return self.arr[index]\n\n\ndef search_in_infinite_array(reader, key):\n low = 0\n high = 1\n while reader.get(high) < key:\n new_low = high + 1\n high = (high - low + 1) * 2\n low = new_low\n return binary_search_array(reader, key, low, high)\n\n\ndef binary_search_array(reader, key, low, high):\n while low <= high:\n mid = (low + high) // 2\n if key == reader.get(mid):\n return mid\n if key > reader.get(mid):\n low = mid + 1\n else:\n high = mid - 1\n return -1\n\n\nreader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])\nprint(search_in_infinite_array(reader, 16))\n",
"step-5": "'''\nGiven an infinite sorted array (or an array with unknown size), find if a given number ‘key’ is present in the array. Write a function to return the index of the ‘key’ if it is present in the array, otherwise return -1.\n\nSince it is not possible to define an array with infinite (unknown) size, you will be provided with an interface ArrayReader to read elements of the array. ArrayReader.get(index) will return the number at index; if the array’s size is smaller than the index, it will return Integer.MAX_VALUE.\n\nExample 1:\n\nInput: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], key = 16\nOutput: 6\nExplanation: The key is present at index '6' in the array.\nExample 2:\n\nInput: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], key = 11\nOutput: -1\nExplanation: The key is not present in the array.\nExample 3:\n\nInput: [1, 3, 8, 10, 15], key = 15\nOutput: 4\nExplanation: The key is present at index '4' in the array.\nExample 4:\n\nInput: [1, 3, 8, 10, 15], key = 200\nOutput: -1\nExplanation: The key is not present in the array.\n'''\n\nimport math\n\n\nclass ArrayReader:\n def __init__(self, arr):\n self.arr = arr\n\n def get(self, index):\n if index > len(self.arr):\n return math.inf\n\n return self.arr[index]\n\n\ndef search_in_infinite_array(reader, key):\n # first find the bounds\n\n low = 0\n high = 1\n\n while reader.get(high) < key:\n new_low = high + 1\n high = (high - low + 1)*2\n low = new_low\n\n return binary_search_array(reader, key, low, high)\n\n\ndef binary_search_array(reader, key, low, high):\n\n while low <= high:\n\n mid = (low + high) // 2\n\n if key == reader.get(mid):\n return mid\n\n if key > reader.get(mid):\n low = mid + 1\n\n else:\n high = mid - 1\n\n return - 1\n\n\nreader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])\nprint(search_in_infinite_array(reader, 16))\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def plot_merc(s):
proj = ccrs.Mercator()
ax = plt.axes(projection=proj)
ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs
=ccrs.PlateCarree())
shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=
'#AAFFAA', edgecolor='k')
ax.add_feature(shape_feature)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,
color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlabel_style = {'size': 10, 'color': 'black'}
gl.ylabel_style = {'size': 10, 'color': 'black'}
return gl
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def plot(s):
proj = ccrs.PlateCarree()
ax = plt.axes(projection=proj)
ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs
=ccrs.PlateCarree())
shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=
'#AAFFAA', edgecolor='k')
ax.add_feature(shape_feature)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,
color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlabel_style = {'size': 10, 'color': 'black'}
gl.ylabel_style = {'size': 10, 'color': 'black'}
return gl
def plot_merc(s):
proj = ccrs.Mercator()
ax = plt.axes(projection=proj)
ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs
=ccrs.PlateCarree())
shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=
'#AAFFAA', edgecolor='k')
ax.add_feature(shape_feature)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,
color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlabel_style = {'size': 10, 'color': 'black'}
gl.ylabel_style = {'size': 10, 'color': 'black'}
return gl
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.feature import ShapelyFeature
from shapely.geometry import shape
def plot(s):
proj = ccrs.PlateCarree()
ax = plt.axes(projection=proj)
ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs
=ccrs.PlateCarree())
shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=
'#AAFFAA', edgecolor='k')
ax.add_feature(shape_feature)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,
color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlabel_style = {'size': 10, 'color': 'black'}
gl.ylabel_style = {'size': 10, 'color': 'black'}
return gl
def plot_merc(s):
proj = ccrs.Mercator()
ax = plt.axes(projection=proj)
ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs
=ccrs.PlateCarree())
shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=
'#AAFFAA', edgecolor='k')
ax.add_feature(shape_feature)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,
color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlabel_style = {'size': 10, 'color': 'black'}
gl.ylabel_style = {'size': 10, 'color': 'black'}
return gl
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.feature import ShapelyFeature
from shapely.geometry import shape
def plot(s):
proj = ccrs.PlateCarree()
ax = plt.axes(projection=proj)
ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs=ccrs.PlateCarree())
shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor='#AAFFAA', edgecolor='k')
ax.add_feature(shape_feature);
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=2, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlabel_style = {'size': 10, 'color': 'black'}
gl.ylabel_style = {'size': 10, 'color': 'black'}
return gl
def plot_merc(s):
proj = ccrs.Mercator()
ax = plt.axes(projection=proj)
ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs=ccrs.PlateCarree())
shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor='#AAFFAA', edgecolor='k')
ax.add_feature(shape_feature);
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=2, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlabel_style = {'size': 10, 'color': 'black'}
gl.ylabel_style = {'size': 10, 'color': 'black'}
return gl
|
flexible
|
{
"blob_id": "75754f4032d6e22e53cdbed0f6c640247473faec",
"index": 7606,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_merc(s):\n proj = ccrs.Mercator()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs\n =ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=\n '#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,\n color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n return gl\n",
"step-3": "<mask token>\n\n\ndef plot(s):\n proj = ccrs.PlateCarree()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs\n =ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=\n '#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,\n color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n return gl\n\n\ndef plot_merc(s):\n proj = ccrs.Mercator()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs\n =ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=\n '#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,\n color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n return gl\n",
"step-4": "import matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nfrom cartopy.feature import ShapelyFeature\nfrom shapely.geometry import shape\n\n\ndef plot(s):\n proj = ccrs.PlateCarree()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs\n =ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=\n '#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,\n color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n return gl\n\n\ndef plot_merc(s):\n proj = ccrs.Mercator()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs\n =ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=\n '#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,\n color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n return gl\n",
"step-5": "import matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nfrom cartopy.feature import ShapelyFeature\nfrom shapely.geometry import shape\n\n\ndef plot(s):\n proj = ccrs.PlateCarree()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs=ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor='#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature);\n \n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n linewidth=2, color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n \n return gl\n \n \n \n \n \ndef plot_merc(s):\n proj = ccrs.Mercator()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs=ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor='#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature);\n\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n linewidth=2, color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n \n return gl",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author:tom_tao626
@license: Apache Licence
@file: 17.列表中的元素统计.py
@time: 2020/12/09
@contact: tp320670258@gmail.com
@site: xxxx.suizhu.net
@software: PyCharm
"""
# collections.Counter()
from collections import Counter
list1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']
count = Counter(list1)
print(count)
# Counter({'a': 2, 'b': 2, 'e': 2, 'c': 1, 'd': 1})
print(count['b'])
# 3
# 出现次数最多的元素
print(count.most_common(1))
# [('b', 3)]
print(count.items())
# dict_items([('a', 2), ('b', 3), ('c', 1), ('d', 1), ('e', 2)])
|
normal
|
{
"blob_id": "f2c592a0ea38d800510323a1001c646cdbecefff",
"index": 3009,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(count)\nprint(count['b'])\nprint(count.most_common(1))\nprint(count.items())\n",
"step-3": "<mask token>\nlist1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']\ncount = Counter(list1)\nprint(count)\nprint(count['b'])\nprint(count.most_common(1))\nprint(count.items())\n",
"step-4": "<mask token>\nfrom collections import Counter\nlist1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']\ncount = Counter(list1)\nprint(count)\nprint(count['b'])\nprint(count.most_common(1))\nprint(count.items())\n",
"step-5": "#!/usr/bin/env python \n# -*- coding:utf-8 _*-\n\"\"\" \n@author:tom_tao626 \n@license: Apache Licence \n@file: 17.列表中的元素统计.py \n@time: 2020/12/09\n@contact: tp320670258@gmail.com\n@site: xxxx.suizhu.net\n@software: PyCharm \n\"\"\"\n\n# collections.Counter()\n\nfrom collections import Counter\nlist1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']\ncount = Counter(list1)\nprint(count)\n# Counter({'a': 2, 'b': 2, 'e': 2, 'c': 1, 'd': 1})\nprint(count['b'])\n# 3\n# 出现次数最多的元素\nprint(count.most_common(1))\n# [('b', 3)]\nprint(count.items())\n# dict_items([('a', 2), ('b', 3), ('c', 1), ('d', 1), ('e', 2)])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
experiment_name = 'nodes10'
wall = 'wall2'
wall_image = 'irati_110'
mr_dif_policy = True
spn_dif_policy = True
destination_ip = '2001:40b0:7500:286:84:88:81:57'
<|reserved_special_token_1|>
#!/usr/bin/python3
experiment_name = "nodes10"
wall = "wall2"
wall_image = "irati_110"
mr_dif_policy = True
spn_dif_policy = True
destination_ip = "2001:40b0:7500:286:84:88:81:57"
|
flexible
|
{
"blob_id": "78db25586f742b0a20bc3fad382b0d4f1a271841",
"index": 3970,
"step-1": "<mask token>\n",
"step-2": "experiment_name = 'nodes10'\nwall = 'wall2'\nwall_image = 'irati_110'\nmr_dif_policy = True\nspn_dif_policy = True\ndestination_ip = '2001:40b0:7500:286:84:88:81:57'\n",
"step-3": "#!/usr/bin/python3\n\nexperiment_name = \"nodes10\"\nwall = \"wall2\"\nwall_image = \"irati_110\"\nmr_dif_policy = True\nspn_dif_policy = True\ndestination_ip = \"2001:40b0:7500:286:84:88:81:57\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class SimulatorInfo(object):
def __init__(self, name=None, device_type=None, sdk=None, device_id=
None, sim_id=None):
self.name = name
self.device_type = device_type
self.sdk = sdk
self.device_id = device_id
self.sim_id = sim_id
|
normal
|
{
"blob_id": "9b94e8aed2b0be2771a38cf2d1cf391772f3a9f0",
"index": 6478,
"step-1": "<mask token>\n",
"step-2": "class SimulatorInfo(object):\n <mask token>\n",
"step-3": "class SimulatorInfo(object):\n\n def __init__(self, name=None, device_type=None, sdk=None, device_id=\n None, sim_id=None):\n self.name = name\n self.device_type = device_type\n self.sdk = sdk\n self.device_id = device_id\n self.sim_id = sim_id\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def count_regexp():
"""Counts the occurences of the regular expressions you will write.
"""
email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\.[a-zA-Z]{2,5}')
subheading = re.compile('\\=\\=+.*\\=\\=+')
link_to_subheading = re.compile(
"\\[\\[[\\w'*\\-*\\:*\\(*\\)*\\_*\\s*]+[#][\\s*\\w\\'*\\-*\\:*\\(*\\)*\\_*s*]+\\|*"
)
doi_citation = re.compile(
'\\{\\{[c][ite](?!{{).*[dD][oO][iI]\\s*[:|,=\\/]*\\s*[0-9]+\\.[0-9]+.*\\}\\}'
)
patterns = {'emails': email, 'subheadings': subheading,
'links to subheadings': link_to_subheading,
'citations with DOI numbers': doi_citation}
with open(RAW_DUMP_XML, encoding='utf-8') as f:
dump_text = f.read()
for name, pattern in patterns.items():
if pattern is None:
continue
matches = pattern.findall(dump_text)
count = len(matches)
example_matches = [matches[i * (count // 5)] for i in range(5)]
print('Found {} occurences of {}'.format(count, name))
print('Here are examples:')
print('\n'.join(example_matches))
print('\n')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def count_regexp():
"""Counts the occurences of the regular expressions you will write.
"""
email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\.[a-zA-Z]{2,5}')
subheading = re.compile('\\=\\=+.*\\=\\=+')
link_to_subheading = re.compile(
"\\[\\[[\\w'*\\-*\\:*\\(*\\)*\\_*\\s*]+[#][\\s*\\w\\'*\\-*\\:*\\(*\\)*\\_*s*]+\\|*"
)
doi_citation = re.compile(
'\\{\\{[c][ite](?!{{).*[dD][oO][iI]\\s*[:|,=\\/]*\\s*[0-9]+\\.[0-9]+.*\\}\\}'
)
patterns = {'emails': email, 'subheadings': subheading,
'links to subheadings': link_to_subheading,
'citations with DOI numbers': doi_citation}
with open(RAW_DUMP_XML, encoding='utf-8') as f:
dump_text = f.read()
for name, pattern in patterns.items():
if pattern is None:
continue
matches = pattern.findall(dump_text)
count = len(matches)
example_matches = [matches[i * (count // 5)] for i in range(5)]
print('Found {} occurences of {}'.format(count, name))
print('Here are examples:')
print('\n'.join(example_matches))
print('\n')
if __name__ == '__main__':
count_regexp()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
RAW_DUMP_XML = Path('raw_data/Wikipedia.xml')
def count_regexp():
"""Counts the occurences of the regular expressions you will write.
"""
email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\.[a-zA-Z]{2,5}')
subheading = re.compile('\\=\\=+.*\\=\\=+')
link_to_subheading = re.compile(
"\\[\\[[\\w'*\\-*\\:*\\(*\\)*\\_*\\s*]+[#][\\s*\\w\\'*\\-*\\:*\\(*\\)*\\_*s*]+\\|*"
)
doi_citation = re.compile(
'\\{\\{[c][ite](?!{{).*[dD][oO][iI]\\s*[:|,=\\/]*\\s*[0-9]+\\.[0-9]+.*\\}\\}'
)
patterns = {'emails': email, 'subheadings': subheading,
'links to subheadings': link_to_subheading,
'citations with DOI numbers': doi_citation}
with open(RAW_DUMP_XML, encoding='utf-8') as f:
dump_text = f.read()
for name, pattern in patterns.items():
if pattern is None:
continue
matches = pattern.findall(dump_text)
count = len(matches)
example_matches = [matches[i * (count // 5)] for i in range(5)]
print('Found {} occurences of {}'.format(count, name))
print('Here are examples:')
print('\n'.join(example_matches))
print('\n')
if __name__ == '__main__':
count_regexp()
<|reserved_special_token_1|>
import re
from pathlib import Path
RAW_DUMP_XML = Path('raw_data/Wikipedia.xml')
def count_regexp():
"""Counts the occurences of the regular expressions you will write.
"""
email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\.[a-zA-Z]{2,5}')
subheading = re.compile('\\=\\=+.*\\=\\=+')
link_to_subheading = re.compile(
"\\[\\[[\\w'*\\-*\\:*\\(*\\)*\\_*\\s*]+[#][\\s*\\w\\'*\\-*\\:*\\(*\\)*\\_*s*]+\\|*"
)
doi_citation = re.compile(
'\\{\\{[c][ite](?!{{).*[dD][oO][iI]\\s*[:|,=\\/]*\\s*[0-9]+\\.[0-9]+.*\\}\\}'
)
patterns = {'emails': email, 'subheadings': subheading,
'links to subheadings': link_to_subheading,
'citations with DOI numbers': doi_citation}
with open(RAW_DUMP_XML, encoding='utf-8') as f:
dump_text = f.read()
for name, pattern in patterns.items():
if pattern is None:
continue
matches = pattern.findall(dump_text)
count = len(matches)
example_matches = [matches[i * (count // 5)] for i in range(5)]
print('Found {} occurences of {}'.format(count, name))
print('Here are examples:')
print('\n'.join(example_matches))
print('\n')
if __name__ == '__main__':
count_regexp()
<|reserved_special_token_1|>
import re
from pathlib import Path
RAW_DUMP_XML = Path("raw_data/Wikipedia.xml")
def count_regexp():
"""Counts the occurences of the regular expressions you will write.
"""
# Here's an example regular expression that roughly matches a valid email address.
# The ones you write below should be shorter than this
email = re.compile("[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\.[a-zA-Z]{2,5}")
###### Write below #########
subheading = re.compile("\=\=+.*\=\=+")
link_to_subheading = re.compile("\[\[[\w\'*\-*\:*\(*\)*\_*\s*]+[#][\s*\w\\'*\-*\:*\(*\)*\_*s*]+\|*")
doi_citation = re.compile("\{\{[c][ite](?!{{).*[dD][oO][iI]\s*[:|,=\/]*\s*[0-9]+\.[0-9]+.*\}\}")
###### End of your work #########
patterns = {
"emails": email,
"subheadings": subheading,
"links to subheadings": link_to_subheading,
"citations with DOI numbers": doi_citation,
}
with open(RAW_DUMP_XML, encoding="utf-8") as f:
dump_text = f.read()
for name, pattern in patterns.items():
if pattern is None:
continue
matches = pattern.findall(dump_text)
count = len(matches)
example_matches = [matches[i * (count // 5)] for i in range(5)]
print("Found {} occurences of {}".format(count, name))
print("Here are examples:")
print("\n".join(example_matches))
print("\n")
if __name__ == "__main__":
count_regexp()
|
flexible
|
{
"blob_id": "8a4269f2094fa8ab8f6a93e653183dafb141232e",
"index": 5717,
"step-1": "<mask token>\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n \"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n \"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\nif __name__ == '__main__':\n count_regexp()\n",
"step-3": "<mask token>\nRAW_DUMP_XML = Path('raw_data/Wikipedia.xml')\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n \"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\nif __name__ == '__main__':\n count_regexp()\n",
"step-4": "import re\nfrom pathlib import Path\nRAW_DUMP_XML = Path('raw_data/Wikipedia.xml')\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n \"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\nif __name__ == '__main__':\n count_regexp()\n",
"step-5": "import re\r\nfrom pathlib import Path\r\n\r\nRAW_DUMP_XML = Path(\"raw_data/Wikipedia.xml\")\r\n\r\n\r\ndef count_regexp():\r\n \"\"\"Counts the occurences of the regular expressions you will write.\r\n \"\"\"\r\n # Here's an example regular expression that roughly matches a valid email address.\r\n # The ones you write below should be shorter than this\r\n email = re.compile(\"[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\.[a-zA-Z]{2,5}\")\r\n\r\n ###### Write below #########\r\n subheading = re.compile(\"\\=\\=+.*\\=\\=+\")\r\n link_to_subheading = re.compile(\"\\[\\[[\\w\\'*\\-*\\:*\\(*\\)*\\_*\\s*]+[#][\\s*\\w\\\\'*\\-*\\:*\\(*\\)*\\_*s*]+\\|*\")\r\n doi_citation = re.compile(\"\\{\\{[c][ite](?!{{).*[dD][oO][iI]\\s*[:|,=\\/]*\\s*[0-9]+\\.[0-9]+.*\\}\\}\")\r\n ###### End of your work #########\r\n\r\n patterns = {\r\n \"emails\": email,\r\n \"subheadings\": subheading,\r\n \"links to subheadings\": link_to_subheading,\r\n \"citations with DOI numbers\": doi_citation,\r\n }\r\n\r\n with open(RAW_DUMP_XML, encoding=\"utf-8\") as f:\r\n dump_text = f.read()\r\n for name, pattern in patterns.items():\r\n if pattern is None:\r\n continue\r\n matches = pattern.findall(dump_text)\r\n count = len(matches)\r\n\r\n example_matches = [matches[i * (count // 5)] for i in range(5)]\r\n\r\n print(\"Found {} occurences of {}\".format(count, name))\r\n print(\"Here are examples:\")\r\n print(\"\\n\".join(example_matches))\r\n print(\"\\n\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n count_regexp()\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Horse(object):
def naili(self):
print('马力足,持久强……')
<|reserved_special_token_0|>
class Mule(Donkey, Horse):
pass
def jiao(self):
print('骡子在唱歌')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Donkey(object):
def manzou(self):
print('走路慢……')
<|reserved_special_token_0|>
class Horse(object):
def naili(self):
print('马力足,持久强……')
def jiao(self):
print('马在嘶鸣')
class Mule(Donkey, Horse):
pass
def jiao(self):
print('骡子在唱歌')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Donkey(object):
def manzou(self):
print('走路慢……')
def jiao(self):
print('驴在欢叫%……')
class Horse(object):
def naili(self):
print('马力足,持久强……')
def jiao(self):
print('马在嘶鸣')
class Mule(Donkey, Horse):
pass
def jiao(self):
print('骡子在唱歌')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Donkey(object):
def manzou(self):
print('走路慢……')
def jiao(self):
print('驴在欢叫%……')
class Horse(object):
def naili(self):
print('马力足,持久强……')
def jiao(self):
print('马在嘶鸣')
class Mule(Donkey, Horse):
pass
def jiao(self):
print('骡子在唱歌')
<|reserved_special_token_0|>
骡子一号.manzou()
骡子一号.naili()
骡子一号.jiao()
print(Mule.__mro__)
<|reserved_special_token_1|>
class Donkey(object):
def manzou(self):
print('走路慢……')
def jiao(self):
print('驴在欢叫%……')
class Horse(object):
def naili(self):
print('马力足,持久强……')
def jiao(self):
print('马在嘶鸣')
class Mule(Donkey,Horse):
pass
def jiao(self):
print('骡子在唱歌')
骡子一号 = Mule()
骡子一号.manzou()
骡子一号.naili()
骡子一号.jiao()
print(Mule.__mro__)
|
flexible
|
{
"blob_id": "5d4ef436c4ee5c31496977a5ae9b55db9ff34e79",
"index": 4082,
"step-1": "<mask token>\n\n\nclass Horse(object):\n\n def naili(self):\n print('马力足,持久强……')\n <mask token>\n\n\nclass Mule(Donkey, Horse):\n pass\n\n def jiao(self):\n print('骡子在唱歌')\n\n\n<mask token>\n",
"step-2": "class Donkey(object):\n\n def manzou(self):\n print('走路慢……')\n <mask token>\n\n\nclass Horse(object):\n\n def naili(self):\n print('马力足,持久强……')\n\n def jiao(self):\n print('马在嘶鸣')\n\n\nclass Mule(Donkey, Horse):\n pass\n\n def jiao(self):\n print('骡子在唱歌')\n\n\n<mask token>\n",
"step-3": "class Donkey(object):\n\n def manzou(self):\n print('走路慢……')\n\n def jiao(self):\n print('驴在欢叫%……')\n\n\nclass Horse(object):\n\n def naili(self):\n print('马力足,持久强……')\n\n def jiao(self):\n print('马在嘶鸣')\n\n\nclass Mule(Donkey, Horse):\n pass\n\n def jiao(self):\n print('骡子在唱歌')\n\n\n<mask token>\n",
"step-4": "class Donkey(object):\n\n def manzou(self):\n print('走路慢……')\n\n def jiao(self):\n print('驴在欢叫%……')\n\n\nclass Horse(object):\n\n def naili(self):\n print('马力足,持久强……')\n\n def jiao(self):\n print('马在嘶鸣')\n\n\nclass Mule(Donkey, Horse):\n pass\n\n def jiao(self):\n print('骡子在唱歌')\n\n\n<mask token>\n骡子一号.manzou()\n骡子一号.naili()\n骡子一号.jiao()\nprint(Mule.__mro__)\n",
"step-5": "\nclass Donkey(object):\n def manzou(self):\n print('走路慢……')\n def jiao(self):\n print('驴在欢叫%……')\n\nclass Horse(object):\n def naili(self):\n print('马力足,持久强……')\n def jiao(self):\n print('马在嘶鸣')\n\nclass Mule(Donkey,Horse):\n pass\n def jiao(self):\n print('骡子在唱歌')\n\n骡子一号 = Mule()\n骡子一号.manzou()\n骡子一号.naili()\n骡子一号.jiao()\nprint(Mule.__mro__)\n\n",
"step-ids": [
4,
7,
8,
9,
11
]
}
|
[
4,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def closeConnection():
cursor.close()
mariadb_connection.close()
return
def getTasks(amount):
mariadb_connection = mariadb.connect(user='web', password='raspberry',
database='PlantHubDB')
cursor = mariadb_connection.cursor()
all_data = []
cursor.execute('SELECT * FROM Sensor')
all_entries = cursor.fetchall()
for row in all_entries:
entry = Sensor(row[0], row[1], row[2])
all_data.append(entry.data)
closeConnection()
return all_data
def getTask(task_id):
mariadb_connection = mariadb.connect(user='web', password='raspberry',
database='PlantHubDB')
cursor = mariadb_connection.cursor()
cursor.execute('SELECT * FROM Sensor WHERE ID={}'.format(task_id))
entry = cursor.fetchall()
data = Sensor(entry[0][0], entry[0][1], entry[0][2])
closeConnection()
return data.data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mariadb_connection = mariadb.connect(user='web', password='raspberry',
database='PlantHubDB')
cursor = mariadb_connection.cursor()
def closeConnection():
cursor.close()
mariadb_connection.close()
return
def getTasks(amount):
mariadb_connection = mariadb.connect(user='web', password='raspberry',
database='PlantHubDB')
cursor = mariadb_connection.cursor()
all_data = []
cursor.execute('SELECT * FROM Sensor')
all_entries = cursor.fetchall()
for row in all_entries:
entry = Sensor(row[0], row[1], row[2])
all_data.append(entry.data)
closeConnection()
return all_data
def getTask(task_id):
mariadb_connection = mariadb.connect(user='web', password='raspberry',
database='PlantHubDB')
cursor = mariadb_connection.cursor()
cursor.execute('SELECT * FROM Sensor WHERE ID={}'.format(task_id))
entry = cursor.fetchall()
data = Sensor(entry[0][0], entry[0][1], entry[0][2])
closeConnection()
return data.data
<|reserved_special_token_1|>
from models import Sensor
import mysql.connector as mariadb
mariadb_connection = mariadb.connect(user='web', password='raspberry',
database='PlantHubDB')
cursor = mariadb_connection.cursor()
def closeConnection():
cursor.close()
mariadb_connection.close()
return
def getTasks(amount):
mariadb_connection = mariadb.connect(user='web', password='raspberry',
database='PlantHubDB')
cursor = mariadb_connection.cursor()
all_data = []
cursor.execute('SELECT * FROM Sensor')
all_entries = cursor.fetchall()
for row in all_entries:
entry = Sensor(row[0], row[1], row[2])
all_data.append(entry.data)
closeConnection()
return all_data
def getTask(task_id):
mariadb_connection = mariadb.connect(user='web', password='raspberry',
database='PlantHubDB')
cursor = mariadb_connection.cursor()
cursor.execute('SELECT * FROM Sensor WHERE ID={}'.format(task_id))
entry = cursor.fetchall()
data = Sensor(entry[0][0], entry[0][1], entry[0][2])
closeConnection()
return data.data
<|reserved_special_token_1|>
from models import Sensor
import mysql.connector as mariadb
## CREATE A DB WITH MARIADB ##
mariadb_connection = mariadb.connect(user='web', password='raspberry', database='PlantHubDB')
cursor = mariadb_connection.cursor()
def closeConnection():
cursor.close()
mariadb_connection.close()
return
def getTasks(amount):
mariadb_connection = mariadb.connect(user='web', password='raspberry', database='PlantHubDB')
cursor = mariadb_connection.cursor()
all_data = []
cursor.execute("SELECT * FROM Sensor")
all_entries = cursor.fetchall()
for row in all_entries:
entry = Sensor(row[0], row[1], row[2])
all_data.append(entry.data)
closeConnection()
return all_data
def getTask(task_id):
    """Fetch one Sensor row by primary key and return its data dict.

    Args:
        task_id: value matched against the Sensor.ID column.

    Returns:
        The ``data`` attribute of the matching :class:`Sensor`.

    Raises:
        IndexError: if no row with the given ID exists (fetchall() is empty).
    """
    mariadb_connection = mariadb.connect(user='web', password='raspberry',
        database='PlantHubDB')
    cursor = mariadb_connection.cursor()
    try:
        # Parameterized query: interpolating task_id with str.format() left
        # the original open to SQL injection.
        cursor.execute('SELECT * FROM Sensor WHERE ID=%s', (task_id,))
        entry = cursor.fetchall()
        data = Sensor(entry[0][0], entry[0][1], entry[0][2])
    finally:
        # Close the *local* connection opened above; the original called
        # closeConnection(), which closed the module-level handles and
        # leaked this one.
        cursor.close()
        mariadb_connection.close()
    return data.data
|
flexible
|
{
"blob_id": "f471062573a5ec8cfeb194168edfba3d2700cac6",
"index": 9845,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef closeConnection():\n cursor.close()\n mariadb_connection.close()\n return\n\n\ndef getTasks(amount):\n mariadb_connection = mariadb.connect(user='web', password='raspberry',\n database='PlantHubDB')\n cursor = mariadb_connection.cursor()\n all_data = []\n cursor.execute('SELECT * FROM Sensor')\n all_entries = cursor.fetchall()\n for row in all_entries:\n entry = Sensor(row[0], row[1], row[2])\n all_data.append(entry.data)\n closeConnection()\n return all_data\n\n\ndef getTask(task_id):\n mariadb_connection = mariadb.connect(user='web', password='raspberry',\n database='PlantHubDB')\n cursor = mariadb_connection.cursor()\n cursor.execute('SELECT * FROM Sensor WHERE ID={}'.format(task_id))\n entry = cursor.fetchall()\n data = Sensor(entry[0][0], entry[0][1], entry[0][2])\n closeConnection()\n return data.data\n",
"step-3": "<mask token>\nmariadb_connection = mariadb.connect(user='web', password='raspberry',\n database='PlantHubDB')\ncursor = mariadb_connection.cursor()\n\n\ndef closeConnection():\n cursor.close()\n mariadb_connection.close()\n return\n\n\ndef getTasks(amount):\n mariadb_connection = mariadb.connect(user='web', password='raspberry',\n database='PlantHubDB')\n cursor = mariadb_connection.cursor()\n all_data = []\n cursor.execute('SELECT * FROM Sensor')\n all_entries = cursor.fetchall()\n for row in all_entries:\n entry = Sensor(row[0], row[1], row[2])\n all_data.append(entry.data)\n closeConnection()\n return all_data\n\n\ndef getTask(task_id):\n mariadb_connection = mariadb.connect(user='web', password='raspberry',\n database='PlantHubDB')\n cursor = mariadb_connection.cursor()\n cursor.execute('SELECT * FROM Sensor WHERE ID={}'.format(task_id))\n entry = cursor.fetchall()\n data = Sensor(entry[0][0], entry[0][1], entry[0][2])\n closeConnection()\n return data.data\n",
"step-4": "from models import Sensor\nimport mysql.connector as mariadb\nmariadb_connection = mariadb.connect(user='web', password='raspberry',\n database='PlantHubDB')\ncursor = mariadb_connection.cursor()\n\n\ndef closeConnection():\n cursor.close()\n mariadb_connection.close()\n return\n\n\ndef getTasks(amount):\n mariadb_connection = mariadb.connect(user='web', password='raspberry',\n database='PlantHubDB')\n cursor = mariadb_connection.cursor()\n all_data = []\n cursor.execute('SELECT * FROM Sensor')\n all_entries = cursor.fetchall()\n for row in all_entries:\n entry = Sensor(row[0], row[1], row[2])\n all_data.append(entry.data)\n closeConnection()\n return all_data\n\n\ndef getTask(task_id):\n mariadb_connection = mariadb.connect(user='web', password='raspberry',\n database='PlantHubDB')\n cursor = mariadb_connection.cursor()\n cursor.execute('SELECT * FROM Sensor WHERE ID={}'.format(task_id))\n entry = cursor.fetchall()\n data = Sensor(entry[0][0], entry[0][1], entry[0][2])\n closeConnection()\n return data.data\n",
"step-5": "from models import Sensor\nimport mysql.connector as mariadb\n\n## CREATE A DB WITH MARIADB ##\nmariadb_connection = mariadb.connect(user='web', password='raspberry', database='PlantHubDB')\ncursor = mariadb_connection.cursor()\n\ndef closeConnection():\n cursor.close()\n mariadb_connection.close()\n return\n\ndef getTasks(amount):\n mariadb_connection = mariadb.connect(user='web', password='raspberry', database='PlantHubDB')\n cursor = mariadb_connection.cursor()\n all_data = []\n cursor.execute(\"SELECT * FROM Sensor\")\n all_entries = cursor.fetchall()\n\n for row in all_entries:\n entry = Sensor(row[0], row[1], row[2])\n all_data.append(entry.data)\n\n closeConnection()\n return all_data\n\ndef getTask(task_id):\n mariadb_connection = mariadb.connect(user='web', password='raspberry', database='PlantHubDB')\n cursor = mariadb_connection.cursor()\n cursor.execute(\"SELECT * FROM Sensor WHERE ID={}\".format(task_id))\n entry = cursor.fetchall()\n\n data = Sensor(entry[0][0], entry[0][1], entry[0][2])\n\n closeConnection()\n return data.data\n ",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class WithinDatagram(object):
def __init__(self, traceObj):
self.Trace = traceObj
self.current_datagram = None
<|reserved_special_token_0|>
class WithinFlowsample(object):
def __init__(self, traceObj):
self.Trace = traceObj
self.current_datagram = None
self.current_flowsample = None
def re_init(self, flowsampleObj, datagramObj):
self.current_datagram = datagramObj
self.current_flowsample = flowsampleObj
def process(self, line):
if 'endSample' in line:
self.current_datagram['flowSamples'][self.current_flowsample.id
] = self.current_flowsample.content
self.Trace.currentState = self.Trace.within_datagram
else:
process_line_and_store_in_obj(line, self.current_flowsample)
class Trace(object):
def __init__(self, callable=None):
self.within_datagram = WithinDatagram(self)
self.within_flowsample = WithinFlowsample(self)
self.currentState = self.within_datagram
self.callable = callable
def process(self, line):
self.currentState.process(line)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FlowSample(Container):
<|reserved_special_token_0|>
def __init__(self):
super(FlowSample, self).__init__(FlowSample.flowsample_counter())
<|reserved_special_token_0|>
class WithinDatagram(object):
def __init__(self, traceObj):
self.Trace = traceObj
self.current_datagram = None
def process(self, line):
if 'startDatagram' in line:
self.current_datagram = Datagram()
elif 'endDatagram' in line:
self.Trace.callable(self.current_datagram.content)
elif 'startSample' in line:
self.Trace.currentState = self.Trace.within_flowsample
self.Trace.within_flowsample.re_init(FlowSample(), self.
current_datagram)
else:
process_line_and_store_in_obj(line, self.current_datagram)
class WithinFlowsample(object):
def __init__(self, traceObj):
self.Trace = traceObj
self.current_datagram = None
self.current_flowsample = None
def re_init(self, flowsampleObj, datagramObj):
self.current_datagram = datagramObj
self.current_flowsample = flowsampleObj
def process(self, line):
if 'endSample' in line:
self.current_datagram['flowSamples'][self.current_flowsample.id
] = self.current_flowsample.content
self.Trace.currentState = self.Trace.within_datagram
else:
process_line_and_store_in_obj(line, self.current_flowsample)
class Trace(object):
def __init__(self, callable=None):
self.within_datagram = WithinDatagram(self)
self.within_flowsample = WithinFlowsample(self)
self.currentState = self.within_datagram
self.callable = callable
def process(self, line):
self.currentState.process(line)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Datagram(Container):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class FlowSample(Container):
flowsample_counter = itertools.count().next
def __init__(self):
super(FlowSample, self).__init__(FlowSample.flowsample_counter())
<|reserved_special_token_0|>
class WithinDatagram(object):
def __init__(self, traceObj):
self.Trace = traceObj
self.current_datagram = None
def process(self, line):
if 'startDatagram' in line:
self.current_datagram = Datagram()
elif 'endDatagram' in line:
self.Trace.callable(self.current_datagram.content)
elif 'startSample' in line:
self.Trace.currentState = self.Trace.within_flowsample
self.Trace.within_flowsample.re_init(FlowSample(), self.
current_datagram)
else:
process_line_and_store_in_obj(line, self.current_datagram)
class WithinFlowsample(object):
def __init__(self, traceObj):
self.Trace = traceObj
self.current_datagram = None
self.current_flowsample = None
def re_init(self, flowsampleObj, datagramObj):
self.current_datagram = datagramObj
self.current_flowsample = flowsampleObj
def process(self, line):
if 'endSample' in line:
self.current_datagram['flowSamples'][self.current_flowsample.id
] = self.current_flowsample.content
self.Trace.currentState = self.Trace.within_datagram
else:
process_line_and_store_in_obj(line, self.current_flowsample)
class Trace(object):
def __init__(self, callable=None):
self.within_datagram = WithinDatagram(self)
self.within_flowsample = WithinFlowsample(self)
self.currentState = self.within_datagram
self.callable = callable
def process(self, line):
self.currentState.process(line)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Datagram(Container):
datagram_counter = itertools.count().next
def __init__(self):
super(Datagram, self).__init__(Datagram.datagram_counter())
self['flowSamples'] = {}
class FlowSample(Container):
flowsample_counter = itertools.count().next
def __init__(self):
super(FlowSample, self).__init__(FlowSample.flowsample_counter())
<|reserved_special_token_0|>
class WithinDatagram(object):
def __init__(self, traceObj):
self.Trace = traceObj
self.current_datagram = None
def process(self, line):
if 'startDatagram' in line:
self.current_datagram = Datagram()
elif 'endDatagram' in line:
self.Trace.callable(self.current_datagram.content)
elif 'startSample' in line:
self.Trace.currentState = self.Trace.within_flowsample
self.Trace.within_flowsample.re_init(FlowSample(), self.
current_datagram)
else:
process_line_and_store_in_obj(line, self.current_datagram)
class WithinFlowsample(object):
def __init__(self, traceObj):
self.Trace = traceObj
self.current_datagram = None
self.current_flowsample = None
def re_init(self, flowsampleObj, datagramObj):
self.current_datagram = datagramObj
self.current_flowsample = flowsampleObj
def process(self, line):
if 'endSample' in line:
self.current_datagram['flowSamples'][self.current_flowsample.id
] = self.current_flowsample.content
self.Trace.currentState = self.Trace.within_datagram
else:
process_line_and_store_in_obj(line, self.current_flowsample)
class Trace(object):
def __init__(self, callable=None):
self.within_datagram = WithinDatagram(self)
self.within_flowsample = WithinFlowsample(self)
self.currentState = self.within_datagram
self.callable = callable
def process(self, line):
self.currentState.process(line)
<|reserved_special_token_1|>
#@@---------------------------@@
# Author: Chamil Jayasundara
# Date: 5/18/17
# Description: Extract SFLOW data from slow logs
#@@---------------------------@@
import itertools
from collections import defaultdict
"""Flow Sample and Datagram Objects"""
class Container(object):
    """Base record: a numeric id plus a defaultdict of parsed attributes.

    Missing keys read as 0 (``defaultdict(int)``), so attribute counters
    can be accumulated without prior initialization.
    """

    def __init__(self, id):
        # ``id`` shadows the builtin, but the parameter name is part of the
        # established constructor interface, so it is kept.
        self.id = id
        self.content = defaultdict(int)

    def __getitem__(self, key):
        return self.content[key]

    def __setitem__(self, key, value):
        self.content[key] = value

    def __repr__(self):
        # Added for debuggability; backward-compatible extension.
        return '%s(id=%r, content=%r)' % (
            type(self).__name__, self.id, dict(self.content))
class Datagram(Container):
    """One sFlow datagram: header attributes plus its flow samples."""

    # Shared iterator yielding sequential ids.  Calling next() on the
    # iterator works on both Python 2 and 3; the original stored the bound
    # ``.next`` method, which exists only on Python 2.
    datagram_counter = itertools.count()

    def __init__(self):
        super(Datagram, self).__init__(next(Datagram.datagram_counter))
        # FlowSample.id -> content mapping, filled in by the parser states.
        self['flowSamples'] = {}
class FlowSample(Container):
    """One flow sample inside a datagram; attributes come from the log."""

    # Shared iterator yielding sequential ids.  Calling next() on the
    # iterator works on both Python 2 and 3; the original stored the bound
    # ``.next`` method, which exists only on Python 2.
    flowsample_counter = itertools.count()

    def __init__(self):
        super(FlowSample, self).__init__(next(FlowSample.flowsample_counter))
#############################
"""Data Extraction"""
def process_line_and_store_in_obj(line, obj):
    """Split ``line`` at the first space and store key -> rstripped value.

    If the line contains no space, the whole line (including its trailing
    newline) becomes the key and the value is the empty string.
    """
    key, _, value = line.partition(" ")
    obj[key] = value.rstrip()
###State Machine Classses
class WithinDatagram(object):
    """Parser state active between datagrams / at datagram top level."""

    def __init__(self, traceObj):
        self.Trace = traceObj
        self.current_datagram = None

    def process(self, line):
        """Dispatch one log line; guard-clause form of the marker checks."""
        if "startDatagram" in line:
            self.current_datagram = Datagram()
            return
        if "endDatagram" in line:
            # Hand the finished datagram's attribute dict to the sink.
            self.Trace.callable(self.current_datagram.content)
            return
        if "startSample" in line:
            # Switch the state machine into flow-sample mode.
            self.Trace.currentState = self.Trace.within_flowsample
            self.Trace.within_flowsample.re_init(FlowSample(), self.current_datagram)
            return
        process_line_and_store_in_obj(line, self.current_datagram)
class WithinFlowsample(object):
    """Parser state active while inside a flow sample."""

    def __init__(self, traceObj):
        self.Trace = traceObj
        self.current_datagram = None
        self.current_flowsample = None

    def re_init(self, flowsampleObj, datagramObj):
        """Re-arm this (shared) state object for a fresh flow sample."""
        self.current_datagram = datagramObj
        self.current_flowsample = flowsampleObj

    def process(self, line):
        """Accumulate attribute lines until the endSample marker."""
        if "endSample" not in line:
            process_line_and_store_in_obj(line, self.current_flowsample)
            return
        # Sample finished: attach it to its datagram and fall back to the
        # datagram-level state.
        samples = self.current_datagram['flowSamples']
        samples[self.current_flowsample.id] = self.current_flowsample.content
        self.Trace.currentState = self.Trace.within_datagram
class Trace(object):
    """State-machine driver: feeds each log line to the active state.

    ``callable`` (shadows the builtin, but is part of the public
    constructor interface) receives each finished datagram's content dict.
    """

    def __init__(self, callable=None):
        self.callable = callable
        self.within_flowsample = WithinFlowsample(self)
        self.within_datagram = WithinDatagram(self)
        # Parsing always begins at datagram level.
        self.currentState = self.within_datagram

    def process(self, line):
        self.currentState.process(line)
|
flexible
|
{
"blob_id": "395ff2e7c052b57548151fc71fad971c94ebceea",
"index": 3974,
"step-1": "<mask token>\n\n\nclass WithinDatagram(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n <mask token>\n\n\nclass WithinFlowsample(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n self.current_flowsample = None\n\n def re_init(self, flowsampleObj, datagramObj):\n self.current_datagram = datagramObj\n self.current_flowsample = flowsampleObj\n\n def process(self, line):\n if 'endSample' in line:\n self.current_datagram['flowSamples'][self.current_flowsample.id\n ] = self.current_flowsample.content\n self.Trace.currentState = self.Trace.within_datagram\n else:\n process_line_and_store_in_obj(line, self.current_flowsample)\n\n\nclass Trace(object):\n\n def __init__(self, callable=None):\n self.within_datagram = WithinDatagram(self)\n self.within_flowsample = WithinFlowsample(self)\n self.currentState = self.within_datagram\n self.callable = callable\n\n def process(self, line):\n self.currentState.process(line)\n",
"step-2": "<mask token>\n\n\nclass FlowSample(Container):\n <mask token>\n\n def __init__(self):\n super(FlowSample, self).__init__(FlowSample.flowsample_counter())\n\n\n<mask token>\n\n\nclass WithinDatagram(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n\n def process(self, line):\n if 'startDatagram' in line:\n self.current_datagram = Datagram()\n elif 'endDatagram' in line:\n self.Trace.callable(self.current_datagram.content)\n elif 'startSample' in line:\n self.Trace.currentState = self.Trace.within_flowsample\n self.Trace.within_flowsample.re_init(FlowSample(), self.\n current_datagram)\n else:\n process_line_and_store_in_obj(line, self.current_datagram)\n\n\nclass WithinFlowsample(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n self.current_flowsample = None\n\n def re_init(self, flowsampleObj, datagramObj):\n self.current_datagram = datagramObj\n self.current_flowsample = flowsampleObj\n\n def process(self, line):\n if 'endSample' in line:\n self.current_datagram['flowSamples'][self.current_flowsample.id\n ] = self.current_flowsample.content\n self.Trace.currentState = self.Trace.within_datagram\n else:\n process_line_and_store_in_obj(line, self.current_flowsample)\n\n\nclass Trace(object):\n\n def __init__(self, callable=None):\n self.within_datagram = WithinDatagram(self)\n self.within_flowsample = WithinFlowsample(self)\n self.currentState = self.within_datagram\n self.callable = callable\n\n def process(self, line):\n self.currentState.process(line)\n",
"step-3": "<mask token>\n\n\nclass Datagram(Container):\n <mask token>\n <mask token>\n\n\nclass FlowSample(Container):\n flowsample_counter = itertools.count().next\n\n def __init__(self):\n super(FlowSample, self).__init__(FlowSample.flowsample_counter())\n\n\n<mask token>\n\n\nclass WithinDatagram(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n\n def process(self, line):\n if 'startDatagram' in line:\n self.current_datagram = Datagram()\n elif 'endDatagram' in line:\n self.Trace.callable(self.current_datagram.content)\n elif 'startSample' in line:\n self.Trace.currentState = self.Trace.within_flowsample\n self.Trace.within_flowsample.re_init(FlowSample(), self.\n current_datagram)\n else:\n process_line_and_store_in_obj(line, self.current_datagram)\n\n\nclass WithinFlowsample(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n self.current_flowsample = None\n\n def re_init(self, flowsampleObj, datagramObj):\n self.current_datagram = datagramObj\n self.current_flowsample = flowsampleObj\n\n def process(self, line):\n if 'endSample' in line:\n self.current_datagram['flowSamples'][self.current_flowsample.id\n ] = self.current_flowsample.content\n self.Trace.currentState = self.Trace.within_datagram\n else:\n process_line_and_store_in_obj(line, self.current_flowsample)\n\n\nclass Trace(object):\n\n def __init__(self, callable=None):\n self.within_datagram = WithinDatagram(self)\n self.within_flowsample = WithinFlowsample(self)\n self.currentState = self.within_datagram\n self.callable = callable\n\n def process(self, line):\n self.currentState.process(line)\n",
"step-4": "<mask token>\n\n\nclass Datagram(Container):\n datagram_counter = itertools.count().next\n\n def __init__(self):\n super(Datagram, self).__init__(Datagram.datagram_counter())\n self['flowSamples'] = {}\n\n\nclass FlowSample(Container):\n flowsample_counter = itertools.count().next\n\n def __init__(self):\n super(FlowSample, self).__init__(FlowSample.flowsample_counter())\n\n\n<mask token>\n\n\nclass WithinDatagram(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n\n def process(self, line):\n if 'startDatagram' in line:\n self.current_datagram = Datagram()\n elif 'endDatagram' in line:\n self.Trace.callable(self.current_datagram.content)\n elif 'startSample' in line:\n self.Trace.currentState = self.Trace.within_flowsample\n self.Trace.within_flowsample.re_init(FlowSample(), self.\n current_datagram)\n else:\n process_line_and_store_in_obj(line, self.current_datagram)\n\n\nclass WithinFlowsample(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n self.current_flowsample = None\n\n def re_init(self, flowsampleObj, datagramObj):\n self.current_datagram = datagramObj\n self.current_flowsample = flowsampleObj\n\n def process(self, line):\n if 'endSample' in line:\n self.current_datagram['flowSamples'][self.current_flowsample.id\n ] = self.current_flowsample.content\n self.Trace.currentState = self.Trace.within_datagram\n else:\n process_line_and_store_in_obj(line, self.current_flowsample)\n\n\nclass Trace(object):\n\n def __init__(self, callable=None):\n self.within_datagram = WithinDatagram(self)\n self.within_flowsample = WithinFlowsample(self)\n self.currentState = self.within_datagram\n self.callable = callable\n\n def process(self, line):\n self.currentState.process(line)\n",
"step-5": "#@@---------------------------@@\n# Author: Chamil Jayasundara\n# Date: 5/18/17\n# Description: Extract SFLOW data from slow logs\n#@@---------------------------@@\n\nimport itertools\nfrom collections import defaultdict\n\n\"\"\"Flow Sample and Datagram Objects\"\"\"\n\n\nclass Container(object):\n\n def __init__(self, id):\n self.id = id\n self.content = defaultdict(int)\n\n def __getitem__(self, key):\n return self.content[key]\n\n def __setitem__(self, key, value):\n self.content[key] = value\n\n\nclass Datagram(Container):\n\n datagram_counter = itertools.count().next\n\n def __init__(self):\n super(Datagram, self).__init__(Datagram.datagram_counter())\n self['flowSamples'] = {}\n\n\nclass FlowSample(Container):\n\n flowsample_counter = itertools.count().next\n\n def __init__(self):\n super(FlowSample, self).__init__(FlowSample.flowsample_counter())\n\n\n#############################\n\"\"\"Data Extraction\"\"\"\n\ndef process_line_and_store_in_obj(line, obj):\n partition = line.partition(\" \")\n obj[partition[0]] = partition[2].rstrip()\n\n\n###State Machine Classses\nclass WithinDatagram(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n\n def process(self,line):\n if \"startDatagram\" in line:\n self.current_datagram = Datagram()\n\n elif \"endDatagram\" in line:\n self.Trace.callable(self.current_datagram.content)\n\n elif \"startSample\" in line:\n self.Trace.currentState = self.Trace.within_flowsample\n self.Trace.within_flowsample.re_init(FlowSample(), self.current_datagram)\n\n else:\n process_line_and_store_in_obj(line, self.current_datagram)\n\n\nclass WithinFlowsample(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n self.current_flowsample = None\n\n def re_init(self, flowsampleObj, datagramObj):\n self.current_datagram = datagramObj\n self.current_flowsample = flowsampleObj\n\n def process(self,line):\n if \"endSample\" in line:\n 
self.current_datagram['flowSamples'][self.current_flowsample.id] = self.current_flowsample.content\n self.Trace.currentState = self.Trace.within_datagram\n\n else:\n process_line_and_store_in_obj(line, self.current_flowsample)\n\n\nclass Trace(object):\n\n def __init__(self, callable=None):\n self.within_datagram = WithinDatagram(self)\n self.within_flowsample = WithinFlowsample(self)\n self.currentState = self.within_datagram\n self.callable = callable\n\n def process(self, line):\n self.currentState.process(line)\n\n",
"step-ids": [
9,
12,
14,
16,
23
]
}
|
[
9,
12,
14,
16,
23
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
gateway = config.gateway['trading_system_gateway']
host = gateway['host']
port = gateway['port']
server_id = gateway['server_id']
licences = gateway['licences']
service = SubService(host, port, server_id, licences)
"""订阅order"""
service.sub_order()
<|reserved_special_token_1|>
from cpp_service.SubService import SubService
import config
if __name__ == '__main__':
gateway = config.gateway['trading_system_gateway']
host = gateway['host']
port = gateway['port']
server_id = gateway['server_id']
licences = gateway['licences']
service = SubService(host, port, server_id, licences)
"""订阅order"""
service.sub_order()
<|reserved_special_token_1|>
from cpp_service.SubService import SubService
import config
if __name__ == "__main__":
    # Pull the trading-system gateway settings from the shared config.
    gateway_cfg = config.gateway["trading_system_gateway"]
    service = SubService(
        gateway_cfg["host"],
        gateway_cfg["port"],
        gateway_cfg["server_id"],
        gateway_cfg["licences"],
    )
    # Subscribe to order updates (订阅order).
    service.sub_order()
|
flexible
|
{
"blob_id": "f72cdf8d91c31760335b96052a34615307f48727",
"index": 9774,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n gateway = config.gateway['trading_system_gateway']\n host = gateway['host']\n port = gateway['port']\n server_id = gateway['server_id']\n licences = gateway['licences']\n service = SubService(host, port, server_id, licences)\n \"\"\"订阅order\"\"\"\n service.sub_order()\n",
"step-3": "from cpp_service.SubService import SubService\nimport config\nif __name__ == '__main__':\n gateway = config.gateway['trading_system_gateway']\n host = gateway['host']\n port = gateway['port']\n server_id = gateway['server_id']\n licences = gateway['licences']\n service = SubService(host, port, server_id, licences)\n \"\"\"订阅order\"\"\"\n service.sub_order()\n",
"step-4": "from cpp_service.SubService import SubService\nimport config\n\nif __name__ == \"__main__\":\n gateway = config.gateway[\"trading_system_gateway\"]\n host = gateway[\"host\"]\n port = gateway[\"port\"]\n server_id = gateway[\"server_id\"]\n licences = gateway[\"licences\"]\n\n service = SubService(host, port, server_id, licences)\n \"\"\"订阅order\"\"\"\n service.sub_order()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
data = [
"........#.............#........",
"...#....#...#....#.............",
".#..#...#............#.....#..#",
"..#......#..##............###..",
"..........#......#..#..#.......",
".#..#.......#.........#.#......",
".........#..#....##..#.##....#.",
"..#....##...#..................",
"##..........#.##...#....##..#..",
"...#....#...#..............#...",
"...........................#..#",
"..##.##.#..................#...",
"...#.##..#............#........",
"........#.......#...#.....##.#.",
".##..........#......#.......#..",
"...#..........#...#..#.......#.",
"......#...#...#.##.......#.#...",
"........#...#...#...##.........",
"#..............#.#....#.......#",
"..#..#..#.#....#...............",
".....#........#...#..........#.",
"##......#...#..#.##.......#....",
"..#.#.....#.#.............#.#.#",
"#..#..##......##...#...........",
"..#......#........#.....#......",
".....#.......#....#.#...#......",
"...#........#...........#...#..",
".......#.#...........###....#..",
"...#...........##....##........",
"#....#..####....#.....#..#....#",
"..........#...........#........",
"...#.......#....#.#.........#..",
"....#...#.......#..###.........",
"......#......#..#......#..#....",
"...#.....#............#..#.....",
"...#.#.#.#..#.......#.....#....",
"#....##...#.........#...##.....",
"#..#.......#..#..#..#...##.....",
"#.......#............#.....#...",
".#........##....##...#........#",
".....#...#.....................",
".......#........#..............",
".....#............#.#.#...#.#..",
".....##..#.............#.......",
"..#.##..#........#..#...#......",
".........#.#....#...........#..",
".#.....#..#....#.....#...#.....",
"....#.#................#.......",
"...............##......#...#...",
".##...#...#.......##.#....#....",
"............#........#.......#.",
"......##.#.#...................",
".#.#..............#.......#....",
"#.....#...#.......#..#...#.....",
".............#....#..#......#..",
"........#...##................#",
".......#...#..#..##............",
"..#..#...##...#..#.#.....#...#.",
".#.#...#.........#.#...........",
"...###....#.......#...#........",
"........#......##.#...#..##..#.",
".....................#.#.......",
".............#...........#...#.",
"#..#..#.....#.#...#............",
"...#....#.....#...........#....",
"..##.....##...#......#..##.....",
"#.....#.....###.#.....#....##..",
".#...........###...............",
"..................#..##.#...#..",
"................#....##.#......",
".#.#.#...#....#.........#..#.#.",
"#.......#........##............",
".......##.#....#.#............#",
"..........#..##.#....#.........",
"........##..#....#.............",
".........#....#...........##...",
"#.........#.#..#..#..........#.",
".....#........#......#.........",
"....#.#.#...............#......",
".#..#..##...#.##..........#....",
"..#....................#.#.....",
".........#....#...........#.#.#",
"........#....##.##.............",
"..#.....#.......#..#......#....",
"#..........#.#.....#.#....#....",
"........##.#.....#..#.....#.#..",
"...................#...#....#.#",
"............#..#....#...#...#..",
"..............#.#.........#....",
"...#..#..#.#..##..##...........",
".#...........................#.",
".#.......#...........#....#.#.#",
"......#..#...#........#...##...",
".........#......#.#.......#...#",
"...#..##................#......",
".............#.#..##....#.#....",
"...............#..#......#.....",
".#......#.#.#....#........#....",
"........#..#.##..#..#.........#",
"...#....#.#...#..#.......#..#..",
"..#...##.........#..#...#......",
"...#...........#.............#.",
"....#.....................#....",
".....#..#...............#.#...#",
"....#..........#........#......",
"..#....#........##..##.........",
"...#....#..#.#.......#...#.....",
"..#........#....#...##....#.#..",
".#...#........##.....#....###..",
"#....#....##......#........#...",
".........#..#.#..........#....#",
"....#...#.....#.......##.......",
"..............#..........#.##..",
"#...#..#..............#......#.",
".................#......##....#",
"..#..##..#.......#..#.#......#.",
".............#........#.....#.#",
".#.##............#..#..........",
"..#...#...........#..##........",
".#....#...#....#.......#.......",
"...#.#..#..#..#....#.....#..#..",
"....#..##..............#...#...",
"#..........###......###........",
".##.##......#..#............#..",
".#...........#.#.....#...#.....",
"#.#..#...#............#........",
".........#...#...#..........##.",
".......###..#..........#.......",
"...........###.....#........#..",
".#.............#.....#......#..",
"...#.....#....#.#.........##...",
"....##..##...#.......##........",
"......#....##.........#......#.",
"..........#.....##..#.....#..#.",
"..........####...#..#.........#",
".##....#..#.#...#.......#......",
"...#.#.##.#.#...#....#.#.#.....",
".........#...##........##.....#",
"..#........#..........##...##.#",
"##...##..........#.#...........",
"..............#......#.........",
"........#.....#.#.......#......",
".#...#.....#....#.#..#.........",
".....#....................##...",
"....#..................#.#...##",
".....#............#..##........",
"#..........#....#.#.......##.#.",
"....#..#.....................#.",
"#..#....##.....#...............",
"..#...#..#..##....#.#..........",
".......#......#.#.......#.....#",
"...#.#.......#...#.##..........",
"....#..........#....#.#.#......",
".......#..#..........#..##.....",
"#......#......#...#......#...#.",
"###..#....##......##........#..",
".#..........#.....#.......#.#..",
".......#.....#.....#.#.........",
"..#...#....#...................",
"..............#.##.............",
".#...#.......#.##...#.#.......#",
".......#......................#",
"....#.#...#.#........#.........",
".#......#....#...#.............",
"#.......#...###.....#.#.#..#...",
"#....##.#...............##.....",
"..#.......#..................#.",
".....####...............#......",
".##......#......#.#.......##.#.",
"#......##..###....#....#......#",
".##.......##.##...#.##.........",
"......##............#.......#..",
"......#..#.....##.#............",
".#..........#.....##...........",
"#.........#......#......##.#...",
".........#.......#..#......#.#.",
".........#.......#...........#.",
".#..##.#..................##...",
".............#.............#...",
".....##........#......##...##..",
"..#..#.#.....#..#....#.........",
".....#....#.....#.....#........",
"#......##.....#....#....#......",
"#.................#..#.#......#",
".......#..#......#....#.#...#.#",
"....#.........#..#..........#.#",
"##......#............#...#...#.",
"....##......#...#.....#....##..",
".#...##.........#..............",
"......#.....................#..",
"..#..........###....#..........",
"#....#...#..#.............#....",
"#........#.#......#....#.......",
".#...#.......#..#...#.#...#..#.",
"................##.#.....#.....",
"###.......#...#................",
"...#.......#...#.#.....#.......",
"..#.........#.....#.#.......#..",
"......#.......................#",
"#.....#.#..#....#.......#......",
"...#....#..#....####...........",
".............#.....#...##......",
".......#.........#...#..#......",
".##..#.........#....#.#........",
"....##...#.#...........#....#..",
".........................##....",
"..###.......##....#.#.........#",
".#....#.#.#...........##....#..",
"......#...#..#..#..#..#.......#",
"..#....#.#.......#..#..#..#...#",
".....##...#.##....#.#...#......",
".........#..#....#..#..........",
".##..##.........#.#.....#......",
"..........#...##...#.#...#.....",
"#.##..#..#.............#.......",
"...#...........#.......#......#",
".......#....#....#...##.......#",
"..#.##........###..#......#....",
"...#...........###......#..#..#",
".#.........#.#.........#.#.....",
"##.......##.##.##......##......",
"............#...#..........#...",
"....................#..........",
"...#..#...........#...#...#....",
".................#...#......###",
"...#................#.#.##.....",
"...............#........#......",
"#.............##......#.#..#...",
"..#.#.....#..#.##.....##...#...",
"......#.........#......#.......",
"#.......#......#....#........#.",
".#..##.....#.........#.........",
"....##.##.#...#.........##.#...",
"...............#..#..#..##.....",
".#..#...............###........",
".##............##..............",
"...............#...##...#...#.#",
"..#.#......#.#..#.............#",
"#.#..#..##.........#.#.#...#...",
"....##.#....................##.",
".........#..#.....#.....#..#..#",
"....#......#......#.##....#....",
"........###..#.............#..#",
"##................#.........#..",
"#.....#.......#....#...........",
"..#.......#..#........#....#...",
"..#.#.##..#.#...##........#.##.",
"..#..........#............#....",
"..........#...............##...",
"..........###........#.#.......",
".....###..#.............#......",
"##.............#...#.....#.....",
".....#......#....#........#.#..",
"............#..#..............#",
".................#...........##",
"#........#.........###.....#...",
"..#.#..............##......#.#.",
".#...........#.........#..##..#",
"...............................",
".#.....#..#....#....#......#...",
".#...#......#.#..#....#.......#",
"......#.##.......#......#......",
"......#..###..#................",
"#..#.....#........##...#.......",
"......##.........##....#...##..",
".#..........#.................#",
"#..#.......#...............#...",
".........#..###....#.#.##.#....",
"..#...#.##..##...............##",
".........#.....................",
".#....##...#......#....#.......",
"............#..........#..#....",
"...#......##....#....#........#",
".#...................#.........",
"#.#........###....#..........#.",
".........#....#....#........##.",
".#....#..#.........#..#........",
"...............#..#...#..#...##",
".........#....##....#......#...",
".#.............................",
"...#........#...#.#...#.#..#...",
".....#..##...#.#...............",
"#.....#....#.........#.........",
"#...#...........##.........#...",
"..##........#.#...#...#......#.",
"...........#.....#...#.#.......",
"......###....#.....#...........",
"......##...#..........#....#.#.",
".......##..##..........#.......",
"....#............#..#....##....",
"..##...................#.#.....",
"...#.#..#.#....................",
".#..##..#............##.###..#.",
"#.#...#....#.#..........#.#....",
"........#....#.....#...........",
"..##....#...#.......#..........",
"...........##.##....#..........",
".....#............#............",
".......#.............#....#....",
".................#......#......",
"......##.......#....#..##...#..",
".#..#....#.....................",
"...#.#.#...#......##...........",
"##........##.#....#....#.......",
".......#.....#..#..#...#.##....",
"#..........#....#.#..#..#..#...",
"...##..............#...........",
".........#.....#.#....#.......#",
".........#....##..#..##..#.....",
".....#......................#..",
"...###...#..#......#...........",
"....#.....................#....",
"...............................",
"..#.....###.......#..#....#....",
"#..........#.................#.",
"......#.......###.......#..##..",
".............#.##..............",
"......#..#.#..#...........#....",
"...#....##.#...#..#.#...#....#.",
"..................#...#....#.##",
"......#.#....#.................",
"......#.#.....#.....#..##......",
"#..##...........#..#.....#.##..",
]
def treeCounter(moveRight, moveDown):
    """Count '#' cells hit while tobogganing down the global `data` grid.

    Starting at column 0 of row 0, advance `moveDown` rows and `moveRight`
    columns per step (columns wrap around, since the map repeats to the
    right) and print how many tree cells ('#') are visited.
    """
    trees = 0
    col = 0
    # Visit rows moveDown, 2*moveDown, ... until the bottom of the map.
    for row in range(moveDown, len(data), moveDown):
        col = (col + moveRight) % len(data[row])
        if data[row][col] == '#':
            trees += 1
    print(trees)
# Evaluate every requested slope (right, down); each call prints its count.
for right, down in ((1, 1), (3, 1), (5, 1), (7, 1), (1, 2)):
    treeCounter(right, down)
|
normal
|
{
"blob_id": "c22651437094723b711a959e031f1c7f928f735a",
"index": 7645,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef treeCounter(moveRight, moveDown):\n row = 0\n index = 0\n trees = 0\n finished = False\n while not finished:\n row += moveDown\n if len(data) > row:\n index = (index + moveRight) % len(data[row])\n if data[row][index] == '#':\n trees += 1\n else:\n finished = True\n print(trees)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef treeCounter(moveRight, moveDown):\n row = 0\n index = 0\n trees = 0\n finished = False\n while not finished:\n row += moveDown\n if len(data) > row:\n index = (index + moveRight) % len(data[row])\n if data[row][index] == '#':\n trees += 1\n else:\n finished = True\n print(trees)\n\n\ntreeCounter(1, 1)\ntreeCounter(3, 1)\ntreeCounter(5, 1)\ntreeCounter(7, 1)\ntreeCounter(1, 2)\n",
"step-4": "data = ['........#.............#........',\n '...#....#...#....#.............', '.#..#...#............#.....#..#',\n '..#......#..##............###..', '..........#......#..#..#.......',\n '.#..#.......#.........#.#......', '.........#..#....##..#.##....#.',\n '..#....##...#..................', '##..........#.##...#....##..#..',\n '...#....#...#..............#...', '...........................#..#',\n '..##.##.#..................#...', '...#.##..#............#........',\n '........#.......#...#.....##.#.', '.##..........#......#.......#..',\n '...#..........#...#..#.......#.', '......#...#...#.##.......#.#...',\n '........#...#...#...##.........', '#..............#.#....#.......#',\n '..#..#..#.#....#...............', '.....#........#...#..........#.',\n '##......#...#..#.##.......#....', '..#.#.....#.#.............#.#.#',\n '#..#..##......##...#...........', '..#......#........#.....#......',\n '.....#.......#....#.#...#......', '...#........#...........#...#..',\n '.......#.#...........###....#..', '...#...........##....##........',\n '#....#..####....#.....#..#....#', '..........#...........#........',\n '...#.......#....#.#.........#..', '....#...#.......#..###.........',\n '......#......#..#......#..#....', '...#.....#............#..#.....',\n '...#.#.#.#..#.......#.....#....', '#....##...#.........#...##.....',\n '#..#.......#..#..#..#...##.....', '#.......#............#.....#...',\n '.#........##....##...#........#', '.....#...#.....................',\n '.......#........#..............', '.....#............#.#.#...#.#..',\n '.....##..#.............#.......', '..#.##..#........#..#...#......',\n '.........#.#....#...........#..', '.#.....#..#....#.....#...#.....',\n '....#.#................#.......', '...............##......#...#...',\n '.##...#...#.......##.#....#....', '............#........#.......#.',\n '......##.#.#...................', '.#.#..............#.......#....',\n '#.....#...#.......#..#...#.....', '.............#....#..#......#..',\n 
'........#...##................#', '.......#...#..#..##............',\n '..#..#...##...#..#.#.....#...#.', '.#.#...#.........#.#...........',\n '...###....#.......#...#........', '........#......##.#...#..##..#.',\n '.....................#.#.......', '.............#...........#...#.',\n '#..#..#.....#.#...#............', '...#....#.....#...........#....',\n '..##.....##...#......#..##.....', '#.....#.....###.#.....#....##..',\n '.#...........###...............', '..................#..##.#...#..',\n '................#....##.#......', '.#.#.#...#....#.........#..#.#.',\n '#.......#........##............', '.......##.#....#.#............#',\n '..........#..##.#....#.........', '........##..#....#.............',\n '.........#....#...........##...', '#.........#.#..#..#..........#.',\n '.....#........#......#.........', '....#.#.#...............#......',\n '.#..#..##...#.##..........#....', '..#....................#.#.....',\n '.........#....#...........#.#.#', '........#....##.##.............',\n '..#.....#.......#..#......#....', '#..........#.#.....#.#....#....',\n '........##.#.....#..#.....#.#..', '...................#...#....#.#',\n '............#..#....#...#...#..', '..............#.#.........#....',\n '...#..#..#.#..##..##...........', '.#...........................#.',\n '.#.......#...........#....#.#.#', '......#..#...#........#...##...',\n '.........#......#.#.......#...#', '...#..##................#......',\n '.............#.#..##....#.#....', '...............#..#......#.....',\n '.#......#.#.#....#........#....', '........#..#.##..#..#.........#',\n '...#....#.#...#..#.......#..#..', '..#...##.........#..#...#......',\n '...#...........#.............#.', '....#.....................#....',\n '.....#..#...............#.#...#', '....#..........#........#......',\n '..#....#........##..##.........', '...#....#..#.#.......#...#.....',\n '..#........#....#...##....#.#..', '.#...#........##.....#....###..',\n '#....#....##......#........#...', 
'.........#..#.#..........#....#',\n '....#...#.....#.......##.......', '..............#..........#.##..',\n '#...#..#..............#......#.', '.................#......##....#',\n '..#..##..#.......#..#.#......#.', '.............#........#.....#.#',\n '.#.##............#..#..........', '..#...#...........#..##........',\n '.#....#...#....#.......#.......', '...#.#..#..#..#....#.....#..#..',\n '....#..##..............#...#...', '#..........###......###........',\n '.##.##......#..#............#..', '.#...........#.#.....#...#.....',\n '#.#..#...#............#........', '.........#...#...#..........##.',\n '.......###..#..........#.......', '...........###.....#........#..',\n '.#.............#.....#......#..', '...#.....#....#.#.........##...',\n '....##..##...#.......##........', '......#....##.........#......#.',\n '..........#.....##..#.....#..#.', '..........####...#..#.........#',\n '.##....#..#.#...#.......#......', '...#.#.##.#.#...#....#.#.#.....',\n '.........#...##........##.....#', '..#........#..........##...##.#',\n '##...##..........#.#...........', '..............#......#.........',\n '........#.....#.#.......#......', '.#...#.....#....#.#..#.........',\n '.....#....................##...', '....#..................#.#...##',\n '.....#............#..##........', '#..........#....#.#.......##.#.',\n '....#..#.....................#.', '#..#....##.....#...............',\n '..#...#..#..##....#.#..........', '.......#......#.#.......#.....#',\n '...#.#.......#...#.##..........', '....#..........#....#.#.#......',\n '.......#..#..........#..##.....', '#......#......#...#......#...#.',\n '###..#....##......##........#..', '.#..........#.....#.......#.#..',\n '.......#.....#.....#.#.........', '..#...#....#...................',\n '..............#.##.............', '.#...#.......#.##...#.#.......#',\n '.......#......................#', '....#.#...#.#........#.........',\n '.#......#....#...#.............', '#.......#...###.....#.#.#..#...',\n 
'#....##.#...............##.....', '..#.......#..................#.',\n '.....####...............#......', '.##......#......#.#.......##.#.',\n '#......##..###....#....#......#', '.##.......##.##...#.##.........',\n '......##............#.......#..', '......#..#.....##.#............',\n '.#..........#.....##...........', '#.........#......#......##.#...',\n '.........#.......#..#......#.#.', '.........#.......#...........#.',\n '.#..##.#..................##...', '.............#.............#...',\n '.....##........#......##...##..', '..#..#.#.....#..#....#.........',\n '.....#....#.....#.....#........', '#......##.....#....#....#......',\n '#.................#..#.#......#', '.......#..#......#....#.#...#.#',\n '....#.........#..#..........#.#', '##......#............#...#...#.',\n '....##......#...#.....#....##..', '.#...##.........#..............',\n '......#.....................#..', '..#..........###....#..........',\n '#....#...#..#.............#....', '#........#.#......#....#.......',\n '.#...#.......#..#...#.#...#..#.', '................##.#.....#.....',\n '###.......#...#................', '...#.......#...#.#.....#.......',\n '..#.........#.....#.#.......#..', '......#.......................#',\n '#.....#.#..#....#.......#......', '...#....#..#....####...........',\n '.............#.....#...##......', '.......#.........#...#..#......',\n '.##..#.........#....#.#........', '....##...#.#...........#....#..',\n '.........................##....', '..###.......##....#.#.........#',\n '.#....#.#.#...........##....#..', '......#...#..#..#..#..#.......#',\n '..#....#.#.......#..#..#..#...#', '.....##...#.##....#.#...#......',\n '.........#..#....#..#..........', '.##..##.........#.#.....#......',\n '..........#...##...#.#...#.....', '#.##..#..#.............#.......',\n '...#...........#.......#......#', '.......#....#....#...##.......#',\n '..#.##........###..#......#....', '...#...........###......#..#..#',\n '.#.........#.#.........#.#.....', 
'##.......##.##.##......##......',\n '............#...#..........#...', '....................#..........',\n '...#..#...........#...#...#....', '.................#...#......###',\n '...#................#.#.##.....', '...............#........#......',\n '#.............##......#.#..#...', '..#.#.....#..#.##.....##...#...',\n '......#.........#......#.......', '#.......#......#....#........#.',\n '.#..##.....#.........#.........', '....##.##.#...#.........##.#...',\n '...............#..#..#..##.....', '.#..#...............###........',\n '.##............##..............', '...............#...##...#...#.#',\n '..#.#......#.#..#.............#', '#.#..#..##.........#.#.#...#...',\n '....##.#....................##.', '.........#..#.....#.....#..#..#',\n '....#......#......#.##....#....', '........###..#.............#..#',\n '##................#.........#..', '#.....#.......#....#...........',\n '..#.......#..#........#....#...', '..#.#.##..#.#...##........#.##.',\n '..#..........#............#....', '..........#...............##...',\n '..........###........#.#.......', '.....###..#.............#......',\n '##.............#...#.....#.....', '.....#......#....#........#.#..',\n '............#..#..............#', '.................#...........##',\n '#........#.........###.....#...', '..#.#..............##......#.#.',\n '.#...........#.........#..##..#', '...............................',\n '.#.....#..#....#....#......#...', '.#...#......#.#..#....#.......#',\n '......#.##.......#......#......', '......#..###..#................',\n '#..#.....#........##...#.......', '......##.........##....#...##..',\n '.#..........#.................#', '#..#.......#...............#...',\n '.........#..###....#.#.##.#....', '..#...#.##..##...............##',\n '.........#.....................', '.#....##...#......#....#.......',\n '............#..........#..#....', '...#......##....#....#........#',\n '.#...................#.........', '#.#........###....#..........#.',\n 
'.........#....#....#........##.', '.#....#..#.........#..#........',\n '...............#..#...#..#...##', '.........#....##....#......#...',\n '.#.............................', '...#........#...#.#...#.#..#...',\n '.....#..##...#.#...............', '#.....#....#.........#.........',\n '#...#...........##.........#...', '..##........#.#...#...#......#.',\n '...........#.....#...#.#.......', '......###....#.....#...........',\n '......##...#..........#....#.#.', '.......##..##..........#.......',\n '....#............#..#....##....', '..##...................#.#.....',\n '...#.#..#.#....................', '.#..##..#............##.###..#.',\n '#.#...#....#.#..........#.#....', '........#....#.....#...........',\n '..##....#...#.......#..........', '...........##.##....#..........',\n '.....#............#............', '.......#.............#....#....',\n '.................#......#......', '......##.......#....#..##...#..',\n '.#..#....#.....................', '...#.#.#...#......##...........',\n '##........##.#....#....#.......', '.......#.....#..#..#...#.##....',\n '#..........#....#.#..#..#..#...', '...##..............#...........',\n '.........#.....#.#....#.......#', '.........#....##..#..##..#.....',\n '.....#......................#..', '...###...#..#......#...........',\n '....#.....................#....', '...............................',\n '..#.....###.......#..#....#....', '#..........#.................#.',\n '......#.......###.......#..##..', '.............#.##..............',\n '......#..#.#..#...........#....', '...#....##.#...#..#.#...#....#.',\n '..................#...#....#.##', '......#.#....#.................',\n '......#.#.....#.....#..##......', '#..##...........#..#.....#.##..']\n\n\ndef treeCounter(moveRight, moveDown):\n row = 0\n index = 0\n trees = 0\n finished = False\n while not finished:\n row += moveDown\n if len(data) > row:\n index = (index + moveRight) % len(data[row])\n if data[row][index] == '#':\n trees += 1\n else:\n finished = 
True\n print(trees)\n\n\ntreeCounter(1, 1)\ntreeCounter(3, 1)\ntreeCounter(5, 1)\ntreeCounter(7, 1)\ntreeCounter(1, 2)\n",
"step-5": "data = [\n \"........#.............#........\",\n \"...#....#...#....#.............\",\n \".#..#...#............#.....#..#\",\n \"..#......#..##............###..\",\n \"..........#......#..#..#.......\",\n \".#..#.......#.........#.#......\",\n \".........#..#....##..#.##....#.\",\n \"..#....##...#..................\",\n \"##..........#.##...#....##..#..\",\n \"...#....#...#..............#...\",\n \"...........................#..#\",\n \"..##.##.#..................#...\",\n \"...#.##..#............#........\",\n \"........#.......#...#.....##.#.\",\n \".##..........#......#.......#..\",\n \"...#..........#...#..#.......#.\",\n \"......#...#...#.##.......#.#...\",\n \"........#...#...#...##.........\",\n \"#..............#.#....#.......#\",\n \"..#..#..#.#....#...............\",\n \".....#........#...#..........#.\",\n \"##......#...#..#.##.......#....\",\n \"..#.#.....#.#.............#.#.#\",\n \"#..#..##......##...#...........\",\n \"..#......#........#.....#......\",\n \".....#.......#....#.#...#......\",\n \"...#........#...........#...#..\",\n \".......#.#...........###....#..\",\n \"...#...........##....##........\",\n \"#....#..####....#.....#..#....#\",\n \"..........#...........#........\",\n \"...#.......#....#.#.........#..\",\n \"....#...#.......#..###.........\",\n \"......#......#..#......#..#....\",\n \"...#.....#............#..#.....\",\n \"...#.#.#.#..#.......#.....#....\",\n \"#....##...#.........#...##.....\",\n \"#..#.......#..#..#..#...##.....\",\n \"#.......#............#.....#...\",\n \".#........##....##...#........#\",\n \".....#...#.....................\",\n \".......#........#..............\",\n \".....#............#.#.#...#.#..\",\n \".....##..#.............#.......\",\n \"..#.##..#........#..#...#......\",\n \".........#.#....#...........#..\",\n \".#.....#..#....#.....#...#.....\",\n \"....#.#................#.......\",\n \"...............##......#...#...\",\n \".##...#...#.......##.#....#....\",\n 
\"............#........#.......#.\",\n \"......##.#.#...................\",\n \".#.#..............#.......#....\",\n \"#.....#...#.......#..#...#.....\",\n \".............#....#..#......#..\",\n \"........#...##................#\",\n \".......#...#..#..##............\",\n \"..#..#...##...#..#.#.....#...#.\",\n \".#.#...#.........#.#...........\",\n \"...###....#.......#...#........\",\n \"........#......##.#...#..##..#.\",\n \".....................#.#.......\",\n \".............#...........#...#.\",\n \"#..#..#.....#.#...#............\",\n \"...#....#.....#...........#....\",\n \"..##.....##...#......#..##.....\",\n \"#.....#.....###.#.....#....##..\",\n \".#...........###...............\",\n \"..................#..##.#...#..\",\n \"................#....##.#......\",\n \".#.#.#...#....#.........#..#.#.\",\n \"#.......#........##............\",\n \".......##.#....#.#............#\",\n \"..........#..##.#....#.........\",\n \"........##..#....#.............\",\n \".........#....#...........##...\",\n \"#.........#.#..#..#..........#.\",\n \".....#........#......#.........\",\n \"....#.#.#...............#......\",\n \".#..#..##...#.##..........#....\",\n \"..#....................#.#.....\",\n \".........#....#...........#.#.#\",\n \"........#....##.##.............\",\n \"..#.....#.......#..#......#....\",\n \"#..........#.#.....#.#....#....\",\n \"........##.#.....#..#.....#.#..\",\n \"...................#...#....#.#\",\n \"............#..#....#...#...#..\",\n \"..............#.#.........#....\",\n \"...#..#..#.#..##..##...........\",\n \".#...........................#.\",\n \".#.......#...........#....#.#.#\",\n \"......#..#...#........#...##...\",\n \".........#......#.#.......#...#\",\n \"...#..##................#......\",\n \".............#.#..##....#.#....\",\n \"...............#..#......#.....\",\n \".#......#.#.#....#........#....\",\n \"........#..#.##..#..#.........#\",\n \"...#....#.#...#..#.......#..#..\",\n \"..#...##.........#..#...#......\",\n 
\"...#...........#.............#.\",\n \"....#.....................#....\",\n \".....#..#...............#.#...#\",\n \"....#..........#........#......\",\n \"..#....#........##..##.........\",\n \"...#....#..#.#.......#...#.....\",\n \"..#........#....#...##....#.#..\",\n \".#...#........##.....#....###..\",\n \"#....#....##......#........#...\",\n \".........#..#.#..........#....#\",\n \"....#...#.....#.......##.......\",\n \"..............#..........#.##..\",\n \"#...#..#..............#......#.\",\n \".................#......##....#\",\n \"..#..##..#.......#..#.#......#.\",\n \".............#........#.....#.#\",\n \".#.##............#..#..........\",\n \"..#...#...........#..##........\",\n \".#....#...#....#.......#.......\",\n \"...#.#..#..#..#....#.....#..#..\",\n \"....#..##..............#...#...\",\n \"#..........###......###........\",\n \".##.##......#..#............#..\",\n \".#...........#.#.....#...#.....\",\n \"#.#..#...#............#........\",\n \".........#...#...#..........##.\",\n \".......###..#..........#.......\",\n \"...........###.....#........#..\",\n \".#.............#.....#......#..\",\n \"...#.....#....#.#.........##...\",\n \"....##..##...#.......##........\",\n \"......#....##.........#......#.\",\n \"..........#.....##..#.....#..#.\",\n \"..........####...#..#.........#\",\n \".##....#..#.#...#.......#......\",\n \"...#.#.##.#.#...#....#.#.#.....\",\n \".........#...##........##.....#\",\n \"..#........#..........##...##.#\",\n \"##...##..........#.#...........\",\n \"..............#......#.........\",\n \"........#.....#.#.......#......\",\n \".#...#.....#....#.#..#.........\",\n \".....#....................##...\",\n \"....#..................#.#...##\",\n \".....#............#..##........\",\n \"#..........#....#.#.......##.#.\",\n \"....#..#.....................#.\",\n \"#..#....##.....#...............\",\n \"..#...#..#..##....#.#..........\",\n \".......#......#.#.......#.....#\",\n \"...#.#.......#...#.##..........\",\n 
\"....#..........#....#.#.#......\",\n \".......#..#..........#..##.....\",\n \"#......#......#...#......#...#.\",\n \"###..#....##......##........#..\",\n \".#..........#.....#.......#.#..\",\n \".......#.....#.....#.#.........\",\n \"..#...#....#...................\",\n \"..............#.##.............\",\n \".#...#.......#.##...#.#.......#\",\n \".......#......................#\",\n \"....#.#...#.#........#.........\",\n \".#......#....#...#.............\",\n \"#.......#...###.....#.#.#..#...\",\n \"#....##.#...............##.....\",\n \"..#.......#..................#.\",\n \".....####...............#......\",\n \".##......#......#.#.......##.#.\",\n \"#......##..###....#....#......#\",\n \".##.......##.##...#.##.........\",\n \"......##............#.......#..\",\n \"......#..#.....##.#............\",\n \".#..........#.....##...........\",\n \"#.........#......#......##.#...\",\n \".........#.......#..#......#.#.\",\n \".........#.......#...........#.\",\n \".#..##.#..................##...\",\n \".............#.............#...\",\n \".....##........#......##...##..\",\n \"..#..#.#.....#..#....#.........\",\n \".....#....#.....#.....#........\",\n \"#......##.....#....#....#......\",\n \"#.................#..#.#......#\",\n \".......#..#......#....#.#...#.#\",\n \"....#.........#..#..........#.#\",\n \"##......#............#...#...#.\",\n \"....##......#...#.....#....##..\",\n \".#...##.........#..............\",\n \"......#.....................#..\",\n \"..#..........###....#..........\",\n \"#....#...#..#.............#....\",\n \"#........#.#......#....#.......\",\n \".#...#.......#..#...#.#...#..#.\",\n \"................##.#.....#.....\",\n \"###.......#...#................\",\n \"...#.......#...#.#.....#.......\",\n \"..#.........#.....#.#.......#..\",\n \"......#.......................#\",\n \"#.....#.#..#....#.......#......\",\n \"...#....#..#....####...........\",\n \".............#.....#...##......\",\n \".......#.........#...#..#......\",\n 
\".##..#.........#....#.#........\",\n \"....##...#.#...........#....#..\",\n \".........................##....\",\n \"..###.......##....#.#.........#\",\n \".#....#.#.#...........##....#..\",\n \"......#...#..#..#..#..#.......#\",\n \"..#....#.#.......#..#..#..#...#\",\n \".....##...#.##....#.#...#......\",\n \".........#..#....#..#..........\",\n \".##..##.........#.#.....#......\",\n \"..........#...##...#.#...#.....\",\n \"#.##..#..#.............#.......\",\n \"...#...........#.......#......#\",\n \".......#....#....#...##.......#\",\n \"..#.##........###..#......#....\",\n \"...#...........###......#..#..#\",\n \".#.........#.#.........#.#.....\",\n \"##.......##.##.##......##......\",\n \"............#...#..........#...\",\n \"....................#..........\",\n \"...#..#...........#...#...#....\",\n \".................#...#......###\",\n \"...#................#.#.##.....\",\n \"...............#........#......\",\n \"#.............##......#.#..#...\",\n \"..#.#.....#..#.##.....##...#...\",\n \"......#.........#......#.......\",\n \"#.......#......#....#........#.\",\n \".#..##.....#.........#.........\",\n \"....##.##.#...#.........##.#...\",\n \"...............#..#..#..##.....\",\n \".#..#...............###........\",\n \".##............##..............\",\n \"...............#...##...#...#.#\",\n \"..#.#......#.#..#.............#\",\n \"#.#..#..##.........#.#.#...#...\",\n \"....##.#....................##.\",\n \".........#..#.....#.....#..#..#\",\n \"....#......#......#.##....#....\",\n \"........###..#.............#..#\",\n \"##................#.........#..\",\n \"#.....#.......#....#...........\",\n \"..#.......#..#........#....#...\",\n \"..#.#.##..#.#...##........#.##.\",\n \"..#..........#............#....\",\n \"..........#...............##...\",\n \"..........###........#.#.......\",\n \".....###..#.............#......\",\n \"##.............#...#.....#.....\",\n \".....#......#....#........#.#..\",\n \"............#..#..............#\",\n 
\".................#...........##\",\n \"#........#.........###.....#...\",\n \"..#.#..............##......#.#.\",\n \".#...........#.........#..##..#\",\n \"...............................\",\n \".#.....#..#....#....#......#...\",\n \".#...#......#.#..#....#.......#\",\n \"......#.##.......#......#......\",\n \"......#..###..#................\",\n \"#..#.....#........##...#.......\",\n \"......##.........##....#...##..\",\n \".#..........#.................#\",\n \"#..#.......#...............#...\",\n \".........#..###....#.#.##.#....\",\n \"..#...#.##..##...............##\",\n \".........#.....................\",\n \".#....##...#......#....#.......\",\n \"............#..........#..#....\",\n \"...#......##....#....#........#\",\n \".#...................#.........\",\n \"#.#........###....#..........#.\",\n \".........#....#....#........##.\",\n \".#....#..#.........#..#........\",\n \"...............#..#...#..#...##\",\n \".........#....##....#......#...\",\n \".#.............................\",\n \"...#........#...#.#...#.#..#...\",\n \".....#..##...#.#...............\",\n \"#.....#....#.........#.........\",\n \"#...#...........##.........#...\",\n \"..##........#.#...#...#......#.\",\n \"...........#.....#...#.#.......\",\n \"......###....#.....#...........\",\n \"......##...#..........#....#.#.\",\n \".......##..##..........#.......\",\n \"....#............#..#....##....\",\n \"..##...................#.#.....\",\n \"...#.#..#.#....................\",\n \".#..##..#............##.###..#.\",\n \"#.#...#....#.#..........#.#....\",\n \"........#....#.....#...........\",\n \"..##....#...#.......#..........\",\n \"...........##.##....#..........\",\n \".....#............#............\",\n \".......#.............#....#....\",\n \".................#......#......\",\n \"......##.......#....#..##...#..\",\n \".#..#....#.....................\",\n \"...#.#.#...#......##...........\",\n \"##........##.#....#....#.......\",\n \".......#.....#..#..#...#.##....\",\n 
\"#..........#....#.#..#..#..#...\",\n \"...##..............#...........\",\n \".........#.....#.#....#.......#\",\n \".........#....##..#..##..#.....\",\n \".....#......................#..\",\n \"...###...#..#......#...........\",\n \"....#.....................#....\",\n \"...............................\",\n \"..#.....###.......#..#....#....\",\n \"#..........#.................#.\",\n \"......#.......###.......#..##..\",\n \".............#.##..............\",\n \"......#..#.#..#...........#....\",\n \"...#....##.#...#..#.#...#....#.\",\n \"..................#...#....#.##\",\n \"......#.#....#.................\",\n \"......#.#.....#.....#..##......\",\n \"#..##...........#..#.....#.##..\",\n]\n\ndef treeCounter(moveRight, moveDown):\n\n row = 0\n index = 0\n trees = 0\n\n finished = False\n\n while not finished:\n\n row += moveDown\n if len(data) > row:\n index = (index + moveRight) % len(data[row])\n if data[row][index] == '#':\n trees += 1\n else:\n finished = True\n\n print(trees)\n\n\ntreeCounter(1,1)\ntreeCounter(3,1)\ntreeCounter(5,1)\ntreeCounter(7,1)\ntreeCounter(1,2)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Dice2(Pmf):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Dice2(Pmf):
    """Uniform probability mass function over the faces of a fair die."""

    def __init__(self, sides):
        """Give every face 1..sides equal weight, then normalize."""
        Pmf.__init__(self)
        for face in range(sides):
            self.Set(face + 1, 1)
        self.Normalize()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Dice2(Pmf):
    """Uniform probability mass function over the faces of a fair die."""

    def __init__(self, sides):
        """Give every face 1..sides equal weight, then normalize."""
        Pmf.__init__(self)
        for face in range(sides):
            self.Set(face + 1, 1)
        self.Normalize()
if __name__ == '__main__':
    # Distribution of the sum of six fair d6, estimated from 1000 samples.
    die = Dice2(6)
    total = thinkbayes.SampleSum([die] * 6, 1000)
    thinkplot.Pmf(total)
<|reserved_special_token_1|>
import thinkbayes2 as thinkbayes
from thinkbayes2 import Pmf
import thinkplot
class Dice2(Pmf):
    """Uniform probability mass function over the faces of a fair die."""

    def __init__(self, sides):
        """Give every face 1..sides equal weight, then normalize."""
        Pmf.__init__(self)
        for face in range(sides):
            self.Set(face + 1, 1)
        self.Normalize()
if __name__ == '__main__':
    # Distribution of the sum of six fair d6, estimated from 1000 samples.
    die = Dice2(6)
    total = thinkbayes.SampleSum([die] * 6, 1000)
    thinkplot.Pmf(total)
<|reserved_special_token_1|>
import thinkbayes2 as thinkbayes
from thinkbayes2 import Pmf
import thinkplot
class Dice2(Pmf):
    """Uniform probability mass function over the faces of a fair die."""

    def __init__(self, sides):
        """Give every face 1..sides equal weight, then normalize."""
        Pmf.__init__(self)
        for face in range(sides):
            self.Set(face + 1, 1)
        self.Normalize()
if __name__ == "__main__":
    # Distribution of the sum of six fair d6, estimated from 1000 samples.
    die = Dice2(6)
    total = thinkbayes.SampleSum([die] * 6, 1000)
    thinkplot.Pmf(total)
|
flexible
|
{
"blob_id": "236dd70dec8d53062d6c38c370cb8f11dc5ef9d0",
"index": 556,
"step-1": "<mask token>\n\n\nclass Dice2(Pmf):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Dice2(Pmf):\n\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Dice2(Pmf):\n\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\nif __name__ == '__main__':\n d6 = Dice2(6)\n dices = [d6] * 6\n three = thinkbayes.SampleSum(dices, 1000)\n thinkplot.Pmf(three)\n",
"step-4": "import thinkbayes2 as thinkbayes\nfrom thinkbayes2 import Pmf\nimport thinkplot\n\n\nclass Dice2(Pmf):\n\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\nif __name__ == '__main__':\n d6 = Dice2(6)\n dices = [d6] * 6\n three = thinkbayes.SampleSum(dices, 1000)\n thinkplot.Pmf(three)\n",
"step-5": "import thinkbayes2 as thinkbayes\nfrom thinkbayes2 import Pmf\nimport thinkplot\n\n\nclass Dice2(Pmf):\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\nif __name__ == \"__main__\":\n d6 = Dice2(6)\n dices = [d6] * 6\n three = thinkbayes.SampleSum(dices, 1000)\n thinkplot.Pmf(three)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def save_ave_replay(aveData, nIter, nStart, bfname):
vd = np.zeros((nIter, 4, nStart))
for i_trial in range(nIter):
vv = aveData[i_trial]
for i_dendrite in range(4):
vvv = vv[i_dendrite]
mv = np.reshape(vvv, (nStart, 1501))
vd[i_trial, i_dendrite, :] = np.mean(mv[:, 550:1000], 1)
mvd = np.mean(vd, 0)
binfile = file(bfname, 'wb')
header = struct.pack('2I', mvd.shape[0], mvd.shape[1])
binfile.write(header)
for i in range(mvd.shape[1]):
ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])
binfile.write(ddata)
binfile.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save_ave_replay(aveData, nIter, nStart, bfname):
vd = np.zeros((nIter, 4, nStart))
for i_trial in range(nIter):
vv = aveData[i_trial]
for i_dendrite in range(4):
vvv = vv[i_dendrite]
mv = np.reshape(vvv, (nStart, 1501))
vd[i_trial, i_dendrite, :] = np.mean(mv[:, 550:1000], 1)
mvd = np.mean(vd, 0)
binfile = file(bfname, 'wb')
header = struct.pack('2I', mvd.shape[0], mvd.shape[1])
binfile.write(header)
for i in range(mvd.shape[1]):
ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])
binfile.write(ddata)
binfile.close()
def save_ave_place(aveData, nIter, bfname):
vd = np.zeros((nIter, 4, 20))
for i_trial in range(nIter):
vv = aveData[i_trial]
for i_dendrite in range(4):
vvv = vv[i_dendrite]
mv = np.reshape(vvv[0:50000], (20, 2500))
vd[i_trial, i_dendrite, :] = np.mean(mv, 1)
mvd = np.mean(vd, 0)
print(bfname)
binfile = file(bfname, 'wb')
header = struct.pack('2I', mvd.shape[0], mvd.shape[1])
binfile.write(header)
for i in range(mvd.shape[1]):
ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])
binfile.write(ddata)
binfile.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save_ave_replay(aveData, nIter, nStart, bfname):
vd = np.zeros((nIter, 4, nStart))
for i_trial in range(nIter):
vv = aveData[i_trial]
for i_dendrite in range(4):
vvv = vv[i_dendrite]
mv = np.reshape(vvv, (nStart, 1501))
vd[i_trial, i_dendrite, :] = np.mean(mv[:, 550:1000], 1)
mvd = np.mean(vd, 0)
binfile = file(bfname, 'wb')
header = struct.pack('2I', mvd.shape[0], mvd.shape[1])
binfile.write(header)
for i in range(mvd.shape[1]):
ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])
binfile.write(ddata)
binfile.close()
def save_ave_place(aveData, nIter, bfname):
vd = np.zeros((nIter, 4, 20))
for i_trial in range(nIter):
vv = aveData[i_trial]
for i_dendrite in range(4):
vvv = vv[i_dendrite]
mv = np.reshape(vvv[0:50000], (20, 2500))
vd[i_trial, i_dendrite, :] = np.mean(mv, 1)
mvd = np.mean(vd, 0)
print(bfname)
binfile = file(bfname, 'wb')
header = struct.pack('2I', mvd.shape[0], mvd.shape[1])
binfile.write(header)
for i in range(mvd.shape[1]):
ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])
binfile.write(ddata)
binfile.close()
def save_sim(data, out_binary=False, out_vdend=False, out_pickle=False,
outdir='data', dt_save=1):
if not os.path.exists(outdir):
os.makedirs(outdir)
modelData = sc.emptyObject()
lb.props(modelData)
if data.stimType == 'DStim':
filename = 'T' + str(data.TSTOP) + '_dend' + str(data.iclampLoc[2]
) + '_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]
) + '_dI' + str(data.iRange[1] - data.iRange[0])
elif data.stimType == 'SStim':
filename = 'T' + str(data.TSTOP) + '_soma_N' + str(len(data.iRange)
) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1] -
data.iRange[0])
else:
filename = 'T' + str(data.TSTOP) + '_Ne' + str(data.Ensyn
) + '_gA' + str(round(data.Agmax, 2)) + '_tauA' + str(data.Atau2)
if data.NMDA:
filename = filename + '_gN' + str(round(data.Ngmax, 2))
if data.GABA:
filename = filename + '_Ni' + str(data.Insyn) + '_gG' + str(round
(data.Igmax, 2))
if data.GABA_B:
filename = filename + '_gB' + str(round(data.Bgmax, 2))
if data.modulateNa:
filename = filename + '_noDendNa'
if data.stimType == 'nIter':
filename = filename + '_tInt' + str(data.tInterval
) + 'ms_' + data.locBias + '_' + data.direction
if (data.stimType == 'place') + (data.stimType == 'poisson') + (data
.stimType == 'replay'):
filename = filename + '_Er' + str(data.Erate) + '_Ir' + str(data
.Irate) + '_' + data.placeType + '_rep' + str(data.nIter)
filename = filename + '_stimseed' + str(data.stimseed)
if data.modulateK == True:
filename = filename + '_K0'
if data.modulateK_local == True:
filename = filename + '_KL0'
if data.modulateK_parents == True:
filename = filename + '_KP0'
if data.modulateRmRa == True:
filename = filename + '_RmRa'
if data.modulateRmRaSeg == True:
filename = filename + '_RmRaSeg'
if data.randomW == True:
filename = filename + '_randW'
if out_pickle:
dataList = [data, modelData]
fname = './' + outdir + '/' + filename + '.pkl'
f = open(fname, 'wb')
pickle.dump(dataList, f)
f.close()
if out_binary:
mat = np.array(data.vdata)
L = mat.shape[1]
dt_ratio = int(round(dt_save / data.dt))
mat = mat[:, 0:L:dt_ratio]
np.save('./' + outdir + '/vdata_' + filename + '.npy', mat)
if out_vdend:
nRep = len(data.vDdata)
mat = np.array(data.vDdata[0])
for i in range(1, nRep):
mat = np.hstack((mat, data.vDdata[i]))
L = mat.shape[1]
dt_ratio = int(round(dt_save / data.dt))
mat = mat[:, 0:L:dt_ratio]
np.save('./' + outdir + '/vDdata_' + filename + '.npy', mat)
if data.GABA:
Ilocs = np.array(data.Ilocs)
Elocs = np.array(data.Elocs)
Locs = np.row_stack((Elocs, Ilocs))
else:
Locs = np.array(data.Elocs)
np.save('./' + outdir + '/Elocs_' + filename + '.npy', Elocs)
np.save('./' + outdir + '/Ilocs_' + filename + '.npy', Ilocs)
if len(data.stim) > 0:
stim = data.stim
np.save('./' + outdir + '/stim_' + filename + '.npy', stim)
<|reserved_special_token_1|>
import pickle
import saveClass as sc
import libcell as lb
import numpy as np
import struct
import os
def save_ave_replay(aveData, nIter, nStart, bfname):
vd = np.zeros((nIter, 4, nStart))
for i_trial in range(nIter):
vv = aveData[i_trial]
for i_dendrite in range(4):
vvv = vv[i_dendrite]
mv = np.reshape(vvv, (nStart, 1501))
vd[i_trial, i_dendrite, :] = np.mean(mv[:, 550:1000], 1)
mvd = np.mean(vd, 0)
binfile = file(bfname, 'wb')
header = struct.pack('2I', mvd.shape[0], mvd.shape[1])
binfile.write(header)
for i in range(mvd.shape[1]):
ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])
binfile.write(ddata)
binfile.close()
def save_ave_place(aveData, nIter, bfname):
vd = np.zeros((nIter, 4, 20))
for i_trial in range(nIter):
vv = aveData[i_trial]
for i_dendrite in range(4):
vvv = vv[i_dendrite]
mv = np.reshape(vvv[0:50000], (20, 2500))
vd[i_trial, i_dendrite, :] = np.mean(mv, 1)
mvd = np.mean(vd, 0)
print(bfname)
binfile = file(bfname, 'wb')
header = struct.pack('2I', mvd.shape[0], mvd.shape[1])
binfile.write(header)
for i in range(mvd.shape[1]):
ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])
binfile.write(ddata)
binfile.close()
def save_sim(data, out_binary=False, out_vdend=False, out_pickle=False,
outdir='data', dt_save=1):
if not os.path.exists(outdir):
os.makedirs(outdir)
modelData = sc.emptyObject()
lb.props(modelData)
if data.stimType == 'DStim':
filename = 'T' + str(data.TSTOP) + '_dend' + str(data.iclampLoc[2]
) + '_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]
) + '_dI' + str(data.iRange[1] - data.iRange[0])
elif data.stimType == 'SStim':
filename = 'T' + str(data.TSTOP) + '_soma_N' + str(len(data.iRange)
) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1] -
data.iRange[0])
else:
filename = 'T' + str(data.TSTOP) + '_Ne' + str(data.Ensyn
) + '_gA' + str(round(data.Agmax, 2)) + '_tauA' + str(data.Atau2)
if data.NMDA:
filename = filename + '_gN' + str(round(data.Ngmax, 2))
if data.GABA:
filename = filename + '_Ni' + str(data.Insyn) + '_gG' + str(round
(data.Igmax, 2))
if data.GABA_B:
filename = filename + '_gB' + str(round(data.Bgmax, 2))
if data.modulateNa:
filename = filename + '_noDendNa'
if data.stimType == 'nIter':
filename = filename + '_tInt' + str(data.tInterval
) + 'ms_' + data.locBias + '_' + data.direction
if (data.stimType == 'place') + (data.stimType == 'poisson') + (data
.stimType == 'replay'):
filename = filename + '_Er' + str(data.Erate) + '_Ir' + str(data
.Irate) + '_' + data.placeType + '_rep' + str(data.nIter)
filename = filename + '_stimseed' + str(data.stimseed)
if data.modulateK == True:
filename = filename + '_K0'
if data.modulateK_local == True:
filename = filename + '_KL0'
if data.modulateK_parents == True:
filename = filename + '_KP0'
if data.modulateRmRa == True:
filename = filename + '_RmRa'
if data.modulateRmRaSeg == True:
filename = filename + '_RmRaSeg'
if data.randomW == True:
filename = filename + '_randW'
if out_pickle:
dataList = [data, modelData]
fname = './' + outdir + '/' + filename + '.pkl'
f = open(fname, 'wb')
pickle.dump(dataList, f)
f.close()
if out_binary:
mat = np.array(data.vdata)
L = mat.shape[1]
dt_ratio = int(round(dt_save / data.dt))
mat = mat[:, 0:L:dt_ratio]
np.save('./' + outdir + '/vdata_' + filename + '.npy', mat)
if out_vdend:
nRep = len(data.vDdata)
mat = np.array(data.vDdata[0])
for i in range(1, nRep):
mat = np.hstack((mat, data.vDdata[i]))
L = mat.shape[1]
dt_ratio = int(round(dt_save / data.dt))
mat = mat[:, 0:L:dt_ratio]
np.save('./' + outdir + '/vDdata_' + filename + '.npy', mat)
if data.GABA:
Ilocs = np.array(data.Ilocs)
Elocs = np.array(data.Elocs)
Locs = np.row_stack((Elocs, Ilocs))
else:
Locs = np.array(data.Elocs)
np.save('./' + outdir + '/Elocs_' + filename + '.npy', Elocs)
np.save('./' + outdir + '/Ilocs_' + filename + '.npy', Ilocs)
if len(data.stim) > 0:
stim = data.stim
np.save('./' + outdir + '/stim_' + filename + '.npy', stim)
<|reserved_special_token_1|>
import pickle
import saveClass as sc
import libcell as lb
import numpy as np
import struct
import os
# def save_Ldend(Ldends, bfname):
# # create a binary file
# bfname='Dend_length.bin'
# binfile = file(bfname, 'wb')
# # and write out two integers with the row and column dimension
# header = struct.pack('2I', Ldends.shape[0], Ldends.shape[1])
# binfile.write(header)
# # then loop over columns and write each
# for i in range(Ldends.shape[1]):
# ddata = struct.pack('%id' % Ldends.shape[0], *Ldends[:,i])
# binfile.write(ddata)
# binfile.close()
def save_ave_replay(aveData, nIter, nStart, bfname):
vd = np.zeros((nIter, 4, nStart))
for i_trial in range(nIter):
vv = aveData[i_trial]
for i_dendrite in range(4):
vvv = vv[i_dendrite]
mv = np.reshape(vvv, (nStart, 1501))
vd[i_trial, i_dendrite, :] = np.mean(mv[:,550:1000], 1)
mvd = np.mean(vd, 0)
# print (bfname)
# create a binary file
binfile = file(bfname, 'wb')
# and write out two integers with the row and column dimension
header = struct.pack('2I', mvd.shape[0], mvd.shape[1])
binfile.write(header)
# then loop over columns and write each
for i in range(mvd.shape[1]):
ddata = struct.pack('%id' % mvd.shape[0], *mvd[:,i])
binfile.write(ddata)
binfile.close()
def save_ave_place(aveData, nIter, bfname):
vd = np.zeros((nIter, 4, 20))
for i_trial in range(nIter):
vv = aveData[i_trial]
for i_dendrite in range(4):
vvv = vv[i_dendrite]
mv = np.reshape(vvv[0:50000], (20, 2500))
vd[i_trial, i_dendrite, :] = np.mean(mv, 1)
mvd = np.mean(vd, 0)
print (bfname)
# create a binary file
binfile = file(bfname, 'wb')
# and write out two integers with the row and column dimension
header = struct.pack('2I', mvd.shape[0], mvd.shape[1])
binfile.write(header)
# then loop over columns and write each
for i in range(mvd.shape[1]):
ddata = struct.pack('%id' % mvd.shape[0], *mvd[:,i])
binfile.write(ddata)
binfile.close()
def save_sim(data, out_binary=False, out_vdend=False, out_pickle=False, outdir='data', dt_save=1):
if not os.path.exists(outdir):
os.makedirs(outdir)
modelData = sc.emptyObject()
lb.props(modelData)
if (data.stimType=='DStim'):
filename = 'T' + str(data.TSTOP) + '_dend' + str(data.iclampLoc[2]) + '_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1]-data.iRange[0])
elif (data.stimType=='SStim'):
filename = 'T' + str(data.TSTOP) + '_soma_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1]-data.iRange[0])
else :
filename = 'T' + str(data.TSTOP) + '_Ne' + str(data.Ensyn)+'_gA'+str(round(data.Agmax,2)) + '_tauA' + str(data.Atau2)
if (data.NMDA):
filename = filename + '_gN'+str(round(data.Ngmax,2))
if (data.GABA):
filename = filename + '_Ni'+str(data.Insyn) + '_gG'+str(round(data.Igmax, 2))
if (data.GABA_B):
filename = filename + '_gB'+str(round(data.Bgmax, 2))
if (data.modulateNa):
filename = filename + '_noDendNa'
if (data.stimType == 'nIter'):
filename = filename + '_tInt' + str(data.tInterval) + 'ms_' + data.locBias + '_' + data.direction
if ((data.stimType == 'place') + (data.stimType == 'poisson') + (data.stimType == 'replay')):
filename = filename + "_Er" + str(data.Erate) + '_Ir'+str(data.Irate) + '_' + data.placeType + '_rep' + str(data.nIter)
filename = filename + '_stimseed' + str(data.stimseed)
if (data.modulateK == True):
filename = filename + '_K0'
if (data.modulateK_local == True):
filename = filename + '_KL0'
if (data.modulateK_parents == True):
filename = filename + '_KP0'
if (data.modulateRmRa == True):
filename = filename + '_RmRa'
if (data.modulateRmRaSeg == True):
filename = filename + '_RmRaSeg'
if (data.randomW == True):
filename = filename + '_randW'
if out_pickle:
dataList = [data, modelData]
fname = './'+outdir+'/'+filename+'.pkl'
f = open(fname, 'wb')
pickle.dump(dataList, f)
f.close()
if out_binary:
#---------------------------------------------
# WRITE the response in a binary file to read it with R
mat = np.array(data.vdata)
L = mat.shape[1]
dt_ratio = int(round(dt_save / data.dt))
mat = mat[:,0:L:dt_ratio]
np.save("./"+outdir+"/vdata_"+filename+".npy", mat)
#bfname = './'+outdir+'/vdata_'+filename+'.bin'
#print (bfname)
# create a binary file
#binfile = file(bfname, 'wb')
# and write out two integers with the row and column dimension
#header = struct.pack('2I', mat.shape[0], mat.shape[1])
#binfile.write(header)
# then loop over columns and write each
#for i in range(mat.shape[1]):
#ddata = struct.pack('%id' % mat.shape[0], *mat[:,i])
#binfile.write(ddata)
#binfile.close()
if out_vdend:
# # WRITE the dendritic response
nRep = len(data.vDdata)
mat = np.array(data.vDdata[0])
for i in range(1, nRep):
mat = np.hstack((mat, data.vDdata[i]))
L = mat.shape[1]
dt_ratio = int(round(dt_save / data.dt))
mat = mat[:,0:L:dt_ratio]
np.save("./"+outdir+"/vDdata_"+filename+".npy", mat)
# bfname = './'+outdir+'/vDdata_'+filename+'.bin'
# # create a binary file
# binfile = file(bfname, 'wb')
# # and write out two integers with the row and column dimension
# header = struct.pack('2I', mat.shape[0], mat.shape[1])
# binfile.write(header)
# # then loop over columns and write each
# for i in range(mat.shape[1]):
# ddata = struct.pack('%id' % mat.shape[0], *mat[:,i])
# binfile.write(ddata)
# binfile.close()
# # ---------------------------------------------
# # WRITE the location of the synapses
if (data.GABA) :
Ilocs = np.array(data.Ilocs)
#Ilocs[:,1] = 1 + Ilocs[:,1] # code that these are inhibitory synapses
Elocs = np.array(data.Elocs)
Locs = np.row_stack((Elocs, Ilocs))
else :
Locs = np.array(data.Elocs)
#bfname = './'+outdir+'/synlocs_'+filename+'.npy'
#print (bfname)
np.save("./"+outdir+"/Elocs_"+filename+".npy", Elocs)
np.save("./"+outdir+"/Ilocs_"+filename+".npy", Ilocs)
# # create a binary file
# binfile = file(bfname, 'wb')
# # and write out two integers with the row and column dimension
# header = struct.pack('2I', Locs.shape[0], Locs.shape[1])
# binfile.write(header)
# # then loop over columns and write each
# for i in range(Locs.shape[1]):
# ddata = struct.pack('%id' % Locs.shape[0], *Locs[:,i])
# binfile.write(ddata)
# binfile.close()
# #---------------------------------------------
# Write the input spike train
if (len(data.stim)>0):
stim = data.stim
#bfname = './'+outdir+'/stim_'+filename+'.bin'
np.save("./"+outdir+"/stim_"+filename+".npy", stim)
# create a binary file
#binfile = file(bfname, 'wb')
# and write out two integers with the row and column dimension
#header = struct.pack('2I', stim.shape[0], stim.shape[1])
#binfile.write(header)
# then loop over columns and write each
#for i in range(stim.shape[1]):
#ddata = struct.pack('%id' % stim.shape[0], *stim[:,i])
#binfile.write(ddata)
#binfile.close()
|
flexible
|
{
"blob_id": "6eb8172e7e26ad6ec9cb0d30c5a0613ce79296e6",
"index": 8421,
"step-1": "<mask token>\n\n\ndef save_ave_replay(aveData, nIter, nStart, bfname):\n vd = np.zeros((nIter, 4, nStart))\n for i_trial in range(nIter):\n vv = aveData[i_trial]\n for i_dendrite in range(4):\n vvv = vv[i_dendrite]\n mv = np.reshape(vvv, (nStart, 1501))\n vd[i_trial, i_dendrite, :] = np.mean(mv[:, 550:1000], 1)\n mvd = np.mean(vd, 0)\n binfile = file(bfname, 'wb')\n header = struct.pack('2I', mvd.shape[0], mvd.shape[1])\n binfile.write(header)\n for i in range(mvd.shape[1]):\n ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])\n binfile.write(ddata)\n binfile.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef save_ave_replay(aveData, nIter, nStart, bfname):\n vd = np.zeros((nIter, 4, nStart))\n for i_trial in range(nIter):\n vv = aveData[i_trial]\n for i_dendrite in range(4):\n vvv = vv[i_dendrite]\n mv = np.reshape(vvv, (nStart, 1501))\n vd[i_trial, i_dendrite, :] = np.mean(mv[:, 550:1000], 1)\n mvd = np.mean(vd, 0)\n binfile = file(bfname, 'wb')\n header = struct.pack('2I', mvd.shape[0], mvd.shape[1])\n binfile.write(header)\n for i in range(mvd.shape[1]):\n ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])\n binfile.write(ddata)\n binfile.close()\n\n\ndef save_ave_place(aveData, nIter, bfname):\n vd = np.zeros((nIter, 4, 20))\n for i_trial in range(nIter):\n vv = aveData[i_trial]\n for i_dendrite in range(4):\n vvv = vv[i_dendrite]\n mv = np.reshape(vvv[0:50000], (20, 2500))\n vd[i_trial, i_dendrite, :] = np.mean(mv, 1)\n mvd = np.mean(vd, 0)\n print(bfname)\n binfile = file(bfname, 'wb')\n header = struct.pack('2I', mvd.shape[0], mvd.shape[1])\n binfile.write(header)\n for i in range(mvd.shape[1]):\n ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])\n binfile.write(ddata)\n binfile.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef save_ave_replay(aveData, nIter, nStart, bfname):\n vd = np.zeros((nIter, 4, nStart))\n for i_trial in range(nIter):\n vv = aveData[i_trial]\n for i_dendrite in range(4):\n vvv = vv[i_dendrite]\n mv = np.reshape(vvv, (nStart, 1501))\n vd[i_trial, i_dendrite, :] = np.mean(mv[:, 550:1000], 1)\n mvd = np.mean(vd, 0)\n binfile = file(bfname, 'wb')\n header = struct.pack('2I', mvd.shape[0], mvd.shape[1])\n binfile.write(header)\n for i in range(mvd.shape[1]):\n ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])\n binfile.write(ddata)\n binfile.close()\n\n\ndef save_ave_place(aveData, nIter, bfname):\n vd = np.zeros((nIter, 4, 20))\n for i_trial in range(nIter):\n vv = aveData[i_trial]\n for i_dendrite in range(4):\n vvv = vv[i_dendrite]\n mv = np.reshape(vvv[0:50000], (20, 2500))\n vd[i_trial, i_dendrite, :] = np.mean(mv, 1)\n mvd = np.mean(vd, 0)\n print(bfname)\n binfile = file(bfname, 'wb')\n header = struct.pack('2I', mvd.shape[0], mvd.shape[1])\n binfile.write(header)\n for i in range(mvd.shape[1]):\n ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])\n binfile.write(ddata)\n binfile.close()\n\n\ndef save_sim(data, out_binary=False, out_vdend=False, out_pickle=False,\n outdir='data', dt_save=1):\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n modelData = sc.emptyObject()\n lb.props(modelData)\n if data.stimType == 'DStim':\n filename = 'T' + str(data.TSTOP) + '_dend' + str(data.iclampLoc[2]\n ) + '_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]\n ) + '_dI' + str(data.iRange[1] - data.iRange[0])\n elif data.stimType == 'SStim':\n filename = 'T' + str(data.TSTOP) + '_soma_N' + str(len(data.iRange)\n ) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1] -\n data.iRange[0])\n else:\n filename = 'T' + str(data.TSTOP) + '_Ne' + str(data.Ensyn\n ) + '_gA' + str(round(data.Agmax, 2)) + '_tauA' + str(data.Atau2)\n if data.NMDA:\n filename = filename + '_gN' + str(round(data.Ngmax, 2))\n if data.GABA:\n filename = 
filename + '_Ni' + str(data.Insyn) + '_gG' + str(round\n (data.Igmax, 2))\n if data.GABA_B:\n filename = filename + '_gB' + str(round(data.Bgmax, 2))\n if data.modulateNa:\n filename = filename + '_noDendNa'\n if data.stimType == 'nIter':\n filename = filename + '_tInt' + str(data.tInterval\n ) + 'ms_' + data.locBias + '_' + data.direction\n if (data.stimType == 'place') + (data.stimType == 'poisson') + (data\n .stimType == 'replay'):\n filename = filename + '_Er' + str(data.Erate) + '_Ir' + str(data\n .Irate) + '_' + data.placeType + '_rep' + str(data.nIter)\n filename = filename + '_stimseed' + str(data.stimseed)\n if data.modulateK == True:\n filename = filename + '_K0'\n if data.modulateK_local == True:\n filename = filename + '_KL0'\n if data.modulateK_parents == True:\n filename = filename + '_KP0'\n if data.modulateRmRa == True:\n filename = filename + '_RmRa'\n if data.modulateRmRaSeg == True:\n filename = filename + '_RmRaSeg'\n if data.randomW == True:\n filename = filename + '_randW'\n if out_pickle:\n dataList = [data, modelData]\n fname = './' + outdir + '/' + filename + '.pkl'\n f = open(fname, 'wb')\n pickle.dump(dataList, f)\n f.close()\n if out_binary:\n mat = np.array(data.vdata)\n L = mat.shape[1]\n dt_ratio = int(round(dt_save / data.dt))\n mat = mat[:, 0:L:dt_ratio]\n np.save('./' + outdir + '/vdata_' + filename + '.npy', mat)\n if out_vdend:\n nRep = len(data.vDdata)\n mat = np.array(data.vDdata[0])\n for i in range(1, nRep):\n mat = np.hstack((mat, data.vDdata[i]))\n L = mat.shape[1]\n dt_ratio = int(round(dt_save / data.dt))\n mat = mat[:, 0:L:dt_ratio]\n np.save('./' + outdir + '/vDdata_' + filename + '.npy', mat)\n if data.GABA:\n Ilocs = np.array(data.Ilocs)\n Elocs = np.array(data.Elocs)\n Locs = np.row_stack((Elocs, Ilocs))\n else:\n Locs = np.array(data.Elocs)\n np.save('./' + outdir + '/Elocs_' + filename + '.npy', Elocs)\n np.save('./' + outdir + '/Ilocs_' + filename + '.npy', Ilocs)\n if len(data.stim) > 0:\n stim = data.stim\n 
np.save('./' + outdir + '/stim_' + filename + '.npy', stim)\n",
"step-4": "import pickle\nimport saveClass as sc\nimport libcell as lb\nimport numpy as np\nimport struct\nimport os\n\n\ndef save_ave_replay(aveData, nIter, nStart, bfname):\n vd = np.zeros((nIter, 4, nStart))\n for i_trial in range(nIter):\n vv = aveData[i_trial]\n for i_dendrite in range(4):\n vvv = vv[i_dendrite]\n mv = np.reshape(vvv, (nStart, 1501))\n vd[i_trial, i_dendrite, :] = np.mean(mv[:, 550:1000], 1)\n mvd = np.mean(vd, 0)\n binfile = file(bfname, 'wb')\n header = struct.pack('2I', mvd.shape[0], mvd.shape[1])\n binfile.write(header)\n for i in range(mvd.shape[1]):\n ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])\n binfile.write(ddata)\n binfile.close()\n\n\ndef save_ave_place(aveData, nIter, bfname):\n vd = np.zeros((nIter, 4, 20))\n for i_trial in range(nIter):\n vv = aveData[i_trial]\n for i_dendrite in range(4):\n vvv = vv[i_dendrite]\n mv = np.reshape(vvv[0:50000], (20, 2500))\n vd[i_trial, i_dendrite, :] = np.mean(mv, 1)\n mvd = np.mean(vd, 0)\n print(bfname)\n binfile = file(bfname, 'wb')\n header = struct.pack('2I', mvd.shape[0], mvd.shape[1])\n binfile.write(header)\n for i in range(mvd.shape[1]):\n ddata = struct.pack('%id' % mvd.shape[0], *mvd[:, i])\n binfile.write(ddata)\n binfile.close()\n\n\ndef save_sim(data, out_binary=False, out_vdend=False, out_pickle=False,\n outdir='data', dt_save=1):\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n modelData = sc.emptyObject()\n lb.props(modelData)\n if data.stimType == 'DStim':\n filename = 'T' + str(data.TSTOP) + '_dend' + str(data.iclampLoc[2]\n ) + '_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]\n ) + '_dI' + str(data.iRange[1] - data.iRange[0])\n elif data.stimType == 'SStim':\n filename = 'T' + str(data.TSTOP) + '_soma_N' + str(len(data.iRange)\n ) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1] -\n data.iRange[0])\n else:\n filename = 'T' + str(data.TSTOP) + '_Ne' + str(data.Ensyn\n ) + '_gA' + str(round(data.Agmax, 2)) + '_tauA' + str(data.Atau2)\n if 
data.NMDA:\n filename = filename + '_gN' + str(round(data.Ngmax, 2))\n if data.GABA:\n filename = filename + '_Ni' + str(data.Insyn) + '_gG' + str(round\n (data.Igmax, 2))\n if data.GABA_B:\n filename = filename + '_gB' + str(round(data.Bgmax, 2))\n if data.modulateNa:\n filename = filename + '_noDendNa'\n if data.stimType == 'nIter':\n filename = filename + '_tInt' + str(data.tInterval\n ) + 'ms_' + data.locBias + '_' + data.direction\n if (data.stimType == 'place') + (data.stimType == 'poisson') + (data\n .stimType == 'replay'):\n filename = filename + '_Er' + str(data.Erate) + '_Ir' + str(data\n .Irate) + '_' + data.placeType + '_rep' + str(data.nIter)\n filename = filename + '_stimseed' + str(data.stimseed)\n if data.modulateK == True:\n filename = filename + '_K0'\n if data.modulateK_local == True:\n filename = filename + '_KL0'\n if data.modulateK_parents == True:\n filename = filename + '_KP0'\n if data.modulateRmRa == True:\n filename = filename + '_RmRa'\n if data.modulateRmRaSeg == True:\n filename = filename + '_RmRaSeg'\n if data.randomW == True:\n filename = filename + '_randW'\n if out_pickle:\n dataList = [data, modelData]\n fname = './' + outdir + '/' + filename + '.pkl'\n f = open(fname, 'wb')\n pickle.dump(dataList, f)\n f.close()\n if out_binary:\n mat = np.array(data.vdata)\n L = mat.shape[1]\n dt_ratio = int(round(dt_save / data.dt))\n mat = mat[:, 0:L:dt_ratio]\n np.save('./' + outdir + '/vdata_' + filename + '.npy', mat)\n if out_vdend:\n nRep = len(data.vDdata)\n mat = np.array(data.vDdata[0])\n for i in range(1, nRep):\n mat = np.hstack((mat, data.vDdata[i]))\n L = mat.shape[1]\n dt_ratio = int(round(dt_save / data.dt))\n mat = mat[:, 0:L:dt_ratio]\n np.save('./' + outdir + '/vDdata_' + filename + '.npy', mat)\n if data.GABA:\n Ilocs = np.array(data.Ilocs)\n Elocs = np.array(data.Elocs)\n Locs = np.row_stack((Elocs, Ilocs))\n else:\n Locs = np.array(data.Elocs)\n np.save('./' + outdir + '/Elocs_' + filename + '.npy', Elocs)\n np.save('./' + 
outdir + '/Ilocs_' + filename + '.npy', Ilocs)\n if len(data.stim) > 0:\n stim = data.stim\n np.save('./' + outdir + '/stim_' + filename + '.npy', stim)\n",
"step-5": "import pickle\nimport saveClass as sc\nimport libcell as lb\nimport numpy as np\nimport struct\nimport os\n\n# def save_Ldend(Ldends, bfname):\n# # create a binary file\n# bfname='Dend_length.bin'\n# binfile = file(bfname, 'wb')\n# # and write out two integers with the row and column dimension\n# header = struct.pack('2I', Ldends.shape[0], Ldends.shape[1])\n# binfile.write(header)\n# # then loop over columns and write each\n# for i in range(Ldends.shape[1]):\n# ddata = struct.pack('%id' % Ldends.shape[0], *Ldends[:,i])\n# binfile.write(ddata)\n# binfile.close()\n\ndef save_ave_replay(aveData, nIter, nStart, bfname):\n vd = np.zeros((nIter, 4, nStart))\n\n for i_trial in range(nIter):\n vv = aveData[i_trial]\n for i_dendrite in range(4):\n vvv = vv[i_dendrite]\n mv = np.reshape(vvv, (nStart, 1501))\n vd[i_trial, i_dendrite, :] = np.mean(mv[:,550:1000], 1)\n\n mvd = np.mean(vd, 0)\n\n # print (bfname)\n\n # create a binary file\n binfile = file(bfname, 'wb')\n # and write out two integers with the row and column dimension\n header = struct.pack('2I', mvd.shape[0], mvd.shape[1])\n binfile.write(header)\n # then loop over columns and write each\n for i in range(mvd.shape[1]):\n ddata = struct.pack('%id' % mvd.shape[0], *mvd[:,i])\n binfile.write(ddata)\n binfile.close()\n\ndef save_ave_place(aveData, nIter, bfname):\n vd = np.zeros((nIter, 4, 20))\n\n for i_trial in range(nIter):\n vv = aveData[i_trial]\n for i_dendrite in range(4):\n vvv = vv[i_dendrite]\n mv = np.reshape(vvv[0:50000], (20, 2500))\n vd[i_trial, i_dendrite, :] = np.mean(mv, 1)\n\n mvd = np.mean(vd, 0)\n\n print (bfname)\n\n # create a binary file\n binfile = file(bfname, 'wb')\n # and write out two integers with the row and column dimension\n header = struct.pack('2I', mvd.shape[0], mvd.shape[1])\n binfile.write(header)\n # then loop over columns and write each\n for i in range(mvd.shape[1]):\n ddata = struct.pack('%id' % mvd.shape[0], *mvd[:,i])\n binfile.write(ddata)\n 
binfile.close()\n\n\ndef save_sim(data, out_binary=False, out_vdend=False, out_pickle=False, outdir='data', dt_save=1):\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n modelData = sc.emptyObject()\n lb.props(modelData)\n\n if (data.stimType=='DStim'):\n filename = 'T' + str(data.TSTOP) + '_dend' + str(data.iclampLoc[2]) + '_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1]-data.iRange[0]) \n elif (data.stimType=='SStim'):\n filename = 'T' + str(data.TSTOP) + '_soma_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1]-data.iRange[0]) \n\n else :\n filename = 'T' + str(data.TSTOP) + '_Ne' + str(data.Ensyn)+'_gA'+str(round(data.Agmax,2)) + '_tauA' + str(data.Atau2)\n if (data.NMDA):\n filename = filename + '_gN'+str(round(data.Ngmax,2))\n if (data.GABA):\n filename = filename + '_Ni'+str(data.Insyn) + '_gG'+str(round(data.Igmax, 2))\n if (data.GABA_B):\n filename = filename + '_gB'+str(round(data.Bgmax, 2))\n\n if (data.modulateNa):\n filename = filename + '_noDendNa'\n\n if (data.stimType == 'nIter'):\n filename = filename + '_tInt' + str(data.tInterval) + 'ms_' + data.locBias + '_' + data.direction\n \n if ((data.stimType == 'place') + (data.stimType == 'poisson') + (data.stimType == 'replay')):\n filename = filename + \"_Er\" + str(data.Erate) + '_Ir'+str(data.Irate) + '_' + data.placeType + '_rep' + str(data.nIter)\n filename = filename + '_stimseed' + str(data.stimseed)\n\n if (data.modulateK == True):\n filename = filename + '_K0'\n if (data.modulateK_local == True):\n filename = filename + '_KL0'\n if (data.modulateK_parents == True):\n filename = filename + '_KP0'\n\n if (data.modulateRmRa == True):\n filename = filename + '_RmRa'\n if (data.modulateRmRaSeg == True):\n filename = filename + '_RmRaSeg'\n if (data.randomW == True):\n filename = filename + '_randW'\n\n if out_pickle:\n dataList = [data, modelData]\n fname = './'+outdir+'/'+filename+'.pkl'\n f = open(fname, 'wb')\n 
pickle.dump(dataList, f)\n f.close()\n\n\n if out_binary:\n #---------------------------------------------\n # WRITE the response in a binary file to read it with R\n mat = np.array(data.vdata)\n L = mat.shape[1]\n dt_ratio = int(round(dt_save / data.dt))\n mat = mat[:,0:L:dt_ratio]\n\n np.save(\"./\"+outdir+\"/vdata_\"+filename+\".npy\", mat)\n\n #bfname = './'+outdir+'/vdata_'+filename+'.bin'\n #print (bfname)\n # create a binary file\n #binfile = file(bfname, 'wb')\n # and write out two integers with the row and column dimension\n #header = struct.pack('2I', mat.shape[0], mat.shape[1])\n #binfile.write(header)\n # then loop over columns and write each\n #for i in range(mat.shape[1]):\n #ddata = struct.pack('%id' % mat.shape[0], *mat[:,i])\n #binfile.write(ddata)\n #binfile.close()\n\n if out_vdend:\n # # WRITE the dendritic response\n nRep = len(data.vDdata)\n mat = np.array(data.vDdata[0])\n for i in range(1, nRep):\n mat = np.hstack((mat, data.vDdata[i]))\n\n L = mat.shape[1]\n dt_ratio = int(round(dt_save / data.dt))\n mat = mat[:,0:L:dt_ratio]\n \n np.save(\"./\"+outdir+\"/vDdata_\"+filename+\".npy\", mat)\n \n # bfname = './'+outdir+'/vDdata_'+filename+'.bin'\n # # create a binary file\n # binfile = file(bfname, 'wb')\n # # and write out two integers with the row and column dimension\n # header = struct.pack('2I', mat.shape[0], mat.shape[1])\n # binfile.write(header)\n # # then loop over columns and write each\n # for i in range(mat.shape[1]):\n # ddata = struct.pack('%id' % mat.shape[0], *mat[:,i])\n # binfile.write(ddata)\n # binfile.close()\n \n\n # # ---------------------------------------------\n # # WRITE the location of the synapses \n if (data.GABA) :\n Ilocs = np.array(data.Ilocs) \n #Ilocs[:,1] = 1 + Ilocs[:,1] # code that these are inhibitory synapses\n Elocs = np.array(data.Elocs)\n Locs = np.row_stack((Elocs, Ilocs))\n else :\n Locs = np.array(data.Elocs)\n\n #bfname = './'+outdir+'/synlocs_'+filename+'.npy'\n #print (bfname)\n 
np.save(\"./\"+outdir+\"/Elocs_\"+filename+\".npy\", Elocs)\n np.save(\"./\"+outdir+\"/Ilocs_\"+filename+\".npy\", Ilocs)\n # # create a binary file\n # binfile = file(bfname, 'wb')\n # # and write out two integers with the row and column dimension\n # header = struct.pack('2I', Locs.shape[0], Locs.shape[1])\n # binfile.write(header)\n # # then loop over columns and write each\n # for i in range(Locs.shape[1]):\n # ddata = struct.pack('%id' % Locs.shape[0], *Locs[:,i])\n # binfile.write(ddata)\n # binfile.close()\n\n # #---------------------------------------------\n # Write the input spike train\n if (len(data.stim)>0):\n stim = data.stim\n #bfname = './'+outdir+'/stim_'+filename+'.bin'\n np.save(\"./\"+outdir+\"/stim_\"+filename+\".npy\", stim)\n\n # create a binary file\n #binfile = file(bfname, 'wb')\n # and write out two integers with the row and column dimension\n #header = struct.pack('2I', stim.shape[0], stim.shape[1])\n #binfile.write(header)\n # then loop over columns and write each\n #for i in range(stim.shape[1]):\n #ddata = struct.pack('%id' % stim.shape[0], *stim[:,i])\n #binfile.write(ddata)\n #binfile.close()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print(True)
print(False)
<|reserved_special_token_1|>
# 30_Days_Of_Code
# Day 2
# Boolean
print(True)
print(False)
|
flexible
|
{
"blob_id": "f1ca3d7ff7efcf500f1a16e415b13c47fd08688d",
"index": 5044,
"step-1": "<mask token>\n",
"step-2": "print(True)\nprint(False)\n",
"step-3": "# 30_Days_Of_Code\n# Day 2\n# Boolean\nprint(True)\nprint(False)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for member in team:
print('Hello, ' + member)
<|reserved_special_token_1|>
team = input('Wymien wszystkich czlonkow swojego zespolu: ').split(',')
for member in team:
print('Hello, ' + member)
<|reserved_special_token_1|>
team = input("Wymien wszystkich czlonkow swojego zespolu: ").split(",")
for member in team:
print("Hello, " + member)
|
flexible
|
{
"blob_id": "5d3f7d74cf1cc2612d599c65393abed11181c981",
"index": 2300,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor member in team:\n print('Hello, ' + member)\n",
"step-3": "team = input('Wymien wszystkich czlonkow swojego zespolu: ').split(',')\nfor member in team:\n print('Hello, ' + member)\n",
"step-4": "team = input(\"Wymien wszystkich czlonkow swojego zespolu: \").split(\",\")\nfor member in team:\n print(\"Hello, \" + member)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import gc
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import cv2
import torch
from torch.utils.data import DataLoader
from torch import optim
from torch.optim import lr_scheduler
from dataset.car_dataset import CarDataset
from nn.network import MyUNet
from utils.utils import coords2str, extract_coords
from utils.evaluate_map import compute_map
from utils.utils import visualize
from efficientnet_pytorch import EfficientNet
camera_matrix = np.array([[2304.5479, 0, 1686.2379],
[0, 2305.8757, 1354.9849],
[0, 0, 1]], dtype=np.float32)
device = torch.device("cuda")
IMG_WIDTH = 1024
IMG_HEIGHT = IMG_WIDTH // 16 * 5
MODEL_SCALE = 8
if __name__ == "__main__":
ROOT_PATH = "/media/andreis/storage/datasets/pku-autonomous-driving/"
df = pd.read_csv(ROOT_PATH + "train.csv")
df_test = pd.read_csv(ROOT_PATH + "sample_submission.csv")
train_images_dir = ROOT_PATH + "train_images/"
test_images_dir = ROOT_PATH + "test_images/"
df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)
df_val_gt = df_val.copy()
# create dataset objects
train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)
val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)
test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = MyUNet(10).to(device)
model.load_state_dict(torch.load("model.pth"))
model.eval()
val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=False, num_workers=4)
#img, mask, regr = val_dataset[0]
#output = model(torch.tensor(img[None]).to(device))
#output = output.data.cpu().numpy()
predictions = []
for img, _, _, img0 in tqdm(val_loader):
img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)
img0 = torch.squeeze(img0).numpy()#p.moveaxis(torch.squeeze(img).numpy(), 0, 2)
#print(img_np.shape)
with torch.no_grad():
#output = model(torch.tensor(img[None]).to(device))
output = model(img.to(device))
output = output.data.cpu().numpy()
# looping over batch items
for out in output:
coords = extract_coords(out)
print(coords)
# s = coords2str(coords)
#predictions.append(s)
q_img = visualize(img0, coords, camera_matrix)
print(q_img.shape)
q_img = cv2.resize(q_img, (int(q_img.shape[1]*0.25), int(q_img.shape[0]*0.25) ))
# show predictions on image
cv2.imshow("Prediction", q_img)
cv2.waitKey()
# cv2.imshow("Predictions", visualize(img_np, coords, camera_matrix))
# cv2.waitKey()
#df_val['PredictionString'] = predictions
#df_test.to_csv('predictions.csv', index=False)
#print(df_val.head())
#def sigmoid(x):
# return 1 / (1 + np.exp(-x))
#map = compute_map(df_val_gt, df_val)
#print(map)
#logits = output[0,0].data.cpu().numpy()
#sigmoids = np.apply_along_axis(sigmoid, -1, logits)
#print(output.shape)
#print(logits.shape)
#print(sigmoids.shape)
#print(sigmoids)
#print(np.max(sigmoids))
#points = np.argwhere(logits > 0)
#print(points)
#preds = extract_coords(output)
#img = np.rollaxis(img, 0, 3)
#print(type(img))
#cv2.imshow("imagine", img)
#cv2.imshow("mask", mask)
#cv2.imshow("regr", regr[:,:,-1])
#cv2.imshow("predictions", sigmoids)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "1861c394fb02643d2e6ac8362f3340f512ef6d72",
"index": 6402,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n ROOT_PATH = '/media/andreis/storage/datasets/pku-autonomous-driving/'\n df = pd.read_csv(ROOT_PATH + 'train.csv')\n df_test = pd.read_csv(ROOT_PATH + 'sample_submission.csv')\n train_images_dir = ROOT_PATH + 'train_images/'\n test_images_dir = ROOT_PATH + 'test_images/'\n df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)\n df_val_gt = df_val.copy()\n train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)\n val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)\n test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = MyUNet(10).to(device)\n model.load_state_dict(torch.load('model.pth'))\n model.eval()\n val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=\n False, num_workers=4)\n predictions = []\n for img, _, _, img0 in tqdm(val_loader):\n img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)\n img0 = torch.squeeze(img0).numpy()\n with torch.no_grad():\n output = model(img.to(device))\n output = output.data.cpu().numpy()\n for out in output:\n coords = extract_coords(out)\n print(coords)\n q_img = visualize(img0, coords, camera_matrix)\n print(q_img.shape)\n q_img = cv2.resize(q_img, (int(q_img.shape[1] * 0.25), int(\n q_img.shape[0] * 0.25)))\n cv2.imshow('Prediction', q_img)\n cv2.waitKey()\n",
"step-3": "<mask token>\ncamera_matrix = np.array([[2304.5479, 0, 1686.2379], [0, 2305.8757, \n 1354.9849], [0, 0, 1]], dtype=np.float32)\ndevice = torch.device('cuda')\nIMG_WIDTH = 1024\nIMG_HEIGHT = IMG_WIDTH // 16 * 5\nMODEL_SCALE = 8\nif __name__ == '__main__':\n ROOT_PATH = '/media/andreis/storage/datasets/pku-autonomous-driving/'\n df = pd.read_csv(ROOT_PATH + 'train.csv')\n df_test = pd.read_csv(ROOT_PATH + 'sample_submission.csv')\n train_images_dir = ROOT_PATH + 'train_images/'\n test_images_dir = ROOT_PATH + 'test_images/'\n df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)\n df_val_gt = df_val.copy()\n train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)\n val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)\n test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = MyUNet(10).to(device)\n model.load_state_dict(torch.load('model.pth'))\n model.eval()\n val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=\n False, num_workers=4)\n predictions = []\n for img, _, _, img0 in tqdm(val_loader):\n img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)\n img0 = torch.squeeze(img0).numpy()\n with torch.no_grad():\n output = model(img.to(device))\n output = output.data.cpu().numpy()\n for out in output:\n coords = extract_coords(out)\n print(coords)\n q_img = visualize(img0, coords, camera_matrix)\n print(q_img.shape)\n q_img = cv2.resize(q_img, (int(q_img.shape[1] * 0.25), int(\n q_img.shape[0] * 0.25)))\n cv2.imshow('Prediction', q_img)\n cv2.waitKey()\n",
"step-4": "import gc\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\nimport cv2\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nfrom torch.optim import lr_scheduler\nfrom dataset.car_dataset import CarDataset\nfrom nn.network import MyUNet\nfrom utils.utils import coords2str, extract_coords\nfrom utils.evaluate_map import compute_map\nfrom utils.utils import visualize\nfrom efficientnet_pytorch import EfficientNet\ncamera_matrix = np.array([[2304.5479, 0, 1686.2379], [0, 2305.8757, \n 1354.9849], [0, 0, 1]], dtype=np.float32)\ndevice = torch.device('cuda')\nIMG_WIDTH = 1024\nIMG_HEIGHT = IMG_WIDTH // 16 * 5\nMODEL_SCALE = 8\nif __name__ == '__main__':\n ROOT_PATH = '/media/andreis/storage/datasets/pku-autonomous-driving/'\n df = pd.read_csv(ROOT_PATH + 'train.csv')\n df_test = pd.read_csv(ROOT_PATH + 'sample_submission.csv')\n train_images_dir = ROOT_PATH + 'train_images/'\n test_images_dir = ROOT_PATH + 'test_images/'\n df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)\n df_val_gt = df_val.copy()\n train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)\n val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)\n test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = MyUNet(10).to(device)\n model.load_state_dict(torch.load('model.pth'))\n model.eval()\n val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=\n False, num_workers=4)\n predictions = []\n for img, _, _, img0 in tqdm(val_loader):\n img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)\n img0 = torch.squeeze(img0).numpy()\n with torch.no_grad():\n output = model(img.to(device))\n output = output.data.cpu().numpy()\n for out in output:\n coords = extract_coords(out)\n print(coords)\n q_img = visualize(img0, coords, camera_matrix)\n 
print(q_img.shape)\n q_img = cv2.resize(q_img, (int(q_img.shape[1] * 0.25), int(\n q_img.shape[0] * 0.25)))\n cv2.imshow('Prediction', q_img)\n cv2.waitKey()\n",
"step-5": "import gc\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom tqdm import tqdm\r\nimport cv2 \r\n\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nfrom torch import optim\r\nfrom torch.optim import lr_scheduler\r\n\r\nfrom dataset.car_dataset import CarDataset\r\nfrom nn.network import MyUNet\r\nfrom utils.utils import coords2str, extract_coords\r\nfrom utils.evaluate_map import compute_map\r\nfrom utils.utils import visualize\r\n\r\nfrom efficientnet_pytorch import EfficientNet\r\n\r\ncamera_matrix = np.array([[2304.5479, 0, 1686.2379],\r\n [0, 2305.8757, 1354.9849],\r\n [0, 0, 1]], dtype=np.float32)\r\n\r\ndevice = torch.device(\"cuda\")\r\n\r\nIMG_WIDTH = 1024\r\nIMG_HEIGHT = IMG_WIDTH // 16 * 5\r\nMODEL_SCALE = 8\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n ROOT_PATH = \"/media/andreis/storage/datasets/pku-autonomous-driving/\"\r\n df = pd.read_csv(ROOT_PATH + \"train.csv\")\r\n df_test = pd.read_csv(ROOT_PATH + \"sample_submission.csv\")\r\n\r\n train_images_dir = ROOT_PATH + \"train_images/\"\r\n test_images_dir = ROOT_PATH + \"test_images/\"\r\n\r\n df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)\r\n df_val_gt = df_val.copy()\r\n\r\n # create dataset objects\r\n train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)\r\n val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)\r\n test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)\r\n\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n model = MyUNet(10).to(device)\r\n\r\n model.load_state_dict(torch.load(\"model.pth\"))\r\n model.eval()\r\n\r\n val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=False, num_workers=4)\r\n\r\n #img, mask, regr = val_dataset[0]\r\n\r\n #output = model(torch.tensor(img[None]).to(device))\r\n\r\n #output = output.data.cpu().numpy()\r\n \r\n predictions = []\r\n for img, _, _, img0 in 
tqdm(val_loader):\r\n img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)\r\n img0 = torch.squeeze(img0).numpy()#p.moveaxis(torch.squeeze(img).numpy(), 0, 2)\r\n #print(img_np.shape)\r\n with torch.no_grad():\r\n #output = model(torch.tensor(img[None]).to(device))\r\n output = model(img.to(device))\r\n output = output.data.cpu().numpy()\r\n # looping over batch items\r\n for out in output:\r\n coords = extract_coords(out)\r\n print(coords)\r\n # s = coords2str(coords)\r\n \r\n #predictions.append(s)\r\n q_img = visualize(img0, coords, camera_matrix)\r\n print(q_img.shape)\r\n q_img = cv2.resize(q_img, (int(q_img.shape[1]*0.25), int(q_img.shape[0]*0.25) ))\r\n # show predictions on image\r\n cv2.imshow(\"Prediction\", q_img)\r\n cv2.waitKey()\r\n # cv2.imshow(\"Predictions\", visualize(img_np, coords, camera_matrix))\r\n # cv2.waitKey()\r\n\r\n\r\n #df_val['PredictionString'] = predictions\r\n #df_test.to_csv('predictions.csv', index=False)\r\n #print(df_val.head())\r\n\r\n #def sigmoid(x):\r\n # return 1 / (1 + np.exp(-x))\r\n\r\n #map = compute_map(df_val_gt, df_val)\r\n #print(map)\r\n\r\n #logits = output[0,0].data.cpu().numpy()\r\n #sigmoids = np.apply_along_axis(sigmoid, -1, logits)\r\n #print(output.shape)\r\n #print(logits.shape)\r\n #print(sigmoids.shape)\r\n #print(sigmoids)\r\n #print(np.max(sigmoids))\r\n\r\n #points = np.argwhere(logits > 0)\r\n #print(points)\r\n #preds = extract_coords(output)\r\n\r\n\r\n #img = np.rollaxis(img, 0, 3)\r\n #print(type(img))\r\n\r\n #cv2.imshow(\"imagine\", img)\r\n #cv2.imshow(\"mask\", mask)\r\n #cv2.imshow(\"regr\", regr[:,:,-1])\r\n #cv2.imshow(\"predictions\", sigmoids)\r\n #cv2.waitKey(0)\r\n #cv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
"""Build the web-app test-case hierarchy with anytree and export it as a PNG.

Each Node represents one test scenario; the parent links encode which
scenario must succeed before its children become reachable.  The tree is
rendered to ``webtest.png`` via Graphviz (the ``dot`` binary must be on
PATH for ``DotExporter.to_picture`` to work).
"""

# All imports belong at the top of the file (PEP 8); the original placed
# the DotExporter import between the node definitions and its use.
# RenderTree is kept (unused here) in case callers import it from this module.
from anytree import Node, RenderTree
from anytree.exporter import DotExporter

# Root of the scenario tree.
webtest = Node("WebappTest")

# Account creation and login path.
registration = Node("Registration", parent=webtest)
smsconfirm = Node("SMSconfirm", parent=registration)
login = Node("Login", parent=smsconfirm)

# Plain-user flow: upload a CV, then match it against job descriptions.
useruploadCV = Node("UserUploadCV", parent=login)
usermatchJD = Node("UserMatchJD", parent=useruploadCV)

# Member flow: become a member, then manage projects.
bemember = Node("BeMember", parent=login)
addprj = Node("AddProject", parent=bemember)

# Member CV handling.
memuploadCV = Node("MemberUploadCV", parent=addprj)
memupfollowupCV = Node("MemberFollowupCV", parent=memuploadCV)
previewCV = Node("PreviewCV", parent=memuploadCV)

# Bidding / customer / JD-matching branch.
addbid = Node("AddBidding", parent=addprj)
modbid = Node("ModifyBidding", parent=addbid)
addcus = Node("AddCustomer", parent=addbid)
addJD = Node("AddJD", parent=addcus)
JDmatchCV = Node("JDmatchCV", parent=addJD)
JDmatchCVMultiDB = Node("JDmatchCVMultiDB", parent=JDmatchCV)
previewMatchCV = Node("previewMatchCV", parent=JDmatchCVMultiDB)
CVraderChart = Node("CVraderChart", parent=JDmatchCVMultiDB)

# Render the whole tree to an image file in the working directory.
DotExporter(webtest).to_picture("webtest.png")
|
normal
|
{
"blob_id": "33ac328b2bf16380b50c58013bd0d4d888dc3952",
"index": 4693,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nDotExporter(webtest).to_picture('webtest.png')\n",
"step-3": "<mask token>\nwebtest = Node('WebappTest')\nregistration = Node('Registration', parent=webtest)\nsmsconfirm = Node('SMSconfirm', parent=registration)\nlogin = Node('Login', parent=smsconfirm)\nuseruploadCV = Node('UserUploadCV', parent=login)\nusermatchJD = Node('UserMatchJD', parent=useruploadCV)\nbemember = Node('BeMember', parent=login)\naddprj = Node('AddProject', parent=bemember)\nmemuploadCV = Node('MemberUploadCV', parent=addprj)\nmemupfollowupCV = Node('MemberFollowupCV', parent=memuploadCV)\npreviewCV = Node('PreviewCV', parent=memuploadCV)\naddbid = Node('AddBidding', parent=addprj)\nmodbid = Node('ModifyBidding', parent=addbid)\naddcus = Node('AddCustomer', parent=addbid)\naddJD = Node('AddJD', parent=addcus)\nJDmatchCV = Node('JDmatchCV', parent=addJD)\nJDmatchCVMultiDB = Node('JDmatchCVMultiDB', parent=JDmatchCV)\npreviewMatchCV = Node('previewMatchCV', parent=JDmatchCVMultiDB)\nCVraderChart = Node('CVraderChart', parent=JDmatchCVMultiDB)\n<mask token>\nDotExporter(webtest).to_picture('webtest.png')\n",
"step-4": "from anytree import Node, RenderTree\nwebtest = Node('WebappTest')\nregistration = Node('Registration', parent=webtest)\nsmsconfirm = Node('SMSconfirm', parent=registration)\nlogin = Node('Login', parent=smsconfirm)\nuseruploadCV = Node('UserUploadCV', parent=login)\nusermatchJD = Node('UserMatchJD', parent=useruploadCV)\nbemember = Node('BeMember', parent=login)\naddprj = Node('AddProject', parent=bemember)\nmemuploadCV = Node('MemberUploadCV', parent=addprj)\nmemupfollowupCV = Node('MemberFollowupCV', parent=memuploadCV)\npreviewCV = Node('PreviewCV', parent=memuploadCV)\naddbid = Node('AddBidding', parent=addprj)\nmodbid = Node('ModifyBidding', parent=addbid)\naddcus = Node('AddCustomer', parent=addbid)\naddJD = Node('AddJD', parent=addcus)\nJDmatchCV = Node('JDmatchCV', parent=addJD)\nJDmatchCVMultiDB = Node('JDmatchCVMultiDB', parent=JDmatchCV)\npreviewMatchCV = Node('previewMatchCV', parent=JDmatchCVMultiDB)\nCVraderChart = Node('CVraderChart', parent=JDmatchCVMultiDB)\nfrom anytree.exporter import DotExporter\nDotExporter(webtest).to_picture('webtest.png')\n",
"step-5": "#!/usr/bin/env python\n\nfrom anytree import Node, RenderTree\n\n\nwebtest = Node(\"WebappTest\")\nregistration = Node(\"Registration\", parent=webtest)\nsmsconfirm = Node(\"SMSconfirm\", parent=registration)\nlogin = Node(\"Login\", parent=smsconfirm)\nuseruploadCV = Node(\"UserUploadCV\", parent=login)\nusermatchJD = Node(\"UserMatchJD\", parent=useruploadCV)\nbemember = Node(\"BeMember\", parent=login)\naddprj = Node(\"AddProject\", parent=bemember)\nmemuploadCV = Node(\"MemberUploadCV\", parent=addprj)\nmemupfollowupCV = Node(\"MemberFollowupCV\", parent=memuploadCV)\npreviewCV = Node(\"PreviewCV\", parent=memuploadCV)\naddbid = Node(\"AddBidding\", parent=addprj)\nmodbid = Node(\"ModifyBidding\", parent=addbid)\naddcus = Node(\"AddCustomer\", parent=addbid)\naddJD = Node(\"AddJD\", parent=addcus)\nJDmatchCV = Node(\"JDmatchCV\", parent=addJD)\nJDmatchCVMultiDB = Node(\"JDmatchCVMultiDB\", parent=JDmatchCV)\npreviewMatchCV = Node(\"previewMatchCV\", parent=JDmatchCVMultiDB)\nCVraderChart = Node(\"CVraderChart\", parent=JDmatchCVMultiDB)\n\n\nfrom anytree.exporter import DotExporter\nDotExporter(webtest).to_picture(\"webtest.png\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@given('the browser is open, navigate to the SCALE URL, and login')
def the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,
root_password):
"""the browser is open, navigate to the SCALE URL, and login."""
if nas_ip not in driver.current_url:
driver.get(f'http://{nas_ip}')
assert wait_on_element(driver, 10, xpaths.login.user_Input)
if not is_element_present(driver, xpaths.side_Menu.dashboard):
assert wait_on_element(driver, 10, xpaths.login.user_Input)
driver.find_element_by_xpath(xpaths.login.user_Input).clear()
driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')
driver.find_element_by_xpath(xpaths.login.password_Input).clear()
driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(
root_password)
assert wait_on_element(driver, 5, xpaths.login.signin_Button)
driver.find_element_by_xpath(xpaths.login.signin_Button).click()
else:
assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()
@when('on the dashboard click on Credentials and Local Groups')
def on_the_dashboard_click_on_credentials_and_local_groups(driver):
"""on the dashboard click on Credentials and Local Groups."""
assert wait_on_element(driver, 10, xpaths.dashboard.title)
assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)
assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()
assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()
<|reserved_special_token_0|>
@then('on the Add Group side box input the group name')
def on_the_add_group_side_box_input_the_group_name(driver):
"""on the Add Group side box input the group name."""
assert wait_on_element(driver, 7, xpaths.add_Group.title)
assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')
driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()
driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(
'qetest')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@given('the browser is open, navigate to the SCALE URL, and login')
def the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,
root_password):
"""the browser is open, navigate to the SCALE URL, and login."""
if nas_ip not in driver.current_url:
driver.get(f'http://{nas_ip}')
assert wait_on_element(driver, 10, xpaths.login.user_Input)
if not is_element_present(driver, xpaths.side_Menu.dashboard):
assert wait_on_element(driver, 10, xpaths.login.user_Input)
driver.find_element_by_xpath(xpaths.login.user_Input).clear()
driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')
driver.find_element_by_xpath(xpaths.login.password_Input).clear()
driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(
root_password)
assert wait_on_element(driver, 5, xpaths.login.signin_Button)
driver.find_element_by_xpath(xpaths.login.signin_Button).click()
else:
assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()
@when('on the dashboard click on Credentials and Local Groups')
def on_the_dashboard_click_on_credentials_and_local_groups(driver):
"""on the dashboard click on Credentials and Local Groups."""
assert wait_on_element(driver, 10, xpaths.dashboard.title)
assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)
assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()
assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()
@then('on the Groups page, click Add')
def on_the_groups_page_click_add(driver):
"""on the Groups page, click Add."""
assert wait_on_element(driver, 10, xpaths.groups.title)
assert wait_on_element(driver, 10, xpaths.button.add, 'clickable')
driver.find_element_by_xpath(xpaths.button.add).click()
@then('on the Add Group side box input the group name')
def on_the_add_group_side_box_input_the_group_name(driver):
"""on the Add Group side box input the group name."""
assert wait_on_element(driver, 7, xpaths.add_Group.title)
assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')
driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()
driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(
'qetest')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@given('the browser is open, navigate to the SCALE URL, and login')
def the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,
root_password):
"""the browser is open, navigate to the SCALE URL, and login."""
if nas_ip not in driver.current_url:
driver.get(f'http://{nas_ip}')
assert wait_on_element(driver, 10, xpaths.login.user_Input)
if not is_element_present(driver, xpaths.side_Menu.dashboard):
assert wait_on_element(driver, 10, xpaths.login.user_Input)
driver.find_element_by_xpath(xpaths.login.user_Input).clear()
driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')
driver.find_element_by_xpath(xpaths.login.password_Input).clear()
driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(
root_password)
assert wait_on_element(driver, 5, xpaths.login.signin_Button)
driver.find_element_by_xpath(xpaths.login.signin_Button).click()
else:
assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()
@when('on the dashboard click on Credentials and Local Groups')
def on_the_dashboard_click_on_credentials_and_local_groups(driver):
"""on the dashboard click on Credentials and Local Groups."""
assert wait_on_element(driver, 10, xpaths.dashboard.title)
assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)
assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()
assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()
@then('on the Groups page, click Add')
def on_the_groups_page_click_add(driver):
"""on the Groups page, click Add."""
assert wait_on_element(driver, 10, xpaths.groups.title)
assert wait_on_element(driver, 10, xpaths.button.add, 'clickable')
driver.find_element_by_xpath(xpaths.button.add).click()
@then('on the Add Group side box input the group name')
def on_the_add_group_side_box_input_the_group_name(driver):
"""on the Add Group side box input the group name."""
assert wait_on_element(driver, 7, xpaths.add_Group.title)
assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')
driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()
driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(
'qetest')
@then('click save and verify the group was added')
def click_save_and_verify_the_group_was_added(driver):
"""click save and verify the group was added."""
assert wait_on_element(driver, 7, xpaths.button.save, 'clickable')
driver.find_element_by_xpath(xpaths.button.save).click()
assert wait_on_element_disappear(driver, 20, xpaths.progress.progressbar)
assert wait_on_element(driver, 10, xpaths.groups.title)
assert wait_on_element(driver, 10, xpaths.groups.qetest_Name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pytest
import xpaths
from function import wait_on_element, is_element_present, wait_on_element_disappear
from pytest_bdd import given, scenario, then, when
@pytest.mark.dependency(name='Set_Group')
@scenario('features/NAS-T1250.feature',
'Verify that you can create a new group')
def test_verify_that_you_can_create_a_new_group():
"""Verify that you can create a new group."""
@given('the browser is open, navigate to the SCALE URL, and login')
def the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,
root_password):
"""the browser is open, navigate to the SCALE URL, and login."""
if nas_ip not in driver.current_url:
driver.get(f'http://{nas_ip}')
assert wait_on_element(driver, 10, xpaths.login.user_Input)
if not is_element_present(driver, xpaths.side_Menu.dashboard):
assert wait_on_element(driver, 10, xpaths.login.user_Input)
driver.find_element_by_xpath(xpaths.login.user_Input).clear()
driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')
driver.find_element_by_xpath(xpaths.login.password_Input).clear()
driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(
root_password)
assert wait_on_element(driver, 5, xpaths.login.signin_Button)
driver.find_element_by_xpath(xpaths.login.signin_Button).click()
else:
assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()
@when('on the dashboard click on Credentials and Local Groups')
def on_the_dashboard_click_on_credentials_and_local_groups(driver):
"""on the dashboard click on Credentials and Local Groups."""
assert wait_on_element(driver, 10, xpaths.dashboard.title)
assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)
assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()
assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,
'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()
@then('on the Groups page, click Add')
def on_the_groups_page_click_add(driver):
"""on the Groups page, click Add."""
assert wait_on_element(driver, 10, xpaths.groups.title)
assert wait_on_element(driver, 10, xpaths.button.add, 'clickable')
driver.find_element_by_xpath(xpaths.button.add).click()
@then('on the Add Group side box input the group name')
def on_the_add_group_side_box_input_the_group_name(driver):
"""on the Add Group side box input the group name."""
assert wait_on_element(driver, 7, xpaths.add_Group.title)
assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')
driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()
driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(
'qetest')
@then('click save and verify the group was added')
def click_save_and_verify_the_group_was_added(driver):
"""click save and verify the group was added."""
assert wait_on_element(driver, 7, xpaths.button.save, 'clickable')
driver.find_element_by_xpath(xpaths.button.save).click()
assert wait_on_element_disappear(driver, 20, xpaths.progress.progressbar)
assert wait_on_element(driver, 10, xpaths.groups.title)
assert wait_on_element(driver, 10, xpaths.groups.qetest_Name)
<|reserved_special_token_1|>
# coding=utf-8
"""SCALE UI: feature tests."""
import pytest
import xpaths
from function import (
wait_on_element,
is_element_present,
wait_on_element_disappear
)
from pytest_bdd import (
given,
scenario,
then,
when,
)
# pytest-bdd entry point: binds the named scenario from
# features/NAS-T1250.feature to the @given/@when/@then step functions
# defined below in this module.  The body is intentionally empty — the
# scenario decorator drives execution.  The dependency mark registers
# this test under the name 'Set_Group' (pytest-dependency) so that
# later tests can declare they depend on it.
@pytest.mark.dependency(name='Set_Group')
@scenario('features/NAS-T1250.feature', 'Verify that you can create a new group')
def test_verify_that_you_can_create_a_new_group():
    """Verify that you can create a new group."""
@given('the browser is open, navigate to the SCALE URL, and login')
def the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip, root_password):
    """the browser is open, navigate to the SCALE URL, and login."""
    # Only navigate when the browser is not already pointed at the NAS.
    if nas_ip not in driver.current_url:
        driver.get(f'http://{nas_ip}')
        assert wait_on_element(driver, 10, xpaths.login.user_Input)
    if is_element_present(driver, xpaths.side_Menu.dashboard):
        # Already logged in: just navigate back to the dashboard.
        assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard, 'clickable')
        driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()
    else:
        # Login form is showing: authenticate as root.
        assert wait_on_element(driver, 10, xpaths.login.user_Input)
        user_field = driver.find_element_by_xpath(xpaths.login.user_Input)
        user_field.clear()
        user_field.send_keys('root')
        password_field = driver.find_element_by_xpath(xpaths.login.password_Input)
        password_field.clear()
        password_field.send_keys(root_password)
        assert wait_on_element(driver, 5, xpaths.login.signin_Button)
        driver.find_element_by_xpath(xpaths.login.signin_Button).click()
@when('on the dashboard click on Credentials and Local Groups')
def on_the_dashboard_click_on_credentials_and_local_groups(driver):
    """on the dashboard click on Credentials and Local Groups."""
    # The dashboard must have rendered before the side menu is usable.
    for landmark in (xpaths.dashboard.title, xpaths.dashboard.system_Info_Card_Title):
        assert wait_on_element(driver, 10, landmark)
    # Open the Credentials menu, then its Local Groups sub-entry, in order.
    for menu_entry in (xpaths.side_Menu.credentials, xpaths.side_Menu.local_Group):
        assert wait_on_element(driver, 10, menu_entry, 'clickable')
        driver.find_element_by_xpath(menu_entry).click()
@then('on the Groups page, click Add')
def on_the_groups_page_click_add(driver):
    """on the Groups page, click Add."""
    # The Groups page title confirms navigation completed.
    assert wait_on_element(driver, 10, xpaths.groups.title)
    # Press the Add button once it becomes clickable.
    add_button = xpaths.button.add
    assert wait_on_element(driver, 10, add_button, 'clickable')
    driver.find_element_by_xpath(add_button).click()
@then('on the Add Group side box input the group name')
def on_the_add_group_side_box_input_the_group_name(driver):
    """on the Add Group side box input the group name."""
    # Wait for the slide-in form and an editable name field.
    assert wait_on_element(driver, 7, xpaths.add_Group.title)
    assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')
    # Type the group name into a cleared field.
    name_xpath = xpaths.add_Group.name_Input
    driver.find_element_by_xpath(name_xpath).clear()
    driver.find_element_by_xpath(name_xpath).send_keys('qetest')
@then('click save and verify the group was added')
def click_save_and_verify_the_group_was_added(driver):
    """click save and verify the group was added."""
    # Submit the Add Group form.
    save_button = xpaths.button.save
    assert wait_on_element(driver, 7, save_button, 'clickable')
    driver.find_element_by_xpath(save_button).click()
    # The progress spinner must clear before the groups list is usable again.
    assert wait_on_element_disappear(driver, 20, xpaths.progress.progressbar)
    # Back on the Groups page, the newly created group should be listed.
    for confirmation in (xpaths.groups.title, xpaths.groups.qetest_Name):
        assert wait_on_element(driver, 10, confirmation)
|
flexible
|
{
"blob_id": "f4aaf0449bff68814090552ea4f6ccac85dacf1b",
"index": 5617,
"step-1": "<mask token>\n\n\n@given('the browser is open, navigate to the SCALE URL, and login')\ndef the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,\n root_password):\n \"\"\"the browser is open, navigate to the SCALE URL, and login.\"\"\"\n if nas_ip not in driver.current_url:\n driver.get(f'http://{nas_ip}')\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n if not is_element_present(driver, xpaths.side_Menu.dashboard):\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(\n root_password)\n assert wait_on_element(driver, 5, xpaths.login.signin_Button)\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()\n else:\n assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()\n\n\n@when('on the dashboard click on Credentials and Local Groups')\ndef on_the_dashboard_click_on_credentials_and_local_groups(driver):\n \"\"\"on the dashboard click on Credentials and Local Groups.\"\"\"\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)\n assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()\n assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()\n\n\n<mask token>\n\n\n@then('on the Add Group side box input the group name')\ndef on_the_add_group_side_box_input_the_group_name(driver):\n \"\"\"on the Add Group side box input the group name.\"\"\"\n assert wait_on_element(driver, 7, 
xpaths.add_Group.title)\n assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(\n 'qetest')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@given('the browser is open, navigate to the SCALE URL, and login')\ndef the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,\n root_password):\n \"\"\"the browser is open, navigate to the SCALE URL, and login.\"\"\"\n if nas_ip not in driver.current_url:\n driver.get(f'http://{nas_ip}')\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n if not is_element_present(driver, xpaths.side_Menu.dashboard):\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(\n root_password)\n assert wait_on_element(driver, 5, xpaths.login.signin_Button)\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()\n else:\n assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()\n\n\n@when('on the dashboard click on Credentials and Local Groups')\ndef on_the_dashboard_click_on_credentials_and_local_groups(driver):\n \"\"\"on the dashboard click on Credentials and Local Groups.\"\"\"\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)\n assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()\n assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()\n\n\n@then('on the Groups page, click Add')\ndef on_the_groups_page_click_add(driver):\n \"\"\"on the Groups page, click Add.\"\"\"\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, 
xpaths.button.add, 'clickable')\n driver.find_element_by_xpath(xpaths.button.add).click()\n\n\n@then('on the Add Group side box input the group name')\ndef on_the_add_group_side_box_input_the_group_name(driver):\n \"\"\"on the Add Group side box input the group name.\"\"\"\n assert wait_on_element(driver, 7, xpaths.add_Group.title)\n assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(\n 'qetest')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@given('the browser is open, navigate to the SCALE URL, and login')\ndef the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,\n root_password):\n \"\"\"the browser is open, navigate to the SCALE URL, and login.\"\"\"\n if nas_ip not in driver.current_url:\n driver.get(f'http://{nas_ip}')\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n if not is_element_present(driver, xpaths.side_Menu.dashboard):\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(\n root_password)\n assert wait_on_element(driver, 5, xpaths.login.signin_Button)\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()\n else:\n assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()\n\n\n@when('on the dashboard click on Credentials and Local Groups')\ndef on_the_dashboard_click_on_credentials_and_local_groups(driver):\n \"\"\"on the dashboard click on Credentials and Local Groups.\"\"\"\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)\n assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()\n assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()\n\n\n@then('on the Groups page, click Add')\ndef on_the_groups_page_click_add(driver):\n \"\"\"on the Groups page, click Add.\"\"\"\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, 
xpaths.button.add, 'clickable')\n driver.find_element_by_xpath(xpaths.button.add).click()\n\n\n@then('on the Add Group side box input the group name')\ndef on_the_add_group_side_box_input_the_group_name(driver):\n \"\"\"on the Add Group side box input the group name.\"\"\"\n assert wait_on_element(driver, 7, xpaths.add_Group.title)\n assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(\n 'qetest')\n\n\n@then('click save and verify the group was added')\ndef click_save_and_verify_the_group_was_added(driver):\n \"\"\"click save and verify the group was added.\"\"\"\n assert wait_on_element(driver, 7, xpaths.button.save, 'clickable')\n driver.find_element_by_xpath(xpaths.button.save).click()\n assert wait_on_element_disappear(driver, 20, xpaths.progress.progressbar)\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.groups.qetest_Name)\n",
"step-4": "<mask token>\nimport pytest\nimport xpaths\nfrom function import wait_on_element, is_element_present, wait_on_element_disappear\nfrom pytest_bdd import given, scenario, then, when\n\n\n@pytest.mark.dependency(name='Set_Group')\n@scenario('features/NAS-T1250.feature',\n 'Verify that you can create a new group')\ndef test_verify_that_you_can_create_a_new_group():\n \"\"\"Verify that you can create a new group.\"\"\"\n\n\n@given('the browser is open, navigate to the SCALE URL, and login')\ndef the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,\n root_password):\n \"\"\"the browser is open, navigate to the SCALE URL, and login.\"\"\"\n if nas_ip not in driver.current_url:\n driver.get(f'http://{nas_ip}')\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n if not is_element_present(driver, xpaths.side_Menu.dashboard):\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(\n root_password)\n assert wait_on_element(driver, 5, xpaths.login.signin_Button)\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()\n else:\n assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()\n\n\n@when('on the dashboard click on Credentials and Local Groups')\ndef on_the_dashboard_click_on_credentials_and_local_groups(driver):\n \"\"\"on the dashboard click on Credentials and Local Groups.\"\"\"\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)\n assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,\n 'clickable')\n 
driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()\n assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()\n\n\n@then('on the Groups page, click Add')\ndef on_the_groups_page_click_add(driver):\n \"\"\"on the Groups page, click Add.\"\"\"\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.button.add, 'clickable')\n driver.find_element_by_xpath(xpaths.button.add).click()\n\n\n@then('on the Add Group side box input the group name')\ndef on_the_add_group_side_box_input_the_group_name(driver):\n \"\"\"on the Add Group side box input the group name.\"\"\"\n assert wait_on_element(driver, 7, xpaths.add_Group.title)\n assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(\n 'qetest')\n\n\n@then('click save and verify the group was added')\ndef click_save_and_verify_the_group_was_added(driver):\n \"\"\"click save and verify the group was added.\"\"\"\n assert wait_on_element(driver, 7, xpaths.button.save, 'clickable')\n driver.find_element_by_xpath(xpaths.button.save).click()\n assert wait_on_element_disappear(driver, 20, xpaths.progress.progressbar)\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.groups.qetest_Name)\n",
"step-5": "# coding=utf-8\n\"\"\"SCALE UI: feature tests.\"\"\"\n\nimport pytest\nimport xpaths\nfrom function import (\n wait_on_element,\n is_element_present,\n wait_on_element_disappear\n)\nfrom pytest_bdd import (\n given,\n scenario,\n then,\n when,\n)\n\n\n@pytest.mark.dependency(name='Set_Group')\n@scenario('features/NAS-T1250.feature', 'Verify that you can create a new group')\ndef test_verify_that_you_can_create_a_new_group():\n \"\"\"Verify that you can create a new group.\"\"\"\n\n\n@given('the browser is open, navigate to the SCALE URL, and login')\ndef the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip, root_password):\n \"\"\"the browser is open, navigate to the SCALE URL, and login.\"\"\"\n if nas_ip not in driver.current_url:\n driver.get(f\"http://{nas_ip}\")\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n if not is_element_present(driver, xpaths.side_Menu.dashboard):\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(root_password)\n assert wait_on_element(driver, 5, xpaths.login.signin_Button)\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()\n else:\n assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard, 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()\n\n\n@when('on the dashboard click on Credentials and Local Groups')\ndef on_the_dashboard_click_on_credentials_and_local_groups(driver):\n \"\"\"on the dashboard click on Credentials and Local Groups.\"\"\"\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)\n assert wait_on_element(driver, 10, xpaths.side_Menu.credentials, 'clickable')\n 
driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()\n assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group, 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()\n\n\n@then('on the Groups page, click Add')\ndef on_the_groups_page_click_add(driver):\n \"\"\"on the Groups page, click Add.\"\"\"\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.button.add, 'clickable')\n driver.find_element_by_xpath(xpaths.button.add).click()\n\n\n@then('on the Add Group side box input the group name')\ndef on_the_add_group_side_box_input_the_group_name(driver):\n \"\"\"on the Add Group side box input the group name.\"\"\"\n assert wait_on_element(driver, 7, xpaths.add_Group.title)\n assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys('qetest')\n\n\n@then('click save and verify the group was added')\ndef click_save_and_verify_the_group_was_added(driver):\n \"\"\"click save and verify the group was added.\"\"\"\n assert wait_on_element(driver, 7, xpaths.button.save, 'clickable')\n driver.find_element_by_xpath(xpaths.button.save).click()\n assert wait_on_element_disappear(driver, 20, xpaths.progress.progressbar)\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.groups.qetest_Name)\n",
"step-ids": [
3,
4,
5,
7,
8
]
}
|
[
3,
4,
5,
7,
8
] |
# Leverage check: a debt-to-equity ratio at or below 2 prints the happy face.
debt = 100
equity = 50
ratio = debt / equity
print('😊' if ratio <= 2 else '⚠️')
print('Ratio is', ratio)
|
normal
|
{
"blob_id": "40b1fac14aaa81039aec8e80ce1c91bb881cfe78",
"index": 3474,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif ratio <= 2:\n print('😊')\nelse:\n print('⚠️')\nprint('Ratio is', ratio)\n",
"step-3": "debt = 100\nequity = 50\nratio = debt / equity\nif ratio <= 2:\n print('😊')\nelse:\n print('⚠️')\nprint('Ratio is', ratio)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from . import chequeador_camion
from . import chequeador_camion_modelo
from . import chequeador_destino_tipo
from . import chequeador_destino
from . import chequeador_origen
from . import chequeador_minerales
|
normal
|
{
"blob_id": "bf7319996043a41b7d0ef4e6098c3609e5db101e",
"index": 9809,
"step-1": "<mask token>\n",
"step-2": "from . import chequeador_camion\nfrom . import chequeador_camion_modelo\nfrom . import chequeador_destino_tipo\nfrom . import chequeador_destino\nfrom . import chequeador_origen\nfrom . import chequeador_minerales\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def ip_address(address):
    """Return *address* with every '.' separator replaced by the literal '[.]'.

    e.g. '192.168.1.1' -> '192[.]168[.]1[.]1'.  Strings without dots are
    returned unchanged.
    """
    # str.replace does the split/join dance of the original in one pass.
    return address.replace('.', '[.]')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def ip_address(address):
new_address = ''
split_address = address.split('.')
seprator = '[.]'
new_address = seprator.join(split_address)
return new_address
if __name__ == '__main__':
ipaddress = ip_address('192.168.1.1')
print(ipaddress)
<|reserved_special_token_1|>
def ip_address(address):
    """Return *address* with every '.' separator replaced by the literal '[.]'.

    e.g. '192.168.1.1' -> '192[.]168[.]1[.]1'.  Strings without dots are
    returned unchanged.
    """
    # str.replace is the idiomatic one-liner for the original split/join.
    return address.replace('.', '[.]')
if __name__ == "__main__":
ipaddress = ip_address("192.168.1.1")
print(ipaddress)
|
flexible
|
{
"blob_id": "7ef62e5545930ab13312f8ae1ea70a74386d8bfa",
"index": 1231,
"step-1": "<mask token>\n",
"step-2": "def ip_address(address):\n new_address = ''\n split_address = address.split('.')\n seprator = '[.]'\n new_address = seprator.join(split_address)\n return new_address\n\n\n<mask token>\n",
"step-3": "def ip_address(address):\n new_address = ''\n split_address = address.split('.')\n seprator = '[.]'\n new_address = seprator.join(split_address)\n return new_address\n\n\nif __name__ == '__main__':\n ipaddress = ip_address('192.168.1.1')\n print(ipaddress)\n",
"step-4": "def ip_address(address):\n new_address = \"\"\n split_address = address.split(\".\")\n seprator = \"[.]\"\n new_address = seprator.join(split_address)\n return new_address\n\n\nif __name__ == \"__main__\":\n ipaddress = ip_address(\"192.168.1.1\")\n print(ipaddress)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def add_tuple(tuple_a=(), tuple_b=()):
    """Element-wise add the first two entries of two tuples.

    Entries missing from a tuple shorter than two are treated as 0, so the
    result is always a 2-tuple; entries beyond index 1 are ignored.
    """
    # Pad each tuple with zeros so both always expose exactly two addends.
    a_x, a_y = (tuple_a + (0, 0))[:2]
    b_x, b_y = (tuple_b + (0, 0))[:2]
    return (a_x + b_x, a_y + b_y)
<|reserved_special_token_1|>
#!/usr/bin/python3
def add_tuple(tuple_a=(), tuple_b=()):
    """Element-wise add the first two entries of two tuples.

    Entries missing from a tuple shorter than two are treated as 0, so the
    result is always a 2-tuple; entries beyond index 1 are ignored.
    """
    # Zero-pad then slice: replaces the original 4-way if/else ladder.
    a_x, a_y = (tuple_a + (0, 0))[:2]
    b_x, b_y = (tuple_b + (0, 0))[:2]
    return (a_x + b_x, a_y + b_y)
|
flexible
|
{
"blob_id": "1522ebb52504f7f27a526b597fe1e262bbcbfbb0",
"index": 4429,
"step-1": "<mask token>\n",
"step-2": "def add_tuple(tuple_a=(), tuple_b=()):\n if len(tuple_a) < 1:\n a_x = 0\n else:\n a_x = tuple_a[0]\n if len(tuple_a) < 2:\n a_y = 0\n else:\n a_y = tuple_a[1]\n if len(tuple_b) < 1:\n b_x = 0\n else:\n b_x = tuple_b[0]\n if len(tuple_b) < 2:\n b_y = 0\n else:\n b_y = tuple_b[1]\n a = a_x + b_x\n b = a_y + b_y\n tuple_c = a, b\n return tuple_c\n",
"step-3": "#!/usr/bin/python3\ndef add_tuple(tuple_a=(), tuple_b=()):\n if len(tuple_a) < 1:\n a_x = 0\n else:\n a_x = tuple_a[0]\n if len(tuple_a) < 2:\n a_y = 0\n else:\n a_y = tuple_a[1]\n if len(tuple_b) < 1:\n b_x = 0\n else:\n b_x = tuple_b[0]\n if len(tuple_b) < 2:\n b_y = 0\n else:\n b_y = tuple_b[1]\n a = a_x + b_x\n b = a_y + b_y\n tuple_c = (a, b)\n return tuple_c\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class ApiOAuth2Application(base.ObjectIDMixin, base.BaseModel):
"""Registration and key for user-created OAuth API applications
This collection is also used by CAS to create the master list of available applications.
Any changes made to field names in this model must be echoed in the CAS implementation.
"""
client_id = models.CharField(default=generate_client_id, unique=True,
max_length=50, db_index=True)
client_secret = models.CharField(default=generate_client_secret,
max_length=40)
is_active = models.BooleanField(default=True, db_index=True)
owner = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=
models.SET_NULL)
name = models.CharField(db_index=True, blank=False, null=False,
max_length=200)
description = models.CharField(blank=True, null=True, max_length=1000)
home_url = models.URLField(blank=False, null=False)
callback_url = models.URLField(blank=False, null=False)
def deactivate(self, save=False):
"""
Deactivate an ApiOAuth2Application
Does not delete the database record, but revokes all tokens and sets a
flag that hides this instance from API
"""
client = cas.get_client()
resp = client.revoke_application_tokens(self.client_id, self.
client_secret)
self.is_active = False
if save:
self.save()
return True
def reset_secret(self, save=False):
"""
Reset the secret of an ApiOAuth2Application
Revokes all tokens
"""
client = cas.get_client()
client.revoke_application_tokens(self.client_id, self.client_secret)
self.client_secret = generate_client_secret()
if save:
self.save()
return True
@property
def url(self):
return '/settings/applications/{}/'.format(self.client_id)
@property
def absolute_url(self):
return urljoin(settings.DOMAIN, self.url)
@property
def absolute_api_v2_url(self):
path = '/applications/{}/'.format(self.client_id)
return api_v2_url(path)
def get_absolute_url(self):
return self.absolute_api_v2_url
<|reserved_special_token_0|>
class ApiOAuth2PersonalToken(base.ObjectIDMixin, base.BaseModel):
"""Information for user-created personal access tokens
This collection is also used by CAS to create the master list of available tokens.
Any changes made to field names in this model must be echoed in the CAS implementation.
"""
token_id = models.CharField(max_length=70, default=generate_token_id,
unique=True)
owner = models.ForeignKey('OSFUser', db_index=True, blank=True, null=
True, on_delete=models.SET_NULL)
name = models.CharField(max_length=100, blank=False, null=False,
db_index=True)
scopes = models.ManyToManyField('ApiOAuth2Scope', related_name='tokens',
blank=False)
is_active = models.BooleanField(default=True, db_index=True)
def deactivate(self, save=False):
"""
Deactivate an ApiOAuth2PersonalToken
Does not delete the database record, but hides this instance from API
"""
client = cas.get_client()
try:
resp = client.revoke_tokens({'token': self.token_id})
except cas.CasHTTPError as e:
if e.code == 400:
pass
else:
raise e
self.is_active = False
if save:
self.save()
return True
@property
def url(self):
return '/settings/tokens/{}/'.format(self._id)
@property
def absolute_url(self):
return urljoin(settings.DOMAIN, self.url)
@property
def absolute_api_v2_url(self):
path = '/tokens/{}/'.format(self._id)
return api_v2_url(path)
def get_absolute_url(self):
return self.absolute_api_v2_url
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ApiOAuth2Scope(base.ObjectIDMixin, base.BaseModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ApiOAuth2Application(base.ObjectIDMixin, base.BaseModel):
"""Registration and key for user-created OAuth API applications
This collection is also used by CAS to create the master list of available applications.
Any changes made to field names in this model must be echoed in the CAS implementation.
"""
client_id = models.CharField(default=generate_client_id, unique=True,
max_length=50, db_index=True)
client_secret = models.CharField(default=generate_client_secret,
max_length=40)
is_active = models.BooleanField(default=True, db_index=True)
owner = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=
models.SET_NULL)
name = models.CharField(db_index=True, blank=False, null=False,
max_length=200)
description = models.CharField(blank=True, null=True, max_length=1000)
home_url = models.URLField(blank=False, null=False)
callback_url = models.URLField(blank=False, null=False)
def deactivate(self, save=False):
"""
Deactivate an ApiOAuth2Application
Does not delete the database record, but revokes all tokens and sets a
flag that hides this instance from API
"""
client = cas.get_client()
resp = client.revoke_application_tokens(self.client_id, self.
client_secret)
self.is_active = False
if save:
self.save()
return True
def reset_secret(self, save=False):
"""
Reset the secret of an ApiOAuth2Application
Revokes all tokens
"""
client = cas.get_client()
client.revoke_application_tokens(self.client_id, self.client_secret)
self.client_secret = generate_client_secret()
if save:
self.save()
return True
@property
def url(self):
return '/settings/applications/{}/'.format(self.client_id)
@property
def absolute_url(self):
return urljoin(settings.DOMAIN, self.url)
@property
def absolute_api_v2_url(self):
path = '/applications/{}/'.format(self.client_id)
return api_v2_url(path)
def get_absolute_url(self):
return self.absolute_api_v2_url
<|reserved_special_token_0|>
class ApiOAuth2PersonalToken(base.ObjectIDMixin, base.BaseModel):
"""Information for user-created personal access tokens
This collection is also used by CAS to create the master list of available tokens.
Any changes made to field names in this model must be echoed in the CAS implementation.
"""
token_id = models.CharField(max_length=70, default=generate_token_id,
unique=True)
owner = models.ForeignKey('OSFUser', db_index=True, blank=True, null=
True, on_delete=models.SET_NULL)
name = models.CharField(max_length=100, blank=False, null=False,
db_index=True)
scopes = models.ManyToManyField('ApiOAuth2Scope', related_name='tokens',
blank=False)
is_active = models.BooleanField(default=True, db_index=True)
def deactivate(self, save=False):
"""
Deactivate an ApiOAuth2PersonalToken
Does not delete the database record, but hides this instance from API
"""
client = cas.get_client()
try:
resp = client.revoke_tokens({'token': self.token_id})
except cas.CasHTTPError as e:
if e.code == 400:
pass
else:
raise e
self.is_active = False
if save:
self.save()
return True
@property
def url(self):
return '/settings/tokens/{}/'.format(self._id)
@property
def absolute_url(self):
return urljoin(settings.DOMAIN, self.url)
@property
def absolute_api_v2_url(self):
path = '/tokens/{}/'.format(self._id)
return api_v2_url(path)
def get_absolute_url(self):
return self.absolute_api_v2_url
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ApiOAuth2Scope(base.ObjectIDMixin, base.BaseModel):
"""
Store information about recognized OAuth2 scopes. Only scopes registered under this database model can
be requested by third parties.
"""
name = models.CharField(max_length=50, unique=True, db_index=True, null
=False, blank=False)
description = models.CharField(max_length=255, null=False, blank=False)
is_active = models.BooleanField(default=True, db_index=True)
is_public = models.BooleanField(default=True, db_index=True)
def absolute_url(self):
return urljoin(settings.API_DOMAIN, '/v2/scopes/{}/'.format(self.name))
<|reserved_special_token_0|>
class ApiOAuth2Application(base.ObjectIDMixin, base.BaseModel):
"""Registration and key for user-created OAuth API applications
This collection is also used by CAS to create the master list of available applications.
Any changes made to field names in this model must be echoed in the CAS implementation.
"""
client_id = models.CharField(default=generate_client_id, unique=True,
max_length=50, db_index=True)
client_secret = models.CharField(default=generate_client_secret,
max_length=40)
is_active = models.BooleanField(default=True, db_index=True)
owner = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=
models.SET_NULL)
name = models.CharField(db_index=True, blank=False, null=False,
max_length=200)
description = models.CharField(blank=True, null=True, max_length=1000)
home_url = models.URLField(blank=False, null=False)
callback_url = models.URLField(blank=False, null=False)
def deactivate(self, save=False):
"""
Deactivate an ApiOAuth2Application
Does not delete the database record, but revokes all tokens and sets a
flag that hides this instance from API
"""
client = cas.get_client()
resp = client.revoke_application_tokens(self.client_id, self.
client_secret)
self.is_active = False
if save:
self.save()
return True
def reset_secret(self, save=False):
"""
Reset the secret of an ApiOAuth2Application
Revokes all tokens
"""
client = cas.get_client()
client.revoke_application_tokens(self.client_id, self.client_secret)
self.client_secret = generate_client_secret()
if save:
self.save()
return True
@property
def url(self):
return '/settings/applications/{}/'.format(self.client_id)
@property
def absolute_url(self):
return urljoin(settings.DOMAIN, self.url)
@property
def absolute_api_v2_url(self):
path = '/applications/{}/'.format(self.client_id)
return api_v2_url(path)
def get_absolute_url(self):
return self.absolute_api_v2_url
<|reserved_special_token_0|>
class ApiOAuth2PersonalToken(base.ObjectIDMixin, base.BaseModel):
    """Information for user-created personal access tokens
    This collection is also used by CAS to create the master list of available tokens.
    Any changes made to field names in this model must be echoed in the CAS implementation.
    """
    # The bearer token value itself; 70 chars, unique, generated on creation.
    token_id = models.CharField(max_length=70, default=generate_token_id,
        unique=True)
    # Owner is nulled rather than cascading the delete of the token row.
    owner = models.ForeignKey('OSFUser', db_index=True, blank=True, null=
        True, on_delete=models.SET_NULL)
    name = models.CharField(max_length=100, blank=False, null=False,
        db_index=True)
    scopes = models.ManyToManyField('ApiOAuth2Scope', related_name='tokens',
        blank=False)
    # Cleared by deactivate(); inactive tokens are hidden from the API.
    is_active = models.BooleanField(default=True, db_index=True)
    def deactivate(self, save=False):
        """
        Deactivate an ApiOAuth2PersonalToken
        Does not delete the database record, but hides this instance from API
        """
        client = cas.get_client()
        try:
            # Ask CAS to revoke the token; the response is not inspected.
            resp = client.revoke_tokens({'token': self.token_id})
        except cas.CasHTTPError as e:
            if e.code == 400:
                # A 400 from CAS is tolerated (presumably the token was
                # already revoked/unknown -- TODO confirm); others propagate.
                pass
            else:
                raise e
        self.is_active = False
        if save:
            self.save()
        return True
    @property
    def url(self):
        # Relative path of this token's settings page.
        return '/settings/tokens/{}/'.format(self._id)
    @property
    def absolute_url(self):
        return urljoin(settings.DOMAIN, self.url)
    @property
    def absolute_api_v2_url(self):
        path = '/tokens/{}/'.format(self._id)
        return api_v2_url(path)
    def get_absolute_url(self):
        # Django convention: canonical URL for the object (API v2 here).
        return self.absolute_api_v2_url
<|reserved_special_token_1|>
import uuid
from website.util import api_v2_url
from django.db import models
from osf.models import base
from website.security import random_string
from framework.auth import cas
from website import settings
from future.moves.urllib.parse import urljoin
def generate_client_secret():
    # 40-character random secret; also regenerated by
    # ApiOAuth2Application.reset_secret() when a secret is rotated.
    return random_string(length=40)
class ApiOAuth2Scope(base.ObjectIDMixin, base.BaseModel):
    """
    Store information about recognized OAuth2 scopes. Only scopes registered under this database model can
    be requested by third parties.
    """
    # Scope identifier used in requests; unique across all registered scopes.
    name = models.CharField(max_length=50, unique=True, db_index=True, null
        =False, blank=False)
    # Human-readable explanation of what the scope grants.
    description = models.CharField(max_length=255, null=False, blank=False)
    is_active = models.BooleanField(default=True, db_index=True)
    is_public = models.BooleanField(default=True, db_index=True)

    def absolute_url(self):
        # NOTE(review): plain method, unlike the @property absolute_url on the
        # other models in this module -- callers must invoke it explicitly.
        return urljoin(settings.API_DOMAIN, '/v2/scopes/{}/'.format(self.name))
def generate_client_id():
    """Return a fresh random client id: the 32-char hex form of a UUID4."""
    fresh = uuid.uuid4()
    return fresh.hex
class ApiOAuth2Application(base.ObjectIDMixin, base.BaseModel):
    """Registration and key for user-created OAuth API applications

    This collection is also used by CAS to create the master list of available applications.
    Any changes made to field names in this model must be echoed in the CAS implementation.
    """
    # Dedicated ID field so the ID format is not tied to database internals.
    # Not *guaranteed* unique, but a uuid4 collision is vanishingly unlikely.
    client_id = models.CharField(default=generate_client_id, unique=True,
        max_length=50, db_index=True)
    client_secret = models.CharField(default=generate_client_secret,
        max_length=40)
    # Set to False when the application is deactivated; inactive apps are
    # hidden from the API.
    is_active = models.BooleanField(default=True, db_index=True)
    owner = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=
        models.SET_NULL)
    # User-specified application descriptors.
    name = models.CharField(db_index=True, blank=False, null=False,
        max_length=200)
    description = models.CharField(blank=True, null=True, max_length=1000)
    home_url = models.URLField(blank=False, null=False)
    callback_url = models.URLField(blank=False, null=False)

    def deactivate(self, save=False):
        """
        Deactivate an ApiOAuth2Application

        Does not delete the database record, but revokes all tokens and sets a
        flag that hides this instance from API

        :param bool save: when True, persist the change immediately.
        :return: True on success.
        """
        client = cas.get_client()
        # Raises cas.CasHTTPError if revocation fails, which also prevents
        # is_active from being cleared.  (Return value is unused.)
        client.revoke_application_tokens(self.client_id, self.client_secret)
        self.is_active = False
        if save:
            self.save()
        return True

    def reset_secret(self, save=False):
        """
        Reset the secret of an ApiOAuth2Application
        Revokes all tokens

        :param bool save: when True, persist the change immediately.
        :return: True on success.
        """
        client = cas.get_client()
        client.revoke_application_tokens(self.client_id, self.client_secret)
        self.client_secret = generate_client_secret()
        if save:
            self.save()
        return True

    @property
    def url(self):
        """Relative URL of this application's settings page."""
        return '/settings/applications/{}/'.format(self.client_id)

    @property
    def absolute_url(self):
        """Fully-qualified settings-page URL on the OSF domain."""
        return urljoin(settings.DOMAIN, self.url)

    # Properties used by Django and DRF "Links: self" field.
    @property
    def absolute_api_v2_url(self):
        path = '/applications/{}/'.format(self.client_id)
        return api_v2_url(path)

    # Used by Django and DRF.
    def get_absolute_url(self):
        return self.absolute_api_v2_url
def generate_token_id():
    """Return a random 70-character string to use as a personal access token."""
    return random_string(length=70)
class ApiOAuth2PersonalToken(base.ObjectIDMixin, base.BaseModel):
    """Information for user-created personal access tokens

    This collection is also used by CAS to create the master list of available tokens.
    Any changes made to field names in this model must be echoed in the CAS implementation.
    """
    # The field being named `token_id` is a CAS requirement; it holds the
    # actual value used to authenticate.
    token_id = models.CharField(max_length=70, default=generate_token_id,
        unique=True)
    owner = models.ForeignKey('OSFUser', db_index=True, blank=True, null=
        True, on_delete=models.SET_NULL)
    name = models.CharField(max_length=100, blank=False, null=False,
        db_index=True)
    scopes = models.ManyToManyField('ApiOAuth2Scope', related_name='tokens',
        blank=False)
    is_active = models.BooleanField(default=True, db_index=True)

    def deactivate(self, save=False):
        """
        Deactivate an ApiOAuth2PersonalToken

        Does not delete the database record, but hides this instance from API

        :param bool save: when True, persist the change immediately.
        :return: True on success.
        """
        client = cas.get_client()
        # Revocation failures abort deactivation, except when CAS has never
        # seen the token (it was never used), which CAS reports as a 400.
        try:
            client.revoke_tokens({'token': self.token_id})
        except cas.CasHTTPError as e:
            if e.code == 400:
                pass  # Token hasn't been used yet, so not created in cas
            else:
                raise  # bare raise re-raises with the original traceback
        self.is_active = False
        if save:
            self.save()
        return True

    @property
    def url(self):
        """Relative URL of this token's settings page."""
        return '/settings/tokens/{}/'.format(self._id)

    @property
    def absolute_url(self):
        """Fully-qualified settings-page URL on the OSF domain."""
        return urljoin(settings.DOMAIN, self.url)

    # Properties used by Django and DRF "Links: self" field.
    @property
    def absolute_api_v2_url(self):
        path = '/tokens/{}/'.format(self._id)
        return api_v2_url(path)

    # Used by Django and DRF.
    def get_absolute_url(self):
        return self.absolute_api_v2_url
<|reserved_special_token_1|>
import uuid
from website.util import api_v2_url
from django.db import models
from osf.models import base
from website.security import random_string
from framework.auth import cas
from website import settings
from future.moves.urllib.parse import urljoin
def generate_client_secret():
    """Return a random 40-character string to use as an OAuth2 client secret."""
    return random_string(length=40)
class ApiOAuth2Scope(base.ObjectIDMixin, base.BaseModel):
    """
    Store information about recognized OAuth2 scopes. Only scopes registered under this database model can
    be requested by third parties.
    """
    # Canonical scope name clients request; unique and indexed for lookup.
    name = models.CharField(max_length=50, unique=True, db_index=True, null=False, blank=False)
    # Human-readable explanation of what the scope grants.
    description = models.CharField(max_length=255, null=False, blank=False)
    is_active = models.BooleanField(default=True, db_index=True)  # TODO: Add mechanism to deactivate a scope?
    is_public = models.BooleanField(default=True, db_index=True)

    def absolute_url(self):
        """Fully-qualified APIv2 URL for this scope (plain method, not a property)."""
        return urljoin(settings.API_DOMAIN, '/v2/scopes/{}/'.format(self.name))
def generate_client_id():
    """Return a fresh 32-character lowercase-hex client identifier."""
    # str(uuid4()) is the hyphenated hex form; stripping the hyphens yields
    # exactly the 32-character .hex representation.
    return str(uuid.uuid4()).replace('-', '')
class ApiOAuth2Application(base.ObjectIDMixin, base.BaseModel):
    """Registration and key for user-created OAuth API applications

    This collection is also used by CAS to create the master list of available applications.
    Any changes made to field names in this model must be echoed in the CAS implementation.
    """

    # Client ID and secret. Use separate ID field so ID format doesn't
    # have to be restricted to database internals.
    # Not *guaranteed* unique, but very unlikely
    client_id = models.CharField(default=generate_client_id,
        unique=True,
        max_length=50,
        db_index=True)

    client_secret = models.CharField(default=generate_client_secret, max_length=40)

    is_active = models.BooleanField(default=True,  # Set to False if application is deactivated
        db_index=True)

    owner = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=models.SET_NULL)

    # User-specified application descriptors
    name = models.CharField(db_index=True, blank=False, null=False, max_length=200)
    description = models.CharField(blank=True, null=True, max_length=1000)

    home_url = models.URLField(blank=False, null=False)
    callback_url = models.URLField(blank=False, null=False)

    def deactivate(self, save=False):
        """
        Deactivate an ApiOAuth2Application

        Does not delete the database record, but revokes all tokens and sets a
        flag that hides this instance from API

        :param bool save: when True, persist the change immediately.
        :return: True on success.
        """
        client = cas.get_client()
        # Will raise a CasHttpError if deletion fails, which will also stop setting of active=False.
        resp = client.revoke_application_tokens(self.client_id, self.client_secret)  # noqa
        self.is_active = False
        if save:
            self.save()
        return True

    def reset_secret(self, save=False):
        """
        Reset the secret of an ApiOAuth2Application
        Revokes all tokens

        :param bool save: when True, persist the change immediately.
        :return: True on success.
        """
        client = cas.get_client()
        client.revoke_application_tokens(self.client_id, self.client_secret)
        self.client_secret = generate_client_secret()
        if save:
            self.save()
        return True

    @property
    def url(self):
        """Relative URL of this application's settings page."""
        return '/settings/applications/{}/'.format(self.client_id)

    @property
    def absolute_url(self):
        """Fully-qualified settings-page URL on the OSF domain."""
        return urljoin(settings.DOMAIN, self.url)

    # Properties used by Django and DRF "Links: self" field
    @property
    def absolute_api_v2_url(self):
        path = '/applications/{}/'.format(self.client_id)
        return api_v2_url(path)

    # used by django and DRF
    def get_absolute_url(self):
        return self.absolute_api_v2_url
def generate_token_id():
    """Return a random 70-character string to use as a personal access token."""
    return random_string(length=70)
class ApiOAuth2PersonalToken(base.ObjectIDMixin, base.BaseModel):
    """Information for user-created personal access tokens

    This collection is also used by CAS to create the master list of available tokens.
    Any changes made to field names in this model must be echoed in the CAS implementation.
    """
    # Name of the field being `token_id` is a CAS requirement.
    # This is the actual value of the token that's used to authenticate
    token_id = models.CharField(max_length=70, default=generate_token_id,
        unique=True)

    owner = models.ForeignKey('OSFUser', db_index=True, blank=True, null=True, on_delete=models.SET_NULL)
    name = models.CharField(max_length=100, blank=False, null=False, db_index=True)

    scopes = models.ManyToManyField('ApiOAuth2Scope', related_name='tokens', blank=False)

    is_active = models.BooleanField(default=True, db_index=True)

    def deactivate(self, save=False):
        """
        Deactivate an ApiOAuth2PersonalToken

        Does not delete the database record, but hides this instance from API

        :param bool save: when True, persist the change immediately.
        :return: True on success.
        """
        client = cas.get_client()
        # Will raise a CasHttpError if deletion fails for any reason other than the token
        # not yet being created. This will also stop setting of active=False.
        try:
            client.revoke_tokens({'token': self.token_id})
        except cas.CasHTTPError as e:
            if e.code == 400:
                pass  # Token hasn't been used yet, so not created in cas
            else:
                raise  # bare raise re-raises with the original traceback

        self.is_active = False

        if save:
            self.save()
        return True

    @property
    def url(self):
        """Relative URL of this token's settings page."""
        return '/settings/tokens/{}/'.format(self._id)

    @property
    def absolute_url(self):
        """Fully-qualified settings-page URL on the OSF domain."""
        return urljoin(settings.DOMAIN, self.url)

    # Properties used by Django and DRF "Links: self" field
    @property
    def absolute_api_v2_url(self):
        path = '/tokens/{}/'.format(self._id)
        return api_v2_url(path)

    # used by django and DRF
    def get_absolute_url(self):
        return self.absolute_api_v2_url
|
flexible
|
{
"blob_id": "8186b7bddbdcdd730a3f79da1bd075c25c0c3998",
"index": 3131,
"step-1": "<mask token>\n\n\nclass ApiOAuth2Application(base.ObjectIDMixin, base.BaseModel):\n \"\"\"Registration and key for user-created OAuth API applications\n\n This collection is also used by CAS to create the master list of available applications.\n Any changes made to field names in this model must be echoed in the CAS implementation.\n \"\"\"\n client_id = models.CharField(default=generate_client_id, unique=True,\n max_length=50, db_index=True)\n client_secret = models.CharField(default=generate_client_secret,\n max_length=40)\n is_active = models.BooleanField(default=True, db_index=True)\n owner = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=\n models.SET_NULL)\n name = models.CharField(db_index=True, blank=False, null=False,\n max_length=200)\n description = models.CharField(blank=True, null=True, max_length=1000)\n home_url = models.URLField(blank=False, null=False)\n callback_url = models.URLField(blank=False, null=False)\n\n def deactivate(self, save=False):\n \"\"\"\n Deactivate an ApiOAuth2Application\n\n Does not delete the database record, but revokes all tokens and sets a\n flag that hides this instance from API\n \"\"\"\n client = cas.get_client()\n resp = client.revoke_application_tokens(self.client_id, self.\n client_secret)\n self.is_active = False\n if save:\n self.save()\n return True\n\n def reset_secret(self, save=False):\n \"\"\"\n Reset the secret of an ApiOAuth2Application\n Revokes all tokens\n \"\"\"\n client = cas.get_client()\n client.revoke_application_tokens(self.client_id, self.client_secret)\n self.client_secret = generate_client_secret()\n if save:\n self.save()\n return True\n\n @property\n def url(self):\n return '/settings/applications/{}/'.format(self.client_id)\n\n @property\n def absolute_url(self):\n return urljoin(settings.DOMAIN, self.url)\n\n @property\n def absolute_api_v2_url(self):\n path = '/applications/{}/'.format(self.client_id)\n return api_v2_url(path)\n\n def get_absolute_url(self):\n 
return self.absolute_api_v2_url\n\n\n<mask token>\n\n\nclass ApiOAuth2PersonalToken(base.ObjectIDMixin, base.BaseModel):\n \"\"\"Information for user-created personal access tokens\n\n This collection is also used by CAS to create the master list of available tokens.\n Any changes made to field names in this model must be echoed in the CAS implementation.\n \"\"\"\n token_id = models.CharField(max_length=70, default=generate_token_id,\n unique=True)\n owner = models.ForeignKey('OSFUser', db_index=True, blank=True, null=\n True, on_delete=models.SET_NULL)\n name = models.CharField(max_length=100, blank=False, null=False,\n db_index=True)\n scopes = models.ManyToManyField('ApiOAuth2Scope', related_name='tokens',\n blank=False)\n is_active = models.BooleanField(default=True, db_index=True)\n\n def deactivate(self, save=False):\n \"\"\"\n Deactivate an ApiOAuth2PersonalToken\n\n Does not delete the database record, but hides this instance from API\n \"\"\"\n client = cas.get_client()\n try:\n resp = client.revoke_tokens({'token': self.token_id})\n except cas.CasHTTPError as e:\n if e.code == 400:\n pass\n else:\n raise e\n self.is_active = False\n if save:\n self.save()\n return True\n\n @property\n def url(self):\n return '/settings/tokens/{}/'.format(self._id)\n\n @property\n def absolute_url(self):\n return urljoin(settings.DOMAIN, self.url)\n\n @property\n def absolute_api_v2_url(self):\n path = '/tokens/{}/'.format(self._id)\n return api_v2_url(path)\n\n def get_absolute_url(self):\n return self.absolute_api_v2_url\n",
"step-2": "<mask token>\n\n\nclass ApiOAuth2Scope(base.ObjectIDMixin, base.BaseModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass ApiOAuth2Application(base.ObjectIDMixin, base.BaseModel):\n \"\"\"Registration and key for user-created OAuth API applications\n\n This collection is also used by CAS to create the master list of available applications.\n Any changes made to field names in this model must be echoed in the CAS implementation.\n \"\"\"\n client_id = models.CharField(default=generate_client_id, unique=True,\n max_length=50, db_index=True)\n client_secret = models.CharField(default=generate_client_secret,\n max_length=40)\n is_active = models.BooleanField(default=True, db_index=True)\n owner = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=\n models.SET_NULL)\n name = models.CharField(db_index=True, blank=False, null=False,\n max_length=200)\n description = models.CharField(blank=True, null=True, max_length=1000)\n home_url = models.URLField(blank=False, null=False)\n callback_url = models.URLField(blank=False, null=False)\n\n def deactivate(self, save=False):\n \"\"\"\n Deactivate an ApiOAuth2Application\n\n Does not delete the database record, but revokes all tokens and sets a\n flag that hides this instance from API\n \"\"\"\n client = cas.get_client()\n resp = client.revoke_application_tokens(self.client_id, self.\n client_secret)\n self.is_active = False\n if save:\n self.save()\n return True\n\n def reset_secret(self, save=False):\n \"\"\"\n Reset the secret of an ApiOAuth2Application\n Revokes all tokens\n \"\"\"\n client = cas.get_client()\n client.revoke_application_tokens(self.client_id, self.client_secret)\n self.client_secret = generate_client_secret()\n if save:\n self.save()\n return True\n\n @property\n def url(self):\n return '/settings/applications/{}/'.format(self.client_id)\n\n @property\n def absolute_url(self):\n return urljoin(settings.DOMAIN, 
self.url)\n\n @property\n def absolute_api_v2_url(self):\n path = '/applications/{}/'.format(self.client_id)\n return api_v2_url(path)\n\n def get_absolute_url(self):\n return self.absolute_api_v2_url\n\n\n<mask token>\n\n\nclass ApiOAuth2PersonalToken(base.ObjectIDMixin, base.BaseModel):\n \"\"\"Information for user-created personal access tokens\n\n This collection is also used by CAS to create the master list of available tokens.\n Any changes made to field names in this model must be echoed in the CAS implementation.\n \"\"\"\n token_id = models.CharField(max_length=70, default=generate_token_id,\n unique=True)\n owner = models.ForeignKey('OSFUser', db_index=True, blank=True, null=\n True, on_delete=models.SET_NULL)\n name = models.CharField(max_length=100, blank=False, null=False,\n db_index=True)\n scopes = models.ManyToManyField('ApiOAuth2Scope', related_name='tokens',\n blank=False)\n is_active = models.BooleanField(default=True, db_index=True)\n\n def deactivate(self, save=False):\n \"\"\"\n Deactivate an ApiOAuth2PersonalToken\n\n Does not delete the database record, but hides this instance from API\n \"\"\"\n client = cas.get_client()\n try:\n resp = client.revoke_tokens({'token': self.token_id})\n except cas.CasHTTPError as e:\n if e.code == 400:\n pass\n else:\n raise e\n self.is_active = False\n if save:\n self.save()\n return True\n\n @property\n def url(self):\n return '/settings/tokens/{}/'.format(self._id)\n\n @property\n def absolute_url(self):\n return urljoin(settings.DOMAIN, self.url)\n\n @property\n def absolute_api_v2_url(self):\n path = '/tokens/{}/'.format(self._id)\n return api_v2_url(path)\n\n def get_absolute_url(self):\n return self.absolute_api_v2_url\n",
"step-3": "<mask token>\n\n\nclass ApiOAuth2Scope(base.ObjectIDMixin, base.BaseModel):\n \"\"\"\n Store information about recognized OAuth2 scopes. Only scopes registered under this database model can\n be requested by third parties.\n \"\"\"\n name = models.CharField(max_length=50, unique=True, db_index=True, null\n =False, blank=False)\n description = models.CharField(max_length=255, null=False, blank=False)\n is_active = models.BooleanField(default=True, db_index=True)\n is_public = models.BooleanField(default=True, db_index=True)\n\n def absolute_url(self):\n return urljoin(settings.API_DOMAIN, '/v2/scopes/{}/'.format(self.name))\n\n\n<mask token>\n\n\nclass ApiOAuth2Application(base.ObjectIDMixin, base.BaseModel):\n \"\"\"Registration and key for user-created OAuth API applications\n\n This collection is also used by CAS to create the master list of available applications.\n Any changes made to field names in this model must be echoed in the CAS implementation.\n \"\"\"\n client_id = models.CharField(default=generate_client_id, unique=True,\n max_length=50, db_index=True)\n client_secret = models.CharField(default=generate_client_secret,\n max_length=40)\n is_active = models.BooleanField(default=True, db_index=True)\n owner = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=\n models.SET_NULL)\n name = models.CharField(db_index=True, blank=False, null=False,\n max_length=200)\n description = models.CharField(blank=True, null=True, max_length=1000)\n home_url = models.URLField(blank=False, null=False)\n callback_url = models.URLField(blank=False, null=False)\n\n def deactivate(self, save=False):\n \"\"\"\n Deactivate an ApiOAuth2Application\n\n Does not delete the database record, but revokes all tokens and sets a\n flag that hides this instance from API\n \"\"\"\n client = cas.get_client()\n resp = client.revoke_application_tokens(self.client_id, self.\n client_secret)\n self.is_active = False\n if save:\n self.save()\n return True\n\n def 
reset_secret(self, save=False):\n \"\"\"\n Reset the secret of an ApiOAuth2Application\n Revokes all tokens\n \"\"\"\n client = cas.get_client()\n client.revoke_application_tokens(self.client_id, self.client_secret)\n self.client_secret = generate_client_secret()\n if save:\n self.save()\n return True\n\n @property\n def url(self):\n return '/settings/applications/{}/'.format(self.client_id)\n\n @property\n def absolute_url(self):\n return urljoin(settings.DOMAIN, self.url)\n\n @property\n def absolute_api_v2_url(self):\n path = '/applications/{}/'.format(self.client_id)\n return api_v2_url(path)\n\n def get_absolute_url(self):\n return self.absolute_api_v2_url\n\n\n<mask token>\n\n\nclass ApiOAuth2PersonalToken(base.ObjectIDMixin, base.BaseModel):\n \"\"\"Information for user-created personal access tokens\n\n This collection is also used by CAS to create the master list of available tokens.\n Any changes made to field names in this model must be echoed in the CAS implementation.\n \"\"\"\n token_id = models.CharField(max_length=70, default=generate_token_id,\n unique=True)\n owner = models.ForeignKey('OSFUser', db_index=True, blank=True, null=\n True, on_delete=models.SET_NULL)\n name = models.CharField(max_length=100, blank=False, null=False,\n db_index=True)\n scopes = models.ManyToManyField('ApiOAuth2Scope', related_name='tokens',\n blank=False)\n is_active = models.BooleanField(default=True, db_index=True)\n\n def deactivate(self, save=False):\n \"\"\"\n Deactivate an ApiOAuth2PersonalToken\n\n Does not delete the database record, but hides this instance from API\n \"\"\"\n client = cas.get_client()\n try:\n resp = client.revoke_tokens({'token': self.token_id})\n except cas.CasHTTPError as e:\n if e.code == 400:\n pass\n else:\n raise e\n self.is_active = False\n if save:\n self.save()\n return True\n\n @property\n def url(self):\n return '/settings/tokens/{}/'.format(self._id)\n\n @property\n def absolute_url(self):\n return urljoin(settings.DOMAIN, 
self.url)\n\n @property\n def absolute_api_v2_url(self):\n path = '/tokens/{}/'.format(self._id)\n return api_v2_url(path)\n\n def get_absolute_url(self):\n return self.absolute_api_v2_url\n",
"step-4": "import uuid\nfrom website.util import api_v2_url\nfrom django.db import models\nfrom osf.models import base\nfrom website.security import random_string\nfrom framework.auth import cas\nfrom website import settings\nfrom future.moves.urllib.parse import urljoin\n\n\ndef generate_client_secret():\n return random_string(length=40)\n\n\nclass ApiOAuth2Scope(base.ObjectIDMixin, base.BaseModel):\n \"\"\"\n Store information about recognized OAuth2 scopes. Only scopes registered under this database model can\n be requested by third parties.\n \"\"\"\n name = models.CharField(max_length=50, unique=True, db_index=True, null\n =False, blank=False)\n description = models.CharField(max_length=255, null=False, blank=False)\n is_active = models.BooleanField(default=True, db_index=True)\n is_public = models.BooleanField(default=True, db_index=True)\n\n def absolute_url(self):\n return urljoin(settings.API_DOMAIN, '/v2/scopes/{}/'.format(self.name))\n\n\ndef generate_client_id():\n return uuid.uuid4().hex\n\n\nclass ApiOAuth2Application(base.ObjectIDMixin, base.BaseModel):\n \"\"\"Registration and key for user-created OAuth API applications\n\n This collection is also used by CAS to create the master list of available applications.\n Any changes made to field names in this model must be echoed in the CAS implementation.\n \"\"\"\n client_id = models.CharField(default=generate_client_id, unique=True,\n max_length=50, db_index=True)\n client_secret = models.CharField(default=generate_client_secret,\n max_length=40)\n is_active = models.BooleanField(default=True, db_index=True)\n owner = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=\n models.SET_NULL)\n name = models.CharField(db_index=True, blank=False, null=False,\n max_length=200)\n description = models.CharField(blank=True, null=True, max_length=1000)\n home_url = models.URLField(blank=False, null=False)\n callback_url = models.URLField(blank=False, null=False)\n\n def deactivate(self, save=False):\n 
\"\"\"\n Deactivate an ApiOAuth2Application\n\n Does not delete the database record, but revokes all tokens and sets a\n flag that hides this instance from API\n \"\"\"\n client = cas.get_client()\n resp = client.revoke_application_tokens(self.client_id, self.\n client_secret)\n self.is_active = False\n if save:\n self.save()\n return True\n\n def reset_secret(self, save=False):\n \"\"\"\n Reset the secret of an ApiOAuth2Application\n Revokes all tokens\n \"\"\"\n client = cas.get_client()\n client.revoke_application_tokens(self.client_id, self.client_secret)\n self.client_secret = generate_client_secret()\n if save:\n self.save()\n return True\n\n @property\n def url(self):\n return '/settings/applications/{}/'.format(self.client_id)\n\n @property\n def absolute_url(self):\n return urljoin(settings.DOMAIN, self.url)\n\n @property\n def absolute_api_v2_url(self):\n path = '/applications/{}/'.format(self.client_id)\n return api_v2_url(path)\n\n def get_absolute_url(self):\n return self.absolute_api_v2_url\n\n\ndef generate_token_id():\n return random_string(length=70)\n\n\nclass ApiOAuth2PersonalToken(base.ObjectIDMixin, base.BaseModel):\n \"\"\"Information for user-created personal access tokens\n\n This collection is also used by CAS to create the master list of available tokens.\n Any changes made to field names in this model must be echoed in the CAS implementation.\n \"\"\"\n token_id = models.CharField(max_length=70, default=generate_token_id,\n unique=True)\n owner = models.ForeignKey('OSFUser', db_index=True, blank=True, null=\n True, on_delete=models.SET_NULL)\n name = models.CharField(max_length=100, blank=False, null=False,\n db_index=True)\n scopes = models.ManyToManyField('ApiOAuth2Scope', related_name='tokens',\n blank=False)\n is_active = models.BooleanField(default=True, db_index=True)\n\n def deactivate(self, save=False):\n \"\"\"\n Deactivate an ApiOAuth2PersonalToken\n\n Does not delete the database record, but hides this instance from API\n 
\"\"\"\n client = cas.get_client()\n try:\n resp = client.revoke_tokens({'token': self.token_id})\n except cas.CasHTTPError as e:\n if e.code == 400:\n pass\n else:\n raise e\n self.is_active = False\n if save:\n self.save()\n return True\n\n @property\n def url(self):\n return '/settings/tokens/{}/'.format(self._id)\n\n @property\n def absolute_url(self):\n return urljoin(settings.DOMAIN, self.url)\n\n @property\n def absolute_api_v2_url(self):\n path = '/tokens/{}/'.format(self._id)\n return api_v2_url(path)\n\n def get_absolute_url(self):\n return self.absolute_api_v2_url\n",
"step-5": "import uuid\n\nfrom website.util import api_v2_url\n\nfrom django.db import models\nfrom osf.models import base\nfrom website.security import random_string\n\nfrom framework.auth import cas\n\nfrom website import settings\nfrom future.moves.urllib.parse import urljoin\n\n\ndef generate_client_secret():\n return random_string(length=40)\n\n\nclass ApiOAuth2Scope(base.ObjectIDMixin, base.BaseModel):\n \"\"\"\n Store information about recognized OAuth2 scopes. Only scopes registered under this database model can\n be requested by third parties.\n \"\"\"\n name = models.CharField(max_length=50, unique=True, db_index=True, null=False, blank=False)\n description = models.CharField(max_length=255, null=False, blank=False)\n is_active = models.BooleanField(default=True, db_index=True) # TODO: Add mechanism to deactivate a scope?\n is_public = models.BooleanField(default=True, db_index=True)\n\n def absolute_url(self):\n return urljoin(settings.API_DOMAIN, '/v2/scopes/{}/'.format(self.name))\n\n\ndef generate_client_id():\n return uuid.uuid4().hex\n\n\nclass ApiOAuth2Application(base.ObjectIDMixin, base.BaseModel):\n \"\"\"Registration and key for user-created OAuth API applications\n\n This collection is also used by CAS to create the master list of available applications.\n Any changes made to field names in this model must be echoed in the CAS implementation.\n \"\"\"\n\n # Client ID and secret. 
Use separate ID field so ID format doesn't\n # have to be restricted to database internals.\n # Not *guaranteed* unique, but very unlikely\n client_id = models.CharField(default=generate_client_id,\n unique=True,\n max_length=50,\n db_index=True)\n\n client_secret = models.CharField(default=generate_client_secret, max_length=40)\n\n is_active = models.BooleanField(default=True, # Set to False if application is deactivated\n db_index=True)\n\n owner = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=models.SET_NULL)\n\n # User-specified application descriptors\n name = models.CharField(db_index=True, blank=False, null=False, max_length=200)\n description = models.CharField(blank=True, null=True, max_length=1000)\n\n home_url = models.URLField(blank=False, null=False)\n callback_url = models.URLField(blank=False, null=False)\n\n def deactivate(self, save=False):\n \"\"\"\n Deactivate an ApiOAuth2Application\n\n Does not delete the database record, but revokes all tokens and sets a\n flag that hides this instance from API\n \"\"\"\n client = cas.get_client()\n # Will raise a CasHttpError if deletion fails, which will also stop setting of active=False.\n resp = client.revoke_application_tokens(self.client_id, self.client_secret) # noqa\n\n self.is_active = False\n\n if save:\n self.save()\n return True\n\n def reset_secret(self, save=False):\n \"\"\"\n Reset the secret of an ApiOAuth2Application\n Revokes all tokens\n \"\"\"\n client = cas.get_client()\n client.revoke_application_tokens(self.client_id, self.client_secret)\n self.client_secret = generate_client_secret()\n\n if save:\n self.save()\n return True\n\n @property\n def url(self):\n return '/settings/applications/{}/'.format(self.client_id)\n\n @property\n def absolute_url(self):\n return urljoin(settings.DOMAIN, self.url)\n\n # Properties used by Django and DRF \"Links: self\" field\n @property\n def absolute_api_v2_url(self):\n path = '/applications/{}/'.format(self.client_id)\n return 
api_v2_url(path)\n\n # used by django and DRF\n def get_absolute_url(self):\n return self.absolute_api_v2_url\n\n\ndef generate_token_id():\n return random_string(length=70)\n\n\nclass ApiOAuth2PersonalToken(base.ObjectIDMixin, base.BaseModel):\n \"\"\"Information for user-created personal access tokens\n\n This collection is also used by CAS to create the master list of available tokens.\n Any changes made to field names in this model must be echoed in the CAS implementation.\n \"\"\"\n # Name of the field being `token_id` is a CAS requirement.\n # This is the actual value of the token that's used to authenticate\n token_id = models.CharField(max_length=70, default=generate_token_id,\n unique=True)\n\n owner = models.ForeignKey('OSFUser', db_index=True, blank=True, null=True, on_delete=models.SET_NULL)\n name = models.CharField(max_length=100, blank=False, null=False, db_index=True)\n\n scopes = models.ManyToManyField('ApiOAuth2Scope', related_name='tokens', blank=False)\n\n is_active = models.BooleanField(default=True, db_index=True)\n\n def deactivate(self, save=False):\n \"\"\"\n Deactivate an ApiOAuth2PersonalToken\n\n Does not delete the database record, but hides this instance from API\n \"\"\"\n client = cas.get_client()\n # Will raise a CasHttpError if deletion fails for any reason other than the token\n # not yet being created. 
This will also stop setting of active=False.\n try:\n resp = client.revoke_tokens({'token': self.token_id}) # noqa\n except cas.CasHTTPError as e:\n if e.code == 400:\n pass # Token hasn't been used yet, so not created in cas\n else:\n raise e\n\n self.is_active = False\n\n if save:\n self.save()\n return True\n\n @property\n def url(self):\n return '/settings/tokens/{}/'.format(self._id)\n\n @property\n def absolute_url(self):\n return urljoin(settings.DOMAIN, self.url)\n\n # Properties used by Django and DRF \"Links: self\" field\n @property\n def absolute_api_v2_url(self):\n path = '/tokens/{}/'.format(self._id)\n return api_v2_url(path)\n\n # used by django and DRF\n def get_absolute_url(self):\n return self.absolute_api_v2_url\n",
"step-ids": [
17,
18,
21,
25,
26
]
}
|
[
17,
18,
21,
25,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):
    """Collect Pharmacy 4 Less (AU) store locations via the Store Locator Widgets service."""
    name = 'pharmacy_4_less_au'
    item_attributes = {'brand': 'Pharmacy 4 Less', 'brand_wikidata':
        'Q63367608'}
    # Store Locator Widgets account key for this brand's locator.
    key = '6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX'
<|reserved_special_token_1|>
from locations.storefinders.storelocatorwidgets import StoreLocatorWidgetsSpider
class Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):
    """Collect Pharmacy 4 Less (AU) store locations via the Store Locator Widgets service."""
    name = 'pharmacy_4_less_au'
    item_attributes = {'brand': 'Pharmacy 4 Less', 'brand_wikidata':
        'Q63367608'}
    # Store Locator Widgets account key for this brand's locator.
    key = '6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX'
<|reserved_special_token_1|>
from locations.storefinders.storelocatorwidgets import StoreLocatorWidgetsSpider
class Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):
name = "pharmacy_4_less_au"
item_attributes = {"brand": "Pharmacy 4 Less", "brand_wikidata": "Q63367608"}
key = "6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX"
|
flexible
|
{
"blob_id": "aad3c104432a1a028d96263236133e495536ee69",
"index": 6644,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n name = 'pharmacy_4_less_au'\n item_attributes = {'brand': 'Pharmacy 4 Less', 'brand_wikidata':\n 'Q63367608'}\n key = '6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX'\n",
"step-4": "from locations.storefinders.storelocatorwidgets import StoreLocatorWidgetsSpider\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n name = 'pharmacy_4_less_au'\n item_attributes = {'brand': 'Pharmacy 4 Less', 'brand_wikidata':\n 'Q63367608'}\n key = '6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX'\n",
"step-5": "from locations.storefinders.storelocatorwidgets import StoreLocatorWidgetsSpider\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n name = \"pharmacy_4_less_au\"\n item_attributes = {\"brand\": \"Pharmacy 4 Less\", \"brand_wikidata\": \"Q63367608\"}\n key = \"6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from pyspark.sql import SQLContext, Row
from pyspark import SparkContext, SparkConf
from pyspark.sql.functions import col
import collections
# Create a Spark Session (the config bit is only for windows)
#conf = SparkConf().setAppName("SQL App").setMaster("local")
sc = SparkContext()
sqlCtx = SQLContext(sc)
def mapper(line):
fields = line.split(",")
return Row(ID = int(fields[0]), name = fields[1].encode("utf-8"), age = int(fields[2]), numFriends = int(fields[3]))
lines = sc.textFile("fakefriends.csv")
people = lines.map(mapper)
# Infer the schema and register the DataFrame as a table
schemaPeople = sqlCtx.createDataFrame(people).cache()
schemaPeople.registerTempTable("people")
# SQL can be run over DataFrames that have been registered as a table
teenagers = sqlCtx.sql("SELECT * FROM people WHERE age >= 13 AND age <= 19")
print(teenagers.dtypes)
for teen in teenagers.collect():
print(teen)
schemaPeople.groupBy("age").count().orderBy(col("age").desc()).show()
|
normal
|
{
"blob_id": "e4bc2e97b70e2dc91dc86457866ec6b3531ef803",
"index": 8772,
"step-1": "<mask token>\n\n\ndef mapper(line):\n fields = line.split(',')\n return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(\n fields[2]), numFriends=int(fields[3]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mapper(line):\n fields = line.split(',')\n return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(\n fields[2]), numFriends=int(fields[3]))\n\n\n<mask token>\nschemaPeople.registerTempTable('people')\n<mask token>\nprint(teenagers.dtypes)\nfor teen in teenagers.collect():\n print(teen)\nschemaPeople.groupBy('age').count().orderBy(col('age').desc()).show()\n",
"step-3": "<mask token>\nsc = SparkContext()\nsqlCtx = SQLContext(sc)\n\n\ndef mapper(line):\n fields = line.split(',')\n return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(\n fields[2]), numFriends=int(fields[3]))\n\n\nlines = sc.textFile('fakefriends.csv')\npeople = lines.map(mapper)\nschemaPeople = sqlCtx.createDataFrame(people).cache()\nschemaPeople.registerTempTable('people')\nteenagers = sqlCtx.sql('SELECT * FROM people WHERE age >= 13 AND age <= 19')\nprint(teenagers.dtypes)\nfor teen in teenagers.collect():\n print(teen)\nschemaPeople.groupBy('age').count().orderBy(col('age').desc()).show()\n",
"step-4": "from pyspark.sql import SQLContext, Row\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql.functions import col\nimport collections\nsc = SparkContext()\nsqlCtx = SQLContext(sc)\n\n\ndef mapper(line):\n fields = line.split(',')\n return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(\n fields[2]), numFriends=int(fields[3]))\n\n\nlines = sc.textFile('fakefriends.csv')\npeople = lines.map(mapper)\nschemaPeople = sqlCtx.createDataFrame(people).cache()\nschemaPeople.registerTempTable('people')\nteenagers = sqlCtx.sql('SELECT * FROM people WHERE age >= 13 AND age <= 19')\nprint(teenagers.dtypes)\nfor teen in teenagers.collect():\n print(teen)\nschemaPeople.groupBy('age').count().orderBy(col('age').desc()).show()\n",
"step-5": "from pyspark.sql import SQLContext, Row\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql.functions import col\n\nimport collections\n\n# Create a Spark Session (the config bit is only for windows)\n#conf = SparkConf().setAppName(\"SQL App\").setMaster(\"local\")\nsc = SparkContext()\n\nsqlCtx = SQLContext(sc)\n\ndef mapper(line):\n\tfields = line.split(\",\")\n\treturn Row(ID = int(fields[0]), name = fields[1].encode(\"utf-8\"), age = int(fields[2]), numFriends = int(fields[3]))\n\nlines = sc.textFile(\"fakefriends.csv\")\npeople = lines.map(mapper)\n\n# Infer the schema and register the DataFrame as a table\nschemaPeople = sqlCtx.createDataFrame(people).cache()\nschemaPeople.registerTempTable(\"people\")\n\n# SQL can be run over DataFrames that have been registered as a table\nteenagers = sqlCtx.sql(\"SELECT * FROM people WHERE age >= 13 AND age <= 19\")\nprint(teenagers.dtypes)\n\nfor teen in teenagers.collect():\n\tprint(teen)\n\nschemaPeople.groupBy(\"age\").count().orderBy(col(\"age\").desc()).show()\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
message = input()
vowel = 'aeiouAEIOU'
consonant = 'bcdfghjklmnpqrstvwxyz'
consonant += consonant.upper()
vowel_count = 0
consonant_count = 0
for c in message:
if c in vowel:
vowel_count += 1
elif c in consonant:
consonant_count += 1
print(vowel_count, consonant_count)
|
normal
|
{
"blob_id": "edf704d720abdb09d176937664c9ba98bcd253a5",
"index": 8320,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconsonant += consonant.upper()\n<mask token>\nfor c in message:\n if c in vowel:\n vowel_count += 1\n elif c in consonant:\n consonant_count += 1\nprint(vowel_count, consonant_count)\n",
"step-3": "message = input()\nvowel = 'aeiouAEIOU'\nconsonant = 'bcdfghjklmnpqrstvwxyz'\nconsonant += consonant.upper()\nvowel_count = 0\nconsonant_count = 0\nfor c in message:\n if c in vowel:\n vowel_count += 1\n elif c in consonant:\n consonant_count += 1\nprint(vowel_count, consonant_count)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import torch
import tarfile
import pickle
import pandas
import json
import argparse
from pathlib import Path
import numpy as np
import shutil
from shutil import copyfile
import os
import re
import pandas as pd
import sys
from numpy import asarray
from numpy import savetxt
sys.path.append("..")
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help='dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help='maximum length of sequence')
parser.add_argument('--ext', type=str, default='tar.gz', help='extention of files with sequences')
parser.add_argument('--datetime', type=bool, default=False, help='if time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default = './', help='path to save results')
parser.add_argument('--maxsize', type=int, default=None, help='max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
save_dir - directory for saving transform data
maxsize (int) - maximum number of sequences to load
maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated
ext (str) - extension of files in data_dir directory
datetime (bool) - variable meaning if time values in files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
os.makedirs(save_dir)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == "pkl":
with open(Path(args.data_dir, "fx_labels"), "rb") as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range (len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
args = parse_arguments()
print(args)
tranform_data(args)
|
normal
|
{
"blob_id": "da55d9a6534525e58b6c1d2db997e90a1c9b0f36",
"index": 1427,
"step-1": "<mask token>\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = 
pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append('..')\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 
'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\n<mask token>\nprint(args)\ntranform_data(args)\n",
"step-3": "<mask token>\nsys.path.append('..')\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 
'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\nargs = parse_arguments()\nprint(args)\ntranform_data(args)\n",
"step-4": "import torch\nimport tarfile\nimport pickle\nimport pandas\nimport json\nimport argparse\nfrom pathlib import Path\nimport numpy as np\nimport shutil\nfrom shutil import copyfile\nimport os\nimport re\nimport pandas as pd\nimport sys\nfrom numpy import asarray\nfrom numpy import savetxt\nsys.path.append('..')\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n 
gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\nargs = parse_arguments()\nprint(args)\ntranform_data(args)\n",
"step-5": "import torch\nimport tarfile\nimport pickle\nimport pandas\nimport json\nimport argparse\nfrom pathlib import Path\nimport numpy as np\nimport shutil\nfrom shutil import copyfile\nimport os\nimport re\nimport pandas as pd\nimport sys\nfrom numpy import asarray\nfrom numpy import savetxt\nsys.path.append(\"..\")\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help='dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help='maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help='extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help='if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default = './', help='path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help='max number of sequences')\n args = parser.parse_args()\n return args\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen \n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == \"pkl\":\n with open(Path(args.data_dir, \"fx_labels\"), \"rb\") as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n 
for i in range (len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n \n\n\n\nargs = parse_arguments()\nprint(args)\ntranform_data(args)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# File for the information gain feature selection algorithm
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import mutual_info_classif
# The function which will be called
def get_features(raw_data, raw_ids):
"""
Calculate the information gain of a dataset. This function takes three parameters:
1. data = The dataset for whose feature the IG should be calculated
2. split_attribute_name = the name of the feature for which the information gain should be calculated
3. target_name = the name of the target feature. The default for this example is "class"
"""
df = pd.DataFrame(raw_data)
df["person"] = raw_ids
return_columns = []
cv = CountVectorizer(max_df=1, min_df=1,
max_features=72, stop_words='english')
for column in df:
if column != "person":
X = df[column].astype(str)
Y = df["person"].astype(str)
X_vec = cv.fit_transform(X)
ig = mutual_info_classif(X_vec, Y, discrete_features=True)
avg = sum(ig)
if avg > .5 and column != "person":
return_columns.append(column)
return return_columns
|
normal
|
{
"blob_id": "ca403e8820a3e34e0eb11b2fdd5d0fc77e3ffdc4",
"index": 9394,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_features(raw_data, raw_ids):\n \"\"\"\n Calculate the information gain of a dataset. This function takes three parameters:\n 1. data = The dataset for whose feature the IG should be calculated\n 2. split_attribute_name = the name of the feature for which the information gain should be calculated\n 3. target_name = the name of the target feature. The default for this example is \"class\"\n \"\"\"\n df = pd.DataFrame(raw_data)\n df['person'] = raw_ids\n return_columns = []\n cv = CountVectorizer(max_df=1, min_df=1, max_features=72, stop_words=\n 'english')\n for column in df:\n if column != 'person':\n X = df[column].astype(str)\n Y = df['person'].astype(str)\n X_vec = cv.fit_transform(X)\n ig = mutual_info_classif(X_vec, Y, discrete_features=True)\n avg = sum(ig)\n if avg > 0.5 and column != 'person':\n return_columns.append(column)\n return return_columns\n",
"step-3": "import numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_selection import mutual_info_classif\n\n\ndef get_features(raw_data, raw_ids):\n \"\"\"\n Calculate the information gain of a dataset. This function takes three parameters:\n 1. data = The dataset for whose feature the IG should be calculated\n 2. split_attribute_name = the name of the feature for which the information gain should be calculated\n 3. target_name = the name of the target feature. The default for this example is \"class\"\n \"\"\"\n df = pd.DataFrame(raw_data)\n df['person'] = raw_ids\n return_columns = []\n cv = CountVectorizer(max_df=1, min_df=1, max_features=72, stop_words=\n 'english')\n for column in df:\n if column != 'person':\n X = df[column].astype(str)\n Y = df['person'].astype(str)\n X_vec = cv.fit_transform(X)\n ig = mutual_info_classif(X_vec, Y, discrete_features=True)\n avg = sum(ig)\n if avg > 0.5 and column != 'person':\n return_columns.append(column)\n return return_columns\n",
"step-4": "# File for the information gain feature selection algorithm\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_selection import mutual_info_classif\n\n# The function which will be called\ndef get_features(raw_data, raw_ids):\n\n \"\"\"\n Calculate the information gain of a dataset. This function takes three parameters:\n 1. data = The dataset for whose feature the IG should be calculated\n 2. split_attribute_name = the name of the feature for which the information gain should be calculated\n 3. target_name = the name of the target feature. The default for this example is \"class\"\n \"\"\"\n df = pd.DataFrame(raw_data)\n df[\"person\"] = raw_ids\n\n return_columns = []\n cv = CountVectorizer(max_df=1, min_df=1,\n max_features=72, stop_words='english')\n for column in df:\n if column != \"person\":\n X = df[column].astype(str)\n Y = df[\"person\"].astype(str)\n X_vec = cv.fit_transform(X)\n ig = mutual_info_classif(X_vec, Y, discrete_features=True)\n avg = sum(ig)\n if avg > .5 and column != \"person\":\n return_columns.append(column)\n\n return return_columns\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class PositionReader:
def __init__(self):
self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',
PoseStamped, self.callback)
self.pub = rospy.Publisher('object_position', PoseStamped,
queue_size=10)
rospy.init_node('PositionReader', anonymous=False)
self.data = PoseStamped()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PositionReader:
def __init__(self):
self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',
PoseStamped, self.callback)
self.pub = rospy.Publisher('object_position', PoseStamped,
queue_size=10)
rospy.init_node('PositionReader', anonymous=False)
self.data = PoseStamped()
def callback(self, data):
if self.data.pose.position.x != data.pose.position.x:
self.pub.publish(data)
print(data)
self.data = data
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PositionReader:
def __init__(self):
self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',
PoseStamped, self.callback)
self.pub = rospy.Publisher('object_position', PoseStamped,
queue_size=10)
rospy.init_node('PositionReader', anonymous=False)
self.data = PoseStamped()
def callback(self, data):
if self.data.pose.position.x != data.pose.position.x:
self.pub.publish(data)
print(data)
self.data = data
if __name__ == '__main__':
try:
PositionReader()
rospy.spin()
except rospy.ROSInterruptException:
cv2.destroyAllWindows()
pass
<|reserved_special_token_1|>
import rospy
import cv2
from geometry_msgs.msg import PoseStamped
class PositionReader:
def __init__(self):
self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',
PoseStamped, self.callback)
self.pub = rospy.Publisher('object_position', PoseStamped,
queue_size=10)
rospy.init_node('PositionReader', anonymous=False)
self.data = PoseStamped()
def callback(self, data):
if self.data.pose.position.x != data.pose.position.x:
self.pub.publish(data)
print(data)
self.data = data
if __name__ == '__main__':
try:
PositionReader()
rospy.spin()
except rospy.ROSInterruptException:
cv2.destroyAllWindows()
pass
<|reserved_special_token_1|>
#!/usr/bin/env python
import rospy
import cv2
from geometry_msgs.msg import PoseStamped
class PositionReader:
def __init__(self):
self.image_sub = rospy.Subscriber(
"/visp_auto_tracker/object_position", PoseStamped, self.callback)
self.pub = rospy.Publisher('object_position', PoseStamped, queue_size=10)
rospy.init_node('PositionReader', anonymous=False)
self.data = PoseStamped()
def callback(self, data):
if(self.data.pose.position.x != data.pose.position.x):
self.pub.publish(data)
print(data)
self.data = data
if __name__ == '__main__':
try:
PositionReader()
rospy.spin()
except rospy.ROSInterruptException:
cv2.destroyAllWindows()
pass
|
flexible
|
{
"blob_id": "26ac0c94d0ab70d90854ca2c913ef0f633b54a3c",
"index": 4527,
"step-1": "<mask token>\n\n\nclass PositionReader:\n\n def __init__(self):\n self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',\n PoseStamped, self.callback)\n self.pub = rospy.Publisher('object_position', PoseStamped,\n queue_size=10)\n rospy.init_node('PositionReader', anonymous=False)\n self.data = PoseStamped()\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PositionReader:\n\n def __init__(self):\n self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',\n PoseStamped, self.callback)\n self.pub = rospy.Publisher('object_position', PoseStamped,\n queue_size=10)\n rospy.init_node('PositionReader', anonymous=False)\n self.data = PoseStamped()\n\n def callback(self, data):\n if self.data.pose.position.x != data.pose.position.x:\n self.pub.publish(data)\n print(data)\n self.data = data\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PositionReader:\n\n def __init__(self):\n self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',\n PoseStamped, self.callback)\n self.pub = rospy.Publisher('object_position', PoseStamped,\n queue_size=10)\n rospy.init_node('PositionReader', anonymous=False)\n self.data = PoseStamped()\n\n def callback(self, data):\n if self.data.pose.position.x != data.pose.position.x:\n self.pub.publish(data)\n print(data)\n self.data = data\n\n\nif __name__ == '__main__':\n try:\n PositionReader()\n rospy.spin()\n except rospy.ROSInterruptException:\n cv2.destroyAllWindows()\n pass\n",
"step-4": "import rospy\nimport cv2\nfrom geometry_msgs.msg import PoseStamped\n\n\nclass PositionReader:\n\n def __init__(self):\n self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',\n PoseStamped, self.callback)\n self.pub = rospy.Publisher('object_position', PoseStamped,\n queue_size=10)\n rospy.init_node('PositionReader', anonymous=False)\n self.data = PoseStamped()\n\n def callback(self, data):\n if self.data.pose.position.x != data.pose.position.x:\n self.pub.publish(data)\n print(data)\n self.data = data\n\n\nif __name__ == '__main__':\n try:\n PositionReader()\n rospy.spin()\n except rospy.ROSInterruptException:\n cv2.destroyAllWindows()\n pass\n",
"step-5": "#!/usr/bin/env python\n\nimport rospy\nimport cv2\nfrom geometry_msgs.msg import PoseStamped\n\n\nclass PositionReader:\n\n def __init__(self):\n self.image_sub = rospy.Subscriber(\n \"/visp_auto_tracker/object_position\", PoseStamped, self.callback)\n self.pub = rospy.Publisher('object_position', PoseStamped, queue_size=10)\n rospy.init_node('PositionReader', anonymous=False)\n self.data = PoseStamped()\n\n def callback(self, data):\n if(self.data.pose.position.x != data.pose.position.x):\n self.pub.publish(data)\n print(data)\n self.data = data\n\nif __name__ == '__main__':\n try:\n PositionReader()\n rospy.spin()\n except rospy.ROSInterruptException:\n cv2.destroyAllWindows()\n pass\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import tkinter as tk
from tkinter import Tk, BOTH,RIGHT,LEFT,END
from tkinter.ttk import Frame, Label, Style,Entry
from tkinter.ttk import Frame, Button, Style
import random
import time
class StartPage(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self,bg="#d0a3d8",height=200,width=200)
tk.Label(self, text="Mini Jeu: \n P-0", font=('Helvetica', 18, "bold")).pack(side="top", fill="x", pady=5)
bt=Button(self, text="Jouer",
command=lambda: master.switch_frame(PageOne,num=True))
bt.pack(fill=BOTH,expand=True)
# tk.Button(self, text="Go to page two",
# command=lambda: master.switch_frame(PageTwo)).pack()
class PageOne(tk.Frame):
    """Game screen: random red and blue circles are drawn and the player
    must count each colour.

    The first entry (under the blue banner) takes the number of blue
    circles, the second one (under the red banner) the number of red
    circles.  A label shows the elapsed time while a round is running.
    """

    def __init__(self, master):
        tk.Frame.__init__(self, master)
        self.master = master

        # Left column: timer label, the two answer entries and navigation.
        frame_left = Frame(self)
        self.frame_left = frame_left
        frame_left.pack(fill=BOTH, side=LEFT)

        # Elapsed-time display, refreshed every second by update_clock().
        self.label = tk.Label(frame_left, text="", font=('Helvetica', 10),
                              fg='red')
        self.label.pack()

        # Blue banner above the entry for the number of blue circles.
        self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
        self.bagniere_bleu.pack(side='top', anchor='c')
        self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')
        self.Nombre_1 = Entry(frame_left)
        self.Nombre_1.pack(side='top', anchor='w')

        # Red banner above the entry for the number of red circles.
        # (Fix: the original rebound self.bagniere_bleu here, losing the
        # reference to the blue banner.)
        self.bagniere_rouge = tk.Canvas(frame_left, width=50, height=3)
        self.bagniere_rouge.pack(side='top', anchor='c')
        self.bagniere_rouge.create_rectangle(0, 3, 50, 0, fill='red')
        self.Nombre_2 = Entry(frame_left)
        self.Nombre_2.pack(side='top', anchor='w')

        tk.Button(frame_left, text="Go back to start page",
                  command=lambda: master.switch_frame(StartPage)).pack(side='bottom')

        # Right side: canvas on which the circles are drawn each round.
        self.frame1 = Frame(self)
        self.frame1.pack(fill='x')
        self.rectangle = tk.Canvas(self.frame1)
        self.rectangle.pack()
        self.create_ret(self.rectangle)

        self.commencer_un_jeu()

    def create_circle(self, r, canvasName, color):
        """Draw one circle of radius ``r`` filled with ``color`` at a random
        position on ``canvasName`` and return its canvas item id."""
        x = random.randint(20, 300)
        y = random.randint(20, 250)
        return canvasName.create_oval(x - r, y - r, x + r, y + r, fill=color)

    def create_ret(self, canvas):
        """Paint the pale-yellow background rectangle of the play area."""
        return canvas.create_rectangle(0, 500, 500, 0, fill="#fdffdb")

    def _elapsed(self):
        """Format the time elapsed since the round started as HH:MM:SS."""
        self.temps_de_rect = time.strftime(
            "%H:%M:%S", time.gmtime(time.time() - self.debut))
        return self.temps_de_rect

    def update_clock(self):
        """Refresh the timer label; reschedules itself every second while a
        round is in progress (``self.fin`` is True)."""
        self.label.configure(text=self._elapsed())
        if self.fin:
            self.master.after(1000, self.update_clock)

    def commencer_un_jeu(self):
        """Start (or restart) a round: reset timer and entries, redraw the
        board and pick new random circle counts."""
        self.fin = True

        # On a replay, remove the "Rejouer" button and clear the previous
        # answers.  On the very first round these widgets do not exist yet,
        # which raises AttributeError (fix: the original used a bare except
        # that swallowed every exception).
        try:
            self.rejouer.destroy()
            self.label.config(text='')
            self.Nombre_2.delete(0, END)
            self.Nombre_1.delete(0, END)
        except AttributeError:
            pass

        self.bt_valider = tk.Button(self.frame_left, text='valider',
                                    command=lambda: self.fin_du_jeu())
        self.bt_valider.pack(side='top', anchor='w')

        # Restart the timer and its once-per-second refresh loop.
        self.debut = time.time()
        self.update_clock()

        # Fresh drawing surface for this round.
        self.rectangle.destroy()
        self.rectangle = tk.Canvas(self.frame1)
        self.rectangle.pack()
        self.create_ret(self.rectangle)

        self.nombre_j1 = random.randint(1, 10)  # blue circles to find
        self.nombre_j2 = random.randint(1, 10)  # red circles to find
        for _ in range(self.nombre_j2):
            self.create_circle(20, self.rectangle, 'red')
        for _ in range(self.nombre_j1):
            self.create_circle(20, self.rectangle, 'blue')

    def fin_du_jeu(self):
        """Stop the round, compare the player's answers with the real circle
        counts and display the outcome."""
        self.fin = False
        try:
            guess_blue = int(self.Nombre_1.get())
            guess_red = int(self.Nombre_2.get())
        except ValueError:
            # Fix: empty or non-numeric input counts as a wrong answer
            # instead of crashing the button callback.
            guess_blue = guess_red = None
        won = guess_blue == self.nombre_j1 and guess_red == self.nombre_j2
        self._show_outcome("Victoire" if won else "Defaite")

    def _show_outcome(self, message):
        """Shared end-of-round handling (was duplicated for win and loss):
        swap the "valider" button for a "Rejouer" one, freeze the timer and
        write ``message`` on the board."""
        self.bt_valider.destroy()
        self.rejouer = Button(self.frame_left, text="Rejouer",
                              command=lambda: self.commencer_un_jeu())
        self.rejouer.pack(side='top', fill='x')
        self.label.configure(text=self._elapsed())
        self.rectangle.create_text(200, 150, fill="darkblue",
                                   font="Times 20 italic bold", text=message)
class SampleApp(tk.Tk):
    """Root window that shows exactly one page frame at a time."""

    def __init__(self):
        super().__init__()
        self._frame = None          # currently displayed page, if any
        self.switch_frame(StartPage)

    def timer(self, frame_game):
        """Schedule ``frame_game.update_clock`` to run in one second."""
        self.after(1000, frame_game.update_clock)

    def switch_frame(self, frame_class, num=False):
        """Replace the visible frame with a fresh ``frame_class(self)``.

        ``num`` is accepted for compatibility with callers but unused.
        """
        replacement = frame_class(self)
        previous = self._frame
        if previous is not None:
            previous.destroy()
        self._frame = replacement
        replacement.pack()
class PageTwo(tk.Frame):
    """Spare second page (unused by the menu); links back to the start page."""

    def __init__(self, master):
        super().__init__(master)
        self.configure(bg='red')

        heading = tk.Label(self, text="Page two",
                           font=('Helvetica', 18, "bold"))
        heading.pack(side="top", fill="x", pady=5)

        back = tk.Button(self, text="Go back to start page",
                         command=lambda: master.switch_frame(StartPage))
        back.pack()
if __name__ == "__main__":
    # Script entry point: build the root window, give it a fixed initial
    # size and hand control over to the Tk event loop.
    app = SampleApp()
    app.geometry('800x800')
    app.mainloop()
|
normal
|
{
"blob_id": "4e6401672d4762b444bb679e4cc39ada04193a26",
"index": 1882,
"step-1": "<mask token>\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n <mask token>\n <mask token>\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n 
self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n 
tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n <mask token>\n\n def create_ret(self, canvas):\n return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = 
time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n 
def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass StartPage(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='#d0a3d8', height=200, width=200)\n tk.Label(self, text='Mini Jeu: \\n P-0', font=('Helvetica', 18, 'bold')\n ).pack(side='top', fill='x', pady=5)\n bt = Button(self, text='Jouer', command=lambda : master.\n switch_frame(PageOne, num=True))\n bt.pack(fill=BOTH, expand=True)\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n\n def create_circle(self, r, canvasName, color):\n x = random.randint(20, 300)\n y = random.randint(20, 250)\n x0 = x - r\n y0 = y - r\n x1 = x + r\n y1 = y + r\n return canvasName.create_oval(x0, y0, x1, y1, fill=color)\n\n def create_ret(self, canvas):\n return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = 
time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', 
time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\n<mask token>\n",
"step-4": "import tkinter as tk\nfrom tkinter import Tk, BOTH, RIGHT, LEFT, END\nfrom tkinter.ttk import Frame, Label, Style, Entry\nfrom tkinter.ttk import Frame, Button, Style\nimport random\nimport time\n\n\nclass StartPage(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='#d0a3d8', height=200, width=200)\n tk.Label(self, text='Mini Jeu: \\n P-0', font=('Helvetica', 18, 'bold')\n ).pack(side='top', fill='x', pady=5)\n bt = Button(self, text='Jouer', command=lambda : master.\n switch_frame(PageOne, num=True))\n bt.pack(fill=BOTH, expand=True)\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n\n def create_circle(self, r, canvasName, color):\n x = random.randint(20, 300)\n y = random.randint(20, 250)\n x0 = x - r\n y0 = y - r\n x1 = x + r\n y1 = y + r\n return canvasName.create_oval(x0, y0, x1, y1, fill=color)\n\n def 
create_ret(self, canvas):\n return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', 
command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\nif __name__ == '__main__':\n app = SampleApp()\n app.geometry('800x800')\n app.mainloop()\n",
"step-5": "\nimport tkinter as tk\nfrom tkinter import Tk, BOTH,RIGHT,LEFT,END\nfrom tkinter.ttk import Frame, Label, Style,Entry\nfrom tkinter.ttk import Frame, Button, Style\nimport random\nimport time\n\nclass StartPage(tk.Frame):\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n \n tk.Frame.configure(self,bg=\"#d0a3d8\",height=200,width=200)\n\n tk.Label(self, text=\"Mini Jeu: \\n P-0\", font=('Helvetica', 18, \"bold\")).pack(side=\"top\", fill=\"x\", pady=5)\n bt=Button(self, text=\"Jouer\",\n command=lambda: master.switch_frame(PageOne,num=True))\n bt.pack(fill=BOTH,expand=True)\n\n \n # tk.Button(self, text=\"Go to page two\",\n # command=lambda: master.switch_frame(PageTwo)).pack()\n\nclass PageOne(tk.Frame):\n def __init__(self, master):\n \n\n tk.Frame.__init__(self, master)\n # tk.Frame.configure(self,bg='blue')\n # tk.Label(self, text=\"Page de jeu\", font=('Helvetica', 18, \"bold\")).pack(side=\"top\", fill=BOTH, pady=5)\n \n frame_left=Frame(self)\n self.frame_left=frame_left\n frame_left.pack(fill=BOTH,side=LEFT)\n\n\n # add entry to this frame \n self.label=tk.Label(frame_left , text=\"\", font=('Helvetica', 10), fg='red')\n self.label.pack()\n\n self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)\n self.bagniere_bleu.pack(side='top',anchor='c')\n self.bagniere_bleu.create_rectangle(0,3,50,0,fill='blue')\n\n \n\n self.Nombre_1=Entry(frame_left)\n self.Nombre_1.pack(side='top',anchor='w')\n\n# bagnier pour differencier les couleurs\n self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)\n self.bagniere_bleu.pack(side='top',anchor='c')\n self.bagniere_bleu.create_rectangle(0,3,50,0,fill='red')\n\n\n self.Nombre_2=Entry(frame_left)\n self.Nombre_2.pack(side='top',anchor='w')\n\n tk.Button(frame_left, text=\"Go back to start page\",\n command=lambda: master.switch_frame(StartPage)).pack(side='bottom')\n\n \n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle=tk.Canvas(self.frame1)\n 
self.rectangle.pack()\n self.create_ret(self.rectangle)\n \n # self.update_clock()\n self.master=master\n self.commencer_un_jeu()\n\n \n def create_circle(self,r, canvasName,color): #center coordinates, radius\n x=random.randint(20,300)\n y=random.randint(20,250)\n x0 = x - r\n y0 = y - r\n x1 = x + r\n y1 = y + r\n return canvasName.create_oval(x0, y0, x1, y1,fill=color)\n def create_ret(self,canvas):\n return canvas.create_rectangle(0,500,500,0,fill=\"#fdffdb\")\n\n\n\n def update_clock(self):\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000,self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin=True\n try :\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0,END)\n self.Nombre_1.delete(0,END)\n\n except:\n pass\n\n\n self.bt_valider=tk.Button(self.frame_left,text='valider', command=lambda: self.fin_du_jeu())\n self. 
bt_valider.pack(side='top',anchor='w')\n\n self.debut=time.time()\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n \n\n self.rectangle.destroy()\n self.rectangle=tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n\n self.nombre_j1=random.randint(1,10)\n self.nombre_j2=random.randint(1,10)\n for _ in range(self.nombre_j2):\n self.create_circle(20,self.rectangle,'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20,self.rectangle,'blue')\n def fin_du_jeu(self):\n self.fin=False\n if(int(self.Nombre_1.get())==self.nombre_j1 ) and (int(self.Nombre_2.get())==self.nombre_j2):\n #jeu gagné\n \n self.bt_valider.destroy()\n self.rejouer=Button(self.frame_left, text=\"Rejouer\",\n command=lambda: self.commencer_un_jeu())\n \n self.rejouer.pack(side='top',fill='x')\n\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200,150,fill=\"darkblue\",font=\"Times 20 italic bold\",\n text=\"Victoire\")\n else:\n\n \n self.bt_valider.destroy()\n self.rejouer=Button(self.frame_left, text=\"Rejouer\",\n command=lambda: self.commencer_un_jeu())\n\n self.rejouer.pack(side='top',fill='x')\n\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200,150,fill=\"darkblue\",font=\"Times 20 italic bold\",\n text=\"Defaite\")\n\n\n \n\n \n\n\n\n\n \nclass SampleApp(tk.Tk):\n def __init__(self):\n\n tk.Tk.__init__(self)\n \n self._frame = None\n self.switch_frame(StartPage)\n \n\n def timer(self,frame_game):\n self.after(1000,frame_game.update_clock)\n\n\n def switch_frame(self, frame_class,num=False):\n 
new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n # try:\n \n # if num:\n # print(frame_class)\n # self.timer(frame_class) \n # except:\n # print(\"le frame n'est pas le bon\")\n\n\n\n\n\n\n\nclass PageTwo(tk.Frame):\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self,bg='red')\n tk.Label(self, text=\"Page two\", font=('Helvetica', 18, \"bold\")).pack(side=\"top\", fill=\"x\", pady=5)\n tk.Button(self, text=\"Go back to start page\",\n command=lambda: master.switch_frame(StartPage)).pack()\n\nif __name__ == \"__main__\":\n app = SampleApp()\n app.geometry('800x800')\n app.mainloop()",
"step-ids": [
11,
12,
15,
17,
18
]
}
|
[
11,
12,
15,
17,
18
] |
from pkg.models.board import Board
class BaseAI:
    """Base class for board-game AIs.

    Subclasses override :meth:`find_move`; the board to analyse is supplied
    either at construction time or later through :meth:`set_board`.
    """

    # Board this AI plays on; ``None`` until one is supplied.
    # Fix: annotated as optional (quoted forward reference) because the
    # default value is ``None``, which the original ``Board`` annotation
    # did not allow.
    _board: "Board | None" = None

    def __init__(self, board=None):
        """Create the AI, optionally attaching ``board`` immediately."""
        if board is not None:
            self.set_board(board)

    def set_board(self, board):
        """Attach ``board`` as the position this AI will search."""
        self._board = board

    def find_move(self, for_player):
        """Return the chosen move for ``for_player``.

        Hook for subclasses; this base implementation does nothing and
        returns ``None``.
        """
        pass
|
normal
|
{
"blob_id": "b794a4cca3303ac7440e9aad7bc210df62648b51",
"index": 5476,
"step-1": "<mask token>\n\n\nclass BaseAI:\n _board: Board = None\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseAI:\n _board: Board = None\n <mask token>\n <mask token>\n\n def find_move(self, for_player):\n pass\n",
"step-3": "<mask token>\n\n\nclass BaseAI:\n _board: Board = None\n <mask token>\n\n def set_board(self, board):\n self._board = board\n\n def find_move(self, for_player):\n pass\n",
"step-4": "<mask token>\n\n\nclass BaseAI:\n _board: Board = None\n\n def __init__(self, board=None):\n if board is not None:\n self.set_board(board)\n\n def set_board(self, board):\n self._board = board\n\n def find_move(self, for_player):\n pass\n",
"step-5": "from pkg.models.board import Board\n\n\nclass BaseAI:\n _board: Board = None\n\n def __init__(self, board=None):\n if board is not None:\n self.set_board(board)\n\n def set_board(self, board):\n self._board = board\n\n def find_move(self, for_player):\n pass\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Command(BaseCommand):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def handle(self, *args, **options):
s = ''
result = 0
tag = sum([options[i] for i in ['add', 'substract', 'multiply',
'divide']])
if options['add'] or not tag:
for poll_id in options['poll_id']:
s += '{} + '.format(poll_id)
result += poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
result)))
elif options['substract']:
result += options['poll_id'][0]
s = '{} - '.format(options['poll_id'][0])
for poll_id in options['poll_id'][1:]:
s += '{} - '.format(poll_id)
result -= poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
result)))
elif options['multiply']:
result = 1
for poll_id in options['poll_id']:
s += '{} × '.format(poll_id)
result *= poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
result)))
elif options['divide']:
result = options['poll_id'][0]
s = '{} ÷ '.format(options['poll_id'][0])
for poll_id in options['poll_id'][1:]:
s += '{} ÷ '.format(poll_id)
result /= poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
result)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Command(BaseCommand):
<|reserved_special_token_0|>
def add_arguments(self, parser):
parser.add_argument('poll_id', nargs='+', type=int)
parser.add_argument('--add', action='store_true', dest='add',
default=False, help='add')
parser.add_argument('--substract', action='store_true', dest=
'substract', default=False, help='substract')
parser.add_argument('--multiply', action='store_true', dest=
'multiply', default=False, help='multiply')
parser.add_argument('--divide', action='store_true', dest='divide',
default=False, help='divide')
def handle(self, *args, **options):
s = ''
result = 0
tag = sum([options[i] for i in ['add', 'substract', 'multiply',
'divide']])
if options['add'] or not tag:
for poll_id in options['poll_id']:
s += '{} + '.format(poll_id)
result += poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
result)))
elif options['substract']:
result += options['poll_id'][0]
s = '{} - '.format(options['poll_id'][0])
for poll_id in options['poll_id'][1:]:
s += '{} - '.format(poll_id)
result -= poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
result)))
elif options['multiply']:
result = 1
for poll_id in options['poll_id']:
s += '{} × '.format(poll_id)
result *= poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
result)))
elif options['divide']:
result = options['poll_id'][0]
s = '{} ÷ '.format(options['poll_id'][0])
for poll_id in options['poll_id'][1:]:
s += '{} ÷ '.format(poll_id)
result /= poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
result)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Command(BaseCommand):
    """Print the result of combining the given integers with one arithmetic
    operation (addition when no flag is passed)."""

    help = 'Closes the specified poll for voting'

    def add_arguments(self, parser):
        """Register the integer operands and the four operation flags."""
        parser.add_argument('poll_id', nargs='+', type=int)
        parser.add_argument('--add', action='store_true', dest='add',
                            default=False, help='add')
        parser.add_argument('--substract', action='store_true',
                            dest='substract', default=False, help='substract')
        parser.add_argument('--multiply', action='store_true',
                            dest='multiply', default=False, help='multiply')
        parser.add_argument('--divide', action='store_true', dest='divide',
                            default=False, help='divide')

    def handle(self, *args, **options):
        """Evaluate the requested operation over ``poll_id`` and print the
        full expression together with its result."""
        values = options['poll_id']

        # First matching flag wins, mirroring the original if/elif order;
        # with no flag set the command falls back to addition.
        chosen = [name for name in ('add', 'substract', 'multiply', 'divide')
                  if options[name]]
        mode = chosen[0] if chosen else 'add'

        if mode == 'add':
            symbol = ' + '
            result = sum(values)
        elif mode == 'substract':
            symbol = ' - '
            result = values[0] - sum(values[1:])
        elif mode == 'multiply':
            symbol = ' × '
            result = 1
            for value in values:
                result *= value
        else:
            symbol = ' ÷ '
            result = values[0]
            for value in values[1:]:
                result /= value

        # The original built "a OP b OP " and trimmed the trailing "OP",
        # leaving one space before "="; joining and re-adding that space
        # produces the identical string.
        expression = symbol.join(str(value) for value in values)
        if values:
            expression += ' '
        self.stdout.write(self.style.SUCCESS('{}= {}'.format(expression,
            result)))
<|reserved_special_token_1|>
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    # NOTE(review): this help text appears copied from the Django tutorial and
    # does not describe what the command does (chained arithmetic) — confirm.
    help = 'Closes the specified poll for voting'

    def add_arguments(self, parser):
        # Positional: one or more integer operands.
        parser.add_argument('poll_id', nargs='+', type=int)
        # Operation flags. NOTE(review): '--substract' spelling is part of the
        # public CLI surface, so it must not be "fixed" without a deprecation.
        parser.add_argument('--add', action='store_true', dest='add',
            default=False, help='add')
        parser.add_argument('--substract', action='store_true', dest=
            'substract', default=False, help='substract')
        parser.add_argument('--multiply', action='store_true', dest=
            'multiply', default=False, help='multiply')
        parser.add_argument('--divide', action='store_true', dest='divide',
            default=False, help='divide')

    def handle(self, *args, **options):
        """Print the chained expression and its result, e.g. ``1 + 2 = 3``."""
        s = ''
        result = 0
        # How many operation flags the caller supplied; 0 falls back to add.
        tag = sum([options[i] for i in ['add', 'substract', 'multiply',
            'divide']])
        if options['add'] or not tag:
            for poll_id in options['poll_id']:
                s += '{} + '.format(poll_id)
                result += poll_id
            # s ends with "+ "; s[:-2] drops the dangling operator.
            self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
                result)))
        elif options['substract']:
            # Seed with the first operand, then subtract the rest.
            result += options['poll_id'][0]
            s = '{} - '.format(options['poll_id'][0])
            for poll_id in options['poll_id'][1:]:
                s += '{} - '.format(poll_id)
                result -= poll_id
            self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
                result)))
        elif options['multiply']:
            result = 1
            for poll_id in options['poll_id']:
                s += '{} × '.format(poll_id)
                result *= poll_id
            self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
                result)))
        elif options['divide']:
            # True division: result becomes a float after the first step.
            result = options['poll_id'][0]
            s = '{} ÷ '.format(options['poll_id'][0])
            for poll_id in options['poll_id'][1:]:
                s += '{} ÷ '.format(poll_id)
                result /= poll_id
            self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],
                result)))
<|reserved_special_token_1|>
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Chain an arithmetic operation over the given integers and print the
    full expression, e.g. ``1 + 2 + 3 = 6``.

    NOTE(review): the ``help`` text below (and the ``--substract`` spelling)
    are kept verbatim for backward compatibility with existing callers.
    """

    help = 'Closes the specified poll for voting'

    def add_arguments(self, parser):
        # Positional: one or more integer operands.
        parser.add_argument('poll_id', nargs='+', type=int)
        # One boolean flag per supported operation.
        for flag in ('add', 'substract', 'multiply', 'divide'):
            parser.add_argument(
                '--' + flag,
                action='store_true',
                dest=flag,
                default=False,
                help=flag,
            )

    def handle(self, *args, **options):
        values = options['poll_id']
        # Count of operation flags supplied; zero falls back to addition.
        chosen = sum(options[name] for name in ('add', 'substract',
                                                'multiply', 'divide'))
        if options['add'] or not chosen:
            expr, total = '', 0
            for value in values:
                expr += f'{value} + '
                total += value
        elif options['substract']:
            total = values[0]
            expr = f'{values[0]} - '
            for value in values[1:]:
                expr += f'{value} - '
                total -= value
        elif options['multiply']:
            expr, total = '', 1
            for value in values:
                expr += f'{value} × '
                total *= value
        elif options['divide']:
            # True division: total becomes a float after the first step.
            total = values[0]
            expr = f'{values[0]} ÷ '
            for value in values[1:]:
                expr += f'{value} ÷ '
                total /= value
        # expr always ends with "<op> "; drop the dangling operator.
        self.stdout.write(self.style.SUCCESS(f'{expr[:-2]}= {total}'))
|
flexible
|
{
"blob_id": "b2d5b16c287dc76a088f6e20eca4a16dd0aad00f",
"index": 8797,
"step-1": "<mask token>\n\n\nclass Command(BaseCommand):\n <mask token>\n <mask token>\n\n def handle(self, *args, **options):\n s = ''\n result = 0\n tag = sum([options[i] for i in ['add', 'substract', 'multiply',\n 'divide']])\n if options['add'] or not tag:\n for poll_id in options['poll_id']:\n s += '{} + '.format(poll_id)\n result += poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['substract']:\n result += options['poll_id'][0]\n s = '{} - '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} - '.format(poll_id)\n result -= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['multiply']:\n result = 1\n for poll_id in options['poll_id']:\n s += '{} × '.format(poll_id)\n result *= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['divide']:\n result = options['poll_id'][0]\n s = '{} ÷ '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} ÷ '.format(poll_id)\n result /= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n",
"step-2": "<mask token>\n\n\nclass Command(BaseCommand):\n <mask token>\n\n def add_arguments(self, parser):\n parser.add_argument('poll_id', nargs='+', type=int)\n parser.add_argument('--add', action='store_true', dest='add',\n default=False, help='add')\n parser.add_argument('--substract', action='store_true', dest=\n 'substract', default=False, help='substract')\n parser.add_argument('--multiply', action='store_true', dest=\n 'multiply', default=False, help='multiply')\n parser.add_argument('--divide', action='store_true', dest='divide',\n default=False, help='divide')\n\n def handle(self, *args, **options):\n s = ''\n result = 0\n tag = sum([options[i] for i in ['add', 'substract', 'multiply',\n 'divide']])\n if options['add'] or not tag:\n for poll_id in options['poll_id']:\n s += '{} + '.format(poll_id)\n result += poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['substract']:\n result += options['poll_id'][0]\n s = '{} - '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} - '.format(poll_id)\n result -= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['multiply']:\n result = 1\n for poll_id in options['poll_id']:\n s += '{} × '.format(poll_id)\n result *= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['divide']:\n result = options['poll_id'][0]\n s = '{} ÷ '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} ÷ '.format(poll_id)\n result /= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n",
"step-3": "<mask token>\n\n\nclass Command(BaseCommand):\n help = 'Closes the specified poll for voting'\n\n def add_arguments(self, parser):\n parser.add_argument('poll_id', nargs='+', type=int)\n parser.add_argument('--add', action='store_true', dest='add',\n default=False, help='add')\n parser.add_argument('--substract', action='store_true', dest=\n 'substract', default=False, help='substract')\n parser.add_argument('--multiply', action='store_true', dest=\n 'multiply', default=False, help='multiply')\n parser.add_argument('--divide', action='store_true', dest='divide',\n default=False, help='divide')\n\n def handle(self, *args, **options):\n s = ''\n result = 0\n tag = sum([options[i] for i in ['add', 'substract', 'multiply',\n 'divide']])\n if options['add'] or not tag:\n for poll_id in options['poll_id']:\n s += '{} + '.format(poll_id)\n result += poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['substract']:\n result += options['poll_id'][0]\n s = '{} - '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} - '.format(poll_id)\n result -= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['multiply']:\n result = 1\n for poll_id in options['poll_id']:\n s += '{} × '.format(poll_id)\n result *= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['divide']:\n result = options['poll_id'][0]\n s = '{} ÷ '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} ÷ '.format(poll_id)\n result /= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n",
"step-4": "from django.core.management.base import BaseCommand, CommandError\n\n\nclass Command(BaseCommand):\n help = 'Closes the specified poll for voting'\n\n def add_arguments(self, parser):\n parser.add_argument('poll_id', nargs='+', type=int)\n parser.add_argument('--add', action='store_true', dest='add',\n default=False, help='add')\n parser.add_argument('--substract', action='store_true', dest=\n 'substract', default=False, help='substract')\n parser.add_argument('--multiply', action='store_true', dest=\n 'multiply', default=False, help='multiply')\n parser.add_argument('--divide', action='store_true', dest='divide',\n default=False, help='divide')\n\n def handle(self, *args, **options):\n s = ''\n result = 0\n tag = sum([options[i] for i in ['add', 'substract', 'multiply',\n 'divide']])\n if options['add'] or not tag:\n for poll_id in options['poll_id']:\n s += '{} + '.format(poll_id)\n result += poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['substract']:\n result += options['poll_id'][0]\n s = '{} - '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} - '.format(poll_id)\n result -= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['multiply']:\n result = 1\n for poll_id in options['poll_id']:\n s += '{} × '.format(poll_id)\n result *= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['divide']:\n result = options['poll_id'][0]\n s = '{} ÷ '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} ÷ '.format(poll_id)\n result /= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n",
"step-5": "from django.core.management.base import BaseCommand, CommandError\n\nclass Command(BaseCommand):\n help = 'Closes the specified poll for voting'\n\n def add_arguments(self, parser):\n # Positional arguments\n parser.add_argument('poll_id', nargs='+', type=int)\n\n # Named (optional arguments)\n parser.add_argument(\n '--add',\n action='store_true',\n dest='add',\n default=False,\n help='add'\n )\n parser.add_argument(\n '--substract',\n action='store_true',\n dest='substract',\n default=False,\n help='substract'\n )\n parser.add_argument(\n '--multiply',\n action='store_true',\n dest='multiply',\n default=False,\n help='multiply'\n )\n parser.add_argument(\n '--divide',\n action='store_true',\n dest='divide',\n default=False,\n help='divide'\n )\n\n\n def handle(self, *args, **options):\n s = ''\n result = 0\n tag = sum([options[i] for i in ['add', 'substract', 'multiply', 'divide']])\n if options['add'] or not tag:\n for poll_id in options['poll_id']:\n s += '{} + '.format(poll_id)\n result += poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))\n elif options['substract']:\n result += options['poll_id'][0]\n s = '{} - '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} - '.format(poll_id)\n result -= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))\n elif options['multiply']:\n result = 1\n for poll_id in options['poll_id']:\n s += '{} × '.format(poll_id)\n result *= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))\n elif options['divide']:\n result = options['poll_id'][0]\n s = '{} ÷ '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} ÷ '.format(poll_id)\n result /= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from sanic import Sanic
from sanic.blueprints import Blueprint
from sanic.response import html, json, text
from sanic_jwt import Initialize
from sanic_jwt.decorators import inject_user, protected, scoped
def test_forgotten_initialized_on_protected():
    """Module-level @protected/@scoped decorators must fail loudly when
    Sanic JWT was initialized against a blueprint instead of the app.

    Both endpoints are expected to answer 500 with a SanicJWTException even
    though a valid access token is presented.
    """
    blueprint = Blueprint("Test")

    @blueprint.get("/protected")
    @protected()  # module-level decorator: no instance bound
    def protected_hello_world(request):
        return json({"message": "hello world"})

    @blueprint.route("/scoped")
    @scoped("something")  # module-level decorator: no instance bound
    async def scoped_endpoint(request):
        return json({"scoped": True})

    app = Sanic("sanic-jwt-test")
    # Initialize against the blueprint, not the app — the "forgotten" setup.
    sanicjwt = Initialize(blueprint, app=app, authenticate=lambda x: True)
    app.blueprint(blueprint, url_prefix="/test")

    # Obtain a real token so the failure is not a plain 401.
    _, response = app.test_client.post(
        "/test/auth", json={"username": "user1", "password": "abcxyz"}
    )
    access_token = response.json.get(sanicjwt.config.access_token_name(), None)

    _, response = app.test_client.get(
        "/test/protected",
        headers={"Authorization": "Bearer {}".format(access_token)},
    )
    assert response.status == 500
    assert response.json.get("exception") == "SanicJWTException"

    _, response = app.test_client.get(
        "/test/scoped",
        headers={"Authorization": "Bearer {}".format(access_token)},
    )
    assert response.status == 500
    assert response.json.get("exception") == "SanicJWTException"
def test_option_method_on_protected(app):
    """An OPTIONS route behind @protected() answers 204 without any token
    (the test sends no Authorization header and expects success)."""
    application, jwt = app

    @application.route("/protected/options", methods=["OPTIONS"])
    @jwt.protected()
    async def my_protected_options(request):
        return text("", status=204)

    request, response = application.test_client.options("/protected/options")
    assert 204 == response.status
def test_inject_user_regular(app_with_retrieve_user):
    """The module-level @inject_user() decorator passes the retrieved user
    into a @protected handler as the second positional argument."""
    sanic_app, sanic_jwt = app_with_retrieve_user
    # Authenticate first so we hold a valid access token.
    _, response = sanic_app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    # Allow registering a new route after the test client already ran.
    sanic_app.router.reset()

    # NOTE(review): decorator order (inject_user above protected) mirrors the
    # other tests in this module — confirm it is required by sanic-jwt.
    @sanic_app.route("/protected/user")
    @inject_user()
    @protected()
    async def my_protected_user(request, user):
        return json({"user_id": user.user_id})

    access_token = response.json.get(
        sanic_jwt.config.access_token_name(), None
    )
    # Sanity check: /auth/me resolves the same user for this token.
    _, response = sanic_app.test_client.get(
        "/auth/me", headers={"Authorization": "Bearer {}".format(access_token)}
    )
    assert response.json.get("me").get("user_id") == 1

    _, response = sanic_app.test_client.get(
        "/protected/user",
        headers={"Authorization": "Bearer {}".format(access_token)},
    )
    assert response.status == 200
    assert response.json.get("user_id") == 1
def test_inject_user_on_instance(app_with_retrieve_user):
    """Instance-bound sanic_jwt.inject_user()/protected() decorators inject
    the retrieved user into the handler, same as the module-level form."""
    sanic_app, sanic_jwt = app_with_retrieve_user
    _, response = sanic_app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    # Allow registering a new route after the test client already ran.
    sanic_app.router.reset()

    @sanic_app.route("/protected/user")
    @sanic_jwt.inject_user()
    @sanic_jwt.protected()
    async def my_protected_user(request, user):
        return json({"user_id": user.user_id})

    access_token = response.json.get(
        sanic_jwt.config.access_token_name(), None
    )
    # Sanity check: /auth/me resolves the same user for this token.
    _, response = sanic_app.test_client.get(
        "/auth/me", headers={"Authorization": "Bearer {}".format(access_token)}
    )
    assert response.json.get("me").get("user_id") == 1

    _, response = sanic_app.test_client.get(
        "/protected/user",
        headers={"Authorization": "Bearer {}".format(access_token)},
    )
    assert response.status == 200
    assert response.json.get("user_id") == 1
def test_inject_user_on_instance_bp(app_with_retrieve_user):
    """Instance-bound inject_user()/protected() injection.

    NOTE(review): despite the "_bp" suffix, no blueprint is used here — the
    body is identical to test_inject_user_on_instance. Possibly a copy-paste
    leftover; confirm the intended blueprint scenario.
    """
    sanic_app, sanic_jwt = app_with_retrieve_user
    _, response = sanic_app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    # Allow registering a new route after the test client already ran.
    sanic_app.router.reset()

    @sanic_app.route("/protected/user")
    @sanic_jwt.inject_user()
    @sanic_jwt.protected()
    async def my_protected_user(request, user):
        return json({"user_id": user.user_id})

    access_token = response.json.get(
        sanic_jwt.config.access_token_name(), None
    )
    _, response = sanic_app.test_client.get(
        "/auth/me", headers={"Authorization": "Bearer {}".format(access_token)}
    )
    assert response.json.get("me").get("user_id") == 1

    _, response = sanic_app.test_client.get(
        "/protected/user",
        headers={"Authorization": "Bearer {}".format(access_token)},
    )
    assert response.status == 200
    assert response.json.get("user_id") == 1
def test_inject_user_on_instance_non_async(app_with_retrieve_user):
    """User injection also works when the decorated handler is a plain
    (synchronous) function rather than a coroutine."""
    sanic_app, sanic_jwt = app_with_retrieve_user
    _, response = sanic_app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    # Allow registering a new route after the test client already ran.
    sanic_app.router.reset()

    @sanic_app.route("/protected/user")
    @sanic_jwt.inject_user()
    @sanic_jwt.protected()
    def my_protected_user(request, user):  # intentionally not async
        return json({"user_id": user.user_id})

    access_token = response.json.get(
        sanic_jwt.config.access_token_name(), None
    )
    _, response = sanic_app.test_client.get(
        "/auth/me", headers={"Authorization": "Bearer {}".format(access_token)}
    )
    assert response.json.get("me").get("user_id") == 1

    _, response = sanic_app.test_client.get(
        "/protected/user",
        headers={"Authorization": "Bearer {}".format(access_token)},
    )
    assert response.status == 200
    assert response.json.get("user_id") == 1
def test_inject_user_with_auth_mode_off(app_with_retrieve_user):
    """An app initialized with auth_mode=False cannot issue tokens but still
    validates them and injects the user; a missing token yields 401.

    Models a microservice split: the fixture app issues the token and
    ``microservice_app`` only consumes it (presumably both share the same
    signing secret via defaults — TODO confirm fixture config).
    """

    async def retrieve_user(request, payload, *args, **kwargs):
        # Minimal stand-in: a plain dict instead of a user model object.
        return {"user_id": 123}

    microservice_app = Sanic("sanic-jwt-test")
    microservice_sanic_jwt = Initialize(
        microservice_app, auth_mode=False, retrieve_user=retrieve_user
    )

    @microservice_app.route("/protected/user")
    @microservice_sanic_jwt.inject_user()
    @microservice_sanic_jwt.protected()
    async def my_protected_user(request, user):
        return json({"user_id": user.get("user_id")})

    sanic_app, sanic_jwt = app_with_retrieve_user
    # The token is issued by the *other* (full-auth) app.
    _, response = sanic_app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    access_token = response.json.get(
        sanic_jwt.config.access_token_name(), None
    )
    _, response = microservice_app.test_client.get(
        "/protected/user",
        headers={"Authorization": "Bearer {}".format(access_token)},
    )
    assert response.status == 200
    assert response.json.get("user_id") == 123

    # No Authorization header at all -> unauthorized.
    _, response = microservice_app.test_client.get("/protected/user")
    assert response.status == 401
def test_redirect_without_url(app):
    """With redirect_on_fail=True and no explicit URL, an unauthenticated
    request is 302-redirected to the default target (served here by the
    /index.html route — presumably sanic-jwt's default; TODO confirm)."""
    sanic_app, sanic_jwt = app

    @sanic_app.route("/index.html")
    def index(request):
        return html("<html><body>Home</body></html>")

    @sanic_app.route("/protected/static")
    @sanic_jwt.protected(redirect_on_fail=True)
    async def my_protected_static(request):
        return text("", status=200)

    request, response = sanic_app.test_client.get("/protected/static")
    # The test client follows the redirect: the final body is the home page
    # and the intermediate 302 is recorded in response.history.
    assert response.status == 200
    assert response.body == b"<html><body>Home</body></html>"
    assert response.history
    assert response.history[0].status_code == 302
def test_redirect_with_decorator_url(app):
    """An unauthenticated request to a protected route is redirected to the
    URL given via the decorator's redirect_url argument."""
    application, jwt = app

    @application.route("/protected/static")
    @jwt.protected(redirect_on_fail=True, redirect_url="/unprotected")
    async def my_protected_static(request):
        return text("", status=200)

    @application.route("/unprotected")
    async def my_unprotected_goto(request):
        return text("unprotected content", status=200)

    request, response = application.test_client.get("/protected/static")
    # The client follows the 302; the fallback route's body comes back.
    assert response.status == 200
    assert response.text == "unprotected content"
def test_redirect_with_configured_url():
    """An unauthenticated request is redirected to the URL configured via
    Initialize(login_redirect_url=...) when the decorator gives none."""
    application = Sanic("sanic-jwt-test")
    jwt = Initialize(
        application, auth_mode=False, login_redirect_url="/unprotected"
    )

    @application.route("/protected/static")
    @jwt.protected(redirect_on_fail=True)
    async def my_protected_static(request):
        return text("", status=200)

    @application.route("/unprotected")
    async def my_unprotected_goto(request):
        return text("unprotected content", status=200)

    request, response = application.test_client.get("/protected/static")
    # The client follows the 302; the fallback route's body comes back.
    assert response.status == 200
    assert response.text == "unprotected content"
def test_authenticated_redirect(app_with_retrieve_user):
    """With a valid token, a redirect_on_fail=True endpoint serves its own
    content — no redirect to the fallback route occurs."""
    sanic_app, sanic_jwt = app_with_retrieve_user
    _, response = sanic_app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    # Allow registering routes after the test client already ran a request.
    sanic_app.router.reset()

    @sanic_app.route("/protected/static")
    @sanic_jwt.protected(redirect_on_fail=True)
    async def my_protected_static(request):
        return text("protected content", status=200)

    @sanic_app.route("/unprotected")
    async def my_unprotected_goto(request):
        return text("unprotected content", status=200)

    access_token = response.json.get(
        sanic_jwt.config.access_token_name(), None
    )
    _, response = sanic_app.test_client.get(
        "/protected/static",
        headers={"Authorization": "Bearer {}".format(access_token)},
    )
    assert response.status == 200 and response.text == "protected content"
|
normal
|
{
"blob_id": "55fc197eebc4e06466e0fc0458957d0460602eef",
"index": 2032,
"step-1": "<mask token>\n\n\ndef test_forgotten_initialized_on_protected():\n blueprint = Blueprint('Test')\n\n @blueprint.get('/protected')\n @protected()\n def protected_hello_world(request):\n return json({'message': 'hello world'})\n\n @blueprint.route('/scoped')\n @scoped('something')\n async def scoped_endpoint(request):\n return json({'scoped': True})\n app = Sanic('sanic-jwt-test')\n sanicjwt = Initialize(blueprint, app=app, authenticate=lambda x: True)\n app.blueprint(blueprint, url_prefix='/test')\n _, response = app.test_client.post('/test/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n access_token = response.json.get(sanicjwt.config.access_token_name(), None)\n _, response = app.test_client.get('/test/protected', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 500\n assert response.json.get('exception') == 'SanicJWTException'\n _, response = app.test_client.get('/test/scoped', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 500\n assert response.json.get('exception') == 'SanicJWTException'\n\n\ndef test_option_method_on_protected(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route('/protected/options', methods=['OPTIONS'])\n @sanic_jwt.protected()\n async def my_protected_options(request):\n return text('', status=204)\n _, response = sanic_app.test_client.options('/protected/options')\n assert response.status == 204\n\n\n<mask token>\n\n\ndef test_inject_user_on_instance(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n sanic_app.router.reset()\n\n @sanic_app.route('/protected/user')\n @sanic_jwt.inject_user()\n @sanic_jwt.protected()\n async def my_protected_user(request, user):\n return json({'user_id': user.user_id})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = 
sanic_app.test_client.get('/auth/me', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.json.get('me').get('user_id') == 1\n _, response = sanic_app.test_client.get('/protected/user', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200\n assert response.json.get('user_id') == 1\n\n\n<mask token>\n\n\ndef test_inject_user_with_auth_mode_off(app_with_retrieve_user):\n\n async def retrieve_user(request, payload, *args, **kwargs):\n return {'user_id': 123}\n microservice_app = Sanic('sanic-jwt-test')\n microservice_sanic_jwt = Initialize(microservice_app, auth_mode=False,\n retrieve_user=retrieve_user)\n\n @microservice_app.route('/protected/user')\n @microservice_sanic_jwt.inject_user()\n @microservice_sanic_jwt.protected()\n async def my_protected_user(request, user):\n return json({'user_id': user.get('user_id')})\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = microservice_app.test_client.get('/protected/user',\n headers={'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200\n assert response.json.get('user_id') == 123\n _, response = microservice_app.test_client.get('/protected/user')\n assert response.status == 401\n\n\ndef test_redirect_without_url(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route('/index.html')\n def index(request):\n return html('<html><body>Home</body></html>')\n\n @sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text('', status=200)\n request, response = sanic_app.test_client.get('/protected/static')\n assert response.status == 200\n assert response.body == b'<html><body>Home</body></html>'\n assert response.history\n assert 
response.history[0].status_code == 302\n\n\n<mask token>\n\n\ndef test_redirect_with_configured_url():\n sanic_app = Sanic('sanic-jwt-test')\n sanic_jwt = Initialize(sanic_app, auth_mode=False, login_redirect_url=\n '/unprotected')\n\n @sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text('', status=200)\n\n @sanic_app.route('/unprotected')\n async def my_unprotected_goto(request):\n return text('unprotected content', status=200)\n _, response = sanic_app.test_client.get('/protected/static')\n assert response.status == 200 and response.text == 'unprotected content'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_forgotten_initialized_on_protected():\n blueprint = Blueprint('Test')\n\n @blueprint.get('/protected')\n @protected()\n def protected_hello_world(request):\n return json({'message': 'hello world'})\n\n @blueprint.route('/scoped')\n @scoped('something')\n async def scoped_endpoint(request):\n return json({'scoped': True})\n app = Sanic('sanic-jwt-test')\n sanicjwt = Initialize(blueprint, app=app, authenticate=lambda x: True)\n app.blueprint(blueprint, url_prefix='/test')\n _, response = app.test_client.post('/test/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n access_token = response.json.get(sanicjwt.config.access_token_name(), None)\n _, response = app.test_client.get('/test/protected', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 500\n assert response.json.get('exception') == 'SanicJWTException'\n _, response = app.test_client.get('/test/scoped', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 500\n assert response.json.get('exception') == 'SanicJWTException'\n\n\ndef test_option_method_on_protected(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route('/protected/options', methods=['OPTIONS'])\n @sanic_jwt.protected()\n async def my_protected_options(request):\n return text('', status=204)\n _, response = sanic_app.test_client.options('/protected/options')\n assert response.status == 204\n\n\n<mask token>\n\n\ndef test_inject_user_on_instance(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n sanic_app.router.reset()\n\n @sanic_app.route('/protected/user')\n @sanic_jwt.inject_user()\n @sanic_jwt.protected()\n async def my_protected_user(request, user):\n return json({'user_id': user.user_id})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = 
sanic_app.test_client.get('/auth/me', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.json.get('me').get('user_id') == 1\n _, response = sanic_app.test_client.get('/protected/user', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200\n assert response.json.get('user_id') == 1\n\n\n<mask token>\n\n\ndef test_inject_user_with_auth_mode_off(app_with_retrieve_user):\n\n async def retrieve_user(request, payload, *args, **kwargs):\n return {'user_id': 123}\n microservice_app = Sanic('sanic-jwt-test')\n microservice_sanic_jwt = Initialize(microservice_app, auth_mode=False,\n retrieve_user=retrieve_user)\n\n @microservice_app.route('/protected/user')\n @microservice_sanic_jwt.inject_user()\n @microservice_sanic_jwt.protected()\n async def my_protected_user(request, user):\n return json({'user_id': user.get('user_id')})\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = microservice_app.test_client.get('/protected/user',\n headers={'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200\n assert response.json.get('user_id') == 123\n _, response = microservice_app.test_client.get('/protected/user')\n assert response.status == 401\n\n\ndef test_redirect_without_url(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route('/index.html')\n def index(request):\n return html('<html><body>Home</body></html>')\n\n @sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text('', status=200)\n request, response = sanic_app.test_client.get('/protected/static')\n assert response.status == 200\n assert response.body == b'<html><body>Home</body></html>'\n assert response.history\n assert 
response.history[0].status_code == 302\n\n\n<mask token>\n\n\ndef test_redirect_with_configured_url():\n sanic_app = Sanic('sanic-jwt-test')\n sanic_jwt = Initialize(sanic_app, auth_mode=False, login_redirect_url=\n '/unprotected')\n\n @sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text('', status=200)\n\n @sanic_app.route('/unprotected')\n async def my_unprotected_goto(request):\n return text('unprotected content', status=200)\n _, response = sanic_app.test_client.get('/protected/static')\n assert response.status == 200 and response.text == 'unprotected content'\n\n\ndef test_authenticated_redirect(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n sanic_app.router.reset()\n\n @sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text('protected content', status=200)\n\n @sanic_app.route('/unprotected')\n async def my_unprotected_goto(request):\n return text('unprotected content', status=200)\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = sanic_app.test_client.get('/protected/static', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200 and response.text == 'protected content'\n",
"step-3": "<mask token>\n\n\ndef test_forgotten_initialized_on_protected():\n blueprint = Blueprint('Test')\n\n @blueprint.get('/protected')\n @protected()\n def protected_hello_world(request):\n return json({'message': 'hello world'})\n\n @blueprint.route('/scoped')\n @scoped('something')\n async def scoped_endpoint(request):\n return json({'scoped': True})\n app = Sanic('sanic-jwt-test')\n sanicjwt = Initialize(blueprint, app=app, authenticate=lambda x: True)\n app.blueprint(blueprint, url_prefix='/test')\n _, response = app.test_client.post('/test/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n access_token = response.json.get(sanicjwt.config.access_token_name(), None)\n _, response = app.test_client.get('/test/protected', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 500\n assert response.json.get('exception') == 'SanicJWTException'\n _, response = app.test_client.get('/test/scoped', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 500\n assert response.json.get('exception') == 'SanicJWTException'\n\n\ndef test_option_method_on_protected(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route('/protected/options', methods=['OPTIONS'])\n @sanic_jwt.protected()\n async def my_protected_options(request):\n return text('', status=204)\n _, response = sanic_app.test_client.options('/protected/options')\n assert response.status == 204\n\n\ndef test_inject_user_regular(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n sanic_app.router.reset()\n\n @sanic_app.route('/protected/user')\n @inject_user()\n @protected()\n async def my_protected_user(request, user):\n return json({'user_id': user.user_id})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = sanic_app.test_client.get('/auth/me', 
headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.json.get('me').get('user_id') == 1\n _, response = sanic_app.test_client.get('/protected/user', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200\n assert response.json.get('user_id') == 1\n\n\ndef test_inject_user_on_instance(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n sanic_app.router.reset()\n\n @sanic_app.route('/protected/user')\n @sanic_jwt.inject_user()\n @sanic_jwt.protected()\n async def my_protected_user(request, user):\n return json({'user_id': user.user_id})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = sanic_app.test_client.get('/auth/me', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.json.get('me').get('user_id') == 1\n _, response = sanic_app.test_client.get('/protected/user', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200\n assert response.json.get('user_id') == 1\n\n\n<mask token>\n\n\ndef test_inject_user_with_auth_mode_off(app_with_retrieve_user):\n\n async def retrieve_user(request, payload, *args, **kwargs):\n return {'user_id': 123}\n microservice_app = Sanic('sanic-jwt-test')\n microservice_sanic_jwt = Initialize(microservice_app, auth_mode=False,\n retrieve_user=retrieve_user)\n\n @microservice_app.route('/protected/user')\n @microservice_sanic_jwt.inject_user()\n @microservice_sanic_jwt.protected()\n async def my_protected_user(request, user):\n return json({'user_id': user.get('user_id')})\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = 
microservice_app.test_client.get('/protected/user',\n headers={'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200\n assert response.json.get('user_id') == 123\n _, response = microservice_app.test_client.get('/protected/user')\n assert response.status == 401\n\n\ndef test_redirect_without_url(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route('/index.html')\n def index(request):\n return html('<html><body>Home</body></html>')\n\n @sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text('', status=200)\n request, response = sanic_app.test_client.get('/protected/static')\n assert response.status == 200\n assert response.body == b'<html><body>Home</body></html>'\n assert response.history\n assert response.history[0].status_code == 302\n\n\n<mask token>\n\n\ndef test_redirect_with_configured_url():\n sanic_app = Sanic('sanic-jwt-test')\n sanic_jwt = Initialize(sanic_app, auth_mode=False, login_redirect_url=\n '/unprotected')\n\n @sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text('', status=200)\n\n @sanic_app.route('/unprotected')\n async def my_unprotected_goto(request):\n return text('unprotected content', status=200)\n _, response = sanic_app.test_client.get('/protected/static')\n assert response.status == 200 and response.text == 'unprotected content'\n\n\ndef test_authenticated_redirect(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n sanic_app.router.reset()\n\n @sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text('protected content', status=200)\n\n @sanic_app.route('/unprotected')\n async def my_unprotected_goto(request):\n return 
text('unprotected content', status=200)\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = sanic_app.test_client.get('/protected/static', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200 and response.text == 'protected content'\n",
"step-4": "<mask token>\n\n\ndef test_forgotten_initialized_on_protected():\n blueprint = Blueprint('Test')\n\n @blueprint.get('/protected')\n @protected()\n def protected_hello_world(request):\n return json({'message': 'hello world'})\n\n @blueprint.route('/scoped')\n @scoped('something')\n async def scoped_endpoint(request):\n return json({'scoped': True})\n app = Sanic('sanic-jwt-test')\n sanicjwt = Initialize(blueprint, app=app, authenticate=lambda x: True)\n app.blueprint(blueprint, url_prefix='/test')\n _, response = app.test_client.post('/test/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n access_token = response.json.get(sanicjwt.config.access_token_name(), None)\n _, response = app.test_client.get('/test/protected', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 500\n assert response.json.get('exception') == 'SanicJWTException'\n _, response = app.test_client.get('/test/scoped', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 500\n assert response.json.get('exception') == 'SanicJWTException'\n\n\ndef test_option_method_on_protected(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route('/protected/options', methods=['OPTIONS'])\n @sanic_jwt.protected()\n async def my_protected_options(request):\n return text('', status=204)\n _, response = sanic_app.test_client.options('/protected/options')\n assert response.status == 204\n\n\ndef test_inject_user_regular(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n sanic_app.router.reset()\n\n @sanic_app.route('/protected/user')\n @inject_user()\n @protected()\n async def my_protected_user(request, user):\n return json({'user_id': user.user_id})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = sanic_app.test_client.get('/auth/me', 
headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.json.get('me').get('user_id') == 1\n _, response = sanic_app.test_client.get('/protected/user', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200\n assert response.json.get('user_id') == 1\n\n\ndef test_inject_user_on_instance(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n sanic_app.router.reset()\n\n @sanic_app.route('/protected/user')\n @sanic_jwt.inject_user()\n @sanic_jwt.protected()\n async def my_protected_user(request, user):\n return json({'user_id': user.user_id})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = sanic_app.test_client.get('/auth/me', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.json.get('me').get('user_id') == 1\n _, response = sanic_app.test_client.get('/protected/user', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200\n assert response.json.get('user_id') == 1\n\n\ndef test_inject_user_on_instance_bp(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n sanic_app.router.reset()\n\n @sanic_app.route('/protected/user')\n @sanic_jwt.inject_user()\n @sanic_jwt.protected()\n async def my_protected_user(request, user):\n return json({'user_id': user.user_id})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = sanic_app.test_client.get('/auth/me', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.json.get('me').get('user_id') == 1\n _, response = sanic_app.test_client.get('/protected/user', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n 
assert response.status == 200\n assert response.json.get('user_id') == 1\n\n\ndef test_inject_user_on_instance_non_async(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n sanic_app.router.reset()\n\n @sanic_app.route('/protected/user')\n @sanic_jwt.inject_user()\n @sanic_jwt.protected()\n def my_protected_user(request, user):\n return json({'user_id': user.user_id})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = sanic_app.test_client.get('/auth/me', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.json.get('me').get('user_id') == 1\n _, response = sanic_app.test_client.get('/protected/user', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200\n assert response.json.get('user_id') == 1\n\n\ndef test_inject_user_with_auth_mode_off(app_with_retrieve_user):\n\n async def retrieve_user(request, payload, *args, **kwargs):\n return {'user_id': 123}\n microservice_app = Sanic('sanic-jwt-test')\n microservice_sanic_jwt = Initialize(microservice_app, auth_mode=False,\n retrieve_user=retrieve_user)\n\n @microservice_app.route('/protected/user')\n @microservice_sanic_jwt.inject_user()\n @microservice_sanic_jwt.protected()\n async def my_protected_user(request, user):\n return json({'user_id': user.get('user_id')})\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = microservice_app.test_client.get('/protected/user',\n headers={'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200\n assert response.json.get('user_id') == 123\n _, response = microservice_app.test_client.get('/protected/user')\n assert 
response.status == 401\n\n\ndef test_redirect_without_url(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route('/index.html')\n def index(request):\n return html('<html><body>Home</body></html>')\n\n @sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text('', status=200)\n request, response = sanic_app.test_client.get('/protected/static')\n assert response.status == 200\n assert response.body == b'<html><body>Home</body></html>'\n assert response.history\n assert response.history[0].status_code == 302\n\n\ndef test_redirect_with_decorator_url(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True, redirect_url='/unprotected')\n async def my_protected_static(request):\n return text('', status=200)\n\n @sanic_app.route('/unprotected')\n async def my_unprotected_goto(request):\n return text('unprotected content', status=200)\n _, response = sanic_app.test_client.get('/protected/static')\n assert response.status == 200 and response.text == 'unprotected content'\n\n\ndef test_redirect_with_configured_url():\n sanic_app = Sanic('sanic-jwt-test')\n sanic_jwt = Initialize(sanic_app, auth_mode=False, login_redirect_url=\n '/unprotected')\n\n @sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text('', status=200)\n\n @sanic_app.route('/unprotected')\n async def my_unprotected_goto(request):\n return text('unprotected content', status=200)\n _, response = sanic_app.test_client.get('/protected/static')\n assert response.status == 200 and response.text == 'unprotected content'\n\n\ndef test_authenticated_redirect(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post('/auth', json={'username':\n 'user1', 'password': 'abcxyz'})\n sanic_app.router.reset()\n\n 
@sanic_app.route('/protected/static')\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text('protected content', status=200)\n\n @sanic_app.route('/unprotected')\n async def my_unprotected_goto(request):\n return text('unprotected content', status=200)\n access_token = response.json.get(sanic_jwt.config.access_token_name(), None\n )\n _, response = sanic_app.test_client.get('/protected/static', headers={\n 'Authorization': 'Bearer {}'.format(access_token)})\n assert response.status == 200 and response.text == 'protected content'\n",
"step-5": "from sanic import Sanic\nfrom sanic.blueprints import Blueprint\nfrom sanic.response import html, json, text\n\nfrom sanic_jwt import Initialize\nfrom sanic_jwt.decorators import inject_user, protected, scoped\n\n\ndef test_forgotten_initialized_on_protected():\n blueprint = Blueprint(\"Test\")\n\n @blueprint.get(\"/protected\")\n @protected()\n def protected_hello_world(request):\n return json({\"message\": \"hello world\"})\n\n @blueprint.route(\"/scoped\")\n @scoped(\"something\")\n async def scoped_endpoint(request):\n return json({\"scoped\": True})\n\n app = Sanic(\"sanic-jwt-test\")\n\n sanicjwt = Initialize(blueprint, app=app, authenticate=lambda x: True)\n\n app.blueprint(blueprint, url_prefix=\"/test\")\n\n _, response = app.test_client.post(\n \"/test/auth\", json={\"username\": \"user1\", \"password\": \"abcxyz\"}\n )\n\n access_token = response.json.get(sanicjwt.config.access_token_name(), None)\n\n _, response = app.test_client.get(\n \"/test/protected\",\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n\n assert response.status == 500\n assert response.json.get(\"exception\") == \"SanicJWTException\"\n\n _, response = app.test_client.get(\n \"/test/scoped\",\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n\n assert response.status == 500\n assert response.json.get(\"exception\") == \"SanicJWTException\"\n\n\ndef test_option_method_on_protected(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route(\"/protected/options\", methods=[\"OPTIONS\"])\n @sanic_jwt.protected()\n async def my_protected_options(request):\n return text(\"\", status=204)\n\n _, response = sanic_app.test_client.options(\"/protected/options\")\n\n assert response.status == 204\n\n\ndef test_inject_user_regular(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post(\n \"/auth\", json={\"username\": \"user1\", \"password\": \"abcxyz\"}\n )\n\n 
sanic_app.router.reset()\n\n @sanic_app.route(\"/protected/user\")\n @inject_user()\n @protected()\n async def my_protected_user(request, user):\n return json({\"user_id\": user.user_id})\n\n access_token = response.json.get(\n sanic_jwt.config.access_token_name(), None\n )\n\n _, response = sanic_app.test_client.get(\n \"/auth/me\", headers={\"Authorization\": \"Bearer {}\".format(access_token)}\n )\n\n assert response.json.get(\"me\").get(\"user_id\") == 1\n\n _, response = sanic_app.test_client.get(\n \"/protected/user\",\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n assert response.status == 200\n assert response.json.get(\"user_id\") == 1\n\n\ndef test_inject_user_on_instance(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post(\n \"/auth\", json={\"username\": \"user1\", \"password\": \"abcxyz\"}\n )\n\n sanic_app.router.reset()\n\n @sanic_app.route(\"/protected/user\")\n @sanic_jwt.inject_user()\n @sanic_jwt.protected()\n async def my_protected_user(request, user):\n return json({\"user_id\": user.user_id})\n\n access_token = response.json.get(\n sanic_jwt.config.access_token_name(), None\n )\n\n _, response = sanic_app.test_client.get(\n \"/auth/me\", headers={\"Authorization\": \"Bearer {}\".format(access_token)}\n )\n\n assert response.json.get(\"me\").get(\"user_id\") == 1\n\n _, response = sanic_app.test_client.get(\n \"/protected/user\",\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n assert response.status == 200\n assert response.json.get(\"user_id\") == 1\n\n\ndef test_inject_user_on_instance_bp(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post(\n \"/auth\", json={\"username\": \"user1\", \"password\": \"abcxyz\"}\n )\n\n sanic_app.router.reset()\n\n @sanic_app.route(\"/protected/user\")\n @sanic_jwt.inject_user()\n @sanic_jwt.protected()\n async def 
my_protected_user(request, user):\n return json({\"user_id\": user.user_id})\n\n access_token = response.json.get(\n sanic_jwt.config.access_token_name(), None\n )\n\n _, response = sanic_app.test_client.get(\n \"/auth/me\", headers={\"Authorization\": \"Bearer {}\".format(access_token)}\n )\n\n assert response.json.get(\"me\").get(\"user_id\") == 1\n\n _, response = sanic_app.test_client.get(\n \"/protected/user\",\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n assert response.status == 200\n assert response.json.get(\"user_id\") == 1\n\n\ndef test_inject_user_on_instance_non_async(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post(\n \"/auth\", json={\"username\": \"user1\", \"password\": \"abcxyz\"}\n )\n\n sanic_app.router.reset()\n\n @sanic_app.route(\"/protected/user\")\n @sanic_jwt.inject_user()\n @sanic_jwt.protected()\n def my_protected_user(request, user):\n return json({\"user_id\": user.user_id})\n\n access_token = response.json.get(\n sanic_jwt.config.access_token_name(), None\n )\n\n _, response = sanic_app.test_client.get(\n \"/auth/me\", headers={\"Authorization\": \"Bearer {}\".format(access_token)}\n )\n\n assert response.json.get(\"me\").get(\"user_id\") == 1\n\n _, response = sanic_app.test_client.get(\n \"/protected/user\",\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n assert response.status == 200\n assert response.json.get(\"user_id\") == 1\n\n\ndef test_inject_user_with_auth_mode_off(app_with_retrieve_user):\n async def retrieve_user(request, payload, *args, **kwargs):\n return {\"user_id\": 123}\n\n microservice_app = Sanic(\"sanic-jwt-test\")\n microservice_sanic_jwt = Initialize(\n microservice_app, auth_mode=False, retrieve_user=retrieve_user\n )\n\n @microservice_app.route(\"/protected/user\")\n @microservice_sanic_jwt.inject_user()\n @microservice_sanic_jwt.protected()\n async def my_protected_user(request, user):\n 
return json({\"user_id\": user.get(\"user_id\")})\n\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post(\n \"/auth\", json={\"username\": \"user1\", \"password\": \"abcxyz\"}\n )\n\n access_token = response.json.get(\n sanic_jwt.config.access_token_name(), None\n )\n\n _, response = microservice_app.test_client.get(\n \"/protected/user\",\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n\n assert response.status == 200\n assert response.json.get(\"user_id\") == 123\n\n _, response = microservice_app.test_client.get(\"/protected/user\")\n\n assert response.status == 401\n\n\ndef test_redirect_without_url(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route(\"/index.html\")\n def index(request):\n return html(\"<html><body>Home</body></html>\")\n\n @sanic_app.route(\"/protected/static\")\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text(\"\", status=200)\n\n request, response = sanic_app.test_client.get(\"/protected/static\")\n\n assert response.status == 200\n assert response.body == b\"<html><body>Home</body></html>\"\n assert response.history\n assert response.history[0].status_code == 302\n\n\ndef test_redirect_with_decorator_url(app):\n sanic_app, sanic_jwt = app\n\n @sanic_app.route(\"/protected/static\")\n @sanic_jwt.protected(redirect_on_fail=True, redirect_url=\"/unprotected\")\n async def my_protected_static(request):\n return text(\"\", status=200)\n\n @sanic_app.route(\"/unprotected\")\n async def my_unprotected_goto(request):\n return text(\"unprotected content\", status=200)\n\n _, response = sanic_app.test_client.get(\"/protected/static\")\n\n assert response.status == 200 and response.text == \"unprotected content\"\n\n\ndef test_redirect_with_configured_url():\n sanic_app = Sanic(\"sanic-jwt-test\")\n sanic_jwt = Initialize(\n sanic_app, auth_mode=False, login_redirect_url=\"/unprotected\"\n )\n\n 
@sanic_app.route(\"/protected/static\")\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text(\"\", status=200)\n\n @sanic_app.route(\"/unprotected\")\n async def my_unprotected_goto(request):\n return text(\"unprotected content\", status=200)\n\n _, response = sanic_app.test_client.get(\"/protected/static\")\n\n assert response.status == 200 and response.text == \"unprotected content\"\n\n\ndef test_authenticated_redirect(app_with_retrieve_user):\n sanic_app, sanic_jwt = app_with_retrieve_user\n _, response = sanic_app.test_client.post(\n \"/auth\", json={\"username\": \"user1\", \"password\": \"abcxyz\"}\n )\n\n sanic_app.router.reset()\n\n @sanic_app.route(\"/protected/static\")\n @sanic_jwt.protected(redirect_on_fail=True)\n async def my_protected_static(request):\n return text(\"protected content\", status=200)\n\n @sanic_app.route(\"/unprotected\")\n async def my_unprotected_goto(request):\n return text(\"unprotected content\", status=200)\n\n access_token = response.json.get(\n sanic_jwt.config.access_token_name(), None\n )\n\n _, response = sanic_app.test_client.get(\n \"/protected/static\",\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n\n assert response.status == 200 and response.text == \"protected content\"\n",
"step-ids": [
6,
7,
8,
11,
13
]
}
|
[
6,
7,
8,
11,
13
] |
# Generated by Django 2.1.4 on 2019-04-23 23:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('machine', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AboutRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('modified', models.DateTimeField(auto_now=True, null=True)),
('address_of_delivery', models.CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), ('nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default='Meru Town', max_length=50)),
('approved', models.BooleanField(default=False)),
('active', models.BooleanField(default=True)),
('paid', models.BooleanField(default=False)),
],
options={
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='Request',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('modified', models.DateTimeField(auto_now=True, null=True)),
('price', models.DecimalField(decimal_places=2, max_digits=6, null=True)),
('quantity', models.PositiveIntegerField(default=1)),
('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='details', to='request.AboutRequest')),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_item', to='machine.Machine')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-quantity',),
},
),
]
|
normal
|
{
"blob_id": "b9608208f71f25ae05ed9bd7bdf94b8882a26e06",
"index": 3091,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('machine', '0001_initial')]\n operations = [migrations.CreateModel(name='AboutRequest', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created', models.DateTimeField(\n auto_now_add=True, null=True)), ('modified', models.DateTimeField(\n auto_now=True, null=True)), ('address_of_delivery', models.\n CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), (\n 'nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default=\n 'Meru Town', max_length=50)), ('approved', models.BooleanField(\n default=False)), ('active', models.BooleanField(default=True)), (\n 'paid', models.BooleanField(default=False))], options={'ordering':\n ('-created',)}), migrations.CreateModel(name='Request', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created', models.\n DateTimeField(auto_now_add=True, null=True)), ('modified', models.\n DateTimeField(auto_now=True, null=True)), ('price', models.\n DecimalField(decimal_places=2, max_digits=6, null=True)), (\n 'quantity', models.PositiveIntegerField(default=1)), ('order',\n models.ForeignKey(null=True, on_delete=django.db.models.deletion.\n CASCADE, related_name='details', to='request.AboutRequest')), (\n 'product', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='order_item', to='machine.Machine')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='orders', to=settings.\n AUTH_USER_MODEL))], options={'ordering': ('-quantity',)})]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('machine', '0001_initial')]\n operations = [migrations.CreateModel(name='AboutRequest', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created', models.DateTimeField(\n auto_now_add=True, null=True)), ('modified', models.DateTimeField(\n auto_now=True, null=True)), ('address_of_delivery', models.\n CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), (\n 'nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default=\n 'Meru Town', max_length=50)), ('approved', models.BooleanField(\n default=False)), ('active', models.BooleanField(default=True)), (\n 'paid', models.BooleanField(default=False))], options={'ordering':\n ('-created',)}), migrations.CreateModel(name='Request', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created', models.\n DateTimeField(auto_now_add=True, null=True)), ('modified', models.\n DateTimeField(auto_now=True, null=True)), ('price', models.\n DecimalField(decimal_places=2, max_digits=6, null=True)), (\n 'quantity', models.PositiveIntegerField(default=1)), ('order',\n models.ForeignKey(null=True, on_delete=django.db.models.deletion.\n CASCADE, related_name='details', to='request.AboutRequest')), (\n 'product', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='order_item', to='machine.Machine')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='orders', to=settings.\n AUTH_USER_MODEL))], options={'ordering': ('-quantity',)})]\n",
"step-5": "# Generated by Django 2.1.4 on 2019-04-23 23:37\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('machine', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AboutRequest',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created', models.DateTimeField(auto_now_add=True, null=True)),\n ('modified', models.DateTimeField(auto_now=True, null=True)),\n ('address_of_delivery', models.CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), ('nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default='Meru Town', max_length=50)),\n ('approved', models.BooleanField(default=False)),\n ('active', models.BooleanField(default=True)),\n ('paid', models.BooleanField(default=False)),\n ],\n options={\n 'ordering': ('-created',),\n },\n ),\n migrations.CreateModel(\n name='Request',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created', models.DateTimeField(auto_now_add=True, null=True)),\n ('modified', models.DateTimeField(auto_now=True, null=True)),\n ('price', models.DecimalField(decimal_places=2, max_digits=6, null=True)),\n ('quantity', models.PositiveIntegerField(default=1)),\n ('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='details', to='request.AboutRequest')),\n ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_item', to='machine.Machine')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('-quantity',),\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.shortcuts import render
def index(request):
return render(request, 'munchiesfastfood/home.html', {'drinks': [
'Pineapple Juice', 'Green Juice', 'Soft Drinks',
'Carlo Rosee Drinks'], 'dishes': ['Beef Steak',
'Tomato with Chicken', 'Sausages from Italy', 'Beef Grilled']})
|
normal
|
{
"blob_id": "e279ca43ce2c582c702f1c6a0c1acf37eb9bcefe",
"index": 5603,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n return render(request, 'munchiesfastfood/home.html', {'drinks': [\n 'Pineapple Juice', 'Green Juice', 'Soft Drinks',\n 'Carlo Rosee Drinks'], 'dishes': ['Beef Steak',\n 'Tomato with Chicken', 'Sausages from Italy', 'Beef Grilled']})\n",
"step-3": "from django.shortcuts import render\n\n\ndef index(request):\n return render(request, 'munchiesfastfood/home.html', {'drinks': [\n 'Pineapple Juice', 'Green Juice', 'Soft Drinks',\n 'Carlo Rosee Drinks'], 'dishes': ['Beef Steak',\n 'Tomato with Chicken', 'Sausages from Italy', 'Beef Grilled']})\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import models
import wizard
import parser
<|reserved_special_token_1|>
# -*- encoding: utf-8 -*-
#----------------------------------------------------------------------------
#
# Copyright (C) 2014 .
# Coded by: Borni DHIFI (dhifi.borni@gmail.com)
#
#----------------------------------------------------------------------------
import models
import wizard
import parser
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
flexible
|
{
"blob_id": "a3216aa41cd28b91653b99017e21a03e43372e9b",
"index": 4137,
"step-1": "<mask token>\n",
"step-2": "import models\nimport wizard\nimport parser\n",
"step-3": "# -*- encoding: utf-8 -*-\n#----------------------------------------------------------------------------\n#\n# Copyright (C) 2014 .\n# Coded by: Borni DHIFI (dhifi.borni@gmail.com)\n#\n#----------------------------------------------------------------------------\n\nimport models\nimport wizard\nimport parser\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def parse_args():
parser = argparse.ArgumentParser(description=
'Analyze codon usage of SP and LP\n')
parser.add_argument('sp_file', help='one input SP data file\n')
parser.add_argument('lp_file', help='one input LP data file\n')
parser.add_argument('--label', '-l', type=str, required=False, default=
'top', help="""Define the label of out-put files. Default="top\"
""")
args = parser.parse_args()
for path in [args.sp_file, args.lp_file]:
if not os.path.isfile(path):
parser.error('File "%s" cannot be found.' % path)
return args
class Codon_Usage:
def __init__(self, filename):
self.seq, self.gene_num = self.get_seq(filename)
def get_seq(self, filename):
file = io.open(filename)
all_seq = []
gene_seq = ''
count_all = 0
count_non_triple = 0
for line in file:
if line[0] == '>':
count_all += 1
if gene_seq != '':
if len(gene_seq) % 3:
count_non_triple += 1
else:
all_seq.append(gene_seq)
gene_seq = ''
else:
gene_seq += line.strip()
file.close()
print('%s:\n%d genes added\n%d are non-triple\n' % (filename[:2],
count_all, count_non_triple))
return all_seq, count_all - count_non_triple
def get_AA(self, codon):
codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',
'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':
'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',
'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':
'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',
'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':
'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',
'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':
'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',
'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':
'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',
'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val', 'GCT': 'Ala', 'GCC':
'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',
'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':
'Gly', 'GGG': 'Gly'}
if codon in codon_map:
return codon_map[codon]
else:
return ''
def get_usage_dict(self, seq):
usage_dict = collections.defaultdict(lambda : [collections.
defaultdict(lambda : [0, 0]), 0])
for index in range(0, len(seq), 3):
codon = seq[index:index + 3]
AA = self.get_AA(codon)
if AA:
usage_dict[AA][1] += 1
usage_dict[AA][0][codon][0] += 1
for AA in usage_dict:
for codon in usage_dict[AA][0]:
usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0
] / usage_dict[AA][1]
return usage_dict
def get_AA_dict(self):
AA_dict = collections.defaultdict(lambda : collections.defaultdict(
list))
AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',
'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'
], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':
['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',
'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [
'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',
'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],
'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',
'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],
'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',
'GGC', 'GGA', 'GGG']}
usage_dict_list = []
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
for usage_dict in usage_dict_list:
AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])
return AA_dict
<|reserved_special_token_0|>
def annotate_heatmap(im, text_label):
textcolors = ['black', 'white']
data = im.get_array()
threshold = im.norm(data.max()) / 2
kw = dict(horizontalalignment='center', verticalalignment='center')
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color=textcolors[im.norm(data[i, j]) > threshold])
im.axes.text(j, i, text_label[i, j], **kw)
def choose_codons(ttest, text):
codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',
'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':
'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',
'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}
codon_dict = collections.defaultdict(list)
for i in range(len(ttest)):
for j in range(len(ttest[i])):
if ttest[i][j] < 0.01:
codon = text[i][j][:3]
if codon in codon_map:
codon_dict[codon_map[codon]].append(codon)
file = io.open('AAs_to_compare.txt', 'w')
file.write('Compare following AAs\n')
AAs = []
for AA in codon_dict.keys():
AAs.append(AA)
if len(codon_dict[AA]) == 2:
file.write('%s: %s, %s\n' % (AA, codon_dict[AA][0], codon_dict[
AA][1]))
else:
file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
file.close()
return AAs
<|reserved_special_token_0|>
def plot_distribution(sp_dict, lp_dict, AA):
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))
for codon in sp_dict[AA]:
x = np.arange(len(sp_dict[AA][codon]))
sp_y = np.array(sp_dict[AA][codon])
lp_y = np.array(lp_dict[AA][codon])
axes[0].plot(x, sp_y)
axes[1].plot(x, lp_y)
plt.show
def get_skellam_distribution(sp_dict, lp_dict, AA):
sp_mu = {}
lp_mu = {}
codons = []
for codon in sp_dict[AA]:
codons.append(codon)
sp_mu[codon] = np.mean(sp_dict[AA][codon])
lp_mu[codon] = np.mean(lp_dict[AA][codon])
skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)
skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_args():
parser = argparse.ArgumentParser(description=
'Analyze codon usage of SP and LP\n')
parser.add_argument('sp_file', help='one input SP data file\n')
parser.add_argument('lp_file', help='one input LP data file\n')
parser.add_argument('--label', '-l', type=str, required=False, default=
'top', help="""Define the label of out-put files. Default="top\"
""")
args = parser.parse_args()
for path in [args.sp_file, args.lp_file]:
if not os.path.isfile(path):
parser.error('File "%s" cannot be found.' % path)
return args
class Codon_Usage:
def __init__(self, filename):
self.seq, self.gene_num = self.get_seq(filename)
def get_seq(self, filename):
file = io.open(filename)
all_seq = []
gene_seq = ''
count_all = 0
count_non_triple = 0
for line in file:
if line[0] == '>':
count_all += 1
if gene_seq != '':
if len(gene_seq) % 3:
count_non_triple += 1
else:
all_seq.append(gene_seq)
gene_seq = ''
else:
gene_seq += line.strip()
file.close()
print('%s:\n%d genes added\n%d are non-triple\n' % (filename[:2],
count_all, count_non_triple))
return all_seq, count_all - count_non_triple
def get_AA(self, codon):
codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',
'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':
'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',
'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':
'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',
'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':
'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',
'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':
'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',
'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':
'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',
'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val', 'GCT': 'Ala', 'GCC':
'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',
'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':
'Gly', 'GGG': 'Gly'}
if codon in codon_map:
return codon_map[codon]
else:
return ''
def get_usage_dict(self, seq):
usage_dict = collections.defaultdict(lambda : [collections.
defaultdict(lambda : [0, 0]), 0])
for index in range(0, len(seq), 3):
codon = seq[index:index + 3]
AA = self.get_AA(codon)
if AA:
usage_dict[AA][1] += 1
usage_dict[AA][0][codon][0] += 1
for AA in usage_dict:
for codon in usage_dict[AA][0]:
usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0
] / usage_dict[AA][1]
return usage_dict
def get_AA_dict(self):
AA_dict = collections.defaultdict(lambda : collections.defaultdict(
list))
AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',
'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'
], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':
['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',
'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [
'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',
'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],
'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',
'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],
'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',
'GGC', 'GGA', 'GGG']}
usage_dict_list = []
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
for usage_dict in usage_dict_list:
AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])
return AA_dict
<|reserved_special_token_0|>
def plot_heatmap(data, text, cbarlabel, label):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))
im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
annotate_heatmap(im, text)
fig.tight_layout()
plt.show
plt.savefig(f'../results/{cbarlabel}_{label}.png')
<|reserved_special_token_0|>
def annotate_heatmap(im, text_label):
textcolors = ['black', 'white']
data = im.get_array()
threshold = im.norm(data.max()) / 2
kw = dict(horizontalalignment='center', verticalalignment='center')
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color=textcolors[im.norm(data[i, j]) > threshold])
im.axes.text(j, i, text_label[i, j], **kw)
def choose_codons(ttest, text):
codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',
'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':
'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',
'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}
codon_dict = collections.defaultdict(list)
for i in range(len(ttest)):
for j in range(len(ttest[i])):
if ttest[i][j] < 0.01:
codon = text[i][j][:3]
if codon in codon_map:
codon_dict[codon_map[codon]].append(codon)
file = io.open('AAs_to_compare.txt', 'w')
file.write('Compare following AAs\n')
AAs = []
for AA in codon_dict.keys():
AAs.append(AA)
if len(codon_dict[AA]) == 2:
file.write('%s: %s, %s\n' % (AA, codon_dict[AA][0], codon_dict[
AA][1]))
else:
file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
file.close()
return AAs
def plot_SP_LP(sp_AA_dict, lp_AA_dict):
for AA in list(sp_AA_dict.keys()):
codon_data = []
codons = []
for codon in sp_AA_dict[AA]:
lp_AA_dict[AA][codon].reverse()
codons.append(codon)
codon_data.append([])
codon_data[-1].append(sp_AA_dict[AA][codon])
codon_data[-1].append(lp_AA_dict[AA][codon])
codon_usage_plot(codon_data, AA, codons)
<|reserved_special_token_0|>
def plot_distribution(sp_dict, lp_dict, AA):
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))
for codon in sp_dict[AA]:
x = np.arange(len(sp_dict[AA][codon]))
sp_y = np.array(sp_dict[AA][codon])
lp_y = np.array(lp_dict[AA][codon])
axes[0].plot(x, sp_y)
axes[1].plot(x, lp_y)
plt.show
def get_skellam_distribution(sp_dict, lp_dict, AA):
sp_mu = {}
lp_mu = {}
codons = []
for codon in sp_dict[AA]:
codons.append(codon)
sp_mu[codon] = np.mean(sp_dict[AA][codon])
lp_mu[codon] = np.mean(lp_dict[AA][codon])
skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)
skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_args():
parser = argparse.ArgumentParser(description=
'Analyze codon usage of SP and LP\n')
parser.add_argument('sp_file', help='one input SP data file\n')
parser.add_argument('lp_file', help='one input LP data file\n')
parser.add_argument('--label', '-l', type=str, required=False, default=
'top', help="""Define the label of out-put files. Default="top\"
""")
args = parser.parse_args()
for path in [args.sp_file, args.lp_file]:
if not os.path.isfile(path):
parser.error('File "%s" cannot be found.' % path)
return args
class Codon_Usage:
def __init__(self, filename):
self.seq, self.gene_num = self.get_seq(filename)
def get_seq(self, filename):
file = io.open(filename)
all_seq = []
gene_seq = ''
count_all = 0
count_non_triple = 0
for line in file:
if line[0] == '>':
count_all += 1
if gene_seq != '':
if len(gene_seq) % 3:
count_non_triple += 1
else:
all_seq.append(gene_seq)
gene_seq = ''
else:
gene_seq += line.strip()
file.close()
print('%s:\n%d genes added\n%d are non-triple\n' % (filename[:2],
count_all, count_non_triple))
return all_seq, count_all - count_non_triple
def get_AA(self, codon):
codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',
'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':
'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',
'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':
'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',
'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':
'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',
'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':
'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',
'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':
'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',
'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val', 'GCT': 'Ala', 'GCC':
'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',
'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':
'Gly', 'GGG': 'Gly'}
if codon in codon_map:
return codon_map[codon]
else:
return ''
def get_usage_dict(self, seq):
usage_dict = collections.defaultdict(lambda : [collections.
defaultdict(lambda : [0, 0]), 0])
for index in range(0, len(seq), 3):
codon = seq[index:index + 3]
AA = self.get_AA(codon)
if AA:
usage_dict[AA][1] += 1
usage_dict[AA][0][codon][0] += 1
for AA in usage_dict:
for codon in usage_dict[AA][0]:
usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0
] / usage_dict[AA][1]
return usage_dict
def get_AA_dict(self):
AA_dict = collections.defaultdict(lambda : collections.defaultdict(
list))
AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',
'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'
], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':
['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',
'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [
'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',
'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],
'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',
'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],
'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',
'GGC', 'GGA', 'GGG']}
usage_dict_list = []
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
for usage_dict in usage_dict_list:
AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])
return AA_dict
<|reserved_special_token_0|>
def plot_heatmap(data, text, cbarlabel, label):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))
im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
annotate_heatmap(im, text)
fig.tight_layout()
plt.show
plt.savefig(f'../results/{cbarlabel}_{label}.png')
def heatmap(data, ax, cmap, cbarlabel):
if not ax:
ax = plt.gca()
im = ax.imshow(data, cmap)
cbar = ax.figure.colorbar(im, ax=ax)
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
ax.set_xticklabels(range(data.shape[1]))
ax.set_yticklabels(range(data.shape[0]))
ax.tick_params(top=False, bottom=True, labeltop=False, labelbottom=True)
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor=True)
ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor=True)
ax.grid(which='minor', color='w', linestyle='-', linewidth=3)
ax.tick_params(which='minor', bottom=False, left=False)
cbar.ax.set_ylabel(cbarlabel, va='top')
return im, cbar
def annotate_heatmap(im, text_label):
textcolors = ['black', 'white']
data = im.get_array()
threshold = im.norm(data.max()) / 2
kw = dict(horizontalalignment='center', verticalalignment='center')
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color=textcolors[im.norm(data[i, j]) > threshold])
im.axes.text(j, i, text_label[i, j], **kw)
def choose_codons(ttest, text):
codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',
'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':
'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',
'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}
codon_dict = collections.defaultdict(list)
for i in range(len(ttest)):
for j in range(len(ttest[i])):
if ttest[i][j] < 0.01:
codon = text[i][j][:3]
if codon in codon_map:
codon_dict[codon_map[codon]].append(codon)
file = io.open('AAs_to_compare.txt', 'w')
file.write('Compare following AAs\n')
AAs = []
for AA in codon_dict.keys():
AAs.append(AA)
if len(codon_dict[AA]) == 2:
file.write('%s: %s, %s\n' % (AA, codon_dict[AA][0], codon_dict[
AA][1]))
else:
file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
file.close()
return AAs
def plot_SP_LP(sp_AA_dict, lp_AA_dict):
for AA in list(sp_AA_dict.keys()):
codon_data = []
codons = []
for codon in sp_AA_dict[AA]:
lp_AA_dict[AA][codon].reverse()
codons.append(codon)
codon_data.append([])
codon_data[-1].append(sp_AA_dict[AA][codon])
codon_data[-1].append(lp_AA_dict[AA][codon])
codon_usage_plot(codon_data, AA, codons)
def codon_usage_plot(data, AA, codons):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))
for i in range(len(data)):
x_sp = np.linspace(0, 50, len(data[i][0]))
x_lp = np.linspace(50, 100, len(data[i][1]))
ax.plot(x_sp, data[i][0], label='sp_' + codons[i])
ax.plot(x_lp, data[i][1], label='lp_' + codons[i])
ax.legend(loc=1)
ax.set_title(AA)
def plot_distribution(sp_dict, lp_dict, AA):
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))
for codon in sp_dict[AA]:
x = np.arange(len(sp_dict[AA][codon]))
sp_y = np.array(sp_dict[AA][codon])
lp_y = np.array(lp_dict[AA][codon])
axes[0].plot(x, sp_y)
axes[1].plot(x, lp_y)
plt.show
def get_skellam_distribution(sp_dict, lp_dict, AA):
sp_mu = {}
lp_mu = {}
codons = []
for codon in sp_dict[AA]:
codons.append(codon)
sp_mu[codon] = np.mean(sp_dict[AA][codon])
lp_mu[codon] = np.mean(lp_dict[AA][codon])
skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)
skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)
def skellam_plot(mu1, mu2, name):
print(mu1, ' ', mu2, ' ', mu1 - mu2, ' ', name)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
x = np.arange(stats.skellam.ppf(0.01, mu1, mu2), stats.skellam.ppf(0.99,
mu1, mu2))
ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker='o', label=name)
ax.legend(loc=1)
plt.show
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import io, os, argparse, collections
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
    """Parse command-line arguments and verify both input files exist.

    Positional args are the SP and LP data files; ``--label/-l`` tags the
    output files (default ``"top"``). Exits via ``parser.error`` when a
    path is not an existing regular file.
    """
    arg_parser = argparse.ArgumentParser(
        description='Analyze codon usage of SP and LP\n')
    arg_parser.add_argument('sp_file', help='one input SP data file\n')
    arg_parser.add_argument('lp_file', help='one input LP data file\n')
    arg_parser.add_argument(
        '--label', '-l', type=str, required=False, default='top',
        help='Define the label of out-put files. Default="top"\n')
    parsed = arg_parser.parse_args()
    # Validate in positional order so the first missing file is reported,
    # matching the original check loop.
    for candidate in (parsed.sp_file, parsed.lp_file):
        if not os.path.isfile(candidate):
            arg_parser.error('File "%s" cannot be found.' % candidate)
    return parsed
class Codon_Usage:
    """Per-genotype codon-usage statistics built from a FASTA file of CDSs.

    Attributes:
        seq: list of in-frame gene sequences (length divisible by 3).
        gene_num: number of usable genes (total headers minus non-triple genes).
    """

    # Codon -> three-letter amino-acid code. Hoisted to a class constant so
    # the table is built once instead of on every get_AA() call.
    _CODON_TO_AA = {
        'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',
        'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser',
        'TAT': 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP',
        'TGT': 'Cys', 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp',
        'CTT': 'Leu', 'CTC': 'Leu', 'CTA': 'Leu', 'CTG': 'Leu',
        'CCT': 'Pro', 'CCC': 'Pro', 'CCA': 'Pro', 'CCG': 'Pro',
        'CAT': 'His', 'CAC': 'His', 'CAA': 'Gln', 'CAG': 'Gln',
        'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg', 'CGG': 'Arg',
        'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG': 'Met',
        'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',
        'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys',
        'AGT': 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg',
        'GTT': 'Val', 'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val',
        'GCT': 'Ala', 'GCC': 'Ala', 'GCA': 'Ala', 'GCG': 'Ala',
        'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu',
        'GGT': 'Gly', 'GGC': 'Gly', 'GGA': 'Gly', 'GGG': 'Gly'}

    # Amino acid -> synonymous codons, in the original presentation order
    # (order is preserved so downstream plots keep the same AA ordering).
    _AA_TO_CODONS = {
        'Phe': ['TTT', 'TTC'],
        'Leu': ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
        'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
        'Tyr': ['TAT', 'TAC'],
        'STOP': ['TAA', 'TAG', 'TGA'],
        'Cys': ['TGT', 'TGC'],
        'Trp': ['TGG'],
        'Pro': ['CCT', 'CCC', 'CCA', 'CCG'],
        'His': ['CAT', 'CAC'],
        'Gln': ['CAA', 'CAG'],
        'Arg': ['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
        'Ile': ['ATT', 'ATC', 'ATA'],
        'Met': ['ATG'],
        'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],
        'Asn': ['AAT', 'AAC'],
        'Lys': ['AAA', 'AAG'],
        'Val': ['GTT', 'GTC', 'GTA', 'GTG'],
        'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],
        'Asp': ['GAT', 'GAC'],
        'Glu': ['GAA', 'GAG'],
        'Gly': ['GGT', 'GGC', 'GGA', 'GGG']}

    def __init__(self, filename):
        # seq: kept in-frame sequences; gene_num: count of usable genes.
        self.seq, self.gene_num = self.get_seq(filename)

    def get_seq(self, filename):
        """Read a FASTA file and return (in-frame sequences, usable gene count).

        Genes whose total length is not a multiple of 3 are counted but
        excluded from the returned sequence list.
        """
        all_seq = []
        gene_seq = ''
        count_all = 0
        count_non_triple = 0
        # 'with' guarantees the handle is closed (the original leaked it on
        # any exception between open() and close()).
        with io.open(filename) as fasta:
            for line in fasta:
                if line[0] == '>':
                    count_all += 1
                    if gene_seq != '':
                        if len(gene_seq) % 3:
                            count_non_triple += 1
                        else:
                            all_seq.append(gene_seq)
                        gene_seq = ''
                else:
                    gene_seq += line.strip()
        # BUG FIX: the original flushed a gene only when the *next* '>' header
        # appeared, so the last gene in the file was always dropped.
        if gene_seq != '':
            if len(gene_seq) % 3:
                count_non_triple += 1
            else:
                all_seq.append(gene_seq)
        print('%s:\n%d genes added\n%d are non-triple\n' % (filename[:2],
            count_all, count_non_triple))
        return all_seq, count_all - count_non_triple

    def get_AA(self, codon):
        """Map a codon to its three-letter amino-acid code ('' if unknown)."""
        return self._CODON_TO_AA.get(codon, '')

    def get_usage_dict(self, seq):
        """Count codon usage within one sequence.

        Returns a mapping:
            AA -> [codon -> [codon_count, codon_count / AA_count], AA_count]
        """
        usage_dict = collections.defaultdict(lambda : [collections.
            defaultdict(lambda : [0, 0]), 0])
        # Walk the sequence codon by codon (frame 0).
        for index in range(0, len(seq), 3):
            codon = seq[index:index + 3]
            AA = self.get_AA(codon)
            if AA:
                usage_dict[AA][1] += 1
                usage_dict[AA][0][codon][0] += 1
        # Convert raw counts into per-AA usage fractions.
        for AA in usage_dict:
            total = usage_dict[AA][1]
            for codon in usage_dict[AA][0]:
                usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0] / total
        return usage_dict

    def get_AA_dict(self):
        """Collect per-gene codon usage fractions.

        Returns: AA -> codon -> list of that codon's usage fraction in each
        gene (one entry per gene, in gene order).
        """
        AA_dict = collections.defaultdict(lambda : collections.defaultdict(
            list))
        usage_dict_list = [self.get_usage_dict(seq) for seq in self.seq]
        for AA, codons in self._AA_TO_CODONS.items():
            for codon in codons:
                for usage_dict in usage_dict_list:
                    AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])
        return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
    """Compare SP vs LP codon usage and plot two significance heatmaps.

    Per codon: Welch's t-test between the SP and LP per-gene usage lists
    (8 codons per heatmap row). Per amino acid: chi-square over the mean
    usage of its codons (6 AAs per row).

    Side effects: saves two heatmap figures via plot_heatmap and writes
    the selected AAs via choose_codons.
    Returns: the AA list produced by choose_codons.
    """
    AA_chisquare = []
    AA_text = []
    codon_ttest = []
    codon_text = []
    i = 0
    j = 0
    count_all = 0
    count_sig = 0
    for AA in list(sp_AA_dict.keys()):
        sp_codon_mean = []
        lp_codon_mean = []
        for codon in sp_AA_dict[AA]:
            # Welch's t-test (unequal variances) on per-gene usage fractions.
            p_val = stats.ttest_ind(sp_AA_dict[AA][codon], lp_AA_dict[AA][
                codon], equal_var=False)[1]
            if not i % 8:
                codon_ttest.append([])
                codon_text.append([])
            i += 1
            if np.isnan(p_val):
                # NaN p-value (e.g. zero variance): plot as 0 with 'NA' label.
                codon_ttest[-1].append(0)
                codon_text[-1].append(codon + '\n NA')
            else:
                codon_ttest[-1].append(p_val)
                codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
                count_all += 1
                # NOTE(review): 0.5 looks like it may have been intended to be
                # 0.05 — confirm before relying on the "significant" count.
                if p_val < 0.5:
                    count_sig += 1
            sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
            lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
        p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
            axis=None)[1]
        if not j % 6:
            AA_chisquare.append([])
            AA_text.append([])
        j += 1
        if np.isnan(p_val):
            AA_chisquare[-1].append(0)
            AA_text[-1].append(AA + '\n NA')
        else:
            AA_chisquare[-1].append(p_val)
            AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))
    # BUG FIX: pad the last AA row only when it is actually short. The
    # original ran range(0, 6) when j was a multiple of 6, appending six
    # extra cells to a full row and making the array ragged.
    # (The codon grid needs no padding as long as all 64 codons are present:
    # 64 % 8 == 0 — confirm if the input dict can ever be partial.)
    if j % 6:
        for n in range(j % 6, 6):
            AA_chisquare[-1].append(0)
            AA_text[-1].append('')
    AAs = choose_codons(codon_ttest, codon_text)
    AA_chisquare = np.array(AA_chisquare)
    codon_ttest = np.array(codon_ttest)
    AA_text = np.array(AA_text)
    codon_text = np.array(codon_text)
    print(
        """%d out of %d codon show significant usage difference between SP and LP genes (p_value < 0.5)
"""
        % (count_sig, count_all))
    plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)
    plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)
    return AAs
def plot_heatmap(data, text, cbarlabel, label):
    """Render an annotated heatmap of *data* and save it under ../results/."""
    figure, axis = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))
    image, _ = heatmap(data, axis, 'YlGn', cbarlabel)
    annotate_heatmap(image, text)
    figure.tight_layout()
    # NOTE(review): attribute access only — plt.show is never actually
    # called here (kept as in the original).
    plt.show
    plt.savefig(f'../results/{cbarlabel}_{label}.png')
def heatmap(data, ax, cmap, cbarlabel):
    """Draw *data* as an image on *ax* with a labelled colorbar.

    Returns (im, cbar): the AxesImage and its Colorbar.
    """
    if not ax:
        ax = plt.gca()
    im = ax.imshow(data, cmap)
    cbar = ax.figure.colorbar(im, ax=ax)
    n_rows, n_cols = data.shape[0], data.shape[1]
    # Major ticks: one per cell, labelled with the row/column index.
    ax.set_xticks(np.arange(n_cols))
    ax.set_yticks(np.arange(n_rows))
    ax.set_xticklabels(range(n_cols))
    ax.set_yticklabels(range(n_rows))
    ax.tick_params(top=False, bottom=True, labeltop=False, labelbottom=True)
    # Hide the frame, then use minor ticks offset by half a cell to draw a
    # white grid between cells.
    for _, spine in ax.spines.items():
        spine.set_visible(False)
    ax.set_xticks(np.arange(n_cols + 1) - 0.5, minor=True)
    ax.set_yticks(np.arange(n_rows + 1) - 0.5, minor=True)
    ax.grid(which='minor', color='w', linestyle='-', linewidth=3)
    ax.tick_params(which='minor', bottom=False, left=False)
    cbar.ax.set_ylabel(cbarlabel, va='top')
    return im, cbar
def annotate_heatmap(im, text_label):
    """Write text_label[i, j] in each heatmap cell.

    Text color flips from black to white on cells darker than half the
    normalized maximum, so labels stay readable.
    """
    palette = ['black', 'white']
    values = im.get_array()
    threshold = im.norm(values.max()) / 2
    kw = dict(horizontalalignment='center', verticalalignment='center')
    n_rows, n_cols = values.shape[0], values.shape[1]
    for row in range(n_rows):
        for col in range(n_cols):
            kw.update(color=palette[im.norm(values[row, col]) > threshold])
            im.axes.text(col, row, text_label[row, col], **kw)
def choose_codons(ttest, text):
    """Select two-codon amino acids with significantly different codon usage.

    Args:
        ttest: 2-D grid of per-codon t-test p-values.
        text: matching grid of cell labels whose first three characters are
            the codon (as built by heatmap_SP_LP).

    A codon qualifies when its p-value is below 0.01 and it belongs to a
    two-fold degenerate amino acid (the only ones listed in codon_map).

    Side effect: writes the selection report to 'AAs_to_compare.txt'.
    Returns: list of qualifying amino-acid names.
    """
    # Only two-codon amino acids are candidates here.
    codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',
        'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':
        'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',
        'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}
    codon_dict = collections.defaultdict(list)
    for i in range(len(ttest)):
        for j in range(len(ttest[i])):
            if ttest[i][j] < 0.01:
                codon = text[i][j][:3]
                if codon in codon_map:
                    codon_dict[codon_map[codon]].append(codon)
    AAs = []
    # FIX: context manager guarantees the report file is closed even if a
    # write fails (the original used an explicit open/close pair).
    with io.open('AAs_to_compare.txt', 'w') as file:
        file.write('Compare following AAs\n')
        for AA in codon_dict.keys():
            AAs.append(AA)
            if len(codon_dict[AA]) == 2:
                file.write('%s: %s, %s\n' % (AA, codon_dict[AA][0],
                    codon_dict[AA][1]))
            else:
                file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
    return AAs
def plot_SP_LP(sp_AA_dict, lp_AA_dict):
    """Plot SP vs LP per-gene codon usage, one figure per amino acid.

    WARNING: mutates lp_AA_dict in place — every LP usage list is reversed
    so that, per the module's gene ordering, expression runs low-to-high
    left to right on the shared x-axis. Calling this twice re-reverses them.
    """
    for AA in list(sp_AA_dict.keys()):
        # codon_data[k] = [sp_usage_list, lp_usage_list] for codons[k].
        codon_data = []
        codons = []
        for codon in sp_AA_dict[AA]:
            # In-place reversal of the caller's list (see docstring warning).
            lp_AA_dict[AA][codon].reverse()
            codons.append(codon)
            codon_data.append([])
            codon_data[-1].append(sp_AA_dict[AA][codon])
            codon_data[-1].append(lp_AA_dict[AA][codon])
        codon_usage_plot(codon_data, AA, codons)
def codon_usage_plot(data, AA, codons):
    """One figure for amino acid *AA*: SP usage on x 0-50, LP usage on x 50-100.

    data[k] is a pair [sp_usage_list, lp_usage_list] for codons[k].
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))
    for idx, (sp_series, lp_series) in enumerate(data):
        ax.plot(np.linspace(0, 50, len(sp_series)), sp_series,
                label='sp_' + codons[idx])
        ax.plot(np.linspace(50, 100, len(lp_series)), lp_series,
                label='lp_' + codons[idx])
    ax.legend(loc=1)
    ax.set_title(AA)
def plot_distribution(sp_dict, lp_dict, AA):
    """Line plots of per-gene usage for *AA*: SP on the top axis, LP below."""
    fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))
    for codon in sp_dict[AA]:
        gene_index = np.arange(len(sp_dict[AA][codon]))
        axes[0].plot(gene_index, np.array(sp_dict[AA][codon]))
        axes[1].plot(gene_index, np.array(lp_dict[AA][codon]))
    # NOTE(review): attribute access only — plt.show is never called
    # (kept as in the original).
    plt.show
def get_skellam_distribution(sp_dict, lp_dict, AA):
    """Plot Skellam PMFs for *AA* in SP and LP from its codons' mean usage.

    NOTE(review): only the first two codons of *AA* are used as the Skellam
    means — assumes a two-codon amino acid; confirm for other inputs.
    """
    sp_means = {}
    lp_means = {}
    ordered_codons = []
    for codon in sp_dict[AA]:
        ordered_codons.append(codon)
        sp_means[codon] = np.mean(sp_dict[AA][codon])
        lp_means[codon] = np.mean(lp_dict[AA][codon])
    first, second = ordered_codons[0], ordered_codons[1]
    skellam_plot(sp_means[first], sp_means[second], 'SP-' + AA)
    skellam_plot(lp_means[first], lp_means[second], 'LP-' + AA)
def skellam_plot(mu1, mu2, name):
    """Plot the Skellam PMF with means (mu1, mu2) over its central 98% range."""
    print(mu1, ' ', mu2, ' ', mu1 - mu2, ' ', name)
    fig, axis = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
    lower = stats.skellam.ppf(0.01, mu1, mu2)
    upper = stats.skellam.ppf(0.99, mu1, mu2)
    support = np.arange(lower, upper)
    axis.plot(support, stats.skellam.pmf(support, mu1, mu2), marker='o',
              label=name)
    axis.legend(loc=1)
    # NOTE(review): attribute access only — plt.show is never called
    # (kept as in the original).
    plt.show
# ---- script entry point (runs on import: no __main__ guard) ----
args = parse_args()
# Load and frame-check the SP and LP FASTA files.
sp_codon_usage = Codon_Usage(args.sp_file)
lp_codon_usage = Codon_Usage(args.lp_file)
# Per-gene codon usage fractions, keyed AA -> codon -> [fraction per gene].
sp_AA_dict = sp_codon_usage.get_AA_dict()
lp_AA_dict = lp_codon_usage.get_AA_dict()
print('Analyzing SP and LP %s group data\n' % args.label)
# Significance heatmaps; returns the AAs selected for closer comparison.
AAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)
# NOTE: mutates lp_AA_dict in place (reverses each LP usage list).
plot_SP_LP(sp_AA_dict, lp_AA_dict)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 17:34:32 2019
@author: fanlizhou
Analyze codon usage of sequence from 'SP_gene_seq.txt' and 'LP_gene_seq.txt'
Plot heatmap of amino acid usage and codon usage
Plot codon usage in each gene for each amino acid. Genes were arranged so that
the gene expression of SP decrease from 0 to 50 (x-axis) and the gene expression
of LP increase from 51 to 100 (x-axis)
Usage: codon_usage.py [-h] [--label LABEL] sp_file lp_file
Options:
--label Define the label of out-put files. Default="top"
sp_file Path to the SP data files
lp_file Path to the LP data files
"""
import io, os, argparse, collections
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description=
'Analyze codon usage of SP and LP\n')
parser.add_argument('sp_file', help = 'one input SP data file\n')
parser.add_argument('lp_file', help = 'one input LP data file\n')
parser.add_argument('--label', '-l',
type = str, required = False, default = 'top',
help = 'Define the label of out-put files. Default="top"\n')
args = parser.parse_args()
for path in [args.sp_file, args.lp_file]:
if not os.path.isfile(path):
parser.error('File "%s" cannot be found.' % (path))
return args
# a Codon_Usage class to store codon usage information for each genotype
class Codon_Usage:
    """Codon-usage statistics for one genotype's coding sequences.

    Parses a FASTA-like file (header lines start with '>', sequence lines
    follow).  Genes whose length is not a multiple of three are discarded;
    the remaining sequences are kept in ``self.seq`` and their count in
    ``self.gene_num``.
    """

    # codon -> three-letter amino-acid code.  Hoisted to a class constant so
    # the 64-entry table is built once, not on every get_AA() call.
    CODON_TO_AA = {
        'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',
        'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser',
        'TAT': 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP',
        'TGT': 'Cys', 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp',
        'CTT': 'Leu', 'CTC': 'Leu', 'CTA': 'Leu', 'CTG': 'Leu',
        'CCT': 'Pro', 'CCC': 'Pro', 'CCA': 'Pro', 'CCG': 'Pro',
        'CAT': 'His', 'CAC': 'His', 'CAA': 'Gln', 'CAG': 'Gln',
        'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg', 'CGG': 'Arg',
        'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG': 'Met',
        'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',
        'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys',
        'AGT': 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg',
        'GTT': 'Val', 'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val',
        'GCT': 'Ala', 'GCC': 'Ala', 'GCA': 'Ala', 'GCG': 'Ala',
        'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu',
        'GGT': 'Gly', 'GGC': 'Gly', 'GGA': 'Gly', 'GGG': 'Gly'}

    # amino acid -> its synonymous codons (inverse view of CODON_TO_AA)
    AA_TO_CODONS = {
        'Phe': ['TTT', 'TTC'],
        'Leu': ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
        'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
        'Tyr': ['TAT', 'TAC'],
        'STOP': ['TAA', 'TAG', 'TGA'],
        'Cys': ['TGT', 'TGC'],
        'Trp': ['TGG'],
        'Pro': ['CCT', 'CCC', 'CCA', 'CCG'],
        'His': ['CAT', 'CAC'],
        'Gln': ['CAA', 'CAG'],
        'Arg': ['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
        'Ile': ['ATT', 'ATC', 'ATA'],
        'Met': ['ATG'],
        'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],
        'Asn': ['AAT', 'AAC'],
        'Lys': ['AAA', 'AAG'],
        'Val': ['GTT', 'GTC', 'GTA', 'GTG'],
        'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],
        'Asp': ['GAT', 'GAC'],
        'Glu': ['GAA', 'GAG'],
        'Gly': ['GGT', 'GGC', 'GGA', 'GGG']}

    def __init__(self, filename):
        # seq: list of kept (triple-length) gene sequences
        # gene_num: number of genes kept
        self.seq, self.gene_num = self.get_seq(filename)

    def get_seq(self, filename):
        """Read *filename* and return (gene_sequences, kept_gene_count).

        Only genes whose length is a multiple of three are kept.  Prints a
        short summary (the first two filename chars are used as the label,
        e.g. 'SP'/'LP').
        """
        # list of selected gene sequences, excluding non-triple genes
        all_seq = []
        gene_seq = ''
        count_all = 0
        count_non_triple = 0
        # `with` guarantees the handle is closed even if parsing raises
        with open(filename) as file:
            for line in file:
                if line[0] == '>':
                    count_all += 1
                    # a new header flushes the previously accumulated gene
                    if gene_seq != '':
                        if len(gene_seq) % 3:
                            count_non_triple += 1
                        else:
                            all_seq.append(gene_seq)
                        gene_seq = ''
                else:
                    gene_seq += line.strip()
        # BUG FIX: flush the final gene.  The original only flushed when the
        # *next* '>' header appeared, silently dropping the last record in
        # the file (and leaving it out of the non-triple count as well).
        if gene_seq != '':
            if len(gene_seq) % 3:
                count_non_triple += 1
            else:
                all_seq.append(gene_seq)
        print('%s:\n%d genes added\n%d are non-triple\n' %
              (filename[:2], count_all, count_non_triple))
        return (all_seq, count_all - count_non_triple)

    def get_AA(self, codon):
        """Return the three-letter amino-acid code for *codon*, '' if invalid."""
        return self.CODON_TO_AA.get(codon, '')

    def get_usage_dict(self, seq):
        """Build the per-gene codon-usage table for one sequence.

        Returns: AA -> [codon -> [count, count/AA_total], AA_total]
        """
        usage_dict = collections.defaultdict(
            lambda: [collections.defaultdict(lambda: [0, 0]), 0])
        # walk the sequence codon by codon
        for index in range(0, len(seq), 3):
            codon = seq[index:index + 3]
            AA = self.get_AA(codon)
            if AA:
                # count occurrences of the AA and of this specific codon
                usage_dict[AA][1] += 1
                usage_dict[AA][0][codon][0] += 1
        # convert raw codon counts into per-AA usage fractions
        for AA in usage_dict:
            for codon in usage_dict[AA][0]:
                usage_dict[AA][0][codon][1] = \
                    usage_dict[AA][0][codon][0] / usage_dict[AA][1]
        return usage_dict

    def get_AA_dict(self):
        """Aggregate usage across genes.

        Returns: AA -> codon -> [usage fraction in gene 1, gene 2, ...]
        (a gene that never uses the codon contributes 0).
        """
        AA_dict = collections.defaultdict(
            lambda: collections.defaultdict(list))
        # per-gene usage tables, one per kept sequence
        usage_dict_list = [self.get_usage_dict(seq) for seq in self.seq]
        for AA, codons in self.AA_TO_CODONS.items():
            for codon in codons:
                for usage_dict in usage_dict_list:
                    AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])
        return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
    """Compare SP vs LP codon usage and render two annotated heatmaps.

    Per codon: Welch's t-test on the per-gene usage fractions.
    Per amino acid: Chi-Square test on the SP/LP mean-usage vectors.
    Saves the 'AAs_ChiSquare' and 'Codons_ttest' heatmaps via plot_heatmap
    and returns the amino acids selected by choose_codons.
    """
    # rows of Chi-Square p-values (one heatmap cell per AA)
    AA_chisquare = []
    # AA plotting annotation information
    AA_text = []
    # rows of Welch's t-test p-values (one heatmap cell per codon)
    codon_ttest = []
    # codon plotting annotation information
    codon_text = []
    i = 0
    j = 0
    # number of codons analyzed (despite the printed message, these count
    # codon comparisons, not genes)
    count_all = 0
    # number of codons with p_val < 0.5
    count_sig = 0
    for AA in list(sp_AA_dict.keys()):
        # mean usage per codon for this AA, fed into the Chi-Square test
        sp_codon_mean = []
        lp_codon_mean = []
        for codon in sp_AA_dict[AA]:
            # Welch's t-test (unequal variances); [1] is the p-value
            p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
                                    lp_AA_dict[AA][codon],
                                    equal_var = False)[1]
            # display eight codons in a row
            if not i % 8:
                codon_ttest.append([])
                codon_text.append([])
            i += 1
            # NaN p-value (e.g. zero variance): plot as 0 and label 'NA'
            if np.isnan(p_val):
                codon_ttest[-1].append(0)
                codon_text[-1].append(codon + '\n NA')
            # save ttest p-values and annotation information
            else:
                codon_ttest[-1].append(p_val)
                codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
                count_all += 1
                if p_val < 0.5:
                    count_sig += 1
            sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
            lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
        # Chi-Square test over the flattened SP/LP mean vectors for this AA
        p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
                                axis = None)[1]
        # display six AA in a row
        if not j % 6:
            AA_chisquare.append([])
            AA_text.append([])
        j += 1
        # NaN p-value: plot as 0 and label 'NA'
        if np.isnan(p_val):
            AA_chisquare[-1].append(0)
            AA_text[-1].append(AA + '\n NA')
        # save Chi-Square test p-values and annotation information
        else:
            AA_chisquare[-1].append(p_val)
            AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))
    # pad the last AA row so every row has six cells.
    # NOTE(review): if j % 6 == 0 this appends six extra cells to an
    # already-full row, making the array ragged — fine for the 21 AAs
    # produced here, but verify before reusing with other inputs.
    for n in range(j % 6, 6):
        AA_chisquare[-1].append(0)
        AA_text[-1].append('')
    # get list of AAs that show significant difference between SP and LP groups
    AAs = choose_codons(codon_ttest, codon_text)
    AA_chisquare = np.array(AA_chisquare)
    codon_ttest = np.array(codon_ttest)
    AA_text = np.array(AA_text)
    codon_text = np.array(codon_text)
    print('%d out of %d codon show significant usage difference \
between SP and LP genes (p_value < 0.5)\n' %
          (count_sig, count_all))
    plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)
    plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)
    return AAs
def plot_heatmap(data, text, cbarlabel, label):
    """Render one annotated heatmap and save it under ../results/.

    *data* and *text* are 2-D arrays of the same shape; *cbarlabel* names
    the colorbar and the output file, *label* tags the filename.
    """
    fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))
    im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
    annotate_heatmap(im, text)
    fig.tight_layout()
    # Bug fix: the original had a bare `plt.show` (missing parentheses),
    # which is a no-op attribute access.  It is removed rather than turned
    # into plt.show() so the function keeps its current non-blocking
    # behavior and simply writes the figure to disk.
    plt.savefig(f'../results/{cbarlabel}_{label}.png')
def heatmap(data, ax, cmap, cbarlabel):
    """Draw *data* as a colored image on *ax* with index tick labels and a
    colorbar titled *cbarlabel*.  Returns (image, colorbar)."""
    if not ax:
        ax = plt.gca()
    im = ax.imshow(data, cmap)
    cbar = ax.figure.colorbar(im, ax=ax)
    n_rows, n_cols = data.shape[0], data.shape[1]
    # major ticks: one per cell, labelled with the plain index
    ax.set_xticks(np.arange(n_cols))
    ax.set_yticks(np.arange(n_rows))
    ax.set_xticklabels(range(n_cols))
    ax.set_yticklabels(range(n_rows))
    ax.tick_params(top=False, bottom=True,
                   labeltop=False, labelbottom=True)
    # hide the frame, then use a minor-tick grid to draw white separators
    # between cells
    for _, spine in ax.spines.items():
        spine.set_visible(False)
    ax.set_xticks(np.arange(n_cols + 1) - 0.5, minor=True)
    ax.set_yticks(np.arange(n_rows + 1) - 0.5, minor=True)
    ax.grid(which='minor', color='w', linestyle='-', linewidth=3)
    ax.tick_params(which='minor', bottom=False, left=False)
    cbar.ax.set_ylabel(cbarlabel, va='top')
    return im, cbar
def annotate_heatmap(im, text_label):
    """Write text_label[i, j] at the center of each cell of image *im*,
    switching to white text on dark cells for contrast."""
    palette = ['black', 'white']
    values = im.get_array()
    # cells whose normalized value exceeds half the max get white text
    cutoff = im.norm(values.max()) / 2
    opts = dict(horizontalalignment='center',
                verticalalignment='center')
    for row in range(values.shape[0]):
        for col in range(values.shape[1]):
            opts.update(color=palette[im.norm(values[row, col]) > cutoff])
            im.axes.text(col, row, text_label[row, col], **opts)
def choose_codons(ttest, text):
    """Select two-codon amino acids whose codon usage differs significantly.

    *ttest* is a 2-D list of t-test p-values and *text* the parallel list of
    cell labels (codon name in the first three characters).  A codon counts
    when its p-value is below 0.01 and its amino acid has exactly two
    synonymous codons.  Writes a human-readable summary to
    'AAs_to_compare.txt' (in the working directory) and returns the list of
    selected amino-acid names.
    """
    # codon -> AA, restricted to AAs with exactly two codon choices
    codon_map = {
        'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',
        'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His',
        'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn',
        'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp',
        'GAA':'Glu', 'GAG':'Glu'}
    codon_dict = collections.defaultdict(list)
    # walk p-values and their labels together instead of double indexing
    for p_row, label_row in zip(ttest, text):
        for p_val, cell_label in zip(p_row, label_row):
            if p_val < 0.01:
                codon = cell_label[:3]
                if codon in codon_map:
                    codon_dict[codon_map[codon]].append(codon)
    # AAs (two-codon only) showing significant SP/LP usage difference
    AAs = []
    # `with` closes the report even if a write fails (the original leaked
    # the handle on error)
    with open('AAs_to_compare.txt', 'w') as file:
        file.write('Compare following AAs\n')
        for AA in codon_dict.keys():
            AAs.append(AA)
            if len(codon_dict[AA]) == 2:
                file.write('%s: %s, %s\n' %
                           (AA, codon_dict[AA][0], codon_dict[AA][1]))
            else:
                file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
    return AAs
def plot_SP_LP(sp_AA_dict, lp_AA_dict):
    """Plot per-AA codon-usage curves for the SP and LP groups.

    WARNING: mutates *lp_AA_dict* — each codon's per-gene list is reversed
    in place so the LP data reads lowest- to highest-expressed.  Callers
    that reuse lp_AA_dict afterwards see the reversed order.
    """
    # plot each AA
    for AA in list(sp_AA_dict.keys()):
        # per-codon [SP series, LP series] pairs for this AA
        codon_data = []
        # List of codon names
        codons = []
        for codon in sp_AA_dict[AA]:
            # LP group data is displayed from lowest expressed genes
            # to highest expressed genes (in-place reverse, see WARNING)
            lp_AA_dict[AA][codon].reverse()
            codons.append(codon)
            codon_data.append([])
            # display SP group data first and then LP group data
            codon_data[-1].append(sp_AA_dict[AA][codon])
            codon_data[-1].append(lp_AA_dict[AA][codon])
        # plot usage curves
        codon_usage_plot(codon_data, AA, codons)
def codon_usage_plot(data, AA, codons):
    """Plot SP and LP usage curves for one amino acid.

    *data* holds one [sp_series, lp_series] pair per codon; SP is drawn on
    x in [0, 50] and LP on x in [50, 100], labelled 'sp_<codon>' /
    'lp_<codon>'.
    """
    fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (15, 5))
    # iterate the series together with their codon labels instead of
    # indexing via range(len(...))
    for (sp_series, lp_series), codon in zip(data, codons):
        # 0-50 shows SP group data, 50-100 shows LP group data
        x_sp = np.linspace(0, 50, len(sp_series))
        x_lp = np.linspace(50, 100, len(lp_series))
        ax.plot(x_sp, sp_series, label = 'sp_' + codon)
        ax.plot(x_lp, lp_series, label = 'lp_' + codon)
    ax.legend(loc = 1)
    ax.set_title(AA)
def plot_distribution(sp_dict, lp_dict, AA):
    """Plot per-gene usage of each codon of *AA*: SP genes on the top axis,
    LP genes on the bottom axis, one line per codon.

    Assumes sp_dict[AA] and lp_dict[AA] hold equal-length lists per codon
    (x positions are taken from the SP list).
    """
    fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize = (40, 20))
    for codon in sp_dict[AA]:
        x = np.arange(len(sp_dict[AA][codon]))
        axes[0].plot(x, np.array(sp_dict[AA][codon]))
        axes[1].plot(x, np.array(lp_dict[AA][codon]))
    # Bug fix: the original ended with a bare `plt.show` (missing
    # parentheses) — a no-op attribute access.  Removed rather than
    # "fixed", preserving the current non-blocking behavior.
def get_skellam_distribution(sp_dict, lp_dict, AA):
    """Plot Skellam distributions for *AA* in both groups, parameterized by
    the mean usage of its first two codons."""
    codons = list(sp_dict[AA])
    # per-codon mean usage in each group
    sp_mu = {codon: np.mean(sp_dict[AA][codon]) for codon in codons}
    lp_mu = {codon: np.mean(lp_dict[AA][codon]) for codon in codons}
    first, second = codons[0], codons[1]
    skellam_plot(sp_mu[first], sp_mu[second], 'SP-' + AA)
    skellam_plot(lp_mu[first], lp_mu[second], 'LP-' + AA)
def skellam_plot(mu1, mu2, name):
    """Plot the Skellam PMF (difference of two Poisson variables with means
    *mu1* and *mu2*) over its central 98% support, labelled *name*."""
    # diagnostic output preserved byte-for-byte from the original
    print(mu1,' ', mu2, ' ', mu1-mu2, ' ', name)
    fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (5, 5))
    # x spans the 1st to 99th percentile of the distribution
    x = np.arange(stats.skellam.ppf(0.01, mu1, mu2),
                  stats.skellam.ppf(0.99, mu1, mu2))
    ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker = 'o', label = name)
    ax.legend(loc = 1)
    # Bug fix: trailing bare `plt.show` (missing parentheses) was a no-op
    # attribute access and has been removed.
# main flow
args = parse_args()
sp_codon_usage = Codon_Usage(args.sp_file)
lp_codon_usage = Codon_Usage(args.lp_file)
sp_AA_dict = sp_codon_usage.get_AA_dict()
lp_AA_dict = lp_codon_usage.get_AA_dict()
print("Analyzing SP and LP %s group data\n" % (args.label))
AAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)
plot_SP_LP(sp_AA_dict, lp_AA_dict)
# optional
# get Skellam distributions of AAs that have only two codon choices
# and show distictive usage between SP and LP
'''
sp_all_codon_usage = Codon_Usage('SP_all_gene_seq.txt')
lp_all_codon_usage = Codon_Usage('LP_all_gene_seq.txt')
sp_all_AA_dict = sp_all_codon_usage.get_AA_dict()
lp_all_AA_dict = lp_all_codon_usage.get_AA_dict()
for AA in AAs:
plot_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
get_skellam_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
'''
|
flexible
|
{
"blob_id": "ae7a2de8742e353818d4f5a28feb9bce04d787bb",
"index": 8382,
"step-1": "<mask token>\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 
'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\n<mask token>\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i 
in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\n<mask token>\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 
'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\n<mask token>\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n annotate_heatmap(im, text)\n fig.tight_layout()\n 
plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png')\n\n\n<mask token>\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n for AA in list(sp_AA_dict.keys()):\n codon_data = []\n codons = []\n for codon in sp_AA_dict[AA]:\n lp_AA_dict[AA][codon].reverse()\n codons.append(codon)\n codon_data.append([])\n codon_data[-1].append(sp_AA_dict[AA][codon])\n codon_data[-1].append(lp_AA_dict[AA][codon])\n codon_usage_plot(codon_data, AA, codons)\n\n\n<mask token>\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, 
sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 
'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\n<mask token>\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n annotate_heatmap(im, text)\n fig.tight_layout()\n 
plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png')\n\n\ndef heatmap(data, ax, cmap, cbarlabel):\n if not ax:\n ax = plt.gca()\n im = ax.imshow(data, cmap)\n cbar = ax.figure.colorbar(im, ax=ax)\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n ax.set_xticklabels(range(data.shape[1]))\n ax.set_yticklabels(range(data.shape[0]))\n ax.tick_params(top=False, bottom=True, labeltop=False, labelbottom=True)\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor=True)\n ax.grid(which='minor', color='w', linestyle='-', linewidth=3)\n ax.tick_params(which='minor', bottom=False, left=False)\n cbar.ax.set_ylabel(cbarlabel, va='top')\n return im, cbar\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n 
AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n for AA in list(sp_AA_dict.keys()):\n codon_data = []\n codons = []\n for codon in sp_AA_dict[AA]:\n lp_AA_dict[AA][codon].reverse()\n codons.append(codon)\n codon_data.append([])\n codon_data[-1].append(sp_AA_dict[AA][codon])\n codon_data[-1].append(lp_AA_dict[AA][codon])\n codon_usage_plot(codon_data, AA, codons)\n\n\ndef codon_usage_plot(data, AA, codons):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))\n for i in range(len(data)):\n x_sp = np.linspace(0, 50, len(data[i][0]))\n x_lp = np.linspace(50, 100, len(data[i][1]))\n ax.plot(x_sp, data[i][0], label='sp_' + codons[i])\n ax.plot(x_lp, data[i][1], label='lp_' + codons[i])\n ax.legend(loc=1)\n ax.set_title(AA)\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\ndef skellam_plot(mu1, mu2, name):\n print(mu1, ' ', mu2, ' ', mu1 - mu2, ' ', name)\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))\n x = np.arange(stats.skellam.ppf(0.01, mu1, mu2), stats.skellam.ppf(0.99,\n mu1, mu2))\n ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker='o', label=name)\n ax.legend(loc=1)\n plt.show\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport io, os, argparse, collections\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 
'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\ndef heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):\n AA_chisquare = []\n AA_text = []\n codon_ttest = []\n 
codon_text = []\n i = 0\n j = 0\n count_all = 0\n count_sig = 0\n for AA in list(sp_AA_dict.keys()):\n sp_codon_mean = []\n lp_codon_mean = []\n for codon in sp_AA_dict[AA]:\n p_val = stats.ttest_ind(sp_AA_dict[AA][codon], lp_AA_dict[AA][\n codon], equal_var=False)[1]\n if not i % 8:\n codon_ttest.append([])\n codon_text.append([])\n i += 1\n if np.isnan(p_val):\n codon_ttest[-1].append(0)\n codon_text[-1].append(codon + '\\n NA')\n else:\n codon_ttest[-1].append(p_val)\n codon_text[-1].append(codon + '\\n' + str(round(p_val, 2)))\n count_all += 1\n if p_val < 0.5:\n count_sig += 1\n sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))\n lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))\n p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),\n axis=None)[1]\n if not j % 6:\n AA_chisquare.append([])\n AA_text.append([])\n j += 1\n if np.isnan(p_val):\n AA_chisquare[-1].append(0)\n AA_text[-1].append(AA + '\\n NA')\n else:\n AA_chisquare[-1].append(p_val)\n AA_text[-1].append(AA + '\\n' + str(round(p_val, 2)))\n for n in range(j % 6, 6):\n AA_chisquare[-1].append(0)\n AA_text[-1].append('')\n AAs = choose_codons(codon_ttest, codon_text)\n AA_chisquare = np.array(AA_chisquare)\n codon_ttest = np.array(codon_ttest)\n AA_text = np.array(AA_text)\n codon_text = np.array(codon_text)\n print(\n \"\"\"%d out of %d codon show significant usage difference between SP and LP genes (p_value < 0.5)\n\"\"\"\n % (count_sig, count_all))\n plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)\n plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)\n return AAs\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n annotate_heatmap(im, text)\n fig.tight_layout()\n plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png')\n\n\ndef heatmap(data, ax, cmap, cbarlabel):\n if not ax:\n ax = plt.gca()\n im = ax.imshow(data, cmap)\n cbar = 
ax.figure.colorbar(im, ax=ax)\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n ax.set_xticklabels(range(data.shape[1]))\n ax.set_yticklabels(range(data.shape[0]))\n ax.tick_params(top=False, bottom=True, labeltop=False, labelbottom=True)\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor=True)\n ax.grid(which='minor', color='w', linestyle='-', linewidth=3)\n ax.tick_params(which='minor', bottom=False, left=False)\n cbar.ax.set_ylabel(cbarlabel, va='top')\n return im, cbar\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n for AA in list(sp_AA_dict.keys()):\n 
codon_data = []\n codons = []\n for codon in sp_AA_dict[AA]:\n lp_AA_dict[AA][codon].reverse()\n codons.append(codon)\n codon_data.append([])\n codon_data[-1].append(sp_AA_dict[AA][codon])\n codon_data[-1].append(lp_AA_dict[AA][codon])\n codon_usage_plot(codon_data, AA, codons)\n\n\ndef codon_usage_plot(data, AA, codons):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))\n for i in range(len(data)):\n x_sp = np.linspace(0, 50, len(data[i][0]))\n x_lp = np.linspace(50, 100, len(data[i][1]))\n ax.plot(x_sp, data[i][0], label='sp_' + codons[i])\n ax.plot(x_lp, data[i][1], label='lp_' + codons[i])\n ax.legend(loc=1)\n ax.set_title(AA)\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\ndef skellam_plot(mu1, mu2, name):\n print(mu1, ' ', mu2, ' ', mu1 - mu2, ' ', name)\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))\n x = np.arange(stats.skellam.ppf(0.01, mu1, mu2), stats.skellam.ppf(0.99,\n mu1, mu2))\n ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker='o', label=name)\n ax.legend(loc=1)\n plt.show\n\n\nargs = parse_args()\nsp_codon_usage = Codon_Usage(args.sp_file)\nlp_codon_usage = Codon_Usage(args.lp_file)\nsp_AA_dict = sp_codon_usage.get_AA_dict()\nlp_AA_dict = lp_codon_usage.get_AA_dict()\nprint('Analyzing SP and LP %s group data\\n' % args.label)\nAAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, 
args.label)\nplot_SP_LP(sp_AA_dict, lp_AA_dict)\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Wed Mar 13 17:34:32 2019\n\n@author: fanlizhou\n\nAnalyze codon usage of sequence from 'SP_gene_seq.txt' and 'LP_gene_seq.txt'\nPlot heatmap of amino acid usage and codon usage\nPlot codon usage in each gene for each amino acid. Genes were arranged so that\nthe gene expression of SP decrease from 0 to 50 (x-axis) and the gene expression\nof LP increase from 51 to 100 (x-axis)\n\nUsage: codon_usage.py [-h] [--label LABEL] sp_file lp_file \n\nOptions:\n--label Define the label of out-put files. Default=\"top\"\nsp_file Path to the SP data files\nlp_file Path to the LP data files\n\n\"\"\"\n\nimport io, os, argparse, collections\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help = 'one input SP data file\\n')\n parser.add_argument('lp_file', help = 'one input LP data file\\n')\n parser.add_argument('--label', '-l', \n type = str, required = False, default = 'top', \n help = 'Define the label of out-put files. Default=\"top\"\\n')\n \n args = parser.parse_args()\n \n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' 
% (path))\n \n return args\n\n\n# a Codon_Usage class to store codon usage information for each genotype\nclass Codon_Usage:\n \n def __init__(self, filename): \n self.seq, self.gene_num = self.get_seq(filename)\n \n \n def get_seq(self, filename): \n file = io.open(filename)\n # list of selected gene sequences, excluded genes that are non-triple\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n \n for line in file:\n # read a gene information line\n if line[0]=='>':\n count_all += 1\n \n # if a gene has been read, then append it to all_seq if the\n # sequence is triple\n if gene_seq!='': \n if len(gene_seq)%3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n \n gene_seq = ''\n \n # read a gene sequence line \n else:\n gene_seq += line.strip()\n \n \n file.close() \n print('%s:\\n%d genes added\\n%d are non-triple\\n'%\n (filename[:2],count_all, count_non_triple))\n \n return (all_seq, count_all - count_non_triple)\n \n\n def get_AA(self, codon):\n # dict key: codon -> AA\n codon_map = {\n 'TTT':'Phe', 'TTC':'Phe', 'TTA':'Leu', 'TTG':'Leu',\n 'TCT':'Ser', 'TCC':'Ser', 'TCA':'Ser', 'TCG':'Ser',\n 'TAT':'Tyr', 'TAC':'Tyr', 'TAA':'STOP', 'TAG':'STOP',\n 'TGT':'Cys', 'TGC':'Cys', 'TGA':'STOP', 'TGG':'Trp',\n 'CTT':'Leu', 'CTC':'Leu', 'CTA':'Leu', 'CTG':'Leu',\n 'CCT':'Pro', 'CCC':'Pro', 'CCA':'Pro', 'CCG':'Pro',\n 'CAT':'His', 'CAC':'His', 'CAA':'Gln', 'CAG':'Gln',\n 'CGT':'Arg', 'CGC':'Arg', 'CGA':'Arg', 'CGG':'Arg',\n 'ATT':'Ile', 'ATC':'Ile', 'ATA':'Ile', 'ATG':'Met',\n 'ACT':'Thr', 'ACC':'Thr', 'ACA':'Thr', 'ACG':'Thr',\n 'AAT':'Asn', 'AAC':'Asn', 'AAA':'Lys', 'AAG':'Lys',\n 'AGT':'Ser', 'AGC':'Ser', 'AGA':'Arg', 'AGG':'Arg',\n 'GTT':'Val', 'GTC':'Val', 'GTA':'Val', 'GTG':'Val',\n 'GCT':'Ala', 'GCC':'Ala', 'GCA':'Ala', 'GCG':'Ala',\n 'GAT':'Asp', 'GAC':'Asp', 'GAA':'Glu', 'GAG':'Glu',\n 'GGT':'Gly', 'GGC':'Gly', 'GGA':'Gly', 'GGG':'Gly'}\n\n if codon in codon_map:\n return codon_map[codon] \n else:\n return ''\n \n \n def 
get_usage_dict(self, seq):\n # usage_dict structure:\n # dict key: AA -> [\n # dict key: codon -> \n # [codon_count,\n # codon_count/AA_count]\n # AA_count\n # ] \n usage_dict = \\\n collections.defaultdict(lambda: \n [\n collections.defaultdict(\n lambda: [0, 0]), \n 0\n ])\n # save AAs usage information\n for index in range(0, len(seq), 3):\n codon = seq[index:index+3]\n AA = self.get_AA(codon)\n if AA:\n # count how many times the AA appears\n usage_dict[AA][1] += 1\n # count how many times the codon is used\n usage_dict[AA][0][codon][0] += 1\n \n # calculate the codon usage percentage for an AA\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = \\\n usage_dict[AA][0][codon][0]/usage_dict[AA][1]\n\n return usage_dict\n\n\n def get_AA_dict(self): \n # AA_dict structure:\n # 1st dict key: AA -> 2nd dict key: codon -> a list of codon usage \n # percentage of each gene \n AA_dict = \\\n collections.defaultdict(\n lambda:collections.defaultdict(list))\n \n # dict key: AA -> codon list\n AA_map = {\n 'Phe':['TTT', 'TTC'],\n 'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],\n 'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'], \n 'Tyr':['TAT', 'TAC'], \n 'STOP':['TAA', 'TAG', 'TGA'],\n 'Cys':['TGT', 'TGC'], \n 'Trp':['TGG'],\n 'Pro':['CCT', 'CCC', 'CCA', 'CCG'],\n 'His':['CAT', 'CAC'], \n 'Gln':['CAA', 'CAG'],\n 'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],\n 'Ile':['ATT', 'ATC', 'ATA'], \n 'Met':['ATG'],\n 'Thr':['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn':['AAT', 'AAC'], \n 'Lys':['AAA', 'AAG'],\n 'Val':['GTT', 'GTC', 'GTA', 'GTG'],\n 'Ala':['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp':['GAT', 'GAC'], \n 'Glu':['GAA', 'GAG'],\n 'Gly':['GGT', 'GGC', 'GGA', 'GGG']\n }\n \n # list of codon usage for each gene\n usage_dict_list = []\n \n # get codon usage information for each gene\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n \n # get the list of codon usage percentage from each gene \n for AA in 
list(AA_map.keys()):\n for codon in AA_map[AA]:\n # get codon usage information from each gene\n for usage_dict in usage_dict_list:\n # append codon usage percentage in the gene\n AA_dict[AA][codon].append(\n usage_dict[AA][0][codon][1])\n \n return AA_dict \n \n\ndef heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label): \n # list of Chi-Square test results\n AA_chisquare = []\n # AA plotting annotation information\n AA_text = []\n \n # list of student's t-test results\n codon_ttest = []\n # codon plotting annotaion information\n codon_text = []\n \n i = 0\n j = 0\n # number of genes analyzed\n count_all = 0\n # number of genes that show significant results\n count_sig = 0\n \n for AA in list(sp_AA_dict.keys()): \n # mean values of codon usage for each AA\n sp_codon_mean = []\n lp_codon_mean = [] \n \n for codon in sp_AA_dict[AA]:\n # calculate ttest results \n p_val = stats.ttest_ind(sp_AA_dict[AA][codon],\n lp_AA_dict[AA][codon],\n equal_var = False)[1]\n \n # display eight codons in a row\n if not i % 8:\n codon_ttest.append([])\n codon_text.append([])\n i += 1\n \n # handle NULL values\n if np.isnan(p_val):\n codon_ttest[-1].append(0)\n codon_text[-1].append(codon + '\\n NA')\n # save ttest p-values and annotation information \n else: \n codon_ttest[-1].append(p_val)\n codon_text[-1].append(codon + '\\n' + str(round(p_val, 2)))\n count_all += 1\n if p_val < 0.5:\n count_sig += 1\n \n sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))\n lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon])) \n \n # get Chi-Square test results of each AA\n p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]), \n axis = None)[1]\n \n # display six AA in a row\n if not j % 6:\n AA_chisquare.append([])\n AA_text.append([])\n j += 1\n \n # handle Null values\n if np.isnan(p_val): \n AA_chisquare[-1].append(0)\n AA_text[-1].append(AA + '\\n NA')\n # save Chi-Square test p-values and annotation information\n else: \n AA_chisquare[-1].append(p_val)\n AA_text[-1].append(AA + '\\n' + 
str(round(p_val, 2)))\n \n # handle empty cells\n for n in range(j % 6, 6):\n AA_chisquare[-1].append(0)\n AA_text[-1].append('')\n \n # get list of AAs that show significant difference between SP and LP groups\n AAs = choose_codons(codon_ttest, codon_text) \n\n AA_chisquare = np.array(AA_chisquare)\n codon_ttest = np.array(codon_ttest)\n \n AA_text = np.array(AA_text)\n codon_text = np.array(codon_text)\n\n print('%d out of %d codon show significant usage difference \\\n between SP and LP genes (p_value < 0.5)\\n' % \n (count_sig, count_all))\n plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)\n plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)\n \n return AAs\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n \n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))\n\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n \n annotate_heatmap(im, text)\n\n fig.tight_layout()\n plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png') \n \ndef heatmap(data, ax, cmap, cbarlabel):\n \n if not ax:\n ax = plt.gca()\n \n im = ax.imshow(data, cmap)\n \n cbar = ax.figure.colorbar(im, ax=ax)\n\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n ax.set_xticklabels(range(data.shape[1]))\n ax.set_yticklabels(range(data.shape[0]))\n\n ax.tick_params(top=False, bottom=True,\n labeltop=False, labelbottom=True)\n\n # draw white space between squares\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n \n ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)\n ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)\n ax.tick_params(which = 'minor', bottom = False, left = False) \n cbar.ax.set_ylabel(cbarlabel, va = 'top')\n\n return im, cbar\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black','white']\n\n data = im.get_array()\n # set threshold to decide color\n threshold = im.norm(data.max()) 
/ 2\n \n kw = dict(horizontalalignment = 'center',\n verticalalignment = 'center')\n \n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color = textcolors[im.norm(data[i,j]) > threshold])\n im.axes.text(j, i, text_label[i,j], **kw)\n\n\ndef choose_codons(ttest, text): \n # dict key: AA -> codon\n # only contains AAs with only two codon choices \n codon_map = {\n 'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',\n 'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His', \n 'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn', \n 'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp', \n 'GAA':'Glu', 'GAG':'Glu'} \n \n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n \n file = io.open('AAs_to_compare.txt', 'w') \n file.write('Compare following AAs\\n')\n # AAs that have only two codon choices and show significant \n # codon usage difference between SP and LP groups\n AAs = []\n \n for AA in codon_dict.keys():\n AAs.append(AA) \n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % \n (AA, codon_dict[AA][0], codon_dict[AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n \n file.close()\n \n return AAs\n \n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n # plot each AA\n for AA in list(sp_AA_dict.keys()): \n # list of codon usage information\n codon_data = []\n # List of codon names\n codons = []\n \n for codon in sp_AA_dict[AA]: \n # LP group data is displayed from lowest expressed genes \n # to highest expressed genes\n lp_AA_dict[AA][codon].reverse()\n \n codons.append(codon) \n codon_data.append([])\n # display SP group data first and then LP group data\n codon_data[-1].append(sp_AA_dict[AA][codon]) \n codon_data[-1].append(lp_AA_dict[AA][codon])\n \n # plot usage curves \n codon_usage_plot(codon_data, AA, codons)\n\n \ndef codon_usage_plot(data, AA, 
codons):\n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (15,5))\n \n for i in range(len(data)):\n # 0-50 shows SP group data\n x_sp = np.linspace(0, 50, len(data[i][0]))\n # 50-100 shows LP group data\n x_lp = np.linspace(50, 100, len(data[i][1]))\n \n ax.plot(x_sp, data[i][0], label = 'sp_' + codons[i])\n ax.plot(x_lp, data[i][1], label = 'lp_' + codons[i])\n ax.legend(loc = 1)\n ax.set_title(AA)\n\n \ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows = 2, ncols =1, figsize = (40, 20))\n\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n \n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n \n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA): \n sp_mu = {}\n lp_mu = {}\n codons = []\n \n # get mean values\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n \n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\ndef skellam_plot(mu1, mu2, name): \n print(mu1,' ', mu2, ' ', mu1-mu2, ' ', name)\n\n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (5, 5)) \n x = np.arange(stats.skellam.ppf(0.01, mu1, mu2), \n stats.skellam.ppf(0.99, mu1, mu2))\n ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker = 'o', label = name)\n ax.legend(loc = 1)\n \n plt.show\n \n \n# main flow\nargs = parse_args()\nsp_codon_usage = Codon_Usage(args.sp_file)\nlp_codon_usage = Codon_Usage(args.lp_file)\n\nsp_AA_dict = sp_codon_usage.get_AA_dict() \nlp_AA_dict = lp_codon_usage.get_AA_dict()\n\nprint(\"Analyzing SP and LP %s group data\\n\" % (args.label))\n \nAAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)\nplot_SP_LP(sp_AA_dict, lp_AA_dict)\n\n# optional\n# get Skellam distributions of AAs that have only two codon choices \n# and show distictive usage between SP and 
LP\n'''\nsp_all_codon_usage = Codon_Usage('SP_all_gene_seq.txt')\nlp_all_codon_usage = Codon_Usage('LP_all_gene_seq.txt')\n\nsp_all_AA_dict = sp_all_codon_usage.get_AA_dict() \nlp_all_AA_dict = lp_all_codon_usage.get_AA_dict()\n\nfor AA in AAs:\n plot_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)\n get_skellam_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)\n'''",
"step-ids": [
11,
13,
16,
20,
21
]
}
|
[
11,
13,
16,
20,
21
] |
from django.db import models
class TamLicense(models.Model):
license = models.TextField("Inserisci qui il tuo codice licenza.")
|
normal
|
{
"blob_id": "1daecce86769e36a17fe2935f89b9266a0197cf0",
"index": 3942,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TamLicense(models.Model):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TamLicense(models.Model):\n license = models.TextField('Inserisci qui il tuo codice licenza.')\n",
"step-4": "from django.db import models\n\n\nclass TamLicense(models.Model):\n license = models.TextField('Inserisci qui il tuo codice licenza.')\n",
"step-5": "from django.db import models\n\n\nclass TamLicense(models.Model):\n license = models.TextField(\"Inserisci qui il tuo codice licenza.\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#calss header
class _PULPIER():
def __init__(self,):
self.name = "PULPIER"
self.definitions = pulpy
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['pulpy']
|
normal
|
{
"blob_id": "a1d1056f302cf7bc050537dd8cc53cdb2da7e989",
"index": 5507,
"step-1": "<mask token>\n",
"step-2": "class _PULPIER:\n <mask token>\n",
"step-3": "class _PULPIER:\n\n def __init__(self):\n self.name = 'PULPIER'\n self.definitions = pulpy\n self.parents = []\n self.childen = []\n self.properties = []\n self.jsondata = {}\n self.basic = ['pulpy']\n",
"step-4": "\n\n#calss header\nclass _PULPIER():\n\tdef __init__(self,): \n\t\tself.name = \"PULPIER\"\n\t\tself.definitions = pulpy\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.basic = ['pulpy']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from setuptools import setup
from Cython.Build import cythonize
setup(
ext_modules=cythonize("utils.pyx"),
)
|
normal
|
{
"blob_id": "66c71111eae27f6e9fee84eef05cc1f44cc5a477",
"index": 3745,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(ext_modules=cythonize('utils.pyx'))\n",
"step-3": "from setuptools import setup\nfrom Cython.Build import cythonize\nsetup(ext_modules=cythonize('utils.pyx'))\n",
"step-4": "from setuptools import setup\nfrom Cython.Build import cythonize\n\nsetup(\n ext_modules=cythonize(\"utils.pyx\"),\n)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Error(app):
@app.route('/errors', cors=True, methods=['POST'])
@printError
def errors():
request = app.current_request
data = request.json_body
print(data)
return data
<|reserved_special_token_1|>
from chalicelib.utilities import *
def Error(app):
@app.route('/errors', cors=True, methods=['POST'])
@printError
def errors():
request = app.current_request
data = request.json_body
print(data)
return data
|
flexible
|
{
"blob_id": "f100757fcb1bef334f9f8eacae83af551d2bac5b",
"index": 3239,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Error(app):\n\n @app.route('/errors', cors=True, methods=['POST'])\n @printError\n def errors():\n request = app.current_request\n data = request.json_body\n print(data)\n return data\n",
"step-3": "from chalicelib.utilities import *\n\n\ndef Error(app):\n\n @app.route('/errors', cors=True, methods=['POST'])\n @printError\n def errors():\n request = app.current_request\n data = request.json_body\n print(data)\n return data\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def voxels():
shape = []
for x in range(-5, 4, 1):
for y in range(-5, 4, 1):
for z in range(0, 10, 1):
translate([x, y, z])
new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))
shape.append(new_cube)
return shape
<|reserved_special_token_0|>
def export(shape, filename):
with open(filename + '.scad', 'w+') as f:
f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))
f.closed
print('Success')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def voxels():
shape = []
for x in range(-5, 4, 1):
for y in range(-5, 4, 1):
for z in range(0, 10, 1):
translate([x, y, z])
new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))
shape.append(new_cube)
return shape
def basic_geometry():
box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,
makeNothingBox, makeCylindBeam, makeHollowCylindBeam,
makeHollowCone, makeEye]
shape_list = []
for bf in box_functions:
for cf in box_functions:
for bf2 in box_functions:
for i in range(2):
shape = union()(bf(5, 4, 5), translate([0, 0, 5])(cf(4,
3, 5)), translate([0, 0, 10])(bf2(5, 4, 5)))
if i == 0:
shapeInner = cylinder(r=0.5, h=20, center=False)
shape = shape - shapeInner
shape_list.append(shape)
return shape_list
def export(shape, filename):
with open(filename + '.scad', 'w+') as f:
f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))
f.closed
print('Success')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def voxels():
shape = []
for x in range(-5, 4, 1):
for y in range(-5, 4, 1):
for z in range(0, 10, 1):
translate([x, y, z])
new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))
shape.append(new_cube)
return shape
def basic_geometry():
box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,
makeNothingBox, makeCylindBeam, makeHollowCylindBeam,
makeHollowCone, makeEye]
shape_list = []
for bf in box_functions:
for cf in box_functions:
for bf2 in box_functions:
for i in range(2):
shape = union()(bf(5, 4, 5), translate([0, 0, 5])(cf(4,
3, 5)), translate([0, 0, 10])(bf2(5, 4, 5)))
if i == 0:
shapeInner = cylinder(r=0.5, h=20, center=False)
shape = shape - shapeInner
shape_list.append(shape)
return shape_list
def export(shape, filename):
with open(filename + '.scad', 'w+') as f:
f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))
f.closed
print('Success')
if __name__ == '__main__':
out_dir = sys.argv[1] if len(sys.argv) > 1 else os.curdir
file_out = os.path.join(out_dir, 'basic_geometry.scad')
shape_list = basic_geometry()
for i, shape in enumerate(shape_list):
export(shape, 'output' + str(i))
print('Created OpenSCAD file...')
print('Compiling STL file...')
<|reserved_special_token_1|>
from __future__ import division
import os
from solid import *
from solid.utils import *
from shapes import *
import sys
from solid import *
from solid.utils import *
def voxels():
shape = []
for x in range(-5, 4, 1):
for y in range(-5, 4, 1):
for z in range(0, 10, 1):
translate([x, y, z])
new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))
shape.append(new_cube)
return shape
def basic_geometry():
box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,
makeNothingBox, makeCylindBeam, makeHollowCylindBeam,
makeHollowCone, makeEye]
shape_list = []
for bf in box_functions:
for cf in box_functions:
for bf2 in box_functions:
for i in range(2):
shape = union()(bf(5, 4, 5), translate([0, 0, 5])(cf(4,
3, 5)), translate([0, 0, 10])(bf2(5, 4, 5)))
if i == 0:
shapeInner = cylinder(r=0.5, h=20, center=False)
shape = shape - shapeInner
shape_list.append(shape)
return shape_list
def export(shape, filename):
with open(filename + '.scad', 'w+') as f:
f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))
f.closed
print('Success')
if __name__ == '__main__':
out_dir = sys.argv[1] if len(sys.argv) > 1 else os.curdir
file_out = os.path.join(out_dir, 'basic_geometry.scad')
shape_list = basic_geometry()
for i, shape in enumerate(shape_list):
export(shape, 'output' + str(i))
print('Created OpenSCAD file...')
print('Compiling STL file...')
<|reserved_special_token_1|>
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import os
from solid import *
from solid.utils import *
from shapes import *
import sys
# Assumes SolidPython is in site-packages or elsewhwere in sys.path
from solid import *
from solid.utils import *
def voxels():
# shape = cube([1, 1, 1], center=False);
shape = []
for x in range(-5, 4, 1):
for y in range(-5, 4, 1):
for z in range(0, 10, 1):
translate([x, y, z])
new_cube = color([0,0,1, 0.5])(cube([1, 1, 1], center=False));
# shape = (shape+new_cube)
shape.append(new_cube)
return shape
def basic_geometry():
box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,makeNothingBox, makeCylindBeam, makeHollowCylindBeam, makeHollowCone, makeEye]
# cylind_functions = [makeCylindBeam, makeHollowCylindBeam, makeHollowCone, makeEye, makeNothingCylind]
shape_list = []
for bf in box_functions:
for cf in box_functions:
for bf2 in box_functions:
for i in range(2):
shape = union()(
# translate([-2, -3, 0])(
bf(5, 4, 5),
translate([0, 0, 5])(
cf(4, 3, 5)),
translate([0, 0, 10])(
bf2(5, 4, 5))
)
if i == 0:
shapeInner = cylinder(r=0.5, h=20, center=False)
shape = shape - shapeInner
shape_list.append(shape)
return shape_list
def export(shape, filename):
with open(filename + '.scad', 'w+') as f:
f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))
f.closed
print("Success")
if __name__ == '__main__':
out_dir = sys.argv[1] if len(sys.argv) > 1 else os.curdir
file_out = os.path.join(out_dir, 'basic_geometry.scad')
shape_list = basic_geometry()
for i, shape in enumerate(shape_list):
export(shape, "output" + str(i))
print("Created OpenSCAD file...")
print("Compiling STL file...")
|
flexible
|
{
"blob_id": "27ca60435c614e4d748917da45fc2fc75ee59f1c",
"index": 1682,
"step-1": "<mask token>\n\n\ndef voxels():\n shape = []\n for x in range(-5, 4, 1):\n for y in range(-5, 4, 1):\n for z in range(0, 10, 1):\n translate([x, y, z])\n new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))\n shape.append(new_cube)\n return shape\n\n\n<mask token>\n\n\ndef export(shape, filename):\n with open(filename + '.scad', 'w+') as f:\n f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))\n f.closed\n print('Success')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef voxels():\n shape = []\n for x in range(-5, 4, 1):\n for y in range(-5, 4, 1):\n for z in range(0, 10, 1):\n translate([x, y, z])\n new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))\n shape.append(new_cube)\n return shape\n\n\ndef basic_geometry():\n box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,\n makeNothingBox, makeCylindBeam, makeHollowCylindBeam,\n makeHollowCone, makeEye]\n shape_list = []\n for bf in box_functions:\n for cf in box_functions:\n for bf2 in box_functions:\n for i in range(2):\n shape = union()(bf(5, 4, 5), translate([0, 0, 5])(cf(4,\n 3, 5)), translate([0, 0, 10])(bf2(5, 4, 5)))\n if i == 0:\n shapeInner = cylinder(r=0.5, h=20, center=False)\n shape = shape - shapeInner\n shape_list.append(shape)\n return shape_list\n\n\ndef export(shape, filename):\n with open(filename + '.scad', 'w+') as f:\n f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))\n f.closed\n print('Success')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef voxels():\n shape = []\n for x in range(-5, 4, 1):\n for y in range(-5, 4, 1):\n for z in range(0, 10, 1):\n translate([x, y, z])\n new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))\n shape.append(new_cube)\n return shape\n\n\ndef basic_geometry():\n box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,\n makeNothingBox, makeCylindBeam, makeHollowCylindBeam,\n makeHollowCone, makeEye]\n shape_list = []\n for bf in box_functions:\n for cf in box_functions:\n for bf2 in box_functions:\n for i in range(2):\n shape = union()(bf(5, 4, 5), translate([0, 0, 5])(cf(4,\n 3, 5)), translate([0, 0, 10])(bf2(5, 4, 5)))\n if i == 0:\n shapeInner = cylinder(r=0.5, h=20, center=False)\n shape = shape - shapeInner\n shape_list.append(shape)\n return shape_list\n\n\ndef export(shape, filename):\n with open(filename + '.scad', 'w+') as f:\n f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))\n f.closed\n print('Success')\n\n\nif __name__ == '__main__':\n out_dir = sys.argv[1] if len(sys.argv) > 1 else os.curdir\n file_out = os.path.join(out_dir, 'basic_geometry.scad')\n shape_list = basic_geometry()\n for i, shape in enumerate(shape_list):\n export(shape, 'output' + str(i))\n print('Created OpenSCAD file...')\n print('Compiling STL file...')\n",
"step-4": "from __future__ import division\nimport os\nfrom solid import *\nfrom solid.utils import *\nfrom shapes import *\nimport sys\nfrom solid import *\nfrom solid.utils import *\n\n\ndef voxels():\n shape = []\n for x in range(-5, 4, 1):\n for y in range(-5, 4, 1):\n for z in range(0, 10, 1):\n translate([x, y, z])\n new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))\n shape.append(new_cube)\n return shape\n\n\ndef basic_geometry():\n box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,\n makeNothingBox, makeCylindBeam, makeHollowCylindBeam,\n makeHollowCone, makeEye]\n shape_list = []\n for bf in box_functions:\n for cf in box_functions:\n for bf2 in box_functions:\n for i in range(2):\n shape = union()(bf(5, 4, 5), translate([0, 0, 5])(cf(4,\n 3, 5)), translate([0, 0, 10])(bf2(5, 4, 5)))\n if i == 0:\n shapeInner = cylinder(r=0.5, h=20, center=False)\n shape = shape - shapeInner\n shape_list.append(shape)\n return shape_list\n\n\ndef export(shape, filename):\n with open(filename + '.scad', 'w+') as f:\n f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))\n f.closed\n print('Success')\n\n\nif __name__ == '__main__':\n out_dir = sys.argv[1] if len(sys.argv) > 1 else os.curdir\n file_out = os.path.join(out_dir, 'basic_geometry.scad')\n shape_list = basic_geometry()\n for i, shape in enumerate(shape_list):\n export(shape, 'output' + str(i))\n print('Created OpenSCAD file...')\n print('Compiling STL file...')\n",
"step-5": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport os\nfrom solid import *\nfrom solid.utils import *\n\nfrom shapes import *\nimport sys\n\n# Assumes SolidPython is in site-packages or elsewhwere in sys.path\nfrom solid import *\nfrom solid.utils import *\n\ndef voxels():\n # shape = cube([1, 1, 1], center=False);\n shape = []\n for x in range(-5, 4, 1):\n for y in range(-5, 4, 1):\n for z in range(0, 10, 1):\n translate([x, y, z])\n new_cube = color([0,0,1, 0.5])(cube([1, 1, 1], center=False));\n # shape = (shape+new_cube)\n shape.append(new_cube)\n return shape\n\ndef basic_geometry():\n box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,makeNothingBox, makeCylindBeam, makeHollowCylindBeam, makeHollowCone, makeEye]\n # cylind_functions = [makeCylindBeam, makeHollowCylindBeam, makeHollowCone, makeEye, makeNothingCylind]\n shape_list = []\n for bf in box_functions:\n for cf in box_functions:\n for bf2 in box_functions:\n for i in range(2):\n shape = union()(\n # translate([-2, -3, 0])(\n bf(5, 4, 5),\n translate([0, 0, 5])(\n cf(4, 3, 5)),\n translate([0, 0, 10])(\n bf2(5, 4, 5))\n )\n if i == 0:\n shapeInner = cylinder(r=0.5, h=20, center=False)\n shape = shape - shapeInner\n shape_list.append(shape)\n\n return shape_list\n\ndef export(shape, filename):\n with open(filename + '.scad', 'w+') as f:\n f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))\n\n f.closed\n print(\"Success\")\n\nif __name__ == '__main__':\n out_dir = sys.argv[1] if len(sys.argv) > 1 else os.curdir\n file_out = os.path.join(out_dir, 'basic_geometry.scad')\n\n shape_list = basic_geometry()\n for i, shape in enumerate(shape_list):\n export(shape, \"output\" + str(i))\n print(\"Created OpenSCAD file...\")\n print(\"Compiling STL file...\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class WireValues:
def __init__(self):
self.wires = {}
def __getitem__(self, name):
return int(name) if isnum(name) else self.wires[name]
def __setitem__(self, name, value):
self.wires[name] = value
def __contains__(self, name):
return isnum(name) or name in self.wires
<|reserved_special_token_0|>
@make_command('# RSHIFT # -> #')
def rshift(wires, v1, v2, name):
wires[name] = wires[v1] >> wires[v2]
<|reserved_special_token_0|>
def process_links(links):
wires = WireValues()
while links:
remaining = []
for link in links:
if all(i in wires for i in link.inputs):
link.command.function(wires, *link.inputs, link.output)
else:
remaining.append(link)
links = remaining
return wires
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def isnum(name):
return name.startswith('-') or name.isdigit()
class WireValues:
def __init__(self):
self.wires = {}
def __getitem__(self, name):
return int(name) if isnum(name) else self.wires[name]
def __setitem__(self, name, value):
self.wires[name] = value
def __contains__(self, name):
return isnum(name) or name in self.wires
<|reserved_special_token_0|>
def make_command(expr):
pattern = re.compile('^' + expr.replace('#', '([0-9a-z]+)') + '$')
def command_maker(function):
command = Command(pattern, function)
COMMANDS.append(command)
return command
return command_maker
@make_command('# -> #')
def assignment(wires, v1, name):
wires[name] = wires[v1]
@make_command('# AND # -> #')
def anding(wires, v1, v2, name):
wires[name] = wires[v1] & wires[v2]
<|reserved_special_token_0|>
@make_command('# RSHIFT # -> #')
def rshift(wires, v1, v2, name):
wires[name] = wires[v1] >> wires[v2]
@make_command('NOT # -> #')
def notting(wires, v1, name):
wires[name] = (1 << 16) - 1 & ~wires[v1]
def create_link(line):
for cmd in COMMANDS:
m = re.match(cmd.pattern, line)
if m:
gps = m.groups()
return WireLink(cmd, gps[:-1], gps[-1])
raise ValueError(repr(line))
def process_links(links):
wires = WireValues()
while links:
remaining = []
for link in links:
if all(i in wires for i in link.inputs):
link.command.function(wires, *link.inputs, link.output)
else:
remaining.append(link)
links = remaining
return wires
def main():
lines = sys.stdin.read().strip().split('\n')
links = [create_link(line) for line in lines]
wires = process_links(links)
answer = wires['a']
print('Part 1 wire a:', answer)
index = next(i for i, link in enumerate(links) if link.output == 'b')
links[index] = WireLink(assignment, [str(answer)], 'b')
wires = process_links(links)
answer = wires['a']
print('Part 2 wire a:', answer)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def isnum(name):
return name.startswith('-') or name.isdigit()
class WireValues:
def __init__(self):
self.wires = {}
def __getitem__(self, name):
return int(name) if isnum(name) else self.wires[name]
def __setitem__(self, name, value):
self.wires[name] = value
def __contains__(self, name):
return isnum(name) or name in self.wires
<|reserved_special_token_0|>
def make_command(expr):
pattern = re.compile('^' + expr.replace('#', '([0-9a-z]+)') + '$')
def command_maker(function):
command = Command(pattern, function)
COMMANDS.append(command)
return command
return command_maker
@make_command('# -> #')
def assignment(wires, v1, name):
wires[name] = wires[v1]
@make_command('# AND # -> #')
def anding(wires, v1, v2, name):
wires[name] = wires[v1] & wires[v2]
@make_command('# OR # -> #')
def oring(wires, v1, v2, name):
wires[name] = wires[v1] | wires[v2]
@make_command('# LSHIFT # -> #')
def lshift(wires, v1, v2, name):
wires[name] = wires[v1] << wires[v2]
@make_command('# RSHIFT # -> #')
def rshift(wires, v1, v2, name):
wires[name] = wires[v1] >> wires[v2]
@make_command('NOT # -> #')
def notting(wires, v1, name):
wires[name] = (1 << 16) - 1 & ~wires[v1]
def create_link(line):
for cmd in COMMANDS:
m = re.match(cmd.pattern, line)
if m:
gps = m.groups()
return WireLink(cmd, gps[:-1], gps[-1])
raise ValueError(repr(line))
def process_links(links):
wires = WireValues()
while links:
remaining = []
for link in links:
if all(i in wires for i in link.inputs):
link.command.function(wires, *link.inputs, link.output)
else:
remaining.append(link)
links = remaining
return wires
def main():
lines = sys.stdin.read().strip().split('\n')
links = [create_link(line) for line in lines]
wires = process_links(links)
answer = wires['a']
print('Part 1 wire a:', answer)
index = next(i for i, link in enumerate(links) if link.output == 'b')
links[index] = WireLink(assignment, [str(answer)], 'b')
wires = process_links(links)
answer = wires['a']
print('Part 2 wire a:', answer)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def isnum(name):
return name.startswith('-') or name.isdigit()
class WireValues:
def __init__(self):
self.wires = {}
def __getitem__(self, name):
return int(name) if isnum(name) else self.wires[name]
def __setitem__(self, name, value):
self.wires[name] = value
def __contains__(self, name):
return isnum(name) or name in self.wires
Command = namedtuple('Command', 'pattern function')
WireLink = namedtuple('WireLink', 'command inputs output')
COMMANDS = []
def make_command(expr):
pattern = re.compile('^' + expr.replace('#', '([0-9a-z]+)') + '$')
def command_maker(function):
command = Command(pattern, function)
COMMANDS.append(command)
return command
return command_maker
@make_command('# -> #')
def assignment(wires, v1, name):
wires[name] = wires[v1]
@make_command('# AND # -> #')
def anding(wires, v1, v2, name):
wires[name] = wires[v1] & wires[v2]
@make_command('# OR # -> #')
def oring(wires, v1, v2, name):
wires[name] = wires[v1] | wires[v2]
@make_command('# LSHIFT # -> #')
def lshift(wires, v1, v2, name):
wires[name] = wires[v1] << wires[v2]
@make_command('# RSHIFT # -> #')
def rshift(wires, v1, v2, name):
wires[name] = wires[v1] >> wires[v2]
@make_command('NOT # -> #')
def notting(wires, v1, name):
wires[name] = (1 << 16) - 1 & ~wires[v1]
def create_link(line):
for cmd in COMMANDS:
m = re.match(cmd.pattern, line)
if m:
gps = m.groups()
return WireLink(cmd, gps[:-1], gps[-1])
raise ValueError(repr(line))
def process_links(links):
wires = WireValues()
while links:
remaining = []
for link in links:
if all(i in wires for i in link.inputs):
link.command.function(wires, *link.inputs, link.output)
else:
remaining.append(link)
links = remaining
return wires
def main():
lines = sys.stdin.read().strip().split('\n')
links = [create_link(line) for line in lines]
wires = process_links(links)
answer = wires['a']
print('Part 1 wire a:', answer)
index = next(i for i, link in enumerate(links) if link.output == 'b')
links[index] = WireLink(assignment, [str(answer)], 'b')
wires = process_links(links)
answer = wires['a']
print('Part 2 wire a:', answer)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python3
import sys
import re
from collections import namedtuple
def isnum(name):
return name.startswith('-') or name.isdigit()
class WireValues:
def __init__(self):
self.wires = {}
def __getitem__(self, name):
return int(name) if isnum(name) else self.wires[name]
def __setitem__(self, name, value):
self.wires[name] = value
def __contains__(self, name):
return isnum(name) or name in self.wires
Command = namedtuple('Command', 'pattern function')
WireLink = namedtuple('WireLink', 'command inputs output')
COMMANDS = []
def make_command(expr):
pattern = re.compile('^'+expr.replace('#', '([0-9a-z]+)')+'$')
def command_maker(function):
command = Command(pattern, function)
COMMANDS.append(command)
return command
return command_maker
@make_command('# -> #')
def assignment(wires, v1, name):
wires[name] = wires[v1]
@make_command('# AND # -> #')
def anding(wires, v1, v2, name):
wires[name] = wires[v1] & wires[v2]
@make_command('# OR # -> #')
def oring(wires, v1, v2, name):
wires[name] = wires[v1] | wires[v2]
@make_command('# LSHIFT # -> #')
def lshift(wires, v1, v2, name):
wires[name] = wires[v1] << wires[v2]
@make_command('# RSHIFT # -> #')
def rshift(wires, v1, v2, name):
wires[name] = wires[v1] >> wires[v2]
@make_command('NOT # -> #')
def notting(wires, v1, name):
wires[name] = ((1<<16)-1)&~wires[v1]
def create_link(line):
for cmd in COMMANDS:
m = re.match(cmd.pattern, line)
if m:
gps = m.groups()
return WireLink(cmd, gps[:-1], gps[-1])
raise ValueError(repr(line))
def process_links(links):
wires = WireValues()
while links:
remaining = []
for link in links:
if all(i in wires for i in link.inputs):
link.command.function(wires, *link.inputs, link.output)
else:
remaining.append(link)
links = remaining
return wires
def main():
lines = sys.stdin.read().strip().split('\n')
links = [create_link(line) for line in lines]
wires = process_links(links)
answer = wires['a']
print("Part 1 wire a:", answer)
index = next(i for (i,link) in enumerate(links) if link.output=='b')
links[index] = WireLink(assignment, [str(answer)], 'b')
wires = process_links(links)
answer = wires['a']
print("Part 2 wire a:", answer)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "a5eb1f559972519dbe0f3702e03af77e61fbfb4e",
"index": 7985,
"step-1": "<mask token>\n\n\nclass WireValues:\n\n def __init__(self):\n self.wires = {}\n\n def __getitem__(self, name):\n return int(name) if isnum(name) else self.wires[name]\n\n def __setitem__(self, name, value):\n self.wires[name] = value\n\n def __contains__(self, name):\n return isnum(name) or name in self.wires\n\n\n<mask token>\n\n\n@make_command('# RSHIFT # -> #')\ndef rshift(wires, v1, v2, name):\n wires[name] = wires[v1] >> wires[v2]\n\n\n<mask token>\n\n\ndef process_links(links):\n wires = WireValues()\n while links:\n remaining = []\n for link in links:\n if all(i in wires for i in link.inputs):\n link.command.function(wires, *link.inputs, link.output)\n else:\n remaining.append(link)\n links = remaining\n return wires\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef isnum(name):\n return name.startswith('-') or name.isdigit()\n\n\nclass WireValues:\n\n def __init__(self):\n self.wires = {}\n\n def __getitem__(self, name):\n return int(name) if isnum(name) else self.wires[name]\n\n def __setitem__(self, name, value):\n self.wires[name] = value\n\n def __contains__(self, name):\n return isnum(name) or name in self.wires\n\n\n<mask token>\n\n\ndef make_command(expr):\n pattern = re.compile('^' + expr.replace('#', '([0-9a-z]+)') + '$')\n\n def command_maker(function):\n command = Command(pattern, function)\n COMMANDS.append(command)\n return command\n return command_maker\n\n\n@make_command('# -> #')\ndef assignment(wires, v1, name):\n wires[name] = wires[v1]\n\n\n@make_command('# AND # -> #')\ndef anding(wires, v1, v2, name):\n wires[name] = wires[v1] & wires[v2]\n\n\n<mask token>\n\n\n@make_command('# RSHIFT # -> #')\ndef rshift(wires, v1, v2, name):\n wires[name] = wires[v1] >> wires[v2]\n\n\n@make_command('NOT # -> #')\ndef notting(wires, v1, name):\n wires[name] = (1 << 16) - 1 & ~wires[v1]\n\n\ndef create_link(line):\n for cmd in COMMANDS:\n m = re.match(cmd.pattern, line)\n if m:\n gps = m.groups()\n return WireLink(cmd, gps[:-1], gps[-1])\n raise ValueError(repr(line))\n\n\ndef process_links(links):\n wires = WireValues()\n while links:\n remaining = []\n for link in links:\n if all(i in wires for i in link.inputs):\n link.command.function(wires, *link.inputs, link.output)\n else:\n remaining.append(link)\n links = remaining\n return wires\n\n\ndef main():\n lines = sys.stdin.read().strip().split('\\n')\n links = [create_link(line) for line in lines]\n wires = process_links(links)\n answer = wires['a']\n print('Part 1 wire a:', answer)\n index = next(i for i, link in enumerate(links) if link.output == 'b')\n links[index] = WireLink(assignment, [str(answer)], 'b')\n wires = process_links(links)\n answer = wires['a']\n print('Part 2 wire a:', answer)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef isnum(name):\n return name.startswith('-') or name.isdigit()\n\n\nclass WireValues:\n\n def __init__(self):\n self.wires = {}\n\n def __getitem__(self, name):\n return int(name) if isnum(name) else self.wires[name]\n\n def __setitem__(self, name, value):\n self.wires[name] = value\n\n def __contains__(self, name):\n return isnum(name) or name in self.wires\n\n\n<mask token>\n\n\ndef make_command(expr):\n pattern = re.compile('^' + expr.replace('#', '([0-9a-z]+)') + '$')\n\n def command_maker(function):\n command = Command(pattern, function)\n COMMANDS.append(command)\n return command\n return command_maker\n\n\n@make_command('# -> #')\ndef assignment(wires, v1, name):\n wires[name] = wires[v1]\n\n\n@make_command('# AND # -> #')\ndef anding(wires, v1, v2, name):\n wires[name] = wires[v1] & wires[v2]\n\n\n@make_command('# OR # -> #')\ndef oring(wires, v1, v2, name):\n wires[name] = wires[v1] | wires[v2]\n\n\n@make_command('# LSHIFT # -> #')\ndef lshift(wires, v1, v2, name):\n wires[name] = wires[v1] << wires[v2]\n\n\n@make_command('# RSHIFT # -> #')\ndef rshift(wires, v1, v2, name):\n wires[name] = wires[v1] >> wires[v2]\n\n\n@make_command('NOT # -> #')\ndef notting(wires, v1, name):\n wires[name] = (1 << 16) - 1 & ~wires[v1]\n\n\ndef create_link(line):\n for cmd in COMMANDS:\n m = re.match(cmd.pattern, line)\n if m:\n gps = m.groups()\n return WireLink(cmd, gps[:-1], gps[-1])\n raise ValueError(repr(line))\n\n\ndef process_links(links):\n wires = WireValues()\n while links:\n remaining = []\n for link in links:\n if all(i in wires for i in link.inputs):\n link.command.function(wires, *link.inputs, link.output)\n else:\n remaining.append(link)\n links = remaining\n return wires\n\n\ndef main():\n lines = sys.stdin.read().strip().split('\\n')\n links = [create_link(line) for line in lines]\n wires = process_links(links)\n answer = wires['a']\n print('Part 1 wire a:', answer)\n index = next(i for i, link in enumerate(links) if 
link.output == 'b')\n links[index] = WireLink(assignment, [str(answer)], 'b')\n wires = process_links(links)\n answer = wires['a']\n print('Part 2 wire a:', answer)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef isnum(name):\n return name.startswith('-') or name.isdigit()\n\n\nclass WireValues:\n\n def __init__(self):\n self.wires = {}\n\n def __getitem__(self, name):\n return int(name) if isnum(name) else self.wires[name]\n\n def __setitem__(self, name, value):\n self.wires[name] = value\n\n def __contains__(self, name):\n return isnum(name) or name in self.wires\n\n\nCommand = namedtuple('Command', 'pattern function')\nWireLink = namedtuple('WireLink', 'command inputs output')\nCOMMANDS = []\n\n\ndef make_command(expr):\n pattern = re.compile('^' + expr.replace('#', '([0-9a-z]+)') + '$')\n\n def command_maker(function):\n command = Command(pattern, function)\n COMMANDS.append(command)\n return command\n return command_maker\n\n\n@make_command('# -> #')\ndef assignment(wires, v1, name):\n wires[name] = wires[v1]\n\n\n@make_command('# AND # -> #')\ndef anding(wires, v1, v2, name):\n wires[name] = wires[v1] & wires[v2]\n\n\n@make_command('# OR # -> #')\ndef oring(wires, v1, v2, name):\n wires[name] = wires[v1] | wires[v2]\n\n\n@make_command('# LSHIFT # -> #')\ndef lshift(wires, v1, v2, name):\n wires[name] = wires[v1] << wires[v2]\n\n\n@make_command('# RSHIFT # -> #')\ndef rshift(wires, v1, v2, name):\n wires[name] = wires[v1] >> wires[v2]\n\n\n@make_command('NOT # -> #')\ndef notting(wires, v1, name):\n wires[name] = (1 << 16) - 1 & ~wires[v1]\n\n\ndef create_link(line):\n for cmd in COMMANDS:\n m = re.match(cmd.pattern, line)\n if m:\n gps = m.groups()\n return WireLink(cmd, gps[:-1], gps[-1])\n raise ValueError(repr(line))\n\n\ndef process_links(links):\n wires = WireValues()\n while links:\n remaining = []\n for link in links:\n if all(i in wires for i in link.inputs):\n link.command.function(wires, *link.inputs, link.output)\n else:\n remaining.append(link)\n links = remaining\n return wires\n\n\ndef main():\n lines = sys.stdin.read().strip().split('\\n')\n links = [create_link(line) for line in lines]\n wires = process_links(links)\n 
answer = wires['a']\n print('Part 1 wire a:', answer)\n index = next(i for i, link in enumerate(links) if link.output == 'b')\n links[index] = WireLink(assignment, [str(answer)], 'b')\n wires = process_links(links)\n answer = wires['a']\n print('Part 2 wire a:', answer)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n\nimport sys\nimport re\n\nfrom collections import namedtuple\n\ndef isnum(name):\n return name.startswith('-') or name.isdigit()\n\nclass WireValues:\n def __init__(self):\n self.wires = {}\n def __getitem__(self, name):\n return int(name) if isnum(name) else self.wires[name]\n def __setitem__(self, name, value):\n self.wires[name] = value\n def __contains__(self, name):\n return isnum(name) or name in self.wires\n\nCommand = namedtuple('Command', 'pattern function')\nWireLink = namedtuple('WireLink', 'command inputs output')\n\nCOMMANDS = []\n\ndef make_command(expr):\n pattern = re.compile('^'+expr.replace('#', '([0-9a-z]+)')+'$')\n def command_maker(function):\n command = Command(pattern, function)\n COMMANDS.append(command)\n return command\n return command_maker\n\n@make_command('# -> #')\ndef assignment(wires, v1, name):\n wires[name] = wires[v1]\n\n@make_command('# AND # -> #')\ndef anding(wires, v1, v2, name):\n wires[name] = wires[v1] & wires[v2]\n\n@make_command('# OR # -> #')\ndef oring(wires, v1, v2, name):\n wires[name] = wires[v1] | wires[v2]\n\n@make_command('# LSHIFT # -> #')\ndef lshift(wires, v1, v2, name):\n wires[name] = wires[v1] << wires[v2]\n\n@make_command('# RSHIFT # -> #')\ndef rshift(wires, v1, v2, name):\n wires[name] = wires[v1] >> wires[v2]\n\n@make_command('NOT # -> #')\ndef notting(wires, v1, name):\n wires[name] = ((1<<16)-1)&~wires[v1]\n\ndef create_link(line):\n for cmd in COMMANDS:\n m = re.match(cmd.pattern, line)\n if m:\n gps = m.groups()\n return WireLink(cmd, gps[:-1], gps[-1])\n raise ValueError(repr(line))\n\ndef process_links(links):\n wires = WireValues()\n while links:\n remaining = []\n for link in links:\n if all(i in wires for i in link.inputs):\n link.command.function(wires, *link.inputs, link.output)\n else:\n remaining.append(link)\n links = remaining\n return wires\n\ndef main():\n lines = sys.stdin.read().strip().split('\\n')\n links = [create_link(line) for line in lines]\n 
wires = process_links(links)\n answer = wires['a']\n print(\"Part 1 wire a:\", answer)\n index = next(i for (i,link) in enumerate(links) if link.output=='b')\n links[index] = WireLink(assignment, [str(answer)], 'b')\n wires = process_links(links)\n answer = wires['a']\n print(\"Part 2 wire a:\", answer)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
7,
14,
16,
18,
20
]
}
|
[
7,
14,
16,
18,
20
] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn import mixture, metrics
import utils
import spsa_clustering
N = 5000
mix_prob = np.array([0.4, 0.4, 0.2])
clust_means = np.array([[0, 0], [2, 2], [-3, 6]])
clust_gammas = np.array([[[1, -0.7], [-0.7, 1]], np.eye(2), [[1, 0.8], [0.8, 1]]])
data_set = []
true_labels = []
spsa_gamma = 1. / 6
spsa_alpha = lambda x: 0.25 / (x ** spsa_gamma)
spsa_beta = lambda x: 15. / (x ** (spsa_gamma / 4))
# spsa_alpha = lambda x: 0.001
# spsa_beta = lambda x: 0.001
clustering = spsa_clustering.ClusteringSPSA(n_clusters=clust_means.shape[0], data_shape=2, Gammas=None, alpha=spsa_alpha,
beta=spsa_beta, norm_init=False, eta=1000)
for _ in range(N):
mix_ind = np.random.choice(len(mix_prob), p=mix_prob)
data_point = np.random.multivariate_normal(clust_means[mix_ind],
clust_gammas[mix_ind])
data_set.append(data_point)
true_labels.append(mix_ind)
clustering.fit(data_point)
data_set = np.array(data_set)
utils.order_clust_centers(clust_means, clustering)
clustering.clusters_fill(data_set)
gmm = mixture.GaussianMixture(n_components=clust_means.shape[0], init_params='kmeans')
gmm.fit(data_set)
labels_pred_gmm = gmm.predict(data_set)
bgmm = mixture.BayesianGaussianMixture(n_components=clust_means.shape[0], init_params='random')
bgmm.fit(data_set)
labels_pred_bgmm = bgmm.predict(data_set)
ari_gmm = metrics.adjusted_rand_score(true_labels, labels_pred_gmm)
print('\nARI GMM: {:f}'.format(ari_gmm))
ari_bgmm = metrics.adjusted_rand_score(true_labels, labels_pred_bgmm)
print('ARI Bayesian GMM: {:f}'.format(ari_bgmm))
ari_spsa = metrics.adjusted_rand_score(true_labels, clustering.labels_)
print('ARI SPSA clustering: {:f}'.format(ari_spsa))
print('\n')
for i in range(clust_means.shape[0]):
print('GMM covar matrix distance {0}: {1:f}'.format(i,
np.linalg.norm(clust_gammas[i] - gmm.covariances_[i])))
print('\n')
for i in range(clust_means.shape[0]):
print('Bayesian GMM covar matrix distance {0}: {1:f}'.format(i,
np.linalg.norm(clust_gammas[i] - bgmm.covariances_[i])))
print('\n')
for i in range(clust_means.shape[0]):
print('SPSA clustering covar matrix distance {0}: {1:f}'.format(i,
np.linalg.norm(clust_gammas[i] - clustering.Gammas[i])))
plt.style.use('grayscale')
utils.plot_centers(clust_means, clustering)
utils.plot_centers_converg(clust_means, clustering)
# utils.plot_clustering_cov(data_set, clustering.labels_, 'SPSA clustering partition', clustering.cluster_centers_,
# clustering.Gammas)
# utils.plot_clustering_cov(data_set, true_labels, 'True partition', clust_means, clust_gammas)
# utils.plot_clustering_cov(data_set, labels_pred_gmm, 'GMM partition', gmm.means_, gmm.covariances_)
# utils.plot_clustering_cov(data_set, labels_pred_bgmm, 'Bayesian GMM partition', bgmm.means_, bgmm.covariances_)
plt.show()
|
normal
|
{
"blob_id": "5807d1c2318ffa19d237d77fbe3f4c1d51da8601",
"index": 7634,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(N):\n mix_ind = np.random.choice(len(mix_prob), p=mix_prob)\n data_point = np.random.multivariate_normal(clust_means[mix_ind],\n clust_gammas[mix_ind])\n data_set.append(data_point)\n true_labels.append(mix_ind)\n clustering.fit(data_point)\n<mask token>\nutils.order_clust_centers(clust_means, clustering)\nclustering.clusters_fill(data_set)\n<mask token>\ngmm.fit(data_set)\n<mask token>\nbgmm.fit(data_set)\n<mask token>\nprint('\\nARI GMM: {:f}'.format(ari_gmm))\n<mask token>\nprint('ARI Bayesian GMM: {:f}'.format(ari_bgmm))\n<mask token>\nprint('ARI SPSA clustering: {:f}'.format(ari_spsa))\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n print('GMM covar matrix distance {0}: {1:f}'.format(i, np.linalg.norm(\n clust_gammas[i] - gmm.covariances_[i])))\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n print('Bayesian GMM covar matrix distance {0}: {1:f}'.format(i, np.\n linalg.norm(clust_gammas[i] - bgmm.covariances_[i])))\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n print('SPSA clustering covar matrix distance {0}: {1:f}'.format(i, np.\n linalg.norm(clust_gammas[i] - clustering.Gammas[i])))\nplt.style.use('grayscale')\nutils.plot_centers(clust_means, clustering)\nutils.plot_centers_converg(clust_means, clustering)\nplt.show()\n",
"step-3": "<mask token>\nN = 5000\nmix_prob = np.array([0.4, 0.4, 0.2])\nclust_means = np.array([[0, 0], [2, 2], [-3, 6]])\nclust_gammas = np.array([[[1, -0.7], [-0.7, 1]], np.eye(2), [[1, 0.8], [0.8,\n 1]]])\ndata_set = []\ntrue_labels = []\nspsa_gamma = 1.0 / 6\nspsa_alpha = lambda x: 0.25 / x ** spsa_gamma\nspsa_beta = lambda x: 15.0 / x ** (spsa_gamma / 4)\nclustering = spsa_clustering.ClusteringSPSA(n_clusters=clust_means.shape[0],\n data_shape=2, Gammas=None, alpha=spsa_alpha, beta=spsa_beta, norm_init=\n False, eta=1000)\nfor _ in range(N):\n mix_ind = np.random.choice(len(mix_prob), p=mix_prob)\n data_point = np.random.multivariate_normal(clust_means[mix_ind],\n clust_gammas[mix_ind])\n data_set.append(data_point)\n true_labels.append(mix_ind)\n clustering.fit(data_point)\ndata_set = np.array(data_set)\nutils.order_clust_centers(clust_means, clustering)\nclustering.clusters_fill(data_set)\ngmm = mixture.GaussianMixture(n_components=clust_means.shape[0],\n init_params='kmeans')\ngmm.fit(data_set)\nlabels_pred_gmm = gmm.predict(data_set)\nbgmm = mixture.BayesianGaussianMixture(n_components=clust_means.shape[0],\n init_params='random')\nbgmm.fit(data_set)\nlabels_pred_bgmm = bgmm.predict(data_set)\nari_gmm = metrics.adjusted_rand_score(true_labels, labels_pred_gmm)\nprint('\\nARI GMM: {:f}'.format(ari_gmm))\nari_bgmm = metrics.adjusted_rand_score(true_labels, labels_pred_bgmm)\nprint('ARI Bayesian GMM: {:f}'.format(ari_bgmm))\nari_spsa = metrics.adjusted_rand_score(true_labels, clustering.labels_)\nprint('ARI SPSA clustering: {:f}'.format(ari_spsa))\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n print('GMM covar matrix distance {0}: {1:f}'.format(i, np.linalg.norm(\n clust_gammas[i] - gmm.covariances_[i])))\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n print('Bayesian GMM covar matrix distance {0}: {1:f}'.format(i, np.\n linalg.norm(clust_gammas[i] - bgmm.covariances_[i])))\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n print('SPSA 
clustering covar matrix distance {0}: {1:f}'.format(i, np.\n linalg.norm(clust_gammas[i] - clustering.Gammas[i])))\nplt.style.use('grayscale')\nutils.plot_centers(clust_means, clustering)\nutils.plot_centers_converg(clust_means, clustering)\nplt.show()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import mixture, metrics\nimport utils\nimport spsa_clustering\nN = 5000\nmix_prob = np.array([0.4, 0.4, 0.2])\nclust_means = np.array([[0, 0], [2, 2], [-3, 6]])\nclust_gammas = np.array([[[1, -0.7], [-0.7, 1]], np.eye(2), [[1, 0.8], [0.8,\n 1]]])\ndata_set = []\ntrue_labels = []\nspsa_gamma = 1.0 / 6\nspsa_alpha = lambda x: 0.25 / x ** spsa_gamma\nspsa_beta = lambda x: 15.0 / x ** (spsa_gamma / 4)\nclustering = spsa_clustering.ClusteringSPSA(n_clusters=clust_means.shape[0],\n data_shape=2, Gammas=None, alpha=spsa_alpha, beta=spsa_beta, norm_init=\n False, eta=1000)\nfor _ in range(N):\n mix_ind = np.random.choice(len(mix_prob), p=mix_prob)\n data_point = np.random.multivariate_normal(clust_means[mix_ind],\n clust_gammas[mix_ind])\n data_set.append(data_point)\n true_labels.append(mix_ind)\n clustering.fit(data_point)\ndata_set = np.array(data_set)\nutils.order_clust_centers(clust_means, clustering)\nclustering.clusters_fill(data_set)\ngmm = mixture.GaussianMixture(n_components=clust_means.shape[0],\n init_params='kmeans')\ngmm.fit(data_set)\nlabels_pred_gmm = gmm.predict(data_set)\nbgmm = mixture.BayesianGaussianMixture(n_components=clust_means.shape[0],\n init_params='random')\nbgmm.fit(data_set)\nlabels_pred_bgmm = bgmm.predict(data_set)\nari_gmm = metrics.adjusted_rand_score(true_labels, labels_pred_gmm)\nprint('\\nARI GMM: {:f}'.format(ari_gmm))\nari_bgmm = metrics.adjusted_rand_score(true_labels, labels_pred_bgmm)\nprint('ARI Bayesian GMM: {:f}'.format(ari_bgmm))\nari_spsa = metrics.adjusted_rand_score(true_labels, clustering.labels_)\nprint('ARI SPSA clustering: {:f}'.format(ari_spsa))\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n print('GMM covar matrix distance {0}: {1:f}'.format(i, np.linalg.norm(\n clust_gammas[i] - gmm.covariances_[i])))\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n print('Bayesian GMM covar matrix distance {0}: {1:f}'.format(i, np.\n 
linalg.norm(clust_gammas[i] - bgmm.covariances_[i])))\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n print('SPSA clustering covar matrix distance {0}: {1:f}'.format(i, np.\n linalg.norm(clust_gammas[i] - clustering.Gammas[i])))\nplt.style.use('grayscale')\nutils.plot_centers(clust_means, clustering)\nutils.plot_centers_converg(clust_means, clustering)\nplt.show()\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import mixture, metrics\nimport utils\nimport spsa_clustering\n\n\nN = 5000\nmix_prob = np.array([0.4, 0.4, 0.2])\nclust_means = np.array([[0, 0], [2, 2], [-3, 6]])\nclust_gammas = np.array([[[1, -0.7], [-0.7, 1]], np.eye(2), [[1, 0.8], [0.8, 1]]])\ndata_set = []\ntrue_labels = []\n\nspsa_gamma = 1. / 6\nspsa_alpha = lambda x: 0.25 / (x ** spsa_gamma)\nspsa_beta = lambda x: 15. / (x ** (spsa_gamma / 4))\n\n# spsa_alpha = lambda x: 0.001\n# spsa_beta = lambda x: 0.001\n\nclustering = spsa_clustering.ClusteringSPSA(n_clusters=clust_means.shape[0], data_shape=2, Gammas=None, alpha=spsa_alpha,\n beta=spsa_beta, norm_init=False, eta=1000)\n\nfor _ in range(N):\n mix_ind = np.random.choice(len(mix_prob), p=mix_prob)\n data_point = np.random.multivariate_normal(clust_means[mix_ind],\n clust_gammas[mix_ind])\n data_set.append(data_point)\n true_labels.append(mix_ind)\n clustering.fit(data_point)\ndata_set = np.array(data_set)\n\nutils.order_clust_centers(clust_means, clustering)\nclustering.clusters_fill(data_set)\n\ngmm = mixture.GaussianMixture(n_components=clust_means.shape[0], init_params='kmeans')\ngmm.fit(data_set)\nlabels_pred_gmm = gmm.predict(data_set)\n\nbgmm = mixture.BayesianGaussianMixture(n_components=clust_means.shape[0], init_params='random')\nbgmm.fit(data_set)\nlabels_pred_bgmm = bgmm.predict(data_set)\n\nari_gmm = metrics.adjusted_rand_score(true_labels, labels_pred_gmm)\nprint('\\nARI GMM: {:f}'.format(ari_gmm))\nari_bgmm = metrics.adjusted_rand_score(true_labels, labels_pred_bgmm)\nprint('ARI Bayesian GMM: {:f}'.format(ari_bgmm))\nari_spsa = metrics.adjusted_rand_score(true_labels, clustering.labels_)\nprint('ARI SPSA clustering: {:f}'.format(ari_spsa))\n\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n print('GMM covar matrix distance {0}: {1:f}'.format(i,\n np.linalg.norm(clust_gammas[i] - gmm.covariances_[i])))\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n 
print('Bayesian GMM covar matrix distance {0}: {1:f}'.format(i,\n np.linalg.norm(clust_gammas[i] - bgmm.covariances_[i])))\n\nprint('\\n')\nfor i in range(clust_means.shape[0]):\n print('SPSA clustering covar matrix distance {0}: {1:f}'.format(i,\n np.linalg.norm(clust_gammas[i] - clustering.Gammas[i])))\n\nplt.style.use('grayscale')\n\nutils.plot_centers(clust_means, clustering)\nutils.plot_centers_converg(clust_means, clustering)\n\n# utils.plot_clustering_cov(data_set, clustering.labels_, 'SPSA clustering partition', clustering.cluster_centers_,\n# clustering.Gammas)\n# utils.plot_clustering_cov(data_set, true_labels, 'True partition', clust_means, clust_gammas)\n# utils.plot_clustering_cov(data_set, labels_pred_gmm, 'GMM partition', gmm.means_, gmm.covariances_)\n# utils.plot_clustering_cov(data_set, labels_pred_bgmm, 'Bayesian GMM partition', bgmm.means_, bgmm.covariances_)\n\nplt.show()\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_accuracy(a, b, X_test, y_test):
size = len(y_test)
count = 0
for i in range(size):
x = X_test[i]
real = y_test[i]
x = np.array(x)
x = x.reshape(1, 6)
prediction = x.dot(a.T) + b
if prediction > 0 and real == 1:
count += 1
elif prediction < 0 and real == -1:
count += 1
return count / size
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_accuracy(a, b, X_test, y_test):
size = len(y_test)
count = 0
for i in range(size):
x = X_test[i]
real = y_test[i]
x = np.array(x)
x = x.reshape(1, 6)
prediction = x.dot(a.T) + b
if prediction > 0 and real == 1:
count += 1
elif prediction < 0 and real == -1:
count += 1
return count / size
<|reserved_special_token_0|>
with open('train.txt') as file:
data = [line.split() for line in file]
<|reserved_special_token_0|>
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
if line[14] == '<=50K':
y.append(-1)
else:
y.append(1)
<|reserved_special_token_0|>
for lamb in lambdas:
dict_accuracy[lamb] = []
<|reserved_special_token_0|>
for lamb in lambdas:
dict_a[lamb] = []
<|reserved_special_token_0|>
for lamb in lambdas:
dict_b[lamb] = []
<|reserved_special_token_0|>
for lamb in lambdas:
a = np.zeros(6)
b = 0
for epoch in range(50):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1
)
if epoch == 49:
result = get_accuracy(a, b, X_test, y_test)
print(str(lamb) + ' : ' + str(result))
shuffle(X_train)
validation_train = X_train[0:50]
validation_test = y_train[0:50]
train_data = X_train[51:]
train_test = y_train[51:]
m = 1
n = 50
step_size = m / (0.01 * epoch + n)
for step in range(500):
if step % 30 == 0:
accuracy = get_accuracy(a, b, validation_train, validation_test
)
dict_accuracy[lamb].append(accuracy)
dict_a[lamb].append(a)
dict_b[lamb].append(b)
curr = random.randint(0, len(train_data))
curr_train = np.array(train_data[curr])
curr_train = curr_train.reshape(1, 6)
curr_val = (curr_train.dot(a.T) + b) * train_test[curr]
if curr_val >= 1:
a = a - np.dot(a, lamb) * step_size
else:
a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[
curr], train_test[curr]))
b = b - step_size * -train_test[curr]
<|reserved_special_token_0|>
with open('train.txt') as file:
data = [line.split() for line in file]
<|reserved_special_token_0|>
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
if line[14] == '<=50K':
y.append(-1)
else:
y.append(1)
<|reserved_special_token_0|>
for epoch in range(30):
if epoch == 29:
result = get_accuracy(a, b, X_test, y_test)
print(str(lamb) + ' : ' + str(result))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
shuffle(X_train)
validation_train = X_train[0:50]
validation_test = y_train[0:50]
train_data = X_train[51:]
train_test = y_train[51:]
m = 1
n = 50
step_size = m / (0.01 * epoch + n)
for step in range(300):
curr = random.randint(0, len(train_data))
curr_train = np.array(train_data[curr])
curr_train = curr_train.reshape(1, 6)
curr_val = (curr_train.dot(a.T) + b) * train_test[curr]
if curr_val >= 1:
a = a - np.dot(a, lamb) * step_size
else:
a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr],
train_test[curr]))
b = b - step_size * -train_test[curr]
<|reserved_special_token_0|>
with open('test.txt') as file:
data = [line.split() for line in file]
<|reserved_special_token_0|>
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
<|reserved_special_token_0|>
for k in X:
numerical = np.array(k)
estimate = numerical.dot(a.T) + b
if estimate < 0:
prediction.append('<=50K')
else:
prediction.append('>50K')
<|reserved_special_token_0|>
for i in range(len(prediction)):
index_final.append(["'" + str(i) + "'", prediction[i]])
with open('output.csv', 'w') as csvfile:
writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
writer.writerow(['Example', 'Label'])
writer.writerows(index_final)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_accuracy(a, b, X_test, y_test):
size = len(y_test)
count = 0
for i in range(size):
x = X_test[i]
real = y_test[i]
x = np.array(x)
x = x.reshape(1, 6)
prediction = x.dot(a.T) + b
if prediction > 0 and real == 1:
count += 1
elif prediction < 0 and real == -1:
count += 1
return count / size
data = []
with open('train.txt') as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
if line[14] == '<=50K':
y.append(-1)
else:
y.append(1)
a = random.dirichlet(np.ones(6) * 1000, size=1)
b = 0
scaler = StandardScaler()
X = scaler.fit_transform(X)
X = X - np.mean(X)
lambdas = [0.001, 0.01, 0.1, 1]
dict_accuracy = {}
for lamb in lambdas:
dict_accuracy[lamb] = []
dict_a = {}
for lamb in lambdas:
dict_a[lamb] = []
dict_b = {}
for lamb in lambdas:
dict_b[lamb] = []
a = 0
b = 0
for lamb in lambdas:
a = np.zeros(6)
b = 0
for epoch in range(50):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1
)
if epoch == 49:
result = get_accuracy(a, b, X_test, y_test)
print(str(lamb) + ' : ' + str(result))
shuffle(X_train)
validation_train = X_train[0:50]
validation_test = y_train[0:50]
train_data = X_train[51:]
train_test = y_train[51:]
m = 1
n = 50
step_size = m / (0.01 * epoch + n)
for step in range(500):
if step % 30 == 0:
accuracy = get_accuracy(a, b, validation_train, validation_test
)
dict_accuracy[lamb].append(accuracy)
dict_a[lamb].append(a)
dict_b[lamb].append(b)
curr = random.randint(0, len(train_data))
curr_train = np.array(train_data[curr])
curr_train = curr_train.reshape(1, 6)
curr_val = (curr_train.dot(a.T) + b) * train_test[curr]
if curr_val >= 1:
a = a - np.dot(a, lamb) * step_size
else:
a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[
curr], train_test[curr]))
b = b - step_size * -train_test[curr]
<|reserved_special_token_0|>
lamb = 0.001
a = random.dirichlet(np.ones(6) * 1000, size=1)
b = 0
data = []
with open('train.txt') as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
if line[14] == '<=50K':
y.append(-1)
else:
y.append(1)
scaler = StandardScaler()
X = scaler.fit_transform(X)
X = X - np.mean(X)
for epoch in range(30):
if epoch == 29:
result = get_accuracy(a, b, X_test, y_test)
print(str(lamb) + ' : ' + str(result))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
shuffle(X_train)
validation_train = X_train[0:50]
validation_test = y_train[0:50]
train_data = X_train[51:]
train_test = y_train[51:]
m = 1
n = 50
step_size = m / (0.01 * epoch + n)
for step in range(300):
curr = random.randint(0, len(train_data))
curr_train = np.array(train_data[curr])
curr_train = curr_train.reshape(1, 6)
curr_val = (curr_train.dot(a.T) + b) * train_test[curr]
if curr_val >= 1:
a = a - np.dot(a, lamb) * step_size
else:
a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr],
train_test[curr]))
b = b - step_size * -train_test[curr]
data = []
with open('test.txt') as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
prediction = []
for k in X:
numerical = np.array(k)
estimate = numerical.dot(a.T) + b
if estimate < 0:
prediction.append('<=50K')
else:
prediction.append('>50K')
index_final = []
for i in range(len(prediction)):
index_final.append(["'" + str(i) + "'", prediction[i]])
with open('output.csv', 'w') as csvfile:
writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
writer.writerow(['Example', 'Label'])
writer.writerows(index_final)
<|reserved_special_token_1|>
import numpy as np
from numpy import random
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
from numpy.random import shuffle
import matplotlib.pyplot as plt
import numpy.linalg as la
import sklearn.preprocessing as proc
import csv
def get_accuracy(a, b, X_test, y_test):
size = len(y_test)
count = 0
for i in range(size):
x = X_test[i]
real = y_test[i]
x = np.array(x)
x = x.reshape(1, 6)
prediction = x.dot(a.T) + b
if prediction > 0 and real == 1:
count += 1
elif prediction < 0 and real == -1:
count += 1
return count / size
data = []
with open('train.txt') as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
if line[14] == '<=50K':
y.append(-1)
else:
y.append(1)
a = random.dirichlet(np.ones(6) * 1000, size=1)
b = 0
scaler = StandardScaler()
X = scaler.fit_transform(X)
X = X - np.mean(X)
lambdas = [0.001, 0.01, 0.1, 1]
dict_accuracy = {}
for lamb in lambdas:
dict_accuracy[lamb] = []
dict_a = {}
for lamb in lambdas:
dict_a[lamb] = []
dict_b = {}
for lamb in lambdas:
dict_b[lamb] = []
a = 0
b = 0
for lamb in lambdas:
a = np.zeros(6)
b = 0
for epoch in range(50):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1
)
if epoch == 49:
result = get_accuracy(a, b, X_test, y_test)
print(str(lamb) + ' : ' + str(result))
shuffle(X_train)
validation_train = X_train[0:50]
validation_test = y_train[0:50]
train_data = X_train[51:]
train_test = y_train[51:]
m = 1
n = 50
step_size = m / (0.01 * epoch + n)
for step in range(500):
if step % 30 == 0:
accuracy = get_accuracy(a, b, validation_train, validation_test
)
dict_accuracy[lamb].append(accuracy)
dict_a[lamb].append(a)
dict_b[lamb].append(b)
curr = random.randint(0, len(train_data))
curr_train = np.array(train_data[curr])
curr_train = curr_train.reshape(1, 6)
curr_val = (curr_train.dot(a.T) + b) * train_test[curr]
if curr_val >= 1:
a = a - np.dot(a, lamb) * step_size
else:
a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[
curr], train_test[curr]))
b = b - step_size * -train_test[curr]
<|reserved_special_token_0|>
lamb = 0.001
a = random.dirichlet(np.ones(6) * 1000, size=1)
b = 0
data = []
with open('train.txt') as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
if line[14] == '<=50K':
y.append(-1)
else:
y.append(1)
scaler = StandardScaler()
X = scaler.fit_transform(X)
X = X - np.mean(X)
for epoch in range(30):
if epoch == 29:
result = get_accuracy(a, b, X_test, y_test)
print(str(lamb) + ' : ' + str(result))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
shuffle(X_train)
validation_train = X_train[0:50]
validation_test = y_train[0:50]
train_data = X_train[51:]
train_test = y_train[51:]
m = 1
n = 50
step_size = m / (0.01 * epoch + n)
for step in range(300):
curr = random.randint(0, len(train_data))
curr_train = np.array(train_data[curr])
curr_train = curr_train.reshape(1, 6)
curr_val = (curr_train.dot(a.T) + b) * train_test[curr]
if curr_val >= 1:
a = a - np.dot(a, lamb) * step_size
else:
a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr],
train_test[curr]))
b = b - step_size * -train_test[curr]
data = []
with open('test.txt') as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
prediction = []
for k in X:
numerical = np.array(k)
estimate = numerical.dot(a.T) + b
if estimate < 0:
prediction.append('<=50K')
else:
prediction.append('>50K')
index_final = []
for i in range(len(prediction)):
index_final.append(["'" + str(i) + "'", prediction[i]])
with open('output.csv', 'w') as csvfile:
writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
writer.writerow(['Example', 'Label'])
writer.writerows(index_final)
<|reserved_special_token_1|>
import numpy as np
from numpy import random
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
from numpy.random import shuffle
import matplotlib.pyplot as plt
import numpy.linalg as la
import sklearn.preprocessing as proc
import csv
def get_accuracy(a, b, X_test, y_test):
size = len(y_test)
count = 0
for i in range(size):
x = X_test[i]
real = y_test[i]
x = np.array(x)
x = x.reshape(1, 6)
prediction = x.dot(a.T) + b
if prediction > 0 and real == 1:
count += 1
elif prediction < 0 and real == -1:
count += 1
return count / size
data = []
with open("train.txt") as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
if line[14] == '<=50K':
y.append(-1)
else:
y.append(1)
a = random.dirichlet(np.ones(6)*1000, size = 1)
b = 0
#scale X
scaler = StandardScaler()
X = scaler.fit_transform(X)
X = X - np.mean(X)
#10% test data and 90% train data
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)
lambdas = [0.001, 0.01, 0.1, 1]
dict_accuracy = {}
for lamb in lambdas:
dict_accuracy[lamb] = []
dict_a = {}
for lamb in lambdas:
dict_a[lamb] = []
dict_b = {}
for lamb in lambdas:
dict_b[lamb] = []
a = 0
b = 0
for lamb in lambdas:
#a = random.dirichlet(np.ones(6)*1000, size = 1)
a = np.zeros(6)
b = 0
for epoch in range(50):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)
if epoch == 49:
result = get_accuracy(a, b, X_test, y_test)
print(str(lamb) + ' : ' + str(result))
shuffle(X_train)
validation_train = X_train[0:50]
validation_test = y_train[0:50]
train_data = X_train[51:]
train_test = y_train[51:]
m = 1
n = 50
step_size = m / (0.01 * epoch + n)
for step in range(500):
if step % 30 == 0:
accuracy = get_accuracy(a, b, validation_train, validation_test)
dict_accuracy[lamb].append(accuracy)
dict_a[lamb].append(a)
dict_b[lamb].append(b)
# current index randomly chosen
curr = random.randint(0, len(train_data))
curr_train = np.array(train_data[curr])
curr_train = curr_train.reshape(1, 6)
curr_val = (curr_train.dot(a.T) + b) * train_test[curr]
if curr_val >= 1:
a = a - np.dot(a, lamb) * step_size
else:
a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr], train_test[curr]))
b = b - (step_size * (-train_test[curr]))
'''
x_val = [i for i in range(1, 851)]
# dict_accuracy
fig = plt.figure()
ax1 = fig.add_subplot(411)
ax2 = fig.add_subplot(412)
ax3 = fig.add_subplot(413)
ax4 = fig.add_subplot(414)
fig.tight_layout()
y1 = dict_accuracy[0.001]
y2 = dict_accuracy[0.01]
y3 = dict_accuracy[0.1]
y4 = dict_accuracy[1]
ax1.plot(x_val, y1, color='m')
ax2.plot(x_val, y2, color='g')
ax3.plot(x_val, y3, color='r')
ax4.plot(x_val, y4, color='b')
ax1.set_xlabel('lambda = 0.001')
ax2.set_xlabel('lambda = 0.01')
ax3.set_xlabel('lambda = 0.1')
ax4.set_xlabel('lambda = 1')
plt.show()
#########################################
a_norm = {}
for lamb in lambdas:
a_norm[lamb] = []
for lamb in dict_a:
curr_list = dict_a[lamb]
for curr in curr_list:
norm = la.norm(curr, 2)
a_norm[lamb].append(norm)
plt.plot(x_val, a_norm[0.001], label = 'lambda is 0.001', color = 'b')
plt.plot(x_val, a_norm[0.01], label = 'lambda is 0.01', color = 'r')
plt.plot(x_val, a_norm[0.1], label = 'lambda is 0.01', color = 'g')
plt.plot(x_val, a_norm[1], label = 'lambda is 1', color = 'm')
plt.legend()
plt.show()
'''
lamb = 0.001
a = random.dirichlet(np.ones(6)*1000, size = 1)
b = 0
data = []
with open("train.txt") as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
if line[14] == '<=50K':
y.append(-1)
else:
y.append(1)
#scale X
scaler = StandardScaler()
X = scaler.fit_transform(X)
X = X - np.mean(X)
for epoch in range(30):
if epoch == 29:
result = get_accuracy(a, b, X_test, y_test)
print(str(lamb) + ' : ' + str(result))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)
shuffle(X_train)
validation_train = X_train[0:50]
validation_test = y_train[0:50]
train_data = X_train[51:]
train_test = y_train[51:]
m = 1
n = 50
step_size = m / (0.01 * epoch + n)
for step in range(300):
# current index randomly chosen
curr = random.randint(0, len(train_data))
curr_train = np.array(train_data[curr])
curr_train = curr_train.reshape(1, 6)
curr_val = (curr_train.dot(a.T) + b) * train_test[curr]
if curr_val >= 1:
a = a - np.dot(a, lamb) * step_size
else:
a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr], train_test[curr]))
b = b - (step_size * (-train_test[curr]))
data = []
with open("test.txt") as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
prediction = []
for k in X:
numerical = np.array(k)
estimate = numerical.dot(a.T) + b
#print(estimate)
if estimate < 0:
prediction.append('<=50K')
else:
prediction.append('>50K')
index_final = []
for i in range(len(prediction)):
index_final.append(["'" + str(i) + "'", prediction[i]])
with open('output.csv', 'w') as csvfile:
writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
writer.writerow(['Example', 'Label'])
writer.writerows(index_final)
|
flexible
|
{
"blob_id": "f5c4057babc873099ae2a4d8c1aca960ab9fa30a",
"index": 9692,
"step-1": "<mask token>\n\n\ndef get_accuracy(a, b, X_test, y_test):\n size = len(y_test)\n count = 0\n for i in range(size):\n x = X_test[i]\n real = y_test[i]\n x = np.array(x)\n x = x.reshape(1, 6)\n prediction = x.dot(a.T) + b\n if prediction > 0 and real == 1:\n count += 1\n elif prediction < 0 and real == -1:\n count += 1\n return count / size\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_accuracy(a, b, X_test, y_test):\n size = len(y_test)\n count = 0\n for i in range(size):\n x = X_test[i]\n real = y_test[i]\n x = np.array(x)\n x = x.reshape(1, 6)\n prediction = x.dot(a.T) + b\n if prediction > 0 and real == 1:\n count += 1\n elif prediction < 0 and real == -1:\n count += 1\n return count / size\n\n\n<mask token>\nwith open('train.txt') as file:\n data = [line.split() for line in file]\n<mask token>\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\n<mask token>\nfor lamb in lambdas:\n dict_accuracy[lamb] = []\n<mask token>\nfor lamb in lambdas:\n dict_a[lamb] = []\n<mask token>\nfor lamb in lambdas:\n dict_b[lamb] = []\n<mask token>\nfor lamb in lambdas:\n a = np.zeros(6)\n b = 0\n for epoch in range(50):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1\n )\n if epoch == 49:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(500):\n if step % 30 == 0:\n accuracy = get_accuracy(a, b, validation_train, validation_test\n )\n dict_accuracy[lamb].append(accuracy)\n dict_a[lamb].append(a)\n dict_b[lamb].append(b)\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[\n curr], train_test[curr]))\n b = b - step_size * -train_test[curr]\n<mask token>\nwith open('train.txt') as file:\n data = [line.split() for 
line in file]\n<mask token>\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\n<mask token>\nfor epoch in range(30):\n if epoch == 29:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(300):\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr],\n train_test[curr]))\n b = b - step_size * -train_test[curr]\n<mask token>\nwith open('test.txt') as file:\n data = [line.split() for line in file]\n<mask token>\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n<mask token>\nfor k in X:\n numerical = np.array(k)\n estimate = numerical.dot(a.T) + b\n if estimate < 0:\n prediction.append('<=50K')\n else:\n prediction.append('>50K')\n<mask token>\nfor i in range(len(prediction)):\n index_final.append([\"'\" + str(i) + \"'\", prediction[i]])\nwith open('output.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n writer.writerow(['Example', 'Label'])\n writer.writerows(index_final)\n",
"step-3": "<mask token>\n\n\ndef get_accuracy(a, b, X_test, y_test):\n size = len(y_test)\n count = 0\n for i in range(size):\n x = X_test[i]\n real = y_test[i]\n x = np.array(x)\n x = x.reshape(1, 6)\n prediction = x.dot(a.T) + b\n if prediction > 0 and real == 1:\n count += 1\n elif prediction < 0 and real == -1:\n count += 1\n return count / size\n\n\ndata = []\nwith open('train.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\na = random.dirichlet(np.ones(6) * 1000, size=1)\nb = 0\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\nX = X - np.mean(X)\nlambdas = [0.001, 0.01, 0.1, 1]\ndict_accuracy = {}\nfor lamb in lambdas:\n dict_accuracy[lamb] = []\ndict_a = {}\nfor lamb in lambdas:\n dict_a[lamb] = []\ndict_b = {}\nfor lamb in lambdas:\n dict_b[lamb] = []\na = 0\nb = 0\nfor lamb in lambdas:\n a = np.zeros(6)\n b = 0\n for epoch in range(50):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1\n )\n if epoch == 49:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(500):\n if step % 30 == 0:\n accuracy = get_accuracy(a, b, validation_train, validation_test\n )\n dict_accuracy[lamb].append(accuracy)\n dict_a[lamb].append(a)\n dict_b[lamb].append(b)\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, 
lamb) - np.dot(train_data[\n curr], train_test[curr]))\n b = b - step_size * -train_test[curr]\n<mask token>\nlamb = 0.001\na = random.dirichlet(np.ones(6) * 1000, size=1)\nb = 0\ndata = []\nwith open('train.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\nX = X - np.mean(X)\nfor epoch in range(30):\n if epoch == 29:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(300):\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr],\n train_test[curr]))\n b = b - step_size * -train_test[curr]\ndata = []\nwith open('test.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\nprediction = []\nfor k in X:\n numerical = np.array(k)\n estimate = numerical.dot(a.T) + b\n if estimate < 0:\n prediction.append('<=50K')\n else:\n prediction.append('>50K')\nindex_final = []\nfor i in range(len(prediction)):\n index_final.append([\"'\" + str(i) + \"'\", prediction[i]])\nwith open('output.csv', 'w') as 
csvfile:\n writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n writer.writerow(['Example', 'Label'])\n writer.writerows(index_final)\n",
"step-4": "import numpy as np\nfrom numpy import random\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cross_validation import train_test_split\nfrom numpy.random import shuffle\nimport matplotlib.pyplot as plt\nimport numpy.linalg as la\nimport sklearn.preprocessing as proc\nimport csv\n\n\ndef get_accuracy(a, b, X_test, y_test):\n size = len(y_test)\n count = 0\n for i in range(size):\n x = X_test[i]\n real = y_test[i]\n x = np.array(x)\n x = x.reshape(1, 6)\n prediction = x.dot(a.T) + b\n if prediction > 0 and real == 1:\n count += 1\n elif prediction < 0 and real == -1:\n count += 1\n return count / size\n\n\ndata = []\nwith open('train.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\na = random.dirichlet(np.ones(6) * 1000, size=1)\nb = 0\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\nX = X - np.mean(X)\nlambdas = [0.001, 0.01, 0.1, 1]\ndict_accuracy = {}\nfor lamb in lambdas:\n dict_accuracy[lamb] = []\ndict_a = {}\nfor lamb in lambdas:\n dict_a[lamb] = []\ndict_b = {}\nfor lamb in lambdas:\n dict_b[lamb] = []\na = 0\nb = 0\nfor lamb in lambdas:\n a = np.zeros(6)\n b = 0\n for epoch in range(50):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1\n )\n if epoch == 49:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(500):\n if step % 30 == 0:\n accuracy = get_accuracy(a, b, validation_train, validation_test\n )\n dict_accuracy[lamb].append(accuracy)\n dict_a[lamb].append(a)\n dict_b[lamb].append(b)\n 
curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[\n curr], train_test[curr]))\n b = b - step_size * -train_test[curr]\n<mask token>\nlamb = 0.001\na = random.dirichlet(np.ones(6) * 1000, size=1)\nb = 0\ndata = []\nwith open('train.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\nX = X - np.mean(X)\nfor epoch in range(30):\n if epoch == 29:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(300):\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr],\n train_test[curr]))\n b = b - step_size * -train_test[curr]\ndata = []\nwith open('test.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\nprediction = []\nfor k in X:\n numerical = 
np.array(k)\n estimate = numerical.dot(a.T) + b\n if estimate < 0:\n prediction.append('<=50K')\n else:\n prediction.append('>50K')\nindex_final = []\nfor i in range(len(prediction)):\n index_final.append([\"'\" + str(i) + \"'\", prediction[i]])\nwith open('output.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n writer.writerow(['Example', 'Label'])\n writer.writerows(index_final)\n",
"step-5": "import numpy as np\nfrom numpy import random\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cross_validation import train_test_split\nfrom numpy.random import shuffle\nimport matplotlib.pyplot as plt\nimport numpy.linalg as la\nimport sklearn.preprocessing as proc\nimport csv\n\ndef get_accuracy(a, b, X_test, y_test):\n size = len(y_test)\n count = 0\n for i in range(size):\n x = X_test[i]\n real = y_test[i]\n\n x = np.array(x)\n x = x.reshape(1, 6)\n\n prediction = x.dot(a.T) + b\n\n if prediction > 0 and real == 1:\n count += 1\n elif prediction < 0 and real == -1:\n count += 1\n return count / size\n\ndata = []\nwith open(\"train.txt\") as file:\n data = [line.split() for line in file]\n\n\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \\\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\n\n\na = random.dirichlet(np.ones(6)*1000, size = 1)\nb = 0\n\n\n#scale X\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\n\nX = X - np.mean(X)\n\n#10% test data and 90% train data\n#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)\n\n\nlambdas = [0.001, 0.01, 0.1, 1]\n\ndict_accuracy = {}\nfor lamb in lambdas:\n dict_accuracy[lamb] = []\n\n\ndict_a = {}\nfor lamb in lambdas:\n dict_a[lamb] = []\n\ndict_b = {}\nfor lamb in lambdas:\n dict_b[lamb] = []\n\na = 0\nb = 0\n\nfor lamb in lambdas:\n\n #a = random.dirichlet(np.ones(6)*1000, size = 1)\n a = np.zeros(6)\n b = 0\n\n for epoch in range(50):\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)\n\n if epoch == 49:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n\n shuffle(X_train)\n\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n\n train_data = X_train[51:]\n train_test = y_train[51:]\n\n m = 1\n n = 50\n step_size = m / 
(0.01 * epoch + n)\n\n for step in range(500):\n\n if step % 30 == 0:\n accuracy = get_accuracy(a, b, validation_train, validation_test)\n\n dict_accuracy[lamb].append(accuracy)\n dict_a[lamb].append(a)\n dict_b[lamb].append(b)\n\n # current index randomly chosen\n curr = random.randint(0, len(train_data))\n\n curr_train = np.array(train_data[curr])\n\n curr_train = curr_train.reshape(1, 6)\n\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr], train_test[curr]))\n b = b - (step_size * (-train_test[curr]))\n\n\n'''\n\nx_val = [i for i in range(1, 851)]\n\n# dict_accuracy\nfig = plt.figure()\nax1 = fig.add_subplot(411)\nax2 = fig.add_subplot(412)\nax3 = fig.add_subplot(413)\nax4 = fig.add_subplot(414)\n\nfig.tight_layout()\n\ny1 = dict_accuracy[0.001]\ny2 = dict_accuracy[0.01]\ny3 = dict_accuracy[0.1]\ny4 = dict_accuracy[1]\n\nax1.plot(x_val, y1, color='m')\nax2.plot(x_val, y2, color='g')\nax3.plot(x_val, y3, color='r')\nax4.plot(x_val, y4, color='b')\n\nax1.set_xlabel('lambda = 0.001')\nax2.set_xlabel('lambda = 0.01')\nax3.set_xlabel('lambda = 0.1')\nax4.set_xlabel('lambda = 1')\n\nplt.show()\n\n#########################################\n\na_norm = {}\nfor lamb in lambdas:\n a_norm[lamb] = []\n\nfor lamb in dict_a:\n curr_list = dict_a[lamb]\n for curr in curr_list:\n norm = la.norm(curr, 2)\n a_norm[lamb].append(norm)\n\nplt.plot(x_val, a_norm[0.001], label = 'lambda is 0.001', color = 'b')\nplt.plot(x_val, a_norm[0.01], label = 'lambda is 0.01', color = 'r')\nplt.plot(x_val, a_norm[0.1], label = 'lambda is 0.01', color = 'g')\nplt.plot(x_val, a_norm[1], label = 'lambda is 1', color = 'm')\nplt.legend()\nplt.show()\n\n\n'''\n\n\nlamb = 0.001\n\na = random.dirichlet(np.ones(6)*1000, size = 1)\n\n\nb = 0\n\ndata = []\nwith open(\"train.txt\") as file:\n data = [line.split() for line in file]\n\nX = []\ny = []\nfor line in data:\n 
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \\\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\n\n\n#scale X\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\n\nX = X - np.mean(X)\n\nfor epoch in range(30):\n\n if epoch == 29:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)\n\n shuffle(X_train)\n\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n\n train_data = X_train[51:]\n train_test = y_train[51:]\n\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n\n for step in range(300):\n # current index randomly chosen\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr], train_test[curr]))\n b = b - (step_size * (-train_test[curr]))\n\n\ndata = []\nwith open(\"test.txt\") as file:\n data = [line.split() for line in file]\n\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \\\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n\n\n\n\nprediction = []\nfor k in X:\n numerical = np.array(k)\n estimate = numerical.dot(a.T) + b\n #print(estimate)\n if estimate < 0:\n prediction.append('<=50K')\n else:\n prediction.append('>50K')\n\n\nindex_final = []\nfor i in range(len(prediction)):\n index_final.append([\"'\" + str(i) + \"'\", prediction[i]])\n\nwith open('output.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n writer.writerow(['Example', 'Label'])\n writer.writerows(index_final)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.db import models
from django.utils.safestring import mark_safe
from ondoc.authentication.models import TimeStampedModel, CreatedByModel, Image
import datetime
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from ondoc.doctor.models import Doctor, PracticeSpecialization
class ArticleCategory(TimeStampedModel):
    """A category grouping Article records (e.g. a topic hub page)."""

    name = models.CharField(blank=False, null=False, max_length=500)
    identifier = models.CharField(max_length=48, blank=False, null=True)
    # Unique slug/path for the category page; normalised on save.
    url = models.CharField(blank=False, null=True, max_length=500, unique=True)
    title = models.CharField(max_length=500, null=True, blank=True)
    description = models.CharField(max_length=200000, null=True, blank=True)

    def __str__(self):
        return self.name

    class Meta:
        db_table = "article_categories"

    def save(self, *args, **kwargs):
        # Normalise the URL (strip surrounding slashes, lower-case) before
        # saving.  Bug fix: the original guarded with hasattr(self, 'url'),
        # which is always True for a declared model field, and then crashed
        # with AttributeError when url was None (the field is nullable).
        if self.url:
            self.url = self.url.strip('/').lower()
        super(ArticleCategory, self).save(*args, **kwargs)
class Article(TimeStampedModel, CreatedByModel):
    """An editorial article (content page) with SEO metadata, publication
    state, an optional author, and cross-links to related articles."""

    title = models.CharField(blank=False, null=False, max_length=500, unique=True)
    # Unique slug/path for the article page; normalised on save.
    url = models.CharField(blank=False, null=True, max_length=500, unique=True)
    heading_title = models.CharField(blank=True, null=False, max_length=500)
    body = models.CharField(blank=False, null=False, max_length=200000)
    category = models.ForeignKey(ArticleCategory, null=True, related_name='articles', on_delete=models.SET_NULL)
    header_image = models.ImageField(upload_to='articles/header/images', null=True, blank=True, default='')
    header_image_alt = models.CharField(max_length=512, blank=True, null=True, default='')
    icon = models.ImageField(upload_to='articles/icons', null=True, blank=True, default='')
    is_published = models.BooleanField(default=False, verbose_name='Published')
    description = models.CharField(max_length=500, blank=True, null=True)
    keywords = models.CharField(max_length=256, blank=True, null=True)
    author_name = models.CharField(max_length=256, null=True, blank=True)
    author = models.ForeignKey(Doctor, null=True, blank=True, related_name='published_articles', on_delete=models.SET_NULL)
    published_date = models.DateField(default=datetime.date.today)
    linked_articles = models.ManyToManyField('self', symmetrical=False, through='LinkedArticle',
                                             through_fields=('article', 'linked_article'))
    pharmeasy_url = models.TextField(blank=True, null=True)
    pharmeasy_product_id = models.PositiveIntegerField(null=True, blank=True)
    is_widget_available = models.NullBooleanField()

    def get_absolute_url(self):
        """Return the Django-admin change-page URL for this article."""
        content_type = ContentType.objects.get_for_model(self)
        return reverse('admin:%s_%s_change' % (content_type.app_label, content_type.model), args=[self.id])

    def icon_tag(self):
        """Return an HTML <img> preview of the icon, or '' when unset."""
        if self.icon:
            return mark_safe('<img src="%s" width="150" height="150" />' % (self.icon.url))
        return ""

    def save(self, *args, **kwargs):
        # Default the publication date to today when unset.
        self.published_date = self.published_date if self.published_date else datetime.date.today()
        # Normalise the URL (strip surrounding slashes, lower-case) before
        # saving.  Bug fix: the original guarded with hasattr(self, 'url'),
        # which is always True for a declared model field, and then crashed
        # with AttributeError when url was None (the field is nullable).
        if self.url:
            self.url = self.url.strip('/').lower()
        super().save(*args, **kwargs)

    def __str__(self):
        return self.title

    class Meta:
        db_table = "article"
class ArticleImage(TimeStampedModel, CreatedByModel):
    """An image asset uploaded for use inside article bodies."""

    name = models.ImageField(upload_to='article/images')

    def image_tag(self):
        """Return a small HTML <img> preview tag, or '' when no image is set."""
        if not self.name:
            return ""
        return mark_safe('<img src="%s" width="150" height="150" />' % (self.name.url))

    def __str__(self):
        # Represent the record by its stored file URL (empty when unset).
        if not self.name:
            return ""
        return self.name.url

    class Meta:
        db_table = "article_image"
class ArticleContentBox(TimeStampedModel):
    """A named, titled container used to group related links/articles."""
    name = models.CharField(max_length=1000)
    title = models.CharField(max_length=1000)
    # rank: presumably display ordering among sibling boxes (lower first)
    # -- TODO confirm with the rendering code.
    rank = models.PositiveSmallIntegerField(default=0, blank=True)

    def __str__(self):
        return self.name

    class Meta:
        db_table = 'article_content_box'
class ArticleLinkedUrl(TimeStampedModel):
    """A titled URL attached to an article, optionally shown in a content box."""
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    url = models.CharField(max_length=2000, unique=True)
    title = models.CharField(max_length=500)
    # Box is optional; deleting the box keeps the link (SET_NULL).
    content_box = models.ForeignKey(ArticleContentBox,null=True, on_delete=models.SET_NULL)

    def __str__(self):
        return self.title

    class Meta:
        db_table = 'article_linked_urls'
class LinkedArticle(TimeStampedModel):
    """Through model for Article.linked_articles: a directed article-to-article
    link with an optional display title and content box."""
    article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name='related_articles')
    linked_article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name='related_article')
    title = models.CharField(max_length=500, null=True, blank=False)
    # Box is optional; deleting the box keeps the link (SET_NULL).
    content_box = models.ForeignKey(ArticleContentBox,null=True, on_delete=models.SET_NULL)

    def __str__(self):
        return "{}-{}".format(self.article.title, self.linked_article.title)

    class Meta:
        db_table = 'linked_articles'
        # Each (article, linked_article) pair may appear only once.
        unique_together = (('article', 'linked_article'),)
class MedicineSpecialization(TimeStampedModel):
    """Joins a medicine article to a (possibly absent) practice specialization."""

    medicine = models.ForeignKey(Article, on_delete=models.CASCADE)
    specialization = models.ForeignKey(PracticeSpecialization, on_delete=models.CASCADE, null=True,
                                       blank=True)

    def __str__(self):
        # Bug fix: specialization is nullable (null=True), so the original
        # crashed with AttributeError on self.specialization.name when it
        # was unset; fall back to an empty string in that case.
        spec_name = self.specialization.name if self.specialization else ""
        return self.medicine.title + " " + spec_name

    class Meta:
        db_table = "medicine_specialization"
|
normal
|
{
"blob_id": "9bc15f063adc7d2a5ea81d090736ab6ce66a03d4",
"index": 5028,
"step-1": "<mask token>\n\n\nclass ArticleLinkedUrl(TimeStampedModel):\n article = models.ForeignKey(Article, on_delete=models.CASCADE)\n url = models.CharField(max_length=2000, unique=True)\n title = models.CharField(max_length=500)\n content_box = models.ForeignKey(ArticleContentBox, null=True, on_delete\n =models.SET_NULL)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n db_table = 'article_linked_urls'\n\n\nclass LinkedArticle(TimeStampedModel):\n article = models.ForeignKey(Article, on_delete=models.CASCADE,\n related_name='related_articles')\n linked_article = models.ForeignKey(Article, on_delete=models.CASCADE,\n related_name='related_article')\n title = models.CharField(max_length=500, null=True, blank=False)\n content_box = models.ForeignKey(ArticleContentBox, null=True, on_delete\n =models.SET_NULL)\n\n def __str__(self):\n return '{}-{}'.format(self.article.title, self.linked_article.title)\n\n\n class Meta:\n db_table = 'linked_articles'\n unique_together = ('article', 'linked_article'),\n\n\nclass MedicineSpecialization(TimeStampedModel):\n medicine = models.ForeignKey(Article, on_delete=models.CASCADE)\n specialization = models.ForeignKey(PracticeSpecialization, on_delete=\n models.CASCADE, null=True, blank=True)\n\n def __str__(self):\n return self.medicine.title + ' ' + self.specialization.name\n\n\n class Meta:\n db_table = 'medicine_specialization'\n",
"step-2": "<mask token>\n\n\nclass Article(TimeStampedModel, CreatedByModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n db_table = 'article'\n\n\nclass ArticleImage(TimeStampedModel, CreatedByModel):\n name = models.ImageField(upload_to='article/images')\n\n def image_tag(self):\n if self.name:\n return mark_safe('<img src=\"%s\" width=\"150\" height=\"150\" />' %\n self.name.url)\n return ''\n\n def __str__(self):\n if self.name:\n return self.name.url\n return ''\n\n\n class Meta:\n db_table = 'article_image'\n\n\nclass ArticleContentBox(TimeStampedModel):\n name = models.CharField(max_length=1000)\n title = models.CharField(max_length=1000)\n rank = models.PositiveSmallIntegerField(default=0, blank=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n db_table = 'article_content_box'\n\n\nclass ArticleLinkedUrl(TimeStampedModel):\n article = models.ForeignKey(Article, on_delete=models.CASCADE)\n url = models.CharField(max_length=2000, unique=True)\n title = models.CharField(max_length=500)\n content_box = models.ForeignKey(ArticleContentBox, null=True, on_delete\n =models.SET_NULL)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n db_table = 'article_linked_urls'\n\n\nclass LinkedArticle(TimeStampedModel):\n article = models.ForeignKey(Article, on_delete=models.CASCADE,\n related_name='related_articles')\n linked_article = models.ForeignKey(Article, on_delete=models.CASCADE,\n related_name='related_article')\n title = models.CharField(max_length=500, null=True, blank=False)\n content_box = models.ForeignKey(ArticleContentBox, null=True, on_delete\n =models.SET_NULL)\n\n def __str__(self):\n return 
'{}-{}'.format(self.article.title, self.linked_article.title)\n\n\n class Meta:\n db_table = 'linked_articles'\n unique_together = ('article', 'linked_article'),\n\n\nclass MedicineSpecialization(TimeStampedModel):\n medicine = models.ForeignKey(Article, on_delete=models.CASCADE)\n specialization = models.ForeignKey(PracticeSpecialization, on_delete=\n models.CASCADE, null=True, blank=True)\n\n def __str__(self):\n return self.medicine.title + ' ' + self.specialization.name\n\n\n class Meta:\n db_table = 'medicine_specialization'\n",
"step-3": "<mask token>\n\n\nclass Article(TimeStampedModel, CreatedByModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_absolute_url(self):\n content_type = ContentType.objects.get_for_model(self)\n return reverse('admin:%s_%s_change' % (content_type.app_label,\n content_type.model), args=[self.id])\n\n def icon_tag(self):\n if self.icon:\n return mark_safe('<img src=\"%s\" width=\"150\" height=\"150\" />' %\n self.icon.url)\n return ''\n\n def save(self, *args, **kwargs):\n self.published_date = (self.published_date if self.published_date else\n datetime.date.today())\n if hasattr(self, 'url'):\n self.url = self.url.strip('/').lower()\n super().save(*args, **kwargs)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n db_table = 'article'\n\n\nclass ArticleImage(TimeStampedModel, CreatedByModel):\n name = models.ImageField(upload_to='article/images')\n\n def image_tag(self):\n if self.name:\n return mark_safe('<img src=\"%s\" width=\"150\" height=\"150\" />' %\n self.name.url)\n return ''\n\n def __str__(self):\n if self.name:\n return self.name.url\n return ''\n\n\n class Meta:\n db_table = 'article_image'\n\n\nclass ArticleContentBox(TimeStampedModel):\n name = models.CharField(max_length=1000)\n title = models.CharField(max_length=1000)\n rank = models.PositiveSmallIntegerField(default=0, blank=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n db_table = 'article_content_box'\n\n\nclass ArticleLinkedUrl(TimeStampedModel):\n article = models.ForeignKey(Article, on_delete=models.CASCADE)\n url = models.CharField(max_length=2000, unique=True)\n title = models.CharField(max_length=500)\n content_box = models.ForeignKey(ArticleContentBox, null=True, on_delete\n =models.SET_NULL)\n\n def __str__(self):\n 
return self.title\n\n\n class Meta:\n db_table = 'article_linked_urls'\n\n\nclass LinkedArticle(TimeStampedModel):\n article = models.ForeignKey(Article, on_delete=models.CASCADE,\n related_name='related_articles')\n linked_article = models.ForeignKey(Article, on_delete=models.CASCADE,\n related_name='related_article')\n title = models.CharField(max_length=500, null=True, blank=False)\n content_box = models.ForeignKey(ArticleContentBox, null=True, on_delete\n =models.SET_NULL)\n\n def __str__(self):\n return '{}-{}'.format(self.article.title, self.linked_article.title)\n\n\n class Meta:\n db_table = 'linked_articles'\n unique_together = ('article', 'linked_article'),\n\n\nclass MedicineSpecialization(TimeStampedModel):\n medicine = models.ForeignKey(Article, on_delete=models.CASCADE)\n specialization = models.ForeignKey(PracticeSpecialization, on_delete=\n models.CASCADE, null=True, blank=True)\n\n def __str__(self):\n return self.medicine.title + ' ' + self.specialization.name\n\n\n class Meta:\n db_table = 'medicine_specialization'\n",
"step-4": "<mask token>\n\n\nclass Article(TimeStampedModel, CreatedByModel):\n title = models.CharField(blank=False, null=False, max_length=500,\n unique=True)\n url = models.CharField(blank=False, null=True, max_length=500, unique=True)\n heading_title = models.CharField(blank=True, null=False, max_length=500)\n body = models.CharField(blank=False, null=False, max_length=200000)\n category = models.ForeignKey(ArticleCategory, null=True, related_name=\n 'articles', on_delete=models.SET_NULL)\n header_image = models.ImageField(upload_to='articles/header/images',\n null=True, blank=True, default='')\n header_image_alt = models.CharField(max_length=512, blank=True, null=\n True, default='')\n icon = models.ImageField(upload_to='articles/icons', null=True, blank=\n True, default='')\n is_published = models.BooleanField(default=False, verbose_name='Published')\n description = models.CharField(max_length=500, blank=True, null=True)\n keywords = models.CharField(max_length=256, blank=True, null=True)\n author_name = models.CharField(max_length=256, null=True, blank=True)\n author = models.ForeignKey(Doctor, null=True, blank=True, related_name=\n 'published_articles', on_delete=models.SET_NULL)\n published_date = models.DateField(default=datetime.date.today)\n linked_articles = models.ManyToManyField('self', symmetrical=False,\n through='LinkedArticle', through_fields=('article', 'linked_article'))\n pharmeasy_url = models.TextField(blank=True, null=True)\n pharmeasy_product_id = models.PositiveIntegerField(null=True, blank=True)\n is_widget_available = models.NullBooleanField()\n\n def get_absolute_url(self):\n content_type = ContentType.objects.get_for_model(self)\n return reverse('admin:%s_%s_change' % (content_type.app_label,\n content_type.model), args=[self.id])\n\n def icon_tag(self):\n if self.icon:\n return mark_safe('<img src=\"%s\" width=\"150\" height=\"150\" />' %\n self.icon.url)\n return ''\n\n def save(self, *args, **kwargs):\n self.published_date = 
(self.published_date if self.published_date else\n datetime.date.today())\n if hasattr(self, 'url'):\n self.url = self.url.strip('/').lower()\n super().save(*args, **kwargs)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n db_table = 'article'\n\n\nclass ArticleImage(TimeStampedModel, CreatedByModel):\n name = models.ImageField(upload_to='article/images')\n\n def image_tag(self):\n if self.name:\n return mark_safe('<img src=\"%s\" width=\"150\" height=\"150\" />' %\n self.name.url)\n return ''\n\n def __str__(self):\n if self.name:\n return self.name.url\n return ''\n\n\n class Meta:\n db_table = 'article_image'\n\n\nclass ArticleContentBox(TimeStampedModel):\n name = models.CharField(max_length=1000)\n title = models.CharField(max_length=1000)\n rank = models.PositiveSmallIntegerField(default=0, blank=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n db_table = 'article_content_box'\n\n\nclass ArticleLinkedUrl(TimeStampedModel):\n article = models.ForeignKey(Article, on_delete=models.CASCADE)\n url = models.CharField(max_length=2000, unique=True)\n title = models.CharField(max_length=500)\n content_box = models.ForeignKey(ArticleContentBox, null=True, on_delete\n =models.SET_NULL)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n db_table = 'article_linked_urls'\n\n\nclass LinkedArticle(TimeStampedModel):\n article = models.ForeignKey(Article, on_delete=models.CASCADE,\n related_name='related_articles')\n linked_article = models.ForeignKey(Article, on_delete=models.CASCADE,\n related_name='related_article')\n title = models.CharField(max_length=500, null=True, blank=False)\n content_box = models.ForeignKey(ArticleContentBox, null=True, on_delete\n =models.SET_NULL)\n\n def __str__(self):\n return '{}-{}'.format(self.article.title, self.linked_article.title)\n\n\n class Meta:\n db_table = 'linked_articles'\n unique_together = ('article', 'linked_article'),\n\n\nclass MedicineSpecialization(TimeStampedModel):\n medicine = 
models.ForeignKey(Article, on_delete=models.CASCADE)\n specialization = models.ForeignKey(PracticeSpecialization, on_delete=\n models.CASCADE, null=True, blank=True)\n\n def __str__(self):\n return self.medicine.title + ' ' + self.specialization.name\n\n\n class Meta:\n db_table = 'medicine_specialization'\n",
"step-5": "from django.db import models\nfrom django.utils.safestring import mark_safe\nfrom ondoc.authentication.models import TimeStampedModel, CreatedByModel, Image\nimport datetime\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.urls import reverse\n\nfrom ondoc.doctor.models import Doctor, PracticeSpecialization\n\n\nclass ArticleCategory(TimeStampedModel):\n\n name = models.CharField(blank=False, null=False, max_length=500)\n identifier = models.CharField(max_length=48, blank=False, null=True)\n url = models.CharField(blank=False, null=True, max_length=500, unique=True)\n title = models.CharField(max_length=500, null=True, blank=True)\n description = models.CharField(max_length=200000, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"article_categories\"\n\n def save(self, *args, **kwargs):\n if hasattr(self, 'url'):\n self.url = self.url.strip('/').lower()\n super(ArticleCategory, self).save(*args, **kwargs)\n\n\nclass Article(TimeStampedModel, CreatedByModel):\n title = models.CharField(blank=False, null=False, max_length=500, unique=True)\n url = models.CharField(blank=False, null=True, max_length=500, unique=True)\n heading_title = models.CharField(blank=True, null=False, max_length=500)\n body = models.CharField(blank=False, null=False, max_length=200000)\n category = models.ForeignKey(ArticleCategory, null=True, related_name='articles', on_delete=models.SET_NULL)\n header_image = models.ImageField(upload_to='articles/header/images', null=True, blank=True, default='')\n header_image_alt = models.CharField(max_length=512, blank=True, null=True, default='')\n icon = models.ImageField(upload_to='articles/icons', null=True, blank=True, default='')\n is_published = models.BooleanField(default=False, verbose_name='Published')\n description = models.CharField(max_length=500, blank=True, null=True)\n keywords = models.CharField(max_length=256, blank=True, null=True)\n author_name = 
models.CharField(max_length=256, null=True, blank=True)\n author = models.ForeignKey(Doctor, null=True, blank=True, related_name='published_articles', on_delete=models.SET_NULL)\n published_date = models.DateField(default=datetime.date.today)\n linked_articles = models.ManyToManyField('self', symmetrical=False, through='LinkedArticle',\n through_fields=('article', 'linked_article'))\n pharmeasy_url = models.TextField(blank=True, null=True)\n pharmeasy_product_id = models.PositiveIntegerField(null=True, blank=True)\n is_widget_available = models.NullBooleanField()\n\n def get_absolute_url(self):\n content_type = ContentType.objects.get_for_model(self)\n return reverse('admin:%s_%s_change' % (content_type.app_label, content_type.model), args=[self.id])\n\n def icon_tag(self):\n if self.icon:\n return mark_safe('<img src=\"%s\" width=\"150\" height=\"150\" />' % (self.icon.url))\n return \"\"\n\n def save(self, *args, **kwargs):\n self.published_date = self.published_date if self.published_date else datetime.date.today()\n if hasattr(self, 'url'):\n self.url = self.url.strip('/').lower()\n super().save(*args, **kwargs)\n\n def __str__(self):\n return self.title\n\n class Meta:\n db_table = \"article\"\n\n\nclass ArticleImage(TimeStampedModel, CreatedByModel):\n name = models.ImageField(upload_to='article/images')\n\n def image_tag(self):\n if self.name:\n return mark_safe('<img src=\"%s\" width=\"150\" height=\"150\" />' % (self.name.url))\n return \"\"\n\n def __str__(self):\n if self.name:\n return self.name.url\n return \"\"\n\n class Meta:\n db_table = \"article_image\"\n\nclass ArticleContentBox(TimeStampedModel):\n name = models.CharField(max_length=1000)\n title = models.CharField(max_length=1000)\n rank = models.PositiveSmallIntegerField(default=0, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = 'article_content_box'\n\n\nclass ArticleLinkedUrl(TimeStampedModel):\n article = models.ForeignKey(Article, 
on_delete=models.CASCADE)\n url = models.CharField(max_length=2000, unique=True)\n title = models.CharField(max_length=500)\n content_box = models.ForeignKey(ArticleContentBox,null=True, on_delete=models.SET_NULL)\n\n def __str__(self):\n return self.title\n\n class Meta:\n db_table = 'article_linked_urls'\n\n\nclass LinkedArticle(TimeStampedModel):\n article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name='related_articles')\n linked_article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name='related_article')\n title = models.CharField(max_length=500, null=True, blank=False)\n content_box = models.ForeignKey(ArticleContentBox,null=True, on_delete=models.SET_NULL)\n\n def __str__(self):\n return \"{}-{}\".format(self.article.title, self.linked_article.title)\n\n class Meta:\n db_table = 'linked_articles'\n unique_together = (('article', 'linked_article'),)\n\n\nclass MedicineSpecialization(TimeStampedModel):\n medicine = models.ForeignKey(Article, on_delete=models.CASCADE)\n specialization = models.ForeignKey(PracticeSpecialization, on_delete=models.CASCADE, null=True,\n blank=True)\n\n def __str__(self):\n return self.medicine.title + \" \" + self.specialization.name\n\n class Meta:\n db_table = \"medicine_specialization\"\n\n",
"step-ids": [
9,
18,
21,
22,
28
]
}
|
[
9,
18,
21,
22,
28
] |
def label_modes(trip_list, silent=True):
    """Labels trip segments by likely mode of travel.

    Each point in a trip's 'reduction' list (except the first) is labeled
    by speed: "chilling" if stationary, "walking" if slow (< 2.3 m/s),
    "driving" if fast (>= 2.3 m/s), and "bogus" if faster than 22.22 m/s
    (too fast to be real).  Short walking stretches sandwiched between
    driving points are then re-labeled as driving (long stretches, or
    loops back to the same intersection, stay walking), and per-mode time
    percentages are written onto each trip dict.

    trip_list [list]: a list of trip dicts in JSON format; each needs a
        'reduction' list of points (keys 'velocity', 'time',
        'IntersectionID') and a 'duration_of_trip'.
    silent [bool]: if True, does not print progress reports.

    Returns the same list of dicts, mutated in place."""
    if not silent:
        print('Preparing to label modes of travel for ' \
            + str(len(trip_list)) + ' trips.')

    processed = 0
    for doc in trip_list:

        if not silent:
            processed = processed + 1
            if processed % 10000 == 0:
                print('Labeling modes. Finished ' + str(processed) \
                    + ' trips.')

        points = doc['reduction']

        # Pass 1: label every point (except the first) from its raw speed.
        for i in range(1, len(points)):
            v = float(points[i]['velocity'])  # parse once per point
            if v > 22.22:
                points[i]['mode'] = 'bogus'
            elif v >= 2.3:
                points[i]['mode'] = 'driving'
            elif v > 0:
                points[i]['mode'] = 'walking'
            elif v == 0.0:
                points[i]['mode'] = 'chilling'
            # NOTE(review): a negative velocity leaves the point unlabeled,
            # as in the original, and later raises KeyError on ['mode'] --
            # confirm upstream velocities are non-negative.

        # Pass 2: smooth mode flips after each driving point while
        # accumulating per-mode time.  Relabeling must happen in this same
        # forward pass, because re-labeled points change the time
        # accounting of later indices.
        totals = {'driving': 0, 'walking': 0, 'chilling': 0, 'bogus': 0}
        for i in range(1, len(points) - 1):
            if points[i]['mode'] == 'driving':
                # Scan forward to the next driving/bogus point, counting the
                # walking points in between.
                walk_len = 0
                for j in range(i + 1, len(points)):
                    last_intersection_id = points[j]['IntersectionID']
                    if points[j]['mode'] == 'walking':
                        walk_len = walk_len + 1
                    elif points[j]['mode'] in ('driving', 'bogus'):
                        break
                # A long walking stretch, or a loop back to the same
                # intersection, is genuine walking; a short stretch between
                # drives is treated as driving.  (Relies on j and
                # last_intersection_id surviving the inner loop, as the
                # original did.)
                if walk_len > 5 or last_intersection_id == points[i]['IntersectionID']:
                    new_mode = 'walking'
                else:
                    new_mode = 'driving'
                for k in range(i + 1, j):
                    if points[k]['mode'] != 'chilling':
                        points[k]['mode'] = new_mode
            mode = points[i]['mode']
            if mode in totals:
                # Same float association as the original: (total + t_i) - t_{i-1}.
                totals[mode] = totals[mode] + float(points[i]['time']) \
                    - float(points[i - 1]['time'])

        # The last point falls outside the smoothing loop's range; count it too.
        mode = points[-1]['mode']
        if mode in totals:
            totals[mode] = totals[mode] + float(points[-1]['time']) \
                - float(points[-2]['time'])

        # NOTE(review): a zero 'duration_of_trip' raises ZeroDivisionError,
        # as in the original -- confirm upstream guarantees it is positive.
        duration_of_trip = float(doc['duration_of_trip'])
        doc['time_percentage_driving'] = str(totals['driving'] / duration_of_trip * 100)
        doc['time_percentage_walking'] = str(totals['walking'] / duration_of_trip * 100)
        doc['time_percentage_chilling'] = str(totals['chilling'] / duration_of_trip * 100)
        doc['time_percentage_bogus'] = str(totals['bogus'] / duration_of_trip * 100)

    if not silent:
        print('Done labeling mode of travel. Returning list of length ' \
            + str(len(trip_list)) + '.')

    return trip_list
|
normal
|
{
"blob_id": "3f4e8402bbd096a33ed159ca0fed250c74c2f876",
"index": 4833,
"step-1": "<mask token>\n",
"step-2": "def label_modes(trip_list, silent=True):\n \"\"\"Labels trip segments by likely mode of travel.\n\n Labels are \"chilling\" if traveler is stationary, \"walking\" if slow,\n \"driving\" if fast, and \"bogus\" if too fast to be real.\n\n trip_list [list]: a list of dicts in JSON format.\n silent [bool]: if True, does not print reports.\n\n Returns list of dicts in JSON format.\"\"\"\n if silent == False:\n print('Preparing to label modes of travel for ' + str(len(trip_list\n )) + ' trips.')\n loop_counter = 0\n loop_size = len(trip_list)\n for doc in trip_list:\n if silent == False:\n loop_counter = loop_counter + 1\n if loop_counter % 10000 == 0:\n print('Labeling modes. Finished ' + str(loop_counter) +\n ' trips.')\n time_spent_driving = 0\n time_spent_walking = 0\n time_spent_chilling = 0\n time_spent_bogus = 0\n for i in range(1, len(doc['reduction'])):\n if float(doc['reduction'][i]['velocity']) >= 2.3:\n doc['reduction'][i]['mode'] = 'driving'\n elif float(doc['reduction'][i]['velocity']) < 2.3 and float(doc\n ['reduction'][i]['velocity']) > 0:\n doc['reduction'][i]['mode'] = 'walking'\n elif float(doc['reduction'][i]['velocity']) == 0.0:\n doc['reduction'][i]['mode'] = 'chilling'\n if float(doc['reduction'][i]['velocity']) > 22.22:\n doc['reduction'][i]['mode'] = 'bogus'\n for i in range(1, len(doc['reduction']) - 1):\n path_length = 0\n if doc['reduction'][i]['mode'] == 'driving':\n for j in range(i + 1, len(doc['reduction'])):\n last_intersection_id = doc['reduction'][j]['IntersectionID'\n ]\n if doc['reduction'][j]['mode'] == 'walking':\n path_length = path_length + 1\n elif doc['reduction'][j]['mode'] == 'driving' or doc[\n 'reduction'][j]['mode'] == 'bogus':\n break\n if path_length > 5 or last_intersection_id == doc['reduction'][\n i]['IntersectionID']:\n for k in range(i + 1, j):\n if doc['reduction'][k]['mode'] != 'chilling':\n doc['reduction'][k]['mode'] = 'walking'\n else:\n for k in range(i + 1, j):\n if doc['reduction'][k]['mode'] != 
'chilling':\n doc['reduction'][k]['mode'] = 'driving'\n if doc['reduction'][i]['mode'] == 'driving':\n time_spent_driving = time_spent_driving + float(doc[\n 'reduction'][i]['time']) - float(doc['reduction'][i - 1\n ]['time'])\n elif doc['reduction'][i]['mode'] == 'walking':\n time_spent_walking = time_spent_walking + float(doc[\n 'reduction'][i]['time']) - float(doc['reduction'][i - 1\n ]['time'])\n elif doc['reduction'][i]['mode'] == 'chilling':\n time_spent_chilling = time_spent_chilling + float(doc[\n 'reduction'][i]['time']) - float(doc['reduction'][i - 1\n ]['time'])\n elif doc['reduction'][i]['mode'] == 'bogus':\n time_spent_bogus = time_spent_bogus + float(doc['reduction'\n ][i]['time']) - float(doc['reduction'][i - 1]['time'])\n if doc['reduction'][-1]['mode'] == 'driving':\n time_spent_driving = time_spent_driving + float(doc['reduction'\n ][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif doc['reduction'][-1]['mode'] == 'walking':\n time_spent_walking = time_spent_walking + float(doc['reduction'\n ][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif doc['reduction'][-1]['mode'] == 'chilling':\n time_spent_chilling = time_spent_chilling + float(doc[\n 'reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif doc['reduction'][-1]['mode'] == 'bogus':\n time_spent_bogus = time_spent_bogus + float(doc['reduction'][-1\n ]['time']) - float(doc['reduction'][-2]['time'])\n duration_of_trip = float(doc['duration_of_trip'])\n doc['time_percentage_driving'] = str(time_spent_driving /\n duration_of_trip * 100)\n doc['time_percentage_walking'] = str(time_spent_walking /\n duration_of_trip * 100)\n doc['time_percentage_chilling'] = str(time_spent_chilling /\n duration_of_trip * 100)\n doc['time_percentage_bogus'] = str(time_spent_bogus /\n duration_of_trip * 100)\n if silent == False:\n print('Done labeling mode of travel. Returning list of length ' +\n str(len(trip_list)) + '.')\n return trip_list\n",
"step-3": "def label_modes(trip_list, silent=True):\n \"\"\"Labels trip segments by likely mode of travel.\n\n Labels are \"chilling\" if traveler is stationary, \"walking\" if slow,\n \"driving\" if fast, and \"bogus\" if too fast to be real.\n\n trip_list [list]: a list of dicts in JSON format.\n silent [bool]: if True, does not print reports.\n\n Returns list of dicts in JSON format.\"\"\"\n\n\n if silent == False:\n print('Preparing to label modes of travel for ' \\\n + str(len(trip_list)) + ' trips.')\n\n loop_counter = 0\n loop_size = len(trip_list)\n for doc in trip_list:\n\n if silent == False:\n loop_counter = loop_counter + 1\n if loop_counter % 10000 == 0:\n print('Labeling modes. Finished ' + str(loop_counter) \\\n + ' trips.')\n\n time_spent_driving = 0\n time_spent_walking = 0\n time_spent_chilling = 0\n time_spent_bogus = 0\n for i in range(1,len(doc['reduction'])):\n if (float(doc['reduction'][i]['velocity']) >= 2.3):\n doc['reduction'][i]['mode'] = 'driving'\n\n elif (float(doc['reduction'][i]['velocity']) < 2.3 and float(doc['reduction'][i]['velocity']) > 0):\n doc['reduction'][i]['mode'] = 'walking'\n\n elif (float(doc['reduction'][i]['velocity']) == 0.0):\n doc['reduction'][i]['mode'] = 'chilling'\n\n if (float(doc['reduction'][i]['velocity']) > 22.22):\n doc['reduction'][i]['mode'] = 'bogus'\n\n\n for i in range(1,len(doc['reduction']) - 1):\n path_length = 0\n\n if (doc['reduction'][i]['mode'] == 'driving'):\n for j in range(i+1,len(doc['reduction'])):\n last_intersection_id = doc['reduction'][j]['IntersectionID']\n if (doc['reduction'][j]['mode'] == 'walking'): path_length = path_length + 1\n elif (doc['reduction'][j]['mode'] == 'driving' or doc['reduction'][j]['mode'] == 'bogus'): break\n\n if (path_length > 5 or last_intersection_id == doc['reduction'][i]['IntersectionID']):\n for k in range(i+1,j):\n if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'walking'\n else :\n for k in range(i+1,j):\n if 
(doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'driving'\n\n if (doc['reduction'][i]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n\n if (doc['reduction'][-1]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n\n\n duration_of_trip = float(doc['duration_of_trip'])\n doc['time_percentage_driving'] = str(time_spent_driving/duration_of_trip*100)\n doc['time_percentage_walking'] = str(time_spent_walking/duration_of_trip*100)\n doc['time_percentage_chilling'] = str(time_spent_chilling/duration_of_trip*100)\n doc['time_percentage_bogus'] = str(time_spent_bogus/duration_of_trip*100)\n\n if silent == False:\n print('Done labeling mode of travel. Returning list of length ' \\\n + str(len(trip_list)) + '.')\n\n return trip_list",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import datetime
import time
def calculate(a):
return a
data = set()
class Bank:
amount = 0
def __init__(self):
self.Bank_name = "State Bank of India"
self.ifsc = 'SBI0N00012'
def __repr__(self):
return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '
# self.stored = datetime.date.today()
class CustomerDetails(Bank):
check_amt = 18
def __init__(self,name,identity,acc,op_amount):
Bank.__init__(self)
self.name = name
self.identity = identity
self.acc = acc
self.op_amount = op_amount
Bank.amount += self.op_amount
self.count = 0
def __repr__(self):
return f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '
# stored = datetime.datetime.today()
# def __repr__(self)
def deposite(self,credit):
self.credit = credit
self.op_amount += self.credit
Bank.amount += self.op_amount
print(f'You\'ve added {self.credit} : Total Amount = {self.op_amount}')
return (Bank.amount)
def check_balance(self):
self.count += 1
if self.count > 3:
self.op_amount -= CustomerDetails.check_amt
return f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '
else:
return f'{self.name} your Balance : {self.op_amount}'
# cus1 = CustomerDetails('Lucky','755376288106','67001010115773',5000)
# print(cus1)
cus2 = CustomerDetails('Pawan','755376288078','37376989161',10000)
print(cus2)
cus2.deposite(20000)
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2)
# print(cus2.check_balance())
|
normal
|
{
"blob_id": "66ae7f4ee01ca5516d8e3dc447eeb4709e2b6aec",
"index": 4615,
"step-1": "<mask token>\n\n\nclass Bank:\n <mask token>\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Bank:\n amount = 0\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef calculate(a):\n return a\n\n\ndata = set()\n\n\nclass Bank:\n amount = 0\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\ncus2 = CustomerDetails('Pawan', '755376288078', '37376989161', 10000)\nprint(cus2)\ncus2.deposite(20000)\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2)\n",
"step-4": "import datetime\nimport time\n\n\ndef calculate(a):\n return a\n\n\ndata = set()\n\n\nclass Bank:\n amount = 0\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\ncus2 = CustomerDetails('Pawan', '755376288078', '37376989161', 10000)\nprint(cus2)\ncus2.deposite(20000)\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2)\n",
"step-5": "import datetime\nimport time\n\ndef calculate(a):\n return a\n\n\ndata = set()\nclass Bank:\n amount = 0\n def __init__(self):\n self.Bank_name = \"State Bank of India\"\n self.ifsc = 'SBI0N00012'\n \n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n # self.stored = datetime.date.today()\n\nclass CustomerDetails(Bank):\n check_amt = 18\n def __init__(self,name,identity,acc,op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n\n # stored = datetime.datetime.today()\n # def __repr__(self)\n def deposite(self,credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f'You\\'ve added {self.credit} : Total Amount = {self.op_amount}')\n return (Bank.amount)\n \n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n \n \n\n# cus1 = CustomerDetails('Lucky','755376288106','67001010115773',5000)\n# print(cus1)\ncus2 = CustomerDetails('Pawan','755376288078','37376989161',10000)\nprint(cus2)\ncus2.deposite(20000)\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2)\n# print(cus2.check_balance())\n\n\n ",
"step-ids": [
9,
10,
13,
14,
15
]
}
|
[
9,
10,
13,
14,
15
] |
<|reserved_special_token_0|>
class IntegratedRegressor:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def fit(self, X, y):
self.regs = []
for target in y.columns:
tmp = deepcopy(self.reg)
if self.predict_log:
tmp.fit(X, np.log1p(y[target]))
else:
tmp.fit(X, y[target])
self.regs.append(tmp)
<|reserved_special_token_0|>
class DayNightRegressor:
def __init__(self, reg):
self.night_reg = deepcopy(reg)
self.day_reg = deepcopy(reg)
def fit(self, X, y):
self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])
self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])
def predict(self, X):
pred = []
pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))
pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))
idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0
].index.tolist()
return np.intp([x for _, x in sorted(zip(idx, pred))])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IntegratedRegressor:
<|reserved_special_token_0|>
def __init__(self, reg, predict_log=True):
self.reg = reg
self.predict_log = predict_log
def fit(self, X, y):
self.regs = []
for target in y.columns:
tmp = deepcopy(self.reg)
if self.predict_log:
tmp.fit(X, np.log1p(y[target]))
else:
tmp.fit(X, y[target])
self.regs.append(tmp)
<|reserved_special_token_0|>
class DayNightRegressor:
def __init__(self, reg):
self.night_reg = deepcopy(reg)
self.day_reg = deepcopy(reg)
def fit(self, X, y):
self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])
self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])
def predict(self, X):
pred = []
pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))
pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))
idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0
].index.tolist()
return np.intp([x for _, x in sorted(zip(idx, pred))])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IntegratedRegressor:
<|reserved_special_token_0|>
def __init__(self, reg, predict_log=True):
self.reg = reg
self.predict_log = predict_log
def fit(self, X, y):
self.regs = []
for target in y.columns:
tmp = deepcopy(self.reg)
if self.predict_log:
tmp.fit(X, np.log1p(y[target]))
else:
tmp.fit(X, y[target])
self.regs.append(tmp)
def predict(self, X):
pred = np.zeros((X.shape[0],))
for reg in self.regs:
if self.predict_log:
pred += np.expm1(reg.predict(X))
else:
pred += reg.predict(X)
return np.intp(pred.round())
class DayNightRegressor:
def __init__(self, reg):
self.night_reg = deepcopy(reg)
self.day_reg = deepcopy(reg)
def fit(self, X, y):
self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])
self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])
def predict(self, X):
pred = []
pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))
pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))
idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0
].index.tolist()
return np.intp([x for _, x in sorted(zip(idx, pred))])
<|reserved_special_token_1|>
from __future__ import division, print_function
import numpy as np
from copy import deepcopy
class IntegratedRegressor:
regs = []
def __init__(self, reg, predict_log=True):
self.reg = reg
self.predict_log = predict_log
def fit(self, X, y):
self.regs = []
for target in y.columns:
tmp = deepcopy(self.reg)
if self.predict_log:
tmp.fit(X, np.log1p(y[target]))
else:
tmp.fit(X, y[target])
self.regs.append(tmp)
def predict(self, X):
pred = np.zeros((X.shape[0],))
for reg in self.regs:
if self.predict_log:
pred += np.expm1(reg.predict(X))
else:
pred += reg.predict(X)
return np.intp(pred.round())
class DayNightRegressor:
def __init__(self, reg):
self.night_reg = deepcopy(reg)
self.day_reg = deepcopy(reg)
def fit(self, X, y):
self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])
self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])
def predict(self, X):
pred = []
pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))
pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))
idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0
].index.tolist()
return np.intp([x for _, x in sorted(zip(idx, pred))])
<|reserved_special_token_1|>
from __future__ import division, print_function
import numpy as np
from copy import deepcopy
class IntegratedRegressor():
    """Sum-of-targets regressor.

    Fits one deep-copied clone of a base regressor per column of a
    multi-column target frame, then predicts the rounded integer sum of the
    per-column predictions.  Optionally each target is modelled in log space
    (``log1p`` at fit time, inverted with ``expm1`` at predict time), which is
    a common transform for skewed count targets.
    """

    def __init__(self, reg, predict_log=True):
        """
        Args:
            reg: base estimator exposing ``fit(X, y)`` and ``predict(X)``;
                it is deep-copied once per target column, never fitted itself.
            predict_log: if True, fit each clone on ``log1p(y[col])`` and
                invert with ``expm1`` when predicting.
        """
        self.reg = reg
        self.predict_log = predict_log
        # Per-instance list of fitted clones, rebuilt by fit().  Initialised
        # here instead of as a class-level ``regs = []`` so instances can
        # never alias a shared mutable list.
        self.regs = []

    def fit(self, X, y):
        """Fit one clone of the base regressor for every column of ``y``."""
        self.regs = []
        for target in y.columns:
            clone = deepcopy(self.reg)
            if self.predict_log:
                clone.fit(X, np.log1p(y[target]))
            else:
                clone.fit(X, y[target])
            self.regs.append(clone)

    def predict(self, X):
        """Return the rounded integer sum of all per-target predictions."""
        pred = np.zeros((X.shape[0],))
        for reg in self.regs:
            if self.predict_log:
                # Undo the log1p transform applied in fit().
                pred += np.expm1(reg.predict(X))
            else:
                pred += reg.predict(X)
        return np.intp(pred.round())
class DayNightRegressor():
    """Pair of regressors split on the binary ``night`` feature.

    Keeps two independent deep copies of the base regressor: one fitted on
    rows where ``X['night'] == 1``, the other on rows where
    ``X['night'] == 0``.  Predictions are made per group and then put back
    into the original row order.
    """

    def __init__(self, reg):
        self.night_reg = deepcopy(reg)
        self.day_reg = deepcopy(reg)

    def fit(self, X, y):
        """Fit each sub-regressor on its own slice of the data."""
        night_rows = X['night'] == 1
        day_rows = X['night'] == 0
        self.night_reg.fit(X[night_rows], y[night_rows])
        self.day_reg.fit(X[day_rows], y[day_rows])

    def predict(self, X):
        """Predict per group, then restore the original row order."""
        night_part = X[X['night'] == 1]
        day_part = X[X['night'] == 0]
        grouped = np.append(self.night_reg.predict(night_part),
                            self.day_reg.predict(day_part))
        # Index labels of the two slices, in the same order as ``grouped``;
        # sorting the (label, prediction) pairs restores the input order.
        order = night_part.index.tolist() + day_part.index.tolist()
        return np.intp([p for _, p in sorted(zip(order, grouped))])
|
flexible
|
{
"blob_id": "72d41f939a586fbd8459927983d9d62a96b650e2",
"index": 1844,
"step-1": "<mask token>\n\n\nclass IntegratedRegressor:\n <mask token>\n <mask token>\n\n def fit(self, X, y):\n self.regs = []\n for target in y.columns:\n tmp = deepcopy(self.reg)\n if self.predict_log:\n tmp.fit(X, np.log1p(y[target]))\n else:\n tmp.fit(X, y[target])\n self.regs.append(tmp)\n <mask token>\n\n\nclass DayNightRegressor:\n\n def __init__(self, reg):\n self.night_reg = deepcopy(reg)\n self.day_reg = deepcopy(reg)\n\n def fit(self, X, y):\n self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])\n self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])\n\n def predict(self, X):\n pred = []\n pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))\n pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))\n idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0\n ].index.tolist()\n return np.intp([x for _, x in sorted(zip(idx, pred))])\n",
"step-2": "<mask token>\n\n\nclass IntegratedRegressor:\n <mask token>\n\n def __init__(self, reg, predict_log=True):\n self.reg = reg\n self.predict_log = predict_log\n\n def fit(self, X, y):\n self.regs = []\n for target in y.columns:\n tmp = deepcopy(self.reg)\n if self.predict_log:\n tmp.fit(X, np.log1p(y[target]))\n else:\n tmp.fit(X, y[target])\n self.regs.append(tmp)\n <mask token>\n\n\nclass DayNightRegressor:\n\n def __init__(self, reg):\n self.night_reg = deepcopy(reg)\n self.day_reg = deepcopy(reg)\n\n def fit(self, X, y):\n self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])\n self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])\n\n def predict(self, X):\n pred = []\n pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))\n pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))\n idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0\n ].index.tolist()\n return np.intp([x for _, x in sorted(zip(idx, pred))])\n",
"step-3": "<mask token>\n\n\nclass IntegratedRegressor:\n <mask token>\n\n def __init__(self, reg, predict_log=True):\n self.reg = reg\n self.predict_log = predict_log\n\n def fit(self, X, y):\n self.regs = []\n for target in y.columns:\n tmp = deepcopy(self.reg)\n if self.predict_log:\n tmp.fit(X, np.log1p(y[target]))\n else:\n tmp.fit(X, y[target])\n self.regs.append(tmp)\n\n def predict(self, X):\n pred = np.zeros((X.shape[0],))\n for reg in self.regs:\n if self.predict_log:\n pred += np.expm1(reg.predict(X))\n else:\n pred += reg.predict(X)\n return np.intp(pred.round())\n\n\nclass DayNightRegressor:\n\n def __init__(self, reg):\n self.night_reg = deepcopy(reg)\n self.day_reg = deepcopy(reg)\n\n def fit(self, X, y):\n self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])\n self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])\n\n def predict(self, X):\n pred = []\n pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))\n pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))\n idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0\n ].index.tolist()\n return np.intp([x for _, x in sorted(zip(idx, pred))])\n",
"step-4": "from __future__ import division, print_function\nimport numpy as np\nfrom copy import deepcopy\n\n\nclass IntegratedRegressor:\n regs = []\n\n def __init__(self, reg, predict_log=True):\n self.reg = reg\n self.predict_log = predict_log\n\n def fit(self, X, y):\n self.regs = []\n for target in y.columns:\n tmp = deepcopy(self.reg)\n if self.predict_log:\n tmp.fit(X, np.log1p(y[target]))\n else:\n tmp.fit(X, y[target])\n self.regs.append(tmp)\n\n def predict(self, X):\n pred = np.zeros((X.shape[0],))\n for reg in self.regs:\n if self.predict_log:\n pred += np.expm1(reg.predict(X))\n else:\n pred += reg.predict(X)\n return np.intp(pred.round())\n\n\nclass DayNightRegressor:\n\n def __init__(self, reg):\n self.night_reg = deepcopy(reg)\n self.day_reg = deepcopy(reg)\n\n def fit(self, X, y):\n self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])\n self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])\n\n def predict(self, X):\n pred = []\n pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))\n pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))\n idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0\n ].index.tolist()\n return np.intp([x for _, x in sorted(zip(idx, pred))])\n",
"step-5": "from __future__ import division, print_function\n\nimport numpy as np\nfrom copy import deepcopy\n\n\nclass IntegratedRegressor():\n regs = []\n\n def __init__(self, reg, predict_log=True):\n self.reg = reg\n self.predict_log = predict_log\n\n def fit(self, X, y):\n self.regs = []\n for target in y.columns:\n tmp = deepcopy(self.reg)\n if self.predict_log:\n tmp.fit(X, np.log1p(y[target]))\n else:\n tmp.fit(X, y[target])\n self.regs.append(tmp)\n\n def predict(self, X):\n pred = np.zeros((X.shape[0],))\n for reg in self.regs:\n if self.predict_log:\n pred += np.expm1(reg.predict(X))\n else:\n pred += reg.predict(X)\n return np.intp(pred.round())\n\n\nclass DayNightRegressor():\n def __init__(self, reg):\n self.night_reg = deepcopy(reg)\n self.day_reg = deepcopy(reg)\n\n def fit(self, X, y):\n self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])\n self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])\n\n def predict(self, X):\n pred = []\n pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))\n pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))\n idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0].index.tolist()\n return np.intp([x for (_, x) in sorted(zip(idx, pred))])\n",
"step-ids": [
6,
7,
8,
10,
11
]
}
|
[
6,
7,
8,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def quantity_posts():
try:
data = shelve.open('data')
except Exception:
print(Exception)
else:
for key, value in sorted(data.items()):
print(key, ': \t', value, '\n')
finally:
data.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def quantity_posts():
try:
data = shelve.open('data')
except Exception:
print(Exception)
else:
for key, value in sorted(data.items()):
print(key, ': \t', value, '\n')
finally:
data.close()
if __name__ == '__main__':
print('begin')
quantity_posts()
print('end')
<|reserved_special_token_1|>
import shelve
def quantity_posts():
try:
data = shelve.open('data')
except Exception:
print(Exception)
else:
for key, value in sorted(data.items()):
print(key, ': \t', value, '\n')
finally:
data.close()
if __name__ == '__main__':
print('begin')
quantity_posts()
print('end')
<|reserved_special_token_1|>
import shelve
def quantity_posts():
    """Print every (key, value) pair stored in the ``data`` shelf, sorted by key.

    Opens the shelve database file named ``data`` in the current working
    directory.  If the shelf cannot be opened, the error is reported and the
    function returns without raising; the shelf is always closed afterwards.
    """
    try:
        data = shelve.open('data')
    except Exception as exc:
        # Report the actual error instance — the original printed the bare
        # ``Exception`` class, which carried no information.  Bail out here so
        # the cleanup below never touches an unopened shelf (previously the
        # ``finally: data.close()`` raised NameError on a failed open).
        print(exc)
        return
    try:
        for key, value in sorted(data.items()):
            print(key, ': \t', value, '\n')
    finally:
        data.close()


if __name__ == "__main__":
    print('begin')
    quantity_posts()
    print('end')
|
flexible
|
{
"blob_id": "41c44b32ce3329cbba5b9b336c4266bb20de31f0",
"index": 5151,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef quantity_posts():\n try:\n data = shelve.open('data')\n except Exception:\n print(Exception)\n else:\n for key, value in sorted(data.items()):\n print(key, ': \\t', value, '\\n')\n finally:\n data.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef quantity_posts():\n try:\n data = shelve.open('data')\n except Exception:\n print(Exception)\n else:\n for key, value in sorted(data.items()):\n print(key, ': \\t', value, '\\n')\n finally:\n data.close()\n\n\nif __name__ == '__main__':\n print('begin')\n quantity_posts()\n print('end')\n",
"step-4": "import shelve\n\n\ndef quantity_posts():\n try:\n data = shelve.open('data')\n except Exception:\n print(Exception)\n else:\n for key, value in sorted(data.items()):\n print(key, ': \\t', value, '\\n')\n finally:\n data.close()\n\n\nif __name__ == '__main__':\n print('begin')\n quantity_posts()\n print('end')\n",
"step-5": "import shelve\r\n\r\ndef quantity_posts():\r\n try:\r\n data = shelve.open('data')\r\n except Exception:\r\n print(Exception)\r\n else:\r\n for key, value in sorted(data.items()):\r\n print(key, ': \\t', value, '\\n')\r\n finally:\r\n data.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print('begin')\r\n quantity_posts()\r\n print('end')\r\n \r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from StockDatabase import StockDatabase
from RNNinner import RecurrentAnalyzer
import torch
import matplotlib.pyplot as plt
import numpy as np
# Load the stock-price database and pull a normalised AAPL price series.
# NOTE(review): normalize() presumably scales prices into a fixed range
# suitable for the RNN — confirm against StockDatabase's implementation.
database = StockDatabase()
database.read_data()
prices = torch.tensor(database.normalize(database.get_stock_prices('AAPL',
    length=2000)))
print(prices.shape)
# Restore a pre-trained recurrent model on CPU.  Assumes a state-dict
# checkpoint file named 'rnn_inner' exists in the current directory —
# TODO confirm; torch.load raises FileNotFoundError otherwise.
model = RecurrentAnalyzer(100, 10).to('cpu')
model.load_state_dict(torch.load('rnn_inner'))
model.init_hidden()
model.eval()
with torch.no_grad():
    # Teacher-forced warm-up: feed the first 50 real prices (shaped
    # (seq, batch=1, feature=1)) and keep the model's one-step predictions.
    preds = list(model(prices[:50, None, None])[:, 0])
    # Free-running rollout: each subsequent step is fed the model's own
    # previous prediction, so errors can compound over the horizon.
    for i in range(len(prices) - 50):
        preds.append(model.forward_step(preds[-1][None, ...])[0])
    print(preds)
    print(prices[1:])
    # Overlay the actual series (shifted by one, since predictions target
    # the next time step) against the predicted series.
    plt.plot(np.arange(len(prices) - 1), prices[1:])
    plt.plot(np.arange(len(preds)), preds)
    plt.show()
|
normal
|
{
"blob_id": "8abfb6a9ca3a7a909a1e8125e8c03e29b2bacda8",
"index": 109,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndatabase.read_data()\n<mask token>\nprint(prices.shape)\n<mask token>\nmodel.load_state_dict(torch.load('rnn_inner'))\nmodel.init_hidden()\nmodel.eval()\nwith torch.no_grad():\n preds = list(model(prices[:50, None, None])[:, 0])\n for i in range(len(prices) - 50):\n preds.append(model.forward_step(preds[-1][None, ...])[0])\n print(preds)\n print(prices[1:])\n plt.plot(np.arange(len(prices) - 1), prices[1:])\n plt.plot(np.arange(len(preds)), preds)\n plt.show()\n",
"step-3": "<mask token>\ndatabase = StockDatabase()\ndatabase.read_data()\nprices = torch.tensor(database.normalize(database.get_stock_prices('AAPL',\n length=2000)))\nprint(prices.shape)\nmodel = RecurrentAnalyzer(100, 10).to('cpu')\nmodel.load_state_dict(torch.load('rnn_inner'))\nmodel.init_hidden()\nmodel.eval()\nwith torch.no_grad():\n preds = list(model(prices[:50, None, None])[:, 0])\n for i in range(len(prices) - 50):\n preds.append(model.forward_step(preds[-1][None, ...])[0])\n print(preds)\n print(prices[1:])\n plt.plot(np.arange(len(prices) - 1), prices[1:])\n plt.plot(np.arange(len(preds)), preds)\n plt.show()\n",
"step-4": "from StockDatabase import StockDatabase\nfrom RNNinner import RecurrentAnalyzer\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np\ndatabase = StockDatabase()\ndatabase.read_data()\nprices = torch.tensor(database.normalize(database.get_stock_prices('AAPL',\n length=2000)))\nprint(prices.shape)\nmodel = RecurrentAnalyzer(100, 10).to('cpu')\nmodel.load_state_dict(torch.load('rnn_inner'))\nmodel.init_hidden()\nmodel.eval()\nwith torch.no_grad():\n preds = list(model(prices[:50, None, None])[:, 0])\n for i in range(len(prices) - 50):\n preds.append(model.forward_step(preds[-1][None, ...])[0])\n print(preds)\n print(prices[1:])\n plt.plot(np.arange(len(prices) - 1), prices[1:])\n plt.plot(np.arange(len(preds)), preds)\n plt.show()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Column(Expr):
"""
Inherited from :class:`~expr.Expr`.
Representation of a Python object :class:`~col.Column`.
"""
def __init__(self, name: str, dataframe: 'DataFrame') ->None:
""":meta private:"""
super().__init__(dataframe=dataframe)
self._name = name
self._type: Optional[DataType] = None
def _serialize(self, db: Optional[Database]=None) ->str:
assert self._dataframe is not None
return (f'{self._dataframe._name}."{self._name}"' if self._name !=
'*' else f'{self._dataframe._name}.*')
def __getitem__(self, field_name: str) ->ColumnField:
"""
Get access to a field of the current column.
Args:
field_name: str
Returns:
Field of the column with the specified name.
"""
return ColumnField(self, field_name=field_name)
def _bind(self, dataframe: Optional['DataFrame']=None, db: Optional[
Database]=None):
""":meta private:"""
c = Column(self._name, self._dataframe)
c._db = (db if db is not None else dataframe._db if dataframe is not
None else self._db)
assert c._db is not None
return c
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ColumnField(Expr):
<|reserved_special_token_0|>
def __init__(self, column: 'Column', field_name: str) ->None:
""":meta private:"""
self._field_name = field_name
self._column = column
super().__init__(column._dataframe)
def _serialize(self, db: Optional[Database]=None) ->str:
return (f'({self._column._serialize(db=db)})."{self._field_name}"' if
self._field_name != '*' else
f'({self._column._serialize(db=db)}).*')
class Column(Expr):
"""
Inherited from :class:`~expr.Expr`.
Representation of a Python object :class:`~col.Column`.
"""
def __init__(self, name: str, dataframe: 'DataFrame') ->None:
""":meta private:"""
super().__init__(dataframe=dataframe)
self._name = name
self._type: Optional[DataType] = None
def _serialize(self, db: Optional[Database]=None) ->str:
assert self._dataframe is not None
return (f'{self._dataframe._name}."{self._name}"' if self._name !=
'*' else f'{self._dataframe._name}.*')
def __getitem__(self, field_name: str) ->ColumnField:
"""
Get access to a field of the current column.
Args:
field_name: str
Returns:
Field of the column with the specified name.
"""
return ColumnField(self, field_name=field_name)
def _bind(self, dataframe: Optional['DataFrame']=None, db: Optional[
Database]=None):
""":meta private:"""
c = Column(self._name, self._dataframe)
c._db = (db if db is not None else dataframe._db if dataframe is not
None else self._db)
assert c._db is not None
return c
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ColumnField(Expr):
    """
    Inherited from :class:`~expr.Expr`.

    Represents one field of a composite-typed :class:`~col.Column`, giving
    dict-like access to the fields of the parent column.
    """

    def __init__(self, column: 'Column', field_name: str) ->None:
        """:meta private:"""
        self._column = column
        self._field_name = field_name
        super().__init__(column._dataframe)

    def _serialize(self, db: Optional[Database]=None) ->str:
        # Qualify the field with its parenthesised parent column; a field
        # name of '*' expands to every field of the composite value.
        parent = self._column._serialize(db=db)
        if self._field_name == '*':
            return f'({parent}).*'
        return f'({parent})."{self._field_name}"'
class Column(Expr):
"""
Inherited from :class:`~expr.Expr`.
Representation of a Python object :class:`~col.Column`.
"""
def __init__(self, name: str, dataframe: 'DataFrame') ->None:
""":meta private:"""
super().__init__(dataframe=dataframe)
self._name = name
self._type: Optional[DataType] = None
def _serialize(self, db: Optional[Database]=None) ->str:
assert self._dataframe is not None
return (f'{self._dataframe._name}."{self._name}"' if self._name !=
'*' else f'{self._dataframe._name}.*')
def __getitem__(self, field_name: str) ->ColumnField:
"""
Get access to a field of the current column.
Args:
field_name: str
Returns:
Field of the column with the specified name.
"""
return ColumnField(self, field_name=field_name)
def _bind(self, dataframe: Optional['DataFrame']=None, db: Optional[
Database]=None):
""":meta private:"""
c = Column(self._name, self._dataframe)
c._db = (db if db is not None else dataframe._db if dataframe is not
None else self._db)
assert c._db is not None
return c
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from typing import TYPE_CHECKING, Optional
from greenplumpython.db import Database
from greenplumpython.expr import Expr
from greenplumpython.type import DataType
if TYPE_CHECKING:
from greenplumpython.dataframe import DataFrame
class ColumnField(Expr):
"""
Inherited from :class:`~expr.Expr`.
Representation of a field of a :class:`~col.Column` of composite type. This
type allows to access to the fields in a dict-like manner.
"""
def __init__(self, column: 'Column', field_name: str) ->None:
""":meta private:"""
self._field_name = field_name
self._column = column
super().__init__(column._dataframe)
def _serialize(self, db: Optional[Database]=None) ->str:
return (f'({self._column._serialize(db=db)})."{self._field_name}"' if
self._field_name != '*' else
f'({self._column._serialize(db=db)}).*')
class Column(Expr):
"""
Inherited from :class:`~expr.Expr`.
Representation of a Python object :class:`~col.Column`.
"""
def __init__(self, name: str, dataframe: 'DataFrame') ->None:
""":meta private:"""
super().__init__(dataframe=dataframe)
self._name = name
self._type: Optional[DataType] = None
def _serialize(self, db: Optional[Database]=None) ->str:
assert self._dataframe is not None
return (f'{self._dataframe._name}."{self._name}"' if self._name !=
'*' else f'{self._dataframe._name}.*')
def __getitem__(self, field_name: str) ->ColumnField:
"""
Get access to a field of the current column.
Args:
field_name: str
Returns:
Field of the column with the specified name.
"""
return ColumnField(self, field_name=field_name)
def _bind(self, dataframe: Optional['DataFrame']=None, db: Optional[
Database]=None):
""":meta private:"""
c = Column(self._name, self._dataframe)
c._db = (db if db is not None else dataframe._db if dataframe is not
None else self._db)
assert c._db is not None
return c
<|reserved_special_token_1|>
"""Utilties to access a column and one field of a column if the column is composite."""
from typing import TYPE_CHECKING, Optional
from greenplumpython.db import Database
from greenplumpython.expr import Expr
from greenplumpython.type import DataType
if TYPE_CHECKING:
from greenplumpython.dataframe import DataFrame
class ColumnField(Expr):
    """
    Inherited from :class:`~expr.Expr`.

    Represents one field of a composite-typed :class:`~col.Column`, so that
    fields can be accessed with dict-style subscription on the column.
    """

    def __init__(self, column: "Column", field_name: str) -> None:
        # noqa
        """:meta private:"""
        self._column = column
        self._field_name = field_name
        super().__init__(column._dataframe)

    def _serialize(self, db: Optional[Database] = None) -> str:
        # "*" expands to every field of the composite value instead of one.
        parent_sql = self._column._serialize(db=db)
        if self._field_name == "*":
            return f"({parent_sql}).*"
        return f'({parent_sql})."{self._field_name}"'
class Column(Expr):
    """
    Inherited from :class:`~expr.Expr`.

    Represents a single named column of a :class:`~dataframe.DataFrame`.
    """

    def __init__(self, name: str, dataframe: "DataFrame") -> None:
        # noqa: D400
        """:meta private:"""
        super().__init__(dataframe=dataframe)
        self._name = name
        self._type: Optional[DataType] = None  # TODO: Add type inference

    def _serialize(self, db: Optional[Database] = None) -> str:
        assert self._dataframe is not None
        # Quote both dataframe name and column name to avoid SQL injection;
        # "*" selects every column and must not be quoted.
        if self._name == "*":
            return f"{self._dataframe._name}.*"
        return f'{self._dataframe._name}."{self._name}"'

    def __getitem__(self, field_name: str) -> ColumnField:
        """
        Get access to a field of the current column.

        Args:
            field_name: str

        Returns:
            Field of the column with the specified name.
        """
        return ColumnField(self, field_name=field_name)

    def _bind(
        self,
        dataframe: Optional["DataFrame"] = None,
        db: Optional[Database] = None,
    ):
        # noqa D400
        """:meta private:"""
        bound = Column(self._name, self._dataframe)
        # Database resolution precedence: explicit db > dataframe's db > own db.
        if db is not None:
            bound._db = db
        elif dataframe is not None:
            bound._db = dataframe._db
        else:
            bound._db = self._db
        assert bound._db is not None
        return bound
|
flexible
|
{
"blob_id": "a52edeec62a6849bda7e5a5481fb6e3d7d9a4c6a",
"index": 8571,
"step-1": "<mask token>\n\n\nclass Column(Expr):\n \"\"\"\n Inherited from :class:`~expr.Expr`.\n\n Representation of a Python object :class:`~col.Column`.\n \"\"\"\n\n def __init__(self, name: str, dataframe: 'DataFrame') ->None:\n \"\"\":meta private:\"\"\"\n super().__init__(dataframe=dataframe)\n self._name = name\n self._type: Optional[DataType] = None\n\n def _serialize(self, db: Optional[Database]=None) ->str:\n assert self._dataframe is not None\n return (f'{self._dataframe._name}.\"{self._name}\"' if self._name !=\n '*' else f'{self._dataframe._name}.*')\n\n def __getitem__(self, field_name: str) ->ColumnField:\n \"\"\"\n Get access to a field of the current column.\n\n Args:\n field_name: str\n\n Returns:\n Field of the column with the specified name.\n \"\"\"\n return ColumnField(self, field_name=field_name)\n\n def _bind(self, dataframe: Optional['DataFrame']=None, db: Optional[\n Database]=None):\n \"\"\":meta private:\"\"\"\n c = Column(self._name, self._dataframe)\n c._db = (db if db is not None else dataframe._db if dataframe is not\n None else self._db)\n assert c._db is not None\n return c\n",
"step-2": "<mask token>\n\n\nclass ColumnField(Expr):\n <mask token>\n\n def __init__(self, column: 'Column', field_name: str) ->None:\n \"\"\":meta private:\"\"\"\n self._field_name = field_name\n self._column = column\n super().__init__(column._dataframe)\n\n def _serialize(self, db: Optional[Database]=None) ->str:\n return (f'({self._column._serialize(db=db)}).\"{self._field_name}\"' if\n self._field_name != '*' else\n f'({self._column._serialize(db=db)}).*')\n\n\nclass Column(Expr):\n \"\"\"\n Inherited from :class:`~expr.Expr`.\n\n Representation of a Python object :class:`~col.Column`.\n \"\"\"\n\n def __init__(self, name: str, dataframe: 'DataFrame') ->None:\n \"\"\":meta private:\"\"\"\n super().__init__(dataframe=dataframe)\n self._name = name\n self._type: Optional[DataType] = None\n\n def _serialize(self, db: Optional[Database]=None) ->str:\n assert self._dataframe is not None\n return (f'{self._dataframe._name}.\"{self._name}\"' if self._name !=\n '*' else f'{self._dataframe._name}.*')\n\n def __getitem__(self, field_name: str) ->ColumnField:\n \"\"\"\n Get access to a field of the current column.\n\n Args:\n field_name: str\n\n Returns:\n Field of the column with the specified name.\n \"\"\"\n return ColumnField(self, field_name=field_name)\n\n def _bind(self, dataframe: Optional['DataFrame']=None, db: Optional[\n Database]=None):\n \"\"\":meta private:\"\"\"\n c = Column(self._name, self._dataframe)\n c._db = (db if db is not None else dataframe._db if dataframe is not\n None else self._db)\n assert c._db is not None\n return c\n",
"step-3": "<mask token>\n\n\nclass ColumnField(Expr):\n \"\"\"\n Inherited from :class:`~expr.Expr`.\n\n Representation of a field of a :class:`~col.Column` of composite type. This\n type allows to access to the fields in a dict-like manner.\n \"\"\"\n\n def __init__(self, column: 'Column', field_name: str) ->None:\n \"\"\":meta private:\"\"\"\n self._field_name = field_name\n self._column = column\n super().__init__(column._dataframe)\n\n def _serialize(self, db: Optional[Database]=None) ->str:\n return (f'({self._column._serialize(db=db)}).\"{self._field_name}\"' if\n self._field_name != '*' else\n f'({self._column._serialize(db=db)}).*')\n\n\nclass Column(Expr):\n \"\"\"\n Inherited from :class:`~expr.Expr`.\n\n Representation of a Python object :class:`~col.Column`.\n \"\"\"\n\n def __init__(self, name: str, dataframe: 'DataFrame') ->None:\n \"\"\":meta private:\"\"\"\n super().__init__(dataframe=dataframe)\n self._name = name\n self._type: Optional[DataType] = None\n\n def _serialize(self, db: Optional[Database]=None) ->str:\n assert self._dataframe is not None\n return (f'{self._dataframe._name}.\"{self._name}\"' if self._name !=\n '*' else f'{self._dataframe._name}.*')\n\n def __getitem__(self, field_name: str) ->ColumnField:\n \"\"\"\n Get access to a field of the current column.\n\n Args:\n field_name: str\n\n Returns:\n Field of the column with the specified name.\n \"\"\"\n return ColumnField(self, field_name=field_name)\n\n def _bind(self, dataframe: Optional['DataFrame']=None, db: Optional[\n Database]=None):\n \"\"\":meta private:\"\"\"\n c = Column(self._name, self._dataframe)\n c._db = (db if db is not None else dataframe._db if dataframe is not\n None else self._db)\n assert c._db is not None\n return c\n",
"step-4": "<mask token>\nfrom typing import TYPE_CHECKING, Optional\nfrom greenplumpython.db import Database\nfrom greenplumpython.expr import Expr\nfrom greenplumpython.type import DataType\nif TYPE_CHECKING:\n from greenplumpython.dataframe import DataFrame\n\n\nclass ColumnField(Expr):\n \"\"\"\n Inherited from :class:`~expr.Expr`.\n\n Representation of a field of a :class:`~col.Column` of composite type. This\n type allows to access to the fields in a dict-like manner.\n \"\"\"\n\n def __init__(self, column: 'Column', field_name: str) ->None:\n \"\"\":meta private:\"\"\"\n self._field_name = field_name\n self._column = column\n super().__init__(column._dataframe)\n\n def _serialize(self, db: Optional[Database]=None) ->str:\n return (f'({self._column._serialize(db=db)}).\"{self._field_name}\"' if\n self._field_name != '*' else\n f'({self._column._serialize(db=db)}).*')\n\n\nclass Column(Expr):\n \"\"\"\n Inherited from :class:`~expr.Expr`.\n\n Representation of a Python object :class:`~col.Column`.\n \"\"\"\n\n def __init__(self, name: str, dataframe: 'DataFrame') ->None:\n \"\"\":meta private:\"\"\"\n super().__init__(dataframe=dataframe)\n self._name = name\n self._type: Optional[DataType] = None\n\n def _serialize(self, db: Optional[Database]=None) ->str:\n assert self._dataframe is not None\n return (f'{self._dataframe._name}.\"{self._name}\"' if self._name !=\n '*' else f'{self._dataframe._name}.*')\n\n def __getitem__(self, field_name: str) ->ColumnField:\n \"\"\"\n Get access to a field of the current column.\n\n Args:\n field_name: str\n\n Returns:\n Field of the column with the specified name.\n \"\"\"\n return ColumnField(self, field_name=field_name)\n\n def _bind(self, dataframe: Optional['DataFrame']=None, db: Optional[\n Database]=None):\n \"\"\":meta private:\"\"\"\n c = Column(self._name, self._dataframe)\n c._db = (db if db is not None else dataframe._db if dataframe is not\n None else self._db)\n assert c._db is not None\n return c\n",
"step-5": "\"\"\"Utilties to access a column and one field of a column if the column is composite.\"\"\"\nfrom typing import TYPE_CHECKING, Optional\n\nfrom greenplumpython.db import Database\nfrom greenplumpython.expr import Expr\nfrom greenplumpython.type import DataType\n\nif TYPE_CHECKING:\n from greenplumpython.dataframe import DataFrame\n\n\nclass ColumnField(Expr):\n \"\"\"\n Inherited from :class:`~expr.Expr`.\n\n Representation of a field of a :class:`~col.Column` of composite type. This\n type allows to access to the fields in a dict-like manner.\n \"\"\"\n\n def __init__(\n self,\n column: \"Column\",\n field_name: str,\n ) -> None:\n # noqa\n \"\"\":meta private:\"\"\"\n self._field_name = field_name\n self._column = column\n super().__init__(column._dataframe)\n\n def _serialize(self, db: Optional[Database] = None) -> str:\n return (\n f'({self._column._serialize(db=db)}).\"{self._field_name}\"'\n if self._field_name != \"*\"\n else f\"({self._column._serialize(db=db)}).*\"\n )\n\n\nclass Column(Expr):\n \"\"\"\n Inherited from :class:`~expr.Expr`.\n\n Representation of a Python object :class:`~col.Column`.\n \"\"\"\n\n def __init__(self, name: str, dataframe: \"DataFrame\") -> None:\n # noqa: D400\n \"\"\":meta private:\"\"\"\n super().__init__(dataframe=dataframe)\n self._name = name\n self._type: Optional[DataType] = None # TODO: Add type inference\n\n def _serialize(self, db: Optional[Database] = None) -> str:\n assert self._dataframe is not None\n # Quote both dataframe name and column name to avoid SQL injection.\n return (\n f'{self._dataframe._name}.\"{self._name}\"'\n if self._name != \"*\"\n else f\"{self._dataframe._name}.*\"\n )\n\n def __getitem__(self, field_name: str) -> ColumnField:\n \"\"\"\n Get access to a field of the current column.\n\n Args:\n field_name: str\n\n Returns:\n Field of the column with the specified name.\n \"\"\"\n return ColumnField(self, field_name=field_name)\n\n def _bind(\n self,\n dataframe: 
Optional[\"DataFrame\"] = None,\n db: Optional[Database] = None,\n ):\n # noqa D400\n \"\"\":meta private:\"\"\"\n c = Column(\n self._name,\n self._dataframe,\n )\n c._db = db if db is not None else dataframe._db if dataframe is not None else self._db\n assert c._db is not None\n return c\n",
"step-ids": [
6,
9,
10,
12,
13
]
}
|
[
6,
9,
10,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('my_list consists of: ', my_list)
print()
print('Operations similar to strings')
print('Concatenation')
print("my_list + ['bill'] equals: ", my_list + ['bill'])
print()
print('Repeat')
print('my_list * 3 equals: ', my_list * 3)
print()
print('Indexing')
print('1st element is my_list[0]: ', my_list[0])
print('last element is my_list[-1]: ', my_list[-1])
print()
print('Slicing')
print('First two elements are my_list[0:2]: ', my_list[0:2])
print('Last two elements are my_list[-2:]: ', my_list[-2:])
print('Slice assignment, my_list[:2]=[]: ')
<|reserved_special_token_0|>
print('my_list is: ', my_list)
print()
print('Length')
print('Length is len(my_list): ', len(my_list))
print()
print('New stuff, which modifies the list (not for strings)')
print('Append element to the end, my_list.append(True): ')
my_list.append(True)
print('my_list is: ', my_list)
print('Append list into the list, my_list.append([5,6]): ')
my_list.append([5, 6])
print('my_list is: ', my_list)
print()
print('Extend, can append all elements in a list')
print("Extend single element to the end, my_list.extend('z'): ")
my_list.extend('z')
print('my_list is: ', my_list)
print('Extend a list of elements, my_list.extend([5,6,7]): ')
my_list.extend([5, 6, 7])
print('my_list is: ', my_list)
print()
print('Delete elements')
print('Delete the first element, del my_list[0]: ')
del my_list[0]
print('my_list is: ', my_list)
print('Delete last 4 elements, del my_list[-4:]: ')
del my_list[-4:]
print('my_list is: ', my_list)
<|reserved_special_token_1|>
my_list = [1, 'a', 3.14]
print('my_list consists of: ', my_list)
print()
print('Operations similar to strings')
print('Concatenation')
print("my_list + ['bill'] equals: ", my_list + ['bill'])
print()
print('Repeat')
print('my_list * 3 equals: ', my_list * 3)
print()
print('Indexing')
print('1st element is my_list[0]: ', my_list[0])
print('last element is my_list[-1]: ', my_list[-1])
print()
print('Slicing')
print('First two elements are my_list[0:2]: ', my_list[0:2])
print('Last two elements are my_list[-2:]: ', my_list[-2:])
print('Slice assignment, my_list[:2]=[]: ')
my_list[:2] = []
print('my_list is: ', my_list)
print()
print('Length')
print('Length is len(my_list): ', len(my_list))
print()
print('New stuff, which modifies the list (not for strings)')
print('Append element to the end, my_list.append(True): ')
my_list.append(True)
print('my_list is: ', my_list)
print('Append list into the list, my_list.append([5,6]): ')
my_list.append([5, 6])
print('my_list is: ', my_list)
print()
print('Extend, can append all elements in a list')
print("Extend single element to the end, my_list.extend('z'): ")
my_list.extend('z')
print('my_list is: ', my_list)
print('Extend a list of elements, my_list.extend([5,6,7]): ')
my_list.extend([5, 6, 7])
print('my_list is: ', my_list)
print()
print('Delete elements')
print('Delete the first element, del my_list[0]: ')
del my_list[0]
print('my_list is: ', my_list)
print('Delete last 4 elements, del my_list[-4:]: ')
del my_list[-4:]
print('my_list is: ', my_list)
<|reserved_special_token_1|>
# wfp, 6/6
# simple list stuff
# Walk through the core list operations, printing each result as we go.
my_list = [1, 'a', 3.14]
print("my_list consists of: ", my_list)
print()

print("Operations similar to strings")
print("Concatenation")
print("my_list + ['bill'] equals: ", my_list + ['bill'])
print()
print("Repeat")
print("my_list * 3 equals: ", my_list * 3)
print()
print("Indexing")
print("1st element is my_list[0]: ", my_list[0])
print("last element is my_list[-1]: ", my_list[-1])
print()
print("Slicing")
print("First two elements are my_list[0:2]: ", my_list[0:2])
print("Last two elements are my_list[-2:]: ", my_list[-2:])
print("Slice assignment, my_list[:2]=[]: ")
my_list[:2] = []
print("my_list is: ", my_list)
print()
print("Length")
print("Length is len(my_list): ", len(my_list))
print()
print("New stuff, which modifies the list (not for strings)")
print("Append element to the end, my_list.append(True): ")
my_list.append(True)
print("my_list is: ", my_list)
print("Append list into the list, my_list.append([5,6]): ")
my_list.append([5, 6])
print("my_list is: ", my_list)
print()
print("Extend, can append all elements in a list")
print("Extend single element to the end, my_list.extend('z'): ")
my_list.extend('z')
print("my_list is: ", my_list)
print("Extend a list of elements, my_list.extend([5,6,7]): ")
my_list.extend([5, 6, 7])
print("my_list is: ", my_list)
print()
print("Delete elements")
print("Delete the first element, del my_list[0]: ")
del my_list[0]
print("my_list is: ", my_list)
print("Delete last 4 elements, del my_list[-4:]: ")
del my_list[-4:]
print("my_list is: ", my_list)
|
flexible
|
{
"blob_id": "1c134cba779459b57f1f3c195aed37d105b94aef",
"index": 9935,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('my_list consists of: ', my_list)\nprint()\nprint('Operations similar to strings')\nprint('Concatenation')\nprint(\"my_list + ['bill'] equals: \", my_list + ['bill'])\nprint()\nprint('Repeat')\nprint('my_list * 3 equals: ', my_list * 3)\nprint()\nprint('Indexing')\nprint('1st element is my_list[0]: ', my_list[0])\nprint('last element is my_list[-1]: ', my_list[-1])\nprint()\nprint('Slicing')\nprint('First two elements are my_list[0:2]: ', my_list[0:2])\nprint('Last two elements are my_list[-2:]: ', my_list[-2:])\nprint('Slice assignment, my_list[:2]=[]: ')\n<mask token>\nprint('my_list is: ', my_list)\nprint()\nprint('Length')\nprint('Length is len(my_list): ', len(my_list))\nprint()\nprint('New stuff, which modifies the list (not for strings)')\nprint('Append element to the end, my_list.append(True): ')\nmy_list.append(True)\nprint('my_list is: ', my_list)\nprint('Append list into the list, my_list.append([5,6]): ')\nmy_list.append([5, 6])\nprint('my_list is: ', my_list)\nprint()\nprint('Extend, can append all elements in a list')\nprint(\"Extend single element to the end, my_list.extend('z'): \")\nmy_list.extend('z')\nprint('my_list is: ', my_list)\nprint('Extend a list of elements, my_list.extend([5,6,7]): ')\nmy_list.extend([5, 6, 7])\nprint('my_list is: ', my_list)\nprint()\nprint('Delete elements')\nprint('Delete the first element, del my_list[0]: ')\ndel my_list[0]\nprint('my_list is: ', my_list)\nprint('Delete last 4 elements, del my_list[-4:]: ')\ndel my_list[-4:]\nprint('my_list is: ', my_list)\n",
"step-3": "my_list = [1, 'a', 3.14]\nprint('my_list consists of: ', my_list)\nprint()\nprint('Operations similar to strings')\nprint('Concatenation')\nprint(\"my_list + ['bill'] equals: \", my_list + ['bill'])\nprint()\nprint('Repeat')\nprint('my_list * 3 equals: ', my_list * 3)\nprint()\nprint('Indexing')\nprint('1st element is my_list[0]: ', my_list[0])\nprint('last element is my_list[-1]: ', my_list[-1])\nprint()\nprint('Slicing')\nprint('First two elements are my_list[0:2]: ', my_list[0:2])\nprint('Last two elements are my_list[-2:]: ', my_list[-2:])\nprint('Slice assignment, my_list[:2]=[]: ')\nmy_list[:2] = []\nprint('my_list is: ', my_list)\nprint()\nprint('Length')\nprint('Length is len(my_list): ', len(my_list))\nprint()\nprint('New stuff, which modifies the list (not for strings)')\nprint('Append element to the end, my_list.append(True): ')\nmy_list.append(True)\nprint('my_list is: ', my_list)\nprint('Append list into the list, my_list.append([5,6]): ')\nmy_list.append([5, 6])\nprint('my_list is: ', my_list)\nprint()\nprint('Extend, can append all elements in a list')\nprint(\"Extend single element to the end, my_list.extend('z'): \")\nmy_list.extend('z')\nprint('my_list is: ', my_list)\nprint('Extend a list of elements, my_list.extend([5,6,7]): ')\nmy_list.extend([5, 6, 7])\nprint('my_list is: ', my_list)\nprint()\nprint('Delete elements')\nprint('Delete the first element, del my_list[0]: ')\ndel my_list[0]\nprint('my_list is: ', my_list)\nprint('Delete last 4 elements, del my_list[-4:]: ')\ndel my_list[-4:]\nprint('my_list is: ', my_list)\n",
"step-4": "# wfp, 6/6\r\n# simple list stuff\r\n\r\nmy_list = [1,'a',3.14]\r\nprint(\"my_list consists of: \",my_list)\r\nprint()\r\n\r\nprint(\"Operations similar to strings\")\r\nprint(\"Concatenation\")\r\nprint(\"my_list + ['bill'] equals: \", my_list + [\"bill\"])\r\nprint()\r\nprint(\"Repeat\")\r\nprint(\"my_list * 3 equals: \", my_list * 3)\r\nprint()\r\nprint(\"Indexing\")\r\nprint(\"1st element is my_list[0]: \",my_list[0])\r\nprint(\"last element is my_list[-1]: \", my_list[-1])\r\nprint()\r\nprint(\"Slicing\")\r\nprint(\"First two elements are my_list[0:2]: \",my_list[0:2])\r\nprint(\"Last two elements are my_list[-2:]: \",my_list[-2:])\r\nprint(\"Slice assignment, my_list[:2]=[]: \")\r\nmy_list[:2] = []\r\nprint(\"my_list is: \",my_list)\r\nprint()\r\nprint(\"Length\")\r\nprint(\"Length is len(my_list): \",len(my_list))\r\nprint()\r\nprint(\"New stuff, which modifies the list (not for strings)\")\r\nprint(\"Append element to the end, my_list.append(True): \")\r\nmy_list.append(True)\r\nprint(\"my_list is: \",my_list)\r\nprint(\"Append list into the list, my_list.append([5,6]): \")\r\nmy_list.append([5,6])\r\nprint(\"my_list is: \",my_list)\r\nprint()\r\nprint(\"Extend, can append all elements in a list\")\r\nprint(\"Extend single element to the end, my_list.extend('z'): \")\r\nmy_list.extend('z')\r\nprint(\"my_list is: \",my_list)\r\nprint(\"Extend a list of elements, my_list.extend([5,6,7]): \")\r\nmy_list.extend([5,6,7])\r\nprint(\"my_list is: \",my_list)\r\nprint()\r\nprint(\"Delete elements\")\r\nprint(\"Delete the first element, del my_list[0]: \")\r\ndel(my_list[0])\r\nprint(\"my_list is: \",my_list)\r\nprint(\"Delete last 4 elements, del my_list[-4:]: \")\r\ndel(my_list[-4:])\r\nprint(\"my_list is: \",my_list)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from flask import Flask, render_template
from config import Config
from flask_bootstrap import Bootstrap
from config import config_options
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect
from flask_sqlalchemy import SQLAlchemy
# Shared Flask extension instances, bound to the app inside create_app().
login_manager = LoginManager()
login_manager.session_protection = 'strong'
# Bug fix: Flask-Login reads ``login_view`` — the original ``loginview``
# attribute name was a typo, so unauthenticated users were never redirected
# to the login page.
login_manager.login_view = 'auth.login'

bootstrap = Bootstrap()
csrf = CSRFProtect()
db = SQLAlchemy()
def create_app(config_name):
    """Application factory: build, configure and return a Flask app.

    Args:
        config_name: key into ``config_options`` selecting the environment
            (e.g. development / production) whose settings are layered on
            top of the base ``Config``.

    Returns:
        The fully configured :class:`~flask.Flask` instance with all
        extensions initialized and blueprints registered.
    """
    app = Flask(__name__)

    # Load configuration: base settings first, then environment overrides.
    app.config.from_object(Config)
    app.config.from_object(config_options[config_name])
    # NOTE(review): the secret key should come from the environment or a
    # config file, not be hard-coded in source. Kept as-is to preserve
    # existing sessions; rotate and externalize it.
    app.config['SECRET_KEY'] = 'd686414d5eeb7d38df7e8c385b2c2c47'

    # Initialize extensions with this app instance.
    bootstrap.init_app(app)
    csrf.init_app(app)
    db.init_app(app)
    # Bug fix: login_manager was created at module level but never bound to
    # the app, so Flask-Login was effectively disabled.
    login_manager.init_app(app)

    # Register blueprints (imported here to avoid circular imports).
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/authenticate')

    return app
|
normal
|
{
"blob_id": "2eecc852a6438db19e0ed55ba6cc6610d76c6ed0",
"index": 2207,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(Config)\n app.config.from_object(config_options[config_name])\n app.config['SECRET_KEY'] = 'd686414d5eeb7d38df7e8c385b2c2c47'\n bootstrap.init_app(app)\n csrf.init_app(app)\n db.init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n from .auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint, url_prefix='/authenticate')\n return app\n",
"step-3": "<mask token>\nlogin_manager = LoginManager()\nlogin_manager.session_protection = 'strong'\nlogin_manager.loginview = 'auth.login'\nbootstrap = Bootstrap()\ncsrf = CSRFProtect()\ndb = SQLAlchemy()\n\n\ndef create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(Config)\n app.config.from_object(config_options[config_name])\n app.config['SECRET_KEY'] = 'd686414d5eeb7d38df7e8c385b2c2c47'\n bootstrap.init_app(app)\n csrf.init_app(app)\n db.init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n from .auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint, url_prefix='/authenticate')\n return app\n",
"step-4": "from flask import Flask, render_template\nfrom config import Config\nfrom flask_bootstrap import Bootstrap\nfrom config import config_options\nfrom flask_login import LoginManager\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_sqlalchemy import SQLAlchemy\nlogin_manager = LoginManager()\nlogin_manager.session_protection = 'strong'\nlogin_manager.loginview = 'auth.login'\nbootstrap = Bootstrap()\ncsrf = CSRFProtect()\ndb = SQLAlchemy()\n\n\ndef create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(Config)\n app.config.from_object(config_options[config_name])\n app.config['SECRET_KEY'] = 'd686414d5eeb7d38df7e8c385b2c2c47'\n bootstrap.init_app(app)\n csrf.init_app(app)\n db.init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n from .auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint, url_prefix='/authenticate')\n return app\n",
"step-5": "from flask import Flask, render_template\nfrom config import Config\nfrom flask_bootstrap import Bootstrap\nfrom config import config_options\nfrom flask_login import LoginManager\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_sqlalchemy import SQLAlchemy\n\nlogin_manager = LoginManager()\nlogin_manager.session_protection = 'strong'\nlogin_manager.loginview = 'auth.login'\n\nbootstrap = Bootstrap()\ncsrf=CSRFProtect()\ndb = SQLAlchemy()\n\ndef create_app(config_name):\n \n app= Flask(__name__)\n\n #create app configs\n app.config.from_object(Config)\n app.config.from_object(config_options[config_name])\n app.config['SECRET_KEY']='d686414d5eeb7d38df7e8c385b2c2c47'\n \n #initializing\n bootstrap.init_app(app)\n csrf.init_app(app)\n db.init_app(app)\n \n #registering\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n \n from .auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint, url_prefix = '/authenticate')\n\n \n return app",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .mail_utils import send_mail
from .request_utils import get_host_url
|
flexible
|
{
"blob_id": "74b0ccb5193380ce596313d1ac3f898ff1fdd2f3",
"index": 930,
"step-1": "<mask token>\n",
"step-2": "from .mail_utils import send_mail\nfrom .request_utils import get_host_url\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def phonenumbervalidate(phone):
pattern = '^[0][6-9][0-9]{9}$'
phone = str(phone)
if re.match(pattern, phone):
return True
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def phonenumbervalidate(phone):
pattern = '^[0][6-9][0-9]{9}$'
phone = str(phone)
if re.match(pattern, phone):
return True
return False
<|reserved_special_token_0|>
def validaterollnumber(number):
number = str(number)
pattern = '^[1][5][2][u][1][A][0][1-9][0-6][0-9]'
if re.match(pattern, number):
return True
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def phonenumbervalidate(phone):
pattern = '^[6-9][0-9]{9}$'
phone = str(phone)
if re.match(pattern, phone):
return True
return False
<|reserved_special_token_0|>
def phonenumbervalidate(phone):
pattern = '^[0][6-9][0-9]{9}$'
phone = str(phone)
if re.match(pattern, phone):
return True
return False
<|reserved_special_token_0|>
def validaterollnumber(number):
number = str(number)
pattern = '^[1][5][2][u][1][A][0][1-9][0-6][0-9]'
if re.match(pattern, number):
return True
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def phonenumbervalidate(phone):
pattern = '^[6-9][0-9]{9}$'
phone = str(phone)
if re.match(pattern, phone):
return True
return False
print(phonenumbervalidate(998855451))
print(phonenumbervalidate(9955441))
<|reserved_special_token_0|>
def phonenumbervalidate(phone):
pattern = '^[0][6-9][0-9]{9}$'
phone = str(phone)
if re.match(pattern, phone):
return True
return False
print(phonenumbervalidate('09988554510'))
print(phonenumbervalidate(99554410))
<|reserved_special_token_0|>
def validaterollnumber(number):
number = str(number)
pattern = '^[1][5][2][u][1][A][0][1-9][0-6][0-9]'
if re.match(pattern, number):
return True
return False
print(phonenumbervalidate('152u1A0555'))
print(phonenumbervalidate('152u1A0485'))
<|reserved_special_token_1|>
#!/usr/bin/env python
# coding: utf-8
# In[5]:
import re
def phonenumbervalidate(phone):
    """Return True if *phone* is a 10-digit mobile number starting with 6-9."""
    # Anchored pattern: first digit 6-9, then exactly nine more digits.
    return re.match('^[6-9][0-9]{9}$', str(phone)) is not None


print(phonenumbervalidate(998855451))
print(phonenumbervalidate(9955441))
# In[10]:
import re
def phonenumbervalidate(phone):
    """Return True if *phone* is an 11-character mobile number with a 0 prefix."""
    text = str(phone)
    # Literal leading 0, then a digit 6-9, then exactly nine more digits.
    matched = re.match('^[0][6-9][0-9]{9}$', text)
    return True if matched else False


print(phonenumbervalidate("09988554510"))
print(phonenumbervalidate(99554410))
# In[11]:
import re
def validaterollnumber(number):
    """Return True when *number* is a roll number of the form
    ``152u1A0`` followed by one digit 1-9, one digit 0-6, and one digit 0-9.

    The pattern is anchored at both ends; the original version had no ``$``
    anchor, so any string merely *starting* with a valid roll number (e.g.
    trailing garbage appended) was accepted.  The single-character classes
    ``[1][5][2]...`` were also collapsed into the literal prefix.
    """
    number = str(number)
    pattern = '^152u1A0[1-9][0-6][0-9]$'
    return bool(re.match(pattern, number))
# Demo: roll-number strings must be checked with validaterollnumber; the
# original mistakenly routed them through the phone-number validator.
print(validaterollnumber("152u1A0555"))
print(validaterollnumber("152u1A0485"))
# In[ ]:
|
flexible
|
{
"blob_id": "6b2161379bdd27980d3a515cdf4719ab036845fe",
"index": 8217,
"step-1": "<mask token>\n\n\ndef phonenumbervalidate(phone):\n pattern = '^[0][6-9][0-9]{9}$'\n phone = str(phone)\n if re.match(pattern, phone):\n return True\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef phonenumbervalidate(phone):\n pattern = '^[0][6-9][0-9]{9}$'\n phone = str(phone)\n if re.match(pattern, phone):\n return True\n return False\n\n\n<mask token>\n\n\ndef validaterollnumber(number):\n number = str(number)\n pattern = '^[1][5][2][u][1][A][0][1-9][0-6][0-9]'\n if re.match(pattern, number):\n return True\n return False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef phonenumbervalidate(phone):\n pattern = '^[6-9][0-9]{9}$'\n phone = str(phone)\n if re.match(pattern, phone):\n return True\n return False\n\n\n<mask token>\n\n\ndef phonenumbervalidate(phone):\n pattern = '^[0][6-9][0-9]{9}$'\n phone = str(phone)\n if re.match(pattern, phone):\n return True\n return False\n\n\n<mask token>\n\n\ndef validaterollnumber(number):\n number = str(number)\n pattern = '^[1][5][2][u][1][A][0][1-9][0-6][0-9]'\n if re.match(pattern, number):\n return True\n return False\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef phonenumbervalidate(phone):\n pattern = '^[6-9][0-9]{9}$'\n phone = str(phone)\n if re.match(pattern, phone):\n return True\n return False\n\n\nprint(phonenumbervalidate(998855451))\nprint(phonenumbervalidate(9955441))\n<mask token>\n\n\ndef phonenumbervalidate(phone):\n pattern = '^[0][6-9][0-9]{9}$'\n phone = str(phone)\n if re.match(pattern, phone):\n return True\n return False\n\n\nprint(phonenumbervalidate('09988554510'))\nprint(phonenumbervalidate(99554410))\n<mask token>\n\n\ndef validaterollnumber(number):\n number = str(number)\n pattern = '^[1][5][2][u][1][A][0][1-9][0-6][0-9]'\n if re.match(pattern, number):\n return True\n return False\n\n\nprint(phonenumbervalidate('152u1A0555'))\nprint(phonenumbervalidate('152u1A0485'))\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\nimport re\ndef phonenumbervalidate(phone):\n pattern ='^[6-9][0-9]{9}$'\n phone =str(phone)\n if re.match(pattern,phone):\n return True \n return False\nprint(phonenumbervalidate(998855451))\nprint(phonenumbervalidate(9955441))\n\n\n# In[10]:\n\n\nimport re\ndef phonenumbervalidate(phone):\n pattern ='^[0][6-9][0-9]{9}$'\n phone =str(phone)\n if re.match(pattern,phone):\n return True \n return False\nprint(phonenumbervalidate(\"09988554510\"))\nprint(phonenumbervalidate(99554410))\n\n\n# In[11]:\n\n\nimport re\ndef validaterollnumber(number):\n \n number =str(number)\n pattern =\"^[1][5][2][u][1][A][0][1-9][0-6][0-9]\" \n if re.match(pattern,number):\n return True \n return False\nprint(phonenumbervalidate(\"152u1A0555\"))\nprint(phonenumbervalidate(\"152u1A0485\"))\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
6
]
}
|
[
1,
2,
3,
4,
6
] |
a, b, c, y = 4.4, 0.0, 4.2, 3.0
print(c + a * y * y / b)
|
normal
|
{
"blob_id": "2c43ede960febfb273f1c70c75816848768db4e5",
"index": 6599,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(c + a * y * y / b)\n",
"step-3": "a, b, c, y = 4.4, 0.0, 4.2, 3.0\nprint(c + a * y * y / b)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# This is a generated file, do not edit
from typing import List
import pydantic
from ..rmf_fleet_msgs.DockParameter import DockParameter
class Dock(pydantic.BaseModel):
fleet_name: str = "" # string
params: List[DockParameter] = [] # rmf_fleet_msgs/DockParameter
class Config:
orm_mode = True
# string fleet_name
# DockParameter[] params
|
normal
|
{
"blob_id": "62d0818395a6093ebf2c410aaadeb8a0250707ab",
"index": 3865,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Dock(pydantic.BaseModel):\n fleet_name: str = ''\n params: List[DockParameter] = []\n\n\n class Config:\n orm_mode = True\n",
"step-3": "from typing import List\nimport pydantic\nfrom ..rmf_fleet_msgs.DockParameter import DockParameter\n\n\nclass Dock(pydantic.BaseModel):\n fleet_name: str = ''\n params: List[DockParameter] = []\n\n\n class Config:\n orm_mode = True\n",
"step-4": "# This is a generated file, do not edit\n\nfrom typing import List\n\nimport pydantic\n\nfrom ..rmf_fleet_msgs.DockParameter import DockParameter\n\n\nclass Dock(pydantic.BaseModel):\n fleet_name: str = \"\" # string\n params: List[DockParameter] = [] # rmf_fleet_msgs/DockParameter\n\n class Config:\n orm_mode = True\n\n\n# string fleet_name\n# DockParameter[] params\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grpc.framework.foundation.logging_pool."""
import threading
import unittest
from grpc.framework.foundation import logging_pool
_POOL_SIZE = 16
class _CallableObject(object):
def __init__(self):
self._lock = threading.Lock()
self._passed_values = []
def __call__(self, value):
with self._lock:
self._passed_values.append(value)
def passed_values(self):
with self._lock:
return tuple(self._passed_values)
class LoggingPoolTest(unittest.TestCase):
def testUpAndDown(self):
pool = logging_pool.pool(_POOL_SIZE)
pool.shutdown(wait=True)
with logging_pool.pool(_POOL_SIZE) as pool:
self.assertIsNotNone(pool)
def testTaskExecuted(self):
test_list = []
with logging_pool.pool(_POOL_SIZE) as pool:
pool.submit(lambda: test_list.append(object())).result()
self.assertTrue(test_list)
def testException(self):
with logging_pool.pool(_POOL_SIZE) as pool:
raised_exception = pool.submit(lambda: 1 / 0).exception()
self.assertIsNotNone(raised_exception)
def testCallableObjectExecuted(self):
callable_object = _CallableObject()
passed_object = object()
with logging_pool.pool(_POOL_SIZE) as pool:
future = pool.submit(callable_object, passed_object)
self.assertIsNone(future.result())
self.assertSequenceEqual(
(passed_object,), callable_object.passed_values()
)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
normal
|
{
"blob_id": "049950bd4bbf7903218bb8fb3a4c91492d6af17b",
"index": 3252,
"step-1": "<mask token>\n\n\nclass _CallableObject(object):\n\n def __init__(self):\n self._lock = threading.Lock()\n self._passed_values = []\n\n def __call__(self, value):\n with self._lock:\n self._passed_values.append(value)\n <mask token>\n\n\nclass LoggingPoolTest(unittest.TestCase):\n\n def testUpAndDown(self):\n pool = logging_pool.pool(_POOL_SIZE)\n pool.shutdown(wait=True)\n with logging_pool.pool(_POOL_SIZE) as pool:\n self.assertIsNotNone(pool)\n\n def testTaskExecuted(self):\n test_list = []\n with logging_pool.pool(_POOL_SIZE) as pool:\n pool.submit(lambda : test_list.append(object())).result()\n self.assertTrue(test_list)\n\n def testException(self):\n with logging_pool.pool(_POOL_SIZE) as pool:\n raised_exception = pool.submit(lambda : 1 / 0).exception()\n self.assertIsNotNone(raised_exception)\n\n def testCallableObjectExecuted(self):\n callable_object = _CallableObject()\n passed_object = object()\n with logging_pool.pool(_POOL_SIZE) as pool:\n future = pool.submit(callable_object, passed_object)\n self.assertIsNone(future.result())\n self.assertSequenceEqual((passed_object,), callable_object.\n passed_values())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass _CallableObject(object):\n\n def __init__(self):\n self._lock = threading.Lock()\n self._passed_values = []\n\n def __call__(self, value):\n with self._lock:\n self._passed_values.append(value)\n\n def passed_values(self):\n with self._lock:\n return tuple(self._passed_values)\n\n\nclass LoggingPoolTest(unittest.TestCase):\n\n def testUpAndDown(self):\n pool = logging_pool.pool(_POOL_SIZE)\n pool.shutdown(wait=True)\n with logging_pool.pool(_POOL_SIZE) as pool:\n self.assertIsNotNone(pool)\n\n def testTaskExecuted(self):\n test_list = []\n with logging_pool.pool(_POOL_SIZE) as pool:\n pool.submit(lambda : test_list.append(object())).result()\n self.assertTrue(test_list)\n\n def testException(self):\n with logging_pool.pool(_POOL_SIZE) as pool:\n raised_exception = pool.submit(lambda : 1 / 0).exception()\n self.assertIsNotNone(raised_exception)\n\n def testCallableObjectExecuted(self):\n callable_object = _CallableObject()\n passed_object = object()\n with logging_pool.pool(_POOL_SIZE) as pool:\n future = pool.submit(callable_object, passed_object)\n self.assertIsNone(future.result())\n self.assertSequenceEqual((passed_object,), callable_object.\n passed_values())\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n",
"step-3": "<mask token>\n_POOL_SIZE = 16\n\n\nclass _CallableObject(object):\n\n def __init__(self):\n self._lock = threading.Lock()\n self._passed_values = []\n\n def __call__(self, value):\n with self._lock:\n self._passed_values.append(value)\n\n def passed_values(self):\n with self._lock:\n return tuple(self._passed_values)\n\n\nclass LoggingPoolTest(unittest.TestCase):\n\n def testUpAndDown(self):\n pool = logging_pool.pool(_POOL_SIZE)\n pool.shutdown(wait=True)\n with logging_pool.pool(_POOL_SIZE) as pool:\n self.assertIsNotNone(pool)\n\n def testTaskExecuted(self):\n test_list = []\n with logging_pool.pool(_POOL_SIZE) as pool:\n pool.submit(lambda : test_list.append(object())).result()\n self.assertTrue(test_list)\n\n def testException(self):\n with logging_pool.pool(_POOL_SIZE) as pool:\n raised_exception = pool.submit(lambda : 1 / 0).exception()\n self.assertIsNotNone(raised_exception)\n\n def testCallableObjectExecuted(self):\n callable_object = _CallableObject()\n passed_object = object()\n with logging_pool.pool(_POOL_SIZE) as pool:\n future = pool.submit(callable_object, passed_object)\n self.assertIsNone(future.result())\n self.assertSequenceEqual((passed_object,), callable_object.\n passed_values())\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n",
"step-4": "<mask token>\nimport threading\nimport unittest\nfrom grpc.framework.foundation import logging_pool\n_POOL_SIZE = 16\n\n\nclass _CallableObject(object):\n\n def __init__(self):\n self._lock = threading.Lock()\n self._passed_values = []\n\n def __call__(self, value):\n with self._lock:\n self._passed_values.append(value)\n\n def passed_values(self):\n with self._lock:\n return tuple(self._passed_values)\n\n\nclass LoggingPoolTest(unittest.TestCase):\n\n def testUpAndDown(self):\n pool = logging_pool.pool(_POOL_SIZE)\n pool.shutdown(wait=True)\n with logging_pool.pool(_POOL_SIZE) as pool:\n self.assertIsNotNone(pool)\n\n def testTaskExecuted(self):\n test_list = []\n with logging_pool.pool(_POOL_SIZE) as pool:\n pool.submit(lambda : test_list.append(object())).result()\n self.assertTrue(test_list)\n\n def testException(self):\n with logging_pool.pool(_POOL_SIZE) as pool:\n raised_exception = pool.submit(lambda : 1 / 0).exception()\n self.assertIsNotNone(raised_exception)\n\n def testCallableObjectExecuted(self):\n callable_object = _CallableObject()\n passed_object = object()\n with logging_pool.pool(_POOL_SIZE) as pool:\n future = pool.submit(callable_object, passed_object)\n self.assertIsNone(future.result())\n self.assertSequenceEqual((passed_object,), callable_object.\n passed_values())\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n",
"step-5": "# Copyright 2015 gRPC authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for grpc.framework.foundation.logging_pool.\"\"\"\n\nimport threading\nimport unittest\n\nfrom grpc.framework.foundation import logging_pool\n\n_POOL_SIZE = 16\n\n\nclass _CallableObject(object):\n def __init__(self):\n self._lock = threading.Lock()\n self._passed_values = []\n\n def __call__(self, value):\n with self._lock:\n self._passed_values.append(value)\n\n def passed_values(self):\n with self._lock:\n return tuple(self._passed_values)\n\n\nclass LoggingPoolTest(unittest.TestCase):\n def testUpAndDown(self):\n pool = logging_pool.pool(_POOL_SIZE)\n pool.shutdown(wait=True)\n\n with logging_pool.pool(_POOL_SIZE) as pool:\n self.assertIsNotNone(pool)\n\n def testTaskExecuted(self):\n test_list = []\n\n with logging_pool.pool(_POOL_SIZE) as pool:\n pool.submit(lambda: test_list.append(object())).result()\n\n self.assertTrue(test_list)\n\n def testException(self):\n with logging_pool.pool(_POOL_SIZE) as pool:\n raised_exception = pool.submit(lambda: 1 / 0).exception()\n\n self.assertIsNotNone(raised_exception)\n\n def testCallableObjectExecuted(self):\n callable_object = _CallableObject()\n passed_object = object()\n with logging_pool.pool(_POOL_SIZE) as pool:\n future = pool.submit(callable_object, passed_object)\n self.assertIsNone(future.result())\n self.assertSequenceEqual(\n (passed_object,), callable_object.passed_values()\n )\n\n\nif __name__ == 
\"__main__\":\n unittest.main(verbosity=2)\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
"""
file: babysit.py
language: python3
author: pan7447@rit.edu Parvathi Nair
author: vpb8262 Vishal Bulchandani
"""
"""
To compute the maximum pay a brother and sister can earn considering jobs that they can work on
together or separately depending on the number of children to babysit
"""
from operator import *
class Job:
    """
    One babysitting job: the day it occurs, its start and end times in
    HHMM form, how many children attend, and the hourly rate.
    """
    def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):
        self.day = day
        self.startTime = startTime
        self.endTime = endTime
        self.noOfChildren = noOfChildren
        self.hourlyRate = hourlyRate
        # Total pay for the job: (endTime - startTime) / 100 turns the HHMM
        # difference into hours.  This assumes times land on full hours --
        # TODO confirm inputs never carry non-zero minutes.
        self.value = (endTime - startTime) / 100 * hourlyRate

    def __str__(self):
        fields = (self.day, self.startTime, self.endTime,
                  self.noOfChildren, self.hourlyRate, self.value)
        return " ".join(str(field) for field in fields)
#total is global variable
total = 0
def takeInput():
    """
    Read the job list from stdin.

    The first line gives the number of jobs; each following line is
    "day startTime endTime noOfChildren hourlyRate" separated by single
    spaces.  Jobs outside the 0600-2300 working window are discarded.
    :return: list of Job objects built from the accepted lines
    """
    jobList = []
    for _ in range(int(input())):
        fields = input().strip('\n').split(" ")
        start, end = int(fields[1]), int(fields[2])
        # Keep only jobs inside the allowed sitting hours.
        if 600 <= start and end <= 2300:
            jobList.append(Job(int(fields[0]), start, end,
                               int(fields[3]), int(fields[4])))
    return jobList
def sortInputByEndTimeAndDay(jobList):
    """
    Return the jobs ordered by day first, then by finishing time.
    :param jobList: list of Job objects
    :return: new list sorted on the (day, endTime) key
    """
    return sorted(jobList, key=lambda job: (job.day, job.endTime))
def divideJobs(jobList, maximum):
    """
    Split the day-sorted job list into one bucket per day.

    Bucket ``d - 1`` holds day ``d``'s jobs; each bucket starts with a
    sentinel 0 so that real jobs begin at index 1 (the DP in ``algo``
    treats index 0 as "no job").  Days with no jobs yield a bare ``[0]``
    bucket, which ``algo`` scores as 0.

    Bug fixes versus the original:
    * ``[[0]] * maximum`` created ``maximum`` references to ONE shared
      list, so early appends leaked into every bucket.
    * the write index only advanced when the day changed, so a skipped
      day left a stale aliased bucket behind and its jobs were counted
      twice by ``main``.  Indexing directly by ``day - 1`` fixes both.

    :param jobList: jobs sorted by (day, endTime)
    :param maximum: the largest day number present
    :return: list of ``maximum`` independent per-day buckets
    """
    segregatedJobs = [[0] for _ in range(maximum)]
    for job in jobList:
        segregatedJobs[job.day - 1].append(job)
    return segregatedJobs
def computeRho(segregatedJob):
    """
    For each job i (1-based; index 0 is a sentinel), compute rho[i]: the
    largest index j < i whose job finishes no later than job i starts, or
    0 when no earlier job is compatible.
    :param segregatedJob: [0, job1, job2, ...] sorted by end time
    :return: list rho with rho[0] == 0
    """
    rho = [0]
    for i in range(1, len(segregatedJob)):
        predecessor = 0
        # Scan backwards so the first compatible hit is the latest one;
        # index 0 is the sentinel and is never inspected.
        for j in range(i - 1, 0, -1):
            if segregatedJob[i].startTime >= segregatedJob[j].endTime:
                predecessor = j
                break
        rho.append(predecessor)
    return rho
def algo(segregatedJob):
    """
    Run the weighted interval-scheduling DP for one day's jobs and add the
    day's best combined pay for the two sitters to the global ``total``.

    S[j][k] appears to be the best pay when one sitter may use jobs 1..j
    and the other jobs 1..k (index 0 is the sentinel "no job").  Jobs with
    fewer than 4 children are handled by a single sitter; jobs with 4 or
    more need both sitters at once, so taking one consumes time on both
    axes.  NOTE(review): this reading of the two axes is inferred from the
    recurrences below -- confirm against the problem statement.

    :param segregatedJob: [0, job1, ...] for a single day, sorted by endTime
    :return: None (accumulates into the module-level ``total``)
    """
    global total
    # rho[i] = latest earlier job compatible with job i (0 = none).
    rho = computeRho(segregatedJob)
    r = len(rho);
    S = [[0 for x in range(r)] for y in range(r)]
    k = 0
    # Fill the triangle j >= k and mirror each cell, keeping S symmetric.
    while(k<len(S)):
        for j in range(k, len(S)):
            if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:
                # Diagonal, small job: one sitter takes job j on top of its
                # compatible prefix, or job j is skipped on both axes.
                S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[j - 1][k - 1])
            elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:
                # Off-diagonal, big job: both sitters are required but only
                # the j axis reaches job j, so it cannot be taken here.
                S[j][k] = S[j - 1][k]
            elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:
                # Diagonal, big job: take it with both sitters (rewind both
                # axes to the compatible predecessor) or skip it on both.
                S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S[j - 1][k - 1])
            elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:
                # Off-diagonal, small job: the j-axis sitter may take job j
                # alone, or leave it out.
                S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - 1][k])
            else:
                # j == k == 0: the sentinel row/column stays 0.
                pass
            S[k][j] = S[j][k]
        k += 1
    length = len(S)
    # The day's best combined pay sits in the bottom-right corner.
    total += S[length-1][length-1]
def main():
    """
    Read the jobs, split them per day, run the scheduler on each day and
    print the accumulated total pay.
    :return: None
    """
    global total
    sortedJobs = sortInputByEndTimeAndDay(takeInput())
    lastDay = sortedJobs[-1].day
    for dayJobs in divideJobs(sortedJobs, lastDay):
        algo(dayJobs)
    # `total` has been accumulated by algo() across all days.
    print(int(total))


if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "f57fa2787934dc2a002f82aa1af1f1d9a7f90da5",
"index": 9947,
"step-1": "<mask token>\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\n<mask token>\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh 
value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\n<mask token>\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh 
value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList = takeInput()\n jobListSorted = sortInputByEndTimeAndDay(jobList)\n maximum = jobListSorted[len(jobListSorted) - 1].day\n segregatedJobs = divideJobs(jobListSorted, maximum)\n for i in range(len(segregatedJobs)):\n algo(segregatedJobs[i])\n print(int(total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\ntotal = 0\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh 
value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList = takeInput()\n jobListSorted = sortInputByEndTimeAndDay(jobList)\n maximum = jobListSorted[len(jobListSorted) - 1].day\n segregatedJobs = divideJobs(jobListSorted, maximum)\n for i in range(len(segregatedJobs)):\n algo(segregatedJobs[i])\n print(int(total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom operator import *\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\ntotal = 0\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n 
\"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList = takeInput()\n jobListSorted = sortInputByEndTimeAndDay(jobList)\n maximum = jobListSorted[len(jobListSorted) - 1].day\n segregatedJobs = divideJobs(jobListSorted, maximum)\n for i in range(len(segregatedJobs)):\n algo(segregatedJobs[i])\n print(int(total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nfile: babysit.py\nlanguage: python3\nauthor: pan7447@rit.edu Parvathi Nair\nauthor: vpb8262 Vishal Bulchandani\n\n\"\"\"\n\"\"\"\nTo compute the maximum pay a brother and sister can earn considering jobs that they can work on\ntogether or separately depending on the number of children to babysit\n\n\"\"\"\nfrom operator import *\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day=day\n self.startTime=startTime\n self.endTime=endTime\n self.noOfChildren=noOfChildren\n self.hourlyRate=hourlyRate\n self.value=(endTime-startTime)/100*hourlyRate\n\n def __str__(self):\n return str(self.day)+ \" \" + str(self.startTime) + \" \"+ str(self.endTime) + \" \" +str(self.noOfChildren) + \" \" + str(self.hourlyRate)+ \" \" + str(self.value)\n\n#total is global variable\ntotal = 0\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n=int(input())\n jobList=[]\n\n #taking n inputs and creating objects\n for i in range (n):\n str = input().strip('\\n').split(\" \")\n if int(str[1])>=600 and int(str[2])<=2300:\n jobs=Job (int(str[0]),int(str[1]),int(str[2]),int(str[3]),int(str[4]))\n jobList.append(jobs)\n return jobList\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList=sorted(jobList, key= attrgetter('day','endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of 
lists\n \"\"\"\n\n segregatedJobs=[[0]]*(maximum)\n\n temp=jobList[0].day\n j = 0\n for i in range(0,len(jobList)):\n if jobList[i].day==temp:\n segregatedJobs[j].append(jobList[i])\n\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j]=[0,jobList[i]]\n\n return segregatedJobs\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n\n #inserting 0 at the 1st position\n rho = [0]\n count = 0\n\n #calculating rho\n for i in range(1,len(segregatedJob)):\n j = i-1\n while(j>0):\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j=j-1\n if count == 0:\n rho.append(0)\n count = 0\n\n\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho);\n\n S = [[0 for x in range(r)] for y in range(r)]\n k = 0\n #implementaion of scheduling algorithm\n while(k<len(S)):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[j - 1][k - 1])\n\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S[j - 1][k - 1])\n\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n\n #Adding the max pay for every individual field in the matrix\n total += S[length-1][length-1]\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList=takeInput()\n 
jobListSorted=sortInputByEndTimeAndDay(jobList)\n maximum=jobListSorted[len(jobListSorted)-1].day\n segregatedJobs=divideJobs(jobListSorted, maximum)\n for i in range (len(segregatedJobs)):\n algo(segregatedJobs[i])\n\n # print the total pay\n print(int(total))\n\nif __name__ == '__main__':\n main()",
"step-ids": [
9,
11,
12,
13,
14
]
}
|
[
9,
11,
12,
13,
14
] |
# Read whitespace-separated integers, bubble-sort them in place, print them.
a = input("Please enter the elements with spaces between them:").split()
# Convert every token to an int so comparisons are numeric, not lexicographic.
a = [int(x) for x in a]
n = len(a)
# Classic bubble sort: after pass i the largest i+1 values are in place.
for i in range(n - 1):
    for j in range(n - i - 1):
        if a[j] > a[j + 1]:
            # Swap adjacent out-of-order neighbours.
            a[j], a[j + 1] = a[j + 1], a[j]
print("Sorted array :", a)  # typo fix: "Sortes" -> "Sorted"
|
normal
|
{
"blob_id": "5c2a6802e89314c25f0264bbe2bc7ed2689a255a",
"index": 782,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n a[i] = int(a[i])\nfor i in range(n - 1):\n for j in range(n - i - 1):\n if a[j] > a[j + 1]:\n a[j], a[j + 1] = a[j + 1], a[j]\nprint('Sortes array :', a)\n",
"step-3": "a = input('Please enter the elements with spaces between them:').split()\nn = len(a)\nfor i in range(n):\n a[i] = int(a[i])\nfor i in range(n - 1):\n for j in range(n - i - 1):\n if a[j] > a[j + 1]:\n a[j], a[j + 1] = a[j + 1], a[j]\nprint('Sortes array :', a)\n",
"step-4": "a=input(\"Please enter the elements with spaces between them:\").split()\nn=len(a)\nfor i in range(n):\n a[i]=int(a[i])\nfor i in range(n-1):\n for j in range(n-i-1):\n if a[j]>a[j+1]:\n a[j],a[j+1]=a[j+1],a[j]\nprint(\"Sortes array :\",a)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def getMuseums():
    """Return a dict mapping every museum's ID_ENTIDAD to its comment count."""
    return {museo.ID_ENTIDAD: museo.comentario_set.count()
            for museo in Museo.objects.all()}
def getAccessibleMuseums():
    """Return a dict mapping each accessible museum's ID_ENTIDAD to its comment count."""
    return {museo.ID_ENTIDAD: museo.comentario_set.count()
            for museo in Museo.objects.all()
            if museo.ACCESIBILIDAD == '1'}
def getRanking():
    """Return (ID_ENTIDAD, commentCount) pairs for all museums, most commented first."""
    pairs = list(getMuseums().items())
    # Stable ascending sort followed by reverse() preserves the original
    # tie-breaking behaviour exactly.
    pairs.sort(key=operator.itemgetter(1))
    pairs.reverse()
    return pairs
def getAccessibleRanking():
    """Return (ID_ENTIDAD, commentCount) pairs for accessible museums, most commented first."""
    pairs = list(getAccessibleMuseums().items())
    # Stable ascending sort followed by reverse() preserves the original
    # tie-breaking behaviour exactly.
    pairs.sort(key=operator.itemgetter(1))
    pairs.reverse()
    return pairs
def _topFiveHtml(ranking, emptyMessage):
    """
    Build (html, markersJs) for up to the five most commented museums in
    `ranking`; `emptyMessage` is shown when there is nothing to list.
    """
    html = ''
    markers = ''
    if len(ranking) > 0:
        # Bug fix: iterating ranking[:5] instead of indexing range(5)
        # avoids an IndexError when fewer than five museums exist.
        for museumId, commentCount in ranking[:5]:
            if commentCount != 0:
                museum = Museo.objects.get(ID_ENTIDAD=museumId)
                html = (html + "<center><a class='titulos' href=" +
                    museum.CONTENT_URL + '>' + museum.NOMBRE +
                    '</a><br><b>' + str(museum.comentario_set.count()) +
                    ' Comentarios - ' + str(museum.like_set.count()) +
                    ' Likes</b></br></br>')
                html = (html + "<a class='direccion'>" + museum.CLASE_VIAL +
                    ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM + ', ' +
                    museum.LOCALIDAD + '</a></br></br>')
                html = (html + "<a class='info' href=" + '/museos/' +
                    museum.ID_ENTIDAD +
                    '/>Más información</a></center></br></br>')
                # Typo fix: the accessible branch compared LATITUD against
                # 'No disponbile', so museums without coordinates could
                # emit broken map markers.
                if (museum.LATITUD != 'No disponible' and
                        museum.LONGITUD != 'No disponible'):
                    markers = (markers + 'var ' + 'X' + museum.ID_ENTIDAD +
                        'info = new google.maps.InfoWindow({' +
                        "content:'<h1>" + museum.NOMBRE + "</h1>'});" +
                        'var ' + 'X' + museum.ID_ENTIDAD +
                        'marker = new google.maps.Marker({' +
                        'position: {lat: ' + museum.LATITUD + ', lng: ' +
                        museum.LONGITUD + ' },map: map});' + 'X' +
                        museum.ID_ENTIDAD +
                        "marker.addListener('click', function() {" +
                        'X' + museum.ID_ENTIDAD + 'info.open(map,' +
                        'X' + museum.ID_ENTIDAD + 'marker);' + '});')
        if ranking[0][1] == 0:
            html = (html + "<a class='titulos'><center>" + emptyMessage +
                '</center></a></br></br></div>')
        else:
            html = html + '</div>'
            html = (html +
                "<center><a class='info' href='/xml'>XML de la página</a></center>"
                )
    else:
        html = (html + "<a class='titulos'><center>" + emptyMessage +
            '</center></a></br></br></div>')
    return html, markers


@csrf_exempt
def mainPage(request):
    """
    Landing page: the five most commented museums (all of them, or only
    the accessible ones when the toggle form was posted), a Google Maps
    marker script for them, the per-user theme style and the list of
    registered users.
    """
    template = get_template('index.html')
    body = '<br>'
    markers = ''
    if request.method == 'GET' or (request.method == 'POST' and
            request.POST['accion'] == 'mostrar'):
        # "Show all museums" view, with a toggle to the accessible view.
        body = (body +
            "<center><form action='/' method='post'><input type='hidden' name='accion' value='ocultar'>"
            +
            "<input class='desplegable' type='submit' value='Mostrar museos accesibles'></form></center><div id='scroll'>"
            )
        html, markers = _topFiveHtml(getRanking(),
            'No hay museos con comentarios, ¡sé el primero en comentar!')
        body = body + html
    elif request.method == 'POST' and request.POST['accion'] == 'ocultar':
        # Accessible-only view, with a toggle back to all museums.
        body = (body +
            "<center><form action='/' method='post'><input type='hidden' name='accion' value='mostrar'>"
            +
            "<input class='desplegable' type='submit' value='Mostrar todos los museos'></form></center><div id='scroll'>"
            )
        html, markers = _topFiveHtml(getAccessibleRanking(),
            'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'
            )
        body = body + html
    style = ''
    if request.user.is_authenticated():
        login = 1
        # Per-user theme preferences with defaults when unset.
        try:
            color = Color.objects.get(usuario=request.user).color
        except Color.DoesNotExist:
            color = 'EEF4F8'
        try:
            letra = Letra.objects.get(usuario=request.user).letra
        except Letra.DoesNotExist:
            letra = '9'
        style = (
            "body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: "
            + letra + 'pt;background-color: #' + color + ';}')
    else:
        login = 0
    # Link list of every registered user's personal page.
    users = User.objects.all()
    userList = ''
    for user in users:
        try:
            title = Titulo.objects.get(usuario=user.username)
            userList = (userList + "<li><a href='/" + user.username + "'>" +
                title.titulo + ' - ' + user.username + '</a></li></br>')
        except Titulo.DoesNotExist:
            userList = (userList + "<li><a href='/" + user.username +
                "'>Página de " + user.username + '</a></li></br>')
    return HttpResponse(template.render(Context({'body': body, 'login':
        login, 'user': request.user, 'userList': userList, 'formato': style,
        'markers': markers})))
@csrf_exempt
def museumsPage(request):
    """
    List every museum (GET) or only the museums of the district chosen in
    the filter form (POST), together with their Google Maps markers and
    the district <option> list for the filter.
    """
    template = get_template('museos.html')
    if request.method == 'GET':
        museos = Museo.objects.all()
    elif request.method == 'POST':
        # Filter by the district picked in the form.
        distrito = Distrito.objects.get(nombre=request.POST['distrito'])
        museos = distrito.museo_set.all()
    # Cleanup: removed the unused local `i = 1` and stopped shadowing the
    # builtin name `list`.
    body = ''
    markers = ''
    for museo in museos:
        body = (body + "<center><a class='titulos'>" + museo.NOMBRE +
            '</a></br>')
        body = (body + "<a class='info' href=" + '/museos/' +
            museo.ID_ENTIDAD + '/>Más información</a></center></br></br>')
        # Only museums with real coordinates get a map marker.
        if (museo.LATITUD != 'No disponible' and
                museo.LONGITUD != 'No disponible'):
            markers = (markers + 'var ' + 'X' + museo.ID_ENTIDAD +
                'info = new google.maps.InfoWindow({' + "content:'<h1>" +
                museo.NOMBRE + "</h1>'});" + 'var ' + 'X' +
                museo.ID_ENTIDAD + 'marker = new google.maps.Marker({' +
                'position: {lat: ' + museo.LATITUD + ', lng: ' +
                museo.LONGITUD + ' },map: map});' + 'X' + museo.ID_ENTIDAD +
                "marker.addListener('click', function() {" + 'X' +
                museo.ID_ENTIDAD + 'info.open(map,' + 'X' +
                museo.ID_ENTIDAD + 'marker);' + '});')
    style = ''
    if request.user.is_authenticated():
        login = 1
        # Per-user theme preferences with defaults when unset.
        try:
            color = Color.objects.get(usuario=request.user).color
        except Color.DoesNotExist:
            color = 'EEF4F8'
        try:
            letra = Letra.objects.get(usuario=request.user).letra
        except Letra.DoesNotExist:
            letra = '9'
        style = (
            "body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: "
            + letra + 'pt;background-color: #' + color + ';}')
    else:
        login = 0
    # Build the <option> list for the district filter form.
    distritos = Distrito.objects.all()
    districtList = ''
    for distrito in distritos:
        districtList = (districtList + "<option value='" + distrito.nombre +
            "'>" + distrito.nombre + '</option>')
    return HttpResponse(template.render(Context({'body': body, 'login':
        login, 'user': request.user, 'districtList': districtList,
        'formato': style, 'markers': markers})))
<|reserved_special_token_0|>
@csrf_exempt
def loginPage(request):
    # Handle the login ("login" button) and registration ("registro"
    # button) forms of the main page; always re-render the main page as a
    # GET afterwards.
    if request.method == 'POST':
        if not request.user.is_authenticated() and 'login' in request.POST:
            # Plain login attempt with the submitted credentials.
            username = request.POST['Usuario']
            password = request.POST['Contraseña']
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
        elif not request.user.is_authenticated(
            ) and 'registro' in request.POST:
            username = request.POST['Usuario']
            password = request.POST['Contraseña']
            try:
                # If the username already exists, treat the registration
                # as a login attempt instead of creating a duplicate.
                user = User.objects.get(username=username)
                user = authenticate(username=username, password=password)
                if user is not None:
                    login(request, user)
            except User.DoesNotExist:
                # NOTE(review): the newly created user is not logged in
                # automatically -- confirm this is intended.
                user = User.objects.create_user(username=username, password
                    =password)
                user.save()
    # Force a GET so mainPage() does not try to re-process the form data.
    request.method = 'GET'
    return mainPage(request)
<|reserved_special_token_0|>
def XMLAccesiblePage(request):
    """
    Serve the (up to) five most commented accessible museums as an XML
    document rendered from the personalXML.xml template.
    """
    template = get_template('personalXML.xml')
    topList = []
    # Bug fix: iterating a slice instead of indexing a fixed range(5)
    # avoids an IndexError when fewer than five accessible museums exist.
    for museumId, commentCount in getAccessibleRanking()[:5]:
        if commentCount != 0:
            topList.append(Museo.objects.get(ID_ENTIDAD=museumId))
    return HttpResponse(template.render(Context({'favoriteList': topList,
        'user': ''})), content_type='text/xml')
<|reserved_special_token_0|>
def aboutPage(request):
    """Render the static 'about' page, themed for the logged-in user."""
    template = get_template('about.html')
    style = ''
    login = 0
    if request.user.is_authenticated():
        login = 1
        # Per-user theme preferences with defaults when unset.
        try:
            color = Color.objects.get(usuario=request.user).color
        except Color.DoesNotExist:
            color = 'EEF4F8'
        try:
            letra = Letra.objects.get(usuario=request.user).letra
        except Letra.DoesNotExist:
            letra = '9'
        style = (
            "body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: "
            + letra + 'pt;background-color: #' + color + ';}')
    return HttpResponse(template.render(Context({'login': login, 'user':
        request.user, 'formato': style})))
def updateDB(request):
    """
    Import museums from web/museos.xml into the database, creating any
    missing Distrito rows first. Museums that already exist (same
    ID_ENTIDAD) are left untouched. Renders the main page afterwards.
    """
    museos = parseXML('web/museos.xml')
    # Make sure every referenced district exists before linking museums.
    for museo in museos:
        try:
            Distrito.objects.get(nombre=museos[museo]['DISTRITO'])
        except Distrito.DoesNotExist:
            Distrito(nombre=museos[museo]['DISTRITO']).save()
    for museo in museos:
        datos = museos[museo]

        # Every field falls back to 'No disponible' when the key is
        # missing, replacing the original wall of 23 try/except KeyError
        # blocks. Assumes parseXML returns plain dicts -- TODO confirm.
        def campo(clave):
            return datos.get(clave, 'No disponible')
        try:
            # Mirrors the original: only a missing 'DISTRITO' key is
            # covered here, not a missing Distrito row.
            distrito = Distrito.objects.get(nombre=datos['DISTRITO'])
        except KeyError:
            distrito = 'No disponible'
        try:
            Museo.objects.get(ID_ENTIDAD=campo('ID-ENTIDAD'))
        except Museo.DoesNotExist:
            Museo(ID_ENTIDAD=campo('ID-ENTIDAD'), NOMBRE=campo('NOMBRE'),
                DESCRIPCION_ENTIDAD=campo('DESCRIPCION-ENTIDAD'),
                HORARIO=campo('HORARIO'), TRANSPORTE=campo('TRANSPORTE'),
                ACCESIBILIDAD=campo('ACCESIBILIDAD'),
                CONTENT_URL=campo('CONTENT-URL'),
                NOMBRE_VIA=campo('NOMBRE-VIA'),
                CLASE_VIAL=campo('CLASE-VIAL'),
                TIPO_NUM=campo('TIPO-NUM'), NUM=campo('NUM'),
                LOCALIDAD=campo('LOCALIDAD'), PROVINCIA=campo('PROVINCIA'),
                CODIGO_POSTAL=campo('CODIGO-POSTAL'), BARRIO=campo('BARRIO'),
                DISTRITO=distrito, COORDENADA_X=campo('COORDENADA-X'),
                COORDENADA_Y=campo('COORDENADA-Y'), LATITUD=campo('LATITUD'),
                LONGITUD=campo('LONGITUD'), TELEFONO=campo('TELEFONO'),
                FAX=campo('FAX'), EMAIL=campo('EMAIL'),
                TIPO=campo('TIPO')).save()
    return mainPage(request)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getMuseums():
    """Map every museum's ID_ENTIDAD to its number of comments."""
    return {m.ID_ENTIDAD: m.comentario_set.count()
            for m in Museo.objects.all()}
def getAccessibleMuseums():
    """Map ID_ENTIDAD to comment count, restricted to museums whose
    ACCESIBILIDAD flag is '1'."""
    return {m.ID_ENTIDAD: m.comentario_set.count()
            for m in Museo.objects.all()
            if m.ACCESIBILIDAD == '1'}
def getRanking():
    """Return (ID_ENTIDAD, comment count) pairs, most commented first."""
    # Sort ascending and reverse in place (not reverse=True) so the
    # relative order of equal counts matches the original behaviour.
    by_count = sorted(getMuseums().items(), key=operator.itemgetter(1))
    by_count.reverse()
    return by_count
def getAccessibleRanking():
    """Return (ID_ENTIDAD, comment count) pairs for accessible museums,
    most commented first."""
    # Sort ascending and reverse in place (not reverse=True) so the
    # relative order of equal counts matches the original behaviour.
    by_count = sorted(getAccessibleMuseums().items(),
                      key=operator.itemgetter(1))
    by_count.reverse()
    return by_count
def _museumCard(museum):
    """HTML card shown in the ranking for one museum."""
    card = ("<center><a class='titulos' href=" + museum.CONTENT_URL + '>' +
            museum.NOMBRE + '</a><br><b>' +
            str(museum.comentario_set.count()) + ' Comentarios - ' +
            str(museum.like_set.count()) + ' Likes</b></br></br>')
    card = (card + "<a class='direccion'>" + museum.CLASE_VIAL + ' ' +
            museum.NOMBRE_VIA + ', Nº ' + museum.NUM + ', ' +
            museum.LOCALIDAD + '</a></br></br>')
    return (card + "<a class='info' href=" + '/museos/' +
            museum.ID_ENTIDAD + '/>Más información</a></center></br></br>')


def _museumMarker(museum):
    """Google-Maps JS that places a clickable marker for *museum*, or ''
    when the museum has no usable coordinates."""
    if museum.LATITUD == 'No disponible' or museum.LONGITUD == 'No disponible':
        return ''
    tag = 'X' + museum.ID_ENTIDAD
    return ('var ' + tag + 'info = new google.maps.InfoWindow({' +
            "content:'<h1>" + museum.NOMBRE + "</h1>'});" +
            'var ' + tag + 'marker = new google.maps.Marker({' +
            'position: {lat: ' + museum.LATITUD + ', lng: ' +
            museum.LONGITUD + ' },map: map});' + tag +
            "marker.addListener('click', function() {" +
            tag + 'info.open(map,' + tag + 'marker);' + '});')


def _rankingSection(ranking, accion, boton, vacio):
    """Build the scrollable top-five section for *ranking*.

    *accion*/*boton* configure the toggle form; *vacio* is the message
    shown when no museum has comments yet.  Returns (html, markers_js).
    """
    html = ("<center><form action='/' method='post'>" +
            "<input type='hidden' name='accion' value='" + accion + "'>" +
            "<input class='desplegable' type='submit' value='" + boton +
            "'></form></center><div id='scroll'>")
    markers = ''
    if len(ranking) > 0:
        # Iterating over a slice (instead of indexing items 0..4) fixes
        # the IndexError the old code raised with fewer than five museums.
        for entidad, comentarios in ranking[:5]:
            if comentarios != 0:
                museum = Museo.objects.get(ID_ENTIDAD=entidad)
                html = html + _museumCard(museum)
                markers = markers + _museumMarker(museum)
        if ranking[0][1] == 0:
            html = (html + "<a class='titulos'><center>" + vacio +
                    '</center></a></br></br></div>')
        else:
            html = html + '</div>'
            html = (html +
                "<center><a class='info' href='/xml'>XML de la página</a></center>"
                )
    else:
        html = (html + "<a class='titulos'><center>" + vacio +
                '</center></a></br></br></div>')
    return html, markers


def _userFormat(request):
    """Return (login flag, CSS) from the viewer's stored preferences,
    falling back to the default colour/size when none are stored."""
    if not request.user.is_authenticated():
        return 0, ''
    try:
        color = Color.objects.get(usuario=request.user).color
    except Color.DoesNotExist:
        color = 'EEF4F8'
    try:
        letra = Letra.objects.get(usuario=request.user).letra
    except Letra.DoesNotExist:
        letra = '9'
    return 1, (
        "body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: "
        + letra + 'pt;background-color: #' + color + ';}')


def _userLinks():
    """One <li> link per registered user, using their custom page title
    when they have set one."""
    userList = ''
    for user in User.objects.all():
        try:
            title = Titulo.objects.get(usuario=user.username)
            userList = (userList + "<li><a href='/" + user.username + "'>" +
                        title.titulo + ' - ' + user.username +
                        '</a></li></br>')
        except Titulo.DoesNotExist:
            userList = (userList + "<li><a href='/" + user.username +
                        "'>Página de " + user.username + '</a></li></br>')
    return userList


@csrf_exempt
def mainPage(request):
    """Landing page: the five most-commented museums (all of them, or
    only the accessible ones when the toggle form posts accion='ocultar'),
    their map markers, the user directory and the viewer's saved format.

    Fixes over the previous version: the accessible branch compared
    LATITUD against the misspelled 'No disponbile' (emitting broken
    markers for unlocated museums), and both branches indexed
    ranking[0..4] unconditionally, crashing with fewer than five museums.
    """
    template = get_template('index.html')
    list = '<br>'
    markers = ''
    if request.method == 'GET' or request.method == 'POST' and request.POST[
            'accion'] == 'mostrar':
        seccion, markers = _rankingSection(
            getRanking(), 'ocultar', 'Mostrar museos accesibles',
            'No hay museos con comentarios, ¡sé el primero en comentar!')
        list = list + seccion
    elif request.method == 'POST' and request.POST['accion'] == 'ocultar':
        seccion, markers = _rankingSection(
            getAccessibleRanking(), 'mostrar', 'Mostrar todos los museos',
            'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'
            )
        list = list + seccion
    login, style = _userFormat(request)
    userList = _userLinks()
    return HttpResponse(template.render(Context({'body': list, 'login':
        login, 'user': request.user, 'userList': userList, 'formato': style,
        'markers': markers})))
@csrf_exempt
def museumsPage(request):
    """List every museum (GET) or only those of the district chosen in
    the submitted form (POST), with one map marker per museum that has
    real coordinates.

    Fix: drops the unused counter ``i`` the previous version initialised.
    """
    template = get_template('museos.html')
    if request.method == 'GET':
        museos = Museo.objects.all()
    elif request.method == 'POST':
        # The filter form posts the district name chosen from the <select>.
        distrito = Distrito.objects.get(nombre=request.POST['distrito'])
        museos = distrito.museo_set.all()
    list = ''
    markers = ''
    for museo in museos:
        list = (list + "<center><a class='titulos'>" + museo.NOMBRE +
            '</a></br>')
        list = (list + "<a class='info' href=" + '/museos/' + museo.
            ID_ENTIDAD + '/>Más información</a></center></br></br>')
        # Only geolocated museums get a Google-Maps marker.
        if (museo.LATITUD != 'No disponible' and museo.LONGITUD !=
                'No disponible'):
            markers = (markers + 'var ' + 'X' + museo.ID_ENTIDAD +
                'info = new google.maps.InfoWindow({' + "content:'<h1>" +
                museo.NOMBRE + "</h1>'});" + 'var ' + 'X' + museo.
                ID_ENTIDAD + 'marker = new google.maps.Marker({' +
                'position: {lat: ' + museo.LATITUD + ', lng: ' + museo.
                LONGITUD + ' },map: map});' + 'X' + museo.ID_ENTIDAD +
                "marker.addListener('click', function() {" + 'X' + museo.
                ID_ENTIDAD + 'info.open(map,' + 'X' + museo.ID_ENTIDAD +
                'marker);' + '});')
    # Viewer formatting preferences (defaults when unset).
    style = ''
    if request.user.is_authenticated():
        login = 1
        try:
            color = Color.objects.get(usuario=request.user)
            color = color.color
        except Color.DoesNotExist:
            color = 'EEF4F8'
        try:
            letra = Letra.objects.get(usuario=request.user)
            letra = letra.letra
        except Letra.DoesNotExist:
            letra = '9'
        style = (
            "body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: "
            + letra + 'pt;background-color: #' + color + ';}')
    else:
        login = 0
    # One <option> per district for the filter form.
    distritos = Distrito.objects.all()
    districtList = ''
    for distrito in distritos:
        districtList = (districtList + "<option value='" + distrito.nombre +
            "'>" + distrito.nombre + '</option>')
    return HttpResponse(template.render(Context({'body': list, 'login':
        login, 'user': request.user, 'districtList': districtList,
        'formato': style, 'markers': markers})))
<|reserved_special_token_0|>
@csrf_exempt
def loginPage(request):
    """Handle the login/registration form, then render the main page.

    A 'login' POST authenticates an anonymous visitor; a 'registro' POST
    creates the account when the username is free and otherwise behaves
    like a plain login attempt.
    """
    if request.method == 'POST' and not request.user.is_authenticated():
        if 'login' in request.POST:
            nombre = request.POST['Usuario']
            clave = request.POST['Contraseña']
            cuenta = authenticate(username=nombre, password=clave)
            if cuenta is not None:
                login(request, cuenta)
        elif 'registro' in request.POST:
            nombre = request.POST['Usuario']
            clave = request.POST['Contraseña']
            try:
                User.objects.get(username=nombre)
            except User.DoesNotExist:
                # Unknown username: create the account.
                nueva = User.objects.create_user(username=nombre,
                                                 password=clave)
                nueva.save()
            else:
                # Existing username: fall back to a normal login attempt.
                cuenta = authenticate(username=nombre, password=clave)
                if cuenta is not None:
                    login(request, cuenta)
    # Render the landing page as a plain GET regardless of the outcome.
    request.method = 'GET'
    return mainPage(request)
<|reserved_special_token_0|>
def userPage(request, user, number):
    """Render *user*'s personal page: their favourite museums, five per
    page (page *number*), with pagination links and map markers.

    NOTE(review): the ``for user in users`` loop further down rebinds the
    *user* parameter, and a *number* beyond the last page raises
    IndexError on the ``split(';')`` lookup — confirm callers never pass
    one.
    """
    # Default to the first page when the URL carries no page number.
    if number == None:
        number = 1
    template = get_template('personal.html')
    listTotal = ''
    favoritos = Favorito.objects.filter(usuario=user)
    group = range(5)
    count = 0
    markers = ''
    # Build one HTML card (and map marker) per favourite; a ';' is added
    # after every fifth card so the string can be split into pages below.
    for favorito in favoritos:
        count = count + 1
        museum = Museo.objects.get(NOMBRE=favorito.museo)
        listTotal = (listTotal + "<a class='titulos' href=" + museum.
            CONTENT_URL + '>' + museum.NOMBRE + '</a><br><b>' + str(museum.
            comentario_set.count()) + ' Comentarios - ' + str(museum.
            like_set.count()) + ' Likes</b></br></br>')
        listTotal = (listTotal + "<a class='direccion'>" + museum.
            CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM +
            ', ' + museum.LOCALIDAD + '</a></br></br>')
        # The +2h offset presumably converts a stored UTC timestamp to
        # local time — TODO confirm the project's TIME_ZONE setting.
        listTotal = (listTotal + "<a class='info' href=" + '/museos/' +
            museum.ID_ENTIDAD +
            '/>Más información</a>  <b>Fecha de guardado:' + (datetime.
            timedelta(hours=2) + favorito.fecha).strftime(
            '%H:%M:%S %d-%m-%Y') + '</b></br></br></br>')
        if (museum.LATITUD != 'No disponible' and museum.LONGITUD !=
            'No disponible'):
            markers = (markers + 'var ' + 'X' + museum.ID_ENTIDAD +
                'info = new google.maps.InfoWindow({' + "content:'<h1>" +
                museum.NOMBRE + "</h1>'});" + 'var ' + 'X' + museum.
                ID_ENTIDAD + 'marker = new google.maps.Marker({' +
                'position: {lat: ' + museum.LATITUD + ', lng: ' + museum.
                LONGITUD + ' },map: map});' + 'X' + museum.ID_ENTIDAD +
                "marker.addListener('click', function() {" + 'X' + museum.
                ID_ENTIDAD + 'info.open(map,' + 'X' + museum.ID_ENTIDAD +
                'marker);' + '});')
        if count % 5 == 0:
            listTotal = listTotal + ';'
    # Pick the requested page's chunk of cards.
    group = listTotal.split(';')[int(number) - 1]
    list = ''
    if favoritos.count() % 5 == 0:
        pages = int(favoritos.count() / 5)
    else:
        pages = int(favoritos.count() / 5) + 1
    pagesRange = range(pages)
    # Emit « prev / numbered / next » pagination links when needed.
    if pages > 1:
        list = '<br>'
        if int(number) > 1:
            list = (list + "<center><div class='pagination'><a href='/" +
                user + '/' + str(int(number) - 1) + "'>«</a>")
        else:
            list = (list + "<center><div class='pagination'><a href='/" +
                user + '/' + str(number) + "'>«</a>")
        for page in pagesRange:
            if page == int(number) - 1:
                list = list + "<a class='active' href='/" + user + '/' + str(
                    page + 1) + "'>" + str(page + 1) + '</a>'
            else:
                list = list + "<a href='/" + user + '/' + str(page + 1
                    ) + "'>" + str(page + 1) + '</a>'
        if int(number) == pages:
            list = list + "<a href='/" + user + '/' + str(number
                ) + "'>»</a></div></center></br>"
        else:
            list = list + "<a href='/" + user + '/' + str(int(number) + 1
                ) + "'>»</a></div></center></br>"
    list = list + "<div id='scroll'><center>"
    # group is a string, so this appends it character by character.
    for item in group:
        list = list + item
    if (list == '' or list == "<div id='scroll'><center>"
        ) and user != 'AnonymousUser':
        list = ("<center><a class='titulos'>" +
            'Para que aparezcan museos en esta página, ' + user +
            ' tiene que añadirlos.' + '</a></center></br></br>')
    elif (list == '' or list == "<div id='scroll'><center>"
        ) and user == 'AnonymousUser':
        list = ("<center><a class='titulos'>" +
            'Para ver tu página personal, primero tienes que loguearte.' +
            '</a></center></br></br>')
    else:
        list = (list + "<center><a class='info' href='/" + user +
            "/xml'>XML del usuario</a></center>")
        list = list + '</center></div>'
    # Sidebar directory of registered users (rebinds the user parameter).
    users = User.objects.all()
    userList = ''
    for user in users:
        try:
            title = Titulo.objects.get(usuario=user.username)
            userList = (userList + "<li><a href='/" + user.username + "'>" +
                title.titulo + ' - ' + user.username + '</a></li></br>')
        except Titulo.DoesNotExist:
            userList = (userList + "<li><a href='/" + user.username +
                "'>Página de " + user.username + '</a></li></br>')
    # Viewer formatting preferences (defaults when unset).
    style = ''
    if request.user.is_authenticated():
        login = 1
        try:
            color = Color.objects.get(usuario=request.user)
            color = color.color
        except Color.DoesNotExist:
            color = 'EEF4F8'
        try:
            letra = Letra.objects.get(usuario=request.user)
            letra = letra.letra
        except Letra.DoesNotExist:
            letra = '9'
        style = (
            "body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: "
            + letra + 'pt;background-color: #' + color + ';}')
    else:
        login = 0
    return HttpResponse(template.render(Context({'body': list, 'login':
        login, 'user': request.user, 'userList': userList, 'formato': style,
        'markers': markers})))
<|reserved_special_token_0|>
def XMLAccesiblePage(request):
    """Serve the top five commented accessible museums as an XML document.

    Fix: iterate over the first five ranking entries instead of blindly
    indexing positions 0..4, which raised IndexError whenever fewer than
    five accessible museums existed.
    """
    template = get_template('personalXML.xml')
    user = ''
    topList = []
    for entidad, comentarios in getAccessibleRanking()[:5]:
        if comentarios != 0:
            topList = topList + [Museo.objects.get(ID_ENTIDAD=entidad)]
    return HttpResponse(template.render(Context({'favoriteList': topList,
        'user': user})), content_type='text/xml')
<|reserved_special_token_0|>
def aboutPage(request):
    """Render the static "about" page with the viewer's saved formatting."""
    template = get_template('about.html')
    login = 0
    style = ''
    if request.user.is_authenticated():
        login = 1
        try:
            fondo = Color.objects.get(usuario=request.user).color
        except Color.DoesNotExist:
            fondo = 'EEF4F8'  # default background colour
        try:
            tamano = Letra.objects.get(usuario=request.user).letra
        except Letra.DoesNotExist:
            tamano = '9'  # default font size in points
        style = ("body{font-family: 'Helvetica', sans-serif;" +
                 'color: #444444;font-size: ' + tamano +
                 'pt;background-color: #' + fondo + ';}')
    return HttpResponse(template.render(Context(
        {'login': login, 'user': request.user, 'formato': style})))
def _campo(datos, clave):
    """Return datos[clave], or the 'No disponible' sentinel when missing."""
    return datos.get(clave, 'No disponible')


def updateDB(request):
    """Import web/museos.xml into the database, then render the main page.

    First creates any district named in the XML that does not exist yet,
    then inserts every museum whose ID-ENTIDAD is not already stored.
    Fields missing from the XML are stored as 'No disponible'.  The 23
    copy-pasted try/except KeyError blocks of the previous version are
    collapsed into a single defaulting lookup per field.
    """
    museos = parseXML('web/museos.xml')
    for museo in museos:
        # Make sure every district named in the XML exists.
        try:
            distrito = Distrito.objects.get(nombre=museos[museo]['DISTRITO'])
        except Distrito.DoesNotExist:
            distrito = Distrito(nombre=museos[museo]['DISTRITO'])
            distrito.save()
    for museo in museos:
        datos = museos[museo]
        if 'DISTRITO' in datos:
            # As before, only a missing key falls back to the sentinel;
            # an unknown district still raises Distrito.DoesNotExist.
            distrito = Distrito.objects.get(nombre=datos['DISTRITO'])
        else:
            distrito = 'No disponible'
        identidad = _campo(datos, 'ID-ENTIDAD')
        try:
            Museo.objects.get(ID_ENTIDAD=identidad)
        except Museo.DoesNotExist:
            # Not stored yet: create it with every field defaulted.
            Museo(ID_ENTIDAD=identidad, NOMBRE=_campo(datos, 'NOMBRE'),
                DESCRIPCION_ENTIDAD=_campo(datos, 'DESCRIPCION-ENTIDAD'),
                HORARIO=_campo(datos, 'HORARIO'),
                TRANSPORTE=_campo(datos, 'TRANSPORTE'),
                ACCESIBILIDAD=_campo(datos, 'ACCESIBILIDAD'),
                CONTENT_URL=_campo(datos, 'CONTENT-URL'),
                NOMBRE_VIA=_campo(datos, 'NOMBRE-VIA'),
                CLASE_VIAL=_campo(datos, 'CLASE-VIAL'),
                TIPO_NUM=_campo(datos, 'TIPO-NUM'),
                NUM=_campo(datos, 'NUM'),
                LOCALIDAD=_campo(datos, 'LOCALIDAD'),
                PROVINCIA=_campo(datos, 'PROVINCIA'),
                CODIGO_POSTAL=_campo(datos, 'CODIGO-POSTAL'),
                BARRIO=_campo(datos, 'BARRIO'), DISTRITO=distrito,
                COORDENADA_X=_campo(datos, 'COORDENADA-X'),
                COORDENADA_Y=_campo(datos, 'COORDENADA-Y'),
                LATITUD=_campo(datos, 'LATITUD'),
                LONGITUD=_campo(datos, 'LONGITUD'),
                TELEFONO=_campo(datos, 'TELEFONO'),
                FAX=_campo(datos, 'FAX'), EMAIL=_campo(datos, 'EMAIL'),
                TIPO=_campo(datos, 'TIPO')).save()
    return mainPage(request)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getMuseums():
    """Map every museum's ID_ENTIDAD to its number of comments."""
    return {m.ID_ENTIDAD: m.comentario_set.count()
            for m in Museo.objects.all()}
def getAccessibleMuseums():
    """Map ID_ENTIDAD to comment count, restricted to museums whose
    ACCESIBILIDAD flag is '1'."""
    return {m.ID_ENTIDAD: m.comentario_set.count()
            for m in Museo.objects.all()
            if m.ACCESIBILIDAD == '1'}
def getRanking():
    """Return (ID_ENTIDAD, comment count) pairs, most commented first."""
    # Sort ascending and reverse in place (not reverse=True) so the
    # relative order of equal counts matches the original behaviour.
    by_count = sorted(getMuseums().items(), key=operator.itemgetter(1))
    by_count.reverse()
    return by_count
def getAccessibleRanking():
    """Return (ID_ENTIDAD, comment count) pairs for accessible museums,
    most commented first."""
    # Sort ascending and reverse in place (not reverse=True) so the
    # relative order of equal counts matches the original behaviour.
    by_count = sorted(getAccessibleMuseums().items(),
                      key=operator.itemgetter(1))
    by_count.reverse()
    return by_count
def _museumCard(museum):
    """HTML card shown in the ranking for one museum."""
    card = ("<center><a class='titulos' href=" + museum.CONTENT_URL + '>' +
            museum.NOMBRE + '</a><br><b>' +
            str(museum.comentario_set.count()) + ' Comentarios - ' +
            str(museum.like_set.count()) + ' Likes</b></br></br>')
    card = (card + "<a class='direccion'>" + museum.CLASE_VIAL + ' ' +
            museum.NOMBRE_VIA + ', Nº ' + museum.NUM + ', ' +
            museum.LOCALIDAD + '</a></br></br>')
    return (card + "<a class='info' href=" + '/museos/' +
            museum.ID_ENTIDAD + '/>Más información</a></center></br></br>')


def _museumMarker(museum):
    """Google-Maps JS that places a clickable marker for *museum*, or ''
    when the museum has no usable coordinates."""
    if museum.LATITUD == 'No disponible' or museum.LONGITUD == 'No disponible':
        return ''
    tag = 'X' + museum.ID_ENTIDAD
    return ('var ' + tag + 'info = new google.maps.InfoWindow({' +
            "content:'<h1>" + museum.NOMBRE + "</h1>'});" +
            'var ' + tag + 'marker = new google.maps.Marker({' +
            'position: {lat: ' + museum.LATITUD + ', lng: ' +
            museum.LONGITUD + ' },map: map});' + tag +
            "marker.addListener('click', function() {" +
            tag + 'info.open(map,' + tag + 'marker);' + '});')


def _rankingSection(ranking, accion, boton, vacio):
    """Build the scrollable top-five section for *ranking*.

    *accion*/*boton* configure the toggle form; *vacio* is the message
    shown when no museum has comments yet.  Returns (html, markers_js).
    """
    html = ("<center><form action='/' method='post'>" +
            "<input type='hidden' name='accion' value='" + accion + "'>" +
            "<input class='desplegable' type='submit' value='" + boton +
            "'></form></center><div id='scroll'>")
    markers = ''
    if len(ranking) > 0:
        # Iterating over a slice (instead of indexing items 0..4) fixes
        # the IndexError the old code raised with fewer than five museums.
        for entidad, comentarios in ranking[:5]:
            if comentarios != 0:
                museum = Museo.objects.get(ID_ENTIDAD=entidad)
                html = html + _museumCard(museum)
                markers = markers + _museumMarker(museum)
        if ranking[0][1] == 0:
            html = (html + "<a class='titulos'><center>" + vacio +
                    '</center></a></br></br></div>')
        else:
            html = html + '</div>'
            html = (html +
                "<center><a class='info' href='/xml'>XML de la página</a></center>"
                )
    else:
        html = (html + "<a class='titulos'><center>" + vacio +
                '</center></a></br></br></div>')
    return html, markers


def _userFormat(request):
    """Return (login flag, CSS) from the viewer's stored preferences,
    falling back to the default colour/size when none are stored."""
    if not request.user.is_authenticated():
        return 0, ''
    try:
        color = Color.objects.get(usuario=request.user).color
    except Color.DoesNotExist:
        color = 'EEF4F8'
    try:
        letra = Letra.objects.get(usuario=request.user).letra
    except Letra.DoesNotExist:
        letra = '9'
    return 1, (
        "body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: "
        + letra + 'pt;background-color: #' + color + ';}')


def _userLinks():
    """One <li> link per registered user, using their custom page title
    when they have set one."""
    userList = ''
    for user in User.objects.all():
        try:
            title = Titulo.objects.get(usuario=user.username)
            userList = (userList + "<li><a href='/" + user.username + "'>" +
                        title.titulo + ' - ' + user.username +
                        '</a></li></br>')
        except Titulo.DoesNotExist:
            userList = (userList + "<li><a href='/" + user.username +
                        "'>Página de " + user.username + '</a></li></br>')
    return userList


@csrf_exempt
def mainPage(request):
    """Landing page: the five most-commented museums (all of them, or
    only the accessible ones when the toggle form posts accion='ocultar'),
    their map markers, the user directory and the viewer's saved format.

    Fixes over the previous version: the accessible branch compared
    LATITUD against the misspelled 'No disponbile' (emitting broken
    markers for unlocated museums), and both branches indexed
    ranking[0..4] unconditionally, crashing with fewer than five museums.
    """
    template = get_template('index.html')
    list = '<br>'
    markers = ''
    if request.method == 'GET' or request.method == 'POST' and request.POST[
            'accion'] == 'mostrar':
        seccion, markers = _rankingSection(
            getRanking(), 'ocultar', 'Mostrar museos accesibles',
            'No hay museos con comentarios, ¡sé el primero en comentar!')
        list = list + seccion
    elif request.method == 'POST' and request.POST['accion'] == 'ocultar':
        seccion, markers = _rankingSection(
            getAccessibleRanking(), 'mostrar', 'Mostrar todos los museos',
            'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'
            )
        list = list + seccion
    login, style = _userFormat(request)
    userList = _userLinks()
    return HttpResponse(template.render(Context({'body': list, 'login':
        login, 'user': request.user, 'userList': userList, 'formato': style,
        'markers': markers})))
@csrf_exempt
def museumsPage(request):
    """List every museum (GET) or only those of the district chosen in
    the submitted form (POST), with one map marker per museum that has
    real coordinates.

    Fix: drops the unused counter ``i`` the previous version initialised.
    """
    template = get_template('museos.html')
    if request.method == 'GET':
        museos = Museo.objects.all()
    elif request.method == 'POST':
        # The filter form posts the district name chosen from the <select>.
        distrito = Distrito.objects.get(nombre=request.POST['distrito'])
        museos = distrito.museo_set.all()
    list = ''
    markers = ''
    for museo in museos:
        list = (list + "<center><a class='titulos'>" + museo.NOMBRE +
            '</a></br>')
        list = (list + "<a class='info' href=" + '/museos/' + museo.
            ID_ENTIDAD + '/>Más información</a></center></br></br>')
        # Only geolocated museums get a Google-Maps marker.
        if (museo.LATITUD != 'No disponible' and museo.LONGITUD !=
                'No disponible'):
            markers = (markers + 'var ' + 'X' + museo.ID_ENTIDAD +
                'info = new google.maps.InfoWindow({' + "content:'<h1>" +
                museo.NOMBRE + "</h1>'});" + 'var ' + 'X' + museo.
                ID_ENTIDAD + 'marker = new google.maps.Marker({' +
                'position: {lat: ' + museo.LATITUD + ', lng: ' + museo.
                LONGITUD + ' },map: map});' + 'X' + museo.ID_ENTIDAD +
                "marker.addListener('click', function() {" + 'X' + museo.
                ID_ENTIDAD + 'info.open(map,' + 'X' + museo.ID_ENTIDAD +
                'marker);' + '});')
    # Viewer formatting preferences (defaults when unset).
    style = ''
    if request.user.is_authenticated():
        login = 1
        try:
            color = Color.objects.get(usuario=request.user)
            color = color.color
        except Color.DoesNotExist:
            color = 'EEF4F8'
        try:
            letra = Letra.objects.get(usuario=request.user)
            letra = letra.letra
        except Letra.DoesNotExist:
            letra = '9'
        style = (
            "body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: "
            + letra + 'pt;background-color: #' + color + ';}')
    else:
        login = 0
    # One <option> per district for the filter form.
    distritos = Distrito.objects.all()
    districtList = ''
    for distrito in distritos:
        districtList = (districtList + "<option value='" + distrito.nombre +
            "'>" + distrito.nombre + '</option>')
    return HttpResponse(template.render(Context({'body': list, 'login':
        login, 'user': request.user, 'districtList': districtList,
        'formato': style, 'markers': markers})))
<|reserved_special_token_0|>
@csrf_exempt
def loginPage(request):
    """Handle the login/registration form, then render the main page.

    A 'login' POST authenticates an anonymous visitor; a 'registro' POST
    creates the account when the username is free and otherwise behaves
    like a plain login attempt.
    """
    if request.method == 'POST' and not request.user.is_authenticated():
        if 'login' in request.POST:
            nombre = request.POST['Usuario']
            clave = request.POST['Contraseña']
            cuenta = authenticate(username=nombre, password=clave)
            if cuenta is not None:
                login(request, cuenta)
        elif 'registro' in request.POST:
            nombre = request.POST['Usuario']
            clave = request.POST['Contraseña']
            try:
                User.objects.get(username=nombre)
            except User.DoesNotExist:
                # Unknown username: create the account.
                nueva = User.objects.create_user(username=nombre,
                                                 password=clave)
                nueva.save()
            else:
                # Existing username: fall back to a normal login attempt.
                cuenta = authenticate(username=nombre, password=clave)
                if cuenta is not None:
                    login(request, cuenta)
    # Render the landing page as a plain GET regardless of the outcome.
    request.method = 'GET'
    return mainPage(request)
def logoutPage(request):
    """Log the current user out, then render the main page."""
    logout(request)
    return mainPage(request)
def userPage(request, user, number):
    """Render *user*'s personal page: their favourite museums, five per
    page (page *number*), with pagination links and map markers.

    NOTE(review): the ``for user in users`` loop further down rebinds the
    *user* parameter, and a *number* beyond the last page raises
    IndexError on the ``split(';')`` lookup — confirm callers never pass
    one.
    """
    # Default to the first page when the URL carries no page number.
    if number == None:
        number = 1
    template = get_template('personal.html')
    listTotal = ''
    favoritos = Favorito.objects.filter(usuario=user)
    group = range(5)
    count = 0
    markers = ''
    # Build one HTML card (and map marker) per favourite; a ';' is added
    # after every fifth card so the string can be split into pages below.
    for favorito in favoritos:
        count = count + 1
        museum = Museo.objects.get(NOMBRE=favorito.museo)
        listTotal = (listTotal + "<a class='titulos' href=" + museum.
            CONTENT_URL + '>' + museum.NOMBRE + '</a><br><b>' + str(museum.
            comentario_set.count()) + ' Comentarios - ' + str(museum.
            like_set.count()) + ' Likes</b></br></br>')
        listTotal = (listTotal + "<a class='direccion'>" + museum.
            CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM +
            ', ' + museum.LOCALIDAD + '</a></br></br>')
        # The +2h offset presumably converts a stored UTC timestamp to
        # local time — TODO confirm the project's TIME_ZONE setting.
        listTotal = (listTotal + "<a class='info' href=" + '/museos/' +
            museum.ID_ENTIDAD +
            '/>Más información</a>  <b>Fecha de guardado:' + (datetime.
            timedelta(hours=2) + favorito.fecha).strftime(
            '%H:%M:%S %d-%m-%Y') + '</b></br></br></br>')
        if (museum.LATITUD != 'No disponible' and museum.LONGITUD !=
            'No disponible'):
            markers = (markers + 'var ' + 'X' + museum.ID_ENTIDAD +
                'info = new google.maps.InfoWindow({' + "content:'<h1>" +
                museum.NOMBRE + "</h1>'});" + 'var ' + 'X' + museum.
                ID_ENTIDAD + 'marker = new google.maps.Marker({' +
                'position: {lat: ' + museum.LATITUD + ', lng: ' + museum.
                LONGITUD + ' },map: map});' + 'X' + museum.ID_ENTIDAD +
                "marker.addListener('click', function() {" + 'X' + museum.
                ID_ENTIDAD + 'info.open(map,' + 'X' + museum.ID_ENTIDAD +
                'marker);' + '});')
        if count % 5 == 0:
            listTotal = listTotal + ';'
    # Pick the requested page's chunk of cards.
    group = listTotal.split(';')[int(number) - 1]
    list = ''
    if favoritos.count() % 5 == 0:
        pages = int(favoritos.count() / 5)
    else:
        pages = int(favoritos.count() / 5) + 1
    pagesRange = range(pages)
    # Emit « prev / numbered / next » pagination links when needed.
    if pages > 1:
        list = '<br>'
        if int(number) > 1:
            list = (list + "<center><div class='pagination'><a href='/" +
                user + '/' + str(int(number) - 1) + "'>«</a>")
        else:
            list = (list + "<center><div class='pagination'><a href='/" +
                user + '/' + str(number) + "'>«</a>")
        for page in pagesRange:
            if page == int(number) - 1:
                list = list + "<a class='active' href='/" + user + '/' + str(
                    page + 1) + "'>" + str(page + 1) + '</a>'
            else:
                list = list + "<a href='/" + user + '/' + str(page + 1
                    ) + "'>" + str(page + 1) + '</a>'
        if int(number) == pages:
            list = list + "<a href='/" + user + '/' + str(number
                ) + "'>»</a></div></center></br>"
        else:
            list = list + "<a href='/" + user + '/' + str(int(number) + 1
                ) + "'>»</a></div></center></br>"
    list = list + "<div id='scroll'><center>"
    # group is a string, so this appends it character by character.
    for item in group:
        list = list + item
    if (list == '' or list == "<div id='scroll'><center>"
        ) and user != 'AnonymousUser':
        list = ("<center><a class='titulos'>" +
            'Para que aparezcan museos en esta página, ' + user +
            ' tiene que añadirlos.' + '</a></center></br></br>')
    elif (list == '' or list == "<div id='scroll'><center>"
        ) and user == 'AnonymousUser':
        list = ("<center><a class='titulos'>" +
            'Para ver tu página personal, primero tienes que loguearte.' +
            '</a></center></br></br>')
    else:
        list = (list + "<center><a class='info' href='/" + user +
            "/xml'>XML del usuario</a></center>")
    list = list + '</center></div>'
    # Sidebar directory of registered users (rebinds the user parameter).
    users = User.objects.all()
    userList = ''
    for user in users:
        try:
            title = Titulo.objects.get(usuario=user.username)
            userList = (userList + "<li><a href='/" + user.username + "'>" +
                title.titulo + ' - ' + user.username + '</a></li></br>')
        except Titulo.DoesNotExist:
            userList = (userList + "<li><a href='/" + user.username +
                "'>Página de " + user.username + '</a></li></br>')
    # Viewer formatting preferences (defaults when unset).
    style = ''
    if request.user.is_authenticated():
        login = 1
        try:
            color = Color.objects.get(usuario=request.user)
            color = color.color
        except Color.DoesNotExist:
            color = 'EEF4F8'
        try:
            letra = Letra.objects.get(usuario=request.user)
            letra = letra.letra
        except Letra.DoesNotExist:
            letra = '9'
        style = (
            "body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: "
            + letra + 'pt;background-color: #' + color + ';}')
    else:
        login = 0
    return HttpResponse(template.render(Context({'body': list, 'login':
        login, 'user': request.user, 'userList': userList, 'formato': style,
        'markers': markers})))
def userXMLPage(request, user):
    """Serve *user*'s favourite museums as an XML document."""
    template = get_template('personalXML.xml')
    museums = [f.museo for f in Favorito.objects.filter(usuario=user)]
    context = Context({'favoriteList': museums, 'user': user})
    return HttpResponse(template.render(context), content_type='text/xml')
<|reserved_special_token_0|>
def XMLAccesiblePage(request):
    """Serve the top five commented accessible museums as an XML document.

    Fix: iterate over the first five ranking entries instead of blindly
    indexing positions 0..4, which raised IndexError whenever fewer than
    five accessible museums existed.
    """
    template = get_template('personalXML.xml')
    user = ''
    topList = []
    for entidad, comentarios in getAccessibleRanking()[:5]:
        if comentarios != 0:
            topList = topList + [Museo.objects.get(ID_ENTIDAD=entidad)]
    return HttpResponse(template.render(Context({'favoriteList': topList,
        'user': user})), content_type='text/xml')
@csrf_exempt
def preferencesPage(request, user):
    """Show and store *user*'s display preferences.

    A POST carrying a 'color', 'tamaño' or 'título' field updates (or
    creates) the matching per-user record; the page is then rendered
    with the viewer's current formatting.
    """
    template = get_template('preferencias.html')
    if request.method == 'POST':
        # Each branch updates the existing record or creates a new one,
        # then saves it.
        if 'color' in request.POST:
            try:
                color = Color.objects.get(usuario=user)
                color.color = request.POST['color']
            except Color.DoesNotExist:
                color = Color(usuario=user, color=request.POST['color'])
            color.save()
        elif 'tamaño' in request.POST:
            try:
                size = Letra.objects.get(usuario=user)
                size.letra = request.POST['tamaño']
            except Letra.DoesNotExist:
                size = Letra(usuario=user, letra=request.POST['tamaño'])
            size.save()
        elif 'título' in request.POST:
            try:
                title = Titulo.objects.get(usuario=user)
                title.titulo = request.POST['título']
            except Titulo.DoesNotExist:
                title = Titulo(usuario=user, titulo=request.POST['título'])
            title.save()
    # Viewer formatting preferences (defaults when unset).
    style = ''
    if request.user.is_authenticated():
        login = 1
        try:
            color = Color.objects.get(usuario=request.user)
            color = color.color
        except Color.DoesNotExist:
            color = 'EEF4F8'
        try:
            letra = Letra.objects.get(usuario=request.user)
            letra = letra.letra
        except Letra.DoesNotExist:
            letra = '9'
        style = (
            "body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: "
            + letra + 'pt;background-color: #' + color + ';}')
    else:
        login = 0
    return HttpResponse(template.render(Context({'login': login, 'user':
        user, 'formato': style})))
def aboutPage(request):
    """Render the static "about" page with the viewer's saved formatting."""
    template = get_template('about.html')
    login = 0
    style = ''
    if request.user.is_authenticated():
        login = 1
        try:
            fondo = Color.objects.get(usuario=request.user).color
        except Color.DoesNotExist:
            fondo = 'EEF4F8'  # default background colour
        try:
            tamano = Letra.objects.get(usuario=request.user).letra
        except Letra.DoesNotExist:
            tamano = '9'  # default font size in points
        style = ("body{font-family: 'Helvetica', sans-serif;" +
                 'color: #444444;font-size: ' + tamano +
                 'pt;background-color: #' + fondo + ';}')
    return HttpResponse(template.render(Context(
        {'login': login, 'user': request.user, 'formato': style})))
def _campo(datos, clave):
    """Return XML field *clave* of *datos*, or the placeholder for missing data."""
    return datos.get(clave, 'No disponible')


def updateDB(request):
    """Reload the museum catalogue from ``web/museos.xml`` into the database.

    Creates any missing ``Distrito`` rows first, then inserts every museum
    that is not already stored (existing rows are left untouched).  Fields
    absent from the XML are stored as ``'No disponible'``.  Finally renders
    the main page.

    This replaces the original's 24 copy-pasted ``try/except KeyError``
    blocks with ``dict.get`` lookups; behaviour is unchanged.
    """
    museos = parseXML('web/museos.xml')
    # First pass: make sure every district referenced by the XML exists.
    for museo in museos:
        try:
            distrito = Distrito.objects.get(nombre=museos[museo]['DISTRITO'])
        except Distrito.DoesNotExist:
            distrito = Distrito(nombre=museos[museo]['DISTRITO'])
            distrito.save()
    # Second pass: create the museums themselves.
    for museo in museos:
        datos = museos[museo]
        try:
            # The district key may legitimately be missing from a record.
            distrito = Distrito.objects.get(nombre=datos['DISTRITO'])
        except KeyError:
            distrito = 'No disponible'
        id_entidad = _campo(datos, 'ID-ENTIDAD')
        try:
            Museo.objects.get(ID_ENTIDAD=id_entidad)
        except Museo.DoesNotExist:
            nuevoMuseo = Museo(
                ID_ENTIDAD=id_entidad,
                NOMBRE=_campo(datos, 'NOMBRE'),
                DESCRIPCION_ENTIDAD=_campo(datos, 'DESCRIPCION-ENTIDAD'),
                HORARIO=_campo(datos, 'HORARIO'),
                TRANSPORTE=_campo(datos, 'TRANSPORTE'),
                ACCESIBILIDAD=_campo(datos, 'ACCESIBILIDAD'),
                CONTENT_URL=_campo(datos, 'CONTENT-URL'),
                NOMBRE_VIA=_campo(datos, 'NOMBRE-VIA'),
                CLASE_VIAL=_campo(datos, 'CLASE-VIAL'),
                TIPO_NUM=_campo(datos, 'TIPO-NUM'),
                NUM=_campo(datos, 'NUM'),
                LOCALIDAD=_campo(datos, 'LOCALIDAD'),
                PROVINCIA=_campo(datos, 'PROVINCIA'),
                CODIGO_POSTAL=_campo(datos, 'CODIGO-POSTAL'),
                BARRIO=_campo(datos, 'BARRIO'),
                DISTRITO=distrito,
                COORDENADA_X=_campo(datos, 'COORDENADA-X'),
                COORDENADA_Y=_campo(datos, 'COORDENADA-Y'),
                LATITUD=_campo(datos, 'LATITUD'),
                LONGITUD=_campo(datos, 'LONGITUD'),
                TELEFONO=_campo(datos, 'TELEFONO'),
                FAX=_campo(datos, 'FAX'),
                EMAIL=_campo(datos, 'EMAIL'),
                TIPO=_campo(datos, 'TIPO'),
            )
            nuevoMuseo.save()
    return mainPage(request)
<|reserved_special_token_1|>
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User
from .models import Museo, Distrito, Comentario, Favorito, Like, Titulo, Letra, Color
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from web.parser import parseXML
import operator
from django.template.loader import get_template
from django.template import Context
import datetime
def getMuseums():
    """Map every museum's ID to its number of comments."""
    return {m.ID_ENTIDAD: m.comentario_set.count()
            for m in Museo.objects.all()}
def getAccessibleMuseums():
    """Map each accessible museum's ID to its number of comments."""
    return {m.ID_ENTIDAD: m.comentario_set.count()
            for m in Museo.objects.all() if m.ACCESIBILIDAD == '1'}
def getRanking():
    """All museums as ``(id, comment_count)`` pairs, most commented first."""
    # Ascending sort followed by a reversal, exactly like the original
    # (note: this is NOT equivalent to sorted(..., reverse=True) for ties).
    pairs = sorted(getMuseums().items(), key=operator.itemgetter(1))
    return list(reversed(pairs))
def getAccessibleRanking():
    """Accessible museums as ``(id, comment_count)`` pairs, most commented first."""
    # Ascending sort followed by a reversal, exactly like the original.
    pairs = sorted(getAccessibleMuseums().items(), key=operator.itemgetter(1))
    return list(reversed(pairs))
def _marker_js(museo):
    """Return the Google-Maps JavaScript that adds one clickable marker
    (with an info window showing the museum's name) for *museo*."""
    ident = 'X' + museo.ID_ENTIDAD  # JS identifiers cannot start with a digit
    return ('var ' + ident + 'info = new google.maps.InfoWindow({' +
            "content:'<h1>" + museo.NOMBRE + "</h1>'});" +
            'var ' + ident + 'marker = new google.maps.Marker({' +
            'position: {lat: ' + museo.LATITUD + ', lng: ' + museo.LONGITUD +
            ' },map: map});' +
            ident + "marker.addListener('click', function() {" +
            ident + 'info.open(map,' + ident + 'marker);' + '});')


def _user_style(request):
    """Return ``(login, css)``: ``login`` is 1 for authenticated users, whose
    saved font size and background colour are baked into *css*; else ``(0, '')``."""
    if not request.user.is_authenticated():
        return 0, ''
    try:
        color = Color.objects.get(usuario=request.user).color
    except Color.DoesNotExist:
        color = 'EEF4F8'  # default background colour
    try:
        letra = Letra.objects.get(usuario=request.user).letra
    except Letra.DoesNotExist:
        letra = '9'  # default font size in points
    return 1, ("body{font-family: 'Helvetica', sans-serif;"
               'color: #444444;'
               'font-size: ' + letra + 'pt;'
               'background-color: #' + color + ';}')


def _user_links():
    """Return ``<li>`` links to every registered user's personal page, using
    the user's chosen page title when one exists."""
    items = ''
    for usr in User.objects.all():
        try:
            title = Titulo.objects.get(usuario=usr.username)
            items = (items + "<li><a href='/" + usr.username + "'>" +
                     title.titulo + ' - ' + usr.username + '</a></li></br>')
        except Titulo.DoesNotExist:
            items = (items + "<li><a href='/" + usr.username +
                     "'>Página de " + usr.username + '</a></li></br>')
    return items


def _top_five_html(ranking, empty_msg):
    """Build ``(html, marker_js)`` for the five most commented museums of
    *ranking*; *empty_msg* is shown when no museum has comments yet."""
    html = ''
    js = ''
    if len(ranking) > 0:
        # Iterate over a slice instead of indices 0..4: the original raised
        # IndexError whenever fewer than five museums existed.
        for museum_id, comment_count in ranking[:5]:
            if comment_count != 0:
                museum = Museo.objects.get(ID_ENTIDAD=museum_id)
                html = (html + "<center><a class='titulos' href=" +
                        museum.CONTENT_URL + '>' + museum.NOMBRE +
                        '</a><br><b>' + str(museum.comentario_set.count()) +
                        ' Comentarios - ' + str(museum.like_set.count()) +
                        ' Likes</b></br></br>')
                html = (html + "<a class='direccion'>" + museum.CLASE_VIAL +
                        ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM +
                        ', ' + museum.LOCALIDAD + '</a></br></br>')
                html = (html + "<a class='info' href=" + '/museos/' +
                        museum.ID_ENTIDAD +
                        '/>Más información</a></center></br></br>')
                # Fixed: the accessible-only branch of the original compared
                # LATITUD against the misspelled 'No disponbile', so museums
                # without a latitude could emit broken marker JS.
                if (museum.LATITUD != 'No disponible' and
                        museum.LONGITUD != 'No disponible'):
                    js = js + _marker_js(museum)
        if ranking[0][1] == 0:
            html = (html + "<a class='titulos'><center>" + empty_msg +
                    '</center></a></br></br></div>')
        else:
            html = html + '</div>'
            html = (html +
                "<center><a class='info' href='/xml'>XML de la página</a></center>"
                )
    else:
        html = (html + "<a class='titulos'><center>" + empty_msg +
                '</center></a></br></br></div>')
    return html, js


@csrf_exempt
def mainPage(request):
    """Landing page: the five most commented museums (all of them, or only
    the accessible ones when the POSTed ``accion`` is 'ocultar'), one map
    marker per listed museum, and links to every user's personal page.
    """
    template = get_template('index.html')
    body = '<br>'
    markers = ''
    if request.method == 'GET' or (request.method == 'POST' and
                                   request.POST['accion'] == 'mostrar'):
        # Default view: every museum; the form toggles to accessible-only.
        body = (body +
            "<center><form action='/' method='post'><input type='hidden' name='accion' value='ocultar'>"
            +
            "<input class='desplegable' type='submit' value='Mostrar museos accesibles'></form></center><div id='scroll'>"
            )
        html, markers = _top_five_html(getRanking(),
            'No hay museos con comentarios, ¡sé el primero en comentar!')
        body = body + html
    elif request.method == 'POST' and request.POST['accion'] == 'ocultar':
        # Accessible-only view; the form toggles back to the full list.
        body = (body +
            "<center><form action='/' method='post'><input type='hidden' name='accion' value='mostrar'>"
            +
            "<input class='desplegable' type='submit' value='Mostrar todos los museos'></form></center><div id='scroll'>"
            )
        html, markers = _top_five_html(getAccessibleRanking(),
            'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'
            )
        body = body + html
    login, style = _user_style(request)
    return HttpResponse(template.render(Context({'body': body, 'login':
        login, 'user': request.user, 'userList': _user_links(), 'formato':
        style, 'markers': markers})))
@csrf_exempt
def museumsPage(request):
    """List every museum (or, after a POST, only the museums of the selected
    district), together with one Google-Maps marker per geolocated museum
    and a ``<select>`` option per district."""
    template = get_template('museos.html')
    if request.method == 'GET':
        museos = Museo.objects.all()
    elif request.method == 'POST':
        museos = Distrito.objects.get(
            nombre=request.POST['distrito']).museo_set.all()
    entries = []
    js_parts = []
    for museo in museos:
        entries.append("<center><a class='titulos'>" + museo.NOMBRE +
                       '</a></br>')
        entries.append("<a class='info' href=" + '/museos/' +
                       museo.ID_ENTIDAD +
                       '/>Más información</a></center></br></br>')
        if (museo.LATITUD != 'No disponible' and
                museo.LONGITUD != 'No disponible'):
            # Per-museum marker + info window; 'X' prefix keeps the JS
            # identifier from starting with a digit.
            js_parts.append(
                'var ' + 'X' + museo.ID_ENTIDAD +
                'info = new google.maps.InfoWindow({' +
                "content:'<h1>" + museo.NOMBRE + "</h1>'});" +
                'var ' + 'X' + museo.ID_ENTIDAD +
                'marker = new google.maps.Marker({' +
                'position: {lat: ' + museo.LATITUD + ', lng: ' +
                museo.LONGITUD + ' },map: map});' +
                'X' + museo.ID_ENTIDAD +
                "marker.addListener('click', function() {" +
                'X' + museo.ID_ENTIDAD + 'info.open(map,' +
                'X' + museo.ID_ENTIDAD + 'marker);' + '});')
    museum_html = ''.join(entries)
    markers = ''.join(js_parts)
    if request.user.is_authenticated():
        login = 1
        try:
            color = Color.objects.get(usuario=request.user).color
        except Color.DoesNotExist:
            color = 'EEF4F8'  # default background colour
        try:
            letra = Letra.objects.get(usuario=request.user).letra
        except Letra.DoesNotExist:
            letra = '9'  # default font size in points
        style = ("body{font-family: 'Helvetica', sans-serif;"
                 'color: #444444;'
                 'font-size: ' + letra + 'pt;'
                 'background-color: #' + color + ';}')
    else:
        login = 0
        style = ''
    opciones = []
    for distrito in Distrito.objects.all():
        opciones.append("<option value='" + distrito.nombre + "'>" +
                        distrito.nombre + '</option>')
    districtList = ''.join(opciones)
    return HttpResponse(template.render(Context({'body': museum_html,
        'login': login, 'user': request.user, 'districtList': districtList,
        'formato': style, 'markers': markers})))
@csrf_exempt
def museumPage(request, museumID):
    """Detail page for one museum.

    POST actions submitted by the page's own forms:
      * 'comentario'        -- store a new comment on the museum,
      * 'añadir' / 'quitar' -- add/remove the museum in the user's favourites,
      * 'mas' / 'menos'     -- add/remove the user's like.
    Always renders the museum's data sheet, its comments, the favourite and
    like buttons matching the viewer's current state, and a map marker when
    the museum has coordinates.
    """
    template = get_template('museo.html')
    museum = Museo.objects.get(ID_ENTIDAD=museumID)
    # Apply any form action before rendering, so the page reflects it.
    if request.method == 'POST' and 'comentario' in request.POST:
        comment = Comentario(texto=request.POST['comentario'], museo=museum,
            usuario=request.user.username)
        comment.save()
    elif request.method == 'POST' and 'añadir' in request.POST:
        fav = Favorito(museo=museum, usuario=request.user)
        fav.save()
    elif request.method == 'POST' and 'quitar' in request.POST:
        Favorito.objects.filter(museo=museum, usuario=request.user).delete()
    elif request.method == 'POST' and 'mas' in request.POST:
        like = Like(museo=museum, usuario=request.user)
        like.save()
    elif request.method == 'POST' and 'menos' in request.POST:
        Like.objects.filter(museo=museum, usuario=request.user).delete()
    comments = museum.comentario_set.all()
    # Static data sheet: name, description, schedule, accessibility,
    # address and contact data.
    message = ("<center><b><a class='titulos_museo'>" + museum.NOMBRE +
        "</a></b></center><div id='scroll'></br><center><b><a class='titulos_museo'>Descripción</a></b></center></br><center><a class='texto_museo'>"
        + museum.DESCRIPCION_ENTIDAD +
        "</a></center></br><center><b><a class='titulos_museo'>Horario</a></b></center></br><center><a class='texto_museo'>"
        + museum.HORARIO +
        "</a></center></br><center><b><a class='titulos_museo'>Accesibilidad</a></b></center></br><center><a class='texto_museo'>"
        + museum.ACCESIBILIDAD +
        "</a></center></br><center><b><a class='titulos_museo'>Dirección</a></b></center></br><center><a class='texto_museo'>"
        + museum.CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' +
        museum.NUM + ', ' + museum.LOCALIDAD +
        "</a><center></br><center><a class='texto_museo'>Barrio: " +
        museum.BARRIO +
        "</a></center></br><center><a class='texto_museo'>Distrito: " +
        str(museum.DISTRITO) +
        "</a></center></br><center><b><a class='titulos_museo'>Datos de contacto</a></b></center></br><center><a class='texto_museo'>Teléfono: "
        + museum.TELEFONO +
        "</a></center></br><center><a class='texto_museo'>Email: " +
        museum.EMAIL +
        "</a></center></br><center><b><a class='titulos_museo'>Comentarios</a></b></center></br>"
        )
    allComments = ''
    for comment in comments:
        # The +2h shift presumably converts stored UTC timestamps to Madrid
        # local time -- TODO confirm against the project's TIME_ZONE setting.
        allComments = (allComments + "<center><a class='texto_museo'><b>" +
            'Anónimo</b>: ' + comment.texto + ', ' + (datetime.timedelta(
            hours=2) + comment.fecha).strftime('%H:%M:%S %d-%m-%Y') +
            '</a></center></br>')
    message = message + allComments
    style = ''
    if request.user.is_authenticated():
        login = 1
        # Offer the action opposite to the user's current state.
        try:
            Favorito.objects.get(museo=museum, usuario=request.user)
            favoriteButton = ("<center><form action='/museos/" + museumID +
                "/' method='post'><input type='hidden' name='quitar' value='fav'>"
                +
                "<input class='desplegable' type='submit' value='Quitar de favoritos'></form></center>"
                )
        except Favorito.DoesNotExist:
            favoriteButton = ("<center><form action='/museos/" + museumID +
                "/' method='post'><input type='hidden' name='añadir' value='fav'>"
                +
                "<input class='desplegable' type='submit' value='Añadir a favoritos'></form></center>"
                )
        try:
            Like.objects.get(museo=museum, usuario=request.user)
            likeButton = ("<center><form action='/museos/" + museumID +
                "/' method='post'><input type='hidden' name='menos' value='like'>"
                +
                "<input class='desplegable' type='submit' value='Dislike'></form></center>"
                )
        except Like.DoesNotExist:
            likeButton = ("<center><form action='/museos/" + museumID +
                "/' method='post'><input type='hidden' name='mas' value='like'>"
                +
                "<input class='desplegable' type='submit' value='Like'></form></center>"
                )
        try:
            color = Color.objects.get(usuario=request.user).color
        except Color.DoesNotExist:
            color = 'EEF4F8'  # default background colour
        try:
            letra = Letra.objects.get(usuario=request.user).letra
        except Letra.DoesNotExist:
            letra = '9'  # default font size in points
        style = ("body{font-family: 'Helvetica', sans-serif;"
                 'color: #444444;'
                 'font-size: ' + letra + 'pt;'
                 'background-color: #' + color + ';}')
    else:
        login = 0
        favoriteButton = ''
        likeButton = ''
    # Fixed: the original compared LATITUD against the misspelled
    # 'No disponbile', so a museum lacking a latitude could emit broken JS.
    if (museum.LATITUD != 'No disponible' and museum.LONGITUD !=
            'No disponible'):
        marker = ('var ' + 'X' + museum.ID_ENTIDAD +
            'info = new google.maps.InfoWindow({' + "content:'<h1>" +
            museum.NOMBRE + "</h1>'});" + 'var ' + 'X' + museum.ID_ENTIDAD +
            'marker = new google.maps.Marker({' + 'position: {lat: ' +
            museum.LATITUD + ', lng: ' + museum.LONGITUD + ' },map: map});' +
            'X' + museum.ID_ENTIDAD +
            "marker.addListener('click', function() {" + 'X' +
            museum.ID_ENTIDAD + 'info.open(map,' + 'X' + museum.ID_ENTIDAD +
            'marker);' + '});')
    else:
        marker = ''
    return HttpResponse(template.render(Context({'body': message, 'login':
        login, 'user': request.user, 'id': museumID, 'fav': favoriteButton,
        'like': likeButton, 'formato': style, 'marker': marker})))
@csrf_exempt
def loginPage(request):
    """Handle the combined login/registration form, then show the main page.

    A 'login' POST authenticates an existing account; a 'registro' POST
    creates the account when the username is free, and otherwise behaves
    like a normal login attempt.  Requests from already-authenticated users
    change nothing.
    """
    if request.method == 'POST':
        if not request.user.is_authenticated():
            if 'login' in request.POST:
                user = authenticate(username=request.POST['Usuario'],
                                    password=request.POST['Contraseña'])
                if user is not None:
                    login(request, user)
            elif 'registro' in request.POST:
                username = request.POST['Usuario']
                password = request.POST['Contraseña']
                try:
                    User.objects.get(username=username)
                    # Username taken: fall back to a plain login attempt.
                    user = authenticate(username=username, password=password)
                    if user is not None:
                        login(request, user)
                except User.DoesNotExist:
                    user = User.objects.create_user(username=username,
                                                    password=password)
                    user.save()
        # Make mainPage render its default GET view instead of re-reading
        # this form's POST data.
        request.method = 'GET'
    return mainPage(request)
def logoutPage(request):
    """Log the current user out, then render the main page."""
    logout(request)
    return mainPage(request)
def userPage(request, user, number):
    """Personal page of *user*: their favourite museums, five per page.

    *number* is the 1-based page selected in the pagination bar (``None``
    from the URL route means page 1).  NOTE(review): an out-of-range
    *number* makes the ``split(';')`` indexing below raise IndexError --
    presumably the templates only ever link valid pages; verify.
    """
    if number == None:
        number = 1
    template = get_template('personal.html')
    listTotal = ''
    favoritos = Favorito.objects.filter(usuario=user)
    # Placeholder; replaced below by the HTML fragment of the chosen page.
    group = range(5)
    count = 0
    markers = ''
    # Build one HTML entry (and, when geolocated, one map marker) per
    # favourite.  A ';' is appended after every fifth entry so the string
    # can later be split into pages.
    for favorito in favoritos:
        count = count + 1
        museum = Museo.objects.get(NOMBRE=favorito.museo)
        listTotal = (listTotal + "<a class='titulos' href=" + museum.
            CONTENT_URL + '>' + museum.NOMBRE + '</a><br><b>' + str(museum.
            comentario_set.count()) + ' Comentarios - ' + str(museum.
            like_set.count()) + ' Likes</b></br></br>')
        listTotal = (listTotal + "<a class='direccion'>" + museum.
            CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM +
            ', ' + museum.LOCALIDAD + '</a></br></br>')
        # The +2h shift presumably converts the stored timestamp to Madrid
        # local time -- TODO confirm against the project's TIME_ZONE.
        listTotal = (listTotal + "<a class='info' href=" + '/museos/' +
            museum.ID_ENTIDAD +
            '/>Más información</a> <b>Fecha de guardado:' + (datetime.
            timedelta(hours=2) + favorito.fecha).strftime(
            '%H:%M:%S %d-%m-%Y') + '</b></br></br></br>')
        if (museum.LATITUD != 'No disponible' and museum.LONGITUD !=
            'No disponible'):
            markers = (markers + 'var ' + 'X' + museum.ID_ENTIDAD +
                'info = new google.maps.InfoWindow({' + "content:'<h1>" +
                museum.NOMBRE + "</h1>'});" + 'var ' + 'X' + museum.
                ID_ENTIDAD + 'marker = new google.maps.Marker({' +
                'position: {lat: ' + museum.LATITUD + ', lng: ' + museum.
                LONGITUD + ' },map: map});' + 'X' + museum.ID_ENTIDAD +
                "marker.addListener('click', function() {" + 'X' + museum.
                ID_ENTIDAD + 'info.open(map,' + 'X' + museum.ID_ENTIDAD +
                'marker);' + '});')
        if count % 5 == 0:
            listTotal = listTotal + ';'
    # HTML fragment (a string) for the requested page.
    group = listTotal.split(';')[int(number) - 1]
    list = ''
    # Number of pages: ceiling of favourites / 5.
    if favoritos.count() % 5 == 0:
        pages = int(favoritos.count() / 5)
    else:
        pages = int(favoritos.count() / 5) + 1
    pagesRange = range(pages)
    # Pagination bar: « previous, one numbered link per page (the current
    # page highlighted), next » -- only shown when there is more than one page.
    if pages > 1:
        list = '<br>'
        if int(number) > 1:
            list = (list + "<center><div class='pagination'><a href='/" +
                user + '/' + str(int(number) - 1) + "'>«</a>")
        else:
            list = (list + "<center><div class='pagination'><a href='/" +
                user + '/' + str(number) + "'>«</a>")
        for page in pagesRange:
            if page == int(number) - 1:
                list = list + "<a class='active' href='/" + user + '/' + str(
                    page + 1) + "'>" + str(page + 1) + '</a>'
            else:
                list = list + "<a href='/" + user + '/' + str(page + 1
                    ) + "'>" + str(page + 1) + '</a>'
        if int(number) == pages:
            list = list + "<a href='/" + user + '/' + str(number
                ) + "'>»</a></div></center></br>"
        else:
            list = list + "<a href='/" + user + '/' + str(int(number) + 1
                ) + "'>»</a></div></center></br>"
    list = list + "<div id='scroll'><center>"
    # *group* is a string, so this just appends it character by character.
    for item in group:
        list = list + item
    # No favourites yet: show a hint instead (different wording for the
    # anonymous pseudo-user's page).
    if (list == '' or list == "<div id='scroll'><center>"
        ) and user != 'AnonymousUser':
        list = ("<center><a class='titulos'>" +
            'Para que aparezcan museos en esta página, ' + user +
            ' tiene que añadirlos.' + '</a></center></br></br>')
    elif (list == '' or list == "<div id='scroll'><center>"
        ) and user == 'AnonymousUser':
        list = ("<center><a class='titulos'>" +
            'Para ver tu página personal, primero tienes que loguearte.' +
            '</a></center></br></br>')
    else:
        list = (list + "<center><a class='info' href='/" + user +
            "/xml'>XML del usuario</a></center>")
    list = list + '</center></div>'
    users = User.objects.all()
    userList = ''
    # NOTE(review): this loop variable shadows the *user* parameter above;
    # the parameter is not used again afterwards, so behaviour is unaffected.
    for user in users:
        try:
            title = Titulo.objects.get(usuario=user.username)
            userList = (userList + "<li><a href='/" + user.username + "'>" +
                title.titulo + ' - ' + user.username + '</a></li></br>')
        except Titulo.DoesNotExist:
            userList = (userList + "<li><a href='/" + user.username +
                "'>Página de " + user.username + '</a></li></br>')
    style = ''
    # Per-user CSS overrides (font size, background colour) for the viewer.
    if request.user.is_authenticated():
        login = 1
        try:
            color = Color.objects.get(usuario=request.user)
            color = color.color
        except Color.DoesNotExist:
            color = 'EEF4F8'
        try:
            letra = Letra.objects.get(usuario=request.user)
            letra = letra.letra
        except Letra.DoesNotExist:
            letra = '9'
        style = (
            "body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: "
            + letra + 'pt;background-color: #' + color + ';}')
    else:
        login = 0
    return HttpResponse(template.render(Context({'body': list, 'login':
        login, 'user': request.user, 'userList': userList, 'formato': style,
        'markers': markers})))
def userXMLPage(request, user):
    """Serve *user*'s favourite museums as an XML document."""
    template = get_template('personalXML.xml')
    favoriteList = [fav.museo
                    for fav in Favorito.objects.filter(usuario=user)]
    return HttpResponse(template.render(Context({'favoriteList':
        favoriteList, 'user': user})), content_type='text/xml')
def XMLPage(request):
    """Serve the site-wide top five (most commented museums) as XML."""
    template = get_template('personalXML.xml')
    topList = []
    # Slice the ranking instead of indexing positions 0..4: the original
    # raised IndexError when fewer than five museums existed.
    for museum_id, comment_count in getRanking()[:5]:
        if comment_count != 0:
            topList.append(Museo.objects.get(ID_ENTIDAD=museum_id))
    return HttpResponse(template.render(Context({'favoriteList': topList,
        'user': ''})), content_type='text/xml')
def XMLAccesiblePage(request):
    """Serve the top five most commented *accessible* museums as XML."""
    template = get_template('personalXML.xml')
    topList = []
    # Slice the ranking instead of indexing positions 0..4: the original
    # raised IndexError when fewer than five accessible museums existed.
    for museum_id, comment_count in getAccessibleRanking()[:5]:
        if comment_count != 0:
            topList.append(Museo.objects.get(ID_ENTIDAD=museum_id))
    return HttpResponse(template.render(Context({'favoriteList': topList,
        'user': ''})), content_type='text/xml')
@csrf_exempt
def preferencesPage(request, user):
    """Render the preferences page for *user* and store a submitted setting.

    A POST carries exactly one of the keys 'color', 'tamaño' or 'título';
    the matching preference record is updated in place, or created when the
    user does not have one yet.
    """
    template = get_template('preferencias.html')
    if request.method == 'POST':
        post = request.POST
        if 'color' in post:
            try:
                pref = Color.objects.get(usuario=user)
                pref.color = post['color']
            except Color.DoesNotExist:
                pref = Color(usuario=user, color=post['color'])
            pref.save()
        elif 'tamaño' in post:
            try:
                pref = Letra.objects.get(usuario=user)
                pref.letra = post['tamaño']
            except Letra.DoesNotExist:
                pref = Letra(usuario=user, letra=post['tamaño'])
            pref.save()
        elif 'título' in post:
            try:
                pref = Titulo.objects.get(usuario=user)
                pref.titulo = post['título']
            except Titulo.DoesNotExist:
                pref = Titulo(usuario=user, titulo=post['título'])
            pref.save()
    style = ''
    if not request.user.is_authenticated():
        login = 0
    else:
        login = 1
        try:
            color = Color.objects.get(usuario=request.user).color
        except Color.DoesNotExist:
            color = 'EEF4F8'  # default background colour
        try:
            letra = Letra.objects.get(usuario=request.user).letra
        except Letra.DoesNotExist:
            letra = '9'  # default font size in points
        style = ("body{font-family: 'Helvetica', sans-serif;"
                 'color: #444444;'
                 'font-size: ' + letra + 'pt;'
                 'background-color: #' + color + ';}')
    return HttpResponse(template.render(Context({'login': login, 'user':
        user, 'formato': style})))
def aboutPage(request):
    """Render the static "about" page, applying the viewer's saved style."""
    template = get_template('about.html')
    style = ''
    login = 1 if request.user.is_authenticated() else 0
    if login:
        try:
            color = Color.objects.get(usuario=request.user).color
        except Color.DoesNotExist:
            color = 'EEF4F8'  # default background colour
        try:
            letra = Letra.objects.get(usuario=request.user).letra
        except Letra.DoesNotExist:
            letra = '9'  # default font size in points
        style = ("body{font-family: 'Helvetica', sans-serif;"
                 'color: #444444;'
                 'font-size: ' + letra + 'pt;'
                 'background-color: #' + color + ';}')
    return HttpResponse(template.render(Context({'login': login, 'user':
        request.user, 'formato': style})))
def _campo(datos, clave):
    """Return XML field *clave* of *datos*, or the placeholder for missing data."""
    return datos.get(clave, 'No disponible')


def updateDB(request):
    """Reload the museum catalogue from ``web/museos.xml`` into the database.

    Creates any missing ``Distrito`` rows first, then inserts every museum
    that is not already stored (existing rows are left untouched).  Fields
    absent from the XML are stored as ``'No disponible'``.  Finally renders
    the main page.

    This replaces the original's 24 copy-pasted ``try/except KeyError``
    blocks with ``dict.get`` lookups; behaviour is unchanged.
    """
    museos = parseXML('web/museos.xml')
    # First pass: make sure every district referenced by the XML exists.
    for museo in museos:
        try:
            distrito = Distrito.objects.get(nombre=museos[museo]['DISTRITO'])
        except Distrito.DoesNotExist:
            distrito = Distrito(nombre=museos[museo]['DISTRITO'])
            distrito.save()
    # Second pass: create the museums themselves.
    for museo in museos:
        datos = museos[museo]
        try:
            # The district key may legitimately be missing from a record.
            distrito = Distrito.objects.get(nombre=datos['DISTRITO'])
        except KeyError:
            distrito = 'No disponible'
        id_entidad = _campo(datos, 'ID-ENTIDAD')
        try:
            Museo.objects.get(ID_ENTIDAD=id_entidad)
        except Museo.DoesNotExist:
            nuevoMuseo = Museo(
                ID_ENTIDAD=id_entidad,
                NOMBRE=_campo(datos, 'NOMBRE'),
                DESCRIPCION_ENTIDAD=_campo(datos, 'DESCRIPCION-ENTIDAD'),
                HORARIO=_campo(datos, 'HORARIO'),
                TRANSPORTE=_campo(datos, 'TRANSPORTE'),
                ACCESIBILIDAD=_campo(datos, 'ACCESIBILIDAD'),
                CONTENT_URL=_campo(datos, 'CONTENT-URL'),
                NOMBRE_VIA=_campo(datos, 'NOMBRE-VIA'),
                CLASE_VIAL=_campo(datos, 'CLASE-VIAL'),
                TIPO_NUM=_campo(datos, 'TIPO-NUM'),
                NUM=_campo(datos, 'NUM'),
                LOCALIDAD=_campo(datos, 'LOCALIDAD'),
                PROVINCIA=_campo(datos, 'PROVINCIA'),
                CODIGO_POSTAL=_campo(datos, 'CODIGO-POSTAL'),
                BARRIO=_campo(datos, 'BARRIO'),
                DISTRITO=distrito,
                COORDENADA_X=_campo(datos, 'COORDENADA-X'),
                COORDENADA_Y=_campo(datos, 'COORDENADA-Y'),
                LATITUD=_campo(datos, 'LATITUD'),
                LONGITUD=_campo(datos, 'LONGITUD'),
                TELEFONO=_campo(datos, 'TELEFONO'),
                FAX=_campo(datos, 'FAX'),
                EMAIL=_campo(datos, 'EMAIL'),
                TIPO=_campo(datos, 'TIPO'),
            )
            nuevoMuseo.save()
    return mainPage(request)
<|reserved_special_token_1|>
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User
from .models import Museo, Distrito, Comentario, Favorito, Like, Titulo, Letra, Color
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from web.parser import parseXML
import operator
from django.template.loader import get_template
from django.template import Context
import datetime
def getMuseums():
    """Map every museum's ID to its number of comments."""
    return {m.ID_ENTIDAD: m.comentario_set.count()
            for m in Museo.objects.all()}
def getAccessibleMuseums():
    """Map each accessible museum's ID to its number of comments."""
    return {m.ID_ENTIDAD: m.comentario_set.count()
            for m in Museo.objects.all() if m.ACCESIBILIDAD == '1'}
def getRanking():
    """All museums as ``(id, comment_count)`` pairs, most commented first."""
    # Ascending sort followed by a reversal, exactly like the original
    # (note: this is NOT equivalent to sorted(..., reverse=True) for ties).
    pairs = sorted(getMuseums().items(), key=operator.itemgetter(1))
    return list(reversed(pairs))
def getAccessibleRanking():
    """Accessible museums as ``(id, comment_count)`` pairs, most commented first."""
    # Ascending sort followed by a reversal, exactly like the original.
    pairs = sorted(getAccessibleMuseums().items(), key=operator.itemgetter(1))
    return list(reversed(pairs))
def _marker_js(museo):
    """Return the Google-Maps JavaScript that adds one clickable marker
    (with an info window showing the museum's name) for *museo*."""
    ident = 'X' + museo.ID_ENTIDAD  # JS identifiers cannot start with a digit
    return ('var ' + ident + 'info = new google.maps.InfoWindow({' +
            "content:'<h1>" + museo.NOMBRE + "</h1>'});" +
            'var ' + ident + 'marker = new google.maps.Marker({' +
            'position: {lat: ' + museo.LATITUD + ', lng: ' + museo.LONGITUD +
            ' },map: map});' +
            ident + "marker.addListener('click', function() {" +
            ident + 'info.open(map,' + ident + 'marker);' + '});')


def _user_style(request):
    """Return ``(login, css)``: ``login`` is 1 for authenticated users, whose
    saved font size and background colour are baked into *css*; else ``(0, '')``."""
    if not request.user.is_authenticated():
        return 0, ''
    try:
        color = Color.objects.get(usuario=request.user).color
    except Color.DoesNotExist:
        color = 'EEF4F8'  # default background colour
    try:
        letra = Letra.objects.get(usuario=request.user).letra
    except Letra.DoesNotExist:
        letra = '9'  # default font size in points
    return 1, ("body{font-family: 'Helvetica', sans-serif;"
               'color: #444444;'
               'font-size: ' + letra + 'pt;'
               'background-color: #' + color + ';}')


def _user_links():
    """Return ``<li>`` links to every registered user's personal page, using
    the user's chosen page title when one exists."""
    items = ''
    for usr in User.objects.all():
        try:
            title = Titulo.objects.get(usuario=usr.username)
            items = (items + "<li><a href='/" + usr.username + "'>" +
                     title.titulo + ' - ' + usr.username + '</a></li></br>')
        except Titulo.DoesNotExist:
            items = (items + "<li><a href='/" + usr.username +
                     "'>Página de " + usr.username + '</a></li></br>')
    return items


def _top_five_html(ranking, empty_msg):
    """Build ``(html, marker_js)`` for the five most commented museums of
    *ranking*; *empty_msg* is shown when no museum has comments yet."""
    html = ''
    js = ''
    if len(ranking) > 0:
        # Iterate over a slice instead of indices 0..4: the original raised
        # IndexError whenever fewer than five museums existed.
        for museum_id, comment_count in ranking[:5]:
            if comment_count != 0:
                museum = Museo.objects.get(ID_ENTIDAD=museum_id)
                html = (html + "<center><a class='titulos' href=" +
                        museum.CONTENT_URL + '>' + museum.NOMBRE +
                        '</a><br><b>' + str(museum.comentario_set.count()) +
                        ' Comentarios - ' + str(museum.like_set.count()) +
                        ' Likes</b></br></br>')
                html = (html + "<a class='direccion'>" + museum.CLASE_VIAL +
                        ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM +
                        ', ' + museum.LOCALIDAD + '</a></br></br>')
                html = (html + "<a class='info' href=" + '/museos/' +
                        museum.ID_ENTIDAD +
                        '/>Más información</a></center></br></br>')
                # Fixed: the accessible-only branch of the original compared
                # LATITUD against the misspelled 'No disponbile', so museums
                # without a latitude could emit broken marker JS.
                if (museum.LATITUD != 'No disponible' and
                        museum.LONGITUD != 'No disponible'):
                    js = js + _marker_js(museum)
        if ranking[0][1] == 0:
            html = (html + "<a class='titulos'><center>" + empty_msg +
                    '</center></a></br></br></div>')
        else:
            html = html + '</div>'
            html = (html +
                "<center><a class='info' href='/xml'>XML de la página</a></center>"
                )
    else:
        html = (html + "<a class='titulos'><center>" + empty_msg +
                '</center></a></br></br></div>')
    return html, js


@csrf_exempt
def mainPage(request):
    """Landing page: the five most commented museums (all of them, or only
    the accessible ones when the POSTed ``accion`` is 'ocultar'), one map
    marker per listed museum, and links to every user's personal page.
    """
    template = get_template('index.html')
    body = '<br>'
    markers = ''
    if request.method == 'GET' or (request.method == 'POST' and
                                   request.POST['accion'] == 'mostrar'):
        # Default view: every museum; the form toggles to accessible-only.
        body = (body +
            "<center><form action='/' method='post'><input type='hidden' name='accion' value='ocultar'>"
            +
            "<input class='desplegable' type='submit' value='Mostrar museos accesibles'></form></center><div id='scroll'>"
            )
        html, markers = _top_five_html(getRanking(),
            'No hay museos con comentarios, ¡sé el primero en comentar!')
        body = body + html
    elif request.method == 'POST' and request.POST['accion'] == 'ocultar':
        # Accessible-only view; the form toggles back to the full list.
        body = (body +
            "<center><form action='/' method='post'><input type='hidden' name='accion' value='mostrar'>"
            +
            "<input class='desplegable' type='submit' value='Mostrar todos los museos'></form></center><div id='scroll'>"
            )
        html, markers = _top_five_html(getAccessibleRanking(),
            'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'
            )
        body = body + html
    login, style = _user_style(request)
    return HttpResponse(template.render(Context({'body': body, 'login':
        login, 'user': request.user, 'userList': _user_links(), 'formato':
        style, 'markers': markers})))
@csrf_exempt
def museumsPage(request):
    """List every museum (or, after a POST, only the museums of the selected
    district), together with one Google-Maps marker per geolocated museum
    and a ``<select>`` option per district."""
    template = get_template('museos.html')
    if request.method == 'GET':
        museos = Museo.objects.all()
    elif request.method == 'POST':
        museos = Distrito.objects.get(
            nombre=request.POST['distrito']).museo_set.all()
    entries = []
    js_parts = []
    for museo in museos:
        entries.append("<center><a class='titulos'>" + museo.NOMBRE +
                       '</a></br>')
        entries.append("<a class='info' href=" + '/museos/' +
                       museo.ID_ENTIDAD +
                       '/>Más información</a></center></br></br>')
        if (museo.LATITUD != 'No disponible' and
                museo.LONGITUD != 'No disponible'):
            # Per-museum marker + info window; 'X' prefix keeps the JS
            # identifier from starting with a digit.
            js_parts.append(
                'var ' + 'X' + museo.ID_ENTIDAD +
                'info = new google.maps.InfoWindow({' +
                "content:'<h1>" + museo.NOMBRE + "</h1>'});" +
                'var ' + 'X' + museo.ID_ENTIDAD +
                'marker = new google.maps.Marker({' +
                'position: {lat: ' + museo.LATITUD + ', lng: ' +
                museo.LONGITUD + ' },map: map});' +
                'X' + museo.ID_ENTIDAD +
                "marker.addListener('click', function() {" +
                'X' + museo.ID_ENTIDAD + 'info.open(map,' +
                'X' + museo.ID_ENTIDAD + 'marker);' + '});')
    museum_html = ''.join(entries)
    markers = ''.join(js_parts)
    if request.user.is_authenticated():
        login = 1
        try:
            color = Color.objects.get(usuario=request.user).color
        except Color.DoesNotExist:
            color = 'EEF4F8'  # default background colour
        try:
            letra = Letra.objects.get(usuario=request.user).letra
        except Letra.DoesNotExist:
            letra = '9'  # default font size in points
        style = ("body{font-family: 'Helvetica', sans-serif;"
                 'color: #444444;'
                 'font-size: ' + letra + 'pt;'
                 'background-color: #' + color + ';}')
    else:
        login = 0
        style = ''
    opciones = []
    for distrito in Distrito.objects.all():
        opciones.append("<option value='" + distrito.nombre + "'>" +
                        distrito.nombre + '</option>')
    districtList = ''.join(opciones)
    return HttpResponse(template.render(Context({'body': museum_html,
        'login': login, 'user': request.user, 'districtList': districtList,
        'formato': style, 'markers': markers})))
@csrf_exempt
def museumPage(request, museumID):
    """Detail page for one museum (identified by its ID-ENTIDAD).

    POST actions (mutually exclusive marker fields):
      * 'comentario'        — store a new comment for this museum,
      * 'añadir' / 'quitar' — add / remove the museum from the user's favorites,
      * 'mas' / 'menos'     — add / remove the user's like.
    Renders museo.html with the museum data, its comments, the
    favorite/like buttons and a Google Maps marker when coordinates exist.
    """
    template = get_template('museo.html')
    museum = Museo.objects.get(ID_ENTIDAD = museumID)
    if request.method == 'POST' and 'comentario' in request.POST:
        comment = Comentario(texto = request.POST['comentario'], museo = museum, usuario = request.user.username)
        comment.save()
    elif request.method == 'POST' and 'añadir' in request.POST:
        fav = Favorito(museo = museum, usuario = request.user)
        fav.save()
    elif request.method == 'POST' and 'quitar' in request.POST:
        Favorito.objects.filter(museo = museum, usuario = request.user).delete()
    elif request.method == 'POST' and 'mas' in request.POST:
        like = Like(museo = museum, usuario = request.user)
        like.save()
    elif request.method == 'POST' and 'menos' in request.POST:
        Like.objects.filter(museo = museum, usuario = request.user).delete()
    comments = museum.comentario_set.all()
    message = ("<center><b><a class='titulos_museo'>" + museum.NOMBRE + "</a></b></center><div id='scroll'></br>"
        "<center><b><a class='titulos_museo'>Descripción</a></b></center></br>"
        "<center><a class='texto_museo'>" + museum.DESCRIPCION_ENTIDAD + '</a></center></br>'
        "<center><b><a class='titulos_museo'>Horario</a></b></center></br>"
        "<center><a class='texto_museo'>" + museum.HORARIO + '</a></center></br>'
        "<center><b><a class='titulos_museo'>Accesibilidad</a></b></center></br>"
        "<center><a class='texto_museo'>" + museum.ACCESIBILIDAD + '</a></center></br>'
        "<center><b><a class='titulos_museo'>Dirección</a></b></center></br>"
        "<center><a class='texto_museo'>" + museum.CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM + ', ' + museum.LOCALIDAD + '</a></center></br>'  # fixed: was '<center>', leaving the tag unclosed
        "<center><a class='texto_museo'>Barrio: " + museum.BARRIO + '</a></center></br>'
        "<center><a class='texto_museo'>Distrito: " + str(museum.DISTRITO) + '</a></center></br>'
        "<center><b><a class='titulos_museo'>Datos de contacto</a></b></center></br>"
        "<center><a class='texto_museo'>Teléfono: " + museum.TELEFONO + '</a></center></br>'
        "<center><a class='texto_museo'>Email: " + museum.EMAIL + '</a></center></br>'
        "<center><b><a class='titulos_museo'>Comentarios</a></b></center></br>")
    allComments = ''
    for comment in comments:
        # NOTE(review): comments are rendered as 'Anónimo' even though
        # comment.usuario is stored — confirm this anonymity is intended.
        # +2h offset converts the stored timestamp for display.
        allComments = allComments + "<center><a class='texto_museo'><b>" + 'Anónimo</b>: ' + comment.texto + ', ' + (datetime.timedelta(hours=2) + comment.fecha).strftime("%H:%M:%S %d-%m-%Y") + '</a></center></br>'
    message = message + allComments
    style = ''
    if request.user.is_authenticated():
        login = 1
        # Toggle buttons reflect whether the user already favourited/liked.
        try:
            favorito = Favorito.objects.get(museo = museum, usuario = request.user)
            favoriteButton = ("<center><form action='/museos/" + museumID + "/' method='post'><input type='hidden' name='quitar' value='fav'>" +
                "<input class='desplegable' type='submit' value='Quitar de favoritos'></form></center>")
        except Favorito.DoesNotExist:
            favoriteButton = ("<center><form action='/museos/" + museumID + "/' method='post'><input type='hidden' name='añadir' value='fav'>" +
                "<input class='desplegable' type='submit' value='Añadir a favoritos'></form></center>")
        try:
            like = Like.objects.get(museo = museum, usuario = request.user)
            likeButton = ("<center><form action='/museos/" + museumID + "/' method='post'><input type='hidden' name='menos' value='like'>" +
                "<input class='desplegable' type='submit' value='Dislike'></form></center>")
        except Like.DoesNotExist:
            likeButton = ("<center><form action='/museos/" + museumID + "/' method='post'><input type='hidden' name='mas' value='like'>" +
                "<input class='desplegable' type='submit' value='Like'></form></center>")
        try:
            color = Color.objects.get(usuario = request.user)
            color = color.color
        except Color.DoesNotExist:
            color = 'EEF4F8'
        try:
            letra = Letra.objects.get(usuario = request.user)
            letra = letra.letra
        except Letra.DoesNotExist:
            letra = '9'
        style = ("body{font-family: 'Helvetica', sans-serif;"
            "color: #444444;"
            "font-size: " + letra + "pt;"
            "background-color: #" + color + ";}")
    else:
        login = 0
        favoriteButton = ''
        likeButton = ''
    # Bug fix: the sentinel was misspelled ('No disponbile'), so the latitude
    # check was always true and a marker could be emitted with the literal
    # text 'No disponible' as its latitude.
    if museum.LATITUD != 'No disponible' and museum.LONGITUD != 'No disponible':
        marker = ("var " + "X" + museum.ID_ENTIDAD + "info = new google.maps.InfoWindow({" +
            "content:'<h1>" + museum.NOMBRE + "</h1>'});" +
            "var " + "X" + museum.ID_ENTIDAD + "marker = new google.maps.Marker({" +
            "position: {lat: " + museum.LATITUD + ", lng: " + museum.LONGITUD + " },map: map});" +
            "X" + museum.ID_ENTIDAD + "marker.addListener('click', function() {" +
            "X" + museum.ID_ENTIDAD + "info.open(map," + "X" + museum.ID_ENTIDAD + "marker);" +
            "});")
    else:
        marker = ''
    return HttpResponse(template.render(Context({'body': message, 'login': login, 'user': request.user, 'id': museumID, 'fav': favoriteButton, 'like': likeButton, 'formato': style, 'marker': marker})))
@csrf_exempt
def loginPage(request):
    """Process the login/registration form, then render the main page.

    The POST carries either a 'login' or a 'registro' marker field plus
    'Usuario' and 'Contraseña'.  Registration logs an existing account in
    when the credentials match, and creates the account otherwise.
    """
    if request.method == 'POST':
        already_in = request.user.is_authenticated()
        if not already_in and 'login' in request.POST:
            candidate = authenticate(username=request.POST['Usuario'],
                                     password=request.POST['Contraseña'])
            if candidate is not None:
                login(request, candidate)
        elif not already_in and 'registro' in request.POST:
            username = request.POST['Usuario']
            password = request.POST['Contraseña']
            try:
                User.objects.get(username=username)
                # The account already exists: treat the registration as a
                # plain login attempt.
                candidate = authenticate(username=username, password=password)
                if candidate is not None:
                    login(request, candidate)
            except User.DoesNotExist:
                User.objects.create_user(username=username, password=password).save()
        request.method = 'GET'
    return mainPage(request)
def logoutPage(request):
    """Log the current user out of the session, then render the main page."""
    logout(request)
    return mainPage(request)
def userPage(request, user, number):
    """Personal page of *user*: their favorite museums, five per page.

    *number* is the 1-based page number taken from the URL (None -> page 1).
    Builds the favorites listing with save dates, the pagination links, the
    Google Maps markers and the sidebar listing every registered user's page.
    """
    if number is None:
        number = 1
    template = get_template('personal.html')
    listTotal = ''
    favoritos = Favorito.objects.filter(usuario = user)
    count = 0
    markers = ''
    for favorito in favoritos:
        count = count + 1
        museum = Museo.objects.get(NOMBRE = favorito.museo)
        listTotal = listTotal + "<a class='titulos' href=" + museum.CONTENT_URL + '>' + museum.NOMBRE + '</a><br><b>' + str(museum.comentario_set.count()) + ' Comentarios - ' + str(museum.like_set.count()) + ' Likes</b></br></br>'
        listTotal = listTotal + "<a class='direccion'>" + museum.CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'
        # +2h offset converts the stored timestamp for display.
        listTotal = listTotal + "<a class='info' href=" + "/museos/" + museum.ID_ENTIDAD + '/>Más información</a> <b>Fecha de guardado:' + (datetime.timedelta(hours=2) + favorito.fecha).strftime("%H:%M:%S %d-%m-%Y") + '</b></br></br></br>'
        if museum.LATITUD != 'No disponible' and museum.LONGITUD != 'No disponible':
            markers = (markers +
                "var " + "X" + museum.ID_ENTIDAD + "info = new google.maps.InfoWindow({" +
                "content:'<h1>" + museum.NOMBRE + "</h1>'});" +
                "var " + "X" + museum.ID_ENTIDAD + "marker = new google.maps.Marker({" +
                "position: {lat: " + museum.LATITUD + ", lng: " + museum.LONGITUD + " },map: map});" +
                "X" + museum.ID_ENTIDAD + "marker.addListener('click', function() {" +
                "X" + museum.ID_ENTIDAD + "info.open(map," + "X" + museum.ID_ENTIDAD + "marker);" +
                "});")
        if (count % 5) == 0:
            # ';' marks a page boundary; split(';') below recovers the pages.
            listTotal = listTotal + ';'
    # Pick the requested page; an out-of-range page number now yields an empty
    # page instead of an IndexError (the original indexed the split blindly).
    chunks = listTotal.split(';')
    index = int(number) - 1
    group = chunks[index] if 0 <= index < len(chunks) else ''
    list = ''
    if (favoritos.count() % 5) == 0:
        pages = int(favoritos.count() / 5)
    else:
        pages = int(favoritos.count() / 5) + 1
    pagesRange = range(pages)
    if pages > 1:
        list = '<br>'
        # '«' link: previous page (or the current one when already on page 1).
        if int(number) > 1:
            list = list + "<center><div class='pagination'><a href='/" + user + "/" + str(int(number) - 1) + "'>«</a>"
        else:
            list = list + "<center><div class='pagination'><a href='/" + user + "/" + str(number) + "'>«</a>"
        for page in pagesRange:
            if page == (int(number) - 1):
                list = list + "<a class='active' href='/" + user + "/" + str(page + 1) + "'>" + str(page + 1) + "</a>"
            else:
                list = list + "<a href='/" + user + "/" + str(page + 1) + "'>" + str(page + 1) + "</a>"
        # '»' link: next page (or the current one when already on the last).
        if int(number) == pages:
            list = list + "<a href='/" + user + "/" + str(number) + "'>»</a></div></center></br>"
        else:
            list = list + "<a href='/" + user + "/" + str(int(number) + 1) + "'>»</a></div></center></br>"
    list = list + "<div id='scroll'><center>"
    # 'group' is a plain string: append the whole page at once (the original
    # concatenated it character by character).
    list = list + group
    if (list == '' or list == "<div id='scroll'><center>") and user != 'AnonymousUser':
        list = "<center><a class='titulos'>" + 'Para que aparezcan museos en esta página, ' + user + ' tiene que añadirlos.' + '</a></center></br></br>'
    elif (list == '' or list == "<div id='scroll'><center>") and user == 'AnonymousUser':
        list = "<center><a class='titulos'>" + 'Para ver tu página personal, primero tienes que loguearte.' + '</a></center></br></br>'
    else:
        list = list + "<center><a class='info' href='/" + user + "/xml'>XML del usuario</a></center>"
    list = list + '</center></div>'
    users = User.objects.all()
    userList = ''
    # Loop variable renamed from 'user' — the original shadowed the 'user'
    # parameter used above.
    for member in users:
        try:
            title = Titulo.objects.get(usuario = member.username)
            userList = userList + "<li><a href='/" + member.username + "'>" + title.titulo + ' - ' + member.username + "</a></li></br>"
        except Titulo.DoesNotExist:
            userList = userList + "<li><a href='/" + member.username + "'>Página de " + member.username + "</a></li></br>"
    style = ''
    if request.user.is_authenticated():
        login = 1
        try:
            color = Color.objects.get(usuario = request.user).color
        except Color.DoesNotExist:
            color = 'EEF4F8'  # default background colour
        try:
            letra = Letra.objects.get(usuario = request.user).letra
        except Letra.DoesNotExist:
            letra = '9'  # default font size in pt
        style = ("body{font-family: 'Helvetica', sans-serif;"
            "color: #444444;"
            "font-size: " + letra + "pt;"
            "background-color: #" + color + ";}")
    else:
        login = 0
    return HttpResponse(template.render(Context({'body': list, 'login': login, 'user': request.user, 'userList': userList, 'formato': style, 'markers': markers})))
def userXMLPage(request, user):
    """Serve *user*'s favorite museums as an XML document."""
    template = get_template("personalXML.xml")
    favoriteList = [favorite.museo for favorite in Favorito.objects.filter(usuario = user)]
    return HttpResponse(template.render(Context({'favoriteList': favoriteList, 'user': user})), content_type = "text/xml")
def XMLPage(request):
    """XML feed with (up to) the five most-commented museums site-wide."""
    template = get_template("personalXML.xml")
    user = ''
    topList = []
    # Slicing guards against rankings with fewer than five museums —
    # the original indexed topMuseums[0..4] unconditionally (IndexError).
    for museumID, commentCount in getRanking()[:5]:
        if commentCount != 0:
            topList = topList + [Museo.objects.get(ID_ENTIDAD = museumID)]
    return HttpResponse(template.render(Context({'favoriteList': topList, 'user': user})), content_type = "text/xml")
def XMLAccesiblePage(request):
    """XML feed with (up to) the five most-commented accessible museums."""
    template = get_template("personalXML.xml")
    user = ''
    topList = []
    # Slicing guards against rankings with fewer than five museums —
    # the original indexed topMuseums[0..4] unconditionally (IndexError).
    for museumID, commentCount in getAccessibleRanking()[:5]:
        if commentCount != 0:
            topList = topList + [Museo.objects.get(ID_ENTIDAD = museumID)]
    return HttpResponse(template.render(Context({'favoriteList': topList, 'user': user})), content_type = "text/xml")
def _savePreference(model, field, user, value):
    """Create or update the single *model* row for *user*, storing *value* in *field*."""
    try:
        pref = model.objects.get(usuario = user)
        setattr(pref, field, value)
    except model.DoesNotExist:
        pref = model(usuario = user, **{field: value})
    pref.save()


@csrf_exempt
def preferencesPage(request, user):
    """Preferences page: background colour ('color'), font size ('tamaño')
    and personal page title ('título').

    Each POST carries exactly one of those fields; the matching preference
    row is created or updated via _savePreference (the original repeated
    the same try/except block three times).
    """
    template = get_template("preferencias.html")
    if request.method == 'POST':
        if 'color' in request.POST:
            _savePreference(Color, 'color', user, request.POST['color'])
        elif 'tamaño' in request.POST:
            _savePreference(Letra, 'letra', user, request.POST['tamaño'])
        elif 'título' in request.POST:
            _savePreference(Titulo, 'titulo', user, request.POST['título'])
    style = ''
    if request.user.is_authenticated():
        login = 1
        try:
            color = Color.objects.get(usuario = request.user).color
        except Color.DoesNotExist:
            color = 'EEF4F8'  # default background colour
        try:
            letra = Letra.objects.get(usuario = request.user).letra
        except Letra.DoesNotExist:
            letra = '9'  # default font size in pt
        style = ("body{font-family: 'Helvetica', sans-serif;"
            "color: #444444;"
            "font-size: " + letra + "pt;"
            "background-color: #" + color + ";}")
    else:
        login = 0
    return HttpResponse(template.render(Context({'login': login, 'user': user, 'formato': style})))
def aboutPage(request):
    """Render the static 'about' page, themed with the viewer's preferences."""
    template = get_template('about.html')
    style = ''
    logged = 1 if request.user.is_authenticated() else 0
    if logged:
        # Fall back to the site defaults when the user never saved a preference.
        try:
            background = Color.objects.get(usuario = request.user).color
        except Color.DoesNotExist:
            background = 'EEF4F8'
        try:
            font_size = Letra.objects.get(usuario = request.user).letra
        except Letra.DoesNotExist:
            font_size = '9'
        style = ("body{font-family: 'Helvetica', sans-serif;"
                 "color: #444444;"
                 "font-size: " + font_size + "pt;"
                 "background-color: #" + background + ";}")
    return HttpResponse(template.render(Context({'login': logged, 'user': request.user, 'formato': style})))
def updateDB(request):
    """Import museums from web/museos.xml into the database.

    Pass 1 creates any missing Distrito rows; pass 2 inserts every museum
    whose ID-ENTIDAD is not stored yet.  Existing rows are never modified
    or deleted.  Any field missing from the XML defaults to the string
    'No disponible' (the original repeated 23 try/except KeyError blocks).
    Finishes by rendering the main page.
    """
    museos = parseXML('web/museos.xml')
    # Pass 1: make sure every referenced district exists.  The original
    # crashed with an uncaught KeyError when a museum had no DISTRITO field.
    for museo in museos:
        nombreDistrito = museos[museo].get('DISTRITO')
        if nombreDistrito is not None:
            Distrito.objects.get_or_create(nombre = nombreDistrito)
    # Pass 2: insert museums that are not already stored.
    for museo in museos:
        datos = museos[museo]
        try:
            distrito = Distrito.objects.get(nombre = datos['DISTRITO'])
        except KeyError:
            # Mirrors the original: a museum without district stores the
            # placeholder string in the DISTRITO field.
            distrito = 'No disponible'
        idEntidad = datos.get('ID-ENTIDAD', 'No disponible')
        try:
            Museo.objects.get(ID_ENTIDAD = idEntidad)
        except Museo.DoesNotExist:
            Museo(
                ID_ENTIDAD = idEntidad,
                NOMBRE = datos.get('NOMBRE', 'No disponible'),
                DESCRIPCION_ENTIDAD = datos.get('DESCRIPCION-ENTIDAD', 'No disponible'),
                HORARIO = datos.get('HORARIO', 'No disponible'),
                TRANSPORTE = datos.get('TRANSPORTE', 'No disponible'),
                ACCESIBILIDAD = datos.get('ACCESIBILIDAD', 'No disponible'),
                CONTENT_URL = datos.get('CONTENT-URL', 'No disponible'),
                NOMBRE_VIA = datos.get('NOMBRE-VIA', 'No disponible'),
                CLASE_VIAL = datos.get('CLASE-VIAL', 'No disponible'),
                TIPO_NUM = datos.get('TIPO-NUM', 'No disponible'),
                NUM = datos.get('NUM', 'No disponible'),
                LOCALIDAD = datos.get('LOCALIDAD', 'No disponible'),
                PROVINCIA = datos.get('PROVINCIA', 'No disponible'),
                CODIGO_POSTAL = datos.get('CODIGO-POSTAL', 'No disponible'),
                BARRIO = datos.get('BARRIO', 'No disponible'),
                DISTRITO = distrito,
                COORDENADA_X = datos.get('COORDENADA-X', 'No disponible'),
                COORDENADA_Y = datos.get('COORDENADA-Y', 'No disponible'),
                LATITUD = datos.get('LATITUD', 'No disponible'),
                LONGITUD = datos.get('LONGITUD', 'No disponible'),
                TELEFONO = datos.get('TELEFONO', 'No disponible'),
                FAX = datos.get('FAX', 'No disponible'),
                EMAIL = datos.get('EMAIL', 'No disponible'),
                TIPO = datos.get('TIPO', 'No disponible')).save()
    return mainPage(request)
|
flexible
|
{
"blob_id": "8b2911586e21162bec074732216c410c591f18a8",
"index": 6018,
"step-1": "<mask token>\n\n\ndef getMuseums():\n museos = Museo.objects.all()\n allMuseums = {}\n for museo in museos:\n allMuseums[museo.ID_ENTIDAD] = museo.comentario_set.count()\n return allMuseums\n\n\ndef getAccessibleMuseums():\n museos = Museo.objects.all()\n allMuseums = {}\n for museo in museos:\n if museo.ACCESIBILIDAD == '1':\n allMuseums[museo.ID_ENTIDAD] = museo.comentario_set.count()\n return allMuseums\n\n\ndef getRanking():\n allMuseums = getMuseums()\n ranking = sorted(allMuseums.items(), key=operator.itemgetter(1))\n ranking.reverse()\n return ranking\n\n\ndef getAccessibleRanking():\n allMuseums = getAccessibleMuseums()\n ranking = sorted(allMuseums.items(), key=operator.itemgetter(1))\n ranking.reverse()\n return ranking\n\n\n@csrf_exempt\ndef mainPage(request):\n template = get_template('index.html')\n topFive = range(5)\n list = '<br>'\n markers = ''\n if request.method == 'GET' or request.method == 'POST' and request.POST[\n 'accion'] == 'mostrar':\n ranking = getRanking()\n list = (list +\n \"<center><form action='/' method='post'><input type='hidden' name='accion' value='ocultar'>\"\n +\n \"<input class='desplegable' type='submit' value='Mostrar museos accesibles'></form></center><div id='scroll'>\"\n )\n if len(ranking) > 0:\n for item in topFive:\n if ranking[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=ranking[item][0])\n list = (list + \"<center><a class='titulos' href=\" +\n museum.CONTENT_URL + '>' + museum.NOMBRE +\n '</a><br><b>' + str(museum.comentario_set.count()) +\n ' Comentarios - ' + str(museum.like_set.count()) +\n ' Likes</b></br></br>')\n list = (list + \"<a class='direccion'>\" + museum.\n CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' +\n museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'\n )\n list = (list + \"<a class='info' href=\" + '/museos/' +\n museum.ID_ENTIDAD +\n '/>Más información</a></center></br></br>')\n if (museum.LATITUD != 'No disponible' and museum.\n LONGITUD != 'No disponible'):\n 
markers = (markers + 'var ' + 'X' + museum.\n ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n 'var ' + 'X' + museum.ID_ENTIDAD +\n 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museum.LATITUD + ', lng: ' +\n museum.LONGITUD + ' },map: map});' + 'X' +\n museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" +\n 'X' + museum.ID_ENTIDAD + 'info.open(map,' +\n 'X' + museum.ID_ENTIDAD + 'marker);' + '});')\n if ranking[0][1] == 0:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n else:\n list = list + '</div>'\n list = (list +\n \"<center><a class='info' href='/xml'>XML de la página</a></center>\"\n )\n else:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos con comentarios, ¡sé el primero en comentar!' +\n '</center></a></br></br></div>')\n elif request.method == 'POST' and request.POST['accion'] == 'ocultar':\n ranking = getAccessibleRanking()\n list = (list +\n \"<center><form action='/' method='post'><input type='hidden' name='accion' value='mostrar'>\"\n +\n \"<input class='desplegable' type='submit' value='Mostrar todos los museos'></form></center><div id='scroll'>\"\n )\n if len(ranking) > 0:\n for item in topFive:\n if ranking[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=ranking[item][0])\n list = (list + \"<center><a class='titulos' href=\" +\n museum.CONTENT_URL + '>' + museum.NOMBRE +\n '</a><br><b>' + str(museum.comentario_set.count()) +\n ' Comentarios - ' + str(museum.like_set.count()) +\n ' Likes</b></br></br>')\n list = (list + \"<a class='direccion'>\" + museum.\n CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' +\n museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'\n )\n list = (list + \"<a class='info' href=\" + '/museos/' +\n museum.ID_ENTIDAD +\n '/>Más información</a></center></br></br>')\n if (museum.LATITUD != 'No disponbile' 
and museum.\n LONGITUD != 'No disponible'):\n markers = (markers + 'var ' + 'X' + museum.\n ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n 'var ' + 'X' + museum.ID_ENTIDAD +\n 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museum.LATITUD + ', lng: ' +\n museum.LONGITUD + ' },map: map});' + 'X' +\n museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" +\n 'X' + museum.ID_ENTIDAD + 'info.open(map,' +\n 'X' + museum.ID_ENTIDAD + 'marker);' + '});')\n if ranking[0][1] == 0:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n else:\n list = list + '</div>'\n list = (list +\n \"<center><a class='info' href='/xml'>XML de la página</a></center>\"\n )\n else:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n users = User.objects.all()\n userList = ''\n for user in users:\n try:\n title = Titulo.objects.get(usuario=user.username)\n userList = (userList + \"<li><a href='/\" + user.username + \"'>\" +\n title.titulo + ' - ' + user.username + '</a></li></br>')\n except Titulo.DoesNotExist:\n userList = (userList + \"<li><a href='/\" + user.username +\n \"'>Página de \" + user.username + '</a></li></br>')\n return HttpResponse(template.render(Context({'body': list, 'login':\n login, 'user': request.user, 
'userList': userList, 'formato': style,\n 'markers': markers})))\n\n\n@csrf_exempt\ndef museumsPage(request):\n template = get_template('museos.html')\n if request.method == 'GET':\n museos = Museo.objects.all()\n elif request.method == 'POST':\n distrito = Distrito.objects.get(nombre=request.POST['distrito'])\n museos = distrito.museo_set.all()\n list = ''\n markers = ''\n i = 1\n for museo in museos:\n list = (list + \"<center><a class='titulos'>\" + museo.NOMBRE +\n '</a></br>')\n list = (list + \"<a class='info' href=\" + '/museos/' + museo.\n ID_ENTIDAD + '/>Más información</a></center></br></br>')\n if (museo.LATITUD != 'No disponible' and museo.LONGITUD !=\n 'No disponible'):\n markers = (markers + 'var ' + 'X' + museo.ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' + \"content:'<h1>\" +\n museo.NOMBRE + \"</h1>'});\" + 'var ' + 'X' + museo.\n ID_ENTIDAD + 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museo.LATITUD + ', lng: ' + museo.\n LONGITUD + ' },map: map});' + 'X' + museo.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" + 'X' + museo.\n ID_ENTIDAD + 'info.open(map,' + 'X' + museo.ID_ENTIDAD +\n 'marker);' + '});')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n distritos = Distrito.objects.all()\n districtList = ''\n for distrito in distritos:\n districtList = (districtList + \"<option value='\" + distrito.nombre +\n \"'>\" + distrito.nombre + '</option>')\n return HttpResponse(template.render(Context({'body': list, 'login':\n login, 'user': request.user, 'districtList': districtList,\n 'formato': 
style, 'markers': markers})))\n\n\n<mask token>\n\n\n@csrf_exempt\ndef loginPage(request):\n if request.method == 'POST':\n if not request.user.is_authenticated() and 'login' in request.POST:\n username = request.POST['Usuario']\n password = request.POST['Contraseña']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n elif not request.user.is_authenticated(\n ) and 'registro' in request.POST:\n username = request.POST['Usuario']\n password = request.POST['Contraseña']\n try:\n user = User.objects.get(username=username)\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n except User.DoesNotExist:\n user = User.objects.create_user(username=username, password\n =password)\n user.save()\n request.method = 'GET'\n return mainPage(request)\n\n\n<mask token>\n\n\ndef XMLAccesiblePage(request):\n template = get_template('personalXML.xml')\n user = ''\n topList = []\n topMuseums = getAccessibleRanking()\n topFive = range(5)\n for item in topFive:\n if topMuseums[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=topMuseums[item][0])\n topList = topList + [museum]\n return HttpResponse(template.render(Context({'favoriteList': topList,\n 'user': user})), content_type='text/xml')\n\n\n<mask token>\n\n\ndef aboutPage(request):\n template = get_template('about.html')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n return HttpResponse(template.render(Context({'login': login, 'user':\n request.user, 'formato': style})))\n\n\ndef 
updateDB(request):\n museos = parseXML('web/museos.xml')\n for museo in museos:\n try:\n distrito = Distrito.objects.get(nombre=museos[museo]['DISTRITO'])\n except Distrito.DoesNotExist:\n distrito = Distrito(nombre=museos[museo]['DISTRITO'])\n distrito.save()\n for museo in museos:\n try:\n A = museos[museo]['ID-ENTIDAD']\n except KeyError:\n A = 'No disponible'\n try:\n B = museos[museo]['NOMBRE']\n except KeyError:\n B = 'No disponible'\n try:\n C = museos[museo]['DESCRIPCION-ENTIDAD']\n except KeyError:\n C = 'No disponible'\n try:\n D = museos[museo]['HORARIO']\n except KeyError:\n D = 'No disponible'\n try:\n E = museos[museo]['TRANSPORTE']\n except KeyError:\n E = 'No disponible'\n try:\n F = museos[museo]['ACCESIBILIDAD']\n except KeyError:\n F = 'No disponible'\n try:\n G = museos[museo]['CONTENT-URL']\n except KeyError:\n G = 'No disponible'\n try:\n H = museos[museo]['NOMBRE-VIA']\n except KeyError:\n H = 'No disponible'\n try:\n I = museos[museo]['CLASE-VIAL']\n except KeyError:\n I = 'No disponible'\n try:\n J = museos[museo]['TIPO-NUM']\n except KeyError:\n J = 'No disponible'\n try:\n K = museos[museo]['NUM']\n except KeyError:\n K = 'No disponible'\n try:\n L = museos[museo]['LOCALIDAD']\n except KeyError:\n L = 'No disponible'\n try:\n M = museos[museo]['PROVINCIA']\n except KeyError:\n M = 'No disponible'\n try:\n N = museos[museo]['CODIGO-POSTAL']\n except KeyError:\n N = 'No disponible'\n try:\n Ñ = museos[museo]['BARRIO']\n except KeyError:\n Ñ = 'No disponible'\n try:\n O = Distrito.objects.get(nombre=museos[museo]['DISTRITO'])\n except KeyError:\n O = 'No disponible'\n try:\n P = museos[museo]['COORDENADA-X']\n except KeyError:\n P = 'No disponible'\n try:\n Q = museos[museo]['COORDENADA-Y']\n except KeyError:\n Q = 'No disponible'\n try:\n R = museos[museo]['LATITUD']\n except KeyError:\n R = 'No disponible'\n try:\n S = museos[museo]['LONGITUD']\n except KeyError:\n S = 'No disponible'\n try:\n T = museos[museo]['TELEFONO']\n except 
KeyError:\n T = 'No disponible'\n try:\n U = museos[museo]['FAX']\n except KeyError:\n U = 'No disponible'\n try:\n V = museos[museo]['EMAIL']\n except KeyError:\n V = 'No disponible'\n try:\n W = museos[museo]['TIPO']\n except KeyError:\n W = 'No disponible'\n try:\n viejoMuseo = Museo.objects.get(ID_ENTIDAD=A)\n except Museo.DoesNotExist:\n nuevoMuseo = Museo(ID_ENTIDAD=A, NOMBRE=B, DESCRIPCION_ENTIDAD=\n C, HORARIO=D, TRANSPORTE=E, ACCESIBILIDAD=F, CONTENT_URL=G,\n NOMBRE_VIA=H, CLASE_VIAL=I, TIPO_NUM=J, NUM=K, LOCALIDAD=L,\n PROVINCIA=M, CODIGO_POSTAL=N, BARRIO=Ñ, DISTRITO=O,\n COORDENADA_X=P, COORDENADA_Y=Q, LATITUD=R, LONGITUD=S,\n TELEFONO=T, FAX=U, EMAIL=V, TIPO=W)\n nuevoMuseo.save()\n return mainPage(request)\n",
"step-2": "<mask token>\n\n\ndef getMuseums():\n museos = Museo.objects.all()\n allMuseums = {}\n for museo in museos:\n allMuseums[museo.ID_ENTIDAD] = museo.comentario_set.count()\n return allMuseums\n\n\ndef getAccessibleMuseums():\n museos = Museo.objects.all()\n allMuseums = {}\n for museo in museos:\n if museo.ACCESIBILIDAD == '1':\n allMuseums[museo.ID_ENTIDAD] = museo.comentario_set.count()\n return allMuseums\n\n\ndef getRanking():\n allMuseums = getMuseums()\n ranking = sorted(allMuseums.items(), key=operator.itemgetter(1))\n ranking.reverse()\n return ranking\n\n\ndef getAccessibleRanking():\n allMuseums = getAccessibleMuseums()\n ranking = sorted(allMuseums.items(), key=operator.itemgetter(1))\n ranking.reverse()\n return ranking\n\n\n@csrf_exempt\ndef mainPage(request):\n template = get_template('index.html')\n topFive = range(5)\n list = '<br>'\n markers = ''\n if request.method == 'GET' or request.method == 'POST' and request.POST[\n 'accion'] == 'mostrar':\n ranking = getRanking()\n list = (list +\n \"<center><form action='/' method='post'><input type='hidden' name='accion' value='ocultar'>\"\n +\n \"<input class='desplegable' type='submit' value='Mostrar museos accesibles'></form></center><div id='scroll'>\"\n )\n if len(ranking) > 0:\n for item in topFive:\n if ranking[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=ranking[item][0])\n list = (list + \"<center><a class='titulos' href=\" +\n museum.CONTENT_URL + '>' + museum.NOMBRE +\n '</a><br><b>' + str(museum.comentario_set.count()) +\n ' Comentarios - ' + str(museum.like_set.count()) +\n ' Likes</b></br></br>')\n list = (list + \"<a class='direccion'>\" + museum.\n CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' +\n museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'\n )\n list = (list + \"<a class='info' href=\" + '/museos/' +\n museum.ID_ENTIDAD +\n '/>Más información</a></center></br></br>')\n if (museum.LATITUD != 'No disponible' and museum.\n LONGITUD != 'No disponible'):\n 
markers = (markers + 'var ' + 'X' + museum.\n ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n 'var ' + 'X' + museum.ID_ENTIDAD +\n 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museum.LATITUD + ', lng: ' +\n museum.LONGITUD + ' },map: map});' + 'X' +\n museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" +\n 'X' + museum.ID_ENTIDAD + 'info.open(map,' +\n 'X' + museum.ID_ENTIDAD + 'marker);' + '});')\n if ranking[0][1] == 0:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n else:\n list = list + '</div>'\n list = (list +\n \"<center><a class='info' href='/xml'>XML de la página</a></center>\"\n )\n else:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos con comentarios, ¡sé el primero en comentar!' +\n '</center></a></br></br></div>')\n elif request.method == 'POST' and request.POST['accion'] == 'ocultar':\n ranking = getAccessibleRanking()\n list = (list +\n \"<center><form action='/' method='post'><input type='hidden' name='accion' value='mostrar'>\"\n +\n \"<input class='desplegable' type='submit' value='Mostrar todos los museos'></form></center><div id='scroll'>\"\n )\n if len(ranking) > 0:\n for item in topFive:\n if ranking[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=ranking[item][0])\n list = (list + \"<center><a class='titulos' href=\" +\n museum.CONTENT_URL + '>' + museum.NOMBRE +\n '</a><br><b>' + str(museum.comentario_set.count()) +\n ' Comentarios - ' + str(museum.like_set.count()) +\n ' Likes</b></br></br>')\n list = (list + \"<a class='direccion'>\" + museum.\n CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' +\n museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'\n )\n list = (list + \"<a class='info' href=\" + '/museos/' +\n museum.ID_ENTIDAD +\n '/>Más información</a></center></br></br>')\n if (museum.LATITUD != 'No disponbile' 
and museum.\n LONGITUD != 'No disponible'):\n markers = (markers + 'var ' + 'X' + museum.\n ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n 'var ' + 'X' + museum.ID_ENTIDAD +\n 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museum.LATITUD + ', lng: ' +\n museum.LONGITUD + ' },map: map});' + 'X' +\n museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" +\n 'X' + museum.ID_ENTIDAD + 'info.open(map,' +\n 'X' + museum.ID_ENTIDAD + 'marker);' + '});')\n if ranking[0][1] == 0:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n else:\n list = list + '</div>'\n list = (list +\n \"<center><a class='info' href='/xml'>XML de la página</a></center>\"\n )\n else:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n users = User.objects.all()\n userList = ''\n for user in users:\n try:\n title = Titulo.objects.get(usuario=user.username)\n userList = (userList + \"<li><a href='/\" + user.username + \"'>\" +\n title.titulo + ' - ' + user.username + '</a></li></br>')\n except Titulo.DoesNotExist:\n userList = (userList + \"<li><a href='/\" + user.username +\n \"'>Página de \" + user.username + '</a></li></br>')\n return HttpResponse(template.render(Context({'body': list, 'login':\n login, 'user': request.user, 
'userList': userList, 'formato': style,\n 'markers': markers})))\n\n\n@csrf_exempt\ndef museumsPage(request):\n template = get_template('museos.html')\n if request.method == 'GET':\n museos = Museo.objects.all()\n elif request.method == 'POST':\n distrito = Distrito.objects.get(nombre=request.POST['distrito'])\n museos = distrito.museo_set.all()\n list = ''\n markers = ''\n i = 1\n for museo in museos:\n list = (list + \"<center><a class='titulos'>\" + museo.NOMBRE +\n '</a></br>')\n list = (list + \"<a class='info' href=\" + '/museos/' + museo.\n ID_ENTIDAD + '/>Más información</a></center></br></br>')\n if (museo.LATITUD != 'No disponible' and museo.LONGITUD !=\n 'No disponible'):\n markers = (markers + 'var ' + 'X' + museo.ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' + \"content:'<h1>\" +\n museo.NOMBRE + \"</h1>'});\" + 'var ' + 'X' + museo.\n ID_ENTIDAD + 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museo.LATITUD + ', lng: ' + museo.\n LONGITUD + ' },map: map});' + 'X' + museo.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" + 'X' + museo.\n ID_ENTIDAD + 'info.open(map,' + 'X' + museo.ID_ENTIDAD +\n 'marker);' + '});')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n distritos = Distrito.objects.all()\n districtList = ''\n for distrito in distritos:\n districtList = (districtList + \"<option value='\" + distrito.nombre +\n \"'>\" + distrito.nombre + '</option>')\n return HttpResponse(template.render(Context({'body': list, 'login':\n login, 'user': request.user, 'districtList': districtList,\n 'formato': 
style, 'markers': markers})))\n\n\n<mask token>\n\n\n@csrf_exempt\ndef loginPage(request):\n if request.method == 'POST':\n if not request.user.is_authenticated() and 'login' in request.POST:\n username = request.POST['Usuario']\n password = request.POST['Contraseña']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n elif not request.user.is_authenticated(\n ) and 'registro' in request.POST:\n username = request.POST['Usuario']\n password = request.POST['Contraseña']\n try:\n user = User.objects.get(username=username)\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n except User.DoesNotExist:\n user = User.objects.create_user(username=username, password\n =password)\n user.save()\n request.method = 'GET'\n return mainPage(request)\n\n\n<mask token>\n\n\ndef userPage(request, user, number):\n if number == None:\n number = 1\n template = get_template('personal.html')\n listTotal = ''\n favoritos = Favorito.objects.filter(usuario=user)\n group = range(5)\n count = 0\n markers = ''\n for favorito in favoritos:\n count = count + 1\n museum = Museo.objects.get(NOMBRE=favorito.museo)\n listTotal = (listTotal + \"<a class='titulos' href=\" + museum.\n CONTENT_URL + '>' + museum.NOMBRE + '</a><br><b>' + str(museum.\n comentario_set.count()) + ' Comentarios - ' + str(museum.\n like_set.count()) + ' Likes</b></br></br>')\n listTotal = (listTotal + \"<a class='direccion'>\" + museum.\n CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM +\n ', ' + museum.LOCALIDAD + '</a></br></br>')\n listTotal = (listTotal + \"<a class='info' href=\" + '/museos/' +\n museum.ID_ENTIDAD +\n '/>Más información</a> <b>Fecha de guardado:' + (datetime.\n timedelta(hours=2) + favorito.fecha).strftime(\n '%H:%M:%S %d-%m-%Y') + '</b></br></br></br>')\n if (museum.LATITUD != 'No disponible' and museum.LONGITUD !=\n 'No disponible'):\n markers = (markers + 'var ' + 'X' + 
museum.ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' + \"content:'<h1>\" +\n museum.NOMBRE + \"</h1>'});\" + 'var ' + 'X' + museum.\n ID_ENTIDAD + 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museum.LATITUD + ', lng: ' + museum.\n LONGITUD + ' },map: map});' + 'X' + museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" + 'X' + museum.\n ID_ENTIDAD + 'info.open(map,' + 'X' + museum.ID_ENTIDAD +\n 'marker);' + '});')\n if count % 5 == 0:\n listTotal = listTotal + ';'\n group = listTotal.split(';')[int(number) - 1]\n list = ''\n if favoritos.count() % 5 == 0:\n pages = int(favoritos.count() / 5)\n else:\n pages = int(favoritos.count() / 5) + 1\n pagesRange = range(pages)\n if pages > 1:\n list = '<br>'\n if int(number) > 1:\n list = (list + \"<center><div class='pagination'><a href='/\" +\n user + '/' + str(int(number) - 1) + \"'>«</a>\")\n else:\n list = (list + \"<center><div class='pagination'><a href='/\" +\n user + '/' + str(number) + \"'>«</a>\")\n for page in pagesRange:\n if page == int(number) - 1:\n list = list + \"<a class='active' href='/\" + user + '/' + str(\n page + 1) + \"'>\" + str(page + 1) + '</a>'\n else:\n list = list + \"<a href='/\" + user + '/' + str(page + 1\n ) + \"'>\" + str(page + 1) + '</a>'\n if int(number) == pages:\n list = list + \"<a href='/\" + user + '/' + str(number\n ) + \"'>»</a></div></center></br>\"\n else:\n list = list + \"<a href='/\" + user + '/' + str(int(number) + 1\n ) + \"'>»</a></div></center></br>\"\n list = list + \"<div id='scroll'><center>\"\n for item in group:\n list = list + item\n if (list == '' or list == \"<div id='scroll'><center>\"\n ) and user != 'AnonymousUser':\n list = (\"<center><a class='titulos'>\" +\n 'Para que aparezcan museos en esta página, ' + user +\n ' tiene que añadirlos.' 
+ '</a></center></br></br>')\n elif (list == '' or list == \"<div id='scroll'><center>\"\n ) and user == 'AnonymousUser':\n list = (\"<center><a class='titulos'>\" +\n 'Para ver tu página personal, primero tienes que loguearte.' +\n '</a></center></br></br>')\n else:\n list = (list + \"<center><a class='info' href='/\" + user +\n \"/xml'>XML del usuario</a></center>\")\n list = list + '</center></div>'\n users = User.objects.all()\n userList = ''\n for user in users:\n try:\n title = Titulo.objects.get(usuario=user.username)\n userList = (userList + \"<li><a href='/\" + user.username + \"'>\" +\n title.titulo + ' - ' + user.username + '</a></li></br>')\n except Titulo.DoesNotExist:\n userList = (userList + \"<li><a href='/\" + user.username +\n \"'>Página de \" + user.username + '</a></li></br>')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n return HttpResponse(template.render(Context({'body': list, 'login':\n login, 'user': request.user, 'userList': userList, 'formato': style,\n 'markers': markers})))\n\n\n<mask token>\n\n\ndef XMLAccesiblePage(request):\n template = get_template('personalXML.xml')\n user = ''\n topList = []\n topMuseums = getAccessibleRanking()\n topFive = range(5)\n for item in topFive:\n if topMuseums[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=topMuseums[item][0])\n topList = topList + [museum]\n return HttpResponse(template.render(Context({'favoriteList': topList,\n 'user': user})), content_type='text/xml')\n\n\n<mask token>\n\n\ndef aboutPage(request):\n template = get_template('about.html')\n style = ''\n if 
request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n return HttpResponse(template.render(Context({'login': login, 'user':\n request.user, 'formato': style})))\n\n\ndef updateDB(request):\n museos = parseXML('web/museos.xml')\n for museo in museos:\n try:\n distrito = Distrito.objects.get(nombre=museos[museo]['DISTRITO'])\n except Distrito.DoesNotExist:\n distrito = Distrito(nombre=museos[museo]['DISTRITO'])\n distrito.save()\n for museo in museos:\n try:\n A = museos[museo]['ID-ENTIDAD']\n except KeyError:\n A = 'No disponible'\n try:\n B = museos[museo]['NOMBRE']\n except KeyError:\n B = 'No disponible'\n try:\n C = museos[museo]['DESCRIPCION-ENTIDAD']\n except KeyError:\n C = 'No disponible'\n try:\n D = museos[museo]['HORARIO']\n except KeyError:\n D = 'No disponible'\n try:\n E = museos[museo]['TRANSPORTE']\n except KeyError:\n E = 'No disponible'\n try:\n F = museos[museo]['ACCESIBILIDAD']\n except KeyError:\n F = 'No disponible'\n try:\n G = museos[museo]['CONTENT-URL']\n except KeyError:\n G = 'No disponible'\n try:\n H = museos[museo]['NOMBRE-VIA']\n except KeyError:\n H = 'No disponible'\n try:\n I = museos[museo]['CLASE-VIAL']\n except KeyError:\n I = 'No disponible'\n try:\n J = museos[museo]['TIPO-NUM']\n except KeyError:\n J = 'No disponible'\n try:\n K = museos[museo]['NUM']\n except KeyError:\n K = 'No disponible'\n try:\n L = museos[museo]['LOCALIDAD']\n except KeyError:\n L = 'No disponible'\n try:\n M = museos[museo]['PROVINCIA']\n except KeyError:\n M = 'No disponible'\n try:\n N = museos[museo]['CODIGO-POSTAL']\n except KeyError:\n N = 'No 
disponible'\n try:\n Ñ = museos[museo]['BARRIO']\n except KeyError:\n Ñ = 'No disponible'\n try:\n O = Distrito.objects.get(nombre=museos[museo]['DISTRITO'])\n except KeyError:\n O = 'No disponible'\n try:\n P = museos[museo]['COORDENADA-X']\n except KeyError:\n P = 'No disponible'\n try:\n Q = museos[museo]['COORDENADA-Y']\n except KeyError:\n Q = 'No disponible'\n try:\n R = museos[museo]['LATITUD']\n except KeyError:\n R = 'No disponible'\n try:\n S = museos[museo]['LONGITUD']\n except KeyError:\n S = 'No disponible'\n try:\n T = museos[museo]['TELEFONO']\n except KeyError:\n T = 'No disponible'\n try:\n U = museos[museo]['FAX']\n except KeyError:\n U = 'No disponible'\n try:\n V = museos[museo]['EMAIL']\n except KeyError:\n V = 'No disponible'\n try:\n W = museos[museo]['TIPO']\n except KeyError:\n W = 'No disponible'\n try:\n viejoMuseo = Museo.objects.get(ID_ENTIDAD=A)\n except Museo.DoesNotExist:\n nuevoMuseo = Museo(ID_ENTIDAD=A, NOMBRE=B, DESCRIPCION_ENTIDAD=\n C, HORARIO=D, TRANSPORTE=E, ACCESIBILIDAD=F, CONTENT_URL=G,\n NOMBRE_VIA=H, CLASE_VIAL=I, TIPO_NUM=J, NUM=K, LOCALIDAD=L,\n PROVINCIA=M, CODIGO_POSTAL=N, BARRIO=Ñ, DISTRITO=O,\n COORDENADA_X=P, COORDENADA_Y=Q, LATITUD=R, LONGITUD=S,\n TELEFONO=T, FAX=U, EMAIL=V, TIPO=W)\n nuevoMuseo.save()\n return mainPage(request)\n",
"step-3": "<mask token>\n\n\ndef getMuseums():\n museos = Museo.objects.all()\n allMuseums = {}\n for museo in museos:\n allMuseums[museo.ID_ENTIDAD] = museo.comentario_set.count()\n return allMuseums\n\n\ndef getAccessibleMuseums():\n museos = Museo.objects.all()\n allMuseums = {}\n for museo in museos:\n if museo.ACCESIBILIDAD == '1':\n allMuseums[museo.ID_ENTIDAD] = museo.comentario_set.count()\n return allMuseums\n\n\ndef getRanking():\n allMuseums = getMuseums()\n ranking = sorted(allMuseums.items(), key=operator.itemgetter(1))\n ranking.reverse()\n return ranking\n\n\ndef getAccessibleRanking():\n allMuseums = getAccessibleMuseums()\n ranking = sorted(allMuseums.items(), key=operator.itemgetter(1))\n ranking.reverse()\n return ranking\n\n\n@csrf_exempt\ndef mainPage(request):\n template = get_template('index.html')\n topFive = range(5)\n list = '<br>'\n markers = ''\n if request.method == 'GET' or request.method == 'POST' and request.POST[\n 'accion'] == 'mostrar':\n ranking = getRanking()\n list = (list +\n \"<center><form action='/' method='post'><input type='hidden' name='accion' value='ocultar'>\"\n +\n \"<input class='desplegable' type='submit' value='Mostrar museos accesibles'></form></center><div id='scroll'>\"\n )\n if len(ranking) > 0:\n for item in topFive:\n if ranking[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=ranking[item][0])\n list = (list + \"<center><a class='titulos' href=\" +\n museum.CONTENT_URL + '>' + museum.NOMBRE +\n '</a><br><b>' + str(museum.comentario_set.count()) +\n ' Comentarios - ' + str(museum.like_set.count()) +\n ' Likes</b></br></br>')\n list = (list + \"<a class='direccion'>\" + museum.\n CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' +\n museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'\n )\n list = (list + \"<a class='info' href=\" + '/museos/' +\n museum.ID_ENTIDAD +\n '/>Más información</a></center></br></br>')\n if (museum.LATITUD != 'No disponible' and museum.\n LONGITUD != 'No disponible'):\n 
markers = (markers + 'var ' + 'X' + museum.\n ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n 'var ' + 'X' + museum.ID_ENTIDAD +\n 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museum.LATITUD + ', lng: ' +\n museum.LONGITUD + ' },map: map});' + 'X' +\n museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" +\n 'X' + museum.ID_ENTIDAD + 'info.open(map,' +\n 'X' + museum.ID_ENTIDAD + 'marker);' + '});')\n if ranking[0][1] == 0:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n else:\n list = list + '</div>'\n list = (list +\n \"<center><a class='info' href='/xml'>XML de la página</a></center>\"\n )\n else:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos con comentarios, ¡sé el primero en comentar!' +\n '</center></a></br></br></div>')\n elif request.method == 'POST' and request.POST['accion'] == 'ocultar':\n ranking = getAccessibleRanking()\n list = (list +\n \"<center><form action='/' method='post'><input type='hidden' name='accion' value='mostrar'>\"\n +\n \"<input class='desplegable' type='submit' value='Mostrar todos los museos'></form></center><div id='scroll'>\"\n )\n if len(ranking) > 0:\n for item in topFive:\n if ranking[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=ranking[item][0])\n list = (list + \"<center><a class='titulos' href=\" +\n museum.CONTENT_URL + '>' + museum.NOMBRE +\n '</a><br><b>' + str(museum.comentario_set.count()) +\n ' Comentarios - ' + str(museum.like_set.count()) +\n ' Likes</b></br></br>')\n list = (list + \"<a class='direccion'>\" + museum.\n CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' +\n museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'\n )\n list = (list + \"<a class='info' href=\" + '/museos/' +\n museum.ID_ENTIDAD +\n '/>Más información</a></center></br></br>')\n if (museum.LATITUD != 'No disponbile' 
and museum.\n LONGITUD != 'No disponible'):\n markers = (markers + 'var ' + 'X' + museum.\n ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n 'var ' + 'X' + museum.ID_ENTIDAD +\n 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museum.LATITUD + ', lng: ' +\n museum.LONGITUD + ' },map: map});' + 'X' +\n museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" +\n 'X' + museum.ID_ENTIDAD + 'info.open(map,' +\n 'X' + museum.ID_ENTIDAD + 'marker);' + '});')\n if ranking[0][1] == 0:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n else:\n list = list + '</div>'\n list = (list +\n \"<center><a class='info' href='/xml'>XML de la página</a></center>\"\n )\n else:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n users = User.objects.all()\n userList = ''\n for user in users:\n try:\n title = Titulo.objects.get(usuario=user.username)\n userList = (userList + \"<li><a href='/\" + user.username + \"'>\" +\n title.titulo + ' - ' + user.username + '</a></li></br>')\n except Titulo.DoesNotExist:\n userList = (userList + \"<li><a href='/\" + user.username +\n \"'>Página de \" + user.username + '</a></li></br>')\n return HttpResponse(template.render(Context({'body': list, 'login':\n login, 'user': request.user, 
'userList': userList, 'formato': style,\n 'markers': markers})))\n\n\n@csrf_exempt\ndef museumsPage(request):\n template = get_template('museos.html')\n if request.method == 'GET':\n museos = Museo.objects.all()\n elif request.method == 'POST':\n distrito = Distrito.objects.get(nombre=request.POST['distrito'])\n museos = distrito.museo_set.all()\n list = ''\n markers = ''\n i = 1\n for museo in museos:\n list = (list + \"<center><a class='titulos'>\" + museo.NOMBRE +\n '</a></br>')\n list = (list + \"<a class='info' href=\" + '/museos/' + museo.\n ID_ENTIDAD + '/>Más información</a></center></br></br>')\n if (museo.LATITUD != 'No disponible' and museo.LONGITUD !=\n 'No disponible'):\n markers = (markers + 'var ' + 'X' + museo.ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' + \"content:'<h1>\" +\n museo.NOMBRE + \"</h1>'});\" + 'var ' + 'X' + museo.\n ID_ENTIDAD + 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museo.LATITUD + ', lng: ' + museo.\n LONGITUD + ' },map: map});' + 'X' + museo.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" + 'X' + museo.\n ID_ENTIDAD + 'info.open(map,' + 'X' + museo.ID_ENTIDAD +\n 'marker);' + '});')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n distritos = Distrito.objects.all()\n districtList = ''\n for distrito in distritos:\n districtList = (districtList + \"<option value='\" + distrito.nombre +\n \"'>\" + distrito.nombre + '</option>')\n return HttpResponse(template.render(Context({'body': list, 'login':\n login, 'user': request.user, 'districtList': districtList,\n 'formato': 
style, 'markers': markers})))\n\n\n<mask token>\n\n\n@csrf_exempt\ndef loginPage(request):\n if request.method == 'POST':\n if not request.user.is_authenticated() and 'login' in request.POST:\n username = request.POST['Usuario']\n password = request.POST['Contraseña']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n elif not request.user.is_authenticated(\n ) and 'registro' in request.POST:\n username = request.POST['Usuario']\n password = request.POST['Contraseña']\n try:\n user = User.objects.get(username=username)\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n except User.DoesNotExist:\n user = User.objects.create_user(username=username, password\n =password)\n user.save()\n request.method = 'GET'\n return mainPage(request)\n\n\ndef logoutPage(request):\n logout(request)\n return mainPage(request)\n\n\ndef userPage(request, user, number):\n if number == None:\n number = 1\n template = get_template('personal.html')\n listTotal = ''\n favoritos = Favorito.objects.filter(usuario=user)\n group = range(5)\n count = 0\n markers = ''\n for favorito in favoritos:\n count = count + 1\n museum = Museo.objects.get(NOMBRE=favorito.museo)\n listTotal = (listTotal + \"<a class='titulos' href=\" + museum.\n CONTENT_URL + '>' + museum.NOMBRE + '</a><br><b>' + str(museum.\n comentario_set.count()) + ' Comentarios - ' + str(museum.\n like_set.count()) + ' Likes</b></br></br>')\n listTotal = (listTotal + \"<a class='direccion'>\" + museum.\n CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM +\n ', ' + museum.LOCALIDAD + '</a></br></br>')\n listTotal = (listTotal + \"<a class='info' href=\" + '/museos/' +\n museum.ID_ENTIDAD +\n '/>Más información</a> <b>Fecha de guardado:' + (datetime.\n timedelta(hours=2) + favorito.fecha).strftime(\n '%H:%M:%S %d-%m-%Y') + '</b></br></br></br>')\n if (museum.LATITUD != 'No disponible' and museum.LONGITUD !=\n 'No 
disponible'):\n markers = (markers + 'var ' + 'X' + museum.ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' + \"content:'<h1>\" +\n museum.NOMBRE + \"</h1>'});\" + 'var ' + 'X' + museum.\n ID_ENTIDAD + 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museum.LATITUD + ', lng: ' + museum.\n LONGITUD + ' },map: map});' + 'X' + museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" + 'X' + museum.\n ID_ENTIDAD + 'info.open(map,' + 'X' + museum.ID_ENTIDAD +\n 'marker);' + '});')\n if count % 5 == 0:\n listTotal = listTotal + ';'\n group = listTotal.split(';')[int(number) - 1]\n list = ''\n if favoritos.count() % 5 == 0:\n pages = int(favoritos.count() / 5)\n else:\n pages = int(favoritos.count() / 5) + 1\n pagesRange = range(pages)\n if pages > 1:\n list = '<br>'\n if int(number) > 1:\n list = (list + \"<center><div class='pagination'><a href='/\" +\n user + '/' + str(int(number) - 1) + \"'>«</a>\")\n else:\n list = (list + \"<center><div class='pagination'><a href='/\" +\n user + '/' + str(number) + \"'>«</a>\")\n for page in pagesRange:\n if page == int(number) - 1:\n list = list + \"<a class='active' href='/\" + user + '/' + str(\n page + 1) + \"'>\" + str(page + 1) + '</a>'\n else:\n list = list + \"<a href='/\" + user + '/' + str(page + 1\n ) + \"'>\" + str(page + 1) + '</a>'\n if int(number) == pages:\n list = list + \"<a href='/\" + user + '/' + str(number\n ) + \"'>»</a></div></center></br>\"\n else:\n list = list + \"<a href='/\" + user + '/' + str(int(number) + 1\n ) + \"'>»</a></div></center></br>\"\n list = list + \"<div id='scroll'><center>\"\n for item in group:\n list = list + item\n if (list == '' or list == \"<div id='scroll'><center>\"\n ) and user != 'AnonymousUser':\n list = (\"<center><a class='titulos'>\" +\n 'Para que aparezcan museos en esta página, ' + user +\n ' tiene que añadirlos.' 
+ '</a></center></br></br>')\n elif (list == '' or list == \"<div id='scroll'><center>\"\n ) and user == 'AnonymousUser':\n list = (\"<center><a class='titulos'>\" +\n 'Para ver tu página personal, primero tienes que loguearte.' +\n '</a></center></br></br>')\n else:\n list = (list + \"<center><a class='info' href='/\" + user +\n \"/xml'>XML del usuario</a></center>\")\n list = list + '</center></div>'\n users = User.objects.all()\n userList = ''\n for user in users:\n try:\n title = Titulo.objects.get(usuario=user.username)\n userList = (userList + \"<li><a href='/\" + user.username + \"'>\" +\n title.titulo + ' - ' + user.username + '</a></li></br>')\n except Titulo.DoesNotExist:\n userList = (userList + \"<li><a href='/\" + user.username +\n \"'>Página de \" + user.username + '</a></li></br>')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n return HttpResponse(template.render(Context({'body': list, 'login':\n login, 'user': request.user, 'userList': userList, 'formato': style,\n 'markers': markers})))\n\n\ndef userXMLPage(request, user):\n template = get_template('personalXML.xml')\n favoriteList = []\n favoriteMuseums = Favorito.objects.filter(usuario=user)\n for favorite in favoriteMuseums:\n favoriteList = favoriteList + [favorite.museo]\n return HttpResponse(template.render(Context({'favoriteList':\n favoriteList, 'user': user})), content_type='text/xml')\n\n\n<mask token>\n\n\ndef XMLAccesiblePage(request):\n template = get_template('personalXML.xml')\n user = ''\n topList = []\n topMuseums = getAccessibleRanking()\n topFive = range(5)\n 
for item in topFive:\n if topMuseums[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=topMuseums[item][0])\n topList = topList + [museum]\n return HttpResponse(template.render(Context({'favoriteList': topList,\n 'user': user})), content_type='text/xml')\n\n\n@csrf_exempt\ndef preferencesPage(request, user):\n template = get_template('preferencias.html')\n if request.method == 'POST':\n if 'color' in request.POST:\n try:\n color = Color.objects.get(usuario=user)\n color.color = request.POST['color']\n except Color.DoesNotExist:\n color = Color(usuario=user, color=request.POST['color'])\n color.save()\n elif 'tamaño' in request.POST:\n try:\n size = Letra.objects.get(usuario=user)\n size.letra = request.POST['tamaño']\n except Letra.DoesNotExist:\n size = Letra(usuario=user, letra=request.POST['tamaño'])\n size.save()\n elif 'título' in request.POST:\n try:\n title = Titulo.objects.get(usuario=user)\n title.titulo = request.POST['título']\n except Titulo.DoesNotExist:\n title = Titulo(usuario=user, titulo=request.POST['título'])\n title.save()\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n return HttpResponse(template.render(Context({'login': login, 'user':\n user, 'formato': style})))\n\n\ndef aboutPage(request):\n template = get_template('about.html')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except 
Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n return HttpResponse(template.render(Context({'login': login, 'user':\n request.user, 'formato': style})))\n\n\ndef updateDB(request):\n museos = parseXML('web/museos.xml')\n for museo in museos:\n try:\n distrito = Distrito.objects.get(nombre=museos[museo]['DISTRITO'])\n except Distrito.DoesNotExist:\n distrito = Distrito(nombre=museos[museo]['DISTRITO'])\n distrito.save()\n for museo in museos:\n try:\n A = museos[museo]['ID-ENTIDAD']\n except KeyError:\n A = 'No disponible'\n try:\n B = museos[museo]['NOMBRE']\n except KeyError:\n B = 'No disponible'\n try:\n C = museos[museo]['DESCRIPCION-ENTIDAD']\n except KeyError:\n C = 'No disponible'\n try:\n D = museos[museo]['HORARIO']\n except KeyError:\n D = 'No disponible'\n try:\n E = museos[museo]['TRANSPORTE']\n except KeyError:\n E = 'No disponible'\n try:\n F = museos[museo]['ACCESIBILIDAD']\n except KeyError:\n F = 'No disponible'\n try:\n G = museos[museo]['CONTENT-URL']\n except KeyError:\n G = 'No disponible'\n try:\n H = museos[museo]['NOMBRE-VIA']\n except KeyError:\n H = 'No disponible'\n try:\n I = museos[museo]['CLASE-VIAL']\n except KeyError:\n I = 'No disponible'\n try:\n J = museos[museo]['TIPO-NUM']\n except KeyError:\n J = 'No disponible'\n try:\n K = museos[museo]['NUM']\n except KeyError:\n K = 'No disponible'\n try:\n L = museos[museo]['LOCALIDAD']\n except KeyError:\n L = 'No disponible'\n try:\n M = museos[museo]['PROVINCIA']\n except KeyError:\n M = 'No disponible'\n try:\n N = museos[museo]['CODIGO-POSTAL']\n except KeyError:\n N = 'No disponible'\n try:\n Ñ = museos[museo]['BARRIO']\n except KeyError:\n Ñ = 'No disponible'\n try:\n O = Distrito.objects.get(nombre=museos[museo]['DISTRITO'])\n except KeyError:\n O = 'No disponible'\n try:\n P = museos[museo]['COORDENADA-X']\n except KeyError:\n P = 'No 
disponible'\n try:\n Q = museos[museo]['COORDENADA-Y']\n except KeyError:\n Q = 'No disponible'\n try:\n R = museos[museo]['LATITUD']\n except KeyError:\n R = 'No disponible'\n try:\n S = museos[museo]['LONGITUD']\n except KeyError:\n S = 'No disponible'\n try:\n T = museos[museo]['TELEFONO']\n except KeyError:\n T = 'No disponible'\n try:\n U = museos[museo]['FAX']\n except KeyError:\n U = 'No disponible'\n try:\n V = museos[museo]['EMAIL']\n except KeyError:\n V = 'No disponible'\n try:\n W = museos[museo]['TIPO']\n except KeyError:\n W = 'No disponible'\n try:\n viejoMuseo = Museo.objects.get(ID_ENTIDAD=A)\n except Museo.DoesNotExist:\n nuevoMuseo = Museo(ID_ENTIDAD=A, NOMBRE=B, DESCRIPCION_ENTIDAD=\n C, HORARIO=D, TRANSPORTE=E, ACCESIBILIDAD=F, CONTENT_URL=G,\n NOMBRE_VIA=H, CLASE_VIAL=I, TIPO_NUM=J, NUM=K, LOCALIDAD=L,\n PROVINCIA=M, CODIGO_POSTAL=N, BARRIO=Ñ, DISTRITO=O,\n COORDENADA_X=P, COORDENADA_Y=Q, LATITUD=R, LONGITUD=S,\n TELEFONO=T, FAX=U, EMAIL=V, TIPO=W)\n nuevoMuseo.save()\n return mainPage(request)\n",
"step-4": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import User\nfrom .models import Museo, Distrito, Comentario, Favorito, Like, Titulo, Letra, Color\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth import logout\nfrom web.parser import parseXML\nimport operator\nfrom django.template.loader import get_template\nfrom django.template import Context\nimport datetime\n\n\ndef getMuseums():\n museos = Museo.objects.all()\n allMuseums = {}\n for museo in museos:\n allMuseums[museo.ID_ENTIDAD] = museo.comentario_set.count()\n return allMuseums\n\n\ndef getAccessibleMuseums():\n museos = Museo.objects.all()\n allMuseums = {}\n for museo in museos:\n if museo.ACCESIBILIDAD == '1':\n allMuseums[museo.ID_ENTIDAD] = museo.comentario_set.count()\n return allMuseums\n\n\ndef getRanking():\n allMuseums = getMuseums()\n ranking = sorted(allMuseums.items(), key=operator.itemgetter(1))\n ranking.reverse()\n return ranking\n\n\ndef getAccessibleRanking():\n allMuseums = getAccessibleMuseums()\n ranking = sorted(allMuseums.items(), key=operator.itemgetter(1))\n ranking.reverse()\n return ranking\n\n\n@csrf_exempt\ndef mainPage(request):\n template = get_template('index.html')\n topFive = range(5)\n list = '<br>'\n markers = ''\n if request.method == 'GET' or request.method == 'POST' and request.POST[\n 'accion'] == 'mostrar':\n ranking = getRanking()\n list = (list +\n \"<center><form action='/' method='post'><input type='hidden' name='accion' value='ocultar'>\"\n +\n \"<input class='desplegable' type='submit' value='Mostrar museos accesibles'></form></center><div id='scroll'>\"\n )\n if len(ranking) > 0:\n for item in topFive:\n if ranking[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=ranking[item][0])\n list = (list + \"<center><a class='titulos' href=\" +\n museum.CONTENT_URL + '>' + museum.NOMBRE +\n '</a><br><b>' + 
str(museum.comentario_set.count()) +\n ' Comentarios - ' + str(museum.like_set.count()) +\n ' Likes</b></br></br>')\n list = (list + \"<a class='direccion'>\" + museum.\n CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' +\n museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'\n )\n list = (list + \"<a class='info' href=\" + '/museos/' +\n museum.ID_ENTIDAD +\n '/>Más información</a></center></br></br>')\n if (museum.LATITUD != 'No disponible' and museum.\n LONGITUD != 'No disponible'):\n markers = (markers + 'var ' + 'X' + museum.\n ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n 'var ' + 'X' + museum.ID_ENTIDAD +\n 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museum.LATITUD + ', lng: ' +\n museum.LONGITUD + ' },map: map});' + 'X' +\n museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" +\n 'X' + museum.ID_ENTIDAD + 'info.open(map,' +\n 'X' + museum.ID_ENTIDAD + 'marker);' + '});')\n if ranking[0][1] == 0:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n else:\n list = list + '</div>'\n list = (list +\n \"<center><a class='info' href='/xml'>XML de la página</a></center>\"\n )\n else:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos con comentarios, ¡sé el primero en comentar!' 
+\n '</center></a></br></br></div>')\n elif request.method == 'POST' and request.POST['accion'] == 'ocultar':\n ranking = getAccessibleRanking()\n list = (list +\n \"<center><form action='/' method='post'><input type='hidden' name='accion' value='mostrar'>\"\n +\n \"<input class='desplegable' type='submit' value='Mostrar todos los museos'></form></center><div id='scroll'>\"\n )\n if len(ranking) > 0:\n for item in topFive:\n if ranking[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=ranking[item][0])\n list = (list + \"<center><a class='titulos' href=\" +\n museum.CONTENT_URL + '>' + museum.NOMBRE +\n '</a><br><b>' + str(museum.comentario_set.count()) +\n ' Comentarios - ' + str(museum.like_set.count()) +\n ' Likes</b></br></br>')\n list = (list + \"<a class='direccion'>\" + museum.\n CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' +\n museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'\n )\n list = (list + \"<a class='info' href=\" + '/museos/' +\n museum.ID_ENTIDAD +\n '/>Más información</a></center></br></br>')\n if (museum.LATITUD != 'No disponbile' and museum.\n LONGITUD != 'No disponible'):\n markers = (markers + 'var ' + 'X' + museum.\n ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n 'var ' + 'X' + museum.ID_ENTIDAD +\n 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museum.LATITUD + ', lng: ' +\n museum.LONGITUD + ' },map: map});' + 'X' +\n museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" +\n 'X' + museum.ID_ENTIDAD + 'info.open(map,' +\n 'X' + museum.ID_ENTIDAD + 'marker);' + '});')\n if ranking[0][1] == 0:\n list = (list + \"<a class='titulos'><center>\" +\n 'No hay museos accesibles con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n else:\n list = list + '</div>'\n list = (list +\n \"<center><a class='info' href='/xml'>XML de la página</a></center>\"\n )\n else:\n list = (list + \"<a class='titulos'><center>\" +\n 'No 
hay museos accesibles con comentarios, ¡sé el primero en comentar!'\n + '</center></a></br></br></div>')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n users = User.objects.all()\n userList = ''\n for user in users:\n try:\n title = Titulo.objects.get(usuario=user.username)\n userList = (userList + \"<li><a href='/\" + user.username + \"'>\" +\n title.titulo + ' - ' + user.username + '</a></li></br>')\n except Titulo.DoesNotExist:\n userList = (userList + \"<li><a href='/\" + user.username +\n \"'>Página de \" + user.username + '</a></li></br>')\n return HttpResponse(template.render(Context({'body': list, 'login':\n login, 'user': request.user, 'userList': userList, 'formato': style,\n 'markers': markers})))\n\n\n@csrf_exempt\ndef museumsPage(request):\n template = get_template('museos.html')\n if request.method == 'GET':\n museos = Museo.objects.all()\n elif request.method == 'POST':\n distrito = Distrito.objects.get(nombre=request.POST['distrito'])\n museos = distrito.museo_set.all()\n list = ''\n markers = ''\n i = 1\n for museo in museos:\n list = (list + \"<center><a class='titulos'>\" + museo.NOMBRE +\n '</a></br>')\n list = (list + \"<a class='info' href=\" + '/museos/' + museo.\n ID_ENTIDAD + '/>Más información</a></center></br></br>')\n if (museo.LATITUD != 'No disponible' and museo.LONGITUD !=\n 'No disponible'):\n markers = (markers + 'var ' + 'X' + museo.ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' + \"content:'<h1>\" +\n museo.NOMBRE + \"</h1>'});\" + 'var ' + 'X' + museo.\n ID_ENTIDAD + 'marker = new google.maps.Marker({' 
+\n 'position: {lat: ' + museo.LATITUD + ', lng: ' + museo.\n LONGITUD + ' },map: map});' + 'X' + museo.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" + 'X' + museo.\n ID_ENTIDAD + 'info.open(map,' + 'X' + museo.ID_ENTIDAD +\n 'marker);' + '});')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n distritos = Distrito.objects.all()\n districtList = ''\n for distrito in distritos:\n districtList = (districtList + \"<option value='\" + distrito.nombre +\n \"'>\" + distrito.nombre + '</option>')\n return HttpResponse(template.render(Context({'body': list, 'login':\n login, 'user': request.user, 'districtList': districtList,\n 'formato': style, 'markers': markers})))\n\n\n@csrf_exempt\ndef museumPage(request, museumID):\n template = get_template('museo.html')\n museum = Museo.objects.get(ID_ENTIDAD=museumID)\n if request.method == 'POST' and 'comentario' in request.POST:\n comment = Comentario(texto=request.POST['comentario'], museo=museum,\n usuario=request.user.username)\n comment.save()\n elif request.method == 'POST' and 'añadir' in request.POST:\n fav = Favorito(museo=museum, usuario=request.user)\n fav.save()\n elif request.method == 'POST' and 'quitar' in request.POST:\n Favorito.objects.filter(museo=museum, usuario=request.user).delete()\n elif request.method == 'POST' and 'mas' in request.POST:\n like = Like(museo=museum, usuario=request.user)\n like.save()\n elif request.method == 'POST' and 'menos' in request.POST:\n Like.objects.filter(museo=museum, usuario=request.user).delete()\n comments = museum.comentario_set.all()\n message 
= (\"<center><b><a class='titulos_museo'>\" + museum.NOMBRE +\n \"</a></b></center><div id='scroll'></br><center><b><a class='titulos_museo'>Descripción</a></b></center></br><center><a class='texto_museo'>\"\n + museum.DESCRIPCION_ENTIDAD +\n \"</a></center></br><center><b><a class='titulos_museo'>Horario</a></b></center></br><center><a class='texto_museo'>\"\n + museum.HORARIO +\n \"</a></center></br><center><b><a class='titulos_museo'>Accesibilidad</a></b></center></br><center><a class='texto_museo'>\"\n + museum.ACCESIBILIDAD +\n \"</a></center></br><center><b><a class='titulos_museo'>Dirección</a></b></center></br><center><a class='texto_museo'>\"\n + museum.CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.\n NUM + ', ' + museum.LOCALIDAD +\n \"</a><center></br><center><a class='texto_museo'>Barrio: \" + museum\n .BARRIO +\n \"</a></center></br><center><a class='texto_museo'>Distrito: \" + str\n (museum.DISTRITO) +\n \"</a></center></br><center><b><a class='titulos_museo'>Datos de contacto</a></b></center></br><center><a class='texto_museo'>Teléfono: \"\n + museum.TELEFONO +\n \"</a></center></br><center><a class='texto_museo'>Email: \" + museum\n .EMAIL +\n \"</a></center></br><center><b><a class='titulos_museo'>Comentarios</a></b></center></br>\"\n )\n allComments = ''\n for comment in comments:\n allComments = (allComments + \"<center><a class='texto_museo'><b>\" +\n 'Anónimo</b>: ' + comment.texto + ', ' + (datetime.timedelta(\n hours=2) + comment.fecha).strftime('%H:%M:%S %d-%m-%Y') +\n '</a></center></br>')\n message = message + allComments\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n favorito = Favorito.objects.get(museo=museum, usuario=request.user)\n favoriteButton = (\"<center><form action='/museos/\" + museumID +\n \"/' method='post'><input type='hidden' name='quitar' value='fav'>\"\n +\n \"<input class='desplegable' type='submit' value='Quitar de favoritos'></form></center>\"\n )\n except Favorito.DoesNotExist:\n 
favoriteButton = (\"<center><form action='/museos/\" + museumID +\n \"/' method='post'><input type='hidden' name='añadir' value='fav'>\"\n +\n \"<input class='desplegable' type='submit' value='Añadir a favoritos'></form></center>\"\n )\n try:\n like = Like.objects.get(museo=museum, usuario=request.user)\n likeButton = (\"<center><form action='/museos/\" + museumID +\n \"/' method='post'><input type='hidden' name='menos' value='like'>\"\n +\n \"<input class='desplegable' type='submit' value='Dislike'></form></center>\"\n )\n except Like.DoesNotExist:\n likeButton = (\"<center><form action='/museos/\" + museumID +\n \"/' method='post'><input type='hidden' name='mas' value='like'>\"\n +\n \"<input class='desplegable' type='submit' value='Like'></form></center>\"\n )\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n favoriteButton = ''\n likeButton = ''\n if (museum.LATITUD != 'No disponbile' and museum.LONGITUD !=\n 'No disponible'):\n marker = ('var ' + 'X' + museum.ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' + \"content:'<h1>\" +\n museum.NOMBRE + \"</h1>'});\" + 'var ' + 'X' + museum.ID_ENTIDAD +\n 'marker = new google.maps.Marker({' + 'position: {lat: ' +\n museum.LATITUD + ', lng: ' + museum.LONGITUD + ' },map: map});' +\n 'X' + museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" + 'X' + museum.\n ID_ENTIDAD + 'info.open(map,' + 'X' + museum.ID_ENTIDAD +\n 'marker);' + '});')\n else:\n marker = ''\n return HttpResponse(template.render(Context({'body': message, 'login':\n login, 'user': request.user, 'id': museumID, 'fav': favoriteButton,\n 'like': likeButton, 'formato': style, 
'marker': marker})))\n\n\n@csrf_exempt\ndef loginPage(request):\n if request.method == 'POST':\n if not request.user.is_authenticated() and 'login' in request.POST:\n username = request.POST['Usuario']\n password = request.POST['Contraseña']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n elif not request.user.is_authenticated(\n ) and 'registro' in request.POST:\n username = request.POST['Usuario']\n password = request.POST['Contraseña']\n try:\n user = User.objects.get(username=username)\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n except User.DoesNotExist:\n user = User.objects.create_user(username=username, password\n =password)\n user.save()\n request.method = 'GET'\n return mainPage(request)\n\n\ndef logoutPage(request):\n logout(request)\n return mainPage(request)\n\n\ndef userPage(request, user, number):\n if number == None:\n number = 1\n template = get_template('personal.html')\n listTotal = ''\n favoritos = Favorito.objects.filter(usuario=user)\n group = range(5)\n count = 0\n markers = ''\n for favorito in favoritos:\n count = count + 1\n museum = Museo.objects.get(NOMBRE=favorito.museo)\n listTotal = (listTotal + \"<a class='titulos' href=\" + museum.\n CONTENT_URL + '>' + museum.NOMBRE + '</a><br><b>' + str(museum.\n comentario_set.count()) + ' Comentarios - ' + str(museum.\n like_set.count()) + ' Likes</b></br></br>')\n listTotal = (listTotal + \"<a class='direccion'>\" + museum.\n CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM +\n ', ' + museum.LOCALIDAD + '</a></br></br>')\n listTotal = (listTotal + \"<a class='info' href=\" + '/museos/' +\n museum.ID_ENTIDAD +\n '/>Más información</a> <b>Fecha de guardado:' + (datetime.\n timedelta(hours=2) + favorito.fecha).strftime(\n '%H:%M:%S %d-%m-%Y') + '</b></br></br></br>')\n if (museum.LATITUD != 'No disponible' and museum.LONGITUD !=\n 'No disponible'):\n markers = 
(markers + 'var ' + 'X' + museum.ID_ENTIDAD +\n 'info = new google.maps.InfoWindow({' + \"content:'<h1>\" +\n museum.NOMBRE + \"</h1>'});\" + 'var ' + 'X' + museum.\n ID_ENTIDAD + 'marker = new google.maps.Marker({' +\n 'position: {lat: ' + museum.LATITUD + ', lng: ' + museum.\n LONGITUD + ' },map: map});' + 'X' + museum.ID_ENTIDAD +\n \"marker.addListener('click', function() {\" + 'X' + museum.\n ID_ENTIDAD + 'info.open(map,' + 'X' + museum.ID_ENTIDAD +\n 'marker);' + '});')\n if count % 5 == 0:\n listTotal = listTotal + ';'\n group = listTotal.split(';')[int(number) - 1]\n list = ''\n if favoritos.count() % 5 == 0:\n pages = int(favoritos.count() / 5)\n else:\n pages = int(favoritos.count() / 5) + 1\n pagesRange = range(pages)\n if pages > 1:\n list = '<br>'\n if int(number) > 1:\n list = (list + \"<center><div class='pagination'><a href='/\" +\n user + '/' + str(int(number) - 1) + \"'>«</a>\")\n else:\n list = (list + \"<center><div class='pagination'><a href='/\" +\n user + '/' + str(number) + \"'>«</a>\")\n for page in pagesRange:\n if page == int(number) - 1:\n list = list + \"<a class='active' href='/\" + user + '/' + str(\n page + 1) + \"'>\" + str(page + 1) + '</a>'\n else:\n list = list + \"<a href='/\" + user + '/' + str(page + 1\n ) + \"'>\" + str(page + 1) + '</a>'\n if int(number) == pages:\n list = list + \"<a href='/\" + user + '/' + str(number\n ) + \"'>»</a></div></center></br>\"\n else:\n list = list + \"<a href='/\" + user + '/' + str(int(number) + 1\n ) + \"'>»</a></div></center></br>\"\n list = list + \"<div id='scroll'><center>\"\n for item in group:\n list = list + item\n if (list == '' or list == \"<div id='scroll'><center>\"\n ) and user != 'AnonymousUser':\n list = (\"<center><a class='titulos'>\" +\n 'Para que aparezcan museos en esta página, ' + user +\n ' tiene que añadirlos.' 
+ '</a></center></br></br>')\n elif (list == '' or list == \"<div id='scroll'><center>\"\n ) and user == 'AnonymousUser':\n list = (\"<center><a class='titulos'>\" +\n 'Para ver tu página personal, primero tienes que loguearte.' +\n '</a></center></br></br>')\n else:\n list = (list + \"<center><a class='info' href='/\" + user +\n \"/xml'>XML del usuario</a></center>\")\n list = list + '</center></div>'\n users = User.objects.all()\n userList = ''\n for user in users:\n try:\n title = Titulo.objects.get(usuario=user.username)\n userList = (userList + \"<li><a href='/\" + user.username + \"'>\" +\n title.titulo + ' - ' + user.username + '</a></li></br>')\n except Titulo.DoesNotExist:\n userList = (userList + \"<li><a href='/\" + user.username +\n \"'>Página de \" + user.username + '</a></li></br>')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n return HttpResponse(template.render(Context({'body': list, 'login':\n login, 'user': request.user, 'userList': userList, 'formato': style,\n 'markers': markers})))\n\n\ndef userXMLPage(request, user):\n template = get_template('personalXML.xml')\n favoriteList = []\n favoriteMuseums = Favorito.objects.filter(usuario=user)\n for favorite in favoriteMuseums:\n favoriteList = favoriteList + [favorite.museo]\n return HttpResponse(template.render(Context({'favoriteList':\n favoriteList, 'user': user})), content_type='text/xml')\n\n\ndef XMLPage(request):\n template = get_template('personalXML.xml')\n user = ''\n topList = []\n topMuseums = getRanking()\n topFive = range(5)\n for item in topFive:\n if 
topMuseums[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=topMuseums[item][0])\n topList = topList + [museum]\n return HttpResponse(template.render(Context({'favoriteList': topList,\n 'user': user})), content_type='text/xml')\n\n\ndef XMLAccesiblePage(request):\n template = get_template('personalXML.xml')\n user = ''\n topList = []\n topMuseums = getAccessibleRanking()\n topFive = range(5)\n for item in topFive:\n if topMuseums[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD=topMuseums[item][0])\n topList = topList + [museum]\n return HttpResponse(template.render(Context({'favoriteList': topList,\n 'user': user})), content_type='text/xml')\n\n\n@csrf_exempt\ndef preferencesPage(request, user):\n template = get_template('preferencias.html')\n if request.method == 'POST':\n if 'color' in request.POST:\n try:\n color = Color.objects.get(usuario=user)\n color.color = request.POST['color']\n except Color.DoesNotExist:\n color = Color(usuario=user, color=request.POST['color'])\n color.save()\n elif 'tamaño' in request.POST:\n try:\n size = Letra.objects.get(usuario=user)\n size.letra = request.POST['tamaño']\n except Letra.DoesNotExist:\n size = Letra(usuario=user, letra=request.POST['tamaño'])\n size.save()\n elif 'título' in request.POST:\n try:\n title = Titulo.objects.get(usuario=user)\n title.titulo = request.POST['título']\n except Titulo.DoesNotExist:\n title = Titulo(usuario=user, titulo=request.POST['título'])\n title.save()\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n return HttpResponse(template.render(Context({'login': 
login, 'user':\n user, 'formato': style})))\n\n\ndef aboutPage(request):\n template = get_template('about.html')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario=request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario=request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\n \"body{font-family: 'Helvetica', sans-serif;color: #444444;font-size: \"\n + letra + 'pt;background-color: #' + color + ';}')\n else:\n login = 0\n return HttpResponse(template.render(Context({'login': login, 'user':\n request.user, 'formato': style})))\n\n\ndef updateDB(request):\n museos = parseXML('web/museos.xml')\n for museo in museos:\n try:\n distrito = Distrito.objects.get(nombre=museos[museo]['DISTRITO'])\n except Distrito.DoesNotExist:\n distrito = Distrito(nombre=museos[museo]['DISTRITO'])\n distrito.save()\n for museo in museos:\n try:\n A = museos[museo]['ID-ENTIDAD']\n except KeyError:\n A = 'No disponible'\n try:\n B = museos[museo]['NOMBRE']\n except KeyError:\n B = 'No disponible'\n try:\n C = museos[museo]['DESCRIPCION-ENTIDAD']\n except KeyError:\n C = 'No disponible'\n try:\n D = museos[museo]['HORARIO']\n except KeyError:\n D = 'No disponible'\n try:\n E = museos[museo]['TRANSPORTE']\n except KeyError:\n E = 'No disponible'\n try:\n F = museos[museo]['ACCESIBILIDAD']\n except KeyError:\n F = 'No disponible'\n try:\n G = museos[museo]['CONTENT-URL']\n except KeyError:\n G = 'No disponible'\n try:\n H = museos[museo]['NOMBRE-VIA']\n except KeyError:\n H = 'No disponible'\n try:\n I = museos[museo]['CLASE-VIAL']\n except KeyError:\n I = 'No disponible'\n try:\n J = museos[museo]['TIPO-NUM']\n except KeyError:\n J = 'No disponible'\n try:\n K = museos[museo]['NUM']\n except KeyError:\n K = 'No disponible'\n try:\n L = museos[museo]['LOCALIDAD']\n except KeyError:\n L = 'No disponible'\n try:\n M = 
museos[museo]['PROVINCIA']\n except KeyError:\n M = 'No disponible'\n try:\n N = museos[museo]['CODIGO-POSTAL']\n except KeyError:\n N = 'No disponible'\n try:\n Ñ = museos[museo]['BARRIO']\n except KeyError:\n Ñ = 'No disponible'\n try:\n O = Distrito.objects.get(nombre=museos[museo]['DISTRITO'])\n except KeyError:\n O = 'No disponible'\n try:\n P = museos[museo]['COORDENADA-X']\n except KeyError:\n P = 'No disponible'\n try:\n Q = museos[museo]['COORDENADA-Y']\n except KeyError:\n Q = 'No disponible'\n try:\n R = museos[museo]['LATITUD']\n except KeyError:\n R = 'No disponible'\n try:\n S = museos[museo]['LONGITUD']\n except KeyError:\n S = 'No disponible'\n try:\n T = museos[museo]['TELEFONO']\n except KeyError:\n T = 'No disponible'\n try:\n U = museos[museo]['FAX']\n except KeyError:\n U = 'No disponible'\n try:\n V = museos[museo]['EMAIL']\n except KeyError:\n V = 'No disponible'\n try:\n W = museos[museo]['TIPO']\n except KeyError:\n W = 'No disponible'\n try:\n viejoMuseo = Museo.objects.get(ID_ENTIDAD=A)\n except Museo.DoesNotExist:\n nuevoMuseo = Museo(ID_ENTIDAD=A, NOMBRE=B, DESCRIPCION_ENTIDAD=\n C, HORARIO=D, TRANSPORTE=E, ACCESIBILIDAD=F, CONTENT_URL=G,\n NOMBRE_VIA=H, CLASE_VIAL=I, TIPO_NUM=J, NUM=K, LOCALIDAD=L,\n PROVINCIA=M, CODIGO_POSTAL=N, BARRIO=Ñ, DISTRITO=O,\n COORDENADA_X=P, COORDENADA_Y=Q, LATITUD=R, LONGITUD=S,\n TELEFONO=T, FAX=U, EMAIL=V, TIPO=W)\n nuevoMuseo.save()\n return mainPage(request)\n",
"step-5": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import User\nfrom .models import Museo, Distrito, Comentario, Favorito, Like, Titulo, Letra, Color\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth import logout\nfrom web.parser import parseXML\nimport operator\nfrom django.template.loader import get_template\nfrom django.template import Context\nimport datetime\n\ndef getMuseums():\n museos = Museo.objects.all()\n allMuseums = {}\n for museo in museos:\n allMuseums[museo.ID_ENTIDAD] = museo.comentario_set.count()\n return allMuseums\n\ndef getAccessibleMuseums():\n museos = Museo.objects.all()\n allMuseums = {}\n for museo in museos:\n if museo.ACCESIBILIDAD == '1':\n allMuseums[museo.ID_ENTIDAD] = museo.comentario_set.count()\n return allMuseums\n\ndef getRanking():\n allMuseums = getMuseums()\n ranking = sorted(allMuseums.items(), key = operator.itemgetter(1))\n ranking.reverse()\n return ranking\n\ndef getAccessibleRanking():\n allMuseums = getAccessibleMuseums()\n ranking = sorted(allMuseums.items(), key = operator.itemgetter(1))\n ranking.reverse()\n return ranking\n\n@csrf_exempt\ndef mainPage(request):\n template = get_template('index.html')\n topFive = range(5)\n list = '<br>'\n markers = ''\n if request.method == 'GET' or (request.method == 'POST' and request.POST['accion'] == 'mostrar'):\n ranking = getRanking()\n list = (list + \"<center><form action='/' method='post'><input type='hidden' name='accion' value='ocultar'>\" +\n \"<input class='desplegable' type='submit' value='Mostrar museos accesibles'></form></center><div id='scroll'>\")\n if len(ranking) > 0:\n for item in topFive:\n if ranking[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD = ranking[item][0])\n list = list + \"<center><a class='titulos' href=\" + museum.CONTENT_URL + '>' + museum.NOMBRE + '</a><br><b>' + 
str(museum.comentario_set.count()) + ' Comentarios - ' + str(museum.like_set.count()) + ' Likes</b></br></br>'\n list = list + \"<a class='direccion'>\" + museum.CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'\n list = list + \"<a class='info' href=\" + \"/museos/\" + museum.ID_ENTIDAD + '/>Más información</a></center></br></br>'\n if museum.LATITUD != 'No disponible' and museum.LONGITUD != 'No disponible':\n markers = (markers +\n \"var \" + \"X\" + museum.ID_ENTIDAD + \"info = new google.maps.InfoWindow({\" +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n \"var \" + \"X\" + museum.ID_ENTIDAD + \"marker = new google.maps.Marker({\" +\n \"position: {lat: \" + museum.LATITUD + \", lng: \" + museum.LONGITUD + \" },map: map});\" +\n \"X\" + museum.ID_ENTIDAD + \"marker.addListener('click', function() {\" +\n \"X\" + museum.ID_ENTIDAD + \"info.open(map,\" + \"X\" + museum.ID_ENTIDAD + \"marker);\" +\n \"});\")\n if ranking[0][1] == 0:\n list = list + \"<a class='titulos'><center>\" + 'No hay museos con comentarios, ¡sé el primero en comentar!' + '</center></a></br></br></div>'\n else:\n list = list + '</div>'\n list = list + \"<center><a class='info' href='/xml'>XML de la página</a></center>\"\n else:\n list = list + \"<a class='titulos'><center>\" + 'No hay museos con comentarios, ¡sé el primero en comentar!' 
+ '</center></a></br></br></div>'\n elif request.method == 'POST' and request.POST['accion'] == 'ocultar':\n ranking = getAccessibleRanking()\n list = (list + \"<center><form action='/' method='post'><input type='hidden' name='accion' value='mostrar'>\" +\n \"<input class='desplegable' type='submit' value='Mostrar todos los museos'></form></center><div id='scroll'>\")\n if len(ranking) > 0:\n for item in topFive:\n if ranking[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD = ranking[item][0])\n list = list + \"<center><a class='titulos' href=\" + museum.CONTENT_URL + '>' + museum.NOMBRE + '</a><br><b>' + str(museum.comentario_set.count()) + ' Comentarios - ' + str(museum.like_set.count()) + ' Likes</b></br></br>'\n list = list + \"<a class='direccion'>\" + museum.CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'\n list = list + \"<a class='info' href=\" + \"/museos/\" + museum.ID_ENTIDAD + '/>Más información</a></center></br></br>'\n if museum.LATITUD != 'No disponbile' and museum.LONGITUD != 'No disponible':\n markers = (markers +\n \"var \" + \"X\" + museum.ID_ENTIDAD + \"info = new google.maps.InfoWindow({\" +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n \"var \" + \"X\" + museum.ID_ENTIDAD + \"marker = new google.maps.Marker({\" +\n \"position: {lat: \" + museum.LATITUD + \", lng: \" + museum.LONGITUD + \" },map: map});\" +\n \"X\" + museum.ID_ENTIDAD + \"marker.addListener('click', function() {\" +\n \"X\" + museum.ID_ENTIDAD + \"info.open(map,\" + \"X\" + museum.ID_ENTIDAD + \"marker);\" +\n \"});\")\n if ranking[0][1] == 0:\n list = list + \"<a class='titulos'><center>\" + 'No hay museos accesibles con comentarios, ¡sé el primero en comentar!' 
+ '</center></a></br></br></div>'\n else:\n list = list + '</div>'\n list = list + \"<center><a class='info' href='/xml'>XML de la página</a></center>\"\n else:\n list = list + \"<a class='titulos'><center>\" + 'No hay museos accesibles con comentarios, ¡sé el primero en comentar!' + '</center></a></br></br></div>'\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario = request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario = request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\"body{font-family: 'Helvetica', sans-serif;\"\n \"color: #444444;\"\n \"font-size: \" + letra + \"pt;\"\n \"background-color: #\" + color + \";}\")\n else:\n login = 0\n users = User.objects.all()\n userList = ''\n for user in users:\n try:\n title = Titulo.objects.get(usuario = user.username)\n userList = userList + \"<li><a href='/\" + user.username + \"'>\" + title.titulo + ' - ' + user.username + \"</a></li></br>\"\n except Titulo.DoesNotExist:\n userList = userList + \"<li><a href='/\" + user.username + \"'>Página de \" + user.username + \"</a></li></br>\"\n return HttpResponse(template.render(Context({'body': list, 'login': login, 'user': request.user, 'userList': userList, 'formato': style, 'markers': markers})))\n\n@csrf_exempt\ndef museumsPage(request):\n template = get_template('museos.html')\n if request.method == 'GET':\n museos = Museo.objects.all()\n elif request.method == 'POST':\n distrito = Distrito.objects.get(nombre = request.POST['distrito'])\n museos = distrito.museo_set.all()\n list = ''\n markers = ''\n i = 1\n for museo in museos:\n list = list + \"<center><a class='titulos'>\" + museo.NOMBRE + '</a></br>'\n list = list + \"<a class='info' href=\" + \"/museos/\" + museo.ID_ENTIDAD + '/>Más información</a></center></br></br>'\n if museo.LATITUD != 'No disponible' and museo.LONGITUD != 'No disponible':\n 
markers = (markers +\n \"var \" + \"X\" + museo.ID_ENTIDAD + \"info = new google.maps.InfoWindow({\" +\n \"content:'<h1>\" + museo.NOMBRE + \"</h1>'});\" +\n \"var \" + \"X\" + museo.ID_ENTIDAD + \"marker = new google.maps.Marker({\" +\n \"position: {lat: \" + museo.LATITUD + \", lng: \" + museo.LONGITUD + \" },map: map});\" +\n \"X\" + museo.ID_ENTIDAD + \"marker.addListener('click', function() {\" +\n \"X\" + museo.ID_ENTIDAD + \"info.open(map,\" + \"X\" + museo.ID_ENTIDAD + \"marker);\" +\n \"});\")\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario = request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario = request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\"body{font-family: 'Helvetica', sans-serif;\"\n \"color: #444444;\"\n \"font-size: \" + letra + \"pt;\"\n \"background-color: #\" + color + \";}\")\n else:\n login = 0\n distritos = Distrito.objects.all()\n districtList = ''\n for distrito in distritos:\n districtList = districtList + \"<option value='\" + distrito.nombre + \"'>\" + distrito.nombre + \"</option>\"\n return HttpResponse(template.render(Context({'body': list, 'login': login, 'user': request.user, 'districtList': districtList, 'formato': style, 'markers': markers})))\n\n@csrf_exempt\ndef museumPage(request, museumID):\n template = get_template('museo.html')\n museum = Museo.objects.get(ID_ENTIDAD = museumID)\n if request.method == 'POST' and 'comentario' in request.POST:\n comment = Comentario(texto = request.POST['comentario'], museo = museum, usuario = request.user.username)\n comment.save()\n elif request.method == 'POST' and 'añadir' in request.POST:\n fav = Favorito(museo = museum, usuario = request.user)\n fav.save()\n elif request.method == 'POST' and 'quitar' in request.POST:\n Favorito.objects.filter(museo = museum, usuario = request.user).delete()\n elif request.method == 
'POST' and 'mas' in request.POST:\n like = Like(museo = museum, usuario = request.user)\n like.save()\n elif request.method == 'POST' and 'menos' in request.POST:\n Like.objects.filter(museo = museum, usuario = request.user).delete()\n comments = museum.comentario_set.all()\n message = (\"<center><b><a class='titulos_museo'>\" + museum.NOMBRE + \"</a></b></center><div id='scroll'></br>\"\n \"<center><b><a class='titulos_museo'>Descripción</a></b></center></br>\"\n \"<center><a class='texto_museo'>\" + museum.DESCRIPCION_ENTIDAD + '</a></center></br>'\n \"<center><b><a class='titulos_museo'>Horario</a></b></center></br>\"\n \"<center><a class='texto_museo'>\" + museum.HORARIO + '</a></center></br>'\n \"<center><b><a class='titulos_museo'>Accesibilidad</a></b></center></br>\"\n \"<center><a class='texto_museo'>\" + museum.ACCESIBILIDAD + '</a></center></br>'\n \"<center><b><a class='titulos_museo'>Dirección</a></b></center></br>\"\n \"<center><a class='texto_museo'>\" + museum.CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM + ', ' + museum.LOCALIDAD + '</a><center></br>'\n \"<center><a class='texto_museo'>Barrio: \" + museum.BARRIO + '</a></center></br>'\n \"<center><a class='texto_museo'>Distrito: \" + str(museum.DISTRITO) + '</a></center></br>'\n \"<center><b><a class='titulos_museo'>Datos de contacto</a></b></center></br>\"\n \"<center><a class='texto_museo'>Teléfono: \" + museum.TELEFONO + '</a></center></br>'\n \"<center><a class='texto_museo'>Email: \" + museum.EMAIL + '</a></center></br>'\n \"<center><b><a class='titulos_museo'>Comentarios</a></b></center></br>\")\n allComments = ''\n for comment in comments:\n allComments = allComments + \"<center><a class='texto_museo'><b>\" + 'Anónimo</b>: ' + comment.texto + ', ' + (datetime.timedelta(hours=2) + comment.fecha).strftime(\"%H:%M:%S %d-%m-%Y\") + '</a></center></br>'\n message = message + allComments\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n favorito = 
Favorito.objects.get(museo = museum, usuario = request.user)\n favoriteButton = (\"<center><form action='/museos/\" + museumID + \"/' method='post'><input type='hidden' name='quitar' value='fav'>\" +\n \"<input class='desplegable' type='submit' value='Quitar de favoritos'></form></center>\")\n except Favorito.DoesNotExist:\n favoriteButton = (\"<center><form action='/museos/\" + museumID + \"/' method='post'><input type='hidden' name='añadir' value='fav'>\" +\n \"<input class='desplegable' type='submit' value='Añadir a favoritos'></form></center>\")\n try:\n like = Like.objects.get(museo = museum, usuario = request.user)\n likeButton = (\"<center><form action='/museos/\" + museumID + \"/' method='post'><input type='hidden' name='menos' value='like'>\" +\n \"<input class='desplegable' type='submit' value='Dislike'></form></center>\")\n except Like.DoesNotExist:\n likeButton = (\"<center><form action='/museos/\" + museumID + \"/' method='post'><input type='hidden' name='mas' value='like'>\" +\n \"<input class='desplegable' type='submit' value='Like'></form></center>\")\n try:\n color = Color.objects.get(usuario = request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario = request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\"body{font-family: 'Helvetica', sans-serif;\"\n \"color: #444444;\"\n \"font-size: \" + letra + \"pt;\"\n \"background-color: #\" + color + \";}\")\n else:\n login = 0\n favoriteButton = ''\n likeButton = ''\n if museum.LATITUD != 'No disponbile' and museum.LONGITUD != 'No disponible':\n marker = (\"var \" + \"X\" + museum.ID_ENTIDAD + \"info = new google.maps.InfoWindow({\" +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n \"var \" + \"X\" + museum.ID_ENTIDAD + \"marker = new google.maps.Marker({\" +\n \"position: {lat: \" + museum.LATITUD + \", lng: \" + museum.LONGITUD + \" },map: map});\" +\n \"X\" + museum.ID_ENTIDAD + 
\"marker.addListener('click', function() {\" +\n \"X\" + museum.ID_ENTIDAD + \"info.open(map,\" + \"X\" + museum.ID_ENTIDAD + \"marker);\" +\n \"});\")\n else:\n marker = ''\n return HttpResponse(template.render(Context({'body': message, 'login': login, 'user': request.user, 'id': museumID, 'fav': favoriteButton, 'like': likeButton, 'formato': style, 'marker': marker})))\n\n@csrf_exempt\ndef loginPage(request):\n if request.method == 'POST':\n if not request.user.is_authenticated() and 'login' in request.POST:\n username = request.POST['Usuario']\n password = request.POST['Contraseña']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n elif not request.user.is_authenticated() and 'registro' in request.POST:\n username = request.POST['Usuario']\n password = request.POST['Contraseña']\n try:\n user = User.objects.get(username = username)\n user = authenticate(username = username, password = password)\n if user is not None:\n login(request, user)\n except User.DoesNotExist:\n user = User.objects.create_user(username = username, password = password)\n user.save()\n request.method = 'GET'\n return mainPage(request)\n\ndef logoutPage(request):\n logout(request)\n return mainPage(request)\n\ndef userPage(request, user, number):\n if number == None:\n number = 1\n template = get_template('personal.html')\n listTotal = ''\n favoritos = Favorito.objects.filter(usuario = user)\n group = range(5)\n count = 0;\n markers = ''\n for favorito in favoritos:\n count = count + 1;\n museum = Museo.objects.get(NOMBRE = favorito.museo)\n listTotal = listTotal + \"<a class='titulos' href=\" + museum.CONTENT_URL + '>' + museum.NOMBRE + '</a><br><b>' + str(museum.comentario_set.count()) + ' Comentarios - ' + str(museum.like_set.count()) + ' Likes</b></br></br>'\n listTotal = listTotal + \"<a class='direccion'>\" + museum.CLASE_VIAL + ' ' + museum.NOMBRE_VIA + ', Nº ' + museum.NUM + ', ' + museum.LOCALIDAD + '</a></br></br>'\n 
listTotal = listTotal + \"<a class='info' href=\" + \"/museos/\" + museum.ID_ENTIDAD + '/>Más información</a> <b>Fecha de guardado:' + (datetime.timedelta(hours=2) + favorito.fecha).strftime(\"%H:%M:%S %d-%m-%Y\") + '</b></br></br></br>'\n if museum.LATITUD != 'No disponible' and museum.LONGITUD != 'No disponible':\n markers = (markers +\n \"var \" + \"X\" + museum.ID_ENTIDAD + \"info = new google.maps.InfoWindow({\" +\n \"content:'<h1>\" + museum.NOMBRE + \"</h1>'});\" +\n \"var \" + \"X\" + museum.ID_ENTIDAD + \"marker = new google.maps.Marker({\" +\n \"position: {lat: \" + museum.LATITUD + \", lng: \" + museum.LONGITUD + \" },map: map});\" +\n \"X\" + museum.ID_ENTIDAD + \"marker.addListener('click', function() {\" +\n \"X\" + museum.ID_ENTIDAD + \"info.open(map,\" + \"X\" + museum.ID_ENTIDAD + \"marker);\" +\n \"});\")\n if (count % 5) == 0:\n listTotal = listTotal + ';'\n group = listTotal.split(';')[int(number) - 1]\n list = ''\n if (favoritos.count() % 5) == 0:\n pages = int(favoritos.count() / 5)\n else:\n pages = int(favoritos.count() / 5) + 1\n pagesRange = range(pages)\n if pages > 1:\n list = '<br>'\n if int(number) > 1:\n list = list + \"<center><div class='pagination'><a href='/\" + user + \"/\" + str(int(number) - 1) + \"'>«</a>\"\n else:\n list = list + \"<center><div class='pagination'><a href='/\" + user + \"/\" + str(number) + \"'>«</a>\"\n for page in pagesRange:\n if page == (int(number) - 1):\n list = list + \"<a class='active' href='/\" + user + \"/\" + str(page + 1) + \"'>\" + str(page + 1) + \"</a>\"\n else:\n list = list + \"<a href='/\" + user + \"/\" + str(page + 1) + \"'>\" + str(page + 1) + \"</a>\"\n if int(number) == pages:\n list = list + \"<a href='/\" + user + \"/\" + str(number) + \"'>»</a></div></center></br>\"\n else:\n list = list + \"<a href='/\" + user + \"/\" + str(int(number) + 1) + \"'>»</a></div></center></br>\"\n list = list + \"<div id='scroll'><center>\"\n for item in group:\n list = list + item\n if (list == '' or 
list == \"<div id='scroll'><center>\") and user != 'AnonymousUser':\n list = \"<center><a class='titulos'>\" + 'Para que aparezcan museos en esta página, ' + user + ' tiene que añadirlos.' + '</a></center></br></br>'\n elif (list == '' or list == \"<div id='scroll'><center>\") and user == 'AnonymousUser':\n list = \"<center><a class='titulos'>\" + 'Para ver tu página personal, primero tienes que loguearte.' + '</a></center></br></br>'\n else:\n list = list + \"<center><a class='info' href='/\" + user + \"/xml'>XML del usuario</a></center>\"\n list = list + '</center></div>'\n users = User.objects.all()\n userList = ''\n for user in users:\n try:\n title = Titulo.objects.get(usuario = user.username)\n userList = userList + \"<li><a href='/\" + user.username + \"'>\" + title.titulo + ' - ' + user.username + \"</a></li></br>\"\n except Titulo.DoesNotExist:\n userList = userList + \"<li><a href='/\" + user.username + \"'>Página de \" + user.username + \"</a></li></br>\"\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario = request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario = request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\"body{font-family: 'Helvetica', sans-serif;\"\n \"color: #444444;\"\n \"font-size: \" + letra + \"pt;\"\n \"background-color: #\" + color + \";}\")\n else:\n login = 0\n return HttpResponse(template.render(Context({'body': list, 'login': login, 'user': request.user, 'userList': userList, 'formato': style, 'markers': markers})))\n\ndef userXMLPage(request, user):\n template = get_template(\"personalXML.xml\")\n favoriteList = []\n favoriteMuseums = Favorito.objects.filter(usuario = user)\n for favorite in favoriteMuseums:\n favoriteList = favoriteList + [favorite.museo]\n return HttpResponse(template.render(Context({'favoriteList': favoriteList, 'user': user})), content_type = 
\"text/xml\")\n\ndef XMLPage(request):\n template = get_template(\"personalXML.xml\")\n user = ''\n topList = []\n topMuseums = getRanking()\n topFive = range(5)\n for item in topFive:\n if topMuseums[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD = topMuseums[item][0])\n topList = topList + [museum]\n return HttpResponse(template.render(Context({'favoriteList': topList, 'user': user})), content_type = \"text/xml\")\n\ndef XMLAccesiblePage(request):\n template = get_template(\"personalXML.xml\")\n user = ''\n topList = []\n topMuseums = getAccessibleRanking()\n topFive = range(5)\n for item in topFive:\n if topMuseums[item][1] != 0:\n museum = Museo.objects.get(ID_ENTIDAD = topMuseums[item][0])\n topList = topList + [museum]\n return HttpResponse(template.render(Context({'favoriteList': topList, 'user': user})), content_type = \"text/xml\")\n\n\n@csrf_exempt\ndef preferencesPage(request, user):\n template = get_template(\"preferencias.html\")\n if request.method == 'POST':\n if 'color' in request.POST:\n try:\n color = Color.objects.get(usuario = user)\n color.color = request.POST['color']\n except Color.DoesNotExist:\n color = Color(usuario = user, color = request.POST['color'])\n color.save()\n elif 'tamaño' in request.POST:\n try:\n size = Letra.objects.get(usuario = user)\n size.letra = request.POST['tamaño']\n except Letra.DoesNotExist:\n size = Letra(usuario = user, letra = request.POST['tamaño'])\n size.save()\n elif 'título' in request.POST:\n try:\n title = Titulo.objects.get(usuario = user)\n title.titulo = request.POST['título']\n except Titulo.DoesNotExist:\n title = Titulo(usuario = user, titulo = request.POST['título'])\n title.save()\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario = request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario = request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = 
'9'\n style = (\"body{font-family: 'Helvetica', sans-serif;\"\n \"color: #444444;\"\n \"font-size: \" + letra + \"pt;\"\n \"background-color: #\" + color + \";}\")\n else:\n login = 0\n return HttpResponse(template.render(Context({'login': login, 'user': user, 'formato': style})))\n\ndef aboutPage(request):\n template = get_template('about.html')\n style = ''\n if request.user.is_authenticated():\n login = 1\n try:\n color = Color.objects.get(usuario = request.user)\n color = color.color\n except Color.DoesNotExist:\n color = 'EEF4F8'\n try:\n letra = Letra.objects.get(usuario = request.user)\n letra = letra.letra\n except Letra.DoesNotExist:\n letra = '9'\n style = (\"body{font-family: 'Helvetica', sans-serif;\"\n \"color: #444444;\"\n \"font-size: \" + letra + \"pt;\"\n \"background-color: #\" + color + \";}\")\n else:\n login = 0\n return HttpResponse(template.render(Context({'login': login, 'user': request.user, 'formato': style})))\n\ndef updateDB(request):\n #Museo.objects.all().delete()\n museos = parseXML('web/museos.xml')\n for museo in museos:\n try:\n distrito = Distrito.objects.get(nombre = museos[museo]['DISTRITO'])\n except Distrito.DoesNotExist:\n distrito = Distrito(nombre = museos[museo]['DISTRITO'])\n distrito.save()\n for museo in museos:\n try:\n A = museos[museo]['ID-ENTIDAD']\n except KeyError:\n A = 'No disponible'\n try:\n B = museos[museo]['NOMBRE']\n except KeyError:\n B = 'No disponible'\n try:\n C = museos[museo]['DESCRIPCION-ENTIDAD']\n except KeyError:\n C = 'No disponible'\n try:\n D = museos[museo]['HORARIO']\n except KeyError:\n D = 'No disponible'\n try:\n E = museos[museo]['TRANSPORTE']\n except KeyError:\n E = 'No disponible'\n try:\n F = museos[museo]['ACCESIBILIDAD']\n except KeyError:\n F = 'No disponible'\n try:\n G = museos[museo]['CONTENT-URL']\n except KeyError:\n G = 'No disponible'\n try:\n H = museos[museo]['NOMBRE-VIA']\n except KeyError:\n H = 'No disponible'\n try:\n I = museos[museo]['CLASE-VIAL']\n except 
KeyError:\n I = 'No disponible'\n try:\n J = museos[museo]['TIPO-NUM']\n except KeyError:\n J = 'No disponible'\n try:\n K = museos[museo]['NUM']\n except KeyError:\n K = 'No disponible'\n try:\n L = museos[museo]['LOCALIDAD']\n except KeyError:\n L = 'No disponible'\n try:\n M = museos[museo]['PROVINCIA']\n except KeyError:\n M = 'No disponible'\n try:\n N = museos[museo]['CODIGO-POSTAL']\n except KeyError:\n N = 'No disponible'\n try:\n Ñ = museos[museo]['BARRIO']\n except KeyError:\n Ñ = 'No disponible'\n try:\n O = Distrito.objects.get(nombre = museos[museo]['DISTRITO'])\n except KeyError:\n O = 'No disponible'\n try:\n P = museos[museo]['COORDENADA-X']\n except KeyError:\n P = 'No disponible'\n try:\n Q = museos[museo]['COORDENADA-Y']\n except KeyError:\n Q = 'No disponible'\n try:\n R = museos[museo]['LATITUD']\n except KeyError:\n R = 'No disponible'\n try:\n S = museos[museo]['LONGITUD']\n except KeyError:\n S = 'No disponible'\n try:\n T = museos[museo]['TELEFONO']\n except KeyError:\n T = 'No disponible'\n try:\n U = museos[museo]['FAX']\n except KeyError:\n U = 'No disponible'\n try:\n V = museos[museo]['EMAIL']\n except KeyError:\n V = 'No disponible'\n try:\n W = museos[museo]['TIPO']\n except KeyError:\n W = 'No disponible'\n try:\n viejoMuseo = Museo.objects.get(ID_ENTIDAD = A)\n except Museo.DoesNotExist:\n nuevoMuseo = Museo(\n ID_ENTIDAD = A,\n NOMBRE = B,\n DESCRIPCION_ENTIDAD = C,\n HORARIO = D,\n TRANSPORTE = E,\n ACCESIBILIDAD = F,\n CONTENT_URL = G,\n NOMBRE_VIA = H,\n CLASE_VIAL = I,\n TIPO_NUM = J,\n NUM = K,\n LOCALIDAD = L,\n PROVINCIA = M,\n CODIGO_POSTAL = N,\n BARRIO = Ñ,\n DISTRITO = O,\n COORDENADA_X = P,\n COORDENADA_Y = Q,\n LATITUD = R,\n LONGITUD = S,\n TELEFONO = T,\n FAX = U,\n EMAIL = V,\n TIPO = W)\n nuevoMuseo.save()\n return mainPage(request)\n",
"step-ids": [
10,
11,
14,
17,
18
]
}
|
[
10,
11,
14,
17,
18
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
driver.get('https://www.baidu.com')
driver.maximize_window()
<|reserved_special_token_0|>
ActionChains(driver).double_click(element).perform()
<|reserved_special_token_0|>
time.sleep(2)
driver.quit()
<|reserved_special_token_0|>
ActionChains(driver).context_click(element).perform()
time.sleep(2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
driver = webdriver.Chrome()
driver.get('https://www.baidu.com')
driver.maximize_window()
element = driver.find_element_by_link_text(u'新闻')
ActionChains(driver).double_click(element).perform()
<|reserved_special_token_0|>
time.sleep(2)
driver.quit()
element = driver.find_element_by_link_text('地图')
ActionChains(driver).context_click(element).perform()
time.sleep(2)
<|reserved_special_token_1|>
from selenium import webdriver
from selenium.webdriver import ActionChains
driver = webdriver.Chrome()
driver.get('https://www.baidu.com')
driver.maximize_window()
element = driver.find_element_by_link_text(u'新闻')
ActionChains(driver).double_click(element).perform()
import time
time.sleep(2)
driver.quit()
element = driver.find_element_by_link_text('地图')
ActionChains(driver).context_click(element).perform()
time.sleep(2)
<|reserved_special_token_1|>
#coding=utf-8
from selenium import webdriver
from selenium.webdriver import ActionChains
# 常用鼠标操作
driver = webdriver.Chrome()
driver.get('https://www.baidu.com')
driver.maximize_window()
element = driver.find_element_by_link_text(u"新闻")
#˫ 双击 ‘新闻’ 这个超链接
ActionChains(driver).double_click(element).perform()
import time
time.sleep(2)
driver.quit()
# 右键 单击 ‘新闻’
element = driver.find_element_by_link_text('地图')
ActionChains(driver).context_click(element).perform()
time.sleep(2)
# driver.quit()
|
flexible
|
{
"blob_id": "e3f180d4309ade39ac42a895f7f73469fd20724f",
"index": 4538,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get('https://www.baidu.com')\ndriver.maximize_window()\n<mask token>\nActionChains(driver).double_click(element).perform()\n<mask token>\ntime.sleep(2)\ndriver.quit()\n<mask token>\nActionChains(driver).context_click(element).perform()\ntime.sleep(2)\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome()\ndriver.get('https://www.baidu.com')\ndriver.maximize_window()\nelement = driver.find_element_by_link_text(u'新闻')\nActionChains(driver).double_click(element).perform()\n<mask token>\ntime.sleep(2)\ndriver.quit()\nelement = driver.find_element_by_link_text('地图')\nActionChains(driver).context_click(element).perform()\ntime.sleep(2)\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver import ActionChains\ndriver = webdriver.Chrome()\ndriver.get('https://www.baidu.com')\ndriver.maximize_window()\nelement = driver.find_element_by_link_text(u'新闻')\nActionChains(driver).double_click(element).perform()\nimport time\ntime.sleep(2)\ndriver.quit()\nelement = driver.find_element_by_link_text('地图')\nActionChains(driver).context_click(element).perform()\ntime.sleep(2)\n",
"step-5": "#coding=utf-8\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\n\n# 常用鼠标操作\ndriver = webdriver.Chrome()\ndriver.get('https://www.baidu.com')\ndriver.maximize_window()\nelement = driver.find_element_by_link_text(u\"新闻\")\n#˫ 双击 ‘新闻’ 这个超链接\nActionChains(driver).double_click(element).perform()\nimport time\ntime.sleep(2)\ndriver.quit()\n# 右键 单击 ‘新闻’\nelement = driver.find_element_by_link_text('地图')\nActionChains(driver).context_click(element).perform()\ntime.sleep(2)\n# driver.quit()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
process.load('Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi')
<|reserved_special_token_0|>
process.load('CondCore.CondDB.CondDB_cfi')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
process = cms.Process('GeometryInfo')
process.MessageLogger = cms.Service('MessageLogger', cerr=cms.untracked.
PSet(enable=cms.untracked.bool(False)), cout=cms.untracked.PSet(enable=
cms.untracked.bool(True), threshold=cms.untracked.string('INFO')))
process.load('Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi')
process.source = cms.Source('EmptyIOVSource', timetype=cms.string(
'runnumber'), firstValue=cms.uint64(1), lastValue=cms.uint64(1),
interval=cms.uint64(1))
process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(1))
process.load('CondCore.CondDB.CondDB_cfi')
process.CondDB.connect = 'sqlite_file:CTPPSRPAlignment.db'
process.PoolDBESSource = cms.ESSource('PoolDBESSource', process.CondDB,
DumpStat=cms.untracked.bool(True), toGet=cms.VPSet(cms.PSet(record=cms.
string('RPMisalignedAlignmentRecord'), tag=cms.string(
'CTPPSRPAlignment_misaligned'))))
process.ctppsGeometryInfo = cms.EDAnalyzer('CTPPSGeometryInfo',
geometryType=cms.untracked.string('misaligned'), printRPInfo=cms.
untracked.bool(True), printSensorInfo=cms.untracked.bool(True))
process.p = cms.Path(process.ctppsGeometryInfo)
<|reserved_special_token_1|>
import FWCore.ParameterSet.Config as cms
process = cms.Process('GeometryInfo')
process.MessageLogger = cms.Service('MessageLogger', cerr=cms.untracked.
PSet(enable=cms.untracked.bool(False)), cout=cms.untracked.PSet(enable=
cms.untracked.bool(True), threshold=cms.untracked.string('INFO')))
process.load('Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi')
process.source = cms.Source('EmptyIOVSource', timetype=cms.string(
'runnumber'), firstValue=cms.uint64(1), lastValue=cms.uint64(1),
interval=cms.uint64(1))
process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(1))
process.load('CondCore.CondDB.CondDB_cfi')
process.CondDB.connect = 'sqlite_file:CTPPSRPAlignment.db'
process.PoolDBESSource = cms.ESSource('PoolDBESSource', process.CondDB,
DumpStat=cms.untracked.bool(True), toGet=cms.VPSet(cms.PSet(record=cms.
string('RPMisalignedAlignmentRecord'), tag=cms.string(
'CTPPSRPAlignment_misaligned'))))
process.ctppsGeometryInfo = cms.EDAnalyzer('CTPPSGeometryInfo',
geometryType=cms.untracked.string('misaligned'), printRPInfo=cms.
untracked.bool(True), printSensorInfo=cms.untracked.bool(True))
process.p = cms.Path(process.ctppsGeometryInfo)
<|reserved_special_token_1|>
import FWCore.ParameterSet.Config as cms
process = cms.Process("GeometryInfo")
# minimum of logs
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
enable = cms.untracked.bool(False)
),
cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
threshold = cms.untracked.string('INFO')
)
)
# geometry
process.load("Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi")
#process.load("Geometry.VeryForwardGeometry.geometryRPFromDD_2017_cfi")
# no events to process
process.source = cms.Source("EmptyIOVSource",
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
lastValue = cms.uint64(1),
interval = cms.uint64(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
#Database output service
process.load("CondCore.CondDB.CondDB_cfi")
# input database (in this case local sqlite file)
process.CondDB.connect = 'sqlite_file:CTPPSRPAlignment.db'
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
process.CondDB,
DumpStat=cms.untracked.bool(True),
toGet = cms.VPSet(
cms.PSet(
record = cms.string('RPMisalignedAlignmentRecord'),
tag = cms.string("CTPPSRPAlignment_misaligned")
)
)
)
process.ctppsGeometryInfo = cms.EDAnalyzer("CTPPSGeometryInfo",
geometryType = cms.untracked.string("misaligned"),
printRPInfo = cms.untracked.bool(True),
printSensorInfo = cms.untracked.bool(True)
)
process.p = cms.Path(
process.ctppsGeometryInfo
)
|
flexible
|
{
"blob_id": "ac0e301e58ea64465ccd4b2b9aa4ae69283d6d0c",
"index": 6052,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprocess.load('Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi')\n<mask token>\nprocess.load('CondCore.CondDB.CondDB_cfi')\n<mask token>\n",
"step-3": "<mask token>\nprocess = cms.Process('GeometryInfo')\nprocess.MessageLogger = cms.Service('MessageLogger', cerr=cms.untracked.\n PSet(enable=cms.untracked.bool(False)), cout=cms.untracked.PSet(enable=\n cms.untracked.bool(True), threshold=cms.untracked.string('INFO')))\nprocess.load('Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi')\nprocess.source = cms.Source('EmptyIOVSource', timetype=cms.string(\n 'runnumber'), firstValue=cms.uint64(1), lastValue=cms.uint64(1),\n interval=cms.uint64(1))\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(1))\nprocess.load('CondCore.CondDB.CondDB_cfi')\nprocess.CondDB.connect = 'sqlite_file:CTPPSRPAlignment.db'\nprocess.PoolDBESSource = cms.ESSource('PoolDBESSource', process.CondDB,\n DumpStat=cms.untracked.bool(True), toGet=cms.VPSet(cms.PSet(record=cms.\n string('RPMisalignedAlignmentRecord'), tag=cms.string(\n 'CTPPSRPAlignment_misaligned'))))\nprocess.ctppsGeometryInfo = cms.EDAnalyzer('CTPPSGeometryInfo',\n geometryType=cms.untracked.string('misaligned'), printRPInfo=cms.\n untracked.bool(True), printSensorInfo=cms.untracked.bool(True))\nprocess.p = cms.Path(process.ctppsGeometryInfo)\n",
"step-4": "import FWCore.ParameterSet.Config as cms\nprocess = cms.Process('GeometryInfo')\nprocess.MessageLogger = cms.Service('MessageLogger', cerr=cms.untracked.\n PSet(enable=cms.untracked.bool(False)), cout=cms.untracked.PSet(enable=\n cms.untracked.bool(True), threshold=cms.untracked.string('INFO')))\nprocess.load('Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi')\nprocess.source = cms.Source('EmptyIOVSource', timetype=cms.string(\n 'runnumber'), firstValue=cms.uint64(1), lastValue=cms.uint64(1),\n interval=cms.uint64(1))\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(1))\nprocess.load('CondCore.CondDB.CondDB_cfi')\nprocess.CondDB.connect = 'sqlite_file:CTPPSRPAlignment.db'\nprocess.PoolDBESSource = cms.ESSource('PoolDBESSource', process.CondDB,\n DumpStat=cms.untracked.bool(True), toGet=cms.VPSet(cms.PSet(record=cms.\n string('RPMisalignedAlignmentRecord'), tag=cms.string(\n 'CTPPSRPAlignment_misaligned'))))\nprocess.ctppsGeometryInfo = cms.EDAnalyzer('CTPPSGeometryInfo',\n geometryType=cms.untracked.string('misaligned'), printRPInfo=cms.\n untracked.bool(True), printSensorInfo=cms.untracked.bool(True))\nprocess.p = cms.Path(process.ctppsGeometryInfo)\n",
"step-5": "import FWCore.ParameterSet.Config as cms\nprocess = cms.Process(\"GeometryInfo\")\n\n# minimum of logs\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n cerr = cms.untracked.PSet(\n enable = cms.untracked.bool(False)\n ),\n cout = cms.untracked.PSet(\n enable = cms.untracked.bool(True),\n threshold = cms.untracked.string('INFO')\n )\n)\n\n# geometry\nprocess.load(\"Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi\")\n#process.load(\"Geometry.VeryForwardGeometry.geometryRPFromDD_2017_cfi\")\n\n# no events to process\nprocess.source = cms.Source(\"EmptyIOVSource\",\n timetype = cms.string('runnumber'),\n firstValue = cms.uint64(1),\n lastValue = cms.uint64(1),\n interval = cms.uint64(1)\n)\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(1)\n)\n\n#Database output service\nprocess.load(\"CondCore.CondDB.CondDB_cfi\")\n# input database (in this case local sqlite file)\nprocess.CondDB.connect = 'sqlite_file:CTPPSRPAlignment.db'\n\nprocess.PoolDBESSource = cms.ESSource(\"PoolDBESSource\",\n process.CondDB,\n DumpStat=cms.untracked.bool(True),\n toGet = cms.VPSet(\n cms.PSet(\n record = cms.string('RPMisalignedAlignmentRecord'),\n tag = cms.string(\"CTPPSRPAlignment_misaligned\")\n )\n )\n)\n\nprocess.ctppsGeometryInfo = cms.EDAnalyzer(\"CTPPSGeometryInfo\",\n geometryType = cms.untracked.string(\"misaligned\"),\n printRPInfo = cms.untracked.bool(True),\n printSensorInfo = cms.untracked.bool(True)\n)\n\nprocess.p = cms.Path(\n process.ctppsGeometryInfo\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from output.models.sun_data.ctype.content_type.content_type00401m.content_type00401m_xsd.content_type00401m import (
A1,
A,
)
__all__ = [
"A1",
"A",
]
|
normal
|
{
"blob_id": "846a42a997539a45576d3ecbe0bd290e00b55935",
"index": 3258,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['A1', 'A']\n",
"step-3": "from output.models.sun_data.ctype.content_type.content_type00401m.content_type00401m_xsd.content_type00401m import A1, A\n__all__ = ['A1', 'A']\n",
"step-4": "from output.models.sun_data.ctype.content_type.content_type00401m.content_type00401m_xsd.content_type00401m import (\n A1,\n A,\n)\n\n__all__ = [\n \"A1\",\n \"A\",\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.