blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
219b1c4a3c79fe2ac43fe7d4043507bcca9d6cbb | 9754b781d751c63dfa5358ad14c91cb82f450acd | /bentoml/server/open_api.py | eed555bd9322d91f10679562f954145b091c917b | [
"Apache-2.0"
] | permissive | iancoffey/BentoML | d91f6116797c65a5eb735b2fa8edc8257e5a2e20 | 13cbd0ab7ab8d3b51da251264cc3ae0aab8f2d72 | refs/heads/master | 2023-01-07T20:06:41.994863 | 2020-06-16T15:12:08 | 2020-06-16T15:12:08 | 272,819,054 | 0 | 0 | Apache-2.0 | 2020-06-16T21:49:26 | 2020-06-16T21:49:25 | null | UTF-8 | Python | false | false | 2,896 | py | # Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from bentoml import config
def get_open_api_spec_json(bento_service):
    """Build the OpenAPI 3.0 specification dict for a BentoService.

    Always documents ``/healthz``; adds ``/metrics`` and ``/feedback`` when
    the corresponding apiserver config flags are enabled, then one POST path
    per service API.

    :param bento_service: BentoService instance providing ``name``,
        ``version`` and ``get_service_apis()``
    :return: OrderedDict ready to be serialized as the ``docs.json`` payload
    """
    sdk_description = (
        "To get a client SDK, copy all content from <a "
        "href=\"/docs.json\">docs</a> and paste into "
        "<a href=\"https://editor.swagger.io\">editor.swagger.io</a> then click "
        "the tab <strong>Generate Client</strong> and choose the language."
    )
    docs = OrderedDict(
        openapi="3.0.0",
        info=OrderedDict(
            version=bento_service.version,
            title=bento_service.name,
            description=sdk_description,
        ),
        tags=[{"name": "infra"}, {"name": "app"}],
    )

    default_response = {"200": {"description": "success"}}
    paths = OrderedDict()

    # Liveness probe is always exposed.
    paths["/healthz"] = OrderedDict(
        get=OrderedDict(
            tags=["infra"],
            description="Health check endpoint. Expecting an empty response with status"
            " code 200 when the service is in health state",
            responses=default_response,
        )
    )

    if config("apiserver").getboolean("enable_metrics"):
        paths["/metrics"] = OrderedDict(
            get=OrderedDict(
                tags=["infra"],
                description="Prometheus metrics endpoint",
                responses=default_response,
            )
        )

    if config("apiserver").getboolean("enable_feedback"):
        feedback_endpoint = OrderedDict(
            tags=["infra"],
            description="Predictions feedback endpoint. Expecting feedback request "
            "in JSON format and must contain a `request_id` field, which can be "
            "obtained from any BentoService API response header",
            responses=default_response,
        )
        # GET and POST deliberately share the same endpoint definition object.
        paths["/feedback"] = OrderedDict(get=feedback_endpoint)
        paths["/feedback"]["post"] = feedback_endpoint

    # One POST path per user-defined service API.
    for api in bento_service.get_service_apis():
        paths["/{}".format(api.name)] = OrderedDict(
            post=OrderedDict(
                tags=["app"],
                description=api.doc,
                requestBody=OrderedDict(required=True, content=api.request_schema),
                responses=default_response,
            )
        )

    docs["paths"] = paths
    return docs
| [
"noreply@github.com"
] | iancoffey.noreply@github.com |
748c75b040df983ab508d2ae5d65dc6daf4fbe25 | 48269b11620cc2d39e466f70a906d0db86687bdd | /docs/exts/docs_build/fetch_inventories.py | e9da26442bd5458cdb9d8210a8d8979361e99052 | [
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT"
] | permissive | spkjess/airflow | 505a2511e1fe99fdffa3455e912655cfdbcc8ace | 1efb17b7aa404013cd490ba3dad4f7d2a70d4cb2 | refs/heads/main | 2023-04-01T16:07:36.959821 | 2021-03-15T20:42:36 | 2021-03-15T20:42:36 | 359,291,601 | 1 | 0 | Apache-2.0 | 2021-04-19T01:18:45 | 2021-04-19T01:18:44 | null | UTF-8 | Python | false | false | 3,840 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import concurrent
import concurrent.futures
import datetime
import os
import shutil
import requests
from requests.adapters import DEFAULT_POOLSIZE
from docs.exts.docs_build.docs_builder import ( # pylint: disable=no-name-in-module
get_available_providers_packages,
)
from docs.exts.docs_build.third_party_inventories import ( # pylint: disable=no-name-in-module
THIRD_PARTY_INDEXES,
)
CURRENT_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.abspath(os.path.join(CURRENT_DIR, os.pardir, os.pardir, os.pardir))
DOCS_DIR = os.path.join(ROOT_DIR, 'docs')
CACHE_DIR = os.path.join(DOCS_DIR, '_inventory_cache')
EXPIRATION_DATE_PATH = os.path.join(DOCS_DIR, '_inventory_cache', "expiration-date")
S3_DOC_URL = "http://apache-airflow-docs.s3-website.eu-central-1.amazonaws.com"
S3_DOC_URL_VERSIONED = S3_DOC_URL + "/docs/{package_name}/latest/objects.inv"
S3_DOC_URL_NON_VERSIONED = S3_DOC_URL + "/docs/{package_name}/objects.inv"
def _fetch_file(session: requests.Session, url: str, path: str):
response = session.get(url, allow_redirects=True, stream=True)
if not response.ok:
print(f"Failed to fetch inventory: {url}")
return
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'wb') as f:
response.raw.decode_content = True
shutil.copyfileobj(response.raw, f)
print(f"Fetched inventory: {url}")
def _is_outdated(path: str):
if not os.path.exists(path):
return True
delta = datetime.datetime.now() - datetime.datetime.fromtimestamp(os.path.getmtime(path))
return delta > datetime.timedelta(hours=12)
def fetch_inventories():
    """Fetch all inventories for Airflow documentation packages and store in cache."""
    os.makedirs(os.path.dirname(CACHE_DIR), exist_ok=True)

    # Collect (url, destination-path) pairs: one per provider package, plus
    # apache-airflow itself, the providers index, and third-party indexes.
    to_download = [
        (
            S3_DOC_URL_VERSIONED.format(package_name=provider_pkg),
            f'{CACHE_DIR}/{provider_pkg}/objects.inv',
        )
        for provider_pkg in get_available_providers_packages()
    ]
    to_download.append(
        (
            S3_DOC_URL_VERSIONED.format(package_name='apache-airflow'),
            f'{CACHE_DIR}/apache-airflow/objects.inv',
        )
    )
    to_download.append(
        (
            S3_DOC_URL_NON_VERSIONED.format(package_name='apache-airflow-providers'),
            f'{CACHE_DIR}/apache-airflow-providers/objects.inv',
        )
    )
    for third_party_name, third_party_url in THIRD_PARTY_INDEXES.items():
        to_download.append(
            (
                f"{third_party_url}/objects.inv",
                f'{CACHE_DIR}/{third_party_name}/objects.inv',
            )
        )

    # Drop anything that is already cached and still fresh (< 12h old).
    to_download = [(url, path) for url, path in to_download if _is_outdated(path)]
    if not to_download:
        print("Nothing to do")
        return

    print(f"To download {len(to_download)} inventorie(s)")
    # One shared HTTP session, fetched concurrently by a thread pool.
    with requests.Session() as session, concurrent.futures.ThreadPoolExecutor(DEFAULT_POOLSIZE) as pool:
        for url, path in to_download:
            pool.submit(_fetch_file, session=session, url=url, path=path)
| [
"noreply@github.com"
] | spkjess.noreply@github.com |
ea52ceb8295baeb2c4c315b6c127e89d6e0b1584 | c4114eca345fa2c650c9069efc3648ac1c090d05 | /app/models.py | f5233bf6e3fe0c254793724d1079c793217b4629 | [
"MIT"
] | permissive | eruiz67/django-adminlte | b57007fdca75c4594881a1846cd856363389847d | ed8cbaa219bf3754d4413f1e629445c28b714f69 | refs/heads/master | 2023-06-26T01:42:04.148117 | 2021-07-25T22:32:14 | 2021-07-25T22:32:14 | 389,435,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,354 | py | #from typing_extensions import Required
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models.fields.related import ManyToManyField
from django.utils.translation import gettext_lazy as _
from django.utils.timezone import now
from django.core.validators import MinValueValidator, RegexValidator
from django.db.models import Q
class BaseModel(models.Model):
    """
    Abstract audit model: records who created / last modified each row and
    when, plus an ``active`` flag. Related to :model:`auth.User`.
    (Original docstring was in Spanish; meaning preserved.)
    """
    # Set once on INSERT (auto_now_add); never editable in forms/admin.
    create_date = models.DateTimeField('fecha creación', editable=False, auto_now_add=True)
    # Creating user; kept as NULL if the user account is later deleted.
    create_uid = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL,
                                   related_name='+', verbose_name=_('creado por'), editable=False, null=True, blank=True)
    # Refreshed on every save() (auto_now).
    write_date = models.DateTimeField(verbose_name=_('última modificación'), null=True, blank=True,
                                      editable=False, auto_now=True)
    write_uid = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL,
                                  null=True, blank=True, related_name='+',
                                  verbose_name='modificado por', editable=False)
    # Enabled flag — presumably used as a soft-delete marker; confirm in views.
    active = models.BooleanField(verbose_name=_('Activo?'), default=True)
    class Meta:
        abstract = True
        verbose_name = _("Registro")
        verbose_name_plural = _("Registros")
class PersonModel(BaseModel):
    """Base person record (name, surnames, 11-digit ID card, birth date).

    Concrete multi-table-inheritance parent of ``StudentModel`` and
    ``WorkerModel`` (table ``app_person``).
    """
    name = models.CharField(verbose_name=_("Nombre "), max_length=50, blank=False, null=False)
    surname1 = models.CharField(verbose_name=_("Primer apellido"), max_length=50, blank=False, null=False)
    surname2 = models.CharField(verbose_name=_("Segundo apellido"), max_length=50, blank=False, null=False)
    # National ID: exactly 11 digits, stored as text so leading zeros survive.
    identification = models.CharField(verbose_name=_("Carnet"), max_length=11, unique=True, blank=False, null=False,
                                      validators=[RegexValidator(regex=r"(^[\d]{11}$)",
                                                                 message=_("El carnet debe contener 11 dígitos"))])
    birthday_date = models.DateField(verbose_name=_("Fecha de nacimiento"), blank=False, null=False)
    identification_image_front = models.FileField(verbose_name=_("Foto carnet frontal"),
                                                  upload_to="app_person_identification_image_front",
                                                  null=True, blank=True)

    def limit_categories_choices():
        # Only generic 'person'/'worker' categories are selectable here.
        return (Q(type='worker') | Q(type='person'))

    # FIX: removed ``null=True`` — it has no effect on ManyToManyField and
    # triggers Django system check fields.W340; ``blank=True`` alone already
    # makes the relation optional, so behavior is unchanged.
    categories = models.ManyToManyField("CategoryModel", verbose_name=_("Categorías"),
                                        limit_choices_to=limit_categories_choices, blank=True)

    def __str__(self):
        return "{} {} {}".format(self.name, self.surname1, self.surname2)

    @property
    def get_full_name(self):
        """Full display name: "name surname1 surname2"."""
        return "{} {} {}".format(self.name, self.surname1, self.surname2)

    class Meta:
        db_table = 'app_person'
        managed = True
        verbose_name = _('Persona')
        verbose_name_plural = _('Personas')
class StudentModel(PersonModel):
    """Student person record; adds the academic year (table ``app_student``)."""
    # (stored value, human-readable label) pairs for ``year``.
    YEAR_CHOICES = (
        ('1', _('Primero')),
        ('2', _('Segundo')),
        ('3', _('Tercero')),
        ('4', _('Cuarto')),
        ('5', _('Quinto')),
        ('post', _('Posgrado')),
    )
    year = models.CharField(verbose_name=_("Año"), max_length=50, choices=YEAR_CHOICES)
    class Meta:
        db_table = 'app_student'
        managed = True
        verbose_name = _('Estudiante')
        verbose_name_plural = _('Estudiantes')
class WorkerModel(PersonModel):
    """Worker person record with contract type and work placement
    (table ``app_worker``)."""
    # Contract kind: permanent ('fixed') or temporary ('contract').
    WORKER_TYPE_CHOICES = (
        ('fixed', _('Fijo')),
        ('contract', _('Contrato')),
    )
    type = models.CharField(verbose_name=_("Tipo trabajador"), max_length=50, choices=WORKER_TYPE_CHOICES )
    # Both placements are optional; deleting an area/department removes its workers.
    area = models.ForeignKey("AreaModel", verbose_name=_("Area"),
                             related_name="workers", on_delete=models.CASCADE,
                             blank=True, null=True)
    department = models.ForeignKey("DepartmentModel", verbose_name=_("Departamento"),
                                   related_name="workers", on_delete=models.CASCADE,
                                   blank=True, null=True)
    # Whether the worker is doing social service.
    is_social_service = models.BooleanField(_('Servicio social'), default=False)
    class Meta:
        db_table = 'app_worker'
        managed = True
        verbose_name = _('Trabajador')
        verbose_name_plural = _('Trabajadores')
class AreaModel(BaseModel):
    """Organizational area (table ``app_area``); parent of departments."""
    name = models.CharField(verbose_name=_('Nombre'),max_length=50, unique=True, blank=False, null=False)
    description = models.TextField(verbose_name=_('Descripción'),max_length=500, blank=True, null=True)
    def __str__(self):
        return self.name
    class Meta:
        db_table = 'app_area'
        managed = True
        verbose_name = _('Area')
        verbose_name_plural = _('Areas')
class DepartmentModel(BaseModel):
    """Department inside an area (table ``app_department``)."""
    name = models.CharField(verbose_name=_('Nombre'),max_length=50, unique=True, blank=False, null=False)
    description = models.TextField(verbose_name=_('Descripción'),max_length=500, blank=True, null=True)
    # Deleting an area cascades to its departments.
    area = models.ForeignKey("AreaModel", verbose_name=_("Área"),
                             related_name="departments", on_delete=models.CASCADE)
    def __str__(self):
        # Display as "area - department" for unambiguous listings.
        return "{} - {}".format(self.area.name,self.name)
    class Meta:
        db_table = 'app_department'
        managed = True
        verbose_name = _('Departamento')
        verbose_name_plural = _('Departamentos')
class CategoryModel(BaseModel):
    """Category tag applicable to persons, workers or students
    (table ``app_category``)."""
    # (stored value, human-readable label) pairs for ``type``.
    CATEGORY_TYPE_CHOICES = (
        ('person', _('Persona')),
        ('worker', _('Trabajador')),
        ('student', _('Estudiante')),
    )
    name = models.CharField(verbose_name=_('Nombre'), max_length=50, unique=True, blank=False, null=False)
    description = models.TextField(verbose_name=_('Descripción'), max_length=500, blank=True, null=True)
    # Optional: a category with no type is treated as generic.
    type = models.CharField(verbose_name=_("Tipo Categoría"), max_length=50,
                            choices=CATEGORY_TYPE_CHOICES, blank=True, null=True)
    def __str__(self):
        return self.name
    class Meta:
        db_table = 'app_category'
        managed = True
        # FIX: wrapped in gettext_lazy for consistency — every other model in
        # this module marks its Meta verbose_name for translation.
        verbose_name = _('Categoría')
verbose_name_plural = 'Categorías' | [
"ernesto930526@gmail.com"
] | ernesto930526@gmail.com |
1a84703126717aa7ed485aec20504c8540c4f3fb | 1b41f589d7b594c805a02e480a073055683170d2 | /models/tridentnet/resnet_v2.py | 31d8889c12279c4e855a7647b8c13f4dbfcce7b2 | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | KaihuaTang/simpledet | d8278cba93a9e737c08ae824b312c63de84ae2a0 | 87b881ec0855445977aabe0e571c006c59a05a60 | refs/heads/master | 2020-05-18T17:26:01.855916 | 2019-05-03T12:40:26 | 2019-05-03T12:40:26 | 184,555,756 | 0 | 0 | Apache-2.0 | 2019-05-02T09:40:12 | 2019-05-02T09:40:11 | null | UTF-8 | Python | false | false | 12,495 | py | from __future__ import print_function
import mxnet as mx
import mxnext as X
from mxnext.backbone.resnet_v2 import Builder
bn_count = [10000]
class TridentResNetV2Builder(Builder):
    """Builder for TridentNet backbones based on pre-activation ResNet-v2.

    A "trident" stage replicates ResNet units into several parallel branches
    that differ only in dilation rate; conv/BN weights may be shared across
    branches. Every ``*_shared`` helper takes a *list* of per-branch symbols
    and returns a list of the same length.
    """

    def __init__(self):
        super(TridentResNetV2Builder, self).__init__()

    @staticmethod
    def bn_shared(data, name, normalizer, branch_ids=None, share_weight=True):
        """Batch-normalize every branch symbol in ``data``.

        :param data: list of per-branch symbols
        :param normalizer: BN-like op factory (as produced by X.normalizer_factory)
        :param branch_ids: ids used in layer names; defaults to 0..len(data)-1
        :param share_weight: share gamma/beta/moving stats across branches
        :return: list of normalized symbols, one per branch
        """
        if branch_ids is None:
            branch_ids = range(len(data))

        # One set of BN parameters, reused by every branch when shared.
        gamma = X.var(name + "_gamma")
        beta = X.var(name + "_beta")
        moving_mean = X.var(name + "_moving_mean")
        moving_var = X.var(name + "_moving_var")

        bn_layers = []
        for i, data_i in zip(branch_ids, data):
            if share_weight:
                bn_i = normalizer(data=data_i, name=name + "_shared%d" % i,
                                  gamma=gamma, beta=beta, moving_mean=moving_mean, moving_var=moving_var)
            else:
                bn_i = normalizer(data=data_i, name=name + "_branch%d" % i)
            bn_layers.append(bn_i)
        return bn_layers

    @staticmethod
    def conv_shared(data, name, kernel, num_filter, branch_ids=None, no_bias=True, share_weight=True,
                    pad=(0, 0), stride=(1, 1), dilate=(1, 1)):
        """Convolve every branch symbol in ``data``.

        ``pad``/``stride``/``dilate`` may each be a list giving a per-branch
        value — this is how trident branches get different dilations.
        """
        if branch_ids is None:
            branch_ids = range(len(data))

        weight = X.var(name + '_weight')
        if no_bias:
            bias = None
        else:
            bias = X.var(name + '_bias')

        conv_layers = []
        for i in range(len(data)):
            data_i = data[i]
            stride_i = stride[i] if type(stride) is list else stride
            dilate_i = dilate[i] if type(dilate) is list else dilate
            pad_i = pad[i] if type(pad) is list else pad
            branch_i = branch_ids[i]
            if share_weight:
                conv_i = X.conv(data=data_i, kernel=kernel, filter=num_filter, stride=stride_i, dilate=dilate_i, pad=pad_i,
                                name=name + '_shared%d' % branch_i, no_bias=no_bias, weight=weight, bias=bias)
            else:
                conv_i = X.conv(data=data_i, kernel=kernel, filter=num_filter, stride=stride_i, dilate=dilate_i, pad=pad_i,
                                name=name + '_branch%d' % branch_i, no_bias=no_bias)
            conv_layers.append(conv_i)
        return conv_layers

    @staticmethod
    def deform_conv_shared(data, name, conv_offset, kernel, num_filter, branch_ids=None, no_bias=True, share_weight=True,
                           num_deformable_group=4, pad=(0, 0), stride=(1, 1), dilate=(1, 1)):
        """Deformable convolution applied per branch (same sharing rules as
        ``conv_shared``); ``conv_offset`` may also be a per-branch list.
        """
        if branch_ids is None:
            branch_ids = range(len(data))

        weight = X.var(name + '_weight')
        if no_bias:
            bias = None
        else:
            bias = X.var(name + '_bias')

        conv_layers = []
        for i in range(len(data)):
            data_i = data[i]
            stride_i = stride[i] if type(stride) is list else stride
            dilate_i = dilate[i] if type(dilate) is list else dilate
            pad_i = pad[i] if type(pad) is list else pad
            conv_offset_i = conv_offset[i] if type(conv_offset) is list else conv_offset
            branch_i = branch_ids[i]
            # FIX: honor the ``num_deformable_group`` argument — it was
            # previously hard-coded to 4 in both calls below, silently
            # ignoring the parameter. The default of 4 preserves the old
            # behavior for all existing callers.
            if share_weight:
                conv_i = mx.contrib.symbol.DeformableConvolution(
                    data=data_i, offset=conv_offset_i, kernel=kernel, num_filter=num_filter, stride=stride_i,
                    num_deformable_group=num_deformable_group,
                    dilate=dilate_i, pad=pad_i, no_bias=no_bias, weight=weight, bias=bias, name=name + '_shared%d' % branch_i)
            else:
                conv_i = mx.contrib.symbol.DeformableConvolution(
                    data=data_i, offset=conv_offset_i, kernel=kernel, num_filter=num_filter, stride=stride_i,
                    num_deformable_group=num_deformable_group,
                    dilate=dilate_i, pad=pad_i, no_bias=no_bias, name=name + '_branch%d' % branch_i)
            conv_layers.append(conv_i)
        return conv_layers

    @staticmethod
    def stack_branch_symbols(data_list):
        """Fold the branch list into the batch axis:
        B symbols of shape (N, ...) -> one symbol of shape (N * B, ...).
        """
        data = mx.symbol.stack(*data_list, axis=1)
        data = mx.symbol.Reshape(data, (-3, -2))  # merge (N, B) into one axis
        return data

    @classmethod
    def resnet_trident_unit(cls, data, name, filter, stride, dilate, proj, norm_type, norm_mom, ndev,
                            branch_ids, branch_bn_shared, branch_conv_shared, branch_deform=False):
        """One pre-activation bottleneck unit applied to every trident branch.

        ``data`` is a list of branch symbols; a list of the same length is
        returned. ``proj`` selects a 1x1 projection shortcut taken from the
        pre-activation output (ResNet-v2 style); otherwise the identity input
        is used. ``branch_deform`` swaps conv2 for a deformable convolution.
        """
        if branch_ids is None:
            branch_ids = range(len(data))

        norm = X.normalizer_factory(type=norm_type, ndev=ndev, mom=norm_mom)

        # Pre-activation: BN + ReLU precede each convolution.
        bn1 = cls.bn_shared(
            data, name=name + "_bn1", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared)
        relu1 = [X.relu(bn) for bn in bn1]
        conv1 = cls.conv_shared(
            relu1, name=name + "_conv1", num_filter=filter // 4, kernel=(1, 1),
            branch_ids=branch_ids, share_weight=branch_conv_shared)

        bn2 = cls.bn_shared(
            conv1, name=name + "_bn2", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared)
        relu2 = [X.relu(bn) for bn in bn2]
        if not branch_deform:
            conv2 = cls.conv_shared(
                relu2, name=name + "_conv2", num_filter=filter // 4, kernel=(3, 3),
                pad=dilate, stride=stride, dilate=dilate,
                branch_ids=branch_ids, share_weight=branch_conv_shared)
        else:
            # 72 offset channels = 4 groups * 2 coords * 3 * 3 kernel taps;
            # NOTE(review): keep in sync with num_deformable_group below.
            conv2_offset = cls.conv_shared(
                relu2, name=name + "_conv2_offset", num_filter=72, kernel=(3, 3),
                pad=(1, 1), stride=(1, 1), dilate=(1, 1), no_bias=False,
                branch_ids=branch_ids, share_weight=branch_conv_shared)
            conv2 = cls.deform_conv_shared(
                relu2, name=name + "_conv2", conv_offset=conv2_offset, num_filter=filter // 4, kernel=(3, 3),
                pad=dilate, stride=stride, dilate=dilate, num_deformable_group=4,
                branch_ids=branch_ids, share_weight=branch_conv_shared)

        bn3 = cls.bn_shared(
            conv2, name=name + "_bn3", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared)
        relu3 = [X.relu(bn) for bn in bn3]
        conv3 = cls.conv_shared(
            relu3, name=name + "_conv3", num_filter=filter, kernel=(1, 1),
            branch_ids=branch_ids, share_weight=branch_conv_shared)

        if proj:
            shortcut = cls.conv_shared(
                relu1, name=name + "_sc", num_filter=filter, kernel=(1, 1),
                branch_ids=branch_ids, share_weight=branch_conv_shared)
        else:
            shortcut = data

        return [X.add(conv3_i, shortcut_i, name=name + "_plus_branch{}".format(i))
                for i, conv3_i, shortcut_i in zip(branch_ids, conv3, shortcut)]

    @classmethod
    def resnet_trident_stage(cls, data, name, num_block, filter, stride, dilate, norm_type, norm_mom, ndev,
                             num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform):
        """One trident stage: a single-branch downsampling/projection unit
        followed by ``num_block - 1`` multi-branch units with one dilation per
        branch. With ``branch_deform`` only the last three units are deformable.
        """
        assert isinstance(dilate, list) and len(dilate) == num_branch, 'dilate should be a list with num_branch items.'
        d = [(rate, rate) for rate in dilate]

        # Unit 1 downsamples and projects; its output is fanned out to all branches.
        data = cls.resnet_unit(data, "{}_unit1".format(name), filter, stride, 1, True, norm_type, norm_mom, ndev)
        data = [data] * num_branch
        for i in range(2, num_block + 1):
            if branch_deform and i >= num_block - 2:
                unit_deform = True
            else:
                unit_deform = False
            data = cls.resnet_trident_unit(
                data, "{}_unit{}".format(name, i), filter, (1, 1), d, False, norm_type, norm_mom, ndev,
                branch_ids, branch_bn_shared, branch_conv_shared, branch_deform=unit_deform)
        return data

    @classmethod
    def resnet_trident_c4(cls, data, num_block, stride, dilate, norm_type, norm_mom, ndev,
                          num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform):
        """Stage 3 (C4, 1024 filters) built as a trident stage."""
        return cls.resnet_trident_stage(
            data, "stage3", num_block, 1024, stride, dilate, norm_type, norm_mom, ndev,
            num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform)

    @classmethod
    def resnet_factory(cls, depth, use_3x3_conv0, use_bn_preprocess,
                       num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,
                       norm_type="local", norm_mom=0.9, ndev=None, fp16=False):
        """Assemble c1..c5. C4 is the trident stage; its branch list is folded
        into the batch dimension before C5 runs.
        """
        num_c2_unit, num_c3_unit, num_c4_unit, num_c5_unit = TridentResNetV2Builder.depth_config[depth]

        data = X.var("data")
        if fp16:
            data = X.to_fp16(data, "data_fp16")
        c1 = cls.resnet_c1(data, use_3x3_conv0, use_bn_preprocess, norm_type, norm_mom, ndev)
        c2 = cls.resnet_c2(c1, num_c2_unit, 1, 1, norm_type, norm_mom, ndev)
        c3 = cls.resnet_c3(c2, num_c3_unit, 2, 1, norm_type, norm_mom, ndev)
        c4 = cls.resnet_trident_c4(c3, num_c4_unit, 2, branch_dilates, norm_type, norm_mom, ndev,
                                   num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform)
        # stack branch features and merge into batch dim
        c4 = cls.stack_branch_symbols(c4)
        c5 = cls.resnet_c5(c4, num_c5_unit, 1, 2, norm_type, norm_mom, ndev)

        return c1, c2, c3, c4, c5

    @classmethod
    def resnet_c4_factory(cls, depth, use_3x3_conv0, use_bn_preprocess,
                          num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,
                          norm_type="local", norm_mom=0.9, ndev=None, fp16=False):
        """Return only the (branch-merged) C4 feature map."""
        c1, c2, c3, c4, c5 = cls.resnet_factory(depth, use_3x3_conv0, use_bn_preprocess,
                                                num_branch, branch_dilates, branch_ids,
                                                branch_bn_shared, branch_conv_shared, branch_deform,
                                                norm_type, norm_mom, ndev, fp16)
        return c4

    @classmethod
    def resnet_c4c5_factory(cls, depth, use_3x3_conv0, use_bn_preprocess,
                            num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,
                            norm_type="local", norm_mom=0.9, ndev=None, fp16=False):
        """Return (C4, C5); C5 gets a final fixed-BN + ReLU head."""
        c1, c2, c3, c4, c5 = cls.resnet_factory(depth, use_3x3_conv0, use_bn_preprocess,
                                                num_branch, branch_dilates, branch_ids,
                                                branch_bn_shared, branch_conv_shared, branch_deform,
                                                norm_type, norm_mom, ndev, fp16)
        c5 = X.fixbn(c5, "bn1")
        c5 = X.relu(c5)
        return c4, c5

    def get_backbone(self, variant, depth, endpoint, normalizer, fp16,
                     num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform):
        """Build a trident backbone.

        :param variant: "mxnet" (BN-preprocess stem) or "tusimple" (3x3 stem)
        :param endpoint: "c4" for C4 only, "c4c5" for the (C4, C5) pair
        :raises KeyError: on an unknown variant or endpoint
        """
        # parse variant
        if variant == "mxnet":
            use_bn_preprocess = True
            use_3x3_conv0 = False
        elif variant == "tusimple":
            use_bn_preprocess = False
            use_3x3_conv0 = True
        else:
            raise KeyError("Unknown backbone variant {}".format(variant))

        # parse endpoint
        if endpoint == "c4":
            factory = self.resnet_c4_factory
        elif endpoint == "c4c5":
            factory = self.resnet_c4c5_factory
        else:
            raise KeyError("Unknown backbone endpoint {}".format(endpoint))

        return factory(depth, use_3x3_conv0, use_bn_preprocess,
                       num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,
                       norm_type=normalizer, fp16=fp16)
| [
"chenyuntao08@gmail.com"
] | chenyuntao08@gmail.com |
bfbd99cadf44f02a58515815d840f07f151da9c5 | e62133933fab8fe15af62280e1a63aed571fb518 | /elementary/python/ej8.py | 270c9cc766c1c48178ca89d31001a93cc52cb0ce | [
"MIT"
] | permissive | keyduq/programming-practices | 95a37087cc563f98090818b42ca9bf18b4ee6941 | 56b99db86d74d441c4c88cd313844e4e39c86872 | refs/heads/master | 2021-01-17T20:53:16.409770 | 2016-08-14T05:32:00 | 2016-08-14T05:32:00 | 65,651,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | print("""\
Write a program that prints all prime numbers.
(Note: if your programming language does not support arbitrary size numbers, printing all primes up to the largest number you can easily represent is fine too.)
Created By Keyvin Duque <thkeyduq@gmail.com>
""")
def is_prime(num):
    """Return True when *num* is prime.

    FIX: the original loop started at ``actual = 1`` and the empty
    ``range(2, 1)`` made 1 pass the trial division, so 1 was printed as the
    first "prime". Trial division is also limited to sqrt(num) now.
    """
    if num < 2:
        return False
    for divisor in range(2, int(num ** 0.5) + 1):
        if num % divisor == 0:
            return False
    return True


def main():
    """Print the first n primes, where n is read from the user."""
    n = int(input('Ingrese la cantidad de numeros primos que desea generar: '))
    contador = 0   # how many primes printed so far
    actual = 2     # current candidate; 2 is the smallest prime
    while contador < n:
        if is_prime(actual):
            print(actual)
            contador += 1
        actual += 1


if __name__ == '__main__':
    main()
| [
"thkeyduq@gmail.com"
] | thkeyduq@gmail.com |
fbd3495eb1889511b26d78ebe8fb5d8f63fa8a5a | 5e11cbf593a9793359e1ca4f8e4a18af38e32738 | /backend/mobilech_dev_14269/wsgi.py | 15db35e2d85f5f7d4d97682b4b31b7671c76cc1e | [] | no_license | crowdbotics-apps/mobilech-dev-14269 | 6c76edca03e7d48c428d081afc055a9cba358d04 | 50752c0b633d7077ced903c9caccd2aec2f04520 | refs/heads/master | 2023-01-01T15:59:38.895266 | 2020-10-30T17:51:48 | 2020-10-30T17:51:48 | 308,691,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
WSGI config for mobilech_dev_14269 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the handler.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobilech_dev_14269.settings')
# Module-level WSGI callable imported by servers (gunicorn/uWSGI/mod_wsgi).
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
3abae9787cb41ba76f77728eeaea9b1d2c1e33dc | 70de6e336682acb00b8d8115590eaf1b21a70df6 | /python/evenorodd.py | d5ece6a1d97350f8b4d6e8a0abe2e1d729408585 | [] | no_license | deepthi93k/icta-calicut-fullstack | 1bd42fba0803ef4e173ad1278ac9f50da8bd708d | 8e5e5e4f69ff51ab49ab7ccc391f160661adac41 | refs/heads/master | 2020-04-05T03:04:23.194393 | 2018-11-13T11:14:17 | 2018-11-13T11:14:17 | 156,500,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | num=int(input("enter a number"))
# Report the parity of the number read above; odd first, even otherwise.
if num % 2 != 0:
    print(num, "number is odd")
else:
    print(num, "number is even")
"noreply@github.com"
] | deepthi93k.noreply@github.com |
da3ae45886b2907ae82b1cfa4a5950a21bdd6b21 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_25090.py | f38847da08bed8417de659027272d8e0da3b17e8 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | # Django and postgres: Not connecting
postgresql.conf
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
51dd1aa02b840a5de1eae0dced48d9f62fdd6f11 | c0a30d0b75f1b09c52b57e696c3b9d1e30c9ebaa | /KPIactivityCus.py | 79bfa1e78e7a451bd9acbaa45d5b8553d5c18c7c | [] | no_license | mbmarcin/other | bfe37116ee415be34f4a52b77f3f492f86f4ae57 | 3db4f77b252d26186c3cb838342f4fa61aa5ed5d | refs/heads/master | 2020-11-27T22:25:22.441027 | 2019-12-22T20:35:06 | 2019-12-22T20:35:06 | 229,627,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,151 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 2 11:49:07 2019
@author: aspr323
"""
import pandas as pd
file = "tableInput.txt"
def getData(nameFile):
    """Load the sales table from *nameFile*.

    Expects a ';'-separated file with at least the columns ``idCus`` and
    ``dateSales``; ``idCus`` is forced to string so leading zeros survive.
    """
    return pd.read_csv(nameFile, sep=';', low_memory=False, dtype={"idCus": 'str'})
# ---- Load & prepare -------------------------------------------------------
mainFrame = getData(file)
mainFrame['dateSales'] = pd.to_datetime(mainFrame.loc[:,'dateSales']) #date format for all dates
# Unique customer ids to classify.
listCus = mainFrame['idCus'].drop_duplicates().tolist()
#listCus = ['10089018']
# Output table: one classification row per customer.
t1 = pd.DataFrame(columns=('idCus','status','dni','status2','avg','std'))
# Reference point: most recent sale in the whole dataset.
maxDate = mainFrame['dateSales'].max()
maxYear = mainFrame['dateSales'].dt.year.max()
maxYearMonth = mainFrame.loc[mainFrame.dateSales.dt.year == maxYear,'dateSales'].dt.month.max()
rowNum = 0
CusidList = list()
# ---- Classify each customer by recency of purchases -----------------------
for i in listCus:
    dta = mainFrame.loc[mainFrame.idCus == i,'dateSales'].sort_values(ascending=False) # data for user
    # Current-year sales only, newest first, as a frame for gap computation.
    t = mainFrame.loc[(mainFrame.idCus == i) & (mainFrame.dateSales.dt.year == maxYear),'dateSales'].sort_values(ascending=False).to_frame()
    # Pair each sale with the next (more recent) one to get day gaps.
    t['date_new'] = t.dateSales.shift(periods=1, freq=None, axis=0)
    t = t.iloc[1:]
    t['df_date'] = (t['date_new'] - t['dateSales']).dt.days.fillna(0).astype(int)
    # Mean / std of days between consecutive purchases this year.
    avg = round(t.df_date.mean(),3)
    std = round(t.df_date.std(),3)
    # Days since the customer's latest sale (and latest outside current month).
    DaysFromMaxDays = (maxDate-dta.max()).days
    DaysFromMaxDays_ = (maxDate-dta[dta.dt.month != maxYearMonth].max()).days
    x = 0
    # No purchase in the latest month: check activity in the 3 prior months.
    if dta[(dta.dt.year == maxYear) & (dta.dt.month == maxYearMonth)].count() == 0:
        for a in range(1,4):
            if dta[dta.dt.month == (maxDate - pd.DateOffset(months=a)).month].count() > 0:
                x += 1
            else:
                break
        # Active each of the last 3 months but not this one -> 'neglected'.
        # NOTE(review): this row puts the label in status2 (status=0),
        # unlike every other branch — confirm that is intentional.
        if x == 3:
            t1.loc[rowNum] = [i,0,DaysFromMaxDays,'neglected',avg,std]
    # Bought only in the latest month of the latest year -> brand new.
    elif int(dta[(dta.dt.year == maxYear)& (dta.dt.month == maxYearMonth)].count()) >= 1 and int(dta[(dta.dt.year == maxYear) & (dta.dt.month != maxYearMonth)].count()) == 0:
        t1.loc[rowNum] = [i,'newThisYear',DaysFromMaxDays,0,avg,std]
    # Inactivity buckets by days since last purchase.
    elif DaysFromMaxDays >= 180:
        t1.loc[rowNum] = [i,'lost6M',DaysFromMaxDays,0,avg,std]
    elif DaysFromMaxDays >= 90:
        t1.loc[rowNum] = [i,'lost3M',DaysFromMaxDays,0,avg,std]
    elif DaysFromMaxDays >= 60:
        t1.loc[rowNum] = [i,'lost2M',DaysFromMaxDays,0,avg,std]
    # Bought this month after a long silence -> returned customer.
    elif dta[(dta.dt.year == maxYear) & (dta.dt.month == maxYearMonth)].count() > 0 and DaysFromMaxDays_ >= 210:
        t1.loc[rowNum] = [i,'backToLife6M',DaysFromMaxDays_,0,avg,std]
    elif dta[(dta.dt.year == maxYear) & (dta.dt.month == maxYearMonth)].count() > 0 and DaysFromMaxDays_ >= 120:
        t1.loc[rowNum] = [i,'backToLife3M',DaysFromMaxDays_,0,avg,std]
    else:
        t1.loc[rowNum] = [i,'normal',DaysFromMaxDays,0,avg,std]
    rowNum += 1
    CusidList.append(i)
    # Progress report every 10 customers (the else/continue is a no-op).
    if len(CusidList) % 10 == 0:
        print("Qty_user: ",len(CusidList), "/",len(listCus))
    else:
        continue
# Export every non-'normal' customer ('neglected' rows pass: their status is 0).
t1.loc[t1.status != 'normal'].to_csv('KPIuser2.txt', sep=';', index=False, header=True)
print('koniec!')
| [
"noreply@github.com"
] | mbmarcin.noreply@github.com |
15801ee24f13903766a377d675ec340fd3a7fc8f | 706a2b167febb7b453aa540e7eaba07536fbe7ec | /venv/bin/pip | 1bebb9435e9b1aef0b9a064f0110e72744ab1a95 | [] | no_license | rmclean98/GroceryGoat | 2397b10f3752f5ee675cd3cc4f396d6481d747c4 | acb5c7c1143b4897466a887457d07fdc1cf0149a | refs/heads/main | 2023-07-07T09:31:46.959175 | 2021-08-03T19:29:04 | 2021-08-03T19:29:04 | 353,079,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/home/rmclean98/Documents/GroceryGoat/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script entry point for pip inside a virtualenv.
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Strip a trailing "-script.pyw" / ".exe" wrapper suffix from argv[0]
    # so pip reports a consistent program name across platforms.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())  # propagate pip's exit status to the shell
| [
"ryan@mcleandigital.com"
] | ryan@mcleandigital.com | |
14e95b30fef502d2d37a1c6b893748e10fd62be7 | c361a25acecd016677bbd0c6d9fc56de79cf03ed | /PTM/CassandraHost.py | ab3d48ce871b5a763384f888edaa60579991175f | [] | no_license | danielmellado/zephyr | f8931633045959e7e9a974de8b700a287a1ae94e | dc6f85b78b50e599504966154b927fe198d7402d | refs/heads/master | 2021-01-12T22:31:24.479814 | 2015-10-14T05:39:04 | 2015-10-14T06:24:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,469 | py | __author__ = 'micucci'
# Copyright 2015 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from NetNSHost import NetNSHost
from common.Exceptions import *
from common.FileLocation import *
from PhysicalTopologyConfig import *
from common.CLI import *
from common.IP import IP
from ConfigurationHandler import FileConfigurationHandler
class CassandraHost(NetNSHost):
    """A Cassandra node running inside a network namespace.

    Wraps configuration, start/stop control and readiness polling of a
    single Cassandra instance managed by the physical-topology manager.
    """
    def __init__(self, name, ptm):
        super(CassandraHost, self).__init__(name, ptm)
        # Peer IPs forming the Cassandra seed list (filled from PTC config).
        self.cassandra_ips = []
        # Numeric id used to suffix per-node /etc, /var/lib, /var/log dirs.
        self.num_id = '1'
        # Ring position token for this node (cassandra.yaml initial_token).
        self.init_token = ''
        self.ip = IP()
        self.configurator = CassandraFileConfiguration()
    def do_extra_config_from_ptc_def(self, cfg, impl_cfg):
        """
        Configure this host type from a PTC HostDef config and the
        implementation-specific configuration
        :type cfg: HostDef
        :type impl_cfg: ImplementationDef
        :return:
        """
        # Use the first IP of the first interface as this node's address.
        if len(cfg.interfaces.values()) > 0 and len(cfg.interfaces.values()[0].ip_addresses) > 0:
            self.ip = cfg.interfaces.values()[0].ip_addresses[0]
        if 'init_token' in impl_cfg.kwargs:
            self.init_token = impl_cfg.kwargs['init_token']
        if 'cassandra_ips' in impl_cfg.kwargs:
            for i in impl_cfg.kwargs['cassandra_ips']:
                self.cassandra_ips.append(IP(i))
        if 'id' in impl_cfg.kwargs:
            self.num_id = impl_cfg.kwargs['id']
    def prepare_config(self):
        """Write this node's config files and register its log for collection."""
        self.configurator.configure(self.num_id, self.cassandra_ips, self.init_token, self.ip)
        log_dir = '/var/log/cassandra.' + self.num_id
        self.ptm.log_manager.add_external_log_file(FileLocation(log_dir + '/system.log'), self.num_id,
                                                   '%Y-%m-%d %H:%M:%S,%f', 2)
    def print_config(self, indent=0):
        """Pretty-print this host's configuration (Python 2 print statements)."""
        super(CassandraHost, self).print_config(indent)
        print (' ' * (indent + 1)) + 'Num-id: ' + self.num_id
        print (' ' * (indent + 1)) + 'Init-token: ' + self.init_token
        print (' ' * (indent + 1)) + 'Self-IP: ' + str(self.ip)
        print (' ' * (indent + 1)) + 'Cassandra-IPs: ' + ', '.join(str(ip) for ip in self.cassandra_ips)
    def do_extra_create_host_cfg_map_for_process_control(self):
        """Serialize the extra state needed to rebuild this host remotely."""
        return {'num_id': self.num_id, 'ip': self.ip.to_map()}
    def do_extra_config_host_for_process_control(self, cfg_map):
        """Restore extra state from a map produced by the method above."""
        self.num_id = cfg_map['num_id']
        self.ip = IP.from_map(cfg_map['ip'])
    def wait_for_process_start(self):
        """Poll `nodetool status` until the node responds or ~20s elapse.

        :raises SocketException: if the node never becomes reachable.
        """
        # Wait a couple seconds for the process to start before polling nodetool
        time.sleep(2)
        # Checking Cassandra status
        retries = 0
        max_retries = 10
        connected = False
        while not connected:
            if self.cli.cmd('nodetool -h 127.0.0.1 status', return_status=True) == 0:
                connected = True
            else:
                retries += 1
                if retries > max_retries:
                    raise SocketException('Cassandra host ' + self.num_id + ' timed out while starting')
                time.sleep(2)
    def prepare_environment(self):
        # Bind-mount this node's per-id directories over the standard paths.
        self.configurator.mount_config(self.num_id)
    def cleanup_environment(self):
        self.configurator.unmount_config()
    def control_start(self):
        # Shrink the JVM heap so multiple nodes fit on one test machine.
        self.cli.cmd('/bin/bash -c "MAX_HEAP_SIZE=128M HEAP_NEWSIZE=64M /etc/init.d/cassandra start"')
    def control_stop(self):
        self.cli.cmd("/etc/init.d/cassandra stop")
class CassandraFileConfiguration(FileConfigurationHandler):
    """Creates and (un)mounts per-node copies of Cassandra's config,
    data, log and run directories, keyed by a numeric node id."""
    def __init__(self):
        super(CassandraFileConfiguration, self).__init__()
    def configure(self, num_id, cassandra_ips, init_token, self_ip):
        """Build /etc/cassandra.<id> from the stock config and patch it
        with this node's cluster name, token, seed list and addresses."""
        # Comma-separated seed list, e.g. "10.0.0.1,10.0.0.2".
        seed_str = ''.join([ip.ip + ',' for ip in cassandra_ips])[:-1]
        etc_dir = '/etc/cassandra.' + num_id
        var_lib_dir = '/var/lib/cassandra.' + num_id
        var_log_dir = '/var/log/cassandra.' + num_id
        var_run_dir = '/run/cassandra.' + num_id
        self.cli.rm(etc_dir)
        self.cli.copy_dir('/etc/cassandra', etc_dir)
        # Work around for https://issues.apache.org/jira/browse/CASSANDRA-5895
        self.cli.regex_file(etc_dir + '/cassandra-env.sh', 's/-Xss[1-9][0-9]*k/-Xss228k/')
        # Make JMX (nodetool) reachable on this node's own address.
        self.cli.replace_text_in_file(etc_dir + '/cassandra-env.sh',
                                      '# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>"',
                                      'JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=' + self_ip.ip + '"')
        # sed-style edits applied to cassandra.yaml in one pass.
        self.cli.regex_file_multi(etc_dir + '/cassandra.yaml',
                                  "s/^cluster_name:.*$/cluster_name: 'midonet'/",
                                  's/^initial_token:.*$/initial_token: ' + init_token + '/',
                                  "/^seed_provider:/,/^$/ s/seeds:.*$/seeds: '" + seed_str + "'/",
                                  's/^listen_address:.*$/listen_address: ' + self_ip.ip + '/',
                                  's/^rpc_address:.*$/rpc_address: ' + self_ip.ip + '/')
        # Fresh, cassandra-owned data/log/run directories for this node.
        self.cli.rm(var_lib_dir)
        self.cli.mkdir(var_lib_dir)
        self.cli.chown(var_lib_dir, 'cassandra', 'cassandra')
        self.cli.rm(var_log_dir)
        self.cli.mkdir(var_log_dir)
        self.cli.chown(var_log_dir, 'cassandra', 'cassandra')
        self.cli.rm(var_run_dir)
        self.cli.mkdir(var_run_dir)
        self.cli.chown(var_run_dir, 'cassandra', 'cassandra')
    def mount_config(self, num_id):
        """Bind the per-node directories over the standard Cassandra paths."""
        self.cli.mount('/run/cassandra.' + num_id, '/run/cassandra')
        self.cli.mount('/var/lib/cassandra.' + num_id, '/var/lib/cassandra')
        self.cli.mount('/var/log/cassandra.' + num_id, '/var/log/cassandra')
        self.cli.mount('/etc/cassandra.' + num_id, '/etc/cassandra')
    def unmount_config(self):
        """Undo mount_config (order mirrors the mounts above)."""
        self.cli.unmount('/run/cassandra')
        self.cli.unmount('/var/lib/cassandra')
        self.cli.unmount('/var/log/cassandra')
        self.cli.unmount('/etc/cassandra')
| [
"micucci@midokura.com"
] | micucci@midokura.com |
503d33f8240c23a69126ea4072031c1de03cd986 | 32827a6d5a4fcfc7607f4edad5d7b7a9ae676989 | /main.py | e96f2cd9d27114579fb00358454a7af9044b7e62 | [] | no_license | todayfirst/pv_prediction | 9e9d32457a5f800f0a1c8af1626e72cb09919569 | 6107ce6c839409cc92e35dbed955937b004fd639 | refs/heads/master | 2021-01-07T17:39:27.084930 | 2020-02-20T08:16:39 | 2020-02-20T08:16:39 | 241,770,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,620 | py | # -*- coding: utf-8 -*-
import os
import config
from part import make_blank
from part import import_data
from part import import_coms
from part import make_multitemporal_data
from part import make_multitemporal_coms
from part import erase_skip_and_time
from part import erase_coms
from part import min_max_0to1
from part import make_name
from part import set_train_data
from part import set_train_coms
from part import mlp_model
from part import cnn_model
from part import combine_model
from part import fit_model
from part import test_model
from part import write_evaluation_result
import itertools
import pickle
import pandas as pd
import numpy as np
import pvlib
#%%
# all_case_number : 실험 조건 개수 - config.py 에서 설정한 개수
# site_n : 실험 지역 개수
# case : 현재 루프의 실험번호
# each_case : 현재 루프의 실험번호의 조건들 리스트
# plant_id : 실험 지역 id
# cap : 해당 실험 지역의 발전용량
# weather_ele : 날씨 정보 요소
# comsset : 사용하는 영상의 종류
# cnn_mode : cnn을 사용하는지 안하는지
# hours : 예측에 사용될 자료의 시간대
# no_coms : 위성영상 사용하지 않으려면 1, 사용하면 0
# patch_size : cnn 시 사용할 패치의 개수
# layers : cnn 시 레이어 정보
# combine_layers : cnn 시 레이어 정보
# numofex : 한 조건당 반복실험 횟수
# test_mode : 어떤 방식으로 테스트 할지
# NMAE : nMAE를 계산할 오차들을 저장함
# sum_nmae : 절대오차들의 합을 계산
# minute : 분단위의 영상을 위한 변수
all_case_number = len(config.all_case)
## case = 0
##each_case = config.all_case[0]
## site_n = 1
for site_n in range(1,7):
NMAE = pd.DataFrame()
new_NMAE = pd.DataFrame()
for case, each_case in enumerate(config.all_case):
(plant_id, cap, weather_ele, comsset, cnn_mode, hours, no_coms, patch_size, layers, combine_layers, numofex, test_mode )=config.iterate(case,site_n)
#iter_weather = list(itertools.product(range(2), repeat = 9))
#for iter_in in range(len(iter_weather)):
# weather_ele = []
# for in_weather in range(1,10):
# if iter_weather[iter_in][in_weather-1] == 1:
# weather_ele.append(in_weather)
minute = [0]
NMAE[str(case)] = np.zeros((100))
sum_nmae = 0
if cnn_mode==1:
(zero_time_image, Pzero_time_image) = make_blank.run(comsset, minute, test_mode)
(zero_time_image, Pzero_time_image) = import_coms.run(zero_time_image, Pzero_time_image, test_mode, comsset, plant_id, minute)
basic_data, Pbasic_data = import_data.run(test_mode, comsset, plant_id, minute, weather_ele)
training_data, Ptraining_data = make_multitemporal_data.run(basic_data, Pbasic_data, test_mode, comsset,hours, minute, weather_ele, plant_id)
if cnn_mode==1:
coms_training_data, coms_P_data = make_multitemporal_coms.run(zero_time_image, Pzero_time_image,patch_size, comsset, hours, minute, test_mode, len(basic_data),len(Pbasic_data))
training_data, Ptraining_data = erase_skip_and_time.run(training_data, Ptraining_data,cnn_mode, test_mode, cap, comsset, hours, minute,6, 20 )
if cnn_mode==1:
coms_training_data, coms_P_data = erase_coms.run(Ptraining_data,training_data,
coms_training_data, coms_P_data,test_mode)
print(str(len(training_data))+" : "+str(len(coms_training_data)))
print(str(len(Ptraining_data))+" : "+str(len(coms_P_data)))
if test_mode[0]==1 or test_mode[0] == 4:
Pindex = Ptraining_data.pop("coms_index")
training_data.pop("coms_index")
if not os.path.isdir("./result"+"//t"+str(site_n)+"//"+str(case)):
if not os.path.isdir("./result"+"//t"+str(site_n)):
os.mkdir("./result"+"//t"+str(site_n))
os.mkdir("./result"+"//t"+str(site_n)+"//"+str(case))
training_data.to_csv("./result"+"//t"+str(site_n)+"//"+str(case)+"//"+plant_id+"_Trainingdata.csv", mode='w')
training_data, Ptraining_data,forevalmax, forevalmin,scaled_features,min_save, max_save = min_max_0to1.run(training_data, Ptraining_data, test_mode )
training_data.to_csv("./result"+"//t"+str(site_n)+"//"+str(case)+"//"+plant_id+"_Trainingdata_min_max.csv", mode='w')
with open("./result"+"//t"+str(site_n)+"//"+str(case)+"//"+plant_id+"_min.pickle", 'wb') as handle:
pickle.dump(min_save, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open("./result"+"//t"+str(site_n)+"//"+str(case)+"//"+plant_id+"_max.pickle", 'wb') as handle:
pickle.dump(max_save, handle, protocol=pickle.HIGHEST_PROTOCOL)
#with open('filename.pickle', 'rb') as handle:
# unserialized_data = pickle.load(handle)
for_name = make_name.run(cnn_mode, no_coms, comsset, weather_ele, hours)
# ex_case = 0
for ex_case in range(numofex):
from tensorflow.keras import backend as K
K.clear_session()
if cnn_mode ==1:
if ex_case==0:
train_dataset, test_dataset, train_labels, test_labels, for_name1 = set_train_coms.make_for_cnn(coms_training_data, coms_P_data, test_mode, training_data, Ptraining_data,hours,minute,comsset,cnn_mode,ex_case,for_name)
else:
train_dataset, test_dataset, train_labels, test_labels = set_train_coms.make_for_cnn(coms_training_data, coms_P_data, test_mode, training_data, Ptraining_data,hours,minute,comsset,cnn_mode,ex_case,for_name)
else :
if ex_case==0:
train_dataset, test_dataset, train_labels, test_labels, for_name1,var_list = set_train_data.make_for_mlp(test_mode, training_data, Ptraining_data,hours,minute,comsset,cnn_mode,ex_case,for_name)
with open("./result"+"//t"+str(site_n)+"//"+str(case)+"//"+plant_id+"_var_list.pickle", 'wb') as handle:
pickle.dump(var_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
else:
train_dataset, test_dataset, train_labels, test_labels= set_train_data.make_for_mlp(test_mode, training_data, Ptraining_data,hours,minute,comsset,cnn_mode,ex_case,for_name)
#if 'A' in df.columns:
if cnn_mode ==1:
model_cnn = cnn_model.create_cnn(np.shape(train_dataset[1])[1],np.shape(train_dataset[1])[2],
np.shape(train_dataset[1])[3], layers, ex_case)
model_mlp = mlp_model.create_mlp(np.shape(train_dataset[0])[1],[], False)
model=(combine_model.run(model_mlp, model_cnn, combine_layers))
else:
model=(mlp_model.create_mlp(train_dataset.shape[1],layers, True))
history = fit_model.run(model, train_dataset, train_labels,case,ex_case,50, 0.2,site_n)
#print(test_model.run(ex_case, case, model,history, test_dataset, test_labels,forevalmax, forevalmin, cap))
#NMAE[str(case)][ex_case],test_predictions,test_labels = test_model.run(ex_case, case, model,history, test_dataset, test_labels,forevalmax, forevalmin, cap,site_n)
#print(NMAE[str(case)][ex_case])
''' del model
if cnn_mode ==1:
del model_cnn
del model_mlp
for i in range(10):
gc.collect()'''
#sum_nmae = sum_nmae+NMAE[str(case)][ex_case]
#if(ex_case>4 and sum_nmae>0.065*(ex_case+1)):
# iseval = False
# break
# else:
# iseval = True
#
#if iseval:
# write_evaluation_result.run(plant_id,Pindex,test_dataset,cap,test_labels,case,numofex,forevalmax, forevalmin,test_mode,NMAE,site_n)
#NMAE.to_csv("./result"+"//t"+str(site_n)+"//"+str(case)+"//"+plant_id+"_Result.csv", mode='w')
# new_NMAE[str(iter_in)] = NMAE[str(case)]
# new_NMAE.to_csv("./result"+"//t"+str(site_n+iter_in)+"_weather_Result.csv", mode='w')
# break
# new_NMAE.to_csv("./result"+"//t"+str(site_n)+"_weather_Result.csv", mode='w') | [
"todayfirst@snu.ac.kr"
] | todayfirst@snu.ac.kr |
674ad278b8389b1ced92bde0524545521cefaa48 | c0c7656a638abe35811bb62f86b3698a0772396f | /manage.py | 63ba1ce9cad2bf67a00e2df75de16bd581dced19 | [] | no_license | RoksolanaD/my-first-blog | 641e38aeb8e9895f207a88c1d344da7f62a4ec82 | 8f4e7520165abe061e47466569fdf8d50cb223b3 | refs/heads/master | 2020-12-24T21:00:29.558958 | 2016-04-30T22:48:42 | 2016-04-30T22:48:42 | 56,911,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this
    # project's settings module, then dispatch the CLI sub-command
    # (runserver, migrate, ...) given on the command line.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pinguiblog.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"rdesyatnyk@gmail.com"
] | rdesyatnyk@gmail.com |
8a827fa5eee6c58cb752bbec439cf755e712d491 | fc90124d5c44cc5fefb200ecb762110976e747d8 | /Portfolio Construction & VaR Backtesting/main.py | 4fb0a6a5241e3fd8e86550975b941b4811c6472c | [] | no_license | brooksgod/CQF | a162605b238474e212c57d54fe6500d9ba412344 | 031e165de52fa42875cdbb0f8d736cf761899f25 | refs/heads/master | 2023-07-16T05:51:30.855764 | 2021-05-22T12:18:41 | 2021-05-22T12:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,349 | py | import Portfolio, RiskMeasures
from helpers import *
import os
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
plt.style.use('ggplot')
os.makedirs('Results/', exist_ok=True)
# Q1/Q2 Data
mu = np.array([0.02, 0.07, 0.15, 0.20])
sigma = np.array([0.05, 0.12, 0.17, 0.25])
corr = np.array([[1, 0.3, 0.3, 0.3],
[0.3, 1, 0.6, 0.6],
[0.3, 0.6, 1, 0.6],
[0.3, 0.6, 0.6, 1]])
# Q1
# calculate weights and allocations
weights = {}
risk = {}
target_mu = 4.5 / 100
for stress_factor in [1, 1.25, 1.5]:
stressed_corr = stressed_correlation_matrix(corr, stress_factor)
w, r = compute_risky_only_allocation_and_risk(mu=mu, sigma=sigma, corr=stressed_corr, target_mu=target_mu)
weights[stress_factor] = w
risk[stress_factor] = r
# form weights dataframe indexed by stress factor
weights = pd.DataFrame(weights).transpose()
weights.columns = list('ABCD')
risk = pd.Series(risk)
weights['Risk'] = risk
# save
weights.to_csv('Results/q1_allocation_and_risk.csv')
# create and plot random portfolios
universe = Portfolio.InvestmentUniverse(mu=mu, corr=corr, std=sigma)
normal_ptf_generator = Portfolio.NormalRandomPortfolioGenerator(universe=universe)
random_portfolios = normal_ptf_generator.generate_random_portfolios(number_samples=7000)
normal_ptf_generator.plot_opportunity_set(random_portfolios)
plt.savefig('Results/q1_random_portfolio_plot.png')
# Q2
rfrs = [x / 10000 for x in [50, 100, 150, 175]]
weights = {}
risk = {}
for rfr in rfrs:
w, r = compute_tangency_portfolio_weights_and_risk(mu=mu, corr=corr, sigma=sigma, rfr=rfr)
weights[rfr] = w
risk[rfr] = r
weights = pd.DataFrame(weights).transpose()
weights.columns = list('ABCD')
risk = pd.Series(risk)
weights['Risk'] = risk
weights.to_csv('Results/q2_allocation_and_risk.csv')
for rfr in [100 / 10000, 175 / 10000]:
mvo = Portfolio.MeanVarianceOptimiserRiskyOnly(mu=mu, std=sigma, corr=corr)
mvo_rf = Portfolio.MeanVarianceOptimiserWithRiskFreeAsset(mu=mu, std=sigma, corr=corr, rfr=rfr)
util = Portfolio.MeanVarianceUtils(mvo=mvo, enforce_min_return=True)
util_rf = Portfolio.MeanVarianceUtils(mvo=mvo_rf, enforce_min_return=True)
tangency_return = (mvo.c - rfr * mvo.b) / (mvo.b - rfr * mvo.a)
plot_tangency = False
# determine borders of figure
max_return = 0.2
min_return = 0
if tangency_return > 0:
max_return = tangency_return
plot_tangency = True
else:
min_return = tangency_return
# plot frontier and CML
plt.figure()
util.plot_efficient_frontier(max_return=max_return, min_return=min_return)
util_rf.plot_efficient_frontier(max_return=max_return, min_return=min_return, plot_tangency=plot_tangency)
plt.scatter(0, rfr, marker='x', color='black')
# hack to handle legend
legend = ['Risky Only Efficient Frontier', '_', 'True Efficient Frontier - CML', 'GMV Portfolio']
if rfr < 160 / 10000:
legend += ['Tangency Portfolio']
legend += [f'Risk Free Asset - Rate {round(100 * rfr, 2)}%']
plt.legend(legend)
plt.title(f'Efficient Frontier - Risk Free Rate {round(100 * rfr, 2)}%')
plt.savefig(f'Results/q2_efficient_frontier_{round(100 * rfr, 2)}%.png')
# Q3
data_path = 'SP500.csv'
data = pd.read_csv(data_path, index_col=0)
data.index = [dt.datetime.strptime(d, '%d/%m/%Y') for d in data.index]
idx = ['Breaches', '% Breaches', 'Consec. Breaches', 'Cond. Prob. Consec. Breach']
results = pd.DataFrame()
for method in ['Rolling', 'EWMA']:
if method == 'Rolling':
var_bt = RiskMeasures.VarBacktestRollingVolatility(price_data=data)
elif method == 'EWMA':
var_bt = RiskMeasures.VarBacktestEWMAVolatility(price_data=data)
else:
raise BaseException('Not supposed to be here')
plt.figure()
var_bt.plot_var_versus_realised_return()
plt.savefig(f'Results/q3_{method.lower()}_volatility.png')
# get breaches, pct_breaches, consecutive breaches, conditional probability of consecutive breach
b, pct_b, c, cp_cb = var_bt.get_headline_figures()
df = pd.DataFrame([b, 100 * pct_b, c, cp_cb], index=idx).transpose()
df.index = [method]
results[method] = df.T[method]
results.to_csv('Results/q3_headline_figures.csv')
# Q4
results = pd.DataFrame()
idx = ['Total', 'Total Rel.', 'Asset', 'Asset Rel', 'Liq.', 'Liq Rel.']
params = [['Technology', 16000000, 0.01, 0.03, -0.0035 / 2, 0.015 / 2],
['Gilt 15bp', 40000000, 0, 0.03, -15 / 10000 / 2, 0],
['Gilt 125 bp', 40000000, 0, 0.03, -125 / 10000 / 2, 0]]
for param_set in params:
scenario, notional, mu, sigma, mu_spread, sigma_spread = param_set
total, total_rel, asset, asset_rel, liq, liq_rel = compute_liquidity_adjusted_var(mu=mu, sigma=sigma,
mu_spread=mu_spread,
sigma_spread=sigma_spread,
notional=notional)
df = pd.DataFrame([total, total_rel, asset, asset_rel, liq, liq_rel], index=idx).transpose()
df.index = [scenario]
results[scenario] = df.T[scenario]
results.to_csv('Results/q4_aggregated_figures.csv')
| [
"samuel.forster@cantab.net"
] | samuel.forster@cantab.net |
d69722adf3f19c7b6fd6eb8bed4b49be91b7ff34 | 4d3100a82f778c0fbc4d2b400196bb240c094fd2 | /citeulike/spiders/user.py | 4db736466be4b2df25140387b1f9c6d828ddbd73 | [] | no_license | ziaoang/citeulike | 1ae004bd9985bccbd5c111bd530d3b4880bddf14 | 87f3035e23ee36cd8e0c4d347d198cef1cd49efd | refs/heads/master | 2021-01-21T06:59:23.252846 | 2017-02-27T10:53:39 | 2017-02-27T10:53:39 | 83,300,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from collections import defaultdict
class UserSpider(scrapy.Spider):
    """Breadth-first crawler of CiteULike user libraries.

    Starting from the first user listed in ``data/user_root.txt``, it
    records every paper each visited user has bookmarked and follows
    links to related users up to ``max_level`` hops.  On close it dumps
    the discovered user ids and (user, paper) pairs under ``data/``.
    """
    name = "user"
    start_urls = []
    max_level = 3
    # Shared across callbacks: every discovered user profile URL.
    user_ids = set()
    # user id -> set of article ids bookmarked by that user.
    user_papers = defaultdict(set)

    def __init__(self):
        # Seed the crawl with only the first root user (note the break).
        for line in open('data/user_root.txt'):
            self.start_urls.append('http://www.citeulike.org' + line.strip())
            break

    def get_page_count(self, paper_count):
        """Return how many 50-entry listing pages are needed to show
        ``paper_count`` papers; always at least 1."""
        count_per_page = 50
        if paper_count <= 0: return 1
        # Floor division keeps the result an int: under Python 3 the
        # original true division returned a float, which broke the
        # range(1, page_count) call in parse() below.
        return (paper_count - 1) // count_per_page + 1

    def parse(self, response):
        """Handle a user's first library page: scrape it, then enqueue the
        remaining pages of the same library."""
        for request in self.parse_user(response):
            yield request
        # Assumes the page title's second-to-last space-separated token is
        # the total article count -- TODO confirm against a live page.
        paper_count = int(response.xpath('//title/text()').extract_first().split(' ')[-2])
        page_count = self.get_page_count(paper_count)
        for page in range(1, page_count):
            yield Request(url='%s/page/%d' % (response.url, page + 1),
                          callback=self.parse_user)

    def parse_user(self, response):
        """Scrape one listing page: record paper ids for the current user
        and, within the level budget, follow links to related users."""
        t = response.url.split('/')
        # URL is either .../user/<id> or .../user/<id>/page/<n>.
        curr_user_id = t[-1] if t[-2] == 'user' else t[-3]
        for paper in response.xpath('//tr[contains(@class, "list")]/@data-article_id').extract():
            self.user_papers[curr_user_id].add(paper)
        user_ids = set()
        for url in response.xpath('//a[@class="pubitem "]/@href').extract():
            user_ids.add(url)
            self.user_ids.add(url)
        for url in response.xpath('//a[@class="othrusr pubitem"]/@href').extract():
            user_ids.add(url)
            self.user_ids.add(url)
        # Depth of the current request; seeds have no 'level' meta => 1.
        level = response.meta['level'] if 'level' in response.meta else 1
        if level + 1 <= self.max_level:
            for url in user_ids:
                yield Request(url='http://www.citeulike.org' + url,
                              meta={'level': level + 1},
                              callback=self.parse)

    def closed(self, reason):
        """Scrapy close hook: persist the crawl results to disk."""
        df = open('data/users.txt', 'w')
        for user_id in self.user_ids:
            df.write(user_id + '\n')
        df.close()
        df = open('data/user_papers.txt', 'w')
        for user_id in self.user_papers:
            for paper in self.user_papers[user_id]:
                df.write(user_id + '\t' + paper + '\n')
        df.close()
| [
"ziaoang@gmail.com"
] | ziaoang@gmail.com |
78ebb71e9354b081c36b8ba559d6be8edc3adca6 | c36d16a1bcdc9b529a43b27c67c73a9a02bcb824 | /PVC-tester-3.5.py | 7eaf6842fa28e66db7967330c8f39dddb401c7e8 | [
"MIT"
] | permissive | groovytron/tsp | 033f0adb958b5a12a70d54715066a273e12d5f3b | 9498d3e4a89a7fadeddc267116bd0a4b6eb1af15 | refs/heads/master | 2020-06-12T04:11:16.912668 | 2017-01-15T20:25:57 | 2017-01-15T20:25:57 | 75,606,959 | 1 | 0 | null | 2017-01-15T20:26:07 | 2016-12-05T09:01:23 | Python | ISO-8859-1 | Python | false | false | 4,516 | py | # coding: latin-1
''' Module permettant de tester systématiquement une série de solveurs
pour le problème du voyageur de commerce.
Permet de lancer automatiquement une série de solveurs sur une série de problèmes
et génère une grille de résultats au format CSV.
v0.2, Matthieu Amiguet, HE-Arc
v0.3, hatem Ghorbel, HE-Arc
Python 3.5 Ready, Romain Claret
'''
# PARAMETRES
# =========
# modifier cette partie pour l'adapter à vos besoins
# Le nom des modules à tester
# Ces modules doivent être dans le PYTHONPATH; p.ex. dans le répertoire courant
modules = (
"MPoyPerez",
# Éventuellement d'autres modules pour comparer plusieurs versions...
)
# Liste des tests à effectuer
# sous forme de couples (<datafile>, <maxtime>) où
# <datafile> est le fichier contenant les données du problème et
# <maxtime> le temps (en secondes) imparti pour la résolution
tests = (
('data/pb005.txt',1),
('data/pb010.txt',5),
('data/pb010.txt',10),
('data/pb050.txt',30),
('data/pb050.txt',60),
('data/pb100.txt',20),
('data/pb100.txt',90),
)
# On tolère un dépassement de 5% du temps imparti:
tolerance = 0.05
# Fichier dans lequel écrire les résultats
import sys
outfile = sys.stdout
# ou :
#outfile = open('results.csv', 'w')
# affichage à la console d'informations d'avancement?
verbose = False
# est-ce qu'on veut un affichage graphique?
gui = False
# PROGRAMME
# =========
# Cette partie n'a théoriquement pas à être modifiée
import os
from time import time
from math import hypot
def dist(city1, city2):
    """Euclidean distance between two cities given as (x, y) pairs."""
    (x1, y1), (x2, y2) = city1, city2
    return hypot(x2 - x1, y2 - y1)
def validate(filename, length, path, duration, maxtime):
    '''Validate a solver's answer against the problem file.

    Returns an empty string if everything is OK, or an error message
    otherwise (timeout, wrong total distance, unknown or duplicated
    city, unvisited cities).
    '''
    error = ""
    # Allow a small overrun of the time budget (module-level `tolerance`).
    if duration>maxtime * (1+tolerance):
        error += "Timeout (%.2f) " % (duration-maxtime)
    # Reload the problem file: one "name x y" triple per line.
    try:
        cities = dict([(name, (int(x),int(y))) for name,x,y in [l.split() for l in open(filename)]])
    except:
        print(sys.exc_info()[0])
        return "(Validation failed...)"
    tovisit = list(cities.keys())
    try:
        totaldist = 0
        # Walk the closed tour: the last city links back to the first.
        for (ci, cj) in zip(path, path[1:] +path[0:1]):
            totaldist += dist(cities[ci], cities[cj])
            tovisit.remove(ci)
        if int(totaldist) != int(length):
            error += "Wrong dist! (%d instead of %d)" % (length, totaldist)
    except KeyError:
        error += "City %s does not exist! " % ci
    except ValueError:
        # tovisit.remove() raises ValueError on a second visit to a city.
        error += "City %s appears twice in %r! " % (ci, path)
    except Exception as e:
        error += "Error during validation: %r" % e
    if tovisit:
        error += "Not all cities visited! %r" % tovisit
    return error
if __name__ == '__main__':
    # Collect the different implementations: put each module's ga_solve()
    # into a dictionary keyed by the module name, and write the header
    # line of the output file at the same time.
    solvers = {}
    outfile.write('Test;')
    for m in modules:
        # NOTE(review): exec-based dynamic import of trusted local module
        # names; importlib.import_module would be safer if these names
        # ever came from outside this file.
        exec ("from %s import ga_solve" % m)
        solvers[m] = ga_solve
        outfile.write("%s;" % m)
    outfile.write('\n')
    # This part runs the tests themselves and reports the results
    # into outfile.
    for (filename, maxtime) in tests:
        if verbose:
            print ("--> %s, %d" % (filename, maxtime))
        # Normalize the file name (for cross-platform support)
        filename = os.path.normcase(os.path.normpath(filename))
        # Write the row header
        outfile.write("%s (%ds);" % (filename, maxtime))
        # Call the solvers themselves, validate and write the results
        for m in modules:
            if verbose:
                print ("## %s" % m)
            try:
                start = time()
                length, path = solvers[m](filename, gui, maxtime)
                duration = time()-start
            except Exception as e:
                outfile.write("%r;" % e)
            except SystemExit:
                # SystemExit is not an Exception subclass, so a solver
                # that tries to quit the interpreter lands here.
                outfile.write("tried to quit!;")
            else:
                error = validate(filename, length, path, duration, maxtime)
                if not error:
                    outfile.write("%d;" % length)
                else:
                    outfile.write("%s;" % error)
            outfile.flush()
        outfile.write('\n')
"joaquim.perez@he-arc.ch"
] | joaquim.perez@he-arc.ch |
761252bd30193498dcf9bcf5795732eb9495d84a | 08aec07b5db889e586a56e36efea31c21c5695f2 | /day006/ex45.py | a41816142a84e473b30dc6e134437aa140bbef9b | [] | no_license | nileshnegi/hackerrank-python | 7bb19929d2b963f02b37259c06b893c6520f33dc | 0d2ab9ee40156e81b568ab4d5a6d5cd4f6ca7385 | refs/heads/master | 2023-01-07T07:20:44.859336 | 2020-11-11T14:30:21 | 2020-11-11T14:30:21 | 279,877,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | """
The Captain's Room
Mr. Anant Asankhya is the manager at the INFINITE hotel.
The hotel has an infinite amount of rooms.
One fine day, a finite number of tourists come to stay at the hotel.
The tourists consist of:
→ A Captain.
→ An unknown group of families consisting of ```K``` members per group
The Captain has a separate room, and the rest were given one room per group.
Mr. Anant has an unordered list of randomly arranged room entries.
The list consists of the room numbers for all of the tourists
which will appear ```K``` times per group except for the Captain's room.
Mr. Anant needs you to help him find the Captain's room number.
The total number of tourists or the total number of groups is not known.
You only know the value of ```K``` and the room number list.
"""
if __name__ == "__main__":
    # Dict-based solution: count how many times each room number occurs;
    # per the problem statement every group's room appears K times, so
    # the Captain's room is the only number with a count of 1.
    K = int(input())  # group size K (read but unused by this approach)
    room_list = list(map(int, input().split()))
    rooms = dict()
    for i in range(len(room_list)):
        if rooms.get(room_list[i]) == None:
            rooms[room_list[i]] = 1
        else:
            rooms[room_list[i]] += 1
    for key, value in rooms.items():
        if value == 1:
            print(key)
"""
However this is a set problem. So the solution with sets:
"""
if __name__ == "__main__":
    # Set-based solution: split room numbers into "seen once" and "seen
    # more than once"; the Captain's room is the lone survivor of
    # single - multiple.
    # NOTE(review): both __main__ blocks in this file run when the script
    # executes, so this one re-reads stdin after the first has consumed
    # it -- keep only one block when actually submitting.
    K = int(input())
    rooms = map(int, input().split())
    single = set()
    multiple = set()
    for room in rooms:
        if room not in single:
            single.add(room)
        else:
            multiple.add(room)
    print(single.difference(multiple).pop())
"nilesh_negi@yahoo.co.in"
] | nilesh_negi@yahoo.co.in |
cc8f664cee314b4175de6b85a3ffebc73361d0cf | 4f51aa8e60ca340e6c5a5832438e7d7c9eee8fb5 | /wrapi/endpoints/account.py | 230ed3fde30794f2f9b403cf8de958c963f06ae3 | [
"MIT"
] | permissive | junqed/wrapi | 457e58f3814c5ef6de3cc47c335601e847688da4 | c2b6efab8b7065c7936e0e6f0b806ed2fd00f77d | refs/heads/master | 2023-03-02T02:47:55.794054 | 2021-02-11T08:58:29 | 2021-02-11T08:58:29 | 324,379,318 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | from __future__ import annotations
from typing import Optional, Sequence
from pydantic.fields import Field
from ..api import (
BodyParams,
EndpointData,
WrApiQueryParams,
)
from ..types_.endpoint import BaseEndpoint
from ..types_.inputs import AccountOptionalFields, Metadata
class Account(BaseEndpoint):
    """``GET /account`` request: fetch the current account, optionally
    filtering metadata and selecting extra optional fields."""
    # Metadata filter, serialized into the `metadata` query parameter.
    metadata: Optional[Metadata]
    # Optional fields to include; sent under the wire name "fields".
    fields_: Optional[Sequence[AccountOptionalFields]] = Field(None, alias="fields")
    @property
    def endpoint_data(self) -> EndpointData:
        """Describe the HTTP call: method, URL and query parameters."""
        return EndpointData(
            method="GET",
            url="/account",
            query_params=self._query_params,
        )
    @property
    def _query_params(self) -> WrApiQueryParams:
        """Build query params, including only the attributes that are set."""
        params = WrApiQueryParams()
        if self.metadata:
            params["metadata"] = self._convert_input(self.metadata)
        if self.fields_:
            params["fields"] = self._convert_seq(self.fields_)
        return params
class ModifyAccount(BaseEndpoint):
    """``PUT /account`` request: update the account's metadata entries."""
    # Metadata entries to write; omitted from the body when unset/empty.
    metadata: Optional[Sequence[Metadata]]

    @property
    def endpoint_data(self) -> EndpointData:
        """Describe the HTTP call: method, URL and request body."""
        return EndpointData(
            method="PUT",
            url="/account",
            body_params=self._body_params,
        )

    @property
    def _body_params(self) -> BodyParams:
        """Build the request body; metadata is included only when truthy."""
        if self.metadata:
            return {"metadata": self._convert_input_seq(self.metadata)}
        return {}
| [
"eugene.fominykh@team.wrike.com"
] | eugene.fominykh@team.wrike.com |
17057251ad5e6a6db3b8bbf55d6daf24d7be92ef | 434ec954a1c481f17dbb41d82f814405c2bd1e6e | /__init__.py | 9dcc5b7d4b2f8a9eb1377764f50e3d764a314fc5 | [] | no_license | pytsite/plugin-comments_odm | 83ae106529c68e995ff3f9414ffb8b76d64b9704 | d07906d2c57ff0b750cb580c5f2c0e3867b04ac6 | refs/heads/master | 2022-02-11T21:56:59.371544 | 2019-08-06T00:31:59 | 2019-08-06T00:31:59 | 82,923,156 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | """PytSite ODM Comments Plugin
"""
__author__ = 'Oleksandr Shepetko'
__email__ = 'a@shepetko.com'
__license__ = 'MIT'
from . import _widget as widget
def plugin_load():
    """Hook

    Called by PytSite when the plugin loads: registers the comment ODM
    model, the ODM-backed comments driver, and the report-comment event
    handler.
    """
    from pytsite import events
    from plugins import comments, odm
    from . import _model, _driver, _eh
    # Register ODM model
    odm.register_model('comment', _model.ODMComment)
    # Register comments driver
    comments.register_driver(_driver.ODM())
    # Forward comment reports to this plugin's event handler.
    events.listen('comments@report_comment', _eh.comments_report_comment)
events.listen('comments@report_comment', _eh.comments_report_comment)
| [
"a@shepetko.com"
] | a@shepetko.com |
a419f17e8f448960359d04f1206ecd8b48fac4f7 | 88c8dc51713753b7a36dce80ca936b2933575845 | /week07/class_code/w7_creative_task_code_start.py | 8c1fdb2f9546ee089a3f5c20570395591fc0e5fc | [] | no_license | previtus/cci_python | 210e9e4314fb65c2b5542131167a75ece07ad2a9 | ddab2697fc960355ac9e5fac7fc7d462db8b50f4 | refs/heads/master | 2021-05-23T13:25:46.661735 | 2020-01-15T18:11:33 | 2020-01-15T18:11:33 | 253,309,644 | 1 | 0 | null | 2020-04-05T18:57:21 | 2020-04-05T18:57:20 | null | UTF-8 | Python | false | false | 571 | py | infile = open("dracula.txt", "r")
filestring = infile.read()
infile.close()
# in words ==========================================
words = filestring.split()
#for word in words:
# print(word)
# in sentences ==========================================
sentences = filestring.split(".")
# dark magicks!
#import re
#sentences2 = re.split(r' *[\.\?!][\'"\)\]]* *', filestring)
# in paragraphs ==========================================
paragraphs = filestring.split("\n\n") # depends on how your text is written!
#for paragraph in paragraphs:
# print(paragraph)
| [
"previtus@gmail.com"
] | previtus@gmail.com |
f6914286d5c2dd47ef6130c4c5c30b1b76713a6c | 8df093bb73c491e8d8141864a85afae28b5db45a | /pst/validation_tfidf.py | 478db41c628a82bea823b998a4264e672dfc41dd | [] | no_license | nishthaa/persuasive-style-transfer | 885fb634b187f8fa007298fbe7b3ea4696dc5956 | 998a8001e9858e8723dca6c06de0b276e47b48a7 | refs/heads/master | 2022-12-11T18:07:19.040828 | 2018-07-07T17:20:36 | 2018-07-07T17:20:36 | 132,045,695 | 0 | 0 | null | 2022-12-08T02:13:34 | 2018-05-03T20:24:23 | Python | UTF-8 | Python | false | false | 9,377 | py | import numpy as np
import pandas as pd
import nltk
import re
import os
import codecs
from sklearn import feature_extraction
import mpld3
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.stem.snowball import SnowballStemmer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
from odds_ratio import get_odds_ratio
import string
import sys
from gensim.models import Word2Vec
from nltk import word_tokenize
import operator
stopwords = nltk.corpus.stopwords.words('english')
stemmer = SnowballStemmer("english")
# here I define a tokenizer and stemmer which returns the set of stems in
# the text that it is passed
def purity_score(clusters, classes):
"""
Calculate the purity score for the given cluster assignments and ground truth classes
:param clusters: the cluster assignments array
:type clusters: numpy.array
:param classes: the ground truth classes
:type classes: numpy.array
:returns: the purity score
:rtype: float
ref: http://www.caner.io/purity-in-python.html
"""
A = np.c_[(clusters, classes)]
n_accurate = 0.
for j in np.unique(A[:, 0]):
z = A[A[:, 0] == j, 1]
x = np.argmax(np.bincount(z))
n_accurate += len(z[z == x])
return n_accurate / A.shape[0]
def tokenize_and_stem(text):
# first tokenize by sentence, then by word to ensure that punctuation is
# caught as it's own token
tokens = [word for sent in nltk.sent_tokenize(
text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw
# punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
stems = [stemmer.stem(t) for t in filtered_tokens]
return stems
def tokenize_only(text):
# first tokenize by sentence, then by word to ensure that punctuation is
# caught as it's own token
tokens = [word.lower() for sent in nltk.sent_tokenize(text)
for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw
# punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
return filtered_tokens
DIR_LINK_TRAIN = "data/convotev1.1/data_stage_one/training_set/"
DIR_LINK_TEST = "data/convotev1.1/data_stage_one/test_set/"
THRESHOLD = int(sys.argv[1])
PUNCTUATION = string.punctuation
# print(THRESHOLD)
training_speeches = os.listdir(DIR_LINK_TRAIN)
tmp = []
tmpn = []
label_ref = {}
labels = []
z = 0
for speech in training_speeches:
if speech[-5] == "Y":
tmp.append(speech)
if speech[:3] not in label_ref:
label_ref[speech[:3]] = z
z += 1
labels.append(label_ref[speech[:3]])
if speech[-5] == "N":
tmpn.append(speech)
# Speeches from the training set labelled "Y"
training_speeches = tmp
training_speeches_n = tmpn
fps = [open(DIR_LINK_TRAIN + file) for file in training_speeches]
train_content = [fp.read() for fp in fps]
for fp in fps:
fp.close()
fps_n = [open(DIR_LINK_TRAIN + file) for file in training_speeches_n]
train_content_n = [fp.read() for fp in fps_n]
for fp in fps_n:
fp.close()
totalvocab_stemmed = []
totalvocab_tokenized = []
for i in train_content:
# for each item in 'synopses', tokenize/stem
allwords_stemmed = tokenize_and_stem(i)
# extend the 'totalvocab_stemmed' list
totalvocab_stemmed.extend(allwords_stemmed)
allwords_tokenized = tokenize_only(i)
totalvocab_tokenized.extend(allwords_tokenized)
vocab_frame = pd.DataFrame(
{'words': totalvocab_tokenized}, index=totalvocab_stemmed)
# print(vocab_frame.head())
tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,
min_df=0.2, stop_words='english',
use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3))
tfidf_matrix = tfidf_vectorizer.fit_transform(
train_content) # fit the vectorizer to synopses
# print(tfidf_matrix.shape)
terms = tfidf_vectorizer.get_feature_names()
# dist = 1 - cosine_similarity(tfidf_matrix)
num_clusters = 38
km = KMeans(n_clusters=num_clusters, random_state=10)
km.fit(tfidf_matrix)
clusters = km.labels_.tolist()
print("Purity score: ", purity_score(clusters, labels))
speeches = {'title': training_speeches,
'speech_content': train_content, 'cluster': clusters}
frame = pd.DataFrame(speeches, index=[clusters], columns=[
'title', 'cluster'])
print(frame['cluster'].value_counts())
print("Top terms per cluster:")
print()
# sort cluster centers by proximity to centroid
order_centroids = km.cluster_centers_.argsort()[:, :: -1]
central_words = []
true_k = np.unique(labels).shape[0]
terms = tfidf_vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='\n')
print()
print()
print()
# print(central_words)
tracker = {}
for speech in training_speeches:
if speech[:3] not in tracker:
tracker[speech[:3]] = [0] * num_clusters
for i in range(len(training_speeches)):
tracker[training_speeches[i][:3]][clusters[i]] += 1
for key in tracker:
mx = -1
for i in tracker[key]:
if i > mx:
mx = i
print(key, i)
highORwords, lowORwords = get_odds_ratio(os.listdir(DIR_LINK_TRAIN))
yes_words = {}
no_words = {}
for speech in train_content:
speech = " ".join([word.strip(PUNCTUATION) for word in speech.split(" ")])
toks = speech.split(" ")
toks = [tok.strip() for tok in toks]
for i in range(len(toks)):
if toks[i] not in yes_words:
yes_words[toks[i]] = 1
else:
yes_words[toks[i]] += 1
for speech in train_content_n:
speech = " ".join([word.strip(PUNCTUATION) for word in speech.split(" ")])
toks = speech.split(" ")
toks = [tok.strip() for tok in toks]
for i in range(len(toks)):
if toks[i] not in no_words:
no_words[toks[i]] = 1
else:
no_words[toks[i]] += 1
above_threshold = []
for key in yes_words.keys():
if yes_words[key] > THRESHOLD and key not in no_words:
above_threshold.append((key, yes_words[key]))
above_threshold = sorted(above_threshold, key=operator.itemgetter(1))
all_speeches = train_content + train_content_n
all_speeches_w2v = []
for speech in all_speeches:
speech = word_tokenize(speech)
speech_2 = []
for word in speech:
if word not in string.punctuation:
speech_2.append(word)
speech = speech_2
all_speeches_w2v.append(speech)
# print(all_speeches_w2v[:3])
model = Word2Vec(all_speeches_w2v)
mapping = {}
i = 0
for tup in above_threshold:
word = tup[0]
max_sim = -1
# if i < 100:
# i += 1
# continue
# else:
for wrd in lowORwords:
wrd = wrd.strip()
word = word.strip()
try:
if model.similarity(word, wrd) > max_sim:
mapping[word] = wrd
except:
print(tup, wrd)
final_speeches = []
count_replaced = 0
for speech in train_content:
tmp = speech.split(" ")
for i in range(len(tmp)):
if tmp[i] in mapping:
count_replaced += 1
# print(count_replaced)
tmp[i] = mapping[tmp[i]]
final_speeches.append(" ".join(tmp))
# Now working on replaces speeches
print("After replacement")
tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,
min_df=0.2, stop_words='english',
use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3))
tfidf_matrix = tfidf_vectorizer.fit_transform(
final_speeches) # fit the vectorizer to synopses
terms = tfidf_vectorizer.get_feature_names()
# dist = 1 - cosine_similarity(tfidf_matrix)
num_clusters = 38
km = KMeans(n_clusters=num_clusters, random_state=10)
km.fit(tfidf_matrix)
clusters_aft = km.labels_.tolist()
print("Purity score: ", purity_score(clusters_aft, labels))
speeches = {'title': training_speeches,
'speech_content': final_speeches, 'cluster': clusters_aft}
frame = pd.DataFrame(speeches, index=[clusters_aft], columns=[
'title', 'cluster'])
print(frame['cluster'].value_counts())
print("Top terms per cluster:")
print()
# sort cluster centers by proximity to centroid
order_centroids = km.cluster_centers_.argsort()[:, :: -1]
central_words = []
true_k = np.unique(labels).shape[0]
terms = tfidf_vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='\n')
print()
print()
print()
# Tracker for the replaced speeches -------------
tracker = {}
for speech in final_speeches:
if speech[:3] not in tracker:
tracker[speech[:3]] = [0] * num_clusters
for i in range(len(final_speeches)):
tracker[final_speeches[i][:3]][clusters_aft[i]] += 1
for key in tracker:
mx = -1
for i in tracker[key]:
if i > mx:
mx = i
print(key, i)
| [
"aditya16217@iiitd.ac.in"
] | aditya16217@iiitd.ac.in |
f9dbea843b41c244816397512fdb08114b940630 | 19e21d108aea71e086444ff7df3c41d297691863 | /rango/migrations/0003_category_slug.py | b9d5d669e2efa0d01089e1bd8db959956f68d7db | [] | no_license | VioletLover/tango_with_django_project | df3bb3e8dadb846270021b43965ea6f9aac19fbd | a8ecbc04b0d72c6958c6bd244e1a98d86d505611 | refs/heads/master | 2021-06-14T09:36:08.540666 | 2017-05-11T15:49:20 | 2017-05-11T15:49:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | # Generated by Django 2.0.dev20170318191452 on 2017-05-02 17:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rango', '0002_auto_20170502_1615'),
]
operations = [
migrations.AddField(
model_name='category',
name='slug',
field=models.SlugField(default=''),
preserve_default=False,
),
]
| [
"piaoia@163.com"
] | piaoia@163.com |
daec4324015b87354213497f374e4473de978001 | 3d9c7b91ee6410d60078ed59f36f3f7f597b96b8 | /env/bin/twitter-archiver | 5d22d98167874069c1d78a108b0fd719daa15c4a | [] | no_license | lbriones/SentimentalTweets | 77f387f0175ec90963fbbfa1518206f96302e9a8 | 8d61e800514690cd4ea6fea9b8cb49bad16e5862 | refs/heads/master | 2021-01-10T09:16:30.227495 | 2015-10-21T21:18:39 | 2015-10-21T21:18:39 | 44,298,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | #!/home/lbriones/dev/django/socialab/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from twitter.archiver import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"lbriones.tic@gmail.com"
] | lbriones.tic@gmail.com | |
c5c302080432b8dbb820d4fce47b288f5c518ca9 | c016ed237862591c42fd00b973fced791247be37 | /src/garage/tf/algos/rl2ppo.py | 6526b5c052efcbfb0912dbf1d9965f8d37bd2f62 | [
"MIT"
] | permissive | thanhkaist/garage | 726766a1e6fd465f776c42dc006f331a3b98cbd7 | 1d840df357282a675b8fce839bb0e5f72a8abba9 | refs/heads/master | 2022-11-05T23:40:08.057025 | 2020-06-21T16:59:15 | 2020-06-21T16:59:15 | 274,142,146 | 1 | 0 | MIT | 2020-06-22T13:18:14 | 2020-06-22T13:18:13 | null | UTF-8 | Python | false | false | 5,654 | py | """Proximal Policy Optimization for RL2."""
from garage.tf.algos import RL2
from garage.tf.optimizers import FirstOrderOptimizer
class RL2PPO(RL2):
"""Proximal Policy Optimization specific for RL^2.
See https://arxiv.org/abs/1707.06347 for algorithm reference.
Args:
rl2_max_path_length (int): Maximum length for trajectories with respect
to RL^2. Notice that it is different from the maximum path length
for the inner algorithm.
meta_batch_size (int): Meta batch size.
task_sampler (garage.experiment.TaskSampler): Task sampler.
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.StochasticPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
use_neg_logli_entropy (bool): Whether to estimate the entropy as the
negative log likelihood of the action.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
meta_evaluator (garage.experiment.MetaEvaluator): Evaluator for meta-RL
algorithms.
n_epochs_per_eval (int): If meta_evaluator is passed, meta-evaluation
will be performed every `n_epochs_per_eval` epochs.
name (str): The name of the algorithm.
"""
def __init__(self,
rl2_max_path_length,
meta_batch_size,
task_sampler,
env_spec,
policy,
baseline,
scope=None,
max_path_length=500,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer_args=None,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
use_neg_logli_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
flatten_input=True,
meta_evaluator=None,
n_epochs_per_eval=10,
name='PPO'):
if optimizer_args is None:
optimizer_args = dict()
super().__init__(rl2_max_path_length=rl2_max_path_length,
meta_batch_size=meta_batch_size,
task_sampler=task_sampler,
env_spec=env_spec,
policy=policy,
baseline=baseline,
scope=scope,
max_path_length=max_path_length,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
fixed_horizon=fixed_horizon,
pg_loss='surrogate_clip',
lr_clip_range=lr_clip_range,
max_kl_step=max_kl_step,
optimizer=FirstOrderOptimizer,
optimizer_args=optimizer_args,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
use_neg_logli_entropy=use_neg_logli_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method,
flatten_input=flatten_input,
meta_evaluator=meta_evaluator,
n_epochs_per_eval=n_epochs_per_eval,
name=name)
| [
"noreply@github.com"
] | thanhkaist.noreply@github.com |
1ca21c5fbe3c345731bc9b168b49f3f7ab94392d | bde6ed092b7b29703737e11c5a5ff90934af3d74 | /hackerrank/data-structures/array/sparse-arrays.py | e092d90c7f10b803449a5336206aab0c80288509 | [] | no_license | takecian/ProgrammingStudyLog | 2ab7ea601e0996b3fa502b81ec141bc3772442b6 | 94485d131c0cc9842f1f4799da2d861dbf09b12a | refs/heads/master | 2023-04-28T16:56:18.943574 | 2023-04-18T06:34:58 | 2023-04-18T06:34:58 | 128,525,713 | 4 | 0 | null | 2022-12-09T06:15:19 | 2018-04-07T12:21:29 | Python | UTF-8 | Python | false | false | 792 | py | # https://www.hackerrank.com/challenges/sparse-arrays/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the matchingStrings function below.
def matchingStrings(strings, queries):
return [strings.count(query) for query in queries]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
strings_count = int(input())
strings = []
for _ in range(strings_count):
strings_item = input()
strings.append(strings_item)
queries_count = int(input())
queries = []
for _ in range(queries_count):
queries_item = input()
queries.append(queries_item)
res = matchingStrings(strings, queries)
fptr.write('\n'.join(map(str, res)))
fptr.write('\n')
fptr.close()
| [
"takecian@gmail.com"
] | takecian@gmail.com |
5639c3dae3a4081d6ebc7dbe8458ef0dd3b62426 | b8297c40b7d9bd262782bbff5f1093d9218180df | /leetCode/06_Remove_Duplicates_from_Sorted_Array.py | 7068136fd4169c840643753ff698920966eb1a4b | [] | no_license | sweetherb100/python | fbb24682cd7098c0bcf85cfd4dc3cce62352e10b | b4ad4d7e1537cc630cfab7110097a2326e3347a3 | refs/heads/master | 2021-01-25T13:58:08.362808 | 2021-01-08T01:58:57 | 2021-01-08T01:58:57 | 88,394,776 | 0 | 1 | null | 2017-04-16T07:19:49 | 2017-04-16T06:08:40 | Python | UTF-8 | Python | false | false | 1,151 | py | '''
Given a sorted array nums, remove the duplicates in-place such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this by modifying the input array 'in-place with O(1)' extra memory.
Example 1:
Given nums = [1,1,2],
Your function should return length = 2, with the first two elements of nums being 1 and 2 respectively.
It doesn't matter what you leave beyond the returned length.
Example 2:
Given nums = [0,0,1,1,1,2,2,3,3,4],
Your function should return length = 5, with the first five elements of nums being modified to 0, 1, 2, 3, and 4 respectively.
It doesn't matter what values are set beyond the returned length.
'''
class Solution(object):
#should change the list by reference and shouldn't use extra space (in-place)
def removeDuplicates(self, nums):
for i in range(len(nums)-1, 0, -1): #starting from the back until index 1
if nums[i] == nums[i-1]:
nums.pop(i) #pop from the back, will not affect for next iteration
return nums
solution = Solution()
print(solution.removeDuplicates([0,0,1,1,1,2,2,3,3,4]))
| [
"sweetherb0411@hotmail.com"
] | sweetherb0411@hotmail.com |
b34f356a30af80027d2c29078af6a0f66263e7db | dc8a337ea1d8a285577d33e5cfd4dbbe846ee1a0 | /src/main/scala/MinCostToConnectAllPoints.py | dea7dced11cb113cb91c76a28ba30e756b63194f | [] | no_license | joestalker1/leetcode | 8a5cdda17abd33c3eef859732f75d7bec77a9d0e | ae392ddbc7eb56cb814b9e9715043c98a89a6314 | refs/heads/master | 2023-04-13T22:09:54.407864 | 2023-04-09T19:22:54 | 2023-04-09T19:22:54 | 131,803,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | class UnionFind:
def __init__(self, n):
self.par = [i for i in range(n)]
self.rank = [0] * n
def find(self, p):
if self.par[p] != p:
self.par[p] = self.find(self.par[p])
return self.par[p]
def union(self, n1, n2):
p1 = self.find(n1)
p2 = self.find(n2)
if p1 == p2:
return False
if self.rank[p1] > self.rank[p2]:
self.par[p2] = p1
elif self.rank[p2] > self.rank[p1]:
self.par[p1] = p2
else:
self.rank[p1] += 1
self.par[p2] = p1
return True
class Solution:
def minCostConnectPoints(self, points: List[List[int]]):
# assert self._minCostConnectPoints([[0,0],[2,2],[3,10],[5,2],[7,0]]) == 20, 'test1'
return self._minCostConnectPoints(points)
def _minCostConnectPoints(self, points: List[List[int]]) -> int:
n = len(points)
edge = []
for i in range(n):
for j in range(i + 1, n):
w = abs(points[i][0] - points[j][0]) + abs(points[i][1] - points[j][1])
edge.append((w, i, j))
edge.sort()
used_edges = 0
mst = 0
uf = UnionFind(n)
for w, s, e in edge:
if uf.union(s, e):
mst += w
used_edges += 1
if used_edges == n - 1:
break
return mst
| [
"denys@dasera.com"
] | denys@dasera.com |
a010ee33b56ae50fc7517776239307ad230c7ab7 | a48b42a2e677357162bffc882008dea44041ea2b | /convex_hull.py | 2f9d0c417a2b031ef9ca95d4e10f8361cbd74bcf | [] | no_license | jackylee1/ml-project | 254548479f973817d8ad69e9fb9b26877486787f | 6bad5ff5201befcce013b6ebb3189a457f679f0c | refs/heads/master | 2020-03-28T18:13:27.550881 | 2013-06-11T14:48:50 | 2013-06-11T14:48:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | #!/usr/bin/python
def cross_product( u, v, w ):
return ( v[ 0 ] - u[ 0 ] ) * ( w[ 1 ] - u[ 1 ] ) - ( v[ 1 ] - u[ 1 ] ) * ( w[ 0 ] - u[ 0 ] )
def monotone_chain( xdata, ydata ):
if len( xdata ) != len( ydata ):
return
points = []
for i in range( len( xdata ) ): points.append( ( xdata[ i ], ydata[ i ] ) )
points = sorted( points )
lower = []
for p in points:
while len( lower ) >= 2 and cross_product( lower[ -2 ], lower[ -1 ], p ) <= 0:
lower.pop()
lower.append( p )
upper = []
for p in reversed( points ):
while len( upper ) >= 2 and cross_product( upper[ -2 ], upper[ -1 ], p ) <= 0:
upper.pop()
upper.append( p )
return lower + upper
if __name__ == '__main__':
import matplotlib.pyplot as plt
from random import randint
# xdata = [ randint( 1, 100 ) for i in xrange( 25 ) ]
# ydata = [ randint( 1, 100 ) for i in xrange( 25 ) ]
from divplot import edgeConv
import cv
import sys
edges = edgeConv( sys.argv[ 1 ] )
xdata, ydata = [], []
for j in xrange( edges.height ):
for i in xrange( edges.width ):
if edges[ j, i ] == 255:
xdata.append( i )
ydata.append( j )
hull = monotone_chain( xdata, ydata )
X = [ x[ 0 ] for x in hull ]
Y = [ y[ 1 ] for y in hull ]
from curve_fit import xWithY
b = -( sum( [ x ** 2 for x in X ] ) - sum( [ y ** 2 for y in Y ] ) ) / ( 2 * sum( xWithY( X, Y ) ) )
from math import sqrt
x = [ b + sqrt( b ** 2 + 1 ), b - sqrt( b ** 2 + 1 ) ]
y = [ 1, 1 ]
plt.plot( X, Y, 'k', x, y, 'o' )
plt.show() | [
"sr.vinay@gmail.com"
] | sr.vinay@gmail.com |
0f782922b34c17a3438dc8c3bef2ffb403a6b2d4 | b5dd8d1b798c94731a84c02d98aafb9147200a85 | /sentence_classification/SABaselineSYNTree/data/Dataloader.py | 8cf5123e195ed3ba7bddf8695827662dae8e3f59 | [] | no_license | zhangmeishan/DepSAWR | 1ae348dd04ec5e46bc5a75c8972b4bc4008528fe | 104f44fd962a42fdee9b1a9332997d35e8461ff4 | refs/heads/master | 2021-07-09T20:56:56.897774 | 2020-10-27T05:41:08 | 2020-10-27T05:41:08 | 206,974,879 | 15 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,903 | py | from collections import Counter
from data.Vocab import *
from data.SA import *
import numpy as np
import torch
def read_corpus(file_path):
data = []
with open(file_path, 'r') as infile:
for line in infile:
divides = line.strip().split('|||')
section_num = len(divides)
if section_num == 2:
worditems = divides[1].strip().split(' ')
words, heads, rels = [], [], []
for worditem in worditems:
id1 = worditem.rfind('_')
id2 = worditem.rfind('_', 0, id1 - 1)
words.append(worditem[:id2])
heads.append(int(worditem[id2 + 1:id1]))
rels.append(worditem[id1 + 1:])
tag = divides[0].strip()
cur_data = Instance(words, heads, rels, tag)
data.append(cur_data)
return data
def creatVocab(corpusFile, min_occur_count):
word_counter = Counter()
rel_counter = Counter()
tag_counter = Counter()
alldatas = read_corpus(corpusFile)
for inst in alldatas:
for curword, curhead, currel in zip(inst.forms, inst.heads, inst.rels):
word_counter[curword] += 1
rel_counter[currel] += 1
tag_counter[inst.tag] += 1
return SAVocab(word_counter, rel_counter, tag_counter, min_occur_count)
def insts_numberize(insts, vocab):
for inst in insts:
yield inst2id(inst, vocab)
def inst2id(inst, vocab):
inputs = []
for form, rel in zip(inst.forms, inst.rels):
wordid = vocab.word2id(form)
extwordid = vocab.extword2id(form)
relid = vocab.rel2id(rel)
inputs.append([wordid, extwordid, relid])
return inputs, vocab.tag2id(inst.tag), inst
def batch_slice(data, batch_size):
batch_num = int(np.ceil(len(data) / float(batch_size)))
for i in range(batch_num):
cur_batch_size = batch_size if i < batch_num - 1 else len(data) - batch_size * i
insts = [data[i * batch_size + b] for b in range(cur_batch_size)]
yield insts
def data_iter(data, batch_size, shuffle=True):
"""
randomly permute data, then sort by source length, and partition into batches
ensure that the length of insts in each batch
"""
batched_data = []
if shuffle: np.random.shuffle(data)
batched_data.extend(list(batch_slice(data, batch_size)))
if shuffle: np.random.shuffle(batched_data)
for batch in batched_data:
yield batch
def batch_data_variable(batch, vocab):
length = len(batch[0].forms)
batch_size = len(batch)
for b in range(1, batch_size):
if len(batch[b].forms) > length: length = len(batch[b].forms)
words = torch.zeros([batch_size, length], dtype=torch.int64, requires_grad=False)
extwords = torch.zeros([batch_size, length], dtype=torch.int64, requires_grad=False)
rels = torch.zeros([batch_size, length], dtype=torch.int64, requires_grad=False)
masks = torch.zeros([batch_size, length], dtype=torch.float, requires_grad=False)
tags = torch.zeros([batch_size], dtype=torch.int64, requires_grad=False)
lengths = []
heads = []
b = 0
for inputs, tagid, inst in insts_numberize(batch, vocab):
index = 0
length = len(inputs)
lengths.append(length)
heads.append(inst.heads)
tags[b] = tagid
for curword in inputs:
words[b, index] = curword[0]
extwords[b, index] = curword[1]
rels[b, index] = curword[2]
masks[b, index] = 1
index += 1
b += 1
return words, extwords, rels, heads, tags, lengths, masks
def batch_variable_inst(insts, tagids, vocab):
for inst, tagid in zip(insts, tagids):
pred_tag = vocab.id2tag(tagid)
yield Instance(inst.words, inst.heads, inst.rels, pred_tag), pred_tag == inst.tag
| [
"mason.zms@gmail.com"
] | mason.zms@gmail.com |
999fb90e65b6fc2b48dff1808aacdc58a61fa753 | 7a8f6f7a032e026007228b6223763ff046fc457a | /NeoBoot/ex_init.py | 4c0c7866dc4aff5ebed778402b2f0f291f4e3121 | [] | no_license | VARAVAN1/neoboot | 189fb8c5fa6bca1d52dd07e5143fb86d1cc2bced | b316149f7ade85491d3f719cdd3fec8eae4a07aa | refs/heads/master | 2021-08-08T22:37:01.083721 | 2017-11-11T13:44:35 | 2017-11-11T13:44:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py |
import sys, extract
if len(sys.argv) < 11:
pass
else:
extract.NEOBootMainEx(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8], sys.argv[9], sys.argv[10], sys.argv[11]) | [
"noreply@github.com"
] | VARAVAN1.noreply@github.com |
1e8824e78e6d976e7d8d77973b0743614bcb2f5b | 8faab10608767d340d7b689047b8b95a321043b7 | /backend/access/project.py | 5774c3747bd97e9c9fa27da4db9155ef6b7a64ec | [] | no_license | EFFY017/project_management | 2676bf59c853db6e725a4b104732f8317aa5c13a | b6e18ecc09eed7bf257f824f79c6c91107310315 | refs/heads/master | 2022-04-17T15:38:19.240199 | 2020-04-12T08:51:57 | 2020-04-12T08:51:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,215 | py | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__) , '..'))
from flask import json, Blueprint
from logic import project, user, work_time
from util.access import *
import config
project_access = Blueprint('project_access', __name__)
@project_access.route('/mine', methods=['GET'])
def project_mine():
request_data = get_value_dict()
if not check_dict(request_data, ['uid', 'career']):
return json.dumps('PARAM ERROR')
if int(request_data['career']) == config.career_project_manager:
data = project.get_info(uid=request_data['uid'], include_reject=True)
else:
data = project.get_info_include_work_time(request_data['uid'])
if has_error(data):
return json.dumps('BACKEND ERROR')
else:
return json.dumps(data)
@project_access.route('/modify/show', methods=['GET'])
def project_modify_show():
request_data = get_value_dict()
if not check_dict(request_data, ['project_id']):
return json.dumps('PARAM ERROR')
data = project.get_info(project_id=request_data['project_id'], detail=True, include_reject=True)[0]
data_project_superior = user.get_project_superior()
if has_error(data):
return json.dumps('BACKEND ERROR')
else:
data['project_superior'] = data_project_superior
return json.dumps(data)
@project_access.route('/modify/save', methods=['POST'])
def project_modify_save():
request_data = get_value_dict()
if not check_dict(request_data, ['id', 'name', 'describe', 'scheduled_time', 'delivery_day', \
'project_superior_id', 'major_milestones', 'adopting_technology', \
'business_area', 'main_function']):
return json.dumps('PARAM ERROR')
data = project.modify(request_data['id'], request_data['name'], request_data['describe'], \
request_data['scheduled_time'], request_data['delivery_day'], request_data['project_superior_id'], \
request_data['major_milestones'], request_data['adopting_technology'], request_data['business_area'], request_data['main_function'])
if has_error(data):
return json.dumps('BACKEND ERROR')
else:
return json.dumps({'status': data})
@project_access.route('/create/show', methods=['GET'])
def project_create_show():
request_data = get_value_dict()
if not check_dict(request_data, ['uid']):
return json.dumps('PARAM ERROR')
data_project_superior = user.get_project_superior()
data_custom = user.get_custom()
if has_error(data_project_superior) or has_error(data_custom):
return json.dumps('BACKEND ERROR')
else:
data = {}
data['project_superior'] = data_project_superior
data['custom'] = data_custom
return json.dumps(data)
@project_access.route('/create/save', methods=['POST'])
def project_create_save():
request_data = get_value_dict()
if not check_dict(request_data, ['name', 'describe', 'development_type', 'scheduled_time', 'delivery_day', \
'project_superior_id', 'custom_id', 'major_milestones', 'adopting_technology', \
'business_area', 'main_function']):
return json.dumps('PARAM ERROR')
data = project.create(request_data['name'], request_data['describe'], request_data['development_type'], \
request_data['scheduled_time'], request_data['delivery_day'], request_data['project_superior_id'], request_data['custom_id'], \
request_data['major_milestones'], request_data['adopting_technology'], request_data['business_area'], request_data['main_function'])
if has_error(data):
return json.dumps('BACKEND ERROR')
else:
return json.dumps({'status': data})
@project_access.route('/work_time', methods=['GET'])
def project_work_time():
request_data = get_value_dict()
if not check_dict(request_data, ['uid', 'project_id']):
return json.dumps('PARAM ERROR')
data = work_time.get_info_by_uid_project_id(request_data['uid'], request_data['project_id'])
if has_error(data):
return json.dumps('BACKEND ERROR')
else:
return json.dumps(data)
| [
"scsse_hqh2016@126.com"
] | scsse_hqh2016@126.com |
a5dc9e10e3560336547353d465ffefd2b64758c2 | 418d5e85e8271949e4119c2e24b68e88a1c240f8 | /web/admin.py | 9fb8cd436e4341e0486e4e4c3cade436067d7348 | [] | no_license | codeartisanacademy/learnwith | 22b856ec3b00494bf706bb08e168db8c97abed62 | 5422c08a3706d9087fad367caa183708ddbae9c8 | refs/heads/master | 2023-01-11T21:45:47.329651 | 2020-11-14T06:29:37 | 2020-11-14T06:29:37 | 293,010,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from django_summernote.admin import SummernoteModelAdmin
from django.contrib import admin
from .models import Profile, Topic, Subject, LearningDate, SubjectSubscription
# Register your models here.
class SubjectAdmin(SummernoteModelAdmin):
summernote_fields = ('additional_information')
admin.site.register(Profile)
admin.site.register(Topic)
admin.site.register(Subject, SubjectAdmin)
admin.site.register(LearningDate)
admin.site.register(SubjectSubscription) | [
"wahyudi.coding@gmail.com"
] | wahyudi.coding@gmail.com |
bab84011c59fb43b74ce36be6c18a2045a9d1973 | 3aa507bf530d1b96cbc7b300f3c9ecc8f75a5df4 | /project_1/past_examples/Star_Wars_Manish_Singh/Star Wars.py | 4010551ce66c1977d876bb2c7068ac89cc0852a0 | [] | no_license | endrianasy/MIDS_python_showcase | aa4bc9a5bf388708322839852b758cbf63d93ebe | 69d155522823c8497eb8258291bd425e087bcca7 | refs/heads/master | 2023-03-25T23:45:33.861697 | 2021-02-28T04:49:14 | 2021-02-28T04:49:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,200 | py | #STAR WARS ADVENTURE GAME!
#Manish Singh
import os
import time
import numpy as np
import sys
class item:
    """A collectible object with a display name and an effect string.

    The ``properties`` string encodes what happens when the item is
    consumed (e.g. ``"hit 3"`` restores health, ``"weapon 2"`` boosts
    weapon damage, ``"JJ ..."`` is a Jar Jar line).
    """

    def __init__(self, properties, name):
        """Store the effect string and the item's display name."""
        self.name = name
        self.properties = properties

    def used(self):
        """Mark the item as spent by clearing its effect string."""
        self.properties = None
class player:
    """The user-controlled hero (Luke or Han) with health, a weapon and a fanny pack."""

    def __init__(self, character):
        # Stats are hard-coded per hero: Luke hits harder, Han has more health.
        if character == "Luke":
            self.name = "Luke"
            self.last_name = "Skywalker"
            self.weapon = "Lightsaber"
            self.weapon_damage = 8
            self.partner = "R2-D2"
            self.health = 10
        elif character == "Han":
            self.name = "Han"
            self.last_name = "Solo"
            self.weapon = "Blaster"
            self.weapon_damage = 5
            self.partner = "Chewbacca"
            self.health = 13
        # Starting inventory: two loaves of bread (used as trade currency).
        self.fanny_pack = ["bread", "bread"]

    def hit(self, dam):
        """Subtract *dam* from health; pass a negative value to heal."""
        self.health -= dam

    def weapon_plus(self, val):
        """Permanently add *val* to the weapon's damage."""
        self.weapon_damage += val

    def use(self, thing, jj):
        """Consume *thing* from the fanny pack and apply its effect string.

        Effect strings: "hit N" heals/damages, "weapon N" boosts damage,
        "JJ ..." makes Jar Jar react (only when *jj* is True).
        """
        if thing in self.fanny_pack:
            print("You use your " + thing.name)
            if thing.properties[0:3] == 'hit':
                print(thing.properties[4:], "has been added to your health.")
                self.hit(-int(thing.properties[3:]))
                print("HEALTH:", self.health)
            elif thing.properties[0:6] == 'weapon':
                print(thing.properties[7:], "has been added to your weapon damage.")
                self.weapon_damage += int(thing.properties[6:])
            elif thing.properties[0:2] == "JJ":
                if jj == True:
                    # BUG FIX: the original sliced [4:], chopping the first
                    # letter of Jar Jar's line; the payload starts at index 3.
                    print("JAR JAR: " + thing.properties[3:])
                elif jj == False:
                    print("You don't have a Gungan to use this on.")
            # BUG FIX: removal used to run unconditionally (outside this if),
            # raising ValueError when the item was not in the fanny pack.
            self.fanny_pack.remove(thing)
        else:
            print("You cannot use an item that's not in your fanny pack. \n")

    def fight(self, character_, JJ):
        """The fighting interface as the user fights characters."""
        os.system('clear')
        print("You run into " + character_.name+". You must fight him.")
        if JJ == True:
            # BUG FIX: the original read "seos.systelf.name" — a corrupted
            # "self.name" (NameError as soon as Jar Jar joined a fight).
            print("JAR JAR: OH NO!!! RUN, " + self.name.upper() + "!!!!! AAAAAAHHHHHHHHH!\n")
        print(character_.name.upper() + ": " + character_.catch_phrase)
        while self.health>0 and character_.health>0:
            print("\nHEALTH:", self.health)
            print("ENEMY HEALTH:", character_.health)
            rr = input("Press [ENTER] to attack.")
            # The player always lands a full-damage blow; the enemy hits back
            # for a random 1..damage points.
            character_.hit(self.weapon_damage)
            self.hit(np.random.randint(1,character_.damage+1))
            print(character_.name + " hit you.")
            print("You hit him back.")
            if self.health <=0:
                print("Your health is now 0. "+character_.name+" has killed you.")
                time.sleep(6)
                lose()
            elif character_.health <=0:
                print("\nYou won the fight.\nHEALTH:", self.health,"\n")
                if JJ == True:
                    print("JAR JAR: How wude of him!\n\n\n")
                time.sleep(5.7)
                print()
                return "Victory"

    def trade(self, character_, thing):
        """Offer to swap one bread for *thing* with a friendly character."""
        if "bread" in self.fanny_pack or "water" in self.fanny_pack:
            print(character_.name, " would like to trade a " + thing.name + " for bread.")
            print("You currently have", self.fanny_pack.count('bread'), " breads in your fanny pack.")
            choice = input("Reply 'yes' or 'no'. ")
            if choice.lower() == "yes":
                self.fanny_pack.append(thing)
                self.fanny_pack.remove("bread")
                print(thing.name.title(),"has been added to your fanny pack. You may use this on the final planet, whatever that is. \n")
                time.sleep(.5)
            else:
                pass

    def fanny(self):
        """Print the named items carried (plain strings such as bread are skipped)."""
        f = ", ".join([i.name for i in self.fanny_pack if type(i) != str])
        print("Your fanny pack contains: " + f +".")
class character:
    """A non-player combatant (or friendly NPC) the hero can run into."""

    def __init__(self, nam, weapo, damag, catch_phras, heal):
        # Copy the constructor arguments onto the instance.
        self.name = nam
        self.catch_phrase = catch_phras
        self.weapon = weapo
        self.health = heal
        self.damage = damag

    def hit(self, dam):
        """Take *dam* points of damage."""
        self.health = self.health - dam
class planet:
    """A destination in the galaxy: flavour text, a resident character and exits."""

    def __init__(self, nam, probab, ene, mess, neighb):
        # probab drives the random-encounter roll; ene is the planet's
        # resident character (enemy or friendly); neighb lists reachable names.
        self.name = nam
        self.probability = probab
        self.characters = ene
        self.message = mess
        self.neighbors = neighb

    def disp(self):
        """An easy way to display a planet's properties"""
        print(f"You arrive on {self.name}. The planet is{self.message}")
class room:
    """One room of the Death Star: loot, an occupant, flavour text and exits."""

    def __init__(self, objects, enemies, message, neighboring_rooms):
        self.objects = objects              # item instances still lying around
        self.enemies = enemies              # character occupying the room (may be None)
        self.message = message              # optional hint printed by the game loop
        self.neighbors = neighboring_rooms  # names of rooms reachable from here

    def inspect(self):
        """List the items present, or say the room is empty."""
        if self.objects != []:
            print("This room contains: " + ", ".join([i.name for i in self.objects]) + ".")
        else:
            print("This room is empty.")

    def take(self, thing, player):
        """Move *thing* from this room into *player*'s fanny pack.

        If *thing* is not here, keep prompting the user for an item name
        until a match is found or the user types 'cancel'.
        """
        if thing in self.objects:
            self.objects.remove(thing)
            player.fanny_pack.append(thing)
            print("You added " + thing.name.lower() + " to your fanny pack.")
        else:
            # BUG FIX: the original retry loop compared the *string* typed by
            # the user against the item *objects* in the room (never equal),
            # then crashed on thing.name for a plain string, and never broke
            # out after a successful take. Match by item name instead.
            while True:
                entry = input("That is not available in this room. Please try again or type 'cancel' to do something else.\n")
                if entry.lower() == "cancel":
                    break
                match = next((obj for obj in self.objects
                              if obj.name.lower() == entry.lower()), None)
                if match is not None:
                    self.objects.remove(match)
                    player.fanny_pack.append(match)
                    print("You added " + match.name.lower() + " to your fanny pack.")
                    break
                print()
def lose():
    """Game-over screen: announce defeat, then drop back to the main menu."""
    os.system('clear')
    print("GAME OVER.")
    print("You have lost, and Darth Vader has won. The Dark side now rules everything. People are suffering due to your shortcomings.")
    rr = input("Press [ENTER] to return to the main menu.")
    # NOTE(review): re-entering menu() recursively deepens the call stack on
    # every game-over; fine for a toy game, but it never unwinds.
    menu()
def initialize(): #starts the game and establishes the player's character.
    """Show the intro crawl, ask which hero to play, and return 'Luke' or 'Han'."""
    os.system('clear')
    print("A long time ago in a galaxy far, far away...")
    time.sleep(3)
    os.system('clear')
    print("STAR WARS! \nPrincess Leia has been captured by the nefarious Darth Vader! You have to stop him! The fate of the galaxy rests in your hands! Go forth and save her so that she can help the rebels defeat the Empire.")
    print("Select your character.")
    char = input("Would you like to play as Han Solo or Luke Skywalker? Type your choice and press [ENTER].\n")
    # Re-prompt until the answer is one of the four accepted spellings.
    # NOTE(review): the two branches below are near-duplicates; the else
    # branch prints the raw `char` instead of `char.title()` — presumably
    # unintended, but both return the same normalized first name.
    if char.lower() not in ["luke", "han", "luke skywalker", "han solo"]:
        while char.lower() not in ["luke", "han", "luke skywalker", "han solo"]:
            char = input("Han Solo or Luke Skywalker? Type your choice and press [ENTER].\n")
        print("Ok, " + char.title() +", enough chit-chat. Time to save the Galaxy. Be warned, it is very easy to fail your mission. Good thing this is a computer game and you can try as many times as you want. \nMay the force be with you.")
        rr = input("\n\nPress [ENTER] to continue...")
        os.system('clear')
        return char.title().split()[0] #in case 1st and last name are used
    else:
        print("Ok, " + char +", enough chit-chat. Time to save the Galaxy. Be warned, it is very easy to fail your mission. Good thing this is a computer game and you can try as many times as you want. \nMay the force be with you.")
        rr = input("\n\nPress [ENTER] to continue...")
        os.system('clear')
        return char.title().split()[0]
def menu(): #the main menu for the game
    """Main menu: show the title banner and start the game on ENTER."""
    os.system('clear')
    print("---------------STAR WARS---------------")
    print("The anachronous text adventure, by Manish Singh.")
    print()
    wait = input("Press [ENTER] to play.")
    os.system('clear')
    game()
def choose_one(data):
    """Return a uniformly random element of *data* (used to randomise events)."""
    idx = np.random.randint(0, len(data))
    return data[idx]
def game():
    """The game itself, with all levels as nested functions."""
    # NOTE(review): game() builds all data and defines part1..part4 below, but
    # nothing in game()'s own body ever calls part1() — as written the
    # adventure only starts if something else invokes it; confirm intent.
    #Where I define all the game's enemies
    Darth_Vader = character("Darth Vader", "lightsaber", 9, "Who's your daddy?", 26)
    Wampa = character("Wampa", "claws and teeth", 4, "ROAR!", 1)
    droid = character("Battle Droid", "blaster rifle", 4, "Roger roger.", 5)
    stormtrooper = character("Stormtrooper", "blaster rifle", 4, "I've been ordered to kill you by Lord Vader. Comply and die quickly.", 10)
    Tusken = character("Sand Person", "blaster pistol", 4, "ASSDOFIANS (translation: FIGHT ME AND LOSE!)", 6)
    # Friendly NPCs reuse the character class with None stats.
    Wookie = character("Wookie", "he's friendly",3,None,None)
    Ewok = character("Ewok", None,None,None,None)
    #Place where I define all planets in the game
    Hoth = planet("Hoth", 1000, Wampa, " cold and snowy", ["Alderaan","Mustafar","Kashyyyk"])
    Alderaan = planet("Alderaan", 3, None, " peaceful. But suddenly, you see a green laser approaching in the sky. The planet blows up, courtesy of Darth Vader.", [])
    Mustafar = planet("the volcanic planet of Mustafar",3, None, " comletely consumed by lava. Your ship sinks into the magma.",[])
    Endor = planet("Endor",3,Ewok , " the home of the Ewoks.",["Kashyyyk"])
    Courasant = planet("Courasant", 1000, stormtrooper, " the former capital of the Republic.", ["Tatooine", "Hyperspace Teleporter", "Naboo"])
    Kashyyyk = planet("Kashyyyk", 3, Wookie, " the home of the Wookies.", ["Naboo","Hoth", "Endor"])
    Naboo = planet("Naboo", 60, droid, " birthplace of Padme Amidala and home of the Gungans.", ["Kashyyyk","Courasant","Hoth","Mustafar","Bespin"])
    Bespin = planet("Bespin", 70, stormtrooper, " the home of Lando Calrissian",['Naboo'])
    Tatooine = planet("Tatooine",100, Tusken," the birth place of Darth Vader! *gasp*", ["Courasant","Hyperspace Teleporter","Dagobah","Jakku"])
    Dagobah = planet("the Dagobah System",3,None, " resting place of the great Jedi Master Yoda",["Tatooine"])
    Jakku = planet("Jakku",53,stormtrooper," the home of Rey (remember, this game is anachronistic. She's technically not alive yet but just go with it.)",['Tatooine','Death Star'])
    hyperspace_variable = None
    Hyperspace_Teleporter = planet("The Hyperspace Teleporter!", 3,None, "... well, it's not a planet. When you press [ENTER], you will be randomly be teleported to a planet. Who knows, you may end up where you need to go! You also may teleport right back here or even teleport to your death. Good luck.", [hyperspace_variable])
    Death_Star = planet("the Death Star.", 100000000, Darth_Vader, "... THE PLACE YOU WERE SUPPOSED TO GO! Congratulations!", ['Jakku'])
    planets = {"Hoth":Hoth, "Alderaan":Alderaan, "Mustafar":Mustafar, "Endor":Endor, "Courasant":Courasant, "Kashyyyk":Kashyyyk, "Naboo":Naboo, "Bespin":Bespin, "Tatooine":Tatooine, "Dagobah":Dagobah, "Jakku":Jakku, "Hyperspace Teleporter":Hyperspace_Teleporter, "Death Star":Death_Star}
    #Where I define all items that a player will find.
    health_pack = item("hit 5", "health pack")
    gungan_juice = item("JJ Mmmmmm! Tanks for da juice! Me like!", "Gungan Juice")
    gungan_repellant = item("JJ AAAAAAAH! WHY DID YOU DO DAT??? IT STING!", "Gungan Repellant")
    crystal = item("weapon 3", "crystal")
    bomb = item("hit -2", "bomb")
    blank_item = item("","")
    items = {'health pack' : health_pack, "gungan juice" : gungan_juice, "gungan repellant" : gungan_repellant, 'crystal' : crystal, 'bomb' : bomb}
    def string_to_class(text, thing_dict):
        """Map a user-typed item name to the matching item instance (or None)."""
        # NOTE(review): the lookup indexes the enclosing `items` dict instead
        # of the `thing_dict` parameter; this only works because every caller
        # passes `items` — confirm before reusing with another dict.
        result = None
        for i in thing_dict:
            if thing_dict[i].name.lower() == text.lower():
                result = items[text.lower()]
            else:
                continue
        return result
    #Where I define all rooms in the Death Star.
    room_1 = room([health_pack, gungan_juice], stormtrooper, None, ["room 2"])
    room_2 = room([gungan_repellant], stormtrooper, None, ["room 1","room 3", "room 4"])
    room_3 = room([], droid, None, ["room 2"])
    room_4 = room([health_pack, crystal, bomb], stormtrooper, "Hint: room 5 has the princess.", ["room 5"])
    room_5 = room([], Darth_Vader, "You have made it to the final room.", [None])
    rooms = {"room 1":room_1, "room 2":room_2, "room 3":room_3, "room 4":room_4, "room 5":room_5}
    #Inital setup
    char = player(initialize())
    name = char.name #will make it much easier for the rest of the code
    #Auxiliary Functions
    def travel(planet_name, planet_list, JJ):
        """Allows a user to travel from planet to planet."""
        # Returns the destination planet object; *planet_name* is the planet
        # being departed, *planet_list* maps names to planet objects.
        if JJ == True:
            print("JAR JAR: WHERE ARE WE?? OH NOOOOOOOO!")
        if planet_name.name == "The Hyperspace Teleporter!":
            # Hyperspace picks uniformly from the enclosing `planets` dict
            # (closure) rather than the planet_list argument.
            print("Welcome to the Hyperspace Teleporter. You will be teleported to a random planet. Good luck!")
            k = choose_one(list(planets.keys()))
            return planets[k]
        else:
            planet_name.disp()
            print("You may travel to the following planets: " + ", ".join(planet_name.neighbors))
            # Re-prompt until the (title-cased) choice is a listed neighbor.
            choice = "dummy"
            while choice.title() not in planet_name.neighbors:
                choice = input("\nSelect a planet from the above, and type it below.\n")
            return planet_list[choice.title()]
    #Gameplay. Functions within this function make the most sense.
    def part1(char):
        """This part introduces the user to the game, and presents a chance that the user dies from the start."""
        print("HEALTH: " + str(char.health))
        print("You are flying in your starfighter with " + char.partner + ". You have no idea where Darth Vader and the Princess are. You don't know where to go, and time is running out.")
        print("You start to compose a text message to Rebel Base, but you do not see an asteroid that is hurdling right toward you. " + char.partner +" tries warning you, but it's too late. You began spiraling downwards. Don't text and fly.")
        print("You have a choice here. Option 1 is to enter the escape pod, but you may not make it to a planet. Option 2 is to steer the ship near some planet, but the ship may explode or you may die in the crash. The choice is yours. \n Type [1] to enter escape pod or [2] to stay in ship.")
        c = input()
        # NOTE(review): any answer other than '1' or '2' falls through both
        # branches and part1 simply returns — the game silently stops.
        if c=='1':
            # randint(48,100) >= 50 succeeds ~96% of the time despite the
            # "50% odds" comment — deliberately better odds for option 1.
            occurance = np.random.randint(48,100) #People should choose this one for best odds of survival.
            if occurance >= 50: #50% odds of survival
                os.system('clear')
                part2(char, c.lower())
            else:
                print("Your escape pod never makes it to a planet.")
                rr = input("\nPress [ENTER] to continue...")
                lose()
        elif c == "2":
            occurance = np.random.randint(0,100) #50% chance of survival
            if occurance >= 50:
                os.system('clear')
                part2(char, c.lower())
            else:
                print("Your ship crashes and you die.", char.partner,"mourns your loss.")
                rr = input("\nPress [ENTER] to continue...")
                lose()
    def part2(char, choice):
        """Spawns the player at a random planet, displaying necessary text."""
        location = choose_one([Hoth, Courasant, Kashyyyk, Endor])
        hitting_counter = np.random.randint(1,3) #will have to pass this value to part 3 of the game.
        char.hit(hitting_counter) #crash landing causes damage
        print("You chose option " + choice + ". Good decision!")
        location.disp()
        time.sleep(1)
        print("Your crash landing injures you.")
        print("HEALTH: " + str(char.health))
        print("You should be happy to be alive.")
        part3(location, hitting_counter, char)
    def part3(location,hitting_counter,char):
        """The most complex part of the game. The user must travel from planet to planet until the Death Star is found. The player is not told this."""
        # NOTE(review): part2 already applied char.hit(hitting_counter); this
        # line deducts the same amount a second time — confirm intent.
        char.health-= hitting_counter
        print("As a survival bonus, you have a choice. You can either add 2 to your current weapon damage, or a random number from 1 to 4 to your health. Choose wisely.")
        bonus = input("[1] for +2 weapon damage. \n[2] for +1-4 health.\n")
        if bonus == '1':
            char.weapon_plus(2)
            print("Your weapon now does", char.weapon_damage, "damage.")
        elif bonus == '2':
            # hit() with a negative amount heals.
            char.hit(-np.random.randint(1,5))
            print("HEALTH:", char.health)
        time.sleep(1.5)
        print("\n You and "+char.partner+" find a brand new starfighter on " + location.name)
        print("You must now locate the planet where Darth Vader is holding the Princess. Good luck.")
        current_location = location
        history = []
        JJ = False #boolean integer to represent weather Jar Jar Binks is accompanying the player on the journey. Jar Jar begins to follow the player at Naboo.
        time.sleep(3)
        # NOTE(review): this condition compares a planet *object* with the
        # string "Death Star", so it is always True; the loop exits only via
        # the `break` at the bottom when Death_Star is reached.
        while current_location != "Death Star":
            if current_location in [Mustafar, Alderaan]:
                # Landing here is instant death.
                current_location.disp()
                time.sleep(5)
                lose()
            elif current_location in [Kashyyyk, Endor]:
                # Friendly planet: offer a bread-for-item trade.
                print("You stumble upon a friendly " + current_location.characters.name +".")
                trade_item = np.random.choice([bomb, health_pack, crystal, health_pack, health_pack, bomb, bomb])
                char.trade(current_location.characters, trade_item)
            current_location = travel(current_location, planets, JJ)
            if current_location.name == "Naboo":
                # Jar Jar joins on the first ever visit to Naboo.
                if "Naboo" not in history:
                    print("Jar Jar Binks will now accompany you on your journey. Sorry, you can't get rid of him.")
                    print("JAR JAR: Meesa Jar Jar Binks! Meesa your humble servant! \n\n")
                    JJ = True
                else:
                    print()
            history.append(current_location.name)
            # Hostile planets: roll against the planet's probability for a fight.
            if current_location in [Jakku, Tatooine,Courasant,Naboo,Bespin,Hoth]:
                if np.random.randint(1,current_location.probability) > 50:
                    char.fight(current_location.characters, JJ)
                else:
                    continue
            if current_location is Death_Star:
                break
        part4(char, char.health, history, JJ)
    def part4(char, health, hist, J):
        """The player naviages the Death Star to locate Vader and the Princess."""
        #On to the rest of the game...
        os.system('clear')
        history = hist
        JJ = J
        if JJ is True:
            print("JAR JAR: MEESA SCARED")
            print("JAR JAR: WAT IS DIS METAL TING WE JUST GOT INTO?!?! WEESA GONNA DIE!!")
            time.sleep(2)
        os.system('clear')
        print("Welcome to the Death Star. You will travel from room to room until you find Darth Vader and the Princess.")
        print("When you enter a room, you may type 'inspect' to see what items you may add to your fanny pack and what rooms you can enter from your current one, and then type 'take <item>' to add it to your fanny pack.")
        print("Type 'fanny' to see the items in your fanny pack. When you want to use an item, simply type 'use <item>' and you will use it.")
        print("To travel to a different room, type 'go <room>'.")
        rr = input("\nPress [ENTER] to continue.")
        print("\n\n")
        print("You are now in the landing hangar.")
        current_room = room_1
        # Command loop: inspect / take <item> / use <item> / go <room> / fanny.
        while True:
            choice = input("What would you like to do? ")
            if choice.lower() == 'inspect':
                current_room.inspect()
                print("You may travel to ", ", ".join(current_room.neighbors), "\n")
                continue
            elif choice[0:4].lower() == 'take':
                # Everything after "take " is the item name.
                thing = string_to_class(choice[5:], items)
                if thing != None:
                    current_room.take(thing, char)
                    print()
                else:
                    print("Error in taking that object. \n")
                continue
            elif choice[0:3].lower() == 'use':
                thing = string_to_class(choice[4:], items)
                if thing != None:
                    if thing in char.fanny_pack:
                        char.use(thing, JJ)
                    else:
                        print("You must take an item before you use it.")
                else:
                    print("Sorry, item does not exist. \n")
                continue
            elif choice[0:2].lower() == 'go':
                if choice[3:] in current_room.neighbors:
                    current_room = rooms[choice[3:].lower()]
                    print("\nYou are now in " + choice[3:] + ".")
                else:
                    print("Sorry, can't travel there.")
            elif choice[0:5].lower() == "fanny":
                char.fanny()
            else:
                print("That is not a valid command.")
            if current_room == room_4:
                print("You hear mechanic breathing. You must be getting close.")
            if current_room == room_5:
                # Room 5 holds Darth Vader — leave the loop for the boss fight.
                break
        os.system('clear')
        print("Loading final battle... ")
        time.sleep(4) #for aesthetic purposes
        os.system('clear')
        print("You have travelled to all these places: " + ", ".join(history) + ". You have escaped death by the hair on this journey. This is what it all comes down to.")
        rr = input("Press [ENTER] to enter the final battle.")
        char.fight(Darth_Vader, JJ)
        print("YOU WIN!!! CONGRATULATIONS!!!")
        time.sleep(3)
        # NOTE(review): after winning, part1(char) restarts the adventure
        # before credits() ever runs — looks unintended; confirm.
        part1(char)
        credits()
def credits():
    """Credits. Just for fun."""
    os.system('clear')
    cred = ["-------STAR WARS-------", "The anachronous adventure.", "", "", "Produced by Manish Singh", "Produced for INFO W18", "Teacher: Chris Llop", "Summer, 2016"]
    # Roll each credit line with a half-second pause, then return to the menu.
    for credit in cred:
        print(credit, "\n")
        time.sleep(.5)
    time.sleep(3)
    menu()
#Now, to play the game:
# Module entry point: shows the main menu, which starts game() on ENTER.
menu()
"gunnarkleemann@Gunnars-MacBook-Pro.local"
] | gunnarkleemann@Gunnars-MacBook-Pro.local |
9f2b55e49ea1a8e02acd278b0316e91f06980ded | b0e7b52598587558a37338dc35c76cabae40ea6e | /postscoring_eda.py | d4a1db6ab57228956f446086752b3ddc98a0139a | [] | no_license | sanjeevgadre/Kaggle-RiiiD | c82030ad376799c6e467429fd5ae6dfc32650a53 | 5e97c847440c20f8c1d5dbbc1dc4598e46257968 | refs/heads/master | 2023-02-09T08:44:35.914968 | 2021-01-01T08:55:02 | 2021-01-01T08:55:02 | 312,560,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,478 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 08:40:43 2020

@author: sanjeev
"""
# Post-scoring exploratory data analysis of userscores / question / lecture
# CSVs. The "#%%" markers are IDE cell separators (Spyder/VS Code); the bare
# expression statements (e.g. `len(...)`) only display results when the cells
# are run interactively — they have no effect as a plain script. The inline
# "# [...]" comments record results observed on a previous run.

#%% Libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

#%% Helper variables
DATAPATH = './data/'

#%% Userscores from mean run
# Columns: 0 = user id, 1..7 = per-part scores, 8 = processed flag (0/1).
userscores = np.genfromtxt(DATAPATH + 'userscores.csv', delimiter = ',')
len(userscores[userscores[:, 8] == 0])
# 88 users remain unscored (unprocessed) after 113M transactions i.e 0.02%
mean = np.mean(userscores[:, 1:8], axis = 0)
# Mean userscores (after 3rd reward policy) - [0.02951912, 0.05758864, 0.04761834, 0.04515786, 0.2037545, 0.04537496, 0.03697]
np.median(userscores[:, 1:8], axis = 0)
# [0.02546064, 0.0573015 , 0.03846656, 0.03403131, 0.19177877, 0.03781714, 0.02767306]
sigma = np.std(userscores[:, 1:8], axis = 0)
# Std dev. (after 3rd reward policy )- [0.20179399, 0.21116786, 0.25834561, 0.25590359, 0.58923841, 0.21340565, 0.22842549]
# The dispersion in userscores has come down dramatically after the reward policy change; ergo the change was appropriate
print("Coeff. of Variance for userscores -->",
      np.std(userscores[:, 1:8], axis = 0) / np.mean(userscores[:, 1:8], axis = 0))
# 6.83604271, 3.6668316 , 5.42533904, 5.66686651, 2.89190371, 4.70315931, 6.17867076]
# Per-part [mean - 2*sigma, mean + 2*sigma] clipping bounds, shape (7, 2).
two_sigma_range = np.concatenate(((mean - 2*sigma).reshape(-1, 1),
                                  (mean + 2*sigma).reshape(-1, 1)), axis = 1)
# Log-scale histograms of the 7 part scores, restricted to the 2-sigma range.
fig, axs = plt.subplots(nrows = 4, ncols = 2, figsize = (10, 15))
COL = 1
for r in range(4):
    for c in range(2):
        if COL < 8:
            axs[r, c].hist(userscores[:, COL], bins = 100, log = True,
                           range = (two_sigma_range[COL - 1, 0], two_sigma_range[COL - 1, 1]))
            COL += 1
plt.savefig('./foo.jpg')
plt.show()
# Fraction of users whose part score lies inside its own 2-sigma band.
for i in range(7):
    foo = np.logical_and(userscores[:, i + 1] > two_sigma_range[i, 0],
                         userscores[:, i + 1] < two_sigma_range[i, 1]).sum() / len(userscores)
    print('Part %i --> %.4f of total records lie within 2 sigma range' % (i+1, foo))
# Part 1 --> 0.9885 of total records lie within 2 sigma range
# Part 2 --> 0.9809 of total records lie within 2 sigma range
# Part 3 --> 0.9858 of total records lie within 2 sigma range
# Part 4 --> 0.9868 of total records lie within 2 sigma range
# Part 5 --> 0.9789 of total records lie within 2 sigma range
# Part 6 --> 0.9849 of total records lie within 2 sigma range
# Part 7 --> 0.9874 of total records lie within 2 sigma range
# We can safely clip the part userscores to within their respective 2 sigma range
# Pairwise correlations among part scores for processed users only.
corr_coef_ = np.corrcoef(userscores[userscores[:, 8] == 1, 1:8], rowvar = False)
# [1.        , 0.16220016, 0.22865256, 0.25533779, 0.16945078, 0.16839158, 0.14140076],
# [0.16220016, 1.        , 0.26734779, 0.24921227, 0.34266616, 0.18226742, 0.1666369 ],
# [0.22865256, 0.26734779, 1.        , 0.52356998, 0.27068343, 0.29656262, 0.32322765],
# [0.25533779, 0.24921227, 0.52356998, 1.        , 0.26399204, 0.32011388, 0.36182182],
# [0.16945078, 0.34266616, 0.27068343, 0.26399204, 1.        , 0.34576019, 0.28384395],
# [0.16839158, 0.18226742, 0.29656262, 0.32011388, 0.34576019, 1.        , 0.39792441],
# [0.14140076, 0.1666369 , 0.32322765, 0.36182182, 0.28384395, 0.39792441, 1.        ]
# There is no meaningful pairwise correlation amongst the part scores
pca_ = PCA().fit(userscores[userscores[:, 8] == 1, 1:8])
print("Variance explained by principal components --> \n", pca_.explained_variance_ratio_)
# [0.57991909, 0.15799407, 0.06727159, 0.05358795, 0.05127978, 0.04694441, 0.04300311]
# The combined variance explained by the two principal components point to likely interaction between
# the scores when building a parametric linear model.

#%% Questions
# Columns (as used below): 1 = part id, 2 = attempt count, 3 = correct-answer probability.
ques = np.genfromtxt(DATAPATH + 'ques.csv', delimiter = ',')
len(ques[ques[:, 2] == 0])
# 4 questions remain unasked after 145M transactions
np.mean(ques[ques[:, 2] != 0, 3])
# For the questions attempted the mean probability of answering is 0.71
# Correct Attempt Prob
# Histogram of correct-answer probability, one panel per part (1..7).
fig, axs = plt.subplots(nrows = 4, ncols = 2, figsize = (10, 15))
COL = 1
for r in range(4):
    for c in range(2):
        if COL < 8:
            axs[r, c].hist(ques[ques[:, 1] == COL, 3], bins = 100)
            COL += 1
plt.savefig('./foo.jpg')
plt.show()

#%% Lectures
lecs = np.genfromtxt(DATAPATH + 'lecs.csv', delimiter = ',')
len(lecs[lecs[:, 2] == 0])
# 3 lectures remain without a single view after 145M transactions
| [
"sanjeev_gadre@hotmail.com"
] | sanjeev_gadre@hotmail.com |
f1232d4f83383a9e34a6614c7c7bf33ef983e353 | b24d392755dabd84fb773c90a3bf1e03d0cd11c5 | /Microsoft/SubarraySumEqualsK.py | 35bf3f4d1cdfc0d96cf9e71411f63a675a7eff14 | [] | no_license | Shivani161992/Leetcode_Practise | 5b884268707c19cc1a3012c0fb2321c58d5147c0 | dd6f9a9ac3365562c606947c082f043e5100df05 | refs/heads/master | 2023-08-16T06:34:22.010913 | 2021-09-16T02:01:58 | 2021-09-16T02:01:58 | 279,668,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | nums = [1, -1, 0]
# Sample input for the demo call at the bottom of the file (expected answer: 3).
k = 0
from typing import List
class Solution:
    """LeetCode 560: count contiguous subarrays whose elements sum to k."""

    def subarraySum(self, nums: List[int], k: int) -> int:
        """Return the number of contiguous subarrays of *nums* summing to *k*.

        Prefix-sum + hash-map technique: while scanning, `seen[s]` counts how
        many prefixes had sum `s`; a previous prefix equal to `running - k`
        means the gap between it and the current position sums to k.
        O(n) time, O(n) space.
        """
        # BUG FIX: the original returned from an O(n^2) brute-force branch for
        # every non-empty input, leaving this hash-map solution unreachable.
        count = 0
        running = 0
        seen = {0: 1}  # empty prefix, so subarrays starting at index 0 count
        for value in nums:
            running += value
            count += seen.get(running - k, 0)
            seen[running] = seen.get(running, 0) + 1
        return count
# Demo: with nums=[1, -1, 0] and k=0 this prints 3.
obj=Solution()
print(obj.subarraySum(nums, k))
| [
"shivani@Shivanis-MacBook-Pro.local"
] | shivani@Shivanis-MacBook-Pro.local |
140ce789863a4667d8df69e670d9b4130ad34c0c | c50fd00072603c18c0e17eba1b087e2e5e1b1816 | /trainer/trainer.py | 152eab0f39d1eed74acce37d7ab52a1cee22c87b | [
"MIT"
] | permissive | wzhiyuyu/Wave-U-Net-for-SpeechEnhancement | de934dc83572729a573cb2225e4f98b2cf2f6f45 | 308a2be9d91d931f09e873b19c3d67315b9b5962 | refs/heads/master | 2022-09-26T06:08:34.870718 | 2020-05-29T07:26:19 | 2020-05-29T07:26:19 | 266,736,246 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,962 | py | import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import torch
from trainer.base_trainer import BaseTrainer
from util.utils import compute_STOI, compute_PESQ
plt.switch_backend('agg')
class Trainer(BaseTrainer):
    """Speech-enhancement trainer: one loss pass per train epoch, STOI/PESQ
    evaluation plus TensorBoard audio/waveform/spectrogram logging per
    validation epoch. Device, model, writer, optimizer etc. come from
    BaseTrainer.
    """
    def __init__(
            self,
            config,
            resume: bool,
            model,
            loss_function,
            optimizer,
            train_dataloader,
            validation_dataloader,
    ):
        super(Trainer, self).__init__(config, resume, model, loss_function, optimizer)
        self.train_data_loader = train_dataloader
        self.validation_data_loader = validation_dataloader

    def _train_epoch(self, epoch):
        """Run one full pass over the training set and log the mean loss."""
        loss_total = 0.0
        for i, (mixture, clean, name) in enumerate(self.train_data_loader):
            mixture = mixture.to(self.device)
            clean = clean.to(self.device)

            self.optimizer.zero_grad()
            enhanced = self.model(mixture)
            loss = self.loss_function(clean, enhanced)
            loss.backward()
            self.optimizer.step()

            loss_total += loss.item()

        # Average the accumulated loss over the number of batches.
        dl_len = len(self.train_data_loader)
        self.writer.add_scalar(f"Train/Loss", loss_total / dl_len, epoch)

    @torch.no_grad()
    def _validation_epoch(self, epoch):
        """Enhance the validation set, log audio/plots, and return a score.

        Returns (STOI + normalized PESQ) / 2 computed between clean and
        enhanced speech, averaged over the set.
        """
        visualize_audio_limit = self.validation_custom_config["visualize_audio_limit"]
        visualize_waveform_limit = self.validation_custom_config["visualize_waveform_limit"]
        visualize_spectrogram_limit = self.validation_custom_config["visualize_spectrogram_limit"]
        sample_length = self.validation_custom_config["sample_length"]

        stoi_c_n = []  # clean and noisy
        stoi_c_d = []  # clean and denoisy
        pesq_c_n = []
        pesq_c_d = []

        for i, (mixture, clean, name) in enumerate(self.validation_data_loader):
            assert len(name) == 1, "Only support batch size is 1 in enhancement stage."
            name = name[0]

            # [1, 1, T]
            mixture = mixture.to(self.device)
            clean = clean.to(self.device)

            # Input of model should fixed length
            # NOTE: a trailing chunk shorter than sample_length is discarded,
            # so up to sample_length-1 samples at the end are never enhanced
            # (min_len truncation below keeps the three signals aligned).
            mixture_chunks = torch.split(mixture, sample_length, dim=2)
            if mixture_chunks[-1].shape[-1] != sample_length:
                mixture_chunks = mixture_chunks[:-1]

            enhanced_chunks = []
            for chunk in mixture_chunks:
                enhanced_chunks.append(self.model(chunk).detach().cpu())

            enhanced = torch.cat(enhanced_chunks, dim=2)

            # Back to numpy array
            mixture = mixture.cpu().numpy().reshape(-1)
            enhanced = enhanced.numpy().reshape(-1)
            clean = clean.cpu().numpy().reshape(-1)

            min_len = min(len(mixture), len(clean), len(enhanced))
            mixture = mixture[:min_len]
            clean = clean[:min_len]
            enhanced = enhanced[:min_len]

            # Visualize audio
            if i <= visualize_audio_limit:
                self.writer.add_audio(f"Speech/{name}_Noisy", mixture, epoch, sample_rate=16000)
                self.writer.add_audio(f"Speech/{name}_Enhanced", enhanced, epoch, sample_rate=16000)
                self.writer.add_audio(f"Speech/{name}_Clean", clean, epoch, sample_rate=16000)

            # Visualize waveform
            if i <= visualize_waveform_limit:
                fig, ax = plt.subplots(3, 1)
                for j, y in enumerate([mixture, enhanced, clean]):
                    ax[j].set_title("mean: {:.3f}, std: {:.3f}, max: {:.3f}, min: {:.3f}".format(
                        np.mean(y),
                        np.std(y),
                        np.max(y),
                        np.min(y)
                    ))
                    librosa.display.waveplot(y, sr=16000, ax=ax[j])
                plt.tight_layout()
                self.writer.add_figure(f"Waveform/{name}", fig, epoch)

            # Visualize spectrogram
            # Magnitudes computed for all items even though only the first
            # visualize_spectrogram_limit figures are logged.
            noisy_mag, _ = librosa.magphase(librosa.stft(mixture, n_fft=320, hop_length=160, win_length=320))
            enhanced_mag, _ = librosa.magphase(librosa.stft(enhanced, n_fft=320, hop_length=160, win_length=320))
            clean_mag, _ = librosa.magphase(librosa.stft(clean, n_fft=320, hop_length=160, win_length=320))

            if i <= visualize_spectrogram_limit:
                fig, axes = plt.subplots(3, 1, figsize=(6, 6))
                for k, mag in enumerate([
                    noisy_mag,
                    enhanced_mag,
                    clean_mag,
                ]):
                    axes[k].set_title(f"mean: {np.mean(mag):.3f}, "
                                      f"std: {np.std(mag):.3f}, "
                                      f"max: {np.max(mag):.3f}, "
                                      f"min: {np.min(mag):.3f}")
                    librosa.display.specshow(librosa.amplitude_to_db(mag), cmap="magma", y_axis="linear", ax=axes[k], sr=16000)
                plt.tight_layout()
                self.writer.add_figure(f"Spectrogram/{name}", fig, epoch)

            # Metric
            stoi_c_n.append(compute_STOI(clean, mixture, sr=16000))
            stoi_c_d.append(compute_STOI(clean, enhanced, sr=16000))
            pesq_c_n.append(compute_PESQ(clean, mixture, sr=16000))
            pesq_c_d.append(compute_PESQ(clean, enhanced, sr=16000))

        get_metrics_ave = lambda metrics: np.sum(metrics) / len(metrics)
        # The Chinese tag below means "metric averages".
        self.writer.add_scalars(f"评价指标均值/STOI", {
            "clean 与 noisy": get_metrics_ave(stoi_c_n),
            "clean 与 denoisy": get_metrics_ave(stoi_c_d)
        }, epoch)
        self.writer.add_scalars(f"评价指标均值/PESQ", {
            "clean 与 noisy": get_metrics_ave(pesq_c_n),
            "clean 与 denoisy": get_metrics_ave(pesq_c_d)
        }, epoch)

        # _transform_pesq_range (from BaseTrainer) maps PESQ onto a comparable
        # scale before averaging with STOI.
        score = (get_metrics_ave(stoi_c_d) + self._transform_pesq_range(get_metrics_ave(pesq_c_d))) / 2
        return score
| [
"wzhiyuyu@gmail.com"
] | wzhiyuyu@gmail.com |
96604c74722e0f980e80c30363a5108a1675cce7 | 0cb4ee4094ab54000eadb1788527db5d40f12f2b | /camp_materials/008_functions.py | c4ce9684932e386a74884802a66050977c858f80 | [] | no_license | wirelessjeano/learnpython | a821fbbf0bd0c191d8b0575e0569fbeb18d3419d | 522c072ada6ac8664dbdf462ac396c364e39b10a | refs/heads/master | 2022-01-22T06:51:29.651576 | 2019-02-23T19:22:16 | 2019-02-23T19:52:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,623 | py | # FUNCTIONS
'''
Provide example of complicated program ppl use everyday.
Explain that it's made up of multiple modules of code, breaking a big problem into smaller problems,
each with its own purpose to fulfill a certain task. And once you've made a module for a specific task,
it's easy to use it again and again in different places.
In Python, these modules that complete subtasks are called functions.
'''
# Teaching script: the quirks below (duplicated definitions, statements that
# would raise) are intentional classroom demonstrations — do not "fix" them.
def greet_you():
    your_name=input("What's your name? ")
    print("Hello " + your_name)
    print("My name is Butter")
'''
greet_you()

# Functions need to be called
# Explain what happens when a function is called

def greet_you_again(name): # These input parameters are not actual variables, they are placeholders
    print("Hello "+ name)

greet_you_again(input("What's your name? "))

# Parameters vs Arguments

def combine_words(word1, word2):
    return word1 + word2

print(combine_words('rain','bow'))

def combine_numbers(num1, num2):
    return num1 + num2

print(combine_numbers(1,2))

# Wait, what's the difference between the last two functions?
'''
# Solve for x in ax+b=c
def solve_for_x(a,b,c):
    return (c-b)/a

# 2x+3=10
print("If 2x+3=10, then x=", solve_for_x(2, 3, 10))
# Oh what if it's 3x-5=12?
x= solve_for_x(3,-5,12) # function called is replaced by what's returned by it
print("If 3x-5=12, then x=", x)

# Note that functions without a return statement are, by default, returning None (the NoneType data)
print(greet_you())
print(type(greet_you()))

# It's important to write comment for functions
def get_years_alive(tree_rings):
    '''
    tree_rings, integer
    Calculates how many years a tree has been alive for.
    Returns a float, the ring count scaled by 1.2.
    '''
    return tree_rings*1.2

# Talk about SCOPES of parameters and variables
'''
"You can access a variable inside a function without defining it inside a function. As long
as a variable with that name exists in the main program scope....
You cannot assign a value to a variable inside a function without defining it inside
function first."
---- quoted from the book Get Programming: Learn to Code with Python from Manning
'''
# What happens here?
L = 4
def printL():
    print(L)
# L = 6
printL()
# -> prints 4: the function reads the module-level L.

# What happens here?
def printQ():
    q = 12
    print(q)
#print(q)
# (the commented print(q) would raise NameError: q is local to printQ)

# What about here?
def set_h():
    h = 1
h = 0
set_h()
print(h)
# -> prints 0: set_h assigns its own local h, the module-level h is untouched.

# What about here?
h = 0
def set_h():
    h = 1
set_h()
print(h)
# -> still prints 0, for the same reason.

# And what's happeing here?
x = 2
def change(variable):
    variable = "It's been changed"
change(x)
print(x)
# -> prints 2: rebinding the parameter does not affect the caller's x.

# Functions have access to variables defined in the main program
# The main program can not access variables defined in the function
| [
"omkar.konaraddi@gmail.com"
] | omkar.konaraddi@gmail.com |
9a1f84ef383d0d3a742276e3b41b88ab05eb18a3 | c83d9f38678d76018a5930fb31e938157e637460 | /Services/Userservice.py | d38eef132192e5ef8302eee3d162015e104ad259 | [] | no_license | PaiEllen/Weibo | 7a155d39f00ff7b86249afb9ae29292a836f07a5 | 6c401affafa5c7f299598983968cb33d8b6d9271 | refs/heads/master | 2020-02-26T15:41:00.744011 | 2016-10-12T03:30:05 | 2016-10-12T03:30:05 | 70,659,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import abc
class Userservice:
def check_login(self):
pass
def register(self, username, password, email):
pass
def get_user_info(self, nid):
pass
def get_fans_follow_count(self, userid):
pass
class IUserRepertory(metaclass=abc):
# @abc.abstractmethod
# def check_login(self):
# pass
@abc.abstractmethod
def register(self,username,password,email):
pass
@abc.abstractmethod
def get_user_info(self,nid):
pass
@abc.abstractmethod
def existence(self,username):
pass
@abc.abstractmethod
def get_fans_follow_count(self,userid):
pass
@abc.abstractmethod
def search_user(self,keyword):
pass | [
"ellen_pai@163.com"
] | ellen_pai@163.com |
90fa9d60a31619b8f6dcc62b48a721e9613e2b11 | 596e92d0d484b6e7eee6d322e72e52748fdeaa5d | /test/test_nba_odds_betting_market.py | f8b473cf4cf0f70b059897d47f0677fe275d8489 | [] | no_license | scottypate/sportsdata | f5f61ddc7eb482883f93737c6ce73dd814ed4336 | a07955ab50bf4fff1ce114ed9895095ff770c473 | refs/heads/main | 2023-08-18T16:51:56.452678 | 2021-10-22T12:44:08 | 2021-10-22T12:44:08 | 420,062,350 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # coding: utf-8
"""
NBA v3 Odds
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import sportsdata.nba_odds
from sportsdata.nba_odds.models.nba_odds_betting_market import NbaOddsBettingMarket # noqa: E501
from sportsdata.nba_odds.rest import ApiException
class TestNbaOddsBettingMarket(unittest.TestCase):
"""NbaOddsBettingMarket unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNbaOddsBettingMarket(self):
"""Test NbaOddsBettingMarket"""
# FIXME: construct object with mandatory attributes with example values
# model = sportsdata.nba_odds.models.nba_odds_betting_market.NbaOddsBettingMarket() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"scotty.pate@auth0.com"
] | scotty.pate@auth0.com |
42485305a6f8fb5ff0005ce27a19470c43a61309 | 08ae1e5065ce3c3931356f4db6b54a82a5da1ebe | /other/cj_project/Pra_tools/week_return_可变基准临时版.py | 44ad917629fd9bace508512ac140bb879a7a7042 | [] | no_license | superman666ai/cj_data | d1f527b9bc49a38f2cc99ef3849d13408b271f2d | f4b0e9ec3be02c8900d0300e09df8e52088efc68 | refs/heads/master | 2020-06-21T14:57:30.285849 | 2019-07-22T00:54:15 | 2019-07-22T00:54:15 | 197,486,121 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,300 | py | # encoding=utf-8
"""周收益率相关查询"""
from sql import sql_oracle
from time_tool import Time_tool
import logging
import math
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from season_label import SeasonLabel
sl = SeasonLabel()
class Week_return:
"""周收益率相关类
对外接口:
zhoubodong:波动率
:param code: 代码:str
:param start_date:起始日期 :str
:param end_date: 结束日期:str
:return: float
max_down_fund:最大回测
:param code: 代码:str
:param start_date:起始日期 :str
:param end_date: 结束日期:str
:return: float
performance_stability_fof:业绩稳定
:param code: fof 代码:str
:param start_date:起始日期 :str
:param end_date: 结束日期:str
:return: float
compute_alpha2:计算alpha
:param code: 代码:str
:param start_date:起始日期 :str
:param end_date: 结束日期:str
:return: float
abs_return:获取绝对收益率
:param code: 代码:str
:param start_date:起始日期 :str
:param end_date: 结束日期:str
:return: float
performance_stability:业绩稳定,code
:param code: 代码:str
:param start_date:起始日期 :str
:param end_date: 结束日期:str
:return: float
"""
def __init__(self):
self.cu = sql_oracle.cu
self.cu_pra = sql_oracle.cu_pra_sel
self.t = Time_tool()
self.init_table()
self.init_funcs()
self.year_x = 52
def init_table(self):
"""初始化各种数据库表名"""
self.fof_member_info = 't_fof_member_info'
def init_funcs(self):
"""初始化各种外挂方法"""
classes = [Compute_alpha]
for class_ in classes:
class_(self).add_func()
def set_year_x(self, x):
"""设置计算年化时的系数"""
self.year_x = x
def _get_winning(self, df):
"""
计算胜率
:param df:超额收益
:return: 胜率
"""
# 超额收益大于0的个数/总个数
res = len(df.loc[df['超额收益'] > 0]) / len(df)
return res
def get_week_return_year(self, df):
"""根据传入的周收益表计算年化收益"""
# 周收益平均*系数,默认是52
return_mena = df['超额收益'].mean()
res = return_mena * self.year_x
return res
def get_week_return(self, code: str, start_date: str, end_date: str, types: str = 'fund', jz: str = 'biwi'):
"""周超额收益查询"""
# 首先计算出时间序列
time_df = self.t.get_week_trade_days(start_date, end_date)
pre_day = self.t.get_pre_week_trade_day(start_date)
time_list = list(time_df['日期'])
# 对于第一周的计算 需要用到上一周的数据,所有向列表头部插入了一个日期
time_list.insert(0, pre_day)
if types == 'fund':
df = self.get_week_return_fund(code, time_list, jz)
else:
df = self.get_week_return_fund_fof(code, time_list, jz)
return df
def get_week_return_fund(self, code: str, time_list: list, jz: str = 'biwi'):
"""
fund 周收益查询
:param code: 基金代码
:param time_list: 时间序列
:param js: 基准
:return:
"""
df01 = self.get_week_value_fund(code, time_list)
if jz == 'biwi':
try:
df02 = self.get_week_close_biwi(code, time_list)
except:
df02 = self.get_week_close_ejfl(code,time_list)
else:
df02 = self.get_week_close_biwi(code, time_list)
# 超额收益计算 基金收益率-指数收益率
df = pd.merge(df01, df02, on='日期', how='left')
df['超额收益'] = df['周收益率_x'] - df['周收益率_y']
# 去重
df.dropna(inplace=True)
# time_index = df01.index.values
# return_value = df01['周收益率'].values - df02['周收益率'].values
# df = pd.DataFrame(columns=['时间', '超额收益'])
# df['时间'] = time_index
# df['超额收益'] = return_value
df.set_index('日期', inplace=True)
df = df[['超额收益']]
return df
def get_week_return_fund_fof(self, code: str, time_list: list, jz: str = 'zz800'):
"""
fund 周收益查询 fof 版
:param code: 基金代码
:param time_list: 时间序列
:param js: 基准
:return:
"""
df01 = self.get_week_value_fund_fof(code, time_list)
if jz == 'zz800':
# 中证800
index_code = '000906'
df02 = self.get_week_close_zz800(index_code, time_list)
else:
df02 = self.get_week_close_biwi(code, time_list)
# 超额收益计算 基金收益率-指数收益率
df = pd.merge(df01, df02, on='日期', how='left')
df['超额收益'] = df['周收益率_x'] - df['周收益率_y']
# 去空
df.dropna(inplace=True)
# time_index = df01.index.values
# return_value = df01['周收益率'].values - df02['周收益率'].values
# df = pd.DataFrame(columns=['时间', '超额收益'])
# df['时间'] = time_index
# df['超额收益'] = return_value
df.set_index('日期', inplace=True)
df = df[['超额收益']]
return df
def get_week_value_fund(self, code: str, time_list: list):
"""
获取周净值,算周收益,fund
:param code: 基金代码
:param time_list: 每个交易周中的最后一个交易日,含有起始日期的上一个交易周的最后一个交易日
:return: df index 日期,columns收益率
"""
t1 = time_list[0]
t2 = time_list[-1]
sql_week_value_fund = f"""
select f13_1101,f21_1101 from wind.tb_object_1101 left join wind.tb_object_1090 on f14_1101 = f2_1090
where f16_1090 = '{code}' and (f13_1101 >= '{t1}' and f13_1101 <='{t2}')
order by f13_1101
"""
sql_res = self.cu.execute(sql_week_value_fund).fetchall()
df = pd.DataFrame(sql_res, columns=['日期', '复权单位净值'])
df.set_index('日期', inplace=True)
# 筛选出需要的日期
df = df.reindex(time_list)
# df02 = df['复权单位净值'].pct_change()
df['上周复权单位净值'] = df['复权单位净值'].shift()
df['周收益率'] = (df['复权单位净值'] - df['上周复权单位净值']) / df['上周复权单位净值']
# df = df[['周收益率']]
# 去重,其索引,后期merge会用到日期字段
df.dropna(inplace=True)
df.reset_index(inplace=True)
return df
def get_week_value_fund_fof(self, code: str, time_list: list):
"""
获取周净值,算周收益,fof
:param code: 基金代码
:param time_list: 每个交易周中的最后一个交易日,含有起始日期的上一个交易周的最后一个交易日
:return: df index 日期,columns收益率
"""
t1 = time_list[0]
t2 = time_list[-1]
# sql_week_value_fund = f"""
# select f13_1101,f21_1101 from wind.tb_object_1101 left join wind.tb_object_1090 on f14_1101 = f2_1090
# where f16_1090 = '{code}' and (f13_1101 >= '{t1}' and f13_1101 <='{t2}')
# order by f13_1101
# """
sql_week_value_fund = f"""
select tradedate,closeprice from t_fof_value_info where fundid = '{code}'
and tradedate >= '{t1}' and tradedate <= '{t2}'
order by tradedate
"""
print(sql_week_value_fund)
sql_res = self.cu_pra.execute(sql_week_value_fund).fetchall()
df = pd.DataFrame(sql_res, columns=['日期', '复权单位净值'])
# print(df)
df.set_index('日期', inplace=True)
# 筛选出需要的日期
df = df.reindex(time_list)
# df02 = df['复权单位净值'].pct_change()
df['上周复权单位净值'] = df['复权单位净值'].shift()
df['周收益率'] = (df['复权单位净值'] - df['上周复权单位净值']) / df['上周复权单位净值']
# df = df[['周收益率']]
# 去重,其索引,后期merge会用到日期字段
df.dropna(inplace=True)
df.reset_index(inplace=True)
# print(df)
return df
def get_week_close_biwi(self, code: str, time_list: list):
"""
取基准,算周收益,biwi
这里要做异常处理,基准不能有空!基准含有空直接报错!
:param code: 基金代码
:param time_list: 每个交易周中的最后一个交易日,含有起始日期的上一个交易周的最后一个交易日
:return:
"""
t1 = time_list[0]
t2 = time_list[-1]
sql_code = code + 'BI.WI'
sql_week_close_bibw = f"""
select trade_dt,s_dq_close from wind.chinamutualfundbenchmarkeod
where s_info_windcode = '{sql_code}' and (trade_dt >= '{t1}' and trade_dt <='{t2}')
order by trade_dt
"""
sql_res = self.cu.execute(sql_week_close_bibw).fetchall()
assert sql_res, f'{code}基准查询结果为空,请改变基准'
df = pd.DataFrame(sql_res, columns=['日期', '收盘价'])
assert df.iloc[0][0] == t1, f'{code}基准查询结果含有空值,请改变基准'
df.set_index('日期', inplace=True)
# 筛选出需要的日期
df = df.reindex(time_list)
# 计算收益率 close2-close1/close1
df['周收益率'] = df['收盘价'].pct_change()
# 去空值
df.dropna(inplace=True)
# df = df[['周收益率']]
# 去索引,后面merge 会用到日期字段
df.reset_index(inplace=True)
return df
def get_week_close_zz800(self,index_code:str,time_list:list):
"""
取基准,算周收益,biwi
这里要做异常处理,基准不能有空!基准含有空直接报错!
:param code: 基金代码
:param time_list: 每个交易周中的最后一个交易日,含有起始日期的上一个交易周的最后一个交易日
:return:
"""
t1 = time_list[0]
t2 = time_list[-1]
sql_week_close_zz800 = f"""
select f2_1425,f7_1425 from wind.tb_object_1425 left join wind.tb_object_1090 on f1_1425 = f2_1090
where f16_1090 = '{index_code}'
and (f2_1425 >='{t1}' and f2_1425 <= '{t2}')
and f4_1090 = 'S'
order by f2_1425
"""
# print(sql_week_close_zz800)
sql_res = self.cu.execute(sql_week_close_zz800).fetchall()
assert sql_res, f'{code}基准查询结果为空,请改变基准'
df = pd.DataFrame(sql_res, columns=['日期', '收盘价'])
assert df.iloc[0][0] == t1, f'{code}基准查询结果含有空值,请改变基准'
df.set_index('日期', inplace=True)
# 筛选出需要的日期
df = df.reindex(time_list)
# 计算收益率 close2-close1/close1
df['周收益率'] = df['收盘价'].pct_change()
# 去空值
df.dropna(inplace=True)
# df = df[['周收益率']]
# 去索引,后面merge 会用到日期字段
df.reset_index(inplace=True)
return df
def get_week_close_ejfl(self,code:str,time_list:list):
"""
判断二级分类并且取出相应基准的周收盘
:param codeLStr: 基金代码
:param time_list: 时间列表
:return:
"""
ejfl = self.get_ejfl(code,time_list)
index = self.change_ejfl_to_index(ejfl,time_list)
index['日期'] = time_list
index.rename(columns = {'市场组合收益率':'周收益率'},inplace = True)
return index
def get_ejfl(self,code:str,time_list:list):
"""根据代码和输入周期,计算二级分类"""
end_date = time_list[-1]
sql_string = f'''
select * from (
select ejfl from t_fund_classify_his
where rptdate <= {end_date}
and cpdm = {code}
order by rptdate
)
where rownum=1
'''
sql_res = self.cu_pra.execute(sql_string).fetchall()
if sql_res:
res = sql_res[0][0]
else:
res = None
return res
def change_ejfl_to_index(self,fund_type:str,time_list:list):
"""
输入二级分类和时间轴
:param fund_type: 二级分类
:param time_list: 时间轴
:return:
"""
res = sl.get_market(fund_type,time_list)
return res
def unpack_fof(self, fof,start_date,end_date):
"""解包fof"""
sql_unpakc_fof = f"""
select memberfundid,weight from {self.fof_member_info}
where fundid = '{fof}'
"""
sql_res = self.cu_pra.execute(sql_unpakc_fof).fetchall()
df = pd.DataFrame(sql_res, columns=['X', 'P'])
return df
def get_fund_price(self, code, start_date, end_date):
"""计算波动和回测时用到的sql查询方法"""
sql = '''
select
f13_1101 as 截止日期, f21_1101 as 复权单位净值
from
wind.tb_object_1101
left join wind.tb_object_1090
on f2_1090 = f14_1101
where
F16_1090= '%(code)s'
and
F13_1101 >= '%(start_date)s'
and
f13_1101 <= '%(end_date)s'
''' % {'end_date': end_date, 'code': code, 'start_date': start_date}
fund_price = pd.DataFrame(self.cu.execute(sql).fetchall(), columns=['截止日期', '复权单位净值'])
return fund_price
# ************ 下面是对外的接口 *************************
def performance_stability(self, code: str, start_date: str, end_date: str, types: str = 'fund', jz: str = 'biwi'):
"""
计算顺率
:param code:代码
:param start_date:开始日期
:param end_date: 结束日期
:param types: 代码类型,默认 fund
:param jz: 指数类型,默认 biwi
:return:
"""
if types == 'fund':
df_return = self.get_week_return(code, start_date, end_date, types, jz)
res = self._get_winning(df_return)
elif types == 'fof':
# 这里的js是自己重新定义的,理想状态是在外面的js定义好
jz = 'zz800'
df_return = self.get_week_return(code, start_date, end_date, types, jz)
res = self._get_winning(df_return)
else:
res = 0.0
return res
def zhoubodong(self, code='163807', start_date='20190101', end_date='20190225'):
fund_price = self.get_fund_price(code, start_date, end_date)
fund_price2 = fund_price.sort_values(by=['截止日期']).reset_index(drop=True)
fund_price2['fund_return'] = fund_price2.复权单位净值.diff() / fund_price2.复权单位净值.shift(1)
fund_price2.dropna(axis=0, inplace=True)
fund_price2.reset_index(drop=True, inplace=True)
zhou_bodong = fund_price2.fund_return.std() * (math.sqrt(250))
return zhou_bodong
# 计算最大回测
def max_down_fund(self, code='163807', start_date='20150528', end_date='20190225'):
# 输出单只基金的最大回撤,返回一个float数值
# 提取复权净值
fund_price = self.get_fund_price(code, start_date, end_date)
fund_price2 = fund_price.sort_values(by=['截止日期']).reset_index(drop=True)
price_list = fund_price2['复权单位净值'].tolist()
i = np.argmax((np.maximum.accumulate(price_list) - price_list) / np.maximum.accumulate(price_list)) # 结束位置
if i == 0:
max_down_value = 0
else:
j = np.argmax(price_list[:i]) # 开始位置
max_down_value = (price_list[j] - price_list[i]) / (price_list[j])
return -max_down_value
def performance_stability_fof(self, fof: str, start_date: str, end_date: str):
"""
业绩稳定性
:param code: 基金代码
:param start_date: 起始时间
:param end_date: 结束时间
:param types: 代码类型,默认 fund基金
:param jz: 基准指标,默认 biwi
:return:
"""
# 先计算时间列表
time_df = self.t.get_week_trade_days(start_date, end_date)
pre_day = self.t.get_pre_week_trade_day(start_date)
time_list = list(time_df['日期'])
time_list.insert(0, pre_day)
# 再解包产品集D,得到x1,x2,x3和 p1,p2,p3
df_D = self.unpack_fof(fof,start_date,end_date)
x_list = df_D['X'].values
p_list = df_D['P'].values
# print('x_list:',x_list)
# print('p_list:',p_list)
# 计算每个x的胜率
win_list = []
for x in x_list:
df_week_return = self.get_week_return(x, start_date, end_date)
winning = self._get_winning(df_week_return)
win_list.append(winning)
# 对上面的结果做加权平均
print('win_lilst:',win_list)
res = np.average(win_list, weights=p_list)
return res
def abs_return(self, fof: str, start_date: str, end_date: str):
"""获取绝对收益率"""
print('待开发')
return 0.0
pass
class Compute_alpha:
"""计算alpha"""
def __init__(self, wr: Week_return):
self.wr = wr
self.t = self.wr.t
def add_func(self):
"""添加方法到wr中去"""
self.wr.compute_alpha2 = self.compute_alpha2
def compute_alpha2(self,code, start_date, end_date):
"""
计算每周收益率
单基金的复权单位净值 计算周收益率
与 行业基准的复权收盘价收益率 做线性回归
求得 alpha2
:param code:
:param start_date:
:param end_date:
:return:
"""
trade_dates = self.t.get_week_trade_days(start_date, end_date)
wr = self.wr.get_week_return(code, start_date, end_date)
time_df = self.t.get_week_trade_days(start_date, end_date)
pre_day = self.t.get_pre_week_trade_day(start_date)
time_list = list(time_df['日期'])
time_list.insert(0, pre_day)
df01 = self.wr.get_week_value_fund(code, time_list)
df02 = self.wr.get_week_close_biwi(code, time_list)
df01.set_index('日期')
df01 = df01['周收益率']
df01 = pd.DataFrame(df01)
df01.columns = ['基金周收益率']
df02.set_index('日期')
df02 = df02['周收益率']
df02 = pd.DataFrame(df02)
df02.columns = ['指数周收益率']
market = df01.join(df02)
market.columns = ['基金收益率', '市场组合收益率']
df = market
df = df.dropna(axis=0, how='any')
result = self.compute_alpha(df)
logging.info('code: {}, from {} to {}'.format(code, start_date, end_date))
logging.info('alpha2: {}'.format(result))
return result
def compute_alpha(self,df):
X = np.array(df['市场组合收益率']).reshape(df['市场组合收益率'].shape[0], 1)
regr = LinearRegression()
regr.fit(X, df['基金收益率'])
a = regr.intercept_ * 52
return a
year_index_query = Week_return()
zhoubodong = year_index_query.zhoubodong
max_down_fund = year_index_query.max_down_fund
performance_stability_fof = year_index_query.performance_stability_fof
performance_stability = year_index_query.performance_stability
compute_alpha2 = year_index_query.compute_alpha2
abs_return = year_index_query.abs_return
if __name__ == '__main__':
wr = Week_return()
start_date = '20151220'
end_date = '20181231'
# code = '20I502434BFOF2'
# code = '048574593FFOF2'
# code = '15FD60FOF1'
# code = '15FD60FOF1'
# code = '15FD60FOF1'
# aaa = wr.get_week_return_fund('003974',[ '2018%02d01'%i for i in range(1,10)])
# print(aaa)
# print('code:',code)
# ps = performance_stability_fof(code,start_date,end_date)
# print(ps)
# code = '15FD60FOF1'
# print('code:',code)
# ps = performance_stability(code,start_date,end_date)
# print(ps)
# alpha = wr.compute_alpha2(code,start_date,end_date)
# print(alpha)
# win01 = wr.performance_stability(code,start_date,end_date,types='fof')
# print(win01)
# a = wr.get_week_return('202801', start_date, end_date)
# print(a)
# res = wr.get_week_return_year(a)
# print('年化收益率', res)
# winning = wr.get_winning(a)
# print('胜率:', winning)
#
# aaa = wr.performance_stability('202801',start_date,end_date)
# print(aaa)
aaa = wr.get_week_value_fund('202801',["20180104", '20180323'])
print(aaa)
| [
"keepingoner@163.com"
] | keepingoner@163.com |
ada171b8de4f52b845ee161416f13bcbc3d876b7 | 40b9bb17a66ae95d46a932091f74c66642d27534 | /Phases/Phases/Mesh/Generation/MeshGenerator.py | 7b3b4a3c06877aba139501e6cbe0cfee7c3cce62 | [] | no_license | liamlong01/PhasesGit | 041c1213ecd25755f02966989f016167b494ff97 | ab8830aa524400bd66439cff9dd6065f57eade1e | refs/heads/master | 2021-05-14T12:21:54.124316 | 2020-02-18T22:38:44 | 2020-02-18T22:38:44 | 116,406,354 | 0 | 1 | null | 2018-01-30T17:43:59 | 2018-01-05T16:45:44 | C++ | UTF-8 | Python | false | false | 4,791 | py | from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from .NodeGenerator import NodeGenerator
from .EdgeGenerator import EdgeGenerator
from .RectGenerator import RectGenerator
#from .MeshCreator import MeshCreator
from math import log
from Phases.Mesh.Mesh import Mesh
class MeshGenerator(QWidget):
meshGenerated = pyqtSignal(Mesh)
def __init__(self, parent = None):
QWidget.__init__(self, parent)
self.display = GeneratorDisplay(parent = self)
controlWidget = QTabWidget()
#node editor
nodeEdit = NodeGenerator(self.display,parent = self)
controlWidget.addTab(nodeEdit,'Nodes')
#edge editor
edgeEdit = EdgeGenerator(nodeEdit.nodes, self.display,parent = self)
controlWidget.addTab(edgeEdit,'Edges')
#mesh editor
rectCreate = RectGenerator(nodeEdit, edgeEdit, self.display, parent = self)
rectCreate.meshCreated.connect(self.meshGenerated.emit)
controlWidget.addTab(rectCreate,'Rectangle')
#mesh editor
meshCreate = QWidget()
controlWidget.addTab(meshCreate,'Mesh')
#setting up the main display
nodeEdit.nodeEdited.connect(self.display.nodeEdited)
nodeEdit.nodeEdited.connect(edgeEdit.nodeEdited)
nodeEdit.nodeDeleted.connect(edgeEdit.nodeDeleted)
edgeEdit.edgeEdited.connect(self.display.edgeEdited)
self.display.mouseMoved.connect(nodeEdit.position.setText)
#creating the layout
layout = QHBoxLayout(self)
layout.addWidget(controlWidget)
layout.addWidget(self.display)
layout.setStretch(1, 4)
class GeneratorDisplay(QWidget):
mouseMoved = pyqtSignal(str)
def __init__(self,parent = None):
QWidget.__init__(self, parent)
self.setMouseTracking(True) #track mouse position without mouse button pressed
self.nodes = []
self.edges=[]
self.transX = 0
self.transY = 0
self.scaleX = 1
self.scaleY = 1
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.scale(1,-1)
painter.translate(self.transX,self.transY)
painter.setBrush(Qt.black)
painter.setPen(Qt.black)
for node in self.nodes:
x = node.x*self.scaleX
y = node.y*self.scaleY
painter.drawEllipse(QRectF(x-3,y-3,6,6))
#drawing number labels for each node
#the axes need to be filpped twice so that the text
#is oriented in the correct direction
ndigits = round(log(self.nodes.index(node)+1))+ 1 #number of digits in the number to be placed
#if the number has multiple digits it needs to be moved over more
painter.scale(1,-1)
painter.drawText(x-8*ndigits,-y+2, str(self.nodes.index(node)+1))
painter.scale(1,-1)
for edge in self.edges:
edge.draw(painter)
painter.end()
def mouseMoveEvent(self,event):
self.mouseMoved.emit('x : %s y : %s'%self.getRealXY(event.x(),event.y()))
def nodeEdited(self):
if not self.nodes:
return
self.update()
maxX = max([node.x for node in self.nodes])
minX = min([node.x for node in self.nodes])
deltX = maxX-minX
maxY = max([node.y for node in self.nodes])
minY = min([node.y for node in self.nodes])
deltY=maxY-minY
if not maxX == minX:
self.scaleX = 600/(deltX)
# if maxX==0:
# deltX = 0.00001
# else:
# deltX = maxX
if not maxY==minY:
self.scaleY = 600/(deltY)
# if maxY==0:
# deltY = 0.00001
# else:
# deltY = maxY
self.transX = 307- ((deltX)/2+minX)*self.scaleX
self.transY = -307 - ((deltY)/2+minY)*self.scaleY
def edgeEdited(self,edge):
self.update()
def getRealXY(self, qx, qy):
ndigitsX = round(log(self.scaleX/400,10)) + 2
ndigitsY = round(log(self.scaleY/400,10)) + 2
x = round((round(qx)-self.transX)/self.scaleX,ndigitsX)
y = round((round(qy)+self.transY)/self.scaleY*-1,ndigitsY)
return x,y
| [
"liamlong01@gmail.com"
] | liamlong01@gmail.com |
e3b35314a33165821de87d87bffb5fdb98e137d6 | a1515d99a951d0ad9d3533b8117c4c9de6c43375 | /todos/migrations/0001_initial.py | 2a51d34bce9725c3c1cfa2998e7d36eec249938f | [] | no_license | Almyk/todolist | 42743955170ae03b0de9e4bbda24f3f0eb9b2efb | db5b18d61f96bf48547acb9ecc3e9c8dffae0eb0 | refs/heads/master | 2021-09-10T20:14:43.831104 | 2018-04-01T13:29:11 | 2018-04-01T13:36:59 | 108,864,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-27 06:13
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_at', models.DateTimeField(blank=True, default=datetime.datetime.now)),
],
),
]
| [
"hansentommy92@gmail.com"
] | hansentommy92@gmail.com |
19c1b60effc30ea4675492687497dea42615bc19 | d5c54cb9739fe0931deb19f1ddc283d9d7dce152 | /tensorflow/fully_connected/mnist_app.py | 1ef4cf224d51417c2b5b06b71d735e99b9464176 | [] | no_license | interestudy/pythonstudy | 6542c5b43c94c39c8f6182da08710421506a8fe8 | be0b524b9a3e037598e3c1ad947b5ed554d9479f | refs/heads/master | 2021-07-09T17:54:20.941253 | 2020-07-12T23:14:58 | 2020-07-12T23:14:58 | 161,978,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,473 | py | # _*_ coding:utf-8 _*_
# @Author :ran
# @time :2018-12-15 19:30
# @File :mnist_app.py
# @Software :PyCharm
import tensorflow as tf
import numpy as np
from PIL import Image
from fully_connected import mnist_backward, mnist_forward
# 恢复模型
def restore_model(test_pic_arr):
with tf.Graph().as_default() as tg:
x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
y = mnist_forward.forward(x, None)
pre_value = tf.argmax(y, 1)
# 采用滑动平均的方式从训练好的模型中恢复参数值
variable_averages = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# 创建会话 开始测试
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
pre_value = sess.run(pre_value, feed_dict={x: test_pic_arr})
return pre_value
else:
print("No checkpoint file found")
return -1
# 图片预处理
def pre_pic(pic_name):
img = Image.open(pic_name) # 打开图片生成Image对象
re_im = img.resize((28, 28), Image.ANTIALIAS) # 剪裁为28*28像素 抗锯齿
re_im.convert('L').show() # 显示灰度后的图片
im_arr = np.array(re_im.convert('L')) # 彩色转换为黑白 转换为矩阵
threshold = 50
# 遍历每一个像素 转换为1,0的矩阵
for i in range(28):
for j in range(28):
im_arr[i, j] = 255 - im_arr[i, j]
if im_arr[i][j] < threshold:
im_arr[i][j] = 0
else:
im_arr[i][j] = 255
nm_arr = im_arr.reshape(1, 784)
nm_arr = nm_arr.astype(np.float32)
img_ready = np.multiply(nm_arr, 1.0/255.0)
return img_ready
# 读取控制台信息
def application():
test_num = input('input the number of test pictures:')
# print(eval(test_num))
for i in range(eval(test_num)):
test_pic = input('the path of test picture:')
test_pic_arr = pre_pic(test_pic)
pre_value = restore_model(test_pic_arr)
print("The prediction number is:", pre_value)
def main():
application()
if __name__ == '__main__':
main()
| [
"ran@qq.com"
] | ran@qq.com |
f21b03c6db9441eb6e25be828561ed5b5d1373eb | e30df1530f2a61620e223d35abaea48b3595a01d | /python_assignment_partA/question_9_mainfile.py | b2fac73b6fc874ce80e35dcc5ffd556af1178bad | [] | no_license | Aditya149349/lab | e5c8fcf450d9c149b4b69d2e2337a3cac74dce55 | 635b716a7a9eab9ebd3792b1ff464ba6189529be | refs/heads/master | 2020-07-25T05:06:01.625429 | 2019-12-05T04:56:32 | 2019-12-05T04:56:32 | 208,174,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | from file import atomic1
atomic1()
| [
"noreply@github.com"
] | Aditya149349.noreply@github.com |
d8a784d4cc3814c72d3f1c2f31291a37067a93cc | bf902add6952d7f7decdb2296bb136eea55bf441 | /YOLO/.history/pytorch-yolo-v3/video_demo_v1_20201106013910.py | 956c4cf76b2215cc7d0d756950fe68064a06bcd4 | [
"MIT"
] | permissive | jphacks/D_2003 | c78fb2b4d05739dbd60eb9224845eb78579afa6f | 60a5684d549862e85bdf758069518702d9925a48 | refs/heads/master | 2023-01-08T16:17:54.977088 | 2020-11-07T06:41:33 | 2020-11-07T06:41:33 | 304,576,949 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 16,591 | py | from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from util import *
from darknet import Darknet
from preprocess import prep_image, inp_to_image
import pandas as pd
import random
import argparse
import pickle as pkl
import requests
from requests.auth import HTTPDigestAuth
import io
from PIL import Image, ImageDraw, ImageFilter
import play
import csv
import pprint
# Load the LiDAR scan table (rows of angle/range values) exported as CSV.
# utf-8_sig strips a BOM if the file was written by Excel.
with open('csv/Lidar.csv', 'r', encoding="utf-8_sig", newline = '') as f:
    LiDAR = list(csv.reader(f))
def prep_image(img, inp_dim):
    """Prepare a captured BGR frame for the CNN.

    Resizes the frame to (inp_dim, inp_dim), converts BGR->RGB and
    HWC->CHW, scales pixel values to [0, 1] and adds a batch axis.

    Returns (input_tensor, original_frame, (width, height)).
    """
    original = img
    dims = original.shape[1], original.shape[0]
    resized = cv2.resize(original, (inp_dim, inp_dim))
    # BGR -> RGB via the reversed channel slice, then channels-first layout.
    chw = resized[:, :, ::-1].transpose((2, 0, 1)).copy()
    tensor = torch.from_numpy(chw).float().div(255.0).unsqueeze(0)
    return tensor, original, dims
def count(x, img, count):
    """Return `count`, incremented by one when detection `x` is 'no-mask'.

    x:     one detection row; x[-1] is the class index.
    img:   the frame the detection belongs to (unused, kept for interface
           compatibility with write()).
    count: running tally of unmasked faces.

    NOTE(review): the __main__ block rebinds the name `count` to an int,
    shadowing this function there — confirm whether it is still called.
    """
    # The original also unpacked the box corners (x[1:3]/x[3:5]) here but
    # never used them; the dead locals were removed.
    cls = int(x[-1])
    label = "{0}".format(classes[cls])
    print("label:\n", label)
    # Only unmasked faces are tallied.
    if(label=='no-mask'):
        count+=1
        print(count)
    return count
def write(x, img,camId):
    """Draw one detection on `img` and record its centre point.

    Side effects: increments the global `count` for 'no-mask' detections
    and appends the box centre to the global `point[camId]` list.
    Returns the annotated frame.
    """
    global count
    global point
    # Box corners come back as tensors; .int() truncates to pixel coords.
    c1 = tuple(x[1:3].int())
    c2 = tuple(x[3:5].int())
    cls = int(x[-1])
    print(camId, "_c0:", c1)
    print(camId, "_c1:", c2)
    label = "{0}".format(classes[cls])
    print("label:", label)
    # Tally unmasked faces.
    if label == 'no-mask':
        count += 1
        print(count)
    # Remember the bounding-box centre for this camera.
    centre = [(c2[0] + c1[0]) / 2, (c2[1] + c1[1]) / 2]
    point[camId].append(centre)
    color = random.choice(colors)
    cv2.rectangle(img, c1, c2, color, 1)
    # Filled background box sized to the label text, then the label itself.
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
    cv2.rectangle(img, c1, c2, color, -1)
    cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4),
                cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 1)
    return img
def arg_parse():
    """Build and parse the command-line options for the cam demo.

    Returns the argparse.Namespace with confidence, nms_thresh and reso.
    """
    parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo')
    # Minimum objectness score a detection must reach to be kept.
    parser.add_argument("--confidence", dest="confidence",
                        help="Object Confidence to filter predictions",
                        default=0.25)
    # Overlap threshold used by non-maximum suppression.
    parser.add_argument("--nms_thresh", dest="nms_thresh",
                        help="NMS Threshhold", default=0.4)
    # CNN input resolution: larger is more accurate but slower.
    parser.add_argument("--reso", dest='reso',
                        help="Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
                        default="160", type=str)
    return parser.parse_args()
def cvpaste(img, imgback, x, y, angle, scale):
    """Composite `img` onto `imgback`, rotated by `angle` degrees and
    scaled by `scale`, with its centre offset by (x, y) from the centre
    of the background image. Returns the blended BGR image.
    """
    fg_h, fg_w = img.shape[0], img.shape[1]
    bg_h, bg_w = imgback.shape[0], imgback.shape[1]
    half_bg_h = round(bg_h / 2)
    half_bg_w = round(bg_w / 2)
    half_fg_h = round(fg_h / 2)
    half_fg_w = round(fg_w / 2)

    # Copy the foreground into the centre of a black, background-sized canvas.
    canvas = np.zeros((bg_h, bg_w, 3), np.uint8)
    canvas[half_bg_h - half_fg_h:half_bg_h + half_fg_h,
           half_bg_w - half_fg_w:half_bg_w + half_fg_w, :] = \
        img[:half_fg_h * 2, :half_fg_w * 2, :]

    # Rotate and scale about the canvas centre.
    rot = cv2.getRotationMatrix2D((half_bg_w, half_bg_h), angle, scale)
    canvas = cv2.warpAffine(canvas, rot, (bg_w, bg_h))

    # Translate by the requested offset.
    shift = np.float32([[1, 0, x], [0, 1, y]])
    canvas = cv2.warpAffine(canvas, shift, (bg_w, bg_h))

    # Threshold the moved foreground to build a mask of its pixels.
    gray = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # Black out the foreground area in the background, keep only the
    # foreground pixels from the canvas, then merge the two layers.
    bg_only = cv2.bitwise_and(imgback, imgback, mask=mask_inv)
    fg_only = cv2.bitwise_and(canvas, canvas, mask=mask)
    imgpaste = cv2.add(bg_only, fg_only)

    return imgpaste
def cosineTheorem(Lidar, radian1, radian2):
    """Distance between two LiDAR returns via the law of cosines.

    Lidar:   sequence of (angle, range) rows; row values may be strings
             (e.g. rows produced by csv.reader) and are coerced to float.
    radian1, radian2: row indices, also used directly as the beam angles
             in radians — assumes one row per radian step (TODO confirm).

    Returns the chord distance between the two measured points.
    """
    theta = abs(radian1 - radian2)  # angle between the two beams
    # csv.reader yields strings, so coerce ranges before arithmetic.
    r1 = float(Lidar[radian1][1])
    r2 = float(Lidar[radian2][1])
    # Law of cosines: c^2 = a^2 + b^2 - 2ab*cos(theta). The original
    # returned the squared value and left `theta` unused; take the square
    # root so the result really is a distance.
    return math.sqrt(r1 ** 2 + r2 ** 2 - 2 * r1 * r2 * math.cos(theta))
def combinations_count(n, r):
    """Number of ways to choose r items out of n (nCr)."""
    denominator = math.factorial(r) * math.factorial(n - r)
    return math.factorial(n) // denominator
# def beep(freq, dur=100):
# winsound.Beep(freq, dur)
if __name__ == '__main__':
#学習前YOLO
# cfgfile = "cfg/yolov3.cfg" # 設定ファイル
# weightsfile = "weight/yolov3.weights" # 重みファイル
# classes = load_classes('data/coco.names') # 識別クラスのリスト
#マスク学習後YOLO
cfgfile = "cfg/mask.cfg" # 設定ファイル
weightsfile = "weight/mask_1500.weights" # 重みファイル
classes = load_classes('data/mask.names') # 識別クラスのリスト
num_classes = 80 # クラスの数
args = arg_parse() # 引数を取得
confidence = float(args.confidence) # 信頼性の設定値を取得
nms_thesh = float(args.nms_thresh) # 閾値を取得
start = 0
CUDA = torch.cuda.is_available() # CUDAが使用可能かどうか
num_classes = 80 # クラスの数
bbox_attrs = 5 + num_classes
max = 0 #限界人数
num_camera = 1 #camera数
model = [[] for i in range(num_camera)]
inp_dim = [[] for i in range(num_camera)]
cap = [[] for i in range(num_camera)]
ret = [[] for i in range(num_camera)]
frame = [[] for i in range(num_camera)]
img = [[] for i in range(num_camera)]
orig_im = [[] for i in range(num_camera)]
dim = [[] for i in range(num_camera)]
# output = [[] for i in range(num_camera)]
# output = torch.tensor(output)
# print("output_shape\n", output.shape)
for i in range(num_camera):
model[i] = Darknet(cfgfile) #model1の作成
model[i].load_weights(weightsfile) # model1に重みを読み込む
model[i].net_info["height"] = args.reso
inp_dim[i] = int(model[i].net_info["height"])
assert inp_dim[i] % 32 == 0
assert inp_dim[i] > 32
#mixer.init() #初期化
if CUDA:
for i in range(num_camera):
model[i].cuda() #CUDAが使用可能であればcudaを起動
for i in range(num_camera):
model[i].eval()
cap[0] = cv2.VideoCapture(1) #カメラを指定(USB接続)
# cap[1] = cv2.VideoCapture(1) #カメラを指定(USB接続)
# cap = cv2.VideoCapture("movies/sample.mp4")
#cap = cv2.VideoCapture("movies/one_v2.avi")
# Use the next line if your camera has a username and password
# cap = cv2.VideoCapture('protocol://username:password@IP:port/1')
#cap = cv2.VideoCapture('rtsp://admin:admin@192.168.11.4/1') #(ネットワーク接続)
#cap = cv2.VideoCapture('rtsp://admin:admin@192.168.11.4/80')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4:80/video')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4/camera-cgi/admin/recorder.cgi?action=start&id=samba')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4/recorder.cgi?action=start&id=samba')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.5:80/snapshot.jpg?user=admin&pwd=admin&strm=0')
print('-1')
#assert cap.isOpened(), 'Cannot capture source' #カメラが起動できたか確認
img1 = cv2.imread("images/phase_1.jpg")
img2 = cv2.imread("images/phase_2.jpg")
img3 = cv2.imread("images/phase_2_red.jpg")
img4 = cv2.imread("images/phase_3.jpg")
#mixer.music.load("voice/voice_3.m4a")
#print(img1)
frames = 0
count_frame = 0 #フレーム数カウント
flag = 0 #密状態(0:疎密,1:密入り)
start = time.time()
print('-1')
while (cap[i].isOpened() for i in range(num_camera)): #カメラが起動している間
count=0 #人数をカウント
point = [[] for i in range(num_camera)]
for i in range(num_camera):
ret[i], frame[i] = cap[i].read() #キャプチャ画像を取得
if (ret[i] for i in range(num_camera)):
# 解析準備としてキャプチャ画像を加工
for i in range(num_camera):
img[i], orig_im[i], dim[i] = prep_image(frame[i], inp_dim[i])
if CUDA:
for i in range(num_camera):
im_dim[i] = im_dim[i].cuda()
img[i] = img[i].cuda()
for i in range(num_camera):
# output[i] = model[i](Variable(img[i]), CUDA)
output = model[i](Variable(img[i]), CUDA)
#print("output:\n", output)
# output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)
output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)
# print("output", i, ":\n", output[i])
print(output.shape)
"""
# FPSの表示
if (type(output[i]) == int for i in range(num_camera)):
print("表示")
frames += 1
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
# qキーを押すとFPS表示の終了
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
continue
for i in range(num_camera):
output[i][:,1:5] = torch.clamp(output[i][:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
output[i][:,[1,3]] *= frame[i].shape[1]
output[i][:,[2,4]] *= frame[i].shape[0]
"""
# FPSの表示
if type(output) == int:
print("表示")
frames += 1
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
# qキーを押すとFPS表示の終了
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
continue
for i in range(num_camera):
output[:,1:5] = torch.clamp(output[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
output[:,[1,3]] *= frame[i].shape[1]
output[:,[2,4]] *= frame[i].shape[0]
colors = pkl.load(open("pallete", "rb"))
#count = lambda x: count(x, orig_im, count) #人数をカウント
"""
for i in range(num_camera):
list(map(lambda x: write(x, orig_im[i]), output[i]))
print("count:\n",count)
"""
for i in range(num_camera):
list(map(lambda x: write(x, orig_im[i], i), output))
print("count:\n",count)
print("count_frame", count_frame)
print("framex", frame[0].shape[1])
print("framey", frame[0].shape[0])
print("point0",point[0])
#LiDARの情報の人識別
num_person = 0
radian_lists = []
for count, (radian, length) in enumerate(LiDAR):
radian_cam = [[] for i in range(len(point))]
if count % 90 == 0:
radian_list = []
if count < 90:
for num, p in enumerate(point[0]):
radian_cam[num] = p[0] / frame[0].shape[1] * 100
for dif in range(10):
for radi_num in range(len(radian_cam)):
if int(radian)+dif-5 == int(radian_cam[radi_num]):
num_person += 1
radian_list.append(radian)
elif count < 180:
for num, p in enumerate(point[0]):
radian_cam[num] = p[0] / frame[0].shape[1] * 100
for dif in range(10):
if int(radian)+dif-5 == int(radian_cam):
num_person += 1
radian_list.append(radian)
elif count < 270:
for num, p in enumerate(point[0]):
radian_cam[num] = p[0] / frame[0].shape[1] * 100
for dif in range(10):
if int(radian)+dif-5 == int(radian_cam):
num_person += 1
radian_list.append(radian)
else:
for num, p in enumerate(point[0]):
radian_cam[num] = p[0] / frame[0].shape[1] * 100
for dif in range(10):
if int(radian)+dif-5 == int(radian_cam):
num_person += 1
radian_list.append(radian)
radian_lists.append(radian_list)
dis_list = []
for direction in range(4):
if len(radian_lists[direction]) > 1:
# n = combinations_count(len(radian_lists[direction]), 2)
dis_combination = itertools.combinations(radian_lists[direction], 2)
distance = [[] for i in range(len(dis_combination))]
for num_dis, com_list in enumerate(dis_combination):
distance[num_dis] = cosineTheorem(Lidar,com_list[0], com_list[1])
dis_list.append(distance)
#密集判定
close_list = [0] * 4
dense_list = [0] * 4
for direction in range(4):
close = 0 #密接数
dense = 0 #密集数
for dis in distance[distance]:
if dis < 2:
close += 1
close_list[direction] = 1
if close > 1:
dense_list[direction] = 1
print("close_list", close_list)
print("dense_list", dense_list)
# print("point1",point[1])
if count > max:
count_frame += 1
#print("-1")
if count_frame <= 50:
x=0
y=0
angle=20
scale=1.5
for i in range(num_camera):
imgpaste = cvpaste(img1, orig_im[i], x, y, angle, scale)
if flag == 1:
play.googlehome()
flag += 1
#mixer.music.play(1)
elif count_frame <= 100:
x=-30
y=10
angle=20
scale=1.1
if count_frame%2==1:
for i in range(num_camera):
imgpaste = cvpaste(img2, orig_im[i], x, y, angle, scale)
else:
for i in range(num_camera):
imgpaste = cvpaste(img3, orig_im[i], x, y, angle, scale)
if flag == 2:
play.googlehome()
flag += 1
else:
x=-30
y=0
angle=20
scale=1.5
for i in range(num_camera):
imgpaste = cvpaste(img4, orig_im[i], x, y, angle, scale)
if count_frame > 101: #<--2フレームずらす
print("\007") #警告音
time.sleep(3)
if flag == 3:
play.googlehome()
flag += 1
cv2.imshow("frame", imgpaste)
else:
count_frame = 0
flag = 0
#print("-2")
for i in range(num_camera):
cv2.imshow("frame", orig_im[i])
# play.googlehome()
key = cv2.waitKey(1)
# qキーを押すと動画表示の終了
if key & 0xFF == ord('q'):
break
frames += 1
print("count_frame:\n", count_frame)
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
else:
break
| [
"73480314+ryo-jpg@users.noreply.github.com"
] | 73480314+ryo-jpg@users.noreply.github.com |
9aa5e4ce73d948f7cc44b13bac66cdc3226ff951 | 002f474f18f72b90d4fea892c01b8f02a8fe3fb7 | /main.py | c8e32dae005431fca7a4bec2df71b5c853b2e43a | [] | no_license | nhhdmr/fluxP | 834562b7ffb2f79692ebe99a90b7f9060ce56c8b | d5512f9a4d77ee1c58562affbb2a666c17e099ac | refs/heads/master | 2023-03-31T15:57:11.721222 | 2021-03-31T10:22:59 | 2021-03-31T10:22:59 | 337,989,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from GUI import GUI
# Fonction principale
if __name__ == '__main__':
UI = GUI()
UI.window.mainloop()
| [
"nhhdmr@gmail.com"
] | nhhdmr@gmail.com |
6b1a21e187c9c79f07f14c5b2f5a3a03fcf94808 | c7b4baa2779a0fc02e363f07c88b4d1d8cc33ffe | /gahtc/website/migrations/0017_auto_20151121_2057.py | 000afaafe3f0c79142b2d2f5dc90553177043f8f | [] | no_license | NiJeLorg/GAHTC | 6d5c8b2d4b9244c8874ad60c16cd7d55a3535075 | 8ba3360f6e2a8ad0b937a60c3c022eaac4a7cd46 | refs/heads/master | 2022-12-08T19:26:05.800635 | 2018-06-07T02:31:43 | 2018-06-07T02:31:43 | 41,111,268 | 2 | 0 | null | 2022-11-22T01:43:36 | 2015-08-20T18:07:02 | HTML | UTF-8 | Python | false | false | 1,262 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0016_bundlelecturesegments'),
]
operations = [
migrations.AlterField(
model_name='lecturedocuments',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='lectures',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='lecturesegments',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='lectureslides',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='moduledocuments',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='modules',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
]
| [
"jd@nijel.org"
] | jd@nijel.org |
52952ae7085befabbe68130215600bca6fed3043 | 4157041d17b74aa183677ce51a341f8e2324d1ac | /one_agent.py | cadc1dcaa18ee6736da9e1f486a7cc7b8eac759b | [] | no_license | chengbinyao98/all | 5caadfd332187e6de7a4e15ac4d43bfc12b1a584 | cc5a1094ac5f54a4e2d791b227c5a06351bfbab5 | refs/heads/master | 2023-04-25T10:48:01.372718 | 2021-05-17T08:28:44 | 2021-05-17T08:28:44 | 367,523,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,941 | py | import tensorflow as tf
from agnet1.main1 import Main1
from agent2.main2 import Main2
from tool import Tools
from environment import Env
from draw import DRAW
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
env = Env()
tools = Tools()
draw = DRAW()
g1 = tf.Graph()
main1 = Main1(g1)
#
# g2 = tf.Graph()
# main2 = Main2(2, g2)
plt.ion()
plt.figure(figsize=(100, 5)) # 设置画布大小
ax1 = plt.subplot(211)
ax2 = plt.subplot(212)
success = 0
totally = 0
zongzhou = []
while True:
main1.rl.restore_net()
# main2.rl.restore_net()
dic_state = env.reset(tools)
for episodes in range(1000):
dic_action = {}
suss = 0
total = 0
for x in dic_state:
if x not in dic_action:
dic_action[x] = []
if x == 1:
for num in range(len(dic_state[1])):
temp_state = tools.get_list(dic_state[1][num]) # 车组中所有车辆状态合成
temp = main1.rl.real_choose_action(np.array(temp_state)) # 学习到车组的动作组合
dic_action[1].append([int(env.cars_posit[dic_state[1][num][0][3]]) - env.road_range / 2 + temp])
if x == 2:
for num in range(len(dic_state[2])):
temp_state = tools.get_list(dic_state[2][num]) # 车组中所有车辆状态合成
temp1 = []
temp2 = []
for k in range(3):
temp1.append(temp_state[k])
temp2.append(temp_state[k+3])
# temp = main2.rl.real_choose_action(temp_state) # 学习到车组的动作组合
#
# # 车组动作组合转换成车辆的单个动作增量
# add = []
# b = []
# for k in range(2):
# s = temp // env.road_range # 商
# y = temp % env.road_range # 余数
# b = b + [y]
# temp = s
# b.reverse()
# for i in b:
# add.append(i)
action1 = main1.rl.real_choose_action(np.array(temp1))
action2 = main1.rl.real_choose_action(np.array(temp2))
add = [action1,action2]
action = []
for dim in range(2):
action.append(int(env.cars_posit[dic_state[2][num][dim][3]]) - env.road_range / 2 + add[dim])
dic_action[2].append(action)
draw_action = [0 for l in range(len(env.cars_posit))]
for x in dic_state:
for num in range(len(dic_state[x])):
for dim in range(len(dic_state[x][num])):
draw_action[dic_state[x][num][dim][3]] = dic_action[x][num][dim]
draw.piant(env.cars_posit,env.road_range,ax1,env.frame_slot,draw_action)
dic_state_, dic_reward = env.step(dic_action, tools)
print(dic_reward)
for x in dic_reward:
for num in range(len(dic_reward[x])):
for dim in range(x):
suss += dic_reward[x][num][dim]
total += env.beam_slot
print('成功率',suss/total)
dic_state = dic_state_
success += suss
totally += total
zongzhou.append(success/totally)
plt.sca(ax2)
ax2.cla()
plt.plot([i for i in range(len(zongzhou))], zongzhou)
plt.pause(env.frame_slot)
break
| [
"chengbinyao98"
] | chengbinyao98 |
147cfb6b4aa8fb2ea34805eecb5b84ce208f37dd | 4111d229790c4e60ea968631d26c3774d77f5324 | /leetcode/geek-time/rename.py | cb7c6500ff7ad2555e0ba88db6a12d91109a44d9 | [
"MIT"
] | permissive | EricLi404/nb | a6a3e90b79bd8ee2e35d93285dea5650b804e3da | bdfa24db288df791320b5f610981b53aa7963fac | refs/heads/master | 2020-09-12T11:36:33.043546 | 2020-07-31T07:44:59 | 2020-07-31T07:44:59 | 222,411,259 | 0 | 1 | MIT | 2019-12-25T09:14:25 | 2019-11-18T09:31:09 | Python | UTF-8 | Python | false | false | 345 | py | import os
import sys
from glob2 import glob
def rename(d):
f = glob(d + "/*")
for i in f:
if "[" in i and "]" in i:
nm = i.replace("[", "")
nm = nm.replace("]", "_")
os.rename(i, nm)
if __name__ == '__main__':
if len(sys.argv) != 2:
exit(-1)
d = sys.argv[1]
rename(d)
| [
"liyuyang@huami.com"
] | liyuyang@huami.com |
ebee42568d28a2df75386d1e81674a87850e8c27 | f41a34987028eb8a8c41972303833719072c5a28 | /application/core/generators/math_add_medium.py | 2dd5e2e43b757c22bb9e53d99eef79d2742e2f48 | [
"MIT"
] | permissive | opencbsoft/kids-worksheet-generator | c31e1b379c1e6a79e5d55b7396460773f83db43e | be2c020fa3d0646f7fdd36298019c935b7a47f3f | refs/heads/master | 2023-03-06T00:21:01.610352 | 2022-10-26T10:38:39 | 2022-10-26T10:38:39 | 249,153,087 | 1 | 0 | MIT | 2023-02-15T20:02:07 | 2020-03-22T09:44:13 | HTML | UTF-8 | Python | false | false | 1,030 | py | import random
from core.utils import Generator
class Main(Generator):
name = 'Fast medium addition in one minute'
years = [5, 6, 7]
directions = 'Afla cat de multe operatiuni poti rezolva in maxim 1 minut.'
template = 'generators/math.html'
def generate_data(self):
small_numbers = list(range(0, 10))
big_numbers = list(range(11, 80))
results = []
for i in range(self.count):
row = []
while not len(row) == 48:
left_number = random.choice(big_numbers)
right_number = random.choice(small_numbers)
if (left_number, right_number) not in row:
row.append((left_number, right_number))
results.append(row)
self.data = results
return results
def get_context_data(self, iteration):
context = super(Main, self).get_context_data(iteration)
context['items'] = context['items'][iteration]
context['operation'] = '+'
return context
| [
"cristi@cbsoft.ro"
] | cristi@cbsoft.ro |
a82aec7794729478a6d3132d942364c73ac5861b | 42870f8603ea29fdd2c4f39cdfef383f2b04868e | /chapter5/ch5FileExample2.py | a4237351c27d1def3900dcd4ced343fb17eade16 | [] | no_license | NortheastState/CITC1301 | 7de1b575e5c29f6fc8dbfa85d25c48b1e830c8dc | 7641d82cff2652d4dd9459a52125835ba3a50e30 | refs/heads/master | 2021-09-12T21:00:08.790425 | 2018-04-20T17:47:47 | 2018-04-20T17:47:47 | 116,075,069 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | # ===============================================================
#
# Name: David Blair
# Date: 03/10/2018
# Course: CITC 1301
# Section: A70
# Description: Read in a file one line at a time or
# read a file into a list
#
# ===============================================================
def readFromFileUsingReadLines():
try:
# create a file object to write to test.txt file
# note the "r" in the parameter list.
myFile = open("test.txt", "r")
# read in all contents of file
data = myFile.readlines()
# print out the contents
# but since all data is in a list, we can access
# the lines using the array syntax
print()
print(len(data))
print(data[0])
print(data[1])
# I can use a for loop to look through the list
fileLen = len(data)
for i in range(fileLen):
print(data[i])
myFile.close()
except:
print("There was a problem with a file read operation")
def readFromFileUsingReadLine():
try:
# create a file object to write to test.txt file
# note the "r" in the parameter list.
myFile = open("test.txt", "r")
# read in a line at a time using a for loop
for line in myFile:
print(line)
myFile.close()
except:
print("There was a problem with a file read operation")
def main():
readFromFileUsingReadLines()
print()
readFromFileUsingReadLine()
main() | [
"dlblair@northeaststate.edu"
] | dlblair@northeaststate.edu |
bdd3bf6fba4bd626f99d89450d76ac05362a99e1 | a0afdd22430c9324278e21cb2ec71172fa9d9136 | /mango/notification.py | 036c84e556a09be80caaae29c9b60e7b56373f54 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | jeromeku/mango-explorer | 8e00cdc5f9154184004afb02637dd10bb98be089 | 5d26b782e25886d794b1f90cbf761fb9ce3100b7 | refs/heads/master | 2023-07-18T05:08:21.882520 | 2021-08-16T21:42:38 | 2021-08-16T21:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,379 | py | # # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import abc
import csv
import logging
import os.path
import requests
import typing
from urllib.parse import unquote
from .liquidationevent import LiquidationEvent
# # 🥭 Notification
#
# This file contains code to send arbitrary notifications.
#
# # 🥭 NotificationTarget class
#
# This base class is the root of the different notification mechanisms.
#
# Derived classes should override `send_notification()` to implement their own sending logic.
#
# Derived classes should not override `send()` since that is the interface outside classes call and it's used to ensure `NotificationTarget`s don't throw an exception when sending.
#
class NotificationTarget(metaclass=abc.ABCMeta):
def __init__(self):
self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
def send(self, item: typing.Any) -> None:
try:
self.send_notification(item)
except Exception as exception:
self.logger.error(f"Error sending {item} - {self} - {exception}")
@abc.abstractmethod
def send_notification(self, item: typing.Any) -> None:
raise NotImplementedError("NotificationTarget.send() is not implemented on the base type.")
def __repr__(self) -> str:
return f"{self}"
# # 🥭 TelegramNotificationTarget class
#
# The `TelegramNotificationTarget` sends messages to Telegram.
#
# The format for the telegram notification is:
# 1. The word 'telegram'
# 2. A colon ':'
# 3. The chat ID
# 4. An '@' symbol
# 5. The bot token
#
# For example:
# ```
# telegram:<CHAT-ID>@<BOT-TOKEN>
# ```
#
# The [Telegram instructions to create a bot](https://core.telegram.org/bots#creating-a-new-bot)
# show you how to create the bot token.
class TelegramNotificationTarget(NotificationTarget):
def __init__(self, address):
super().__init__()
chat_id, bot_id = address.split("@", 1)
self.chat_id = chat_id
self.bot_id = bot_id
def send_notification(self, item: typing.Any) -> None:
payload = {"disable_notification": True, "chat_id": self.chat_id, "text": str(item)}
url = f"https://api.telegram.org/bot{self.bot_id}/sendMessage"
headers = {"Content-Type": "application/json"}
requests.post(url, json=payload, headers=headers)
def __str__(self) -> str:
return f"Telegram chat ID: {self.chat_id}"
# # 🥭 DiscordNotificationTarget class
#
# The `DiscordNotificationTarget` sends messages to Discord.
#
class DiscordNotificationTarget(NotificationTarget):
def __init__(self, address):
super().__init__()
self.address = address
def send_notification(self, item: typing.Any) -> None:
payload = {
"content": str(item)
}
url = self.address
headers = {"Content-Type": "application/json"}
requests.post(url, json=payload, headers=headers)
def __str__(self) -> str:
return "Discord webhook"
# # 🥭 MailjetNotificationTarget class
#
# The `MailjetNotificationTarget` sends an email through [Mailjet](https://mailjet.com).
#
# In order to pass everything in to the notifier as a single string (needed to stop
# command-line parameters form getting messy), `MailjetNotificationTarget` requires a
# compound string, separated by colons.
# ```
# mailjet:<MAILJET-API-KEY>:<MAILJET-API-SECRET>:FROM-NAME:FROM-ADDRESS:TO-NAME:TO-ADDRESS
#
# ```
# Individual components are URL-encoded (so, for example, spaces are replaces with %20,
# colons are replaced with %3A).
#
# * `<MAILJET-API-KEY>` and `<MAILJET-API-SECRET>` are from your [Mailjet](https://mailjet.com) account.
# * `FROM-NAME` and `TO-NAME` are just text fields that are used as descriptors in the email messages.
# * `FROM-ADDRESS` is the address the email appears to come from. This must be validated with [Mailjet](https://mailjet.com).
# * `TO-ADDRESS` is the destination address - the email account to which the email is being sent.
#
# Mailjet provides a client library, but really we don't need or want more dependencies. This`
# code just replicates the `curl` way of doing things:
# ```
# curl -s \
# -X POST \
# --user "$MJ_APIKEY_PUBLIC:$MJ_APIKEY_PRIVATE" \
# https://api.mailjet.com/v3.1/send \
# -H 'Content-Type: application/json' \
# -d '{
# "SandboxMode":"true",
# "Messages":[
# {
# "From":[
# {
# "Email":"pilot@mailjet.com",
# "Name":"Your Mailjet Pilot"
# }
# ],
# "HTMLPart":"<h3>Dear passenger, welcome to Mailjet!</h3><br />May the delivery force be with you!",
# "Subject":"Your email flight plan!",
# "TextPart":"Dear passenger, welcome to Mailjet! May the delivery force be with you!",
# "To":[
# {
# "Email":"passenger@mailjet.com",
# "Name":"Passenger 1"
# }
# ]
# }
# ]
# }'
# ```
class MailjetNotificationTarget(NotificationTarget):
def __init__(self, encoded_parameters):
super().__init__()
self.address = "https://api.mailjet.com/v3.1/send"
api_key, api_secret, subject, from_name, from_address, to_name, to_address = encoded_parameters.split(":")
self.api_key: str = unquote(api_key)
self.api_secret: str = unquote(api_secret)
self.subject: str = unquote(subject)
self.from_name: str = unquote(from_name)
self.from_address: str = unquote(from_address)
self.to_name: str = unquote(to_name)
self.to_address: str = unquote(to_address)
def send_notification(self, item: typing.Any) -> None:
payload = {
"Messages": [
{
"From": {
"Email": self.from_address,
"Name": self.from_name
},
"Subject": self.subject,
"TextPart": str(item),
"To": [
{
"Email": self.to_address,
"Name": self.to_name
}
]
}
]
}
url = self.address
headers = {"Content-Type": "application/json"}
requests.post(url, json=payload, headers=headers, auth=(self.api_key, self.api_secret))
def __str__(self) -> str:
return f"Mailjet notifications to '{self.to_name}' '{self.to_address}' with subject '{self.subject}'"
# # 🥭 CsvFileNotificationTarget class
#
# Outputs a liquidation event to CSV. Nothing is written if the item is not a
# `LiquidationEvent`.
#
# Headers for the CSV file should be:
# ```
# "Timestamp","Liquidator Name","Group","Succeeded","Signature","Wallet","Margin Account","Token Changes"
# ```
# Token changes are listed as pairs of value plus symbol, so each token change adds two
# columns to the output. Token changes may arrive in different orders, so ordering of token
# changes is not guaranteed to be consistent from transaction to transaction.
#
class CsvFileNotificationTarget(NotificationTarget):
def __init__(self, filename):
super().__init__()
self.filename = filename
def send_notification(self, item: typing.Any) -> None:
if isinstance(item, LiquidationEvent):
event: LiquidationEvent = item
if not os.path.isfile(self.filename) or os.path.getsize(self.filename) == 0:
with open(self.filename, "w") as empty_file:
empty_file.write(
'"Timestamp","Liquidator Name","Group","Succeeded","Signature","Wallet","Margin Account","Token Changes"\n')
with open(self.filename, "a") as csvfile:
result = "Succeeded" if event.succeeded else "Failed"
row_data = [event.timestamp, event.liquidator_name, event.group_name, result,
event.signature, event.wallet_address, event.margin_account_address]
for change in event.changes:
row_data += [f"{change.value:.8f}", change.token.name]
file_writer = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(row_data)
def __str__(self) -> str:
return f"CSV notifications to file {self.filename}"
# # 🥭 FilteringNotificationTarget class
#
# This class takes a `NotificationTarget` and a filter function, and only calls the
# `NotificationTarget` if the filter function returns `True` for the notification item.
#
class FilteringNotificationTarget(NotificationTarget):
def __init__(self, inner_notifier: NotificationTarget, filter_func: typing.Callable[[typing.Any], bool]):
super().__init__()
self.inner_notifier: NotificationTarget = inner_notifier
self.filter_func = filter_func
def send_notification(self, item: typing.Any) -> None:
if self.filter_func(item):
self.inner_notifier.send_notification(item)
def __str__(self) -> str:
return f"Filtering notification target for '{self.inner_notifier}'"
# # 🥭 NotificationHandler class
#
# A bridge between the worlds of notifications and logging. This allows any
# `NotificationTarget` to be plugged in to the `logging` subsystem to receive log messages
# and notify however it chooses.
#
class NotificationHandler(logging.StreamHandler):
def __init__(self, target: NotificationTarget):
logging.StreamHandler.__init__(self)
self.target = target
def emit(self, record):
# Don't send error logging from solanaweb3
if record.name == "solanaweb3.rpc.httprpc.HTTPClient":
return
message = self.format(record)
self.target.send_notification(message)
# # 🥭 parse_subscription_target() function
#
# `parse_subscription_target()` takes a parameter as a string and returns a notification
# target.
#
# This is most likely used when parsing command-line arguments - this function can be used
# in the `type` parameter of an `add_argument()` call.
#
def parse_subscription_target(target):
protocol, destination = target.split(":", 1)
if protocol == "telegram":
return TelegramNotificationTarget(destination)
elif protocol == "discord":
return DiscordNotificationTarget(destination)
elif protocol == "mailjet":
return MailjetNotificationTarget(destination)
elif protocol == "csvfile":
return CsvFileNotificationTarget(destination)
else:
raise Exception(f"Unknown protocol: {protocol}")
| [
"geoff@knife.opgeek.lan"
] | geoff@knife.opgeek.lan |
1fbca8a60b71a90686126ab10fe2745039344b6c | 84a5c4c2e0977d42425771098f5f881c750da7f0 | /neomodel_constraints/fetcher/constraints/v4_1.py | 3bb76bcc8e48273575a33bd210c5b02c050d2956 | [] | no_license | SSripilaipong/neomodel-constraints | 6c3023ba156275e48f5f7ebcbdd283ce8d41f9a1 | 4b91185ba9eec993c58e9ae770fd3d0e90f915ae | refs/heads/main | 2023-07-15T09:58:41.451631 | 2021-08-29T13:19:38 | 2021-08-29T13:19:38 | 390,312,509 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,883 | py | import re
from typing import List, Dict
from neomodel_constraints.connection import ConnectionAbstract
from neomodel_constraints.constraint import ConstraintSet, TypeMapperAbstract
from neomodel_constraints.fetcher.abstract import FetcherAbstract
from .data import Neo4jConstraintQueryRecord
from .util import convert_constraints_with_type_mapper
def extract_record_detail(detail: str) -> Dict:
param_str = re.findall(r'^Constraint\((.*)\)$', detail)[0]
id_ = re.findall(r"id=(\d+?),", param_str)[0]
name = re.findall(r"name='(\w+?)',", param_str)[0]
type_ = re.findall(r"type='(\w+?)',", param_str)[0]
label, prop = re.findall(r"schema=\(:([^ ]+) {(\w+)}\)", param_str)[0]
owned_index = re.findall(r"ownedIndex=(\d+)", param_str)[0]
return {
'id': id_,
'ownedIndexId': owned_index,
'entityType': 'NODE',
'labelsOrTypes': [label],
'type': type_,
'name': name,
'properties': [prop],
}
class ConstraintsFetcher(FetcherAbstract):
def __init__(self, connection: ConnectionAbstract, type_mapper: TypeMapperAbstract):
self.connection: ConnectionAbstract = connection
self.type_mapper: TypeMapperAbstract = type_mapper
def _fetch_raw_data(self) -> List[Neo4jConstraintQueryRecord]:
raw = self.connection.execute('CALL db.constraints')
records = [extract_record_detail(record['details']) for record in raw]
return [Neo4jConstraintQueryRecord(**record) for record in records]
def _convert_constraints(self, raw: List[Neo4jConstraintQueryRecord]) -> ConstraintSet:
return convert_constraints_with_type_mapper(raw, self.type_mapper)
def fetch(self) -> ConstraintSet:
raw: List[Neo4jConstraintQueryRecord] = self._fetch_raw_data()
constraints: ConstraintSet = self._convert_constraints(raw)
return constraints
| [
"santhapon.s@siametrics.com"
] | santhapon.s@siametrics.com |
c93bc33909a5cb0e08e9a0adcc37eb65b8cf5039 | e2f1c508c9433ce276643a23f1de3148b4e9397f | /tests/engine.py | ccac11fc360c2cf932cf1e384230946b118a222e | [
"BSD-3-Clause",
"MIT"
] | permissive | nausicaaa/Coding_Buddy | dff706f76dc2053d00a071baa2281b9b67bd710f | 770b0078f6e4cede4498a38ea5cc3fe819d32b4b | refs/heads/master | 2021-01-18T07:23:59.985308 | 2016-05-15T19:46:06 | 2016-05-15T19:46:06 | 55,987,993 | 0 | 0 | null | 2016-04-11T16:16:27 | 2016-04-11T16:16:26 | null | UTF-8 | Python | false | false | 5,459 | py | from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
"""Engine for orchestating and interacting with the app."""
def set_up(self):
"""Ensure virtualenv present, then run all services."""
python_package = hitchpython.PythonPackage(
python_version=self.settings['python_version']
)
python_package.build()
call([
python_package.pip, "install", "-r",
path.join(PROJECT_DIRECTORY, "requirements/local.txt")
])
postgres_package = hitchpostgres.PostgresPackage()
postgres_package.build()
redis_package = hitchredis.RedisPackage()
redis_package.build()
self.services = hitchserve.ServiceBundle(
project_directory=PROJECT_DIRECTORY,
startup_timeout=float(self.settings["startup_timeout"]),
shutdown_timeout=float(self.settings["shutdown_timeout"]),
)
postgres_user = hitchpostgres.PostgresUser("Coding_Buddy", "password")
self.services['Postgres'] = hitchpostgres.PostgresService(
postgres_package=postgres_package,
users=[postgres_user, ],
databases=[hitchpostgres.PostgresDatabase("Coding_Buddy", postgres_user), ]
)
self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
self.services['Django'] = hitchpython.DjangoService(
python=python_package.python,
port=8000,
settings="config.settings.local",
needs=[self.services['Postgres'], ],
env_vars=self.settings['environment_variables'],
)
self.services['Redis'] = hitchredis.RedisService(
redis_package=redis_package,
port=16379,
)
self.services['Firefox'] = hitchselenium.SeleniumService(
xvfb=self.settings.get("xvfb", False),
no_libfaketime=True,
)
# import hitchcron
# self.services['Cron'] = hitchcron.CronService(
# run=self.services['Django'].manage("trigger").command,
# every=1,
# needs=[ self.services['Django'], ],
# )
self.services.startup(interactive=False)
# Docs : https://hitchtest.readthedocs.org/en/latest/plugins/hitchselenium.html
self.driver = self.services['Firefox'].driver
self.webapp = hitchselenium.SeleniumStepLibrary(
selenium_webdriver=self.driver,
wait_for_timeout=5,
)
# Add selenium steps
self.click = self.webapp.click
self.wait_to_appear = self.webapp.wait_to_appear
self.wait_to_contain = self.webapp.wait_to_contain
self.wait_for_any_to_contain = self.webapp.wait_for_any_to_contain
self.click_and_dont_wait_for_page_load = self.webapp.click_and_dont_wait_for_page_load
# Configure selenium driver
self.driver.set_window_size(self.settings['window_size']['width'], self.settings['window_size']['height'])
self.driver.set_window_position(0, 0)
self.driver.implicitly_wait(2.0)
self.driver.accept_next_alert = True
def pause(self, message=None):
"""Stop. IPython time."""
if hasattr(self, 'services'):
self.services.start_interactive_mode()
self.ipython(message)
if hasattr(self, 'services'):
self.services.stop_interactive_mode()
def load_website(self):
"""Navigate to website in Firefox."""
self.driver.get(self.services['Django'].url())
self.click("djHideToolBarButton")
def fill_form(self, **kwargs):
"""Fill in a form with id=value."""
for element, text in kwargs.items():
self.driver.find_element_by_id(element).send_keys(text)
def confirm_emails_sent(self, number):
"""Count number of emails sent by app."""
assert len(self.services['HitchSMTP'].logs.json()) == int(number)
def click_on_link_in_last_email(self, which=1):
"""Click on the nth link in the last email sent."""
self.driver.get(
self.services['HitchSMTP'].logs.json()[-1]['links'][which - 1]
)
def wait_for_email(self, containing=None):
"""Wait for, and return email."""
self.services['HitchSMTP'].logs.out.tail.until_json(
lambda email: containing in email['payload'] or containing in email['subject'],
timeout=25,
lines_back=1,
)
def time_travel(self, days=""):
"""Make all services think that time has skipped forward."""
self.services.time_travel(days=int(days))
def on_failure(self):
"""Stop and IPython."""
if not self.settings['quiet']:
if self.settings.get("pause_on_failure", False):
self.pause(message=self.stacktrace.to_template())
def on_success(self):
    """On scenario success: optionally pause with a SUCCESS banner."""
    if not self.settings.get("pause_on_success", False):
        return
    self.pause(message="SUCCESS")
def tear_down(self):
    """Shut down the services started for this test, if any were started."""
    if not hasattr(self, 'services'):
        return
    self.services.shutdown()
| [
"anetka.wlodarczyk@gmail.com"
] | anetka.wlodarczyk@gmail.com |
ed954de276156e95166db50f4039b934494a89b8 | 0fff2c2f14abe9ebcdf788a2d222876510a4ae3d | /python_tkinter_study/tk之文本框明暗文输入及网格布局.py | 43c5412b8f938c5555951a44df588bcf07afd396 | [
"MIT"
] | permissive | 4ever-blessed/Github_python3_code | 0899551d1382a8699cf6ecf3884d374dd3a7e6e4 | 789fa427b6f1b7996804c0f2d5ad7043607910c0 | refs/heads/master | 2020-03-14T06:30:38.559199 | 2018-07-24T09:12:18 | 2018-07-24T09:12:18 | 131,485,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | # conda_python3_code
from tkinter import *
from PIL import Image,ImageTk
# PIL is used for decode the JPG PNG format
# Clear text display
# def show():
# print('Your name is %s'% e1.get())
# print('Your id is %s'% e2.get())
# root = Tk()
#
# Label(root,text='name:').grid(row=0,column=0)
# Label(root,text='ID:').grid(row=1,column=0)
#
# e1 = Entry(root)
# e2 = Entry(root)
# e1.grid(row=0,column=1,padx=5,pady=5)
# e2.grid(row=1,column=1,padx=5,pady=5)
#
# Button(root,text='Confirm',width=10,command=show).grid(row=3,column=0,sticky=W,padx=10,pady=5)
# Button(root, text='Exit', width=10, command=root.quit).grid(row=3,column=1,sticky=E,padx=10,pady=5)
#
# mainloop()
# Secret text display
def show():
    """Print the ID and the (plain-text) password currently typed into the form."""
    print('Your ID is %s'% e1.get())
    print('Your password is %s'% e2.get())
# Build the window: two labelled rows (ID / password) laid out with grid().
root = Tk()
Label(root,text='ID:').grid(row=0,column=0)
Label(root,text='password:').grid(row=1,column=0)
v1 = StringVar()
v2 = StringVar()
e1 = Entry(root,textvariable=v1)
# show='*' masks each typed character, turning this into a password field.
e2 = Entry(root,textvariable=v2,show='*')
e1.grid(row=0,column=1,padx=5,pady=5)
e2.grid(row=1,column=1,padx=5,pady=5)
# Confirm prints the current entries; Exit leaves the Tk mainloop.
Button(root,text='Confirm',width=10,command=show).grid(row=3,column=0,sticky=W,padx=10,pady=5)
Button(root, text='Exit', width=10, command=root.quit).grid(row=3,column=1,sticky=E,padx=10,pady=5)
mainloop() | [
"is_my_logo@icloud.com"
] | is_my_logo@icloud.com |
73aaf88a3f4bf55ce3928acdfa3759d141ca5256 | 43ce77ad778dcb0816acfb613ae945689342cd46 | /config.py | 5d779b4ee910f37bf3e14d2770b463b03792202d | [] | no_license | vpdesai21/SimulationProject | 45558de4f115002680cec21d76bd831f4a0aca9e | e5781fdefa3a0e54e57bfdda7a349e934cecc223 | refs/heads/master | 2021-01-09T07:12:35.682320 | 2015-06-22T00:19:07 | 2015-06-22T00:19:07 | 35,399,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | __author__ = 'root'
# Multiplier applied to simulated time progression — presumably 1 means
# real-time; TODO confirm against the simulation loop that consumes it.
simulation_time_factor = 1
| [
"vishwa.desai@sjsu.edu"
] | vishwa.desai@sjsu.edu |
a22b262b8040abcc8a2c816cafca92d9d7a2eb41 | 4f3f2ae204a2c709bffde8e3ae478339f743c3e8 | /main/migrations/0011_adminusermodel.py | 358b2995f8e3c29292eb3f3f37b5a02ff519c72b | [] | no_license | Anupam124jain/ICO | a9d5f02d2cd1680c5a4545439238f7325c601373 | 5faf27c7e25bfc3d4fa7cfc3f8dff10583dddaad | refs/heads/master | 2022-12-15T17:37:11.193927 | 2019-01-21T13:39:24 | 2019-01-21T13:39:24 | 166,819,535 | 0 | 0 | null | 2022-12-08T03:00:50 | 2019-01-21T13:33:33 | Python | UTF-8 | Python | false | false | 1,001 | py | # Generated by Django 2.1 on 2018-10-03 10:07
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create AdminUserModel as a multi-table-inheritance child of the auth
    user model, linked through the ``user_ptr`` parent-link OneToOneField."""

    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
        ('main', '0010_auto_20181003_1002'),
    ]

    operations = [
        migrations.CreateModel(
            name='AdminUserModel',
            fields=[
                # parent_link pointer to AUTH_USER_MODEL — doubles as the primary key.
                ('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            bases=('auth.user',),
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| [
"anupam124jain@gmail.com"
] | anupam124jain@gmail.com |
f155d5c7c4b74b8ecf84d3a1d6f02fcc2b3ade6b | 6fefd215a7b7e38e53a8e012b890e8fbfde1dbe2 | /logic.py | ced7ab5b8beb4f36cde2b78162d67636573837e7 | [] | no_license | alskgj/rankit_papperlacart | d90e429fd62e46b618b5b52cae34f9fbb5052bea | 50a0183fc41c255dda64302a5d74aad7a2217054 | refs/heads/master | 2021-01-01T16:37:27.661735 | 2017-07-22T16:17:44 | 2017-07-22T16:17:44 | 97,874,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,056 | py | from json import loads, dumps
from elo import Match
def write_to_db(name: str, score: float):
    """Persist a player's Elo score into db.json.

    :param name: player name (used as the JSON key)
    :param score: Elo score; coerced to float before storing
    """
    value = float(score)
    with open('db.json') as handle:
        records = loads(handle.read())
    records[name] = value
    with open('db.json', 'w') as handle:
        handle.write(dumps(records))
def player_exists_in_db(name: str):
    """Return True when *name* has a stored score in db.json.

    :param name: player name to look up
    """
    with open('db.json') as handle:
        records = loads(handle.read())
    return name in records
def calculate_placing(data: dict):
    """Map each player name to a dense ranking (1 = best score).

    Equal scores share a place and the next distinct score gets the
    next consecutive place, e.g.
    {'dimitri': 100, 'lukboe': 20, 'mario': 50, 'luigi': 50}
    -> {'dimitri': 1, 'mario': 2, 'luigi': 2, 'lukboe': 3}

    :param data: dict mapping name -> score (scores convertible to int)
    :return: dict mapping name -> placing
    """
    ranking = {}
    previous_score = None
    place = 0
    by_score_desc = sorted(data.items(), key=lambda item: int(item[1]), reverse=True)
    for player, score in by_score_desc:
        if score != previous_score:
            place += 1
            previous_score = score
        ranking[player] = place
    return ranking
def update_elo(data: dict):
    """Turn raw match scores into placings and persist the new Elo ratings.

    :param data: dict like {'dimitri': 100, 'lukboe': 20, 'mario': 50, 'luigi': 50};
        scores are coerced to int IN PLACE (caller's dict is mutated, as before).
    """
    # Coerce every score to int (or an int subclass) in the caller's dict.
    for player_name in data:
        data[player_name] = int(data[player_name])
    # {name: score} -> {name: placing}
    placings = calculate_placing(data)
    match = Match()
    for player_name, place in placings.items():
        match.add_player(player_name, place, get_elo_from_db(player_name))
    match.calculate_elo()
    for player in match.players:
        print(f'{player.name}: {player.elo_pre} -> {player.elo_post}')
        write_to_db(player.name, player.elo_post)
def get_elo_from_db(player: str):
    """Look up and return *player*'s stored Elo rating from db.json.

    :param player: player name; raises KeyError when unknown.
    """
    with open('db.json') as handle:
        ratings = loads(handle.read())
    return ratings[player]
| [
"d@vis.ch"
] | d@vis.ch |
cc344ab268871e68262997bb1a4edd0560a0baf8 | 9467f3a54b19098766a3b0341eaac51617fc321b | /utils/build_batch.py | 7fafcc95dbe491be9a2c7c8f0a11100d7a88fc38 | [] | no_license | liangjies/Sentiment-Analysis | 1eedaa583d68536f92944b59ee6f8b6dedbc4a99 | beca6c6612cc3b38b28d711eb39eb72424bdde00 | refs/heads/master | 2020-11-24T05:25:56.081258 | 2019-12-19T08:21:32 | 2019-12-19T08:21:32 | 227,983,207 | 0 | 0 | null | 2019-12-14T07:24:42 | 2019-12-14T07:24:41 | null | UTF-8 | Python | false | false | 6,387 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@version: python3.6
@author: 'zenRRan'
@license: Apache Licence
@contact: zenrran@qq.com
@software: PyCharm
@file: build_batch.py
@time: 2018/10/15 10:44
"""
import random
class Build_Batch:
def __init__(self, features, opts, batch_size, pad_idx, char_padding_id,rel_padding_id=None):
self.batch_size = batch_size
self.features = features
self.shuffle = opts.shuffle
self.sort = opts.sort
self.batch_num = 0
self.batch_features = []
self.data_batchs = [] # [(data, label)]
self.PAD = pad_idx
self.CPAD = char_padding_id
self.RPAD = rel_padding_id
random.seed(opts.seed)
def create_same_sents_length_one_batch(self):
'''
:return:[[[x x x x]
[x x x x]]
[[x x x o]
[x x x o]
[x x x o]]]
'''
self.features = self.sort_features(self.features)
new_list = []
self.batch_features = []
self.data_batchs = []
same_len = True
for feature in self.features:
if len(new_list) != 0 and len(feature.words) != len(new_list[-1].words):
same_len = False
if same_len and len(new_list) < self.batch_size:
new_list.append(feature)
else:
new_list = self.shuffle_data(new_list)
self.batch_features.append(new_list)
ids, char_ids, labels, forest, heads, children_batch_list, tag_rels = self.choose_data_from_features(new_list)
ids_lengths = [len(id) for id in ids]
ids = self.add_pad(ids, self.PAD)
tag_rels = self.add_pad(tag_rels, self.RPAD)
char_ids = self.add_char_pad(char_ids, ids, self.CPAD)
self.data_batchs.append((ids, labels, char_ids, forest, heads, children_batch_list, ids_lengths, tag_rels))
new_list = []
same_len = True
new_list.append(feature)
self.batch_features = self.shuffle_data(self.batch_features)
self.data_batchs = self.shuffle_data(self.data_batchs)
return self.batch_features, self.data_batchs
def create_sorted_normal_batch(self):
'''
:return: [[[x x o o]
[x x x o]
[x x x o]]
[[x x x o]
[x x x x]]]
'''
self.features = self.sort_features(self.features)
new_list = []
self.batch_features = []
self.data_batchs = []
self.features.append([])
for idx, feature in enumerate(self.features):
if len(new_list) < self.batch_size and idx+1 != len(self.features):
new_list.append(feature)
else:
self.batch_num += 1
new_list = self.shuffle_data(new_list)
self.batch_features.append(new_list)
ids, char_ids, labels, forest, heads, children_batch_list, tag_rels = self.choose_data_from_features(new_list)
ids_lengths = [len(id) for id in ids]
ids = self.add_pad(ids, self.PAD)
tag_rels = self.add_pad(tag_rels, self.RPAD)
char_ids = self.add_char_pad(char_ids, ids, self.CPAD)
self.data_batchs.append((ids, labels, char_ids, forest, heads, children_batch_list, ids_lengths, tag_rels))
new_list = []
new_list.append(feature)
self.batch_features = self.shuffle_data(self.batch_features)
self.data_batchs = self.shuffle_data(self.data_batchs)
return self.batch_features, self.data_batchs
def choose_data_from_features(self, features):
ids = []
char_ids = []
labels = []
heads = []
forest = []
# bfs_batch_list = []
children_batch_list = []
tag_rels = []
for feature in features:
ids.append(feature.ids)
char_ids.append(feature.char_ids)
labels.append(feature.label)
heads.append(feature.heads)
forest.append(feature.root)
# bfs_batch_list.append(feature.bfs_list)
tag_rels.append(feature.rels_ids)
rel = [tree.children_index_list for tree in feature.forest]
max_len = feature.length
new_rel = [[0 for _ in range(max_len)] for _ in range(max_len)]
for i, each in enumerate(rel):
for j, index in enumerate(each):
new_rel[i][index] = 1
children_batch_list.append(new_rel)
return ids, char_ids, labels, forest, heads, children_batch_list, tag_rels
def add_char_pad(self, data_list, sents_ids_list, PAD):
'''
:param data_list:[[[x x], [x x x],...],[[x], [x x],...]]
:param PAD: PAD id
:return: [[[x x o], [x x x],...],[[x o], [x x],...]]
'''
new_data_list = []
for sent_list, sent in zip(data_list, sents_ids_list):
word_len = len(sent)
max_len = 0
new_sent_list = []
for word_list in sent_list:
max_len = max(max_len, len(word_list))
for word_list in sent_list:
new_sent_list.append(word_list + [PAD] * (max_len - len(word_list)))
new_data_list.append(new_sent_list + [[PAD] * max_len] * (word_len - len(new_sent_list)))
return new_data_list
def add_pad(self, data_list, PAD):
'''
:param data_list: [[x x x], [x x x x],...]
:return: [[x x x o o], [x x x x o],...]
'''
max_len = 0
new_data_list = []
for data in data_list:
max_len = max(max_len, len(data))
for data in data_list:
new_data_list.append(data + [PAD]*(max_len - len(data)))
return new_data_list
def sort_features(self, features):
if self.sort:
features = sorted(features, key=lambda feature: feature.length)
return features
def shuffle_data(self, data):
if self.shuffle:
random.shuffle(data)
return data
| [
"824203828@qq.com"
] | 824203828@qq.com |
7b121b785157322394491e6bc5bb951878921106 | f57124c12c8bcb1612b3baba1dcaeb57aa80cd0d | /test/test_laodao.py | 9ae8c6b2248276e4c8350164d41db472e2150563 | [] | no_license | ovc1024wz/ovc1024 | 20751ef82f93d6aec4bf1cfd31bd3b6487366f25 | 1933f9ad667535a18547254bdc86593424f3b435 | refs/heads/master | 2020-06-02T20:58:26.772645 | 2019-06-25T07:27:45 | 2019-06-25T07:27:45 | 191,308,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | #encoding=utf-8
import unittest,time
from coffee.webdriver import driver
from utils.loginpage import LoginPage
from pageobject.laodao import LaoDao
class MyTestCase(unittest.TestCase):
    """UI tests for the 'laodao' (status-message) page; logs in once per class."""
    @classmethod
    def setUpClass(cls):
        # NOTE(review): hard-coded personal credentials — move to config/env.
        email = "2589240188@qq.com"
        password = "123456"
        LoginPage().open_and_check()
        LoginPage().login(email, password)
    def setUp(self):
        # Open the laodao page before every test and verify it loaded.
        LaoDao().open_and_check()
        self.assertEqual(True, LaoDao().check_if_page_opened())
    def test_laodao1(self):
        # Delete the selected laodao entry from the list.
        LaoDao().delete_laodao()
        time.sleep(5)
    def test_laodao2(self):
        # Reply to the selected laodao entry in the list.
        LaoDao().answer_laodao()
        time.sleep(5)
    def test_laodao3(self):
        # Fill in the laodao input box.
        LaoDao().write_laodao()
        time.sleep(5)
    def test_laodao4(self):
        # View other users' laodao entries.
        LaoDao().visit_other_laodao()
        time.sleep(5)
if __name__ == '__main__':
    # Allow running this test module directly via the unittest runner.
    unittest.main()
| [
"2589240188@qq.com"
] | 2589240188@qq.com |
e22507c0de31cec909fd15ecdd02ceb23b3f3f64 | 99556451586f1f2fc56823c320ed0e8feb3a5f34 | /cronjobs/ios/checkCouchReplication.py | c0585280ba404c9106644ef2422586465ac331b0 | [] | no_license | snoplus/SNOPlusSlowControl | 72c4d71711109b317dd224426198d5db82d8e4a7 | ca633e1cbd3de96c06258cc7706b67f549964054 | refs/heads/master | 2020-05-19T21:48:00.364826 | 2020-04-22T20:25:07 | 2020-04-22T20:25:07 | 185,233,679 | 1 | 0 | null | 2020-04-22T00:50:56 | 2019-05-06T16:31:11 | Python | UTF-8 | Python | false | false | 3,562 | py | #!/usr/bin/python
#This script checks that data stored in the IOS's local couchDB is being
#Replicated to the couch.snopl.us database properly. If not, an alarm
#E-mail is sent to the slow control alarms list.
from __future__ import print_function
import sys
import socket
import json, httplib2, couchdb, string, time, re
import os
import smtplib
import mimetypes
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
def getcreds(location):
    """Read a credentials file and return ``(username, password)``.

    The file must contain lines of the form ``username <value>`` and
    ``password <value>``; trailing whitespace/newlines are stripped.

    Fixes over the original: the file handle was opened and never closed
    (resource leak) — now a ``with`` block; and the file was opened in
    binary mode, which breaks the ``str``-based matching under Python 3 —
    now text mode (identical behaviour on Python 2).

    :raises UnboundLocalError: if either line is missing (unchanged contract).
    """
    with open(location) as handle:
        for line in handle:
            if "username" in line:
                user = re.sub("username ", "", line).rstrip()
            if "password" in line:
                pwd = re.sub("password ", "", line).rstrip()
    return user, pwd
# Connection info for slow-control email notifications (Gmail account).
gmailUser, gmailPassword = getcreds("/home/slowcontroller/config/gmailcred.conf")
# Connection info for CouchDB: remote couch.snopl.us and the local slow-control instance.
couchuser, couchpassword = getcreds("/home/slowcontroller/config/couchcred.conf")
SCcouchuser, SCcouchpassword = getcreds("/home/slowcontroller/config/SCcouchcred.conf")
# 4th character of the hostname encodes the IOS number — presumably hosts are
# named "iosN..."; TODO confirm the naming scheme.
ios = str(int(socket.gethostname()[3]))
# NOTE(review): this file handle is never closed — consider a `with` block.
recipientfile=open("/home/slowcontroller/SNOPlusSlowControl/SNOPlusSlowControl/DB/emailList.txt","r")
recipients = recipientfile.readlines();
# Local CouchDB server on this IOS machine (no URL given — library default host).
ios_couch = couchdb.Server()
ios_couch.resource.credentials = (SCcouchuser, SCcouchpassword)
# Remote replication target.
snopl_couch = couchdb.Server('http://couch.snopl.us')
snopl_couch.resource.credentials = (couchuser, couchpassword)
def connectToIosDB(dbName):
    """Open *dbName* on the local (IOS) CouchDB server.

    :param dbName: name of the CouchDB database
    :returns: ``("ok", database)`` on success, ``("bad", {})`` on failure.
    """
    status = "ok"
    db = {}
    try:
        db = ios_couch[dbName]
    except Exception:  # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
        print("Failed to connect to " + dbName, file=sys.stderr)
        status = "bad"
    return status, db
def connectToSnoplDB(dbName):
    """Open *dbName* on the remote couch.snopl.us CouchDB server.

    :param dbName: name of the CouchDB database
    :returns: ``("ok", database)`` on success, ``("bad", {})`` on failure.
    """
    status = "ok"
    db = {}
    try:
        db = snopl_couch[dbName]
    except Exception:  # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
        print("Failed to connect to " + dbName, file=sys.stderr)
        status = "bad"
    return status, db
def sendMail(subject, text):
    """Best-effort alarm email: send *subject*/*text* via Gmail to `recipients`.

    Failures are deliberately swallowed (this runs unattended from a cron
    job), but the original bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt — narrowed to ``except Exception``.
    """
    try:
        msg = MIMEMultipart()
        msg['From'] = gmailUser
        msg['Subject'] = subject
        msg.attach(MIMEText(text))
        mailServer = smtplib.SMTP('smtp.gmail.com', 587, timeout=10)
        mailServer.ehlo()
        mailServer.starttls()
        mailServer.ehlo()
        mailServer.login(gmailUser, gmailPassword)
        msg['To'] = "alarmslist"
        mailServer.sendmail(gmailUser, recipients, msg.as_string())
        mailServer.close()
    except Exception:
        # Intentional best-effort: a mail failure must not kill the check.
        pass
    return
def getIosTimestamp():
    """Return the newest 5-second-data timestamp stored on the local IOS CouchDB.

    NOTE: if the database connection fails, ``iosTimestamp`` is never bound
    and this raises UnboundLocalError — unchanged from the original contract.
    """
    dbParStatus, dbPar = connectToIosDB("slowcontrol-data-5sec")
    # Was `dbParStatus is "ok"`: identity comparison with a string literal only
    # worked via CPython interning and is a SyntaxWarning on modern Pythons.
    if dbParStatus == "ok":
        queryresults = dbPar.view("slowcontrol-data-5sec/recent"+ios,descending=True,limit=1)
        iosTimestamp = queryresults.rows[0].value["timestamp"]
    return iosTimestamp
def getSnoplTimestamp():
    """Return the newest 5-second-data timestamp on the remote couch.snopl.us.

    NOTE: if the database connection fails, ``snoplTimestamp`` is never bound
    and this raises UnboundLocalError — unchanged from the original contract.
    """
    dbParStatus, dbPar = connectToSnoplDB("slowcontrol-data-5sec")
    # Was `dbParStatus is "ok"`: identity comparison with a string literal only
    # worked via CPython interning and is a SyntaxWarning on modern Pythons.
    if dbParStatus == "ok":
        queryresults = dbPar.view("slowcontrol-data-5sec/recent"+ios,descending=True,limit=1)
        snoplTimestamp = queryresults.rows[0].value["timestamp"]
    return snoplTimestamp
if __name__=='__main__':
    # Compare the newest timestamp on the remote mirror with the local IOS
    # database; alarm when replication lags by more than 5 minutes.
    snoplTimestamp = getSnoplTimestamp()
    iosTimestamp = getIosTimestamp()
    # Timestamps are presumably Unix seconds — TODO confirm producer format.
    difference = iosTimestamp-snoplTimestamp
    minutes = difference/60
    if minutes>5:
        subject = "Couch.snopl.us is not up to date with replications!"
        print(subject)
        body = "Couch.snopl.us 5sec data is now "+str(minutes)+" minutes behind IOS "+ios
        sendMail(subject,body)
    else:
        print("Couch.snopl.us is up to date with replications within 5min.")
| [
"pershint@github.com"
] | pershint@github.com |
c8d33300ad770d352447e92bf47f589d16f4bfe1 | c38d45d315ccb20c3656029c41081ef6a40f1f35 | /ade-programme-utils/adeProgrammeUtils.py | c600bdc4228eb0a146a1d4b470561891651fd9bf | [] | no_license | petronny/aur3-mirror | 3e12eed4901a807354a46399bdfd82763ec9f1b3 | 4279d4dcfcd6585fabd8b52d40d78d7db06c97ad | refs/heads/master | 2020-06-30T06:47:24.836321 | 2019-07-29T09:45:08 | 2019-07-29T09:45:08 | 74,439,663 | 1 | 0 | null | 2016-11-22T06:00:49 | 2016-11-22T06:00:49 | null | UTF-8 | Python | false | false | 11,543 | py | #!/usr/bin/ent python
# -*- coding: utf-8 -*-
""" Imports """
import os
import user
import xml
import xml.dom.minidom as dom
import string
import re
from base64 import b64decode, b64encode
from urllib import URLopener, urlencode
from urllib2 import build_opener, HTTPCookieProcessor, URLError
from cookielib import CookieJar
""" Configuration """
CONFIG_DIR = user.home+os.sep+".config"+os.sep+"ade-programme"+os.sep
CONFIG_FILE = "config.xml"
BASE_IMG_URL = "http://web.univ-ubs.fr/ade/imageEt?projectId=3&idPianoD\
ay=0%2C1%2C2%2C3%2C4&width=800&height=500&lunchName=REPAS&displayMode=1\
057855&showLoad=false&ttl=1272441274368&displayConfId=8"
""" Definitions """
class AdeConnectionUtil:
    """Low-level HTTP helper: authenticates against the UBS CAS portal and
    scrapes the Ade timetable service (docstrings translated from French)."""
    # Class-level defaults, overwritten per instance as state is acquired.
    identifier = None   # session identifier scraped from an Ade image URL
    CJ = None           # CookieJar shared by the opener
    connection = None   # urllib2 opener with cookie support
    connected = False   # True once the CAS login handshake has succeeded
    username = ""
    password = ""
    def __init__(self):
        """Initialise AdeConnectionUtil.

        Builds the site connector (URL opener) and the cookie handler."""
        self.CJ = CookieJar()
        self.connection = build_opener(HTTPCookieProcessor(self.CJ))
    def setIds(self, newusername, newpassword):
        """Store the username and password.

        Without these, connecting is impossible."""
        checkType(newusername, str)
        checkType(newpassword, str)
        self.username = newusername
        self.password = newpassword
    def getConnection(self, force = False, username = None, password = None):
        """Called by every other method of the class.

        Checks whether the cookies required for the Ade session were
        already fetched and, if not, performs the CAS login handshake."""
        if force:self.connected = False
        if not(username and password):
            username = self.username
            password = self.password
        if not(self.connected):
            try:
                # Fetch the login page and extract the hidden CAS "lt" token.
                u = self.connection.open("https://cas.univ-ubs.fr/login").read()
                f = '<input type="hidden" name="lt" value="'
                l = u.rfind(f)
                r = u.rfind('" />\n\t\t\t\t\t\t<input type="hidden" name="_eventId"')
                data = u[l+len(f):r]
                params = urlencode({
                    "username":username,
                    "password":password,
                    "lt":data,
                    "_eventId":"submit",
                    "submit":"SE CONNECTER"
                })
                self.connection.open("https://cas.univ-ubs.fr/login", params)
                # Follow the planning frame to obtain the session-bound Ade URL.
                u = self.connection.open("http://web.univ-ubs.fr/edt/ade/custom/myplanning/myPlanning.jsp?top=top.moniframeesup")
                u = u.read()
                f = '<FRAME src="../../'
                l = u.rfind(f)
                r = u.rfind('" name="planning">')
                data = u[l+len(f):r]
                if len(data) > 500:
                    # An over-long match means the frame URL was not found
                    # (login presumably failed).
                    raise ValueError
                self.connection.open("http://web.univ-ubs.fr/ade/"+data)
            except URLError:
                raise IOError
            self.connected = True
    def getGroups(self):
        """Return the groups attached to the academic year followed by the
        logged-in student, as a {name: id} dict."""
        self.getConnection()
        groups = {}
        try:
            u = self.connection.open("http://web.univ-ubs.fr/ade/standard/gui/tree.jsp?forceLoad=false&isDirect=true")
            u = u.read()
            """start = u.find("treelineselected")
            end = u.rfind("treelineselected")
            u = u[start:end+300]"""
            # Rewrite the HTML into <DEB>id<SEP>name<FIN> markers, then
            # regex-extract every (id, name) pair.
            u = string.join(string.split(u, "javascript:check("), "<DEB>")
            u = string.join(string.split(u, ", 'true');\">"), "<SEP>")
            u = string.join(string.split(u, ", 'false');\">"), "<SEP>")
            u = string.join(string.split(u, "</a>"), "<FIN>")
            list = re.findall("<DEB>([0-9]*)<SEP>([0-9a-zA-Z -]*)<FIN>", u)
            for i in list:
                groups[i[1]] = int(i[0])
        except URLError:
            raise IOError
        return groups
    def getCorrectId(self):
        """Obtain a unique identifier needed to download the plannings and
        other information; cached after the first fetch."""
        if not(self.identifier):
            self.getConnection()
            try:
                u = self.connection.open("http://web.univ-ubs.fr/ade/custom/modules/plannings/imagemap.jsp?width=800&height=600")
                u = u.read()
                f = '<img border=0 src="/ade/imageEt?identifier='
                l = u.rfind(f)
                r = u.rfind('&projectId=3&idPianoWeek=')
                self.identifier = u[l+len(f):r]
            except URLError:
                raise IOError
        return self.identifier
    def getConnected(self, username = None, password = None):
        """Force a (re)login — optionally with explicit credentials — and
        report whether the connection succeeded."""
        if username and password:
            self.getConnection(True, username, password)
        else:
            self.getConnection(True)
        return self.connected
class AdeProgrammeUtils:
    """High-level facade: persists user settings in an XML configuration file
    and exposes group/planning helpers (docstrings translated from French)."""
    connection = AdeConnectionUtil()   # shared connection helper (class-level)
    document = None                    # minidom Document holding the configuration
    uo = URLopener()                   # plain opener used to download planning images
    def __init__(self):
        """Constructor of the AdeProgrammeUtils class.

        Checks whether the configuration files are present and, if
        necessary, creates one."""
        if not(os.path.exists(CONFIG_DIR)):
            os.mkdir(CONFIG_DIR)
        os.chdir(CONFIG_DIR)
        if not(os.path.isfile(CONFIG_FILE)):
            # Basic configuration of the file
            self.setBasicDocument()
        else:
            try:
                self.document = dom.parse(CONFIG_FILE)
                self.makeCorrectIds()
                con = self.getConnectionIds()
                self.connection.setIds(str(con["username"]), str(con["password"]))
            except xml.parsers.expat.ExpatError:
                # Config file unparsable: fall back to a fresh basic document.
                self.setBasicDocument()
        self.saveDocument()
    def setBasicDocument(self):
        """Write the minimal content of the document."""
        # Basic configuration of the file
        self.document = dom.Document()
        root = self.document.createElement("data")
        # It's the first run for the application
        firstrun = self.document.createElement("merge")
        root.appendChild(firstrun)
        firstrun.setAttribute("id", "firstrun")
        firstrun.setAttribute("value", "True")
        self.document.appendChild(root)
        self.makeCorrectIds()
    def makeCorrectIds(self):
        """Bind the 'id' attributes to the node identifiers so nodes can be
        reached through getElementById."""
        for i in self.document.getElementsByTagName("*"):
            if i.hasAttribute("id"):
                i.setIdAttribute("id")
    def saveDocument(self):
        """Save the configuration file."""
        os.chdir(CONFIG_DIR)
        f = open(CONFIG_FILE, "w")
        f.write(self.document.toxml())
        f.close()
    def isFirstRun(self):
        """Tell whether this is the application's first start."""
        ret = True
        firstrun = self.document.getElementById("firstrun")
        if firstrun:
            if firstrun.hasAttribute("value"):
                values = ("False", "True")
                val = firstrun.getAttribute("value")
                if val in values:
                    # index in ("False", "True") maps directly to the bool value
                    ret = bool(int(values.index(val)))
        return ret
    def setFirstRun(self, newfirstrun):
        """Record whether the first start has completed."""
        checkType(newfirstrun, bool)
        root = self.document.firstChild
        firstrun = self.document.getElementById("firstrun")
        if not(firstrun):
            firstrun = self.document.createElement("merge")
            firstrun.setAttribute("id", "firstrun")
            firstrun.setIdAttribute("id")
            root.appendChild(firstrun)
        firstrun.setAttribute("value", str(newfirstrun))
        self.saveDocument()
    def setConnectionIds(self, newusername, newpassword):
        """Let the user change their credentials.

        NOTE: the password is stored base64-encoded, not encrypted."""
        checkType(newusername, str)
        checkType(newpassword, str)
        self.connection.setIds(newusername, newpassword)
        root = self.document.firstChild
        username = self.document.getElementById("username")
        password = self.document.getElementById("password")
        if not(username):
            username = self.document.createElement("merge")
            username.setAttribute("id", "username")
            username.setIdAttribute("id")
            root.appendChild(username)
        username.setAttribute("value", newusername)
        if not(password):
            password = self.document.createElement("merge")
            password.setAttribute("id", "password")
            password.setIdAttribute("id")
            root.appendChild(password)
        password.setAttribute("value", b64encode(newpassword))
        self.saveDocument()
    def getConnectionIds(self):
        """Return the user's stored credentials as a
        {"username": ..., "password": ...} dict (empty strings when unset)."""
        username = self.document.getElementById("username")
        password = self.document.getElementById("password")
        ret = {
            "username":"",
            "password":""
        }
        if username and username.hasAttribute("value"):
            ret["username"] = username.getAttribute("value")
        if password and password.hasAttribute("value"):
            ret["password"] = b64decode(password.getAttribute("value"))
        return ret
    def refreshGroups(self):
        """Refresh the list of groups useful to the user (rebuilds the
        <merge id="groups"> node from the server's group list)."""
        groupTable = self.connection.getGroups()
        root = self.document.firstChild
        groups = self.document.getElementById("groups")
        if not(groups):
            groups = self.document.createElement("merge")
            groups.setAttribute("id", "groups")
            groups.setIdAttribute("id")
            root.appendChild(groups)
        else:
            while groups.hasChildNodes():
                groups.removeChild(groups.firstChild)
        for i in groupTable:
            node = self.document.createElement("merge")
            node.setAttribute("name", i)
            node.setAttribute("value", str(groupTable[i]))
            groups.appendChild(node)
        self.saveDocument()
    def getGroups(self):
        """Return the list of group names."""
        groupTable = []
        groups = self.document.getElementById("groups")
        if groups:
            first = groups.firstChild
            if first:
                node = first
                while True:
                    if node.hasAttribute("name") and node.hasAttribute("value"):
                        #groupTable[node.getAttribute("name")] = int(node.getAttribute("value"))
                        groupTable.append(node.getAttribute("name"))
                    node = node.nextSibling
                    if not(node):
                        break
        return groupTable
    def getGroupsIds(self):
        """Return the groups and the identifiers attached to them, as a
        {name: id} dict."""
        groupTable = {}
        groups = self.document.getElementById("groups")
        if groups:
            first = groups.firstChild
            if first:
                node = first
                while True:
                    if node.hasAttribute("name") and node.hasAttribute("value"):
                        groupTable[node.getAttribute("name")] = int(node.getAttribute("value"))
                    node = node.nextSibling
                    if not(node):
                        break
        return groupTable
    def getUserGroup(self):
        """Return the name of the user's group, or 0 when unset/unknown."""
        usergroup = 0
        group = self.document.getElementById("usergroup")
        if group and group.hasAttribute("value"):
            usergroup = int(group.getAttribute("value"))
        groups = self.getGroupsIds()
        for i in groups:
            if usergroup == groups[i]:
                return i
        return 0
    def setUserGroup(self, grpName):
        """Set the user's group (silently ignored when grpName is unknown)."""
        checkType(grpName, str)
        if grpName in self.getGroups():
            numGrp = self.getGroupsIds()[grpName]
            root = self.document.firstChild
            group = self.document.getElementById("usergroup")
            if not(group):
                group = self.document.createElement("merge")
                group.setAttribute("id", "usergroup")
                group.setIdAttribute("id")
                root.appendChild(group)
            group.setAttribute("value", str(numGrp))
            self.saveDocument()
    def getPlanning(self, semaine, groupId = None, identifier = None):
        """Download the planning image for week *semaine* and return the
        path of the temporary file it was saved to.

        :raises ReferenceError: when no groupId is given and no user group is set.
        """
        checkType(semaine, int)
        if groupId:checkType(groupId, str)
        else:
            if self.getUserGroup():
                groupId = self.getUserGroup()
            else:
                raise ReferenceError
            groupId = self.getGroupsIds()[groupId]
        if identifier:checkType(identifier, str)
        else:
            identifier = self.connection.getCorrectId()
        # Ade numbers weeks with an 18-week offset, modulo 53.
        num = (semaine + 18) % 53
        url = BASE_IMG_URL+'&idPianoWeek='+str(num)+'&idTree='+str(groupId)+'&identifier='+identifier
        file = self.uo.retrieve(url)[0]
        return file
def checkType(object, req):
    """Raise TypeError unless *object* is exactly of type *req* (no subclasses)."""
    actual = type(object)
    if actual == req:
        return
    raise TypeError(str(type(object))+" recieved "+str(req)+" expected.")
| [
"gostrc@gmail.com"
] | gostrc@gmail.com |
5aa0c3741468196957ffba57ea37b13e03fee079 | 1eab574606dffb14a63195de994ee7c2355989b1 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/capture/currentpacket/stack/stack.py | 584e79fdba6258677a42f35e8cbaf4e10d7896e7 | [
"MIT"
] | permissive | steiler/ixnetwork_restpy | 56b3f08726301e9938aaea26f6dcd20ebf53c806 | dd7ec0d311b74cefb1fe310d57b5c8a65d6d4ff9 | refs/heads/master | 2020-09-04T12:10:18.387184 | 2019-11-05T11:29:43 | 2019-11-05T11:29:43 | 219,728,796 | 0 | 0 | null | 2019-11-05T11:28:29 | 2019-11-05T11:28:26 | null | UTF-8 | Python | false | false | 3,235 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Stack(Base):
    """This object specifies the stack properties.

    The Stack class encapsulates a list of stack resources that is managed by the system.
    A list of resources can be retrieved from the server using the Stack.find() method.
    """
    __slots__ = ()
    _SDM_NAME = 'stack'

    def __init__(self, parent):
        super(Stack, self).__init__(parent)

    @property
    def Field(self):
        """An instance of the Field class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.capture.currentpacket.stack.field.field.Field)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Local import — presumably to avoid a circular import between the
        # generated stack/field modules; confirm before hoisting to file level.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.capture.currentpacket.stack.field.field import Field
        return Field(self)

    @property
    def DisplayName(self):
        """Refers to the name of the stack.

        Returns:
            str
        """
        return self._get_attribute('displayName')

    def find(self, DisplayName=None):
        """Finds and retrieves stack data from the server.

        All named parameters support regex and can be used to selectively retrieve stack data from the server.
        By default the find method takes no parameters and will retrieve all stack data from the server.

        Args:
            DisplayName (str): Refers to the name of the stack.

        Returns:
            self: This instance with matching stack data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # _select() consumes locals() as the selection filter — do not add
        # extra local variables to this method body.
        return self._select(locals())

    def read(self, href):
        """Retrieves a single instance of stack data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the stack data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
2661bc5f8553f6cccf0516f15fda9c6515b2d032 | d387119f1139b12c7a7a2c1edb0d9c2973e70a2b | /yolo3/utils.py | 95ae0d008d84706e7c27e267199aea05ad0a1ca1 | [] | no_license | ant1pink/logo-detection | fdbaf1462621601322c144d6dd52468a212c33d7 | 50685a6c9cb35c75ddc42da8383445dce5d83bd7 | refs/heads/master | 2020-04-29T17:26:09.049787 | 2019-03-20T17:37:34 | 2019-03-20T17:37:34 | 176,296,794 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,922 | py | from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
def compose(*funcs):
    """Compose arbitrarily many functions, evaluated left to right.

    ``compose(f, g)(x)`` is ``g(f(x))``: the first function receives the
    original arguments, each following function receives the previous result.
    Reference: https://mathieularose.com/function-composition-in-python/
    """
    if not funcs:
        raise ValueError('Composition of empty sequence not supported.')

    def composed(*args, **kwargs):
        # Feed the original arguments to the first function, then thread the
        # result through the remaining ones in order.
        result = funcs[0](*args, **kwargs)
        for func in funcs[1:]:
            result = func(result)
        return result

    return composed
def letterbox_image(image, size):
    """Resize ``image`` to fit inside ``size`` with unchanged aspect ratio,
    centering it on a grey (128, 128, 128) canvas (letterbox padding)."""
    src_w, src_h = image.size
    dst_w, dst_h = size
    ratio = min(dst_w / src_w, dst_h / src_h)
    scaled_w = int(src_w * ratio)
    scaled_h = int(src_h * ratio)
    resized = image.resize((scaled_w, scaled_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128, 128, 128))
    # Paste centered: equal padding on both sides of each axis.
    canvas.paste(resized, ((dst_w - scaled_w) // 2, (dst_h - scaled_h) // 2))
    return canvas
def rand(a=0, b=1):
    """Draw a single uniform random float in the half-open interval [a, b)."""
    span = b - a
    return a + np.random.rand() * span
def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
    '''Random preprocessing for real-time data augmentation.

    ``annotation_line`` is "image_path x1,y1,x2,y2,c x1,y1,x2,y2,c ..." —
    each box is comma-separated ints; the 5th column (presumably the class
    id — confirm against the annotation generator) is carried through
    untouched. Returns (image_data, box_data): the image as a float array in
    [0, 1] of shape ``input_shape`` and an (max_boxes, 5) array of boxes,
    zero-padded. When ``random`` is False only a deterministic letterbox
    resize is done; otherwise scale/aspect jitter, random placement,
    horizontal flip and HSV distortion are applied.

    NOTE: the statements below consume np.random in a fixed order; reordering
    them changes the augmentation stream for a given seed.
    '''
    line = annotation_line.split()
    image = Image.open(line[0])
    iw, ih = image.size
    h, w = input_shape
    # Parse "x1,y1,x2,y2,c" groups into an (N, 5) int array.
    box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])

    if not random:
        # Deterministic letterbox resize (evaluation path).
        scale = min(w/iw, h/ih)
        nw = int(iw*scale)
        nh = int(ih*scale)
        dx = (w-nw)//2
        dy = (h-nh)//2
        image_data = 0  # placeholder when proc_img is False (boxes only)
        if proc_img:
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image)/255.

        # Scale/shift box corners into the letterboxed frame.
        box_data = np.zeros((max_boxes, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            if len(box) > max_boxes: box = box[:max_boxes]
            box[:, [0, 2]] = box[:, [0, 2]]*scale + dx
            box[:, [1, 3]] = box[:, [1, 3]]*scale + dy
            box_data[:len(box)] = box

        return image_data, box_data

    # --- training path: random augmentation ---
    # Random aspect ratio and scale for the resize.
    new_ar = w/h * rand(1-jitter, 1+jitter)/rand(1-jitter, 1+jitter)
    scale = rand(.25, 2)
    if new_ar < 1:
        nh = int(scale*h)
        nw = int(nh*new_ar)
    else:
        nw = int(scale*w)
        nh = int(nw/new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)

    # Paste at a random offset on a grey canvas (may crop if nw/nh > w/h).
    dx = int(rand(0, w-nw))
    dy = int(rand(0, h-nh))
    new_image = Image.new('RGB', (w, h), (128, 128, 128))
    new_image.paste(image, (dx, dy))
    image = new_image

    # Random horizontal flip.
    flip = rand() < .5
    if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)

    # Random HSV color distortion; hue wraps around, sat/val are clipped.
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand() < .5 else 1/rand(1, sat)
    val = rand(1, val) if rand() < .5 else 1/rand(1, val)
    x = rgb_to_hsv(np.array(image)/255.)
    x[..., 0] += hue
    x[..., 0][x[..., 0] > 1] -= 1
    x[..., 0][x[..., 0] < 0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x > 1] = 1
    x[x < 0] = 0
    image_data = hsv_to_rgb(x)  # numpy array, 0 to 1

    # Transform boxes to match the resize/placement/flip, clip to the frame,
    # and drop boxes that became degenerate (width/height <= 1px).
    box_data = np.zeros((max_boxes, 5))
    if len(box) > 0:
        np.random.shuffle(box)
        box[:, [0, 2]] = box[:, [0, 2]]*nw/iw + dx
        box[:, [1, 3]] = box[:, [1, 3]]*nh/ih + dy
        if flip: box[:, [0, 2]] = w - box[:, [2, 0]]
        box[:, 0:2][box[:, 0:2] < 0] = 0
        box[:, 2][box[:, 2] > w] = w
        box[:, 3][box[:, 3] > h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
        if len(box) > max_boxes: box = box[:max_boxes]
        box_data[:len(box)] = box

    return image_data, box_data
| [
"ruizheli.ryze@gmail.com"
] | ruizheli.ryze@gmail.com |
efa11e39b3443d2c2ecd4ad33280e4250b1217f1 | b2067744a72f3409a4235f796c8e64b32b922e4d | /task/urls.py | a627a646db5fed2019448710f4d56f4d5bb0473c | [] | no_license | rsanjoseestepar/tempmed3Dmod | 4b1238f3a49bf22f1cddd3a29dfb0b90bf9ca581 | 14e11673a6298bee3f90f2f635384b7693fa84a1 | refs/heads/master | 2020-05-17T17:29:27.463948 | 2015-10-22T06:45:33 | 2015-10-22T06:45:33 | 42,532,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py |
from django.conf.urls import url
from main import views
from django.contrib.auth import *
from django.conf.urls import patterns, url, include
from django.contrib import admin
from task import views
from django.conf.urls.static import static
from med3Dmodel import settings
from django import forms
from main import forms
from django.contrib.auth import *
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
# URL routes for the task app.
# NOTE(review): the imports above pull `views` from both `main` and `task`;
# the later `from task import views` wins, so this resolves to
# task.views.nhdr2nrrd -- confirm that shadowing is intended.
urlpatterns = [
    # Endpoint presumably converting NHDR headers to NRRD files (from the view name).
    url(r'^nhdr2nrrd/$', views.nhdr2nrrd, name='nhdr2nrrd'),
]
| [
"rsanjoseestepar@gmail.com"
] | rsanjoseestepar@gmail.com |
1259b25afb75ee0bfcc7c6e204f0ba8394d94744 | 1e82a5c6145fbd6861b863f95613e9406f434559 | /function_scheduling_distributed_framework/publishers/base_publisher.py | 8febd4bc61059b4b42707d4b0f36cee56e8a3ab1 | [
"Apache-2.0"
] | permissive | leiyugithub/distributed_framework | e6c83cf09faa5ee0d6d0ccc1e38fb6729a260c9b | 7a9c74e807f51680c25a9956e49ab319a8943a37 | refs/heads/master | 2020-12-07T13:23:24.354917 | 2020-01-08T08:18:47 | 2020-01-08T08:18:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,941 | py | # -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 11:57
import abc
import atexit
import json
import uuid
import time
import typing
from functools import wraps
from threading import Lock
import amqpstorm
from pika.exceptions import AMQPError as PikaAMQPError
from function_scheduling_distributed_framework.utils import LoggerLevelSetterMixin, LogManager, decorators, RedisMixin
class RedisAsyncResult(RedisMixin):
    """RPC-style result handle returned by ``AbstractPublisher.publish``.

    Blocks on a Redis list (keyed by ``task_id``) until the consumer pushes
    the JSON-encoded ``{'success': ..., 'result': ...}`` payload, then caches
    it so repeated access does not hit Redis again.
    """

    def __init__(self, task_id, timeout=120):
        self.task_id = task_id      # Redis list key the consumer pushes the result to
        self.timeout = timeout      # max seconds blpop blocks waiting for the result
        self._has_pop = False       # True once the result has been fetched and cached
        self._status_and_result = None

    def set_timeout(self, timeout=60):
        """Override the blocking timeout in seconds; returns self for chaining."""
        self.timeout = timeout
        return self

    @property
    def status_and_result(self):
        """Block (at most ``self.timeout`` seconds) for the raw status/result dict."""
        if not self._has_pop:
            reply = self.redis_db_frame.blpop(self.task_id, self.timeout)
            if reply is None:
                # Bug fix: blpop returns None when the timeout elapses; the old
                # code then crashed with "TypeError: 'NoneType' object is not
                # subscriptable". Raise an explicit, descriptive error instead.
                raise TimeoutError(
                    f'no result for task {self.task_id} within {self.timeout} seconds')
            self._status_and_result = json.loads(reply[1])
            self._has_pop = True
        return self._status_and_result

    def get(self):
        """Return the function's return value (blocks until available)."""
        return self.status_and_result['result']

    @property
    def result(self):
        """Alias for :meth:`get`."""
        return self.get()

    def is_success(self):
        """True if the consumer reported successful execution."""
        return self.status_and_result['success']
class PriorityConsumingControlConfig:
    """Per-task consumption-control parameters published to the middleware
    alongside the function arguments.

    Occasionally an individual task needs its own settings — e.g. for an
    ``add`` consumer, each task can carry a different timeout, retry count,
    or RPC-mode flag. Values set here take priority and override the
    configuration the consumer was created with.
    """

    def __init__(self, function_timeout: float = None, max_retry_times: int = None,
                 is_print_detail_exception: bool = None,
                 msg_expire_senconds: int = None,
                 is_using_rpc_mode: bool = None):
        # All fields default to None, meaning "use the consumer's own setting".
        self.function_timeout = function_timeout
        self.max_retry_times = max_retry_times
        self.is_print_detail_exception = is_print_detail_exception
        self.msg_expire_senconds = msg_expire_senconds  # (sic) name kept for wire compatibility
        self.is_using_rpc_mode = is_using_rpc_mode

    def to_dict(self):
        """Return the config as a plain dict.

        Returns a shallow copy rather than the live ``__dict__`` so callers
        mutating the returned dict cannot accidentally change this object.
        """
        return dict(self.__dict__)
class AbstractPublisher(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
    """Abstract base class for queue publishers.

    Handles the middleware-agnostic parts: per-publisher logging, publish
    counting/rate reporting, attaching per-message metadata (task_id and
    publish time), and returning a RedisAsyncResult handle for RPC-style
    calls. Concrete middlewares implement concrete_realization_of_publish,
    clear, get_message_count and close.
    """

    # Flipped to 1 by deco_mq_conn_error on the first broker use (lazy init).
    has_init_broker = 0

    def __init__(self, queue_name, log_level_int=10, logger_prefix='', is_add_file_handler=True, clear_queue_within_init=False, is_add_publish_time=True, ):
        """
        :param queue_name: name of the middleware queue to publish to
        :param log_level_int: logging level for this publisher's logger
        :param logger_prefix: optional prefix prepended to the logger name
        :param is_add_file_handler: also write this publisher's log to a file
        :param clear_queue_within_init: purge the queue right after construction
        :param is_add_publish_time: whether to add the publish time; to be
            deprecated — the time is always added now.
        """
        self._queue_name = queue_name
        if logger_prefix != '':
            logger_prefix += '--'
        logger_name = f'{logger_prefix}{self.__class__.__name__}--{queue_name}'
        self.logger = LogManager(logger_name).get_logger_and_add_handlers(log_level_int, log_filename=f'{logger_name}.log' if is_add_file_handler else None)  #
        # self.rabbit_client = RabbitMqFactory(is_use_rabbitpy=is_use_rabbitpy).get_rabbit_cleint()
        # self.channel = self.rabbit_client.creat_a_channel()
        # self.queue = self.channel.queue_declare(queue=queue_name, durable=True)
        self._lock_for_count = Lock()     # guards the publish counters below
        self._current_time = None         # start of the current counting window
        self.count_per_minute = None      # messages published in the current window
        self._init_count()
        self.custom_init()                # subclass hook, runs before any publishing
        self.logger.info(f'{self.__class__} 被实例化了')
        self.publish_msg_num_total = 0
        self._is_add_publish_time = is_add_publish_time
        self.__init_time = time.time()
        atexit.register(self.__at_exit)   # log lifetime totals on interpreter exit
        if clear_queue_within_init:
            self.clear()

    def set_is_add_publish_time(self, is_add_publish_time=True):
        """Toggle adding the publish timestamp; returns self for chaining."""
        self._is_add_publish_time = is_add_publish_time
        return self

    def _init_count(self):
        """Reset the rate-reporting window (counter + window start time)."""
        with self._lock_for_count:
            self._current_time = time.time()
            self.count_per_minute = 0

    def custom_init(self):
        """Optional subclass hook invoked at the end of __init__; default no-op."""
        pass

    def publish(self, msg: typing.Union[str, dict],
                priority_control_config: PriorityConsumingControlConfig = None):
        """Publish ``msg`` (dict or JSON string) with task metadata attached.

        A unique task_id and publish timestamps are stored under msg['extra'];
        per-task overrides from ``priority_control_config`` are merged in.
        Returns a RedisAsyncResult keyed by the task_id so RPC-mode callers
        can block for the consumer's result.
        """
        if isinstance(msg, str):
            msg = json.loads(msg)
        task_id = f'{self._queue_name}_result:{uuid.uuid4()}'
        msg['extra'] = extra_params = {'task_id': task_id, 'publish_time': round(time.time(), 4), 'publish_time_format': time.strftime('%Y-%m-%d %H:%M:%S')}
        if priority_control_config:
            extra_params.update(priority_control_config.to_dict())
        t_start = time.time()
        # Retry the concrete publish up to 10 times before raising.
        decorators.handle_exception(retry_times=10, is_throw_error=True, time_sleep=0.1)(self.concrete_realization_of_publish)(json.dumps(msg, ensure_ascii=False))
        self.logger.debug(f'向{self._queue_name} 队列,推送消息 耗时{round(time.time() - t_start, 4)}秒 {msg}')
        with self._lock_for_count:
            self.count_per_minute += 1
            self.publish_msg_num_total += 1
        # NOTE: this check stays OUTSIDE the lock above — _init_count()
        # re-acquires the same non-reentrant Lock and would deadlock otherwise.
        if time.time() - self._current_time > 10:
            self.logger.info(f'10秒内推送了 {self.count_per_minute} 条消息,累计推送了 {self.publish_msg_num_total} 条消息到 {self._queue_name} 中')
            self._init_count()
        return RedisAsyncResult(task_id)

    @abc.abstractmethod
    def concrete_realization_of_publish(self, msg):
        """Middleware-specific send of the serialized message string."""
        raise NotImplementedError

    @abc.abstractmethod
    def clear(self):
        """Purge all pending messages from the queue."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_message_count(self):
        """Return the number of messages currently waiting in the queue."""
        raise NotImplementedError

    @abc.abstractmethod
    def close(self):
        """Release the underlying middleware connection/channel."""
        raise NotImplementedError

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager use closes the connection and logs the total count.
        self.close()
        self.logger.warning(f'with中自动关闭publisher连接,累计推送了 {self.publish_msg_num_total} 条消息 ')

    def __at_exit(self):
        # atexit hook: report lifetime publish totals when the process ends.
        self.logger.warning(f'程序关闭前,{round(time.time() - self.__init_time)} 秒内,累计推送了 {self.publish_msg_num_total} 条消息 到 {self._queue_name} 中')
def deco_mq_conn_error(f):
    """Method decorator: lazily initialise the broker connection on first use,
    and on an AMQP connection error re-initialise once and retry the call."""
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        if not self.has_init_broker:
            # First use of the channel on this object: set up the broker
            # before delegating to the wrapped method.
            self.logger.warning(f'对象的方法 【{f.__name__}】 首次使用 rabbitmq channel,进行初始化执行 init_broker 方法')
            self.init_broker()
            self.has_init_broker = 1
            return f(self, *args, **kwargs)
        # noinspection PyBroadException
        try:
            return f(self, *args, **kwargs)
        except (PikaAMQPError, amqpstorm.AMQPError) as e:
            # The decorator is used in many places now, so a single exception
            # type is not enough; catch both client libraries' AMQP errors.
            self.logger.error(f'rabbitmq链接出错 ,方法 {f.__name__} 出错 ,{e}')
            self.init_broker()
            return f(self, *args, **kwargs)

    return wrapper
| [
"909686719@qq.com"
] | 909686719@qq.com |
cf4d178f248c85eea2e81d4251ed313da8f55968 | 183df02c0ceb38a891408039fb604846023b663a | /irasutoya/boot.py | aa5ae98025430a0eacd02d02c129680cd2a7e622 | [] | no_license | Yasu31/m5stickv-playground | 625ae7c5097907151ef49cc57592796611fc899d | 3fe602563958f2203ae422dddfe19d7ca725b181 | refs/heads/master | 2021-03-17T17:26:16.544111 | 2020-03-25T08:21:30 | 2020-03-25T08:21:30 | 247,005,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,821 | py | import audio
import gc
import image
import lcd
import sensor
import sys
import time
import uos
import KPU as kpu
from fpioa_manager import *
from machine import I2C
from Maix import I2S, GPIO
#
# initialize
#
lcd.init()
lcd.rotation(2)
i2c = I2C(I2C.I2C0, freq=400000, scl=28, sda=29)
fm.register(board_info.SPK_SD, fm.fpioa.GPIO0)
spk_sd=GPIO(GPIO.GPIO0, GPIO.OUT)
spk_sd.value(1) #Enable the SPK output
fm.register(board_info.SPK_DIN,fm.fpioa.I2S0_OUT_D1)
fm.register(board_info.SPK_BCLK,fm.fpioa.I2S0_SCLK)
fm.register(board_info.SPK_LRCLK,fm.fpioa.I2S0_WS)
wav_dev = I2S(I2S.DEVICE_0)
fm.register(board_info.BUTTON_A, fm.fpioa.GPIO1)
but_a=GPIO(GPIO.GPIO1, GPIO.IN, GPIO.PULL_UP) #PULL_UP is required here!
fm.register(board_info.BUTTON_B, fm.fpioa.GPIO2)
but_b = GPIO(GPIO.GPIO2, GPIO.IN, GPIO.PULL_UP) #PULL_UP is required here!
fm.register(board_info.LED_W, fm.fpioa.GPIO3)
led_w = GPIO(GPIO.GPIO3, GPIO.OUT)
led_w.value(1) #RGBW LEDs are Active Low
fm.register(board_info.LED_R, fm.fpioa.GPIO4)
led_r = GPIO(GPIO.GPIO4, GPIO.OUT)
led_r.value(1) #RGBW LEDs are Active Low
fm.register(board_info.LED_G, fm.fpioa.GPIO5)
led_g = GPIO(GPIO.GPIO5, GPIO.OUT)
led_g.value(1) #RGBW LEDs are Active Low
fm.register(board_info.LED_B, fm.fpioa.GPIO6)
led_b = GPIO(GPIO.GPIO6, GPIO.OUT)
led_b.value(1) #RGBW LEDs are Active Low
def set_backlight(level):
    """Set the LCD backlight brightness via the AXP192 PMIC.

    ``level`` is clamped to 0..8; the value is shifted into the high nibble
    of PMIC register 0x91 (device address 0x34).
    """
    level = max(0, min(8, level))
    val = (level + 7) << 4
    i2c.writeto_mem(0x34, 0x91, int(val))
def show_logo():
    """Display the wallpaper from the SD card and fade the backlight in.

    Best-effort: if the image is missing or unreadable, an error banner is
    drawn instead and the function returns normally.
    """
    try:
        img = image.Image("/sd/irasutoya/wall.jpeg")
        set_backlight(0)
        lcd.display(img)
        # Fade in: step the backlight from 0 up to 8.
        for i in range(9):
            set_backlight(i)
            time.sleep(0.1)
    except Exception:
        # Fixed bare "except:" which also swallowed KeyboardInterrupt/SystemExit;
        # only genuine runtime errors should fall back to the error banner.
        lcd.draw_string(lcd.width()//2-100, lcd.height()//2-4, "Error: Cannot find logo.jpg", lcd.WHITE, lcd.RED)
def initialize_camera():
    """Reset and configure the camera sensor (RGB565, QVGA 320x240).

    sensor.reset() can fail transiently, so it is retried in a loop; after
    20 consecutive failures an error banner is drawn on the LCD (and the
    loop keeps retrying, matching the original firmware behaviour).
    """
    err_counter = 0
    while 1:
        try:
            sensor.reset()  # Reset sensor may failed, let's try some times
            break
        except Exception:
            # Fixed bare "except:" which also swallowed KeyboardInterrupt;
            # only real sensor errors should trigger a retry.
            err_counter = err_counter + 1
            if err_counter == 20:
                lcd.draw_string(lcd.width()//2-100, lcd.height()//2-4, "Error: Sensor Init Failed", lcd.WHITE, lcd.RED)
            time.sleep(0.1)
            continue
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)  # QVGA=320x240
    sensor.run(1)
#
# main: run YOLOv2 20-class detection and replace each detected object with
# its matching "irasutoya" clip-art icon on a wallpaper background.
#
show_logo()
# Holding button A at boot skips the demo and drops back to the REPL.
if but_a.value() == 0:  # If dont want to run the demo
    set_backlight(0)
    print('[info]: Exit by user operation')
    sys.exit()
initialize_camera()
# The 20 Pascal-VOC classes, in the order the kmodel emits class ids.
classes = ['airplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
task = kpu.load("/sd/irasutoya/20class.kmodel")
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
# Anchor data is for bbox, extracted from the training sets.
kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
# Pre-load one icon per class (indexed by classid) plus the background.
irasutoya_icons = [image.Image("/sd/irasutoya/icons_jpeg/{}.jpeg".format(icon_class)) for icon_class in classes]
wall_img = image.Image("/sd/irasutoya/wall.jpeg")
print('[info]: Started.')
but_stu = 1
try:
    while(True):
        #gc.collect()
        img = sensor.snapshot()
        code_obj = kpu.run_yolo2(task, img)
        # Start each frame from a fresh copy of the wallpaper.
        img2show = wall_img.copy()
        if code_obj:  # object detected
            for i in code_obj:
                # Scale the class icon to the detection's bounding box.
                icon = irasutoya_icons[i.classid()]
                img2show.draw_image(icon, i.x(), i.y(), x_scale=i.w()/icon.width(), y_scale=i.h()/icon.height())
        lcd.display(img2show)
except KeyboardInterrupt:
    # Ctrl-C from the REPL: free the KPU model before exiting.
    kpu.deinit(task)
    sys.exit()
| [
"yasu@Yasunoris-MacBook-Pro.local"
] | yasu@Yasunoris-MacBook-Pro.local |
b9bf4b475ff8fa6fb6ff1205d3217c81700fc851 | 155d10b5a6556d2b7f0d3878832f4f102e6cd8cd | /src/sensors/srf02.py | 370138c7998abc79f45fecaf92c44a61d18023d7 | [] | no_license | leedowthwaite/rpi-uav | 51d8dbce8e4ef09d52038f02ea50a86dc0271ca2 | bce999ae0c8b0831a927f69c53b90ef0ba23bcb7 | refs/heads/master | 2021-01-22T06:58:45.395273 | 2013-04-15T15:07:01 | 2013-04-15T15:07:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | #!/usr/bin/python
import time
from Adafruit_I2C import Adafruit_I2C
# ===========================================================================
# SRF02 Class
# ===========================================================================
class SRF02 :
i2c = None
# Default I2C address
__I2CADDR = 0x70
# SRF02 Registers
# read mode
__SRF02_R_SWREVISION = 0x00
__SRF02_R_UNUSED = 0x01
__SRF02_R_RANGEH = 0x02
__SRF02_R_RANGEL = 0x03
__SRF02_R_AUTOTUNEMINH = 0x04
__SRF02_R_AUTOTUNEMINL = 0x05
# write mode
__SRF02_COMMAND = 0x00
# commands
__SRF02_CMD_RRM_INCHES = 0x50
__SRF02_CMD_RRM_CENTIMETERS = 0x51
__SRF02_CMD_RRM_MICROSECONDS = 0x52
__SRF02_CMD_FRM_INCHES = 0x56
__SRF02_CMD_FRM_CENTIMETERS = 0x57
__SRF02_CMD_FRM_MICROSECONDS = 0x58
__SRF02_CMD_8x40KHZ_BURST = 0x5c
__SRF02_CMD_AUTOTUNE_RESTART = 0x60
__SRF02_CMD_CHANGE_ADDR_SEQ1 = 0xa0
__SRF02_CMD_CHANGE_ADDR_SEQ2 = 0xa5
__SRF02_CMD_CHANGE_ADDR_SEQ3 = 0xaa
# Constructor
def __init__(self, address=__I2CADDR, debug=False):
self.i2c = Adafruit_I2C(address,debug=debug,byteSwap=True)
self.address = address
self.debug = debug
def readRange(self, rangingMode):
"Reads the range reported by the sensor (in inches)"
self.i2c.write8(self.__SRF02_COMMAND, rangingMode)
time.sleep(0.070) # Wait more than 65ms
range = self.i2c.readU16(self.__SRF02_R_RANGEH)
if (self.debug):
print "DBG: range: %d" % (range)
return range
def readRangeInches(self):
"Reads the range reported by the sensor (in inches)"
return self.readRange(self.__SRF02_CMD_RRM_INCHES)
def readRangeCentimeters(self):
"Reads the range reported by the sensor (in cm)"
return self.readRange(self.__SRF02_CMD_RRM_CENTIMETERS)
def readRangeMicroseconds(self):
"Reads the range reported by the sensor (in us)"
return self.readRange(self.__SRF02_CMD_RRM_MICROSECONDS)
def readMinRange(self):
"Reads min range of sensor"
min = self.i2c.readU16(self.__SRF02_R_AUTOTUNEMINH)
if (self.debug):
print "DBG: min: %d" % (min)
return min
| [
"leedowthwaite@gmail.com"
] | leedowthwaite@gmail.com |
b7db7de04e278983dbf36f898e9d6f2f0d0d62f4 | 8875142059d8a353c7c5db1f6ca08bf6556a3cc8 | /arp_spoof.py | bcc84a9002a1d649ca7e512b0ca737e2e378f0a1 | [] | no_license | danlarsson/Ethical-hacking | 640b0382bcdadc975db80553f431a487bf63ca4c | 75f7ba56bb16ff77dea08585702b1021ac35412b | refs/heads/master | 2020-04-10T05:46:31.148432 | 2019-01-04T13:49:20 | 2019-01-04T13:49:20 | 160,836,809 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,892 | py | #!/usr/bin/python3
# Does a man in the middle attack.
# echo 1 > /proc/sys/net/ipv4/ip_forward
import scapy.all as scapy
import time
from configparser import ConfigParser
parser = ConfigParser()
parser.read('config.ini')
def spoof(target_ip, spoof_ip):
    """Poison target_ip's ARP cache: claim that spoof_ip lives at our MAC.

    Sends a forged ARP response (op=2) addressed to the target's MAC,
    with the gateway/victim IP as the advertised source.
    """
    victim_mac = get_mac(target_ip)
    # op=1 would be a request; op=2 is a response, which most stacks accept
    # unsolicited and use to update their ARP tables.
    forged_reply = scapy.ARP(op=2, pdst=target_ip, hwdst=victim_mac, psrc=spoof_ip)
    scapy.send(forged_reply, verbose=False)
def restore(destination_ip, source_ip):
    """Undo the poisoning: send destination_ip the real MAC of source_ip.

    The correct mapping is sent 4 times to make sure the victim's ARP
    cache is repaired.
    """
    dst_mac = get_mac(destination_ip)
    src_mac = get_mac(source_ip)
    truthful_reply = scapy.ARP(op=2, pdst=destination_ip, hwdst=dst_mac, psrc=source_ip, hwsrc=src_mac)
    scapy.send(truthful_reply, count=4, verbose=False)
def get_mac(ip):
    """Resolve the MAC address for ``ip`` with a broadcast ARP request."""
    who_has = scapy.ARP(pdst=ip)
    broadcast_frame = scapy.Ether(dst='ff:ff:ff:ff:ff:ff')
    # srp sends at layer 2 and returns (answered, unanswered); take the
    # first answered pair's response and read its hardware source address.
    answered = scapy.srp(broadcast_frame / who_has, timeout=1, verbose=False)[0]
    return answered[0][1].hwsrc
# Victim and gateway addresses come from config.ini's [spoof] section.
target_ip = str(parser.get('spoof', 'target'))
gateway_ip = str(parser.get('spoof', 'gateway'))
sent_packets_count = 0
try:
    print('[+] Spoofing target %s and the gateway %s' % (target_ip, gateway_ip))
    while True:
        # Poison both directions so all traffic between target and gateway
        # flows through this host (requires ip_forward=1, see header).
        spoof(target_ip, gateway_ip)
        spoof(gateway_ip, target_ip)
        sent_packets_count += 2
        print('\r[+] Packets sent ' + str(sent_packets_count), end='')
        # Re-poison every 2 seconds so the caches don't recover on their own.
        time.sleep(2)
except KeyboardInterrupt:
    # Ctrl-C: repair both ARP caches before exiting.
    print('\n[+] Restoring IP-adresses')
    restore(target_ip, gateway_ip)
    restore(gateway_ip, target_ip)
    print('[+] Exiting gracefuly')
| [
"dan.m.larsson@gmail.com"
] | dan.m.larsson@gmail.com |
849ca1c32aa8848b9a468e99ea240724f0d82aad | c726b783bbbeb71801d8db17095b35ed7ba8742c | /code/task_2/sterioCalibrationRectification.py | ff74e37a5142c0990bd9591ae690edfad2c5a1ff | [] | no_license | suchitj2702/Understanding_camera_model_and_stereo_depth_sensing | a57fd2ace45def46c3b848e2292920df25ec26fd | 6506f85dfae48dc28dc239b6900d39272829b0d4 | refs/heads/main | 2023-07-18T17:38:15.223528 | 2021-08-26T23:55:49 | 2021-08-26T23:55:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,172 | py | import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import transformations as t
CalibrateImagesDirectory = "../../images/task_1"
TestImagesDirectory = "../../images/task_2"
UndistortedNotRectifiedDirectory = "../../output/task_2/UndistortedNotRectified"
UndistortedAndRectifiedDirectory = "../../output/task_2/UndistortedAndRectified"
CornerPointsDirectory = "../../output/task_2/CornerPointsMarked"
def extract3Dto2Dcorrespondence(camera):
    """Collect chessboard 3D->2D correspondences for one camera ("left"/"right").

    Scans the calibration images for a 9x6 inner-corner chessboard and pairs
    each detection with the planar 3D board model (Z=0, unit squares). Also
    loads the camera's previously saved intrinsics, and as a side effect
    draws/saves corner overlays for the task-2 test images into the three
    output directories.

    Returns:
        (PointsArray3D, ImageArray2D, CamMatrix, DistortionCoeff)
    """
    # Inner-corner grid of the calibration chessboard: 9 columns x 6 rows.
    n = 9
    m = 6
    # 3D board model: (x, y, 0) on a unit grid, one copy reused per image.
    Points3D = np.zeros((n*m, 3), np.float32)
    Points3D[:, :2] = np.mgrid[0: n, 0: m].T.reshape(-1, 2)
    PointsArray3D = []
    ImageArray2D = []
    # Pass 1: calibration images — keep only frames where the full board is found.
    for filename in os.listdir(CalibrateImagesDirectory):
        if filename.startswith(camera):
            image_filename = os.path.join(CalibrateImagesDirectory, filename)
            img = cv2.imread(image_filename)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            ret, corners = cv2.findChessboardCorners(gray, (n, m), None)
            if ret == True:
                PointsArray3D.append(Points3D)
                ImageArray2D.append(corners)
    # Intrinsics computed in task 1, stored per camera.
    CamMatrix = np.load('../../parameters/' + camera + 'CameraIntrinsics' + '/IntrinsicMatrix.npy')
    DistortionCoeff = np.load('../../parameters/' + camera + 'CameraIntrinsics' + '/DistortCoeff.npy')
    # Pass 2 (side effect): mark corners on the task-2 test images and seed
    # the three output directories with copies for the later remap steps.
    for filename in os.listdir(TestImagesDirectory):
        if filename.startswith(camera):
            image_filename = os.path.join(TestImagesDirectory, filename)
            img = cv2.imread(image_filename)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            ret, corners = cv2.findChessboardCorners(gray, (n, m), None)
            if ret == True:
                cv2.drawChessboardCorners(img, (n, m), corners, ret)
                cv2.imwrite(os.path.join(CornerPointsDirectory, filename), img)
                cv2.imwrite(os.path.join(UndistortedNotRectifiedDirectory, filename), img)
                cv2.imwrite(os.path.join(UndistortedAndRectifiedDirectory, filename), img)
    return PointsArray3D, ImageArray2D, CamMatrix, DistortionCoeff
def sterioCalibration():
    """Run the full stereo pipeline: calibrate, sanity-check, rectify, save.

    Uses the per-camera correspondences (intrinsics fixed via
    CALIB_FIX_INTRINSIC) to estimate the left->right extrinsics, triangulates
    one board as a visual check, computes rectification transforms, writes all
    parameter matrices to disk, and renders the undistorted / rectified test
    images.
    """
    print("Calibrating cameras...")
    PointsArray3D_left, ImageArray2D_left, CamMatrix_left, DistortionCoeff_left = extract3Dto2Dcorrespondence("left")
    PointsArray3D_right, ImageArray2D_right, CamMatrix_right, DistortionCoeff_right = extract3Dto2Dcorrespondence("right")
    # Intrinsics are fixed; only R, T, E, F between the cameras are estimated.
    ret, CamMatrix_left, DistortionCoeff_left, CamMatrix_right, DistortionCoeff_right, RotationMatrix, TranslationVector, EssentialMatrix, FundamentalMatrix = cv2.stereoCalibrate(PointsArray3D_left, ImageArray2D_left, ImageArray2D_right, CamMatrix_left, DistortionCoeff_left, CamMatrix_right, DistortionCoeff_right, (640, 480), flags = cv2.CALIB_FIX_INTRINSIC)
    # Sanity check: triangulate one board's corners and plot them in 3D.
    undistortedArray_left = undistort(ImageArray2D_left, CamMatrix_left, DistortionCoeff_left)
    undistortedArray_right = undistort(ImageArray2D_right, CamMatrix_right, DistortionCoeff_right)
    checkCalibration(undistortedArray_left, undistortedArray_right, RotationMatrix, TranslationVector)
    # Persist the stereo extrinsics.
    np.save('../../parameters/SterioCalibration/RotationMatrix', RotationMatrix)
    np.save('../../parameters/SterioCalibration/TranslationVector', TranslationVector)
    np.save('../../parameters/SterioCalibration/EssentialMatrix', EssentialMatrix)
    np.save('../../parameters/SterioCalibration/FundamentalMatrix', FundamentalMatrix)
    print("Rectifying...")
    # alpha=0.25 keeps most valid pixels while cropping some black border.
    RectifiedRotationMatrix_left, RectifiedRotationMatrix_right, RectifiedProjection_left, RectifiedProjection_right, depthMappingMatrix, roi_left, roi_right = cv2.stereoRectify(CamMatrix_left, DistortionCoeff_left, CamMatrix_right, DistortionCoeff_right, (640, 480), RotationMatrix, TranslationVector, alpha = 0.25, flags = cv2.CALIB_FIX_INTRINSIC)
    # Persist the rectification transforms and the disparity-to-depth matrix Q.
    np.save('../../parameters/SterioRectification/RectifiedRotationMatrix_left', RectifiedRotationMatrix_left)
    np.save('../../parameters/SterioRectification/RectifiedRotationMatrix_right', RectifiedRotationMatrix_right)
    np.save('../../parameters/SterioRectification/RectifiedProjection_left', RectifiedProjection_left)
    np.save('../../parameters/SterioRectification/RectifiedProjection_right', RectifiedProjection_right)
    np.save('../../parameters/SterioRectification/depthMappingMatrix', depthMappingMatrix)
    # Render the before/after images for both cameras.
    plotUndistortedNotRectified("left", CamMatrix_left, DistortionCoeff_left)
    plotUndistortedNotRectified("right", CamMatrix_right, DistortionCoeff_right)
    plotUndistortedAndRectified("left", CamMatrix_left, DistortionCoeff_left, RectifiedRotationMatrix_left, RectifiedProjection_left, roi_left)
    plotUndistortedAndRectified("right", CamMatrix_right, DistortionCoeff_right, RectifiedRotationMatrix_right, RectifiedProjection_right, roi_right)
    print("Done")
def undistort(ImageArray2D, CamMatrix, DistortionCoeff):
    """Undistort per-image corner coordinates and reshape them for triangulation.

    Each entry of ``ImageArray2D`` is an (N, 1, 2) corner array from
    findChessboardCorners; cv2.undistortPoints maps it to normalized image
    coordinates, which are then transposed into the (2, N) layout that
    cv2.triangulatePoints expects.

    Generalized: the reshape now uses -1 instead of the hard-coded 54
    (= 9*6 corners), so any chessboard size works; for a 9x6 board the
    result is identical.
    """
    undistortedArray = []
    for ImageArray in ImageArray2D:
        undistortedArray.append(cv2.undistortPoints(ImageArray, CamMatrix, DistortionCoeff))
    for i in range(len(undistortedArray)):
        undistortedArray[i] = undistortedArray[i].transpose().reshape(2, -1)
    return undistortedArray
def checkCalibration(undistortedArray_left, undistortedArray_right, RotationMatrix, TranslationVector):
    """Visually verify the stereo calibration.

    Triangulates the first image's board corners using the left camera as the
    world origin and the estimated (R, T) for the right camera, then saves a
    3D scatter of the recovered points with a camera marker at the origin.
    """
    # Left camera sits at the world origin: P1 = [I | 0]; right: P2 = [R | T].
    proj_left = np.concatenate((np.eye(3, 3), np.zeros((3, 1))), axis=1)
    proj_right = np.concatenate((RotationMatrix, TranslationVector), axis=1)
    homogeneous = cv2.triangulatePoints(projMatr1=proj_left, projMatr2=proj_right,
                                        projPoints1=undistortedArray_left[0],
                                        projPoints2=undistortedArray_right[0])
    # Dehomogenize: divide x, y, z by w.
    xs = homogeneous[0] / homogeneous[3]
    ys = homogeneous[1] / homogeneous[3]
    zs = homogeneous[2] / homogeneous[3]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xs, ys, zs)
    plot_camera(0, 0, 0, ax)
    plt.savefig('../../output/task_2/boardPoints3D.png')
def plotUndistortedNotRectified(camera, CamMatrix, DistortionCoeff):
    """Undistort (without rectifying) each saved test image for one camera,
    overwriting it in place in UndistortedNotRectifiedDirectory."""
    for fname in os.listdir(UndistortedNotRectifiedDirectory):
        if not fname.startswith(camera):
            continue
        path = os.path.join(UndistortedNotRectifiedDirectory, fname)
        img = cv2.imread(path)
        h, w = img.shape[:2]
        # Identity rotation: remove lens distortion only, no rectification.
        mapx, mapy = cv2.initUndistortRectifyMap(CamMatrix, DistortionCoeff, np.eye(3, 3), CamMatrix, (w, h), cv2.CV_16SC2)
        undistorted = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR, borderMode=cv2.BORDER_TRANSPARENT)
        cv2.imwrite(path, undistorted)
def plotUndistortedAndRectified(camera, CamMatrix, DistortionCoeff, RectifiedRotationMatrix, RectifiedProjection, roi):
    """Undistort AND rectify each saved test image for one camera, overwriting
    it in place in UndistortedAndRectifiedDirectory.

    ``roi`` is accepted for interface compatibility; cropping the image to the
    valid region of interest is currently disabled.
    """
    for fname in os.listdir(UndistortedAndRectifiedDirectory):
        if not fname.startswith(camera):
            continue
        path = os.path.join(UndistortedAndRectifiedDirectory, fname)
        img = cv2.imread(path)
        h, w = img.shape[:2]
        # Apply the stereoRectify rotation/projection so epipolar lines
        # become horizontal and row-aligned across the pair.
        mapx, mapy = cv2.initUndistortRectifyMap(CamMatrix, DistortionCoeff, RectifiedRotationMatrix, RectifiedProjection, (w, h), cv2.CV_32FC1)
        rectified = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR, borderMode=cv2.BORDER_TRANSPARENT)
        cv2.imwrite(path, rectified)
def plot_camera(f, tan_x, tan_y, ax):
    """Draw a wireframe camera frustum (apex at the origin) on a 3D axis.

    ``f`` scales the frustum; ``tan_x``/``tan_y`` set the half-angles of the
    viewing pyramid. The outline repeatedly returns to the apex so a single
    polyline traces all four side edges and the far rectangle.
    """
    rotation = np.identity(3)      # camera pose fixed at the world origin
    translation = np.zeros((3, 1))
    outline = np.asarray([[0, 0, 0], [tan_x, tan_y, 1], [tan_x, -tan_y, 1], [0, 0, 0], [tan_x, -tan_y, 1], [-tan_x, -tan_y, 1], [0, 0, 0], [-tan_x, -tan_y, 1], [-tan_x, tan_y, 1], [0, 0, 0], [-tan_x, tan_y, 1], [tan_x, tan_y, 1], [0, 0, 0]]).T
    outline = outline * f
    world = np.matmul(rotation, outline) + translation
    ax.plot(world[0, :], world[1, :], world[2, :], color='k', linewidth=2)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
if __name__ == '__main__':
    # Entry point: run the full stereo calibration + rectification pipeline.
    sterioCalibration()
| [
"suchit@asu.edu"
] | suchit@asu.edu |
37251f91b138c8ef98d57d8e1e0107a83c10e7d2 | 361756a29c63961fd02bd335aca629322b7989a7 | /Week 3/code/q1/spark-app.py | 459713353fa52e4fc715b7874ad7111b09db3b46 | [] | no_license | bbengfort/introduction-to-hadoop-and-spark | 67eadf923028cd53cfcec21fd1a521f6d5fe3569 | 14b9ebd87984277b2a02cdffad0db27082b4d3e9 | refs/heads/master | 2022-12-02T08:00:46.975122 | 2015-12-01T20:37:59 | 2015-12-01T20:37:59 | 46,567,192 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,402 | py | #!/usr/bin/env python
# spark-app.py
# A Spark application that computes the MSE of a linear model on test data.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Thu Nov 12 07:29:58 2015 -0500
"""
A Spark application that computes the MSE of a linear model on test data.
"""
##########################################################################
## Imports
##########################################################################
import sys
import csv
from functools import partial
from StringIO import StringIO
from pyspark import SparkConf, SparkContext
##########################################################################
## Global Variables
##########################################################################
APP_NAME = "MSE of Blog Comments Regression"
##########################################################################
## Helper functions
##########################################################################
def parse(line):
"""
Splits the line on a CSV and parses it into floats. Returns a tuple of:
(X, y) where X is the vector of independent variables and y is the target
(dependent) variable; in this case the last item in the row.
"""
reader = csv.reader(StringIO(line))
row = [float(x) for x in reader.next()]
return (tuple(row[:-1]), row[-1])
def cost(row, coef, intercept):
"""
Computes the square error given the row.
"""
X, y = row # extract the dependent and independent vals from the tuple.
# Compute the predicted value based on the linear model
yhat = sum([b*x for (b,x) in zip(coef.value, X)]) + intercept.value
# Compute the square error of the prediction
return (y - yhat) ** 2
##########################################################################
## Primary Analysis and Main Method
##########################################################################
def main(sc):
"""
Primary analysis mechanism for Spark application
"""
# Load coefficients and intercept from local file
coef = []
intercept = None
# Load the parameters from the text file
with open('params.txt', 'r') as params:
# Read the file and split on new lines and parse into floats
data = [
float(row.strip())
for row in params.read().split("\n")
if row.strip()
]
coef = data[:-1] # Everything but the last value are the thetas (coefficients)
intercept = data[-1] # The last value is the intercept
# Broadcast the parameters across the Spark cluster
# Note that this data is small enough you could have used a closure
coef = sc.broadcast(coef)
intercept = sc.broadcast(intercept)
# Create an accumulator to sum the squared error
sum_square_error = sc.accumulator(0)
# Load and parse the blog data from HDFS and insert into an RDD
blogs = sc.textFile("blogData").map(parse)
# Map the cost function and accumulate the sum.
error = blogs.map(partial(cost, coef=coef, intercept=intercept))
error.foreach(lambda cost: sum_square_error.add(cost))
# Print and compute the mean.
print sum_square_error.value / error.count()
if __name__ == '__main__':
# Configure Spark
conf = SparkConf().setAppName(APP_NAME)
sc = SparkContext(conf=conf)
# Execute Main functionality
main(sc)
| [
"benjamin@bengfort.com"
] | benjamin@bengfort.com |
4bf1ae5633fdc3997c406cd68914e0913d8ca8bc | 40046dc3ddd6458b5d00259430b9ed283c396bb6 | /polls/views.py | f8ded8831b31784ddcea5e9ffbb98be562a58de0 | [] | no_license | mayela/mysite | 6050f465e66a30149f42d14a14ae39868c7f29ac | c06e96789fe8e6eff3f9dc823834857eadfb09b0 | refs/heads/master | 2020-02-26T15:45:13.970320 | 2016-10-14T01:17:11 | 2016-10-14T01:17:11 | 70,834,365 | 0 | 1 | null | 2016-10-13T18:33:45 | 2016-10-13T18:07:42 | Python | UTF-8 | Python | false | false | 1,498 | py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from .models import Question, Choice
def index(request):
    """Render the five most recently published questions."""
    recent_questions = Question.objects.order_by('-pub_date')[:5]
    return render(request, 'polls/index.html', {'latest_question_list': recent_questions})
def detail(request, question_id):
    """Show the voting form for one question; 404 if it does not exist."""
    q = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/detail.html', {'question': q})
def results(request, question_id):
    """Show the vote tallies for one question; 404 if it does not exist."""
    q = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/results.html', {'question': q})
def vote(request, question_id):
    """Record a POSTed vote for one of the question's choices.

    On success redirects to the results page; on a missing/invalid choice
    redisplays the form with an error message.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # Bug fix: `votes += 1` in Python loses votes when two requests race
        # (read-modify-write). F('votes') + 1 pushes the increment into the
        # database, which applies it atomically.
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| [
"msanchez@nyxtechnology.com"
] | msanchez@nyxtechnology.com |
4ed043179eca6f4607079e4daf5b02187ec1c8c9 | 0a8bcc7ffdc143d82a351c84f46676a7b4564d1c | /app/config/settings.py | 9f887d232fa278cc393cb78be4e73a11b8807bb1 | [] | no_license | orca9s/ex_class | 354339aaddd882f4f294c3941784d3378769d084 | 0e4c76326226f6bb397c16d94c37aa45ec2973a6 | refs/heads/master | 2020-04-16T01:23:12.751879 | 2019-01-19T09:35:43 | 2019-01-19T09:35:43 | 165,171,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,095 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a SECRET_KEY committed to version control is compromised;
# rotate it and load it from the environment before any production deploy.
SECRET_KEY = 'qs8_d7^nhjgcl#jpxux$^-(&obs_=vvrfv4qzvhab^*cqrd46i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable only while DEBUG is True; production requires the
# served hostnames to be listed explicitly.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"sang93423@gmail.com"
] | sang93423@gmail.com |
8d3e89f29ab6f0f26aaa91f1761c77fcda05d345 | cb89e1ae18b2934df42264b9361fc5c80a76c8f8 | /March-LeetCoding-Challenge-2021/Set Mismatch.py | 321d57a661b7fcd983816ca235fea747938c3841 | [] | no_license | chintanaprabhu/leetcode | 3f4fab58c5c0f282a444a8b19c6ceeb372c07627 | fbf78cd0fe0a4b0f3feb86b3ce81495f70165eda | refs/heads/master | 2021-10-30T15:48:36.197553 | 2021-10-22T00:08:20 | 2021-10-22T00:08:20 | 220,151,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | class Solution:
def findErrorNums(self, nums: List[int]) -> List[int]:
if not nums:
return
maximum = nums[0]
actualSum = 0
numTable = {}
for num in nums:
if num in numTable:
dup = num
else:
numTable[num] = 1
actualSum += num
n = len(nums)
expSum = n * (n + 1) // 2
missing = expSum - (actualSum - dup)
return [dup, missing]
| [
"noreply@github.com"
] | chintanaprabhu.noreply@github.com |
ddcb81b7107bc2f0f5e4137a87c2a92e27eb7225 | 65df38059db317f87a37337c7c7eec8bf2a6cc2f | /ind.py | 9298ac24140c7424b7ccc658a05c0cc27c1e6ffa | [] | no_license | cujeu/quantocean | 10a5a5b24343433504e5847e455f7dedeeca82fa | 85f5f08192b136b65136d0cb145026cb81b3f56c | refs/heads/master | 2021-06-27T13:02:28.305045 | 2019-06-18T03:10:51 | 2019-06-18T03:10:51 | 137,292,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,465 | py | """
Indicators as shown by Peter Bakker at:
https://www.quantopian.com/posts/technical-analysis-indicators-without-talib-code
"""
"""
25-Mar-2018: Fixed syntax to support the newest version of Pandas. Warnings should no longer appear.
Fixed some bugs regarding min_periods and NaN.
If you find any bugs, please report to github.com/palmbook
"""
# Import Built-Ins
import logging
# Import Third-Party
import pandas as pd
import numpy as np
# Import Homebrew
# Init Logging Facilities
log = logging.getLogger(__name__)
def moving_average(df, n, column_name = None):
    """Append an n-period simple moving average column ('MA_<n>') to df.

    :param df: pandas.DataFrame with the price column
    :param n: window length; the first n-1 rows are NaN
    :param column_name: price column to average (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    col = 'close_price' if column_name is None else column_name
    rolling_mean = df[col].rolling(n, min_periods=n).mean()
    return df.join(rolling_mean.rename('MA_' + str(n)))
def exponential_moving_average(df, n, column_name = None):
    """Append an n-span exponential moving average column ('EMA_<n>') to df.

    :param df: pandas.DataFrame with the price column
    :param n: EWM span; values before n observations are NaN
    :param column_name: price column to smooth (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    col = 'close_price' if column_name is None else column_name
    ema = df[col].ewm(span=n, min_periods=n).mean()
    return df.join(ema.rename('EMA_' + str(n)))
def momentum(df, n, column_name = None):
    """Append an n-period momentum (price difference) column ('Momentum_<n>').

    :param df: pandas.DataFrame with the price column
    :param n: lookback distance in rows
    :param column_name: price column to difference (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    col = 'close_price' if column_name is None else column_name
    return df.join(df[col].diff(n).rename('Momentum_' + str(n)))
def rate_of_change(df, n, column_name = None):
    """Append an (n-1)-period rate-of-change column ('ROC_<n>') to df.

    ROC = diff(n-1) / shift(n-1), i.e. fractional change versus n-1 rows back.

    :param df: pandas.DataFrame with the price column
    :param n: nominal period (the effective lag is n-1, matching the
        original implementation)
    :param column_name: price column (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    col = 'close_price' if column_name is None else column_name
    change = df[col].diff(n - 1)
    baseline = df[col].shift(n - 1)
    return df.join((change / baseline).rename('ROC_' + str(n)))
def average_true_range(df, n, column_name = None):
    """Append an n-span EWM average true range column ('ATR_<n>') to df.

    True range per row i (i > 0) is
    max(high[i], close[i-1]) - min(low[i], close[i-1]); the first row's TR
    is fixed at 0.0, matching the original loop implementation.

    Improvement: the per-row Python ``.loc`` loop (plus leftover commented-out
    debris) is replaced by an equivalent vectorized computation.

    :param df: pandas.DataFrame with 'high_price', 'low_price' and the close column
    :param n: EWM span for the ATR smoothing
    :param column_name: close column (defaults to 'close_price')
    :return: df with the 'ATR_<n>' column assigned in place
    """
    if column_name is None:
        column_name = 'close_price'
    prev_close = df[column_name].shift(1)
    # max/min across columns skip the NaN prev_close on row 0; that row is
    # overwritten with 0.0 below to match the original TR_l = [0.0] seed.
    true_range = (pd.concat([df['high_price'], prev_close], axis=1).max(axis=1)
                  - pd.concat([df['low_price'], prev_close], axis=1).min(axis=1))
    true_range.iloc[0] = 0.0
    atr = true_range.ewm(span=n, min_periods=n).mean()
    df['ATR_' + str(n)] = atr.values
    return df
def bollinger_bands_v2(df, n, dist = None, column_name = None):
    """Append a normalized Bollinger width ('BollingerB_<n>') and a %b-style
    position column ('Bollinger%b_<n>') to df.

    :param df: pandas.DataFrame with the price column
    :param n: rolling window length
    :param dist: band half-width in standard deviations (defaults to 2.0)
    :param column_name: price column (defaults to 'close_price')
    :return: a new DataFrame with both extra columns
    """
    if column_name is None:
        column_name = 'close_price'
    if dist is None:
        dist = 2.0
    mean = df[column_name].rolling(n, min_periods=n).mean()
    sd = df[column_name].rolling(n, min_periods=n).std()
    width = (4 * sd / mean).rename('BollingerB_' + str(n))
    percent_b = ((df[column_name] - mean + dist * sd) / (4 * sd)).rename('Bollinger%b_' + str(n))
    return df.join(width).join(percent_b)
def bollinger_bands(df, n, dist = None, column_name = None):
    """Append classic Bollinger bands: 'BBUpper_<n>' = MA + dist*std and
    'BBLower_<n>' = MA - dist*std.

    :param df: pandas.DataFrame with the price column
    :param n: rolling window length
    :param dist: band half-width in standard deviations (defaults to 2.0)
    :param column_name: price column (defaults to 'close_price')
    :return: a new DataFrame with both band columns
    """
    if column_name is None:
        column_name = 'close_price'
    if dist is None:
        dist = 2.0
    mean = df[column_name].rolling(n, min_periods=n).mean()
    sd = df[column_name].rolling(n, min_periods=n).std()
    upper = (mean + dist * sd).rename('BBUpper_' + str(n))
    lower = (mean - dist * sd).rename('BBLower_' + str(n))
    return df.join(upper).join(lower)
def ppsr(df, column_name = None):
    """Append the classic floor-trader pivot point (PP) plus three resistance
    (R1-R3) and three support (S1-S3) levels to df.

    :param df: pandas.DataFrame with 'high_price', 'low_price' and the close column
    :param column_name: close column (defaults to 'close_price')
    :return: a new DataFrame with the seven extra columns
    """
    if column_name is None:
        column_name = 'close_price'
    pivot = (df['high_price'] + df['low_price'] + df[column_name]) / 3
    levels = pd.DataFrame({
        'PP': pivot,
        'R1': 2 * pivot - df['low_price'],
        'S1': 2 * pivot - df['high_price'],
        'R2': pivot + df['high_price'] - df['low_price'],
        'S2': pivot - df['high_price'] + df['low_price'],
        'R3': df['high_price'] + 2 * (pivot - df['low_price']),
        'S3': df['low_price'] - 2 * (df['high_price'] - pivot),
    })
    return df.join(levels)
def stochastic_oscillator_k(df, column_name = None):
    """Append the raw stochastic %K column ('stochK') to df:
    (close - low) / (high - low), computed per row.

    :param df: pandas.DataFrame with 'high_price', 'low_price' and the close column
    :param column_name: close column (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    if column_name is None:
        column_name = 'close_price'
    span = df['high_price'] - df['low_price']
    so_k = ((df[column_name] - df['low_price']) / span).rename('stochK')
    return df.join(so_k)
def stochastic_oscillator_d(df, n, column_name = None):
    """Append the EWM-smoothed stochastic %D column ('stochD<n>') to df.

    Raw %K is computed per row and then smoothed with an n-span EWM mean.

    :param df: pandas.DataFrame with 'high_price', 'low_price' and the close column
    :param n: EWM span for the smoothing
    :param column_name: close column (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    if column_name is None:
        column_name = 'close_price'
    raw_k = (df[column_name] - df['low_price']) / (df['high_price'] - df['low_price'])
    so_d = raw_k.ewm(span=n, min_periods=n).mean().rename('stochD' + str(n))
    return df.join(so_d)
def trix(df, n, column_name = None):
    """Append the TRIX column ('Trix_<n>'): the 1-period rate of change of a
    triple-smoothed (three chained n-span EWMs) price series.

    :param df: pandas.DataFrame with the price column
    :param n: EWM span used for all three smoothing passes
    :param column_name: price column (defaults to 'close_price')
    :return: df with the 'Trix_<n>' column assigned in place
    """
    if column_name is None:
        column_name = 'close_price'
    smoothed = df[column_name].ewm(span=n, min_periods=n).mean()
    smoothed = smoothed.ewm(span=n, min_periods=n).mean()
    smoothed = smoothed.ewm(span=n, min_periods=n).mean()
    # (x[i] - x[i-1]) / x[i-1]; the first row and any not-yet-defined EWM
    # rows come out as NaN, matching the original loop.
    one_period_roc = smoothed.diff(1) / smoothed.shift(1)
    df['Trix_' + str(n)] = one_period_roc.values
    return df
def average_directional_movement_index(df, n, n_ADX, column_name = None):
    """Calculate the Average Directional Movement Index (ADX) for given data.

    Directional moves and the true range are accumulated row by row, smoothed
    with n-span EWMs into +DI/-DI, and the normalized |+DI - -DI| / (+DI + -DI)
    is smoothed again over n_ADX to produce 'ADX_<n>_<n_ADX>'.

    :param df: pandas.DataFrame with 'high_price', 'low_price' and the close column
    :param n: EWM span for the DI smoothing
    :param n_ADX: EWM span for the final ADX smoothing
    :param column_name: close column (defaults to 'close_price')
    :return: pandas.DataFrame (df with the ADX column assigned in place)
    """
    if column_name is None:
        column_name = 'close_price'
    i = 0
    # Seed both directional-move lists with 0 so they align with row 0.
    UpI = [0]
    DoI = [0]
    while i < (len(df.index)-1):
        UpMove = df.loc[df.index[i + 1], 'high_price'] - df.loc[df.index[i], 'high_price']
        # NOTE(review): -DM here is low[i+1] - low[i]; classic Wilder uses
        # low[i] - low[i+1] (as the relative_strength_index function below
        # does) — confirm which variant is intended.
        DoMove = df.loc[df.index[i + 1], 'low_price'] - df.loc[df.index[i], 'low_price']
        if UpMove > DoMove and UpMove > 0:
            UpD = UpMove
        else:
            UpD = 0
        UpI.append(UpD)
        if DoMove > UpMove and DoMove > 0:
            DoD = DoMove
        else:
            DoD = 0
        DoI.append(DoD)
        i = i + 1
    i = 0
    # True range per row: max(high, prev close) - min(low, prev close).
    TR_l = [0]
    while i < (len(df.index)-1):
        TR = max(df.loc[df.index[i + 1], 'high_price'], df.loc[df.index[i], column_name]);
        TR = TR - min(df.loc[df.index[i + 1], 'low_price'], df.loc[df.index[i], column_name])
        TR_l.append(TR)
        i = i + 1
    TR_s = pd.Series(TR_l)
    ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean())
    UpI = pd.Series(UpI)
    DoI = pd.Series(DoI)
    # Positive/negative directional indicators normalized by the ATR.
    PosDI = pd.Series(UpI.ewm(span=n, min_periods=n).mean() / ATR)
    NegDI = pd.Series(DoI.ewm(span=n, min_periods=n).mean() / ATR)
    ##ADX = pd.Series((abs(PosDI - NegDI) / (PosDI + NegDI)).ewm(span=n_ADX, min_periods=n_ADX).mean(),
    ##                name='ADX_' + str(n) + '_' + str(n_ADX))
    ##df = df.join(ADX)
    ADX = pd.Series((abs(PosDI - NegDI) / (PosDI + NegDI)).ewm(span=n_ADX, min_periods=n_ADX).mean())
    df['ADX_' + str(n) + '_' + str(n_ADX)] = ADX.values
    return df
def macd(df, n_fast, n_slow, column_name = None):
    """Append MACD line, signal line and histogram columns to df
    ('MACD_<fast>_<slow>', 'MACDsign_<fast>_<slow>', 'MACDdiff_<fast>_<slow>').

    :param df: pandas.DataFrame with the price column
    :param n_fast: span of the fast EWM
    :param n_slow: span of the slow EWM (also used as min_periods for both)
    :param column_name: price column (defaults to 'close_price')
    :return: a new DataFrame with the three extra columns
    """
    if column_name is None:
        column_name = 'close_price'
    suffix = '_' + str(n_fast) + '_' + str(n_slow)
    ema_fast = df[column_name].ewm(span=n_fast, min_periods=n_slow).mean()
    ema_slow = df[column_name].ewm(span=n_slow, min_periods=n_slow).mean()
    macd_line = (ema_fast - ema_slow).rename('MACD' + suffix)
    # Fixed 9-span signal line, as in the classic MACD definition.
    signal_line = macd_line.ewm(span=9, min_periods=9).mean().rename('MACDsign' + suffix)
    histogram = (macd_line - signal_line).rename('MACDdiff' + suffix)
    return df.join(macd_line).join(signal_line).join(histogram)
def mass_index(df):
    """Append the Mass Index column ('Mass Index') to df: the 25-period
    rolling sum of the ratio of a single to a double 9-span EWM of the
    high-low range.

    :param df: pandas.DataFrame with 'high_price' and 'low_price'
    :return: a new DataFrame with the extra column
    """
    price_range = df['high_price'] - df['low_price']
    single_ema = price_range.ewm(span=9, min_periods=9).mean()
    double_ema = single_ema.ewm(span=9, min_periods=9).mean()
    mass = (single_ema / double_ema).rolling(25).sum().rename('Mass Index')
    return df.join(mass)
def vortex_indicator(df, n, column_name = None):
    """Calculate the Vortex Indicator for given data.

    Vortex Indicator described here:
    http://www.vortexindicator.com/VFX_VORTEX.PDF

    The n-period rolling sum of the vortex movement (|high - prev low| -
    |low - prev high|) is divided by the rolling sum of the true range and
    stored as 'Vortex_<n>'.

    :param df: pandas.DataFrame with 'high_price', 'low_price' and the close column
    :param n: rolling-sum window
    :param column_name: close column (defaults to 'close_price')
    :return: pandas.DataFrame (df with the column assigned in place)
    """
    if column_name is None:
        column_name = 'close_price'
    i = 0
    # True range per row, seeded with 0 to align with row 0.
    TR = [0]
    while i < (len(df.index)-1):
        Range = max(df.loc[df.index[i + 1], 'high_price'], df.loc[df.index[i], column_name])
        Range = Range - min(df.loc[df.index[i + 1], 'low_price'], df.loc[df.index[i], column_name])
        TR.append(Range)
        i = i + 1
    i = 0
    # Vortex movement per row: |high - prev low| - |low - prev high|.
    VM = [0]
    while i < (len(df.index)-1):
        Range = abs(df.loc[df.index[i + 1], 'high_price'] - df.loc[df.index[i], 'low_price'])
        Range = Range - abs(df.loc[df.index[i + 1], 'low_price'] - df.loc[df.index[i], 'high_price'])
        VM.append(Range)
        i = i + 1
    ##VI = pd.Series(pd.Series(VM).rolling(n).sum() / pd.Series(TR).rolling(n).sum(), name='Vortex_' + str(n))
    ##df = df.join(VI)
    VI = pd.Series(pd.Series(VM).rolling(n).sum() / pd.Series(TR).rolling(n).sum())
    df['Vortex_' + str(n)] = VI.values
    return df
def kst_oscillator(df, r1, r2, r3, r4, n1, n2, n3, n4, column_name = None):
    """Append the KST (Know Sure Thing) oscillator column to df.

    Four rate-of-change series (lags r1-1 .. r4-1) are summed over rolling
    windows n1..n4 with weights 1..4 into
    'KST_<r1>_<r2>_<r3>_<r4>_<n1>_<n2>_<n3>_<n4>'.

    :param df: pandas.DataFrame with the price column
    :param r1: lag of the first ROC series (effective lag r1-1); likewise r2-r4
    :param n1: rolling-sum window of the first ROC series; likewise n2-n4
    :param column_name: price column (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    # Bug fix: unlike every other indicator in this module, this function
    # never defaulted column_name, so calling it without the argument made
    # df[None] fail. Default to 'close_price' for consistency.
    if column_name is None:
        column_name = 'close_price'
    M = df[column_name].diff(r1 - 1)
    N = df[column_name].shift(r1 - 1)
    ROC1 = M / N
    M = df[column_name].diff(r2 - 1)
    N = df[column_name].shift(r2 - 1)
    ROC2 = M / N
    M = df[column_name].diff(r3 - 1)
    N = df[column_name].shift(r3 - 1)
    ROC3 = M / N
    M = df[column_name].diff(r4 - 1)
    N = df[column_name].shift(r4 - 1)
    ROC4 = M / N
    KST = pd.Series(
        ROC1.rolling(n1).sum() + ROC2.rolling(n2).sum() * 2 + ROC3.rolling(n3).sum() * 3 + ROC4.rolling(n4).sum() * 4,
        name='KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(
            n2) + '_' + str(n3) + '_' + str(n4))
    df = df.join(KST)
    return df
def relative_strength_index(df, n, column_name = None):
    """Calculate Relative Strength Index(RSI) for given data.

    Upward moves are taken from successive highs and downward moves from
    successive lows (a directional-movement variant rather than close-to-close
    changes), smoothed with n-span EWMs, and combined as
    PosDI / (PosDI + NegDI). Note the result 'RSI_<n>' is on a 0-1 scale,
    not the conventional 0-100.

    :param df: pandas.DataFrame with 'high_price' and 'low_price'
    :param n: EWM span for the smoothing
    :param column_name: accepted for interface consistency; defaulted but the
        close column is not actually used by the computation below
    :return: pandas.DataFrame (df with the column assigned in place)
    """
    if column_name is None:
        column_name = 'close_price'
    i = 0
    UpI = [0]
    DoI = [0]
    while i < (len(df.index)-1):
        UpMove = df.loc[df.index[i+1], 'high_price'] - df.loc[df.index[i], 'high_price']
        DoMove = df.loc[df.index[i], 'low_price'] - df.loc[df.index[i+1], 'low_price']
        # Only the dominant, positive move counts; the other side gets 0.
        if UpMove > DoMove and UpMove > 0:
            UpD = UpMove
        else:
            UpD = 0
        UpI.append(UpD)
        if DoMove > UpMove and DoMove > 0:
            DoD = DoMove
        else:
            DoD = 0
        DoI.append(DoD)
        i = i + 1
    UpI = pd.Series(UpI)
    DoI = pd.Series(DoI)
    PosDI = pd.Series(UpI.ewm(span=n, min_periods=n).mean())
    NegDI = pd.Series(DoI.ewm(span=n, min_periods=n).mean())
    ##RSI = pd.Series(PosDI / (PosDI + NegDI), name='RSI_' + str(n))
    ##df = df.join(RSI)
    RSI = pd.Series(PosDI / (PosDI + NegDI))
    df['RSI_' + str(n)] = RSI.values
    return df
def true_strength_index(df, r, s, column_name = None):
    """Append the True Strength Index column ('TSI_<r>_<s>') to df: the
    double-EWM-smoothed 1-period price change divided by the equally smoothed
    absolute change.

    :param df: pandas.DataFrame with the price column
    :param r: span of the first smoothing pass
    :param s: span of the second smoothing pass
    :param column_name: price column (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    if column_name is None:
        column_name = 'close_price'
    delta = df[column_name].diff(1)
    abs_delta = delta.abs()
    smoothed = delta.ewm(span=r, min_periods=r).mean().ewm(span=s, min_periods=s).mean()
    abs_smoothed = abs_delta.ewm(span=r, min_periods=r).mean().ewm(span=s, min_periods=s).mean()
    tsi = (smoothed / abs_smoothed).rename('TSI_' + str(r) + '_' + str(s))
    return df.join(tsi)
def accumulation_distribution(df, n, column_name = None):
    """Append the (n-1)-period rate of change of the accumulation/distribution
    line ('Acc/Dist_ROC_<n>') to df.

    The A/D line is the close-location value
    (2*close - high - low) / (high - low) multiplied by volume.

    :param df: pandas.DataFrame with 'high_price', 'low_price', 'volume'
        and the close column
    :param n: nominal ROC period (effective lag n-1)
    :param column_name: close column (defaults to 'close_price')
    :return: df with the column assigned in place
    """
    if column_name is None:
        column_name = 'close_price'
    clv = (2 * df[column_name] - df['high_price'] - df['low_price']) / (df['high_price'] - df['low_price'])
    ad_line = clv * df['volume']
    roc = ad_line.diff(n - 1) / ad_line.shift(n - 1)
    df['Acc/Dist_ROC_' + str(n)] = roc.values
    return df
def chaikin_oscillator(df, column_name = None):
    """Append the Chaikin oscillator column ('Chaikin') to df: the 3-span EWM
    of the accumulation/distribution line minus its 10-span EWM.

    :param df: pandas.DataFrame with 'high_price', 'low_price', 'volume'
        and the close column
    :param column_name: close column (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    if column_name is None:
        column_name = 'close_price'
    ad_line = (2 * df[column_name] - df['high_price'] - df['low_price']) / (df['high_price'] - df['low_price']) * df['volume']
    fast = ad_line.ewm(span=3, min_periods=3).mean()
    slow = ad_line.ewm(span=10, min_periods=10).mean()
    return df.join((fast - slow).rename('Chaikin'))
def money_flow_index(df, n, column_name = None):
    """Calculate Money Flow Index and Ratio for given data.

    Builds a per-row positive-money-flow ratio (typical price * volume versus
    the previous row, and only when the typical price rose) and stores its
    n-period rolling mean as 'MFI_<n>'.

    NOTE(review): this is not the textbook MFI (which uses
    100 - 100/(1 + positive/negative money-flow sums)); confirm the variant
    implemented here is intended.

    :param df: pandas.DataFrame with 'high_price', 'low_price', 'volume'
        and the close column
    :param n: rolling-mean window
    :param column_name: close column (defaults to 'close_price')
    :return: pandas.DataFrame (df with the column assigned in place)
    """
    if column_name is None:
        column_name = 'close_price'
    # Typical price per row.
    PP = (df['high_price'] + df['low_price'] + df[column_name]) / 3
    i = 0
    PosMF = [0]
    while i < (len(df.index)-1):
        PosMF1 = 0
        # Only rows where the typical price increased contribute.
        if PP[i + 1] > PP[i]:
            PosMF1 = PP[i + 1] * df.loc[df.index[i+1], 'volume']
            PosMF1 = PosMF1 / (PP[i] * df.loc[df.index[i], 'volume'])
        PosMF.append(PosMF1)
        i = i + 1
    #TotMF = PP * df['volume']
    #MFR = pd.Series(PosMF_ser / TotMF)
    MFI = pd.Series(pd.Series(PosMF).rolling(n, min_periods=n).mean())
    df['MFI_' + str(n)] = MFI.values
    return df
def on_balance_volume(df, n, column_name = None):
    """Append the n-period rolling mean of signed volume ('OBV_<n>') to df.

    Each row contributes +volume, -volume or 0 depending on whether the
    price rose, fell or was flat versus the previous row; the first row
    contributes 0.

    :param df: pandas.DataFrame with 'volume' and the price column
    :param n: rolling-mean window
    :param column_name: price column (defaults to 'close_price')
    :return: df with the column assigned in place
    """
    if column_name is None:
        column_name = 'close_price'
    # sign(diff) is +1/0/-1 per row and NaN on the first row; the NaN is
    # replaced with 0 to match the seeded [0] of the loop formulation.
    direction = np.sign(df[column_name].diff())
    signed_volume = (direction * df['volume']).fillna(0)
    obv_ma = pd.Series(signed_volume.values).rolling(n, min_periods=n).mean()
    df['OBV_' + str(n)] = obv_ma.values
    return df
def force_index(df, n, column_name = None):
    """Append the Force Index column ('Force_<n>') to df: the n-period price
    change multiplied by the n-period volume change.

    :param df: pandas.DataFrame with 'volume' and the price column
    :param n: difference period
    :param column_name: price column (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    if column_name is None:
        column_name = 'close_price'
    force = (df[column_name].diff(n) * df['volume'].diff(n)).rename('Force_' + str(n))
    return df.join(force)
def ease_of_movement(df, n, column_name = None):
    """Append the n-period rolling mean of the Ease-of-Movement value
    ('EoM_<n>') to df.

    :param df: pandas.DataFrame with 'high_price', 'low_price' and 'volume'
    :param n: rolling-mean window
    :param column_name: accepted for interface consistency; defaulted but
        not used by the computation below
    :return: a new DataFrame with the extra column
    """
    if column_name is None:
        column_name = 'close_price'
    hl_range = df['high_price'] - df['low_price']
    midpoint_move = (df['high_price'] + df['low_price']) - (df['high_price'].diff(1) + df['low_price'].diff(1))
    eom = hl_range * midpoint_move / (2 * df['volume'])
    eom_ma = eom.rolling(n, min_periods=n).mean().rename('EoM_' + str(n))
    return df.join(eom_ma)
def commodity_channel_index(df, n, column_name = None):
    """Append the Commodity Channel Index column ('CCI_<n>') to df:
    (typical price - rolling mean) / rolling std, scaled by 1/0.015.

    :param df: pandas.DataFrame with 'high_price', 'low_price' and the close column
    :param n: rolling window
    :param column_name: close column (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    if column_name is None:
        column_name = 'close_price'
    ratio = 0.015
    typical = (df['high_price'] + df['low_price'] + df[column_name]) / 3
    deviation = typical - typical.rolling(n, min_periods=n).mean()
    cci = (deviation / typical.rolling(n, min_periods=n).std()).rename('CCI_' + str(n)).divide(ratio)
    return df.join(cci)
def coppock_curve(df, n, column_name = None):
    """Append the Coppock Curve column ('Copp_<n>') to df: an n-span EWM of
    the sum of two rate-of-change series at lags int(1.1n)-1 and int(1.4n)-1.

    :param df: pandas.DataFrame with the price column
    :param n: base period (also the EWM span)
    :param column_name: price column (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    if column_name is None:
        column_name = 'close_price'
    short_lag = int(n * 11 / 10) - 1
    long_lag = int(n * 14 / 10) - 1
    roc_short = df[column_name].diff(short_lag) / df[column_name].shift(short_lag)
    roc_long = df[column_name].diff(long_lag) / df[column_name].shift(long_lag)
    copp = (roc_short + roc_long).ewm(span=n, min_periods=n).mean().rename('Copp_' + str(n))
    return df.join(copp)
def keltner_channel(df, n, column_name = None):
    """Append Keltner channel middle/upper/lower columns to df
    ('KelChM_<n>', 'KelChU_<n>', 'KelChD_<n>'), each an n-period rolling mean
    of a weighted high/low/close combination.

    :param df: pandas.DataFrame with 'high_price', 'low_price' and the close column
    :param n: rolling window
    :param column_name: close column (defaults to 'close_price')
    :return: a new DataFrame with the three extra columns
    """
    if column_name is None:
        column_name = 'close_price'
    middle = ((df['high_price'] + df['low_price'] + df[column_name]) / 3
              ).rolling(n, min_periods=n).mean().rename('KelChM_' + str(n))
    upper = ((4 * df['high_price'] - 2 * df['low_price'] + df[column_name]) / 3
             ).rolling(n, min_periods=n).mean().rename('KelChU_' + str(n))
    lower = ((-2 * df['high_price'] + 4 * df['low_price'] + df[column_name]) / 3
             ).rolling(n, min_periods=n).mean().rename('KelChD_' + str(n))
    return df.join(middle).join(upper).join(lower)
def ultimate_oscillator(df, column_name = None):
    """Calculate Ultimate Oscillator for given data.

    Accumulates per-row buying pressure (close - min(low, prev close)) and
    true range, then combines 7/14/28-period rolling-sum ratios with weights
    4/2/1 and stores the weighted average as 'Ultimate_Osc'.

    :param df: pandas.DataFrame with 'high_price', 'low_price' and the close column
    :param column_name: close column (defaults to 'close_price')
    :return: pandas.DataFrame (df with the column assigned in place)
    """
    if column_name is None:
        column_name = 'close_price'
    i = 0
    # Seed both accumulators with 0 so they align with row 0.
    TR_l = [0]
    BP_l = [0]
    while i < (len(df.index)-1):
        TR = max(df.loc[df.index[i + 1], 'high_price'], df.loc[df.index[i], column_name])
        TR = TR - min(df.loc[df.index[i + 1], 'low_price'], df.loc[df.index[i], column_name])
        TR_l.append(TR)
        BP = df.loc[df.index[i + 1], column_name]
        BP = BP - min(df.loc[df.index[i + 1], 'low_price'], df.loc[df.index[i], column_name])
        BP_l.append(BP)
        i = i + 1
    # Classic 4:2:1 weighting of the three lookback windows.
    rates = 4+2+1
    UltO = pd.Series(((4 * pd.Series(BP_l).rolling(7).sum() / pd.Series(TR_l).rolling(7).sum()) + (
        2 * pd.Series(BP_l).rolling(14).sum() / pd.Series(TR_l).rolling(14).sum()) + (
        pd.Series(BP_l).rolling(28).sum() / pd.Series(TR_l).rolling(28).sum())) / rates)
    df['Ultimate_Osc'] = UltO.values
    return df
def donchian_channel(df, n, column_name = None):
    """Append the n-period Donchian channel width ('Donchian_<n>') to df:
    max(high) - min(low) over each n-row window, padded with n leading zeros
    and shifted by n-1 rows (preserving the original alignment).

    Bug fix: the original indexed with ``.ix``, which was deprecated and then
    removed from pandas (>= 1.0), so the function crashed on any current
    pandas. ``.iloc[i:i + n]`` selects the same positional window
    [i, i + n - 1].

    :param df: pandas.DataFrame with 'high_price' and 'low_price'
    :param n: window length
    :param column_name: accepted for interface consistency; defaulted but
        not used by the computation below
    :return: df with the column assigned in place
    """
    if column_name is None:
        column_name = 'close_price'
    dc_l = [0] * n
    i = 0
    while (i + n - 1) < (len(df.index)-1):
        dc = max(df['high_price'].iloc[i:i + n]) - min(df['low_price'].iloc[i:i + n])
        dc_l.append(dc)
        i += 1
    donchian_chan = pd.Series(dc_l)
    donchian_chan = donchian_chan.shift(n - 1)
    df['Donchian_' + str(n)] = donchian_chan.values
    return df
def standard_deviation(df, n, column_name = None):
    """Append the n-period rolling (sample) standard deviation column
    ('STD_<n>') to df.

    :param df: pandas.DataFrame with the price column
    :param n: rolling window; the first n-1 rows are NaN
    :param column_name: price column (defaults to 'close_price')
    :return: a new DataFrame with the extra column
    """
    if column_name is None:
        column_name = 'close_price'
    rolling_std = df[column_name].rolling(n, min_periods=n).std()
    return df.join(rolling_std.rename('STD_' + str(n)))
| [
"noreply@github.com"
] | cujeu.noreply@github.com |
bc6fc44f74c3620fd9e7b7d0a2ee996258b7e087 | 2346aac932096d7161591afc8f07105eba6de558 | /chapter25_maskrcnn/object_detection_example.py | ea42017f0adf198b95c31e44d4fdb41ffd878eae | [] | no_license | cheeyeo/deep_learning_computer_vision | 3436ac568539bd9ad060c9d81542e82c42e40ff2 | 44fb74e70e7d40717214cd2a0ac2aa6d3bbf5b58 | refs/heads/master | 2020-07-17T00:17:14.112988 | 2019-10-29T15:39:18 | 2019-10-29T15:39:18 | 205,898,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | # Example on object detection using Mask R-CNN Library
# Uses a pre-trained Mask R-CNN model trained on MSCOCO dataset
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from mrcnn.visualize import display_instances
from mrcnn.model import MaskRCNN
import os
import argparse
from utils import draw_image_with_boxes, load_coco_classes
from config import TestConfig
# Command-line interface: input image, model weights path, and a flag that
# switches to the library's instance-segmentation visualisation.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str, required=True, help="Image to perform object recognition on.")
ap.add_argument("-m", "--model", default="data/mask_rcnn_coco.h5", type=str, help="Model weights for Mask R-CNN model.")
ap.add_argument("-o", "--object-detection", action="store_true", help="Perform object detection using Mask R-CNN model.")
args = vars(ap.parse_args())
# Define and load model
rcnn = MaskRCNN(mode='inference', model_dir='./', config=TestConfig())
rcnn.load_weights(args["model"], by_name=True)
# Run a single inference pass over the requested image.
img = load_img(args["image"])
img_pixels = img_to_array(img)
results = rcnn.detect([img_pixels], verbose=0)
r = results[0]
if args["object_detection"]:
    print("[INFO] Performing object detection using display_instances...")
    # define 81 classes that the coco model knowns about
    class_names = load_coco_classes('data/coco_classes.txt')
    display_instances(img_pixels, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])
else:
    draw_image_with_boxes(img, r['rois'])
    print('[INFO] Saving image with bounding boxes')
    # NOTE(review): os.path.join('out', args["image"]) re-creates any directory
    # part of the input path under 'out/' — confirm that is intended for
    # images given with nested paths.
    img.save(os.path.join('out', args["image"]))
"ckyeo.1@gmail.com"
] | ckyeo.1@gmail.com |
549cc65c2506f0da66011a56b9a6366d3337f073 | 228deb8ea529813fc2d850b1595bd6a71616ce18 | /gamemap.py | 34aa374363b0a1239fac45a8eb11038990ea3fa7 | [] | no_license | johnyburd/hull-breach | a4ddee7b9b8f8557eaf7b439182099ea22a242f0 | 38b374ce3f75169caa667bae8e540bf6fa30d169 | refs/heads/master | 2020-04-14T06:45:51.283424 | 2019-01-04T21:32:09 | 2019-01-04T21:32:09 | 163,695,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | import random
from entities import Entity
class Tile(Entity):
    """A single map tile: an Entity with an attached sprite image."""
    def __init__(self, image, name, x, y):
        super().__init__(name, x, y)
        self.image = image  # sprite/surface drawn for this tile
class Level(object):
    """One level == one screen: a grid of terrain tiles (``self.array``) plus
    a parallel grid of entity ids (``self.entitylist``), both indexed [y][x].
    The empty-cell marker is the empty string ""."""

    def __init__(self, width, height):
        """Build a width x height level with random grass/dirt terrain and a
        starter "dagger" entity at (1, 1)."""
        self.height = height
        self.width = width
        # entitylist[y][x] holds an entity id string; "" means empty.
        self.entitylist = [["" for _ in range(self.width)]
                           for _ in range(self.height)]
        self.set_entity(1, 1, "dagger")
        # Terrain: roughly 1 in 11 tiles is dirt, the rest grass.
        self.array = [["dirt" if random.randint(0, 10) == 1 else "grass"
                       for _ in range(self.width)]
                      for _ in range(self.height)]

    def set_entity(self, x, y, entity_id):
        """Place entity_id at grid position (x, y)."""
        self.entitylist[y][x] = entity_id

    def move_entity(self, startx, starty, endx, endy):
        """Move the entity at (startx, starty) to (endx, endy) and empty the
        source cell.

        Bug fix: the original referenced a nonexistent module-level
        ``entitylist`` (NameError at runtime) and indexed the source cell as
        [x][y] while every other access uses [y][x]; the source is now
        cleared to "" to match the grid's empty-cell convention.
        """
        self.entitylist[endy][endx] = self.entitylist[starty][startx]
        self.entitylist[starty][startx] = ""

    def print_array(self):
        """Print the entity grid row by row (note: prints entitylist, not
        the terrain array, matching the original behavior)."""
        for row in self.entitylist:
            for cell in row:
                print(cell, end=" ")
            print()
print()
class Map(object):
    """levels is an array of levels

    Holds the ordered list of Level screens and the index of the level the
    player currently occupies.
    """
    def __init__(self, width, height):
        # Start with a single generated level of the requested size.
        level = Level(width,height)
        self.levels = [level]
        self.current_level = 0 # the level that the player is currently on
    def get_level(self):
        """Return the Level object the player is currently on."""
        return self.levels[self.current_level]
"stevenman42@gmail.com"
] | stevenman42@gmail.com |
71de1566bbaa47c6045df19df096ec489fc4644d | 61bbf24bd02dd52bbb03339cf62fc22a71f9e6be | /addresses/models.py | 9d14291c6edb8e560194318027132bea8270c67c | [] | no_license | stradtkt/Django-Ecommerce | 6bd90f47295439ba92c5a105592aed9fd494affd | afba9f2a5abf979ca5ed4338f0f6ae028928d441 | refs/heads/master | 2020-04-06T15:56:58.198223 | 2018-11-15T20:56:25 | 2018-11-15T20:56:25 | 157,598,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | from django.db import models
from billing.models import BillingProfile
ADDRESS_TYPES = (
('billing', 'Billing'),
('shipping', 'Shipping'),
)
class Address(models.Model):
    """A billing or shipping address attached to a BillingProfile."""
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0;
    # on 2.0+ this raises TypeError — confirm the target Django version.
    billing_profile = models.ForeignKey(BillingProfile, related_name="profile")
    # One of ADDRESS_TYPES: 'billing' or 'shipping'.
    address_type = models.CharField(max_length=120, choices=ADDRESS_TYPES)
    address_line_1 = models.CharField(max_length=120)
    address_line_2 = models.CharField(max_length=120, null=True, blank=True)
    city = models.CharField(max_length=120)
    country = models.CharField(max_length=120, default='United States of America')
    state = models.CharField(max_length=120)
    postal_code = models.CharField(max_length=120)
    def __str__(self):
        # Displayed in the admin; delegates to the related billing profile.
        return str(self.billing_profile)
    def get_address(self):
        """Return the address formatted as a multi-line postal block."""
        return "{line1}\n{line2}\n{city}\n{state}, {postal}\n{country}".format(
            line1 = self.address_line_1,
            line2 = self.address_line_2 or "",
            city = self.city,
            state = self.state,
            postal= self.postal_code,
            country = self.country
        )
| [
"stradtkt@gmail.com"
] | stradtkt@gmail.com |
f742e2b05ad7d83f7a083d3085ad260e52a54a8b | 5bb4a8ca63d2a6230145c431991086e4dca2238d | /static/img/views.py | 5218d812a96ff2c6825b426cdbd6b02acb2e8891 | [] | no_license | netlabreal/agency_s | ee8f8e01125a1097ca1d78812d6709d93db9e838 | d3a3efd1e1bf22d130a898ae46e43559b0f4f89d | refs/heads/master | 2020-04-21T12:25:11.025955 | 2019-02-05T23:27:40 | 2019-02-05T23:27:40 | 169,561,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,718 | py | from django.shortcuts import render, HttpResponse, render_to_response
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Object, Types, Typ, Rayon, News
from django.views.generic.list import ListView
from django.views.generic import DetailView
import random
def index(request):
    """Render the home page with the filter dropdown data, two random news
    items and six random objects.

    Cleanup: removed the commented-out manual pagination block that had been
    left above the queries.
    """
    types_list = Types.objects.all()
    typs_list = Typ.objects.all()
    ray_list = Rayon.objects.all()
    news_list = News.objects.all().order_by('?')[:2]
    random_objects = Object.objects.all().order_by('?')[:6]
    return render(
        request,
        'index.html',
        {
            'types': types_list,
            'typs': typs_list,
            'ray': ray_list,
            'news': news_list,
            'random': random_objects,
        },
    )
class Robject(DetailView):
    """Detail page for a single real-estate Object, with three random
    same-room-count objects and two random news items in the context."""
    queryset = Object.objects.all()
    template_name = "object.html"
    def get_object(self, queryset=None):
        obj = None
        if queryset is None:
            queryset = self.get_queryset()
        queryset = queryset.filter(pk=self.kwargs.get("pk"))
        print(queryset)
        try:
            obj = queryset.get()
        # NOTE(review): swallowing the lookup failure returns None instead of
        # a 404; DetailView will then fail later with a less clear error —
        # consider raising Http404 here.
        except Exception:
            print(queryset.model._meta.verbose_name)
        return obj
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Similar objects: same room count, excluding the one being viewed.
        rez = Object.objects.filter(komnat=self.object.komnat)
        news_list = News.objects.all().order_by('?')[:2]
        rez = rez.exclude(id=self.object.id)
        #number_of_records = rez.objects.count()
        #k = [random.choice(rez) for k in range(3)]
        context['rez'] = rez.order_by('?')[:3]
        context['news'] = news_list
        return context
class Nobject(DetailView):
    """Detail page for a single News item, with two random news items added
    to the context for the sidebar."""
    queryset = News.objects.all()
    template_name = "news.html"
    def get_object(self, queryset=None):
        obj = None
        if queryset is None:
            queryset = self.get_queryset()
        queryset = queryset.filter(pk=self.kwargs.get("pk"))
        try:
            obj = queryset.get()
        # NOTE(review): as in Robject, a failed lookup silently returns None
        # rather than raising Http404 — consider surfacing a 404 instead.
        except Exception:
            print(queryset.model._meta.verbose_name)
        return obj
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['news'] = News.objects.all().order_by('?')[:2]
        return context
def objects(request, pk):
    """Render the object-list template (pk is accepted but unused)."""
    return render_to_response("objects.html")
def about(request):
    """Render the about page with two random news items for the sidebar."""
    return render_to_response("about.html",{'news': News.objects.all().order_by('?')[:2]})
class AllObjects(ListView):
    # Paginated catalogue of Object records with optional filters taken from
    # the GET parameters p1..p5 (parsed in get_queryset below).
    model = Object
    paginate_by = 9
    template_name = 'objects.html'
    context_object_name = 'objects'
    # Filter state defaults. These are *class* attributes; get_queryset
    # assigns instance attributes over them per request, and get_context_data
    # reads them afterwards (ListView.get calls get_queryset first).
    # v1/v2 = price range bounds, s1/s2 = area range bounds.
    type = typ = ray = cost = s = 0
    v1 = s1 = 0
    v2 = 25000000
    s2 = 250
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super(AllObjects, self).get_context_data(**kwargs)
        context['types_list'] = Types.objects.all()
        context['typs_list'] = Typ.objects.all()
        context['ray_list'] = Rayon.objects.all()
        context['news'] = News.objects.all().order_by('?')[:2]
        # Echo the active filters back so the search form can be re-populated.
        context['search_params'] = (self.type, self.typ, self.ray, self.cost, self.s,)
        return context
    def get_queryset(self):
        k = Object.objects.all().order_by("pk")
        # p1..p3: numeric ids for type / subtype / district; absent or 0
        # means "any".
        if self.request.GET.get('p1'):
            self.type = int(self.request.GET.get('p1'))
        if self.request.GET.get('p2'):
            self.typ = int(self.request.GET.get('p2'))
        if self.request.GET.get('p3'):
            self.ray = int(self.request.GET.get('p3'))
        # p4: price range as "min,max"; p5: area range as "min,max".
        # '0' means the filter is inactive and the class defaults stay.
        if self.request.GET.get('p4'):
            self.cost = self.request.GET.get('p4')
            if self.cost != '0' and self.cost != 0:
                self.v1, self.v2 = self.cost.split(',')
        if self.request.GET.get('p5'):
            self.s = self.request.GET.get('p5')
            if self.s != 0 and self.s != '0':
                self.s1, self.s2 = self.s.split(',')
        if self.type != 0: k = k.filter(type__id=self.type)
        if self.typ != 0: k = k.filter(typ__id=self.typ)
        if self.ray != 0: k = k.filter(rayon__id=self.ray)
        # Range filters apply only when they differ from the defaults
        # (0..25000000 for price, 0..250 for area).
        if int(self.v1) != 0 or int(self.v2) != 25000000: k = k.filter(cost__gte=int(self.v1), cost__lte=int(self.v2))
        if int(self.s1) != 0 or int(self.s2) != 250: k = k.filter(s__gte=int(self.s1), s__lte=int(self.s2))
        return k
| [
"vasa@example.com"
] | vasa@example.com |
7808d0a8114536dd7636e0b310a0f8808eca4bc5 | e88ec2df4fe8b9e233d4a41066a052be2fde77ad | /assignment4/A4latex/extractLinksSnippet.py | 55678d28dd4063b560b48dfa11965a54c00440d2 | [] | no_license | anwala/cs595-f14 | c837061c6ac908ba5ef59c2b39c7150988562ef7 | 064053e20c9fbb6f2ebf03b8102ebf50d143ea86 | refs/heads/master | 2021-01-21T19:20:39.656116 | 2014-12-12T12:21:42 | 2014-12-12T12:21:42 | 23,893,100 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | #extract all children URIs
...
deleteIfFileEmptyFlag = True
for link in soup.find_all('a'):
potentialURI = link.get('href')
try:
if( timeoutValueInSeconds > 0 ):
response = urllib2.urlopen(potentialURI, timeout=timeoutValueInSeconds)
else:
response = urllib2.urlopen(potentialURI)
except:
continue
... | [
"anwala@cs.odu.edu"
] | anwala@cs.odu.edu |
4b564031e62fdef5ccdfa2a64f3c21ed58f7a6b4 | f16dbf2fbb7b51f91cd63c942108d66f014667d8 | /venv/Lib/site-packages/tencentcloud/drm/v20181115/drm_client.py | 97f93c8461aad7c0e0cd8bc06d6e63adb81ab766 | [] | no_license | gtinlian/txcloud_eip | 34c2fafc785a0e9b87cc82418d1ccd44155f42e4 | fdf64343443dd5bea165bdb6eaea7d1612536932 | refs/heads/master | 2021-01-27T03:11:27.894700 | 2020-02-27T08:45:13 | 2020-02-27T08:45:13 | 243,471,635 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,383 | py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.drm.v20181115 import models
class DrmClient(AbstractClient):
    """Client for the Tencent Cloud DRM API (version 2018-11-15).

    Every public action follows the same serialize / call / deserialize
    pattern, which is factored into :meth:`_invoke` instead of being
    duplicated per method.
    """
    _apiVersion = '2018-11-15'
    _endpoint = 'drm.tencentcloudapi.com'

    def _invoke(self, action, request, response_cls):
        """Serialize *request*, call the API *action* and deserialize the reply.

        :param action: name of the API action, e.g. ``"CreateLicense"``.
        :param request: request model object providing ``_serialize()``.
        :param response_cls: response model class to deserialize into.
        :raises TencentCloudSDKException: when the API reports an error or a
            local failure occurs while performing the call.
        """
        try:
            params = request._serialize()
            body = self.call(action, params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = response_cls()
                model._deserialize(response["Response"])
                return model
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
        except TencentCloudSDKException:
            raise
        except Exception as e:
            # Python 2 exceptions exposed ``.message``; on Python 3 that
            # attribute does not exist, so fall back to str(e) instead of
            # raising a masking AttributeError.
            message = getattr(e, 'message', str(e))
            raise TencentCloudSDKException(message, message)

    def AddFairPlayPem(self, request):
        """Set the private key, key secret, ASK and related information
        required by the FairPlay scheme. The private key must be set before
        FairPlay can be used.

        :param request: :class:`tencentcloud.drm.v20181115.models.AddFairPlayPemRequest`
        :rtype: :class:`tencentcloud.drm.v20181115.models.AddFairPlayPemResponse`
        """
        return self._invoke("AddFairPlayPem", request, models.AddFairPlayPemResponse)

    def CreateEncryptKeys(self, request):
        """Set the encryption keys. Note: keys can be set only once for a
        given content id.

        :param request: :class:`tencentcloud.drm.v20181115.models.CreateEncryptKeysRequest`
        :rtype: :class:`tencentcloud.drm.v20181115.models.CreateEncryptKeysResponse`
        """
        return self._invoke("CreateEncryptKeys", request, models.CreateEncryptKeysResponse)

    def CreateLicense(self, request):
        """Generate the playback license for a DRM scheme. The caller
        provides the DRM scheme type and content type; the backend returns
        the license data. Developers should forward license requests issued
        by end devices.

        :param request: :class:`tencentcloud.drm.v20181115.models.CreateLicenseRequest`
        :rtype: :class:`tencentcloud.drm.v20181115.models.CreateLicenseResponse`
        """
        return self._invoke("CreateLicense", request, models.CreateLicenseResponse)

    def DeleteFairPlayPem(self, request):
        """Delete the FairPlay private key, ASK and related information.
        High-risk operation: after deletion the Tencent Cloud DRM FairPlay
        service can no longer be used. Because of caching, deletion takes
        about half an hour to take effect.

        :param request: :class:`tencentcloud.drm.v20181115.models.DeleteFairPlayPemRequest`
        :rtype: :class:`tencentcloud.drm.v20181115.models.DeleteFairPlayPemResponse`
        """
        return self._invoke("DeleteFairPlayPem", request, models.DeleteFairPlayPemResponse)

    def DescribeAllKeys(self, request):
        """Query all encryption keys for the given DRM type and ContentType.

        :param request: :class:`tencentcloud.drm.v20181115.models.DescribeAllKeysRequest`
        :rtype: :class:`tencentcloud.drm.v20181115.models.DescribeAllKeysResponse`
        """
        return self._invoke("DescribeAllKeys", request, models.DescribeAllKeysResponse)

    def DescribeFairPlayPem(self, request):
        """Query the verification info of the configured FairPlay private
        key; can be used to check that the configured key matches the
        original one.

        :param request: :class:`tencentcloud.drm.v20181115.models.DescribeFairPlayPemRequest`
        :rtype: :class:`tencentcloud.drm.v20181115.models.DescribeFairPlayPemResponse`
        """
        return self._invoke("DescribeFairPlayPem", request, models.DescribeFairPlayPemResponse)

    def DescribeKeys(self, request):
        """Return the encryption keys for the given DRM type and track
        types. If the ContentID has no associated keys yet, new keys are
        generated and returned automatically.

        :param request: :class:`tencentcloud.drm.v20181115.models.DescribeKeysRequest`
        :rtype: :class:`tencentcloud.drm.v20181115.models.DescribeKeysResponse`
        """
        return self._invoke("DescribeKeys", request, models.DescribeKeysResponse)

    def ModifyFairPlayPem(self, request):
        """Modify the FairPlay private key, key secret, ASK and related
        information.

        :param request: :class:`tencentcloud.drm.v20181115.models.ModifyFairPlayPemRequest`
        :rtype: :class:`tencentcloud.drm.v20181115.models.ModifyFairPlayPemResponse`
        """
        return self._invoke("ModifyFairPlayPem", request, models.ModifyFairPlayPemResponse)

    def StartEncryption(self, request):
        """Start one DRM encryption workflow for a content file.

        :param request: :class:`tencentcloud.drm.v20181115.models.StartEncryptionRequest`
        :rtype: :class:`tencentcloud.drm.v20181115.models.StartEncryptionResponse`
        """
        return self._invoke("StartEncryption", request, models.StartEncryptionResponse)
"gtinlian@sina.com"
] | gtinlian@sina.com |
4ce8ec9202b8397fa82b809428c17b9b50ed4cfa | 501ca55431b019c057dff9659a6928f1f1434187 | /model_builder/TWASFormat.py | 08f43e5daccaf93e20acd0954e88b8559c0329f8 | [] | no_license | Heroico/CStuff | aa9dc00da62109e6842b56034ebaa1283d2e455f | 0aae3912ce40e6a7fb4c2fa4ed342f43a1d9e09f | refs/heads/master | 2020-04-13T22:13:02.357431 | 2017-08-04T15:38:44 | 2017-08-04T15:38:44 | 51,172,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | import numpy
import os
def build_subpaths(folder, subfolder):
    """Return ``folder/subfolder/subfolder`` (the subfolder name doubled)."""
    return os.path.join(folder, subfolder, subfolder)
class MTF(object):
    # Column indices of the tuples produced by load_map (.map file layout):
    # SNP identifier, position, first allele, second allele.
    snp=0
    pos=1
    a1=2
    a2=3
def load_map(path):
    """Parse a whitespace-delimited SNP map file.

    The first line is treated as a header and skipped; every following line
    contributes a ``(snp, pos, a1, a2)`` tuple built from its first four
    columns (extra columns are ignored).
    """
    snps = []
    with open(path) as source:
        next(source, None)  # skip the header row
        for line in source:
            fields = line.strip().split()
            snps.append((fields[0], fields[1], fields[2], fields[3]))
    return snps
def load_cor(path):
    """Read one float per line from *path* and return them as a list."""
    with open(path) as source:
        return [float(line.strip()) for line in source]
def load_ld(path):
    """Load a whitespace-delimited LD matrix and regularize its diagonal.

    Each line of the file is one matrix row. 0.01 is added to every diagonal
    entry so that the matrix is safely invertible downstream.
    """
    with open(path) as source:
        rows = [[float(value) for value in line.strip().split()] for line in source]
    matrix = numpy.array(rows)
    for k in range(min(matrix.shape)):
        matrix[k, k] += 0.01
    return matrix
def build_weights(sub_path):
    """Compute TWAS weights for the fileset rooted at *sub_path*.

    Reads ``<sub_path>.wgt.cor`` (correlations) and ``<sub_path>.wgt.ld``
    (LD matrix) and combines them via :func:`calculate_weights`.
    """
    correlations = load_cor(sub_path + ".wgt.cor")
    ld_matrix = load_ld(sub_path + ".wgt.ld")
    return calculate_weights(correlations, ld_matrix)
def calculate_weights(cors, ld):
    """Return ``cors . inv(ld)``: weights from correlations and LD matrix."""
    return numpy.dot(cors, numpy.linalg.inv(ld))
"soyunheroe@gmail.com"
] | soyunheroe@gmail.com |
2dcf86b8d3b334a27a7962ae098f62af4a037e83 | e0cfb71a4268367fab77253a2460714a16e830aa | /doctorbot/website/views.py | 32da7106dfec332ef5bf99c76e01b6ff6d1f540a | [
"MIT"
] | permissive | zuxfoucault/DoctorBot_demo | 79b40548dfd5f34b0f2ccb7857e9377610394608 | 82e24078da4d2e6caba728b959812401109e014d | refs/heads/master | 2020-04-24T01:10:17.010551 | 2019-02-20T02:57:57 | 2019-02-20T02:57:57 | 171,589,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | from django.http import HttpResponse
from django.shortcuts import render_to_response
from rest_framework import generics
from rest_framework import permissions
from rest_framework.decorators import api_view, permission_classes
# Create your views here.
@api_view(['GET'])
def index_view(requests):
    # Serves the static landing page.
    # NOTE(review): the parameter is conventionally named ``request``, and
    # render_to_response was removed in Django 3.0 — confirm the pinned
    # Django version before upgrading.
    return render_to_response('index.html')
| [
"zuxfoucault@gmail.com"
] | zuxfoucault@gmail.com |
1bb353798bb598808d70858c162b571ce8670bb6 | 1441ff8d0c6cac31fb6c2477569ea7ab74288a6d | /jiabin/admin.py | 5ce38c68eb13c1b48066fd04278864b46cde6509 | [] | no_license | hooops/jinbin_plus | 9a899a81f020ec190737e845553a5c7320f83229 | 01d6915d45b9e39236da91e83172c62c1ccf4c7d | refs/heads/master | 2016-09-06T08:15:52.894005 | 2015-07-03T07:25:08 | 2015-07-03T07:25:08 | 38,478,851 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,739 | py | #! -*- coding:utf-8 -*-
from jiabin.models import jiabin_m,send_guest_invitation
#from new_event.models import NewEventTable
from admin_self.common import NewformatEvent
from django.contrib import admin
class jiabin_mAdmin(admin.ModelAdmin):
    # Admin configuration for guest (jiabin_m) records. Most methods below
    # exist only to give the list_display columns localized headers via
    # ``short_description``; they simply proxy model attributes.
    search_fields =('id','username','cat_event_id')
    def id(self,obj):
        return obj.id
    id.short_description = 'id'
    def username(self,obj):
        return obj.username
    username.short_description = '名称'
    def introduce(self,obj):
        return obj.introduce
    introduce.short_description = '简介'
    def baikeURL_s(self,obj):
        # Link to a Baidu Baike search for the guest's name.
        baike=obj.username
        return '<a href="http://baike.baidu.com/search/word?word=%s" target="_blank">%s</a>' % (baike, u'百度百科')
    baikeURL_s.short_description = '百度百科'
    baikeURL_s.allow_tags = True
    def homeurl(self,obj):
        # Link to the event page on huodongjia.com (plain text, no allow_tags).
        event=obj.cat_event_id
        return '<a href="http://www.huodongjia.com/event-%s.html" target="_blank">%s</a>' \
               % (event, event)
    homeurl.short_description = '活动家'
    def cat_event_id_new(self,obj):
        # Event link rendered with the formatted event id; falls back to an
        # empty string when formatting fails.
        try:
            ev=NewformatEvent(None,obj.cat_event_id)
            return '<a href="http://www.huodongjia.com/event-%s.html" target="_blank">%s</a>' % (obj.cat_event_id,ev['id'])
        except:
            return ''
    cat_event_id_new.short_description = '活动id'
    cat_event_id_new.allow_tags = True
    def jiabin_id(self,obj):
        return obj.jiabin_id
    jiabin_id.short_description = '活动关联id'
    def cat(self,obj):
        return obj.cat
    cat.short_description = '类别'
    def title(self,obj):
        return obj.title
    title.short_description = '活动名称'
    def begin_time(self,obj):
        return obj.create_time
    begin_time.short_description = '创建时间'
    def end_time(self,obj):
        return obj.rel_time
    end_time.short_description = '最后编辑时间'
    def recommend(self,obj):
        # NOTE(review): returns ``obj.imgs`` although the column header says
        # "recommended" — confirm this field mapping is intended.
        return obj.imgs
    recommend.short_description = '是否推荐'
    def company(self,obj):
        return obj.company
    company.short_description = '公司'
    def position(self,obj):
        return obj.position
    position.short_description = '职位'
    raw_id_fields = ['jiabin_id']
    list_display=('id','username','company','position','introduce','baikeURL_s','cat_event_id_new','cat','title','begin_time','end_time','recommend')
admin.site.register(jiabin_m,jiabin_mAdmin)
class send_guest_invitation_admin(admin.ModelAdmin):
    # Admin for guest-invitation records. The proxy methods below only exist
    # to name the list_display columns via ``short_description``.
    list_display=('id','in_guest','in_company','in_meeting','event_id','in_name','in_mobilphone','in_message')
    search_fields =('id','in_company','in_meeting','event_id','in_name','in_mobilphone','in_message','in_guest')
    def in_guest(self,obj):
        return obj.in_guest
    in_guest.short_description = 'in_guest'
    def id(self,obj):
        return obj.id
    id.short_description = 'id'
    def in_company(self,obj):
        return obj.in_company
    in_company.short_description = 'in_company'
    def in_meeting(self,obj):
        return obj.in_meeting
    in_meeting.short_description = 'in_meeting'
    def event_id(self,obj):
        return obj.event_id
    event_id.short_description = 'event_id'
    def in_name(self,obj):
        return obj.in_name
    in_name.short_description = 'in_name'
    def in_mobilphone(self,obj):
        return obj.in_mobilphone
    in_mobilphone.short_description = 'in_mobilphone'
    def in_message(self,obj):
        return obj.in_message
    in_message.short_description = 'in_message'
    # raw_id_fields is disabled; enable to use a raw-id widget for event_id.
    # raw_id_fields = ['event_id']
admin.site.register(send_guest_invitation,send_guest_invitation_admin)
#admin.site.register(NewEventTable)
"hooops@qq.com"
] | hooops@qq.com |
8679ba4c0ea19491ac5b5e1d286512c8175a31c0 | 52261a4a48af2d10fa04057c8bed2006b3f8c897 | /ex7_k_means_pca/kMeansInitCentroids.py | ce75a4b727e77a94ba099f68f54e62dec949dccb | [] | no_license | oribro/MLCoursera | fec8f2dd81117a8b741b509b15310a033532f7e6 | f45773a011932be1ee256ebe480b89885dcd0485 | refs/heads/master | 2020-03-27T10:44:54.876080 | 2018-10-19T22:05:39 | 2018-10-19T22:05:39 | 146,442,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | # %KMEANSINITCENTROIDS This function initializes K centroids that are to be
# %used in K-Means on the dataset X
# % centroids = KMEANSINITCENTROIDS(X, K) returns K initial centroids to be
# % used with the K-Means on the dataset X
# %
#
# % You should return this values correctly
# centroids = zeros(K, size(X, 2));
#
# % ====================== YOUR CODE HERE ======================
# % Instructions: You should set centroids to randomly chosen examples from
# % the dataset X
# %
#
#
#
#
#
#
#
#
# % =============================================================
#
# end
import numpy as np
def kMeansInitCentroids(X, K):
    """Choose K distinct rows of X, uniformly at random, as initial centroids.

    Implements the standard K-means initialization: shuffle the example
    indices and take the first K examples.
    """
    m, n = X.shape
    shuffled_indices = np.random.permutation(m)
    return X[shuffled_indices[:K], :]
| [
"orib@pcentra.com"
] | orib@pcentra.com |
d135c72038a9c0c01be8b4b8ae588403decf6726 | a9b05f3de50bf287b914d4786537cc81a208eaf8 | /preprocessing/migrations/0001_initial.py | 6d47579c623896aa6d24f9f404c75fbffc4f2935 | [] | no_license | 15101538237ren/AccidentsPrediction | 21b23ee60ca1bf8f7aee12f515db046f0bd94799 | b0248c9fc8c1c5018f79083adc4c2b8130e2dba0 | refs/heads/master | 2018-11-04T18:27:54.049460 | 2018-01-09T13:25:48 | 2018-01-09T13:25:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration: creates the App_Incidence, Call_Incidence and
    # Violation tables (verbose_name values are UTF-8 byte strings holding
    # Chinese labels). Auto-generated by Django's makemigrations — avoid
    # hand-editing.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='App_Incidence',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('removed', models.DateTimeField(default=None, null=True, editable=False, blank=True)),
                ('longitude', models.DecimalField(verbose_name=b'\xe7\xbb\x8f\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('latitude', models.DecimalField(verbose_name=b'\xe7\xba\xac\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('place', models.TextField(verbose_name=b'\xe5\x9c\xb0\xe7\x82\xb9')),
                ('create_time', models.DateTimeField(verbose_name=b'\xe4\xb8\xbe\xe6\x8a\xa5\xe6\x97\xb6\xe9\x97\xb4')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Call_Incidence',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('removed', models.DateTimeField(default=None, null=True, editable=False, blank=True)),
                ('create_time', models.DateTimeField(verbose_name=b'122\xe6\x8a\xa5\xe8\xad\xa6\xe6\x97\xb6\xe9\x97\xb4')),
                ('longitude', models.DecimalField(verbose_name=b'\xe7\xbb\x8f\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('latitude', models.DecimalField(verbose_name=b'\xe7\xba\xac\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('place', models.TextField(verbose_name=b'\xe5\x9c\xb0\xe7\x82\xb9')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Violation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('removed', models.DateTimeField(default=None, null=True, editable=False, blank=True)),
                ('longitude', models.DecimalField(verbose_name=b'\xe7\xbb\x8f\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('latitude', models.DecimalField(verbose_name=b'\xe7\xba\xac\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('create_time', models.DateTimeField(verbose_name=b'\xe4\xb8\xbe\xe6\x8a\xa5\xe6\x97\xb6\xe9\x97\xb4')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"renhongleiz@126.com"
] | renhongleiz@126.com |
39f20b69aac765749dce3c577325b4782d937cad | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03855/s443395866.py | 9abfb8ce858f7f0b73fac8a310592f783ae12145 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | from sys import stdin
import sys
import math
from functools import reduce
import functools
import itertools
from collections import deque,Counter
from operator import mul
import copy
# Competitive-programming solution: n vertices, k road edges, l rail edges.
n, k, l = list(map(int, input().split()))
# Adjacency lists for the two undirected graphs; vertices are 1-based,
# index 0 is unused.
road = [[] for i in range(n+1)]
rail = [[] for i in range(n+1)]
for i in range(k):
    p, q = list(map(int, input().split()))
    road[p].append(q)
    road[q].append(p)
for i in range(l):
    r, s = list(map(int, input().split()))
    rail[r].append(s)
    rail[s].append(r)
seen = [0 for i in range(n+1)]
def dfs_stack(u, al, al_c, d):
    # Iterative DFS from ``u`` over adjacency list ``al``; labels every
    # vertex of the reached component with component id ``d`` in ``al_c``.
    # Reads and mutates the module-level ``seen`` array.
    stack = deque([u])
    seen[u] = 1
    while len(stack) > 0:
        v = stack.pop()
        ###
        al_c[v] = d
        ###
        for w in al[v]:
            if seen[w] == 0:
                stack.append(w)
                seen[w] = 1
        if stack == []: break
# Label connected components of the road graph.
road_c = [-1 for i in range(n+1)]
rail_c = [-1 for i in range(n+1)]
d = 0
for i in range(1,n+1):
    if seen[i] == 0:
        dfs_stack(i, road, road_c, d)
        d += 1
# Reset ``seen`` and label the components of the rail graph the same way.
seen = [0 for i in range(n+1)]
d = 0
for i in range(1,n+1):
    if seen[i] == 0:
        dfs_stack(i, rail, rail_c, d)
        d += 1
# Group vertices by their (road component, rail component) pair: vertices
# sharing both components are mutually reachable by road AND by rail.
# NOTE(review): the name ``dict`` shadows the builtin.
dict = {}
for i in range(1, n+1):
    if (road_c[i], rail_c[i]) not in dict:
        dict[(road_c[i], rail_c[i])] = [i]
    else:
        dict[(road_c[i], rail_c[i])].append(i)
# Each vertex's answer is the size of its group. Every vertex belongs to
# some group, so every ``0`` placeholder is replaced by a str before join.
ans = [0 for i in range(n+1)]
for dd in dict.items():
    for j in dd[1]:
        ans[j] = str(len(dd[1]))
print(' '.join(ans[1:]))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b1e24074f03e48083803e0356c0db6a3cca3b954 | 0ef1a852f052879fb7733ce02c3722fa97324c80 | /cogs/chucknorris.py | 0be66903acddf2c4dcda0d4169b70ff075c40ae6 | [
"MIT"
] | permissive | mwestdev/globalbot | 4805f27a209d5717aa79bb36cb3bfd734108d746 | 827c567ada94e63f34c87174f00d2444f716f0a0 | refs/heads/master | 2021-06-18T11:27:03.931739 | 2017-06-15T09:51:28 | 2017-06-15T09:51:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | from discord.ext import commands
import discord
import aiohttp
class Chucknorris:
    """Discord cog that posts a random Chuck Norris joke from api.icndb.com."""

    def __init__(self, bot):
        self.bot = bot
        self.session = aiohttp.ClientSession(loop=bot.loop)

    def __unload(self):
        # Called by discord.py when the cog is unloaded; close the HTTP
        # session so the underlying connector is not leaked.
        self.session.close()

    @commands.command()
    async def chucknorris(self):
        """Fetch one random joke and post it to the channel."""
        url = 'http://api.icndb.com/jokes/random'
        async with self.session.get(url) as r:
            js = await r.json()
        joke_string = js['value']['joke']
        # The API HTML-escapes double quotes; restore them. (The previous
        # ``str.replace`` call had a mangled first argument and did not
        # parse.)
        new_str = joke_string.replace("&quot;", '"')
        await self.bot.say(new_str)
def setup(bot):
    # discord.py extension hook: called by bot.load_extension to register
    # this cog on the bot.
    bot.add_cog(Chucknorris(bot))
| [
"michaelwest299@gmail.com"
] | michaelwest299@gmail.com |
fdc1b9e8da163ba9319d0fc55672ce11631fe1e1 | f37e90775a158ea0ae644e334eac5bba341f4989 | /Python+/Python3/src/core/asterisk.py | 8a4deea474bab401f168aa3a316d4d96fcd53119 | [] | no_license | Aleks-Ya/yaal_examples | 0087bbaf314ca5127051c93b89c8fc2dcd14c1e3 | ec282968abf1b86e54fc2116c39f2d657b51baac | refs/heads/master | 2023-09-01T07:40:44.404550 | 2023-08-27T15:24:34 | 2023-08-29T22:01:46 | 14,327,752 | 4 | 2 | null | 2021-06-16T20:39:19 | 2013-11-12T09:26:08 | Java | UTF-8 | Python | false | false | 561 | py | # Using asterisk function (*) in front of variables
# Asterisk as a function
t = (1, 2, 3)
print(t)
print(*t)
# Single asterisk - any number of arguments AS A TUPLE
a = 1
b = 2
c = 3
def print_args(*args):
print(args)
print_args(a, b, c)
# Double asterisk - any number of arguments AS A DICTIONARY
def print_args(**args):
print(args)
print_args(a=7, b=8, c=9)
# Add key-value pair to a asterisk dictionary
def print_added_args(**args):
print(args)
def add_and_print(**args):
print_added_args(c=3, **args)
add_and_print(a=1, b=2)
| [
"aleksei_iablokov@epam.com"
] | aleksei_iablokov@epam.com |
5aebe0dd77ae7fc145f1a804051dd72cff2b484f | ac51f8ce39ad09183cc48d9ad07bae11e400b06d | /backend/news/migrations/0002_auto_20180520_2148.py | d2d27573f22b6bc56a74b31d5ce8a8714e70e1fd | [] | no_license | koinot-markazi/healing.github.io | d7118a6e03c8edb9a3d05998b5fcec194d2a0ffd | 301b7becc8a801d29cf78ce38fbd8a781fa0dd97 | refs/heads/master | 2020-03-17T23:26:21.281198 | 2018-05-25T07:28:34 | 2018-05-25T07:28:34 | 134,045,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-20 16:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Adds 'date' and 'short_content' to the news model. Auto-generated by
    # Django's makemigrations — avoid hand-editing.
    # NOTE(review): short_content uses default=1 (an int) for a TextField;
    # it was supplied interactively for existing rows — confirm intent.
    dependencies = [
        ('news', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='news',
            name='date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='news',
            name='short_content',
            field=models.TextField(default=1, max_length=511),
            preserve_default=False,
        ),
    ]
| [
"the.phenomenon@mail.ru"
] | the.phenomenon@mail.ru |
5c9c62c3aa48e5a6db377c6d30804071a57f9151 | abbb1e132b3d339ba2173129085f252e2f3311dc | /model-optimizer/extensions/back/CorrectName.py | 1d1e9c0dd5231d964e8ac163b9be70176224efee | [
"Apache-2.0"
] | permissive | 0xF6/openvino | 56cce18f1eb448e25053fd364bcbc1da9f34debc | 2e6c95f389b195f6d3ff8597147d1f817433cfb3 | refs/heads/master | 2022-12-24T02:49:56.686062 | 2020-09-22T16:05:34 | 2020-09-22T16:05:34 | 297,745,570 | 2 | 0 | Apache-2.0 | 2020-09-22T19:03:06 | 2020-09-22T19:03:04 | null | UTF-8 | Python | false | false | 1,807 | py | """
Copyright (C) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.graph.graph import Graph
from mo.back.replacement import BackReplacementPattern
class RestoreOriginalFrameworkName(BackReplacementPattern):
    """
    Renames layers back to their original framework names.

    A node is renamed when it carries the attribute 'framework_node_name'.
    If the node also carries 'rename_condition' (a predicate over the node),
    the rename happens only when that predicate returns True. For example,
    SoftmaxONNXFrontReplacer uses
        lambda n: len(n.graph.get_op_nodes(name=node_name)) == 0
    Both auxiliary attributes are removed from the node afterwards.
    """
    enabled = True

    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes():
            if not node.has_valid('framework_node_name'):
                continue
            if node.has_valid('rename_condition'):
                # Evaluate the predicate while 'rename_condition' is still
                # attached (matching the original order), then drop it.
                should_rename = node['rename_condition'](node)
                del node['rename_condition']
                if should_rename:
                    node.name = node['framework_node_name']
            else:
                node.name = node['framework_node_name']
            del node['framework_node_name']
"alexey.suhov@intel.com"
] | alexey.suhov@intel.com |
0699c6935eb3618c4450c5e89f3ea0ee05bf01ae | cb35df97989fcc46831a8adb8de3434b94fd2ecd | /tests/benchmarks/bm_point_mesh_distance.py | bc1da12883da43fa792ce14d561ae6af072b7a70 | [
"MIT",
"BSD-3-Clause"
] | permissive | facebookresearch/pytorch3d | 6d93b28c0f36a4b7efa0a8143726200c252d3502 | a3d99cab6bf5eb69be8d5eb48895da6edd859565 | refs/heads/main | 2023-09-01T16:26:58.756831 | 2023-08-26T20:55:56 | 2023-08-26T20:55:56 | 217,433,767 | 7,964 | 1,342 | NOASSERTION | 2023-08-25T10:00:26 | 2019-10-25T02:23:45 | Python | UTF-8 | Python | false | false | 1,106 | py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
from fvcore.common.benchmark import benchmark
from tests.test_point_mesh_distance import TestPointMeshDistance
def bm_point_mesh_distance() -> None:
    """Benchmark point-to-mesh edge and face distances over a parameter grid."""
    devices = ["cuda:0"]
    batch_sizes = [4, 8, 16]
    verts_counts = [100, 1000]
    faces_counts = [300, 3000]
    points_counts = [5000, 10000]
    # One kwargs dict per combination of the parameter grid.
    kwargs_list = [
        {"N": n, "V": v, "F": f, "P": p, "device": d}
        for n, v, f, p, d in product(
            batch_sizes, verts_counts, faces_counts, points_counts, devices
        )
    ]
    benchmark(
        TestPointMeshDistance.point_mesh_edge,
        "POINT_MESH_EDGE",
        kwargs_list,
        warmup_iters=1,
    )
    benchmark(
        TestPointMeshDistance.point_mesh_face,
        "POINT_MESH_FACE",
        kwargs_list,
        warmup_iters=1,
    )


if __name__ == "__main__":
    bm_point_mesh_distance()
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
3c1129fdec13db55c9f782c4b1035505754349ac | cd8fdf0408e64763605a8064dc6beb7d91b64a32 | /stud_profile/views.py | 4a7065661f6812d99ca3e3b9e42dd04bf0d0f033 | [] | no_license | SPSPRANAV/shodh_swc | 05b66fa453c969cbdda41a89e0386f2f3c5d0156 | 2f51f303dd398cc318b90112eae4ab1a40d87e6c | refs/heads/master | 2020-09-06T18:13:54.444372 | 2019-11-08T16:31:47 | 2019-11-08T16:31:47 | 220,505,823 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | from django.shortcuts import render
from django.db import models
from .models import student
from django.template import loader
from django.http import HttpResponse
from django.forms import modelformset_factory
# Create your views here.
from .forms import UpdateProfile
def userdetails(request):
    """Render the student-profile form and handle its submission.

    GET  -> render an unbound UpdateProfile form on userdetails.html.
    POST -> validate; on success save and render display.html, otherwise
            re-render userdetails.html with the bound form so field
            errors are shown.

    Fixes two defects in the original:
    * an invalid POST fell through without returning an HttpResponse
      (Django raises "The view ... didn't return an HttpResponse"),
    * the GET branch stashed the form *class* in a global (`form_class`)
      and passed the class, not an instance, to the template.
    """
    if request.method == 'POST':
        form = UpdateProfile(request.POST)
        if form.is_valid():
            form.save()
            return render(request, 'display.html', {'form': form})
        # Invalid submission: fall through and re-render the bound form.
    else:
        form = UpdateProfile()
    return render(request, 'userdetails.html', {
        'form': form,
    })
"phani18@iitg.ac.in"
] | phani18@iitg.ac.in |
1a0c68fc136cb8faba43b827a4977ac6ec13bb9f | b059c2cf1e19932abb179ca3de74ced2759f6754 | /S20/day29/server.py | 02b38215230e14e190eb4dd508011026298f47aa | [] | no_license | Lwk1071373366/zdh | a16e9cad478a64c36227419d324454dfb9c43fd9 | d41032b0edd7d96e147573a26d0e70f3d209dd84 | refs/heads/master | 2020-06-18T02:11:22.740239 | 2019-07-10T08:55:14 | 2019-07-10T08:55:14 | 196,130,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,022 | py | # import json
# import struct
# import socket
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9001))
# sk.listen()
#
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
#
# conn.close()
# sk.close()
# import json
# import struct
# import socket
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9002))
# sk.listen()
#
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import json
# import struct
# import socket
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
#
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import json
# import struct
# import socket
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
#
# conn,addr = sk.accept()
# # print(conn)
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import socket
# import json
# import struct
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
#
# conn,addr = sk.accept()
# num = sk.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.rece(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import json
# import struct
# import socket
#
# sk= socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
# conn,addr = sk.accept()
# num = sk.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filesize'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import json
# import socket
# import struct
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filesize'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import socket
# import json
# import struct
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic =conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write()
# conn.close()
# sk.close()
# import socket
# import json
# import struct
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
# conn,addr = sk.accept()
# num =conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import json,socket,struct
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
| [
"1071373366@qq.com"
] | 1071373366@qq.com |
54bbd3324b9403964b93eccaad3a8775e18570ed | 90edd7f110d41042b5c0eb2a190835c2dbea5f4c | /phb.py | fa4d0c281cc7d41a1310b40a27d01e3182cf5a58 | [
"MIT"
] | permissive | ochinchina/my-tools | 0b0eacfd88321741a2721b3ae2182dd6461cea52 | 93b5c54582a50a0039c1c1c8238e2689b351afd0 | refs/heads/master | 2023-04-03T00:12:59.725568 | 2023-03-15T09:06:55 | 2023-03-15T09:06:55 | 119,659,568 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,515 | py | #!/usr/bin/python
import urllib2
import argparse
import time
import threading
import traceback
import sys
import logging
import logging.handlers
logger = logging.getLogger( "benchmark" )
class RequestManager:
    """Thread-safe bookkeeping for a fixed budget of benchmark requests.

    Worker threads call nextRequest() to claim a slot and then
    success()/failed() to record the latency (in whole milliseconds) of
    each completed request. print_summary() reports the distribution.
    """

    def __init__(self, total):
        self.total = total
        self.finished = 0
        self._lock = threading.Lock()
        self._success = 0
        self._failed = 0
        # Latency histograms: millis -> count of requests at that latency.
        self._success_status = {}
        self._failed_status = {}
        self._print_num = self._get_print_num()

    def success(self, millis):
        """Record one successful request that took `millis` milliseconds."""
        with self._lock:
            self._success += 1
            self._success_status[millis] = self._success_status.get(millis, 0) + 1

    def failed(self, millis):
        """Record one failed request that took `millis` milliseconds."""
        with self._lock:
            self._failed += 1
            self._failed_status[millis] = self._failed_status.get(millis, 0) + 1

    def nextRequest(self):
        """Claim the next request slot.

        Returns (proceed, should_print, sequence_number). `proceed` is
        False once the total budget has been consumed; `should_print`
        marks progress-report boundaries.
        """
        with self._lock:
            if self.finished >= self.total:
                return False, False, 0
            self.finished += 1
            if self._print_num > 0:
                should_print = self.finished % self._print_num == 0
            else:
                should_print = False
            return True, should_print, self.finished

    def print_summary(self, total_time):
        """Print the success-latency distribution and overall counters."""
        for millis, count in self._success_status.items():
            print("%%%.2f in %d ms" % (count * 100.0 / self.total, millis))
        print("success=%d, failed=%d, total time=%.2fs" % (self._success, self._failed, total_time))

    def _get_print_num(self):
        # Report progress every 1000/100/10 requests, choosing the largest
        # step that still yields at least five progress lines; 0 disables.
        for step in (1000, 100, 10):
            if self.total >= 5 * step:
                return step
        return 0
def parse_args():
    """Parse the command-line options for a benchmark run."""
    p = argparse.ArgumentParser(description="http request like apache bench tool")
    p.add_argument("-H", nargs="+", required=False, help="headers")
    p.add_argument("-c", type=int, default=1, help="concurrency requests, default 1")
    p.add_argument("-n", type=int, default=100, help="amount of requests, default 100")
    p.add_argument("-d", "--data", required=False, help="the data to be sent")
    p.add_argument("--log-file", required=False, help="the log file")
    p.add_argument("url", help="the url")
    return p.parse_args()
def do_request(req_mgr, url, headers, data):
    """Worker loop: claim request slots from `req_mgr` and issue HTTP
    requests to `url` until the total budget is exhausted.

    A 2xx status is recorded as a success; any other status or a raised
    exception is recorded as a failure, together with the elapsed
    milliseconds.
    """
    while True:
        ok, print_info, num = req_mgr.nextRequest()
        if not ok:
            break
        start = time.time()
        try:
            req = urllib2.Request(url, data=data)
            for header in headers:
                req.add_header(header, headers[header])
            resp = urllib2.urlopen(req)
            total = int((time.time() - start) * 1000)
            # BUG FIX: use floor division so the 2xx test also works under
            # true division (204 / 100 == 2.04 would never equal 2).
            if resp.getcode() // 100 == 2:
                req_mgr.success(total)
            else:
                req_mgr.failed(total)
        except Exception:
            # BUG FIX: traceback.print_exc(file=logger) passed a Logger
            # where a file-like object is required, raising AttributeError
            # inside the handler and killing the worker thread. Log the
            # traceback through the logger itself instead.
            logger.exception("request to %s failed", url)
            total = int((time.time() - start) * 1000)
            req_mgr.failed(total)
        if print_info:
            print("request finished %d" % num)
def parseHeaders(headers):
    """Turn a list of "Name: value" strings into a {name: value} dict.

    Entries without a colon are reported and skipped; a None input
    yields an empty dict. Splits on the first colon only, so values may
    themselves contain colons.
    """
    r = {}
    if headers is None:
        return r
    for header in headers:
        pos = header.find(':')
        if pos == -1:
            print("Invalid header %s" % header)
        else:
            # BUG FIX: was header[pos+1], which kept only the single
            # character after the colon; take the whole remainder.
            r[header[:pos]] = header[pos + 1:].strip()
    return r
def loadData(data):
    """Resolve the request body: "@path" means read the file at `path`;
    anything else (including None) is returned unchanged."""
    if data is None or not data.startswith("@"):
        return data
    with open(data[1:]) as fp:
        return fp.read()
def init_logger(log_file):
    """Attach a handler to the module-level `logger`.

    Uses a rotating file handler (50 MiB, 5 backups) when `log_file` is
    given, otherwise a stdout stream handler.
    """
    if log_file is None:
        # BUG FIX: was misspelled `loggging.StreamHandler` (NameError).
        handler = logging.StreamHandler(sys.stdout)
    else:
        handler = logging.handlers.RotatingFileHandler(
            log_file, maxBytes=50 * 1024 * 1024, backupCount=5)
    handler.setLevel(logging.DEBUG)
    # BUG FIX: logging.Handler has no setFormat(); the method is
    # setFormatter(), and it takes a Formatter instance.
    handler.setFormatter(logging.Formatter('%(asctime)s %(name)s - %(message)s'))
    logger.addHandler(handler)
def main():
    """Entry point: spawn args.c worker threads that together issue
    args.n requests against args.url, then print the latency summary."""
    args = parse_args()
    headers = parseHeaders(args.H)
    body = loadData(args.data)
    req_mgr = RequestManager(args.n)
    started = time.time()
    workers = []
    for _ in xrange(args.c):
        worker = threading.Thread(
            target=do_request, args=(req_mgr, args.url, headers, body))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    req_mgr.print_summary(time.time() - started)


if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | ochinchina.noreply@github.com |
4aec4fadd2e1ec390a996939d14e31b075f7e94d | bce4b8f79da5523500ffb62f7584a18ff3f5e6b1 | /BlogpostProject/forms.py | cf87339bec5ac9f62db6b2eec568f88659b2f683 | [] | no_license | inder0198/ID99 | c5b355b18f35ac459c0830efbdc297ac89a2c98d | d77fb51e65e690183b8bab8557189bd4577220e9 | refs/heads/master | 2023-06-15T05:05:59.259889 | 2021-07-10T07:00:54 | 2021-07-10T07:00:54 | 384,633,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | from django import forms
from .models import *
class BlogPost(forms.ModelForm):
    """ModelForm for creating/editing a blog entry.

    Exposes every field of the Post model (fields="__all__").
    """
    class Meta:
        model=Post
        fields="__all__"
"inderjeet0198@gmail.com"
] | inderjeet0198@gmail.com |
cc9eb4599731978fb386ca905f1334da94eac5f0 | 77ea03e7b7b47070b7bbfb84d61501b84e82221c | /pa4_submission_wrt6af/OnTimePerfReducer.py | 885f3b0c2a60f531371ff7a98ed0adb78ab7bcd2 | [] | no_license | williamtonks/Clouad | 466697bea460a02913f086ece48eb53925279ee8 | cdce1219b8cfe019d9a6bf70cf977507c8c46414 | refs/heads/master | 2023-04-08T02:44:21.270813 | 2021-04-10T20:48:47 | 2021-04-10T20:48:47 | 344,595,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | #!/usr/bin/env python3
from operator import itemgetter
import sys
current_operator = None
operator = None
current_total_delay = 0.0
current_number_of_flights = 0.0
# input comes from STDIN
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# parse the input we got from mapper.py
operator, delay = line.split('\t', 1)
# convert count (currently a string) to int
try:
delay = float(delay)
except ValueError:
# count was not a number, so silently
# ignore/discard this line
continue
# this IF-switch only works because Hadoop sorts map output
# by key (here: word) before it is passed to the reducer
if current_operator == operator:
current_total_delay += delay
current_number_of_flights += 1
else:
if current_operator:
# write result to STDOUT
average = current_total_delay / current_number_of_flights
print("{0}\t{1}".format(current_operator, average))
current_operator = operator
current_total_delay = delay
current_number_of_flights = 1
# do not forget to output the last word if needed!
if current_operator == operator:
average = current_total_delay / current_number_of_flights
print("{0}\t{1}".format(current_operator, average))
| [
"ubuntu@ip-172-31-79-202.ec2.internal"
] | ubuntu@ip-172-31-79-202.ec2.internal |
1a68a54751c29227235113a80caaf9dcdf1b4762 | 90caabd74e9f266ce1ec40975853493aa5ab76d1 | /server.py | 616780ffdff735ab61a19034f822181ce8387d94 | [] | no_license | hkorzeniewski/ECG-ESP32-acquisition | 069aaf0d44a84b72affb4f3dc5082a6458fcb967 | 1c50595beb603c32d40dddbdc437477b5275fa8f | refs/heads/main | 2023-04-09T05:54:03.823965 | 2021-04-22T11:38:59 | 2021-04-22T11:38:59 | 360,493,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | import socket
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import csv
import time
# ECG acquisition server: accept connections from the ESP32 client and
# collect samples sent as 4-byte ASCII integers; once 1000 samples are
# collected, plot them and dump timestamps/values to CSV.
# NOTE(review): the bind address is a LAN-specific IP — confirm it matches
# this host's interface before running elsewhere.
s = socket.socket()
s.bind(('192.168.0.29', 8082 ))
s.listen(0)
print("hello from server")
ekg_content = list()  # collected ECG sample values (ints)
times = list()        # wall-clock timestamp recorded for each sample
czas_pobrania = 4     # acquisition time in seconds ("czas pobrania" = capture time); currently unused
start = time.time()
# Keep accepting connections until at least 1000 samples are collected;
# each accepted client is drained until it closes its side.
while len(ekg_content)<1000:
    client, addr = s.accept()
    while True :
        # Assumes each sample arrives as exactly 4 bytes of ASCII digits;
        # recv(4) can legally return fewer bytes, which would split a
        # number — TODO confirm the sender always writes fixed 4-byte
        # messages.
        content = client.recv(4)
        # client.send(wiadomosc.encode())
        if len(content) ==0:
            # empty recv => peer closed the connection
            break
        else:
            print(int(content.decode()))
            ekg_content.append(int(content.decode()))
            times.append(time.time())
    client.close()
end = time.time()
print(end-start)  # elapsed acquisition time in seconds
print("got signal")
plt.plot(ekg_content)
plt.show()
# Persist the capture: row 1 = timestamps, row 2 = sample values.
with open('test.csv', 'w+', newline='') as f:
    # data = list(csv.writer(f))
    writer = csv.writer(f)
    writer.writerow(times)
    writer.writerow(ekg_content)
| [
"56238495+hkorzeniewski@users.noreply.github.com"
] | 56238495+hkorzeniewski@users.noreply.github.com |
2c2989a1b9ed23d38d4520ec0a8daa91d6453622 | cd8eb02ef94cdb36ff20060049562c4e232d949c | /PythonSession-2/Code/HelloPython.py | 5086c171c9b6db1c6c8eed041feed2a1aafd307f | [] | no_license | aaryajahagirdarGITHUB/Anirudh-Gaikwad-comp-Python-Tutorial | d669e55527f0bfd5381bfa0ae83444162b43b8a2 | 9ac43faa58d4135eea5bec270543abbfea06e7f7 | refs/heads/master | 2022-12-16T12:48:18.099117 | 2019-06-20T04:53:45 | 2019-06-20T04:53:45 | 294,915,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | # Python First Program Hello Python
# First Python program: print a greeting.
# To run it, open IDLE, choose File -> New File, save the file with a
# .py extension (HelloPython.py), then run it.
greeting = "Hello Python"
print(greeting)
# Keep the console window open until the user presses a key.
exit_key = input("Press any key to Exit")
"noreply@github.com"
] | aaryajahagirdarGITHUB.noreply@github.com |
8d8b659f31f0b33986e1d7bd43984a45e18577ac | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_12_01/models/virtual_network_tap.py | 93c6e03ece13f3201e81b3d067b4e9c1753d2d04 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 4,312 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetworkTap(Resource):
    """Virtual Network Tap resource.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :ivar network_interface_tap_configurations: Specifies the list of resource
    IDs for the network interface IP configuration that needs to be tapped.
    :vartype network_interface_tap_configurations:
    list[~azure.mgmt.network.v2018_12_01.models.NetworkInterfaceTapConfiguration]
    :ivar resource_guid: The resourceGuid property of the virtual network tap.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the virtual network
    tap. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    :param destination_network_interface_ip_configuration: The reference to
    the private IP Address of the collector nic that will receive the tap
    :type destination_network_interface_ip_configuration:
    ~azure.mgmt.network.v2018_12_01.models.NetworkInterfaceIPConfiguration
    :param destination_load_balancer_front_end_ip_configuration: The reference
    to the private IP address on the internal Load Balancer that will receive
    the tap
    :type destination_load_balancer_front_end_ip_configuration:
    ~azure.mgmt.network.v2018_12_01.models.FrontendIPConfiguration
    :param destination_port: The VXLAN destination port that will receive the
    tapped traffic.
    :type destination_port: int
    :param etag: Gets a unique read-only string that changes whenever the
    resource is updated.
    :type etag: str
    """
    # Fields marked readonly are populated by the service and must not be
    # supplied by the caller; the serializer rejects them on input.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'network_interface_tap_configurations': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps Python attribute names to their REST API keys (dotted keys live
    # under the "properties" envelope) and msrest serialization types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'network_interface_tap_configurations': {'key': 'properties.networkInterfaceTapConfigurations', 'type': '[NetworkInterfaceTapConfiguration]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'destination_network_interface_ip_configuration': {'key': 'properties.destinationNetworkInterfaceIPConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
        'destination_load_balancer_front_end_ip_configuration': {'key': 'properties.destinationLoadBalancerFrontEndIPConfiguration', 'type': 'FrontendIPConfiguration'},
        'destination_port': {'key': 'properties.destinationPort', 'type': 'int'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        """Initialize from keyword arguments named per `_attribute_map`.

        Server-populated (readonly) properties are initialized to None
        here and are filled in during deserialization of responses.
        """
        super(VirtualNetworkTap, self).__init__(**kwargs)
        self.network_interface_tap_configurations = None
        self.resource_guid = None
        self.provisioning_state = None
        self.destination_network_interface_ip_configuration = kwargs.get('destination_network_interface_ip_configuration', None)
        self.destination_load_balancer_front_end_ip_configuration = kwargs.get('destination_load_balancer_front_end_ip_configuration', None)
        self.destination_port = kwargs.get('destination_port', None)
        self.etag = kwargs.get('etag', None)
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.