| content | origin | type |
|---|---|---|
| string (0 – 1.05M chars) | 2 classes | 2 classes |
from app import db
import json
import os
import requests
class Movies(db.Model):
"""
Models the data of movies related to a given location.
"""
id = db.Column(db.Integer, primary_key=True)
movies = db.Column(db.Text)
@staticmethod
    def create_entry(query):
        """
        Takes in a search query.
        Retrieves MovieDB API movie data.
        Returns a Movies instance.
        """
        MOVIE_API_KEY = os.getenv('MOVIE_API_KEY')
        url = 'https://api.themoviedb.org/3/search/movie'
        # Pass the query via params so it is URL-encoded properly.
        params = {'api_key': MOVIE_API_KEY, 'language': 'en-US',
                  'page': 1, 'query': query}
        api_data = requests.get(url, params=params).json()
        return Movies.instantiate_movies(api_data)
@staticmethod
def instantiate_movies(api_data):
"""
Takes in MovieDB API data.
Returns a Movies object.
"""
movies = []
for movie in api_data['results'][:5]:
title = movie['title']
overview = movie['overview']
average_votes = movie['vote_average']
total_votes = movie['vote_count']
            poster_path = movie.get('poster_path')
            # poster_path can be null in API responses; guard before concatenating.
            image_url = ('https://image.tmdb.org/t/p/w500' + poster_path) if poster_path else None
popularity = movie['popularity']
released_on = movie['release_date']
movies.append({
'title': title,
'overview': overview,
'average_votes': average_votes,
'total_votes': total_votes,
'image_url': image_url,
'popularity': popularity,
'released_on': released_on
})
        # The column is db.Text, so serialize the list to JSON before storing.
        return Movies(movies=json.dumps(movies))
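# A minimal usage sketch (hypothetical, not part of the original module):
# it assumes an active Flask app context, a MOVIE_API_KEY environment
# variable, and the db instance configured in app.
#
#     entry = Movies.create_entry('Seattle')
#     db.session.add(entry)
#     db.session.commit()
#     print(json.loads(entry.movies)[0]['title'])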
---
#
# Copyright (c) 2015-2021 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_zfiles.zmi module
This module defines base documents container management views.
"""
from zope.interface import Interface
from pyams_form.ajax import ajax_form_config
from pyams_form.field import Fields
from pyams_layer.interfaces import IPyAMSLayer
from pyams_security.interfaces.base import VIEW_SYSTEM_PERMISSION
from pyams_site.interfaces import ISiteRoot
from pyams_skin.interfaces.viewlet import IBreadcrumbItem
from pyams_utils.adapter import adapter_config
from pyams_utils.registry import get_utility, query_utility
from pyams_utils.url import absolute_url
from pyams_viewlet.viewlet import viewlet_config
from pyams_zfiles.interfaces import IDocumentContainer, MANAGE_APPLICATION_PERMISSION, \
MANAGE_DOCUMENT_PERMISSION
from pyams_zmi.form import AdminEditForm
from pyams_zmi.interfaces import IAdminLayer
from pyams_zmi.interfaces.table import ITableElementEditor
from pyams_zmi.interfaces.viewlet import IControlPanelMenu, IMenuHeader, IPropertiesMenu, \
ISiteManagementMenu
from pyams_zmi.table import TableElementEditor
from pyams_zmi.zmi.viewlet.breadcrumb import AdminLayerBreadcrumbItem
from pyams_zmi.zmi.viewlet.menu import NavigationMenuItem
__docformat__ = 'restructuredtext'
from pyams_zfiles import _ # pylint: disable=ungrouped-imports
@viewlet_config(name='document-container.menu',
context=ISiteRoot, layer=IAdminLayer,
manager=IControlPanelMenu, weight=40,
permission=VIEW_SYSTEM_PERMISSION)
class DocumentContainerMenu(NavigationMenuItem):
"""Document container menu"""
icon_class = 'far fa-file-archive'
    def __new__(cls, context, request, view, manager):  # pylint: disable=unused-argument
container = query_utility(IDocumentContainer)
if (container is None) or not container.show_home_menu:
return None
return NavigationMenuItem.__new__(cls)
def __init__(self, context, request, view, manager):
super().__init__(context, request, view, manager)
self.container = get_utility(IDocumentContainer)
@property
def label(self):
"""Label getter"""
return self.container.__name__
def get_href(self):
"""Menu URL getter"""
return absolute_url(self.container, self.request, 'admin')
@adapter_config(required=(IDocumentContainer, IAdminLayer, Interface, ISiteManagementMenu),
provides=IMenuHeader)
def document_container_menu_header(context, request, view, manager): # pylint: disable=unused-argument
"""Document container menu header"""
return _("Documents container")
@adapter_config(required=(IDocumentContainer, IAdminLayer, Interface),
provides=ITableElementEditor)
class DocumentContainerElementEditor(TableElementEditor):
"""Document container element editor"""
view_name = 'admin'
modal_target = False
def __new__(cls, context, request, view): # pylint: disable=unused-argument
if not request.has_permission(MANAGE_APPLICATION_PERMISSION, context=context) and \
not request.has_permission(MANAGE_DOCUMENT_PERMISSION, context=context):
return None
return TableElementEditor.__new__(cls)
@adapter_config(required=(IDocumentContainer, IAdminLayer, Interface),
provides=IBreadcrumbItem)
class DocumentContainerBreadcrumbItem(AdminLayerBreadcrumbItem):
"""Document container breadcrumb item"""
label = _("Documents container")
@viewlet_config(name='configuration.menu',
context=IDocumentContainer, layer=IAdminLayer,
manager=ISiteManagementMenu, weight=20,
permission=MANAGE_APPLICATION_PERMISSION,
provides=IPropertiesMenu)
class DocumentContainerPropertiesMenu(NavigationMenuItem):
"""Document container properties menu"""
label = _("Configuration")
icon_class = 'fas fa-sliders-h'
href = '#configuration.html'
@ajax_form_config(name='configuration.html',
context=IDocumentContainer, layer=IPyAMSLayer,
permission=MANAGE_APPLICATION_PERMISSION)
class DocumentContainerConfigurationEditForm(AdminEditForm):
"""Document container properties edit form"""
legend = _("Configuration")
fields = Fields(IDocumentContainer).omit('__parent__', '__name__')
---
from pydocstyle.checker import check
from pydocstyle.checker import violations
import testing
registry = violations.ErrorRegistry
_disabled_checks = [
'D202', # No blank lines allowed after function docstring
'D205', # 1 blank line required between summary line and description
]
def check_all_files():
    for filename in testing.list_all_py_files():
        for err in check([filename]):
            if err.code not in _disabled_checks:
                yield err
def lookup_error_params(code):
for group in registry.groups:
for error_params in group.errors:
if error_params.code == code:
return error_params
all_violations = list(check_all_files())
if all_violations:
    counts = dict()
    for e in all_violations:
        print(e)
        counts[e.code] = counts.get(e.code, 0) + 1
    for n, code in sorted([(n, code) for code, n in counts.items()], reverse=True):
        p = lookup_error_params(code)
        print('%s %8d %s' % (code, n, p.short_desc))
    print('%s %8d violations' % ('tot', len(all_violations)))
# TODO: exit(1)
---
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gsf
def Hex2(val):
    return '0x' + ('%02X' % (val if isinstance(val, int) else ord(val)))
def Pieces(data, max_size):
"""Yield max_size components from data."""
for i in range(0, len(data), max_size):
yield data[i:i + max_size]
def DumpHex(filename, include_cpp=True):
    gsf_file = gsf.GsfFile(filename)
    if include_cpp:
        print('c++ setup:')
        print()
        print('  #include <array>')
        print('  using std::array;')
        print()
    for record_num, record in enumerate(gsf_file):
        if record_num:
            print()
        header_data = record['header_data']
        data = record['data']
        type_str = record['record_type_str']
        header_hex = [Hex2(v) for v in header_data]
        data_hex = [Hex2(v) for v in data]
        print('record: ', record_num, type_str)
        print('sizes = (%d, %d, %d)' % (record['size_total'],
                                        len(header_hex),
                                        len(data_hex)))
        print('header = (', ', '.join(header_hex), ')')
        print('data = (', ', '.join(data_hex), ')')
        if not include_cpp:
            continue
        print('c++ data:')
        print()
        print('  // Record type:', type_str)
        print('  const uint32_t size_%d = %d;' % (record_num, len(data)))
        print('  array<uint8_t, size_%d> data_%d = {{' % (record_num, record_num))
        for piece in Pieces(data, 11):
            print('    ' + ', '.join([Hex2(v) for v in piece]) + ',')
        print('  }};')
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', metavar='N', type=str, nargs='+',
                        help='Files to get info about.')
    args = parser.parse_args()
    for filename in args.filenames:
        DumpHex(filename)
if __name__ == '__main__':
    main()
---
# Generated by Django 3.0.2 on 2020-10-13 07:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_thirdpartycreds'),
]
operations = [
migrations.AlterModelOptions(
name='thirdpartycreds',
options={'verbose_name': 'Third Party Credentials', 'verbose_name_plural': 'Third Party Credentials'},
),
]
---
from skynet.common.base_daos import BaseDao
class BaseModel(object):
DEFAULT_DAO = BaseDao
def __init__(self, dao=None):
if dao is None:
dao = self.DEFAULT_DAO()
self.dao = dao
    def populate(self, data):
        # dict.iteritems() is Python 2 only; use items() under Python 3.
        for k, v in data.items():
            k_translated = self.translate(k)
            if k_translated and hasattr(self, k_translated):
                setattr(self, k_translated, v)
def translate(self, key):
return {}.get(key, key)
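# A minimal subclass sketch (hypothetical names, not part of the original
# module) showing how translate() maps incoming keys onto attribute names
# before populate() copies the values over:
#
#     class HostModel(BaseModel):
#         hostname = None
#         ip = None
#         def translate(self, key):
#             return {'host_name': 'hostname', 'ip_address': 'ip'}.get(key, key)
#
#     m = HostModel()
#     m.populate({'host_name': 'web-01', 'ip_address': '10.0.0.5'})
#     assert m.hostname == 'web-01' and m.ip == '10.0.0.5'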
---
import os
import json
import html
from datetime import datetime
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.responses import JSONResponse
from auth import LEADERBOARD_API_TOKEN
app = FastAPI(redoc_url=None, docs_url=None)
app.token = None
LEADERBOARD = 'leaderboard/leaderboard.json'
app.add_middleware(
CORSMiddleware,
allow_origins="*",
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class LeaderboardCache:
last_updated = None
data = None
@classmethod
def ensure_file_exists(cls):
if not os.path.exists(LEADERBOARD):
with open(LEADERBOARD, 'w') as fp:
fp.write('{}')
@classmethod
def load(cls):
with open(LEADERBOARD, "r") as fp:
cls.data = json.loads(fp.read())
cls.last_updated = datetime.now()
@classmethod
def dump(cls, data: dict):
with open(LEADERBOARD, "w") as fp:
json.dump(data, fp)
@classmethod
def get(cls):
if not cls.data:
raise HTTPException(500, "Missing data.")
return cls.data
@classmethod
def update(cls, data: str):
data = json.loads(data)
for _, user in data.items():
user['username'] = html.escape(user['username'])
cls.dump(data)
cls.data = data
LeaderboardCache.ensure_file_exists()
LeaderboardCache.load()
@app.post('/leaderboard')
async def post_leaderboard(request: Request):
if request.headers.get("X-Authorization") != LEADERBOARD_API_TOKEN:
raise HTTPException(401)
body = (await request.body()).decode()
LeaderboardCache.update(body)
return "OK"
@app.get('/leaderboard')
async def get_leaderboard():
return JSONResponse(LeaderboardCache.get())
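# A minimal client sketch (hypothetical host/port, not part of the original
# service): it assumes the app is served on localhost:8000 and that the
# caller holds the same LEADERBOARD_API_TOKEN the server imports from auth.
#
#     import requests
#     resp = requests.post(
#         'http://localhost:8000/leaderboard',
#         data='{"1": {"username": "alice", "score": 10}}',
#         headers={'X-Authorization': LEADERBOARD_API_TOKEN},
#     )
#     assert resp.status_code == 200
#     print(requests.get('http://localhost:8000/leaderboard').json())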
---
# -*- encoding: utf-8 -*-
"""Handle root-services sessions endpoints."""
from .base import RootService
from ..decorators import dyndoc_insert, endpoint
from .responses.sessions import responses
@endpoint("openapi/root/v1/sessions/capabilities/")
class GetSessionCapabilities(RootService):
"""Get the sessions capabilities."""
@dyndoc_insert(responses)
def __init__(self):
"""Instantiate a GetSessionCapabilities request.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.rootservices as rs
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> r = rs.sessions.GetSessionCapabilities()
>>> rv = client.request(r)
>>> print(rv)
Output::
{_v3_GetSessionCapabilities_resp}
"""
super(GetSessionCapabilities, self).__init__()
@endpoint("openapi/root/v1/sessions/capabilities/", "PUT", 202)
class ChangeSessionCapabilities(RootService):
"""Change sessions capabilities."""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate a ChangeSessionCapabilities request.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.rootservices as rs
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_ChangeSessionCapabilities_body}
>>> r = rs.sessions.ChangeSessionCapabilities(data=data)
>>> rv = client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned
"""
super(ChangeSessionCapabilities, self).__init__()
self.data = data
@endpoint("openapi/root/v1/sessions/events/subscriptions/", "POST", 201)
class CreateSessionCapabilitiesSubscription(RootService):
"""Set up a new session capabilities subscription. The data stream will
deliver updates from this point."""
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate a ChangeSessionCapabilitiesSubscription request.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.rootservices as rs
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_CreateSessionCapabilitiesSubscription_body}
        >>> r = rs.sessions.CreateSessionCapabilitiesSubscription(data=data)
>>> rv = client.request(r)
>>> print(rv)
Output::
{_v3_CreateSessionCapabilitiesSubscription_resp}
"""
super(CreateSessionCapabilitiesSubscription, self).__init__()
self.data = data
@endpoint("openapi/root/v1/sessions/events/subscriptions/"
"{ContextId}/{ReferenceId}",
"DELETE", 202)
class RemoveSessionCapabilitiesSubscription(RootService):
"""Removes the subscription identified by the specified reference id.
(and streaming context id)."""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, ReferenceId):
"""Instantiate a RemoveSessionCapabilitiesSubscription request.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.rootservices as rs
>>> import json
>>> client = saxo_openapi.API(access_token=...)
        >>> r = rs.sessions.RemoveSessionCapabilitiesSubscription(
... ContextId=ContextId,
... ReferenceId=ReferenceId)
>>> rv = client.request(r)
>>> assert rv.status_code == r.expected_status
No data is returned.
"""
super(RemoveSessionCapabilitiesSubscription, self).__init__(
ContextId=ContextId,
ReferenceId=ReferenceId)
---
from __future__ import unicode_literals
from . import model
from . import collection
from . import fields
from . import related
---
from collection.property_dictionary import PropertyDict
from collection.xml_interface import XMLError
from collection.xml_interface import XMLInterface
from metadata.metadata_api import MetadataError
from metadata.metadata_api import Metadata
from image.envi import ENVIHeader
---
import json
import logging
import re
from datetime import datetime
from decimal import Decimal
from enum import Enum
from functools import singledispatch
from sys import version_info
from typing import Any, Optional, Tuple, Union
from urllib.parse import urlsplit
PY37 = version_info >= (3, 7)
class JSONEncoder(json.JSONEncoder):
def default(self, obj: Any) -> str:
try:
return convert_to_str(obj)
except TypeError:
return json.JSONEncoder.default(self, obj)
def get_host_port(uri: str) -> Tuple[Optional[str], Optional[int]]:
"""Get host and port from provided URI."""
split_uri = urlsplit(uri)
return split_uri.hostname, split_uri.port
def validate_topic_channel_name(name: str) -> None:
    """Validate topic/channel names.
    The regex is ``^[.a-zA-Z0-9_-]{2,64}(#ephemeral)?$``
    :raises AssertionError: Value does not match the regex.
    """
    assert re.match(
        r"^[.a-zA-Z0-9_\-]{2,64}(#ephemeral)?$", name,
    ), "Topic name must match ^[.a-zA-Z0-9_-]{2,64}(#ephemeral)?$ regex"
@singledispatch
def convert_to_bytes(value: Any) -> bytes:
"""Dispatch for convertible types.
Allowed types: ``bytes``, ``bytearray``, ``str``, ``int``, ``float``,
``dict``, ``Decimal``, ``dataclass``.
:raises TypeError:
"""
if PY37:
from dataclasses import asdict, is_dataclass
if is_dataclass(value) and not isinstance(value, type):
return convert_to_bytes(asdict(value))
raise TypeError(
"Argument {} expected to be type of "
"bytes, bytearray, str, int, float, dict, Decimal, datetime "
"or dataclass".format(value),
)
@convert_to_bytes.register(bytes)
@convert_to_bytes.register(bytearray)
def _(value: Union[bytes, bytearray]) -> bytes:
"""Convert ``bytes`` or ``bytearray`` to bytes"""
return value
@convert_to_bytes.register(str)
def _str_to_bytes(value: str) -> bytes:
"""Convert ``str`` to bytes"""
return value.encode("utf-8")
@convert_to_bytes.register(int)
@convert_to_bytes.register(float)
@convert_to_bytes.register(Decimal)
def _numbers_to_bytes(value: Union[int, float, Decimal]) -> bytes:
"""Convert ``int``, ``float`` or ``Decimal`` to bytes"""
return str(value).encode("utf-8")
@convert_to_bytes.register(dict)
def _dict_to_bytes(value: dict) -> bytes:
"""Convert ``dict`` to bytes"""
return json.dumps(value, cls=JSONEncoder, separators=(",", ":")).encode("utf-8")
@convert_to_bytes.register(Enum)
def _enum_to_bytes(value: Enum) -> bytes:
"""Convert ``enum`` to bytes"""
return convert_to_bytes(value.name)
@convert_to_bytes.register(datetime)
def _datetime_to_bytes(value: datetime) -> bytes:
"""Convert ``datetime`` to bytes"""
return value.isoformat().encode("utf-8")
@singledispatch
def convert_to_str(value: Any) -> str:
"""Dispatch for convertible types.
Allowed types: ``bytes``, ``bytearray``, ``str``, ``int``, ``float``,
``dict``, ``Decimal``, ``dataclass``.
:raises TypeError:
"""
if PY37:
from dataclasses import asdict, is_dataclass
if is_dataclass(value) and not isinstance(value, type):
return convert_to_str(asdict(value))
raise TypeError(
"Argument {} expected to be type of "
"bytes, bytearray, str, int, float, dict, Decimal, datetime "
"or dataclass".format(value),
)
@convert_to_str.register(str)
def _str_to_str(value: str) -> str:
"""Convert ``str`` to ``str``"""
return value
@convert_to_str.register(bytes)
def _bytes_to_str(value: bytes) -> str:
"""Convert ``bytes`` to ``str``"""
return value.decode("utf-8")
@convert_to_str.register(bytearray)
def _bytearray_to_str(value: bytearray) -> str:
"""Convert ``bytearray`` to ``str``"""
return bytes(value).decode("utf-8")
@convert_to_str.register(int)
@convert_to_str.register(float)
@convert_to_str.register(Decimal)
def _numbers_to_str(value: Union[int, float, Decimal]) -> str:
"""Convert ``int``, ``float`` or ``Decimal`` to ``str``"""
return str(value)
@convert_to_str.register(dict)
def _dict_to_str(value: dict) -> str:
"""Convert ``dict`` to JSON string"""
return json.dumps(value)
@convert_to_str.register(Enum)
def _enum_to_str(value: Enum) -> str:
"""Convert ``enum`` to str"""
return convert_to_str(value.name)
@convert_to_str.register(datetime)
def _datetime_to_str(value: datetime) -> str:
"""Convert ``datetime`` to bytes"""
return value.isoformat()
def get_logger(
debug: bool = False, unique_name: Optional[str] = None,
) -> logging.Logger:
"""Get the ansq logger.
:params debug: Set up debug level.
:type debug: :class:`bool`
:params unique_name: Used to make all loggers unique.
:type unique_name: :class:`str`
"""
logger = logging.getLogger(f"ansq {unique_name}" if unique_name else "ansq")
log_format = "%(asctime)s - %(levelname)s - %(name)s: %(message)s"
logging.basicConfig(format=log_format)
logger.setLevel(logging.DEBUG if debug else logging.INFO)
return logger
def truncate_text(text: str, limit: int = 256) -> str:
"""Truncate a given `text` if the `limit` is reached"""
if limit <= 0:
raise ValueError("limit must be greater than 0")
return text[:limit] + "..." if len(text) > limit else text
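# A minimal usage sketch (not part of the original module) exercising the
# helpers defined above:
#
#     validate_topic_channel_name("updates#ephemeral")      # passes
#     assert convert_to_bytes({"a": 1}) == b'{"a":1}'
#     assert convert_to_str(Decimal("1.5")) == "1.5"
#     assert truncate_text("x" * 300, limit=5) == "xxxxx..."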
---
"""
Adapted from https://github.com/kirubarajan/roft/blob/master/generation/interactive_test.py to
process a batch of inputs.
"""
import argparse
import json
import numpy as np
import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
def main(args):
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.random_seed)
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
model = AutoModelForCausalLM.from_pretrained(args.model_name)
if torch.cuda.is_available():
model = model.cuda()
dirname = os.path.dirname(args.output_file)
if dirname:
os.makedirs(dirname, exist_ok=True)
with open(args.output_file, "w") as out:
with open(args.input_file, "r") as f:
for line in f:
data = json.loads(line)
name = data["name"]
ingredients = "\n".join(data["ingredients"])
input_text = f"HOW TO MAKE: {name}\nIngredients:\n{ingredients}."
input_tensor = tokenizer.encode(input_text, return_tensors="pt").to(
model.device
)
outputs = model.generate(
input_tensor,
do_sample=True,
top_p=args.top_p,
repetition_penalty=args.repetition_penalty,
pad_token_id=tokenizer.eos_token_id,
max_length=args.max_length,
)
                recipe = tokenizer.decode(outputs[0])
out.write(json.dumps({"recipe": recipe}) + "\n")
if __name__ == "__main__":
argp = argparse.ArgumentParser()
argp.add_argument("--input-file", required=True)
argp.add_argument("--model-name", required=True)
argp.add_argument("--top-p", type=float, default=0.7)
argp.add_argument("--repetition-penalty", type=float, default=1.2)
argp.add_argument("--max-length", type=int, default=256)
argp.add_argument("--random-seed", type=int, default=4)
argp.add_argument("--output-file", required=True)
args = argp.parse_args()
main(args)
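# A sample invocation (hypothetical file and model names, shown for
# illustration only):
#
#     python generate_recipes.py \
#         --input-file recipes.jsonl \
#         --model-name gpt2 \
#         --output-file generations.jsonl
#
# where each line of the input file is a JSON object such as:
#     {"name": "pancakes", "ingredients": ["2 eggs", "1 cup flour"]}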
---
#!/usr/bin/env python3
import math
count = 0
def count_inversions(a):
    """Merge-sort based inversion counter: returns ``a`` sorted and adds the
    number of inversions found to the global ``count``."""
length = len(a)
if (length <= 1):
return a
else:
midP = int(math.floor(length / 2))
left = a[:midP]
right = a[midP:]
return merge(count_inversions(left), count_inversions(right))
def merge(left, right):
global count
result = []
i = 0
j = 0
lenL = len(left)
lenR = len(right)
while(i < lenL and j < lenR):
if (left[i] <= right[j]):
result.append(left[i])
i += 1
else:
result.append(right[j])
count += lenL - i
j += 1
while (i < lenL):
result.append(left[i])
i += 1
while (j < lenR):
result.append(right[j])
j += 1
return result
a = [2, 1, 3, 1, 4, 2]
print(count_inversions(a))
print(count)
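# Sanity check (added): a strictly decreasing list of length n contains
# n * (n - 1) / 2 inversions. Reset the global counter before reuse.
count = 0
assert count_inversions([3, 2, 1]) == [1, 2, 3]
assert count == 3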
---
import sklearn
from sklearn.linear_model import Perceptron
from sklearn.datasets import load_iris
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# load data
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['label'] = iris.target
df.columns = [
'sepal length', 'sepal width', 'petal length', 'petal width', 'label'
]
print(sklearn.__version__)
data = np.array(df.iloc[:100, [0, 1, -1]])
X, y = data[:,:-1], data[:,-1]
y = np.array([1 if i == 1 else -1 for i in y])
"""
clf = Perceptron(fit_intercept=True,
max_iter=1000,
shuffle=True)
clf.fit(X, y)
"""
clf = Perceptron(fit_intercept=True,
                 max_iter=1000,
                 # tol=None disables the default early stop on convergence;
                 # compare iteration counts with and without it.
                 tol=None,
                 shuffle=True)
clf.fit(X, y)
# Weights assigned to the features.
print(clf.coef_)
# Intercept: constants in the decision function.
print(clf.intercept_)
# Figure size
plt.figure(figsize=(10,10))
# Font that supports CJK glyphs; also fix minus-sign rendering
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.title('Iris linear data example')
plt.scatter(data[:50, 0], data[:50, 1], c='b', label='Iris-setosa',)
plt.scatter(data[50:100, 0], data[50:100, 1], c='orange', label='Iris-versicolor')
# Plot the perceptron decision line
x_points = np.arange(4, 8)
y_ = -(clf.coef_[0][0]*x_points + clf.intercept_)/clf.coef_[0][1]
plt.plot(x_points, y_)
# Remaining plot elements
plt.legend()  # show the legend
plt.grid(False)  # hide the grid
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.show()
---
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A basic unit test for the Python interface of the BMG C++ Graph.infer method"""
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Dirichlet
@bm.functional
def c():
return tensor(2.5)
@bm.functional
def c2():
return tensor([1.5, -2.5])
@bm.random_variable
def flip():
return Bernoulli(0.5)
@bm.functional
def flip2():
return flip()
@bm.functional
def flip3():
return flip() + 0
@bm.functional
def flip4():
return 0 + flip()
@bm.functional
def always_false_1():
return 1 < flip()
@bm.functional
def always_false_2():
return flip() < 0
@bm.functional
def invalid_tensor_1():
return tensor([])
@bm.functional
def invalid_tensor_2():
return tensor([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
class BMGInferInterfaceTest(unittest.TestCase):
def test_infer_interface_constant_functional(self) -> None:
self.maxDiff = None
# First, let's check expected behavior from a regular BM inference method
samples = bm.SingleSiteNewtonianMonteCarlo().infer([c(), c2()], {}, 1, 1)
observed = samples[c()]
expected = "tensor([[2.5000]])"
self.assertEqual(expected.strip(), str(observed).strip())
observed = samples[c2()]
expected = "tensor([[[ 1.5000, -2.5000]]])" # Note, no ", dtype=torch.float64)"
self.assertEqual(expected.strip(), str(observed).strip())
# Now let's do this in BMG Inference
samples = BMGInference().infer([c(), c2()], {}, 1, 1)
observed = samples[c()]
expected = "tensor([[2.5000]])"
self.assertEqual(expected.strip(), str(observed).strip())
observed = samples[c2()]
expected = "tensor([[[ 1.5000, -2.5000]]], dtype=torch.float64)"
self.assertEqual(expected.strip(), str(observed).strip())
# Again, let's check expected behavior from a regular BM inference method
samples = bm.SingleSiteNewtonianMonteCarlo().infer([c(), c2()], {}, 1, 2)
observed = samples[c()]
expected = """
tensor([[2.5000],
[2.5000]])"""
self.assertEqual(expected.strip(), str(observed).strip())
observed = samples[c2()]
expected = """
tensor([[[ 1.5000, -2.5000]],
[[ 1.5000, -2.5000]]])""" # Note, no ", dtype=torch.float64)"
self.assertEqual(expected.strip(), str(observed).strip())
# And again, in BMG inference
samples = BMGInference().infer([c(), c2()], {}, 1, 2)
observed = samples[c()]
expected = """
tensor([[2.5000],
[2.5000]])"""
self.assertEqual(expected.strip(), str(observed).strip())
observed = samples[c2()]
expected = """
tensor([[[ 1.5000, -2.5000]],
[[ 1.5000, -2.5000]]], dtype=torch.float64)"""
self.assertEqual(expected.strip(), str(observed).strip())
def test_infer_interface_redundant_functionals_1(self) -> None:
self.maxDiff = None
samples = BMGInference().infer([flip(), flip2()], {}, 10)
f = samples[flip()]
f2 = samples[flip2()]
self.assertEqual(str(f), str(f2))
samples = BMGInference().infer([always_false_1(), always_false_2()], {}, 2, 1)
af1 = samples[always_false_1()]
af2 = samples[always_false_2()]
expected = "tensor([[False, False]])"
self.assertEqual(expected, str(af1))
self.assertEqual(expected, str(af2))
def test_infer_interface_redundant_functionals_2(self) -> None:
self.maxDiff = None
samples = BMGInference().infer([flip3(), flip4()], {}, 10)
f3 = samples[flip3()]
f4 = samples[flip4()]
self.assertEqual(str(f3), str(f4))
class SampleModel:
@bm.random_variable
def a(self):
return Dirichlet(tensor([0.5, 0.5]))
@bm.functional
def b(self):
return self.a()[2] ## The index 2 is intentionally out of bounds
def test_infer_interface_runtime_error(self) -> None:
model = self.SampleModel()
with self.assertRaisesRegex(RuntimeError, "Error during BMG inference.*"):
BMGInference().infer([model.a(), model.b()], {}, 10, 4)
---
# This Brainfuck interpreter is based on the Go implementation at
# https://github.com/kgabis/brainfuck-go/blob/master/bf.go,
# which karminski ported to PHP; this file ports it again to Python 3.x.
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import sys
class Brainfuck:
# operators
op_inc_dp = 1
op_dec_dp = 2
op_inc_val = 3
op_dec_val = 4
op_out = 5
op_in = 6
op_jmp_fwd = 7
op_jmp_bck = 8
operator = 0
operand = 1
def compileBf(self, input):
pc = 0
jmpPc = 0
jmpStack = []
program = []
for c in input:
if c == '>':
program.append((self.op_inc_dp, 0))
elif c == '<':
program.append((self.op_dec_dp, 0))
elif c == '+':
program.append((self.op_inc_val, 0))
elif c == '-':
program.append((self.op_dec_val, 0))
elif c == '.':
program.append((self.op_out, 0))
elif c == ',':
program.append((self.op_in, 0))
elif c == '[':
program.append((self.op_jmp_fwd, 0))
jmpStack.append(pc)
elif c == ']':
if not jmpStack:
raise ValueError("Invalid Program")
jmpPc = jmpStack.pop()
program.append((self.op_jmp_bck, jmpPc))
program[jmpPc] = (program[jmpPc][0], pc)
else:
pc -= 1
pc += 1
if jmpStack:
raise ValueError("Invalid Program")
return program
def executeBf(self, program):
data = [0] * 65535
dataPtr = 0
pc = 0
while pc < len(program):
c, val = program[pc]
#print("pc:", pc, "c:", c, "val:", val)
if c == self.op_inc_dp:
dataPtr += 1
elif c == self.op_dec_dp:
dataPtr -= 1
elif c == self.op_inc_val:
data[dataPtr] += 1
elif c == self.op_dec_val:
data[dataPtr] -= 1
elif c == self.op_out:
print(chr(data[dataPtr]), end='')
elif c == self.op_in:
data[dataPtr] = sys.stdin.buffer.read(1)[0]
elif c == self.op_jmp_fwd:
if data[dataPtr] == 0:
pc = val
elif c == self.op_jmp_bck:
if data[dataPtr] > 0:
pc = val
else:
raise ValueError("Unknown operator")
pc += 1
# A mandelbrot set fractal viewer in brainfuck written by Erik Bosman
mandelbrotDotBf = """+++++++++++++[->++>>>+++++>++>+<<<<<<]>>>>>++++++>--->>>>>>>>>>+++++++++++++++[[
>>>>>>>>>]+[<<<<<<<<<]>>>>>>>>>-]+[>>>>>>>>[-]>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>[-]+
<<<<<<<+++++[-[->>>>>>>>>+<<<<<<<<<]>>>>>>>>>]>>>>>>>+>>>>>>>>>>>>>>>>>>>>>>>>>>
>+<<<<<<<<<<<<<<<<<[<<<<<<<<<]>>>[-]+[>>>>>>[>>>>>>>[-]>>]<<<<<<<<<[<<<<<<<<<]>>
>>>>>[-]+<<<<<<++++[-[->>>>>>>>>+<<<<<<<<<]>>>>>>>>>]>>>>>>+<<<<<<+++++++[-[->>>
>>>>>>+<<<<<<<<<]>>>>>>>>>]>>>>>>+<<<<<<<<<<<<<<<<[<<<<<<<<<]>>>[[-]>>>>>>[>>>>>
>>[-<<<<<<+>>>>>>]<<<<<<[->>>>>>+<<+<<<+<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>
[>>>>>>>>[-<<<<<<<+>>>>>>>]<<<<<<<[->>>>>>>+<<+<<<+<<]>>>>>>>>]<<<<<<<<<[<<<<<<<
<<]>>>>>>>[-<<<<<<<+>>>>>>>]<<<<<<<[->>>>>>>+<<+<<<<<]>>>>>>>>>+++++++++++++++[[
>>>>>>>>>]+>[-]>[-]>[-]>[-]>[-]>[-]>[-]>[-]>[-]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>-]+[
>+>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>->>>>[-<<<<+>>>>]<<<<[->>>>+<<<<<[->>[
-<<+>>]<<[->>+>>+<<<<]+>>>>>>>>>]<<<<<<<<[<<<<<<<<<]]>>>>>>>>>[>>>>>>>>>]<<<<<<<
<<[>[->>>>>>>>>+<<<<<<<<<]<<<<<<<<<<]>[->>>>>>>>>+<<<<<<<<<]<+>>>>>>>>]<<<<<<<<<
[>[-]<->>>>[-<<<<+>[<->-<<<<<<+>>>>>>]<[->+<]>>>>]<<<[->>>+<<<]<+<<<<<<<<<]>>>>>
>>>>[>+>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>->>>>>[-<<<<<+>>>>>]<<<<<[->>>>>+
<<<<<<[->>>[-<<<+>>>]<<<[->>>+>+<<<<]+>>>>>>>>>]<<<<<<<<[<<<<<<<<<]]>>>>>>>>>[>>
>>>>>>>]<<<<<<<<<[>>[->>>>>>>>>+<<<<<<<<<]<<<<<<<<<<<]>>[->>>>>>>>>+<<<<<<<<<]<<
+>>>>>>>>]<<<<<<<<<[>[-]<->>>>[-<<<<+>[<->-<<<<<<+>>>>>>]<[->+<]>>>>]<<<[->>>+<<
<]<+<<<<<<<<<]>>>>>>>>>[>>>>[-<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<+>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>]>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>+++++++++++++++[[>>>>
>>>>>]<<<<<<<<<-<<<<<<<<<[<<<<<<<<<]>>>>>>>>>-]+>>>>>>>>>>>>>>>>>>>>>+<<<[<<<<<<
<<<]>>>>>>>>>[>>>[-<<<->>>]+<<<[->>>->[-<<<<+>>>>]<<<<[->>>>+<<<<<<<<<<<<<[<<<<<
<<<<]>>>>[-]+>>>>>[>>>>>>>>>]>+<]]+>>>>[-<<<<->>>>]+<<<<[->>>>-<[-<<<+>>>]<<<[->
>>+<<<<<<<<<<<<[<<<<<<<<<]>>>[-]+>>>>>>[>>>>>>>>>]>[-]+<]]+>[-<[>>>>>>>>>]<<<<<<
<<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]<<<<<<<[->+>>>-<<<<]>>>>>>>>>+++++++++++++++++++
+++++++>>[-<<<<+>>>>]<<<<[->>>>+<<[-]<<]>>[<<<<<<<+<[-<+>>>>+<<[-]]>[-<<[->+>>>-
<<<<]>>>]>>>>>>>>>>>>>[>>[-]>[-]>[-]>>>>>]<<<<<<<<<[<<<<<<<<<]>>>[-]>>>>>>[>>>>>
[-<<<<+>>>>]<<<<[->>>>+<<<+<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>>[-<<<<<<<<
<+>>>>>>>>>]>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>+++++++++++++++[[>>>>>>>>>]+>[-
]>[-]>[-]>[-]>[-]>[-]>[-]>[-]>[-]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>-]+[>+>>>>>>>>]<<<
<<<<<<[<<<<<<<<<]>>>>>>>>>[>->>>>>[-<<<<<+>>>>>]<<<<<[->>>>>+<<<<<<[->>[-<<+>>]<
<[->>+>+<<<]+>>>>>>>>>]<<<<<<<<[<<<<<<<<<]]>>>>>>>>>[>>>>>>>>>]<<<<<<<<<[>[->>>>
>>>>>+<<<<<<<<<]<<<<<<<<<<]>[->>>>>>>>>+<<<<<<<<<]<+>>>>>>>>]<<<<<<<<<[>[-]<->>>
[-<<<+>[<->-<<<<<<<+>>>>>>>]<[->+<]>>>]<<[->>+<<]<+<<<<<<<<<]>>>>>>>>>[>>>>>>[-<
<<<<+>>>>>]<<<<<[->>>>>+<<<<+<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>+>>>>>>>>
]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>->>>>>[-<<<<<+>>>>>]<<<<<[->>>>>+<<<<<<[->>[-<<+
>>]<<[->>+>>+<<<<]+>>>>>>>>>]<<<<<<<<[<<<<<<<<<]]>>>>>>>>>[>>>>>>>>>]<<<<<<<<<[>
[->>>>>>>>>+<<<<<<<<<]<<<<<<<<<<]>[->>>>>>>>>+<<<<<<<<<]<+>>>>>>>>]<<<<<<<<<[>[-
]<->>>>[-<<<<+>[<->-<<<<<<+>>>>>>]<[->+<]>>>>]<<<[->>>+<<<]<+<<<<<<<<<]>>>>>>>>>
[>>>>[-<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<+>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
]>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>>>[-<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<+>
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>]>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>++++++++
+++++++[[>>>>>>>>>]<<<<<<<<<-<<<<<<<<<[<<<<<<<<<]>>>>>>>>>-]+[>>>>>>>>[-<<<<<<<+
>>>>>>>]<<<<<<<[->>>>>>>+<<<<<<+<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>>>>>>[
-]>>>]<<<<<<<<<[<<<<<<<<<]>>>>+>[-<-<<<<+>>>>>]>[-<<<<<<[->>>>>+<++<<<<]>>>>>[-<
<<<<+>>>>>]<->+>]<[->+<]<<<<<[->>>>>+<<<<<]>>>>>>[-]<<<<<<+>>>>[-<<<<->>>>]+<<<<
[->>>>->>>>>[>>[-<<->>]+<<[->>->[-<<<+>>>]<<<[->>>+<<<<<<<<<<<<[<<<<<<<<<]>>>[-]
+>>>>>>[>>>>>>>>>]>+<]]+>>>[-<<<->>>]+<<<[->>>-<[-<<+>>]<<[->>+<<<<<<<<<<<[<<<<<
<<<<]>>>>[-]+>>>>>[>>>>>>>>>]>[-]+<]]+>[-<[>>>>>>>>>]<<<<<<<<]>>>>>>>>]<<<<<<<<<
[<<<<<<<<<]>>>>[-<<<<+>>>>]<<<<[->>>>+>>>>>[>+>>[-<<->>]<<[->>+<<]>>>>>>>>]<<<<<
<<<+<[>[->>>>>+<<<<[->>>>-<<<<<<<<<<<<<<+>>>>>>>>>>>[->>>+<<<]<]>[->>>-<<<<<<<<<
<<<<<+>>>>>>>>>>>]<<]>[->>>>+<<<[->>>-<<<<<<<<<<<<<<+>>>>>>>>>>>]<]>[->>>+<<<]<<
<<<<<<<<<<]>>>>[-]<<<<]>>>[-<<<+>>>]<<<[->>>+>>>>>>[>+>[-<->]<[->+<]>>>>>>>>]<<<
<<<<<+<[>[->>>>>+<<<[->>>-<<<<<<<<<<<<<<+>>>>>>>>>>[->>>>+<<<<]>]<[->>>>-<<<<<<<
<<<<<<<+>>>>>>>>>>]<]>>[->>>+<<<<[->>>>-<<<<<<<<<<<<<<+>>>>>>>>>>]>]<[->>>>+<<<<
]<<<<<<<<<<<]>>>>>>+<<<<<<]]>>>>[-<<<<+>>>>]<<<<[->>>>+>>>>>[>>>>>>>>>]<<<<<<<<<
[>[->>>>>+<<<<[->>>>-<<<<<<<<<<<<<<+>>>>>>>>>>>[->>>+<<<]<]>[->>>-<<<<<<<<<<<<<<
+>>>>>>>>>>>]<<]>[->>>>+<<<[->>>-<<<<<<<<<<<<<<+>>>>>>>>>>>]<]>[->>>+<<<]<<<<<<<
<<<<<]]>[-]>>[-]>[-]>>>>>[>>[-]>[-]>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>>>>>[-<
<<<+>>>>]<<<<[->>>>+<<<+<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>+++++++++++++++[
[>>>>>>>>>]+>[-]>[-]>[-]>[-]>[-]>[-]>[-]>[-]>[-]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>-]+
[>+>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>->>>>[-<<<<+>>>>]<<<<[->>>>+<<<<<[->>
[-<<+>>]<<[->>+>+<<<]+>>>>>>>>>]<<<<<<<<[<<<<<<<<<]]>>>>>>>>>[>>>>>>>>>]<<<<<<<<
<[>[->>>>>>>>>+<<<<<<<<<]<<<<<<<<<<]>[->>>>>>>>>+<<<<<<<<<]<+>>>>>>>>]<<<<<<<<<[
>[-]<->>>[-<<<+>[<->-<<<<<<<+>>>>>>>]<[->+<]>>>]<<[->>+<<]<+<<<<<<<<<]>>>>>>>>>[
>>>[-<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<+>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>]>
>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>[-]>>>>+++++++++++++++[[>>>>>>>>>]<<<<<<<<<-<<<<<
<<<<[<<<<<<<<<]>>>>>>>>>-]+[>>>[-<<<->>>]+<<<[->>>->[-<<<<+>>>>]<<<<[->>>>+<<<<<
<<<<<<<<[<<<<<<<<<]>>>>[-]+>>>>>[>>>>>>>>>]>+<]]+>>>>[-<<<<->>>>]+<<<<[->>>>-<[-
<<<+>>>]<<<[->>>+<<<<<<<<<<<<[<<<<<<<<<]>>>[-]+>>>>>>[>>>>>>>>>]>[-]+<]]+>[-<[>>
>>>>>>>]<<<<<<<<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>[-<<<+>>>]<<<[->>>+>>>>>>[>+>>>
[-<<<->>>]<<<[->>>+<<<]>>>>>>>>]<<<<<<<<+<[>[->+>[-<-<<<<<<<<<<+>>>>>>>>>>>>[-<<
+>>]<]>[-<<-<<<<<<<<<<+>>>>>>>>>>>>]<<<]>>[-<+>>[-<<-<<<<<<<<<<+>>>>>>>>>>>>]<]>
[-<<+>>]<<<<<<<<<<<<<]]>>>>[-<<<<+>>>>]<<<<[->>>>+>>>>>[>+>>[-<<->>]<<[->>+<<]>>
>>>>>>]<<<<<<<<+<[>[->+>>[-<<-<<<<<<<<<<+>>>>>>>>>>>[-<+>]>]<[-<-<<<<<<<<<<+>>>>
>>>>>>>]<<]>>>[-<<+>[-<-<<<<<<<<<<+>>>>>>>>>>>]>]<[-<+>]<<<<<<<<<<<<]>>>>>+<<<<<
]>>>>>>>>>[>>>[-]>[-]>[-]>>>>]<<<<<<<<<[<<<<<<<<<]>>>[-]>[-]>>>>>[>>>>>>>[-<<<<<
<+>>>>>>]<<<<<<[->>>>>>+<<<<+<<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>+>[-<-<<<<+>>>>
>]>>[-<<<<<<<[->>>>>+<++<<<<]>>>>>[-<<<<<+>>>>>]<->+>>]<<[->>+<<]<<<<<[->>>>>+<<
<<<]+>>>>[-<<<<->>>>]+<<<<[->>>>->>>>>[>>>[-<<<->>>]+<<<[->>>-<[-<<+>>]<<[->>+<<
<<<<<<<<<[<<<<<<<<<]>>>>[-]+>>>>>[>>>>>>>>>]>+<]]+>>[-<<->>]+<<[->>->[-<<<+>>>]<
<<[->>>+<<<<<<<<<<<<[<<<<<<<<<]>>>[-]+>>>>>>[>>>>>>>>>]>[-]+<]]+>[-<[>>>>>>>>>]<
<<<<<<<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>[-<<<+>>>]<<<[->>>+>>>>>>[>+>[-<->]<[->+
<]>>>>>>>>]<<<<<<<<+<[>[->>>>+<<[->>-<<<<<<<<<<<<<+>>>>>>>>>>[->>>+<<<]>]<[->>>-
<<<<<<<<<<<<<+>>>>>>>>>>]<]>>[->>+<<<[->>>-<<<<<<<<<<<<<+>>>>>>>>>>]>]<[->>>+<<<
]<<<<<<<<<<<]>>>>>[-]>>[-<<<<<<<+>>>>>>>]<<<<<<<[->>>>>>>+<<+<<<<<]]>>>>[-<<<<+>
>>>]<<<<[->>>>+>>>>>[>+>>[-<<->>]<<[->>+<<]>>>>>>>>]<<<<<<<<+<[>[->>>>+<<<[->>>-
<<<<<<<<<<<<<+>>>>>>>>>>>[->>+<<]<]>[->>-<<<<<<<<<<<<<+>>>>>>>>>>>]<<]>[->>>+<<[
->>-<<<<<<<<<<<<<+>>>>>>>>>>>]<]>[->>+<<]<<<<<<<<<<<<]]>>>>[-]<<<<]>>>>[-<<<<+>>
>>]<<<<[->>>>+>[-]>>[-<<<<<<<+>>>>>>>]<<<<<<<[->>>>>>>+<<+<<<<<]>>>>>>>>>[>>>>>>
>>>]<<<<<<<<<[>[->>>>+<<<[->>>-<<<<<<<<<<<<<+>>>>>>>>>>>[->>+<<]<]>[->>-<<<<<<<<
<<<<<+>>>>>>>>>>>]<<]>[->>>+<<[->>-<<<<<<<<<<<<<+>>>>>>>>>>>]<]>[->>+<<]<<<<<<<<
<<<<]]>>>>>>>>>[>>[-]>[-]>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>[-]>[-]>>>>>[>>>>>[-<<<<+
>>>>]<<<<[->>>>+<<<+<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>>>>>>[-<<<<<+>>>>>
]<<<<<[->>>>>+<<<+<<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>+++++++++++++++[[>>>>
>>>>>]+>[-]>[-]>[-]>[-]>[-]>[-]>[-]>[-]>[-]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>-]+[>+>>
>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>->>>>[-<<<<+>>>>]<<<<[->>>>+<<<<<[->>[-<<+
>>]<<[->>+>>+<<<<]+>>>>>>>>>]<<<<<<<<[<<<<<<<<<]]>>>>>>>>>[>>>>>>>>>]<<<<<<<<<[>
[->>>>>>>>>+<<<<<<<<<]<<<<<<<<<<]>[->>>>>>>>>+<<<<<<<<<]<+>>>>>>>>]<<<<<<<<<[>[-
]<->>>>[-<<<<+>[<->-<<<<<<+>>>>>>]<[->+<]>>>>]<<<[->>>+<<<]<+<<<<<<<<<]>>>>>>>>>
[>+>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>->>>>>[-<<<<<+>>>>>]<<<<<[->>>>>+<<<<
<<[->>>[-<<<+>>>]<<<[->>>+>+<<<<]+>>>>>>>>>]<<<<<<<<[<<<<<<<<<]]>>>>>>>>>[>>>>>>
>>>]<<<<<<<<<[>>[->>>>>>>>>+<<<<<<<<<]<<<<<<<<<<<]>>[->>>>>>>>>+<<<<<<<<<]<<+>>>
>>>>>]<<<<<<<<<[>[-]<->>>>[-<<<<+>[<->-<<<<<<+>>>>>>]<[->+<]>>>>]<<<[->>>+<<<]<+
<<<<<<<<<]>>>>>>>>>[>>>>[-<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<+>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>]>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>+++++++++++++++[[>>>>>>>>
>]<<<<<<<<<-<<<<<<<<<[<<<<<<<<<]>>>>>>>>>-]+>>>>>>>>>>>>>>>>>>>>>+<<<[<<<<<<<<<]
>>>>>>>>>[>>>[-<<<->>>]+<<<[->>>->[-<<<<+>>>>]<<<<[->>>>+<<<<<<<<<<<<<[<<<<<<<<<
]>>>>[-]+>>>>>[>>>>>>>>>]>+<]]+>>>>[-<<<<->>>>]+<<<<[->>>>-<[-<<<+>>>]<<<[->>>+<
<<<<<<<<<<<[<<<<<<<<<]>>>[-]+>>>>>>[>>>>>>>>>]>[-]+<]]+>[-<[>>>>>>>>>]<<<<<<<<]>
>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>->>[-<<<<+>>>>]<<<<[->>>>+<<[-]<<]>>]<<+>>>>[-<<<<
->>>>]+<<<<[->>>>-<<<<<<.>>]>>>>[-<<<<<<<.>>>>>>>]<<<[-]>[-]>[-]>[-]>[-]>[-]>>>[
>[-]>[-]>[-]>[-]>[-]>[-]>>>]<<<<<<<<<[<<<<<<<<<]>>>>>>>>>[>>>>>[-]>>>>]<<<<<<<<<
[<<<<<<<<<]>+++++++++++[-[->>>>>>>>>+<<<<<<<<<]>>>>>>>>>]>>>>+>>>>>>>>>+<<<<<<<<
<<<<<<[<<<<<<<<<]>>>>>>>[-<<<<<<<+>>>>>>>]<<<<<<<[->>>>>>>+[-]>>[>>>>>>>>>]<<<<<
<<<<[>>>>>>>[-<<<<<<+>>>>>>]<<<<<<[->>>>>>+<<<<<<<[<<<<<<<<<]>>>>>>>[-]+>>>]<<<<
<<<<<<]]>>>>>>>[-<<<<<<<+>>>>>>>]<<<<<<<[->>>>>>>+>>[>+>>>>[-<<<<->>>>]<<<<[->>>
>+<<<<]>>>>>>>>]<<+<<<<<<<[>>>>>[->>+<<]<<<<<<<<<<<<<<]>>>>>>>>>[>>>>>>>>>]<<<<<
<<<<[>[-]<->>>>>>>[-<<<<<<<+>[<->-<<<+>>>]<[->+<]>>>>>>>]<<<<<<[->>>>>>+<<<<<<]<
+<<<<<<<<<]>>>>>>>-<<<<[-]+<<<]+>>>>>>>[-<<<<<<<->>>>>>>]+<<<<<<<[->>>>>>>->>[>>
>>>[->>+<<]>>>>]<<<<<<<<<[>[-]<->>>>>>>[-<<<<<<<+>[<->-<<<+>>>]<[->+<]>>>>>>>]<<
<<<<[->>>>>>+<<<<<<]<+<<<<<<<<<]>+++++[-[->>>>>>>>>+<<<<<<<<<]>>>>>>>>>]>>>>+<<<
<<[<<<<<<<<<]>>>>>>>>>[>>>>>[-<<<<<->>>>>]+<<<<<[->>>>>->>[-<<<<<<<+>>>>>>>]<<<<
<<<[->>>>>>>+<<<<<<<<<<<<<<<<[<<<<<<<<<]>>>>[-]+>>>>>[>>>>>>>>>]>+<]]+>>>>>>>[-<
<<<<<<->>>>>>>]+<<<<<<<[->>>>>>>-<<[-<<<<<+>>>>>]<<<<<[->>>>>+<<<<<<<<<<<<<<[<<<
<<<<<<]>>>[-]+>>>>>>[>>>>>>>>>]>[-]+<]]+>[-<[>>>>>>>>>]<<<<<<<<]>>>>>>>>]<<<<<<<
<<[<<<<<<<<<]>>>>[-]<<<+++++[-[->>>>>>>>>+<<<<<<<<<]>>>>>>>>>]>>>>-<<<<<[<<<<<<<
<<]]>>>]<<<<.>>>>>>>>>>[>>>>>>[-]>>>]<<<<<<<<<[<<<<<<<<<]>++++++++++[-[->>>>>>>>
>+<<<<<<<<<]>>>>>>>>>]>>>>>+>>>>>>>>>+<<<<<<<<<<<<<<<[<<<<<<<<<]>>>>>>>>[-<<<<<<
<<+>>>>>>>>]<<<<<<<<[->>>>>>>>+[-]>[>>>>>>>>>]<<<<<<<<<[>>>>>>>>[-<<<<<<<+>>>>>>
>]<<<<<<<[->>>>>>>+<<<<<<<<[<<<<<<<<<]>>>>>>>>[-]+>>]<<<<<<<<<<]]>>>>>>>>[-<<<<<
<<<+>>>>>>>>]<<<<<<<<[->>>>>>>>+>[>+>>>>>[-<<<<<->>>>>]<<<<<[->>>>>+<<<<<]>>>>>>
>>]<+<<<<<<<<[>>>>>>[->>+<<]<<<<<<<<<<<<<<<]>>>>>>>>>[>>>>>>>>>]<<<<<<<<<[>[-]<-
>>>>>>>>[-<<<<<<<<+>[<->-<<+>>]<[->+<]>>>>>>>>]<<<<<<<[->>>>>>>+<<<<<<<]<+<<<<<<
<<<]>>>>>>>>-<<<<<[-]+<<<]+>>>>>>>>[-<<<<<<<<->>>>>>>>]+<<<<<<<<[->>>>>>>>->[>>>
>>>[->>+<<]>>>]<<<<<<<<<[>[-]<->>>>>>>>[-<<<<<<<<+>[<->-<<+>>]<[->+<]>>>>>>>>]<<
<<<<<[->>>>>>>+<<<<<<<]<+<<<<<<<<<]>+++++[-[->>>>>>>>>+<<<<<<<<<]>>>>>>>>>]>>>>>
+>>>>>>>>>>>>>>>>>>>>>>>>>>>+<<<<<<[<<<<<<<<<]>>>>>>>>>[>>>>>>[-<<<<<<->>>>>>]+<
<<<<<[->>>>>>->>[-<<<<<<<<+>>>>>>>>]<<<<<<<<[->>>>>>>>+<<<<<<<<<<<<<<<<<[<<<<<<<
<<]>>>>[-]+>>>>>[>>>>>>>>>]>+<]]+>>>>>>>>[-<<<<<<<<->>>>>>>>]+<<<<<<<<[->>>>>>>>
-<<[-<<<<<<+>>>>>>]<<<<<<[->>>>>>+<<<<<<<<<<<<<<<[<<<<<<<<<]>>>[-]+>>>>>>[>>>>>>
>>>]>[-]+<]]+>[-<[>>>>>>>>>]<<<<<<<<]>>>>>>>>]<<<<<<<<<[<<<<<<<<<]>>>>[-]<<<++++
+[-[->>>>>>>>>+<<<<<<<<<]>>>>>>>>>]>>>>>->>>>>>>>>>>>>>>>>>>>>>>>>>>-<<<<<<[<<<<
<<<<<]]>>>]
"""
def test():
bf = Brainfuck()
program = bf.compileBf(mandelbrotDotBf)
bf.executeBf(program)
if __name__ == '__main__':
test()
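# A smaller smoke test (not part of the original port): this program prints
# "A" by incrementing a cell to 65 (8 * 8 + 1) and emitting it.
#
#     bf = Brainfuck()
#     bf.executeBf(bf.compileBf("++++++++[>++++++++<-]>+."))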
---
import os
import platform
import getpass
if(platform.system() == "Windows"):
os.system("cls")
print(" _")
print("__ _____| | ___ ___ _ __ ___ ___ ")
print("\ \ /\ / / _ \ |/ __/ _ \| '_ ` _ \ / _ \ ")
print(" \ V V / __/ | (_| (_) | | | | | | __/ ")
print(" \_/\_/ \___|_|\___\___/|_| |_| |_|\___| ")
print("\n\n Hi " + getpass.getuser() + ", i'm cento and i'm happy to help you")
print("\n ---------------------------------------------")
print("\n italiano")
print("\n ---------------------------------------------")
language = input("\n please, enter a language : ")
if(language == "italiano"):
os.system("python3 language/italiano/verifica.py")
if(platform.system() == "Linux"):
print("\n questo bot non è supportato per linux \n\n")
exit
---
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import json
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(log_handler)
import http_request_tester as tester
def handle(event, context):
logger.info('handler is triggered: start-test, event={}'.format(event))
logger.info('Records count: {}'.format(len(event['Records'])))
profile_name = os.environ.get('PROFILE_NAME', None)
project_name = os.environ.get('PROJECT_NAME', 'project_name_empty')
project_stage = os.environ.get('PROJECT_STAGE', 'project_stage_empty')
api_endpoint = os.environ.get('API_ENDPOINT', 'api_endpoint_empty')
logger.info('project_name: {}'.format(project_name))
logger.info('project_stage: {}'.format(project_stage))
logger.info('api_endpoint: {}'.format(api_endpoint))
for record in event['Records']:
message = json.loads(record['Sns']['Message'])
interval_in_sec = int(message['Config']['IntervalInSec'])
duration_in_sec = int(message['Config']['DurationInSec'])
logger.info('handler start one-record, message={}'.format(message))
api_gateway_tester = tester.HttpRequestTester(
TestName='ApiGateway',
ProfileName=profile_name,
ProjectName=project_name,
ProjectStage=project_stage,
Endpoint=api_endpoint,
ApiKey=None,
Interval=interval_in_sec,
Duration=duration_in_sec
)
api_gateway_tester.start_loop(message['TestData'])
logger.info('handler finish one record: test-timeout duration_in_sec-{}'.format(duration_in_sec))
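# A minimal local-invocation sketch (hypothetical event payload, mirroring
# the fields the handler reads; not part of the original module):
#
#     event = {'Records': [{'Sns': {'Message': json.dumps({
#         'Config': {'IntervalInSec': '1', 'DurationInSec': '5'},
#         'TestData': {'path': '/ping'},
#     })}}]}
#     handle(event, context=None)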
---
"""Utility code for argparse"""
import argparse
import yaml
#class StoreDictKeyPair(argparse.Action):
# """An action for reading key-value pairs from command line"""
# def __call__(self, parser, namespace, values, option_string=None):
# my_dict = {}
# for kv in values.split(","):
# k,v = kv.split("=")
# my_dict[k] = v
# setattr(namespace, self.dest, my_dict)
class ReadYaml(argparse.Action):
    """An action that parses the option's value as YAML into a dict."""
    def __call__(self, parser, namespace, values, option_string=None):
my_dict = yaml.load(values, Loader=yaml.Loader)
setattr(namespace, self.dest, my_dict)
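# A minimal usage sketch (hypothetical flag name, not part of the original
# module):
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--config', action=ReadYaml, default={})
#     args = parser.parse_args(['--config', 'lr: 0.1\nepochs: 10'])
#     assert args.config == {'lr': 0.1, 'epochs': 10}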
---
# datastore transations and methods
from sqlalchemy.orm import load_only
from sqlalchemy.sql import text
def count_records(session, model, **kwargs):
row_count = session.query(model).filter_by(**kwargs).count()
return row_count
def delete_record(session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).one()
session.delete(instance)
def get_column_values(session, model, column, **kwargs):
instances = session.query(model).filter_by(**kwargs).options(
load_only(column)).order_by(column)
return instances
def insert(session, model, **kwargs):
instance = model(**kwargs)
session.add(instance)
session.flush()
return instance
def insert_or_ignore(session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
if not instance:
instance = model(**kwargs)
session.add(instance)
return instance
def retrieve_first_n(session, model, n, **kwargs):
instances = session.query(model).filter_by(**kwargs).limit(n).all()
return instances
def retrieve_first_record(session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).order_by(
model.did).first()
return instance
def retrieve_last_record(session, model):
instance = session.query(model).order_by(model.did.desc()).first()
return instance
def retrieve_last_record_filtered(session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).order_by(
model.did.desc()).first()
return instance
def retrieve_record(session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
return instance
def retrieve_records(session, model, **kwargs):
instances = session.query(model).filter_by(**kwargs).order_by(
model.did).all()
return instances
def retrieve_cart_order_ids(session, cart_id):
stmn = text("""
SELECT `order`.did
FROM `order`
WHERE cart_id=:cart_id
ORDER BY `order`.did
""")
stmn = stmn.bindparams(cart_id=cart_id)
instances = session.execute(stmn)
return instances
def get_cart_data_view_records(
session, system_id, user='All users', status=''):
if user == 'All users' and status:
stmn = text("""
SELECT cart_id, cart_name, cart_date,
system_id, cart_status, cart_owner, linked
FROM carts_meta
WHERE system_id=:system_id AND cart_status=:status
ORDER BY cart_date DESC
""")
stmn = stmn.bindparams(system_id=system_id, status=status)
elif user == 'All users' and not status:
stmn = text("""
SELECT cart_id, cart_name, cart_date, system_id,
cart_status, cart_owner, linked
FROM carts_meta
WHERE system_id=:system_id
ORDER BY cart_date DESC
""")
stmn = stmn.bindparams(system_id=system_id)
elif user != 'All users' and not status:
stmn = text("""
SELECT cart_id, cart_name, cart_date, system_id,
cart_status, cart_owner, linked
FROM carts_meta
WHERE system_id=:system_id AND cart_owner=:user
ORDER BY cart_date DESC
""")
stmn = stmn.bindparams(system_id=system_id, user=user)
else:
stmn = text("""
SELECT cart_id, cart_name, cart_date, system_id,
cart_status, cart_owner, linked
FROM carts_meta
WHERE system_id=:system_id AND cart_owner=:user AND cart_status=:status
ORDER BY cart_date DESC
""")
stmn = stmn.bindparams(system_id=system_id, user=user, status=status)
instances = session.execute(stmn)
return instances
def retrieve_cart_details_view_stmn(cart_id):
stmn = text("""
SELECT * FROM cart_details
WHERE cart_id=:cart_id
""")
stmn = stmn.bindparams(cart_id=cart_id)
return stmn
def retrieve_unique_vendors_from_cart(session, cart_id):
stmn = text("""
SELECT DISTINCT name
FROM vendor
JOIN `order` ON `order`.vendor_id = vendor.did
WHERE `order`.cart_id=:cart_id
;
""")
stmn = stmn.bindparams(cart_id=cart_id)
instances = session.execute(stmn)
return instances
def update_record(session, model, did, **kwargs):
instance = session.query(model).filter_by(did=did).one()
for key, value in kwargs.items():
setattr(instance, key, value)
def construct_report_query_stmn(system_id, library_id,
user_ids, start_date, end_date):
"""
    Creates a SQL query statement to select datastore records matching
report criteria
args:
system_id: int, datastore system.did
library_id: int, datastore library.did
user_ids: list, list of datastore user.did
start_date: str, starting date (inclusive) in format YYYY-MM-DD
end_date: str, ending date (inclusive) in format YYYY-MM-DD
returns:
stmn: instance of sqlalchemy.sql.expression.TextClause
"""
sql_str = """
SELECT cart.did as cart_id,
cart.created as cart_date,
status.name as cart_status,
user.name as user,
system.name as system,
library.name as library,
`order`.did as order_id,
lang.name as lang_name,
lang.code as lang_code,
audn.name as audn,
vendor.name as vendor,
mattype.name as mattype,
resource.price_disc as price,
branch.code as branch_code,
branch.name as branch_name,
orderlocation.qty as qty,
fund.code as fund
FROM cart
JOIN status ON cart.status_id = status.did
JOIN user ON cart.user_id = user.did
JOIN system ON cart.system_id = system.did
JOIN library ON cart.library_id = library.did
JOIN `order` ON cart.did = `order`.cart_id
JOIN lang ON `order`.lang_id = lang.did
JOIN audn ON `order`.audn_id = audn.did
JOIN vendor ON `order`.vendor_id = vendor.did
JOIN mattype ON `order`.matType_id = mattype.did
JOIN resource ON `order`.did = resource.order_id
JOIN orderlocation ON `order`.did = orderlocation.order_id
JOIN branch ON orderlocation.branch_id = branch.did
JOIN fund ON orderlocation.fund_id = fund.did
WHERE cart.created BETWEEN CAST(:start_date AS DATE) AND CAST(:end_date AS DATE)
AND cart.system_id=:system_id
"""
    params = dict(
        system_id=system_id,
        start_date=start_date,
        end_date=end_date)
    if user_ids:
        s = []
        sql_str += ' AND ('
        for i, user_id in enumerate(user_ids):
            arg = f'user_{i}'
            params[arg] = user_id
            s.append(f'cart.user_id=:{arg}')
        sql_str += ' OR '.join(s)
        sql_str += ' )'
if library_id is not None:
params['library_id'] = library_id
sql_str += ' AND cart.library_id=:library_id'
stmn = text(sql_str)
stmn = stmn.bindparams(**params)
return stmn
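# A minimal usage sketch (hypothetical session and model names, not part of
# the original module), assuming a configured SQLAlchemy session and mapped
# models with a ``did`` primary key:
#
#     rec = insert_or_ignore(session, Vendor, name='Ingram')
#     total = count_records(session, Vendor, name='Ingram')
#     stmn = construct_report_query_stmn(
#         system_id=1, library_id=None, user_ids=[2, 3],
#         start_date='2021-01-01', end_date='2021-12-31')
#     rows = session.execute(stmn)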
---
import rclpy
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import Image # Image is the message type
import cv2 # OpenCV library
from cv_bridge import CvBridge # Package to convert between ROS and OpenCV Images
import numpy as np
# Naming the Output window
windowname = 'Result'
cv2.namedWindow(windowname)
output = None
x, y, w, h = 0, 0, 0, 0
first_point_saved = False
second_point_saved = False
track_window = (x, y, w, h)
can_track = False
class CamShift(Node):
def __init__(self):
super().__init__('camshift')
self.subscription = self.create_subscription(
Image,
'/image',
self.listener_callback,
qos_profile_sensor_data)
self.subscription # prevent unused variable warning
# Used to convert between ROS and OpenCV images
self.br = CvBridge()
def listener_callback(self, data):
global x, y, w, h, first_point_saved,second_point_saved, track_window, can_track, output, roi_hist, roi
# Display the message on the console
#self.get_logger().info('Receiving image')
# Convert ROS Image message to OpenCV image
#frame = self.br.imgmsg_to_cv2(data, "bgr8")
#ret, frame = self.br.imgmsg_to_cv2(data, "bgr8")
frame = self.br.imgmsg_to_cv2(data, "bgr8")
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Check if 2nd point is also saved then initialize the tracker
if second_point_saved:
roi_hist, roi = self.initialize(frame, track_window)
second_point_saved = False
can_track = True
# Start tracking
if can_track == True:
dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
# apply camshift to get the new location
ret, track_window = cv2.CamShift(dst, track_window, self.term_crit)
# Draw it on image
pts = cv2.boxPoints(ret)
pts = np.int0(pts)
print("track_window")
print("x, y, w, h")
print(track_window)
cv2.imshow('roi', roi)
output = cv2.polylines(frame,[pts],True, 255,2)
else:
output = frame
if first_point_saved:
cv2.circle(output, (x, y), 5, (0, 0, 255), -1)
cv2.destroyWindow('roi')
# Show the output
cv2.imshow(windowname,output)
cv2.waitKey(1)
def click_event(event, px, py, flags, param):
global x, y, w, h, first_point_saved, second_point_saved, track_window, can_track, output
# Left mouse button release event
if event == cv2.EVENT_LBUTTONUP:
if first_point_saved:
w = px-x
h = py-y
track_window = (x, y, w, h)
first_point_saved = False
second_point_saved = True
else:
x = px
y = py
first_point_saved = True
can_track = False
# Right mouse button press event
if event == cv2.EVENT_RBUTTONDOWN:
can_track = False
cv2.setMouseCallback(windowname, click_event) # Start the mouse event
# initialize tracker
def initialize(self, frame, track_window):
x, y, w, h = track_window
# set up the ROI for tracking
roi = frame[y:y+h, x:x+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
roi_hist = cv2.calcHist([hsv_roi],[0],None,[180],[0,180])
roi_hist = cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
return roi_hist, roi
# Setup the termination criteria
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
def main(args=None):
rclpy.init(args=args)
camshift = CamShift()
rclpy.spin(camshift)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
camshift.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
---
from random import randint
import pygame as pg
from scripts import constants as const
class Bird(pg.sprite.Sprite):
SIZE = const.SPRITE_SIZE[0]
MIN_SPEED = 1
MAX_SPEED = 10
def __init__(self, bird_image):
pg.sprite.Sprite.__init__(self)
self.image = bird_image
self.rect = self.image.get_rect()
        # randint requires ints; use floor division (/// yields floats in Python 3).
        self.rect.center = (randint(self.SIZE // 2, const.WIDTH // 2 - self.SIZE // 2),
                            randint(self.SIZE // 2, const.HEIGHT // 2 - self.SIZE // 2))
self.speed_x = (-1) ** randint(0, 1) * randint(self.MIN_SPEED, self.MAX_SPEED)
self.speed_y = (-1) ** randint(0, 1) * randint(self.MIN_SPEED, self.MAX_SPEED)
if self.speed_x < 0:
self.image = pg.transform.flip(self.image, True, False)
def move(self):
self.rect.x += self.speed_x
self.rect.y += self.speed_y
if self.rect.left < 0 or self.rect.right > const.WIDTH:
self.image = pg.transform.flip(self.image, True, False)
self.speed_x = -self.speed_x
if self.rect.top < 0 or self.rect.bottom > const.HEIGHT:
self.speed_y = -self.speed_y
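# A minimal game-loop sketch (hypothetical asset path, not part of the
# original module), assuming scripts.constants defines WIDTH, HEIGHT and
# SPRITE_SIZE as used above:
#
#     pg.init()
#     screen = pg.display.set_mode((const.WIDTH, const.HEIGHT))
#     image = pg.image.load('assets/bird.png').convert_alpha()
#     birds = pg.sprite.Group(*[Bird(image) for _ in range(5)])
#     clock = pg.time.Clock()
#     running = True
#     while running:
#         running = not any(e.type == pg.QUIT for e in pg.event.get())
#         for bird in birds:
#             bird.move()
#         screen.fill((0, 0, 0))
#         birds.draw(screen)
#         pg.display.flip()
#         clock.tick(60)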
---
"""
Example showing for tkinter and ttk how to do:
-- Simple animation
-- on a tkinter Canvas.
References:
-- https://effbot.org/tkinterbook/canvas.htm
This is the simplest explanation,
but very old and possibly somewhat out of date.
Everywhere that it says "pack" use "grid" instead.
-- The tkinter.pdf document in this project.
This is by far the most complete reference work for tkinter and ttk.
It is for reference, NOT a tutorial.
-- https://tkdocs.com/tutorial/canvas.html
This is a more complete and up-to-date tutorial than the one above.
It shows each example in four different languages.
Python is the fourth (last) one. Ignore the other-language examples.
The key ideas are:
1. Drawing (and hence animation) is on a tkinter.Canvas.
2. You put an object onto a Canvas with:
id = canvas.create_XXX(POSITION, OTHER-OPTIONS)
where XXX can be any of:
oval, arc, bitmap, image, line, polygon, rectangle, text, window,
and where the specifics of POSITION and OTHER-OPTIONS depends on the
type of object being created. See the example in the code below
for an oval. See the above reference work for details on other types.
3. The ID returned by a call to create_XXX is how you keep track of objects
on a Canvas for future animation (movements, color changes, etc.).
4. There are three basic methods for animating (changing) an object.
Each method is a Canvas method whose first argument
is the ID of the object on the Canvas. You can:
a. MOVE an object BY a given amount by:
canvas.move(ID, delta_x, delta_y)
b. MOVE an object TO a certain position by:
canvas.coords(ID, NEW_POSITION ...)
where the specifics of NEW_POSITION depend on the type of the object.
c. CHANGE OTHER CHARACTERISTICS of objects as in this example:
canvas.coords(ID, fill="blue") # Changes the fill color to "blue"
The specifics of what you can change (and how) depends on the type
of object. See the above reference work for details.
5. You must FIRST construct everything needed for the animation,
and THEN do the root.mainloop() to start the GUI running.
The code below shows one way to accomplish that, using this structure:
a. The main method constructs and then starts an Animation object.
b. The Animation object constructs the GUI, passing itself to the GUI
so that the GUI can later ask the Animation to do stuff.
c. The GUI contains:
-- The one-and-only tkinter.Tk object.
-- Frame(s) and other widgets as desired.
-- A tkinter.Canvas on a Frame.
d. When the GUI is constructed, you include all the tkinter/ttk code
that you have seen in previous examples EXCEPT not (yet) the
root.mainloop()
e. The GUI includes a start method that contains:
root.mainloop()
f. The Animation object (which constructed the GUI) calls the GUI's
start method to start the animation running.
g. The Animation object has a method:
run_one_cycle
that makes all the changes to all the objects in the Animation,
for ONE cycle of the animation, by using the Canvas methods:
move coords itemconfigure
The Animation has access to the Canvas because the Animation
constructed (and stores) the GUI, and the GUI makes and stores
the Canvas.
h. The Animation's run_one_cycle method
is called repeatedly BY THE GUI as follows, all in the GUI class:
def __init__(self, animation):
self.animation = animation
self.root = tkinter.Tk()
...
self.root.after(1000, self.animation_loop)
def animation_loop(self):
self.animation.run_one_cycle()
self.root.after(10, self.animation_loop)
The after method sets a TIMER that is triggered
after the given number of milliseconds (1000 ms in the first call
to after in the above, and 10 ms in the second call to after).
Because it is a TIMER, Tkinter is able to react to button presses
and other stuff while the TIMER is waiting to ring its alarm.
When the TIMER rings its alarm, it calls the second argument
to the after method, which is self.animation_loop in the
above. So, self.animation_loop is called the first time after
1 second (1000 ms), and it runs one cycle of the animation at
that time. Thereafter it repeatedly:
-- Waits 10 ms (via a TIMER that allows other stuff to happen)
-- Calls animation_loop again
-- Runs one cycle of the animation.
In the actual code below, instead of running every 10 ms,
it runs every animation.cycle_ms, so that the Animation object
can control the "refresh rate" of the animation.
See the code below for an example that uses the above structure.
While you are not REQUIRED to use the same structure, it is probably a good
idea to do so for any video-game style game.
This example does NOT include any message-passing with MQTT to other computers.
Other examples cover that topic.
    SEE THE UML CLASS DIAGRAM included with this project.
Authors: David Mutchler and his colleagues
at Rose-Hulman Institute of Technology.
"""
import random
import tkinter
from tkinter import ttk
def main():
animation = Animation()
animation.start()
class Animation(object):
""" An animation of Ball objects (per the Ball class defined below). """
def __init__(self):
# Construct the GUI, which constructs and stores a Canvas.
# Store that Canvas in THIS object too, so that animated objects can
# act upon it. Here, our animated objects are all Ball objects,
# stored in the self.balls list, which starts with a single Ball.
# Each Ball needs to have the Canvas so that the Ball can change its
# position and fill color (and anything else it might want to change).
self.gui = GUI(self)
self.canvas = self.gui.canvas
ball = Ball(self.canvas) # Note how each Ball gets the Canvas
self.balls = [ball]
self.cycle_ms = 10 # Run an animation step every 10 ms (approximately)
def start(self):
# Called after the GUI, the Animation, and all the animated objects
# are constructed. The GUI's start method starts the mainloop
# in which the program remains for the remainder of its run.
self.gui.start()
def run_one_cycle(self):
"""
Must make whatever changes animated objects need to make on the Canvas,
for one iteration (cycle) of the animation loop.
"""
# One out of every 200 cycles, make a new Ball.
r = random.randrange(1, 201) # r is between 1 and 200, inclusive
if r == 1:
self.balls.append(Ball(self.canvas))
# Animate each ball.
for ball in self.balls:
ball.run_one_cycle()
class GUI(object):
def __init__(self, animation):
"""
Stores the given Animation object in order to call the Animation
object's run_one_cycle method repeatedly, by using root.after(...)
Constructs all the GUI widgets, but does NOT (yet) call root.mainloop.
:type animation: Animation
"""
self.animation = animation
# The usual Tk and Frame objects, plus any other widgets you want.
self.root = tkinter.Tk()
self.frame = ttk.Frame(self.root, padding=10)
self.frame.grid()
self.canvas = self.make_canvas()
# Starts the animation loop AFTER 1000 ms (i.e., 1 second).
self.root.after(1000, self.animation_loop)
def make_canvas(self):
canvas_width = 400
canvas_height = 300
canvas = tkinter.Canvas(self.frame, width=canvas_width,
height=canvas_height)
canvas.width = canvas_width
canvas.height = canvas_height
canvas.grid()
return canvas
def start(self):
# Called by the Animation object when the program is ready to enter the
# Tk object's mainloop and remain there for the remainder of the run.
self.root.mainloop()
def animation_loop(self):
# Tells the Animation to run one cycle of the animation.
# Then sets up a timer to call this same method again after a few ms.
self.animation.run_one_cycle()
self.root.after(self.animation.cycle_ms, self.animation_loop)
class Ball(object):
def __init__(self, canvas):
"""
The Ball needs the Canvas so that it can update its characteristics
(position, fill color, etc) as the animation runs.
:type canvas: tkinter.Canvas
"""
self.canvas = canvas
# Set the characteristics of the Ball:
# specific x, y and diameter, with a random color.
x = 200
y = 200
self.diameter = 20
self.colors = ["red", "green", "blue"]
r = random.randrange(len(self.colors))
self.color = self.colors[r]
# Make the item on the Canvas for drawing the Ball, storing its ID
# for making changes to the Ball (moving it, changing color, etc.).
# Here, each Ball is a filled circle (actually an oval),
# defined by its upper-left and lower-right corners.
self.id = self.canvas.create_oval(x, y,
x + self.diameter, y + self.diameter,
fill=self.color)
def run_one_cycle(self):
""" Illustrates the 3 basic ways to change (animate) an item. """
# Move RED balls BY a small random amount
# (using the Canvas move method):
if self.color == "red":
delta_x = random.randrange(-5, 6) # Between -5 and 5, inclusive
delta_y = random.randrange(-2, 3) # Between -2 and 2, inclusive
self.canvas.move(self.id, delta_x, delta_y)
# Move GREEN balls TO a certain position, randomly inside a box near
# the upper-left of the window (using the Canvas coords method):
elif self.color == "green":
x = random.randrange(50, 101) # Between 50 and 100, inclusive
y = random.randrange(20, 41) # Between 20 and 40, inclusive
self.canvas.coords(self.id, x, y,
x + self.diameter, y + self.diameter)
# Change balls to a random color, every 100 cycles or so,
# about once a second (using the Canvas itemconfigure method):
r1 = random.randrange(1, 101) # Random between 1 and 100, inclusive
if r1 == 1:
r2 = random.randrange(len(self.colors))
self.color = self.colors[r2]
self.canvas.itemconfigure(self.id, fill=self.color)
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
class LinkedList:
def __init__(self, head):
self.head = head
self.current_element = self.head
# Node navigation
def next(self):
if self.current_element.next is None:
return
self.current_element = self.current_element.next
def go_back_to_head(self):
self.current_element = self.head
# Node queries
def get_current_element(self):
return self.current_element.data
# Subordinate classes
class Node:
"""A Node has two properties:
`data` which represents the instance of data stored in the node
`next` which is a pointer to the next node
"""
def __init__(self, data=None, next=None):
self.data = data
self.next = next
if __name__ == '__main__':
data_set = ['alex', 'siobhan', 'lucy', 'rosie']
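    # NOTE: only the first two names are linked below; 'lucy' and 'rosie' go unused.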
linked_list = LinkedList(head=LinkedList.Node(data='alex', next=None))
linked_list.head.next = LinkedList.Node(data='siobhan')
print(linked_list.get_current_element())
linked_list.next()
print(linked_list.get_current_element())
linked_list.go_back_to_head()
print(linked_list.get_current_element())
|
nilq/baby-python
|
python
|
import pandas as pd
import os
import subprocess as sub
import re
import sys
from Bio import SeqUtils
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
# path = os.path.join(os.path.expanduser('~'),'GENOMES_BACTER_RELEASE69/genbank')
path = "."
# ['DbxRefs','Description','FeaturesNum','assembly_accession','GenomicLen','GenomicName','Keywords','NucsPresent','Organism_des',
# 'SourceDbxRefs','SourceOrganism','SourcePlasmid','SourceStrain','Taxonomy','BioProject','TaxonID','Organism_env',
# 'OptimumTemperature','TemperatureRange','OxygenReq','Habitat','Salinity','crit_NC','crit_WGS','crit_genlen',
# 'crit_features','crit_comp_genome','crit_plasmid']
env_dat = pd.read_csv(os.path.join(path,"summary_organisms_interest.dat"))
taxon_dat = pd.read_csv(os.path.join(path,"arch_taxonomy_interest.dat"))
check_halo = lambda tax_class: any(_ in tax_class for _ in ('Halobacteria','Nanohaloarchaea'))
taxon_dat['halo'] = taxon_dat['tax_lineages'].apply(lambda lins: any( check_halo(lin.split(';')) for lin in lins.split(':') ) )
#['assembly_accession','cDNA','fid','pid','product','protein','status','table','ribosomal','CAI','TrOp']
gen_dat = pd.read_csv(os.path.join(path,"complete_arch_CDS_CAI_DNA_Rnd.dat"))
# PROTEOME LEVEL AMINO ACID FREQUENCIES ...
# "proteome_all.dat"
# # file with the organisms of interest
# dat_fname = os.path.join(bib2_scr_path,'catalog_with_accesion.dat')
# dat = pd.read_csv(dat_fname)
aacids = sorted(list('CMFILVWYAGTSNQDEHRKP'))
cost_vec_path = path
akashi = os.path.join(cost_vec_path,'akashi-cost.d')
argentina = os.path.join(cost_vec_path,'argentina-cost.d')
akashi_cost = pd.read_csv(akashi,header=None,sep=' ')
argentina_cost = pd.read_csv(argentina,header=None,sep=' ')
thermo_freq = pd.read_csv(os.path.join(path,'arch_thermo.dat'),header=None,sep=' ')
akashi_cost.set_index(0,inplace=True)
argentina_cost.set_index(0,inplace=True)
thermo_freq.set_index(0,inplace=True)
akashi_cost.sort_index(inplace=True)
argentina_cost.sort_index(inplace=True)
thermo_freq.sort_index(inplace=True)
#
gen_dat_org = gen_dat.groupby('assembly_accession')
# genom_id = orgs.groups.keys() # env_dat['assembly_accession'] ...
# gen_dat_grouped.get_group(idx)
#
# how to get quantile ...
# q75 = pid_cai['CAI'].quantile(q=0.75)
#
#
num_of_quantiles = 5
#
stat_dat = {'assembly_accession':[],
'OptimumTemperature':[],
'TrOp':[]}
for i in range(num_of_quantiles):
stat_dat['q%d'%i] = []
stat_dat['R20_q%d'%i] = []
stat_dat['Akashi_q%d'%i] = []
#
env_dat_tax = pd.merge(env_dat,taxon_dat,on='assembly_accession')
#
for idx,topt,halo in env_dat_tax[['assembly_accession','OptimumTemperature','halo']].itertuples(index=False):
# excluding halophiles ...
if not halo:
cds_cai_dat = gen_dat_org.get_group(idx)
# is it a translationally optimized organism ?
        # avoid shadowing the builtins all/any
        all_trop, any_trop = cds_cai_dat['TrOp'].all(), cds_cai_dat['TrOp'].any()
        if all_trop == any_trop:
            trans_opt = all_trop
        else: # any_trop != all_trop
            print("%s@T=%f: Something wrong is happening: TrOp flag is not same for all ..." % (idx, topt))
# THIS IS just a stupid precaution measure, in case we messed something upstream ...
# not that stupid after all, because NaN is behaving badly here ...
if cds_cai_dat['TrOp'].notnull().all():
#
# we can use this 'qcut' function from pandas to divide our proteins by the quantiles ...
category,bins = pd.qcut(cds_cai_dat['CAI'],q=num_of_quantiles,retbins=True,labels=False)
#
stat_dat['assembly_accession'].append(idx)
stat_dat['OptimumTemperature'].append(topt)
stat_dat['TrOp'].append(trans_opt)
#
# then we could iterate over proteins/cDNAs in these categories ...
for cat in range(num_of_quantiles):
cds_cai_category = cds_cai_dat[category==cat]
total_length = cds_cai_category['protein'].str.len().sum()
IVYWREL = sum(cds_cai_category['protein'].str.count(aa).sum() for aa in list('IVYWREL'))
# IVYWREL = cds_cai_category['protein'].str.count('|'.join("IVYWREL")).sum() # tiny bit slower ...
f_IVYWREL = float(IVYWREL)/float(total_length)
# 20-vector for of amino acid composition ...
aa_freq_20 = np.true_divide([cds_cai_category['protein'].str.count(aa).sum() for aa in aacids],float(total_length))
# slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
_1,_2,R20,_4,_5 = stats.linregress(aa_freq_20, thermo_freq[1])
# Akashi ...
cost = np.dot(aa_freq_20,akashi_cost[1])
# appending ...
#
#
stat_dat['q%d'%cat].append(f_IVYWREL)
stat_dat['R20_q%d'%cat].append(R20)
stat_dat['Akashi_q%d'%cat].append(cost)
#
#
#
cai_stats_quant = pd.DataFrame(stat_dat)
#
cai_stats_quant_TrOp = cai_stats_quant[cai_stats_quant.TrOp]
cai_stats_quant_noTrOp = cai_stats_quant[~cai_stats_quant.TrOp]
plt.clf()
bins = np.linspace(-0.05,0.05,50)
# plt.hist(list(cai_stats_quant_TrOp.q4 - cai_stats_quant_TrOp.q1),bins=bins,color='blue')
plt.hist(list(cai_stats_quant.q4 - cai_stats_quant.q1),bins=bins,color='red',alpha=0.8)#,cumulative=True)
plt.xlabel("IVYWREL(HExp)-IVYWREL(LExp)")
# plt.show()
plt.savefig("IVYWREL_quantile_hist_arch.png")
plt.clf()
plt.plot(cai_stats_quant.OptimumTemperature,cai_stats_quant.q1,'bo',alpha=0.8)
plt.plot(cai_stats_quant.OptimumTemperature,cai_stats_quant.q4,'ro',alpha=0.8)
plt.xlabel('Temperature')
plt.ylabel('IVYWREL(HE:red;LE:blue)')
# plt.show()
plt.savefig("IVYWREL_dots_compare_arch.png")
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.errorbar([i+1,],cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature>0][k1].mean(),yerr=cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature>0][k1].std(),fmt='o')
plt.xlim(0,6)
plt.ylabel(k1)
plt.xlabel('CAI quantile')
plt.savefig("IVYWREL_arch_qunatile_trend_Shuff.noTrop.png")
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.errorbar([i+1,],cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature>0][k2].mean(),yerr=cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature>0][k2].std(),fmt='o')
plt.xlim(0,6)
plt.ylabel(k2)
plt.xlabel('CAI quantile')
plt.savefig("R20_arch_qunatile_trend_Shuff.noTrop.png")
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.errorbar([i+1,],cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature>0][k3].mean(),yerr=cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature>0][k3].std(),fmt='o')
plt.xlim(0,6)
plt.ylabel(k3)
plt.xlabel('CAI quantile')
plt.savefig("Akashi_arch_qunatile_trend_Shuff.noTrop.png")
#####################################################################################################
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.errorbar([i+1,],cai_stats_quant[cai_stats_quant.OptimumTemperature>0][k1].mean(),yerr=cai_stats_quant[cai_stats_quant.OptimumTemperature>0][k1].std(),fmt='o')
plt.xlim(0,6)
plt.ylabel(k1)
plt.xlabel('CAI quantile')
plt.savefig("IVYWREL_arch_qunatile_trend_Shuff.ALL.png")
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.errorbar([i+1,],cai_stats_quant[cai_stats_quant.OptimumTemperature>0][k2].mean(),yerr=cai_stats_quant[cai_stats_quant.OptimumTemperature>0][k2].std(),fmt='o')
plt.xlim(0,6)
plt.ylabel(k2)
plt.xlabel('CAI quantile')
plt.savefig("R20_arch_qunatile_trend_Shuff.ALL.png")
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.errorbar([i+1,],cai_stats_quant[cai_stats_quant.OptimumTemperature>0][k3].mean(),yerr=cai_stats_quant[cai_stats_quant.OptimumTemperature>0][k3].std(),fmt='o')
plt.xlim(0,6)
plt.ylabel(k3)
plt.xlabel('CAI quantile')
plt.savefig("Akashi_arch_qunatile_trend_Shuff.ALL.png")
#####################################################################################################
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.errorbar([i+1,],cai_stats_quant_TrOp[cai_stats_quant_TrOp.OptimumTemperature>0][k1].mean(),yerr=cai_stats_quant_TrOp[cai_stats_quant_TrOp.OptimumTemperature>0][k1].std(),fmt='o')
plt.xlim(0,6)
plt.ylabel(k1)
plt.xlabel('CAI quantile')
plt.savefig("IVYWREL_arch_qunatile_trend_Shuff.TrOp.png")
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.errorbar([i+1,],cai_stats_quant_TrOp[cai_stats_quant_TrOp.OptimumTemperature>0][k2].mean(),yerr=cai_stats_quant_TrOp[cai_stats_quant_TrOp.OptimumTemperature>0][k2].std(),fmt='o')
plt.xlim(0,6)
plt.ylabel(k2)
plt.xlabel('CAI quantile')
plt.savefig("R20_arch_qunatile_trend_Shuff.TrOp.png")
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.errorbar([i+1,],cai_stats_quant_TrOp[cai_stats_quant_TrOp.OptimumTemperature>0][k3].mean(),yerr=cai_stats_quant_TrOp[cai_stats_quant_TrOp.OptimumTemperature>0][k3].std(),fmt='o')
plt.xlim(0,6)
plt.ylabel(k3)
plt.xlabel('CAI quantile')
plt.savefig("Akashi_arch_qunatile_trend_Shuff.TrOp.png")
# R20 is flat on average (strange bi-modality?!)
# | meso thermo
# ------+-------------
# TrOp | NA NA
# noTrOp| ~~+ ~~-
# Akashi is flat on average (strange local minimum at middle CAI quantile)
# | meso thermo
# ------+-------------
# TrOp | NA NA
# noTrOp| ~ ~
# IVYWREL is declining on average (?!)
# | meso thermo
# ------+-------------
# TrOp | NA NA
# noTrOp| -- --
|
nilq/baby-python
|
python
|
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__, static_url_path='/static')  # serve files in ./static at /static
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///App.sqlite3'
app.config['SECRET_KEY'] = "secret key"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Destination(db.Model):
__tablename__ = "Destination"
DID = db.Column(db.Integer,primary_key=True,autoincrement=True)
Pincode = db.Column(db.Integer)
dod = db.Column(db.String(30))
doa = db.Column(db.String(30))
city = db.Column(db.String(50))
def __init__(self,pin,dod,doa,city):
self.Pincode=pin
self.dod=dod
self.doa=doa
self.city=city
class Passenger(db.Model):
__tablename__ = "Passenger"
PID = db.Column(db.Integer,primary_key=True,autoincrement=True)
fname = db.Column(db.String(30))
lname = db.Column(db.String(30))
noc = db.Column(db.Integer)
noa = db.Column(db.Integer)
address = db.Column(db.String(50))
dob = db.Column(db.String(30))
DID = db.Column(db.Integer, db.ForeignKey('Destination.DID'))
Destination = db.relationship("Destination", backref=db.backref("Destination", uselist=False))
def __init__(self,fname,lname,noc,noa,address,dob,did):
self.fname=fname
self.lname=lname
self.noc=noc
self.noa=noa
self.address=address
self.dob=dob
self.DID=did
class PassengerMobileNumber(db.Model):
__tablename__ = 'PassengerMobileNumber'
id = db.Column(db.Integer, primary_key=True)
PID = db.Column(db.Integer,db.ForeignKey('Passenger.PID'))
MobileNumber=db.Column(db.Integer)
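    # A passenger may not register the same mobile number twice: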
__table_args__ = ( db.UniqueConstraint('PID','MobileNumber'), )
def __init__(self,pid,phnno):
self.MobileNumber=phnno
self.PID=pid
class PassengerDestination(db.Model):
__tablename__ = 'PassengerDestination'
id = db.Column(db.Integer, primary_key=True)
PID = db.Column(db.Integer,db.ForeignKey('Passenger.PID'))
DID = db.Column(db.Integer,db.ForeignKey('Destination.DID'))
__table_args__ = ( db.UniqueConstraint('PID','DID'), )
def __init__(self,pid,did):
self.DID=did
self.PID=pid
class Transaction(db.Model):
__tablename__ = "Transaction"
TransID = db.Column(db.Integer,primary_key=True,autoincrement=True)
Amount = db.Column(db.Integer)
PaymentMode = db.Column(db.String(30))
PID=db.Column(db.Integer, db.ForeignKey('Passenger.PID'))
Passenger = db.relationship("Passenger", backref=db.backref("Passenger", uselist=False))
def __init__(self,Amount,PaymentMode,pid):
self.Amount=Amount
self.PaymentMode=PaymentMode
self.PID=pid
class Room(db.Model):
__tablename__ = "Room"
ROOM_NUMBER = db.Column(db.Integer,primary_key=True)
status = db.Column(db.String(20))
roomtype = db.Column(db.String(20))
PID = db.Column(db.Integer,db.ForeignKey('Passenger.PID'))
def __init__(self,roomtype,Passenger_ID):
self.status="Occupied"
self.roomtype=roomtype
self.PID=Passenger_ID
class Restaurant(db.Model):
__tablename__="Restaurant"
Rest_ID = db.Column(db.String(30),primary_key=True)
No_of_tables = db.Column(db.Integer)
Cuisine = db.Column(db.String(30))
def __init__(self,Restid,c):
self.Rest_ID=Restid
self.Cuisine=c
self.No_of_tables=50
class Table(db.Model):
__tablename__="Table"
S_No = db.Column(db.Integer,primary_key=True)
Table_Number = db.Column(db.Integer,nullable=False)
Rest_ID = db.Column(db.Integer,db.ForeignKey('Restaurant.Rest_ID'),nullable=False)
People_per_table = db.Column(db.Integer)
Tstatus = db.Column(db.String(30),default="Vacant")
PID = db.Column(db.Integer,db.ForeignKey('Passenger.PID'))
__table_args__ = ( db.UniqueConstraint('Table_Number','Rest_ID'), )
def __init__(self,id,ppt,pid):
self.PID=pid
self.Rest_ID=id
self.People_per_table=ppt
@app.route('/Destination.html', methods=['POST',"GET"])
def destination():
return render_template("Destination.html")
@app.route('/Login.html',methods=["POST","GET"])
def login():
return render_template("Login.html")
@app.route('/Restaurants.html')
def restaurant():
return render_template("Restaurants.html")
@app.route('/Restaurants1.html')
def Create():
rest1=Restaurant("ShangPalace","Chinese")
db.session.add(rest1)
rest2=Restaurant("LosLobos","Italian")
db.session.add(rest2)
rest3=Restaurant("SpiceCrossing","Mexican")
db.session.add(rest3)
rest4=Restaurant("LaCucina","Thai")
db.session.add(rest4)
rest5=Restaurant("FoodRepublic","Indian")
db.session.add(rest5)
db.session.commit()
return "<h1>Added successfully<h1>"
@app.route('/')
def home_page():
return render_template("HomePage.html")
@app.route('/About.html')
def about():
return render_template("About.html")
@app.route('/Casino.html')
def casino():
return render_template("Casino.html")
@app.route('/CruiseActivities.html')
def cruise_activities():
return render_template("CruiseActivities.html")
@app.route('/Entertainment.html')
def entertainment():
return render_template("Entertainment.html")
@app.route('/Fitness.html')
def fitness():
return render_template("Fitness.html")
@app.route('/index.html')
def index():
return render_template("index.html")
@app.route('/RestaurantsFoodRepublic.html')
def food_republic():
return render_template("RestaurantsFoodRepublic.html")
@app.route('/RestaurantsLaCucina.html')
def la_cucina():
return render_template("RestaurantsLaCucina.html")
@app.route('/RestaurantsLosLobos.html')
def los_lobos():
return render_template("RestaurantsLosLobos.html")
@app.route('/RestaurantsShangPalace.html')
def shang_palace():
return render_template("RestaurantsShangPalace.html")
@app.route('/RestaurantsSpiceCrossing.html')
def spice_crossing():
return render_template("RestaurantsSpiceCrossing.html")
@app.route('/Spa.html')
def spa():
return render_template("Spa.html")
@app.route('/login', methods = ['POST'])
def login_form():
Pass_ID=request.form['Pass_ID']
passenger_obj = db.session.query(Passenger).get(Pass_ID)
if passenger_obj:
phn = db.session.query(PassengerMobileNumber).filter_by(PID=passenger_obj.PID).all()
if len(phn)==1:
phn1=phn[0].MobileNumber
phn2="Not entered"
else:
phn1=phn[0].MobileNumber
phn2=phn[1].MobileNumber
rooms = db.session.query(Room).filter_by(PID=passenger_obj.PID).all()
rooms_str=""
for a_room in rooms:
rooms_str = rooms_str + str(a_room.ROOM_NUMBER) + ","
trans = db.session.query(Transaction).filter_by(PID=passenger_obj.PID).all()
return render_template('LoginDisplay.html',psngr=passenger_obj,phn1=phn1,phn2=phn2,room=a_room,rooms_str=rooms_str[0:len(rooms_str)-1],trans_obj=trans[0])
else:
return render_template("Warning.html", pid = Pass_ID)
@app.route('/display', methods = ['POST'])
def display():
dest_obj=Destination(request.form['dest_pin'],request.form['dod'],request.form['doa'],request.form['city'])
db.session.add(dest_obj)
db.session.commit()
passenger_obj=Passenger(request.form['firstname'],request.form['lastname'],request.form['children'],request.form['adults'],request.form['address'],request.form['dob'],dest_obj.DID)
db.session.add(passenger_obj)
db.session.commit()
p_d_obj=PassengerDestination(passenger_obj.PID,dest_obj.DID)
db.session.add(p_d_obj)
db.session.commit()
mob_obj=PassengerMobileNumber(passenger_obj.PID,request.form['phn1'])
db.session.add(mob_obj)
db.session.commit()
mob_obj=PassengerMobileNumber(passenger_obj.PID,request.form['phn2'])
db.session.add(mob_obj)
db.session.commit()
trans_obj=Transaction(request.form['amount'],request.form['payment_mode'],passenger_obj.PID)
db.session.add(trans_obj)
db.session.commit()
no_of_rooms = int(request.form['rooms'])
for i in range(no_of_rooms):
room_obj=Room(request.form['roomtype'],passenger_obj.PID)
db.session.add(room_obj)
db.session.commit()
return render_template("Greet.html", obj = passenger_obj)
@app.route('/Restaurant', methods = ['POST'])
def restaurant_booking():
pid = request.form['PID']
query_obj = db.session.query(Passenger).get(pid)
if not query_obj:
return render_template("Warning.html", pid = pid)
else:
query_obj = db.session.query(Restaurant).get(request.form['restaurant'])
if int(request.form['tables']) > query_obj.No_of_tables:
return "We don't have "+str(request.form['tables'])+" tables vacant for now. Sorry for the inconvenience"
else:
            query_obj.No_of_tables -= int(request.form['tables'])
            for i in range(int(request.form['tables'])):
                table = Table(request.form['restaurant'], request.form['ppt'], pid)
                db.session.add(table)  # persist each booked table
            db.session.commit()
            return str(request.form['tables'])+" tables have been booked for you Mr."+db.session.query(Passenger).get(pid).fname
if __name__ == "__main__":
    db.create_all()
app.run(debug = True)
|
nilq/baby-python
|
python
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import scriptcontext as sc
import compas_rhino
from compas_ags.rhino import SettingsForm
from compas_ags.rhino import FormObject
from compas_ags.rhino import ForceObject
__commandname__ = "AGS_toolbar_display"
def RunCommand(is_interactive):
if 'AGS' not in sc.sticky:
compas_rhino.display_message('AGS has not been initialised yet.')
return
scene = sc.sticky['AGS']['scene']
if not scene:
return
# TODO: deal with undo redo
SettingsForm.from_scene(scene, object_types=[FormObject, ForceObject], global_settings=['AGS'])
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
RunCommand(True)
|
nilq/baby-python
|
python
|
class DianpingConfig:
def __init__(self):
self.instance_name = "BERTModel.pt"
self.model_name = self.instance_name
self.BERT_MODEL = "bert-base-chinese"
self.max_sent_lens = 64
class SSTConfig:
def __init__(self):
self.instance_name = "BERTModel.pt"
self.model_name = self.instance_name
self.BERT_MODEL = "bert-base-uncased"
self.max_sent_lens = 32
class SNLIConfig:
def __init__(self):
self.instance_name = "BERTModel.pt"
self.model_name = self.instance_name
self.BERT_MODEL = "bert-base-uncased"
self.max_sent_lens = 64
class IMDBConfig:
def __init__(self):
self.instance_name = "BERTModel.pt"
self.model_name = self.instance_name
self.BERT_MODEL = "bert-base-uncased"
self.max_sent_lens = 254
class LCQMCConfig:
def __init__(self):
self.instance_name = "BERTModel.pt"
self.model_name = self.instance_name
self.BERT_MODEL = "bert-base-chinese"
self.max_sent_lens = 64
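# Minimal usage sketch (assumption: callers select a config by dataset name;
# this registry and helper are illustrative, not part of the original module):
DATASET_CONFIGS = {
    "dianping": DianpingConfig,
    "sst": SSTConfig,
    "snli": SNLIConfig,
    "imdb": IMDBConfig,
    "lcqmc": LCQMCConfig,
}
def get_config(dataset_name):
    # Instantiate the config class registered for the given dataset.
    return DATASET_CONFIGS[dataset_name.lower()]()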
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
from djangobmf.apps import ContribTemplate
class EmployeeConfig(ContribTemplate):
name = 'djangobmf.contrib.employee'
label = "djangobmf_employee"
|
nilq/baby-python
|
python
|
import eel
try:
from pyfirmata import Arduino, util
except ImportError:
from pip._internal import main as pipmain
pipmain(['install','pyfirmata'])
from pyfirmata import Arduino, util
#Get Operating System Type
import platform
currentOs = platform.system()
if "linux" in currentOs.lower():
currentOs = "linux"
if "windows" in currentOs.lower():
currentOs = "windows"
#Automatically get the port that the Arduino is on and setup the board
port = ""
if currentOs == "linux":
import os
feedback = "/dev/" + os.popen("ls /dev/ | grep ttyACM").read().strip()
if len(feedback) > 11:
port = feedback
elif currentOs == "windows":
import serial.tools.list_ports
ports = list(serial.tools.list_ports.comports())
for p in ports:
p = str(p)
if "Arduino" in p:
port = p.split(' ', 1)[0]
break
board=Arduino(port)
#Set up pins
red = board.get_pin('d:3:p')
green = board.get_pin('d:5:p')
blue = board.get_pin('d:6:p')
commonAnode = True # set this to false for common cathode setup
theloop = ''
loopIncrementor = 0
#Start the web interface
eel.init('web')
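# Convert a '#rrggbb' hex string into an (r, g, b) tuple of 0-255 integers.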
def hexToRgb(hex):
    hex = str(hex).lstrip('#')
    return(tuple(int(hex[i:i+2], 16) for i in (0, 2, 4)))
def writeRgb(r,g,b):
if commonAnode:
r = 1 - r
g = 1 - g
b = 1 - b
red.write(r)
green.write(g)
blue.write(b)
def writeHex(hex):
myhex = hexToRgb(hex)
writeRgb(myhex[0]/255,myhex[1]/255,myhex[2]/255)
#Turn off LEDs to begin with
if commonAnode:
writeRgb(0,0,0)
else:
writeRgb(1,1,1)
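# getSteps splits the distance to a target colour into `steps` equal
# per-channel increments on the 0-1 scale that writeRgb() expects.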
def getSteps(hex,steps):
if type(hex) is list:
rgb = hex
elif type(hex) is tuple:
rgb = list(hex)
else:
rgb = list(hexToRgb(hex))
for i in range(3):
rgb.append(rgb[0]/255/steps)
rgb.pop(0)
return(rgb)
def writeColorPct(color, pct):
rgb = list(hexToRgb(color))
for i in range(3):
rgb[i] = rgb[i] * pct / 100
writeRgb(rgb[0],rgb[1],rgb[2])
@eel.expose
def solid(color):
global loopIncrementor
loopIncrementor += 1
writeHex(color)
@eel.expose
def pulse(colors):
global loopIncrementor
loopIncrementor += 1
theloop = lightLoop(loopIncrementor)
theloop.pulse(colors)
@eel.expose
def fade(colors):
global loopIncrementor
loopIncrementor += 1
theloop = lightLoop(loopIncrementor)
theloop.fade(colors)
@eel.expose
def lightning(color):
global loopIncrementor
loopIncrementor += 1
theloop = lightLoop(loopIncrementor)
theloop.lightning(color)
@eel.expose
def neon(color):
global loopIncrementor
loopIncrementor += 1
theloop = lightLoop(loopIncrementor)
theloop.neon(color)
class lightLoop:
def __init__(self, name):
self.name = name
self.running = True
def pulse(self, colors):
while self.running:
for c in colors:
toWrite = [0,0,0]
increasing = True
steps = getSteps(c,255)
pulseIncrementor = 0
while (increasing == True):
for i in range(3):
toWrite[i] = toWrite[i] + steps[i]
                        if toWrite[i] > 1:  # writeRgb expects 0-1 values
                            toWrite[i] = 1
pulseIncrementor += 1
if self.name < loopIncrementor:
self.running = False
if self.running == True:
writeRgb(toWrite[0],toWrite[1],toWrite[2])
eel.sleep(0.01)
else:pass
if pulseIncrementor >= 255:
eel.sleep(1.0)
increasing = False
while increasing == False:
for i in range(3):
toWrite[i] = toWrite[i] - steps[i]
if toWrite[i] <= 0:
toWrite[i] = 0
pulseIncrementor -= 1
if self.name < loopIncrementor:
self.running = False
if self.running == True:
writeRgb(toWrite[0],toWrite[1],toWrite[2])
eel.sleep(0.01)
else: pass
if pulseIncrementor <= 0:
increasing = True
def fade(self, colors):
currentColor = [0,0,0]
while self.running:
for c in colors:
toWrite = list(currentColor)
goto = list(hexToRgb(c))
for i in range(3):
goto[i] = goto[i] - toWrite[i]
steps = goto
for i in range(3):
steps[i] /= 255 #put steps in decimal form
toWrite[i] /= 255 #put toWrite in decimal form
steps[i] /= 255 #break steps into 255 steps
pulseIncrementor = 0
increasing = True
while (increasing == True):
for i in range(3):
toWrite[i] += steps[i]
if toWrite[i] > 1:
toWrite[i] = 1
elif toWrite[i] < 0:
toWrite[i] = 0
pulseIncrementor += 1
if self.name < loopIncrementor:
self.running = False
if self.running == True:
writeRgb(toWrite[0],toWrite[1],toWrite[2])
eel.sleep(0.02)
else:pass
if pulseIncrementor >= 255:
eel.sleep(1.0)
increasing = False
currentColor = list(hexToRgb(c))
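    # lightning and neon are placeholders for now: both just hold a solid colour.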
    def lightning(self, color):
        while self.running:
            if self.name < loopIncrementor:
                self.running = False
            if self.running:
                writeHex(color)
            eel.sleep(0.1)  # yield so eel can keep serving the UI
    def neon(self, color):
        while self.running:
            if self.name < loopIncrementor:
                self.running = False
            if self.running:
                writeHex(color)
            eel.sleep(0.1)  # yield so eel can keep serving the UI
eel.start('main.html')
|
nilq/baby-python
|
python
|
from sys import argv
script, filename=argv
print(f"We're going to erase{filename}.")
print("If you don't want that,hit CTRL-C(^C).")
print("If you do want that,hit RETURN.")
input("?")
print("Opening the file..")
target=open(filename,'w')
print("Truncating the file,Goodbye!")
target.truncate()
print("Now I'm going to ask you for three lines.")
line1=input("line1:")
line2=input("line2:")
line3=input("line3:")
print("I'm going to write these to the file.")
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
print("And finally,we close it")
target.close()
|
nilq/baby-python
|
python
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class CheckStickerSetName(BaseObject):
"""
Checks whether a name can be used for a new sticker set
:param name: Name to be checked
:type name: :class:`str`
"""
ID: str = Field("checkStickerSetName", alias="@type")
name: str
@staticmethod
def read(q: dict) -> CheckStickerSetName:
return CheckStickerSetName.construct(**q)
|
nilq/baby-python
|
python
|
#MenuTitle: Check glyphsets match across open fonts
'''
Find missing glyphs across fonts
'''
def main():
fonts = Glyphs.fonts
glyphsets = {}
    try:
        for font in fonts:
            if font.instances[0].name not in glyphsets:
                glyphsets[font.instances[0].name] = set()
            print('Name: %s, Glyphs: %s' % (font.instances[0].name, len(font.glyphs)))
            for glyph in font.glyphs:
                glyphsets[font.instances[0].name].add(glyph.name)
        for font1 in glyphsets:
            for font2 in glyphsets:
                diff_glyphs = glyphsets[font1] - glyphsets[font2]
                print('%s - %s %s' % (font1, font2, diff_glyphs))
    except (AttributeError, IndexError):  # font.instances[0] raises IndexError when a font has no instances
        print('Font does not have any instances')
        raise
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
initial = """\
.|||.#..|##.#||..#.|..|..||||..#|##.##..#...|.....
.|#.|#..##...|#.........#.#..#..|#.|#|##..#.#|..#.
#....#|.#|.###||..#.|...|.|.#........#.|.#.#|..#..
|..|#....|#|...#.#..||.#..||......#.........|....|
.|.|..#|...#.|.###.|...||.|.|..|...|#|.#..|.|..|.|
#.....||.#..|..|..||#.||#..|.||..||##.......#.....
||.#..........|....##...|..#.|..#..#|#.#....#..#.#
.#.#|.|.|.##|..#......|...#||..#.||..|..|#....|##.
#.#..||.|...#|...|..#.#.||#.||.#.|.....|##.|....#.
.#......||.|#......#|#.|...||...||##...#...####.#.
.....#..|..#..#|..#...#.|#...||...#.##.||.|..|.||.
.#|.#.|.....|#..#||..|...|...##.#.###|..|.###.|#..
..#.......#.|#.##....#..|##.#......#|......#..#...
.|..#|.#.....#..||..#.#.|##..|#.||#..|.#..|.|##|#|
##|.#........|#.#.#|..|....|.......#..#|.#.|....#.
....##...|....#..............||.|..#........|.....
##||.|.#...|.#|..#....#..|...|..#..#..|##||.....|.
.|.#...|#.......#...#.#..|#....#|#|#..#|...##..||.
.|..|.|..#...##...||#..##|#|..|...#.....#||...##..
.|...|..||#..#|.|.#...|||.|#.||#|......|#|.#..|#..
|##.....|.|#...#||.....#..#.|.#..|.....||....||..#
|.|#|||.....|||..#......#..||........||.#.#..||#||
#.|.|.#.....#....#.#..#||.||..|.#.|....|...#.#...#
|.|....#.#||...#.....#|#|.|.#......##.|.||...#.||.
|...|...|##........|.|...#...|.........|..##..|.##
|.||..|.#.#|.#||...|.|.....#...#.####|.||||..|||.|
.....#..##..|..#|.||#...|..##...##|....##||.##....
#|##..#|.#..|##...|..#.##.|##.....###.|..#.|..#.|.
|.##..|#...|.|.||.......#..#||.....#|..#||##..#|..
..|.#.#.....##.|#|...#........##......#...#...||..
|.#....###|..|##.#...#|....|..#.....#.##.|..|...||
.....#..#.....|.##......#......|..|...##|.|.#..#||
...##.#.......#|.#..||.#|..#|...#...|||.#.......|#
#|..#|....|||...|..#|....#......#..#...|#.......||
...#|##|..........|..###||..#|...|.##.|.#.#...#...
#|##|.#|#...|..#......||..#.|#|..#..|..#|..#......
#||#.#.....|...|..|##|..#|...##.||..#|.|#||.|..|..
#..#..|.|.||...#|.|.|..|..|..|....#.#||.#.....|#.#
#.|.#..##...|..#.|..#..#..#.#||.#.............#...
..|##|.#|.|......|#...|#.#.....|#|#.#.|...|#......
.|.|.|...#..##..#|###..|#....#..#.#..|||.###|##...
|#...|......|...##..|.|#...#..|.#.........#..##.#.
.|...##||#.....#..#..|..#..#.|#.|.||.##.|....|..#|
|#..|..|.#..||...#...#|..##|||##..|.##||#.#.|....|
.......#......|.....||.#..|#.#.#|#.##....|...|.#..
.....#..|...|..##.....|...#...|.|||.##..|.#||.##|.
..#||...|#.#|#|....#..|||.|##..#|.|.........|....#
..#...|.#...|#..#........#...###..##..##||...|..#.
..|.||.#.....|#..|.##...#.|...|#...#||..####..#.|.
.|.....#....||.#...#.......#|........#...#|#|...|#"""
initial = initial.splitlines()
size = (max(map(len, initial)), len(initial))
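# Cell types: '.' open ground, '|' trees, '#' lumberyard.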
def convert(grid, pos, debug=False):
x, y = pos
squ = grid[y][x]
adj = []
for xi in range(max((x-1, 0)), min((x+2, size[0]))):
for yi in range(max((y-1, 0)), min((y+2, size[1]))):
if xi == x and yi == y:
continue
adj.append(grid[yi][xi])
if debug:
print(adj)
if squ == ".":
if adj.count("|") >= 3:
return("|")
return(".")
elif squ == "|":
if adj.count("#") >= 3:
return("#")
return("|")
elif squ == "#":
if adj.count("|")>=1 and adj.count("#")>=1:
return("#")
return(".")
def update(grid):
new_grid = []
for y in range(0, size[1]):
new_grid.append("")
for x in range(0, size[0]):
new_grid[y] += convert(grid, (x,y))
return(new_grid)
grid = initial
seen_grids = [grid]
for x in range(1, 1000000001):
grid = update(grid)
if grid in seen_grids:
break
seen_grids.append(grid)
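# The grid eventually enters a cycle; once a state repeats, modular
# arithmetic maps minute 1,000,000,000 back into that cycle.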
repeat_i = seen_grids.index(grid)
grid = seen_grids[(1000000000-repeat_i) % (len(seen_grids)-repeat_i) + repeat_i]
gridstr = "\n".join(grid)
clear = gridstr.count(".")
wooded = gridstr.count("|")
lumber = gridstr.count("#")
print(wooded*lumber)
|
nilq/baby-python
|
python
|
from group import GroupTestCases
from user import UserTestCases
from permission import PermissionTestCases
from core import *
|
nilq/baby-python
|
python
|
'''
Defines the training step.
'''
import sys
sys.path.append('tfutils')
import tensorflow as tf
from tfutils.base import get_optimizer, get_learning_rate
import numpy as np
import cv2
from curiosity.interaction import models
import h5py
import json
class RawDepthDiscreteActionUpdater:
'''
Provides the training step.
This is probably where we can put parallelization.
Not finished!
'''
    def __init__(self, world_model, rl_model, data_provider, eta):
self.data_provider = data_provider
self.world_model = world_model
self.rl_model = rl_model
self.eta = eta
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
        self.action = tf.placeholder(tf.float32, [None] + world_model.action_one_hot.get_shape().as_list()[1:])
self.adv = tf.placeholder(tf.float32, [None])
self.r = tf.placeholder(tf.float32, [None])
log_prob_tf = tf.nn.log_softmax(rl_model.logits)
prob_tf = tf.nn.softmax(rl_model.logits)
        pi_loss = -tf.reduce_sum(tf.reduce_sum(log_prob_tf * self.action, [1]) * self.adv)
vf_loss = .5 * tf.reduce_sum(tf.square(rl_model.vf - self.r))
entropy = -tf.reduce_sum(prob_tf * log_prob_tf)
self.rl_loss = pi_loss + 0.5 * vf_loss - entropy * 0.01
        rl_opt_params, rl_opt = get_optimizer(learning_rate, self.rl_loss, )  # TODO: unfinished -- learning_rate is not defined yet (see class docstring)
def replace_the_nones(my_list):
'''
Assumes my_list[-1] is np array
'''
return [np.zeros(my_list[-1].shape, dtype = my_list[-1].dtype) if elt is None else elt for elt in my_list]
def postprocess_batch_depth(batch, state_desc):
obs, msg, act, act_post = batch
depths = replace_the_nones(obs[state_desc])
obs_past = np.array([depths[:-1]])
obs_fut = np.array([depths[1:]])
actions = np.array([replace_the_nones(act)])
actions_post = np.array([replace_the_nones(act_post)])
return obs_past, actions, actions_post, obs_fut
# def postprocess_batch_depth(batch):
# depths = np.array([[timepoint if timepoint is not None else np.zeros(obs['depths1'][-1].shape, dtype = obs['depths1'][-1].dtype) for timepoint in obs['depths1']] for obs in batch.states])
# actions = np.array(batch.actions)
# next_depth = np.array([batch.next_state['depths1']])
# return depths, actions, next_depth
def postprocess_batch_for_actionmap(batch, state_desc):
obs, msg, act = batch
prepped = {}
depths = replace_the_nones(obs[state_desc])
depths_past = np.array([depths[:-1]])
depths_fut = np.array([depths[:1]])
objects = np.array([replace_the_nones(obs[state_desc])[:-1]])
actions = np.array([replace_the_nones(act)])
action_ids_list = []
for i in range(2):
action_msg = msg[i]['msg']['actions'] if msg[i] is not None else []
if len(action_msg):
idx = int(action_msg[0]['id'])
else:
idx = -10000#just something that's not an id seen
action_ids_list.append(idx)
action_ids = np.array([action_ids_list])
return depths_past, objects, actions, action_ids, depths_fut
# def postprocess_batch_for_actionmap(batch):
# prepped = {}
# for desc in ['depths1', 'objects1']:
# prepped[desc] = np.array([[timepoint if timepoint is not None else np.zeros(obs[desc][-1].shape, dtype = obs[desc][-1].dtype) for timepoint in obs[desc]] for obs in batch.states])
# actions = np.array([[np.zeros(batch.next_state['action'][-1].shape, batch.next_state['action'][-1].dtype) if timepoint is None else timepoint for timepoint in batch.next_state['action']]])
# print('actions shape')
# print(actions.shape)
# print(len(batch.next_state['action']))
# action_ids_list = []
# for i in range(2):
# action_msg = batch.next_state['msg'][i]['msg']['actions'] if batch.next_state['msg'][i] is not None else []
# if len(action_msg):
# idx = int(action_msg[0]['id'])
# action_ids_list.append(idx)
# action_ids = np.array([action_ids_list])
# next_depths = np.array([batch.next_state['depths1']])
# return prepped['depths1'], prepped['objects1'], actions, action_ids, next_depths
class ExperienceReplayPostprocessor:
def __init__(self, big_save_keys = None, little_save_keys = None, big_save_len = None, big_save_freq = None, state_descriptor = None):
self.big_save_keys = big_save_keys
self.little_save_keys = little_save_keys
self.big_save_len = big_save_len
self.big_save_freq = big_save_freq
self.state_descriptor = state_descriptor
self.big_save_keys.append('map_draw')
self.little_save_keys.append('map_draw')
self.big_save_keys.extend(['act_lr', 'um_lr'])
self.little_save_keys.extend(['act_lr', 'um_lr'])
def postprocess(self, training_results, batch):
global_step = training_results['global_step']
res = {}
if (global_step) % self.big_save_freq < self.big_save_len:
save_keys = self.big_save_keys
#est_losses = [other[1] for other in batch['other']]
#action_sample = [other[2] for other in batch['other']]
res['batch'] = {}
            for desc, val in batch.items():
if desc not in ['recent', 'depths1', 'objects1', 'images1']:
res['batch'][desc] = val
res['recent'] = batch['recent']
else:
save_keys = self.little_save_keys
        res.update(dict(pair for pair in training_results.items() if pair[0] in save_keys))
#if 'other' in batch['recent']:
# entropies = [other[0] for other in batch['recent']['other']]
# entropies = np.mean(entropies)
# res['entropy'] = entropies
if 'msg' in batch['recent']:
looking_at_obj = [1 if msg is not None and msg['msg']['action_type'] == 'OBJ_ACT' else 0 for msg in batch['recent']['msg']]
res['obj_freq'] = np.mean(looking_at_obj)
elif type(batch['recent']) == list and len(batch['recent'][0]) > 0:
mean_per_provider = []
for provider_recent in batch['recent']:
looking_at_obj = [1 if msg is not None and msg['msg']['action_type'] == 'OBJ_ACT' else 0 for msg in provider_recent['msg']]
mean_per_provider.append(np.mean(looking_at_obj))
res['obj_freq'] = np.mean(mean_per_provider)
res['obj_freq_per_provider_noprint'] = mean_per_provider
return res
class UncertaintyPostprocessor:
def __init__(self, big_save_keys = None, little_save_keys = None, big_save_len = None, big_save_freq = None, state_descriptor = None):
self.big_save_keys = big_save_keys
self.little_save_keys = little_save_keys
self.big_save_len = big_save_len
self.big_save_freq = big_save_freq
self.state_descriptor = state_descriptor
def postprocess(self, training_results, batch):
global_step = training_results['global_step']
res = {}
print('postprocessor deets')
print(global_step)
print(self.big_save_freq)
print(self.big_save_len)
if (global_step) % self.big_save_freq < self.big_save_len:
print('big time')
save_keys = self.big_save_keys
est_losses = [other[1] for other in batch['recent']['other']]
action_sample = [other[2] for other in batch['recent']['other']]
res['batch'] = {'obs' : batch['depths1'], 'act' : batch['action'], 'act_post' : batch['action_post'], 'est_loss' : est_losses, 'action_sample' : action_sample}
res['msg'] = batch['recent']['msg']
else:
print('little time')
save_keys = self.little_save_keys
        res.update(dict((k, v) for (k, v) in training_results.items() if k in save_keys))
#res['msg'] = batch['msg'][-1]
entropies = [other[0] for other in batch['recent']['other']]
entropies = np.mean(entropies)
res['entropy'] = entropies
        looking_at_obj = [1 if msg is not None and msg['msg']['action_type'] == 'OBJ_ACT' else 0 for msg in batch['recent']['msg']]
res['obj_freq'] = np.mean(looking_at_obj)
return res
class DataWriteUpdater:
def __init__(self, data_provider, updater_params):
self.data_provider = data_provider
fn = updater_params['hdf5_filename']
N = updater_params['N_save']
height, width = updater_params['image_shape']
act_dim = updater_params['act_dim']
print('setting up save loc')
self.hdf5 = hdf5 = h5py.File(fn, mode = 'a')
dt = h5py.special_dtype(vlen = str)
self.handles = {'msg' : hdf5.require_dataset('msg', shape = (N,), dtype = dt),
'depths1' : hdf5.require_dataset('depths1', shape = (N, height, width, 3), dtype = np.uint8),
'objects1' : hdf5.require_dataset('objects1', shape = (N, height, width, 3), dtype = np.uint8),
'images1': hdf5.require_dataset('images1', shape = (N, height, width, 3), dtype = np.uint8),
'action' : hdf5.require_dataset('action', shape = (N, act_dim), dtype = np.float32),
'action_post' : hdf5.require_dataset('action_post', shape = (N, act_dim), dtype = np.float32)}
print('save loc set up')
self.start = 0
def update(self):
batch = self.data_provider.dequeue_batch()
bs = len(batch['recent']['msg'])
end = self.start + bs
for k in ['depths1', 'objects1', 'images1', 'action', 'action_post']:
tosave = batch['recent'][k]
if k in ['action', 'action_post']:
tosave = tosave.astype(np.float32)
            self.handles[k][self.start : end] = tosave
self.handles['msg'][self.start : end] = [json.dumps(msg) for msg in batch['recent']['msg']]
self.start = end
def close(self):
self.hdf5.close()
class LatentUncertaintyValidator:
def __init__(self, models, data_provider):
self.um = models['uncertainty_model']
self.wm = models['world_model']
self.targets = {
'act_pred' : self.wm.act_pred,
'fut_loss' : self.wm.fut_loss, 'act_loss' : self.wm.act_loss, 'um_loss' : self.um.uncertainty_loss,
'estimated_world_loss' : self.um.estimated_world_loss, 'loss_per_example' : self.um.true_loss,
'act_loss_per_example' : self.wm.act_loss_per_example
}
self.dp = data_provider
def run(self, sess):
batch = self.dp.dequeue_batch()
feed_dict = {
self.wm.states : batch['depths1'],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post'],
self.wm.obj_there : batch['obj_there']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res['batch'] = {}
        for desc, val in batch.items():
print(desc)
if desc == 'obj_there':
res['batch'][desc] = val
elif desc != 'recent':
res['batch'][desc] = val[:, -1]
        res['recent'] = batch['recent']
        return res
class ObjectThereValidater:
def __init__(self, models, data_provider):
self.um = models['uncertainty_model']
self.wm = models['world_model']
self.targets = {'um_loss' : self.um.uncertainty_loss, 'loss_per_example' : self.um.true_loss,
'estimated_world_loss' : self.um.estimated_world_loss}
self.dp = data_provider
def run(self, sess):
batch = self.dp.dequeue_batch()
feed_dict = {
self.wm.states : batch['depths1'],
self.wm.action : batch['action'],
self.wm.obj_there : batch['obj_there']
}
return sess.run(self.targets, feed_dict = feed_dict)
class ActionUncertaintyValidator:
def __init__(self, models, data_provider):
self.um = um = models['uncertainty_model']
self.wm = wm = models['world_model']
self.targets = {'act_pred' : self.wm.act_pred, 'act_loss' : self.wm.act_loss,
'estimated_world_loss' : self.um.estimated_world_loss,
'um_loss' : self.um.uncertainty_loss, 'loss_per_example' : self.um.true_loss}
self.dp = data_provider
def run(self, sess):
batch = self.dp.dequeue_batch()
feed_dict = {
self.wm.states : batch['depths1'],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res['batch'] = batch
return res
class ActionUncertaintyValidatorWithReadouts:
def __init__(self, model, data_provider):
self.dp = data_provider
self.wm = model['world_model']
self.um = model['uncertainty_model']
self.targets = {}
self.targets.update({k : v for k, v in self.wm.readouts.items() if k not in self.wm.save_to_gfs})
self.targets.update({k : v for k, v in self.um.readouts.items() if k not in self.um.save_to_gfs})
#this should be changed for an online data provider, set to do nothing
self.map_draw_mode = 'specified_indices'
#relies on there being just one obs type
        self.state_desc = list(data_provider.data_lengths['obs'].keys())[0]
self.insert_objthere = False if data_provider.num_objthere is None else True
def run(self, sess):
batch = self.dp.dequeue_batch()
feed_dict = {
self.wm.states : batch[self.state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch ['action_post']
}
if self.insert_objthere:
feed_dict[self.wm.obj_there_via_msg] = batch['obj_there']
res = sess.run(self.targets, feed_dict = feed_dict)
#TODO case it for online
res['recent'] = {}
#if self.map_draw_mode == 'specified_indices':
# map_draw_res = []
# for idx in self.map_draw_example_indices:
# obs_for_actor = [batch[self.state_desc][idx][t] for t in self.map_draw_timestep_indices]
# action_samples = self.action_sampler.sample_actions()
# action, entropy, estimated_world_loss = self.um.act(sess, action_samples, obs_for_actor)
# to_add = {'example_id' : idx, 'action_sample' : action, 'estimated_world_loss' : estimated_world_loss,
# 'action_samples' : action_samples, 'depths1' : batch[self.state_desc][idx],
# 'action' : batch['action'][idx], 'action_post' : batch['action_post'][idx]}
# map_draw_res.append(to_add)
#res['map_draw'] = map_draw_res
return res
class ObjectThereUpdater:
def __init__(self, world_model, uncertainty_model, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.data_provider = data_provider
self.wm = world_model
self.um = uncertainty_model
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.targets = {'um_loss' : self.um.uncertainty_loss, 'um_lr' : um_lr, 'um_optimizer' : um_opt,
'global_step' : self.global_step, 'loss_per_example' : self.um.true_loss,
'estimated_world_loss' : self.um.estimated_world_loss
}
self.state_desc = updater_params['state_desc']
def update(self, sess, visualize = False):
batch = self.data_provider.dequeue_batch()
state_desc = self.state_desc
feed_dict = {
self.wm.states : batch[state_desc],
self.wm.action : batch['action'],
self.wm.obj_there : batch['obj_there']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res = self.postprocessor.postprocess(res, batch)
return res
class SquareForceMagUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.dp = data_provider
self.wm = models['world_model']
self.um = models['uncertainty_model']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.targets = {'um_loss' : self.um.uncertainty_loss, 'um_lr' : um_lr, 'um_optimizer' : um_opt,
'global_step' : self.global_step, 'loss_per_example' : self.um.true_loss,
'estimated_world_loss' : self.um.estimated_world_loss
}
if self.um.exactly_whats_needed:
self.targets['oh_my_god'] = self.um.oh_my_god
self.state_desc = updater_params['state_desc']
def update(self, sess, visualize = False):
batch = self.dp.dequeue_batch()
state_desc = self.state_desc
feed_dict = {
self.wm.states : batch[state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
if self.um.insert_obj_there:
print('adding obj_there to feed dict')
feed_dict[self.um.obj_there] = batch['obj_there']
res = sess.run(self.targets, feed_dict = feed_dict)
res = self.postprocessor.postprocess(res, batch)
return res
class DebuggingForceMagUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.dp = data_provider
self.wm = models['world_model']
self.um = models['uncertainty_model']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0, dtype = tf.int32))
print(learning_rate_params.keys())
um_lr_params, um_lr = get_learning_rate(self.global_step, **learning_rate_params['uncertainty_model'])
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'])
self.targets = {'um_loss' : self.um.uncertainty_loss, 'um_optimizer' : um_opt, 'global_step' : self.global_step,
'loss_per_example' : self.um.true_loss, 'estimated_world_loss' : self.um.estimated_world_loss, 'ans' : self.um.ans,
'oh_my_god' : self.um.oh_my_god, 'model_parameters' : self.um.var_list}
def update(self, sess):
batch = self.dp.dequeue_batch()
feed_dict = {
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post'],
self.um.obj_there : batch['obj_there']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res = self.postprocessor.postprocess(res, batch)
return res
class LatentFreezeUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.data_provider = data_provider\
if isinstance(data_provider, list) else [data_provider]
self.wm = models['world_model']
self.um = models['uncertainty_model']
freeze_wm = updater_params['freeze_wm']
freeze_um = updater_params['freeze_um']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.act_step = tf.get_variable('act_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.fut_step = tf.get_variable('fut_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.um_step = tf.get_variable('ext_uncertainty_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.targets = {}
self.state_desc = updater_params.get('state_desc', 'depths1')
if not freeze_wm:
act_lr_params, act_lr = get_learning_rate(self.act_step, **learning_rate_params['world_model']['act_model'])
fut_lr_params, fut_lr = get_learning_rate(self.fut_step, **learning_rate_params['world_model']['fut_model'])
act_opt_params, act_opt = get_optimizer(act_lr, self.wm.act_loss, self.act_step, optimizer_params['world_model']['act_model'], var_list = self.wm.act_var_list + self.wm.encode_var_list)
fut_opt_params, fut_opt = get_optimizer(fut_lr, self.wm.fut_loss, self.fut_step, optimizer_params['world_model']['fut_model'], var_list = self.wm.fut_var_list)
self.targets['act_opt'] = act_opt
self.targets['fut_opt'] = fut_opt
self.targets['act_lr'] = act_lr
self.targets['fut_lr'] = fut_lr
if not freeze_um:
um_lr_params, um_lr = get_learning_rate(self.um_step, **learning_rate_params['uncertainty_model'])
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.um_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.targets['um_opt'] = um_opt
self.targets['um_lr'] = um_lr
self.targets['global_step'] = self.global_step
global_increment = tf.assign_add(self.global_step, 1)
um_increment = tf.assign_add(self.um.step, 1)
self.targets.update({'global_increment' : global_increment, 'um_increment' : um_increment})
self.targets.update(self.wm.readouts)
self.targets.update(self.um.readouts)
assert not set(self.wm.readouts.keys()) & set(self.um.readouts.keys()), 'world model and uncertainty model readout names must not collide'
def update(self, sess, visualize = False):
if self.um.just_random:
print('Selecting action at random')
batch = {}
for i, dp in enumerate(self.data_provider):
provider_batch = dp.dequeue_batch()
for k in provider_batch:
if k in batch:
batch[k].append(provider_batch[k])
else:
batch[k] = [provider_batch[k]]
for k in ['action', 'action_post', self.state_desc]:
batch[k] = np.concatenate(batch[k], axis=0)
feed_dict = {
self.wm.states : batch[self.state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res.pop('um_increment')
res.pop('global_increment')
global_step = res['global_step']
#if self.map_draw_mode is not None and global_step % self.map_draw_freq == 0:
# if self.map_draw_mode == 'specified_indices':
# map_draw_res = []
# for idx in self.map_draw_example_indices:
# obs_for_actor = [batch[self.state_desc][idx][t] for t in self.map_draw_timestep_indices]
# action_samples = self.action_sampler.sample_actions()
# action, entropy, estimated_world_loss = self.um.act(sess, action_samples, obs_for_actor)
# to_add = {'example_id' : idx, 'action_sample' : action, 'estimated_world_loss' : estimated_world_loss,
# 'action_samples' : action_samples, 'depths1' : batch[self.state_desc][idx],
# 'action' : batch['action'][idx], 'action_post' : batch['action_post'][idx]}
# map_draw_res.append(to_add)
# res['map_draw'] = map_draw_res
res = self.postprocessor.postprocess(res, batch)
return res, global_step
class FreezeUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.data_provider = data_provider \
if isinstance(data_provider, list) else [data_provider]
self.wm = models['world_model']
self.um = models['uncertainty_model']
freeze_wm = updater_params['freeze_wm']
freeze_um = updater_params['freeze_um']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.act_lr_params, act_lr = get_learning_rate(self.global_step, ** learning_rate_params['world_model']['act_model'])
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
num_not_frozen = 0
self.targets = {}
self.state_desc = updater_params.get('state_desc', 'depths1')
if not freeze_wm:
num_not_frozen += 1
act_opt_params, act_opt = get_optimizer(act_lr, self.wm.act_loss, self.global_step, optimizer_params['world_model']['act_model'], var_list = self.wm.act_var_list + self.wm.encode_var_list)
self.targets['act_opt'] = act_opt
if not freeze_um:
num_not_frozen += 1
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.targets['um_opt'] = um_opt
if num_not_frozen == 0:
self.targets['global_step'] = self.global_step
self.targets['increment'] = tf.assign_add(self.global_step, 1)
else:
self.global_step = self.global_step / num_not_frozen
self.targets['global_step'] = self.global_step
self.targets.update({'act_lr' : act_lr, 'um_lr' : um_lr})
assert not set(self.wm.readouts.keys()) & set(self.um.readouts.keys()), 'world model and uncertainty model readout names must not collide'
self.targets.update(self.wm.readouts)
self.targets.update(self.um.readouts)
um_increment = tf.assign_add(self.um.step, 1)
assert 'um_increment' not in self.targets
self.targets['um_increment'] = um_increment
self.obj_there_supervision = updater_params.get('include_obj_there', False)
#self.map_draw_mode = None
#Map drawing. Meant to have options, but for now just assuming one sort of specification
#self.state_desc = updater_params.get('state_desc', 'depths1')
#self.map_draw_mode = updater_params['map_draw_mode']
#this specification specifies batch example indices for which we do a forward pass.
#need to do one forward pass each index because action sampling is the 'batch.'
#self.action_sampler = action_sampler
#assert self.map_draw_mode == 'specified_indices' and self.action_sampler is not None, (self.map_draw_mode, action_sampler)
#self.map_draw_example_indices = updater_params['map_draw_example_indices']
#self.map_draw_timestep_indices = updater_params['map_draw_timestep_indices']
#self.map_draw_freq = updater_params['map_draw_freq']
def update(self, sess, visualize = False):
if self.um.just_random:
print('Selecting action at random')
batch = {}
for i, dp in enumerate(self.data_provider):
provider_batch = dp.dequeue_batch()
for k in provider_batch:
if k in batch:
batch[k].append(provider_batch[k])
else:
batch[k] = [provider_batch[k]]
for k in ['action', 'action_post', self.state_desc]:
batch[k] = np.concatenate(batch[k], axis=0)
feed_dict = {
self.wm.states : batch[self.state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
if self.obj_there_supervision:
batch['obj_there'] = np.concatenate(batch['obj_there'], axis = 0)
feed_dict[self.wm.obj_there_via_msg] = batch['obj_there']
print('state desc! ' + self.state_desc)
res = sess.run(self.targets, feed_dict = feed_dict)
res.pop('um_increment')
global_step = res['global_step']
#if self.map_draw_mode is not None and global_step % self.map_draw_freq == 0:
# if self.map_draw_mode == 'specified_indices':
# map_draw_res = []
# for idx in self.map_draw_example_indices:
# obs_for_actor = [batch[self.state_desc][idx][t] for t in self.map_draw_timestep_indices]
# action_samples = self.action_sampler.sample_actions()
# action, entropy, estimated_world_loss = self.um.act(sess, action_samples, obs_for_actor)
# to_add = {'example_id' : idx, 'action_sample' : action, 'estimated_world_loss' : estimated_world_loss,
# 'action_samples' : action_samples, 'depths1' : batch[self.state_desc][idx],
# 'action' : batch['action'][idx], 'action_post' : batch['action_post'][idx]}
# map_draw_res.append(to_add)
# res['map_draw'] = map_draw_res
res = self.postprocessor.postprocess(res, batch)
return res, global_step
class JustUncertaintyUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params, action_sampler = None):
self.data_provider = data_provider \
if isinstance(data_provider, list) else [data_provider]
self.wm = models['world_model']
self.um = models['uncertainty_model']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.targets = {'global_step' : self.global_step, 'um_optimizer' : um_opt}
assert not set(self.wm.readouts.keys()) & set(self.um.readouts.keys()), 'world model and uncertainty model readout names must not collide'
self.targets.update(self.wm.readouts)
self.targets.update(self.um.readouts)
#self.targets = {
# 'fut_pred' : self.wm.fut_pred, 'act_pred' : self.wm.act_pred,
# 'fut_loss' : self.wm.fut_loss, 'act_loss' : self.wm.act_loss,
# 'estimated_world_loss' : self.um.estimated_world_loss,
# ''
# }
#self.targets.update({'um_loss' : self.um.uncertainty_loss, 'um_lr' : um_lr, 'um_optimizer' : um_opt,
# 'global_step' : self.global_step, 'loss_per_example' : self.um.true_loss})
self.map_draw_mode = None
#Map drawing. Meant to have options, but for now just assuming one sort of specification
self.state_desc = updater_params.get('state_desc', 'depths1')
self.map_draw_mode = updater_params['map_draw_mode']
#this specification specifies batch example indices for which we do a forward pass.
#need to do one forward pass each index because action sampling is the 'batch.'
self.action_sampler = action_sampler
assert self.map_draw_mode == 'specified_indices' and self.action_sampler is not None, (self.map_draw_mode, action_sampler)
self.map_draw_example_indices = updater_params['map_draw_example_indices']
self.map_draw_timestep_indices = updater_params['map_draw_timestep_indices']
self.map_draw_freq = updater_params['map_draw_freq']
def update(self, sess, visualize = False):
batch = {}
for i, dp in enumerate(self.data_provider):
provider_batch = dp.dequeue_batch()
for k in provider_batch:
if k in batch:
batch[k].append(provider_batch[k])
else:
batch[k] = [provider_batch[k]]
for k in ['action', 'action_post', self.state_desc]:
batch[k] = np.concatenate(batch[k], axis=0)
feed_dict = {
self.wm.states : batch[self.state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
self.targets['global_step'] = self.global_step
res = sess.run(self.targets, feed_dict = feed_dict)
global_step = res['global_step']
if self.map_draw_mode is not None and global_step % self.map_draw_freq == 0:
if self.map_draw_mode == 'specified_indices':
map_draw_res = []
for idx in self.map_draw_example_indices:
obs_for_actor = [batch[self.state_desc][idx][t] for t in self.map_draw_timestep_indices]
action_samples = self.action_sampler.sample_actions()
action, entropy, estimated_world_loss = self.um.act(sess, action_samples, obs_for_actor)
to_add = {'example_id' : idx, 'action_sample' : action, 'estimated_world_loss' : estimated_world_loss,
'action_samples' : action_samples, 'depths1' : batch[self.state_desc][idx],
'action' : batch['action'][idx], 'action_post' : batch['action_post'][idx]}
map_draw_res.append(to_add)
res['map_draw'] = map_draw_res
res = self.postprocessor.postprocess(res, batch)
return res, global_step
class ActionUncertaintyUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.data_provider = data_provider \
if isinstance(data_provider, list) else [data_provider]
self.wm = models['world_model']
self.um = models['uncertainty_model']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.act_lr_params, act_lr = get_learning_rate(self.global_step, ** learning_rate_params['world_model']['act_model'])
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
act_opt_params, act_opt = get_optimizer(act_lr, self.wm.act_loss, self.global_step, optimizer_params['world_model']['act_model'], var_list = self.wm.act_var_list + self.wm.encode_var_list)
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.global_step = self.global_step / 2
self.targets = {'act_pred' : self.wm.act_pred, 'act_loss' : self.wm.act_loss,
'act_optimizer' : act_opt, 'um_optimizer' : um_opt,
'estimated_world_loss' : self.um.estimated_world_loss,
'um_loss' : self.um.uncertainty_loss, 'loss_per_example' : self.um.true_loss,
'global_step' : self.global_step}
def update(self, sess, visualize = False):
batch = {}
for i, dp in enumerate(self.data_provider):
provider_batch = dp.dequeue_batch()
for k in provider_batch:
if k in batch:
batch[k].append(provider_batch[k])
else:
batch[k] = [provider_batch[k]]
for k in ['action', 'action_post', 'depths1']:
batch[k] = np.concatenate(batch[k], axis=0)
state_desc = 'depths1'
#depths, actions, actions_post, next_depth = postprocess_batch_depth(batch, state_desc)
feed_dict = {
self.wm.states : batch[state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
self.targets['global_step'] = self.global_step
res = sess.run(self.targets, feed_dict = feed_dict)
glstep = res['global_step']
res = self.postprocessor.postprocess(res, batch)
return res, glstep
class LatentUncertaintyUpdater:
def __init__(self, world_model, uncertainty_model, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params = None):
self.data_provider = data_provider
self.wm = world_model
self.um = uncertainty_model
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.act_lr_params, act_lr = get_learning_rate(self.global_step, ** learning_rate_params['world_model']['act_model'])
self.fut_lr_params, fut_lr = get_learning_rate(self.global_step, ** learning_rate_params['world_model']['fut_model'])
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
act_opt_params, act_opt = get_optimizer(act_lr, self.wm.act_loss, self.global_step, optimizer_params['world_model']['act_model'], var_list = self.wm.act_var_list + self.wm.encode_var_list)
fut_opt_params, fut_opt = get_optimizer(fut_lr, self.wm.fut_loss, self.global_step, optimizer_params['world_model']['fut_model'], var_list = self.wm.fut_var_list)
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.global_step = self.global_step / 3
self.targets = {'encoding_i' : self.wm.encoding_i, 'encoding_f' : self.wm.encoding_f,
'fut_pred' : self.wm.fut_pred, 'act_pred' : self.wm.act_pred,
'act_optimizer' : act_opt, 'fut_optimizer' : fut_opt,
'act_lr' : act_lr, 'fut_lr' : fut_lr,
'fut_loss' : self.wm.fut_loss, 'act_loss' : self.wm.act_loss,
'estimated_world_loss' : self.um.estimated_world_loss
}
self.targets.update({'um_loss' : self.um.uncertainty_loss, 'um_lr' : um_lr, 'um_optimizer' : um_opt,
'global_step' : self.global_step, 'loss_per_example' : self.um.true_loss})
self.state_desc = updater_params['state_desc']
#checking that we don't have repeat names
def start(self, sess):
self.data_provider.start_runner(sess)
sess.run(tf.global_variables_initializer())
def update(self, sess, visualize = False):
batch = self.data_provider.dequeue_batch()
state_desc = self.state_desc
#depths, actions, actions_post, next_depth = postprocess_batch_depth(batch, state_desc)
feed_dict = {
self.wm.states : batch[state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res = self.postprocessor.postprocess(res, batch)
return res
class UncertaintyUpdater:
def __init__(self, world_model, uncertainty_model, data_provider, optimizer_params, learning_rate_params, postprocessor):
self.data_provider = data_provider
self.world_model = world_model
self.um = uncertainty_model
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.wm_lr_params, wm_learning_rate = get_learning_rate(self.global_step, ** learning_rate_params['world_model'])
self.wm_opt_params, wm_opt = get_optimizer(wm_learning_rate, self.world_model.loss, self.global_step, optimizer_params['world_model'])
self.world_model_targets = {'given' : self.world_model.processed_input, 'loss' : self.world_model.loss, 'loss_per_example' : self.world_model.loss_per_example, 'learning_rate' : wm_learning_rate, 'optimizer' : wm_opt, 'prediction' : self.world_model.pred, 'tv' : self.world_model.tv}
self.inc_step = self.global_step.assign_add(1)
self.um_lr_params, um_learning_rate = get_learning_rate(self.global_step, **learning_rate_params['uncertainty_model'])
self.um_opt_params, um_opt = get_optimizer(um_learning_rate, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'])
self.global_step = self.global_step / 2
self.um_targets = {'loss' : self.um.uncertainty_loss, 'learning_rate' : um_learning_rate, 'optimizer' : um_opt, 'global_step' : self.global_step}
self.postprocessor = postprocessor
self.world_action_time = self.world_model.action.get_shape().as_list()[1]
def start(self, sess):
self.data_provider.start_runner(sess)
sess.run(tf.global_variables_initializer())
def update(self, sess, visualize = False):
batch = self.data_provider.dequeue_batch()
state_desc = self.um.state_descriptor
wm_feed_dict = {
self.world_model.states : batch[state_desc],
self.world_model.action : batch['action'][:, -self.world_action_time : ]
}
world_model_res = sess.run(self.world_model_targets, feed_dict = wm_feed_dict)
um_feed_dict = {
self.um.s_i : batch[state_desc][:, :-1],
self.um.action_sample : batch['action'][:, -1],
self.um.true_loss : world_model_res['loss_per_example']
}
um_res = sess.run(self.um_targets, feed_dict = um_feed_dict)
wm_res_new = dict(('wm_' + k, v) for k, v in world_model_res.items())
um_res_new = dict(('um_' + k, v) for k, v in um_res.items())
wm_res_new.update(um_res_new)
res = wm_res_new
res['global_step'] = res.pop('um_global_step')
res = self.postprocessor.postprocess(wm_res_new, batch)
return res
class DamianWMUncertaintyUpdater:
def __init__(self, world_model, uncertainty_model, data_provider, optimizer_params, learning_rate_params, postprocessor):
self.data_provider = data_provider
self.world_model = world_model
self.um = uncertainty_model
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.wm_lr_params, wm_learning_rate = get_learning_rate(self.global_step, ** learning_rate_params['world_model'])
self.wm_opt_params, wm_opt = get_optimizer(wm_learning_rate, self.world_model.loss, self.global_step, optimizer_params['world_model'])
self.world_model_targets = {'given' : self.world_model.processed_input, 'loss' : self.world_model.loss, 'learning_rate' : wm_learning_rate, 'optimizer' : wm_opt, 'prediction' : self.world_model.pred, 'tv' : self.world_model.tv}
self.inc_step = self.global_step.assign_add(1)
self.um_lr_params, um_learning_rate = get_learning_rate(self.global_step, **learning_rate_params['uncertainty_model'])
self.um_opt_params, um_opt = get_optimizer(um_learning_rate, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'])
self.um_targets = {'loss' : self.um.uncertainty_loss, 'learning_rate' : um_learning_rate, 'optimizer' : um_opt, 'global_step' : self.global_step}
self.postprocessor = postprocessor
def start(self, sess):
self.data_provider.start_runner(sess)
sess.run(tf.global_variables_initializer())
def update(self, sess, visualize = False):
batch = self.data_provider.dequeue_batch()
depths, objects, actions, action_ids, next_depth = postprocess_batch_for_actionmap(batch)
wm_feed_dict = {
self.world_model.s_i : depths,
self.world_model.s_f : next_depth,
self.world_model.action : actions,
self.world_model.action_id : action_ids,
self.world_model.objects : objects
}
world_model_res = sess.run(self.world_model_targets, feed_dict = wm_feed_dict)
if visualize:
cv2.imshow('pred', world_model_res['prediction'][0] / 4.)#TODO clean up w colors
cv2.imshow('tv', world_model_res['tv'][0] / 4.)
cv2.imshow('processed0', world_model_res['given'][0, 0] / 4.)
cv2.imshow('processed1', world_model_res['given'][0, 1] / 4.)
cv2.waitKey(1)
print('wm loss: ' + str(world_model_res['loss']))
um_feed_dict = {
self.um.s_i : depths,
self.um.action_sample : actions[:, -1],
self.um.true_loss : np.array([world_model_res['loss']])
}
um_res = sess.run(self.um_targets, feed_dict = um_feed_dict)
wm_res_new = dict(('wm_' + k, v) for k, v in world_model_res.items())
um_res_new = dict(('um_' + k, v) for k, v in um_res.items())
wm_res_new.update(um_res_new)
res = wm_res_new
res['global_step'] = res.pop('um_global_step')
res = self.postprocessor.postprocess(wm_res_new, batch)
return res
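# A hedged driver sketch showing how these updater classes are typically run
# (the session loop below is an illustration, not part of this module):
#
#   updater = UncertaintyUpdater(world_model, uncertainty_model, data_provider,
#                                optimizer_params, learning_rate_params, postprocessor)
#   with tf.Session() as sess:
#       updater.start(sess)             # starts the data provider, initializes variables
#       for _ in range(num_steps):      # num_steps is a hypothetical training budget
#           res = updater.update(sess)  # one optimization step; res carries losses/readouts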
|
nilq/baby-python
|
python
|
'''
Given an array of integers, there is a sliding window of size k which is moving from the left side of the array to the right, one element at a time. You can only interact with the k numbers in the window. Return an array consisting of the maximum value of each window of elements.
'''
def sliding_window_max(arr, k):
output = []
# one window starts at each index from 0 through len(arr) - k
for i in range(len(arr) - k + 1):
    # scan the k values in the current window for the largest
    highest = arr[i]
    for j in range(1, k):
        if arr[i+j] > highest:
            highest = arr[i+j]
    output.append(highest)
return output
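# A minimal O(n) alternative sketch using collections.deque (this helper and
# its name are an illustration, not part of the original exercise): the deque
# stores indices of window elements in decreasing value order, so the front
# is always the current window maximum.
from collections import deque

def sliding_window_max_deque(arr, k):
    output = []
    dq = deque()  # indices of candidate maxima; the front holds the current max
    for i, value in enumerate(arr):
        # drop indices that have slid out of the window
        while dq and dq[0] <= i - k:
            dq.popleft()
        # drop smaller elements; they can never become a window max again
        while dq and arr[dq[-1]] <= value:
            dq.pop()
        dq.append(i)
        if i >= k - 1:
            output.append(arr[dq[0]])
    return output

# e.g. sliding_window_max_deque([1, 3, -1, -3, 5, 3, 6, 7], 3) -> [3, 3, 5, 5, 6, 7]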
|
nilq/baby-python
|
python
|
# terrascript/provider/chanzuckerberg/snowflake.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:27:17 UTC)
import terrascript
class snowflake(terrascript.Provider):
"""Terraform provider for managing Snowflake accounts"""
__description__ = "Terraform provider for managing Snowflake accounts"
__namespace__ = "chanzuckerberg"
__name__ = "snowflake"
__source__ = "https://github.com/chanzuckerberg/terraform-provider-snowflake"
__version__ = "0.25.19"
__published__ = "2021-09-10T23:25:20Z"
__tier__ = "community"
__all__ = ["snowflake"]
|
nilq/baby-python
|
python
|
def move_tower(height, from_pole, middle_pole, to_pole):
if height >= 1:
move_tower(height-1, from_pole, to_pole, middle_pole)
print "move disk from {} to {}".format(from_pole, to_pole)
move_tower(height-1, middle_pole, from_pole, to_pole)
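# A quick usage sketch (pole labels are assumed, not part of the original):
if __name__ == '__main__':
    move_tower(3, 'A', 'B', 'C')  # prints the 7 moves for a 3-disk tower, A -> C via B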
|
nilq/baby-python
|
python
|
from getratings.models.ratings import Ratings
class NA_Karthus_Mid_Aatrox(Ratings):
pass
class NA_Karthus_Mid_Ahri(Ratings):
pass
class NA_Karthus_Mid_Akali(Ratings):
pass
class NA_Karthus_Mid_Alistar(Ratings):
pass
class NA_Karthus_Mid_Amumu(Ratings):
pass
class NA_Karthus_Mid_Anivia(Ratings):
pass
class NA_Karthus_Mid_Annie(Ratings):
pass
class NA_Karthus_Mid_Ashe(Ratings):
pass
class NA_Karthus_Mid_AurelionSol(Ratings):
pass
class NA_Karthus_Mid_Azir(Ratings):
pass
class NA_Karthus_Mid_Bard(Ratings):
pass
class NA_Karthus_Mid_Blitzcrank(Ratings):
pass
class NA_Karthus_Mid_Brand(Ratings):
pass
class NA_Karthus_Mid_Braum(Ratings):
pass
class NA_Karthus_Mid_Caitlyn(Ratings):
pass
class NA_Karthus_Mid_Camille(Ratings):
pass
class NA_Karthus_Mid_Cassiopeia(Ratings):
pass
class NA_Karthus_Mid_Chogath(Ratings):
pass
class NA_Karthus_Mid_Corki(Ratings):
pass
class NA_Karthus_Mid_Darius(Ratings):
pass
class NA_Karthus_Mid_Diana(Ratings):
pass
class NA_Karthus_Mid_Draven(Ratings):
pass
class NA_Karthus_Mid_DrMundo(Ratings):
pass
class NA_Karthus_Mid_Ekko(Ratings):
pass
class NA_Karthus_Mid_Elise(Ratings):
pass
class NA_Karthus_Mid_Evelynn(Ratings):
pass
class NA_Karthus_Mid_Ezreal(Ratings):
pass
class NA_Karthus_Mid_Fiddlesticks(Ratings):
pass
class NA_Karthus_Mid_Fiora(Ratings):
pass
class NA_Karthus_Mid_Fizz(Ratings):
pass
class NA_Karthus_Mid_Galio(Ratings):
pass
class NA_Karthus_Mid_Gangplank(Ratings):
pass
class NA_Karthus_Mid_Garen(Ratings):
pass
class NA_Karthus_Mid_Gnar(Ratings):
pass
class NA_Karthus_Mid_Gragas(Ratings):
pass
class NA_Karthus_Mid_Graves(Ratings):
pass
class NA_Karthus_Mid_Hecarim(Ratings):
pass
class NA_Karthus_Mid_Heimerdinger(Ratings):
pass
class NA_Karthus_Mid_Illaoi(Ratings):
pass
class NA_Karthus_Mid_Irelia(Ratings):
pass
class NA_Karthus_Mid_Ivern(Ratings):
pass
class NA_Karthus_Mid_Janna(Ratings):
pass
class NA_Karthus_Mid_JarvanIV(Ratings):
pass
class NA_Karthus_Mid_Jax(Ratings):
pass
class NA_Karthus_Mid_Jayce(Ratings):
pass
class NA_Karthus_Mid_Jhin(Ratings):
pass
class NA_Karthus_Mid_Jinx(Ratings):
pass
class NA_Karthus_Mid_Kalista(Ratings):
pass
class NA_Karthus_Mid_Karma(Ratings):
pass
class NA_Karthus_Mid_Karthus(Ratings):
pass
class NA_Karthus_Mid_Kassadin(Ratings):
pass
class NA_Karthus_Mid_Katarina(Ratings):
pass
class NA_Karthus_Mid_Kayle(Ratings):
pass
class NA_Karthus_Mid_Kayn(Ratings):
pass
class NA_Karthus_Mid_Kennen(Ratings):
pass
class NA_Karthus_Mid_Khazix(Ratings):
pass
class NA_Karthus_Mid_Kindred(Ratings):
pass
class NA_Karthus_Mid_Kled(Ratings):
pass
class NA_Karthus_Mid_KogMaw(Ratings):
pass
class NA_Karthus_Mid_Leblanc(Ratings):
pass
class NA_Karthus_Mid_LeeSin(Ratings):
pass
class NA_Karthus_Mid_Leona(Ratings):
pass
class NA_Karthus_Mid_Lissandra(Ratings):
pass
class NA_Karthus_Mid_Lucian(Ratings):
pass
class NA_Karthus_Mid_Lulu(Ratings):
pass
class NA_Karthus_Mid_Lux(Ratings):
pass
class NA_Karthus_Mid_Malphite(Ratings):
pass
class NA_Karthus_Mid_Malzahar(Ratings):
pass
class NA_Karthus_Mid_Maokai(Ratings):
pass
class NA_Karthus_Mid_MasterYi(Ratings):
pass
class NA_Karthus_Mid_MissFortune(Ratings):
pass
class NA_Karthus_Mid_MonkeyKing(Ratings):
pass
class NA_Karthus_Mid_Mordekaiser(Ratings):
pass
class NA_Karthus_Mid_Morgana(Ratings):
pass
class NA_Karthus_Mid_Nami(Ratings):
pass
class NA_Karthus_Mid_Nasus(Ratings):
pass
class NA_Karthus_Mid_Nautilus(Ratings):
pass
class NA_Karthus_Mid_Nidalee(Ratings):
pass
class NA_Karthus_Mid_Nocturne(Ratings):
pass
class NA_Karthus_Mid_Nunu(Ratings):
pass
class NA_Karthus_Mid_Olaf(Ratings):
pass
class NA_Karthus_Mid_Orianna(Ratings):
pass
class NA_Karthus_Mid_Ornn(Ratings):
pass
class NA_Karthus_Mid_Pantheon(Ratings):
pass
class NA_Karthus_Mid_Poppy(Ratings):
pass
class NA_Karthus_Mid_Quinn(Ratings):
pass
class NA_Karthus_Mid_Rakan(Ratings):
pass
class NA_Karthus_Mid_Rammus(Ratings):
pass
class NA_Karthus_Mid_RekSai(Ratings):
pass
class NA_Karthus_Mid_Renekton(Ratings):
pass
class NA_Karthus_Mid_Rengar(Ratings):
pass
class NA_Karthus_Mid_Riven(Ratings):
pass
class NA_Karthus_Mid_Rumble(Ratings):
pass
class NA_Karthus_Mid_Ryze(Ratings):
pass
class NA_Karthus_Mid_Sejuani(Ratings):
pass
class NA_Karthus_Mid_Shaco(Ratings):
pass
class NA_Karthus_Mid_Shen(Ratings):
pass
class NA_Karthus_Mid_Shyvana(Ratings):
pass
class NA_Karthus_Mid_Singed(Ratings):
pass
class NA_Karthus_Mid_Sion(Ratings):
pass
class NA_Karthus_Mid_Sivir(Ratings):
pass
class NA_Karthus_Mid_Skarner(Ratings):
pass
class NA_Karthus_Mid_Sona(Ratings):
pass
class NA_Karthus_Mid_Soraka(Ratings):
pass
class NA_Karthus_Mid_Swain(Ratings):
pass
class NA_Karthus_Mid_Syndra(Ratings):
pass
class NA_Karthus_Mid_TahmKench(Ratings):
pass
class NA_Karthus_Mid_Taliyah(Ratings):
pass
class NA_Karthus_Mid_Talon(Ratings):
pass
class NA_Karthus_Mid_Taric(Ratings):
pass
class NA_Karthus_Mid_Teemo(Ratings):
pass
class NA_Karthus_Mid_Thresh(Ratings):
pass
class NA_Karthus_Mid_Tristana(Ratings):
pass
class NA_Karthus_Mid_Trundle(Ratings):
pass
class NA_Karthus_Mid_Tryndamere(Ratings):
pass
class NA_Karthus_Mid_TwistedFate(Ratings):
pass
class NA_Karthus_Mid_Twitch(Ratings):
pass
class NA_Karthus_Mid_Udyr(Ratings):
pass
class NA_Karthus_Mid_Urgot(Ratings):
pass
class NA_Karthus_Mid_Varus(Ratings):
pass
class NA_Karthus_Mid_Vayne(Ratings):
pass
class NA_Karthus_Mid_Veigar(Ratings):
pass
class NA_Karthus_Mid_Velkoz(Ratings):
pass
class NA_Karthus_Mid_Vi(Ratings):
pass
class NA_Karthus_Mid_Viktor(Ratings):
pass
class NA_Karthus_Mid_Vladimir(Ratings):
pass
class NA_Karthus_Mid_Volibear(Ratings):
pass
class NA_Karthus_Mid_Warwick(Ratings):
pass
class NA_Karthus_Mid_Xayah(Ratings):
pass
class NA_Karthus_Mid_Xerath(Ratings):
pass
class NA_Karthus_Mid_XinZhao(Ratings):
pass
class NA_Karthus_Mid_Yasuo(Ratings):
pass
class NA_Karthus_Mid_Yorick(Ratings):
pass
class NA_Karthus_Mid_Zac(Ratings):
pass
class NA_Karthus_Mid_Zed(Ratings):
pass
class NA_Karthus_Mid_Ziggs(Ratings):
pass
class NA_Karthus_Mid_Zilean(Ratings):
pass
class NA_Karthus_Mid_Zyra(Ratings):
pass
|
nilq/baby-python
|
python
|
# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.24.*, run `git checkout -b 0.24` or switch to the `0.24` branch on GitHub)
import mlflow.sklearn
import numpy as np
class PythonPredictor:
def __init__(self, config, python_client):
self.client = python_client
def load_model(self, model_path):
return mlflow.sklearn.load_model(model_path)
def predict(self, payload, query_params):
model_name = query_params["model"]
model_version = query_params.get("version", "latest")
model = self.client.get_model(model_name, model_version)
model_input = [
payload["cylinders"],
payload["displacement"],
payload["horsepower"],
payload["weight"],
payload["acceleration"],
]
result = model.predict([model_input]).item()
return {"prediction": result, "model": {"name": model_name, "version": model_version}}
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""PyVoiceChanger."""
import sys
from datetime import datetime
from subprocess import call
from time import sleep
from PyQt5.QtCore import QProcess, Qt, QTimer
from PyQt5.QtGui import QColor, QCursor, QIcon
from PyQt5.QtWidgets import (QApplication, QDial, QGraphicsDropShadowEffect,
QGroupBox, QLabel, QMainWindow, QMenu,
QShortcut, QSystemTrayIcon, QVBoxLayout)
from anglerfish import (check_encoding, make_logger, make_post_exec_msg,
set_process_name, set_single_instance,
set_desktop_launcher)
__version__ = '1.0.0'
__license__ = ' GPLv3+ LGPLv3+ '
__author__ = ' juancarlos '
__email__ = ' juancarlospaco@gmail.com '
__url__ = 'https://github.com/juancarlospaco/pyvoicechanger#pyvoicechanger'
start_time = datetime.now()
desktop_file_content = """
[Desktop Entry]
Comment=Voice Changer App.
Exec=chrt --idle 0 pyvoicechanger.py
GenericName=Voice Changer App.
Icon=audio-input-microphone
Name=PyVoiceChanger
StartupNotify=true
Terminal=false
Type=Application
Categories=Utility
X-DBUS-ServiceName=pyvoicechanger
X-KDE-StartupNotify=true
"""
###############################################################################
class MainWindow(QMainWindow):
"""Voice Changer main window."""
def __init__(self, parent=None):
super(MainWindow, self).__init__()
self.statusBar().showMessage("Move Dial to Deform Microphone Voice !.")
self.setWindowTitle(__doc__)
self.setMinimumSize(240, 240)
self.setMaximumSize(480, 480)
self.resize(self.minimumSize())
self.setWindowIcon(QIcon.fromTheme("audio-input-microphone"))
self.tray = QSystemTrayIcon(self)
self.center()
QShortcut("Ctrl+q", self, activated=lambda: self.close())
self.menuBar().addMenu("&File").addAction("Quit", lambda: exit())
self.menuBar().addMenu("Sound").addAction(
"STOP !", lambda: call('killall rec', shell=True))
windowMenu = self.menuBar().addMenu("&Window")
windowMenu.addAction("Hide", lambda: self.hide())
windowMenu.addAction("Minimize", lambda: self.showMinimized())
windowMenu.addAction("Maximize", lambda: self.showMaximized())
windowMenu.addAction("Restore", lambda: self.showNormal())
windowMenu.addAction("FullScreen", lambda: self.showFullScreen())
windowMenu.addAction("Center", lambda: self.center())
windowMenu.addAction("Top-Left", lambda: self.move(0, 0))
windowMenu.addAction("To Mouse", lambda: self.move_to_mouse_position())
# widgets
group0 = QGroupBox("Voice Deformation")
self.setCentralWidget(group0)
self.process = QProcess(self)
self.process.error.connect(
lambda: self.statusBar().showMessage("Info: Process Killed", 5000))
self.control = QDial()
self.control.setRange(-10, 20)
self.control.setSingleStep(5)
self.control.setValue(0)
self.control.setCursor(QCursor(Qt.OpenHandCursor))
self.control.sliderPressed.connect(
lambda: self.control.setCursor(QCursor(Qt.ClosedHandCursor)))
self.control.sliderReleased.connect(
lambda: self.control.setCursor(QCursor(Qt.OpenHandCursor)))
self.control.valueChanged.connect(
lambda: self.control.setToolTip("<b>" + str(self.control.value())))
self.control.valueChanged.connect(
lambda: self.statusBar().showMessage(
"Voice deformation: " + str(self.control.value()), 5000))
self.control.valueChanged.connect(self.run)
self.control.valueChanged.connect(lambda: self.process.kill())
# Graphic effect
self.glow = QGraphicsDropShadowEffect(self)
self.glow.setOffset(0)
self.glow.setBlurRadius(99)
self.glow.setColor(QColor(99, 255, 255))
self.control.setGraphicsEffect(self.glow)
self.glow.setEnabled(False)
# Timer to start
self.slider_timer = QTimer(self)
self.slider_timer.setSingleShot(True)
self.slider_timer.timeout.connect(self.on_slider_timer_timeout)
# an icon and set focus
QLabel(self.control).setPixmap(
QIcon.fromTheme("audio-input-microphone").pixmap(32))
self.control.setFocus()
QVBoxLayout(group0).addWidget(self.control)
self.menu = QMenu(__doc__)
self.menu.addAction(__doc__).setDisabled(True)
self.menu.setIcon(self.windowIcon())
self.menu.addSeparator()
self.menu.addAction(
"Show / Hide",
lambda: self.hide() if self.isVisible() else self.showNormal())
self.menu.addAction("STOP !", lambda: call('killall rec', shell=True))
self.menu.addSeparator()
self.menu.addAction("Quit", lambda: exit())
self.tray.setContextMenu(self.menu)
self.make_trayicon()
def run(self):
"""Run/Stop the QTimer."""
if self.slider_timer.isActive():
self.slider_timer.stop()
self.glow.setEnabled(True)
call('killall rec', shell=True)
self.slider_timer.start(3000)
def on_slider_timer_timeout(self):
"""Run subprocess to deform voice."""
self.glow.setEnabled(False)
value = int(self.control.value()) * 100
cmd = 'play -q -V0 "|rec -q -V0 -n -d -R riaa bend pitch {0} "'
command = cmd.format(int(value))
log.debug("Voice Deformation Value: {0}".format(value))
log.debug("Voice Deformation Command: {0}".format(command))
self.process.start(command)
if self.isVisible():
self.statusBar().showMessage("Minimizing to System TrayIcon", 3000)
log.debug("Minimizing Main Window to System TrayIcon now...")
sleep(3)
self.hide()
def center(self):
"""Center Window on the Current Screen,with Multi-Monitor support."""
window_geometry = self.frameGeometry()
mousepointer_position = QApplication.desktop().cursor().pos()
screen = QApplication.desktop().screenNumber(mousepointer_position)
centerPoint = QApplication.desktop().screenGeometry(screen).center()
window_geometry.moveCenter(centerPoint)
self.move(window_geometry.topLeft())
def move_to_mouse_position(self):
"""Center the Window on the Current Mouse position."""
window_geometry = self.frameGeometry()
window_geometry.moveCenter(QApplication.desktop().cursor().pos())
self.move(window_geometry.topLeft())
def make_trayicon(self):
"""Make a Tray Icon."""
if self.windowIcon() and __doc__:
self.tray.setIcon(self.windowIcon())
self.tray.setToolTip(__doc__)
self.tray.activated.connect(
lambda: self.hide() if self.isVisible()
else self.showNormal())
return self.tray.show()
###############################################################################
def main():
"""Main Loop."""
global log
log = make_logger("pyvoicechanger")
log.debug(__doc__ + __version__ + __url__)
check_encoding()
set_process_name("pyvoicechanger")
set_single_instance("pyvoicechanger")
set_desktop_launcher("pyvoicechanger", desktop_file_content)
application = QApplication(sys.argv)
application.setApplicationName("pyvoicechanger")
application.setOrganizationName("pyvoicechanger")
application.setOrganizationDomain("pyvoicechanger")
application.setWindowIcon(QIcon.fromTheme("audio-input-microphone"))
application.aboutToQuit.connect(lambda: call('killall rec', shell=True))
mainwindow = MainWindow()
mainwindow.show()
make_post_exec_msg(start_time)
sys.exit(application.exec_())
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from setuptools import setup
setup(
name='ctab',
version='0.1',
author='Thomas Hunger',
author_email='tehunger@gmail.com',
packages=[
'ctab',
]
)
|
nilq/baby-python
|
python
|
""" Methods to setup the logging """
import os
import yaml
import platform
import logging
import coloredlogs
import logging.config
from funscript_editor.definitions import WINDOWS_LOG_CONFIG_FILE, LINUX_LOG_CONFIG_FILE
from funscript_editor.utils.config import SETTINGS
def create_log_directories(config: dict) -> None:
""" create all log directories for a log configuration
Args:
config (dict): the logging configuration dictionary
"""
if isinstance(config, dict):
for k in config.keys():
create_log_directories(config[k])
if k == 'filename':
os.makedirs(os.path.dirname(os.path.abspath(config[k])), exist_ok=True)
def get_log_config_path() -> str:
""" Get the log config file path for current platfrom
Returns:
str: the log config file path
"""
return WINDOWS_LOG_CONFIG_FILE if platform.system() == 'Windows' else LINUX_LOG_CONFIG_FILE
class LoggerInterface:
""" Logger interface
Args:
name (str): name of the logger instance
"""
def __init__(self, name):
self.name = name
def debug(self, *args):
pass
def info(self, *args):
pass
def warning(self, *args):
pass
def error(self, *args):
pass
def critical(self, *args, exc_info=None):
pass
class DevZeroLogger(LoggerInterface):
""" Logger replacement to suppresses all log messages
Args:
name (str): name of the logger instance
"""
def __init__(self, name):
self.name = name
def debug(self, *args):
pass
def info(self, *args):
pass
def warning(self, *args):
pass
def error(self, *args):
pass
def critical(self, *args, exc_info=None):
pass
class PythonLogger(LoggerInterface):
""" Python Logger Wrapper
Args:
name (str): name of the logger instance
"""
def __init__(self, name):
self.logger = logging.getLogger(name)
def debug(self, *args):
self.logger.debug(*args)
def info(self, *args):
self.logger.info(*args)
def warning(self, *args):
self.logger.warning(*args)
def error(self, *args):
self.logger.error(*args)
def critical(self, *args, exc_info=None):
self.logger.critical(*args, exc_info=exc_info)
def getLogger(name) -> LoggerInterface:
""" Get logger wrapper for python logging.getLogger
Args:
name (str): name of the logger instance
"""
if platform.system() == 'Windows':
if SETTINGS['logging']:
return PythonLogger(name)
else:
return DevZeroLogger(name)
else:
return PythonLogger(name)
def get_logfiles_paths() -> list:
""" Get the logfiles paths from log config
Returns:
list: all logiles paths
"""
try:
result = []
config_path = get_log_config_path()
with open(config_path, 'rt') as f:
for line in f.readlines():
if "filename:" in line:
result.append(line.split(':')[1].strip())
return result
except Exception:
return []
def setup_logging(
default_level :int = logging.INFO,
env_key :str = 'LOG_CFG') -> None:
""" Logging Setup
Args:
default_level (int): logging level e.g. `logging.DEBUG` (default is `logging.INFO`).
env_key (str, optional): env variable name to load a configuration file via environment variable (default is `LOG_CFG`).
"""
config_path = get_log_config_path()
value = os.getenv(env_key, None)
if value: config_path = value
if os.path.exists(config_path):
with open(config_path, 'rt') as f:
try:
config = yaml.safe_load(f.read())
create_log_directories(config)
logging.config.dictConfig(config)
coloredlogs.install(level=default_level)
logging.debug('Logging setup completed')
except Exception as e:
print(e)
print('Error in Logging Configuration. Using default configs')
logging.basicConfig(level=default_level)
coloredlogs.install(level=default_level)
else:
logging.basicConfig(level=default_level)
coloredlogs.install(level=default_level)
print('Failed to load configuration file. Using default configs')
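# A minimal usage sketch (the import path is an assumption based on this
# module living under funscript_editor.utils; adjust to the real location):
#
#   from funscript_editor.utils.logging import setup_logging, getLogger
#   setup_logging()            # loads the platform YAML config or falls back to basicConfig
#   log = getLogger(__name__)
#   log.info('application started')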
|
nilq/baby-python
|
python
|
#####################################################
# Read active and reactive power from the atm90e32 then
# store within mongodb.
#
# copyright Margaret Johnson, 2020.
# Please credit when evolving your code with this code.
########################################################
from FHmonitor.error_handling import handle_exception
from FHmonitor.atm90_e32_pi import ATM90e32
from FHmonitor.store import MongoDB
from FHmonitor.calibrate import Calibrate
import threading # for blinking LED.
import board # for blinking LED.
import digitalio # for blinking LED.
import logging
logger = logging.getLogger(__name__)
class Monitor:
"""Take active and reactive power readings
from an atm90e32 and store the readings in
the Rasp Pi's mongodb.
Example::
m = Monitor()
m.init_sensor()
Make sure to read all the parameters that can be input to
:meth:`~FHmonitor.monitor.Monitor.init_sensor`.
The values depend on the Power Transformer and CTs being used.
The :meth:`~FHmonitor.monitor.Monitor.blink` method is useful
to turn on and off the LED (for debugging purposes).
"""
def __init__(self, led_pin=None):
self.db = None
self.energy_sensor = None
if led_pin is None:
led_pin = board.D18 # We always wire to GPIO 18.
self.led = digitalio.DigitalInOut(led_pin)
self.led.direction = digitalio.Direction.OUTPUT
####################################################
# Initialize the energy sensor. The properties are
# are written to atm90e32 registers during initialization.
# They are specific to the Power and Current Transformers
# being used. An exception occurs if the write cannot
# be verified.
####################################################
def init_sensor(self):
"""
Initialize the atm90e32 by setting the calibration registry properties.
Calibration is discussed within our
`FitHome wiki <https://github.com/BitKnitting/FitHome/wiki/ElectricityMonitor#calibration>`_ .
:param lineFreq: 4485 for 60 Hz (North America, Default), 389 for 50 Hz (rest of world)
:param PGAGain: Programmable Gain - 0 for 10A (1x), 21 for 100A (2x, Default), 42 for 100A - 200A (4x)
:param VoltageGain: Dependent on transformer being used. Should be measured prior to taking readings.
See the Calibration discussion linked to above.
:param CurrentGainCT1: Dependent on the CTs being used. Should be measured prior to taking readings.
See the Calibration discussion linked to above.
:param CurrentGainCT2: Similar to CurrentGainCT1, but for the second CT.
:return: True if meter is initialized.
False if meter could not be initialized.
""" # noqa
# Get the calibration parameters
c = Calibrate()
try:
self.energy_sensor = ATM90e32(c.lineFreq, c.PGAGain, c.VoltageGain,
c.CurrentGain, 0, c.CurrentGain)
logger.info('Energy meter has been initialized.')
# We have an instance of the atm90e32. Let's check if we get
# sensible readings.
sys0 = self.energy_sensor.sys_status0
if (sys0 == 0xFFFF or sys0 == 0):
e = 'EXCEPTION: Cannot connect to the energy meter.'
handle_exception(e)
logger.info('Energy meter is working.')
return True
except Exception as e:
handle_exception(e)
return False
def open_db(self, mongodb="mongodb://localhost:27017/", db="FitHome",
collection="aggregate"):
"""Opens and maintains an instance to the mongo database where
the power readings will be stored.
:param mongodb: URI to the mongo database running on the Raspberry Pi
:param db: Database within mongodb that holds the readings.
:param collection: name of the collection where the readings are held.
:return: True if the database can be opened.
"""
try:
self.db = MongoDB(mongodb, db, collection)
except Exception as e:
self.db = None
handle_exception(e)
return False
return True
def close_db(self):
"""It is more efficient to keep the mongodb open while
using it. However, if you know you will not be doing
any more transactions, it is good to clean up the
connections.
"""
if self.db is not None:
self.db.close()
####################################################
# Get the current active and reactive power readings.
####################################################
def take_reading(self):
"""Read the active and reactive power readings from
the atm90e32 registers.
:return: (Pa, Pr) Where Pa is the float value for the
active power reading and Pr is the float value for
the reactive power reading.
"""
Pa = self.energy_sensor.total_active_power
Pr = self.energy_sensor.total_reactive_power
logger.info(
f'Active Power reading: {Pa:.2f} Reactive Power Reading: {Pr:.2f}')
return Pa, Pr
####################################################
# Store the reading into mongo db.
####################################################
def store_reading(self, Pa, Pr):
"""Store the active and reactive power readings into
the mongodb database.
:param Pa: A floating value representing the active power reading.
Obtained through a call to take_reading().
:param Pr: A floating value representing the reactive power reading.
As with Pa, use take_reading() to retrieve the value from the
energy meter.
Returns True if the readings could be stored.
"""
if self.db is None:
# Try opening with the defaults.
db_opened = self.open_db()
if db_opened is False:
handle_exception('Cannot open the mongo database.')
return False
reading = {"Pa": Pa, "Pr": Pr, }
reading_saved = self.db.save(reading)
if reading_saved is False:
handle_exception('Cannot store the readings.')
return False
return True
####################################################
# Blink the LED
####################################################
def blink(self, ntimes=1):
"""Blink the monitor's LED. Uses Python's Timer object
so that blinking does not pause data capture and
storage.
:param ntimes: Number of times to blink, defaults to 1
:type ntimes: int, optional
"""
def turn_led_on(n):
self.led.value = True
t = threading.Timer(0.5, turn_led_off, [n])
t.start()
def check_led(n):
n -= 1
if n > 0:
turn_led_on(n)
def turn_led_off(n):
self.led.value = False
t = threading.Timer(0.5, check_led, [n])
t.start()
# Start blinking.
assert ntimes > 0
turn_led_on(ntimes)
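####################################################
# A hedged end-to-end sketch (this control flow is an
# illustration, not part of the module):
#
#   m = Monitor()
#   if m.init_sensor() and m.open_db():
#       Pa, Pr = m.take_reading()
#       if m.store_reading(Pa, Pr):
#           m.blink()   # flash once to signal a stored reading
#       m.close_db()
####################################################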
|
nilq/baby-python
|
python
|
import torch
import numpy as np
from torch import Tensor
from torch.utils.data import Dataset, DataLoader
from torchvision import io
from pathlib import Path
from typing import Tuple
class Wound(Dataset):
"""
num_classes: 18
"""
# explain the purpose of the model
# where is it, how big it is,
# give examples of what each of segments are
# people who are familiar: segmentation
# medical background: application site, trying to identify different areas in a an image
# in the wound we are looking for different types of tissues
# get the story
CLASSES = ['Boundary','PeriWoundPerimeter','WoundPerimeter','Epithellialization','Granulation','Hypergranulation','NecroticSlough','Eschar','OtherWound','DamagedToeNail','HealthyToeNail','Oedematous','Erythematous','OtherSkinUnbroken','Maceration','Excoriation','OtherSkinBroken','HealthySkin']
PALETTE = torch.tensor([[192, 192, 192],[0, 183, 235],[0, 255, 255],[255, 255, 0],[212, 175, 55],[127, 255, 212],[138, 43, 226],[204, 255, 0],[220, 208, 255],[0, 250, 154],[255, 69, 0],[255, 165, 0],[30, 144, 255],[221, 160, 221],[0, 255, 0],[0, 128, 128],[252, 15, 192],[220, 20, 60]])
ID2TRAINID = {0: 255, 1: 255, 2: 255, 3: 255, 4: 255, 5: 255, 6: 255, 7: 0, 8: 1, 9: 255, 10: 255, 11: 2, 12: 3, 13: 4, 14: 255, 15: 255, 16: 255,
17: 5, 18: 255, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14, 28: 15, 29: 255, 30: 255, 31: 16, 32: 17, 33: 18, -1: -1}
def __init__(self, root: str, split: str = 'train', transform = None) -> None:
super().__init__()
assert split in ['train', 'val', 'test']
self.transform = transform
self.n_classes = len(self.CLASSES)
self.ignore_label = 255
self.label_map = np.arange(256)
for id, trainid in self.ID2TRAINID.items():
self.label_map[id] = trainid
img_path = Path(root) / 'leftImg8bit' / split
self.files = list(img_path.rglob('*.png'))
if not self.files:
raise Exception(f"No images found in {img_path}")
print(f"Found {len(self.files)} {split} images.")
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, index: int) -> Tuple[Tensor, Tensor]:
img_path = str(self.files[index])
lbl_path = str(self.files[index]).replace('leftImg8bit', 'gtFine').replace('.png', '_labelIds.png')
image = io.read_image(img_path)
label = io.read_image(lbl_path)
if self.transform:
image, label = self.transform(image, label)
return image, self.encode(label.squeeze().numpy()).long()
def encode(self, label: Tensor) -> Tensor:
label = self.label_map[label]
return torch.from_numpy(label)
# for id, trainid in self.ID2TRAINID.items():
# label[label == id] = trainid
# return label
def decode(self, label: Tensor) -> Tensor:
return self.PALETTE[label.to(int)]
if __name__ == '__main__':
import matplotlib.pyplot as plt
from torchvision import transforms as T
from torchvision.utils import make_grid
from transforms import Compose, RandomResizedCrop, Normalize
root = 'C:\\Users\\sithu\\Documents\\Datasets\\CityScapes'
transform = Compose([RandomResizedCrop((1024, 1024)), Normalize()])
dataset = Wound(root, split="train", transform=transform)
dataloader = DataLoader(dataset, shuffle=True, batch_size=4)
image, label = next(iter(dataloader))
print('=========================')
print(image.shape, label.shape)
print(label.unique())
label[label==255] = 0
labels = [dataset.decode(lbl).permute(2, 0, 1) for lbl in label]
labels = torch.stack(labels)
inv_normalize = T.Normalize(
mean=(-0.485/0.229, -0.456/0.224, -0.406/0.225),
std=(1/0.229, 1/0.224, 1/0.225)
)
image = inv_normalize(image)
image *= 255
images = torch.vstack([image, labels])
plt.imshow(make_grid(images, nrow=4).to(torch.uint8).numpy().transpose((1, 2, 0)))
plt.show()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
def best_score(a_dictionary):
if a_dictionary:
return max(a_dictionary, key=a_dictionary.get)
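# e.g. best_score({'alice': 7, 'bob': 12}) returns 'bob';
# best_score(None) and best_score({}) fall through and return None.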
|
nilq/baby-python
|
python
|
print("before loop")
for count in range(10):
if count > 5:
continue
print(count)
print("after loop")
|
nilq/baby-python
|
python
|
"""Application management util tests"""
# pylint: disable=redefined-outer-name
from types import SimpleNamespace
import pytest
import factory
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from mitol.common.utils import now_in_utc
from applications.api import derive_application_state
from applications.constants import (
REVIEW_STATUS_APPROVED,
SUBMISSION_VIDEO,
AppStates,
SUBMISSION_QUIZ,
)
from applications.factories import (
BootcampApplicationFactory,
BootcampRunApplicationStepFactory,
ApplicationStepFactory,
ApplicationStepSubmissionFactory,
VideoInterviewSubmissionFactory,
QuizSubmissionFactory,
)
from applications.management.utils import (
migrate_application,
has_same_application_steps,
)
from ecommerce.factories import OrderFactory
from ecommerce.models import Order
from klasses.factories import BootcampFactory, BootcampRunFactory, InstallmentFactory
from profiles.factories import UserFactory
FAKE_FILE_NAME = "file.txt"
FAKE_LINKEDIN_URL = "http://example.com/linkedin"
BOOTCAMP_PRICE = 100
@pytest.fixture()
def bootcamp_data():
"""Fixture for bootcamps data"""
bootcamp = BootcampFactory.create()
bootcamp_runs = BootcampRunFactory.create_batch(2, bootcamp=bootcamp)
InstallmentFactory.create_batch(
len(bootcamp_runs),
amount=BOOTCAMP_PRICE,
bootcamp_run=factory.Iterator(bootcamp_runs),
)
submission_types = [SUBMISSION_VIDEO, SUBMISSION_VIDEO, SUBMISSION_QUIZ]
app_steps = ApplicationStepFactory.create_batch(
len(submission_types),
bootcamp=bootcamp,
submission_type=factory.Iterator(submission_types),
step_order=factory.Iterator([1, 2, 3]),
)
run_app_steps = {
run.id: BootcampRunApplicationStepFactory.create_batch(
len(app_steps),
bootcamp_run=run,
application_step=factory.Iterator(app_steps),
)
for run in bootcamp_runs
}
return SimpleNamespace(
bootcamp=bootcamp,
runs=bootcamp_runs,
app_steps=app_steps,
run_app_steps=run_app_steps,
submission_types=submission_types,
)
@pytest.fixture()
def completed_app_data(bootcamp_data):
"""Fixture with a completed bootcamp application and associated data"""
user = UserFactory.create()
run = bootcamp_data.runs[0]
now = now_in_utc()
application = BootcampApplicationFactory.create(
user=user,
bootcamp_run=run,
resume_file=SimpleUploadedFile(
f"path/to/{FAKE_FILE_NAME}", b"these are the file contents"
),
linkedin_url=FAKE_LINKEDIN_URL,
resume_upload_date=now,
)
submissions = ApplicationStepSubmissionFactory.create_batch(
run.application_steps.count(),
bootcamp_application=application,
run_application_step=factory.Iterator(
run.application_steps.order_by("application_step__step_order").all()
),
content_object=factory.Iterator(
[
VideoInterviewSubmissionFactory.create(),
VideoInterviewSubmissionFactory.create(),
QuizSubmissionFactory.create(),
]
),
submitted_date=now,
review_status=REVIEW_STATUS_APPROVED,
review_status_date=now,
)
order = OrderFactory.create(
application=application,
user=user,
status=Order.FULFILLED,
total_price_paid=BOOTCAMP_PRICE,
)
application.state = derive_application_state(application)
application.save()
return SimpleNamespace(
application=application, submissions=submissions, order=order
)
@pytest.mark.django_db
def test_migrate_application(bootcamp_data, completed_app_data):
"""
migrate_application should create a new application for a user in a new bootcamp run and
copy over data from an existing application.
"""
to_run = bootcamp_data.runs[1]
to_run_application = migrate_application(
from_run_application=completed_app_data.application, to_run=to_run
)
assert completed_app_data.application.state == AppStates.COMPLETE.value
assert to_run_application.state == AppStates.AWAITING_PAYMENT.value
assert to_run_application.user == completed_app_data.application.user
assert to_run_application.bootcamp_run == to_run
assert (
to_run_application.resume_file.name
== completed_app_data.application.resume_file.name
)
assert to_run_application.linkedin_url == FAKE_LINKEDIN_URL
for i, submission in enumerate(to_run_application.submissions.all()):
assert submission.review_status == REVIEW_STATUS_APPROVED
assert submission.run_application_step in bootcamp_data.run_app_steps[to_run.id]
assert submission.object_id == completed_app_data.submissions[i].object_id
@pytest.mark.django_db
def test_migrate_application_different_order(bootcamp_data, completed_app_data):
"""
migrate_application should be able to migrate an application between runs of two different bootcamps, even if the
application steps are in a different order.
"""
new_bootcamp_run = BootcampRunFactory.create()
InstallmentFactory.create(amount=BOOTCAMP_PRICE, bootcamp_run=new_bootcamp_run)
new_app_steps = ApplicationStepFactory.create_batch(
len(bootcamp_data.app_steps),
bootcamp=new_bootcamp_run.bootcamp,
# Use the same application steps as the existing bootcamp, but in reverse order
submission_type=factory.Iterator(reversed(bootcamp_data.submission_types)),
step_order=factory.Iterator([1, 2, 3]),
)
run_app_steps = BootcampRunApplicationStepFactory.create_batch(
len(new_app_steps),
bootcamp_run=new_bootcamp_run,
application_step=factory.Iterator(new_app_steps),
)
new_run_application = migrate_application(
from_run_application=completed_app_data.application, to_run=new_bootcamp_run
)
assert new_run_application.state == AppStates.AWAITING_PAYMENT.value
ordered_submissions = list(
new_run_application.submissions.order_by(
"run_application_step__application_step__step_order"
)
)
for i, submission in enumerate(ordered_submissions):
assert submission.review_status == REVIEW_STATUS_APPROVED
assert submission.run_application_step == run_app_steps[i]
# The submissions for the new application should be copied over for the existing one, but the application steps
# are in a different order.
assert [sub.object_id for sub in ordered_submissions] == [
completed_app_data.submissions[2].object_id,
completed_app_data.submissions[0].object_id,
completed_app_data.submissions[1].object_id,
]
@pytest.mark.django_db
def test_migrate_application_existing(bootcamp_data, completed_app_data):
"""
migrate_application should raise an exception if there is already an application in an approved
state for the 'to' run.
"""
to_run = bootcamp_data.runs[1]
BootcampApplicationFactory.create(
bootcamp_run=to_run,
user=completed_app_data.application.user,
state=AppStates.COMPLETE.value,
)
with pytest.raises(ValidationError):
migrate_application(
from_run_application=completed_app_data.application, to_run=to_run
)
@pytest.mark.django_db
def test_has_same_application_steps(bootcamp_data):
"""
has_same_application_steps should return True if the two bootcamp ids refer to a
set of equivalent application steps
"""
existing_bootcamp = bootcamp_data.runs[0].bootcamp
assert (
has_same_application_steps(existing_bootcamp.id, existing_bootcamp.id) is True
)
new_bootcamp = BootcampFactory.create()
existing_bootcamp_steps = list(bootcamp_data.app_steps)
ApplicationStepFactory.create_batch(
len(bootcamp_data.app_steps),
bootcamp=new_bootcamp,
submission_type=factory.Iterator(
[step.submission_type for step in existing_bootcamp_steps]
),
step_order=factory.Iterator(
[step.step_order for step in existing_bootcamp_steps]
),
)
assert has_same_application_steps(existing_bootcamp.id, new_bootcamp.id) is True
# If a step is removed/added/updated, this function should return False
step = new_bootcamp.application_steps.first()
step.delete()
assert has_same_application_steps(existing_bootcamp.id, new_bootcamp.id) is False
@pytest.mark.django_db
def test_has_same_application_steps_order():
"""
has_same_application_steps should take a flag that determines whether it will return True if the bootcamps
have the same steps in a different order.
"""
submission_types = [SUBMISSION_VIDEO, SUBMISSION_QUIZ]
bootcamps = BootcampFactory.create_batch(2)
ApplicationStepFactory.create_batch(
len(submission_types),
bootcamp=bootcamps[0],
submission_type=factory.Iterator(submission_types),
step_order=factory.Iterator([1, 2]),
)
ApplicationStepFactory.create_batch(
len(submission_types),
bootcamp=bootcamps[1],
submission_type=factory.Iterator(reversed(submission_types)),
step_order=factory.Iterator([1, 2]),
)
assert (
has_same_application_steps(bootcamps[0].id, bootcamps[1].id, ignore_order=True)
is True
)
assert (
has_same_application_steps(bootcamps[0].id, bootcamps[1].id, ignore_order=False)
is False
)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020. Huawei Technologies Co.,Ltd.ALL rights reserved.
This program is licensed under Mulan PSL v2.
You can use it according to the terms and conditions of the Mulan PSL v2.
http://license.coscl.org.cn/MulanPSL2
THIS PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
####################################
# @Author : lemon.higgins
# @Contact : lemon.higgins@aliyun.com
# @Date : 2020-11-10 02:40:04
# @License : Mulan PSL v2
# @Version : 1.0
# @Desc    : Collect basic system information
#####################################
import subprocess
import os
import logging
from ruamel import yaml
import json
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
ENV_INFO = {}
def basic_info():
"""
    Collect basic Linux system information.
    Returns:
        [dict]: [summary of the collected environment information]
    """
    ENV_INFO["os"] = subprocess.getoutput(
        "cat /etc/os-release | grep '^PRETTY_NAME' | awk -F '=' '{print $NF}' | tr -d '\"'"
    )
ENV_INFO["hostname"] = subprocess.getoutput("hostname")
ENV_INFO["platform"] = subprocess.getoutput(
"hostnamectl | grep 'Virtualization: kvm' >/dev/nul && echo kvm || echo physical"
)
ENV_INFO["frame"] = subprocess.getoutput("uname -m")
ENV_INFO["kernel version"] = subprocess.getoutput("uname -r")
ENV_INFO["cmdline"] = subprocess.getoutput("cat /proc/cmdline")
return ENV_INFO
def mem_info():
"""
    Collect memory information for the environment.
    Returns:
        [dict]: [summary of the collected environment information]
"""
ENV_INFO["mem info"] = {}
ENV_INFO["mem info"]["mem"] = {}
ENV_INFO["mem info"]["swap"] = {}
ENV_INFO["mem info"]["mem"]["total"] = (
subprocess.getoutput("cat /proc/meminfo | grep MemTotal | awk '{print $2}'")
+ "kB"
)
ENV_INFO["mem info"]["mem"]["free"] = (
subprocess.getoutput("cat /proc/meminfo | grep MemFree | awk '{print $2}'")
+ "kB"
)
ENV_INFO["mem info"]["mem"]["available"] = (
subprocess.getoutput(
"cat /proc/meminfo | grep MemAvailable | awk '{print $2}'"
)
+ "kB"
)
ENV_INFO["mem info"]["mem"]["buffers"] = (
subprocess.getoutput("cat /proc/meminfo | grep Buffers | awk '{print $2}'")
+ "kB"
)
ENV_INFO["mem info"]["mem"]["cache"] = (
subprocess.getoutput("cat /proc/meminfo | grep Cached | awk '{print $2}'")
+ "kB"
)
ENV_INFO["mem info"]["swap"]["total"] = (
subprocess.getoutput("cat /proc/meminfo | grep SwapTotal | awk '{print $2}'")
+ "kB"
)
ENV_INFO["mem info"]["swap"]["free"] = (
subprocess.getoutput("cat /proc/meminfo | grep SwapFree | awk '{print $2}'")
+ "kB"
)
ENV_INFO["mem info"]["swap"]["cache"] = (
subprocess.getoutput("cat /proc/meminfo | grep SwapCached | awk '{print $2}'")
+ "kB"
)
return ENV_INFO
def cpu_info():
"""
    Collect CPU information for the environment.
    Returns:
        [dict]: [summary of the collected environment information]
"""
ENV_INFO["cpu info"] = {}
ENV_INFO["cpu info"]["processor"] = subprocess.getoutput(
"cat /proc/cpuinfo | grep processor | wc -l"
)
core_num = 0
cores = subprocess.getoutput(
"cat /proc/cpuinfo | grep 'cpu cores' | awk '{print $NF}'"
).split("\n")
for core in cores:
core_num += int(core)
ENV_INFO["cpu info"]["core"] = core_num
ENV_INFO["cpu info"]["model name"] = subprocess.getoutput(
"cat /proc/cpuinfo | grep 'model name' | awk -F ':' '{print $NF}' | sed 's/^ //g' | uniq"
)
ENV_INFO["cpu info"]["cpu MHz"] = subprocess.getoutput(
"cat /proc/cpuinfo | grep 'cpu MHz' | awk '{print $NF}' | uniq"
)
ENV_INFO["cpu info"]["cache size"] = subprocess.getoutput(
"cat /proc/cpuinfo | grep 'cache size' | awk '{print $NF}' | uniq"
)
return ENV_INFO
class NetInfo(object):
"""
    Collect basic network information for the environment.
"""
def dns():
"""
        Collect system DNS configuration.
        Returns:
            [dict]: [summary of the collected environment information]
"""
ENV_INFO["net info"] = {}
resolv = []
for dns in subprocess.getoutput(
"cat /etc/resolv.conf | grep nameserver | awk '{print $NF}'"
).split("\n"):
nameserver = {}
nameserver["nameserver"] = dns
resolv.append(nameserver)
ENV_INFO["net info"]["resolv"] = resolv
return ENV_INFO
def eth_info():
"""
        Collect Ethernet adapter information.
        Returns:
            [dict]: [summary of the collected environment information]
"""
ENV_INFO["net info"] = {}
ENV_INFO["net info"]["eth info"] = []
for id in subprocess.getoutput(
"lspci | grep 'Ethernet' | awk '{print $1}'"
).split("\n"):
if id != "":
ENV_INFO["net info"]["eth info"].append(
subprocess.getoutput(
"lspci -s "
+ id
+ " -v | grep Subsystem: | awk -F 'Subsystem: ' '{print $NF}'"
)
)
return ENV_INFO
def mac(nic):
"""
        Get the MAC address of a NIC.
        Args:
            nic ([string]): [NIC name]
        Returns:
            [string]: [MAC address]
"""
return subprocess.getoutput("cat /sys/class/net/" + nic + "/address")
def status(nic):
"""获取网卡的status信息
Args:
nic ([string]): [网卡名]
Returns:
[dict]: [获取的环境信息总结]
"""
return subprocess.getoutput(
"ip addr show " + nic + " | grep '<.*>' | awk '{print $3}'| tr -d '<>'"
)
def mtu(nic):
"""获取网卡的mtu值
Args:
nic ([string]): [网卡名]
Returns:
[string]: [mtu值]
"""
        # Print the field immediately following the literal "mtu" token.
        return subprocess.getoutput(
            "ip addr show "
            + nic
            + " | awk '/mtu/ {for (i = 1; i <= NF; i++) if ($i == \"mtu\") {print $(i + 1); exit}}'"
        )
def driver(nic):
"""获取网卡驱动信息
Args:
nic ([string]): [网卡名]
Returns:
[string]: [mtu值]
"""
return subprocess.getoutput(
"ethtool -i " + nic + " | grep driver | awk '{print $NF}'"
)
def brigde(nic):
"""确定当前网卡是否是网桥
Returns:
[string]: [YES or NO]
"""
return subprocess.getoutput(
"brctl show | grep " + nic + " >/dev/nul && echo 'YES' || echo 'NO'"
)
def v4_ip(nic):
"""获取ip,route,genmask信息
Returns:
[list]: [ip, route, genmask]
"""
v4_ip = []
for ip in subprocess.getoutput(
"ip addr show " + nic + " | grep 'inet ' | awk '{print $2}' "
).split("\n"):
ipv4 = {}
ipv4["ipv4"] = ip
if ip == "":
ipv4["route"] = ""
ipv4["genmask"] = ""
return ENV_INFO["net info"]["nic"]["v4 ip"].append(ipv4)
ipv4["route"] = subprocess.getoutput(
'ip route | grep "$(echo '
+ ip
+ " | awk -F '/' '{print $1}')\" | awk '{print $1}'"
)
ipv4["genmask"] = subprocess.getoutput(
"ip addr show " + nic + ' | grep "' + ip + " brd\" | awk '{print $4}'"
)
v4_ip.append(ipv4)
return v4_ip
def v6_ip(nic):
"""获取ipv6的基础信息
Returns:
[list]: [ip, route]
"""
v6_ip = []
tmp = []
v6_routes = subprocess.getoutput(
"ip -6 route | grep nexthop | grep " + nic + " | awk '{print $3}'"
).split("\n")
if "fe80::" in subprocess.getoutput(
"ip -6 route | grep 'fe80::' | grep " + nic
):
v6_routes.append("fe80::")
for route in v6_routes:
ipv6 = {}
v6_route = []
if route == "" or route in tmp:
continue
route_h = route.split("::")[0] + ":"
for r in v6_routes:
if route_h in r:
v6_route.append(r)
tmp.append(r)
ipv6["ipv6"] = subprocess.getoutput(
"ip addr show "
+ nic
+ ' | grep "inet6 '
+ route_h
+ "\" | awk '{print $2}'"
)
ipv6["route"] = v6_route
v6_ip.append(ipv6)
return v6_ip
def auto_negotiation(nic):
"""查看网卡的自动协商机制
Returns:
[string]: [off or on]
"""
return subprocess.getoutput(
"ethtool " + nic + " | grep 'Auto-negotiation' | awk '{print $NF}'"
)
def link_detected(nic):
"""链路状态
Returns:
[string]: [yes or no]
"""
return subprocess.getoutput(
"ethtool " + nic + " | grep 'Link detected' | awk '{print $NF}'"
)
def nic_info(nic):
"""获取网卡相关所有信息
Args:
nic (string): 网卡名称
Returns:
[dict]: 网卡信息
"""
nic_info = {}
nic_info["name"] = nic
nic_info["mac"] = NetInfo.mac(nic)
nic_info["status"] = NetInfo.status(nic)
nic_info["mtu"] = NetInfo.mtu(nic)
nic_info["driver"] = NetInfo.driver(nic)
nic_info["brigde"] = NetInfo.brigde(nic)
nic_info["v4 ip"] = NetInfo.v4_ip(nic)
nic_info["v6 ip"] = NetInfo.v6_ip(nic)
nic_info["Auto-negotiation"] = NetInfo.auto_negotiation(nic)
nic_info["Link detected"] = NetInfo.link_detected(nic)
        # Create the "net info"/"nic" list on first use, then append.
        ENV_INFO.setdefault("net info", {}).setdefault("nic", []).append(nic_info)
return ENV_INFO
def all_nic_info():
"""获取网卡所有的基础信息
Returns:
[list]: [所有的网卡信息]
"""
ENV_INFO["net info"] = {}
ENV_INFO["net info"]["nic"] = []
for nic in subprocess.getoutput("ls /sys/class/net/").split("\n"):
NetInfo.nic_info(nic)
return ENV_INFO
def disk_info():
"""
    Collect disk and mount point information.
"""
disk_json = subprocess.getoutput("lsblk -J")
disk = json.loads(disk_json).get("blockdevices")
ENV_INFO["disk info"] = disk
return ENV_INFO
def service_info():
"""
    Collect status information for all services on the system.
"""
ENV_INFO["service info"] = []
for service in subprocess.getoutput(
"systemctl --all --no-pager | grep -w 'active\|inactive' | sed 's/● / /g' | awk '{print $1}'"
).split("\n"):
service_info = {}
service_info["UNIT"] = service
service = service.replace("\\", "\\\\")
service_info["LOAD"] = subprocess.getoutput(
"systemctl --all --no-pager | grep -w '" + service + "' | awk '{print $2}'"
)
service_info["ACTIVE"] = subprocess.getoutput(
"systemctl --all --no-pager | grep -w '" + service + "' | awk '{print $3}'"
)
service_info["SUB"] = subprocess.getoutput(
"systemctl --all --no-pager | grep -w '" + service + "' | awk '{print $4}'"
)
ENV_INFO["service info"].append(service_info)
    return ENV_INFO
def socket_info():
"""
    Collect socket usage information for the environment.
"""
ENV_INFO["socket info"] = {}
ENV_INFO["socket info"]["used num"] = subprocess.getoutput(
"cat /proc/net/sockstat | grep sockets | awk '{print $NF}'"
)
return ENV_INFO
def process_info():
"""
    Collect process information.
"""
ENV_INFO["process info"] = []
    ps_cmd = "ps -eo pid,ppid,user,rss,pmem,pcpu,vsize,args"
    for pid in subprocess.getoutput(
        ps_cmd + " | grep -vw 'PID PPID USER' | awk '{print $1}'"
    ).split("\n"):
        process = {}
        process["pid"] = pid
        # Match the row whose first column is this pid, then print the wanted column.
        process["ppid"] = subprocess.getoutput(
            ps_cmd + " | awk '$1 == " + pid + " {print $2}'"
        )
        process["user"] = subprocess.getoutput(
            ps_cmd + " | awk '$1 == " + pid + " {print $3}'"
        )
        process["rss"] = subprocess.getoutput(
            ps_cmd + " | awk '$1 == " + pid + " {print $4}'"
        )
        process["pmem"] = subprocess.getoutput(
            ps_cmd + " | awk '$1 == " + pid + " {print $5}'"
        )
        process["pcpu"] = subprocess.getoutput(
            ps_cmd + " | awk '$1 == " + pid + " {print $6}'"
        )
        process["vsize"] = subprocess.getoutput(
            ps_cmd + " | awk '$1 == " + pid + " {print $7}'"
        )
        process["args"] = subprocess.getoutput(
            ps_cmd
            + " | awk '$1 == "
            + pid
            + " {for (i = 8; i <= NF; i++) printf \"%s \", $i; print \"\"}'"
        ).strip()
        ENV_INFO["process info"].append(process)
    return ENV_INFO
def collect_log():
"""收集message日志
"""
exitcode, output = subprocess.getstatusoutput(
"log_dir=$(mktemp -d) && cp /var/log/message* ${log_dir} -fr && dmesg > ${log_dir}/kmesg && tar -zcvf "
+ os.getcwd()
+ "/log.tar.gz ${log_dir} && rm -rf ${log_dir}"
)
if exitcode != 0:
logging.error("failed to collect logs.")
exit(1)
def write_yaml(info):
"""
    Write the collected data to a YAML file.
    Args:
        info ([dict]): [environment information data]
"""
with open(
os.path.split(os.path.realpath(__file__))[0] + "/envInfo.yaml", "w+"
) as f:
yaml.dump(info, f, Dumper=yaml.RoundTripDumper, allow_unicode=True)
def install_rpm(rpm):
"""安装环境信息收集需要的rpm软件包
Args:
rpm (string): 软件包名
"""
exitcode, output = subprocess.getstatusoutput(
"rpm -qa " + rpm + "&& yum -y install " + rpm
)
if exitcode != 0:
logging.error("failed to install rpms:" + rpm)
exit(1)
if __name__ == "__main__":
install_rpm("coreutils grep gawk hostname systemd util-linux systemd procps-ng")
basic_info()
mem_info()
cpu_info()
NetInfo.all_nic_info()
disk_info()
service_info()
process_info()
collect_log()
write_yaml(ENV_INFO)
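# Typical usage (a sketch; assumes root privileges on a yum-based distro):
#   run this script with python3; the summary is written to envInfo.yaml
#   beside the script, and collected logs are archived to ./log.tar.gz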
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# @Time : 2022/2/20
# @Author : Zhelong Huang
# @File : client2.py
# @Description: client2
_POS = 2
import os, sys
sys.path.append(os.path.abspath('.'))
from coach import LoadCoach
import argparse
arg = argparse.ArgumentParser()
arg.add_argument('-r', '--render', default=True)
arg.add_argument('-c', '--client', default="Demo")
args = vars(arg.parse_args())
CLIENT_ARGS = {
'url' : 'ws://127.0.0.1:23456/game/client{}'.format(_POS),
'render' : bool(int(args['render']))
}
if __name__ == '__main__':
try:
ws = LoadCoach(args['client'])(**CLIENT_ARGS)
ws.connect()
ws.run_forever()
except KeyboardInterrupt:
ws.close()
|
nilq/baby-python
|
python
|
# A non-empty zero-indexed array A consisting of N integers is given.
#
# A permutation is a sequence containing each element from 1 to N once, and
# only once.
#
# For example, array A such that:
# A = [4, 1, 3, 2]
# is a permutation, but array A such that:
# A = [4, 1, 3]
# is not a permutation, because value 2 is missing.
#
# The goal is to check whether array A is a permutation.
#
# Write a function:
# def solution(A)
# that, given a zero-indexed array A, returns 1 if array A is a permutation
# and 0 if it is not.
#
# For example, given array A such that:
# A = [4, 1, 3, 2]
# the function should return 1.
#
# Given array A such that:
# A = [4, 1, 3]
# the function should return 0.
#
# Assume that:
# * N is an integer within the range [1..100,000];
# * each element of array A is an integer within the range [1..1,000,000,000].
#
# Complexity:
# * expected worst-case time complexity is O(N);
# * expected worst-case space complexity is O(N), beyond input storage (not
# counting the storage required for input arguments).
def solution(A):
N = len(A)
if N == 1:
if A[0] == 1:
return 1
else:
return 0
count = {}
for i in range(N):
if A[i] not in count:
count[A[i]] = 0
count[A[i]] += 1
if count[A[i]] > 1:
return 0
values = count.keys()
if max(values) == N:
return 1
return 0
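# Quick self-check against the two examples from the problem statement above
# (an added sketch; not part of the original solution):
if __name__ == "__main__":
    assert solution([4, 1, 3, 2]) == 1
    assert solution([4, 1, 3]) == 0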
|
nilq/baby-python
|
python
|
"""Flexmock public API."""
# pylint: disable=no-self-use,too-many-lines
import inspect
import re
import sys
import types
from types import BuiltinMethodType, TracebackType
from typing import Any, Callable, Dict, Iterator, List, NoReturn, Optional, Tuple, Type
from flexmock.exceptions import (
CallOrderError,
ExceptionClassError,
ExceptionMessageError,
FlexmockError,
MethodCallError,
MethodSignatureError,
MockBuiltinError,
StateError,
)
AT_LEAST = "at least"
AT_MOST = "at most"
EXACTLY = "exactly"
SPECIAL_METHODS = (classmethod, staticmethod)
UPDATED_ATTRS = ["should_receive", "should_call", "new_instances"]
DEFAULT_CLASS_ATTRIBUTES = [attr for attr in dir(type) if attr not in dir(type("", (object,), {}))]
# Python 3.6 does not have the re.Pattern type, so derive it from a compiled pattern
RE_TYPE = type(re.compile(""))
class ReturnValue:
"""ReturnValue"""
def __init__(self, value: Optional[Any] = None, raises: Optional[Exception] = None) -> None:
self.value = value
self.raises = raises
def __str__(self) -> str:
if self.raises:
return f"{self.raises}({_arg_to_str(self.value)})"
if not isinstance(self.value, tuple):
return str(_arg_to_str(self.value))
if len(self.value) == 1:
return str(_arg_to_str(self.value[0]))
values = ", ".join([_arg_to_str(x) for x in self.value])
return f"({values})"
class Mock:
"""Fake object class returned by the flexmock() function."""
def __init__(self, **kwargs: Any) -> None:
"""Mock constructor.
Args:
- kwargs: dict of attribute/value pairs used to initialize the mock object
"""
self._object: Any = self
for attr, value in kwargs.items():
if isinstance(value, property):
setattr(self.__class__, attr, value)
else:
setattr(self, attr, value)
def __enter__(self) -> Any:
return self._object
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
pass
def __call__(self, *args: Any, **kwargs: Any) -> "Mock":
"""Make Expectation.mock() work with parens."""
return self
def __iter__(self) -> Iterator[Any]:
"""Makes the mock object iterable.
Call the instance's version of __iter__ if available, otherwise yield self.
"""
if (
hasattr(self, "__dict__")
and isinstance(self.__dict__, dict)
and "__iter__" in self.__dict__
):
for item in self.__dict__["__iter__"](self):
yield item
else:
yield self
def should_receive(self, name: str) -> "Expectation":
"""Replaces the specified attribute with a fake.
Args:
- name: string name of the attribute to replace
Returns:
- Expectation object which can be used to modify the expectations
on the fake attribute
"""
if name in UPDATED_ATTRS:
raise FlexmockError("unable to replace flexmock methods")
chained_methods = None
if "." in name:
name, chained_methods = name.split(".", 1)
name = self._update_name_if_mangled(name)
self._ensure_object_has_named_attribute(name)
if chained_methods:
if not isinstance(self._object, Mock) and not hasattr(
getattr(self._object, name), "__call__"
):
                # Create a partial mock if the given attribute is not callable;
                # this allows chaining attributes
return_value = _create_partial_mock(getattr(self._object, name))
else:
return_value = Mock()
self._create_expectation(name, return_value)
return return_value.should_receive(chained_methods)
return self._create_expectation(name)
def _update_name_if_mangled(self, name: str) -> str:
"""This allows flexmock to mock methods with name mangling."""
if name.startswith("__") and not name.endswith("__") and not inspect.ismodule(self._object):
class_name: str
if inspect.isclass(self._object):
class_name = self._object.__name__
else:
class_name = self._object.__class__.__name__
name = f"_{class_name.lstrip('_')}__{name.lstrip('_')}"
return name
def _ensure_object_has_named_attribute(self, name: str) -> None:
if not isinstance(self._object, Mock) and not self._hasattr(self._object, name):
if hasattr(self._object, "__name__"):
obj_name = self._object.__name__
else:
obj_name = str(self._object)
raise FlexmockError(f"{obj_name} does not have attribute '{name}'")
def _hasattr(self, obj: Any, name: str) -> bool:
"""Ensure hasattr checks don't create side-effects for properties."""
if not inspect.isclass(obj) and hasattr(obj, "__dict__") and name not in obj.__dict__:
if name in DEFAULT_CLASS_ATTRIBUTES:
return False # avoid false positives for things like __call__
return hasattr(obj.__class__, name)
return hasattr(obj, name)
def should_call(self, name: str) -> "Expectation":
"""Creates a spy.
This means that the original method will be called rather than the fake
version. However, we can still keep track of how many times it's called and
with what arguments, and apply expectations accordingly.
should_call is meaningless/not allowed for non-callable attributes.
Args:
- name: string name of the method
Returns:
- Expectation object
"""
if isinstance(self._object, Mock) and not hasattr(self._object, name):
raise FlexmockError(
f"Mock object does not have attribute '{name}'. "
f'Did you mean to call should_receive("{name}") instead?'
)
expectation = self.should_receive(name)
return expectation.replace_with(expectation.__dict__["_original"])
def new_instances(self, *kargs: Any) -> "Expectation":
"""Overrides __new__ method on the class to return custom objects.
Alias for should_receive('__new__').and_return(kargs).one_by_one
Args:
- kargs: objects to return on each successive call to __new__
Returns:
- Expectation object
"""
if inspect.isclass(self._object):
return self.should_receive("__new__").and_return(kargs).one_by_one()
raise FlexmockError("new_instances can only be called on a class mock")
def _create_expectation(self, name: str, return_value: Optional[Any] = None) -> "Expectation":
expectation = self._get_or_create_expectation(name, return_value)
FlexmockContainer.add_expectation(self, expectation)
if _isproperty(self._object, name):
self._update_property(expectation, name)
elif (
isinstance(self._object, Mock)
or hasattr(getattr(self._object, name), "__call__")
or inspect.isclass(getattr(self._object, name))
):
self._update_method(expectation, name)
else:
self._update_attribute(expectation, name, return_value)
return expectation
def _get_or_create_expectation(
self, name: str, return_value: Optional[Any] = None
) -> "Expectation":
saved_expectations = FlexmockContainer.get_expectations_with_name(self, name)
if saved_expectations:
# If there is already an expectation for the same name, get the
# original object from the FIRST saved expectation.
return Expectation(
self._object,
name=name,
return_value=return_value,
original=saved_expectations[0].__dict__.get("_original"),
method_type=saved_expectations[0].__dict__.get("_method_type"),
)
return Expectation(self._object, name=name, return_value=return_value)
def _create_placeholder_mock_for_proper_teardown(
self, obj: Any, name: str, original: Any
) -> None:
"""Ensures that the given function is replaced on teardown."""
mock = Mock()
mock._object = obj
expectation = Expectation(obj, name=name, original=original)
FlexmockContainer.add_expectation(mock, expectation)
def _update_method(self, expectation: "Expectation", name: str) -> None:
method_instance = self._create_mock_method(name)
if self._hasattr(self._object, name) and not hasattr(expectation, "_original"):
expectation._update_original(name, self._object)
expectation._method_type = self._get_method_type(name, expectation._original)
if expectation._method_type in SPECIAL_METHODS:
expectation._original_function = getattr(self._object, name)
if not inspect.isclass(self._object) or expectation._method_type in SPECIAL_METHODS:
method_instance = types.MethodType(method_instance, self._object)
expectation._local_override = _setattr(self._object, name, method_instance)
if (
expectation._local_override
and not inspect.isclass(self._object)
and not isinstance(self._object, Mock)
and hasattr(self._object.__class__, name)
):
self._update_class_for_magic_builtins(name)
def _get_method_type(self, name: str, method: Callable[..., Any]) -> Any:
"""Get method type of the original method.
Method type is saved because after mocking the base class, it is difficult to determine
the original method type.
"""
method_type = self._get_saved_method_type(name, method)
if method_type is not None:
return method_type
if _is_class_method(method, name):
method_type = classmethod
elif _is_static_method(self._object, name):
method_type = staticmethod
else:
method_type = type(method)
setattr(self._object, f"{name}__flexmock__method_type", method_type)
return method_type
def _get_saved_method_type(self, name: str, method: Callable[..., Any]) -> Optional[Any]:
"""Check method type of the original method if it was saved to the class or base class."""
bound_to = getattr(method, "__self__", None)
if bound_to is not None and inspect.isclass(bound_to):
# Check if the method type was saved in a base class
for cls in inspect.getmro(bound_to):
method_type = vars(cls).get(f"{name}__flexmock__method_type")
if method_type:
return method_type
return None
def _update_class_for_magic_builtins(self, name: str) -> None:
"""Fixes method resolution order for built-in methods.
Replacing magic builtins on instances has no effect as the one attached
to the class takes precedence. To work around it, we update the class'
method to check if the instance in question has one in its own __dict__
and call that instead.
"""
if not (name.startswith("__") and name.endswith("__") and len(name) > 4):
return
original = getattr(self._object.__class__, name)
def updated(self: Any, *kargs: Any, **kwargs: Any) -> Any:
if (
hasattr(self, "__dict__")
and isinstance(self.__dict__, dict)
and name in self.__dict__
):
return self.__dict__[name](*kargs, **kwargs)
return original(self, *kargs, **kwargs)
setattr(self._object.__class__, name, updated)
if updated.__code__ != original.__code__:
self._create_placeholder_mock_for_proper_teardown(
self._object.__class__, name, original
)
def _update_attribute(
self, expectation: "Expectation", name: str, return_value: Optional[Any] = None
) -> None:
expectation._callable = False
if self._hasattr(self._object, name) and not hasattr(expectation, "_original"):
expectation._update_original(name, self._object)
expectation._local_override = _setattr(self._object, name, return_value)
def _update_property(self, expectation: "Expectation", name: str) -> None:
new_name = f"_flexmock__{name}"
obj = self._object
if not inspect.isclass(obj):
obj = obj.__class__
expectation._callable = False
original = getattr(obj, name)
@property # type: ignore
def updated(self: Any) -> Any:
if (
hasattr(self, "__dict__")
and isinstance(self.__dict__, dict)
and name in self.__dict__
):
return self.__dict__[name]
# Return original for instances that are not mocked
return getattr(self, new_name)
setattr(obj, name, updated)
if not hasattr(obj, new_name):
# don't try to double update
FlexmockContainer.add_teardown_property(obj, new_name)
setattr(obj, new_name, original)
self._create_placeholder_mock_for_proper_teardown(obj, name, original)
def _create_mock_method(self, name: str) -> Callable[..., Any]:
def _handle_exception_matching(expectation: Expectation) -> None:
# pylint: disable=misplaced-bare-raise
return_values = _getattr(expectation, "_return_values")
if return_values:
raised, instance = sys.exc_info()[:2]
assert raised, "no exception was raised"
message = str(instance)
expected = return_values[0].raises
if not expected:
raise
args = return_values[0].value
if inspect.isclass(expected):
expected_instance = expected(*args["kargs"], **args["kwargs"])
expected_message = str(expected_instance)
if expected is not raised and expected not in raised.__bases__:
raise ExceptionClassError(
f"Raised exception for call {expectation._name} "
"did not match expectation:\n"
f" Expected:\t{expected}\n"
f" Raised:\t{raised}"
)
if args["kargs"] and isinstance(args["kargs"][0], RE_TYPE):
if not args["kargs"][0].search(message):
raise ExceptionMessageError(
f"Error message mismatch with raised {expected.__name__}:\n"
f" Expected pattern:\n\t/{args['kargs'][0].pattern}/\n"
f" Received message:\n\t'{message}'"
)
elif expected_message and expected_message != message:
raise (
ExceptionMessageError(
f"Error message mismatch with raised {expected.__name__}:\n"
f" Expected message:\n\t'{message}'\n"
f" Received message:\n\t'{expected_message}'"
)
)
elif expected is not raised:
raise ExceptionClassError(
f"Raised exception for call {expectation._name} "
f"did not match expectation:\n"
f" Expected:\t{repr(expected)}\n"
f" Raised:\t{raised}\n\n"
"Did you try to call and_raise with an instance?\n"
'Instead of and_raise(Exception("arg")), try and_raise(Exception, "arg")'
)
else:
raise
def match_return_values(expected: Any, received: Any) -> bool:
if not isinstance(expected, tuple):
expected = (expected,)
if not isinstance(received, tuple):
received = (received,)
if len(received) != len(expected):
return False
for i, val in enumerate(received):
if not _arguments_match(val, expected[i]):
return False
return True
def pass_thru(
expectation: Expectation, runtime_self: Any, *kargs: Any, **kwargs: Any
) -> Any:
return_values = None
try:
original = _getattr(expectation, "_original")
_mock = _getattr(expectation, "_mock")
if inspect.isclass(_mock):
if expectation._method_type in SPECIAL_METHODS:
original = _getattr(expectation, "_original_function")
return_values = original(*kargs, **kwargs)
else:
return_values = original(runtime_self, *kargs, **kwargs)
else:
return_values = original(*kargs, **kwargs)
except Exception:
return _handle_exception_matching(expectation)
expected_values = _getattr(expectation, "_return_values")
if expected_values and not match_return_values(expected_values[0].value, return_values):
expected_value = expected_values[0].value
# Display strings with quotes in the error message
if isinstance(return_values, str):
return_values = repr(return_values)
if isinstance(expected_value, str):
expected_value = repr(expected_value)
raise (
MethodSignatureError(
f"Returned values for call {expectation._name} did not match expectation:\n"
f" Expected:\t{expected_value}\n"
f" Returned:\t{return_values}"
)
)
return return_values
def _handle_matched_expectation(
expectation: Expectation, runtime_self: Any, *kargs: Any, **kwargs: Any
) -> Any:
if not expectation._runnable():
raise StateError(
f"{name} expected to be called when {expectation._get_runnable()} is True"
)
expectation._times_called += 1
expectation._verify(final=False)
_pass_thru = _getattr(expectation, "_pass_thru")
_replace_with = _getattr(expectation, "_replace_with")
if _pass_thru:
return pass_thru(expectation, runtime_self, *kargs, **kwargs)
if _replace_with:
return _replace_with(*kargs, **kwargs)
return_values = _getattr(expectation, "_return_values")
if return_values:
return_value = return_values[0]
del return_values[0]
return_values.append(return_value)
else:
return_value = ReturnValue()
if return_value.raises:
if inspect.isclass(return_value.raises):
raise return_value.raises(
*return_value.value["kargs"], **return_value.value["kwargs"]
)
raise return_value.raises # pylint: disable=raising-bad-type
return return_value.value
def mock_method(runtime_self: Any, *kargs: Any, **kwargs: Any) -> Any:
arguments = {"kargs": kargs, "kwargs": kwargs}
expectation = FlexmockContainer.get_flexmock_expectation(self, name, arguments)
if expectation:
return _handle_matched_expectation(expectation, runtime_self, *kargs, **kwargs)
# inform the user which expectation(s) for the method were _not_ matched
saved_expectations = reversed(FlexmockContainer.get_expectations_with_name(self, name))
error_msg = (
f"Arguments for call {name} did not match expectations:\n"
f" Received call:\t{_format_args(name, arguments)}\n"
)
if saved_expectations:
error_msg += "\n".join(
f" Expected call[{index}]:\t{_format_args(name, expectation._args)}"
for index, expectation in enumerate(saved_expectations, 1)
)
raise MethodSignatureError(error_msg)
return mock_method
def flexmock_teardown() -> None:
"""Performs flexmock-specific teardown tasks."""
saved = {}
instances = []
classes = []
for mock_object, expectations in FlexmockContainer.flexmock_objects.items():
saved[mock_object] = expectations[:]
for expectation in expectations:
_getattr(expectation, "_reset")()
for expectation in expectations:
# Remove method type attributes set by flexmock. This needs to be done after
# resetting all the expectations because method type is needed in expectation teardown.
if inspect.isclass(mock_object) or hasattr(mock_object, "__class__"):
try:
delattr(mock_object._object, f"{expectation._name}__flexmock__method_type")
except (AttributeError, TypeError):
pass
for mock in saved:
obj = mock._object
if not isinstance(obj, Mock) and not inspect.isclass(obj):
instances.append(obj)
if inspect.isclass(obj):
classes.append(obj)
for obj in instances + classes:
for attr in UPDATED_ATTRS:
try:
obj_dict = obj.__dict__
if obj_dict[attr].__code__ is Mock.__dict__[attr].__code__:
del obj_dict[attr]
except Exception:
try:
if getattr(obj, attr).__code__ is Mock.__dict__[attr].__code__:
delattr(obj, attr)
except AttributeError:
pass
FlexmockContainer.teardown_properties()
FlexmockContainer.reset()
# make sure this is done last to keep exceptions here from breaking
# any of the previous steps that cleanup all the changes
for mock_object, expectations in saved.items():
for expectation in expectations:
_getattr(expectation, "_verify")()
class Expectation:
"""Holds expectations about methods.
The information contained in the Expectation object includes method name,
its argument list, return values, and any exceptions that the method might
raise.
"""
def __init__(
self,
mock: Mock,
name: Optional[str] = None,
return_value: Optional[Any] = None,
original: Optional[Any] = None,
method_type: Optional[Any] = None,
) -> None:
if original is not None:
self._original = original
self._name = name
self._times_called: int = 0
self._modifier: str = EXACTLY
self._args: Optional[Dict[str, Any]] = None
self._method_type = method_type
self._argspec: Optional[inspect.FullArgSpec] = None
self._return_values = [ReturnValue(return_value)] if return_value is not None else []
self._replace_with: Optional[Callable[..., Any]] = None
self._original_function: Optional[Callable[..., Any]] = None
self._expected_calls = {EXACTLY: None, AT_LEAST: None, AT_MOST: None}
self._runnable: Callable[..., bool] = lambda: True
self._mock = mock
self._pass_thru = False
self._ordered = False
self._one_by_one = False
self._verified = False
self._callable = True
self._local_override = False
def __str__(self) -> str:
args = _format_args(str(self._name), self._args)
return_values = ", ".join(str(x) for x in self._return_values)
return f"{args} -> ({return_values})"
def __call__(self) -> "Expectation":
return self
def __getattribute__(self, name: str) -> Any:
if name == "once":
return _getattr(self, "times")(1)
if name == "twice":
return _getattr(self, "times")(2)
if name == "never":
return _getattr(self, "times")(0)
if name in ("at_least", "at_most", "ordered", "one_by_one"):
return _getattr(self, name)()
if name == "mock":
return _getattr(self, "mock")()
return _getattr(self, name)
def __getattr__(self, name: str) -> NoReturn:
self.__raise(
AttributeError, f"'{self.__class__.__name__}' object has not attribute '{name}'"
)
def _get_runnable(self) -> str:
"""Ugly hack to get the name of when() condition from the source code."""
name = "condition"
try:
source = inspect.getsource(self._runnable)
if "when(" in source:
name = source.split("when(")[1].split(")")[0]
elif "def " in source:
name = source.split("def ")[1].split("(")[0]
except Exception:
# couldn't get the source, oh well
pass
return name
def _verify_signature_match(self, *kargs: Any, **kwargs: Any) -> None:
if isinstance(self._mock, Mock):
return # no sense in enforcing this for fake objects
allowed = self._argspec
args_len = len(allowed.args)
# self is the first expected argument
has_self = allowed.args and allowed.args[0] == "self"
# Builtin methods take `self` as the first argument but `inspect.ismethod` returns False
# so we need to check for them explicitly
is_builtin_method = isinstance(self._original, BuiltinMethodType) and has_self
# Methods take `self` if not a staticmethod
is_method = inspect.ismethod(self._original) and self._method_type is not staticmethod
# Class init takes `self`
is_class = inspect.isclass(self._original)
        # Class methods, and instance methods called on a class, take `cls`
is_class_method = (
inspect.isfunction(self._original)
and inspect.isclass(self._mock)
and self._method_type is not staticmethod
)
if is_builtin_method or is_method or is_class or is_class_method:
# Do not count `self` or `cls`.
args_len -= 1
minimum = args_len - (allowed.defaults and len(allowed.defaults) or 0)
maximum = None
if allowed.varargs is None and allowed.varkw is None:
maximum = args_len
total_positional = len(kargs + tuple(a for a in kwargs if a in allowed.args))
named_optionals = [
a
for a in kwargs
if allowed.defaults
if a in allowed.args[len(allowed.args) - len(allowed.defaults) :]
]
if allowed.defaults and total_positional == minimum and named_optionals:
minimum += len(named_optionals)
if total_positional < minimum:
arguments = "argument" if minimum == 1 else "arguments"
raise MethodSignatureError(
f"{self._name} requires at least {minimum} {arguments}, "
f"expectation provided {total_positional}"
)
if maximum is not None and total_positional > maximum:
arguments = "argument" if maximum == 1 else "arguments"
raise MethodSignatureError(
f"{self._name} requires at most {maximum} {arguments}, "
f"expectation provided {total_positional}"
)
if args_len == len(kargs) and any(a for a in kwargs if a in allowed.args):
given_args = [a for a in kwargs if a in allowed.args]
arguments = "argument" if len(given_args) == 1 else "arguments"
raise MethodSignatureError(
f"{given_args} already given as positional {arguments} to {self._name}"
)
if not allowed.varkw and any(
a for a in kwargs if a not in allowed.args + allowed.kwonlyargs
):
invalid_arg = [a for a in kwargs if a not in allowed.args + allowed.kwonlyargs][0]
raise MethodSignatureError(
f"{invalid_arg} is not a valid keyword argument to {self._name}"
)
# check that kwonlyargs that don't have default value specified are provided
required_kwonlyargs = [
a for a in allowed.kwonlyargs if a not in (allowed.kwonlydefaults or {})
]
missing_kwonlyargs = [a for a in required_kwonlyargs if a not in kwargs]
if missing_kwonlyargs:
arguments = "argument" if len(missing_kwonlyargs) == 1 else "arguments"
missing_args = '", "'.join(missing_kwonlyargs)
raise MethodSignatureError(
f'{self._name} requires keyword-only {arguments} "{missing_args}"'
)
def _update_original(self, name: str, obj: Any) -> None:
if hasattr(obj, "__dict__") and name in obj.__dict__:
self._original = obj.__dict__[name]
else:
self._original = getattr(obj, name)
self._update_argspec()
def _update_argspec(self) -> None:
original = self.__dict__.get("_original")
if original:
try:
self._argspec = inspect.getfullargspec(original)
except TypeError:
# built-in function: fall back to stupid processing and hope the
# builtins don't change signature
pass
def _normalize_named_args(self, *kargs: Any, **kwargs: Any) -> Dict[str, Any]:
argspec = self._argspec
default = {"kargs": kargs, "kwargs": kwargs}
if not argspec:
return default
ret: Dict[str, Any] = {"kargs": (), "kwargs": kwargs}
if inspect.ismethod(self._original):
args = argspec.args[1:]
else:
args = argspec.args
for i, arg in enumerate(kargs):
if len(args) <= i:
return default
ret["kwargs"][args[i]] = arg
return ret
def __raise(self, exception: Type[Exception], message: str) -> NoReturn:
"""Safe internal raise implementation.
In case we're patching builtins, it's important to reset the
expectation before raising any exceptions or else things like
open() might be stubbed out and the resulting runner errors are very
difficult to diagnose.
"""
self._reset()
raise exception(message)
def _match_args(self, given_args: Any) -> bool:
"""Check if the set of given arguments matches this expectation."""
expected_args = self._args
given_args = self._normalize_named_args(*given_args["kargs"], **given_args["kwargs"])
if expected_args == given_args or expected_args is None:
return True
if (
len(given_args["kargs"]) != len(expected_args["kargs"])
or len(given_args["kwargs"]) != len(expected_args["kwargs"])
or (sorted(given_args["kwargs"].keys()) != sorted(expected_args["kwargs"].keys()))
):
return False
for i, arg in enumerate(given_args["kargs"]):
if not _arguments_match(arg, expected_args["kargs"][i]):
return False
for key, value in given_args["kwargs"].items():
if not _arguments_match(value, expected_args["kwargs"][key]):
return False
return True
def mock(self) -> Mock:
"""Return the mock associated with this expectation."""
return self._mock
def with_args(self, *kargs: Any, **kwargs: Any) -> "Expectation":
"""Override the arguments used to match this expectation's method.
Args:
        - kargs: optional positional arguments
        - kwargs: optional keyword arguments
Returns:
- self, i.e. can be chained with other Expectation methods
"""
if not self._callable:
self.__raise(FlexmockError, "can't use with_args() with attribute stubs")
self._update_argspec()
if self._argspec:
# do this outside try block as TypeError is way too general and catches
# unrelated errors in the verify signature code
self._verify_signature_match(*kargs, **kwargs)
self._args = self._normalize_named_args(*kargs, **kwargs)
else:
self._args = {"kargs": kargs, "kwargs": kwargs}
return self
def and_return(self, *values: Any) -> "Expectation":
"""Override the return value of this expectation's method.
When and_return is given multiple times, each value provided is returned
on successive invocations of the method. It is also possible to mix
and_return with and_raise in the same manner to alternate between returning
        a value and raising an exception on different method invocations.
When combined with the one_by_one property, value is treated as a list of
values to be returned in the order specified by successive calls to this
method rather than a single list to be returned each time.
Args:
- values: optional list of return values, defaults to None if not given
Returns:
- self, i.e. can be chained with other Expectation methods
"""
if not values:
value = None
elif len(values) == 1:
value = values[0]
else:
value = values
if not self._callable:
_setattr(self._mock, str(self._name), value)
return self
return_values = _getattr(self, "_return_values")
if not _getattr(self, "_one_by_one"):
value = ReturnValue(value)
return_values.append(value)
else:
try:
return_values.extend([ReturnValue(v) for v in value]) # type: ignore
except TypeError:
return_values.append(ReturnValue(value))
return self
def times(self, number: int) -> "Expectation":
"""Number of times this expectation's method is expected to be called.
There are also 3 aliases for the times() method:
- once() -> times(1)
- twice() -> times(2)
- never() -> times(0)
Args:
- number: int
Returns:
- self, i.e. can be chained with other Expectation methods
"""
if not self._callable:
self.__raise(FlexmockError, "can't use times() with attribute stubs")
expected_calls = _getattr(self, "_expected_calls")
modifier = _getattr(self, "_modifier")
expected_calls[modifier] = number
return self
def one_by_one(self) -> "Expectation":
"""Modifies the return value to be treated as a list of return values.
Each value in the list is returned on successive invocations of the method.
Returns:
- self, i.e. can be chained with other Expectation methods
"""
if not self._callable:
self.__raise(FlexmockError, "can't use one_by_one() with attribute stubs")
if not self._one_by_one:
self._one_by_one = True
return_values = _getattr(self, "_return_values")
saved_values = return_values[:]
self._return_values = return_values = []
for value in saved_values:
try:
for val in value.value:
return_values.append(ReturnValue(val))
except TypeError:
return_values.append(value)
return self
def at_least(self) -> "Expectation":
"""Modifies the associated times() expectation.
When given, an exception will only be raised if the method is called less
than times() specified. Does nothing if times() is not given.
Returns:
- self, i.e. can be chained with other Expectation methods
"""
if not self._callable:
self.__raise(FlexmockError, "can't use at_least() with attribute stubs")
expected_calls = _getattr(self, "_expected_calls")
modifier = _getattr(self, "_modifier")
if expected_calls[AT_LEAST] is not None or modifier == AT_LEAST:
self.__raise(FlexmockError, "cannot use at_least modifier twice")
if modifier == AT_MOST and expected_calls[AT_MOST] is None:
self.__raise(FlexmockError, "cannot use at_least with at_most unset")
self._modifier = AT_LEAST
return self
def at_most(self) -> "Expectation":
"""Modifies the associated "times" expectation.
When given, an exception will only be raised if the method is called more
than times() specified. Does nothing if times() is not given.
Returns:
- self, i.e. can be chained with other Expectation methods
"""
if not self._callable:
self.__raise(FlexmockError, "can't use at_most() with attribute stubs")
expected_calls = _getattr(self, "_expected_calls")
modifier = _getattr(self, "_modifier")
if expected_calls[AT_MOST] is not None or modifier == AT_MOST:
self.__raise(FlexmockError, "cannot use at_most modifier twice")
if modifier == AT_LEAST and expected_calls[AT_LEAST] is None:
self.__raise(FlexmockError, "cannot use at_most with at_least unset")
self._modifier = AT_MOST
return self
def ordered(self) -> "Expectation":
"""Makes the expectation respect the order of should_receive statements.
An exception will be raised if methods are called out of order, determined
by order of should_receive calls in the test.
Returns:
- self, i.e. can be chained with other Expectation methods
"""
if not self._callable:
self.__raise(FlexmockError, "can't use ordered() with attribute stubs")
self._ordered = True
FlexmockContainer.ordered.append(self)
return self
def when(self, func: Callable[..., Any]) -> "Expectation":
"""Sets an outside resource to be checked before executing the method.
Args:
- func: function to call to check if the method should be executed
Returns:
- self, i.e. can be chained with other Expectation methods
"""
if not self._callable:
self.__raise(FlexmockError, "can't use when() with attribute stubs")
if not hasattr(func, "__call__"):
self.__raise(FlexmockError, "when() parameter must be callable")
self._runnable = func
return self
def and_raise(self, exception: Exception, *kargs: Any, **kwargs: Any) -> "Expectation":
"""Specifies the exception to be raised when this expectation is met.
Args:
- exception: class or instance of the exception
        - kargs: optional positional arguments to pass to the exception
        - kwargs: optional keyword arguments to pass to the exception
Returns:
- self, i.e. can be chained with other Expectation methods
"""
if not self._callable:
self.__raise(FlexmockError, "can't use and_raise() with attribute stubs")
args = {"kargs": kargs, "kwargs": kwargs}
return_values = _getattr(self, "_return_values")
return_values.append(ReturnValue(raises=exception, value=args))
return self
def replace_with(self, function: Callable[..., Any]) -> "Expectation":
"""Gives a function to run instead of the mocked out one.
Args:
- function: callable
Returns:
- self, i.e. can be chained with other Expectation methods
"""
if not self._callable:
self.__raise(FlexmockError, "can't use replace_with() with attribute/property stubs")
replace_with = _getattr(self, "_replace_with")
original = self.__dict__.get("_original")
if replace_with:
self.__raise(FlexmockError, "replace_with cannot be specified twice")
if function == original:
self._pass_thru = True
self._replace_with = function
return self
def and_yield(self, *kargs: Any) -> "Expectation":
"""Specifies the list of items to be yielded on successive method calls.
In effect, the mocked object becomes a generator.
Returns:
- self, i.e. can be chained with other Expectation methods
"""
if not self._callable:
self.__raise(FlexmockError, "can't use and_yield() with attribute stubs")
return self.and_return(iter(kargs))
def _verify(self, final: bool = True) -> None:
"""Verify that this expectation has been met.
Args:
final: boolean, True if no further calls to this method expected
(skip checking at_least expectations when False)
Raises:
MethodCallError Exception
"""
failed, message = self._verify_number_of_calls(final)
if failed and not self._verified:
self._verified = True
self.__raise(
MethodCallError,
(
f"{_format_args(str(self._name), self._args)} expected to be called "
f"{message}, called {self._times_called} "
f"{'time' if self._times_called == 1 else 'times'}"
),
)
def _verify_number_of_calls(self, final: bool) -> Tuple[bool, str]:
failed = False
message = ""
expected_calls = _getattr(self, "_expected_calls")
times_called = _getattr(self, "_times_called")
if expected_calls[EXACTLY] is not None:
message = f"exactly {expected_calls[EXACTLY]}"
if final:
if times_called != expected_calls[EXACTLY]:
failed = True
else:
if times_called > expected_calls[EXACTLY]:
failed = True
message += " time" if expected_calls[EXACTLY] == 1 else " times"
else:
if final and expected_calls[AT_LEAST] is not None:
message = f"at least {expected_calls[AT_LEAST]}"
if times_called < expected_calls[AT_LEAST]:
failed = True
message += " time" if expected_calls[AT_LEAST] == 1 else " times"
if expected_calls[AT_MOST] is not None:
if message:
message += " and "
message += f"at most {expected_calls[AT_MOST]}"
if times_called > expected_calls[AT_MOST]:
failed = True
message += " time" if expected_calls[AT_MOST] == 1 else " times"
return failed, message
def _reset(self) -> None:
"""Returns the methods overriden by this expectation to their originals."""
_mock = _getattr(self, "_mock")
if not isinstance(_mock, Mock):
original = self.__dict__.get("_original")
if original:
# name may be unicode but pypy demands dict keys to be str
name = str(_getattr(self, "_name"))
if hasattr(_mock, "__dict__") and name in _mock.__dict__ and self._local_override:
delattr(_mock, name)
elif (
hasattr(_mock, "__dict__")
and name in _mock.__dict__
and isinstance(_mock.__dict__, dict)
):
_mock.__dict__[name] = original
else:
setattr(_mock, name, original)
del self
class FlexmockContainer:
"""Holds global hash of object/expectation mappings."""
flexmock_objects: Dict[Mock, List[Expectation]] = {}
properties: Dict[Any, List[str]] = {}
ordered: List[Expectation] = []
last: Optional[Expectation] = None
@classmethod
def reset(cls) -> None:
"""Reset flexmock state."""
cls.ordered = []
cls.last = None
cls.flexmock_objects = {}
cls.properties = {}
@classmethod
def get_flexmock_expectation(
cls, obj: Mock, name: Optional[str] = None, args: Optional[Any] = None
) -> Optional[Expectation]:
"""Retrieves an existing matching expectation."""
if args is None:
args = {"kargs": (), "kwargs": {}}
if not isinstance(args, dict):
args = {"kargs": args, "kwargs": {}}
if not isinstance(args["kargs"], tuple):
args["kargs"] = (args["kargs"],)
if name and obj in cls.flexmock_objects:
found = None
for expectation in reversed(cls.flexmock_objects[obj]):
if expectation._name == name and expectation._match_args(args):
if expectation in cls.ordered or not expectation._ordered and not found:
found = expectation
if found and found._ordered:
cls._verify_call_order(found, args)
return found
return None
@classmethod
def _verify_call_order(cls, expectation: Expectation, args: Dict[str, Any]) -> None:
if not cls.ordered:
next_method = cls.last
else:
next_method = cls.ordered.pop(0)
cls.last = next_method
if expectation is not next_method and next_method is not None:
raise CallOrderError(
f"{_format_args(str(expectation._name), args)} called before "
f"{_format_args(str(next_method._name), next_method._args)}"
)
@classmethod
def add_expectation(cls, obj: Mock, expectation: Expectation) -> None:
"""Add expectation."""
if obj in cls.flexmock_objects:
cls.flexmock_objects[obj].append(expectation)
else:
cls.flexmock_objects[obj] = [expectation]
@classmethod
def get_expectations_with_name(cls, obj: Mock, name: str) -> List[Expectation]:
"""Get all expectations for given name."""
return [x for x in FlexmockContainer.flexmock_objects.get(obj, []) if x._name == name]
@classmethod
def add_teardown_property(cls, obj: Any, name: str) -> None:
"""Add teardown property."""
if obj in cls.properties:
cls.properties[obj].append(name)
else:
cls.properties[obj] = [name]
@classmethod
def teardown_properties(cls) -> None:
"""Teardown properties."""
for obj, names in cls.properties.items():
for name in names:
delattr(obj, name)
def flexmock(spec: Optional[Any] = None, **kwargs: Any) -> Mock:
"""Main entry point into the flexmock API.
This function is used to either generate a new fake object or take
an existing object (or class or module) and use it as a basis for
a partial mock. In case of a partial mock, the passed in object
is modified to support basic Mock class functionality making
it unnecessary to make successive flexmock() calls on the same
objects to generate new expectations.
Examples:
>>> flexmock(SomeClass)
>>> SomeClass.should_receive('some_method')
NOTE: it's safe to call flexmock() on the same object, it will detect
when an object has already been partially mocked and return it each time.
Args:
- spec: object (or class or module) to mock
- kwargs: method/return_value pairs to attach to the object
Returns:
Mock object if no spec is provided. Otherwise return the spec object.
"""
if spec is not None:
return _create_partial_mock(spec, **kwargs)
# use this intermediate class to attach properties
klass = type("MockClass", (Mock,), {})
return klass(**kwargs) # type: ignore
def _getattr(obj: object, name: str) -> Any:
"""Convenience wrapper to work around custom __getattribute__."""
return object.__getattribute__(obj, name)
def _arg_to_str(arg: Any) -> str:
if isinstance(arg, RE_TYPE):
return f"/{arg.pattern}/"
if isinstance(arg, str):
return f'"{arg}"'
return f"{arg}"
def _format_args(name: str, arguments: Optional[Dict[str, Any]]) -> str:
if arguments is None:
arguments = {"kargs": (), "kwargs": {}}
kargs = ", ".join(_arg_to_str(arg) for arg in arguments["kargs"])
kwargs = ", ".join(f"{k}={_arg_to_str(v)}" for k, v in arguments["kwargs"].items())
if kargs and kwargs:
args = f"{kargs}, {kwargs}"
else:
args = f"{kargs}{kwargs}"
return f"{name}({args})"
def _create_partial_mock(obj_or_class: Any, **kwargs: Any) -> Mock:
"""Create partial mock."""
matches = [x for x in FlexmockContainer.flexmock_objects if x._object is obj_or_class]
if matches:
mock = matches[0]
else:
mock = Mock()
mock._object = obj_or_class
for name, return_value in kwargs.items():
if hasattr(return_value, "__call__"):
mock.should_receive(name).replace_with(return_value)
else:
mock.should_receive(name).and_return(return_value)
if not matches:
FlexmockContainer.add_expectation(mock, Expectation(obj_or_class))
if _attach_flexmock_methods(mock, Mock, obj_or_class) and not inspect.isclass(mock._object):
mock = mock._object
return mock
def _attach_flexmock_methods(mock: Mock, flexmock_class: Type[Mock], obj: Any) -> bool:
try:
for attr in UPDATED_ATTRS:
if hasattr(obj, attr):
if getattr(obj, attr).__code__ is not getattr(flexmock_class, attr).__code__:
return False
for attr in UPDATED_ATTRS:
_setattr(obj, attr, getattr(mock, attr))
except TypeError as exc:
raise MockBuiltinError(
"Python does not allow you to mock builtin objects or modules. "
"Consider wrapping it in a class you can mock instead"
) from exc
except AttributeError as exc:
raise MockBuiltinError(
"Python does not allow you to mock instances of builtin objects. "
"Consider wrapping it in a class you can mock instead"
) from exc
return True
def _arguments_match(arg: Any, expected_arg: Any) -> bool:
if expected_arg == arg:
return True
if inspect.isclass(expected_arg) and isinstance(arg, expected_arg):
return True
if isinstance(expected_arg, RE_TYPE) and expected_arg.search(arg):
return True
return False
def _setattr(obj: Any, name: str, value: Any) -> bool:
"""Ensure we use local __dict__ where possible."""
local_override = False
if hasattr(obj, "__dict__") and isinstance(obj.__dict__, dict):
if name not in obj.__dict__:
# Overriding attribute locally on an instance.
local_override = True
obj.__dict__[name] = value
else:
if inspect.isclass(obj) and not vars(obj).get(name):
# Overriding derived attribute locally on a child class.
local_override = True
setattr(obj, name, value)
return local_override
def _isproperty(obj: Any, name: str) -> bool:
if isinstance(obj, Mock):
return False
if not inspect.isclass(obj) and hasattr(obj, "__dict__") and name not in obj.__dict__:
attr = getattr(obj.__class__, name)
if isinstance(attr, property):
return True
elif inspect.isclass(obj):
attr = getattr(obj, name)
if isinstance(attr, property):
return True
return False
def _is_class_method(method: Callable[..., Any], name: str) -> bool:
"""Check if a method is a classmethod.
This function checks all the classes in the class method resolution in order
to get the correct result for derived methods as well.
"""
bound_to = getattr(method, "__self__", None)
if not inspect.isclass(bound_to):
return False
for cls in inspect.getmro(bound_to):
descriptor = vars(cls).get(name)
if descriptor is not None:
return isinstance(descriptor, classmethod)
return False
def _is_static_method(obj: Any, name: str) -> bool:
try:
return isinstance(inspect.getattr_static(obj, name), staticmethod)
except AttributeError:
# AttributeError is raised when mocking a proxied object
if hasattr(obj, "__mro__"):
for cls in inspect.getmro(obj):
descriptor = vars(cls).get(name)
if descriptor is not None:
return isinstance(descriptor, staticmethod)
return False
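# Minimal usage sketch of the API defined above (illustrative only; the Greeter
# class and its method are hypothetical, everything else is the flexmock API):
#
#     class Greeter:
#         def greet(self) -> str:
#             return "hello"
#
#     flexmock(Greeter).should_receive("greet").and_return("mocked").once()
#     assert Greeter().greet() == "mocked"
#     flexmock_teardown()  # verifies the once() expectation and restores Greeter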
|
nilq/baby-python
|
python
|
import bs4
import re
from common import config
# Regular expression definitions
is_well_former_link = re.compile(r'^https?://.+$')
is_root_path = re.compile(r'^/.+$')
def _build_link(host, link):
if is_well_former_link.match(link):
return link
elif is_root_path.match(link):
return '{}{}'.format(host, link)
else:
return '{host}/{uri}'.format(host=host, uri=link)
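# Illustrative behavior of _build_link (host/link values are hypothetical):
#   _build_link('https://example.com', 'https://other.com/a') -> 'https://other.com/a'
#   _build_link('https://example.com', '/section/story')      -> 'https://example.com/section/story'
#   _build_link('https://example.com', 'section/story')       -> 'https://example.com/section/story'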
class NewsPage:
def __init__(self, news_site_uid):
self._config = config()['news_sites'][news_site_uid]
self._queries = self._config['queries']
self._url = self._config['url']
self._html = None
def _select(self, query_string):
return self._html.select(query_string)
def _select_list(self, query_string_list):
results = []
for query_string in query_string_list:
results = results + self._html.select(query_string)
return results
@property
def url_csv(self):
return self._url
async def visit(self, session):
async with session.get(self._url) as response:
text = await response.text()
self._html = bs4.BeautifulSoup(text, 'html.parser')
class HomePage(NewsPage):
def __init__(self, news_site_uid):
super().__init__(news_site_uid)
@property
def article_links(self):
link_list = []
for link in self._select_list(self._queries['homepage_article_links']):
if link and link.has_attr('href'):
link_list.append(link)
return set(link['href'] for link in link_list)
class ArticlePage(NewsPage):
def __init__(self, news_site_uid, article_url):
super().__init__(news_site_uid)
self._url = _build_link(self._url, article_url)
@property
def body_csv(self):
results = self._select(self._queries['article_body'])
text = ''
for result in results:
text += result.text
return text
@property
def title_csv(self):
result = self._select(self._queries['article_title'])
return result[0].text if len(result) else ''
|
nilq/baby-python
|
python
|
'''
Created on Apr 4, 2016
@author: Noe
'''
class MyClass(object):
'''
classdocs
'''
def __init__(self, params):
'''
Constructor
'''
|
nilq/baby-python
|
python
|
#!/usr/bin/python
from __future__ import absolute_import, division, print_function, unicode_literals
import pi3d
try:
    import configparser as ConfigParser  # Python 3
except ImportError:
    import ConfigParser  # Python 2
from PIL import Image
import sys
#read config
Config = ConfigParser.ConfigParser()
Config.read("config.ini")
xloc = int(Config.get("client",'x_offset'))
yloc = int(Config.get("client",'y_offset'))
x_virtual = int(Config.get("client",'x_virtual'))
y_virtual = int(Config.get("client",'y_virtual'))
ifile = Config.get("client","default_image")
im = Image.open(ifile)
xsize,ysize = im.size
zindex = 5
DISPLAY = pi3d.Display.create(x=0, y=0)
DISPLAY.set_background(0,0,0,0) #black
xloc = xloc + (x_virtual - DISPLAY.width) / 2
yloc = yloc - (y_virtual - DISPLAY.height) / 2
shader = pi3d.Shader("uv_flat")
CAMERA = pi3d.Camera(is_3d=False)
mykeys = pi3d.Keyboard()
sprite = pi3d.ImageSprite(ifile, shader, w=xsize, h=ysize, z=zindex)
while DISPLAY.loop_running():
sprite.position(xloc, yloc, zindex)
sprite.draw()
if mykeys.read() == 27:
mykeys.close()
DISPLAY.destroy()
break
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
def solve(s):
open_p = ('[', '{', '(')
close_p = (']', '}', ')')
pair = dict(zip(close_p, open_p)) # key: close_p
stack = list()
for c in s:
if c in open_p:
stack.append(c)
if c in close_p:
if len(stack) == 0:
print('NO')
return
top = stack.pop()
if top != pair[c]:
print('NO')
return
if len(stack) != 0:
print('NO')
return
print('YES')
return
num_tc = int(sys.stdin.readline())
for _ in range(num_tc):
s = sys.stdin.readline().strip()
solve(s)
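# Example behaviour (illustrative): for the input lines '{[()]}' and '{[(])}',
# solve() prints 'YES' and then 'NO'.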
|
nilq/baby-python
|
python
|
import aiohttp
import os
import pytest
from tokki.travis import TravisClient
from tokki.enums import Status
TOKEN = os.environ["TRAVISCI_TOKEN"]
AGENT = "Tests for Tokki +(https://github.com/ChomusukeBot/Tokki)"
@pytest.mark.asyncio
async def test_no_login():
with pytest.raises(TypeError, match=r": 'token'"):
TravisClient()
@pytest.mark.asyncio
async def test_no_agent():
with pytest.raises(TypeError, match=r": 'useragent'"):
TravisClient(TOKEN)
@pytest.mark.asyncio
async def test_not_found():
with pytest.raises(aiohttp.ClientResponseError) as exception:
client = TravisClient(TOKEN, AGENT)
await client.get_repo("ChomusukeBot/ThisIsAnInvalidRepo")
assert exception.value.status == 404
@pytest.mark.asyncio
async def test_repo():
client = TravisClient(TOKEN, AGENT)
repo = await client.get_repo("ChomusukeBot/TestRepo")
assert repo.name == "TestRepo"
assert repo.site_slug == "ChomusukeBot/TestRepo"
assert repo.repo_slug == "ChomusukeBot/TestRepo"
assert repo.owner == "ChomusukeBot"
assert repo.default_branch == "master"
@pytest.mark.asyncio
async def test_trigger_build():
client = TravisClient(TOKEN, AGENT)
repo = await client.get_repo("ChomusukeBot/TestRepo")
await repo.trigger_build(branch="master", message="Run from Tokki's tests")
@pytest.mark.asyncio
async def test_get_builds():
client = TravisClient(TOKEN, AGENT)
repo = await client.get_repo("ChomusukeBot/TestRepo")
builds = await repo.get_builds(quantity=5)
assert len(builds) == 5
for build in builds:
assert type(build.id) is int
assert type(build.version) is str
assert type(build.status) is Status
assert type(build.branch) is str
|
nilq/baby-python
|
python
|
import argparse
parse = argparse.ArgumentParser(description="test")
parse.add_argument('count', action='store', type=int)
parse.add_argument('units', action='store')
parse.add_argument('priseperunit', action='store')
print(parse.parse_args())
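# Example invocation (illustrative):
#     python this_script.py 3 apples 1.50
# -> Namespace(count=3, units='apples', priseperunit='1.50')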
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import numpy
import cv2
import math
from entities.image import Image
from entities.interfaces.scene_interface import SceneInterface
from entities.aligned.aligned_band import AlignedBand
from entities.aligned.aligned_image import AlignedImage
from entities.aligned.aligned_true_color import AlignedTrueColor
from entities.motion_vectors import MotionVectors, MotionVectorsArrows
from entities.ndsi import NDSI
from entities.motion_predicted_ndsi import MotionPredictedNDSI, MotionPredictedNDSIOverlay
from utils.utils import debug_trace
from utils import logging
logger = logging.getLogger(__name__)
class AlignedScene(SceneInterface):
MATCHES_INCLUDED_PERCENT = 0.25
ALLOWED_SHIFTING_DISTANCE = 200
def __init__(self, scene, reference_scene, previous_scene):
SceneInterface.__init__(self)
self.__scene = scene
self.__reference_scene = reference_scene
self.__affine_transform_matrix = None
self.__matches = None
self._red_band = AlignedBand(scene.red_band(), reference_scene, self)
self._green_band = AlignedBand(scene.green_band(), reference_scene, self)
self._blue_band = AlignedBand(scene.blue_band(), reference_scene, self)
self._nir_band = AlignedBand(scene.nir_band(), reference_scene, self)
self._swir1_band = AlignedBand(scene.swir1_band(), reference_scene, self)
self.__bands = [
self._red_band,
self._green_band,
self._blue_band,
self._nir_band,
self._swir1_band,
]
self.__ndsi = NDSI(self._green_band, self._swir1_band)
self.__bands.append(self.__ndsi)
self.__drawn_matches_image = DrawnMatchesImage(scene, reference_scene, self)
self.__bands.append(self.__drawn_matches_image)
self.__true_color = AlignedTrueColor(scene.true_color(), reference_scene, self)
self.__bands.append(self.__true_color)
if previous_scene is not None:
self.__motion_vectors = MotionVectors(previous_scene.ndsi(), self.__ndsi)
self.__bands.append(self.__motion_vectors)
self.__motion_vectors_arrows = MotionVectorsArrows(self.__motion_vectors,
previous_scene.ndsi(),
self.__ndsi)
self.__bands.append(self.__motion_vectors_arrows)
self.__motion_predicted_ndsi = MotionPredictedNDSI(self.__motion_vectors, self.ndsi())
self.__bands.append(self.__motion_predicted_ndsi)
self.__motion_predicted_overlay_ndsi = \
MotionPredictedNDSIOverlay(self.__motion_predicted_ndsi, self.ndsi())
self.__bands.append(self.__motion_predicted_overlay_ndsi)
else:
self.__motion_vectors = None
self.__motion_predicted_ndsi = None
def clear(self):
for b in self.__bands:
b.clear()
def affine_transform_matrix(self) -> numpy.ndarray:
if self.__affine_transform_matrix is None:
self.__calculate_affine_transform_matrix()
return self.__affine_transform_matrix
def __calculate_affine_transform_matrix(self) -> None:
self.__matches = self.__match_descriptors()
self.__prune_low_score_matches()
reference_points, image_points = self.__prune_matches_by_euclidean_distance()
        if any(element is None for element in [image_points, reference_points]):
            logger.error("Affine transformation matrix could not be computed due to insufficient "
                         "valid matches.")
            self.__affine_transform_matrix = None
            return
try:
affine_transform_matrix, inliers = cv2.estimateAffine2D(image_points,
reference_points,
None,
cv2.RANSAC)
self.__affine_transform_matrix = affine_transform_matrix
logger.notice("Affine transformation matrix for scene {} with reference {}\n{}"
.format(self.__scene, self.__reference_scene, affine_transform_matrix))
except Exception as e:
logger.error("Affine transformation failed.\n{}".format(e))
def __match_descriptors(self) -> list:
descriptor_match = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
reference_descriptors = self.__reference_scene.descriptors()
image_descriptors = self.__scene.descriptors()
matches = descriptor_match.match(reference_descriptors, image_descriptors)
return matches
def __prune_low_score_matches(self) -> None:
self.__matches.sort(key=lambda x: x.distance, reverse=False)
matches_count = len(self.__matches)
pruned_matches_count = int(matches_count * self.MATCHES_INCLUDED_PERCENT)
self.__matches = self.__matches[:pruned_matches_count]
def __prune_matches_by_euclidean_distance(self) -> tuple:
pruned_matches = []
reference_points = []
image_points = []
for match in self.__matches:
reference_point = self.__reference_scene.keypoints()[match.queryIdx].pt
image_point = self.__scene.keypoints()[match.trainIdx].pt
if self.__valid_shifting_distance(reference_point, image_point):
reference_points.append(reference_point)
image_points.append(image_point)
pruned_matches.append(match)
self.__matches = pruned_matches
reference_points = numpy.array(reference_points)
image_points = numpy.array(image_points)
return reference_points, image_points
def __valid_shifting_distance(self, reference_point, image_point) -> bool:
euclidean_distance = self.__euclidean_distance(reference_point, image_point)
if euclidean_distance < AlignedScene.ALLOWED_SHIFTING_DISTANCE:
return True
else:
return False
@staticmethod
def __euclidean_distance(image_point, reference_point) -> float:
x_distance = abs(reference_point[0] - image_point[0])
y_distance = abs(reference_point[1] - image_point[1])
distance = math.sqrt(math.pow(x_distance, 2) + (math.pow(y_distance, 2)))
return distance
def scene_id(self) -> str:
return self.__scene.scene_id()
def scene_path(self) -> str:
return self.__scene.scene_path()
def bands(self) -> list:
return self.__bands
def thumbnail(self) -> AlignedBand:
return self.true_color()
def true_color(self) -> AlignedImage:
return self.__true_color
def ndsi(self) -> NDSI:
return self.__ndsi
def matches(self):
if self.__matches is None:
self.affine_transform_matrix()
return self.__matches
def motion_predicted_ndsi(self) -> NDSI:
return self.__motion_predicted_ndsi
def __str__(self):
return "AlignedScene[{}]".format(self.scene_id().scene_id())
def iterate_over_all(self):
        logger.notice(str(self))
for b in self.__bands:
if b.name() == "Motion Vectros":
continue
b.raw_data()
# Make sure we don't fill the RAM
self.__bands = None
self.__ndsi = None
self.__motion_vectors = None
self.__motion_predicted_ndsi = None
self._red_band = None
self._green_band = None
self._blue_band = None
self._nir_band = None
self._swir1_band = None
class DrawnMatchesImage(Image):
NAME = "Drawn Matches"
def __init__(self, scene, reference_scene, aligned_scene):
self.__reference_scene = reference_scene
self.__scene = scene
self.__aligned_scene = aligned_scene
def name(self):
return self.NAME
def scene_name(self):
return self.__scene.scene_id().scene_id()
def raw_data(self):
pass
def clear(self):
pass
def visual_data(self):
return self.__matches_from_reference_to_image()
def __matches_from_reference_to_image(self):
reference_green_band_8bit = (self.__reference_scene.green_band().visual_data() >> 8).astype(numpy.uint8)
green_band_8bit = (self.__scene.green_band().visual_data() >> 8).astype(numpy.uint8)
drawn_matches_image = cv2.drawMatches(reference_green_band_8bit,
self.__reference_scene.keypoints(),
green_band_8bit,
self.__scene.keypoints(),
self.__aligned_scene.matches(),
None, matchColor=(0, 255, 255),
singlePointColor=(100, 0, 0),
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
return drawn_matches_image
|
nilq/baby-python
|
python
|
import os
import json
import scipy.io
import pandas
import itertools
import numpy as np
from PIL import Image
from collections import OrderedDict
info = OrderedDict(description = "Testset extracted from put-in-context paper (experiment H)")
licenses = OrderedDict()
catgs = ['airplane','apple','backpack','banana','baseball bat','baseball glove','bench','bicycle','bird','boat','book','bottle','bowl','bus','cake','car','carrot','cell phone','chair','clock','cow','cup','dog','donut','fire hydrant','fork','frisbee','horse','kite','knife','motorcycle','mouse','orange','parking meter','potted plant','remote','sheep','sink','skateboard','skis','snowboard','spoon','sports ball','stop sign','suitcase','surfboard','tennis racket','tie','toothbrush','traffic light','train','truck','umbrella','vase','wine glass']
#imagedir_ori = '/home/mengmi/Projects/Proj_context2/Datasets/MSCOCO/trainColor_oriimg'
#imagedir_bin = '/home/mengmi/Projects/Proj_context2/Datasets/MSCOCO/trainColor_binimg'
imagedir_ori = '/home/mengmi/Projects/Proj_context2/Matlab/Stimulus/keyframe_expH'
imagedir_bin = '/home/mengmi/Projects/Proj_context2/Matlab/Stimulus/keyframe_expA'
#object_data = pandas.read_csv('/home/mengmi/Projects/Proj_context2/Datalist/trainColor_oriimg.txt', header=-1)
#binary_data = pandas.read_csv('/home/mengmi/Projects/Proj_context2/Datalist/trainColor_binimg.txt', header=-1)
#labels = pandas.read_csv('/home/mengmi/Projects/Proj_context2/Datalist/trainColor_label.txt', header=-1)
object_data = pandas.read_csv('/home/dimitar/experiments_I_and_J/expIJ/test_expJ_Color_oriimg.txt', header=None)
binary_data = pandas.read_csv('/home/dimitar/experiments_I_and_J/expIJ/test_expJ_Color_binimg.txt', header=None)
labels = pandas.read_csv('/home/dimitar/experiments_I_and_J/expIJ/test_expJ_Color_label.txt', header=None)
image_cnt = 0
images = [] # fill this list with image annotations
categories = [] # fill this list with category annotations
annotations = [] # fill this list with object annotations
for (_, s), (_, s1), (_, label) in zip(object_data.iterrows(), binary_data.iterrows(), labels.iterrows()):
image = Image.open(os.path.join(imagedir_ori, s[0]))
bin_mask = np.array(Image.open(os.path.join(imagedir_bin, s1[0])))
    A = np.argwhere(bin_mask >= 200)
    top, left = A[0]
    bottom, right = A[-1]
    if bottom < A[-2][0] or right < A[-2][1]:
        bottom, right = A[-2]
    # cast numpy integers to plain ints so json.dump can serialise them
    top, left, bottom, right = int(top), int(left), int(bottom), int(right)
    images.append(OrderedDict(file_name = s[0], height = image.height, width = image.width, id = image_cnt))
    annotations.append(OrderedDict(area = (bottom-top)*(right-left), iscrowd = 0, image_id = image_cnt, bbox = [left, top, right - left, bottom - top], category_id = int(label[0]), id = image_cnt))
image_cnt += 1
for i in range(1, 56):
categories.append(OrderedDict(id = i, name = catgs[i-1]))
cocoannotations = OrderedDict(info = info, licenses = licenses, images = images, annotations = annotations, categories = categories)
# save annotations
with open("annotations/test_annotations_exp_J.json", "w") as f:
json.dump(cocoannotations, f)
|
nilq/baby-python
|
python
|
# See https://michaelgoerz.net/notes/extending-sphinx-napoleon-docstring-sections.html
# # -- Fixing bug with google docs showing attributes-------------
from sphinx.ext.napoleon.docstring import GoogleDocstring
# first, we define new methods for any new sections and add them to the class
def parse_keys_section(self, section):
return self._format_fields('Keys', self._consume_fields())
GoogleDocstring._parse_keys_section = parse_keys_section
def parse_attributes_section(self, section):
return self._format_fields('Attributes', self._consume_fields())
GoogleDocstring._parse_attributes_section = parse_attributes_section
def parse_class_attributes_section(self, section):
return self._format_fields('Class Attributes', self._consume_fields())
GoogleDocstring._parse_class_attributes_section = parse_class_attributes_section
# we now patch the parse method to guarantee that the above methods are
# assigned to the _sections dict
def patched_parse(self):
self._sections['keys'] = self._parse_keys_section
self._sections['class attributes'] = self._parse_class_attributes_section
self._unpatched_parse()
GoogleDocstring._unpatched_parse = GoogleDocstring._parse
GoogleDocstring._parse = patched_parse
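# With this patch applied, Google-style docstrings can use the extra sections,
# e.g. (hypothetical docstring):
#
#     """Return a mapping of plugin metadata.
#
#     Keys:
#         name (str): Human-readable plugin name.
#         version (str): Semantic version string.
#
#     Class Attributes:
#         registry (dict): Shared plugin registry.
#     """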
|
nilq/baby-python
|
python
|
import re
import random
import string
from django import template
from django.template import Context
from django.template.loader import get_template
from django.contrib.auth.models import Group
from django.core.exceptions import PermissionDenied
from crm.models import Person
from cedar_settings.models import GeneralSetting
from cedar.utils.misc_utils import get_back_url_from_context
register = template.Library()
@register.inclusion_tag('cedar/react.html')
def react():
pass
@register.inclusion_tag('cedar/react-dom.html')
def react_dom():
pass
@register.inclusion_tag('cedar/griddle.html')
def griddle():
pass
@register.inclusion_tag('cedar/spinner.html')
def spinner():
pass
@register.inclusion_tag('cedar/back-arrow-link.html')
def back_arrow(div_classes="col s1"):
return {
'div_classes': div_classes
}
@register.inclusion_tag('cedar/user-menu.html', takes_context=True)
def user_menu(context, *args, **kwargs):
# Requires a kwarg: "user_menu_id".
user_menu_id = kwargs.get('user_menu_id')
try:
if context['user'].is_authenticated():
person = Person.objects.get(user_account=context['user'])
else:
raise PermissionDenied
except Person.DoesNotExist:
person = None
return {
'person': person,
'user_menu_id': user_menu_id,
'context': context,
}
@register.inclusion_tag('cedar/messages.html', takes_context=True)
def messages(context, *args, **kwargs):
return {'context': context, }
# is_choice_selected:
# For use when rebuilding modelmultiplechoice fields manually,
# trying to figure out which are selected.
@register.filter()
def is_choice_selected(choice, field_values):
if not field_values:
return ""
# choice id is an int:
if str(choice[0]) in field_values:
return "selected"
else:
return ""
# is_disabled:
# takes a user object and a permission string and checks if the
# user has that permission. If he/she doesn't, it returns the string "disabled"
# which can be used in a materializecss button class.
@register.filter()
def is_disabled(user, permission):
if user.has_perm(permission):
return ""
else:
return "disabled"
# Use this to see if you are in a CREATEVIEW or an UPDATEVIEW.
# useful when re-using a model form for updates and creates:
# Usage:
# {% is_update_view "Update Project" "Create Project" as submit_value %}
@register.assignment_tag(takes_context=True)
def is_update_view(context, text_if_true, text_if_false):
try:
object = context.get('object')
        int(object.pk)  # This will fail if a normal object with a pk wasn't supplied.
return text_if_true
except AttributeError as e:
return text_if_false
@register.assignment_tag()
def get_dict_val(dictionary, key):
try:
return dictionary[key]
except:
return None
@register.assignment_tag()
def dict_has_key(dictionary, key):
if key in dictionary:
return True
else:
return False
@register.filter()
def replace_highlight_tags(text, span_class):
return text.replace("<em>", "<span class=\"{}\">".format(span_class)).replace("</em>", "</span>")
@register.assignment_tag(takes_context=True)
def chunkify_search_text(context, search_result, chunk_length):
t = search_result.text
return ['happy', 'trails']
@register.assignment_tag
def sanitize_old(text, repl_char, query):
# Get list of interview participant initials:
participants = Person.objects.filter(roles__name__contains="Participant")
# initials = [participant.initials for participant in participants]
for p in participants:
# Redact initials:
if len(p.initials) > 1: # Skip bad or weird initials
# text = text.replace(p.initials, repl_char * len(p.initials))
initials_str = p.initials.strip()
text = re.sub(r'\b{}\b'.format(initials_str), repl_char * len(initials_str), text)
# Redact names - 5 variations:
# # "Fname Lname"
# name_str = "{} {}".format(p.name_first, p.name_last).strip()
# text = text.replace(name_str, repl_char * len(name_str))
#
# # "FnameLname"
# name_str = "{}{}".format(p.name_first, p.name_last).strip()
# text = text.replace(name_str, repl_char * len(name_str))
# "Fname"
if p.name_first:
name_str = p.name_first.strip()
text = re.sub(r'\b{}\b'.format(name_str), repl_char * len(name_str), text)
# "Lname"
        if p.name_last:
name_str = p.name_last.strip()
text = re.sub(r'\b{}\b'.format(name_str), repl_char * len(name_str), text)
# "Indigenous"
if p.indigenous_name:
name_str = p.indigenous_name.strip()
text = text.replace(name_str, repl_char * len(name_str))
return text
@register.filter()
def concat(val1, val2):
return str(val1) + str(val2)
@register.assignment_tag()
def get_model_class(obj):
return obj.__class__
@register.assignment_tag()
def get_model_class_name(obj):
return obj.__class__.__name__
@register.filter()
def get_subclass_model_class_name(obj):
model = obj.__class__
return model.objects.get_subclass(id=obj.id).__class__.__name__
@register.assignment_tag()
def get_model_subclass(obj):
model = obj.__class__
return model.objects.get_subclass(id=obj.id)
@register.assignment_tag()
def is_submodel(obj1, obj2):
return issubclass(obj1.__class__, obj2.__class__)
# -------------------------------------------
# DEPRECATED. See Readme for implementing permissions.
# To use: wrap any html elements with:
# {% if request.user|can_view_sensitive %} {% endif %}
# and they will be filtered out based on user role.
# Currently, "Explorers" are the only restricted group,
# any other role will be able to see stuff.
# -------------------------------------------
@register.filter
def can_view_sensitive(user):
try:
if Group.objects.get(name='Explorer') in user.groups.all():
return False
else:
return True
except Exception as err:
return False
@register.inclusion_tag('cedar/back_button.html', takes_context=True)
def back_button(context, extra=None):
'''
Tries to set a button anchor with the http referer url. Disables
button if no url present
:param context:
:param extra: something to append on to the end of the url
:return:
'''
back_url = get_back_url_from_context(context)
if back_url:
if extra:
# add ending slash if not present
if back_url[-1] != "/":
back_url += "/"
back_url += extra
return {'BACK_URL': back_url}
else:
return {'BACK_URL': False}
@register.inclusion_tag('cedar/cancel_button.html', takes_context=True)
def cancel_button(context, extra=None):
'''
Tries to set a button anchor with the http referer url. Disables
button if no url present.
This actually just called back_button()
:param context:
:param extra: something to append on to the end of the url
:return:
'''
return back_button(context, extra)
@register.inclusion_tag('cedar/edit_submit_button.html', takes_context=True)
def edit_submit_button(context, form_selector, action_text=None):
'''
:param context:
:param form_selector: jquery selector string to get the form
:param action_text: button text. if None, will try to decide if it's a New or Update form
:return:
'''
if not action_text:
action_text = is_update_view(context, "Update", "Create")
return {
'form_selector': form_selector,
'action_text': action_text
}
@register.inclusion_tag('cedar/edit_delete_button.html', takes_context=True)
def edit_delete_button(context, delete_url_string, perm=None):
'''
:param context:
:param delete_url_string: if I call it "delete_url" it would conflict with the template var "delete_url"
:param perm: permission to check, if user doesn't have perm the button will be disabled. Can be None for no check.
:return:
'''
return {
'delete_url': delete_url_string,
'disabled_css': '' if not perm else is_disabled(context.request.user, perm)
}
@register.inclusion_tag('cedar/edit_cancel_button.html', takes_context=True)
def edit_cancel_button(context, cancel_url_string):
'''
What's that, a THIRD cancel button tag? Yes, yes it is.
:param context:
:param cancel_url_string
:return:
'''
return {
'cancel_url': cancel_url_string,
}
@register.assignment_tag()
def get_background_url():
url_obj = GeneralSetting.objects.get('cedar__default_splash_page_background_img')
if isinstance(url_obj, str):
return url_obj
else:
return url_obj.file.url
@register.filter()
def render_boolean(value):
bool_template = get_template("cedar/boolean_template.html")
return bool_template.render(Context({'value': value}))
@register.assignment_tag()
def random_string(num_chars=4):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(num_chars))
|
nilq/baby-python
|
python
|
import os
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden, Http404
from django.core.urlresolvers import reverse
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.template.loader import select_template
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
from threadedcomments.models import ThreadedComment
from topics.forms import TopicForm
from topics.models import Topic
class ContentApp(object):
def __init__(self, group_model, content_app_name):
self.group_model = group_model
self.content_app_name = content_app_name
def render(self, template_name, context, context_instance=None):
ctype = ContentType.objects.get_for_model(self.group_model)
return render_to_response([
'%s/%s/%s' % (ctype.app_label, self.content_app_name, template_name),
'%s/%s' % (self.content_app_name, template_name),
], context, context_instance=context_instance)
def get_group(self, slug):
return self.group_model._default_manager.get(slug=slug)
def topics(request, group_slug=None, form_class=TopicForm, template_name="topics.html", app=None):
try:
group = app.get_group(group_slug)
except ObjectDoesNotExist:
raise Http404
is_member = request.user.is_authenticated() and group.user_is_member(request.user) or False
if request.method == "POST":
if request.user.is_authenticated():
if is_member:
topic_form = form_class(request.POST)
if topic_form.is_valid():
topic = topic_form.save(commit=False)
topic.group = group
topic.creator = request.user
topic.save()
request.user.message_set.create(message="You have started the topic %s" % topic.title)
topic_form = form_class() # @@@ is this the right way to reset it?
else:
request.user.message_set.create(message="You are not a member and so cannot start a new topic")
topic_form = form_class()
else:
return HttpResponseForbidden()
else:
topic_form = form_class()
topics = group.get_related_objects(Topic)
return app.render(template_name, {
"group": group,
"topic_form": topic_form,
"is_member": is_member,
"topics": topics,
}, context_instance=RequestContext(request))
def topic(request, topic_id, edit=False, template_name="topic.html", app=None):
topic = get_object_or_404(Topic, id=topic_id)
if request.method == "POST" and edit == True and \
(request.user == topic.creator or request.user == topic.group.creator):
topic.body = request.POST["body"]
topic.save()
return HttpResponseRedirect(topic.get_absolute_url())
return app.render(template_name, {
'topic': topic,
'edit': edit,
}, context_instance=RequestContext(request))
def topic_delete(request, pk, app=None):
topic = Topic.objects.get(pk=pk)
if request.method == "POST" and (request.user == topic.creator or \
request.user == topic.group.creator):
        # remove any threaded comments attached to this topic
        ThreadedComment.objects.all_for_object(topic).delete()
topic.delete()
return HttpResponseRedirect(request.POST["next"])
|
nilq/baby-python
|
python
|
'''
Do a parcel analysis of the sounding and plot the parcel temperature
'''
from __future__ import print_function, division
from SkewTplus.skewT import figure
from SkewTplus.sounding import sounding
from SkewTplus.thermodynamics import parcelAnalysis, liftParcel
#Load the sounding data
mySounding = sounding("./exampleSounding.txt")
pressure, temperature, dewPointTemperature = mySounding.getCleanSounding()
# Perform a parcel analysis
# The full parcel analysis field is returned
# Most Unstable parcel : method=0
# Start looking for the most unstable parcel from the first level (initialLevel=0)
# Use at most 20 iterations in the bisection method to find the LCL
# Since the sounding temperature and pressure are expressed in Celsius and hPa
# we set the corresponding keywords
myParcelAnalysis = parcelAnalysis(pressure,
temperature,
dewPointTemperature,
hPa=True,
celsius=True,
fullFields=1,
method=0,
initialLevel=0,
tolerance=0.1,
maxIterations=20)
# Print the contents of the dictionary
for key,value in myParcelAnalysis.items():
if isinstance(value, float) :
print("%s = %.1f"%(key,value))
else:
print("%s = %s"%(key,str(value)))
#Plot the parcel trajectory in the SkewT diagram
# First we lift the parcel adiabatically
initialLevel = myParcelAnalysis['initialLevel']
parcelTemperature = liftParcel(temperature[initialLevel],
pressure,
myParcelAnalysis['pressureAtLCL'],
initialLevel=initialLevel,
hPa=True,
celsius=True)
# Create a Figure Manager
mySkewT_Figure = figure()
# Add an Skew-T axes to the Figure
mySkewT_Axes = mySkewT_Figure.add_subplot(111, projection='skewx')
# Plot the parcel temperature
mySkewT_Axes.plot(parcelTemperature, pressure, linewidth=3, color='r' )
# Add a marker for the LCL and the LFC
mySkewT_Axes.plot(myParcelAnalysis['temperatureAtLCL'], myParcelAnalysis['pressureAtLCL'],
marker='o', color='b' , label='LCL')
mySkewT_Axes.plot(myParcelAnalysis['temperatureAtLFC'], myParcelAnalysis['pressureAtLFC'],
marker='o', color='g' , label='LFC')
# Add a legend
mySkewT_Axes.legend(loc='center right')
mySkewT_Axes.set_title("Single Parcel Lifted adiabatically")
mySkewT_Figure.show_plot()
|
nilq/baby-python
|
python
|
from cmath import exp, pi, sin
import matplotlib.pyplot as mplt
def FFT(P):
n = len(P)
if n == 1:
return P
else:
w = exp((2.0 * pi * 1.0j) / n)
Pe = []
Po = []
for i in range(0, n, 2):
Pe.append(P[ i ])
for i in range(1, n, 2):
Po.append(P[ i ])
ye = FFT(Pe)
yo = FFT(Po)
y = [0.0] * n
for q in range(int(n * 0.5)):
y[q] = ye[q] + (w**q)*yo[q]
y[q + int(n/2)] = ye[q] - (w**q)*yo[q]
return y
def iFFT(P):
n = len(P)
if n == 1:
return P
else:
w = exp((-2.0 * pi * 1.0j) / n)
Pe = []
Po = []
for i in range(0, n, 2):
Pe.append(P[ i ])
for i in range(1, n, 2):
Po.append(P[ i ])
ye = iFFT(Pe)
yo = iFFT(Po)
y = [0.0] * n
for q in range(int(n * 0.5)):
y[q] = ye[q] + (w**q)*yo[q]
y[q + int(n/2)] = ye[q] - (w**q)*yo[q]
return y
#must be a power of 2
size = 256
testData = []
SAMPLERATE = 44100.0
dt = 1.0/SAMPLERATE
f = 1.0/(size/SAMPLERATE)
time = 0.0
for i in range(size):
testData.append( sin(2.0 * pi * 2.0 * f * time).real + 0.5 * sin(2.0 * pi * 8.0 * f * time).real )
time += dt
fftData = FFT(testData)
##### DO SOMETHING WITH FFT DATA #####
##### DO SOMETHING WITH FFT DATA #####
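# For instance, a crude low-pass filter could go here (illustrative only):
# keep = 16                         # number of low-frequency bins to keep
# for k in range(keep, size - keep):
#     fftData[k] = 0.0              # zero the high-frequency bins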
ifftData = iFFT(fftData)
for q in range(len(ifftData)):
    ifftData[q] = (ifftData[q] / size).real  # normalise and drop the ~zero imaginary part
fig, (ax1, ax2, ax3) = mplt.subplots(3)
ax1.plot( testData, label = 'original' )
ax2.plot( ifftData, label = 'reconstructed' )
ax3.plot( [abs(v) for v in fftData], label = 'FFT magnitude' )
ax1.legend( bbox_to_anchor = (1.0, 1), loc = 'upper right' )
ax2.legend( bbox_to_anchor = (1.0, 1), loc = 'upper right' )
ax3.legend( bbox_to_anchor = (1.0, 1), loc = 'upper right' )
mplt.show()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-04-15 18:21
# @Author : erwin
import pandas as pd
import numpy as np
from common.util_function import *
'''
Handling missing values
1. Fill missing values with the mean / most frequent value. For a numeric column, take the mean.
2. For a non-numeric column, e.g. strings, assign the most frequently occurring value to the missing entries.
3. Drop missing values.
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.dropna.html
'''
raw_data = {'name': ['Jason', 'Molly', np.nan, np.nan, np.nan],
'nationality': ['USA', 'USA', 'France', 'UK', np.nan],
'age': [42, 52, 36, 24, np.nan],
'none': [np.nan, np.nan, np.nan, np.nan, np.nan],
}
df = pd.DataFrame(raw_data, columns=['name', 'nationality', 'age', 'none'])
print_line("原始数据")
print_br(df)
print_line("检查空值 NaN")
print_br(pd.isnull(df))
print_br(pd.isnull(df.name))
print_line("填充固定值")
print_br(df.fillna(value=5))
print_br(df.none.fillna(value=5))
print_line("填充均值/中位数/众数")
# inplace=True 表示在原来的 dataframe 上修改,inplace=False 表示返回新的 dataframe。
df_tmp = df['age'].fillna(df['age'].mean(), inplace=True)
print_br(df_tmp)
df_tmp = df['age'].fillna(df['age'].median(), inplace=False)
print_br(df_tmp)
df_tmp = df['nationality'].fillna(df['nationality'].mode()[0], inplace=False)
print_br(df_tmp)
print_line("删除全部为NaN值的行/列")
print_br(df.dropna(axis=0, how='all'))
print_br(df.dropna(axis=1, how='all'))
print_line("删除任一为NaN值的行/列")
df = df.drop('none', axis=1).drop(4, axis=0)
print_br(df)
print_br(df.dropna(axis=0, how='any'))
print_br(df.dropna(axis=1, how='any'))
|
nilq/baby-python
|
python
|
"""
The sys command to manage the cmd5 distribution
"""
import glob
import os
import shutil
from cloudmesh.common.util import path_expand
from cloudmesh.shell.command import PluginCommand
from cloudmesh.shell.command import command
from cloudmesh.sys.manage import Command, Git, Version
class SysCommand(PluginCommand):
"""
The system command
"""
# noinspection PyUnusedLocal
@command
def do_sys(self, args, arguments):
"""
::
Usage:
sys upload
sys commit MESSAGE
sys command generate NAME [.]
sys generate command NAME [.]
sys version VERSION
This command does some useful things.
Arguments:
MESSAGE the message to commit
NAME the command to generate
VERSION the version number
Options:
-f specify the file
Description:
cms sys command generate NAME
When you execute this command it
will generate a directory tree for a command
with the name
cloudmesh-NAME
To install the command you need to
cd cloudmesh-NAME
pip install -e .
or
pip install .
cms sys generate command NAME .
cms sys command generate NAME .
the code will be installed in the current directory. This is
helpful, if you already are in a directory fof the name
cloudmesh-NAME, e.g. if you already created it in github and
like to add a command in that github directory.
The commands 'version', 'commit' and 'upload'
are only to be used by Gregor.
cms version
The version command adds a new version to the
VERSION file for cmd5, common, and sys.
This helps to keep the versions aligned across
these modules.
cms commit
The commit command adds a new version and commits
cms upload
The upload command uploads the new version to pypi
"""
print(arguments)
dot = arguments["."]
if arguments.commit:
msg = arguments.MESSAGE
Git.commit(msg)
elif arguments.upload:
Git.upload()
elif arguments.readme and arguments.generate:
name = arguments.NAME
Command.generate(name)
elif arguments.command and arguments.generate:
name = arguments.NAME
Command.generate(name)
if dot:
for file in ["LICENSE",
".bumpversion.cfg",
".gitignore",
"requirements.txt",
"Makefile"]:
try:
os.remove(file)
except:
pass
for entry in glob.glob("cloudmesh-{name}/**".format(name=name)):
shutil.move(entry, path_expand("."))
for entry in glob.glob("cloudmesh-{name}/.*".format(name=name)):
shutil.move(entry, path_expand("."))
shutil.rmtree("cloudmesh-{name}".format(name=name))
elif arguments.version:
version = arguments.VERSION
Version.set(version)
|
nilq/baby-python
|
python
|
import numpy as np
from pypadre.pod.app import PadreApp
from sklearn.datasets import load_iris
from pypadre.examples.base_example import example_app
# create example app
padre_app = example_app()
def create_experiment1(app: PadreApp, name="", project="", auto_main=True):
@app.dataset(name="iris",
columns=['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)',
'petal width (cm)', 'class'], target_features='class')
def dataset():
data = load_iris().data
target = load_iris().target.reshape(-1, 1)
return np.append(data, target, axis=1)
@app.preprocessing(reference_git=__file__)
def preprocessing(dataset, **kwargs):
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(dataset.features())
_features = scaler.transform(dataset.features())
targets = dataset.targets()
new_data = np.hstack((_features, targets))
return new_data
@app.experiment(dataset=dataset, reference_git=__file__, preprocessing_fn=preprocessing,
experiment_name=name, seed=1, project_name=project, auto_main=auto_main)
def experiment():
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
estimators = [('SVC', SVC(probability=True, C=1.0))]
return Pipeline(estimators)
return experiment
def create_experiment2(app: PadreApp, name="", project="", auto_main=True):
@app.dataset(name="iris",
columns=['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)',
'petal width (cm)', 'class'], target_features='class')
def dataset():
data = load_iris().data
target = load_iris().target.reshape(-1, 1)
return np.append(data, target, axis=1)
@app.custom_splitter(reference_git=__file__)
def custom_splitter(dataset, **kwargs):
idx = np.arange(dataset.size[0])
cutoff = int(len(idx) / 2)
return idx[:cutoff], idx[cutoff:], None
@app.experiment(dataset=dataset, reference_git=__file__, splitting=custom_splitter,
experiment_name=name, seed=1, project_name=project, auto_main=auto_main)
def experiment():
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.decomposition import PCA
estimators = [('PCA',PCA()),('SVC', SVC(probability=True, C=1.0))]
return Pipeline(estimators)
return experiment
experiment1 = create_experiment1(app=padre_app, name="Iris SVC - preprocessing", project="Iris - experiments")
experiment2 = create_experiment2(app=padre_app, name="Iris SVC - custom_splitting", project="Iris - experiments")
metadata, pipelines = experiment1.compare(experiment2)
print("Experiments metadata: ")
print(metadata)
print("Experiments pipelines: ")
print(pipelines)
|
nilq/baby-python
|
python
|
import socket
import pickle
import struct
import argparse
def send_msg(sock, msg):
msg_pickle = pickle.dumps(msg)
sock.sendall(struct.pack(">I", len(msg_pickle)))
sock.sendall(msg_pickle)
print(msg[0], 'sent to', sock.getpeername())
def recv_msg(sock, expect_msg_type = None):
msg_len = struct.unpack(">I", sock.recv(4))[0]
msg = sock.recv(msg_len, socket.MSG_WAITALL)
msg = pickle.loads(msg)
print(msg[0], 'received from', sock.getpeername())
if (expect_msg_type is not None) and (msg[0] != expect_msg_type):
raise Exception("Expected " + expect_msg_type + " but received " + msg[0])
return msg
def args_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-ip', type=str, default='localhost', help='Server IP address')
parser.add_argument('-port', type=int, default=51018, help='Server port')
parser.add_argument('-size', type=int, default=132863336, help='Number of floating point parameters in message')
parser.add_argument('-sim', type=int, default=10, help='Number of simulation rounds')
args = parser.parse_args()
return args
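# Minimal round-trip sketch (illustrative; both ends in one process):
#
#     import socket
#     a, b = socket.socketpair()
#     send_msg(a, ('MSG_TEST', [0.0] * 4))
#     msg = recv_msg(b, expect_msg_type='MSG_TEST')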
|
nilq/baby-python
|
python
|
"""
NetCDF Builder
This is currently a test script and will eventually be made into a module
"""
#==============================================================================
__title__ = "netCDF maker"
__author__ = "Arden Burrell (Manon's original code modified)"
__version__ = "v1.0(02.03.2018)"
__email__ = "arden.burrell@gmail.com"
#==============================================================================
# Set to go up two levels to TSSRESTREND folder
import os
os.chdir('../../')
#==============================================================================
# load modules for netcdf
import scipy.io.netcdf as nc
import collections
import datetime
# Load modules for the files
import numpy as np
from collections import OrderedDict
# Load modules for debugging
import pdb
# +++++ Import plotting and colorpackages +++++
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
#==============================================================================
def main():
# Create a blank object to hold my info
ncinfo = netCDF_info() #call the class
# =========== load the numpy array ===========
DEMarray = np.load("./Input_data/DEM/GMTED/data/Global_DEM_at_GIMMS.npy")
# plot the data
plt.style.use('classic')
cmap = mpc.ListedColormap(
palettable.matplotlib.Viridis_20.mpl_colors
)
plt.imshow(DEMarray, vmin=0, vmax=5000, cmap=cmap)
plt.colorbar()
plt.show()
# =========== Expand the DIMS ===========
DEMarray3d = np.expand_dims(DEMarray, axis=0)
# =========== Grab lats and lons from an existing netcdf ===========
# NOTE: this netcdf is the exact shape i want to make
file_name = './Input_data/DEM/GMTED/data/10N000E_20101117_gmted_mea075_at_GIMMS.nc'
lat_arr, lon_array = nc_getLatsandLons(file_name)
# =========== Add info ===========
# The data i want to save
ncinfo.data = DEMarray3d
# File name to save into
ncinfo.fname = "./Input_data/DEM/GMTED/data/Global_DEM_GMTED_at_GIMMS.nc"
# The name of the variable to be savesd
ncinfo.var_name = "DEM"
ncinfo.var_lname = "Height_Above_Mean_Sea_Level"
# Number of lats
ncinfo.lat = 2160
# number of lons
ncinfo.lon = 4320
# Fill value, really important for CDO
ncinfo.fill = -99999.0
# Units of my variable (Meters above sea level in this case)
ncinfo.units = "m"
# The dates (This needs work)
ncinfo.dates = datetime.datetime.strptime('20100101','%Y%m%d')
# Array of the latitudes
ncinfo.latitudes = lat_arr
# Array of the longitudes
ncinfo.longitudes = lon_array
# Add Description
ncinfo.description = "Global DEM regridded from the GMTED2012 2010 250m data using CDO remapcon2"
# Add the history (This needs work)
ncinfo.history = "Created " + datetime.datetime.today().strftime("%y/%m/%d")
# =========== Create the netcdf file ===========
write_netcdf(ncinfo)
#==============================================================================
def nc_getLatsandLons(fn):
"""
This takes a netcdf fill and pulls out the lat and lons array
var:
fn, The name of a file to open
return:
lats, np array of the latitude
lons, np array of the longitude
"""
from netCDF4 import Dataset
# load the netcdf file
ncf1 = Dataset(fn, mode='r')
# Pull out the lon and lat data
lats = ncf1.variables["lat"][:]
lons = ncf1.variables["lon"][:]
return lats, lons
class netCDF_info(object):
"""
A class to store the netcdf infomation.
The goal is to move this calls to its own script in the
nc module once i have it working.
"""
def __init__(self): #(self, arg)
# self.arg = arg
        # These are None; later I will add ways to automatically fill this data
self.data = None
self.fname = None
self.var_name = None
self.var_lname = None
self.lat = None
self.lon = None
self.fill = None
self.units = None
self.dates = None
self.latitudes = None
self.longitudes = None
self.description = None
self.history = None
def date_range(start_date, end_date):
# define time vector
start_date=datetime.datetime.strptime(start_date,'%Y%m%d.%f')
end_date=datetime.datetime.strptime(end_date,'%Y%m%d.%f')
current=[start_date+datetime.timedelta(days=x) for x in range((end_date-start_date).days+1)]
current=[t.strftime('%Y%m%d.%f') for t in current]
return current
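# Example (illustrative): date_range('20110101.5', '20110103.5')
# -> ['20110101.500000', '20110102.500000', '20110103.500000']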
def write_netcdf(ncinfo):
""" setup and save a netcdf file
var:
object of my created class netCDF_info
"""
# ========== Create new netcdf ==========
NAME=nc.netcdf_file(ncinfo.fname,'w')
# ========== Set up the Dimensions ==========
    NAME.createDimension('time', None)  # None makes the time dimension unlimited
# NAME.createDimension('lev',11)
NAME.createDimension('lat',ncinfo.lat)
NAME.createDimension('lon',ncinfo.lon)
# ========== Setup the Variables ==========
time=NAME.createVariable('time',np.float64,('time',))
# lev=NAME.createVariable('lev',np.int32,('lev',))
lat=NAME.createVariable('lat',np.float64,('lat',))
lon=NAME.createVariable('lon',np.float64,('lon',))
# VAR=NAME.createVariable(str(VAR),np.float64,('time','lev','lat','lon'),)
VAR=NAME.createVariable(ncinfo.var_name,np.float64,('time','lat','lon'),)
# setting the missing value is super important for the file to be cdo readable
setattr(VAR,'missing_value',ncinfo.fill)
setattr(VAR, 'standard_name', ncinfo.var_lname)
# ========== Set the units ==========
time.units= 'day as %Y%m%d'
# lev.units = '-'
lat.units = 'degrees_north'
lon.units = 'degrees_east'
VAR.units = ncinfo.units
# ========== Add data ==========
# creates time vector using the date_range function
# time[:]=[t for t in date_range('20110101.5','20111231.5')]
# lev[:]=PFT_vector
lat[:] = ncinfo.latitudes
lon[:] = ncinfo.longitudes
    # This is a bodge for single-variable data
VAR[:] = ncinfo.data
#Add global attributes
NAME.description = ncinfo.description
NAME.history = ncinfo.history
    # WHAT'S MISSING:
    # a whole bunch of metadata
    # the standard_name and long_name of the variables
# ========== Close the netcdf ==========
NAME.close()
#==============================================================================
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
lista = enumerate('zero um dois três quatro cinco seis sete oito nove'.split())
numero_string = dict(lista)
string_numero = {valor: chave for chave, valor in numero_string.items()}
print(numero_string)
print(string_numero)
def para_numeral(n):
numeros=[]
for digito in str(n):
numeros.append(numero_string[int(digito)])
return ", ".join(numeros)
assert "um" == para_numeral(1)
assert "um, dois" == para_numeral(12)
assert "um, um" == para_numeral(11)
def para_inteiro(string_n):
string=""
lista=string_n.split(", ")
for digito in lista:
string+=str(string_numero[digito])
return int(string)
assert 1== para_inteiro('um')
assert 12== para_inteiro('um, dois')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 20 00:59:05 2020
@author: Leonardo Saccotelli
"""
import numpy as np
import AlgoritmiAlgebraLineare as al
#------------------- TEST OF THE GAUSSIAN ELIMINATION METHOD
#Size of the matrix
n = 5000
#Coefficient matrix
matrix = np.random.random((n, n)).astype(float)
#Exact solution vector
xSol = np.array([i for i in range(1,n+1)])
#Right-hand-side vector
b = np.dot(matrix, xSol)
# ------ APPLY THE ALGORITHMS to matrix and b
#Build the upper triangular matrix
matrix, b = al.GaussElimination(matrix, b)
#Compute the solutions via backward substitution
xFind = al.backwardSubstition(matrix, b)
#Compute the error on the solution
#using the 2-norm
xError = np.linalg.norm((xSol - xFind), 2)
#Compute the condition number of the problem
#(note: matrix has already been overwritten by the elimination at this point)
conditionNumber = np.linalg.cond(matrix, 1)
#Print the computed and exact solutions
print(' Gaussian elimination')
print(' ------------------------------------------------------------')
for i in range(n):
print(' xFind[%2d] = %18.16f xSol[%2d] = %5.3f' % (i, xFind[i], i, xSol[i]))
print(' ------------------------------------------------------------')
print(' Difference ||x-xsol|| = %e\n' %xError)
print(' Matrix condition number = %e' %conditionNumber )
|
nilq/baby-python
|
python
|
"""Lists out the inbuilt plugins in Example"""
from src.example_reporter import ExampleReporter
from src.example_tool import ExampleTool
def get_reporters() -> dict:
"""Return the reporters in plugin"""
return {
"example-reporter": ExampleReporter,
}
def get_tools() -> dict:
"""Return the tools in plugin"""
return {
"example-tool": ExampleTool,
}
|
nilq/baby-python
|
python
|
"""
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao.
Click the mouse to start the game.
When no lives remain or all bricks are cleared, the game is over.
"""
from campy.gui.events.timer import pause
from breakoutgraphics import BreakoutGraphics
from campy.gui.events.mouse import onmouseclicked
FRAME_RATE = 1000 / 120 # 120 frames per second
NUM_LIVES = 3 # Number of attempts
# global variable
start_move = False
bounce_back_from_paddle = False
def main():
global start_move
global bounce_back_from_paddle
graphics = BreakoutGraphics()
lives = NUM_LIVES
bricks_number = graphics.brick_cols * graphics.brick_rows
onmouseclicked(start)
graphics_vx = graphics.get_ball_x_velocity()
graphics_vy = graphics.get_ball_y_velocity()
while True:
if start_move is True:
graphics.ball.move(graphics_vx, graphics_vy)
if graphics.ball.x <= 0 or (graphics.ball.x + graphics.ball.width) >= graphics.window.width:
graphics_vx = -graphics_vx
bounce_back_from_paddle = False
if graphics.ball.y <= 0:
graphics_vy = -graphics_vy
bounce_back_from_paddle = False
if graphics.collisions_paddle():
if bounce_back_from_paddle is False:
bounce_back_from_paddle = True
graphics_vy = -graphics_vy
if graphics.collisions_bricks():
removal = graphics.collisions_bricks()
bricks_number -= 1
graphics.window.remove(removal)
graphics_vy = -graphics_vy
bounce_back_from_paddle = False
if graphics.ball.y > graphics.window.height:
lives -= 1
graphics.reset_ball()
start_move = False
if lives == 0:
break
if bricks_number == 0:
graphics.reset_ball()
break
pause(FRAME_RATE)
def start(event):
global start_move
start_move = True
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
'''
jRAT Rat Config Decoder
'''
__description__ = 'jRAT Rat Config Extractor'
__author__ = 'Kevin Breen http://techanarchy.net http://malwareconfig.com'
__version__ = '0.3'
__date__ = '2015/04/03'
#Standard Imports Go Here
import os
import sys
from base64 import b64decode
import string
from zipfile import ZipFile
from optparse import OptionParser
from io import BytesIO
#Non Standard Imports
try:
from Crypto.Cipher import AES, DES3
except ImportError:
print("[+] Couldn't Import Cipher, try 'sudo pip install pycrypto'")
# Main Decode Function Goes Here
'''
data is a read of the file
Must return a python dict of values
'''
def run(data):
print("[+] Extracting Data from Jar")
enckey, conf = get_parts(data)
    if enckey is None:
return
print("[+] Decoding Config with Key: {0}".format(enckey.encode('hex')))
if len(enckey) == 16:
# Newer versions use a base64 encoded config.dat
if '==' in conf: # this is not a great test but should work 99% of the time
b64_check = True
else:
b64_check = False
if b64_check:
raw_config = new_aes(conf, enckey)
else:
raw_config = old_aes(conf, enckey)
if len(enckey) in [24, 32]:
raw_config = old_des(conf, enckey)
config_dict = parse_config(raw_config, enckey)
return config_dict
#Helper Functions Go Here
# This extracts the Encryption Key and Config File from the Jar and or Dropper
def get_parts(data):
    new_zip = BytesIO(data)
enckey = None
dropper = None
conf = None
try:
with ZipFile(new_zip, 'r') as zip:
for name in zip.namelist(): # get all the file names
if name == "key.dat": # this file contains the encrytpion key
enckey = zip.read(name)
if name == "enc.dat": # if this file exists, jrat has an installer / dropper
dropper = zip.read(name)
if name == "config.dat": # this is the encrypted config file
conf = zip.read(name)
except:
print("[+] Dropped File is not Jar File starts with Hex Chars: {0}".format(data[:5].encode('hex')))
return None, None
if enckey and conf:
return enckey, conf
elif enckey and dropper:
newkey, conf = get_dropper(enckey, dropper)
return newkey, conf
else:
return None, None
# This extracts the Encryption Key and New conf from a 'Dropper' jar
def get_dropper(enckey, dropper):
try:
split = enckey.split('\x2c')
key = split[0][:16]
print("[+] Dropper Detected")
for x in split: # grab each line of the config and decode it.
try:
drop = b64decode(x).decode('hex')
print(" [-] {0}".format(drop).replace('\x0d\x0a',''))
except:
drop = b64decode(x[16:]).decode('hex')
print(" [-] {0}".format(drop))
new_zipdata = decrypt_aes(key, dropper)
new_key, conf = get_parts(new_zipdata)
return new_key, conf
except:
return None, None
# Returns only printable chars
def string_print(line):
return ''.join((char for char in line if 32 < ord(char) < 127))
# Messy Messy Messy
def messy_split(long_line):
# this is a messy way to split the data but it works for now.
'''
Split on = gives me the right sections but deletes the b64 padding
use modulo math to restore padding.
return new list.
'''
new_list = []
old_list = long_line.split('=')
for line in old_list:
if len(line) != 0:
line += "=" * ((4 - len(line) % 4) % 4)
new_list.append(line)
return new_list
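# Example (illustrative): messy_split('YWI=YWI=') -> ['YWI=', 'YWI=']
# (each base64 chunk gets its '=' padding restored)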
# AES Decrypt
def decrypt_aes(enckey, data):
    cipher = AES.new(enckey) # set the cipher
    return cipher.decrypt(data) # decrypt the data
# DES Decrypt
def decrypt_des(enckey, data):
    cipher = DES3.new(enckey) # set the cipher
    return cipher.decrypt(data) # decrypt the data
# Process Versions 3.2.2 > 4.2.
def old_aes(conf, enckey):
decoded_config = decrypt_aes(enckey, conf)
clean_config = string_print(decoded_config)
raw_config = clean_config.split('SPLIT')
return raw_config
#Process versions 4.2. >
def new_aes(conf, enckey):
sections = messy_split(conf)
decoded_config = ''
for x in sections:
decoded_config += decrypt_aes(enckey, b64decode(x))
raw_config = string_print(decoded_config).split('SPLIT')
return raw_config
# process versions < 3.2.2
def old_des(conf, enckey):
decoded_config = decrypt_des(enckey, conf)
clean_config = string_print(decoded_config)
raw_config = clean_config.split('SPLIT')
return raw_config
def parse_config(raw_config, enckey):
config_dict = {}
for kv in raw_config:
if kv == '':
continue
kv = string_print(kv)
key, value = kv.split('=')
if key == 'ip':
config_dict['Domain'] = value
if key == 'addresses':
dom_list = value.split(',')
dom_count = 0
for dom in dom_list:
if dom == '':
continue
config_dict['Domain {0}'.format(dom_count)] = value.split(':')[0]
config_dict['Port {0}'.format(dom_count)] = value.split(':')[1]
dom_count += 1
if key == 'port':
config_dict['Port'] = value
if key == 'os':
config_dict['OS'] = value
if key == 'mport':
config_dict['MPort'] = value
if key == 'perms':
config_dict['Perms'] = value
if key == 'error':
config_dict['Error'] = value
if key == 'reconsec':
config_dict['RetryInterval'] = value
if key == 'ti':
config_dict['TI'] = value
if key == 'pass':
config_dict['Password'] = value
if key == 'id':
config_dict['CampaignID'] = value
if key == 'mutex':
config_dict['Mutex'] = value
if key == 'toms':
config_dict['TimeOut'] = value
if key == 'per':
config_dict['Persistance'] = value
if key == 'name':
config_dict['InstallName'] = value
if key == 'tiemout':
config_dict['TimeOutFlag'] = value
if key == 'debugmsg':
config_dict['DebugMsg'] = value
config_dict["EncryptionKey"] = enckey.encode('hex')
return config_dict
#Recursive Function Goes Here
def runRecursive(folder, output):
counter1 = 0
counter2 = 0
print("[+] Writing Configs to File {0}".format(output))
with open(output, 'a+') as out:
#This line will need changing per Decoder
out.write("Filename,CampaignID,Domain,Port,OS,MPort,Perms,Error,RetryInterval,TI,Password,Mutex,TimeOut,Persistance,InstallName,TimeOutFlag,DebugMsg,EncryptionKey\n")
for server in os.listdir(folder):
if os.path.isfile(os.path.join(folder, server)):
print("[+] Processing File {0}".format(server))
fileData = open(os.path.join(folder,server), 'rb').read()
configOut = run(fileData)
if configOut != None:
configOut["TimeOutFlag"] = ''
#This line will need changing per Decoder
out.write('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12},{13},{14},{15},{16},{17}\n'.format(server,configOut["CampaignID"],configOut["Domain"],configOut["Port"],configOut["OS"],configOut["MPort"],configOut["Perms"],configOut["Error"],configOut["RetryInterval"],configOut["TI"],configOut["Password"],configOut["Mutex"],configOut["TimeOut"],configOut["Persistance"],configOut["InstallName"],configOut["TimeOutFlag"],configOut["DebugMsg"],configOut["EncryptionKey"]))
counter1 += 1
counter2 += 1
print("[+] Decoded {0} out of {1} Files".format(counter1, counter2))
return "Complete"
# Main
if __name__ == "__main__":
parser = OptionParser(usage='usage: %prog inFile outConfig\n' + __description__, version='%prog ' + __version__)
parser.add_option("-r", "--recursive", action='store_true', default=False, help="Recursive Mode")
(options, args) = parser.parse_args()
# If we dont have args quit with help page
if len(args) > 0:
pass
else:
parser.print_help()
sys.exit()
# if we want a recursive extract run this function
if options.recursive == True:
if len(args) == 2:
runRecursive(args[0], args[1])
sys.exit()
else:
print("[+] You need to specify Both Dir to read AND Output File")
parser.print_help()
sys.exit()
    # If not recursive try to open file
try:
print("[+] Reading file")
fileData = open(args[0], 'rb').read()
except:
print("[+] Couldn't Open File {0}".format(args[0]))
sys.exit()
#Run the config extraction
print("[+] Searching for Config")
config = run(fileData)
#If we have a config figure out where to dump it out.
    if config is None:
print("[+] Config not found")
sys.exit()
#if you gave me two args im going to assume the 2nd arg is where you want to save the file
if len(args) == 2:
print("[+] Writing Config to file {0}".format(args[1]))
with open(args[1], 'a') as outFile:
for key, value in sorted(config.items()):
clean_value = [x for x in value if x in string.printable]
outFile.write("Key: {0}\t Value: {1}\n".format(key,clean_value))
# if no seconds arg then assume you want it printing to screen
else:
print("[+] Printing Config to screen")
for key, value in sorted(config.items()):
clean_value = [x for x in value if x in string.printable]
print(" [-] Key: {0}\t Value: {1}".format(key,clean_value))
print("[+] End of Config")
|
nilq/baby-python
|
python
|
import json
import uuid
from datetime import datetime
from sqlalchemy.dialects.postgresql import UUID
from app import db
# person_team = db.Table(
# "person_team",
# db.Column(
# "person_id",
# UUID,
# db.ForeignKey("person.id", ondelete="CASCADE"),
# primary_key=True,
# ),
# db.Column(
# "team_id", UUID, db.ForeignKey("team.id", ondelete="CASCADE"), primary_key=True
# ),
# db.Index("ix_person_team_person_id_team_id", "team_id", "person_id", unique=True),
# )
# person_project = db.Table(
# "person_project",
# db.Column(
# "person_id",
# UUID,
# db.ForeignKey("person.id", ondelete="CASCADE"),
# primary_key=True,
# ),
# db.Column(
# "project_id",
# UUID,
# db.ForeignKey("project.id", ondelete="CASCADE"),
# primary_key=True,
# ),
# db.Index(
# "ix_person_project_person_id_project_id", "project_id", "person_id", unique=True
# ),
# )
class Organisation(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True) # Should this be unique too, or just domain?
domain = db.Column(db.String(), nullable=False, index=True, unique=True)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
grades = db.relationship("Grade", backref="organisation")
locations = db.relationship("Location", backref="organisation")
people = db.relationship("Person", backref="organisation")
practices = db.relationship("Practice", backref="organisation")
programmes = db.relationship("Programme", backref="organisation")
projects = db.relationship("Project", backref="organisation")
roles = db.relationship("Role", backref="organisation")
# Methods
def __init__(self, name, domain):
self.id = str(uuid.uuid4())
self.name = name.strip()
self.domain = domain.strip().lower()
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"domain": self.domain,
"grades": len(self.grades),
"locations": len(self.locations),
"people": len(self.people),
"practices": len(self.practices),
"programmes": len(self.programmes),
"projects": len(self.projects),
"roles": len(self.roles),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"name": self.name,
"domain": self.domain,
}
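# Hedged usage sketch (not part of the original module): shows the input
# normalisation performed by Organisation.__init__ above. Constructing the
# model does not require a database session; persisting it would.
def _organisation_example():
    org = Organisation(name="  Example Ltd ", domain="EXAMPLE.COM")
    assert org.name == "Example Ltd"        # stripped
    assert org.domain == "example.com"      # stripped and lower-cased
    return org.list_item()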
class Location(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True)
address = db.Column(db.String(), nullable=False)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id", ondelete="CASCADE"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
people = db.relationship("Person", backref="location", lazy=True)
# Methods
def __init__(self, name, address, organisation_id):
self.id = str(uuid.uuid4())
self.name = name.strip().title()
self.address = address.strip()
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"address": self.address,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"people": len(self.people),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {"id": self.id, "name": self.name}
class Grade(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id", ondelete="CASCADE"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
roles = db.relationship("Role", backref="grade", lazy=True)
# Methods
def __init__(self, name, organisation_id):
self.id = str(uuid.uuid4())
self.name = name.strip()
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"roles": len(self.roles),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {"id": self.id, "name": self.name}
class Practice(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True)
head_id = db.Column(UUID, db.ForeignKey("person.id", ondelete="SET NULL"), nullable=True, index=True)
cost_centre = db.Column(db.String(), nullable=True)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id", ondelete="CASCADE"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
head = db.relationship("Person", uselist=False)
roles = db.relationship("Role", backref="practice", lazy=True)
# Methods
def __init__(self, name, head_id, cost_centre, organisation_id):
self.id = str(uuid.uuid4())
self.name = name.strip().title()
self.head_id = str(uuid.UUID(head_id, version=4)) if head_id else None
self.cost_centre = cost_centre.strip() if cost_centre else None
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"head": {
"id": self.head.id,
"name": self.head.name,
}
if self.head
else None,
"cost_centre": self.cost_centre,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"roles": len(self.roles),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"name": self.name,
"head": {
"id": self.head.id,
"name": self.head.name,
}
if self.head
else None,
}
class Role(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
title = db.Column(db.String(), nullable=False, index=True)
grade_id = db.Column(UUID, db.ForeignKey("grade.id", ondelete="CASCADE"), nullable=False)
practice_id = db.Column(UUID, db.ForeignKey("practice.id", ondelete="CASCADE"), nullable=True)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id", ondelete="CASCADE"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
people = db.relationship("Person", backref="role", lazy=True)
# Methods
def __init__(self, title, grade_id, practice_id, organisation_id):
self.id = str(uuid.uuid4())
self.title = title.strip()
self.grade_id = str(uuid.UUID(grade_id, version=4))
self.practice_id = str(uuid.UUID(practice_id, version=4)) if practice_id else None
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"title": self.title,
"grade": {"id": self.grade.id, "name": self.grade.name},
"practice": self.practice.list_item() if self.practice else None,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"people": len(self.people),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"title": self.title,
"grade": self.grade.list_item(),
"practice": {"id": self.practice.id, "name": self.practice.name} if self.practice else None,
}
class Person(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String, nullable=False)
role_id = db.Column(UUID, db.ForeignKey("role.id", ondelete="CASCADE"), nullable=False, index=True)
organisation_id = db.Column(
UUID,
db.ForeignKey("organisation.id", ondelete="CASCADE"),
nullable=False,
index=True,
)
email_address = db.Column(db.String(254), nullable=False, unique=True)
full_time_equivalent = db.Column(db.Float, nullable=True)
location_id = db.Column(
UUID,
db.ForeignKey("location.id", ondelete="SET NULL"),
nullable=True,
index=True,
)
employment = db.Column(db.String, nullable=True)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
# teams = db.relationship(
# "Team",
# secondary=person_team,
# lazy=True,
# backref=db.backref("people", lazy=True),
# )
# projects = db.relationship(
# "Project",
# secondary=person_project,
# lazy=True,
# backref=db.backref("people", lazy=True),
# )
# Methods
def __init__(
self,
name,
role_id,
organisation_id,
email_address,
full_time_equivalent,
location_id,
employment,
):
self.id = str(uuid.uuid4())
self.name = name.strip().title()
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.role_id = str(uuid.UUID(role_id, version=4))
self.email_address = email_address.strip().lower()
self.full_time_equivalent = full_time_equivalent
        self.location_id = str(uuid.UUID(location_id, version=4)) if location_id else None
        self.employment = employment.strip() if employment else None
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"role": self.role.list_item(),
"email_address": self.email_address,
"full_time_equivalent": self.full_time_equivalent,
"location": self.location.list_item(),
"employment": self.employment,
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"name": self.name,
"role": self.role.list_item(),
"location": self.location.list_item(),
}
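# Hedged sketch (illustrative placeholder UUIDs, not real rows): shows the
# field normalisation performed by Person.__init__ above.
def _person_example():
    person = Person(
        name="  ada lovelace ",
        role_id="11111111-1111-4111-8111-111111111111",
        organisation_id="22222222-2222-4222-8222-222222222222",
        email_address=" Ada@Example.COM ",
        full_time_equivalent=1.0,
        location_id="33333333-3333-4333-8333-333333333333",
        employment="permanent",
    )
    assert person.name == "Ada Lovelace"            # stripped and title-cased
    assert person.email_address == "ada@example.com"  # stripped and lower-cased
    return person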
class Programme(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True)
manager_id = db.Column(UUID, db.ForeignKey("person.id", ondelete="SET NULL"), nullable=True, index=True)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id", ondelete="CASCADE"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
manager = db.relationship("Person", uselist=False)
projects = db.relationship("Project", backref="programme", lazy=True)
# Methods
def __init__(self, name, manager_id, organisation_id):
self.id = str(uuid.uuid4())
self.name = name.strip()
self.manager_id = str(uuid.UUID(manager_id, version=4)) if manager_id else None
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"manager": {
"id": self.manager.id,
"name": self.manager.name,
}
if self.manager
else None,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"projects": len(self.projects),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"name": self.name,
"manager": {
"id": self.manager.id,
"name": self.manager.name,
}
if self.manager
else None,
}
class Project(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True)
manager_id = db.Column(UUID, db.ForeignKey("person.id", ondelete="SET NULL"), nullable=True, index=True)
programme_id = db.Column(UUID, db.ForeignKey("programme.id"), nullable=True)
status = db.Column(db.String(), nullable=False, index=True)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
manager = db.relationship("Person", uselist=False)
# teams = db.relationship("Team", backref="project", lazy=True)
# many to many with person
# Methods
def __init__(self, name, manager_id, programme_id, status, organisation_id):
self.id = str(uuid.uuid4())
self.name = name.strip()
self.manager_id = str(uuid.UUID(manager_id, version=4)) if manager_id else None
self.programme_id = str(uuid.UUID(programme_id, version=4)) if programme_id else None
self.status = status.strip()
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"manager": {
"id": self.manager.id,
"name": self.manager.name,
}
if self.manager
else None,
"programme": {
"id": self.programme.id,
"name": self.programme.name,
}
if self.programme
else None,
"status": self.status,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"name": self.name,
"manager": {
"id": self.manager.id,
"name": self.manager.name,
}
if self.manager
else None,
"programme": {
"id": self.programme.id,
"name": self.programme.name,
}
if self.programme
else None,
"status": self.status,
}
# class Team(db.Model):
# # Fields
# id = db.Column(UUID, primary_key=True)
# name = db.Column(db.String(), nullable=False, index=True)
# created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
# updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# # Relationships
# # many to many with person
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Code to build the catalogue cache
#
# Usage: python build_cache.py
#
from __future__ import print_function
__author__ = "Yu Feng and Martin White"
__version__ = "1.0"
__email__ = "yfeng1@berkeley.edu or mjwhite@lbl.gov"
from imaginglss import DECALS
from imaginglss.cli import CLI
from imaginglss.analysis import cache
ap = CLI("Build cache")
ns = ap.parse_args()
decals = DECALS(ns.conf)
print('building brick index')
dr = decals.datarelease
print('building tractor cache')
builder = cache.CacheBuilder(decals.sweep_dir, decals.cache_dir, dr.schema.CATALOGUE_COLUMNS)
builder.build()
print('done')
|
nilq/baby-python
|
python
|
# Sum of the numbers from 1 to 5
summe = 0
for i in [1, 2, 3, 4, 5]:
    summe = summe + i  # start of a block
    print("Sum from 1 to ", i, ":", summe)  # end of a block
print("End of the calculation")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Miscellaneous utilities and tools
"""
import errno
import functools
import keyword
import logging
import os
import re
import shutil
import sys
import traceback
from contextlib import contextmanager
from pathlib import Path
from pkg_resources import parse_version
from . import __version__
from .exceptions import InvalidIdentifier, OldSetuptools
from .log import logger
@contextmanager
def _chdir_logging_context(path, should_log):
"""Private auxiliar function for logging inside chdir"""
if should_log:
logger.report('chdir', path)
with logger.indent():
yield
else:
yield
@contextmanager
def chdir(path, **kwargs):
"""Contextmanager to change into a directory
Args:
path (str): path to change current working directory to
Keyword Args:
log (bool): log activity when true. Default: ``False``.
pretend (bool): skip execution (but log) when pretending.
Default ``False``.
"""
should_pretend = kwargs.get('pretend')
should_log = kwargs.get('log', should_pretend)
# ^ When pretending, automatically output logs
# (after all, this is the primary purpose of pretending)
curr_dir = os.getcwd()
try:
with _chdir_logging_context(path, should_log):
if not should_pretend:
# ToDo: Remove str when we require PY 3.6
os.chdir(str(path)) # str to handle pathlib args
yield
finally:
os.chdir(curr_dir)
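def _chdir_demo():  # hedged sketch, not part of the original module
    """Shows that chdir() above restores the previous working directory."""
    before = os.getcwd()
    with chdir(Path.home()):
        inside = os.getcwd()  # home directory while inside the block
    assert os.getcwd() == before  # restored on exit
    return inside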
def move(*src, **kwargs):
"""Move files or directories to (into) a new location
Args:
*src (str[]): one or more files/directories to be moved
Keyword Args:
target (str): if target is a directory, ``src`` will be moved inside
it. Otherwise, it will be the new path (note that it may be
overwritten)
log (bool): log activity when true. Default: ``False``.
pretend (bool): skip execution (but log) when pretending.
Default ``False``.
"""
target = kwargs['target'] # Required arg
should_pretend = kwargs.get('pretend')
should_log = kwargs.get('log', should_pretend)
# ^ When pretending, automatically output logs
# (after all, this is the primary purpose of pretending)
for path in src:
if not should_pretend:
shutil.move(path, target)
if should_log:
logger.report('move', path, target=target)
def is_valid_identifier(string):
"""Check if string is a valid package name
Args:
string (str): package name
Returns:
bool: True if string is valid package name else False
"""
if not re.match("[_A-Za-z][_a-zA-Z0-9]*$", string):
return False
if keyword.iskeyword(string):
return False
return True
def make_valid_identifier(string):
"""Try to make a valid package name identifier from a string
Args:
string (str): invalid package name
Returns:
str: valid package name as string or :obj:`RuntimeError`
Raises:
:obj:`InvalidIdentifier`: raised if identifier can not be converted
"""
string = string.strip()
string = string.replace("-", "_")
string = string.replace(" ", "_")
string = re.sub('[^_a-zA-Z0-9]', '', string)
string = string.lower()
if is_valid_identifier(string):
return string
else:
raise InvalidIdentifier(
"String cannot be converted to a valid identifier.")
def exceptions2exit(exception_list):
"""Decorator to convert given exceptions to exit messages
This avoids displaying nasty stack traces to end-users
Args:
exception_list [Exception]: list of exceptions to convert
"""
def exceptions2exit_decorator(func):
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except tuple(exception_list) as e:
if logger.level <= logging.DEBUG:
# user surely wants to see the stacktrace
traceback.print_exc()
print("ERROR: {}".format(e))
sys.exit(1)
return func_wrapper
return exceptions2exit_decorator
# from http://en.wikibooks.org/, Creative Commons Attribution-ShareAlike 3.0
def levenshtein(s1, s2):
"""Calculate the Levenshtein distance between two strings
Args:
s1 (str): first string
s2 (str): second string
Returns:
int: distance between s1 and s2
"""
if len(s1) < len(s2):
return levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
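def _levenshtein_examples():  # hedged sketch of the distance computed above
    assert levenshtein("kitten", "sitting") == 3  # substitute k->s, e->i, insert g
    assert levenshtein("", "abc") == 3            # three insertions
    assert levenshtein("same", "same") == 0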
def prepare_namespace(namespace_str):
"""Check the validity of namespace_str and split it up into a list
Args:
namespace_str (str): namespace, e.g. "com.blue_yonder"
Returns:
[str]: list of namespaces, e.g. ["com", "com.blue_yonder"]
Raises:
:obj:`InvalidIdentifier` : raised if namespace is not valid
"""
namespaces = namespace_str.split('.') if namespace_str else list()
for namespace in namespaces:
if not is_valid_identifier(namespace):
raise InvalidIdentifier(
"{} is not a valid namespace package.".format(namespace))
return ['.'.join(namespaces[:i+1]) for i in range(len(namespaces))]
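def _namespace_example():  # hedged sketch, mirrors the docstring above
    assert prepare_namespace("com.blue_yonder") == ["com", "com.blue_yonder"]
    assert prepare_namespace("") == []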
def check_setuptools_version():
"""Check minimum required version of setuptools
Check that setuptools has all necessary capabilities for setuptools_scm
as well as support for configuration with the help of ``setup.cfg``.
Raises:
:obj:`OldSetuptools` : raised if necessary capabilities are not met
"""
try:
from setuptools import __version__ as setuptools_ver
from pkg_resources import parse_version
except ImportError:
raise OldSetuptools
setuptools_too_old = parse_version(setuptools_ver) < parse_version('38.3')
setuptools_scm_check_failed = True
if setuptools_too_old or setuptools_scm_check_failed:
raise OldSetuptools
def create_file(path, content, pretend=False):
"""Create a file in the given path.
This function reports the operation in the logs.
Args:
path (str): path in the file system where contents will be written.
content (str): what will be written.
pretend (bool): false by default. File is not written when pretending,
but operation is logged.
"""
if not pretend:
with open(path, 'w', encoding='utf-8') as fh:
fh.write(content)
logger.report('create', path)
def create_directory(path, update=False, pretend=False):
"""Create a directory in the given path.
This function reports the operation in the logs.
Args:
path (str): path in the file system where contents will be written.
update (bool): false by default. A :obj:`OSError` is raised when update
is false and the directory already exists.
pretend (bool): false by default. Directory is not created when
pretending, but operation is logged.
"""
if not pretend:
try:
os.mkdir(path)
except OSError:
if not update:
raise
return # Do not log if not created
logger.report('create', path)
def dasherize(word):
"""Replace underscores with dashes in the string.
Example::
>>> dasherize("foo_bar")
"foo-bar"
Args:
word (str): input word
Returns:
input word with underscores replaced by dashes
"""
return word.replace('_', '-')
def get_id(function):
"""Given a function, calculate its identifier.
A identifier is a string in the format ``<module name>:<function name>``,
similarly to the convention used for setuptools entry points.
Note:
This function does not return a Python 3 ``__qualname__`` equivalent.
If the function is nested inside another function or class, the parent
name is ignored.
Args:
function (callable): function object
Returns:
str: identifier
"""
return '{}:{}'.format(function.__module__, function.__name__)
def localize_path(path_string):
"""Localize path for Windows, Unix, i.e. / or \
Args:
path_string (str): path using /
Returns:
str: path depending on OS
"""
return str(Path(path_string))
#: Windows-specific error code indicating an invalid pathname.
ERROR_INVALID_NAME = 123
def is_pathname_valid(pathname):
"""Check if a pathname is valid
Code by Cecil Curry from StackOverflow
Args:
pathname (str): string to validate
Returns:
`True` if the passed pathname is a valid pathname for the current OS;
`False` otherwise.
"""
# If this pathname is either not a string or is but is empty, this pathname
# is invalid.
try:
if not isinstance(pathname, str) or not pathname:
return False
# Strip this pathname's Windows-specific drive specifier (e.g., `C:\`)
# if any. Since Windows prohibits path components from containing `:`
# characters, failing to strip this `:`-suffixed prefix would
# erroneously invalidate all valid absolute Windows pathnames.
_, pathname = os.path.splitdrive(pathname)
# Directory guaranteed to exist. If the current OS is Windows, this is
# the drive to which Windows was installed (e.g., the "%HOMEDRIVE%"
# environment variable); else, the typical root directory.
root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
if sys.platform == 'win32' else os.path.sep
assert os.path.isdir(root_dirname) # ...Murphy and her ironclad Law
# Append a path separator to this directory if needed.
root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
# Test whether each path component split from this pathname is valid or
# not, ignoring non-existent and non-readable path components.
for pathname_part in pathname.split(os.path.sep):
try:
os.lstat(root_dirname + pathname_part)
# If an OS-specific exception is raised, its error code
# indicates whether this pathname is valid or not. Unless this
# is the case, this exception implies an ignorable kernel or
# filesystem complaint (e.g., path not found or inaccessible).
#
# Only the following exceptions indicate invalid pathnames:
#
# * Instances of the Windows-specific "WindowsError" class
# defining the "winerror" attribute whose value is
# "ERROR_INVALID_NAME". Under Windows, "winerror" is more
# fine-grained and hence useful than the generic "errno"
# attribute. When a too-long pathname is passed, for example,
# "errno" is "ENOENT" (i.e., no such file or directory) rather
# than "ENAMETOOLONG" (i.e., file name too long).
# * Instances of the cross-platform "OSError" class defining the
# generic "errno" attribute whose value is either:
# * Under most POSIX-compatible OSes, "ENAMETOOLONG".
# * Under some edge-case OSes (e.g., SunOS, *BSD), "ERANGE".
except OSError as exc:
if hasattr(exc, 'winerror'):
if exc.winerror == ERROR_INVALID_NAME:
return False
elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
return False
# If a "TypeError" exception was raised, it almost certainly has the
# error message "embedded NUL character" indicating an invalid pathname.
except TypeError:
return False
# If no exception was raised, all path components and hence this
# pathname itself are valid. (Praise be to the curmudgeonly python.)
else:
return True
# If any other exception was raised, this is an unrelated fatal issue
# (e.g., a bug). Permit this exception to unwind the call stack.
#
# Did we mention this should be shipped with Python already?
def on_ro_error(func, path, exc_info):
"""Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
Args:
func (callable): function which raised the exception
path (str): path passed to `func`
exc_info (tuple of str): exception info returned by sys.exc_info()
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def rm_rf(path):
"""Remove a path by all means like `rm -rf` in Linux.
Args (str): Path to remove:
"""
shutil.rmtree(path, onerror=on_ro_error)
|
nilq/baby-python
|
python
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from odahuflow.sdk.models.base_model_ import Model
from odahuflow.sdk.models import util
class ExternalUrl(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, image_url: str=None, name: str=None, url: str=None): # noqa: E501
"""ExternalUrl - a model defined in Swagger
:param image_url: The image_url of this ExternalUrl. # noqa: E501
:type image_url: str
:param name: The name of this ExternalUrl. # noqa: E501
:type name: str
:param url: The url of this ExternalUrl. # noqa: E501
:type url: str
"""
self.swagger_types = {
'image_url': str,
'name': str,
'url': str
}
self.attribute_map = {
'image_url': 'imageUrl',
'name': 'name',
'url': 'url'
}
self._image_url = image_url
self._name = name
self._url = url
@classmethod
def from_dict(cls, dikt) -> 'ExternalUrl':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The ExternalUrl of this ExternalUrl. # noqa: E501
:rtype: ExternalUrl
"""
return util.deserialize_model(dikt, cls)
@property
def image_url(self) -> str:
"""Gets the image_url of this ExternalUrl.
Optional link to an image which represents a type of the resource, for example the logo of Grafana # noqa: E501
:return: The image_url of this ExternalUrl.
:rtype: str
"""
return self._image_url
@image_url.setter
def image_url(self, image_url: str):
"""Sets the image_url of this ExternalUrl.
Optional link to an image which represents a type of the resource, for example the logo of Grafana # noqa: E501
:param image_url: The image_url of this ExternalUrl.
:type image_url: str
"""
self._image_url = image_url
@property
def name(self) -> str:
"""Gets the name of this ExternalUrl.
Human-readable name # noqa: E501
:return: The name of this ExternalUrl.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""Sets the name of this ExternalUrl.
Human-readable name # noqa: E501
:param name: The name of this ExternalUrl.
:type name: str
"""
self._name = name
@property
def url(self) -> str:
"""Gets the url of this ExternalUrl.
Link to a resource # noqa: E501
:return: The url of this ExternalUrl.
:rtype: str
"""
return self._url
@url.setter
def url(self, url: str):
"""Sets the url of this ExternalUrl.
Link to a resource # noqa: E501
:param url: The url of this ExternalUrl.
:type url: str
"""
self._url = url
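# Hedged usage sketch for the generated model above (field values are
# illustrative assumptions):
def _external_url_example():
    link = ExternalUrl(name="Grafana", url="https://grafana.example.com")
    link.image_url = "https://example.com/grafana.png"
    return link.name, link.url, link.image_url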
|
nilq/baby-python
|
python
|
from bs4 import BeautifulSoup, SoupStrainer
import re
import requests
import json
strained = SoupStrainer('a', href=re.compile('saskatchewan.kijiji.ca/f.*QQ'))
soup = BeautifulSoup(requests.get('http://saskatchewan.kijiji.ca').text, 'html.parser')
category_dict = {}
for a in soup.findAll(strained):
category_id = None
category = []
for key in str(a.string).split(", "):
category.append(key)
    category_id_matches = re.search(r'CatIdZ(\d+)', a['href'])
    if category_id_matches:
        category_id = category_id_matches.group(1)
    if category_id and category:
        for key in category:
            category_dict[key] = int(category_id)
if category_dict:
with open('../pykijiji/categories.json', 'w') as f:
json.dump(
category_dict,
f,
sort_keys=True,
indent=2
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import connection, DatabaseError, transaction
import django_rq
from services.monitoring import test_service
from services.models import Service
def _create_history_partitions():
now = datetime.datetime.now()
required_partitions = [
(now + datetime.timedelta(days=1)).strftime("p%Y%m%d"),
(now + datetime.timedelta(days=2)).strftime("p%Y%m%d"),
(now + datetime.timedelta(days=3)).strftime("p%Y%m%d")
]
partitions_conditions = {
(now + datetime.timedelta(days=1)).strftime(
"p%Y%m%d",
): (now + datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
(now + datetime.timedelta(days=2)).strftime(
"p%Y%m%d",
): (now + datetime.timedelta(days=2)).strftime("%Y-%m-%d"),
(now + datetime.timedelta(days=3)).strftime(
"p%Y%m%d",
): (now + datetime.timedelta(days=3)).strftime("%Y-%m-%d")
}
sql = """
SELECT
partition_name
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE
table_schema=%s AND
table_name='services_servicehistory' AND
partition_name<>'p_other'
ORDER BY partition_name ASC
"""
cursor = connection.cursor()
cursor.execute(sql, [settings.DATABASES['default']['NAME']])
current_partitions = []
for row in cursor.fetchall():
current_partitions.append(row[0])
sql_parts = []
for partition_name in required_partitions:
if partition_name not in current_partitions:
sql_parts.append(
"PARTITION %s VALUES LESS THAN (TO_DAYS('%s'))" % (
partition_name, partitions_conditions[partition_name],
),
)
if not sql_parts:
return
sql = "ALTER TABLE services_servicehistory ADD PARTITION (%s)" % (
",".join(sql_parts),
)
cursor.execute(sql)
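# Worked example (illustrative dates): if today is 2014-06-10, the function
# above ensures the partitions p20140611, p20140612 and p20140613 exist, each
# added as PARTITION p20140611 VALUES LESS THAN (TO_DAYS('2014-06-11')), and
# so on for the matching date; partitions already present are skipped.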
def create_history_partitions():
queue = django_rq.get_queue(
name='archiving' if 'archiving' in settings.RQ_QUEUES else 'default',
)
queue.enqueue_call(
func=_create_history_partitions,
timeout=300,
result_ttl=0,
)
def _create_archive_partitions():
now = datetime.datetime.now()
if now.month == 12:
next_year = now.year + 1
next_month = 1
else:
next_year = now.year
next_month = now.month + 1
next_month1 = datetime.date(next_year, next_month, 1)
if next_month1.month == 12:
next_year = next_month1.year + 1
next_month = 1
else:
next_year = next_month1.year
next_month = next_month1.month + 1
next_month2 = datetime.date(next_year, next_month, 1)
required_partitions = [
next_month1.strftime("p%Y%m"),
next_month2.strftime("p%Y%m")
]
partitions_conditions = {
next_month1.strftime("p%Y%m"): next_month1.strftime("%Y-%m-01"),
next_month2.strftime("p%Y%m"): next_month2.strftime("%Y-%m-01"),
}
sql = """
SELECT
partition_name
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE
table_schema=%s AND
table_name='services_servicehistoryarchive' AND
partition_name<>'p_other'
ORDER BY partition_name ASC
"""
cursor = connection.cursor()
cursor.execute(sql, [settings.DATABASES['default']['NAME']])
current_partitions = []
for row in cursor.fetchall():
current_partitions.append(row[0])
sql_parts = []
for partition_name in required_partitions:
if partition_name not in current_partitions:
sql_parts.append(
"PARTITION %s VALUES LESS THAN (TO_DAYS('%s'))" % (
partition_name, partitions_conditions[partition_name])
)
if not sql_parts:
return
sql = "ALTER TABLE services_servicehistoryarchive ADD PARTITION (%s)" % (
",".join(sql_parts),
)
cursor.execute(sql)
def create_archive_partitions():
queue = django_rq.get_queue(
name='archiving' if 'archiving' in settings.RQ_QUEUES else 'default',
)
queue.enqueue_call(
func=_create_archive_partitions,
timeout=300,
result_ttl=0,
)
def _make_history_archive():
transaction.enter_transaction_management()
transaction.managed()
transaction.commit()
date_start = datetime.datetime.now() - datetime.timedelta(days=8)
sql = """
SELECT MIN(id) AS min_id, MAX(id) AS max_id
FROM services_servicehistory
WHERE created >= %s AND created <= %s
ORDER BY id DESC LIMIT 1
"""
cursor = connection.cursor()
cursor.execute(sql, [
date_start.strftime("%Y-%m-%d 00:00:01"),
date_start.strftime("%Y-%m-%d 23:59:59"),
])
row = cursor.fetchone()
if row is None:
return
min_deleted_id = row[0]
max_deleted_id = row[1]
if not min_deleted_id or not max_deleted_id:
return
sql = """
INSERT INTO services_servicehistoryarchive (
response_time,
namelookup_time,
connect_time,
pretransfer_time,
starttransfer_time,
redirect_time,
size_download,
speed_download,
redirect_count,
num_connects,
created,
service_id,
agent_id
)
SELECT
ROUND(AVG(response_time), 2) AS response_time,
ROUND(AVG(namelookup_time), 2) AS namelookup_time,
ROUND(AVG(connect_time), 2) AS connect_time,
ROUND(AVG(pretransfer_time), 2) AS pretransfer_time,
ROUND(AVG(starttransfer_time), 2) AS starttransfer_time,
ROUND(AVG(redirect_time), 2) AS redirect_time,
ROUND(AVG(size_download), 0) AS size_download,
ROUND(AVG(speed_download), 0) AS speed_download,
ROUND(AVG(redirect_count), 0) AS redirect_count,
ROUND(AVG(num_connects), 0) AS num_connects,
CASE
WHEN MINUTE(created) >= 45 THEN date_format(created, '%%Y-%%m-%%d %%H:45')
WHEN MINUTE(created) < 45 AND MINUTE(created) >= 30 THEN date_format(created, '%%Y-%%m-%%d %%H:30')
WHEN MINUTE(created) < 30 AND MINUTE(created) >= 15 THEN date_format(created, '%%Y-%%m-%%d %%H:15')
ELSE date_format(created, '%%Y-%%m-%%d %%H:00')
END AS created_at,
service_id,
agent_id
FROM
services_servicehistory
WHERE
created >= %s AND created <= %s
GROUP BY
created_at, service_id, agent_id;
"""
try:
cursor.execute(sql, [
date_start.strftime("%Y-%m-%d 00:00:01"),
date_start.strftime("%Y-%m-%d 23:59:59"),
])
except DatabaseError:
transaction.rollback()
return
sql = """
DELETE FROM services_servicehistoryextra
WHERE service_history_id >= %s AND service_history_id <= %s
"""
try:
cursor.execute(sql, [min_deleted_id, max_deleted_id])
except DatabaseError:
transaction.rollback()
return
sql = """
SELECT
partition_name
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE
table_schema=%s AND
table_name='services_servicehistory' AND
partition_name<>'p_other'
ORDER BY partition_name ASC
"""
try:
cursor.execute(sql, [settings.DATABASES['default']['NAME']])
except DatabaseError:
transaction.rollback()
return
current_partitions = []
for row in cursor.fetchall():
current_partitions.append(row[0])
partition_to_delete = (
date_start + datetime.timedelta(days=1)
).strftime("p%Y%m%d")
if partition_to_delete not in current_partitions:
return
sql = "ALTER TABLE services_servicehistory DROP PARTITION %s" % (
partition_to_delete,
)
try:
cursor.execute(sql)
except DatabaseError:
transaction.rollback()
return
transaction.commit()
def make_history_archive():
queue = django_rq.get_queue(
name='archiving' if 'archiving' in settings.RQ_QUEUES else 'default',
)
queue.enqueue_call(
func=_make_history_archive,
timeout=3600,
result_ttl=0,
)
def _monitor_service(service):
test_service(service)
def monitor_all():
queue = django_rq.get_queue(
name='dispacher' if 'dispacher' in settings.RQ_QUEUES else 'default',
)
services = Service.objects.filter(is_technical_break=False, is_active=True)
for service in services:
queue.enqueue_call(
func=_monitor_service,
kwargs={'service': service},
timeout=60,
result_ttl=0,
)
|
nilq/baby-python
|
python
|
# src/chara/character.py
import enum
class C_type(enum.Enum):
PLAYER = 0
NPC = 1
OPPONENT = 2
BOSS = 3
class Character():
def __init__(self,name,c_type):
types = Character.__ty()
self.name = name
self.c_type = types[c_type]
# temporary function
def identity(self):
print(str(self.name) + " : " + str(self.c_type))
    # private helpers
    @staticmethod
    def __ty():
        return {
            C_type.PLAYER: "player",
            C_type.NPC: "npc",
            C_type.OPPONENT: "opponent",
            C_type.BOSS: "boss",
        }
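# Hedged usage sketch (character name is an illustrative assumption):
def _character_demo():
    hero = Character("Ayla", C_type.PLAYER)
    hero.identity()  # prints "Ayla : player"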
|
nilq/baby-python
|
python
|
from peewee import Model, CompositeKey, ForeignKeyField
from data.db import database
from data.user import User
class Buddies(Model):
buddy1 = ForeignKeyField(User, to_field="id")
buddy2 = ForeignKeyField(User, to_field="id")
class Meta:
database = database
primary_key = CompositeKey('buddy1', 'buddy2')
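# Hedged usage sketch (assumes two existing User rows, user_a and user_b):
#   Buddies.create(buddy1=user_a, buddy2=user_b)
# The composite primary key above makes each (buddy1, buddy2) pair unique.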
|
nilq/baby-python
|
python
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
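# Example data so the snippet below is runnable; in the original context
# `dataset` holds images and `labels` their matching classes (these two
# lists are illustrative assumptions).
dataset = ["img_0", "img_1", "img_2", "img_3"]
labels = [0, 1, 2, 3]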
# set a seed for the random number distribution before shuffling the data (images)
random.seed(101)
random.shuffle(dataset)
# set the same seed before shuffling the corresponding labels to get the same random number distribution
random.seed(101)
random.shuffle(labels)
|
nilq/baby-python
|
python
|
""" Financial Modeling Prep Model """
__docformat__ = "numpy"
import pandas as pd
import FundamentalAnalysis as fa
from gamestonk_terminal import config_terminal as cfg
def get_rating(ticker: str) -> pd.DataFrame:
"""Get ratings for a given ticker. [Source: Financial Modeling Prep]
Parameters
----------
ticker : str
Stock ticker
Returns
-------
pd.DataFrame
Rating data
"""
return fa.rating(ticker, cfg.API_KEY_FINANCIALMODELINGPREP)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""tests for :mod:`online_pomdp_planning.mcts`"""
from functools import partial
from math import log, sqrt
from typing import Dict
import pytest
from online_pomdp_planning.mcts import (
ActionNode,
DeterministicNode,
MuzeroInferenceOutput,
ObservationNode,
backprop_running_q,
create_muzero_root,
create_root_node_with_child_for_all_actions,
deterministic_qval_backpropagation,
expand_node_with_all_actions,
has_simulated_n_times,
max_q_action_selector,
max_visits_action_selector,
muzero_expand_node,
random_policy,
rollout,
select_action,
select_deterministc_leaf_by_max_scores,
select_leaf_by_max_scores,
ucb,
ucb_scores,
visit_prob_action_selector,
)
from online_pomdp_planning.types import Action
from online_pomdp_planning.utils import MovingStatistic
def test_action_constructor():
"""Tests initiation of action nodes"""
stats = (True, False, 10.0)
p = ObservationNode()
n = ActionNode(stats, p)
assert stats == n.stats
assert p == n.parent
some_other_parent = ObservationNode()
some_other_statistics = (1, 2, 3, 4)
assert some_other_parent != n.parent
assert some_other_statistics != n.stats
@pytest.mark.parametrize("observation", [((0)), (False), ((0, 1))])
def test_action_node_child(observation):
"""checks getting and setting child nodes"""
root = ObservationNode()
n = ActionNode(initial_statistics=None, parent=root)
# if child not in node, do not allow fetching it
with pytest.raises(KeyError):
n.observation_node(observation)
child = ObservationNode(parent=n)
n.add_observation_node(observation, child)
# cannot modify existing child
with pytest.raises(AssertionError):
n.add_observation_node(observation, child)
# now child is in node, make sure the correct thing is returned
assert child == n.observation_node(observation)
@pytest.mark.parametrize(
"parent", [(None), (ActionNode("garbage statistic", ObservationNode()))]
)
def test_observation_node__constructor(parent):
"""Tests initiation of observation nodes"""
n = ObservationNode(parent)
assert parent == n.parent
other_node = ActionNode("garbage statistic", ObservationNode())
assert other_node != n.parent
@pytest.mark.parametrize("action", [((0)), (False), ((0, 1))])
def test_observation_node_child(action):
"""checks getting and setting child nodes"""
n = ObservationNode()
# if child not in node, do not allow fetching it
with pytest.raises(KeyError):
n.action_node(action)
child = ActionNode("some statistic", parent=n)
n.add_action_node(action, child)
# cannot modify existing child
with pytest.raises(AssertionError):
n.add_action_node(action, child)
# now child is in node, make sure the correct thing is returned
assert child == n.action_node(action)
def test_observation_child_stats():
"""Tests getting children statistics"""
node = ObservationNode()
action_1 = -0.5
child_1 = ActionNode((1, 2, 3), node)
node.add_action_node(action_1, child_1)
action_2 = True
child_2 = ActionNode((True, False, ("garbage")), node)
node.add_action_node(action_2, child_2)
assert node.child_stats == {
action_1: child_1.stats,
action_2: child_2.stats,
}
def test_deterministic_node():
"""Tests :class:`DeterministicNode`"""
root = DeterministicNode({"stat1": 1, "stat2": "bla"}, None)
assert not root.expanded
assert root.stats["stat1"] == 1
assert root.child_stats == {}
assert root.parent is None
child = DeterministicNode({"childstat1": 2}, root)
root.add_child("some_action", child)
assert root.expanded
assert not child.expanded
assert root.child("some_action") == child
assert root.parent is None
assert child.parent == root
with pytest.raises(KeyError):
root.child("other action")
assert root.stats["stat1"] == 1
assert root.child_stats == {"some_action": child.stats}
@pytest.mark.parametrize(
"n,it,expectation", [(5, 4, False), (5, 5, True), (5, 6, True), (0, 0, True)]
)
def test_has_simulated_n_times(n, it, expectation):
"""Tests :func:`online_pomdp_planning.mcts.has_simulated_n_times`"""
assert has_simulated_n_times(n, {"iteration": it}) == expectation
def test_has_simulated_n_times_asserts():
"""Tests :func:`online_pomdp_planning.mcts.has_simulated_n_times` assertions"""
with pytest.raises(AssertionError):
has_simulated_n_times(-1, {"iteration": 0})
with pytest.raises(AssertionError):
has_simulated_n_times(1, {"iteration": -1})
with pytest.raises(KeyError):
has_simulated_n_times(10, {"iteration_typo": 100})
@pytest.mark.parametrize(
"actions,init_stats",
[
([False, 1, (10, 2)], "some garbage"),
([], {"qval": 10, "n": 0}),
],
)
def test_create_root_node_with_child_for_all_actions(actions, init_stats):
"""Tests :func:`~online_pomdp_planning.mcts.create_root_node_with_child_for_all_actions`"""
node = create_root_node_with_child_for_all_actions(actions, init_stats)
for a in actions:
assert node.action_node(a).stats == init_stats
assert node.action_node(a).parent == node
assert node.action_node(a).observation_nodes == {}
def test_create_muzero_root():
"""tests :func:`create_muzero_root`"""
latent_state = "latent_state"
reward = 1.2
prior: Dict[Action, float] = {"a1": 0.2, "a3": 0.5, "a5": 0.3}
noise_dirichlet_alpha = 10
noise_exploration_fraction = 0.2
root = create_muzero_root(
latent_state, reward, prior, noise_dirichlet_alpha, noise_exploration_fraction
)
assert root.stats["latent_state"] == latent_state
assert root.stats["reward"] == reward
assert root.stats["qval"] == 0
assert root.stats["n"] == 0
stats = root.child_stats
assert len(stats) == 3
    assert sum(x["prior"] for x in stats.values()) == pytest.approx(1)
for a, stat in stats.items():
assert pytest.approx(stat["prior"]) != prior[a]
for a, stat in stats.items():
assert stat["qval"] == 0
assert stat["n"] == 0
assert stat["action"] == a
# tests on prior and setting noise
# little noise:
root = create_muzero_root(
latent_state, reward, prior, noise_dirichlet_alpha, 0.000001
)
for a, stat in root.child_stats.items():
assert pytest.approx(stat["prior"], rel=0.001) == prior[a]
# much noise:
root = create_muzero_root(latent_state, reward, prior, 100000, 1)
for a, stat in root.child_stats.items():
assert pytest.approx(stat["prior"], rel=0.01) == 1 / 3
@pytest.mark.parametrize(
"stats,max_a",
[
({0: {"useless_stuff": None, "qval": 0.1}}, 0),
({0: {"qval": -0.1}}, 0),
({0: {"qval": 0.1, "some usless things": 100}, 10: {"qval": -0.1}}, 0),
({0: {"qval": 0.1}, 10: {"qval": 1}}, 10),
({True: {"qval": 100}, 0: {"qval": 0.1}, 10: {"qval": 1}}, True),
],
)
def test_max_q_action_selector(stats, max_a):
"""tests :func:~online_pomdp_planning.mcts.max_q_action_selector"""
info = {}
assert max_q_action_selector(stats, info) == max_a
sorted_q_vals = info["max_q_action_selector-values"]
assert sorted_q_vals[0][0] == max_a
assert len(sorted_q_vals) == len(stats)
    for x in sorted_q_vals:
        assert len(x) == 2
        assert stats[x[0]]["qval"] == x[1]
@pytest.mark.parametrize(
"stats,max_a",
[
({"max_a": {"n": -1}}, "max_a"),
({"max_a": {"n": 11}, False: {"n": 10}}, "max_a"),
(
{False: {"n": 10}, True: {"uselessstuff": 10, "n": 15}, "a1": {"n": 1}},
True,
),
],
)
def test_max_visits_action_selector(stats, max_a):
"""tests :func:`max_visits_action_selector`"""
info = {}
assert max_visits_action_selector(stats, info) == max_a
act_to_visits = info["visit_action_selector-counts"]
assert len(act_to_visits) == len(stats)
assert act_to_visits[0][0] == max_a
for a, n in act_to_visits:
assert stats[a]["n"] == n
@pytest.mark.parametrize(
"stats,tot,max_a",
[
({"max_a": {"n": 1}}, 1, "max_a"),
({"max_a": {"n": 100}, False: {"n": 1}}, 101, "max_a"),
(
{False: {"n": 10}, True: {"uselessstuff": 10, "n": 10000}, "a1": {"n": 0}},
10010,
True,
),
],
)
def test_visit_prob_action_selector(stats, tot, max_a):
"""tests :func:`visit_prob_action_selector`"""
info = {}
assert visit_prob_action_selector(stats, info) == max_a
act_to_visits = info["visit_action_selector-counts"]
assert len(act_to_visits) == len(stats)
assert act_to_visits[0][0] == max_a
for a, n in act_to_visits:
assert stats[a]["n"] == n
acts_to_probs = info["visit_action_selector-probabilities"]
assert acts_to_probs[0][0] == max_a
for a, n in acts_to_probs:
assert stats[a]["n"] / tot == n
@pytest.mark.parametrize(
"o,actions,init_stats",
[
(10, [0, True, (10.0)], {"q-value": 0, "n": 0}),
(10, [0, (10.0)], {"q-value": 10, "n": 0}),
],
)
def test_expand_node_with_all_actions(o, actions, init_stats):
"""tests :func:~online_pomdp_planning.mcts.expand_node_with_all_actions"""
parent = ObservationNode()
stats = 0
node = ActionNode(stats, parent)
info = {}
expand_node_with_all_actions(actions, init_stats, o, node, info)
expansion = node.observation_node(o)
assert info["mcts_num_action_nodes"] == 1
assert expansion.parent is node
assert node.observation_node(o) is expansion
assert len(expansion.action_nodes) == len(actions)
for n in expansion.action_nodes.values():
assert len(n.observation_nodes) == 0
assert n.parent == expansion
assert n.stats == init_stats
assert n.stats is not init_stats # please be copy
def fake_muzero_recurrance_inference(
state, action, value, reward, policy, latent_state
):
"""Just fakes doing inference in muzero"""
return MuzeroInferenceOutput(value, reward, policy, latent_state)
def test_muzero_expand_node():
"""tests "py:func:`muzero_expand_node`"""
info = {}
root = DeterministicNode(
{"latent_state": "root", "reward": 0.5, "n": 0, "qval": 0.0}, None
)
first_leaf = DeterministicNode(
{"prior": 0.1, "action": "a1", "n": 3, "qval": 0.0}, root
)
root.add_child("a1", first_leaf)
assert not first_leaf.expanded
latent_state = "first_leaf_state"
reward = -0.23
value = 2.2
policy = {"a1": 0.4, "a2": 0.6}
returned_value = muzero_expand_node(
first_leaf,
info,
partial(
fake_muzero_recurrance_inference,
value=value,
reward=reward,
policy=policy,
latent_state=latent_state,
),
)
assert returned_value == value
assert first_leaf.stats["latent_state"] == latent_state
assert first_leaf.stats["reward"] == reward
assert len(first_leaf.children) == 2
for stats in first_leaf.child_stats.values():
assert stats["n"] == 0
assert stats["qval"] == 0
for a in ["a1", "a2"]:
assert first_leaf.child(a).stats["prior"] == policy[a]
@pytest.mark.parametrize(
"q,n,n_total,ucb_constant,expected_raise",
[
(123, 0, 234, 452, False),
(0, 0, -234, False, True),
(0, -1, 10, False, True),
(0, 1, 1, 0, False),
(-5.2, 1, 1, 1, False),
],
)
def test_ucb_raises(q, n, n_total, ucb_constant, expected_raise):
"""Tests that :func:`~online_pomdp_planning.mcts.ucb` raises on invalid input"""
if expected_raise:
with pytest.raises(AssertionError):
ucb(q, n, n_total, ucb_constant)
else:
ucb(q, n, n_total, ucb_constant)
@pytest.mark.parametrize(
"q,n,n_total,ucb_constant,expectation",
[
(123, 0, 234, 452, float("inf")),
(0, 1, 1, 1, sqrt(log(1) / 1)),
(-5.2, 1, 1, 1, -5.2 + sqrt(log(1) / 1)),
(134, 3, 4, 1, 134 + sqrt(log(4) / 3)),
(1, 1, 1, 50.3, 1 + 50.3 * sqrt(log(1) / 1)),
(1, 1, 10, 50.3, 1 + 50.3 * sqrt(log(10) / 1)),
],
)
def test_ucb(q, n, n_total, ucb_constant, expectation):
"""Tests :func:`~online_pomdp_planning.mcts.ucb`"""
assert ucb(q, n, n_total, ucb_constant) == expectation
def test_ucb_scores():
"""tests `func:ucb_scores`"""
u = 50.3
action_stats = {
"a1": {"qval": 10, "n": 9},
True: {"qval": 1, "n": 1},
10: {"qval": 3, "n": 0},
}
action_scores = ucb_scores(action_stats, {}, u)
assert {"a1", True, 10} == set(action_scores.keys())
assert action_scores[10] == float("inf")
assert action_scores[True] == 1 + 50.3 * sqrt(log(10) / 1)
@pytest.mark.parametrize(
"expected_action,u,stats",
[
(True, 0, {True: {"qval": 10, "n": 10000}, 2: {"qval": 9, "n": 1}}),
(2, 1, {True: {"qval": 10, "n": 10000}, 2: {"qval": 9, "n": 1}}),
(
(1, 2),
1,
{
True: {"qval": 10, "n": 10000},
2: {"qval": 9, "n": 1},
(1, 2): {"qval": 10, "n": 1},
},
),
],
)
def test_select_with_ucb(expected_action, u, stats):
"""Tests :func:`~online_pomdp_planning.mcts.select_with_ucb`"""
scoring_method = partial(ucb_scores, ucb_constant=u)
assert select_action(stats, {}, scoring_method) == expected_action
def test_select_with_ucb_is_random():
"""Tests :func:`~online_pomdp_planning.mcts.select_with_ucb` is random"""
    # actions 2 and "bla" have identical stats, so selection among them is random
stats = {
True: {"qval": 10, "n": 10000},
2: {"qval": 9, "n": 1},
"bla": {"qval": 9, "n": 1},
}
scoring_method = partial(ucb_scores, ucb_constant=10)
chosen_actions = {select_action(stats, {}, scoring_method) for _ in range(20)}
assert len(chosen_actions) == 2
def construct_ucb_tree(observation_from_simulator) -> ObservationNode:
"""Constructs a particular tree for UCB
Tree: (action -> stats or obs)
- ``False`` -> `(q=3.4, n=3)`:
- ``True``
- `(100)`
- 2:
- `(10, 2)` -> `(qval: 0, n: 0)`
    - 2 -> `(q=-2.0, n=4)`
According to UCB, the best first action is ``False``, the only second action is `(10, 2)`
"""
root = ObservationNode()
# two initial action nodes, action `False` is better
better_first_action = False
better_first_action_node = ActionNode({"qval": 3.4, "n": 3}, root)
worse_first_action = 2
worse_first_action_node = ActionNode({"qval": -2.0, "n": 4}, root)
root.add_action_node(better_first_action, better_first_action_node)
root.add_action_node(worse_first_action, worse_first_action_node)
# three observation nodes; observation `2` is returned by simulator
first_picked_observation_node = ObservationNode(better_first_action_node)
better_first_action_node.add_observation_node(
observation_from_simulator, first_picked_observation_node
)
better_first_action_node.add_observation_node(
True, ObservationNode(better_first_action_node)
)
better_first_action_node.add_observation_node(
(100), ObservationNode(better_first_action_node)
)
# one leaf action node
leaf_action_node = ActionNode({"qval": 0, "n": 0}, first_picked_observation_node)
better_first_action_node.observation_node(
observation_from_simulator
).add_action_node((10, 2), leaf_action_node)
return root
def run_ucb_select_leaf(observation_from_simulator, root, max_depth=1000):
"""Runs UCB with a typical simulator from root"""
def sim(s, a):
"""Fake simulator, returns state 0, obs 2, reward .5, not terminal, and info"""
return 0, observation_from_simulator, 0.5, False
info = {}
scoring_method = partial(ucb_scores, ucb_constant=1)
chosen_leaf, s, obs, term, rewards = select_leaf_by_max_scores(
sim=sim,
scoring_method=scoring_method,
max_depth=max_depth,
node=root,
info=info,
state=1,
)
return chosen_leaf, s, obs, term, rewards, info
def run_ucb_select_leaf_terminal_sim(observation_from_simulator, root):
"""Runs UCB with a terminal simulator from root"""
def term_sim(s, a):
"""Returns the same as :func:`sim` but sets terminal flag to ``True``"""
return 0, observation_from_simulator, 0.5, True
info = {}
scoring_method = partial(ucb_scores, ucb_constant=1)
chosen_leaf, s, obs, term, rewards = select_leaf_by_max_scores(
sim=term_sim,
scoring_method=scoring_method,
max_depth=1000,
node=root,
info=info,
state=1,
)
return chosen_leaf, s, obs, term, rewards, info
def test_select_leaf_by_max_scores():
"""A specific test on UCB to see what leaf it returns"""
observation_from_simulator = 2
root = construct_ucb_tree(observation_from_simulator)
chosen_leaf, s, obs, term, rewards, info = run_ucb_select_leaf(
observation_from_simulator, root
)
leaf_action_node = root.action_node(False).observation_node(2).action_node((10, 2))
assert chosen_leaf is leaf_action_node, "constructed tree should lead to leaf"
assert s == 0, "simulator always outputs 0 as state"
assert obs == observation_from_simulator, "better output the correct observation"
assert not term, "simulator should indicate it is not terminal"
assert rewards == [0.5, 0.5], "we did two steps of .5 reward"
assert info["ucb_tree_depth"].max == 2
assert info["ucb_num_terminal_sims"] == 0
assert info["leaf_depth"] == 2
# test max depth
for d in [1, 2]:
chosen_leaf, s, obs, term, rewards, info = run_ucb_select_leaf(
observation_from_simulator, root, max_depth=d
)
assert info["ucb_tree_depth"].max == d
assert info["leaf_depth"] == d
assert info["ucb_num_terminal_sims"] == 0
chosen_leaf, s, obs, term, rewards, info = run_ucb_select_leaf_terminal_sim(
observation_from_simulator, root
)
assert chosen_leaf is root.action_node(
False
), "constructed tree should lead to leaf"
assert s == 0, "simulator always outputs 0 as state"
assert obs == observation_from_simulator, "better output the correct observation"
    assert term, "simulator should indicate it is terminal"
    assert rewards == [0.5], "we did one step of .5 reward"
assert info["leaf_depth"] == 1
def test_select_deterministc_leaf_by_max_scores():
"""Some tests on :func:`select_deterministc_leaf_by_max_scores`"""
node_scoring_method = partial(ucb_scores, ucb_constant=10)
info = {}
# if only one leaf, should find it
root = DeterministicNode(
{"latent_state": "root", "reward": 0.5, "n": 0, "qval": 0.0}, None
)
first_leaf = DeterministicNode(
{"prior": 0.1, "action": "a1", "n": 3, "qval": 0.0}, root
)
root.add_child("a1", first_leaf)
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
first_leaf,
None,
)
assert info["ucb_tree_depth"].max == 1
# a second, better, leaf should be picked instead
second_leaf = DeterministicNode(
{"prior": 0.1, "action": "a2", "n": 3, "qval": 5.0}, root
)
root.add_child("a2", second_leaf)
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
second_leaf,
None,
)
assert info["ucb_tree_depth"].max == 1
assert info["ucb_tree_depth"].num == 2
# trying to add more nodes, should pick it
third_leaf = DeterministicNode(
{"prior": 0.1, "action": "a", "n": 3, "qval": -5.0}, second_leaf
)
second_leaf.add_child("s", third_leaf)
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
third_leaf,
None,
)
assert info["ucb_tree_depth"].max == 2
# increasing q value of first (bad) leaf should make it favourable
first_leaf.stats["qval"] = 10000
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
first_leaf,
None,
)
assert info["ucb_tree_depth"].max == 2
assert info["ucb_tree_depth"].num == 4
def test_backprop_running_q_assertion():
"""Tests that :func:`~online_pomdp_planning.mcts.backprop_running_q` raises bad discount"""
some_obs_node = ObservationNode()
with pytest.raises(AssertionError):
backprop_running_q(-1, ActionNode("gargabe", some_obs_node), [], 0, {})
with pytest.raises(AssertionError):
backprop_running_q(1.1, ActionNode("gargabe", some_obs_node), [], 0, {})
@pytest.mark.parametrize(
"discount_factor, new_q_first, new_q_leaf",
[
(0, 10.3 / 4, 7.0),
(1, 12.3 / 4, 2),
# hard math, let's not do that again (3.4*3 + .1 + .9* 7 + .9*.9*-5)
(0.9, 12.55 / 4, 7 - 4.5),
],
)
def test_backprop_running_q(discount_factor, new_q_first, new_q_leaf):
"""Tests :func:`~online_pomdp_planning.mcts.backprop_running_q`"""
observation_from_simulator = 2
root = construct_ucb_tree(observation_from_simulator)
# fake leaf node
leaf_node = root.action_node(False).observation_node(2).action_node((10, 2))
leaf_selection_output = [0.1, 7.0]
leaf_evaluation = -5
backprop_running_q(
discount_factor, leaf_node, leaf_selection_output, leaf_evaluation, {}
)
# lots of math by hand, hope this never needs to be re-computed
# basically we _know_ the path taken, the rewards, and the original tree
# so we can compute what the updated q-values and 'n' are
# q-values are running average, 'n' is just incremented
assert leaf_node.stats["n"] == 1
assert leaf_node.stats["qval"] == pytest.approx(new_q_leaf)
first_chosen_action_node = root.action_node(False)
assert first_chosen_action_node.stats["qval"] == pytest.approx(new_q_first)
assert first_chosen_action_node.stats["n"] == 4
def test_deterministic_qval_backpropagation():
"""Tests :func:`deterministic_qval_backpropagation"""
q_statistic = MovingStatistic()
q_statistic.add(5)
q_statistic.add(-1)
info = {"q_statistic": q_statistic}
# create tree
root = DeterministicNode(
{"latent_state": "root", "reward": 0.5, "n": 0, "qval": 0.0}, None
)
first_leaf = DeterministicNode(
{"prior": 0.1, "action": "a1", "n": 3, "qval": 0.0, "reward": 0}, root
)
root.add_child(first_leaf.stats["action"], first_leaf)
second_leaf = DeterministicNode(
{"prior": 0.9, "action": "a2", "n": 4, "qval": 5.0, "reward": 0.25}, first_leaf
)
first_leaf.add_child(second_leaf.stats["action"], second_leaf)
deterministic_qval_backpropagation(0.9, second_leaf, None, 9.75, info)
assert info["q_statistic"].max > 5
assert info["q_statistic"].min == -1
assert (
root.stats["n"] == 1
and first_leaf.stats["n"] == 4
and second_leaf.stats["n"] == 5
)
# (5 * 4 + 9.75 + .25) / 5
assert second_leaf.stats["qval"] == 6.0
# return = (9.75 + 0.25) * .9 = 9, (3 * 0 + 9) / 4 = 2.25
assert first_leaf.stats["qval"] == 2.25
# return = 9 * .9 + 0.5 = ..., ... / 1
assert root.stats["qval"] == 9 * 0.9 + 0.5
def test_rollout():
"""Tests :func:`~online_pomdp_planning.mcts.rollout`"""
    pol = partial(random_policy, [False, 1, (10, 2)])
discount_factor = 0.9
depth = 3
terminal = False
state = 1
obs = 0
def sim(s, a):
"""Fake simulator, returns state 0, obs 2, reward .5 and not terminal"""
return 0, 2, 0.5, False
def term_sim(s, a):
"""Returns the same as :func:`sim` but sets terminal flag to ``True``"""
return 0, 2, 0.5, True
assert (
rollout(pol, term_sim, depth, discount_factor, state, obs, t=True, info={}) == 0
)
assert rollout(pol, term_sim, 0, discount_factor, state, obs, terminal, {}) == 0
assert (
rollout(pol, term_sim, depth, discount_factor, state, obs, terminal, {}) == 0.5
), "terminal sim should allow 1 action"
assert (
rollout(pol, sim, 2, discount_factor, state, obs, terminal, {})
== 0.5 + discount_factor * 0.5
), "1 depth should allow 1 action"
if __name__ == "__main__":
pytest.main([__file__])
|
nilq/baby-python
|
python
|
from itertools import product
import torch
import dgl
from dgl.data import citation_graph
from dgl.contrib.data import load_data
from dgl import DGLGraph
from runtime.dgl.gcn import GCN, GCNSPMV
from runtime.dgl.gat import GAT, GATSPMV
from runtime.dgl.rgcn import RGCN, RGCNSPMV
from runtime.dgl.train import train_runtime
from runtime.dgl.hidden import HiddenPrint
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with HiddenPrint():
Cora = citation_graph.load_cora()
CiteSeer = citation_graph.load_citeseer()
PubMed = citation_graph.load_pubmed()
MUTAG = load_data('mutag') # fair comparison
# One training run before we start tracking duration to warm up GPU.
g = DGLGraph(Cora.graph)
g.set_n_initializer(dgl.init.zero_initializer)
g.add_edges(g.nodes(), g.nodes())
norm = torch.pow(g.in_degrees().float(), -0.5)
norm[torch.isinf(norm)] = 0
g.ndata['norm'] = norm.unsqueeze(1).to(device)
model = GCNSPMV(g, Cora.features.shape[1], Cora.num_labels).to(device)
train_runtime(model, Cora, epochs=200, device=device)
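# Added note (sketch, not in the original benchmark): `norm` is the D^{-1/2}
# factor of GCN's symmetric normalization D^{-1/2} A D^{-1/2}; the isinf guard
# zeroes entries for zero-in-degree nodes (defensive here, since self-loops
# were just added), e.g. torch.tensor([1., 4.]).pow(-0.5) -> [1.0, 0.5].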
for d, Net in product([Cora, CiteSeer, PubMed], [GCN, GCNSPMV, GAT, GATSPMV]):
g = DGLGraph(d.graph)
g.set_n_initializer(dgl.init.zero_initializer)
g.add_edges(g.nodes(), g.nodes())
norm = torch.pow(g.in_degrees().float(), -0.5)
norm[torch.isinf(norm)] = 0
g.ndata['norm'] = norm.unsqueeze(1).to(device)
model = Net(g, d.features.shape[1], d.num_labels).to(device)
t = train_runtime(model, d, epochs=200, device=device)
print(f'{d.name} - {Net.__name__}: {t:.2f}s')
for d, Net in product([MUTAG], [RGCN, RGCNSPMV]):
g = DGLGraph()
g.add_nodes(d.num_nodes)
g.add_edges(d.edge_src, d.edge_dst)
edge_type = torch.from_numpy(d.edge_type).to(device)
edge_norm = torch.from_numpy(d.edge_norm).to(device)
g.edata.update({'type': edge_type, 'norm': edge_norm})
g.ndata['id'] = torch.arange(d.num_nodes, dtype=torch.long, device=device)
    model = Net(g, d.num_nodes, d.num_classes, d.num_rels).to(device)  # move to device, consistent with the loop above
t = train_runtime(model, d, epochs=200, device=device)
print(f'{d.name} - {Net.__name__}: {t:.2f}s')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import threading
from datetime import date, datetime, timedelta
from psycopg2 import sql
from odoo import api, fields, models, tools, SUPERUSER_ID
from odoo.osv import expression
from odoo.tools.translate import _
from odoo.tools import email_re, email_split
from odoo.exceptions import UserError, AccessError
from odoo.addons.phone_validation.tools import phone_validation
from collections import OrderedDict, defaultdict
from . import crm_stage
_logger = logging.getLogger(__name__)
CRM_LEAD_FIELDS_TO_MERGE = [
'name',
'partner_id',
'campaign_id',
'company_id',
'country_id',
'team_id',
'state_id',
'stage_id',
'medium_id',
'source_id',
'user_id',
'title',
'city',
'contact_name',
'description',
'mobile',
'partner_name',
'phone',
'probability',
'expected_revenue',
'street',
'street2',
'zip',
'create_date',
'date_action_last',
'email_from',
'email_cc',
'website']
# Subset of partner fields: sync any of those
PARTNER_FIELDS_TO_SYNC = [
'mobile',
'title',
'function',
'website',
]
# Subset of partner fields: sync all or none to avoid mixed addresses
PARTNER_ADDRESS_FIELDS_TO_SYNC = [
'street',
'street2',
'city',
'zip',
'state_id',
'country_id',
]
# Those values have been determined based on benchmark to minimise
# computation time, number of transaction and transaction time.
PLS_COMPUTE_BATCH_STEP = 50000 # odoo.models.PREFETCH_MAX = 1000 but larger cluster can speed up global computation
PLS_UPDATE_BATCH_STEP = 5000
class Lead(models.Model):
_name = "crm.lead"
_description = "Lead/Opportunity"
_order = "priority desc, id desc"
_inherit = ['mail.thread.cc',
'mail.thread.blacklist',
'mail.thread.phone',
'mail.activity.mixin',
'utm.mixin',
'format.address.mixin',
'phone.validation.mixin']
_primary_email = 'email_from'
# Description
name = fields.Char(
'Opportunity', index=True, required=True,
compute='_compute_name', readonly=False, store=True)
user_id = fields.Many2one('res.users', string='Salesperson', index=True, tracking=True, default=lambda self: self.env.user)
user_email = fields.Char('User Email', related='user_id.email', readonly=True)
user_login = fields.Char('User Login', related='user_id.login', readonly=True)
company_id = fields.Many2one('res.company', string='Company', index=True, default=lambda self: self.env.company.id)
referred = fields.Char('Referred By')
description = fields.Text('Notes')
active = fields.Boolean('Active', default=True, tracking=True)
type = fields.Selection([
('lead', 'Lead'), ('opportunity', 'Opportunity')],
index=True, required=True, tracking=15,
default=lambda self: 'lead' if self.env['res.users'].has_group('crm.group_use_lead') else 'opportunity')
priority = fields.Selection(
crm_stage.AVAILABLE_PRIORITIES, string='Priority', index=True,
default=crm_stage.AVAILABLE_PRIORITIES[0][0])
team_id = fields.Many2one(
'crm.team', string='Sales Team', index=True, tracking=True,
compute='_compute_team_id', readonly=False, store=True)
stage_id = fields.Many2one(
'crm.stage', string='Stage', index=True, tracking=True,
compute='_compute_stage_id', readonly=False, store=True,
copy=False, group_expand='_read_group_stage_ids', ondelete='restrict',
domain="['|', ('team_id', '=', False), ('team_id', '=', team_id)]")
kanban_state = fields.Selection([
('grey', 'No next activity planned'),
('red', 'Next activity late'),
('green', 'Next activity is planned')], string='Kanban State',
compute='_compute_kanban_state')
activity_date_deadline_my = fields.Date(
'My Activities Deadline', compute='_compute_activity_date_deadline_my',
search='_search_activity_date_deadline_my', compute_sudo=False,
readonly=True, store=False, groups="base.group_user")
tag_ids = fields.Many2many(
'crm.tag', 'crm_tag_rel', 'lead_id', 'tag_id', string='Tags',
help="Classify and analyze your lead/opportunity categories like: Training, Service")
color = fields.Integer('Color Index', default=0)
# Opportunity specific
expected_revenue = fields.Monetary('Expected Revenue', currency_field='company_currency', tracking=True)
prorated_revenue = fields.Monetary('Prorated Revenue', currency_field='company_currency', store=True, compute="_compute_prorated_revenue")
recurring_revenue = fields.Monetary('Recurring Revenues', currency_field='company_currency', groups="crm.group_use_recurring_revenues")
recurring_plan = fields.Many2one('crm.recurring.plan', string="Recurring Plan", groups="crm.group_use_recurring_revenues")
recurring_revenue_monthly = fields.Monetary('Expected MRR', currency_field='company_currency', store=True,
compute="_compute_recurring_revenue_monthly",
groups="crm.group_use_recurring_revenues")
recurring_revenue_monthly_prorated = fields.Monetary('Prorated MRR', currency_field='company_currency', store=True,
compute="_compute_recurring_revenue_monthly_prorated",
groups="crm.group_use_recurring_revenues")
company_currency = fields.Many2one("res.currency", string='Currency', related='company_id.currency_id', readonly=True)
# Dates
date_closed = fields.Datetime('Closed Date', readonly=True, copy=False)
date_action_last = fields.Datetime('Last Action', readonly=True)
date_open = fields.Datetime(
'Assignment Date', compute='_compute_date_open', readonly=True, store=True)
day_open = fields.Float('Days to Assign', compute='_compute_day_open', store=True)
day_close = fields.Float('Days to Close', compute='_compute_day_close', store=True)
date_last_stage_update = fields.Datetime(
'Last Stage Update', compute='_compute_date_last_stage_update', index=True, readonly=True, store=True)
date_conversion = fields.Datetime('Conversion Date', readonly=True)
date_deadline = fields.Date('Expected Closing', help="Estimate of the date on which the opportunity will be won.")
# Customer / contact
partner_id = fields.Many2one(
'res.partner', string='Customer', index=True, tracking=10,
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]",
help="Linked partner (optional). Usually created when converting the lead. You can find a partner by its Name, TIN, Email or Internal Reference.")
partner_is_blacklisted = fields.Boolean('Partner is blacklisted', related='partner_id.is_blacklisted', readonly=True)
contact_name = fields.Char(
'Contact Name', tracking=30,
compute='_compute_contact_name', readonly=False, store=True)
partner_name = fields.Char(
'Company Name', tracking=20, index=True,
compute='_compute_partner_name', readonly=False, store=True,
help='The name of the future partner company that will be created while converting the lead into opportunity')
function = fields.Char('Job Position', compute='_compute_function', readonly=False, store=True)
title = fields.Many2one('res.partner.title', string='Title', compute='_compute_title', readonly=False, store=True)
email_from = fields.Char(
'Email', tracking=40, index=True,
compute='_compute_email_from', inverse='_inverse_email_from', readonly=False, store=True)
phone = fields.Char(
'Phone', tracking=50,
compute='_compute_phone', inverse='_inverse_phone', readonly=False, store=True)
mobile = fields.Char('Mobile', compute='_compute_mobile', readonly=False, store=True)
phone_mobile_search = fields.Char('Phone/Mobile', store=False, search='_search_phone_mobile_search')
phone_state = fields.Selection([
('correct', 'Correct'),
('incorrect', 'Incorrect')], string='Phone Quality', compute="_compute_phone_state", store=True)
email_state = fields.Selection([
('correct', 'Correct'),
('incorrect', 'Incorrect')], string='Email Quality', compute="_compute_email_state", store=True)
website = fields.Char('Website', index=True, help="Website of the contact", compute="_compute_website", readonly=False, store=True)
lang_id = fields.Many2one('res.lang', string='Language')
# Address fields
street = fields.Char('Street', compute='_compute_partner_address_values', readonly=False, store=True)
street2 = fields.Char('Street2', compute='_compute_partner_address_values', readonly=False, store=True)
zip = fields.Char('Zip', change_default=True, compute='_compute_partner_address_values', readonly=False, store=True)
city = fields.Char('City', compute='_compute_partner_address_values', readonly=False, store=True)
state_id = fields.Many2one(
"res.country.state", string='State',
compute='_compute_partner_address_values', readonly=False, store=True,
domain="[('country_id', '=?', country_id)]")
country_id = fields.Many2one(
'res.country', string='Country',
compute='_compute_partner_address_values', readonly=False, store=True)
# Probability (Opportunity only)
probability = fields.Float(
'Probability', group_operator="avg", copy=False,
compute='_compute_probabilities', readonly=False, store=True)
automated_probability = fields.Float('Automated Probability', compute='_compute_probabilities', readonly=True, store=True)
is_automated_probability = fields.Boolean('Is automated probability?', compute="_compute_is_automated_probability")
# External records
meeting_count = fields.Integer('# Meetings', compute='_compute_meeting_count')
lost_reason = fields.Many2one(
'crm.lost.reason', string='Lost Reason',
index=True, ondelete='restrict', tracking=True)
ribbon_message = fields.Char('Ribbon message', compute='_compute_ribbon_message')
_sql_constraints = [
('check_probability', 'check(probability >= 0 and probability <= 100)', 'The probability of closing the deal should be between 0% and 100%!')
]
@api.depends('activity_date_deadline')
def _compute_kanban_state(self):
today = date.today()
for lead in self:
kanban_state = 'grey'
if lead.activity_date_deadline:
lead_date = fields.Date.from_string(lead.activity_date_deadline)
if lead_date >= today:
kanban_state = 'green'
else:
kanban_state = 'red'
lead.kanban_state = kanban_state
@api.depends('activity_ids.date_deadline')
@api.depends_context('uid')
def _compute_activity_date_deadline_my(self):
todo_activities = []
if self.ids:
todo_activities = self.env['mail.activity'].search([
('user_id', '=', self._uid),
('res_model', '=', self._name),
('res_id', 'in', self.ids)
], order='date_deadline ASC')
for record in self:
record.activity_date_deadline_my = next(
(activity.date_deadline for activity in todo_activities if activity.res_id == record.id),
False
)
def _search_activity_date_deadline_my(self, operator, operand):
return ['&', ('activity_ids.user_id', '=', self._uid), ('activity_ids.date_deadline', operator, operand)]
@api.depends('user_id', 'type')
def _compute_team_id(self):
""" When changing the user, also set a team_id or restrict team id
to the ones user_id is member of. """
for lead in self:
# setting user as void should not trigger a new team computation
if not lead.user_id:
continue
user = lead.user_id
if lead.team_id and user in lead.team_id.member_ids | lead.team_id.user_id:
continue
team_domain = [('use_leads', '=', True)] if lead.type == 'lead' else [('use_opportunities', '=', True)]
team = self.env['crm.team']._get_default_team_id(user_id=user.id, domain=team_domain)
lead.team_id = team.id
@api.depends('team_id', 'type')
def _compute_stage_id(self):
for lead in self:
if not lead.stage_id:
lead.stage_id = lead._stage_find(domain=[('fold', '=', False)]).id
@api.depends('user_id')
def _compute_date_open(self):
for lead in self:
lead.date_open = fields.Datetime.now() if lead.user_id else False
@api.depends('stage_id')
def _compute_date_last_stage_update(self):
for lead in self:
lead.date_last_stage_update = fields.Datetime.now()
@api.depends('create_date', 'date_open')
def _compute_day_open(self):
""" Compute difference between create date and open date """
leads = self.filtered(lambda l: l.date_open and l.create_date)
others = self - leads
others.day_open = None
for lead in leads:
date_create = fields.Datetime.from_string(lead.create_date).replace(microsecond=0)
date_open = fields.Datetime.from_string(lead.date_open)
lead.day_open = abs((date_open - date_create).days)
@api.depends('create_date', 'date_closed')
def _compute_day_close(self):
""" Compute difference between current date and log date """
leads = self.filtered(lambda l: l.date_closed and l.create_date)
others = self - leads
others.day_close = None
for lead in leads:
date_create = fields.Datetime.from_string(lead.create_date)
date_close = fields.Datetime.from_string(lead.date_closed)
lead.day_close = abs((date_close - date_create).days)
@api.depends('partner_id')
def _compute_name(self):
for lead in self:
if not lead.name and lead.partner_id and lead.partner_id.name:
lead.name = _("%s's opportunity") % lead.partner_id.name
@api.depends('partner_id')
def _compute_contact_name(self):
""" compute the new values when partner_id has changed """
for lead in self:
lead.update(lead._prepare_contact_name_from_partner(lead.partner_id))
@api.depends('partner_id')
def _compute_partner_name(self):
""" compute the new values when partner_id has changed """
for lead in self:
lead.update(lead._prepare_partner_name_from_partner(lead.partner_id))
@api.depends('partner_id')
def _compute_function(self):
""" compute the new values when partner_id has changed """
for lead in self:
if not lead.function or lead.partner_id.function:
lead.function = lead.partner_id.function
@api.depends('partner_id')
def _compute_title(self):
""" compute the new values when partner_id has changed """
for lead in self:
if not lead.title or lead.partner_id.title:
lead.title = lead.partner_id.title
@api.depends('partner_id')
def _compute_mobile(self):
""" compute the new values when partner_id has changed """
for lead in self:
if not lead.mobile or lead.partner_id.mobile:
lead.mobile = lead.partner_id.mobile
@api.depends('partner_id')
def _compute_website(self):
""" compute the new values when partner_id has changed """
for lead in self:
if not lead.website or lead.partner_id.website:
lead.website = lead.partner_id.website
@api.depends('partner_id')
def _compute_partner_address_values(self):
""" Sync all or none of address fields """
for lead in self:
lead.update(lead._prepare_address_values_from_partner(lead.partner_id))
@api.depends('partner_id.email')
def _compute_email_from(self):
for lead in self:
if lead.partner_id.email and lead.partner_id.email != lead.email_from:
lead.email_from = lead.partner_id.email
def _inverse_email_from(self):
for lead in self:
if lead.partner_id and lead.email_from != lead.partner_id.email:
# force reset
if not lead.email_from or not lead.partner_id.email:
lead.partner_id.email = lead.email_from
# compare formatted values as we may have formatting differences between equivalent email
else:
lead_email_normalized = tools.email_normalize(lead.email_from)
partner_email_normalized = tools.email_normalize(lead.partner_id.email)
if lead_email_normalized != partner_email_normalized:
lead.partner_id.email = lead.email_from
@api.depends('partner_id.phone')
def _compute_phone(self):
for lead in self:
if lead.partner_id.phone and lead.phone != lead.partner_id.phone:
lead.phone = lead.partner_id.phone
def _inverse_phone(self):
for lead in self:
if lead.partner_id and lead.phone != lead.partner_id.phone:
# force reset
if not lead.phone or not lead.partner_id.phone:
lead.partner_id.phone = lead.phone
# compare formatted values as we may have encoding differences between equivalent numbers
else:
lead_phone_formatted = lead.phone_format(lead.phone)
partner_phone_formatted = lead.phone_format(lead.partner_id.phone)
if lead_phone_formatted != partner_phone_formatted:
lead.partner_id.phone = lead.phone
@api.depends('phone', 'country_id.code')
def _compute_phone_state(self):
for lead in self:
phone_status = False
if lead.phone:
country_code = lead.country_id.code if lead.country_id and lead.country_id.code else None
try:
if phone_validation.phone_parse(lead.phone, country_code): # otherwise library not installed
phone_status = 'correct'
except UserError:
phone_status = 'incorrect'
lead.phone_state = phone_status
@api.depends('email_from')
def _compute_email_state(self):
for lead in self:
email_state = False
if lead.email_from:
email_state = 'incorrect'
for email in email_split(lead.email_from):
if tools.email_normalize(email):
email_state = 'correct'
break
lead.email_state = email_state
@api.depends('probability', 'automated_probability')
def _compute_is_automated_probability(self):
""" If probability and automated_probability are equal probability computation
is considered as automatic, aka probability is sync with automated_probability """
for lead in self:
lead.is_automated_probability = tools.float_compare(lead.probability, lead.automated_probability, 2) == 0
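    # Added illustration (hypothetical values): with precision_digits=2,
    # tools.float_compare treats 33.334 and 33.336 as equal (both round to
    # 33.33), so tiny recompute drift does not flip is_automated_probability.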
@api.depends(lambda self: ['tag_ids', 'stage_id', 'team_id'] + self._pls_get_safe_fields())
def _compute_probabilities(self):
lead_probabilities = self._pls_get_naive_bayes_probabilities()
for lead in self:
if lead.id in lead_probabilities:
was_automated = lead.active and lead.is_automated_probability
lead.automated_probability = lead_probabilities[lead.id]
if was_automated:
lead.probability = lead.automated_probability
@api.depends('expected_revenue', 'probability')
def _compute_prorated_revenue(self):
for lead in self:
lead.prorated_revenue = round((lead.expected_revenue or 0.0) * (lead.probability or 0) / 100.0, 2)
@api.depends('recurring_revenue', 'recurring_plan.number_of_months')
def _compute_recurring_revenue_monthly(self):
for lead in self:
lead.recurring_revenue_monthly = (lead.recurring_revenue or 0.0) / (lead.recurring_plan.number_of_months or 1)
@api.depends('recurring_revenue_monthly', 'probability')
def _compute_recurring_revenue_monthly_prorated(self):
for lead in self:
lead.recurring_revenue_monthly_prorated = (lead.recurring_revenue_monthly or 0.0) * (lead.probability or 0) / 100.0
def _compute_meeting_count(self):
if self.ids:
meeting_data = self.env['calendar.event'].sudo().read_group([
('opportunity_id', 'in', self.ids)
], ['opportunity_id'], ['opportunity_id'])
mapped_data = {m['opportunity_id'][0]: m['opportunity_id_count'] for m in meeting_data}
else:
mapped_data = dict()
for lead in self:
lead.meeting_count = mapped_data.get(lead.id, 0)
@api.depends('email_from', 'phone', 'partner_id')
def _compute_ribbon_message(self):
for lead in self:
# beware: void user input gives '' which is different from False
lead_email_normalized = tools.email_normalize(lead.email_from) or (lead.email_from if lead.email_from else False)
partner_email_normalized = tools.email_normalize(lead.partner_id.email) or lead.partner_id.email
will_write_email = lead_email_normalized != partner_email_normalized if lead.partner_id else False
will_write_phone = False
if lead.partner_id and lead.phone != lead.partner_id.phone:
# if reset -> obviously new value will be propagated
if not lead.phone or not lead.partner_id.phone:
will_write_phone = True
# otherwise compare formatted values as we may have encoding differences
else:
lead_phone_formatted = lead.phone_format(lead.phone)
partner_phone_formatted = lead.phone_format(lead.partner_id.phone)
if lead_phone_formatted != partner_phone_formatted:
will_write_phone = True
if will_write_email and will_write_phone:
lead.ribbon_message = _('By saving this change, the customer email and phone number will also be updated.')
elif will_write_email:
lead.ribbon_message = _('By saving this change, the customer email will also be updated.')
elif will_write_phone:
lead.ribbon_message = _('By saving this change, the customer phone number will also be updated.')
else:
lead.ribbon_message = False
def _search_phone_mobile_search(self, operator, value):
if len(value) <= 2:
raise UserError(_('Please enter at least 3 digits when searching on phone / mobile.'))
query = f"""
SELECT model.id
FROM {self._table} model
WHERE REGEXP_REPLACE(model.phone, '[^\d+]+', '', 'g') SIMILAR TO CONCAT(%s, REGEXP_REPLACE(%s, '\D+', '', 'g'), '%%')
OR REGEXP_REPLACE(model.mobile, '[^\d+]+', '', 'g') SIMILAR TO CONCAT(%s, REGEXP_REPLACE(%s, '\D+', '', 'g'), '%%')
"""
        # searching on +32485112233 should also find 0032485112233 (00 / + prefixes are both valid)
# we therefore remove it from input value and search for both of them in db
if value.startswith('+') or value.startswith('00'):
if value.startswith('00'):
value = value[2:]
            starts_with = r'00|\+'
else:
starts_with = '%'
self._cr.execute(query, (starts_with, value, starts_with, value))
res = self._cr.fetchall()
if not res:
return [(0, '=', 1)]
return [('id', 'in', [r[0] for r in res])]
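    # Added example (illustrative): searching '+32485112233' strips the '+'
    # and queries with starts_with = '00|\+', intending to match rows stored
    # as either '+32485112233' or '0032485112233'; other inputs keep '%' and
    # match the digit sequence anywhere in the stored number.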
@api.onchange('phone', 'country_id', 'company_id')
def _onchange_phone_validation(self):
if self.phone:
self.phone = self.phone_format(self.phone)
@api.onchange('mobile', 'country_id', 'company_id')
def _onchange_mobile_validation(self):
if self.mobile:
self.mobile = self.phone_format(self.mobile)
def _prepare_values_from_partner(self, partner):
""" Get a dictionary with values coming from partner information to
copy on a lead. Non-address fields get the current lead
values to avoid being reset if partner has no value for them. """
# Sync all address fields from partner, or none, to avoid mixing them.
values = self._prepare_address_values_from_partner(partner)
# For other fields, get the info from the partner, but only if set
values.update({f: partner[f] or self[f] for f in PARTNER_FIELDS_TO_SYNC})
# Fields with specific logic
values.update(self._prepare_contact_name_from_partner(partner))
values.update(self._prepare_partner_name_from_partner(partner))
return self._convert_to_write(values)
def _prepare_address_values_from_partner(self, partner):
# Sync all address fields from partner, or none, to avoid mixing them.
if any(partner[f] for f in PARTNER_ADDRESS_FIELDS_TO_SYNC):
values = {f: partner[f] for f in PARTNER_ADDRESS_FIELDS_TO_SYNC}
else:
values = {f: self[f] for f in PARTNER_ADDRESS_FIELDS_TO_SYNC}
return values
def _prepare_contact_name_from_partner(self, partner):
contact_name = False if partner.is_company else partner.name
return {'contact_name': contact_name or self.contact_name}
def _prepare_partner_name_from_partner(self, partner):
partner_name = partner.parent_id.name
if not partner_name and partner.is_company:
partner_name = partner.name
return {'partner_name': partner_name or self.partner_name}
# ------------------------------------------------------------
# ORM
# ------------------------------------------------------------
def _auto_init(self):
res = super(Lead, self)._auto_init()
tools.create_index(self._cr, 'crm_lead_user_id_team_id_type_index',
self._table, ['user_id', 'team_id', 'type'])
tools.create_index(self._cr, 'crm_lead_create_date_team_id_idx',
self._table, ['create_date', 'team_id'])
return res
@api.model_create_multi
def create(self, vals_list):
for vals in vals_list:
if vals.get('website'):
vals['website'] = self.env['res.partner']._clean_website(vals['website'])
leads = super(Lead, self).create(vals_list)
for lead, values in zip(leads, vals_list):
if any(field in ['active', 'stage_id'] for field in values):
lead._handle_won_lost(values)
return leads
def write(self, vals):
if vals.get('website'):
vals['website'] = self.env['res.partner']._clean_website(vals['website'])
# stage change: update date_last_stage_update
if 'stage_id' in vals:
stage_id = self.env['crm.stage'].browse(vals['stage_id'])
if stage_id.is_won:
vals.update({'probability': 100, 'automated_probability': 100})
# stage change with new stage: update probability and date_closed
if vals.get('probability', 0) >= 100 or not vals.get('active', True):
vals['date_closed'] = fields.Datetime.now()
elif 'probability' in vals:
vals['date_closed'] = False
if any(field in ['active', 'stage_id'] for field in vals):
self._handle_won_lost(vals)
write_result = super(Lead, self).write(vals)
return write_result
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
""" Override to support ordering on activity_date_deadline_my.
Ordering through web client calls search_read with an order parameter set.
        Search_read then calls search. This override therefore intercepts a search
        without count that has an order on activity_date_deadline_my.
        In that case we do the search in two steps.
First step: fill with deadline-based results
* Perform a read_group on my activities to get a mapping lead_id / deadline
Remember date_deadline is required, we always have a value for it. Only
the earliest deadline per lead is kept.
* Search leads linked to those activities that also match the asked domain
and order from the original search request.
* Results of that search will be at the top of returned results. Use limit
None because we have to search all leads linked to activities as ordering
on deadline is done in post processing.
* Reorder them according to deadline asc or desc depending on original
search ordering. Finally take only a subset of those leads to fill with
results matching asked offset / limit.
        Second step: fill with other results. If the first step does not give enough
        results to match the offset and limit parameters, we fill with a search on
        other leads. We keep the asked domain and ordering while filtering out
        already scanned leads to keep the results coherent.
All other search and search_read are left untouched by this override to avoid
side effects. Search_count is not affected by this override.
"""
if count or not order or 'activity_date_deadline_my' not in order:
return super(Lead, self).search(args, offset=offset, limit=limit, order=order, count=count)
order_items = [order_item.strip().lower() for order_item in (order or self._order).split(',')]
# Perform a read_group on my activities to get a mapping lead_id / deadline
# Remember date_deadline is required, we always have a value for it. Only
# the earliest deadline per lead is kept.
activity_asc = any('activity_date_deadline_my asc' in item for item in order_items)
my_lead_activities = self.env['mail.activity'].read_group(
[('res_model', '=', self._name), ('user_id', '=', self.env.uid)],
['res_id', 'date_deadline:min'],
['res_id'],
orderby='date_deadline ASC'
)
my_lead_mapping = dict((item['res_id'], item['date_deadline']) for item in my_lead_activities)
my_lead_ids = list(my_lead_mapping.keys())
my_lead_domain = expression.AND([[('id', 'in', my_lead_ids)], args])
my_lead_order = ', '.join(item for item in order_items if 'activity_date_deadline_my' not in item)
# Search leads linked to those activities and order them. See docstring
# of this method for more details.
search_res = super(Lead, self).search(my_lead_domain, offset=0, limit=None, order=my_lead_order, count=count)
my_lead_ids_ordered = sorted(search_res.ids, key=lambda lead_id: my_lead_mapping[lead_id], reverse=not activity_asc)
# keep only requested window (offset + limit, or offset+)
my_lead_ids_keep = my_lead_ids_ordered[offset:(offset + limit)] if limit else my_lead_ids_ordered[offset:]
# keep list of already skipped lead ids to exclude them from future search
my_lead_ids_skip = my_lead_ids_ordered[:(offset + limit)] if limit else my_lead_ids_ordered
# do not go further if limit is achieved
if limit and len(my_lead_ids_keep) >= limit:
return self.browse(my_lead_ids_keep)
        # Fill with remaining leads. If a limit is given, subtract the number of
        # leads already fetched; otherwise keep it as None. If an offset is set,
        # reduce it by the number of results already fetched above. Order is updated
        # to exclude activity_date_deadline_my when calling super().
lead_limit = (limit - len(my_lead_ids_keep)) if limit else None
if offset:
lead_offset = max((offset - len(search_res), 0))
else:
lead_offset = 0
lead_order = ', '.join(item for item in order_items if 'activity_date_deadline_my' not in item)
other_lead_res = super(Lead, self).search(
expression.AND([[('id', 'not in', my_lead_ids_skip)], args]),
offset=lead_offset, limit=lead_limit, order=lead_order, count=count
)
return self.browse(my_lead_ids_keep) + other_lead_res
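    # Added worked example (hypothetical numbers): offset=10, limit=20 and 14
    # leads with my activities match the domain. Step one keeps ids[10:30],
    # i.e. 4 leads; step two then fetches limit - 4 = 16 more leads with
    # offset max(10 - 14, 0) = 0, excluding the 14 already-scanned ids.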
def _handle_won_lost(self, vals):
""" This method handle the state changes :
- To lost : We need to increment corresponding lost count in scoring frequency table
- To won : We need to increment corresponding won count in scoring frequency table
- From lost to Won : We need to decrement corresponding lost count + increment corresponding won count
in scoring frequency table.
- From won to lost : We need to decrement corresponding won count + increment corresponding lost count
in scoring frequency table."""
Lead = self.env['crm.lead']
leads_reach_won = Lead
leads_leave_won = Lead
leads_reach_lost = Lead
leads_leave_lost = Lead
won_stage_ids = self.env['crm.stage'].search([('is_won', '=', True)]).ids
for lead in self:
if 'stage_id' in vals:
if vals['stage_id'] in won_stage_ids:
if lead.probability == 0:
leads_leave_lost |= lead
leads_reach_won |= lead
elif lead.stage_id.id in won_stage_ids and lead.active: # a lead can be lost at won_stage
leads_leave_won |= lead
if 'active' in vals:
if not vals['active'] and lead.active: # archive lead
if lead.stage_id.id in won_stage_ids and lead not in leads_leave_won:
leads_leave_won |= lead
leads_reach_lost |= lead
elif vals['active'] and not lead.active: # restore lead
leads_leave_lost |= lead
leads_reach_won._pls_increment_frequencies(to_state='won')
leads_leave_won._pls_increment_frequencies(from_state='won')
leads_reach_lost._pls_increment_frequencies(to_state='lost')
leads_leave_lost._pls_increment_frequencies(from_state='lost')
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
self.ensure_one()
# set default value in context, if not already set (Put stage to 'new' stage)
context = dict(self._context)
context.setdefault('default_type', self.type)
context.setdefault('default_team_id', self.team_id.id)
# Set date_open to today if it is an opp
default = default or {}
default['date_open'] = fields.Datetime.now() if self.type == 'opportunity' else False
# Do not assign to an archived user
if not self.user_id.active:
default['user_id'] = False
if not self.env.user.has_group('crm.group_use_recurring_revenues'):
default['recurring_revenue'] = 0
default['recurring_plan'] = False
return super(Lead, self.with_context(context)).copy(default=default)
@api.model
def _fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
if self._context.get('opportunity_id'):
opportunity = self.browse(self._context['opportunity_id'])
action = opportunity.get_formview_action()
if action.get('views') and any(view_id for view_id in action['views'] if view_id[1] == view_type):
view_id = next(view_id[0] for view_id in action['views'] if view_id[1] == view_type)
res = super(Lead, self)._fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
res['arch'] = self._fields_view_get_address(res['arch'])
return res
@api.model
def _read_group_stage_ids(self, stages, domain, order):
# retrieve team_id from the context and write the domain
# - ('id', 'in', stages.ids): add columns that should be present
# - OR ('fold', '=', False): add default columns that are not folded
        # - OR ('team_id', '=', team_id), ('fold', '=', False) if team_id: add team columns that are not folded
team_id = self._context.get('default_team_id')
if team_id:
search_domain = ['|', ('id', 'in', stages.ids), '|', ('team_id', '=', False), ('team_id', '=', team_id)]
else:
search_domain = ['|', ('id', 'in', stages.ids), ('team_id', '=', False)]
# perform search
stage_ids = stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID)
return stages.browse(stage_ids)
def _stage_find(self, team_id=False, domain=None, order='sequence'):
""" Determine the stage of the current lead with its teams, the given domain and the given team_id
:param team_id
:param domain : base search domain for stage
:returns crm.stage recordset
"""
# collect all team_ids by adding given one, and the ones related to the current leads
team_ids = set()
if team_id:
team_ids.add(team_id)
for lead in self:
if lead.team_id:
team_ids.add(lead.team_id.id)
# generate the domain
if team_ids:
search_domain = ['|', ('team_id', '=', False), ('team_id', 'in', list(team_ids))]
else:
search_domain = [('team_id', '=', False)]
# AND with the domain in parameter
if domain:
search_domain += list(domain)
# perform search, return the first found
return self.env['crm.stage'].search(search_domain, order=order, limit=1)
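    # Added example (illustrative): for leads on teams 1 and 3 with
    # domain=[('fold', '=', False)], the search domain becomes
    # ['|', ('team_id', '=', False), ('team_id', 'in', [1, 3]),
    #  ('fold', '=', False)] and the lowest-sequence matching stage is used.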
# ------------------------------------------------------------
# ACTIONS
# ------------------------------------------------------------
def toggle_active(self):
""" When archiving: mark probability as 0. When re-activating
update probability again, for leads and opportunities. """
res = super(Lead, self).toggle_active()
activated = self.filtered(lambda lead: lead.active)
archived = self.filtered(lambda lead: not lead.active)
if activated:
activated.write({'lost_reason': False})
activated._compute_probabilities()
if archived:
archived.write({'probability': 0, 'automated_probability': 0})
return res
def action_set_lost(self, **additional_values):
""" Lost semantic: probability = 0 or active = False """
res = self.action_archive()
if additional_values:
self.write(dict(additional_values))
return res
def action_set_won(self):
""" Won semantic: probability = 100 (active untouched) """
self.action_unarchive()
# group the leads by team_id, in order to write once by values couple (each write leads to frequency increment)
leads_by_won_stage = {}
for lead in self:
stage_id = lead._stage_find(domain=[('is_won', '=', True)])
if stage_id in leads_by_won_stage:
leads_by_won_stage[stage_id] |= lead
else:
leads_by_won_stage[stage_id] = lead
for won_stage_id, leads in leads_by_won_stage.items():
leads.write({'stage_id': won_stage_id.id, 'probability': 100})
return True
def action_set_automated_probability(self):
self.write({'probability': self.automated_probability})
def action_set_won_rainbowman(self):
self.ensure_one()
self.action_set_won()
message = self._get_rainbowman_message()
if message:
return {
'effect': {
'fadeout': 'slow',
'message': message,
'img_url': '/web/image/%s/%s/image_1024' % (self.team_id.user_id._name, self.team_id.user_id.id) if self.team_id.user_id.image_1024 else '/web/static/src/img/smile.svg',
'type': 'rainbow_man',
}
}
return True
def get_rainbowman_message(self):
self.ensure_one()
if self.stage_id.is_won:
return self._get_rainbowman_message()
return False
def _get_rainbowman_message(self):
message = False
if self.user_id and self.team_id and self.expected_revenue:
self.flush() # flush fields to make sure DB is up to date
query = """
SELECT
SUM(CASE WHEN user_id = %(user_id)s THEN 1 ELSE 0 END) as total_won,
MAX(CASE WHEN date_closed >= CURRENT_DATE - INTERVAL '30 days' AND user_id = %(user_id)s THEN expected_revenue ELSE 0 END) as max_user_30,
MAX(CASE WHEN date_closed >= CURRENT_DATE - INTERVAL '7 days' AND user_id = %(user_id)s THEN expected_revenue ELSE 0 END) as max_user_7,
MAX(CASE WHEN date_closed >= CURRENT_DATE - INTERVAL '30 days' AND team_id = %(team_id)s THEN expected_revenue ELSE 0 END) as max_team_30,
MAX(CASE WHEN date_closed >= CURRENT_DATE - INTERVAL '7 days' AND team_id = %(team_id)s THEN expected_revenue ELSE 0 END) as max_team_7
FROM crm_lead
WHERE
type = 'opportunity'
AND
active = True
AND
probability = 100
AND
DATE_TRUNC('year', date_closed) = DATE_TRUNC('year', CURRENT_DATE)
AND
(user_id = %(user_id)s OR team_id = %(team_id)s)
"""
self.env.cr.execute(query, {'user_id': self.user_id.id,
'team_id': self.team_id.id})
query_result = self.env.cr.dictfetchone()
if query_result['total_won'] == 1:
message = _('Go, go, go! Congrats for your first deal.')
elif query_result['max_team_30'] == self.expected_revenue:
message = _('Boom! Team record for the past 30 days.')
elif query_result['max_team_7'] == self.expected_revenue:
message = _('Yeah! Deal of the last 7 days for the team.')
elif query_result['max_user_30'] == self.expected_revenue:
message = _('You just beat your personal record for the past 30 days.')
elif query_result['max_user_7'] == self.expected_revenue:
message = _('You just beat your personal record for the past 7 days.')
return message
def action_schedule_meeting(self):
""" Open meeting's calendar view to schedule meeting on current opportunity.
:return dict: dictionary value for created Meeting view
"""
self.ensure_one()
action = self.env["ir.actions.actions"]._for_xml_id("calendar.action_calendar_event")
partner_ids = self.env.user.partner_id.ids
if self.partner_id:
partner_ids.append(self.partner_id.id)
action['context'] = {
'default_opportunity_id': self.id if self.type == 'opportunity' else False,
'default_partner_id': self.partner_id.id,
'default_partner_ids': partner_ids,
'default_team_id': self.team_id.id,
'default_name': self.name,
}
return action
def action_snooze(self):
self.ensure_one()
today = date.today()
my_next_activity = self.activity_ids.filtered(lambda activity: activity.user_id == self.env.user)[:1]
if my_next_activity:
if my_next_activity.date_deadline < today:
date_deadline = today + timedelta(days=7)
else:
date_deadline = my_next_activity.date_deadline + timedelta(days=7)
my_next_activity.write({
'date_deadline': date_deadline
})
return True
# ------------------------------------------------------------
# BUSINESS
# ------------------------------------------------------------
def log_meeting(self, meeting_subject, meeting_date, duration):
if not duration:
duration = _('unknown')
else:
duration = str(duration)
meet_date = fields.Datetime.from_string(meeting_date)
meeting_usertime = fields.Datetime.to_string(fields.Datetime.context_timestamp(self, meet_date))
html_time = "<time datetime='%s+00:00'>%s</time>" % (meeting_date, meeting_usertime)
message = _("Meeting scheduled at '%s'<br> Subject: %s <br> Duration: %s hours") % (html_time, meeting_subject, duration)
return self.message_post(body=message)
# ------------------------------------------------------------
# MERGE LEADS / OPPS
# ------------------------------------------------------------
def _merge_get_result_type(self):
""" Define the type of the result of the merge. If at least one of the
        elements to merge is an opp, the resulting new element will be an opp.
Otherwise it will be a lead. """
if any(record.type == 'opportunity' for record in self):
return 'opportunity'
return 'lead'
def _merge_data(self, fields):
""" Prepare lead/opp data into a dictionary for merging. Different types
of fields are processed in different ways:
- text: all the values are concatenated
- m2m and o2m: those fields aren't processed
        - m2o: the first non-null value prevails (the others are dropped)
- any other type of field: same as m2o
:param fields: list of fields to process
:return dict data: contains the merged values of the new opportunity
"""
# helpers
def _get_first_not_null(attr, opportunities):
for opp in opportunities:
val = opp[attr]
if val:
return val
return False
def _get_first_not_null_id(attr, opportunities):
res = _get_first_not_null(attr, opportunities)
return res.id if res else False
# process the fields' values
data = {}
for field_name in fields:
field = self._fields.get(field_name)
if field is None:
continue
if field.type in ('many2many', 'one2many'):
continue
elif field.type == 'many2one':
data[field_name] = _get_first_not_null_id(field_name, self) # take the first not null
elif field.type == 'text':
data[field_name] = '\n\n'.join(it for it in self.mapped(field_name) if it)
else:
data[field_name] = _get_first_not_null(field_name, self)
# define the resulting type ('lead' or 'opportunity')
data['type'] = self._merge_get_result_type()
return data
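    # Added illustration (hypothetical leads): merging A(email_from=False,
    # description='x') with B(email_from='a@b.c', description='y') yields
    # email_from='a@b.c' (first non-null in confidence order) and
    # description='x\n\ny' (text fields are concatenated).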
def _merge_notify_get_merged_fields_message(self, fields):
""" Generate the message body with the changed values
:param fields : list of fields to track
:returns a list of message bodies for the corresponding leads
"""
bodies = []
for lead in self:
title = "%s : %s\n" % (_('Merged opportunity') if lead.type == 'opportunity' else _('Merged lead'), lead.name)
body = [title]
_fields = self.env['ir.model.fields'].search([
('name', 'in', fields or []),
('model_id.model', '=', lead._name),
])
for field in _fields:
value = getattr(lead, field.name, False)
if field.ttype == 'selection':
selections = lead.fields_get()[field.name]['selection']
value = next((v[1] for v in selections if v[0] == value), value)
elif field.ttype == 'many2one':
if value:
value = value.sudo().display_name
elif field.ttype == 'many2many':
if value:
value = ','.join(
val.display_name
for val in value.sudo()
)
body.append("%s: %s" % (field.field_description, value or ''))
bodies.append("<br/>".join(body + ['<br/>']))
return bodies
def _merge_notify(self, opportunities):
""" Post a message gathering merged leads/opps informations. It explains
which fields has been merged and their new value. `self` is the resulting
merge crm.lead record.
:param opportunities: see ``merge_dependences``
"""
# TODO JEM: mail template should be used instead of fix body, subject text
self.ensure_one()
# mail message's subject
result_type = opportunities._merge_get_result_type()
merge_message = _('Merged leads') if result_type == 'lead' else _('Merged opportunities')
subject = merge_message + ": " + ", ".join(opportunities.mapped('name'))
# message bodies
message_bodies = opportunities._merge_notify_get_merged_fields_message(list(CRM_LEAD_FIELDS_TO_MERGE))
message_body = "\n\n".join(message_bodies)
return self.message_post(body=message_body, subject=subject)
def _merge_opportunity_history(self, opportunities):
""" Move mail.message from the given opportunities to the current one. `self` is the
crm.lead record destination for message of `opportunities`.
:param opportunities: see ``merge_dependences``
"""
self.ensure_one()
for opportunity in opportunities:
for message in opportunity.message_ids:
if message.subject:
subject = _("From %(source_name)s : %(source_subject)s", source_name=opportunity.name, source_subject=message.subject)
else:
subject = _("From %(source_name)s", source_name=opportunity.name)
message.write({
'res_id': self.id,
'subject': subject,
})
return True
def _merge_opportunity_attachments(self, opportunities):
""" Move attachments of given opportunities to the current one `self`, and rename
the attachments having same name than native ones.
:param opportunities: see ``merge_dependences``
"""
self.ensure_one()
# return attachments of opportunity
def _get_attachments(opportunity_id):
return self.env['ir.attachment'].search([('res_model', '=', self._name), ('res_id', '=', opportunity_id)])
first_attachments = _get_attachments(self.id)
# counter of all attachments to move. Used to make sure the name is different for all attachments
count = 1
for opportunity in opportunities:
attachments = _get_attachments(opportunity.id)
for attachment in attachments:
values = {'res_id': self.id}
for attachment_in_first in first_attachments:
if attachment.name == attachment_in_first.name:
values['name'] = "%s (%s)" % (attachment.name, count)
count += 1
attachment.write(values)
return True
def merge_dependences(self, opportunities):
""" Merge dependences (messages, attachments, ...). These dependences will be
transfered to `self`, the most important lead.
:param opportunities : recordset of opportunities to transfer. Does not
include `self` which is the target crm.lead being the result of the merge.
"""
self.ensure_one()
self._merge_notify(opportunities)
self._merge_opportunity_history(opportunities)
self._merge_opportunity_attachments(opportunities)
def merge_opportunity(self, user_id=False, team_id=False, auto_unlink=True):
""" Merge opportunities in one. Different cases of merge:
- merge leads together = 1 new lead
- merge at least 1 opp with anything else (lead or opp) = 1 new opp
The resulting lead/opportunity will be the most important one (based on its confidence level)
updated with values from other opportunities to merge.
        :param user_id : the id of the salesperson. If not given, will be determined by `_merge_data`.
        :param team_id : the id of the Sales Team. If not given, will be determined by `_merge_data`.
        :return: crm.lead record resulting from the merge
"""
if len(self.ids) <= 1:
raise UserError(_('Please select more than one element (lead or opportunity) from the list view.'))
if len(self.ids) > 5 and not self.env.is_superuser():
raise UserError(_("To prevent data loss, Leads and Opportunities can only be merged by groups of 5."))
opportunities = self._sort_by_confidence_level(reverse=True)
# get SORTED recordset of head and tail, and complete list
opportunities_head = opportunities[0]
opportunities_tail = opportunities[1:]
        # merge all the sorted opportunities. This means the values of
        # the first (head) opportunity take priority.
merged_data = opportunities._merge_data(list(CRM_LEAD_FIELDS_TO_MERGE))
        # force value for salesperson and Sales Team
if user_id:
merged_data['user_id'] = user_id
if team_id:
merged_data['team_id'] = team_id
# merge other data (mail.message, attachments, ...) from tail into head
opportunities_head.merge_dependences(opportunities_tail)
# check if the stage is in the stages of the Sales Team. If not, assign the stage with the lowest sequence
if merged_data.get('team_id'):
team_stage_ids = self.env['crm.stage'].search(['|', ('team_id', '=', merged_data['team_id']), ('team_id', '=', False)], order='sequence')
if merged_data.get('stage_id') not in team_stage_ids.ids:
merged_data['stage_id'] = team_stage_ids[0].id if team_stage_ids else False
# write merged data into first opportunity
opportunities_head.write(merged_data)
# delete tail opportunities
# we use the SUPERUSER to avoid access rights issues because as the user had the rights to see the records it should be safe to do so
if auto_unlink:
opportunities_tail.sudo().unlink()
return opportunities_head
def _sort_by_confidence_level(self, reverse=False):
""" Sorting the leads/opps according to the confidence level of its stage, which relates to the probability of winning it
The confidence level increases with the stage sequence
An Opportunity always has higher confidence level than a lead
"""
def opps_key(opportunity):
return opportunity.type == 'opportunity', opportunity.stage_id.sequence, -opportunity._origin.id
return self.sorted(key=opps_key, reverse=reverse)
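    # Added example (illustrative): an opportunity whose stage has sequence 3
    # gets key (True, 3, -id); with reverse=True it sorts before any lead,
    # whose key starts with False, matching the docstring above.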
def _convert_opportunity_data(self, customer, team_id=False):
""" Extract the data from a lead to create the opportunity
:param customer : res.partner record
:param team_id : identifier of the Sales Team to determine the stage
"""
new_team_id = team_id if team_id else self.team_id.id
upd_values = {
'type': 'opportunity',
'date_open': fields.Datetime.now(),
'date_conversion': fields.Datetime.now(),
}
if customer != self.partner_id:
upd_values['partner_id'] = customer.id if customer else False
if not self.stage_id:
stage = self._stage_find(team_id=new_team_id)
upd_values['stage_id'] = stage.id
return upd_values
def convert_opportunity(self, partner_id, user_ids=False, team_id=False):
customer = False
if partner_id:
customer = self.env['res.partner'].browse(partner_id)
for lead in self:
if not lead.active or lead.probability == 100:
continue
vals = lead._convert_opportunity_data(customer, team_id)
lead.write(vals)
if user_ids or team_id:
self.handle_salesmen_assignment(user_ids, team_id)
return True
def _get_lead_duplicates(self, partner=None, email=None, include_lost=False):
""" Search for leads that seem duplicated based on partner / email.
        :param partner : optional customer to match when searching for duplicates
:param email: email (possibly formatted) to search
:param boolean include_lost: if True, search includes archived opportunities
(still only active leads are considered). If False, search for active
and not won leads and opportunities;
"""
if not email and not partner:
return self.env['crm.lead']
domain = []
for normalized_email in [tools.email_normalize(email) for email in tools.email_split(email)]:
domain.append(('email_normalized', '=', normalized_email))
if partner:
domain.append(('partner_id', '=', partner.id))
if not domain:
return self.env['crm.lead']
domain = ['|'] * (len(domain) - 1) + domain
if include_lost:
domain += ['|', ('type', '=', 'opportunity'), ('active', '=', True)]
else:
domain += ['&', ('active', '=', True), '|', ('probability', '=', False), ('probability', '<', 100)]
return self.with_context(active_test=False).search(domain)
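    # Added example (hypothetical input): email='John <j@x.com>' plus a set
    # partner gives criteria [('email_normalized', '=', 'j@x.com'),
    # ('partner_id', '=', partner.id)]; the ['|'] * (len(domain) - 1) prefix
    # OR-joins them before the active/probability filters are ANDed on.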
def _create_customer(self):
""" Create a partner from lead data and link it to the lead.
:return: newly-created partner browse record
"""
Partner = self.env['res.partner']
contact_name = self.contact_name
if not contact_name:
contact_name = Partner._parse_partner_name(self.email_from)[0] if self.email_from else False
if self.partner_name:
partner_company = Partner.create(self._prepare_customer_values(self.partner_name, is_company=True))
elif self.partner_id:
partner_company = self.partner_id
else:
partner_company = None
if contact_name:
return Partner.create(self._prepare_customer_values(contact_name, is_company=False, parent_id=partner_company.id if partner_company else False))
if partner_company:
return partner_company
return Partner.create(self._prepare_customer_values(self.name, is_company=False))
def _prepare_customer_values(self, partner_name, is_company=False, parent_id=False):
""" Extract data from lead to create a partner.
        :param partner_name : future name of the partner
:param is_company : True if the partner is a company
:param parent_id : id of the parent partner (False if no parent)
:return: dictionary of values to give at res_partner.create()
"""
email_split = tools.email_split(self.email_from)
res = {
'name': partner_name,
'user_id': self.env.context.get('default_user_id') or self.user_id.id,
'comment': self.description,
'team_id': self.team_id.id,
'parent_id': parent_id,
'phone': self.phone,
'mobile': self.mobile,
'email': email_split[0] if email_split else False,
'title': self.title.id,
'function': self.function,
'street': self.street,
'street2': self.street2,
'zip': self.zip,
'city': self.city,
'country_id': self.country_id.id,
'state_id': self.state_id.id,
'website': self.website,
'is_company': is_company,
'type': 'contact'
}
if self.lang_id:
res['lang'] = self.lang_id.code
return res
def _find_matching_partner(self, email_only=False):
""" Try to find a matching partner with available information on the
lead, using notably customer's name, email, ...
:param email_only: Only find a matching based on the email. To use
for automatic process where ilike based on name can be too dangerous
:return: partner browse record
"""
self.ensure_one()
partner = self.partner_id
if not partner and self.email_from:
partner = self.env['res.partner'].search([('email', '=', self.email_from)], limit=1)
if not partner and not email_only:
# search through the existing partners based on the lead's partner or contact name
# to be aligned with _create_customer, search on lead's name as last possibility
for customer_potential_name in [self[field_name] for field_name in ['partner_name', 'contact_name', 'name'] if self[field_name]]:
partner = self.env['res.partner'].search([('name', 'ilike', '%' + customer_potential_name + '%')], limit=1)
if partner:
break
return partner
def handle_partner_assignment(self, force_partner_id=False, create_missing=True):
""" Update customer (partner_id) of leads. Purpose is to set the same
partner on most leads; either through a newly created partner either
through a given partner_id.
:param int force_partner_id: if set, update all leads to that customer;
:param create_missing: for leads without customer, create a new one
based on lead information;
"""
for lead in self:
if force_partner_id:
lead.partner_id = force_partner_id
if not lead.partner_id and create_missing:
partner = lead._create_customer()
lead.partner_id = partner.id
def handle_salesmen_assignment(self, user_ids=None, team_id=False):
""" Assign salesmen and salesteam to a batch of leads. If there are more
leads than salesmen, these salesmen will be assigned in round-robin. E.g.
4 salesmen (S1, S2, S3, S4) for 6 leads (L1, L2, ... L6) will assigned as
following: L1 - S1, L2 - S2, L3 - S3, L4 - S4, L5 - S1, L6 - S2.
:param list user_ids: salesmen to assign
:param int team_id: salesteam to assign
"""
update_vals = {'team_id': team_id} if team_id else {}
if not user_ids:
self.write(update_vals)
else:
lead_ids = self.ids
steps = len(user_ids)
# pass 1 : lead_ids[0:6:3] = [L1,L4]
# pass 2 : lead_ids[1:6:3] = [L2,L5]
# pass 3 : lead_ids[2:6:3] = [L3,L6]
# ...
for idx in range(0, steps):
subset_ids = lead_ids[idx:len(lead_ids):steps]
update_vals['user_id'] = user_ids[idx]
self.env['crm.lead'].browse(subset_ids).write(update_vals)
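# Illustrative sketch (not part of the original module): the round-robin split
# above relies on extended slicing over the recordset ids. With 6 leads and
# 3 salesmen:
#     lead_ids = [11, 12, 13, 14, 15, 16]
#     user_ids = [1, 2, 3]
#     [lead_ids[idx::len(user_ids)] for idx in range(len(user_ids))]
#     # -> [[11, 14], [12, 15], [13, 16]]
# so user 1 gets leads 11 and 14, user 2 gets 12 and 15, user 3 gets 13 and 16.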
# ------------------------------------------------------------
# TOOLS
# ------------------------------------------------------------
def redirect_lead_opportunity_view(self):
self.ensure_one()
return {
'name': _('Lead or Opportunity'),
'view_mode': 'form',
'res_model': 'crm.lead',
'domain': [('type', '=', self.type)],
'res_id': self.id,
'view_id': False,
'type': 'ir.actions.act_window',
'context': {'default_type': self.type}
}
@api.model
def get_empty_list_help(self, help):
help_title, sub_title = "", ""
if self._context.get('default_type') == 'lead':
help_title = _('Create a new lead')
else:
help_title = _('Create an opportunity to start playing with your pipeline.')
alias_record = self.env['mail.alias'].search([
('alias_name', '!=', False),
('alias_name', '!=', ''),
('alias_model_id.model', '=', 'crm.lead'),
('alias_parent_model_id.model', '=', 'crm.team'),
('alias_force_thread_id', '=', False)
], limit=1)
if alias_record and alias_record.alias_domain and alias_record.alias_name:
email = '%s@%s' % (alias_record.alias_name, alias_record.alias_domain)
email_link = "<b><a href='mailto:%s'>%s</a></b>" % (email, email)
sub_title = _('Use the top left <i>Create</i> button, or send an email to %s to test the email gateway.') % (email_link)
return '<p class="o_view_nocontent_smiling_face">%s</p><p class="oe_view_nocontent_alias">%s</p>' % (help_title, sub_title)
# ------------------------------------------------------------
# MAILING
# ------------------------------------------------------------
def _creation_subtype(self):
return self.env.ref('crm.mt_lead_create')
def _track_subtype(self, init_values):
self.ensure_one()
if 'stage_id' in init_values and self.probability == 100 and self.stage_id:
return self.env.ref('crm.mt_lead_won')
elif 'lost_reason' in init_values and self.lost_reason:
return self.env.ref('crm.mt_lead_lost')
elif 'stage_id' in init_values:
return self.env.ref('crm.mt_lead_stage')
elif 'active' in init_values and self.active:
return self.env.ref('crm.mt_lead_restored')
elif 'active' in init_values and not self.active:
return self.env.ref('crm.mt_lead_lost')
return super(Lead, self)._track_subtype(init_values)
def _notify_get_groups(self, msg_vals=None):
""" Handle salesman recipients that can convert leads into opportunities
and set opportunities as won / lost. """
groups = super(Lead, self)._notify_get_groups(msg_vals=msg_vals)
local_msg_vals = dict(msg_vals or {})
self.ensure_one()
if self.type == 'lead':
convert_action = self._notify_get_action_link('controller', controller='/lead/convert', **local_msg_vals)
salesman_actions = [{'url': convert_action, 'title': _('Convert to opportunity')}]
else:
won_action = self._notify_get_action_link('controller', controller='/lead/case_mark_won', **local_msg_vals)
lost_action = self._notify_get_action_link('controller', controller='/lead/case_mark_lost', **local_msg_vals)
salesman_actions = [
{'url': won_action, 'title': _('Won')},
{'url': lost_action, 'title': _('Lost')}]
if self.team_id:
custom_params = dict(local_msg_vals, res_id=self.team_id.id, model=self.team_id._name)
salesman_actions.append({
'url': self._notify_get_action_link('view', **custom_params),
'title': _('Sales Team Settings')
})
salesman_group_id = self.env.ref('sales_team.group_sale_salesman').id
new_group = (
'group_sale_salesman', lambda pdata: pdata['type'] == 'user' and salesman_group_id in pdata['groups'], {
'actions': salesman_actions,
})
return [new_group] + groups
def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None):
""" Override to set alias of lead and opportunities to their sales team if any. """
aliases = self.mapped('team_id').sudo()._notify_get_reply_to(default=default, records=None, company=company, doc_names=None)
res = {lead.id: aliases.get(lead.team_id.id) for lead in self}
leftover = self.filtered(lambda rec: not rec.team_id)
if leftover:
res.update(super(Lead, leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names))
return res
def _message_get_default_recipients(self):
return {r.id: {
'partner_ids': [],
'email_to': r.email_normalized,
'email_cc': False}
for r in self}
def _message_get_suggested_recipients(self):
recipients = super(Lead, self)._message_get_suggested_recipients()
try:
for lead in self:
if lead.partner_id:
lead._message_add_suggested_recipient(recipients, partner=lead.partner_id, reason=_('Customer'))
elif lead.email_from:
lead._message_add_suggested_recipient(recipients, email=lead.email_from, reason=_('Customer Email'))
except AccessError: # no read access rights -> just ignore suggested recipients because this imply modifying followers
pass
return recipients
@api.model
def message_new(self, msg_dict, custom_values=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
# do not set an external (portal) user as default responsible
if self.env.user.has_group('base.group_portal'):
self = self.with_context(default_user_id=False)
# remove default author when going through the mail gateway. Indeed we
# do not want to explicitly set user_id to False; however we do not
# want the gateway user to be responsible if no other responsible is
# found.
if self._uid == self.env.ref('base.user_root').id:
self = self.with_context(default_user_id=False)
if custom_values is None:
custom_values = {}
defaults = {
'name': msg_dict.get('subject') or _("No Subject"),
'email_from': msg_dict.get('from'),
'partner_id': msg_dict.get('author_id', False),
}
if msg_dict.get('priority') in dict(crm_stage.AVAILABLE_PRIORITIES):
defaults['priority'] = msg_dict.get('priority')
defaults.update(custom_values)
# assign right company
if 'company_id' not in defaults and 'team_id' in defaults:
defaults['company_id'] = self.env['crm.team'].browse(defaults['team_id']).company_id.id
return super(Lead, self).message_new(msg_dict, custom_values=defaults)
def _message_post_after_hook(self, message, msg_vals):
if self.email_from and not self.partner_id:
# we consider that posting a message with a specified recipient (not a follower, a specific one)
# on a document without a customer means that it was created through the chatter using
# suggested recipients. This heuristic lets us avoid ugly hacks in JS.
new_partner = message.partner_ids.filtered(lambda partner: partner.email == self.email_from)
if new_partner:
self.search([
('partner_id', '=', False),
('email_from', '=', new_partner.email),
('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id})
return super(Lead, self)._message_post_after_hook(message, msg_vals)
def _message_partner_info_from_emails(self, emails, link_mail=False):
result = super(Lead, self)._message_partner_info_from_emails(emails, link_mail=link_mail)
for partner_info in result:
if not partner_info.get('partner_id') and (self.partner_name or self.contact_name):
emails = email_re.findall(partner_info['full_name'] or '')
email = emails and emails[0] or ''
if email and self.email_from and email.lower() == self.email_from.lower():
partner_info['full_name'] = tools.formataddr((self.contact_name or self.partner_name, email))
break
return result
def _phone_get_number_fields(self):
""" Use mobile or phone fields to compute sanitized phone number """
return ['mobile', 'phone']
@api.model
def get_import_templates(self):
return [{
'label': _('Import Template for Leads & Opportunities'),
'template': '/crm/static/xls/crm_lead.xls'
}]
# ------------------------------------------------------------
# PLS
# ------------------------------------------------------------
# Predictive lead scoring computes the lead probability, based on won and lost leads from the past.
# Each won/lost lead increments a frequency table, where we store, for each field/value couple, the number of
# won and lost leads.
# E.g.: a won lead from Belgium will increase the won count of the frequency country_id='Belgium' by 1.
# The frequencies are split by team_id, so each team has its own frequency environment (team A doesn't impact team B).
# There are two main ways to build the frequency table:
# - Live increment: on each win/loss, we directly increment the frequencies based on the lead values.
#   Done right BEFORE writing the lead as won or lost.
#   We consider a lead that is about to be marked as won or lost.
#   Used each time a lead is won or lost, to ensure the frequency table is always up to date.
# - One-shot rebuild: empty the frequency table and rebuild it from scratch, based on all leads already won or lost.
#   Done during the cron process.
#   We consider all the leads that have already been won or lost.
#   Used in one shot, when modifying the criteria to take into account (fields or reference date).
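# Worked example (illustrative, made-up numbers, not part of the original module):
# assume for one team that team_won = team_lost = 100 (so p_won = p_lost = 0.5)
# and that, for the field country_id, won_total = lost_total = 100 with the value
# 'BE' counted as {'won': 30, 'lost': 10}. Ignoring the stage_id factor for
# brevity, a lead whose only contributing field is country_id = 'BE' gets:
#     s_won  = 0.5 * 30 / 100 = 0.15
#     s_lost = 0.5 * 10 / 100 = 0.05
#     probability = 100 * 0.15 / (0.15 + 0.05) = 75.0
# Each additional known field multiplies one more conditional ratio into each score.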
# ---------------------------------
# PLS: Probability Computation
# ---------------------------------
def _pls_get_naive_bayes_probabilities(self, batch_mode=False):
"""
In machine learning, naive Bayes classifiers (NBC) are a family of simple "probabilistic classifiers" based on
applying Bayes theorem with strong (naive) independence assumptions between the variables taken into account.
E.g: will TDE eat m&m's depending on his sleep status, the amount of work he has and the fullness of his stomach?
As we use experience to compute the statistics, every day, we will register the variables state + the result.
As the days pass, we will be able to determine, with more and more precision, if TDE will eat m&m's
for a specific combination :
- did sleep very well, a lot of work and stomach full > Will never happen !
- didn't sleep at all, no work at all and empty stomach > for sure !
Following Bayes' Theorem: the probability that an event occurs (to win) under certain conditions is proportional
to the probability to win under each condition separately and the probability to win. We compute a 'Win score'
-> P(Won | A∩B) ∝ P(A∩B | Won)*P(Won) OR S(Won | A∩B) = P(A∩B | Won)*P(Won)
To compute a percentage of probability to win, we also compute the 'Lost score' that is proportional to the
probability to lose under each condition separately and the probability to lose.
-> Probability = S(Won | A∩B) / ( S(Won | A∩B) + S(Lost | A∩B) )
The video at https://www.youtube.com/watch?v=CPqOCI0ahss gives a quick and simple example.
One issue with NBC arises when an event occurrence is never observed.
E.g: if TDE always eats m&m's when his stomach is empty, then the "not eating m&m's on an
empty stomach" event will never be observed.
This is called 'zero frequency' and it leads to division (or at least multiplication) by zero.
To avoid this, we add 0.1 to each frequency. With little data, the computation is then not really realistic.
The more records we have to analyse, the more precise the estimation will be.
:return: probability in percent (and integer rounded) that the lead will be won at the current stage.
"""
lead_probabilities = {}
if not self:
return lead_probabilities
# Get all leads values, no matter the team_id
domain = []
if batch_mode:
domain = [
'&',
('active', '=', True), ('id', 'in', self.ids),
'|',
('probability', '=', None),
'&',
('probability', '<', 100), ('probability', '>', 0)
]
leads_values_dict = self._pls_get_lead_pls_values(domain=domain)
if not leads_values_dict:
return lead_probabilities
# Get unique couples to search in frequency table and won leads.
leads_fields = set() # keep unique fields, as a lead can have multiple tag_ids
won_leads = set()
won_stage_ids = self.env['crm.stage'].search([('is_won', '=', True)]).ids
for lead_id, values in leads_values_dict.items():
for field, value in values['values']:
if field == 'stage_id' and value in won_stage_ids:
won_leads.add(lead_id)
leads_fields.add(field)
# get all variable related records from frequency table, no matter the team_id
frequencies = self.env['crm.lead.scoring.frequency'].search([('variable', 'in', list(leads_fields))], order="team_id asc")
# get all team_ids from frequencies
frequency_teams = frequencies.mapped('team_id')
frequency_team_ids = [0] + [team.id for team in frequency_teams]
# 1. Compute each variable value count individually
# regroup each variable to be able to compute its own probabilities
# As not every variable is taken into account (we reject unset values in the process),
# each value probability must be computed only with its own variable-related total count
# special case: for leads whose team_id is not in the frequency table,
# we consider all the records, independently of team_id (this is why we add a result[-1])
result = dict((team_id, dict((field, dict(won_total=0, lost_total=0)) for field in leads_fields)) for team_id in frequency_team_ids)
result[-1] = dict((field, dict(won_total=0, lost_total=0)) for field in leads_fields)
for frequency in frequencies:
team_result = result[frequency.team_id.id if frequency.team_id else 0]
field = frequency['variable']
value = frequency['value']
# To avoid a tag taking too much importance when its subset is too small,
# we ignore the tag frequencies if there are fewer than 50 won or lost leads for this tag.
if field == 'tag_id' and (frequency['won_count'] + frequency['lost_count']) < 50:
continue
team_result[field][value] = {'won': frequency['won_count'], 'lost': frequency['lost_count']}
team_result[field]['won_total'] += frequency['won_count']
team_result[field]['lost_total'] += frequency['lost_count']
if value not in result[-1][field]:
result[-1][field][value] = {'won': 0, 'lost': 0}
result[-1][field][value]['won'] += frequency['won_count']
result[-1][field][value]['lost'] += frequency['lost_count']
result[-1][field]['won_total'] += frequency['won_count']
result[-1][field]['lost_total'] += frequency['lost_count']
# Get all won, lost and total count for all records in frequencies per team_id
for team_id in result:
result[team_id]['team_won'], \
result[team_id]['team_lost'], \
result[team_id]['team_total'] = self._pls_get_won_lost_total_count(result[team_id])
save_team_id = None
p_won, p_lost = 1, 1
for lead_id, lead_values in leads_values_dict.items():
# if stage_id is null, return 0 and bypass computation
lead_fields = [value[0] for value in lead_values.get('values', [])]
if 'stage_id' not in lead_fields:
lead_probabilities[lead_id] = 0
continue
# if lead stage is won, return 100
elif lead_id in won_leads:
lead_probabilities[lead_id] = 100
continue
lead_team_id = lead_values['team_id'] if lead_values['team_id'] else 0 # team_id = None -> Convert to 0
lead_team_id = lead_team_id if lead_team_id in result else -1 # team_id not in frequency Table -> convert to -1
if lead_team_id != save_team_id:
save_team_id = lead_team_id
team_won = result[save_team_id]['team_won']
team_lost = result[save_team_id]['team_lost']
team_total = result[save_team_id]['team_total']
# if one count = 0, we cannot compute lead probability
if not team_won or not team_lost:
continue
p_won = team_won / team_total
p_lost = team_lost / team_total
# 2. Compute won and lost score using each variable's individual probability
s_lead_won, s_lead_lost = p_won, p_lost
for field, value in lead_values['values']:
field_result = result.get(save_team_id, {}).get(field)
value = value.origin if hasattr(value, 'origin') else value
value_result = field_result.get(str(value)) if field_result else False
if value_result:
total_won = team_won if field == 'stage_id' else field_result['won_total']
total_lost = team_lost if field == 'stage_id' else field_result['lost_total']
s_lead_won *= value_result['won'] / total_won
s_lead_lost *= value_result['lost'] / total_lost
# 3. Compute Probability to win
lead_probabilities[lead_id] = round(100 * s_lead_won / (s_lead_won + s_lead_lost), 2)
return lead_probabilities
# ---------------------------------
# PLS: Live Increment
# ---------------------------------
def _pls_increment_frequencies(self, from_state=None, to_state=None):
"""
When losing or winning a lead, this method is called to increment each PLS parameter related to the lead
in won_count (if won) or in lost_count (if lost).
This method is also used when reactivating a mistakenly lost lead: in that
case, the lost count is decremented by 1 for each PLS parameter linked to the lead.
Live increment must be done before writing the new values, because we need to know the state change (from and to).
This is not an issue when reaching won or lost, as we just need to increment the frequencies with the
final state of the lead.
The issue is when the lead leaves a closed state: once the new values have been written, we no longer know
what previous state we need to decrement.
This is why the 'from_state' and 'to_state' parameters are used to describe the change of state.
"""
new_frequencies_by_team, existing_frequencies_by_team = self._pls_prepare_update_frequency_table(target_state=from_state or to_state)
# update frequency table
self._pls_update_frequency_table(new_frequencies_by_team, 1 if to_state else -1,
existing_frequencies_by_team=existing_frequencies_by_team)
# ---------------------------------
# PLS: One shot rebuild
# ---------------------------------
def _cron_update_automated_probabilities(self):
""" This cron will :
- rebuild the lead scoring frequency table
- recompute all the automated_probability and align probability if both were aligned
"""
cron_start_date = datetime.now()
self._rebuild_pls_frequency_table()
self._update_automated_probabilities()
_logger.info("Predictive Lead Scoring : Cron duration = %d seconds" % ((datetime.now() - cron_start_date).total_seconds()))
def _rebuild_pls_frequency_table(self):
# Clear the frequencies table (in sql to speed up the cron)
try:
self.check_access_rights('unlink')
except AccessError:
raise UserError(_("You don't have the access needed to run this cron."))
else:
self._cr.execute('TRUNCATE TABLE crm_lead_scoring_frequency')
new_frequencies_by_team, unused = self._pls_prepare_update_frequency_table(rebuild=True)
# update frequency table
self._pls_update_frequency_table(new_frequencies_by_team, 1)
_logger.info("Predictive Lead Scoring : crm.lead.scoring.frequency table rebuilt")
def _update_automated_probabilities(self):
""" Recompute all the automated_probability (and align probability if both were aligned) for all the leads
that are active (not won, nor lost).
For performance reasons, as there can be a huge number of leads to recompute, this cron proceeds in batches.
Each batch is performed in its own transaction, in order to minimise the lock time on the lead table
(and to avoid a complete lock if there were only one transaction lasting several minutes).
If a concurrent update occurs, it will simply be put in the queue to get the lock.
"""
pls_start_date = self._pls_get_safe_start_date()
if not pls_start_date:
return
# 1. Get all the leads to recompute created after pls_start_date that are neither won nor lost
# (Won: probability = 100 | Lost: probability = 0 or inactive. Here, inactive ones won't be returned anyway)
# Also get all the leads without probability --> these are the new leads. Activate auto probability on them.
pending_lead_domain = [
'&',
'&',
('stage_id', '!=', False), ('create_date', '>=', pls_start_date),
'|',
('probability', '=', False),
'&',
('probability', '<', 100), ('probability', '>', 0)
]
leads_to_update = self.env['crm.lead'].search(pending_lead_domain)
leads_to_update_count = len(leads_to_update)
# 2. Compute by batch to avoid memory error
lead_probabilities = {}
for i in range(0, leads_to_update_count, PLS_COMPUTE_BATCH_STEP):
leads_to_update_part = leads_to_update[i:i + PLS_COMPUTE_BATCH_STEP]
lead_probabilities.update(leads_to_update_part._pls_get_naive_bayes_probabilities(batch_mode=True))
_logger.info("Predictive Lead Scoring : New automated probabilities computed")
# 3. Group by new probability to reduce server roundtrips when executing the update
probability_leads = defaultdict(list)
for lead_id, probability in sorted(lead_probabilities.items()):
probability_leads[probability].append(lead_id)
# 4. Update automated_probability (+ probability if both were equal)
update_sql = """UPDATE crm_lead
SET automated_probability = %s,
probability = CASE WHEN (probability = automated_probability OR probability is null)
THEN (%s)
ELSE (probability)
END
WHERE id in %s"""
# Update by a maximum number of leads at the same time, one batch by transaction :
# - avoid memory errors
# - avoid blocking the table for too long with a too big transaction
transactions_count, transactions_failed_count = 0, 0
cron_update_lead_start_date = datetime.now()
auto_commit = not getattr(threading.currentThread(), 'testing', False)
for probability, probability_lead_ids in probability_leads.items():
for lead_ids_current in tools.split_every(PLS_UPDATE_BATCH_STEP, probability_lead_ids):
transactions_count += 1
try:
self.env.cr.execute(update_sql, (probability, probability, tuple(lead_ids_current)))
# auto-commit except in testing mode
if auto_commit:
self.env.cr.commit()
except Exception as e:
_logger.warning("Predictive Lead Scoring : update transaction failed. Error: %s" % e)
transactions_failed_count += 1
_logger.info(
"Predictive Lead Scoring : All automated probabilities updated (%d leads / %d transactions (%d failed) / %d seconds)" % (
leads_to_update_count,
transactions_count,
transactions_failed_count,
(datetime.now() - cron_update_lead_start_date).total_seconds(),
)
)
# ---------------------------------
# PLS: Common parts for both mode
# ---------------------------------
def _pls_prepare_update_frequency_table(self, rebuild=False, target_state=False):
"""
This method is common to Live Increment or Full Rebuild mode, as it shares the main steps.
This method will prepare the frequency dict needed to update the frequency table:
- New frequencies: frequencies that we need to add in the frequency table.
- Existing frequencies: frequencies that are already in the frequency table.
In rebuild mode, only the new frequencies are needed as existing frequencies are truncated.
For each team, each dict contains the frequency in won and lost for each field/value couple
of the target leads.
Target leads are :
- in Live increment mode : given ongoing leads (self)
- in Full rebuild mode : all the closed (won and lost) leads in the DB.
During the frequencies update, with both new and existing frequencies, we can split frequencies to update
and frequencies to add. If a field/value couple already exists in the frequency table, we just update it.
Otherwise, we need to insert a new one.
"""
# Keep eligible leads
pls_start_date = self._pls_get_safe_start_date()
if not pls_start_date:
return {}, {}
if rebuild: # rebuild will treat every closed lead in DB, increment will treat current ongoing leads
pls_leads = self
else:
# Only treat leads created after the PLS start Date
pls_leads = self.filtered(
lambda lead: fields.Date.to_date(pls_start_date) <= fields.Date.to_date(lead.create_date))
if not pls_leads:
return {}, {}
# Extract target leads values
if rebuild:  # rebuild mode: target all closed leads since the PLS start date
domain = [
'&',
('create_date', '>=', pls_start_date),
'|',
('probability', '=', 100),
'&',
('probability', '=', 0), ('active', '=', False)
]
team_ids = self.env['crm.team'].with_context(active_test=False).search([]).ids + [0] # If team_id is unset, consider it as team 0
else: # increment
domain = [('id', 'in', pls_leads.ids)]
team_ids = pls_leads.mapped('team_id').ids + [0]
leads_values_dict = pls_leads._pls_get_lead_pls_values(domain=domain)
# split leads values by team_id
# get current frequencies related to the target leads
leads_frequency_values_by_team = dict((team_id, []) for team_id in team_ids)
leads_pls_fields = set()  # keep each field unique (a lead can have multiple tag_id entries in leads_values_dict)
for lead_id, values in leads_values_dict.items():
team_id = values.get('team_id', 0) # If team_id is unset, consider it as team 0
lead_frequency_values = {'count': 1}
for field, value in values['values']:
if field != "probability": # was added to lead values in batch mode to know won/lost state, but is not a pls fields.
leads_pls_fields.add(field)
else: # extract lead probability - needed to increment tag_id frequency. (proba always before tag_id)
lead_probability = value
if field == 'tag_id':  # handle tag_id separately (as in one-shot rebuild mode)
leads_frequency_values_by_team[team_id].append({field: value, 'count': 1, 'probability': lead_probability})
else:
lead_frequency_values[field] = value
leads_frequency_values_by_team[team_id].append(lead_frequency_values)
leads_pls_fields = list(leads_pls_fields)
# get new frequencies
new_frequencies_by_team = {}
for team_id in team_ids:
# prepare fields and tag values for leads by team
new_frequencies_by_team[team_id] = self._pls_prepare_frequencies(
leads_frequency_values_by_team[team_id], leads_pls_fields, target_state=target_state)
# get existing frequencies
existing_frequencies_by_team = {}
if not rebuild: # there is no existing frequency in rebuild mode as they were all deleted.
# read all fields to get everything in memory in one query (instead of having query + prefetch)
existing_frequencies = self.env['crm.lead.scoring.frequency'].search_read(
['&', ('variable', 'in', leads_pls_fields),
'|', ('team_id', 'in', pls_leads.mapped('team_id').ids), ('team_id', '=', False)])
for frequency in existing_frequencies:
team_id = frequency['team_id'][0] if frequency.get('team_id') else 0
if team_id not in existing_frequencies_by_team:
existing_frequencies_by_team[team_id] = dict((field, {}) for field in leads_pls_fields)
existing_frequencies_by_team[team_id][frequency['variable']][frequency['value']] = {
'frequency_id': frequency['id'],
'won': frequency['won_count'],
'lost': frequency['lost_count']
}
return new_frequencies_by_team, existing_frequencies_by_team
def _pls_update_frequency_table(self, new_frequencies_by_team, step, existing_frequencies_by_team=None):
""" Create / update the frequency table in a cross company way, per team_id"""
values_to_update = {}
values_to_create = []
if not existing_frequencies_by_team:
existing_frequencies_by_team = {}
# build the create multi + frequencies to update
for team_id, new_frequencies in new_frequencies_by_team.items():
for field, value in new_frequencies.items():
# frequency already present ?
current_frequencies = existing_frequencies_by_team.get(team_id, {})
for param, result in value.items():
current_frequency_for_couple = current_frequencies.get(field, {}).get(param, {})
# If frequency already present : UPDATE IT
if current_frequency_for_couple:
new_won = current_frequency_for_couple['won'] + (result['won'] * step)
new_lost = current_frequency_for_couple['lost'] + (result['lost'] * step)
# ensure to have always positive frequencies
values_to_update[current_frequency_for_couple['frequency_id']] = {
'won_count': new_won if new_won > 0 else 0.1,
'lost_count': new_lost if new_lost > 0 else 0.1
}
continue
# Else, CREATE a new frequency record.
# We add +0.1 to the won and lost counts to avoid zero-frequency issues;
# it should be +1, but that weighs too heavily on small record sets.
values_to_create.append({
'variable': field,
'value': param,
'won_count': result['won'] + 0.1,
'lost_count': result['lost'] + 0.1,
'team_id': team_id if team_id else None # team_id = 0 means no team_id
})
LeadScoringFrequency = self.env['crm.lead.scoring.frequency'].sudo()
for frequency_id, values in values_to_update.items():
LeadScoringFrequency.browse(frequency_id).write(values)
if values_to_create:
LeadScoringFrequency.create(values_to_create)
# ---------------------------------
# Utility Tools for PLS
# ---------------------------------
# PLS: Config Parameters
# ---------------------
def _pls_get_safe_start_date(self):
""" As config_parameters does not accept Date field,
we get directly the date formated string stored into the Char config field,
as we directly use this string in the sql queries.
To avoid sql injections when using this config param,
we ensure the date string can be effectively a date."""
str_date = self.env['ir.config_parameter'].sudo().get_param('crm.pls_start_date')
if not fields.Date.to_date(str_date):
return False
return str_date
def _pls_get_safe_fields(self):
""" As config_parameters does not accept M2M field,
we the fields from the formated string stored into the Char config field.
To avoid sql injections when using that list, we return only the fields
that are defined on the model. """
pls_fields_config = self.env['ir.config_parameter'].sudo().get_param('crm.pls_fields')
pls_fields = pls_fields_config.split(',') if pls_fields_config else []
pls_safe_fields = [field for field in pls_fields if field in self._fields.keys()]
return pls_safe_fields
# Compute Automated Probability Tools
# -----------------------------------
def _pls_get_won_lost_total_count(self, team_results):
""" Get all won and all lost + total :
first stage can be used to know how many lost and won there is
as won count are equals for all stage
and first stage is always incremented in lost_count
:param frequencies: lead_scoring_frequencies
:return: won count, lost count and total count for all records in frequencies
"""
# TODO : check if we need to handle specific team_id stages [for lost count] (if first stage in sequence is team_specific)
first_stage_id = self.env['crm.stage'].search([('team_id', '=', False)], order='sequence', limit=1)
if str(first_stage_id.id) not in team_results.get('stage_id', []):
return 0, 0, 0
stage_result = team_results['stage_id'][str(first_stage_id.id)]
return stage_result['won'], stage_result['lost'], stage_result['won'] + stage_result['lost']
# PLS: Rebuild Frequency Table Tools
# ----------------------------------
def _pls_prepare_frequencies(self, lead_values, leads_pls_fields, target_state=None):
"""new state is used when getting frequencies for leads that are changing to lost or won.
Stays none if we are checking frequencies for leads already won or lost."""
# Frequencies must include tag_id
pls_fields = set(leads_pls_fields + ['tag_id'])
frequencies = dict((field, {}) for field in pls_fields)
stage_ids = self.env['crm.stage'].search_read([], ['sequence', 'name', 'id'], order='sequence')
stage_sequences = {stage['id']: stage['sequence'] for stage in stage_ids}
# Increment won / lost frequencies by criteria (field / value couple)
for values in lead_values:
if target_state: # ignore probability values if target state (as probability is the old value)
won_count = values['count'] if target_state == 'won' else 0
lost_count = values['count'] if target_state == 'lost' else 0
else:
won_count = values['count'] if values.get('probability', 0) == 100 else 0
lost_count = values['count'] if values.get('probability', 1) == 0 else 0
if 'tag_id' in values:
frequencies = self._pls_increment_frequency_dict(frequencies, 'tag_id', values['tag_id'], won_count, lost_count)
continue
# Else, treat other fields
if 'tag_id' in pls_fields: # tag_id already treated here above.
pls_fields.remove('tag_id')
for field in pls_fields:
if field not in values:
continue
value = values[field]
if value or field in ('email_state', 'phone_state'):
if field == 'stage_id':
if won_count: # increment all stages if won
stages_to_increment = [stage['id'] for stage in stage_ids]
else: # increment only current + previous stages if lost
current_stage_sequence = stage_sequences[value]
stages_to_increment = [stage['id'] for stage in stage_ids if stage['sequence'] <= current_stage_sequence]
for stage_id in stages_to_increment:
frequencies = self._pls_increment_frequency_dict(frequencies, field, stage_id, won_count, lost_count)
else:
frequencies = self._pls_increment_frequency_dict(frequencies, field, value, won_count, lost_count)
return frequencies
def _pls_increment_frequency_dict(self, frequencies, field, value, won, lost):
value = str(value) # Ensure we will always compare strings.
if value not in frequencies[field]:
frequencies[field][value] = {'won': won, 'lost': lost}
else:
frequencies[field][value]['won'] += won
frequencies[field][value]['lost'] += lost
return frequencies
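# Illustrative sketch (not part of the original module): values are keyed as
# strings, so an integer id and its string form share one bucket:
#     freqs = {'country_id': {}}
#     freqs = self._pls_increment_frequency_dict(freqs, 'country_id', 5, 1, 0)
#     freqs = self._pls_increment_frequency_dict(freqs, 'country_id', '5', 0, 1)
#     # freqs == {'country_id': {'5': {'won': 1, 'lost': 1}}}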
# Common PLS Tools
# ----------------
def _pls_get_lead_pls_values(self, domain=[]):
"""
This method builds a dict where, for each lead in self or matching the given domain,
we get a list of field/value couples.
Due to onchange and create, we don't always have the id of the lead to recompute.
When we update a few records (typically one) with onchanges, we build the lead_values (= field/value couples)
using the ORM.
To speed up the computation and avoid making too many DB reads inside loops,
we can give a domain to run sql queries and bypass the ORM.
This domain will be used in sql queries to get the values for every lead matching the domain.
:param domain: If set, we get all the leads values via unique sql queries (one for tags, one for other fields),
using the given domain on leads.
If not set, get lead values lead by lead using the ORM.
:return: {lead_id: [(field1: value1), (field2: value2), ...], ...}
"""
leads_values_dict = OrderedDict()
pls_fields = ["stage_id", "team_id"] + self._pls_get_safe_fields()
if domain:
# active_test=False as the domain itself is expected to handle the 'active' field
from_clause, where_clause, where_params = self.env['crm.lead'].with_context(active_test=False)._where_calc(domain).get_sql()
str_fields = ", ".join(["{}"] * len(pls_fields))
args = [sql.Identifier(field) for field in pls_fields]
# Get leads values
self.flush(['probability'])
query = """SELECT id, probability, %s
FROM %s
WHERE %s order by team_id asc"""
query = sql.SQL(query % (str_fields, from_clause, where_clause)).format(*args)
self._cr.execute(query, where_params)
lead_results = self._cr.dictfetchall()
# Get tags values
query = """SELECT crm_lead.id as lead_id, t.id as tag_id
FROM %s
LEFT JOIN crm_tag_rel rel ON crm_lead.id = rel.lead_id
LEFT JOIN crm_tag t ON rel.tag_id = t.id
WHERE %s order by crm_lead.team_id asc"""
query = sql.SQL(query % (from_clause, where_clause)).format(*args)
self._cr.execute(query, where_params)
tag_results = self._cr.dictfetchall()
# get all (variable, value) couple for all in self
for lead in lead_results:
lead_values = []
for field in pls_fields + ['probability']: # add probability as used in _pls_prepare_frequencies (needed in rebuild mode)
value = lead[field]
if field == 'team_id': # ignore team_id as stored separately in leads_values_dict[lead_id][team_id]
continue
if value or field == 'probability': # 0 is a correct value for probability
lead_values.append((field, value))
elif field in ('email_state', 'phone_state'): # As ORM reads 'None' as 'False', do the same here
lead_values.append((field, False))
leads_values_dict[lead['id']] = {'values': lead_values, 'team_id': lead['team_id'] or 0}
for tag in tag_results:
if tag['tag_id']:
leads_values_dict[tag['lead_id']]['values'].append(('tag_id', tag['tag_id']))
return leads_values_dict
else:
for lead in self:
lead_values = []
for field in pls_fields:
if field == 'team_id': # ignore team_id as stored separately in leads_values_dict[lead_id][team_id]
continue
value = lead[field].id if isinstance(lead[field], models.BaseModel) else lead[field]
if value or field in ('email_state', 'phone_state'):
lead_values.append((field, value))
for tag in lead.tag_ids:
lead_values.append(('tag_id', tag.id))
leads_values_dict[lead.id] = {'values': lead_values, 'team_id': lead['team_id'].id}
return leads_values_dict
|
nilq/baby-python
|
python
|
import sys
from schemas.input_conf import personal_info
from settings.base_conf import KOBO_PERSONAL_INFO_CSV_MAP
'''
json_structure - the json attributes to extract from the source json
mapping_format - see oldcuris_elastic_map for an example; import it here
input_format - default input of the source json
final_format - final input structure, with fields other than those in input_format
source - source database
destination - destination database
personal_informations = {
"json_structure": [],
"mapping_file": KOBO_PERSONAL_INFO_CSV_MAP,
"source": "kobo",
"destination": "couchbase"
}
|
nilq/baby-python
|
python
|
""" Test Metadata Tool """
from __future__ import unicode_literals, absolute_import
from tmt.base import Tree
__all__ = ["Tree"]
|
nilq/baby-python
|
python
|
import os
import matplotlib.pyplot as plt
from typing import List, Union, Tuple, Dict
import torch
import pickle
current_dir = os.path.dirname(os.path.realpath(__file__))
CATEGORY = List[Union[int, float]]
RUN_STATS = Dict[str, CATEGORY]
def plot_score_and_acc_over_docs(
dir_name: str,
stats: List[Tuple[str, RUN_STATS]],
per_docs: int = 5
) -> None:
if not os.path.exists(current_dir + "/plots/" + dir_name):
os.makedirs(current_dir + "/plots/" + dir_name)
averages = calculate_averages(stats, per_docs)
num_docs = [count for count in range(per_docs, len(stats[0][1]['ksmr']) + 1, per_docs)]
bleu_improvement_avg = calculate_score_improvement_averages(averages['orig_nmt_out_bleu'],
averages['post_feedback_bleu'])
chrf_improvement_avg = calculate_score_improvement_averages(averages['orig_nmt_out_chrf'],
averages['post_feedback_chrf'])
save_plot_image(num_docs, averages['ksmr'], 'KSMR', dir_name)
save_plot_image(num_docs, averages['orig_nmt_out_bleu'], 'Original BLEU', dir_name)
save_plot_image(num_docs, averages['orig_nmt_out_chrf'], 'Original ChrF', dir_name)
save_plot_image(num_docs, averages['post_feedback_bleu'], 'Post Feedback BLEU', dir_name)
save_plot_image(num_docs, averages['post_feedback_chrf'], 'Post Feedback ChrF', dir_name)
save_plot_image(num_docs, averages['percent_sent_requested'], 'Percent Sents Requested', dir_name)
save_plot_image(num_docs, bleu_improvement_avg, 'Bleu Improvement', dir_name)
save_plot_image(num_docs, chrf_improvement_avg, 'ChrF Improvement', dir_name)
save_plot_map_ksmr_against_score_improvement(averages['ksmr'], bleu_improvement_avg, dir_name, 'BLEU')
save_plot_map_ksmr_against_score_improvement(averages['ksmr'], chrf_improvement_avg, dir_name, 'ChrF')
def save_plot_image(
num_docs: List[int],
averages: List[Tuple[str, CATEGORY]],
title: str,
folder_name: str
) -> None:
for run in averages:
plt.plot(num_docs, run[1], "--", label=run[0])
plt.title('{} Averages'.format(title))
plt.xlabel('Num Docs')
plt.ylabel(title)
plt.legend()
plt.savefig(current_dir + '/plots/{}/{}.png'.format(folder_name, title))
plt.close()
def calculate_averages(
stats: List[Tuple[str, RUN_STATS]],
per_docs: int,
) -> Dict[str, List[Tuple[str, CATEGORY]]]:
categories = ['ksmr', 'post_feedback_bleu', 'post_feedback_chrf', 'percent_sent_requested',
'orig_nmt_out_bleu', 'orig_nmt_out_chrf']
averages = {cat: [] for cat in categories}
for category in categories:
for run in stats:
avgs = calculate_time_step_averages(run[1][category], per_docs)
averages[category].append((run[0], avgs))
return averages
def calculate_time_step_averages(
scores: CATEGORY,
per_docs: int
) -> Union[List[int], List[float]]:
"""
Calculate the average of each consecutive chunk of per_docs scores
"""
chunk_indexes = [i for i in range(per_docs, len(scores) + 1, per_docs)]
averages = []
for i, count in enumerate(chunk_indexes):
starting_i = 0 if i == 0 else chunk_indexes[i - 1]
docs = scores[starting_i: count]
average = sum(docs) / per_docs
averages.append(average)
return averages
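# Illustrative sketch (not part of the original script): with
# scores = [1, 2, 3, 4, 5, 6] and per_docs = 2, chunk_indexes is [2, 4, 6],
# the chunks are [1, 2], [3, 4], [5, 6], and the function returns
# [1.5, 3.5, 5.5] -- one average per chunk, not a cumulative running mean.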
def calculate_score_improvement_averages(
original_score_avgs: List[Tuple[str, List[float]]],
post_feedback_score_avgs: List[Tuple[str, List[float]]],
) -> List[Tuple[str, List[float]]]:
run_improvement_avgs = []
for i in range(len(original_score_avgs)):
assert original_score_avgs[i][0] == post_feedback_score_avgs[i][0]
improve_avgs = [post_feedback_ave - orig_avg
for post_feedback_ave, orig_avg
in zip(post_feedback_score_avgs[i][1], original_score_avgs[i][1])]
run_improvement_avgs.append((original_score_avgs[i][0], improve_avgs))
return run_improvement_avgs
def save_plot_map_ksmr_against_score_improvement(
ksmr_scores: List[Tuple[str, List[int]]],
eval_improvement_scores: List[Tuple[str, List[int]]],
dir_name: str,
title: str
):
for i, run in enumerate(ksmr_scores):
ksmr_values, scores = zip(*sorted(zip(run[1], eval_improvement_scores[i][1])))
plt.plot(ksmr_values, scores, "o--", label=run[0])
plt.title('{} Improvement Across KSMR'.format(title))
plt.xlabel('KSMR (human effort)')
plt.ylabel(title)
plt.legend()
plt.savefig(current_dir + '/plots/{}/{} Improvement v KSMR.png'.format(dir_name, title))
plt.close()
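# Illustrative sketch of the expected pickle payload (inferred from the usage
# above, not verified against the original experiments): each scores_*.p file
# is assumed to hold a dict with one list per metric, one entry per document:
#     {'ksmr': [...], 'post_feedback_bleu': [...], 'post_feedback_chrf': [...],
#      'percent_sent_requested': [...], 'orig_nmt_out_bleu': [...],
#      'orig_nmt_out_chrf': [...]}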
if __name__ == "__main__":
files = [
("Policy 1", current_dir + "/scores_pol_1.p"),
("Policy 2", current_dir + "/scores_pol_2.p"),
("Online", current_dir + "/scores_pol_2_online.p"),
("Learned Sampling AL", current_dir + "/scores_pol_2_learned_AL.p"),
("AL", current_dir + "/scores_pol_2_AL.p")
]
run_stats = []
for run in files:
with open(run[1], "rb") as f:
stats = pickle.load(f)
run_stats.append((run[0], stats))
plot_score_and_acc_over_docs('run_0', run_stats)
|
nilq/baby-python
|
python
|
from molsysmt._private_tools.exceptions import *
from molsysmt.forms.common_gets import *
import numpy as np
from molsysmt.molecular_system import molecular_system_components
from molsysmt._private_tools.files_and_directories import tmp_filename
form_name='file:dcd'
is_form = {
'file:dcd':form_name
}
info=["",""]
has = molecular_system_components.copy()
for ii in ['coordinates', 'box']:
has[ii]=True
def to_file_dcd(item, molecular_system=None, atom_indices='all', frame_indices='all', output_filename=None, copy_if_all=True):
tmp_molecular_system = None
if (atom_indices == 'all') and (frame_indices == 'all'):
if copy_if_all:
tmp_item = extract_item(item, output_filename=output_filename)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item)
else:
tmp_item = item
if molecular_system is not None:
tmp_molecular_system = molecular_system
else:
tmp_item = extract_item(item, atom_indices=atom_indices, frame_indices=frame_indices, output_filename=output_filename)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item, atom_indices=atom_indices, frame_indices=frame_indices)
return tmp_item, tmp_molecular_system
def extract_item(item, atom_indices='all', frame_indices='all', output_filename=None):
if output_filename is None:
output_filename = tmp_filename(extension='dcd')
if (atom_indices == 'all') and (frame_indices == 'all'):
raise NotImplementedError()
else:
raise NotImplementedError()
return tmp_item
def add(item, from_item, atom_indices='all', frame_indices='all'):
raise NotImplementedError()
def append_frames(item, step=None, time=None, coordinates=None, box=None):
raise NotImplementedError()
###### Get
## system
|
nilq/baby-python
|
python
|
import mongolib
class a():
def aa(self):
a=mongolib.mongodb()
a.log_collect(msg='1gaejiusfuadaifuagusuifhiau afdu gaudf uisg uagsi gaug asyaigasydg aug iug ')
a.log_collect(msg='2')
a.log_input()
a.log_output()
aaaa=a()
aaaa.aa()
|
nilq/baby-python
|
python
|
import inspect
import operator
import re
from datetime import datetime
from decimal import Decimal
from enum import Enum
from functools import reduce
import pymongo
from bson import ObjectId
from pymongo.collection import Collection, ReturnDocument
from pymongo.errors import CollectionInvalid
from appkernel.configuration import config
from appkernel.util import OBJ_PREFIX
from .model import Model, Expression, AppKernelException, SortOrder, Property, Index, TextIndex, UniqueIndex, \
CustomProperty
def xtract(clazz_or_instance):
"""
Extract the class name from a class, removing the Service/Controller/Resource ending and adding a plural ending (-s, -es or -ies).
:param clazz_or_instance: the class object
:return: the name of the desired collection
"""
clazz_name = clazz_or_instance.__name__ if inspect.isclass(
clazz_or_instance) else clazz_or_instance.__class__.__name__
name = re.split('Service|Controller|Resource', clazz_name)[0]
if name[-2:] in ['sh', 'ch'] or name[-1:] in ['s', 'x', 'z']:
name = f'{name}es'
elif name[-1:] == 'y' and (name[-2:-1] not in ["a", "e", "i", "o", "u"] or name[-3:-1] == 'qu'):
# consonant + 'y' (or '...quy'): replace the trailing 'y' with 'ies'
name = f'{name[:-1]}ies'
else:
name = f'{name}s'
return name
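# Illustrative examples (not part of the original module):
#     xtract(type('UserService', (), {}))       # -> 'Users'
#     xtract(type('BoxController', (), {}))     # -> 'Boxes'
#     xtract(type('CategoryResource', (), {}))  # -> 'Categories'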
class Query(object):
"""a class representing the query"""
def __init__(self, *expressions):
self.filter_expr = {}
self.sorting_expr = {}
self.__prep_expressions(*expressions)
def __prep_expressions(self, *expressions):
if not expressions:
return
where = reduce(operator.and_, expressions)
if isinstance(where, Expression):
if isinstance(where.lhs, (Property, CustomProperty)):
if where.lhs.backreference.within_an_array:
# this query is part of an array
self.filter_expr[str(where.lhs.backreference.array_parameter_name)] = where.ops.lmbda(
(where.lhs.backreference.parameter_name, Query.__extract_rhs(where.rhs)))
else:
# it is a plain parameter-to-value comparison
self.filter_expr[str(where.lhs.backreference.parameter_name)] = where.ops.lmbda(
Query.__extract_rhs(where.rhs))
elif isinstance(where.lhs, Expression) and isinstance(where.rhs, Expression):
# two expressions are compared to each other
exprs = []
exprs.extend(self.__xtract_expression(where))
self.filter_expr[str(where.ops)] = [expression for expression in exprs]
def __xtract_expression(self, expression: Expression):
ret_val = []
if isinstance(expression.lhs, Expression):
ret_val.extend(self.__xtract_expression(expression.lhs))
if isinstance(expression.rhs, Expression):
ret_val.extend(self.__xtract_expression(expression.rhs))
if isinstance(expression.lhs, Property):
ret_val.append({
expression.lhs.backreference.parameter_name:
expression.ops.lmbda(Query.__extract_rhs(expression.rhs))
})
if isinstance(expression.rhs, Property):
ret_val.append({expression.lhs.backreference.parameter_name:
expression.ops.lmbda(Query.__extract_rhs(expression.rhs))})
return ret_val
@staticmethod
def __extract_rhs(right_hand_side):
if isinstance(right_hand_side, Property):
return right_hand_side.backreference.parameter_name
elif isinstance(right_hand_side, Enum):
return right_hand_side.name
else:
return right_hand_side
def sort_by(self, *sorting_tuples):
"""
Defines sorting criteria (eg. .sort_by(User.name.desc())
:param sorting_tuples: desc() or asc() on the Model parameter
:return: self for calling further methods on the class
:rtype: Query
"""
self.sorting_expr = list(sorting_tuples)
return self
def find(self):
"""
Creates a cursor based on the filter and sorting criteria and yields the results;
:return: a generator object which yields found instances of Model class
"""
raise NotImplementedError('abstract method')
def find_one(self):
"""
:return: One or none instances of the Model, depending on the query criteria
"""
raise NotImplementedError('abstract method')
def count(self):
"""
:return: the number of items in the repository matching the filter expression;
"""
raise NotImplementedError('abstract method')
def delete(self):
"""
Delete all elements which fulfill the filter criteria (defined in the where method);
:return: the deleted item count
"""
raise NotImplementedError('abstract method')
def get(self, page=0, page_size=100):
"""
Returns the list of found Model instances;
:param page: the current page requested
:param page_size: the size of the page (number of elements requested
:return: the result of the query as a list of Model instance objects
"""
raise NotImplementedError('abstract method')
def mongo_type_converter_to_dict(value: any) -> any:
if isinstance(value, Decimal):
return float(value)
else:
return value
def mongo_type_converter_from_dict(value: any) -> any:
return value
class MongoQuery(Query):
def __init__(self, connection_object: pymongo.collection.Collection, user_class, *expressions):
super().__init__(*expressions)
self.connection: pymongo.collection.Collection = connection_object
self.user_class = user_class
def find(self, page: int = 0, page_size: int = 100) -> Model:
"""
Returns a generator over the items matching the query, limited to one page
:param page: current page
:param page_size: number of elements per page
:return: a generator which can be used in an iteration
"""
if len(self.sorting_expr) == 0:
cursor = self.connection.find(self.filter_expr).skip(page * page_size).limit(page_size)
else:
cursor = self.connection.find(self.filter_expr).sort(self.sorting_expr).skip(page * page_size).limit(
page_size)
if cursor:
for item in cursor:
yield Model.from_dict(item, self.user_class, convert_ids=True,
converter_func=mongo_type_converter_from_dict)
def get(self, page: int = 0, page_size: int = 100) -> list:
"""
Return the list of items on the requested page of the query result
:param page: current page
:param page_size: the number of elements per page
:return: a list of the items on the requested page
"""
return [item for item in self.find(page=page, page_size=page_size)]
def find_one(self):
"""
:return: one instance of the Model or None
:rtype: Model
"""
hit = self.connection.find_one(self.filter_expr)
return Model.from_dict(hit, self.user_class, convert_ids=True,
converter_func=mongo_type_converter_from_dict) if hit else None
def delete(self) -> int:
"""
:return: the delete count
"""
return self.connection.delete_many(self.filter_expr).deleted_count
def count(self) -> int:
return self.connection.count_documents(self.filter_expr)
def __get_update_expression(self, **update_expression):
update_dict = dict()
for key, exp in update_expression.items():
opname = str(exp.ops)
op_expr = update_dict.get(opname, {})
op_expr[key] = exp.ops.lmbda(exp.rhs)
update_dict[opname] = op_expr
return update_dict
def find_one_and_update(self, **update_expression):
upd = self.__get_update_expression(**update_expression)
hit = self.connection.find_one_and_update(self.filter_expr, upd, return_document=ReturnDocument.AFTER)
return Model.from_dict(hit, self.user_class, convert_ids=True,
converter_func=mongo_type_converter_from_dict) if hit else None
def update_one(self, **update_expression) -> int:
upd = self.__get_update_expression(**update_expression)
update_result = self.connection.update_one(self.filter_expr, upd, upsert=False)
return update_result.modified_count
def update_many(self, **update_expression) -> int:
upd = self.__get_update_expression(**update_expression)
update_result = self.connection.update_many(self.filter_expr, upd, upsert=False)
return update_result.modified_count
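# Illustrative usage sketch (assuming a ``User`` class mixing Model and
# MongoRepository with a ``name`` Property; not part of the original module):
#     query = User.where(User.name == 'John').sort_by(User.name.desc())
#     first_match = query.find_one()
#     first_page = query.get(page=0, page_size=10)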
class RepositoryException(AppKernelException):
def __init__(self, message):
super().__init__(message)
class Repository(object):
@classmethod
def find_by_id(cls, object_id):
"""
Find an object identified by the unique database id
:param object_id: the database id
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def delete_by_id(cls, object_id):
"""
Delete the object identified by ID
:param object_id: the unique object ID
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def create_object(cls, document):
"""
Insert a new object in the database
:param document:
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def replace_object(cls, object_id, document):
"""
Replace the object in the database.
:param object_id:
:param document:
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def patch_object(cls, document, object_id=None):
raise NotImplementedError('abstract method')
@classmethod
def save_object(cls, document, object_id=None):
raise NotImplementedError('abstract method')
@classmethod
def find(cls, *expressions):
"""
:param expressions:
:type expressions: Expression
:return: a Model Generator
"""
raise NotImplementedError('abstract method')
@classmethod
def find_one(cls, *expressions):
"""
Returns one single instance of the Model.
:param expressions:
:type expressions: Expression
:return: one Model object
:rtype: Model
"""
raise NotImplementedError('abstract method')
@classmethod
def where(cls, *expressions):
"""
Creates and returns a query object, used for further chaining functions like sorting and pagination;
:param expressions: the query filter expressions used to narrow the result-set
:return: a query object preconfigured with the given filter expressions
:rtype: Query
"""
raise NotImplementedError('abstract method')
@classmethod
def find_by_query(cls, query={}, page=1, page_size=50, sort_by=None, sort_order=SortOrder.ASC):
"""
:param query:
:type query: dict
:param page:
:type page: int
:param page_size:
:type page_size: int
:param sort_by:
:param sort_order:
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def create_cursor_by_query(cls, query):
raise NotImplementedError('abstract method')
@classmethod
def update_many(cls, match_query_dict, update_expression_dict):
"""
:param match_query_dict:
:param update_expression_dict:
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def delete_many(cls, match_query_dict):
"""
:param match_query_dict:
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def delete_all(cls):
"""
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def count(cls, query_filter={}):
"""
Return the number of items matching the query filter
:param query_filter: the raw query type as a dict (using the mongo syntax)
:type query_filter: dict
:return:
"""
raise NotImplementedError('abstract method')
def save(self):
"""
Saves or updates a model instance in the database
:return: the id of the inserted or updated document
"""
raise NotImplementedError('abstract method')
def delete(self):
"""
Delete the current instance.
:raises RepositoryException: in case the instance was not deleted.
"""
raise NotImplementedError('abstract method')
class MongoRepository(Repository):
@classmethod
def init_indexes(cls):
if issubclass(cls, Model):
index_factories = {
Index: MongoRepository.create_index,
TextIndex: MongoRepository.create_text_index,
UniqueIndex: MongoRepository.create_unique_index
}
for key, value in cls.__dict__.items():
if isinstance(value, Property):
if value.index:
fct = index_factories.get(value.index, MongoRepository.not_supported)
fct(cls.get_collection(), key,
value.index.sort_order if hasattr(value.index, 'sort_order') else SortOrder.ASC)
@staticmethod
def version_check(required_version_tuple):
server_info = config.mongo_database.client.server_info()
current_version = tuple(int(i) for i in server_info['version'].split('.'))
if current_version < required_version_tuple:
raise AppKernelException(
'This feature requires a min version of: {}'.format('.'.join(str(i) for i in required_version_tuple)))
@classmethod
def add_schema_validation(cls, validation_action='warn'):
"""
:param validation_action: 'warn' or 'error'; with 'warn', MongoDB logs any violation but allows the insertion or update to proceed
:return:
"""
MongoRepository.version_check(tuple([3, 6, 0]))
try:
config.mongo_database.create_collection(xtract(cls))
except CollectionInvalid:
# the collection already exists
pass
config.mongo_database.command(
'collMod', xtract(cls),
validator={'$jsonSchema': cls.get_json_schema(mongo_compatibility=True)},
validationLevel='moderate',
validationAction=validation_action
)
@staticmethod
def create_index(collection, field_name, sort_order, unique=False):
# type: (pymongo.collection.Collection, str, SortOrder, bool) -> ()
"""
Args:
collection(pymongo.collection.Collection): the collection to which the index is applied to
field_name(str): the name of the document field which is being indexed
sort_order(SortOrder): the sort order
unique(bool): if true (false by default) it will create a unique index
"""
if field_name not in collection.index_information():
if isinstance(sort_order, SortOrder):
direction = pymongo.ASCENDING if sort_order == SortOrder.ASC else pymongo.DESCENDING
else:
direction = sort_order
collection.create_index(
[(field_name, direction)],
unique=unique, background=True, name='{}_idx'.format(field_name))
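# Illustrative usage sketch (assuming a ``Project`` Model/MongoRepository
# subclass; not part of the original module):
#     MongoRepository.create_index(Project.get_collection(), 'name', SortOrder.ASC)
# creates a background ascending index named 'name_idx' unless one already exists.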
@staticmethod
def create_text_index(collection, field_name, *args):
# type: (pymongo.collection.Collection, str, SortOrder, bool) -> ()
MongoRepository.create_index(collection, field_name, pymongo.TEXT)
@staticmethod
def create_unique_index(collection, field_name, sort_order):
MongoRepository.create_index(collection, field_name, sort_order, unique=True)
@staticmethod
def not_supported(*args):
pass
@classmethod
def get_collection(cls) -> pymongo.collection.Collection:
"""
:return: the collection for this model object
:rtype: Collection
"""
db = config.mongo_database
if db is not None:
return db.get_collection(xtract(cls))
else:
raise AppKernelException('The database engine is not set')
@classmethod
def find_by_id(cls, object_id):
assert object_id, 'the id of the lookup object must be provided'
if isinstance(object_id, str) and object_id.startswith(OBJ_PREFIX):
object_id = ObjectId(object_id.split(OBJ_PREFIX)[1])
document_dict = cls.get_collection().find_one({'_id': object_id})
return Model.from_dict(document_dict, cls, convert_ids=True,
converter_func=mongo_type_converter_from_dict) if document_dict else None
@classmethod
def delete_by_id(cls, object_id):
"""
Deletes a document identified by the object id
:param object_id:
:return: true if the object was deleted
"""
delete_result = cls.get_collection().delete_one({'_id': object_id})
return delete_result.deleted_count
@staticmethod
def prepare_document(document, object_id=None):
if isinstance(document, Model):
document_id = document.id
has_id = document_id is not None
document = Model.to_dict(document, convert_id=True, converter_func=mongo_type_converter_to_dict)
elif not isinstance(document, dict):
raise RepositoryException('Only dictionary or Model is accepted.')
else:
document_id = object_id or document.get('id') or document.get('_id')
has_id = document_id is not None
return has_id, document_id, document
@classmethod
def patch_object(cls, document, object_id=None):
return cls.__save_or_update_dict(document, object_id=object_id, insert_if_none_found=False)
@classmethod
def __save_or_update_dict(cls, document, object_id=None, insert_if_none_found: bool = True):
has_id, document_id, document = MongoRepository.prepare_document(document, object_id)
if has_id:
update_result = cls.get_collection().update_one({'_id': document_id}, {'$set': document},
upsert=insert_if_none_found)
db_id = update_result.upserted_id or (document_id if update_result.matched_count > 0 else None)
else:
insert_result = cls.get_collection().insert_one(document)
db_id = insert_result.inserted_id # pylint: disable=C0103
return db_id
@classmethod
def save_object(cls, model: Model, object_id: str = None, insert_if_none_found: bool = True) -> object:
assert model, 'the object must be handed over as a parameter'
assert isinstance(model, Model), 'the object should be a Model'
document = Model.to_dict(model, convert_id=True, converter_func=mongo_type_converter_to_dict)
        model.id = cls.__save_or_update_dict(document=document, object_id=object_id,
                                             insert_if_none_found=insert_if_none_found)
return model.id
@classmethod
def replace_object(cls, model: Model):
assert model, 'the document must be provided before replacing'
document = Model.to_dict(model, convert_id=True, converter_func=mongo_type_converter_to_dict)
has_id, document_id, document = MongoRepository.prepare_document(document, None)
update_result = cls.get_collection().replace_one({'_id': document_id}, document, upsert=False)
return (update_result.upserted_id or document_id) if update_result.matched_count > 0 else None
@classmethod
def bulk_insert(cls, list_of_model_instances):
return cls.get_collection().insert_many(
[Model.to_dict(model, convert_id=True, converter_func=mongo_type_converter_to_dict) for model in
list_of_model_instances]).inserted_ids
@classmethod
def find(cls, *expressions):
return MongoQuery(cls.get_collection(), cls, *expressions).find()
@classmethod
def find_one(cls, *expressions):
return MongoQuery(cls.get_collection(), cls, *expressions).find_one()
@classmethod
def where(cls, *expressions) -> MongoQuery:
"""
Creates and returns a query object, used for further chaining functions like sorting and pagination;
:param expressions: the query filter expressions used to narrow the result-set
:return: a query object precofigured with the
:rtype: MongoQuery
"""
return MongoQuery(cls.get_collection(), cls, *expressions)
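    # Hedged chaining sketch (the User model is hypothetical; only find() and
    # find_one() are confirmed by this module, further sorting/pagination helpers
    # depend on the MongoQuery implementation):
    #
    #     john = User.where(User.name == 'John').find_one()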
@classmethod
    def find_by_query(cls, query=None, page=1, page_size=50, sort_by=None, sort_order=SortOrder.ASC):
        """
        Query using Mongo's built-in query language.
        :param query: the query expression as a dictionary (defaults to an empty filter matching all documents)
        :param page: the page number (1-based)
        :param page_size: the maximum number of documents returned per page
        :param sort_by: the name of the field to sort by
        :param sort_order: the sort direction (SortOrder.ASC by default)
        :return: a list of model instances matching the query
        """
        cursor = cls.get_collection().find(query or {}).skip((page - 1) * page_size).limit(page_size)
if sort_by:
py_direction = pymongo.ASCENDING if sort_order == SortOrder.ASC else pymongo.DESCENDING
cursor.sort(sort_by, direction=py_direction)
return [Model.from_dict(result, cls, convert_ids=True, converter_func=mongo_type_converter_from_dict) for result
in cursor]
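    # Hedged example using raw Mongo query syntax (model and field names are
    # illustrative):
    #
    #     adults = User.find_by_query({'age': {'$gte': 18}}, page=1, page_size=20,
    #                                 sort_by='age', sort_order=SortOrder.DESC)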
@classmethod
def create_cursor_by_query(cls, query):
cursor = cls.get_collection().find(query)
return (Model.from_dict(result, cls, convert_ids=True, converter_func=mongo_type_converter_from_dict) for result
in cursor)
@classmethod
def update_many(cls, match_query_dict, update_expression_dict):
"""
updates multiple documents in the database
:param match_query_dict: the query expression to match the documents to be updated
:param update_expression_dict:
:return: the number of modified documents
"""
update_result = cls.get_collection().update_many(match_query_dict, update_expression_dict)
return update_result.modified_count
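    # Hedged example (illustrative field names); the update expression must use
    # Mongo update operators such as $set or $inc:
    #
    #     modified = User.update_many({'locked': True}, {'$set': {'locked': False}})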
@classmethod
def delete_many(cls, match_query_dict):
return cls.get_collection().delete_many(match_query_dict).deleted_count
@classmethod
def delete_all(cls):
"""
deletes all documents from the collection
:return: the count of deleted documents
"""
return cls.get_collection().delete_many({}).deleted_count
@classmethod
    def count(cls, query_filter=None):
        # Collection.count() was removed from PyMongo; count_documents is the supported call
        return cls.get_collection().count_documents(query_filter or {})
@classmethod
    def aggregate(cls, pipe=None, allow_disk_use=True, batch_size=100):
        cursor = cls.get_collection().aggregate(pipe or [], allowDiskUse=allow_disk_use, batchSize=batch_size)
        return list(cursor)
def save(self):
self.id = self.__class__.save_object(self) # pylint: disable=C0103
return self.id
def delete(self):
assert self.id is not None
deleted_count = self.get_collection().delete_one({'_id': self.id}).deleted_count
if deleted_count != 1:
raise RepositoryException("the instance couldn't be deleted")
class AuditableRepository(MongoRepository):
def __init__(self, **kwargs):
        super().__init__()
@classmethod
def save_object(cls, model: Model, object_id=None):
document = Model.to_dict(model, convert_id=True, converter_func=mongo_type_converter_to_dict)
has_id, doc_id, document = MongoRepository.prepare_document(document, object_id)
now = datetime.now()
document.update(updated=now)
if has_id:
# it is an update or a first insert with generated ID
if 'version' in document:
del document['version']
if 'inserted' in document:
del document['inserted']
upsert_expression = {
'$set': document,
'$setOnInsert': {'inserted': now},
'$inc': {'version': 1}
}
update_result = cls.get_collection().update_one({'_id': doc_id}, upsert_expression, upsert=True)
db_id = update_result.upserted_id or doc_id
else:
# it is an insert for sure, we initialise the audit fields
document.update(inserted=now, version=1)
insert_result = cls.get_collection().insert_one(document)
db_id = insert_result.inserted_id
model.id = db_id
return model.id
def save(self):
self.__class__.save_object(self)
return self.id
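# Hedged sketch of the audit behaviour implemented above (the Invoice model is
# illustrative): every save stamps `updated`, sets `inserted` once on first
# insert via $setOnInsert, and increments `version` server-side via $inc.
#
#     class Invoice(Model, AuditableRepository):
#         number = Property(str, required=True)
#
#     inv = Invoice(number='2021-001')
#     inv.save()  # version == 1, inserted == updated
#     inv.save()  # version == 2, updated refreshed, inserted unchanged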
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.11 on 2021-01-22 10:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cars', '0001_initial'),
('users', '0002_auto_20210122_0713'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BankAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bank', models.CharField(max_length=32)),
('agency', models.CharField(max_length=16)),
('balance', models.FloatField(default=0)),
],
),
migrations.CreateModel(
name='Sale',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('active', models.BooleanField(default=True, verbose_name='Active')),
('value', models.FloatField()),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cars.Car')),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='users.Customer')),
('seller', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('active', models.BooleanField(default=True, verbose_name='Active')),
('value', models.FloatField()),
('buyer_for', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cars.Car')),
('provider', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='users.Customer')),
],
options={
'abstract': False,
},
),
]
|
nilq/baby-python
|
python
|
import math
# For N = a * b, the value a + b - 2 is minimised when the divisor pair (a, b)
# is as close to sqrt(N) as possible, so we look for the largest divisor that
# does not exceed sqrt(N).
N = int(input())
sqN = math.floor(math.sqrt(N))
yaku1 = 1  # 'yaku' abbreviates yakusuu, Japanese for divisor
yaku2 = 1
# scan downwards from floor(sqrt(N)) for the largest divisor <= sqrt(N)
for i in range(sqN, 0, -1):
    if N % i == 0:
        yaku1 = i
        yaku2 = N // i
        break
print(yaku1 + yaku2 - 2)
|
nilq/baby-python
|
python
|
import pytest
from unittest.mock import AsyncMock, patch
from app import Application
@pytest.mark.asyncio
async def test_func1():
    app = Application()
    # AsyncMock (Python 3.8+) replaces the deprecated asyncio.coroutine wrapper;
    # patch.object is a regular context manager, so plain `with` is required
    # (`async with` would fail because the patcher has no __aenter__)
    with patch.object(Application, 'func2', new=AsyncMock(return_value='future result!')) as mock:
        res = await app.func1()
        print(res)
        # mock.assert_awaited_with(app.func3())
|
nilq/baby-python
|
python
|
# import the libraries used later for analysis and visualisation
import tushare as ts
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
import psycopg2  # required by the postgresql+psycopg2 dialect
# render Chinese characters and minus signs correctly in plots
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False
# set the tushare API token (placeholder -- substitute your own token)
token = 'your_tushare_token'
pro = ts.pro_api(token)
# data download function; the default date range can be changed freely
# if it raises an error, upgrade tushare to the latest version
def get_data(code, start='20190101', end='20190425'):
    df = ts.pro_bar(ts_code=code, adj='qfq', start_date=start, end_date=end)
    return df
# fetch the codes and short names of stocks listed on the latest trading day
def get_code():
    codes = pro.stock_basic(list_status='L').ts_code.values
    return codes
engine = create_engine('postgresql+psycopg2://postgres:123456@localhost:5432/postgres')
def insert_sql(data, db_name, if_exists='append'):
    # use try/except so a single failed insert does not crash the whole run
    try:
        data.to_sql(db_name, engine, index=False, if_exists=if_exists)
        # print(code + ' written to the database successfully')
    except Exception:
        pass
# download the 20190101-20190425 data and insert it into the stock_data table
# this step is time-consuming, roughly 25-35 minutes
for code in get_code():
    data = get_data(code)
    insert_sql(data, 'stock_data')
# read back the whole table
df = pd.read_sql('stock_data', engine)
print(len(df))
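# A hedged follow-up sketch: once the table is populated, pull subsets with SQL
# instead of loading the whole table (column names such as ts_code and
# trade_date follow the tushare pro_bar output and are assumptions here)
sample = pd.read_sql_query(
    "SELECT * FROM stock_data WHERE ts_code = '000001.SZ' ORDER BY trade_date",
    engine)
print(sample.head())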
|
nilq/baby-python
|
python
|