# NOTE: dataset-extraction artifact ("text stringlengths ... |---|") removed.
# This file is a concatenation of several unrelated Python sources.
# -*- coding: utf-8 -*-
import pandas as pd
from flask import Flask, render_template, request
import sys
app = Flask(__name__) # create an app instance
data = pd.read_csv("https://raw.githubusercontent.com/joe608939/test/master/hklit_test.csv")
author_data = pd.read_csv('https://raw.githubusercontent.com/joe608939/test/master/(Draft)%20HKLit%20Author%20list%202019_v.6_20190920.csv')
def generate_statistics(new_df, all_name_of_input_writer):
    """Annotate *new_df* with per-article and total name-mention counts.

    Two object columns are appended:
      - 'Number of times being metioned in title'  (typo kept: it is a
        rendered column header that the result templates already display)
      - 'Number of times being mentioned in the fulltext'
    Each original row receives a list of "name : count" strings, and three
    summary rows (article total, title total, fulltext total) are
    concatenated at the bottom.  Returns the augmented frame; the caller's
    frame is left untouched.
    """
    # Work on a copy so the caller's DataFrame is not mutated as a side effect.
    temp_df = new_df.copy()
    title_col = 'Number of times being metioned in title'
    full_col = 'Number of times being mentioned in the fulltext'
    temp_df[title_col] = ''
    temp_df[full_col] = ''
    # Lists are stored per cell, so the columns must hold arbitrary objects.
    temp_df[title_col] = temp_df[title_col].astype(object)
    temp_df[full_col] = temp_df[full_col].astype(object)
    if temp_df.shape[0] == 0:
        return temp_df

    title_totals = {name: 0 for name in all_name_of_input_writer}
    fulltext_totals = {name: 0 for name in all_name_of_input_writer}
    # Hoist the positional column lookups out of the row loop.
    title_pos = temp_df.columns.get_loc(title_col)
    full_pos = temp_df.columns.get_loc(full_col)
    for i in range(temp_df.shape[0]):
        title = temp_df.iloc[i]['title']
        full_text = temp_df.iloc[i]['fullText']
        # Missing cells arrive as NaN; count against a blank string instead.
        if str(title) == 'nan':
            title = ' '
        if str(full_text) == 'nan':
            full_text = ' '
        row_title_counts = []
        row_fulltext_counts = []
        for name in all_name_of_input_writer:
            in_title = title.count(name)
            in_fulltext = full_text.count(name)
            title_totals[name] += in_title
            fulltext_totals[name] += in_fulltext
            row_title_counts.append(name + ' : ' + str(in_title))
            row_fulltext_counts.append(name + ' : ' + str(in_fulltext))
        # .iat replaces the chained assignment temp_df[col][i] = ... of the
        # original, which is a SettingWithCopy bug in modern pandas.
        temp_df.iat[i, title_pos] = row_title_counts
        temp_df.iat[i, full_pos] = row_fulltext_counts

    all_mentioned_in_title = [
        name + ' : ' + str(title_totals[name]) for name in all_name_of_input_writer
    ]
    all_mentioned_in_fulltext = [
        name + ' : ' + str(fulltext_totals[name]) for name in all_name_of_input_writer
    ]
    # Summary rows.  DataFrame.append() was removed in pandas 2.0, so build
    # them in a single constructor call and concatenate once.
    summary = pd.DataFrame(
        [
            {'url': 'total number of articles retrieved', title_col: temp_df.shape[0]},
            {'url': 'total number of mention in title', title_col: all_mentioned_in_title},
            {'url': 'total number of mention in fulltext', title_col: all_mentioned_in_fulltext},
        ],
        columns=temp_df.columns,
    )
    summary = summary.fillna(' ')
    return pd.concat([temp_df, summary])
def generating_combined_result_file(input_get_statistics, search_field, data, name_list):
    """Collect the rows matching every name in *name_list* into one frame.

    For search_field 'author' a row matches when 'creator' contains the
    name; for 'content' it matches when 'title' or 'fullText' contains the
    name but 'creator' does not.  If *input_get_statistics* is "T" the
    combined frame is run through generate_statistics() before returning.
    """
    frequency_notes = []
    matched_frames = []
    for writer in name_list:
        is_creator = data["creator"].str.contains(writer, na=False)
        if search_field == 'author':
            hits = data[is_creator]
        elif search_field == 'content':
            in_title = data["title"].str.contains(writer, na=False)
            in_body = data["fullText"].str.contains(writer, na=False)
            hits = data[~is_creator & (in_title | in_body)]
        frequency_notes.append(writer + ' : ' + str(hits.shape[0]))
        matched_frames.append(hits)
    combined = pd.concat([pd.DataFrame()] + matched_frames).reset_index()
    if input_get_statistics == "T":
        combined = generate_statistics(combined, name_list)
    return combined
def check_name(name_str):
    """Expand *name_str* into the list of known aliases for that author.

    Scans the module-level `author_data` frame: for each author row, the
    cells from column 'Be Known as ' to the end of the row are treated as
    alternative names (NaN, whitespace-only and the 'same as column 1'
    placeholder are dropped).  If *name_str* appears among a row's aliases,
    all of that row's aliases are collected.  Returns a de-duplicated list
    that always contains *name_str* itself.
    """
    name_list = []
    name_list.append(name_str)
    for i in range(0, author_data.shape[0]):
        # Label-slice of the row Series from 'Be Known as ' onward.
        # NOTE(review): the trailing space in the label presumably matches
        # the CSV header exactly -- confirm against the author list file.
        temp_name_list = list(set(author_data.iloc[i]['Be Known as ':]))
        temp_name_list = [str(x) for x in temp_name_list
                          if str(x) != 'nan' and not str(x).isspace() and str(x) != 'same as column 1']
        if name_str in temp_name_list:
            name_list = name_list + temp_name_list
    # De-duplicate (order is not preserved by set()).
    name_list = list(set(name_list))
    return name_list
def get_result_for_creator_name(data, name):
    """Return rows whose 'creator' field contains *name* (NaN = no match)."""
    creator_matches = data["creator"].str.contains(name, na=False)
    return data[creator_matches].reset_index()
def get_result_for_content(data, name):
    """Return rows mentioning *name* in title or fullText but NOT in creator."""
    by_creator = data["creator"].str.contains(name, na=False)
    in_title = data["title"].str.contains(name, na=False)
    in_body = data["fullText"].str.contains(name, na=False)
    return data[~by_creator & (in_title | in_body)].reset_index()
@app.route("/",methods = ['GET','POST']) # at the end point /
def hello():
if request.method == 'POST':
name = request.form['author_name']
search_field = request.form['search_field']
get_stat = request.form['get_stat']
result = get_result_for_title(data,name)
return render_template('result.html', tables=[result.to_html(classes='data')], titles=result.columns.values)
# call method hello # which returns "hello world"
return render_template('ask_for_input.html') # call method hello # which returns "hello world"
@app.route("/send",methods = ['GET','POST']) # at the end point /
def print_result():
if request.method == 'POST':
name = request.form['author_name']
search_field = request.form['search_field']
get_stat = request.form['get_stat']
get_name_list = request.form['get_name_list']
separte_name = request.form['separte_name']
if get_name_list =='F':
name_list = []
name_list.append(name)
elif get_name_list == 'T':
name_list = check_name(name)
dataframe_collection = {}
if search_field == 'author':
if separte_name =='T':
for name in name_list:
result = get_result_for_creator_name(data,name)
if get_stat == 'T':
result = generate_statistics(result,name_list)
dataframe_collection[name] = result
elif separte_name =='F':
result = generating_combined_result_file(get_stat, search_field, data, name_list)
dataframe_collection[name] = result
elif search_field == 'content':
if separte_name == 'T':
for name in name_list:
result = get_result_for_content(data,name)
if get_stat == 'T':
result = generate_statistics(result,name_list)
dataframe_collection[name] = result
elif separte_name =='F':
result = generating_combined_result_file(get_stat, search_field, data, name_list)
dataframe_collection[name] = result
return render_template('result.html',dataframe_collection = dataframe_collection)
if __name__ == "__main__": # on running python app.py
app.run() |
import threading
import time


# Threads execute a function; this is the worker body each thread runs.
def sleeper(n, name):
    """Announce, sleep *n* seconds, announce again (demonstrates overlap)."""
    print("Hi i am{}. Going to sleep for 5 seconds \n".format(name))
    time.sleep(n)
    print('{} has woken up from sleep \n '.format(name))


# FIX: removed the trailing '|' extraction artifact that broke the syntax.
threads_list = []  # holds our threads after we initialize them
start = time.time()
for i in range(5):
    t = threading.Thread(target=sleeper, name='thread{}'.format(i),
                         args=(5, 'thread{}'.format(i)))
    threads_list.append(t)
    t.start()
    print("{} has started".format(t.name))
for t in threads_list:
    t.join()  # makes sure the threads all end before the hello statement
end = time.time()
executiontime = end - start
# All five 5-second sleeps overlap, so this prints ~5s, not ~25s.
print('execution time is: ', executiontime)
print('hello')  # these will run even when t is running demonstrating concurrency if not locked
print('hellofriend')
# uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\Push2\auto_filter.py
# Compiled at: 2019-04-23 16:19:13
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import EventObject, listens, liveobj_valid
from ableton.v2.control_surface import LiveObjectDecorator, get_parameter_by_name
from .device_decoration import DeviceSwitchOption
from .device_component import DeviceComponentWithTrackColorViewData
from .visualisation_settings import VisualisationGuides
class AutoFilterDeviceDecorator(EventObject, LiveObjectDecorator):
    """Decorates Live's Auto Filter device with a 'Slope' switch option.

    NOTE: decompiled Python 2 code (bytes-literal names); keep py2-compatible.
    """

    def __init__(self, *a, **k):
        super(AutoFilterDeviceDecorator, self).__init__(*a, **k)
        # Re-resolve the Slope parameter whenever the device's parameters change.
        self.__on_parameters_changed.subject = self._live_object
        self.slope_option = DeviceSwitchOption(name=b'Slope', parameter=get_parameter_by_name(self, b'Slope'))
        self.register_disconnectables(self.options)

    @property
    def options(self):
        # Single-element tuple: only the slope switch is exposed.
        return (
            self.slope_option,)

    @listens(b'parameters')
    def __on_parameters_changed(self):
        self.slope_option.set_parameter(get_parameter_by_name(self, b'Slope'))
class AutoFilterDeviceComponent(DeviceComponentWithTrackColorViewData):
    """Device component driving the Auto Filter visualisation on Push 2.

    NOTE: decompiled Python 2 code (`xrange`, bytes-literal keys); kept
    py2-compatible.  FIX: removed the trailing '|' extraction artifact
    that broke the final line's syntax; no logic changed.
    """

    def _parameter_touched(self, parameter):
        self._update_visualisation_view_data(self._filter_visualisation_data())

    def _parameter_released(self, parameter):
        self._update_visualisation_view_data(self._filter_visualisation_data())

    def parameters_changed(self):
        self._update_visualisation_view_data(self._filter_visualisation_data())

    def _set_bank_index(self, bank):
        super(AutoFilterDeviceComponent, self)._set_bank_index(bank)
        self._update_visualisation_view_data(self._filter_visualisation_data())
        self.notify_visualisation_visible()
        self.notify_shrink_parameters()

    def _initial_visualisation_view_data(self):
        view_data = super(AutoFilterDeviceComponent, self)._initial_visualisation_view_data()
        view_data.update(self._filter_visualisation_data())
        return view_data

    def _filter_visualisation_data(self):
        # Parameters whose touch buttons are currently held on the hardware.
        touched_parameters = [self.parameters[button.index]
                              for button in self.parameter_touch_buttons
                              if button.is_pressed]
        parameter_names = [
            b'Filter Type',
            b'Filter Type (Legacy)',
            b'Frequency',
            b'Resonance',
            b'Resonance (Legacy)']
        # Focus the filter display while any filter-related parameter is touched.
        filter_focus = any([parameter.parameter.name in parameter_names
                            for parameter in touched_parameters
                            if liveobj_valid(parameter.parameter)])
        return {b'FilterLeft': VisualisationGuides.light_left_x(0),
                b'FilterRight': VisualisationGuides.light_right_x(2),
                b'FilterFocus': filter_focus}

    @listens(b'oscillator_index')
    def __on_selected_oscillator_changed(self):
        self._update_visualisation_view_data(self._filter_visualisation_data())

    @property
    def _visualisation_visible(self):
        # Visualisation is shown only on the first two parameter banks.
        return self._bank != None and self._bank.index in (0, 1)

    @property
    def _shrink_parameters(self):
        # Shrink the first three parameter slots while the visualisation shows.
        return [self._visualisation_visible and index < 3 for index in xrange(8)]
"""
You shouldn't need to modify this file, but if you are making submodules (nested folders + files) you
will need to include a __init__.py file that has relative import statements.
Example
-------
If you have a submodule folder called /foo, with a module inside called bar.py you will need to include
a __init__.py file in /foo with the following import statement:
```
from .bar import *
```
If you don't do this python won't find the contents of the files inside the submodule folder.
""" |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceEcShopCreateResponse(AlipayResponse):
    """Response object for the alipay.commerce.ec.shop.create API call."""

    def __init__(self):
        super(AlipayCommerceEcShopCreateResponse, self).__init__()
        # Populated by parse_response_content(); None until then.
        self._ec_shop_id = None

    @property
    def ec_shop_id(self):
        # Identifier of the newly created shop, if the gateway returned one.
        return self._ec_shop_id

    @ec_shop_id.setter
    def ec_shop_id(self, value):
        self._ec_shop_id = value

    def parse_response_content(self, response_content):
        # Let the base class decode the payload, then lift out our one field.
        response = super(AlipayCommerceEcShopCreateResponse, self).parse_response_content(response_content)
        if 'ec_shop_id' in response:
            self.ec_shop_id = response['ec_shop_id']
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
from collections import namedtuple
from datetime import date, timedelta
from typing import TYPE_CHECKING, Dict, Tuple, Union
import pytest
from airflow import PY38, PY311
from airflow.decorators import setup, task as task_decorator, teardown
from airflow.decorators.base import DecoratedMappedOperator
from airflow.exceptions import AirflowException, XComNotFound
from airflow.models import DAG
from airflow.models.baseoperator import BaseOperator
from airflow.models.expandinput import DictOfListsExpandInput
from airflow.models.mappedoperator import MappedOperator
from airflow.models.taskinstance import TaskInstance
from airflow.models.taskmap import TaskMap
from airflow.models.xcom_arg import PlainXComArg, XComArg
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.task_group import TaskGroup
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.types import DagRunType
from airflow.utils.xcom import XCOM_RETURN_KEY
from tests.operators.test_python import BasePythonTest
if TYPE_CHECKING:
from airflow.models.dagrun import DagRun
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class TestAirflowTaskDecorator(BasePythonTest):
default_date = DEFAULT_DATE
def test_python_operator_python_callable_is_callable(self):
"""Tests that @task will only instantiate if
the python_callable argument is callable."""
not_callable = {}
with pytest.raises(TypeError):
task_decorator(not_callable)
@pytest.mark.parametrize(
"resolve",
[
pytest.param(eval, id="eval"),
pytest.param(lambda t: t, id="stringify"),
],
)
@pytest.mark.parametrize(
"annotation",
[
"dict",
pytest.param(
"dict[str, int]",
marks=pytest.mark.skipif(
sys.version_info < (3, 9),
reason="PEP 585 is implemented in Python 3.9",
),
),
"Dict",
"Dict[str, int]",
],
)
def test_infer_multiple_outputs_using_dict_typing(self, resolve, annotation):
@task_decorator
def identity_dict(x: int, y: int) -> resolve(annotation):
return {"x": x, "y": y}
assert identity_dict(5, 5).operator.multiple_outputs is True
# Check invoking ``@task_decorator.__call__()`` yields the correct inference.
@task_decorator()
def identity_dict_with_decorator_call(x: int, y: int) -> resolve(annotation):
return {"x": x, "y": y}
assert identity_dict_with_decorator_call(5, 5).operator.multiple_outputs is True
def test_infer_multiple_outputs_forward_annotation(self):
if TYPE_CHECKING:
class FakeTypeCheckingOnlyClass:
...
class UnresolveableName:
...
@task_decorator
def t1(x: "FakeTypeCheckingOnlyClass", y: int) -> Dict[int, int]: # type: ignore[empty-body]
...
assert t1(5, 5).operator.multiple_outputs is True
@task_decorator
def t2(x: "FakeTypeCheckingOnlyClass", y: int) -> "Dict[int, int]": # type: ignore[empty-body]
...
assert t2(5, 5).operator.multiple_outputs is True
with pytest.warns(UserWarning, match="Cannot infer multiple_outputs.*t3") as recwarn:
@task_decorator
def t3( # type: ignore[empty-body]
x: "FakeTypeCheckingOnlyClass",
y: int,
) -> "UnresolveableName[int, int]":
...
line = sys._getframe().f_lineno - 6 if PY38 else sys._getframe().f_lineno - 3
if PY311:
# extra line explaining the error location in Py311
line = line - 1
warn = recwarn[0]
assert warn.filename == __file__
assert warn.lineno == line
assert t3(5, 5).operator.multiple_outputs is False
def test_infer_multiple_outputs_using_other_typing(self):
@task_decorator
def identity_tuple(x: int, y: int) -> Tuple[int, int]:
return x, y
assert identity_tuple(5, 5).operator.multiple_outputs is False
@task_decorator
def identity_int(x: int) -> int:
return x
assert identity_int(5).operator.multiple_outputs is False
@task_decorator
def identity_notyping(x: int):
return x
assert identity_notyping(5).operator.multiple_outputs is False
# The following cases ensure invoking ``@task_decorator.__call__()`` yields the correct inference.
@task_decorator()
def identity_tuple_with_decorator_call(x: int, y: int) -> Tuple[int, int]:
return x, y
assert identity_tuple_with_decorator_call(5, 5).operator.multiple_outputs is False
@task_decorator()
def identity_int_with_decorator_call(x: int) -> int:
return x
assert identity_int_with_decorator_call(5).operator.multiple_outputs is False
@task_decorator()
def identity_notyping_with_decorator_call(x: int):
return x
assert identity_notyping_with_decorator_call(5).operator.multiple_outputs is False
def test_manual_multiple_outputs_false_with_typings(self):
@task_decorator(multiple_outputs=False)
def identity2(x: int, y: int) -> Tuple[int, int]:
return x, y
with self.dag:
res = identity2(8, 4)
dr = self.create_dag_run()
res.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
ti = dr.get_task_instances()[0]
assert res.operator.multiple_outputs is False
assert ti.xcom_pull() == (8, 4)
assert ti.xcom_pull(key="return_value_0") is None
assert ti.xcom_pull(key="return_value_1") is None
def test_multiple_outputs_ignore_typing(self):
@task_decorator
def identity_tuple(x: int, y: int) -> Tuple[int, int]:
return x, y
with self.dag:
ident = identity_tuple(35, 36)
dr = self.create_dag_run()
ident.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
ti = dr.get_task_instances()[0]
assert not ident.operator.multiple_outputs
assert ti.xcom_pull() == (35, 36)
assert ti.xcom_pull(key="return_value_0") is None
assert ti.xcom_pull(key="return_value_1") is None
def test_fails_bad_signature(self):
"""Tests that @task will fail if signature is not binding."""
@task_decorator
def add_number(num: int) -> int:
return num + 2
with pytest.raises(TypeError):
add_number(2, 3)
with pytest.raises(TypeError):
add_number()
add_number("test")
def test_fail_method(self):
"""Tests that @task will fail if signature is not binding."""
with pytest.raises(TypeError):
class Test:
num = 2
@task_decorator
def add_number(self, num: int) -> int:
return self.num + num
def test_fail_multiple_outputs_key_type(self):
@task_decorator(multiple_outputs=True)
def add_number(num: int):
return {2: num}
with self.dag:
ret = add_number(2)
self.create_dag_run()
with pytest.raises(AirflowException):
ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
def test_fail_multiple_outputs_no_dict(self):
@task_decorator(multiple_outputs=True)
def add_number(num: int):
return num
with self.dag:
ret = add_number(2)
self.create_dag_run()
with pytest.raises(AirflowException):
ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
def test_multiple_outputs_empty_dict(self):
@task_decorator(multiple_outputs=True)
def empty_dict():
return {}
with self.dag:
ret = empty_dict()
dr = self.create_dag_run()
ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
ti = dr.get_task_instances()[0]
assert ti.xcom_pull() == {}
def test_multiple_outputs_return_none(self):
@task_decorator(multiple_outputs=True)
def test_func():
return
with self.dag:
ret = test_func()
dr = self.create_dag_run()
ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
ti = dr.get_task_instances()[0]
assert ti.xcom_pull() is None
def test_python_callable_arguments_are_templatized(self):
"""Test @task op_args are templatized"""
@task_decorator
def arg_task(*args):
raise RuntimeError("Should not executed")
# Create a named tuple and ensure it is still preserved
# after the rendering is done
Named = namedtuple("Named", ["var1", "var2"])
named_tuple = Named("{{ ds }}", "unchanged")
with self.dag:
ret = arg_task(4, date(2019, 1, 1), "dag {{dag.dag_id}} ran on {{ds}}.", named_tuple)
dr = self.create_dag_run()
ti = TaskInstance(task=ret.operator, run_id=dr.run_id)
rendered_op_args = ti.render_templates().op_args
assert len(rendered_op_args) == 4
assert rendered_op_args[0] == 4
assert rendered_op_args[1] == date(2019, 1, 1)
assert rendered_op_args[2] == f"dag {self.dag_id} ran on {self.ds_templated}."
assert rendered_op_args[3] == Named(self.ds_templated, "unchanged")
def test_python_callable_keyword_arguments_are_templatized(self):
"""Test PythonOperator op_kwargs are templatized"""
@task_decorator
def kwargs_task(an_int, a_date, a_templated_string):
raise RuntimeError("Should not executed")
with self.dag:
ret = kwargs_task(
an_int=4, a_date=date(2019, 1, 1), a_templated_string="dag {{dag.dag_id}} ran on {{ds}}."
)
dr = self.create_dag_run()
ti = TaskInstance(task=ret.operator, run_id=dr.run_id)
rendered_op_kwargs = ti.render_templates().op_kwargs
assert rendered_op_kwargs["an_int"] == 4
assert rendered_op_kwargs["a_date"] == date(2019, 1, 1)
assert rendered_op_kwargs["a_templated_string"] == f"dag {self.dag_id} ran on {self.ds_templated}."
def test_manual_task_id(self):
"""Test manually setting task_id"""
@task_decorator(task_id="some_name")
def do_run():
return 4
with self.dag:
do_run()
assert ["some_name"] == self.dag.task_ids
def test_multiple_calls(self):
"""Test calling task multiple times in a DAG"""
@task_decorator
def do_run():
return 4
with self.dag:
do_run()
assert ["do_run"] == self.dag.task_ids
do_run_1 = do_run()
do_run_2 = do_run()
assert ["do_run", "do_run__1", "do_run__2"] == self.dag.task_ids
assert do_run_1.operator.task_id == "do_run__1"
assert do_run_2.operator.task_id == "do_run__2"
def test_multiple_calls_in_task_group(self):
"""Test calling task multiple times in a TaskGroup"""
@task_decorator
def do_run():
return 4
group_id = "KnightsOfNii"
with self.dag:
with TaskGroup(group_id=group_id):
do_run()
assert [f"{group_id}.do_run"] == self.dag.task_ids
do_run()
assert [f"{group_id}.do_run", f"{group_id}.do_run__1"] == self.dag.task_ids
assert len(self.dag.task_ids) == 2
def test_call_20(self):
"""Test calling decorated function 21 times in a DAG"""
@task_decorator
def __do_run():
return 4
with self.dag:
__do_run()
for _ in range(20):
__do_run()
assert self.dag.task_ids[-1] == "__do_run__20"
def test_multiple_outputs(self):
"""Tests pushing multiple outputs as a dictionary"""
@task_decorator(multiple_outputs=True)
def return_dict(number: int):
return {"number": number + 1, "43": 43}
test_number = 10
with self.dag:
ret = return_dict(test_number)
dr = self.dag.create_dagrun(
run_id=DagRunType.MANUAL,
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
ti = dr.get_task_instances()[0]
assert ti.xcom_pull(key="number") == test_number + 1
assert ti.xcom_pull(key="43") == 43
assert ti.xcom_pull() == {"number": test_number + 1, "43": 43}
def test_default_args(self):
"""Test that default_args are captured when calling the function correctly"""
@task_decorator
def do_run():
return 4
self.dag.default_args["owner"] = "airflow"
with self.dag:
ret = do_run()
assert ret.operator.owner == "airflow"
@task_decorator
def test_apply_default_raise(unknown):
return unknown
with pytest.raises(TypeError):
with self.dag:
test_apply_default_raise()
@task_decorator
def test_apply_default(owner):
return owner
with self.dag:
ret = test_apply_default()
assert "owner" in ret.operator.op_kwargs
def test_xcom_arg(self):
"""Tests that returned key in XComArg is returned correctly"""
@task_decorator
def add_2(number: int):
return number + 2
@task_decorator
def add_num(number: int, num2: int = 2):
return number + num2
test_number = 10
with self.dag:
bigger_number = add_2(test_number)
ret = add_num(bigger_number, XComArg(bigger_number.operator))
dr = self.dag.create_dagrun(
run_id=DagRunType.MANUAL,
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
bigger_number.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
ti_add_num = next(ti for ti in dr.get_task_instances() if ti.task_id == "add_num")
assert ti_add_num.xcom_pull(key=ret.key) == (test_number + 2) * 2
def test_dag_task(self):
"""Tests dag.task property to generate task"""
@self.dag.task
def add_2(number: int):
return number + 2
test_number = 10
res = add_2(test_number)
add_2(res)
assert "add_2" in self.dag.task_ids
def test_dag_task_multiple_outputs(self):
"""Tests dag.task property to generate task with multiple outputs"""
@self.dag.task(multiple_outputs=True)
def add_2(number: int):
return {"1": number + 2, "2": 42}
test_number = 10
add_2(test_number)
add_2(test_number)
assert "add_2" in self.dag.task_ids
@pytest.mark.parametrize(
argnames=["op_doc_attr", "op_doc_value", "expected_doc_md"],
argvalues=[
pytest.param("doc", "task docs.", None, id="set_doc"),
pytest.param("doc_json", '{"task": "docs."}', None, id="set_doc_json"),
pytest.param("doc_md", "task docs.", "task docs.", id="set_doc_md"),
pytest.param("doc_rst", "task docs.", None, id="set_doc_rst"),
pytest.param("doc_yaml", "task:\n\tdocs", None, id="set_doc_yaml"),
pytest.param("doc_md", None, "Adds 2 to number.", id="no_doc_md_use_docstring"),
],
)
def test_task_documentation(self, op_doc_attr, op_doc_value, expected_doc_md):
"""Tests that task_decorator loads doc_md from function doc if doc_md is not explicitly provided."""
kwargs = {}
kwargs[op_doc_attr] = op_doc_value
@task_decorator(**kwargs)
def add_2(number: int):
"""Adds 2 to number."""
return number + 2
test_number = 10
with self.dag:
ret = add_2(test_number)
assert ret.operator.doc_md == expected_doc_md
def test_user_provided_task_id_in_a_loop_is_used(self):
"""Tests that when looping that user provided task_id is used"""
@task_decorator(task_id="hello_task")
def hello():
"""
Print Hello world
"""
print("Hello world")
with self.dag:
for i in range(3):
hello.override(task_id=f"my_task_id_{i * 2}")()
hello() # This task would have hello_task as the task_id
assert self.dag.task_ids == ["my_task_id_0", "my_task_id_2", "my_task_id_4", "hello_task"]
def test_user_provided_pool_and_priority_weight_works(self):
"""Tests that when looping that user provided pool, priority_weight etc is used"""
@task_decorator(task_id="hello_task")
def hello():
"""
Print Hello world
"""
print("Hello world")
with self.dag:
for i in range(3):
hello.override(pool="my_pool", priority_weight=i)()
weights = []
for task in self.dag.tasks:
assert task.pool == "my_pool"
weights.append(task.priority_weight)
assert weights == [0, 1, 2]
def test_python_callable_args_work_as_well_as_baseoperator_args(self):
"""Tests that when looping that user provided pool, priority_weight etc is used"""
@task_decorator(task_id="hello_task")
def hello(x, y):
"""
Print Hello world
"""
print("Hello world", x, y)
return x, y
with self.dag:
output = hello.override(task_id="mytask")(x=2, y=3)
output2 = hello.override()(2, 3) # nothing overridden but should work
assert output.operator.op_kwargs == {"x": 2, "y": 3}
assert output2.operator.op_args == (2, 3)
output.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
output2.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
def test_mapped_decorator_shadow_context() -> None:
    """partial()/expand() must reject kwargs that shadow task-context variables."""
    @task_decorator
    def print_info(message: str, run_id: str = "") -> None:
        print(f"{run_id}: {message}")

    # 'run_id' is injected from the task context, so mapping over it is an error.
    with pytest.raises(ValueError) as ctx:
        print_info.partial(run_id="hi")
    assert str(ctx.value) == "cannot call partial() on task context variable 'run_id'"

    with pytest.raises(ValueError) as ctx:
        print_info.expand(run_id=["hi", "there"])
    assert str(ctx.value) == "cannot call expand() on task context variable 'run_id'"
def test_mapped_decorator_wrong_argument() -> None:
    """partial()/expand() must reject argument names the function does not take."""
    @task_decorator
    def print_info(message: str, run_id: str = "") -> None:
        print(f"{run_id}: {message}")

    # Unknown kwarg to partial() -> TypeError, like a normal function call.
    with pytest.raises(TypeError) as ct:
        print_info.partial(wrong_name="hi")
    assert str(ct.value) == "partial() got an unexpected keyword argument 'wrong_name'"

    # Unknown kwarg to expand() -> TypeError as well.
    with pytest.raises(TypeError) as ct:
        print_info.expand(wrong_name=["hi", "there"])
    assert str(ct.value) == "expand() got an unexpected keyword argument 'wrong_name'"

    # expand() requires an iterable per mapped argument; a bare str is rejected.
    with pytest.raises(ValueError) as cv:
        print_info.expand(message="hi")
    assert str(cv.value) == "expand() got an unexpected type 'str' for keyword argument 'message'"
def test_mapped_decorator():
@task_decorator
def print_info(m1: str, m2: str, run_id: str = "") -> None:
print(f"{run_id}: {m1} {m2}")
@task_decorator
def print_everything(**kwargs) -> None:
print(kwargs)
with DAG("test_mapped_decorator", start_date=DEFAULT_DATE):
t0 = print_info.expand(m1=["a", "b"], m2={"foo": "bar"})
t1 = print_info.partial(m1="hi").expand(m2=[1, 2, 3])
t2 = print_everything.partial(whatever="123").expand(any_key=[1, 2], works=t1)
assert isinstance(t2, XComArg)
assert isinstance(t2.operator, DecoratedMappedOperator)
assert t2.operator.task_id == "print_everything"
assert t2.operator.op_kwargs_expand_input == DictOfListsExpandInput({"any_key": [1, 2], "works": t1})
assert t0.operator.task_id == "print_info"
assert t1.operator.task_id == "print_info__1"
def test_mapped_decorator_invalid_args() -> None:
@task_decorator
def double(number: int):
return number * 2
literal = [1, 2, 3]
with pytest.raises(TypeError, match="arguments 'other', 'b'"):
double.partial(other=[1], b=["a"])
with pytest.raises(TypeError, match="argument 'other'"):
double.expand(number=literal, other=[1])
with pytest.raises(ValueError, match="argument 'number'"):
double.expand(number=1) # type: ignore[arg-type]
def test_partial_mapped_decorator() -> None:
@task_decorator
def product(number: int, multiple: int):
return number * multiple
literal = [1, 2, 3]
with DAG("test_dag", start_date=DEFAULT_DATE) as dag:
quadrupled = product.partial(multiple=3).expand(number=literal)
doubled = product.partial(multiple=2).expand(number=literal)
trippled = product.partial(multiple=3).expand(number=literal)
product.partial(multiple=2) # No operator is actually created.
assert isinstance(doubled, PlainXComArg)
assert isinstance(trippled, PlainXComArg)
assert isinstance(quadrupled, PlainXComArg)
assert dag.task_dict == {
"product": quadrupled.operator,
"product__1": doubled.operator,
"product__2": trippled.operator,
}
assert isinstance(doubled.operator, DecoratedMappedOperator)
assert doubled.operator.op_kwargs_expand_input == DictOfListsExpandInput({"number": literal})
assert doubled.operator.partial_kwargs["op_kwargs"] == {"multiple": 2}
assert isinstance(trippled.operator, DecoratedMappedOperator) # For type-checking on partial_kwargs.
assert trippled.operator.partial_kwargs["op_kwargs"] == {"multiple": 3}
assert doubled.operator is not trippled.operator
def test_mapped_decorator_unmap_merge_op_kwargs(dag_maker, session):
with dag_maker(session=session):
@task_decorator
def task1():
return ["x"]
@task_decorator
def task2(arg1, arg2):
...
task2.partial(arg1=1).expand(arg2=task1())
run = dag_maker.create_dagrun()
# Run task1.
dec = run.task_instance_scheduling_decisions(session=session)
assert [ti.task_id for ti in dec.schedulable_tis] == ["task1"]
dec.schedulable_tis[0].run(session=session)
# Expand task2.
dec = run.task_instance_scheduling_decisions(session=session)
assert [ti.task_id for ti in dec.schedulable_tis] == ["task2"]
ti = dec.schedulable_tis[0]
unmapped = ti.task.unmap((ti.get_template_context(session), session))
assert set(unmapped.op_kwargs) == {"arg1", "arg2"}
def test_mapped_decorator_converts_partial_kwargs(dag_maker, session):
with dag_maker(session=session):
@task_decorator
def task1(arg):
return ["x" * arg]
@task_decorator(retry_delay=30)
def task2(arg1, arg2):
...
task2.partial(arg1=1).expand(arg2=task1.expand(arg=[1, 2]))
run = dag_maker.create_dagrun()
# Expand and run task1.
dec = run.task_instance_scheduling_decisions(session=session)
assert [ti.task_id for ti in dec.schedulable_tis] == ["task1", "task1"]
for ti in dec.schedulable_tis:
ti.run(session=session)
assert not isinstance(ti.task, MappedOperator)
assert ti.task.retry_delay == timedelta(seconds=300) # Operator default.
# Expand task2.
dec = run.task_instance_scheduling_decisions(session=session)
assert [ti.task_id for ti in dec.schedulable_tis] == ["task2", "task2"]
for ti in dec.schedulable_tis:
unmapped = ti.task.unmap((ti.get_template_context(session), session))
assert unmapped.retry_delay == timedelta(seconds=30)
def test_mapped_render_template_fields(dag_maker, session):
    """Rendering template fields on a mapped @task resolves XCom args but does not
    recursively re-render values pulled from XCom (arg1 stays a literal string)."""
    @task_decorator
    def fn(arg1, arg2):
        ...
    with dag_maker(session=session):
        task1 = BaseOperator(task_id="op1")
        # arg2 is a template string; arg1 comes from task1's XCom output.
        mapped = fn.partial(arg2="{{ ti.task_id }}").expand(arg1=task1.output)
    dr = dag_maker.create_dagrun()
    ti: TaskInstance = dr.get_task_instance(task1.task_id, session=session)
    # Seed the upstream XCom with a value that itself looks like a template.
    ti.xcom_push(key=XCOM_RETURN_KEY, value=["{{ ds }}"], session=session)
    session.add(
        TaskMap(
            dag_id=dr.dag_id,
            task_id=task1.task_id,
            run_id=dr.run_id,
            map_index=-1,
            length=1,
            keys=None,
        )
    )
    session.flush()
    mapped_ti: TaskInstance = dr.get_task_instance(mapped.operator.task_id, session=session)
    mapped_ti.map_index = 0
    assert isinstance(mapped_ti.task, MappedOperator)
    # Rendering unmaps the operator in place on the task instance.
    mapped.operator.render_template_fields(context=mapped_ti.get_template_context(session=session))
    assert isinstance(mapped_ti.task, BaseOperator)
    # XCom-sourced value is NOT re-templated; the literal template string survives.
    assert mapped_ti.task.op_kwargs["arg1"] == "{{ ds }}"
    # The partial template IS rendered ("fn" is the decorated task's id).
    assert mapped_ti.task.op_kwargs["arg2"] == "fn"
def test_task_decorator_has_wrapped_attr():
    """Verify @task keeps the undecorated callable reachable via ``__wrapped__``."""

    def plain_func():
        pass

    decorated = task_decorator(plain_func)
    # The decorator must behave like functools.wraps and expose the original.
    assert hasattr(decorated, "__wrapped__"), "decorated function does not have __wrapped__ attribute"
    assert decorated.__wrapped__ is plain_func, "__wrapped__ attr is not the original function"
def test_upstream_exception_produces_none_xcom(dag_maker, session):
    """A skipped upstream @task yields None as its XCom for downstream arguments."""
    from airflow.exceptions import AirflowSkipException
    from airflow.utils.trigger_rule import TriggerRule
    result = None
    with dag_maker(session=session) as dag:
        @dag.task()
        def up1() -> str:
            return "example"
        @dag.task()
        def up2() -> None:
            # Skips, so it pushes no XCom value.
            raise AirflowSkipException()
        # Trigger rule lets "down" run even though up2 skipped.
        @dag.task(trigger_rule=TriggerRule.NONE_FAILED_MIN_ONE_SUCCESS)
        def down(a, b):
            nonlocal result
            result = f"{a!r} {b!r}"
        down(up1(), up2())
    dr: DagRun = dag_maker.create_dagrun()
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert len(decision.schedulable_tis) == 2  # "up1" and "up2"
    for ti in decision.schedulable_tis:
        ti.run(session=session)
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert len(decision.schedulable_tis) == 1  # "down"
    decision.schedulable_tis[0].run(session=session)
    # up2's missing XCom resolves to None rather than erroring.
    assert result == "'example' None"
@pytest.mark.parametrize("multiple_outputs", [True, False])
def test_multiple_outputs_produces_none_xcom_when_task_is_skipped(dag_maker, session, multiple_outputs):
    """Key access on a skipped upstream resolves to None only with multiple_outputs=True;
    otherwise the keyed XCom does not exist and the downstream task raises XComNotFound."""
    from airflow.exceptions import AirflowSkipException
    from airflow.utils.trigger_rule import TriggerRule
    result = None
    with dag_maker(session=session) as dag:
        @dag.task()
        def up1() -> str:
            return "example"
        @dag.task(multiple_outputs=multiple_outputs)
        def up2(x) -> Union[dict, None]:
            if x == 2:
                return {"x": "example"}
            # Called with x=1 below, so this always skips in this test.
            raise AirflowSkipException()
        @dag.task(trigger_rule=TriggerRule.NONE_FAILED_MIN_ONE_SUCCESS)
        def down(a, b):
            nonlocal result
            result = f"{a!r} {b!r}"
        # Downstream subscripts up2's result by key "x".
        down(up1(), up2(1)["x"])
    dr = dag_maker.create_dagrun()
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert len(decision.schedulable_tis) == 2  # "up1" and "up2"
    for ti in decision.schedulable_tis:
        ti.run(session=session)
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert len(decision.schedulable_tis) == 1  # "down"
    if multiple_outputs:
        # Keyed access on a skipped multiple-outputs task yields None.
        decision.schedulable_tis[0].run(session=session)
        assert result == "'example' None"
    else:
        # Without multiple_outputs there is no "x" XCom to pull.
        with pytest.raises(XComNotFound):
            decision.schedulable_tis[0].run(session=session)
@pytest.mark.filterwarnings("error")
def test_no_warnings(reset_logging_config, caplog):
    """Building a simple @task DAG must emit no warnings (escalated to errors)
    and produce no log records."""
    @task_decorator
    def some_task():
        return 1
    @task_decorator
    def other(x):
        ...
    with DAG(dag_id="test", start_date=DEFAULT_DATE, schedule=None):
        other(some_task())
    assert caplog.messages == []
def test_task_decorator_dataset(dag_maker, session):
    """Dataset objects can be returned from and passed between @task functions."""
    from airflow import Dataset
    result = None
    uri = "s3://test"
    with dag_maker(session=session) as dag:
        @dag.task()
        def up1() -> Dataset:
            return Dataset(uri)
        @dag.task()
        def up2(src: Dataset) -> str:
            # The Dataset must round-trip through XCom intact.
            return src.uri
        @dag.task()
        def down(a: str):
            nonlocal result
            result = a
        src = up1()
        s = up2(src)
        down(s)
    dr: DagRun = dag_maker.create_dagrun()
    # Run the three tasks in dependency order, one scheduling pass each.
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert len(decision.schedulable_tis) == 1  # "up1"
    decision.schedulable_tis[0].run(session=session)
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert len(decision.schedulable_tis) == 1  # "up2"
    decision.schedulable_tis[0].run(session=session)
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert len(decision.schedulable_tis) == 1  # "down"
    decision.schedulable_tis[0].run(session=session)
    assert result == uri
def test_teardown_trigger_rule_selective_application(dag_maker, session):
    """Only @teardown tasks get the ALL_DONE_SETUP_SUCCESS trigger rule by default;
    plain and @setup tasks keep ALL_SUCCESS."""
    with dag_maker(session=session) as dag:
        @dag.task
        def my_work():
            return "abc"
        @setup
        @dag.task
        def my_setup():
            return "abc"
        @teardown
        @dag.task
        def my_teardown():
            return "abc"
        work_task = my_work()
        setup_task = my_setup()
        teardown_task = my_teardown()
    assert work_task.operator.trigger_rule == TriggerRule.ALL_SUCCESS
    assert setup_task.operator.trigger_rule == TriggerRule.ALL_SUCCESS
    assert teardown_task.operator.trigger_rule == TriggerRule.ALL_DONE_SETUP_SUCCESS
def test_teardown_trigger_rule_override_behavior(dag_maker, session):
    """An explicit trigger_rule is honored for plain and @setup tasks but is
    rejected for @teardown tasks (teardown rules are not configurable)."""
    with dag_maker(session=session) as dag:
        @dag.task(trigger_rule=TriggerRule.ONE_SUCCESS)
        def my_work():
            return "abc"
        @setup
        @dag.task(trigger_rule=TriggerRule.ONE_SUCCESS)
        def my_setup():
            return "abc"
        @teardown
        @dag.task(trigger_rule=TriggerRule.ONE_SUCCESS)
        def my_teardown():
            return "abc"
        work_task = my_work()
        setup_task = my_setup()
        # Instantiating the teardown task raises; the rule cannot be overridden.
        with pytest.raises(Exception, match="Trigger rule not configurable for teardown tasks."):
            my_teardown()
        assert work_task.operator.trigger_rule == TriggerRule.ONE_SUCCESS
        assert setup_task.operator.trigger_rule == TriggerRule.ONE_SUCCESS
|
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.utils.data import DataLoader,Dataset
import matplotlib.pyplot as plt
import torchvision.utils
import numpy as np
import random
from PIL import Image
import torch
from torch.autograd import Variable
import PIL.ImageOps
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torch.utils.data.sampler import SubsetRandomSampler
import itertools
import os
import pandas as pd
# Sanity-check the data directory, then report the training device.
print(os.listdir("/home/liuxb/luan"))
#Checking if CUDA is available or not
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
    print('CUDA is not available. Training on CPU ...')
else:
    print('CUDA is available! Training on GPU ...')
def imshow(img, text=None, should_save=False):
    """Display a channel-first image tensor with an optional styled caption.

    img: tensor with a .numpy() method, laid out as (C, H, W).
    text: optional caption drawn onto the figure.
    should_save: accepted for API compatibility; not used.
    """
    pixels = img.numpy()
    plt.axis("off")
    if text:
        plt.text(75, 8, text, style='italic', fontweight='bold',
                 bbox={'facecolor': 'white', 'alpha': 0.8, 'pad': 10})
    # matplotlib expects channel-last (H, W, C), so move the channel axis.
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.show()
def show_plot(iteration,loss):
    """Plot loss values against iteration numbers and display the figure."""
    plt.plot(iteration,loss)
    plt.show()
# Load the kinship pairs and split each "FAMILY/MID" path into separate columns.
df = pd.read_csv("/home/liuxb/luan/data/train_relationships.csv")
print(df.head())
'''
           p1         p2
0  F0002/MID1  F0002/MID3
1  F0002/MID2  F0002/MID3
2  F0005/MID1  F0005/MID2
3  F0005/MID3  F0005/MID2
4  F0009/MID1  F0009/MID4
'''
new = df["p1"].str.split("/", n = 1, expand = True)
# making separate first name column from new data frame
df["Family1"]= new[0]
# making separate last name column from new data frame
df["Person1"]= new[1]
# Dropping old Name columns
df.drop(columns =["p1"], inplace = True)
new = df["p2"].str.split("/", n = 1, expand = True)
# making separate first name column from new data frame
df["Family2"]= new[0]
# making separate last name column from new data frame
df["Person2"]= new[1]
# Dropping old Name columns
df.drop(columns =["p2"], inplace = True)
print(df.head())
'''
  Family1 Person1 Family2 Person2
0   F0002    MID1   F0002    MID3
1   F0002    MID2   F0002    MID3
2   F0005    MID1   F0005    MID2
3   F0005    MID3   F0005    MID2
4   F0009    MID1   F0009    MID4
'''
# Drop pairs whose image folders are missing on disk.
root_dir = '/home/liuxb/luan/data/train/'
temp = []
for index, row in df.iterrows():
    if os.path.exists(root_dir+row.Family1+'/'+row.Person1) and os.path.exists(root_dir+row.Family2+'/'+row.Person2):
        continue
    else:
        temp.append(index)
print(len(temp)) #231
df = df.drop(temp, axis=0)
#A new column in the existing dataframe with all values as 1, since these people are all related
df['Related'] = 1
#Creating a dictionary, and storing members of each family
df_dict = {}
for index, row in df.iterrows():
    if row['Family1'] in df_dict:
        df_dict[row['Family1']].append(row['Person1'])
    else:
        df_dict[row['Family1']] = [row['Person1']]
#For each family in this dictionary, we'll first make pairs of people
#For each pair, we'll check if they're related in our existing Dataset
#If they're not in the dataframe, means we'll create a row with both persons and related value 0
i=1
for key in df_dict:
    pair = list(itertools.combinations(df_dict[key], 2))
    for item in pair:
        if len(df[(df['Family1']==key)&(df['Person1']==item[0])&(df['Person2']==item[1])])==0 \
        and len(df[(df['Family1']==key)&(df['Person1']==item[1])&(df['Person2']==item[0])])==0:
            new = {'Family1':key,'Person1':item[0],'Family2':key,'Person2':item[1],'Related':0}
            # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and
            # removed in 2.0 -- needs pd.concat on modern pandas.
            df=df.append(new,ignore_index=True)
#Storing rows only where Person1 and Person2 are not same
df = df[(df['Person1']!=df['Person2'])]
#len(df[(df['Related']==1)])
print(df['Related'].value_counts())
'''
1    3367
0    1566
'''
# Balance the classes by sampling cross-family (unrelated) pairs until the
# counts match.
extra = df['Related'].value_counts()[1]-df['Related'].value_counts()[0]
while extra>=0:
    rows = df.sample(n=2)
    first = rows.iloc[0,:]
    second = rows.iloc[1,:]
    if first.Family1!=second.Family1 and first.Family2!=second.Family2:
        new1 = {'Family1':first.Family1,'Person1':first.Person1,'Family2':second.Family1,'Person2':second.Person1,'Related':0}
        extra=extra-1
        # NOTE(review): breaking here discards the just-built new1 without
        # appending it -- presumably acceptable (off by one pair); verify.
        if extra==0:
            break
        new2 = {'Family1':first.Family2,'Person1':first.Person2,'Family2':second.Family2,'Person2':second.Person2,'Related':0}
        extra=extra-1
        df=df.append(new1,ignore_index=True)
        df=df.append(new2,ignore_index=True)
# Shuffle the balanced dataset.
df = df.sample(frac=1).reset_index(drop=True)
print(df['Related'].value_counts())
'''
1    3367
0    3366
Name: Related, dtype: int64
'''
print(df.head())
'''
  Family1 Person1 Family2 Person2  Related
0   F0425    MID5   F0425    MID6        0
1   F0539    MID2   F0539    MID4        1
2   F0561    MID1   F0658    MID5        0
3   F0762    MID5   F0579    MID1        0
4   F0194    MID2   F0194    MID3        0
'''
|
# Maps service country codes to "<flag emoji> <Russian country name>".
# Fixed: '67' had the literal text "NZ" instead of the flag; '79' (Papua New
# Guinea) and '167' (Equatorial Guinea) mistakenly reused the Guinea flag.
countries = {
    '0': '🇷🇺 Россия',
    '1': '🇺🇦 Украина',
    '2': '🇰🇿 Казахстан',
    '51': '🇧🇾 Беларусь',
    '3': '🇨🇳 Китай',
    '15': '🇵🇱 Польша',
    '29': '🇷🇸 Сербия',
    '34': '🇪🇪 Эстония',
    '32': '🇷🇴 Румыния',
    '43': '🇩🇪 Германия',
    '44': '🇱🇹 Литва',
    '82': '🇧🇪 Бельгия',
    '83': '🇧🇬 Болгария',
    '84': '🇭🇺 Венгрия',
    '85': '🇲🇩 Молдова',
    '86': '🇮🇹 Италия',
    '62': '🇹🇷 Турция',
    '63': '🇨🇿 Чехия',
    '4': '🇵🇭 Филиппины',
    '5': '🇲🇲 Мьянма',
    '6': '🇮🇩 Индонезия',
    '7': '🇲🇾 Малайзия',
    '8': '🇰🇪 Кения',
    '10': '🇻🇳 Вьетнам',
    '11': '🇰🇬 Кыргызстан',
    '12': '🇺🇸 США',
    '13': '🇮🇱 Израиль',
    '14': '🇭🇰 Гонконг',
    '16': '🏴 Англия',
    '18': '🇨🇩 Дем. Конго',
    '19': '🇳🇬 Нигерия',
    '21': '🇪🇬 Египет',
    '22': '🇮🇳 Индия',
    '23': '🇮🇪 Ирландия',
    '24': '🇰🇭 Камбоджа',
    '25': '🇱🇦 Лаос',
    '26': '🇭🇹 Гаити',
    '27': '🇨🇮 Кот д Ивуар',
    '28': '🇬🇲 Гамбия',
    '30': '🇾🇪 Йемен',
    '31': '🇿🇦 ЮАР',
    '33': '🇨🇴 Колумбия',
    '35': '🇦🇿 Азербайджан',
    '36': '🇨🇦 Канада',
    '37': '🇲🇦 Марокко',
    '38': '🇬🇭 Гана',
    '39': '🇦🇷 Аргентина',
    '40': '🇺🇿 Узбекистан',
    '41': '🇨🇲 Камерун',
    '45': '🇭🇷 Хорватия',
    '46': '🇸🇪 Швеция',
    '47': '🇮🇶 Ирак',
    '48': '🇳🇱 Нидерланды',
    '49': '🇱🇻 Латвия',
    '50': '🇦🇹 Австрия',
    '52': '🇹🇭 Таиланд',
    '53': '🇸🇦 Сауд. Аравия',
    '54': '🇲🇽 Мексика',
    '55': '🇹🇼 Тайвань',
    '56': '🇪🇸 Испания',
    '57': '🇮🇷 Иран',
    '58': '🇩🇿 Алжир',
    '60': '🇧🇩 Бангладеш',
    '61': '🇸🇳 Сенегал',
    '64': '🇱🇰 Шри-Ланка',
    '65': '🇵🇪 Перу',
    '66': '🇵🇰 Пакистан',
    '67': '🇳🇿 Новая Зеландия',
    '68': '🇬🇳 Гвинея',
    '69': '🇲🇱 Мали',
    '70': '🇻🇪 Венесуэла',
    '71': '🇪🇹 Эфиопия',
    '72': '🇲🇳 Монголия',
    '73': '🇧🇷 Бразилия',
    '74': '🇦🇫 Афганистан',
    '75': '🇺🇬 Уганда',
    '76': '🇦🇴 Ангола',
    '77': '🇨🇾 Кипр',
    '78': '🇫🇷 Франция',
    '79': '🇵🇬 Папуа-Новая Гвинея',
    '80': '🇲🇿 Мозамбик',
    '81': '🇳🇵 Непал',
    '87': '🇵🇾 Парагвай',
    '88': '🇭🇳 Гондурас',
    '89': '🇹🇳 Тунис',
    '90': '🇳🇮 Никарагуа',
    '91': '🇹🇱 Тимор-Лесте',
    '92': '🇧🇴 Боливия',
    '93': '🇨🇷 Коста Рика',
    '94': '🇬🇹 Гватемала',
    '95': '🇦🇪 ОАЭ',
    '96': '🇿🇼 Зимбабве',
    '97': '🇵🇷 Пуэрто-Рико',
    '98': '🇸🇩 Судан',
    '99': '🇹🇬 Того',
    '100': '🇰🇼 Кувейт',
    '101': '🇸🇻 Сальвадор',
    '102': '🇱🇾 Ливия',
    '103': '🇯🇲 Ямайка',
    '104': '🇹🇹 Тринидад и Тобаго',
    '105': '🇪🇨 Эквадор',
    '106': '🇸🇿 Свазиленд',
    '107': '🇴🇲 Оман',
    '108': '🇧🇦 Босния и Герцеговина',
    '109': '🇩🇴 Доминиканская Республика',
    '110': '🇸🇾 Сирия',
    '111': '🇶🇦 Катар',
    '112': '🇵🇦 Панама',
    '113': '🇨🇺 Куба',
    '114': '🇲🇷 Мавритания',
    '115': '🇸🇱 Сьерра-Леоне',
    '116': '🇯🇴 Иордания',
    '117': '🇵🇹 Португалия',
    '118': '🇧🇧 Барбадос',
    '119': '🇧🇮 Бурунди',
    '120': '🇧🇯 Бенин',
    '122': '🇧🇸 Багамы',
    '123': '🇧🇼 Ботсвана',
    '128': '🇬🇪 Грузия',
    '129': '🇬🇷 Греция',
    '130': '🇬🇼 Гвинея-Бисау',
    '131': '🇬🇾 Гайана',
    '133': '🇰🇲 Коморы',
    '134': '🇰🇳 Сент-Китс и Невис',
    '135': '🇱🇷 Либерия',
    '136': '🇱🇸 Лесото',
    '137': '🇲🇼 Малави',
    '138': '🇳🇦 Намибия',
    '139': '🇳🇪 Нигер',
    '140': '🇷🇼 Руанда',
    '141': '🇸🇰 Словакия',
    '142': '🇸🇷 Суринам',
    '143': '🇹🇯 Таджикистан',
    '145': '🇧🇭 Бахрейн',
    '146': '🇷🇪 Реюньон',
    '147': '🇿🇲 Замбия',
    '148': '🇦🇲 Армения',
    '149': '🇸🇴 Сомали',
    '150': '🇨🇬 Конго',
    '152': '🇧🇫 Буркина-Фасо',
    '153': '🇱🇧 Ливан',
    '154': '🇬🇦 Габон',
    '155': '🇦🇱 Албания',
    '157': '🇲🇺 Маврикий',
    '158': '🇧🇹 Бутан',
    '159': '🇲🇻 Мальдивы',
    '161': '🇹🇲 Туркменистан',
    '165': '🇱🇺 Люксембург',
    '167': '🇬🇶 Экваториальная Гвинея',
    '169': '🇦🇬 Антигуа и Барбуда',
    '171': '🇲🇪 Черногория',
    '173': '🇨🇭 Швейцария',
    '174': '🇳🇴 Норвегия',
    '176': '🇪🇷 Эритрея',
    '177': '🇸🇸 Южный Судан',
    '178': '🇸🇹 Сан-Томе и Принсипи',
    '181': '🇦🇮 Ангилья',
    '183': '🇲🇰 Македония',
    '184': '🇸🇨 Республика Сейшелы',
    '185': '🇳🇨 Новая Каледония',
    '186': '🇨🇻 Кабо-Верде',
}
# O(nm) time | O(nm) space
def number_of_ways(n, m):
    """Count monotone lattice paths through an n x m grid.

    Paths start at the top-left cell (0, 0), end at the bottom-right cell
    (n-1, m-1), and may only step down or right.

    Args:
        n: number of rows (>= 1).
        m: number of columns (>= 1).

    Returns:
        The number of distinct paths, i.e. C(n+m-2, n-1).
    """
    # memo[x][y] caches the number of ways to reach cell (x, y); 0 = not yet
    # computed. (The original shadowed the function name with this table.)
    memo = [[0] * m for _ in range(n)]

    def ways_to(x, y):
        # Base case: a single path (staying put) reaches the start cell.
        if x == y == 0:
            return 1
        if memo[x][y] == 0:
            # A cell is reached either from above or from the left.
            ways_top = 0 if x == 0 else ways_to(x - 1, y)
            ways_left = 0 if y == 0 else ways_to(x, y - 1)
            # BUG FIX: original summed ways_left + ways_left, dropping the
            # paths that arrive from above.
            memo[x][y] = ways_top + ways_left
        return memo[x][y]

    return ways_to(n - 1, m - 1)
|
#! /usr/bin/env python
import rospy
import actionlib
from geometry_msgs.msg import Twist, Vector3, PoseStamped
# Uses Test.action from actionlib as action messages
from actionlib.msg import TestFeedback, TestResult, TestAction
# Mission state shared between the pose callback and the action server.
cnt = 0          # phase: 0 = capture start pose, 1 = climb and track position
posX_0 = 0       # reference X captured when a leg starts
posY_0 = 0       # reference Y captured when a leg starts
posX = 0         # X displacement from the reference, updated per pose message
posY = 0         # Y displacement from the reference, updated per pose message
movingUp = True  # True while the drone is still climbing to altitude
def stopDrone() :
    # Halt the drone by publishing a zero velocity command.
    print "> Drone stopped <"
    global pub
    pub.publish(Twist(Vector3(0,0,0),Vector3(0,0,0)))
def drone_pos_handler(msg):
    # Pose callback: on the first message of a leg (cnt == 0) it records the
    # start position; afterwards (cnt == 1) it tracks displacement and climbs
    # to 5 m before clearing the movingUp flag.
    global cnt, posX_0, posY_0, posX, posY, movingUp
    if cnt == 0 :
        cnt += 1
        stopDrone()
        print "> Setting posX_0 and posY_0"
        # Takes initial position
        posX_0 = msg.pose.position.x
        posY_0 = msg.pose.position.y
    elif cnt == 1 :
        # Displacement relative to the leg's start position.
        posX = msg.pose.position.x - posX_0
        posY = msg.pose.position.y - posY_0
        if msg.pose.position.z < 5 :
            # MOVE UP
            pub.publish(Twist(
                Vector3(0,0,0.5),
                Vector3(0,0,0)))
        elif movingUp == True :
            # Altitude reached: stop climbing and rearm the start-pose capture.
            movingUp = False
            cnt = 0
            stopDrone()
class ArdroneSquareClass(object):
    """Action server that flies the drone in a square of side `goal.goal`,
    publishing one feedback tick per completed lap."""
    # create messages that are used to publish feedback/result
    _feedback = TestFeedback()
    _result = TestResult()
    def __init__(self):
        # Velocity publisher and pose subscriber are stored as module globals
        # so the free function drone_pos_handler can use them.
        global sub, pub
        pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
        sub = rospy.Subscriber('ground_truth_to_tf/pose', PoseStamped, drone_pos_handler)
        # creates the action server
        self._as = actionlib.SimpleActionServer("ardrone_square_as", TestAction, self.goal_callback, False)
        self._as.start()
    def goal_callback(self, goal):
        # this callback is called when the action server is called.
        # helper variables
        r = rospy.Rate(4)
        success = True
        self._feedback.feedback = 0
        tmp = 0
        # publish info to the console for the user
        rospy.loginfo('"ardron_square_as": Executing, moving drone at time %i' % ( self._feedback.feedback ))
        # MOVES THE DRONE
        distance = goal.goal
        # i selects the current side of the square (1..4).
        i = 1
        while success == True:
            # Only steer while the climb phase is finished.
            if movingUp == False:
                # check that preempt (cancelation) has not been requested by the action client
                if self._as.is_preempt_requested():
                    rospy.loginfo('The goal has been cancelled/preempted')
                    # the following line, sets the client in preempted state (goal cancelled)
                    self._as.set_preempted()
                    success = False
                    # we end the calculation of the Fibonacci sequence
                    break
                global posX_0, posY_0, posX, posY
                # One constant-velocity command per side: +X, +Y, -X, -Y.
                if i == 1:
                    print "1 > (%d,%d) -> (%d,%d) | %d" %(posX_0, posY_0, posX, posY, distance)
                    pub.publish(Twist(
                        Vector3(0.6,0,0),
                        Vector3(0,0,0)))
                elif i == 2:
                    print "2 > (%d,%d) -> (%d,%d) | %d" %(posX_0, posY_0, posX, posY, distance)
                    pub.publish(Twist(
                        Vector3(0,0.6,0),
                        Vector3(0,0,0)))
                elif i == 3:
                    print "3 > (%d,%d) -> (%d,%d) | %d" %(posX_0, posY_0, posX, posY, distance)
                    pub.publish(Twist(
                        Vector3(-0.6,0,0),
                        Vector3(0,0,0)))
                elif i == 4:
                    print "4 > (%d,%d) -> (%d,%d) | %d" %(posX_0,posY_0, posX,posY, distance)
                    pub.publish(Twist(
                        Vector3(0,-0.6,0),
                        Vector3(0,0,0)))
                # builds the next feedback msg to be sent
                # Every 4 loop passes counts as one feedback tick.
                tmp += 1
                if tmp >= 4 :
                    tmp = 0
                    self._feedback.feedback += 1
                    # publish the feedback
                    self._as.publish_feedback(self._feedback)
                # the sequence is computed at 1 Hz frequency
                r.sleep()
                # Side complete once displacement along its axis reaches `distance`.
                if (posX >= distance and i == 1) \
                or (posY >= distance and i == 2) \
                or (abs(posX) >= distance and i == 3) \
                or (abs(posY) >= distance and i == 4) :
                    i += 1
                    stopDrone()
                    global cnt
                    # Re-arm the pose handler to capture a fresh reference point.
                    cnt = 0
                    posX = 0 # For security and avoid fast success
                    posY = 0
                if i > 4 :
                    break
        stopDrone()
        # at this point, either the goal has been achieved (success==true)
        # or the client preempted the goal (success==false)
        # If success, then we publish the final result
        # If not success, we do not publish anything in the result
        if success:
            self._result.result = self._feedback.feedback
            rospy.loginfo('Total spent time moving the drone %d' % self._result.result )
            self._as.set_succeeded(self._result)
if __name__ == '__main__':
    # Start the node and hand control to the action server's callbacks.
    rospy.init_node('ardrone_as')
    ArdroneSquareClass()
    rospy.spin()
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 05 07:21:42 2015
@author: jeppley
"""
import pandas as pd
import os as os
from sklearn import preprocessing
from sklearn.preprocessing import Imputer
from sklearn import ensemble
import numpy
from sklearn.feature_selection import RFECV
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn import tree
import matplotlib.pyplot as plt
os.getcwd()
# NOTE(review): backslashes in this Windows path are unescaped; sequences like
# '\U' are invalid escapes on Python 3 -- this relies on Python 2 behavior.
os.chdir('C:\Users\jeppley\Dropbox\yelp_dataset_challenge_academic_dataset')
user_merge = pd.read_csv('user_to_model.csv')
user_merge.head()
user_merge.columns
# including our functions from last week up here for use.
def cleanup_data(df, cutoffPercent = .01):
    """Collapse rare categorical levels into the label "Other", in place.

    For every column, levels making up less than `cutoffPercent` of the rows
    are replaced with "Other". Returns the (mutated) dataframe.
    """
    for col in df:
        sizes = df[col].value_counts(normalize = True)
        # get the names of the levels that make up less than 1% of the dataset
        values_to_delete = sizes[sizes<cutoffPercent].index
        # FIX: .ix was deprecated and removed from pandas; the old
        # df[col].ix[...] = ... was also a chained assignment that may not
        # write through. Use a boolean-mask .loc assignment instead.
        df.loc[df[col].isin(values_to_delete), col] = "Other"
    return df
#
def get_binary_values(data_frame):
    """encodes cateogrical features in Pandas.

    Returns a new DataFrame (same index) containing one-hot dummy columns for
    every input column, each prefixed with its source column name.
    """
    all_columns = pd.DataFrame( index = data_frame.index)
    for col in data_frame.columns:
        # FIX: the module imports pandas as `pd`, so the original call to
        # `pandas.get_dummies` raised NameError at runtime. Also,
        # col.encode('ascii', 'replace') yields bytes on Python 3, producing
        # mangled prefixes -- use the plain column name instead.
        data = pd.get_dummies(data_frame[col], prefix=str(col))
        all_columns = pd.concat([all_columns, data], axis=1)
    return all_columns
#
def find_zero_var(df):
    """finds columns in the dataframe with zero variance -- ie those
    with the same value in every observation.
    """
    keep, drop = [], []
    for column in df:
        # A column with more than one observed level has variance to offer.
        bucket = keep if len(df[column].value_counts()) > 1 else drop
        bucket.append(column)
    return {'toKeep':keep, 'toDelete':drop}
##
def find_perfect_corr(df):
    """finds columns that are eother positively or negatively perfectly correlated (with correlations of +1 or -1), and creates a dict
    that includes which columns to drop so that each remaining column
    is independent
    """
    corr = df.corr()
    # Keep only the strictly-lower triangle so each pair is inspected once.
    corr.loc[:,:] = numpy.tril(corr.values, k = -1)
    seen = set()
    groupings = []
    for column in corr:
        # Partners whose correlation with this column is exactly +/-1
        # (rounded to 10 decimals to absorb float noise).
        partners = corr[column][abs(numpy.round(corr[column],10)) == 1.00].index.tolist()
        if partners and column not in seen:
            seen.update(set(partners))
            partners.append(column)
            groupings.append(partners)
    # Within each group, keep the first member and mark the rest for removal.
    to_remove = [group[1:] for group in groupings]
    # print toRemove
    # toRemove = sum(toRemove, [])
    return {'corrGroupings':groupings, 'toRemove':to_remove}
###
# --- Model 1: predict is_elite2 from all other user features. ---
# NOTE(review): this script is Python 2 (print statements) and uses APIs that
# no longer exist in modern libraries: pandas `.ix`, `DataFrame.sort`,
# sklearn `cross_validation`/`grid_search`/`Imputer`, and several bare
# `pandas.` references although the module imports `pandas as pd` (NameError
# on this code path as written).
explanatory_features = [col for col in user_merge.columns if col not in ['user_id', 'is_elite2']]
explanatory_df = user_merge[explanatory_features]
explanatory_df.dropna(how='all', inplace = True)
explanatory_colnames = explanatory_df.columns
print explanatory_colnames
response_series = user_merge.is_elite2
response_series.dropna(how='all', inplace = True)
# Rows dropped from the features but still present in the response.
removed = response_series.index[~response_series.index.isin(explanatory_df.index)]
print removed
# Split columns by dtype; only numeric columns are used below.
string_features = explanatory_df.ix[:, explanatory_df.dtypes == 'object']
numeric_features = explanatory_df.ix[:, explanatory_df.dtypes != 'object']
print string_features
print numeric_features
# Median-impute missing numeric values.
imputer_object = Imputer(missing_values='NaN', strategy='median', axis=0)
imputer_object.fit(numeric_features)
numeric_features = pandas.DataFrame(imputer_object.transform(numeric_features), columns = numeric_features.columns)
## pulling together numeric and encoded data.
explanatory_df = numeric_features
explanatory_df.head()
no_variation = find_zero_var(explanatory_df)
print no_variation
#looks like there is nothing to delete
# NOTE(review): drop() without axis defaults to rows; dropping column labels
# here would KeyError if toDelete were non-empty -- presumably axis=1 was meant.
explanatory_df.drop(no_variation['toDelete'], inplace = True)
# deleting perfect correlation
no_correlation = find_perfect_corr(explanatory_df)
print no_correlation
explanatory_df.drop(no_correlation['toRemove'], 1, inplace = True)
explanatory_df.dtypes
# scaling data
scaler = preprocessing.StandardScaler()
scaler.fit(explanatory_df)
explanatory_df = pandas.DataFrame(scaler.transform(explanatory_df), columns = explanatory_df.columns)
print explanatory_df.head()
# creating a random forest object.
## these are the default values of the classifier
rf = ensemble.RandomForestClassifier(n_estimators=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, random_state=None, verbose=0, min_density=None, compute_importances=None)
# let's compute ROC AUC of the random forest.
roc_scores_rf = cross_val_score(rf, explanatory_df, response_series, cv=10, scoring='roc_auc')
roc_score_tree = cross_val_score(tree.DecisionTreeClassifier(), explanatory_df, response_series, cv=10, scoring='roc_auc')
## let's compare the mean ROC AUC
print roc_scores_rf.mean()
print roc_score_tree.mean()
# Grid-search the forest size.
trees_range = range(10, 50, 10) #see what accuracy is like
param_grid = dict(n_estimators = trees_range)#tuning parameters is number estimators
grid = GridSearchCV(rf, param_grid, cv=10, scoring='roc_auc')
grid.fit(explanatory_df, response_series) # often will want to do this after night, and after feature selection
# Check out the scores of the grid search
grid_mean_scores = [result[1] for result in grid.grid_scores_]
# Plot the results of the grid search
plt.figure()
plt.plot(trees_range, grid_mean_scores)
best_rf_tree_est = grid.best_estimator_
# how many trees did the best estiator have?
print best_rf_tree_est.n_estimators
# how accurate was the best estimator?
print grid.best_score_
## did accuracy improve?
# --- Model 2: among elites, predict who becomes "influential" (fans > 20). ---
user_merge.fans[user_merge.is_elite2==1].describe()
elites = user_merge[user_merge.is_elite2==1]
# density plot of user fans, looks like
elites.fans.plot(kind='density', xlim=(0,100))
#Creating success metric for elites with certain levels of fans
user_merge['Elite_Fans'] = 0
user_merge.head()
# NOTE(review): chained assignment; on modern pandas use .loc to ensure the
# write lands on user_merge itself.
user_merge.Elite_Fans[user_merge.fans>20]=1
user_merge.Elite_Fans[user_merge.Elite_Fans==1]
# Total compliments across all compliment categories.
user_merge['totcomp'] = user_merge['compliments_cool'] + user_merge['compliments_hot'] + user_merge['compliments_cute'] + user_merge['compliments_funny'] +user_merge['compliments_more'] +user_merge['compliments_note'] +user_merge['compliments_photos']+user_merge['compliments_plain'] +user_merge['compliments_profile'] +user_merge['compliments_writer'] +user_merge['compliments_list']
user_merge.head()
compliments = user_merge[['compliments_cool', 'compliments_hot', 'compliments_cute', 'compliments_funny', 'compliments_more', 'compliments_note', 'compliments_photos', 'compliments_plain','compliments_profile','compliments_writer','compliments_list', 'totcomp']]
compliments.head()
#Merging compliment calculations back to full dataset for modeling
# Each compliment count becomes a share of the user's total compliments.
user_mergenew = user_merge.join(compliments.div(compliments['totcomp'], axis=0), rsuffix='_perc')
user_mergenew.head()
user_mergenew.describe()
user_mergenew['Elite_Fans'].value_counts()
#subsetting to Elite members only
elites = user_mergenew[user_merge.is_elite2==1]
#Among elite members, how many have fans >20 and how many have fans <20
elites['Elite_Fans'].value_counts()
#recoding all nan values for compliments to 0
# NOTE(review): fillna(0) without inplace/assignment has no effect here.
elites.fillna(0)
#Beginning modeling of what makes an influential Elite vs. a non-influential elite
elites.columns
explanatory_features = elites[['compliments_cool_perc', 'compliments_hot_perc', 'compliments_cute_perc', 'compliments_funny_perc', 'compliments_more_perc', 'compliments_note_perc', 'compliments_photos_perc', 'compliments_plain_perc', 'compliments_profile_perc', 'compliments_writer_perc']]
explanatory_df = explanatory_features
explanatory_df = explanatory_df.fillna(0)
explanatory_df.dropna(how='all', inplace = True)
explanatory_colnames = explanatory_df.columns
print explanatory_colnames
response_series = elites.Elite_Fans
#response_series.dropna(how='all', inplace = True)
#removed = response_series.index[~response_series.index.isin(explanatory_df.index)]
#print removed
string_features = explanatory_df.ix[:, explanatory_df.dtypes == 'object']
numeric_features = explanatory_df.ix[:, explanatory_df.dtypes != 'object']
print string_features
print numeric_features
imputer_object = Imputer(missing_values='NaN', strategy='median', axis=0)
imputer_object.fit(numeric_features)
numeric_features = pandas.DataFrame(imputer_object.transform(numeric_features), columns = numeric_features.columns)
## pulling together numeric and encoded data.
explanatory_df = numeric_features
explanatory_df.head()
no_variation = find_zero_var(explanatory_df)
print no_variation
#looks like there is nothing to delete
explanatory_df.drop(no_variation['toDelete'], inplace = True)
# deleting perfect correlation
no_correlation = find_perfect_corr(explanatory_df)
print no_correlation
explanatory_df.drop(no_correlation['toRemove'], 1, inplace = True)
explanatory_df.dtypes
# scaling data
scaler = preprocessing.StandardScaler()
scaler.fit(explanatory_df)
explanatory_df = pandas.DataFrame(scaler.transform(explanatory_df), columns = explanatory_df.columns)
print explanatory_df.head()
# creating a random forest object.
## these are the default values of the classifier
rf = ensemble.RandomForestClassifier(n_estimators=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, random_state=None, verbose=0, min_density=None)
# let's compute ROC AUC of the random forest.
roc_scores_rf = cross_val_score(rf, explanatory_df, response_series, cv=10, scoring='roc_auc')
roc_score_tree = cross_val_score(tree.DecisionTreeClassifier(), explanatory_df, response_series, cv=10, scoring='roc_auc')
## let's compare the mean ROC AUC
print roc_scores_rf.mean()
print roc_score_tree.mean()
#random forest is definitely pretty good here; 84% versus the 65% for a decision tree
trees_range = range(10, 100, 10) #see what accuracy is like
param_grid = dict(n_estimators = trees_range)#tuning parameters is number estimators
grid = GridSearchCV(rf, param_grid, cv=10, scoring='roc_auc')
grid.fit(explanatory_df, response_series) # often will want to do this after night, and after feature selection
# Check out the scores of the grid search
grid_mean_scores = [result[1] for result in grid.grid_scores_]
# Plot the results of the grid search
plt.figure()
plt.plot(trees_range, grid_mean_scores)
#looks like 80 is a good figure
best_rf_tree_est = grid.best_estimator_
# how many trees did the best estiator have?
print best_rf_tree_est.n_estimators
# how accurate was the best estimator? .84
print grid.best_score_
## did accuracy improve? stayed about the same, so think 40 is really the magic number
rf.fit(explanatory_df, response_series)
important_features = rf.feature_importances_
#################
## BOOSTING TREES
#################
boosting_tree = ensemble.GradientBoostingClassifier()
roc_scores_gbm = cross_val_score(boosting_tree, explanatory_df, response_series, cv=10, scoring='roc_auc')
#let's compare our accuracies
print roc_scores_gbm.mean()
print roc_scores_rf.mean()
print roc_score_tree.mean()
# let's tune for num_trees, learning rate, and subsampling percent.
# need to import arange to create ranges for floats
from numpy import arange #pythons range function doesn't allow you to do floats
learning_rate_range = arange(0.01, 0.4, 0.02)
subsampling_range = arange(0.25, 1, 0.25)
n_estimators_range = range(25, 100, 25) #less than RF because by definition you are boosting
param_grid = dict(learning_rate = learning_rate_range, n_estimators = n_estimators_range, subsample = subsampling_range)
gbm_grid = GridSearchCV(boosting_tree, param_grid, cv=10, scoring='roc_auc')
gbm_grid.fit(explanatory_df, response_series)
# find the winning parameters
print gbm_grid.best_params_
# how does this compare to the default settings
# estimators = 100, subsample = 1.0, learning_rate = 0.1
# pull out the best score
print gbm_grid.best_score_
print grid.best_score_
## only slightly better than RF even after all the grid searching
## ROC curve accuracy of the GBM vs RF vs Tree Method
#not doing on all the CV splits
from sklearn.cross_validation import train_test_split
from sklearn import metrics
xTrain, xTest, yTrain, yTest = train_test_split(
                    explanatory_df, response_series, test_size =  0.3)
#comparing ROCs of our best estimators that came out of grid search
tree_probabilities = pandas.DataFrame(tree.DecisionTreeClassifier().fit(xTrain, yTrain).predict_proba(xTest))#wrap in data frame because 2 columns of probabilities, one for 0 class and 1 class, pandas data frame easy to extract
rf_probabilities = pandas.DataFrame(best_rf_tree_est.fit(xTrain, yTrain).predict_proba(xTest))
gbm_probabilities = pandas.DataFrame(gbm_grid.best_estimator_.fit(xTrain, yTrain).predict_proba(xTest))
# Column 1 holds the positive-class probabilities.
tree_fpr, tree_tpr, thresholds = metrics.roc_curve(yTest, tree_probabilities[1])
rf_fpr, rf_tpr, thresholds = metrics.roc_curve(yTest, rf_probabilities[1])
gbm_fpr, gbm_tpr, thresholds = metrics.roc_curve(yTest, gbm_probabilities[1])
plt.figure()
plt.plot(tree_fpr, tree_tpr, color = 'g')
plt.plot(rf_fpr, rf_tpr, color = 'b')
plt.plot(gbm_fpr, gbm_tpr, color = 'r')
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
## what does this tell us for this sample?
##either random forest or boosting is always to left and above decision tree so these are the better option
##false positive is the "at the expense of axis"
## create partial dependence plot on most important features for gbm.
importances = pandas.DataFrame(gbm_grid.best_estimator_.feature_importances_, index = explanatory_df.columns, columns =['importance'])
#notice these are not sorted
#we do not know if this in importance in that you will be in the hall of fame or not, just that it's important
# NOTE(review): DataFrame.sort was removed in pandas 0.20; use sort_values.
importances.sort(columns = ['importance'], ascending = False, inplace = True)
print importances
from sklearn.ensemble.partial_dependence import plot_partial_dependence
features = [i for i, j in enumerate(explanatory_df.columns.tolist()) if j in importances.importance[0:3].index.tolist()]
#only take features of top 3 importances, iterate through feature names that only match the top 3 importances
#i tells you which index in this list, that particular name occured in; these features exist in indexes 2,5,16 of column names; j is the feature name
#iterating through two objects simultaneously, loop through the index and value, and only pull out the value when the index is in the top 3 feature importances
fig, axs = plot_partial_dependence(gbm_grid.best_estimator_, explanatory_df, features, feature_names = explanatory_df.columns)
#look at a number of certain, where likelihood really rises a lot [this gives you a threshold], if this was not scaled axis
|
import pybullet as p
import pybullet_data
import time
import math
import numpy as np
import time
from scipy.spatial.transform import Rotation as R
import scipy.stats
import typing as T
import collections
from sim_objects import Arm, Bottle
# Immutable records exchanged between the planner and the simulator.
# NOTE: namedtuples are immutable -- fields cannot be reassigned in place.
SimResults = collections.namedtuple(
    'SimResults', ['is_fallen', 'is_collision',
                   'bottle_pos', 'bottle_ori', 'joint_pose', 'z_rot_ang'])
# Snapshot of a simulation state: bottle pose plus arm joint angles.
StateTuple = collections.namedtuple('StateTuple', ['bottle_pos', 'bottle_ori', 'joints'])
class ActionSpace(object):
    """Discrete action space built from per-joint angle offsets.

    Each action pairs a joint-space delta (one joint nudged by +/-da_rad,
    optionally a no-op row) with a trajectory iteration count.
    """
    default_da_rad = 5.0 * math.pi / 180.0  # default 5 degrees offsets

    def __init__(self, num_dof, da_rad=default_da_rad, include_no_change=False,
                 ignore_last_joint=True):
        self.num_dof = num_dof
        # Scale up slightly so each action's resulting state lands in a
        # distinct discretization bucket.
        self.da_rad = da_rad * 1.2
        # self.traj_iter_set = [200, 400]
        self.traj_iter_set = [200]
        self.max_iters = max(self.traj_iter_set)
        positive = np.eye(N=num_dof) * self.da_rad
        negative = -positive
        if ignore_last_joint:
            # Drop the final joint's rows so it is never commanded.
            positive = positive[:-1, :]
            negative = negative[:-1, :]
        rows = [positive, negative]
        if include_no_change:
            rows.insert(0, np.zeros((1, num_dof)))
        self.actions_mat = np.vstack(rows)
        self.num_actions = self.actions_mat.shape[0] * len(self.traj_iter_set)
        self.action_ids = list(range(self.num_actions))

    def get_action(self, id):
        """Map an integer action id to (joint_delta_row, num_traj_iters)."""
        assert (isinstance(id, int))
        assert (0 <= id < self.num_actions)
        n_rows = self.actions_mat.shape[0]
        return self.actions_mat[id % n_rows, :], self.traj_iter_set[id // n_rows]

    def get_action_time_cost(self, action: T.Tuple):
        """Relative time cost of an action; the longest trajectory costs 1.0."""
        _, num_iters = action
        return self.max_iters / float(num_iters)
class EnvParams(object):
    """Bottle simulation parameters (fill level, friction) together with the
    probabilities they were sampled at. Supports +, sum() and / so sets of
    parameters can be averaged."""

    # Field names used by the element-wise arithmetic helpers below.
    _FIELDS = ('bottle_fill', 'bottle_fric', 'bottle_fill_prob', 'bottle_fric_prob')

    def __init__(self, bottle_fill, bottle_fric, bottle_fill_prob,
                 bottle_fric_prob):
        self.bottle_fill = bottle_fill
        self.bottle_fric = bottle_fric
        self.bottle_fill_prob = bottle_fill_prob
        self.bottle_fric_prob = bottle_fric_prob

    def __repr__(self):
        return "fill, fric, pfill, pfric: %.3f, %.3f, %.2f, %.2f" % (
            self.bottle_fill, self.bottle_fric, self.bottle_fill_prob, self.bottle_fric_prob)

    def __add__(self, other):
        """Field-wise sum of two parameter sets."""
        summed = {name: getattr(self, name) + getattr(other, name)
                  for name in self._FIELDS}
        return EnvParams(**summed)

    def __radd__(self, other):
        """Support sum(): the integer 0 start value acts as an identity."""
        return self if isinstance(other, int) else self + other

    def __truediv__(self, other):
        """Field-wise division by a scalar (used for averaging)."""
        scaled = {name: getattr(self, name) / other for name in self._FIELDS}
        return EnvParams(**scaled)
class Environment(object):
    """pybullet wrapper: loads the plane and a KUKA arm, executes joint-space
    trajectories against a simulated bottle, and reports collision / fall
    outcomes as SimResults records.
    """
    # pybullet_data built-in models
    plane_urdf_filepath = "plane.urdf"
    arm_filepath = "kuka_iiwa/model.urdf"
    table_filepath = "table/table.urdf"
    gripper_path = "kuka_iiwa/kuka_with_gripper.sdf"
    INF = 1e10
    # aggregation modes for multi-simulation results
    SIM_AVG = 0
    SIM_MOST_COMMON = 1
    GRAVITY = -9.81  # m/s^2 along -z

    def __init__(self, arm: Arm, bottle: Bottle, is_viz):
        """Store the arm/bottle handles, visualization settings and the
        default bottle-parameter sampling distributions."""
        # store arm and objects
        self.arm = arm
        self.bottle = bottle

        # simulation visualization params
        self.is_viz = is_viz
        self.trail_dur = 1  # length of visualizing arm trajectory
        self.SIM_VIZ_FREQ = 1 / 240.
        self.goal_line_id = None
        self.target_line_id = None

        # simulation run params
        # if no object moves more than this thresh, terminate sim early
        self.max_iters = 600  # max number of iters in case objects oscillate

        # Default distributions for sampling bottle parameters
        self.fric_distrib = None
        self.fillp_distrib = None
        self.set_distribs()

        # tolerances for terminating simulation / classifying outcomes
        self.min_ang_rot = 0.8  # deg / SIM_VIZ_FREQ
        self.fall_ang_thresh = 30 * math.pi / 180.0
        self.no_movement_thresh = 3e-3

    def set_distribs(self, min_fric=None, max_fric=None, min_fill=None, max_fill=None):
        """(Re)build sampling distributions: friction ~ Normal (min/max at
        2.5 std devs), fill proportion ~ Uniform. None bounds fall back to
        the bottle's own limits."""
        if min_fric is None:
            min_fric = self.bottle.min_fric
        if max_fric is None:
            max_fric = self.bottle.max_fric
        if min_fill is None:
            min_fill = self.bottle.min_fill
        if max_fill is None:
            max_fill = self.bottle.max_fill
        mean_fric = (min_fric + max_fric) / 2.
        std_fric = (max_fric - mean_fric) / 2.5  # want min and max to be at 2.5 std deviations
        # NOTE: DO NOT USE KWARGS for scipy norm, use ARGS
        # since scipy uses "loc" for mean and "scale" for stddev, avoid passing
        # in wrong kwargs and having them ignored
        self.fric_distrib = scipy.stats.norm(mean_fric, std_fric)
        mean_fillp = (min_fill + max_fill) / 2.
        # std_fillp = (max_fill - mean_fillp) / 3.
        self.fillp_distrib = scipy.stats.uniform(loc=min_fill, scale=max_fill - min_fill)
        print("Mean Fill: %.3f" % mean_fillp)
        print("Mean Fric: %.3f, Std: %.3f" % (mean_fric, std_fric))

    def change_bottle_pos(self, new_pos):
        """Set the bottle's start position for subsequent simulations."""
        self.bottle.start_pos = new_pos

    def run_multiple_sims(self, sim_params_set: T.List[EnvParams], action,
                          state: StateTuple):
        """
        Simply run multiple simulations with different bottle parameters.
        Return a list of all results, each entry as a tuple. Let the planner do
        post-processing of these results.
        """
        all_results = []
        for sim_params in sim_params_set:
            results = self.run_sim(action=action,
                                   sim_params=sim_params,
                                   state=state)
            all_results.append(results)
            # extra optimization: if arm didn't touch bottle, no need for more
            # iterations since different bottle friction/mass won't change outcome
            if not results.is_collision:
                break
        return all_results

    def run_sim(self, sim_params: EnvParams, action: T.Tuple,
                state: StateTuple) -> SimResults:
        """
        High-level interface with simulator: run one simulation from the
        given state (bottle pose + arm joints). Builds a linear joint-space
        trajectory toward the action's target and hands it to simulate_plan.
        """
        # BUG FIX: StateTuple is a namedtuple, so the original
        # `state.joints = ...` raised AttributeError whenever joints was None;
        # fall back to the arm's current joints via a local instead.
        joints = state.joints if state.joints is not None else self.arm.joint_pose

        dq, num_iters = action
        target_joint_pose = joints + dq
        joint_traj = np.linspace(joints, target_joint_pose, num=num_iters)

        return self.simulate_plan(joint_traj=joint_traj,
                                  start_bottle_pos=state.bottle_pos,
                                  start_bottle_ori=state.bottle_ori,
                                  sim_params=sim_params)

    def reset(self):
        """Rebuild the world: gravity, ground plane and arm model.

        Skipped entirely in visualization mode so the GUI scene is not torn
        down each rollout. NOTE(review): this means viz runs never reload the
        plane/arm here -- confirm that is intended.
        """
        if self.is_viz:
            return
        p.resetSimulation()
        p.setGravity(0, 0, self.GRAVITY)
        p.loadURDF(self.plane_urdf_filepath, basePosition=[0, 0, 0])
        self.arm.kukaId = p.loadURDF(self.arm_filepath, basePosition=[0, 0, 0])

    def command_new_pose(self, joint_pose):
        """Send a position-control target for every joint of the arm."""
        for ji, jval in enumerate(joint_pose):
            p.setJointMotorControl2(bodyIndex=self.arm.kukaId,
                                    jointIndex=ji,
                                    controlMode=p.POSITION_CONTROL,
                                    targetPosition=jval,
                                    force=self.arm.force,
                                    positionGain=self.arm.position_gain)

    def simulate_plan(self, joint_traj, start_bottle_pos, start_bottle_ori, sim_params: EnvParams) -> SimResults:
        """Run simulation with given joint-space trajectory. Does not reset arm
        joint angles after simulation is done, so that value can be guaranteed to be untouched.

        Arguments:
            joint_traj {ndarray} -- N x num_dof trajectory of joints
            start_bottle_pos -- bottle start position, or None to use the
                bottle's default start pose
            start_bottle_ori -- bottle start orientation
            sim_params {EnvParams} -- bottle fill/friction for this rollout

        Returns:
            SimResults for this single rollout.
        """
        self.reset()
        self.arm.reset(joint_traj[0, :])
        # getLinkState index 4 is the world position of the link frame
        init_arm_pos = np.array(p.getLinkState(
            self.arm.kukaId, self.arm.EE_idx)[4])
        prev_arm_pos = np.copy(init_arm_pos)

        # create new bottle object with parameters set beforehand
        self.bottle.set_fill_proportion(sim_params.bottle_fill)
        self.bottle.set_fric(sim_params.bottle_fric)
        if start_bottle_pos is not None:
            self.bottle.create_sim_bottle(start_bottle_pos, ori=start_bottle_ori)
            prev_bottle_pos = start_bottle_pos
            prev_bottle_ori = start_bottle_ori
        else:
            self.bottle.create_sim_bottle(ori=start_bottle_ori)
            prev_bottle_pos = self.bottle.start_pos
            prev_bottle_ori = self.bottle.start_ori
        bottle_stopped = False
        is_collision = False

        step_i = 0  # renamed from `iter`, which shadowed the builtin
        traj_len = joint_traj.shape[0]
        while step_i < traj_len:
            # set target joint pose
            next_joint_pose = joint_traj[min(step_i, traj_len - 1), :]
            self.command_new_pose(next_joint_pose)

            # run one sim iter
            p.stepSimulation()
            contacts = p.getContactPoints(
                self.arm.kukaId, self.bottle.bottle_id)
            is_collision |= (len(contacts) > 0)

            # get feedback and visualize trajectories
            if self.is_viz and prev_arm_pos is not None:
                time.sleep(0.002)
                ls = p.getLinkState(self.arm.kukaId, self.arm.EE_idx)
                arm_pos = ls[4]
                # Uncomment below to visualize lines of target and actual trajectory
                # also slows down simulation, so only run if trying to visualize
                # p.addUserDebugLine(prev_target, next_target, [0, 0, 0.3], 1, 1)
                # p.addUserDebugLine(arm_pos, prev_arm_pos, [1, 0, 0], 1,
                #                    self.trail_dur)
                prev_arm_pos = arm_pos

            # Early-termination check on bottle motion, currently disabled:
            # self.bottle.update_pose()
            # bottle_vert_stopped = math.isclose(
            #     self.bottle.pos[2] - prev_bottle_pos[2],
            #     0.0, abs_tol=1e-05)
            # bottle_horiz_stopped = math.isclose(
            #     np.linalg.norm(
            #         np.array(self.bottle.pos)[:2] - np.array(prev_bottle_pos)[:2]),
            #     0.0, abs_tol=1e-05)
            # angle_diff = abs(self.bottle.calc_vert_angle(ori=prev_bottle_ori) -
            #                  self.bottle.calc_vert_angle()) * 180 / math.pi
            # bottle_angle_stopped = angle_diff <= self.min_ang_rot
            # bottle_stopped = bottle_vert_stopped and bottle_horiz_stopped  # and bottle_angle_stopped
            # prev_bottle_pos = self.bottle.pos
            # prev_bottle_ori = self.bottle.ori

            step_i += 1

        # generate cost and final position
        self.arm.update_joint_pose()
        self.bottle.update_pose()
        is_fallen, z_rot_ang = self.check_bottle_fallen(ori=self.bottle.ori)
        # BUG FIX: start_bottle_pos can be None (default-pose branch above),
        # which made the original subtraction crash; prev_bottle_pos holds the
        # true start position in both branches.
        no_movement = np.linalg.norm(
            np.asarray(prev_bottle_pos) - np.asarray(self.bottle.pos)) < self.no_movement_thresh

        # remove bottle object, can't just reset pos since need to change params each iter
        p.removeBody(self.bottle.bottle_id)

        # A contact that produced no net bottle movement is not reported as a
        # meaningful collision.
        return SimResults(is_fallen=is_fallen, is_collision=is_collision and not no_movement,
                          bottle_pos=self.bottle.pos, bottle_ori=self.bottle.ori,
                          joint_pose=self.arm.joint_pose, z_rot_ang=z_rot_ang)

    def check_bottle_fallen(self, ori):
        """Return (fallen?, tilt angle): fallen when the tilt from vertical
        exceeds fall_ang_thresh."""
        angle = self.bottle.calc_vert_angle(ori)
        return abs(angle) > self.fall_ang_thresh, angle

    def gen_random_env_param_set(self, num=1):
        """Sample `num` EnvParams: fill and friction values plus the
        probability weight each was drawn with."""
        rand_fills, rand_fill_probs = self.get_random_sample_prob(
            distrib=self.fillp_distrib, minv=self.bottle.min_fill, maxv=self.bottle.max_fill, num=num)
        rand_frics, rand_fric_probs = self.get_random_sample_prob(
            distrib=self.fric_distrib, minv=self.bottle.min_fric, maxv=self.bottle.max_fric, num=num)
        param_set = []
        for i in range(num):
            param = EnvParams(bottle_fill=rand_fills[i],
                              bottle_fric=rand_frics[i],
                              bottle_fill_prob=rand_fill_probs[i],
                              bottle_fric_prob=rand_fric_probs[i])
            param_set.append(param)
        return param_set

    @staticmethod
    def get_random_sample_prob(distrib, minv, maxv, num=1):
        """Draw `num` samples clipped to [minv, maxv], each paired with a
        one-tailed probability weight: cdf(v) below the mean, 1 - cdf(v)
        above it.

        Args:
            distrib (scipy.stats frozen distribution): source distribution
            minv, maxv: clipping bounds for the samples
            num (int, optional): number of samples. Defaults to 1.

        Returns:
            (samples, probs) -- parallel sequences of length num.
        """
        rand_vars = distrib.rvs(size=num)
        rand_vars = np.clip(rand_vars, minv, maxv)
        probs = []
        for v in rand_vars:
            if v < distrib.mean():
                p = distrib.cdf(v)
            else:
                p = 1 - distrib.cdf(v)
            probs.append(p)
        return rand_vars, probs

    @staticmethod
    def draw_line(lineFrom, lineTo, lineColorRGB, lineWidth, lifeTime,
                  replaceItemUniqueId=None):
        """Thin wrapper over p.addUserDebugLine, optionally replacing an
        existing debug line in place."""
        if replaceItemUniqueId is not None:
            return p.addUserDebugLine(lineFrom, lineTo, lineColorRGB,
                                      lineWidth, lifeTime, replaceItemUniqueId=replaceItemUniqueId)
        else:
            return p.addUserDebugLine(lineFrom, lineTo, lineColorRGB,
                                      lineWidth, lifeTime)

    @staticmethod
    def avg_quaternion(quaternions):
        """Finds average of quaternions from this post. Doesn't seem to work
        too well though.
        https://www.mathworks.com/matlabcentral/fileexchange/40098-tolgabirdal-averaging_quaternions
        """
        A = np.zeros((4, 4))
        assert (quaternions.shape[0] == 4)  # 4 x N
        num_quats = quaternions.shape[1]
        for i in range(num_quats):
            q = quaternions[:, i].reshape((4, 1))
            A += (q @ q.T)
        A /= float(num_quats)
        # A is symmetric PSD, so SVD stands in for the eigendecomposition.
        U, s, VT = np.linalg.svd(A)
        # np.linalg.svd returns singular values sorted descending, so row 0
        # of VT is the eigenvector of the LARGEST eigenvalue -- the average
        # quaternion. (The original comment claiming "last eigenvector" was
        # wrong; the code below was already correct.)
        avg_quat = VT[0, :]
        return avg_quat  # / np.linalg.norm(avg_quat)

    @staticmethod
    def state_to_str(state):
        """Format an iterable of numbers as comma-separated %.3f values."""
        s = ", ".join(["%.3f" % val for val in state])
        return s
def test_environment_avg_quat():
    """Smoke-check Environment.avg_quaternion on two known rotations."""
    rotations = R.from_euler('zyx', [
        [90, 0, 70],
        [45, 20, 0]], degrees=True)
    # avg_quaternion expects a 4 x N array (quaternions as columns).
    quats = rotations.as_quat().T
    mean_quat = Environment.avg_quaternion(quats)
    mean_angles = R.from_quat(mean_quat).as_euler('zyx', degrees=True)
    print(mean_angles)
|
import math
import matplotlib.pyplot as plt
import numpy as np
def makeODE(delta):
    """Build f(sigma, theta) = delta * e^theta - theta, the right-hand side
    of the thermal ODE for a given delta."""
    def ode(sigma, theta):
        return delta * math.exp(theta) - theta
    return ode
def makeInverseODE(delta):
    """Build the reciprocal right-hand side, 1 / (delta * e^theta - theta),
    with the argument order swapped (theta first) for integrating sigma(theta)."""
    def ode(theta, sigma):
        return 1 / (delta * math.exp(theta) - theta)
    return ode
def rk4(f, initX, initY, h, stop, tolerance = False):
    """Integrate y' = f(x, y) with the classic 4th-order Runge-Kutta method.

    Starts at (initX, initY) with step size h. When tolerance is False,
    `stop` is an exclusive upper bound on x; when True, `stop` is a
    convergence threshold on |y - previous y|.

    Returns a list of (x, y, [k1, k2, k3, k4]) records, one per step taken
    (the final updated y is not appended as a record).
    """
    if tolerance:
        def keep_going(x, y, prev):
            return abs(y - prev) > stop
    else:
        def keep_going(x, y, prev):
            return x < stop

    records = []
    x, y, prev = initX, initY, float("-inf")
    while keep_going(x, y, prev):
        k1 = h * f(x, y)
        k2 = h * f(x + h / 2, y + k1 / 2)
        k3 = h * f(x + h / 2, y + k2 / 2)
        k4 = h * f(x + h, y + k3)
        records.append((x, y, [k1, k2, k3, k4]))
        prev = y
        y = y + (k1 + 2 * k2 + 2 * k3 + k4) / 6
        x += h
    return records
def secantMethod(f, guess1, guess2, tolerance):
    """Find a root of f (called as f(x, 0)) with the secant method.

    Iterates until the relative change |(x2 - x1) / x1| drops below
    `tolerance`. Returns a list of (iteration_index, estimate) pairs,
    beginning with both initial guesses.
    """
    older, newer = guess1, guess2
    history = [(0, older), (1, newer)]
    step = 2
    while abs((newer - older) / older) > tolerance:
        slope = (f(newer, 0) - f(older, 0)) / (newer - older)
        older, newer = newer, newer - f(newer, 0) / slope
        history.append((step, newer))
        step += 1
    return history
def printResults(results, amountToDisplay=None):
    # Print solver records as '&'-separated, LaTeX-style table rows.
    # NOTE(review): Python 2 only -- `print` statement syntax below, and the
    # amountToDisplay/2 slices rely on Py2 integer (floor) division.
    if amountToDisplay is not None:
        # Show only the first and last amountToDisplay/2 records, with an
        # ellipsis row between them.
        beginning = results[:amountToDisplay/2]
        ending = results[-1 * amountToDisplay/2:]
        printResults(beginning)
        print "..."
        printResults(ending)
        return
    for r in results:
        if isinstance(r, tuple):
            # rk4 records are (x, y, ks): flatten the nested ks list too.
            for i in r:
                if isinstance(i, list):
                    for j in i:
                        print j, '&',
                else:
                    print i, '&',
        else:
            print r, '&',
        # Row terminator (LaTeX '\\'); indentation reconstructed as one
        # terminator per record -- NOTE(review): confirm against original.
        print '\\'
def getXCoords(points):
    """Map (x, y, ...) records to their first components (a map object,
    lazy under Python 3)."""
    return map(lambda point: point[0], points)
def getYCoords(points):
    """Map (x, y, ...) records to their second components (a map object,
    lazy under Python 3)."""
    return map(lambda point: point[1], points)
def earlySolution(delta, xs):
    """Closed-form early-time solution theta(sigma) of the linearized ODE:
    (delta/(delta-1)) * (e^((delta-1)*sigma) - 1), evaluated lazily over xs."""
    coeff = delta / (delta - 1)
    return map(lambda sig: coeff * math.exp((delta - 1) * sig) - coeff, xs)
def earlySolutionInverse(delta, ys):
    """Inverse of earlySolution: sigma as a function of theta, evaluated
    lazily over ys."""
    coeff = delta / (delta - 1)
    return map(lambda theta: (1 / (delta - 1)) * np.log((theta + coeff) / coeff), ys)
if __name__ == '__main__':
    # Driver/plotting code. NOTE(review): Python 2 era (see printResults);
    # under Python 3, getXCoords returns a map object so len(xs) below fails.
    # 1a) Solve ODE through RK4
    ode1 = makeODE(0.2)
    # RK4 with a fixed upper bound on sigma (tolerance mode commented out)
    points = rk4(ode1, 0, 0, 0.00001, 10)
    # points = rk4(ode1, 0, 0, 0.001, 0.000001, True)
    # printResults(points, 10)
    # 1b) Find theta fizzle with rootfinding
    # printResults(secantMethod(ode1, 0.0018, 0.019, 0.001))
    # 1c) Graph Results
    xs = getXCoords(points)
    earlyX = np.arange(0, 3, 0.00001)
    # Horizontal reference line at theta_fizzle ~= 0.259; the lambda ignores
    # its argument and sizes the list from the outer xs.
    fizzleLine = lambda x: [0.259 for _ in range(len(xs))]
    eqnPlot, = plt.plot(xs, getYCoords(points), 'k')
    earlySoln, = plt.plot(earlyX, earlySolution(0.2, earlyX), 'r-')
    fizzL, = plt.plot(xs, fizzleLine(xs), 'b--')
    plt.legend([eqnPlot, earlySoln, fizzL],
               ["Integrated ODE", "Early Solution", r'$\theta_{fizzle}$'],
               loc=4)
    plt.xlabel(r'$\sigma$')
    plt.ylabel(r'$\theta$')
    plt.title(r'$\theta$ vs $\sigma$')
    plt.show()
|
import random

# Recognized operator characters. NOTE(review): "p" and "s" are accepted
# here but never implemented in the main loop below -- confirm intent.
op = ["+", "-", "*", "/", "p", "%", "!", "s", "r"]
# First operand, read as text; validated/converted below.
a = input("the first number =\n")
def factorial(a):
    """Return a! after truncating a to an int.

    BUG FIX: the original returned 0 for a == 0 (the accumulator started at
    a and the loop body never ran); 0! is 1. Inputs below 1 now also yield 1
    instead of being returned unchanged.
    """
    a = int(a)
    if a <= 0:
        return 1
    r = a
    # Multiply down through 1..a-1 (r already holds a itself).
    for i in range(1, a):
        r = r * i
    return r
# Keep prompting until the first operand parses as a number.
try:
    a = float(a)
except:
    while type(a) == str:
        a = input("please insert the first number in numbers")
        try:
            a = float(a)
        except:
            pass
# Each command is an operator character followed by the second operand
# (e.g. "+3" or "*2.5"); unary ops ("!", "r") use only the operator.
bb = input("please insert the operation and the second number \n")
b = bb[0]
while b in op:
    if b == "!":
        # Unary: factorial of the running value.
        r = factorial(a)
        print(f"{int(a)}{b} = {r} ")
        a = r
    elif b == "r":
        # Unary: replace the running value with a random int in [1, a].
        a = int(a)
        r = random.randint(1, a)
        print(r)
    else:
        # Binary operation: parse the second operand after the operator,
        # reprompting until it is numeric.
        c = bb[1:]
        try:
            c = float(c)
        except:
            while type(c) == str:
                c = input("please insert the second number in numbers")
                try:
                    c = float(c)
                except:
                    pass
        if b == "+" or b == "plus":
            r = a + c
        elif b == "-":
            r = a - c
        elif b == "*":
            r = a * c
        elif b == "/":
            if c == 0:
                # NOTE(review): on division by zero, r keeps its previous
                # value and the print below still reports it -- confirm.
                print(" you can not do it hhhhhhhh")
            else:
                r = a / c
        elif b == "%":
            if c == 0:
                print(" you can not do it hhhhhhhh")
            else:
                # "%" here means "a as a percentage of c", not modulo.
                k = a / c * 100
                r = (f"{k} %")
        else:
            # Operators "p" and "s" are listed in op but unimplemented, so
            # they land here. NOTE(review): r may be unbound on the very
            # first loop iteration when this branch is taken.
            print("you inserted a undefined operation ")
        print(f"{a} {b} {c} = {r}")
        a = r
    bb = input("please insert the operation and the second number \n")
    b = bb[0]
else:
    # while/else: runs once the operator is not recognized; reports the
    # last computed result.
    print(f"bad operation your final result is = {r}")
|
# Available denominations, largest first.
bills = [500, 200, 100, 50, 20, 10, 5, 2, 1]


def find_adequate_bill(num):
    """Return the index (into bills) of the largest bill not exceeding num.

    BUG FIX: the original compared with a strict ">", so an amount exactly
    equal to a bill skipped it (e.g. 500 was paid as 200 + 200 + 100).
    """
    for i in range(len(bills)):
        if num >= bills[i]:
            return i
    # Fallback: smallest bill (only reachable for amounts below 1).
    return len(bills) - 1


def pay_with_bills_greedy(num):
    """Greedily decompose num into bill counts, returned in bills order."""
    change = [0] * len(bills)
    while num > 0:
        bill = find_adequate_bill(num)
        change[bill] += 1
        num -= bills[bill]
    return change


print(pay_with_bills_greedy(523))
|
import math

# Build a substitution box (sbox) mapping 62 characters (A-Z, a-z, 0-9) to
# three-character codes, then use it to encrypt and decrypt console input.
listofprimes=[0]*26
smalllistofprimes=[0]*26
smalllistofnos=[0]*10
listofalphabets=[None]*62
# Slots 0-25: uppercase A-Z.
for i in range(26):
    listofalphabets[i] = chr(65+i)
j=0
# Slots 26-51: lowercase a-z.
for i in range(26,52,1):
    listofalphabets[i] = chr(97+j)
    j=j+1
j=0
# Slots 52-61: digits 0-9.
for i in range(52,62,1):
    listofalphabets[i] = chr(48+j)
    j=j+1
z=0
j=1
count = 23
i=6
# First three codes are seeded by hand; they appear to be small primes
# offset by +100 (102 -> 2, 103 -> 3) -- NOTE(review): 105 would match 5,
# confirm the intended scheme.
listofprimes[0]=102
listofprimes[1]=103
listofprimes[2]=105
# Collect the next 23 primes starting from 6; one/two-digit primes get a
# +100 offset so every code has three digits.
while(count!=0):
    flag = 0
    # flag counts divisors of i in [1, sqrt(i)]; exactly one divisor
    # (namely 1) means i is prime.
    for j in range(1, int(math.sqrt(i))+1, 1):
        if i%j ==0:
            flag = flag+1
    if flag ==1:
        if int(len(str(i)))==2 or int(len(str(i)))==1 :
            x = i+100
        else:
            x=i
        listofprimes[z+3]=x
        z=z+1
        count = count-1
    i=i+1
listofprimes.reverse()
#Removing the 0s
# while(0 in listofprimes):
#     listofprimes.remove(0)
countdown=26
i=0
# Lowercase letters reuse the uppercase codes shifted by another +100.
while(countdown!=0):
    smalllistofprimes[i]=listofprimes[i]+100
    countdown = countdown-1
    i= i+1
# Digits map to tripled letters: '0' -> "AAA", '1' -> "BBB", ...
for i in range(10):
    smalllistofnos[i]=chr(65+i)+chr(65+i)+chr(65+i)
keys=listofprimes+smalllistofprimes+smalllistofnos
sbox=dict(zip(listofalphabets, keys))
print(sbox)
#Sbox created.
PT=[]
RevPT=[]
CT=[]
# Encryption: reverse the plaintext, then substitute each character via sbox.
PT=input("Enter the Plaintext here:")
RevPT=PT[::-1]
for i in RevPT:
    for j in sbox.keys():
        if i==j:
            CT.append(sbox[j])
print("The Cipher Text is:")
print(CT)
#Encryption Completed
# Decryption: invert the sbox lookup, then undo the reversal.
# NOTE(review): sbox values are not guaranteed unique; index() returns the
# first match, so duplicate codes would decrypt ambiguously -- confirm.
rev_ans=[]
sbox_key_list=list(sbox.keys())
sbox_value_list=list(sbox.values())
for i in CT:
    for j in sbox.values():
        if i==j:
            rev_ans.append(sbox_key_list[sbox_value_list.index(j)])
rev_ans.reverse()
PT=[]
PT=rev_ans
print("The Plain Text is:")
print(PT)
|
#!/usr/bin/env python3
import numpy as np

# Aggregate "cputime" entries from batched log files into a table:
# one row per n, with one averaged timing column per m.
result = []
for n in [16, 32, 48, 64, 96]:
    row = [n]
    for m in [3, 4, 5]:
        name = "%02d_%02d" % (m, n)
        for i in range(m*m*m):
            tmp = []
            with open("test004/%s/log%06d.txt" % (name, i)) as fh:
                # Timing lines look like "...cputime...: <seconds>".
                for line in fh:
                    if "cputime" in line:
                        tmp.append( float(line.split(":")[-1]) )
        # NOTE(review): tmp is re-created inside the i-loop, so this average
        # only covers the LAST log file of the batch; if the intent was to
        # average across all m^3 logs, `tmp = []` belongs before the i-loop.
        # (Indentation reconstructed -- confirm against the original file.)
        row += [np.average(tmp)]
    result += [row]
np.savetxt("test004_result2.txt", result)
|
# from rest_framework.decorators import api_view
# from rest_framework.generics import GenericAPIView, ListAPIView, CreateAPIView, RetrieveAPIView, UpdateAPIView, DestroyAPIView, ListCreateAPIView, RetrieveUpdateAPIView, RetrieveDestroyAPIView , RetrieveUpdateDestroyAPIView
# from rest_framework.mixins import ListModelMixin, CreateModelMixin, RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin
from django.shortcuts import render
from rest_framework.response import Response
from .models import Student
from .serializers import StudentSerializer
from rest_framework import status
from rest_framework import viewsets
# Crud Operation by just two classes in concrete view
# class StudentRUD(RetrieveUpdateDestroyAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# class StudentLC(ListCreateAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# # Concrete view classes (All available Options)
# class StudentList(ListAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# class StudentCreate(CreateAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# class StudentRetrieve(RetrieveAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# class StudentUpdate(UpdateAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# class StudentDestroy(DestroyAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# class StudentLC(ListCreateAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# class StudentRU(RetrieveUpdateAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# class StudentRD(RetrieveDestroyAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# class StudentRUD(RetrieveUpdateDestroyAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# # Generic API view and Model Mixin (In two classes)
# # List and Create (pk not needed)
# class StudentLC(GenericAPIView, ListModelMixin, CreateModelMixin):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# def get(self, request, *args, **kwargs):
# return self.list(request, *args, **kwargs)
# def post(self, request, *args, **kwargs):
# return self.create(request, *args, **kwargs)
# # Retrieve, update and destroy in one class (all need pk)
# class StudentRUD(GenericAPIView, RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# # Get all data
# def get(self, request, *args, **kwargs):
# return self.retrieve(request, *args, **kwargs)
# # Update data
# def put(self, request, *args, **kwargs):
# return self.update(request, *args, **kwargs)
# # Delete data
# def delete(self, request, *args, **kwargs):
# return self.destroy(request, *args, **kwargs)
# # Generic API view and Model Mixin (all operations One by One)
# # Get all data
# class StudentList(GenericAPIView, ListModelMixin):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# def get(self, request, *args, **kwargs):
# return self.list(request, *args, **kwargs)
# # Create data in Database
# class StudentCreate(GenericAPIView, CreateModelMixin):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# def post(self, request, *args, **kwargs):
# return self.create(request, *args, **kwargs)
# # Get one data
# class StudentRetrieve(GenericAPIView, RetrieveModelMixin):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# def get(self, request, *args, **kwargs):
# return self.retrieve(request, *args, **kwargs)
# # Update data
# class StudentUpdate(GenericAPIView, UpdateModelMixin):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# def put(self, request, *args, **kwargs):
# return self.update(request, *args, **kwargs)
# # Delete data
# class StudentDestroy(GenericAPIView, DestroyModelMixin):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# def delete(self, request, *args, **kwargs):
# return self.destroy(request, *args, **kwargs)
# # Testing by browseable api
# @api_view(['GET','POST','PUT','PATCH','DELETE'])
# def student_api(request,pk=None):
# if request.method == 'GET':
# id = pk
# if id is not None:
# stu = Student.objects.get(id=id)
# serializer = StudentSerializer(stu)
# return Response(serializer.data)
# stu = Student.objects.all()
# serializer = StudentSerializer(stu, many=True)
# return Response(serializer.data)
# if request.method == 'POST':
# serializer = StudentSerializer(data = request.data)
# if serializer.is_valid():
# serializer.save()
# return Response({'msg':'Data created'}, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# if request.method == 'PUT':
# id=pk
# stu = Student.objects.get(pk=id)
# serializer = StudentSerializer(stu, data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response({'msg':'Data updated'})
# return Response(serializer.errors)
# if request.method == 'PATCH':
# id=pk
# stu = Student.objects.get(pk=id)
# serializer = StudentSerializer(stu, data=request.data, partial=True)
# if serializer.is_valid():
# serializer.save()
# return Response({'msg':'Partial Data updated'})
# return Response(serializer.errors)
# if request.method == 'DELETE':
# id = pk
# stu = Student.objects.get(pk=id)
# stu.delete()
# return Response({'msg':'Data Deleted'})
# If we use a 3rd-party application for testing, then we use this function
# @api_view(['GET','POST','PUT','DELETE'])
# def student_api(request):
# if request.method == 'GET':
# id = request.data.get('id')
# if id is not None:
# stu = Student.objects.get(id=id)
# serializer = StudentSerializer(stu)
# return Response(serializer.data)
# stu = Student.objects.all()
# serializer = StudentSerializer(stu, many=True)
# return Response(serializer.data)
# if request.method == 'POST':
# serializer = StudentSerializer(data = request.data)
# if serializer.is_valid():
# serializer.save()
# return Response({'msg':'Data created'})
# return Response(serializer.errors)
# if request.method == 'PUT':
# id = request.data.get('id')
# stu = Student.objects.get(pk=id)
# serializer = StudentSerializer(stu, data=request.data, partial=True)
# if serializer.is_valid():
# serializer.save()
# return Response({'msg':'Data updated'})
# return Response(serializer.errors)
# if request.method == 'DELETE':
# id = request.data.get('id')
# stu = Student.objects.get(pk=id)
# stu.delete()
# return Response({'msg':'Data Deleted'})
|
# Generated by Django 3.0.7 on 2020-07-09 12:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Pet table with a name, birth
    # date, species/colour choice fields, and a cascading FK to the owning
    # app.Cliente. NOTE: generated code -- change schema via new migrations,
    # not by editing this file.

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Pet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=50)),
                ('nascimento', models.DateField()),
                ('categoria', models.CharField(choices=[('Ca', 'Cachorro'), ('Ga', 'Gato'), ('Co', 'Coelho')], max_length=2)),
                ('cor', models.CharField(choices=[('Pr', 'Preto'), ('Br', 'Branco'), ('Ci', 'Cinza'), ('Ma', 'Marrom')], max_length=2)),
                ('dono', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Cliente')),
            ],
        ),
    ]
|
# Count the vowels in a sample word with dict.setdefault, then demonstrate
# the same seeding pattern on a fruit-count dict.
vowels = ['a', 'e', 'i', 'o', 'u']
word = "iliketotravel"
vowel_dict = dict()
for ch in word:
    if ch in vowels:
        # setdefault seeds the count at 0 the first time a vowel appears,
        # replacing the explicit "if key not in dict" dance.
        vowel_dict[ch] = vowel_dict.setdefault(ch, 0) + 1
for vowel, count in sorted(vowel_dict.items()):
    print(vowel, "was found", count, "times")

fruits = dict()
# ----- USE of setdefault(key, value): seed then increment.
fruits.setdefault('pears', 0)
fruits['pears'] += 1
print(fruits)
|
import requests,execjs,re

# Scraper for guazi.com. The site sets an "antipas" anti-bot cookie whose
# value is computed by site JavaScript from a per-session seed; we extract
# the seed from the first response and evaluate the saved JS (2.js) with
# execjs to forge the cookie.
headers ={
    "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
    "Accept-Encoding":"gzip, deflate, br",
    "Accept-Language":"zh-CN,zh;q=0.9",
    "Cache-Control":"no-cache",
    "Connection":"keep-alive",
    "Host":"www.guazi.com",
    "Pragma":"no-cache",
    "Referer":"https://www.guazi.com/wh/honda/",
    "Upgrade-Insecure-Requests":"1",
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
}
# First request only serves to obtain the anti-bot seed.
res=requests.get("https://www.guazi.com/wh/dazhong/o9/#bread",headers=headers)
# print(res.text)
# The seed appears inline in the page as: value=anti(<seed>);var name='
value= re.findall(r"value=anti\((.*?)\);var name='",res.text)[0]
print("value:",value)
name='antipas'
url=''
def get_antipas(value):
    # Evaluate the site's anti() function (saved locally as 2.js) to turn
    # the seed into the antipas cookie value.
    with open("2.js") as f:
        js_code = f.read()
    antipas = execjs.compile(js_code).call("anti",value)
    print("antipas:",antipas)
    return "antipas="+antipas
# get_antipas(value)
headers ={
    "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
    "Accept-Encoding":"gzip, deflate, br",
    "Accept-Language":"zh-CN,zh;q=0.9",
    "Cache-Control":"no-cache",
    "Connection":"keep-alive",
    # "Cookie":get_antipas(value),
    # "Cookie":"antipas=82vA8xl42052710W343Ia0;",
    "Host":"www.guazi.com",
    "Pragma":"no-cache",
    "Referer":"https://www.guazi.com/wh/honda/",
    "Upgrade-Insecure-Requests":"1",
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
}
# headers1 carries the freshly computed antipas cookie.
headers1={
    "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
    "Accept-Encoding":"gzip, deflate, br",
    "Accept-Language":"zh-CN,zh;q=0.9",
    "Connection":"keep-alive",
    "Cookie":get_antipas(value),
    "Host":"www.guazi.com",
    "Referer":"https://www.guazi.com/wh/buy/",
    "Sec-Fetch-Mode":"navigate",
    "Sec-Fetch-Site":"same-origin",
    "Sec-Fetch-User":"?1",
    "Upgrade-Insecure-Requests":"1",
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
}
print(headers1)
# NOTE(review): this request sends `headers` (which has no Cookie), not the
# `headers1` dict that carries the computed antipas cookie -- this looks
# like it should be headers=headers1; confirm.
res=requests.get("https://www.guazi.com/wh/dazhong/o1/#bread",headers=headers)
print(res.text)
|
from sqlalchemy import create_engine
from sqlalchemy.ext.automap import automap_base
import datetime
import time
from datetime import timedelta
# SQLite backend for development; echo=True logs every emitted SQL statement.
engine = create_engine('sqlite:///database.db', echo=True)
# NOTE(review): credentials are committed in this commented-out connection
# string -- move them to configuration before re-enabling.
# engine = create_engine('postgresql://postgres:1sebaQuinta@localhost:5432/Gym', echo=True)
# Reflect the existing tables into mapped classes via automap.
Base = automap_base()
Base.prepare(engine, reflect=True)
User = Base.classes.users
Prenotation = Base.classes.prenotations
Shift = Base.classes.shifts
# ________________________________________ TO STRING ________________________________________
# Return the given input as a string
def to_string(user=None, shift=None):
    """Render a user as "fullname(email)\n" or a shift as its day/time
    window; returns None when neither argument is supplied."""
    if user is not None:
        return "{}({})\n".format(user.fullname, user.email)
    if shift is not None:
        day = shift.date.strftime("%Y-%m-%d")
        window = "({} --> {})".format(shift.h_start.strftime("%H:%M"),
                                      shift.h_end.strftime("%H:%M"))
        return "Day: " + day + " " + window + "\n"
# ________________________________________ USER ________________________________________
# - Given the email, returns the User who has got that email if exists
# - Given the id, returns the User who has got that id if exists
# - Given a prenotation, returns the User who did the prenotation
# Otherwise return None
def get_user(session, id=None, email=None, prenotation=None):
    """Look up a User by id, by email, or via a prenotation's client_id.

    Selectors are honored in that order; returns None when no selector is
    given or when no row matches.
    """
    if id is not None:
        return session.query(User).filter(User.id == id).one_or_none()
    if email is not None:
        return session.query(User).filter(User.email == email).one_or_none()
    if prenotation is not None:
        return session.query(User).filter(User.id == prenotation.client_id).one_or_none()
    return None
# Returns all user emails
def get_all_emails(session):
    """Return every registered email address (list of one-column rows)."""
    email_query = session.query(User.email)
    return email_query.all()
# - Given a User adds it to the database
# - Given fullname, email and password of a User adds it to the database
# Returns True if it was added correctly, False if the element was already contained
def add_user(session, user=None, fullname=None, email=None, pwd=None):
    """Add a User, either as a ready object or from fullname/email/pwd.

    Returns True when the user was added, False when an equivalent user
    already exists or too few fields were supplied.
    """
    if user is not None:
        if get_user(session, id=user.id) is not None:
            return False
        session.add(user)
        return True
    if fullname is not None and email is not None and pwd is not None:
        # BUG FIX: the original checked get_user(session, id=id), where `id`
        # is the *builtin function* (no local of that name exists), so the
        # duplicate check could never match an existing row; look the user
        # up by their unique email instead.
        if get_user(session, email=email) is not None:
            return False
        session.add(User(fullname=fullname, email=email, pwd=pwd))
        return True
    return False
# Adds all Users from the list given to the Database
# Returns True if all elements were added, False if at least one was already contained
def add_user_from_list(session, user_list):
    """Add every User in user_list; True only if all were newly added.

    Every element is attempted (no short-circuit), matching the original
    accumulate-with-&= behavior.
    """
    outcomes = [add_user(session, user=user) for user in user_list]
    return all(outcomes)
# ________________________________________ SHIFT ________________________________________
# - Given a date returns all Shifts in that day
# - Given a date and a starting hour return the corresponding Shift if exists
# - Given a prenotation returns the corresponding Shift
# Otherwise return None
def get_shift(session, date=None, start=None, prenotation=None):
    """Fetch shifts by day (list), by day + start hour (single row), or via
    a prenotation's shift_id (single row). Returns None with no selector."""
    if date is not None:
        day_query = session.query(Shift).filter(Shift.date == date)
        if start is None:
            return day_query.all()
        # Chained filters combine with AND, same as the original two-term filter.
        return day_query.filter(Shift.h_start == start).one_or_none()
    if prenotation is not None:
        return session.query(Shift).filter(Shift.id == prenotation.shift_id).one_or_none()
    return None
# Returns the list of all shifts for a date given the date, the starting and ending hour, the shift_lenght and the capacity
def get_daily_shifts(date, hour_start, hour_end, shift_lenght, capacity):
l = []
start = timedelta(hours=hour_start.hour, minutes=hour_start.minute)
lenght = timedelta(hours=shift_lenght.hour, minutes=shift_lenght.minute)
hour_end_ = timedelta(hours=hour_end.hour, minutes=hour_end.minute)
end = start + lenght
while(end <= hour_end_):
l.append(Shift(
date=date,
h_start= datetime.time(hour=start.seconds//3600, minute=(start.seconds//60)%60),
h_end= datetime.time(hour=end.seconds//3600, minute=(end.seconds//60)%60),
capacity=capacity
))
start = end
end = start + lenght
return l
# Returns all user-id who has prenoted for the shift given
def get_usersId_prenoted(session, shift):
    """Return a query over the ids of Users having a Prenotation for *shift*.

    BUG FIX: the parameter was named ``Shift``, shadowing the mapped Shift
    class; renamed to ``shift`` (all callers in this module pass positionally).
    """
    return session.query(User.id).join(Prenotation).filter(Prenotation.shift_id == shift.id)
# Returns the number of prenotations for the given Shift
def get_prenoted_count(session, shift):
    """Return how many Prenotations exist for *shift*.

    BUG FIX: previously forwarded the Shift *class* (capital S) to
    get_usersId_prenoted instead of the ``shift`` argument, so the count
    never referred to the requested shift.
    """
    return get_usersId_prenoted(session, shift).count()
# - Given a Shift adds it to the database
# - Given a date, starting and ending hour and capacity of a Shift adds it to the database
# Returns True if it was added correctly, False if the element was already contained
def add_shift(session, shift=None, date=None, start=None, end=None, capacity=None):
    """Add a Shift (object, or assembled from its fields) to the database.
    Returns True when added, False when a shift with the same date and
    starting hour already exists or the arguments are insufficient."""
    if shift is not None:
        if get_shift(session, date=shift.date, start=shift.h_start) is not None:
            return False
        session.add(shift)
        return True
    if date is not None and start is not None and end is not None and capacity is not None:
        if get_shift(session, date=date, start=start) is not None:
            return False
        session.add(Shift(date=date, h_start=start, h_end=end, capacity=capacity))
        return True
    return False
# Adds all Shifts from the list given to the Database
# Returns True if all elements were added, False if at least one was already contained
def add_shift_from_list(session, shift_list):
    """Add every Shift in shift_list; True iff every one was newly added."""
    all_added = True
    for entry in shift_list:
        if not add_shift(session, shift=entry):
            all_added = False
    return all_added
# ________________________________________ PRENOTATION ________________________________________
# - Given a User and a Shift returns the correponding Prenotation if exists
# - Given a User returns all his prenotations
# - Given a Shift returns all prenotations for that Shift
# - Given a date returns all prenotations for that day
# Returns None otherwise
def get_prenotation(session, user=None, shift=None, date=None):
    """Look up Prenotations by (user, shift), user, shift or date."""
    if user is not None and shift is not None:
        match = session.query(Prenotation).filter(
            Prenotation.client_id == user.id, Prenotation.shift_id == shift.id)
        return match.one_or_none()
    if user is not None:
        return session.query(Prenotation).filter(Prenotation.client_id == user.id).all()
    if shift is not None:
        # NOTE: this branch returns only the client ids, not full Prenotation rows.
        return session.query(Prenotation.client_id).filter(Prenotation.shift_id == shift.id).all()
    if date is not None:
        return session.query(Prenotation).join(Shift).filter(Shift.date == date).all()
    return None
# Adds a Prenotation to the Database given the User and the Shift or the Prenotion
# Returns True if it was added correctly,
#         False if the element was already contained
#         or the maximum capacity has already been reached for that shift
#         or the User was already in that turn
def add_prenotation(session, user=None, shift=None, prenotation=None):
    """Add a Prenotation for (user, shift), or for a Prenotation object from
    which the user and shift are resolved.

    Returns True on success; False when the prenotation already exists, the
    shift is full, the user is already booked in that shift, or the arguments
    are insufficient. The session is not committed here.
    """
    # Resolve user/shift from the prenotation object when they were not given.
    if user is None or shift is None:
        if prenotation is None:
            return False
        user = get_user(session, prenotation=prenotation)
        shift = get_shift(session, prenotation=prenotation)
    if get_prenotation(session, user=user, shift=shift) is not None:
        return False
    if get_prenoted_count(session, shift=shift) >= shift.capacity:
        return False
    # BUG FIX: the original passed the Shift *class* to get_usersId_prenoted
    # and then tested `user.id not in query`, comparing an int against result
    # rows (never matching); extract the raw ids before the membership test.
    prenoted_ids = [row[0] for row in get_usersId_prenoted(session, shift)]
    if user.id in prenoted_ids:
        return False
    session.add(Prenotation(client_id=user.id, shift_id=shift.id))
    # BUG FIX: the original fell off the end after session.add (returning
    # None), which broke add_prenotation_from_list's `b &= ...` accumulation.
    return True
# Adds all Prenotation from the list given to the Database
# Returns True if all elements were added,
# False if at least one was already contained or the maximum capacity has already been reached for that shift
def add_prenotation_from_list(session, prenotation_list):
    """Add every Prenotation in the list; True iff every one was accepted."""
    all_added = True
    for entry in prenotation_list:
        if not add_prenotation(session, prenotation=entry):
            all_added = False
    return all_added
|
# Functions for calibration of results
from __future__ import division, print_function
import sklearn.metrics as metrics
import numpy as np
import pickle
import keras
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import log_loss
import sklearn.metrics as metrics
from scipy.stats import percentileofscore
from sklearn.preprocessing import label_binarize
def evaluate_model(model, weights_file, x_test, y_test, bins = 15, verbose = True, pickle_file = None, x_val = None, y_val = None):
    """
    Evaluates the model, in addition calculates the calibration errors and
    saves the logits for later use, if "pickle_file" is not None.
    Parameters:
        model (keras.model): constructed model
        weights_file (string): path to weights file
        x_test: (numpy.ndarray) with test data
        y_test: (numpy.ndarray) with test data labels
        bins: (int) number of bins used by the calibration errors
        verbose: (boolean) print out results or just return these
        pickle_file: (string) path to pickle probabilities given by model
        x_val: (numpy.ndarray) with validation data (required when pickle_file is set)
        y_val: (numpy.ndarray) with validation data labels (required when pickle_file is set)
    Returns:
        (accuracy, ece, ece_cw): accuracy of model, ECE and classwise ECE
    """
    # Change last activation to linear (instead of softmax) so the network
    # outputs raw logits; softmax is applied explicitly below.
    last_layer = model.layers.pop()
    last_layer.activation = keras.activations.linear
    i = model.input
    o = last_layer(model.layers[-1].output)
    model = keras.models.Model(inputs=i, outputs=[o])
    # First load in the weights
    model.load_weights(weights_file)
    model.compile(optimizer="sgd", loss="categorical_crossentropy")
    # Next get predictions
    logits = model.predict(x_test, verbose=1)
    probs = softmax(logits)
    preds = np.argmax(probs, axis=1)
    # Find accuracy and error
    if y_test.shape[1] > 1:  # If 1-hot representation, get back to numeric
        y_test = np.array([[np.where(r==1)[0][0]] for r in y_test])  # Back to np array also
    accuracy = metrics.accuracy_score(y_test, preds) * 100
    error = 100 - accuracy
    # Confidence of prediction
    ece = ECE(probs, y_test, bin_size = 1/bins)
    ece_cw = classwise_ECE(probs, y_test, bins = bins, power = 1)
    if verbose:
        print("Accuracy:", accuracy)
        print("Error:", error)
        print("ECE:", ece)
        # BUG FIX: this value is the classwise ECE but was labelled "MCE:".
        print("ECE_CW:", ece_cw)
    # Pickle probabilities for test and validation
    if pickle_file:
        # Get predictions also for x_val
        logits_val = model.predict(x_val)
        probs_val = softmax(logits_val)
        preds_val = np.argmax(probs_val, axis=1)
        if y_val.shape[1] > 1:  # If 1-hot representation, get back to numeric
            y_val = np.array([[np.where(r==1)[0][0]] for r in y_val])  # Also convert back to np.array, TODO argmax?
        if verbose:
            print("Pickling the probabilities for validation and test.")
            print("Validation accuracy: ", metrics.accuracy_score(y_val, preds_val) * 100)
        # Write file with pickled data
        with open(pickle_file + '.p', 'wb') as f:
            pickle.dump([(logits_val, y_val),(logits, y_test)], f)
    # Return the basic results
    return (accuracy, ece, ece_cw)
def evaluate(probs, y_true, verbose = False, normalize = True, bins = 15):
    """
    Evaluate model using various scoring measures: Error Rate, ECE, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, NLL, Brier Score
    Params:
        probs: a list containing probabilities for all the classes with a shape of (samples, classes)
        y_true: a list containing the actual class labels
        verbose: (bool) are the scores printed out. (default = False)
        normalize: (bool) in case of 1-vs-K calibration, the probabilities need to be normalized.
        bins: (int) - into how many bins are probabilities divided (default = 15)
    Returns:
        (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier)
    """
    preds = np.argmax(probs, axis=1)  # Take maximum confidence as prediction
    accuracy = metrics.accuracy_score(y_true, preds) * 100
    error = 100 - accuracy
    # Calculate ECE and ECE2, + Classwise and Full (ECE2 =? Full_ECE)
    ece = ECE(probs, y_true, bin_size = 1/bins)
    ece2 = ECE(probs, y_true, bin_size = 1/bins, ece_full=True, normalize = normalize)
    ece_cw = classwise_ECE(probs, y_true, bins = bins, power = 1)
    ece_full = full_ECE(probs, y_true, bins = bins, power = 1)
    ece_cw2 = classwise_ECE(probs, y_true, bins = bins, power = 2)
    ece_full2 = full_ECE(probs, y_true, bins = bins, power = 2)
    # Calculate MCE
    mce = MCE(probs, y_true, bin_size = 1/bins, normalize = normalize)
    mce2 = MCE(probs, y_true, bin_size = 1/bins, ece_full=True, normalize = normalize)
    loss = log_loss(y_true=y_true, y_pred=probs)  # NLL
    brier = Brier(probs, y_true)  # Brier score (mean squared error)
    if verbose:
        print("Accuracy:", accuracy)
        print("Error:", error)
        print("ECE:", ece)
        print("ECE2:", ece2)
        print("ECE_CW", ece_cw)
        # BUG FIX: ece_cw used to be printed twice; this line now reports ece_cw2.
        print("ECE_CW2", ece_cw2)
        print("ECE_FULL", ece_full)
        print("ECE_FULL2", ece_full2)
        print("MCE:", mce)
        print("MCE2:", mce2)
        print("Loss:", loss)
        print("brier:", brier)
    return (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier)
def evaluate_rip(probs, y_true, verbose = False, normalize = True, bins = 15):
    """
    Evaluate model using a reduced set of scoring measures; the expensive
    "full"/squared variants are skipped and reported as the placeholder -1.
    Params:
        probs: a list containing probabilities for all the classes with a shape of (samples, classes)
        y_true: a list containing the actual class labels
        verbose: (bool) are the scores printed out. (default = False)
        normalize: (bool) in case of 1-vs-K calibration, the probabilities need to be normalized.
        bins: (int) - into how many bins are probabilities divided (default = 15)
    Returns:
        (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier)
        with -1 placeholders for the skipped measures
    """
    preds = np.argmax(probs, axis=1)  # Take maximum confidence as prediction
    accuracy = metrics.accuracy_score(y_true, preds) * 100
    error = 100 - accuracy
    ece = ECE(probs, y_true, bin_size = 1/bins)
    ece2 = -1
    ece_cw = classwise_ECE(probs, y_true, bins = bins, power = 1)
    ece_full = -1
    ece_cw2 = -1
    ece_full2 = -1
    # Calculate MCE
    mce = MCE(probs, y_true, bin_size = 1/bins, normalize = normalize)
    mce2 = -1
    loss = log_loss(y_true=y_true, y_pred=probs)  # NLL
    brier = Brier(probs, y_true)  # Brier score (mean squared error)
    if verbose:
        print("Accuracy:", accuracy)
        print("Error:", error)
        print("ECE:", ece)
        print("ECE2:", ece2)
        print("ECE_CW", ece_cw)
        # BUG FIX: ece_cw used to be printed twice; this line now reports ece_cw2.
        print("ECE_CW2", ece_cw2)
        print("ECE_FULL", ece_full)
        print("ECE_FULL2", ece_full2)
        print("MCE:", mce)
        print("MCE2:", mce2)
        print("Loss:", loss)
        print("brier:", brier)
    return (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier)
def evaluate_slim(probs, y_true, verbose = False, normalize = True, bins = 15):
    """
    Slim evaluation: Error Rate, ECE, classwise ECE, MCE, NLL and Brier score.
    Params:
        probs: (samples, classes) class probabilities
        y_true: actual class labels
        verbose: (bool) also print the scores (default = False)
        normalize: (bool) renormalize confidences for 1-vs-K calibration (used by MCE)
        bins: (int) number of probability bins (default = 15)
    Returns:
        (error, ece, ece_cw, mce, loss, brier)
    """
    predictions = np.argmax(probs, axis=1)  # highest-confidence class
    acc = metrics.accuracy_score(y_true, predictions) * 100
    err = 100 - acc
    ece = ECE(probs, y_true, bin_size=1/bins)
    ece_cw = classwise_ECE(probs, y_true, bins=bins, power=1)
    mce = MCE(probs, y_true, bin_size=1/bins, normalize=normalize)
    loss = log_loss(y_true=y_true, y_pred=probs)
    brier = Brier(probs, y_true)
    if verbose:
        print("Accuracy:", acc)
        print("Error:", err)
        print("ECE:", ece)
        print("ECE_CW", ece_cw)
        print("MCE:", mce)
        print("Loss:", loss)
        print("brier:", brier)
    return (err, ece, ece_cw, mce, loss, brier)
def softmax(x):
    """
    Compute softmax values for each set of scores in x.
    Parameters:
        x (numpy.ndarray): array containing m samples with n-dimensions (m,n)
                           (or a single 1-D score vector)
    Returns:
        x_softmax (numpy.ndarray) softmaxed values for initial (m,n) array
    """
    x = np.asarray(x)
    # Axis 0 if only one dimensional array
    axis = 0 if x.ndim == 1 else 1
    # BUG FIX: the maximum is now taken per row (keepdims) instead of globally.
    # Subtracting the single global max left rows far below it with all-zero
    # exponentials, producing 0/0 = NaN; per-row subtraction is the standard
    # numerically stable formulation and is mathematically identical.
    e_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e_x / e_x.sum(axis=axis, keepdims=1)
def get_preds_all(y_probs, y_true, axis = 1, normalize = False, flatten = True):
    """
    One-hot encode predictions and true labels so each class position can be
    compared element-wise (used by the "full" variants of ECE/MCE).
    Parameters:
        y_probs: (numpy.ndarray) class probabilities, shape (samples, classes)
        y_true: (numpy.ndarray) true labels as a column vector (samples, 1)
        axis: (int) class axis of y_probs
        normalize: (bool) renormalize each row of y_probs to sum to 1.
                   NOTE: done in place with /=, so the caller's array is mutated.
        flatten: (bool) flatten all three outputs to 1-D
    Returns:
        (y_preds, y_probs, y_true): one-hot (optionally flattened) arrays
    """
    y_preds = np.argmax(y_probs, axis=axis) # Take maximum confidence as prediction
    y_preds = y_preds.reshape(-1, 1)
    if normalize:
        y_probs /= np.sum(y_probs, axis=axis).reshape(-1,1)
    # Encoder is fitted on the *predicted* classes only; true labels of classes
    # never predicted are encoded as all-zero rows (handle_unknown='ignore').
    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    enc.fit(y_preds)
    y_preds = enc.transform(y_preds)
    y_true = enc.transform(y_true)
    if flatten:
        y_preds = y_preds.flatten()
        y_true = y_true.flatten()
        y_probs = y_probs.flatten()
    return y_preds, y_probs, y_true
def compute_acc_bin_legacy(conf_thresh_lower, conf_thresh_upper, conf, pred, true):
    """
    Computes accuracy and average confidence for one confidence bin.
    Args:
        conf_thresh_lower (float): Lower Threshold of confidence interval
        conf_thresh_upper (float): Upper Threshold of confidence interval
        conf (numpy.ndarray): list of confidences
        pred (numpy.ndarray): list of predictions
        true (numpy.ndarray): list of true labels
    Returns:
        (accuracy, avg_conf, len_bin): accuracy of bin, confidence of bin and number of elements in bin.
    """
    in_bin = [(p, t, c) for p, t, c in zip(pred, true, conf)
              if conf_thresh_lower < c <= conf_thresh_upper]
    if not in_bin:
        return 0, 0, 0
    n = len(in_bin)
    hits = sum(1 for p, t, _ in in_bin if p == t)
    mean_conf = sum(c for _, _, c in in_bin) / n
    return float(hits) / n, mean_conf, n
def compute_acc_bin(conf_thresh_lower, conf_thresh_upper, conf, pred, true, ece_full = False):
    """
    Computes accuracy and average confidence for one confidence bin.
    The lowest bin (lower threshold 0) also includes confidences equal to 0.
    Args:
        conf_thresh_lower (float): Lower Threshold of confidence interval
        conf_thresh_upper (float): Upper Threshold of confidence interval
        conf (numpy.ndarray): list of confidences
        pred (numpy.ndarray): list of predictions
        true (numpy.ndarray): list of true labels
        ece_full (bool): when True, "true" holds per-position targets and the
                         bin accuracy is their mean instead of a hit rate
    Returns:
        (accuracy, avg_conf, len_bin): accuracy of bin, confidence of bin and number of elements in bin.
    """
    selected = [(p, t, c) for p, t, c in zip(pred, true, conf)
                if (c > conf_thresh_lower or conf_thresh_lower == 0) and c <= conf_thresh_upper]
    if not selected:
        return 0, 0, 0
    n = len(selected)
    mean_conf = sum(c for _, _, c in selected) / n
    if ece_full:
        accuracy = np.mean([t for _, t, _ in selected])  # mean target value in bin
    else:
        hits = sum(1 for p, t, _ in selected if p == t)
        accuracy = float(hits) / n
    return accuracy, mean_conf, n
def ECE(probs, true, bin_size = 0.1, ece_full = False, normalize = False):
    """
    Expected Calibration Error: bin predictions by confidence and sum the
    per-bin |accuracy - confidence| gaps weighted by bin occupancy.
    Args:
        probs (numpy.ndarray): list of probabilities (samples, nr_classes)
        true (numpy.ndarray): list of true labels (samples, 1)
        bin_size (float): size of one confidence bin in (0,1)
        ece_full (bool): compare every class position, not just the argmax
        normalize (bool): renormalize confidences by the row sums
    Returns:
        ece: expected calibration error
    """
    probs = np.array(probs)
    true = np.array(true)
    # Collapse one-hot labels to class indices.
    if len(true.shape) == 2 and true.shape[1] > 1:
        true = true.argmax(axis=1).reshape(-1, 1)
    if ece_full:
        pred, conf, true = get_preds_all(probs, true, normalize=normalize, flatten=ece_full)
    else:
        pred = np.argmax(probs, axis=1)  # highest-confidence class
        conf = np.max(probs, axis=1)
        if normalize:
            conf = conf / np.sum(probs, axis=1)
    n = len(conf)
    total = 0
    for upper in np.arange(bin_size, 1 + bin_size, bin_size):
        acc, avg_conf, len_bin = compute_acc_bin(upper - bin_size, upper, conf, pred, true, ece_full)
        total += np.abs(acc - avg_conf) * len_bin / n  # occupancy-weighted gap
    return total
def MCE(probs, true, bin_size = 0.1, ece_full=False, normalize = False):
    """
    Maximal Calibration Error: the largest |accuracy - confidence| gap over
    all confidence bins.
    Args:
        probs (numpy.ndarray): list of probabilities (samples, nr_classes)
        true (numpy.ndarray): list of true labels
        bin_size (float): size of one confidence bin in (0,1)
        ece_full (bool): compare every class position, not just the argmax
        normalize (bool): renormalize confidences by the row sums
    Returns:
        mce: maximum calibration error
    """
    if ece_full:
        pred, conf, true = get_preds_all(probs, true, normalize=normalize, flatten=ece_full)
    else:
        pred = np.argmax(probs, axis=1)  # highest-confidence class
        conf = np.max(probs, axis=1)
        if normalize:
            conf = conf / np.sum(probs, axis=1)
    worst = 0
    for upper in np.arange(bin_size, 1 + bin_size, bin_size):
        acc, avg_conf, _ = compute_acc_bin(upper - bin_size, upper, conf, pred, true, ece_full)
        worst = max(worst, np.abs(acc - avg_conf))
    return worst
def Brier(probs, true):
    """
    Brier score (mean squared error between probabilities and one-hot labels),
    normalized by both the number of samples and the number of classes.
    Args:
        probs (list): 2-D list of probabilities
        true (list): 1-D list of true labels
    Returns:
        brier: brier score
    """
    assert len(probs) == len(true)
    n_samples = len(true)
    n_classes = len(probs[0])
    total = 0
    for sample, label in zip(probs, true):
        for cls in range(n_classes):
            target = 1 if cls == label else 0  # one-hot target for this class
            total += (sample[cls] - target) ** 2
    return total / n_samples / n_classes
def get_bin_info(conf, pred, true, bin_size = 0.1):
    """
    Get accuracy, confidence and element count for every confidence bin
    (the raw ingredients for a reliability diagram).
    Args:
        conf (numpy.ndarray): list of confidences
        pred (numpy.ndarray): list of predictions
        true (numpy.ndarray): list of true labels
        bin_size (float): size of one confidence bin in (0,1)
    Returns:
        (accuracies, confidences, bin_lengths): one entry per bin.
    """
    accs, confs, sizes = [], [], []
    for upper in np.arange(bin_size, 1 + bin_size, bin_size):
        acc, avg_conf, count = compute_acc_bin(upper - bin_size, upper, conf, pred, true)
        accs.append(acc)
        confs.append(avg_conf)
        sizes.append(count)
    return accs, confs, sizes
def binary_ECE(probs, y_true, power = 1, bins = 15):
    """Binary ECE: occupancy-weighted |mean prob - mean label|**power over
    the occupied probability bins of a single class."""
    edges = np.linspace(0, 1, bins)
    bin_idx = np.digitize(probs, edges) - 1
    total = 0
    for b in np.unique(bin_idx):
        mask = bin_idx == b
        gap = np.abs(np.mean(probs[mask]) - np.mean(y_true[mask])) ** power
        total += gap * np.sum(mask) / len(probs)
    return total
def classwise_ECE(probs, y_true, power = 1, bins = 15):
    """Classwise ECE: the mean of the binary ECEs of every class column
    (labels are one-hot encoded first if not already)."""
    probs = np.array(probs)
    if not np.array_equal(probs.shape, y_true.shape):
        y_true = label_binarize(np.array(y_true), classes=range(probs.shape[1]))
    per_class = [
        binary_ECE(probs[:, c], y_true[:, c].astype(float), power=power, bins=bins)
        for c in range(probs.shape[1])
    ]
    return np.mean(per_class)
def simplex_binning(probs, y_true, bins = 15):
    """Group samples whose probability vectors fall into the same cell of the
    binned probability simplex; return [count, mean probs, mean labels] per cell."""
    probs = np.array(probs)
    if not np.array_equal(probs.shape, y_true.shape):
        y_true = label_binarize(np.array(y_true), classes=range(probs.shape[1]))
    idx = np.digitize(probs, np.linspace(0, 1, bins)) - 1
    prob_bins = {}
    label_bins = {}
    for i, row in enumerate(idx):
        # The per-class bin indices form the cell key, e.g. "3,0,11".
        key = ','.join(str(r) for r in row)
        prob_bins.setdefault(key, []).append(probs[i])
        label_bins.setdefault(key, []).append(y_true[i])
    return [
        [
            len(prob_bins[key]),
            np.mean(np.array(prob_bins[key]), axis=0),
            np.mean(np.array(label_bins[key]), axis=0),
        ]
        for key in prob_bins
    ]
def full_ECE(probs, y_true, bins = 15, power = 1):
    """Full ECE over the binned probability simplex: for every occupied cell,
    the occupancy-weighted sum of per-class |mean prob - mean label|**power."""
    n = len(probs)
    probs = np.array(probs)
    if not np.array_equal(probs.shape, y_true.shape):
        y_true = label_binarize(np.array(y_true), classes=range(probs.shape[1]))
    bin_idx = np.digitize(probs, np.linspace(0, 1, bins)) - 1
    total = 0
    for pattern in np.unique(bin_idx, axis=0):
        members = np.where((bin_idx == pattern).all(axis=1))[0]
        gap = np.abs(np.mean(probs[members], axis=0) - np.mean(y_true[members], axis=0)) ** power
        total += (len(members) / n) * gap.sum()
    return total
def label_resampling(probs):
    """Draw one one-hot label per row by sampling from that row's
    categorical distribution (inverse-CDF sampling)."""
    cum = probs.cumsum(axis=1)
    draws = np.random.rand(len(cum), 1)
    # First index where the uniform draw falls below the running CDF.
    picked = (draws < cum).argmax(axis=1)
    sampled = np.zeros_like(probs)
    sampled[np.arange(len(probs)), picked] = 1
    return sampled
def score_sampling(probs, samples = 10000, ece_function = None):
    """Monte-Carlo null distribution of a calibration score: re-sample labels
    from probs `samples` times and score each draw with ece_function."""
    probs = np.array(probs)
    scores = [ece_function(probs, label_resampling(probs)) for _ in range(samples)]
    return np.array(scores)
def pECE(probs, y_true, samples = 10000, ece_function = full_ECE):
    """p-value-style calibration score: the fraction of label re-samples whose
    score is at least as large as the observed one (1 - percentile/100)."""
    probs = np.array(probs)
    if not np.array_equal(probs.shape, y_true.shape):
        y_true = label_binarize(np.array(y_true), classes=range(probs.shape[1]))
    observed = ece_function(probs, y_true)
    null_scores = score_sampling(probs, samples=samples, ece_function=ece_function)
    return 1 - percentileofscore(null_scores, observed) / 100
|
import pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import itertools
import os
# ---- Plot configuration -------------------------------------------------
results = None
# matplotlib.rcParams.update({'font.size': 12})
# One fixed color per method, sampled from the tab10 colormap.
color_list = plt.cm.tab10(np.linspace(0, 1, 10))
colors = {'lstm': color_list[0], 'pf_e2e': color_list[1], 'pf_ind_e2e': color_list[2], 'pf_ind': color_list[3]}
labels = {'lstm': 'LSTM', 'pf_e2e': 'DPF (e2e)', 'pf_ind_e2e': 'DPF (ind+e2e)', 'pf_ind': 'DPF (ind)', 'ff': 'FF', 'odom': 'Odom. baseline'}
# conditions = ['normal', 'no_motion_likelihood', 'learn_odom', 'no_proposer']
# conditions = ['normal', 'learn_odom', 'no_inject']
# clabels = {'normal': 'Default', 'no_motion_likelihood': 'W/o motion likelihood', 'learn_odom': 'Learned odometry', 'no_proposer': 'W/o particle proposer', 'no_inject': "No inject"}
# Train/test combinations; 'lc', 'pl', 'mx' presumably name policies
# (TODO confirm) -- 'X2Y' reads "trained on X, tested on Y".
conditions = ['lc2lc', 'pl2lc', 'mx2lc', 'lc2pl', 'pl2pl', 'mx2pl']
clabels = {'lc2lc':'lc2lc', 'lc2pl':'lc2pl', 'pl2lc':'pl2lc', 'pl2pl':'pl2pl', 'mx2lc': 'mx2lc', 'mx2pl': 'mx2pl'}
task = 'nav02'
methods = ['pf_ind', 'pf_e2e', 'pf_ind_e2e', 'lstm']
# methods = ['pf_ind_e2e', 'lstm']
# load results
results = dict()
count = 0
# Load every pickled result file under ../log/<condition>/ and merge runs that
# share the same result name by extending their per-key lists.
for cond in conditions:
    # log_path = '/home/rbo/Desktop/log/'+task+'_ab1'
    log_path = '../log/'+cond
    for filename in [f for f in os.listdir(log_path) if os.path.isfile(os.path.join(log_path, f))]:
        full_filename = os.path.join(log_path, filename)
        print('loading {}:'.format(count) + full_filename + ' ...')
        try:
            # if 'DeepThought' not in filename:
            # if 'DeepThought' in filename:
            with open(full_filename, 'rb') as f:
                result = pickle.load(f)
            # result_name = result['task'][0] + '/' + result['method'][0] + '/' + str(result['num_episodes'][0]) + '/' + result['condition'][0]
            result_name = cond + '_' + result['exp_params'][0]['file_ending'] #result['exp_params'][0]['task'] + '/' + result['exp_params'][0]['method'] + '/' + str(result['exp_params'][0]['num_episodes']) + '/' + result['exp_params'][0]['ab_cond']
            print(result_name)
            if result_name not in results.keys():
                results[result_name] = result
            else:
                # Merge repeated runs: += concatenates the per-key lists.
                for key in result.keys():
                    if key in results[result_name].keys():
                        results[result_name][key] += result[key]
                    else:
                        results[result_name][key] = result[key]
                    # print(result_name, key)
            count += 1
        except Exception as e:
            # Best-effort loading: report unreadable files and keep going.
            print(e)
            print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
            print()
for result_name in results.keys():
    print(result_name, len(results[result_name]['test_mse']))
print('Loaded {} results'.format(count))
# print(results['test_errors'].shape, np.mean(results['test_errors']**2, axis=1))
#print('SHAPE', results['test_mse'].shape)
# plt.figure(1)
# plt.gca().set_color_cycle(None)
# for method in set(results['method']):
# ---- Aggregate error rates and draw one 2x3 bar-chart figure per episode
# count: rows = test policy, columns = training policy, bars = methods.
task = 'nav02'
# step = 30
step = 3
episodes = [1000]
# episodes = [1000]
fig_names = []
max_1 = 0
max_2 = {n: 0 for n in episodes}
means = dict()
ses = dict()
for num_episodes in episodes:
    means[num_episodes] = dict()
    ses[num_episodes] = dict()
    for method in methods:
        means[num_episodes][method] = np.zeros([len(conditions), 5])
        # means[num_episodes][method] = np.zeros([len(conditions), 50])
        ses[num_episodes][method] = np.zeros([len(conditions), 5])
        # ses[num_episodes][method] = np.zeros([len(conditions), 50])
        for c, condition in enumerate(conditions):
            result_name = condition + '_' + task + '_' + method + '_' + str(num_episodes)
            if result_name in results.keys():
                result = results[result_name]
                # means[num_episodes][method][c] = np.mean(result['test_mse'], axis=0)
                # std = np.std(result['test_mse'], axis=0, ddof=1)
                # ses[num_episodes][method][c] = std / np.sqrt(len(result['test_mse']))
                hist = np.array([[h[i] for i in range(0, 50, 10)] for h in result['test_hist']]) # result x time x sqe [.0, 0.1, .., 10.0]
                # Error rate = fraction of runs NOT within the first 10
                # histogram cells (squared error below 1.0).
                err = 1. - np.sum(hist[:,:,:10], axis=-1) # sqe < 1.0
                # err = np.sum(hist[:,:,:10], axis=-1) # sqe < 1.0
                print(result_name, err)
                means[num_episodes][method][c] = np.mean(err, axis=0)
                # Standard error of the mean across runs.
                ses[num_episodes][method][c] = np.std(err, axis=0, ddof=1) / np.sqrt(len(err))
                print(means[num_episodes][method][c])
            else:
                # print(result_name, 0)
                # Missing combination: blank out the bars with NaNs.
                means[num_episodes][method][c] *= np.nan
                ses[num_episodes][method][c] *= np.nan
    means[num_episodes]['min'] = np.stack([means[num_episodes][method] for method in methods], axis=0).min(axis=1)
    fig_name = 'ab1_{}'.format(num_episodes)
    fig = plt.figure(fig_name, [6, 3.5])
    fig_names.append(fig_name)
    ax = fig.add_subplot(111)
    # Turn off axis lines and ticks of the big subplot
    ax.spines['top'].set_color('none')
    ax.spines['bottom'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
    for c, condition in enumerate(conditions):
        sax = fig.add_subplot(2, 3, c+1)
        for m, method in enumerate(methods):
            # One bar per method, spread evenly inside the subplot.
            sax.bar(0.0 - 0.5 + (m+1)/len(methods)*0.8,
                    means[num_episodes][method][c, step],
                    0.8/len(methods),
                    yerr=ses[num_episodes][method][c, step],
                    color=colors[method], label=labels[method])
            # Bar label like ".42" (leading zero stripped, max 3 chars).
            text = '{:.3s}'.format('{:.2f}'.format(means[num_episodes][method][c, step])[1:])
            plt.text(0.0 - 0.5 + (m+1)/len(methods)*0.8, means[num_episodes][method][c, step] + ses[num_episodes][method][c, step] + 0.05, text, va='center', ha='center', color=colors[method], fontweight='normal')
        # sax.set_ylim([0.0, 1.05])
        sax.set_ylim([0.0, 1.0])
        sax.set_xticks([])
        sax.set_yticks([])
        # if c % 2 == 0:
        # if c >= 2:
        if 'lc2' in condition:
            xlabel = 'A'
            # Leftmost column of the grid: label the test policy on the y axis.
            sax.set_ylabel(('A' if '2lc' in condition else 'B'), fontweight = 'bold')
        elif 'pl2' in condition:
            xlabel = 'B'
        elif 'mx2' in condition:
            xlabel = 'A+B'
        if '2pl' in condition:
            # Bottom row of the grid: label the training policy on the x axis.
            sax.set_xlabel(xlabel, fontweight = 'bold')
        if c == 0:
            plt.legend()
    ax.set_xlabel('Trained with policy')
    ax.set_ylabel('Error rate in test with policy\n')
plt.tight_layout(h_pad=0.0, w_pad=0.0, pad=0.0)
plt.savefig('../plots/cr/policy.pdf', bbox_inches="tight", transparent=True, dpi=600, frameon=True, facecolor='w', pad_inches=0.01)
plt.show()
|
""" All functions necessary to create and return views """
from datetime import datetime
from math import floor, ceil
from statistics import mean
import json
import requests
from django.core.cache import cache
def get_start_and_end_date(site_id, api_key):
    """ Returns the start and the end date of the data in SolarEdge's database """
    url = f'https://monitoringapi.solaredge.com/site/{site_id}/dataPeriod?api_key={api_key}'
    period = json.loads(requests.get(url).content)['dataPeriod']
    return (period['startDate'], period['endDate'])
def get_averages(input_list, avg_range):
    """Smooth a list of [timestamp, value] pairs with a centred moving average
    of width avg_range (window clamped at both ends); each averaged value is
    rounded to one decimal place."""
    half_back = floor(avg_range / 2)
    half_forward = ceil(avg_range / 2)
    smoothed = []
    for index, item in enumerate(input_list):
        lo = max(index - half_back, 0)
        hi = min(index + half_forward, len(input_list))
        window = [pair[1] for pair in input_list[lo:hi]]
        smoothed.append([item[0], round(mean(window), 1)])
    return smoothed
def is_it_night(lat, lng):
    """ Checks if it is currently night """
    url = f'http://api.sunrise-sunset.org/json?lat={lat}&lng={lng}&formatted=0'
    data = json.loads(requests.get(url).content)['results']
    now = datetime.utcnow()
    fmt = '%Y-%m-%dT%H:%M:%S+00:00'
    sunrise = datetime.strptime(data['sunrise'], fmt)
    sunset = datetime.strptime(data['sunset'], fmt)
    # Night is everything outside the (sunrise, sunset) interval.
    return not (sunrise < now < sunset)
def get_current_power(site_id, api_key):
    """ Checks what current power output is of the solar panels """
    url = f'https://monitoringapi.solaredge.com/site/{site_id}/overview?api_key={api_key}'
    overview = json.loads(requests.get(url).content)['overview']
    return overview['currentPower']['power']
def what_is_the_weather(site_id, api_key, lat, lng):
    """ checks what the weather is and returns a FontAwesome class """
    # Serve the cached symbol if one is still valid.
    symbol = cache.get('weather_symbol')
    if symbol is not None:
        return symbol
    # Both lookups are performed unconditionally, matching the original call
    # pattern (one sunrise/sunset request, one power request).
    night = is_it_night(lat, lng)
    power = get_current_power(site_id, api_key)
    if night:
        symbol = 'fa-moon'
    elif power > 1000:
        symbol = 'fa-sun'
    elif power > 100:
        symbol = 'fa-cloud-sun'
    elif power >= 0:
        symbol = 'fa-cloud'
    else:
        # Negative power readings indicate something is wrong.
        symbol = 'fa-exclamation-triangle'
    cache.set('weather_symbol', symbol, 5 * 60)  # cache for five minutes
    return symbol
|
# Console registration tool: prompts for personal details and writes each
# record (one field per line plus a separator) to infoDB.txt.
detail_list = ["Name : ", "Family : ", "Age : ", "Gender : ", "BirthDate : ", "Nationality : ", "National ID : "]
# NOTE: mode 'w' truncates any previous contents on every run -- kept as 'w'
# to preserve the original behavior; switch to 'a' to accumulate records.
with open("infoDB.txt", 'w') as file:  # context manager guarantees the file is closed
    keep_going = True
    while keep_going:
        # Iterate the prompts directly instead of indexing by range(len(...)).
        for prompt in detail_list:
            file.write(prompt + input(prompt) + "\n")
        file.write("-------------------------------------------------------\n")
        # Any answer other than n/N registers another person.
        if input("register anyone else ? Y/N").strip() in ('n', 'N'):
            keep_going = False
|
no1=int(input("Enter number to show number is prime or not"))
# A number is prime iff it is greater than 1 and has no divisor in [2, no1).
# BUG FIXES vs. the original:
#  - removed the call to the undefined isprime() (NameError at runtime);
#  - the divisibility test was reversed: it checked i % no1 != 0 (true for
#    every i < no1), so every number above 2 was reported as not prime;
#  - numbers below 2 are no longer reported as prime.
is_composite = no1 < 2
for i in range(2, no1):
    if no1 % i == 0:
        is_composite = True
        break  # one divisor is enough
if is_composite:
    print("not Prime number")
else:
    print(" Prime Number")
|
# schema.org type identifier attached to every PostalAddress instance.
TYPE = "schema:PostalAddress"
class PostalAddress:
    """Maps an event record onto a schema.org PostalAddress."""
    def __init__(self,er_event):
        # Tag the instance with its schema.org type. er_event is accepted but
        # not consumed here -- presumably address fields are filled from it in
        # the (truncated) remainder of the class; TODO confirm.
        self._type = TYPE
#... |
from model import DrivetrainModel
from model.motors import _775pro
from optimizer import Optimizer
if __name__ == "__main__":
    # Build the drivetrain model: eight 775pro motors, 26:1 gearing, 68 kg
    # robot; wheel_diameter converts 6 inches to meters (6 * 0.0254).
    model = DrivetrainModel(_775pro(8), gear_ratio=26, robot_mass=68, wheel_diameter=6 * 0.0254,
                            motor_voltage_limit=12, motor_current_limit=30, max_dist=6)
    # Sweep gear ratios 10..50 in steps of 1 over distances up to 6 (steps of
    # 0.2) and dump the optimization results to a spreadsheet.
    op = Optimizer(min_ratio=10, max_ratio=50, ratio_step=1,
                   max_dist=6, distance_step=0.2,
                   model=model)
    op.run()
    op.save_xlsx('/tmp/optimize_drivetrain.xlsx')
|
def main(size=1001):
    """Print and return the sum of both diagonals of a size x size number
    spiral (Project Euler problem 28).

    :param size: odd side length of the spiral grid; defaults to 1001 to
        preserve the original script's behaviour.
    :return: the diagonal sum (the original returned None, which made the
        result unusable by callers and untestable).
    """
    diag_sum = 1       # the centre cell (1) lies on both diagonals
    last_number = 1
    # Ring with side length step+1 contributes four corners spaced `step`
    # apart; step runs 2, 4, ..., size-1.
    for step in range(2, size, 2):
        for _ in range(4):
            last_number += step
            diag_sum += last_number
    print(diag_sum)
    return diag_sum


main()
|
from django import forms
# from django.contrib.auth.models import User
from . models import SignupUser
from django.core.validators import validate_email
class userform(forms.Form):
    """Signup form.

    The clean_<field> hooks enforce uniqueness of username / phone / email
    against SignupUser and check that the two password fields agree.
    """
    name = forms.CharField(widget=forms.TextInput(attrs={'class': 'input--style-3 cw', 'placeholder': 'Name'}), required=True, max_length=50)
    username = forms.CharField(widget=forms.TextInput(attrs={'class': 'input--style-3 cw', 'placeholder': 'Username'}), required=True, max_length=50)
    email = forms.EmailField(widget=forms.EmailInput(attrs={'class': 'input--style-3 cw', 'placeholder': 'Email'}), required=True,)
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'input--style-3 cw', 'placeholder': 'Password'}),required=True,)
    password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'input--style-3 cw', 'placeholder': 'Confirm password'}),required=True,)
    phone = forms.IntegerField(widget=forms.TextInput(attrs={'class': 'input--style-3 cw', 'placeholder': 'Phone Number'}), required=True, )

    class Meta():
        model = SignupUser
        fields = ['name','username','email','password1','password2','phone']

    def clean_username(self):
        """Reject usernames already registered."""
        user = self.cleaned_data['username']
        # exists() replaces the original bare try/except around .get(): it is
        # a single query and, unlike the bare except, it does not silently
        # accept a username when duplicates already exist in the table.
        if SignupUser.objects.filter(Username=user).exists():
            raise forms.ValidationError("Username already in use.")
        return user

    def clean_phone(self):
        """Reject phone numbers already registered."""
        phone = self.cleaned_data['phone']
        if SignupUser.objects.filter(Phone=phone).exists():
            raise forms.ValidationError("Phone Number already in use.")
        return phone

    def clean_email(self):
        """Validate the email format and reject addresses already registered."""
        email = self.cleaned_data['email']
        try:
            validate_email(email)
        except forms.ValidationError:
            # BUG FIX: the original *returned* forms.ValidationError(...) here,
            # handing the exception object back as cleaned data instead of
            # failing validation.
            raise forms.ValidationError("Email is not in correct format")
        if SignupUser.objects.filter(Email=email).exists():
            raise forms.ValidationError("Email already in use.")
        return email

    def clean_password2(self):
        """Check that both password fields agree."""
        # .get() avoids a KeyError when password1 failed its own validation.
        pas = self.cleaned_data.get('password1')
        cpas = self.cleaned_data.get('password2')
        if pas and cpas and pas != cpas:
            raise forms.ValidationError("Password and Confirm password not matched")
        # BUG FIX: the original fell off the end and returned None, which
        # replaced the cleaned password2 value with None.
        return cpas
"""
Created on Fri Jan 13 2017 09:00:00
@author: Peter Harris, NPL\MM
@author: Sam Hunt, NPL\ENV
"""
'''___Python Modules____'''
from copy import deepcopy
from numpy import array, append, zeros
from numpy import sum as npsum
'''___Harmonisation Modules___'''
from harm_data_reader import HarmData
from correl_forms import CorrelForm
class ConvertData:
    """
    Class contains methods to convert input data into forms suitable for the pre-conditioner algorithm and the
    Gauss-Netwon harmonisation algorithm

    NOTE: this module is Python 2 code (print statements, xrange).

    :Methods:
        :convert2ind:
            reparameterises input data such that output data are independent quantities, required for the Gauss-Newton
            and pre-conditioner algorithm
        :sample4PC:
            sample data so that the only remaining correlations arise from systematic effects, required for the
            pre-conditioner algorithm
    """

    def convert2ind(self, HData):
        """
        Return a reparameterisation of the input data such that output data are independent quantities (suitable for
        use in the pre-conditioner and Gauss-Newton algorithms)

        :param HData: HarmData
            Input harmonisation data for conversion

        :return:
            :HData: HarmData
                Reparameterised harmonisation data suitable for pre-conditioner and GN algorithm

        NOTE: HData is modified in place (values/idx are replaced and ks is
        rescaled) and the same object is returned.
        """

        # Convert data depending on it's correlation form to remove correlation:
        # 1. Random Form
        #    No correlation, so no action required. Scale by uncertainty.
        #
        # 2. Random+Systematic Form
        #    Separate random and systematic components:
        #    > random component - scale data by random uncertainty
        #    > systematic component - add 0 value for each block to the end of the final
        #      block of the covariate
        #
        # 3. Average Form
        #    Simulate raw data used to compute averages (results in n_mu + n - 1 variables
        #    per block, where n_mu is the number of match-ups in the block and n is size of
        #    the averaging window)

        # initialise empty array to store converted harmonisation data
        new_values = array([])

        # initialise copy of harmonisation idx to update for converted data
        new_idx = deepcopy(HData.idx)  # deep copy required to ensure copy of nested lists in dict

        # counter to track block number through covariate, e.g. 2*N_mu-1 blocks per covariate
        # (used to add systematic uncertainty data to the end of the all blocks of a given
        # covariate)
        n_b_cov = 0
        N_bpcov = 2*HData.idx['n_mu'][-1] - 1  # total number of blocks per covariate

        # convert data block by depending on correlation form
        for i, block_unc in enumerate(HData.unc):

            # block parameters
            ib = HData.idx['idx'][i]         # start idx of data block
            ie = HData.idx['idx'][i+1]       # end idx of data block
            nm = int(HData.idx['N_var'][i])  # number of variables in block

            # 1. random type correlation - scale by uncertainty
            if block_unc.form == "r":
                ur = block_unc.uR  # uncertainty
                # store old values in new array (scaling by uncertainty)
                new_values = append(new_values, HData.values[ib:ie]/ur)

            # 2. random+systematic type correlation - separate components
            elif block_unc.form == "rs":
                # a. random component - scale by uncertainty
                uR = block_unc.uR  # random uncertainty
                # store old values in new array (scaling by random uncertainty)
                new_values = append(new_values, HData.values[ib:ie]/uR)

                # b. systematic component - at final block of given covariate add
                #    systemic data for all blocks
                n_b_cov += 1  # add to count of blocks through covariate

                # when block number is total number of blocks of covariate
                if n_b_cov == N_bpcov:
                    # store systematic uncertainty for each block
                    # todo - changes needed for Jon's data?
                    N_sensors = len(set(HData.idx['n_sensor']))-1
                    new_values = append(new_values, zeros(N_sensors))
                    # shift all downstream block boundaries by the added entries,
                    # then reset counter for the next covariate
                    new_idx['idx'][i+1:] = [j+N_sensors for j in new_idx['idx'][i+1:]]
                    n_b_cov = 0

            # 3. averaging type correlation - simulate data without averaging
            elif block_unc.form == "ave":
                # initialise array (one slot per underlying scanline value)
                Htemp = zeros(block_unc.W.shape[1])
                uRtemp = zeros(block_unc.W.shape[1])

                next_idx = 0
                col = 0
                first_idx_prev = 0
                for j in xrange(HData.idx['N_var'][i]):
                    i_value = j + ib
                    first_idx = block_unc.W.indices[col]
                    # step: offset of this averaging window relative to the previous one
                    step = first_idx - first_idx_prev
                    uR = block_unc.uR[j][block_unc.uR[j] != 0]  # valid scanline uncertainties
                    n_w = len(uR)  # size of averaging window
                    w = block_unc.W[j, first_idx:first_idx+n_w].toarray()[0]  # W row

                    if (step == n_w) or (j == 0):
                        # non-overlapping window: all averaged values are new
                        Htemp[next_idx:next_idx+n_w] = HData.values[i_value]/uR
                        uRtemp[next_idx:next_idx+n_w] = uR
                        next_idx = next_idx + n_w
                    elif 0 < step < n_w:
                        # partially overlapping window: only `step` values are new
                        istartk = next_idx + step - n_w
                        iendk = next_idx + step - 1
                        try:
                            uRtemp[next_idx:iendk+1] = uR[-(iendk+1-next_idx):]
                            # fill all but last missing data of average with averaged value
                            Htemp[next_idx:iendk] = HData.values[i_value] / uR[-(iendk+1-next_idx):-1]
                            # compute missing final value in average
                            Htemp[iendk] = (HData.values[i_value]-sum(Htemp[istartk:iendk] * w[:-1])) / w[-1]
                        except ValueError:
                            # debug dump of the inconsistent window geometry;
                            # processing continues with the affected slots left at 0
                            print 'i', i
                            print 'j', j
                            print 'step', step
                            print 'n_w', n_w
                            print 'next_idx', next_idx
                            print 'istartk', istartk
                            print 'iendk', iendk
                            print 'uR[-(iendk+1-next_idx):]', uR[-(iendk+1-next_idx):]
                            pass
                        next_idx += step
                    elif step == 0:
                        # window identical to the previous one: nothing new to add
                        pass
                    first_idx_prev = first_idx
                    col += n_w

                # For testing
                # Before = HData.values[ib:ie]
                # After = block_unc.W.dot(Htemp)

                # store new data values
                new_values = append(new_values, Htemp)
                block_unc.uR = uRtemp

                # update N_var of HData.idx to count new variables
                new_len = len(Htemp)
                new_idx['N_var'][i] = new_len
                new_idx['idx'][i+1:] = [old_idx+new_len-(ie-ib) for old_idx in new_idx['idx'][i+1:]]

        # replace old data values array with converted form
        HData.values = new_values
        HData.idx = new_idx

        # scale ks (adjustment values) by their random uncertainty per match-up series
        for i in xrange(len(HData.idx['Im'])):
            istart = HData.idx['cNm'][i]
            iend = HData.idx['cNm'][i + 1]
            HData.ks[istart:iend] /= HData.unck[i].uR

        return HData

    def sample4PC(self, HData, sf):
        """
        Return sample of data for which the only data correlations arise from systematic effects

        :param HData: HarmData
            Input harmonisation data for conversion
        :param sf: int
            Sampling factor

        NOTE(review): ``sf`` is not used in the visible body -- the sample
        spacing is derived from the widest averaging window instead; confirm
        whether the parameter is vestigial.

        :return:
            :HData_sample: HarmData
                Sampled harmonisation data
        """

        # initialise parameters
        mcxyz = HData.idx['idx']  # cumulative total of variables data block
        mc = HData.idx['cNm']     # cumulative total of match-ups by series

        # initialise sampled harmonisation data product
        HData_sample = HarmData()
        HData_sample.idx = deepcopy(HData.idx)
        HData_sample.unc = deepcopy(HData.unc[:])
        HData_sample.unck = deepcopy(HData.unck[:])
        HData_sample.a = HData.a[:]
        HData_sample.sensor_model = HData.sensor_model
        HData_sample.adjustment_model = HData.adjustment_model

        ################################################################################################################
        # 1. Sample Data
        ################################################################################################################

        # a. find sampling indices

        n_mus = set(HData.idx['n_mu'])
        sampling_idxs = {}

        # find sampling indices per match-up series
        for n_mu in n_mus:

            # find W for covariate with largest moving average window (i.e. responsible for the most correlation)
            n_w = 0
            W = 0
            for i, block_unc in enumerate(HData.unc):
                if HData.idx['n_mu'][i] == n_mu:
                    if block_unc.form == 'ave':
                        if block_unc.uR.shape[1] > n_w:
                            n_w = block_unc.uR.shape[1]
                            W = block_unc.W

            # produce sampling indices: pick match-ups whose averaging windows
            # do not overlap, so sampled values share no averaging correlation
            stop = False
            istartW = 0
            last_idx = 0
            idx = 0
            idxs = [idx]
            while stop is False:
                for j, first_idx in enumerate(W.indices[istartW::n_w]):
                    step = first_idx - last_idx
                    current_idx = idx + j
                    final_idx = len(W.indices[::n_w]) - 1
                    if current_idx == final_idx:
                        # reached the last match-up: finish this series
                        sampling_idxs[n_mu] = idxs
                        stop = True
                        break
                    elif step >= n_w:
                        # window no longer overlaps the previously sampled one
                        idx += j
                        idxs.append(idx)
                        last_idx = first_idx
                        istartW += j*n_w
                        break

        # b. sample variables

        # update idx attribute of HData_sample to describe structure of sampled data
        idxs = [0]
        total = 0
        for i, n_mu in enumerate(HData.idx['n_mu']):
            block_samples = len(sampling_idxs[n_mu])
            HData_sample.idx['N_var'][i] = block_samples
            total += block_samples
            idxs.append(int(total))
        HData_sample.idx['idx'] = idxs

        # sample variables and respective uncertainty data by data block
        HData_sample.values = zeros(HData_sample.idx['idx'][-1])
        for i, block_unc in enumerate(HData.unc):

            # block indices
            istart = mcxyz[i]
            iend = mcxyz[i+1]
            istart_s = HData_sample.idx['idx'][i]
            iend_s = HData_sample.idx['idx'][i+1]

            s_idx = sampling_idxs[HData.idx['n_mu'][i]]
            HData_sample.values[istart_s:iend_s] = HData.values[istart:iend][s_idx]
            if block_unc.form == "ave":
                # sampled averages no longer overlap, so they become random-type;
                # the combined uncertainty is the RMS of the window weights
                HData_sample.unc[i] = CorrelForm("r", zeros(len(s_idx)))
                for j, s_i in enumerate(s_idx):
                    HData_sample.unc[i].uR[j] = npsum(block_unc.W[s_i, :].toarray()[0]**2) ** 0.5
            else:
                HData_sample.unc[i].uR = deepcopy(block_unc.uR[s_idx])

        # c. sample ks
        cNm = [0]
        total = 0
        for i, n_mu in enumerate(n_mus):
            n_mu_sample = len(sampling_idxs[n_mu])
            HData_sample.idx['Nm'][i] = n_mu_sample
            total += n_mu_sample
            cNm.append(total)
        HData_sample.idx['cNm'] = cNm

        print "Sample Size: ", HData_sample.idx['Nm']

        # sample k and respective uncertainty data by match-up series
        HData_sample.ks = zeros(HData_sample.idx['cNm'][-1])
        for i, mu_unck in enumerate(HData.unck):
            n_mu = i+1

            # match-up series indices
            istart = mc[i]
            iend = mc[i+1]
            istart_s = HData_sample.idx['cNm'][i]
            iend_s = HData_sample.idx['cNm'][i+1]

            s_idx = sampling_idxs[n_mu]

            # sample data
            HData_sample.ks[istart_s:iend_s] = HData.ks[istart:iend][s_idx]
            HData_sample.unck[i].uR = deepcopy(mu_unck.uR[s_idx])

        ################################################################################################################
        # 2. Convert to Independent Data
        ################################################################################################################

        HData_sample = self.convert2ind(HData_sample)

        return HData_sample
if __name__ == "__main__":
    # Placeholder entry point: this module is intended to be imported,
    # so running it directly is a deliberate no-op.
    def main():
        return 0
    main()
|
#!/bin/python3
import sys
import os
import os.path as osp
from PIL import Image
from torchvision import transforms
import argparse
import logging
from plan2scene.config_manager import ConfigManager
from plan2scene.texture_gen.custom_transforms.random_crop import RandomResizedCropAndDropAlpha
if __name__ == "__main__":
    """
    This script is used to prepare rectified surface crops from OpenSurfaces dataset, which we use to train the substance classifier.
    """
    # FIX: configure the root logger; without this the logging.info calls
    # below are silently suppressed (default root level is WARNING).
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(description="Extract rectified surface crops from the OpenSurfaces dataset.")
    parser.add_argument("output_path", type=str, help="Output directory to save extracted crops.")
    parser.add_argument("input_path", type=str, help="Directory containing rectified surface masks from OpenSurfaces dataset.")

    conf = ConfigManager()
    conf.add_args(parser)
    args = parser.parse_args()
    conf.process_args(args)

    output_path = args.output_path
    input_path = args.input_path

    # Configuration used
    crop_count = 10                # crops attempted per source image
    crop_size = (85, 85)           # size of the first random crop
    output_size = (128, 128)       # resolution of the saved crops
    image_scaleup = 1
    second_crop_min_scale = 0.25   # lower bound on the area of the second crop

    # Refuse to clobber a previous extraction run; otherwise create the
    # directory (the original re-tested existence redundantly after exiting).
    if osp.exists(output_path):
        logging.error("Output directory already exist")
        sys.exit(1)
    os.makedirs(output_path)

    image_file_paths = [osp.join(input_path, a) for a in os.listdir(input_path)]
    logging.info("Found {count} files.".format(count=len(image_file_paths)))

    # index.html gives a quick visual preview of every generated crop.
    with open(osp.join(output_path, "index.html"), "w") as f:
        for image_file_path in image_file_paths:
            img_name = image_file_path.split("/")[-1]
            img = Image.open(image_file_path)
            index = 0
            for i in range(crop_count):
                # First crop rejects patches that are mostly transparent
                # (outside the rectified surface mask).
                crop = RandomResizedCropAndDropAlpha(crop_size, 100, ratio=(1.0, 1.0))(img)
                if crop is not None:
                    # Second crop adds scale jitter, then resize to output size.
                    crop = transforms.RandomResizedCrop(size=(crop_size), ratio=(1.0, 1.0), scale=(second_crop_min_scale, 1.0))(crop)
                    crop = crop.resize(output_size)
                    logging.info("Saved {file}.".format(file=osp.join(output_path, img_name.split(".")[0] + "_crop%d.png" % index)))
                    crop.save(osp.join(output_path, img_name.split(".")[0] + "_crop%d.png" % index))
                    f.write("<div style='float:left; margin:5px;'><img src='%s'/><br><small>%s</small></div>" % (img_name.split(".")[0] + "_crop%d.png" % index, img_name))
                    index += 1
            f.flush()
import sys
import time
import argparse
from PIL import Image
# from naoqi import ALProxy
# from naoqi import ALBroker
from naoqi import ALModule
from nao_class import NaoWrapper
IP = "10.125.200.124"    # network address of the NAO robot
data = "nao_data.csv"    # CSV file path handed to the wrapper

# create nao object
my_nao = NaoWrapper(IP, data)
# deactivate fall manager
my_nao.FallManager(False)
# Stand up (0.2 presumably the joint speed fraction -- see NaoWrapper), then
# hand over to the wrapper's control loop.
my_nao.PostureStandInit(0.2)
my_nao.Control()
# Give the motion time to complete before relaxing the motors.
time.sleep(3)
my_nao.Rest()
|
class Plane():
    """Record describing a plane and its passenger list.

    Attributes are private (name-mangled) and exposed via explicit
    getters/setters to match the surrounding codebase's style.
    """

    #constructor
    def __init__(self, n='', nr=0, arc='', nrs=0, dest='', lps=None):
        """Create a plane.

        :param n: plane name
        :param nr: plane number
        :param arc: airline company
        :param nrs: number of seats
        :param dest: destination
        :param lps: initial passenger list; a copy is stored so the caller's
            list is never aliased. FIX: the original used the mutable default
            argument ``lps=[]`` -- a classic Python pitfall -- replaced here
            with the None-sentinel idiom (behaviour is unchanged because the
            original already copied with ``lps[:]``).
        """
        self.__name = n
        self.__number = nr
        self.__airline_company = arc
        self.__number_seats = nrs
        self.__destination = dest
        self.__list_passengers = list(lps) if lps is not None else []

    #setter
    def set_name(self, n):
        self.__name = n

    #setter
    def set_number(self, nr):
        self.__number = nr

    #setter
    def set_airline_company(self, arc):
        self.__airline_company = arc

    #setter
    def set_number_seats(self, nrs):
        self.__number_seats = nrs

    #setter
    def set_destination(self, dest):
        self.__destination = dest

    #setter
    def set_list_passengers(self, lps):
        # NOTE: stores the caller's list itself (no copy), as the original did.
        self.__list_passengers = lps

    #getter
    def get_name(self):
        return self.__name

    #getter
    def get_number(self):
        return self.__number

    #getter
    def get_airline_company(self):
        return self.__airline_company

    #getter
    def get_number_seats(self):
        return self.__number_seats

    #getter
    def get_destination(self):
        return self.__destination

    #getter
    def get_list_passengers(self):
        return self.__list_passengers

    def add_passenger(self, passenger):
        """
        input: -
        output: A modified Plane.
        desc.: Add a Passenger to the list of passengers.
        """
        self.__list_passengers.append(passenger)

    def __str__(self):
        return "Name:" + self.__name + " --- " + "Number: " + str(self.__number) + " --- " + "Airline Company:" + self.__airline_company + " --- " + "Number of seats:" + str(self.__number_seats) + " --- " + 'Destination:' + self.__destination + " --- " + "List of passengers:" + str(self.__list_passengers)

    def __repr__(self):
        return str(self)
|
# Generated by Django 2.1.3 on 2018-12-11 12:40
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: adds the `view_original_img` custom permission to the
    blog Post model's Meta options (metadata only, no schema change)."""

    dependencies = [
        ('blog', '0003_post_thumbnail'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='post',
            options={'permissions': (('view_original_img', 'Can view the original image of a blog post'),)},
        ),
    ]
|
from datetime import datetime
from typing import Optional
from uuid import UUID, uuid4
import sqlalchemy as sa
import sqlalchemy.dialects.postgresql as pg
from oasst_shared.utils import utcnow
from sqlalchemy import false
from sqlmodel import Field, SQLModel
from .payload_column_type import PayloadContainer, payload_column_type
class Task(SQLModel, table=True):
    """Persisted task row.

    NOTE(review): the per-column comments below describe only what this model
    shows; verify semantics against the API layer that creates/consumes tasks.
    """

    __tablename__ = "task"

    # Primary key: generated client-side via uuid4, or by Postgres
    # (gen_random_uuid()) when inserted without an id.
    id: Optional[UUID] = Field(
        sa_column=sa.Column(
            pg.UUID(as_uuid=True), primary_key=True, default=uuid4, server_default=sa.text("gen_random_uuid()")
        ),
    )
    # Row creation timestamp, set by the database; indexed (e.g. for range scans).
    created_date: Optional[datetime] = Field(
        sa_column=sa.Column(
            sa.DateTime(timezone=True), nullable=False, index=True, server_default=sa.func.current_timestamp()
        ),
    )
    # When the task stops being valid; NULL means it never expires (see `expired`).
    expiry_date: Optional[datetime] = Field(sa_column=sa.Column(sa.DateTime(timezone=True), nullable=True))
    # Optional owner; FK to user.id.
    user_id: Optional[UUID] = Field(nullable=True, foreign_key="user.id", index=True)
    # Discriminator naming the concrete schema held in `payload`.
    payload_type: str = Field(nullable=False, max_length=200)
    payload: PayloadContainer = Field(sa_column=sa.Column(payload_column_type(PayloadContainer), nullable=False))
    api_client_id: UUID = Field(nullable=False, foreign_key="api_client.id")
    # None until acknowledged; then True/False.
    ack: Optional[bool] = None
    done: bool = Field(sa_column=sa.Column(sa.Boolean, nullable=False, server_default=false()))
    skipped: bool = Field(sa_column=sa.Column(sa.Boolean, nullable=False, server_default=false()))
    skip_reason: Optional[str] = Field(nullable=True, max_length=512)
    frontend_message_id: Optional[str] = None
    message_tree_id: Optional[UUID] = None
    parent_message_id: Optional[UUID] = None
    collective: bool = Field(sa_column=sa.Column(sa.Boolean, nullable=False, server_default=false()))

    @property
    def expired(self) -> bool:
        """True when an expiry date is set and lies in the past."""
        # A task without an expiry date never expires.
        return self.expiry_date is not None and utcnow() > self.expiry_date
|
# Test Name Description
# A_BX_UART_AT&K_0005 To check if hardware flow control working
#
# Requirement
# 1 Euler module
#
# Author: ptnlam
#
# Jira ticket:
#-----------------------------------------------------------------------------------------------------
# -------------------------- DUT InitializAT+SYSRAMon ----------------------------------
# Environment check: open the AT command port, query identity info and verify
# the DUT answers a plain AT. On any failure the flag below gates the test body.
test_environment_ready = "Ready"
try:
    print "\n------------Test Environment check: Begin------------"

    # UART Initialization (115200 8N1, no flow control yet)
    print "\nOpen AT Command port"
    uart_com = SagOpen(uart_com, 115200, 8, "N", 1, "None")

    # Display DUT information
    print "\nDisplay DUT information"
    print "\nGet model information"
    SagSendAT(uart_com, 'AT+FMM\r')
    SagWaitnMatchResp(uart_com, ['*\r\nOK\r\n'], 2000)
    print "\nGet serial number"
    SagSendAT(uart_com, 'AT+CGSN\r')
    SagWaitnMatchResp(uart_com, ['*\r\nOK\r\n'], 2000)
    print "\nGet revision information"
    SagSendAT(uart_com, 'ATI3\r')
    SagWaitnMatchResp(uart_com, ['*\r\nOK\r\n'], 2000)

    # DUT Initialization: basic AT sanity check
    print "\nInitiate DUT"
    SagSendAT(uart_com, 'AT\r')
    SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 2000)
except Exception, e:
    # Any setup failure marks the environment not ready; the test body then aborts.
    print "***** Test environment check fails !!!*****"
    print type(e)
    print e
    test_environment_ready = "Not_Ready"
print "\n------------Test Environment check: End------------"
print "\n----- Test Body Start -----\n"
# -----------------------------------------------------------------------------------
# A_BX_UART_AT&K_0005
# -----------------------------------------------------------------------------------
test_ID = "A_BX_UART_AT&K_0005"
#######################################################################################
# START
#######################################################################################
try:
if test_environment_ready == "Not_Ready" or VarGlobal.statOfItem == "NOK":
raise Exception("---->Problem: Test Environment Is Not Ready !!!")
print "*****************************************************************************************************************"
print "%s: To check if hardware flow control working" % test_ID
print "*****************************************************************************************************************"
print "\nStep 1: Connect to Wi-Fi\n"
SagSendAT(uart_com, 'AT+SRWCFG=1\r')
SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 2000)
SagSendAT(uart_com, 'AT+SRWSTACFG="%s","%s",1\r' %(wifi_ssid, wifi_password))
SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 2000)
SagSendAT(uart_com, 'AT+SRWSTACON=1\r')
SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 2000)
if SagWaitnMatchResp(uart_com, ['*\r\n+SRWSTASTATUS: 1,"%s","%s",*,*\r\n' % (wifi_ssid, wifi_mac_addr)], 20000):
SagWaitnMatchResp(uart_com, ['\r\n+SRWSTAIP: "%s.*","%s","%s"\r\n' % (return_subnet(wifi_dhcp_gateway), wifi_dhcp_subnet_mask, wifi_dhcp_gateway)], 10000)
else:
raise Exception("---->Problem: Module cannot connect to Wi-Fi !!!")
print "\nStep 2: Configure a MQTT session\n"
SagSendAT(uart_com, 'AT+KMQTTCFG=0,"%s",%s,4,"BX310x",0,1,1,"TMA/Euler","BX310x Left",0,0,"%s","%s"\r' %(mqtt_server, mqtt_port, mqtt_user, mqtt_password ))
SagWaitnMatchResp(uart_com, ['\r\n+KMQTTCFG: 1\r\n'], 2000)
SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 2000)
print "\nStep 3: Active MQTT session\n"
SagSendAT(uart_com, 'AT+KMQTTCNX=1\r')
SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 2000)
print "\nStep 4: Enable Hardware Flow Control\n"
SagSendAT(uart_com, 'AT&K3\r')
SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 2000)
print "\nStep 5: Set RTS to high\n"
SagSetRTS(uart_com, 1)
time.sleep(1)
print "\nStep 6: Subscribe to Broker to receive uptime of broker every 11 seconds\n"
SagSendAT(uart_com, 'AT+KMQTTSUB=1,"$SYS/broker/uptime",1\r')
SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 5000)
print "\nStep 7: Check every 11 seconds, module receives a message from MQTT brokers\n"
SagWaitnMatchResp(uart_com, ['\r\n+KMQTTSUB: "$SYS/broker/uptime","* seconds"\r\n'], 11000)
SagWaitnMatchResp(uart_com, ['\r\n+KMQTTSUB: "$SYS/broker/uptime","* seconds"\r\n'], 11000)
print "\nStep 8: Set RTS to low\n"
SagSetRTS(uart_com, 0)
print "\nStep 9: Wait for 1 minutes. Check URC on module\n"
time.sleep(60)
if (SagWaitResp(uart_com, ['\r\n*\r\n'], 5000)):
print "\nURC on module\n"
else:
print "\nModule should not receive any URC\n"
print "\nStep 10: Set RTS to high\n"
SagSetRTS(uart_com, 1)
time.sleep(1)
print "\nStep 11: Check response\n"
SagWaitnMatchResp(uart_com, ['\r\n+KMQTTSUB: "$SYS/broker/uptime","* seconds"\r\n'], 11000)
SagWaitnMatchResp(uart_com, ['\r\n+KMQTTSUB: "$SYS/broker/uptime","* seconds"\r\n'], 11000)
SagWaitnMatchResp(uart_com, ['\r\n+KMQTTSUB: "$SYS/broker/uptime","* seconds"\r\n'], 11000)
# print "\nStep 12: Set RTS to low\n"
# SagSetRTS(uart_com, 0)
# print "\nStep 13: Disable Hardware Flow Control\n"
# SagSendAT(uart_com, 'AT&K0')
# SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 5000)
print "\nStep 14: Close MQTT session\n"
SagSendAT(uart_com, 'AT+KMQTTCLOSE=1')
SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 5000)
print "\nStep 15: Delete MQTT session\n"
SagSendAT(uart_com, 'AT+KMQTTDEL=1')
SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 5000)
print "\nTest Steps completed\n"
except Exception, err_msg :
VarGlobal.statOfItem = "NOK"
print Exception, err_msg
SagSendAT(uart_com, 'AT&F\r')
SagWaitnMatchResp(uart_com, ['*\r\nREADY\r\n'], 2000)
#Print test result
PRINT_TEST_RESULT(test_ID, VarGlobal.statOfItem)
# -----------------------------------------------------------------------------------
print "\n----- Test Body End -----\n"
print "-----------Restore Settings---------------"

#Disconnect from the Wi-Fi network and wait for the disconnection URC
SagSendAT(uart_com, 'AT+SRWSTACON=0\r')
SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 3000)
SagWaitnMatchResp(uart_com, ['\r\n+SRWSTASTATUS: 0,8\r\n'], 3000)

# Restore DUT Wi-Fi configuration
SagSendAT(uart_com, 'AT+SRWCFG=3\r')
SagWaitnMatchResp(uart_com, ['\r\nOK\r\n'], 2000)

# Close UART
SagClose(uart_com)
import puzzle as game
from copy import deepcopy,copy
from direction import Direction,Coordinate
from board import print_board
from main import clear
from time import sleep
class Node:
    """A* search node: a puzzle board state plus parent link and path costs."""

    def __init__(self, board : list, parent = None):
        self.state = board
        self.parent = parent
        # g: cost from the start state (depth in the search tree).
        self.g_value = 0
        if parent != None:
            self.g_value = parent.g_value + 1
        # h: Manhattan-distance heuristic to the goal configuration.
        self.h_value = self.manhattan_distance(board, game.win_state)

    def f(self):
        # A* priority: path cost so far plus heuristic estimate to the goal.
        return self.g_value+self.h_value

    def manhattan_distance(self, state : list, final: list) -> int:
        """Sum over all tiles of |row offset| + |col offset| between
        `state` and the `final` configuration (square boards only)."""
        cost = 0
        board_len = len(state)
        for i in range(board_len):
            for j in range(board_len):
                pos = self.get_pos(final, state[i][j])
                cost += abs(i - pos[0]) + abs(j - pos[1])
        return cost

    def get_pos(self, state : list, element):
        """Return (row, col) of `element` in `state`; None if absent."""
        for i in range(len(state)):
            if element in state[i]:
                return (i, state[i].index(element))

    def get_neighbors(self):
        """Return child nodes for each legal slide of the empty space
        (up, down, left, right); illegal moves are skipped."""
        list_nodes = []
        for i in range(4):
            # Work on a copy so this node's state is never mutated.
            temp_state = deepcopy(self.state)
            x,y = game.find_empty_space(temp_state)
            if i == 0:
                move = Direction.Up
                inc_x,inc_y = Coordinate.Up.value
            elif i == 1:
                move = Direction.Down
                inc_x,inc_y = Coordinate.Down.value
            elif i == 2:
                move = Direction.Left
                inc_x,inc_y = Coordinate.Left.value
            elif i == 3:
                move = Direction.Right
                inc_x,inc_y = Coordinate.Right.value
            x = inc_x + x
            y = inc_y + y
            if game.is_valid_move(x,y, len(temp_state)):
                temp_state = game.move(move, list(temp_state))
                if temp_state != None:
                    list_nodes += [Node(temp_state, self)]
        return list_nodes
def get_best_nodes(open_list : dict):
    """Return the node in open_list with the smallest f-value; ties go to
    the earliest-inserted node (dict iteration order), as before."""
    return min(open_list.values(), key=lambda node: node.f())
def a_star(start_state: list):
    """A* search from start_state to game.win_state.

    Prints the solution path and returns True on success; returns None when
    the open list is exhausted without reaching the goal.
    """
    open_list = {str(start_state): Node(start_state)}
    closed_list = {}
    while open_list:
        current = get_best_nodes(open_list)
        closed_list[str(current.state)] = current
        if current.state == game.win_state:
            print_path(current)
            return True
        for neighbor in current.get_neighbors():
            key = str(neighbor.state)
            # Skip states already expanded, and states already queued with a
            # strictly better f-value (same precedence as the original
            # `closed or (open and better)` condition).
            already_expanded = key in closed_list
            superseded = key in open_list and open_list[key].f() < neighbor.f()
            if already_expanded or superseded:
                continue
            open_list[key] = neighbor
        del open_list[str(current.state)]
    return None
def array_to_str(arr : list):
    """Flatten a square board into one digit string, row-major."""
    size = len(arr)
    return "".join(str(arr[row][col]) for row in range(size) for col in range(size))
def generate_sequence(node : "Node"):
    """Collect board states by walking parent links from `node` back to the
    root: result is ordered goal-first, start-last."""
    states = []
    current = node
    while current is not None:
        states.append(list(current.state))
        current = current.parent
    return states
def print_path(node):
    """Replay the solution from start to goal (one board per second, screen
    cleared between frames), then print summary statistics."""
    sequence = generate_sequence(node)
    goal_state = array_to_str(game.win_state)
    path = ""
    # generate_sequence is goal-first, so iterate it backwards to replay
    # from the start state.
    for state in reversed(sequence):
        board = print_board(state)
        path += array_to_str(state)
        path += "\n"
        sleep(1)
        clear()
        print(board)
        print(f"Goal state: {goal_state}")
    print(f"Steps taken: {len(sequence)-1}")
    print("Path taken: ")
    print(path)
|
from django.views.generic import ListView, DetailView
from .models import Order
from django.http import Http404
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
class OrderListView(LoginRequiredMixin, ListView):
    """List orders scoped to the current request's user."""

    def get_queryset(self):
        # by_request presumably filters to the requesting user's orders --
        # see the Order manager for the exact scoping rule.
        return Order.objects.by_request(self.request)
class OrderDetailView(LoginRequiredMixin, DetailView):
    """Show one of the requesting user's orders, looked up by the
    `order_id` URL kwarg; anything but exactly one match is a 404."""

    def get_object(self):
        order_id = self.kwargs.get('order_id')
        matches = Order.objects.by_request(self.request).filter(order_id=order_id)
        if matches.count() != 1:
            # Unknown id, someone else's order, or duplicate ids all 404.
            raise Http404
        return matches.first()
|
import logging
from cycler import cycler
from matplotlib.colors import LogNorm, rgb2hex
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
from itertools import chain
logger = logging.getLogger('pyrain')

# Per-arena rendering metadata: overlay images (with blend alpha) and the
# world-coordinate extents used to map positions onto the figure.
STANDARD = {'outline': {'file': 'resources/arena_outline.png',
                        'alpha': 0.8},
            'fieldline': {'file': 'resources/arena_fieldlines.png',
                          'alpha': 0.3},
            'boost': {'file': 'resources/arena_boost.png',
                      'alpha': 0.8},
            'xmin': -5770,
            'xmax': 5770,
            'ymin': -4096,
            'ymax': 4096,
            'aspect': 0.71}

WASTELAND = {'outline': {'file': 'resources/wasteland_outline.png',
                         'alpha': 0.8},
             'fieldline': {'file': 'resources/wasteland_fieldlines.png',
                           'alpha': 0.3},
             'xmin': -5980,
             'xmax': 5980,
             'ymin': -4530,
             'ymax': 4530,
             'aspect': 0.76}

# Keys into the arena dicts above; usable as the `overlays` argument of
# generate_figure.
BOOST = 'boost'
OUTLINE = 'outline'
FIELDLINE = 'fieldline'

# avg car size ~118x82x32 ; Field Size(Excluding Wasteland: 10240x8192*(2000?);
# -5120 - 5120; -4096,4096; 19, 2000
# Goals are roughly 650units deep
# Field length with goals: ~11540 aspect ratio: 0.71
# bins for ~1:1 mapping:87x100x62
def graph_2d(values, mean=True):
    """Plot values['ys'] against values['xs'] in a new figure; optionally
    overlay a dashed horizontal line at the mean of the y-series, then
    block on plt.show()."""
    fig = plt.figure()
    axes = fig.add_subplot(111)
    xs = values['xs']
    ys = values['ys']
    axes.plot(xs, ys)
    if mean:
        mean_line = [np.mean(ys) for _ in xs]
        axes.plot(xs, mean_line, linestyle='--')
    plt.show()
def lines2d(x, y, ax, mean=True):
    """Draw y-vs-x on `ax` and, when `mean` is set, a dashed line at the
    mean of y; return the line handles in draw order."""
    drawn = []
    line, = ax.plot(x, y)
    drawn.append(line)
    if mean:
        flat = [np.mean(y) for _ in x]
        line, = ax.plot(x, flat, linestyle='--')
        drawn.append(line)
    return drawn
def generate_figure(data, arena, overlays=None, bins=(25, 12), hexbin=False, interpolate=True,
                    norm=False):
    """Render a positional heatmap of data['x'] / data['y'] over an arena.

    :param data: dict with 'x' and 'y' coordinate sequences plus 'title' and
        'title_short' strings
    :param arena: arena metadata dict (e.g. STANDARD or WASTELAND above)
    :param overlays: iterable of overlay keys (e.g. OUTLINE) drawn on top
    :param bins: (x, y) bin counts for the histogram / hexbin grid
    :param hexbin: use hexagonal binning instead of a rectangular histogram
    :param interpolate: bilinear-smooth the histogram image when True
    :param norm: truthy -> logarithmic colour scale
    :return: matplotlib Figure (not shown here; caller embeds or saves it)
    """
    fig = Figure()
    ax = fig.add_subplot(111)
    x = data['x']
    y = data['y']
    logger.info("Building Heatmap %s with %d Data Points" % (data['title_short'], len(x)))
    cmap = plt.cm.get_cmap('jet')
    # NOTE(review): set_bad mutates the shared 'jet' colormap instance,
    # which affects other users of that colormap in the same process.
    cmap.set_bad((0, 0, 0.5))
    # `norm` is rebound from the boolean flag to the actual Normalize object.
    norm = LogNorm() if norm else None
    if hexbin:
        ax.hexbin(x, y, cmap=cmap, gridsize=bins, norm=norm, extent=[arena['xmin'], arena['xmax'],
                                                                     arena['ymin'], arena['ymax']])
    else:
        interpolate = 'bilinear' if interpolate else 'none'
        # histogram2d takes (rows, cols), hence the swapped bin order and the
        # (y, x) argument order below.
        bins = (bins[1], bins[0])
        heatmap, xedges, yedges = np.histogram2d(y, x, bins=bins,
                                                 range=[(arena['ymin'], arena['ymax']),
                                                        (arena['xmin'], arena['xmax'])])
        extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
        ax.imshow(heatmap, extent=extent, norm=norm, cmap=cmap, interpolation=interpolate,
                  origin='lower', aspect='auto')
    ax.autoscale(False)
    if overlays:
        # Overlay arena artwork above the heatmap (zorder 2).
        for overlay in overlays:
            im = plt.imread(arena[overlay]['file'])
            axi = ax.imshow(im, origin='lower', aspect='auto', alpha=arena[overlay]['alpha'],
                            extent=[arena['xmin'], arena['xmax'], arena['ymin'], arena['ymax']])
            axi.set_zorder(2)
    # Team labels anchored in axes coordinates at the bottom corners.
    ax.text(0.1, 0, 'Team 0',
            transform=ax.transAxes,
            bbox=dict(facecolor='white'))
    ax.text(0.9, 0, 'Team 1',
            horizontalalignment='right',
            transform=ax.transAxes,
            bbox=dict(facecolor='white'))
    # Pad the view slightly beyond the arena bounds, keeping the aspect ratio.
    pad_x = 110
    pad_y = arena['aspect'] * pad_x
    ax.set_xlim(arena['xmin'] - pad_x, arena['xmax'] + pad_x)
    ax.set_ylim(arena['ymin'] - pad_y, arena['ymax'] + pad_y)
    ax.set_title(data['title'], bbox=dict(facecolor='white'))
    ax.axis('off')
    fig.subplots_adjust(hspace=0, wspace=0, right=1, top=0.9, bottom=0.05, left=0)
    fig.patch.set_facecolor((0, 0, .5))
    return fig
def set_colormap(ax, colors=10, double=True):
    """Install a rainbow colour cycle of `colors` hues on `ax`; with
    double=True each hue appears twice in a row (paired series share a
    colour)."""
    cm = plt.get_cmap('gist_rainbow')
    hues = [cm(1.*i/colors) for i in range(colors)]
    if double:
        cycle = list(chain.from_iterable((hue, hue) for hue in hues))
    else:
        cycle = hues
    ax.set_prop_cycle(cycler('color', cycle))
def get_rgb(line):
    # Return the line's colour as a hex string (e.g. '#1f77b4').
    return rgb2hex(line.get_color())
|
def reverse(arr):
    """Reverse `arr` in place and return it.

    BUG FIX: the original hard-coded the right index as 4, so it was only
    correct for lists of exactly five elements; the last index is now
    derived from the actual length (empty and single-element lists are
    returned unchanged).
    """
    left, right = 0, len(arr) - 1
    while left < right:
        # Tuple assignment swaps without a temporary.
        arr[left], arr[right] = arr[right], arr[left]
        left += 1
        right -= 1
    return arr
def swap(arr, left, right):
    """Exchange arr[left] and arr[right] in place."""
    arr[left], arr[right] = arr[right], arr[left]
# Demo: reverse a five-element list in place and show the result.
arr = [1,2,3,4,5]
print(reverse(arr))
from setuptools import setup, find_packages
# Minimal packaging configuration; install with `pip install .`.
setup(
    name="cryptpad_auto",
    version="0.0.1",
    author="Liam Cripwell",
    description="A tool to automate CryptPad form generation from data.",
    packages=find_packages(),
    include_package_data=True,
)
|
import numpy as np
import sys
import unittest
sys.path.append('..')
from src import minimize
class testMinimize(unittest.TestCase):
    """Checks `minimize` on the symmetric-eigenvalue trace problem: the
    minimum of tr(Y^T A Y)/2 over n x p orthonormal frames equals half the
    sum of A's p smallest eigenvalues."""

    def test_minimize(self):
        n, p = 20, 4
        # Random symmetric test matrix (symmetrising keeps eigenvalues real).
        A = np.random.rand(n, n)
        A = (A + A.T)/2

        def f1(y):
            # Objective: tr(y^T A y) / 2.
            return np.sum(np.diag(np.dot(np.dot(y.T, A), y)))*1./2

        def f1y(y):
            # Gradient: (A + A^T) y / 2, which equals A y for symmetric A.
            return np.dot((A + A.T), y)*1./2

        def f1yy(y):
            # Hessian: block-diagonal with p copies of A.
            B = np.zeros((n*p, n*p))
            for j in range(p):
                B[j*n:(j+1)*n, j*n:(j+1)*n] = A
            return B

        # Feasible starting frame: the first p columns of the identity.
        y0 = np.vstack([np.eye(p), np.zeros((n-p, p))])
        opt_res = minimize(y0, f1, f1y, f1yy)
        # FIX: use eigvalsh for the symmetric A -- eigvals can return a
        # complex-typed array (spurious imaginary parts), while eigvalsh is
        # exact for symmetric input and returns eigenvalues already sorted
        # in ascending order, so the explicit sort is unnecessary.
        optval = np.sum(np.linalg.eigvalsh(A)[:p])/2
        self.assertTrue(np.isclose(opt_res['value'], optval))
if __name__ == '__main__':
    # Allow running this test file directly: `python test_minimize.py`.
    unittest.main()
|
import functools
import pytest
def check_depends(depends):
    """Run each dependency callable in order.

    :param depends: iterable of zero-argument callables
    :return: True when every callable succeeds (including an empty
        iterable); otherwise the first callable that raised, so the caller
        can report which dependency failed.
    """
    for dep in depends:
        try:
            dep()
        except Exception:
            # FIX: the exception was previously captured as an unused `e`;
            # we return the failing dependency itself, not the exception.
            return dep
    return True
def pytest_depend(depends):
    """Decorator factory: skip the decorated test when any callable in
    `depends` raises; the skip reason names the failing dependency."""
    def decorator(func):
        result = check_depends(depends)
        if result is True:
            return func
        reason = "%s[skip] --> %s[Failed]" % (func.__name__, result.__name__)
        return pytest.mark.skip(True, reason=reason)(func)
    return decorator
|
import sys
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget, QRadioButton, QButtonGroup
from PyQt5.QtCore import QSize
class Questionnaire(QWidget):
    """A 4-question, 5-point Likert questionnaire rendered with radio buttons."""
    def __init__(self, parent):
        super(Questionnaire, self).__init__(parent)
        self.layout = QGridLayout()
        # Bug fix: QSize takes ints; parent.width() * 0.5 is a float.
        self.setMinimumSize(QSize(int(parent.width() * 0.5), int(parent.height() * 0.5)))
        # Add instructions at top
        self.instructions = QLabel("Rate the following statements for HOW MUCH you experienced: \n(1 being lowest, 5 being highest)", self)
        self.instructions.move(50, 10)
        self.instructions.adjustSize()
        self.layout.addWidget(self.instructions)
        # Questions are consumed with pop() (from the end), so reverse the
        # list to display them in 1..4 order.
        self.questions = ["1) Your need to read quickly","2) Your sense of a loss of control of speech", "3) Amount of anxiety during reading","4) Amount of time pressure experienced"]
        self.questions.reverse()
        # One QButtonGroup per question so each question accepts one answer.
        Q1_response = QButtonGroup()
        Q2_response = QButtonGroup()
        Q3_response = QButtonGroup()
        Q4_response = QButtonGroup()
        self.button_group_questions = [Q1_response, Q2_response, Q3_response, Q4_response]
        # Counter for vertical placement of questions/answers
        self.adjustCounter = 1
        for i in range(len(self.questions)):
            # Bug fix: was button_group_questions[i-1], which put question 1's
            # buttons in Q4_response (index -1) and shifted every other pairing.
            self.addQuestion(self.button_group_questions[i])
        self.show()
    def addQuestion(self, button_group):
        """Display the next question with five radio buttons added to *button_group*."""
        buttonHeight = 60*self.adjustCounter+15  # y position of this question's buttons
        self.newQuestion = QLabel(self.questions.pop(), self)
        self.newQuestion.move(50, 60*self.adjustCounter)
        self.newQuestion.adjustSize()
        self.layout.addWidget(self.newQuestion)
        count = 1  # horizontal slot of the next radio button
        options = ["1", "2", "3", "4", "5"]
        buttons = [QRadioButton(option, self) for option in options]
        for button in buttons:
            button.move(50*count, buttonHeight)
            self.layout.addWidget(button)
            button_group.addButton(button)
            button.setCheckable(True)
            count += 1
        self.adjustCounter += 1  # next question goes one row lower
    def is_completed(self):
        """Return True when every question has a checked answer."""
        for q in self.button_group_questions:
            # checkedId() is -1 when no button in the group is checked.
            if q.checkedId() == -1:
                return False
        return True
|
'''
Created on Mar 30, 2020
@author: zen
'''
#if __name__ == '__main__':
# Greedy change-making: pay the amount X with the fewest banknotes,
# trying denominations from largest to smallest.
RMB = [200, 100, 20, 10, 5, 1]  # available denominations, descending
NUM = 6  # number of denominations
X = 628  # amount still to pay
count = 0  # total number of notes used
for i in range(NUM):
    use = X // RMB[i]  # how many notes of this denomination fit
    count += use
    X = X - RMB[i] * use
    print('需要面额为{} 的 {} 张'.format(RMB[i],use))
    print('剩余需要支付金额{}'.format(X))
print(count)
import jsonlines
import argparse
from tqdm import tqdm
# Build T5 reranker input files: one "Query: ... Document: ... Relevant:" line
# per sliding window of sentences over each retrieved abstract.
parser = argparse.ArgumentParser()
parser.add_argument("--corpus", type=str, default="data/corpus.jsonl")
parser.add_argument("--claims", type=str, required=True)
parser.add_argument("--retrieval", type=str, required=True)
parser.add_argument("--t5_input_ids", type=str, required=True)
parser.add_argument("--t5_input", type=str, required=True)
parser.add_argument("--stride", type=int, default=4)  # sentences stepped back between windows (overlap)
parser.add_argument("--length", type=int, default=8)  # max sentences per window
args = parser.parse_args()
# Index corpus, claims and retrieval results by their respective id fields.
abstracts = {doc['doc_id']: doc["abstract"] for doc in jsonlines.open(args.corpus)}
claims = {doc['id']: doc["claim"] for doc in jsonlines.open(args.claims)}
retrieval = {doc['claim_id']: doc["doc_ids"] for doc in jsonlines.open(args.retrieval)}
abstract_rerank_ids = open(args.t5_input_ids, "w")
abstract_rerank = open(args.t5_input, "w")
for claim_id in tqdm(retrieval):
    for doc_id in retrieval[claim_id]:
        sentences = [sent.strip() for sent in abstracts[doc_id]]
        idx = 0
        segment = []  # current window of sentences
        while idx < len(sentences):
            segment.append(sentences[idx].strip())
            # Flush the window at the end of the abstract or when it is full.
            if idx == len(sentences) - 1 or len(segment) == args.length:
                claim = claims[claim_id]
                document = " ".join(segment)
                start_idx = idx - len(segment) + 1  # index of the window's first sentence
                abstract_rerank_ids.write(f"{claim_id}\t{doc_id}\t{start_idx}\n")
                to_write = f"Query: {claim} Document: {document} Relevant:\n"
                abstract_rerank.write(to_write)
                # Step back `stride` sentences so consecutive windows overlap;
                # skipped at the final sentence to avoid an infinite loop.
                if idx != len(sentences) - 1 and len(segment) == args.length:
                    segment = []
                    idx = idx - args.stride
            idx += 1
abstract_rerank_ids.close()
abstract_rerank.close()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 14 12:59:36 2020
@author: Tushar Saxena
"""
#Find sum of numbers from different arrays closest to given sum
import time
a = [-1, 3,8, 2, 9, 5]
b = [4, 1, 2, 10, 5, 20]
c = 24 #given sum
# Brute force: try every (i, j) pair and keep the one whose sum is closest to c.
summ = 0
diff = c  # best absolute distance to c seen so far
start = time.time()
x = 0
y = 0
for i in a:
    for j in b:
        summ = i + j
        if abs(c - summ) < diff:
            x = i
            y = j
            diff = abs(c - summ)
# NOTE(review): summ here holds the LAST pair's sum, not x + y (the best pair's).
print(x, y, summ, diff)
end = time.time()
print(end - start)
##alternate
# Greedy variant: repeatedly take the maxima of a and b; when the pair does
# not improve the best distance, drop both maxima and retry.
# NOTE(review): removing from both lists in lockstep can skip better pairs,
# so this may not match the brute-force answer — verify before relying on it.
summ = 0
diff = c
start = time.time()
x = 0
y = 0
while a:
    i = max(a)
    j = max(b)
    summ = i + j
    if abs(c - summ) < diff:
        diff = abs(c - summ)
        x = i
        y = j
    else:
        a.remove(i)
        b.remove(j)
print(x, y, summ, diff)
end = time.time()
print(end - start)
from PySide6.QtWidgets import QApplication, QMainWindow
from PySide6.QtUiTools import QUiLoader
from qt_material import apply_stylesheet
# Extra stylesheet parameters passed to qt-material's apply_stylesheet().
extra = {
    # Button colors
    'danger': '#dc3545',
    'warning': '#ffc107',
    'success': '#17a2b8',
    # Font
    # Bug fix: 'monoespace' is not a recognized font family; 'monospace' is.
    'font_family': 'monospace',
    'font_size': '14px',
    'line_height': '14px',
}
########################################################################
class RuntimeStylesheets(QMainWindow):
    """Main window that loads a .ui file and tags its buttons with qt-material classes."""
    # ----------------------------------------------------------------------
    def __init__(self):
        """Load main_window_extra.ui and assign color classes to its buttons."""
        super().__init__()
        self.main = QUiLoader().load('main_window_extra.ui', self)
        # The 'class' property maps each button to the matching color in `extra`.
        self.main.pushButton_danger.setProperty('class', 'danger')
        self.main.pushButton_warning.setProperty('class', 'warning')
        self.main.pushButton_success.setProperty('class', 'success')
if __name__ == "__main__":
    app = QApplication()
    # Apply the material theme plus the custom `extra` palette before showing.
    apply_stylesheet(app, theme='light_blue.xml', extra=extra)
    frame = RuntimeStylesheets()
    frame.main.show()
    app.exec_()
|
# Include this gypi to build all of the golden master (GM) slides.
{
'sources': [
'../gm/aaclip.cpp',
'../gm/aarectmodes.cpp',
'../gm/arithmode.cpp',
'../gm/bigmatrix.cpp',
'../gm/bitmapcopy.cpp',
'../gm/bitmapmatrix.cpp',
'../gm/bitmapfilters.cpp',
'../gm/bitmapscroll.cpp',
'../gm/blurs.cpp',
'../gm/circles.cpp',
'../gm/colormatrix.cpp',
'../gm/complexclip.cpp',
'../gm/complexclip2.cpp',
'../gm/composeshader.cpp',
'../gm/convexpaths.cpp',
'../gm/cubicpaths.cpp',
'../gm/cmykjpeg.cpp',
'../gm/degeneratesegments.cpp',
'../gm/dashcubics.cpp',
'../gm/dashing.cpp',
'../gm/drawbitmaprect.cpp',
'../gm/drawlooper.cpp',
'../gm/extractbitmap.cpp',
'../gm/emptypath.cpp',
'../gm/filltypes.cpp',
'../gm/filltypespersp.cpp',
'../gm/fontscaler.cpp',
'../gm/gammatext.cpp',
'../gm/getpostextpath.cpp',
'../gm/giantbitmap.cpp',
'../gm/gradients.cpp',
'../gm/gradtext.cpp',
'../gm/hairmodes.cpp',
'../gm/hittestpath.cpp',
'../gm/imageblur.cpp',
'../gm/lighting.cpp',
'../gm/imagefiltersbase.cpp',
'../gm/lcdtext.cpp',
'../gm/linepaths.cpp',
'../gm/morphology.cpp',
'../gm/ninepatchstretch.cpp',
'../gm/nocolorbleed.cpp',
'../gm/patheffects.cpp',
'../gm/pathfill.cpp',
'../gm/pathreverse.cpp',
'../gm/points.cpp',
'../gm/poly2poly.cpp',
'../gm/quadpaths.cpp',
'../gm/samplerstress.cpp',
'../gm/shaderbounds.cpp',
'../gm/shadertext.cpp',
'../gm/shadows.cpp',
'../gm/shapes.cpp',
'../gm/simpleaaclip.cpp',
'../gm/strokefill.cpp',
'../gm/strokerects.cpp',
'../gm/strokes.cpp',
'../gm/tablecolorfilter.cpp',
'../gm/testimagefilters.cpp',
'../gm/texdata.cpp',
'../gm/tilemodes.cpp',
'../gm/tinybitmap.cpp',
'../gm/twopointradial.cpp',
'../gm/typeface.cpp',
'../gm/verttext.cpp',
'../gm/verttext2.cpp',
'../gm/xfermodes.cpp',
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
|
# -*- coding: utf-8 -*-
class Dog():
    """A simple model of a dog."""

    def __init__(self, name, age):
        """Record the dog's name and age on this instance."""
        self.name = name
        self.age = age

    def sit(self):
        """Announce that the dog sits on command."""
        message = self.name.title() + ' is now sitting'
        print(message)

    def roll_over(self):
        """Announce that the dog rolls over on command."""
        message = self.name.title() + ' rolled over!'
        print(message)
# Create a concrete dog: my_dog is an individual instance built from Dog.
my_dog = Dog('willie', 6)
print('My dogs name is ' + my_dog.name.title())  # attribute access: my_dog.name
print('My dog is ' + str(my_dog.age) + ' years old')
# Call the methods defined on the Dog class.
my_dog.sit()
my_dog.roll_over()
# Create a second, independent instance of the class.
your_dog = Dog('Richerd', 23)
print('My name is dog: ', your_dog.name)
print('My dog is ' + str(your_dog.age), 'years old')
your_dog.sit()
your_dog.roll_over()
|
import pygame
class Enemy(pygame.sprite.Sprite):
    """An enemy sprite that scrolls down the screen.

    Args:
        x, y: intended bottom-left spawn coordinates.
        image: path to the sprite image file.
        speed: downward pixels per move() call.
        bg_size: (width, height) of the background surface.
        music: path to the death sound effect.
    """
    def __init__(self, x, y, image, speed, bg_size, music):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(image).convert_alpha()
        self.rect = self.image.get_rect()
        # Clamp the spawn position inside the background. Bug fix: the
        # original used two independent `if`s, so the trailing `else` could
        # overwrite the first clamp; an if/elif/else chain applies exactly one.
        if self.rect.w > x:
            self.rect.left, self.rect.top = self.rect.w, y - self.rect.height
        elif x > bg_size[0] - self.rect.w:
            self.rect.left, self.rect.top = x - self.rect.w, y - self.rect.height
        else:
            self.rect.left, self.rect.top = x, y - self.rect.height
        self.speed = speed
        self.disappear = False
        self.bg_size = bg_size
        self.music = music
    def move(self):
        """Advance downward; flag for removal once fully below the screen."""
        self.rect.top += self.speed
        if self.bg_size[1] + self.rect.height < self.rect.bottom:
            self.disappear = True
    def dead(self):
        """Play the death sound.

        Bug fix: the original assigned the Sound object to self.dead,
        shadowing this method so a second call would crash; use a local.
        """
        sound = pygame.mixer.Sound(self.music)
        sound.play()
class Assistant:
    """Class for simple calls for the UI
    Attributes:
        assistant_inst (AssistantV2): AssistantV2 object that will call the API
        assistant_id (str): The user's assistant ID used for calling
        session_id (str): The session ID created with the AssistantV2 object
        intent (str): The intent of the user's most recent message
    """
    # Class-level defaults; instances shadow these after __init__.
    assistant_inst = None
    assistant_id = ""
    session_id = ""
    intent = ""

    def __init__(self, assistant, a_id, s_id):
        """Store the assistant object together with its assistant and session IDs.
        Args:
            assistant (AssistantV2): AssistantV2 object
            a_id (str): Assistant ID of the assistant object passed
            s_id (str): Session ID for the assistant object passed
        """
        self.assistant = assistant
        self.assistant_id = a_id
        self.session_id = s_id

    def message(self, msg):
        """Send *msg* to the assistant and return its text responses as a list.
        Args:
            msg (str): Message to send to the assistant
        """
        result = self.assistant.message(
            self.assistant_id,
            self.session_id,
            input={'text': msg},
            context={'metadata': {'deployment': 'myDeployment'}},
        ).get_result()
        # Remember the top-ranked intent, if the service detected any.
        detected = result['output']['intents']
        if detected:
            self.intent = detected[0]['intent']
        return [item['text']
                for item in result['output']['generic']
                if item['response_type'] == 'text']

    def getIntent(self):
        """Return the most recent intent stored by the assistant and clear it."""
        latest, self.intent = self.intent, None
        return latest
|
# Write mode: create/overwrite foo.txt with a sample line (was: manual
# open/close; `with` guarantees the file is closed even on error).
with open("foo.txt", "w") as fo:
    fo.write("joinx test python io stream")
# Read mode: read back at most 20 characters of the first line.
# (renamed `str` -> `line`: the original shadowed the builtin)
with open("foo.txt", "r+") as fo:
    line = fo.readline(20)
    print(line)
# Demonstrate tell()/seek(): read 3 chars, inspect the position, rewind, re-read.
with open("foo.txt", "r+") as fo:
    line = fo.readline(3)
    print("读取的字符", line)
    posistion = fo.tell()
    print("当前的文件读取的位置", posistion)
    # Reposition to the start of the file (offset 0 from the beginning).
    posistion = fo.seek(0, 0)
    line = fo.read(30)
    print("重新定位后的字符串", line)
import os
#os.rename("foo.txt","joinx.txt")
#fo=open("abc.jpg","w")
# Bug fix: the creation of abc.jpg above is commented out, so an unguarded
# os.remove() crashed with FileNotFoundError; same guard for mkdir.
if os.path.exists("abc.jpg"):
    os.remove("abc.jpg")
if not os.path.isdir("newdir"):
    os.mkdir("newdir")
from aiogram.types import Message
from aiogram.dispatcher.filters.builtin import CommandStart
from utils.utils import get_sticker_hello
from loader import dp
@dp.message_handler(CommandStart())
async def send_welcome(message: Message):
    """Handle /start: reply with a greeting sticker, then a text greeting."""
    sticker = get_sticker_hello()
    await message.answer_sticker(sticker)
    # Greeting text is user-facing Russian; username may be None for some
    # Telegram accounts — TODO confirm desired fallback.
    await message.answer(f'Привет, {message.from_user.username}')
from cotton.scm import SCM
from fabric import api as fab
from fabric.api import env
class Git(SCM):
    """Git implementation of the SCM interface, executed remotely via fabric."""
    def git(self, *commands):
        """Run `git <commands...>` on the remote host and return fab.run's result.

        Bug fix: the result of fab.run() was previously discarded, so
        _log_state() (and its public callers) always returned None.
        """
        commands = ' '.join(['git'] + list(commands))
        return fab.run(commands)
    def checkout(self, repository, checkout_to, ref=None):
        """Clone *repository* into *checkout_to*, optionally checking out *ref*."""
        self.git('clone', repository, checkout_to)
        if ref:
            with fab.cd(checkout_to):
                self.git('checkout', ref)
    def branch_name(self, repository, output_file=None, append=False):
        """Log/return the current branch of *repository*."""
        return self._log_state(command=['status | grep "On branch"'],
                               repository=repository,
                               output_file=output_file,
                               append=append)
    def revision(self, repository, output_file=None, append=False):
        """Log/return the HEAD commit hash of *repository*."""
        return self._log_state(command=['rev-parse HEAD'],
                               repository=repository,
                               output_file=output_file,
                               append=append)
    def tag_name(self, repository, output_file=None, append=False):
        """Log/return the most recent tag reachable from HEAD."""
        return self._log_state(command=['describe --tags'],
                               repository=repository,
                               output_file=output_file,
                               append=append)
    def _log_state(self, command, repository, output_file=None, append=False):
        """Run a git query inside *repository*, optionally redirecting to a file.

        NOTE: *command* is mutated in place (redirections are appended);
        the public callers above always pass fresh lists.
        """
        with fab.settings(fab.hide('warnings'), warn_only=True):
            stderr_redirect = ['2>', '/dev/null']
            command.extend(stderr_redirect)
            with fab.cd(repository):
                if output_file:
                    redirect = '>>' if append else '>'
                    command.extend([redirect, output_file])
                return self.git(*command)
|
from contextlib import contextmanager
import logging
import os
from typing import NamedTuple
from flask import current_app, g
import psycopg2
from psycopg2.pool import ThreadedConnectionPool
from psycopg2.extras import RealDictCursor
# Module-level connection pool, created lazily by setup().
pool = None
def setup():
    """Create the global psycopg2 connection pool from $DATABASE_URL.

    Must be called inside a Flask application context (uses current_app's
    logger). Raises KeyError if DATABASE_URL is not set.
    """
    global pool
    DATABASE_URL = os.environ['DATABASE_URL']
    current_app.logger.info(f"creating db connection pool")
    # 1 minimum / 100 maximum connections; TLS required by the server.
    pool = ThreadedConnectionPool(1, 100, dsn=DATABASE_URL, sslmode='require')
@contextmanager
def get_db_connection():
    """Yield a pooled connection, returning it to the pool on exit.

    Bug fix: getconn() is called before the try block — if it raises, the
    old code reached the finally clause with `connection` unbound and the
    resulting NameError masked the real error.
    """
    connection = pool.getconn()
    try:
        yield connection
    finally:
        pool.putconn(connection)
@contextmanager
def get_db_cursor(commit=False):
    """Yield a RealDictCursor on a pooled connection; commit on success if asked.

    The cursor is always closed; the connection goes back to the pool via
    get_db_connection().
    """
    with get_db_connection() as conn:
        cur = conn.cursor(cursor_factory=RealDictCursor)
        try:
            yield cur
            if commit:
                conn.commit()
        finally:
            cur.close()
class Puissance4:
    '''A game of Connect Four for 2 to 4 players on a resizable grid.'''
    SYMBOLES = ('.', '@', 'X', 'O', '+')  # index 0 = empty cell, 1..4 = players
    _JOUEURS_MIN = 2
    _JOUEURS_MAX = 4
    def __init__(self, largeur=7, hauteur=6, nb_joueurs=2):
        '''Initialize a game with an empty grid.

        Raises ValueError when the grid is smaller than 6 rows x 7 columns
        or the player count falls outside [_JOUEURS_MIN, _JOUEURS_MAX].
        '''
        if largeur < 7 or hauteur < 6:
            raise ValueError(f'Dimension minimum 6 lignes x 7 colonnes : {hauteur}x{largeur}')
        # Consistency fix: validate against the class constants instead of
        # repeating the literals 2 and 4 (the error message already used them).
        if nb_joueurs < Puissance4._JOUEURS_MIN or nb_joueurs > Puissance4._JOUEURS_MAX:
            raise ValueError(f'Le nombre de joueurs doit être entre' +
                             f' {Puissance4._JOUEURS_MIN} et {Puissance4._JOUEURS_MAX} : {nb_joueurs}')
        self._grille = [[0 for _ in range(largeur)] for _ in range(hauteur)]
        self._largeur = largeur
        self._hauteur = hauteur
        self._nb_joueurs = nb_joueurs
        self._joueur_courant = 1
        self._partie_finie = False
        self._gagnant = None
        self._nb_coups = 0
        # Number of pieces already stacked in each column.
        self._pions_par_colonne = [0] * largeur
    @property
    def largeur(self):
        '''Return the grid width (number of columns).'''
        return self._largeur
    @property
    def hauteur(self):
        '''Return the grid height (number of rows).'''
        return self._hauteur
    @property
    def nb_joueurs(self):
        '''Return the number of players.'''
        return self._nb_joueurs
    @property
    def joueur_courant(self):
        '''Return the current player's number (numbering starts at 1).'''
        return self._joueur_courant
    @property
    def partie_finie(self):
        '''Return True when the game is over.

        The game ends when a player aligns 4 pieces or the grid is full.
        '''
        return self._partie_finie
    @property
    def gagnant(self):
        '''Return the winning player's number.

        Returns None while the game is in progress or on a draw.
        '''
        return self._gagnant
    def jouer(self, colonne):
        '''Play a move in the given 1-based column and return a boolean.

        Returns False for an invalid move (game over, column out of range
        or full); otherwise drops a piece for the current player and
        returns True.
        '''
        if self.partie_finie or colonne <= 0 or colonne > self.largeur:
            return False
        col = colonne - 1
        # The lowest free row in this column (row 0 is the top).
        lgn = self.hauteur - 1 - self._pions_par_colonne[col]
        if lgn < 0:  # column is full
            return False
        self._grille[lgn][col] = self.joueur_courant
        self._pions_par_colonne[col] += 1
        self._nb_coups += 1
        if self._coup_gagnant(lgn, col):
            self._partie_finie = True
            self._gagnant = self.joueur_courant
        elif self._nb_coups == self.largeur * self.hauteur:
            # Grid full without a winner: draw.
            self._partie_finie = True
        else:
            self._joueur_courant = self.joueur_courant % self.nb_joueurs + 1
        return True
    def _coup_gagnant(self, lgn, col):
        '''Return True if the piece at the given cell belongs to a line of 4.'''
        joueur = self._grille[lgn][col]
        def est_valide(l, c):
            return (0 <= l < self.hauteur) and (0 <= c < self.largeur)
        def compter_pions(dl, dc):
            # Count same-player pieces in direction (dl, dc), excluding (lgn, col).
            l, c = lgn + dl, col + dc
            compte = 0
            while est_valide(l, c) and self._grille[l][c] == joueur:
                compte += 1
                l, c = l + dl, c + dc
            return compte
        # Horizontal, vertical and both diagonals; sum both directions + self.
        for dl, dc in ((0, 1), (1, 0), (1, 1), (1, -1)):
            if compter_pions(dl, dc) + compter_pions(-dl, -dc) + 1 == 4:
                return True
        return False
    def __str__(self):
        '''Return a printable view: column numbers, then one line per row.'''
        lst = [''.join(f'{str(i):2s}' for i in range(1, self.largeur + 1))]
        for row in self._grille:
            lst.append(''.join(f'{Puissance4.SYMBOLES[value]:2s}' for value in row))
        return '\n'.join(lst)
def jouer(largeur=7, hauteur=6, nb_joueurs=2):
    """Run an interactive console game of Puissance 4.

    Repeatedly prints the grid and prompts the current player for a
    1-based column until the game ends, then announces the winner or a
    draw. NOTE(review): int(input(...)) raises ValueError on non-numeric
    input; invalid column numbers are silently ignored by puiss.jouer().
    """
    puiss = Puissance4(largeur, hauteur, nb_joueurs)
    while not puiss.partie_finie:
        print()
        print(puiss)
        print()
        joueur = puiss.joueur_courant
        colonne = int(input(f'Joueur {joueur} ({puiss.SYMBOLES[joueur]}), choisissez une colonne : '))
        puiss.jouer(colonne)
    print()
    print('La partie est finie.')
    if puiss.gagnant:
        print(f'Bravo joueur {puiss.gagnant}, vous avez gagné !')
    else:
        print('Match nul.')
|
from MyMainPackage import some_main_script
from mymodule import my_func
from MyMainPackage.SubPackage import mysubscript
# Exercise one function from each imported module/package level.
some_main_script.report_main()
mysubscript.sub_report()
my_func()
import json
import time
from urllib import request as r
class GroupRequest():
    """Submit chunked group-search requests to a REST API and collect results.

    NOTE(review): the response schema used below ('code', 'status',
    'response', 'exception') is inferred from usage — confirm against the
    API documentation.
    """
    def __init__(self, token, params, url):
        # token: API auth token included in every request body
        # params: iterable of request chunks; one POST is made per chunk
        # url: base URL; 'search/group', 'status' and 'result' are appended
        self.token = token
        self.params = params
        self.base_url = url
        # NOTE(review): this attribute is never used; headers are re-added
        # per request below.
        self.headers = {"Content-Type": "application/json", "accept": "application/json"}
        self.task = ""
        self.result = []
    def group_request_native(self):
        """POST each chunk, retrying while the server answers code 429 (rate limit)."""
        for counter, chunk in enumerate(self.params):
            data = {"token": self.token, "request": chunk}
            data = json.dumps(data)
            data = data.encode()
            while True:
                req = r.Request(self.base_url + "search/group", method="POST")
                req.add_header("Content-Type", "application/json")
                req.add_header("accept", "application/json")
                res = r.urlopen(req, data=data)
                response = json.loads(res.read().decode("utf-8"))
                print(f"Chunk {int(counter) + 1} response status: {response['status']}")
                # Retry immediately on 429; any other code leaves the loop.
                if int(response["code"]) != 429:
                    break
            if response["code"] == 0:
                # Success: the server created an async task; poll it to completion.
                self.task = response["response"]["task"]
                print(f"Chunk {counter + 1}: created task {self.task}")
                self.check_task_status()
            else:
                print(f"Chunk {counter + 1} response exception: {response['exception']}")
    def check_task_status(self):
        """Poll the current task's status every 10 s until it reports completion."""
        data = {"token": self.token, "task": self.task}
        data = json.dumps(data)
        data = data.encode()
        while True:
            time.sleep(10)
            req = r.Request(self.base_url + "status", method="GET")
            req.add_header("Content-Type", "application/json")
            req.add_header("accept", "application/json")
            # NOTE(review): a GET carrying a request body is unusual — confirm
            # the API expects this.
            res = r.urlopen(req, data=data)
            response = json.loads(res.read().decode("utf-8"))
            if (response["code"] == 0) and (response["response"]["status"] == 0):
                print(f"Task {self.task} response status: {response['status']}")
                self.get_task_result()
                break
    def get_task_result(self):
        """Fetch the finished task's result and append it to self.result."""
        data = {"token": self.token, "task": self.task}
        data = json.dumps(data)
        data = data.encode()
        req = r.Request(self.base_url + "result", method="GET")
        req.add_header("Content-Type", "application/json")
        req.add_header("accept", "application/json")
        res = r.urlopen(req, data=data)
        response = json.loads(res.read().decode("utf-8"))
        if response["code"] == 0:
            self.result.append(response["response"]["result"])
            print(f"Task {self.task} done")
    def get_result(self):
        """Return the list of collected task results."""
        return self.result
import unittest
from src.tools.lotto.euromillions import euromillions_analysis
class EuromillionsAnalysisTestCase(unittest.TestCase):
    """Acceptance tests for the euromillions_analysis ball-frequency helpers."""
    # Twenty historical draws, newest first. Each row:
    # [draw_no, date, 5 main balls, 2 lucky stars], all as strings (CSV source).
    twenty_draws = [['1379', '08-Dec-2020', '1', '4', '21', '24', '46', '2', '12'],
                    ['1378', '04-Dec-2020', '14', '20', '27', '34', '38', '1', '11'],
                    ['1377', '01-Dec-2020', '14', '20', '29', '47', '49', '4', '12'],
                    ['1376', '27-Nov-2020', '2', '5', '8', '14', '16', '8', '9'],
                    ['1375', '24-Nov-2020', '25', '33', '38', '42', '50', '8', '12'],
                    ['1374', '20-Nov-2020', '28', '29', '39', '48', '50', '5', '7'],
                    ['1373', '17-Nov-2020', '16', '19', '25', '30', '44', '2', '6'],
                    ['1372', '13-Nov-2020', '1', '5', '17', '28', '31', '1', '10'],
                    ['1371', '10-Nov-2020', '3', '19', '29', '32', '38', '5', '12'],
                    ['1370', '06-Nov-2020', '7', '12', '37', '40', '50', '1', '2'],
                    ['1369', '03-Nov-2020', '5', '7', '18', '20', '30', '6', '7'],
                    ['1368', '30-Oct-2020', '12', '16', '20', '21', '28', '3', '9'],
                    ['1367', '27-Oct-2020', '13', '15', '28', '32', '44', '3', '12'],
                    ['1366', '23-Oct-2020', '10', '15', '19', '21', '23', '3', '12'],
                    ['1365', '20-Oct-2020', '5', '6', '15', '37', '42', '3', '4'],
                    ['1364', '16-Oct-2020', '15', '33', '38', '40', '50', '3', '6'],
                    ['1363', '13-Oct-2020', '5', '14', '38', '41', '46', '1', '10'],
                    ['1362', '09-Oct-2020', '11', '15', '35', '41', '50', '5', '8'],
                    ['1361', '06-Oct-2020', '4', '21', '36', '41', '47', '9', '11'],
                    ['1360', '02-Oct-2020', '6', '12', '15', '40', '45', '3', '9']]
    def test_count_ball_played_in_games_acceptance_test(self):
        # given: expected frequency of each main ball (1-50) over the 10 newest draws
        expected_result = {1: 2, 2: 1, 3: 1, 4: 1, 5: 2, 6: 0, 7: 1, 8: 1, 9: 0, 10: 0, 11: 0, 12: 1, 13: 0, 14: 3,
                           15: 0, 16: 2, 17: 1, 18: 0, 19: 2, 20: 2, 21: 1, 22: 0, 23: 0, 24: 1, 25: 2, 26: 0, 27: 1,
                           28: 2, 29: 3, 30: 1, 31: 1, 32: 1, 33: 1, 34: 1, 35: 0, 36: 0, 37: 1, 38: 3, 39: 1, 40: 1,
                           41: 0, 42: 1, 43: 0, 44: 1, 45: 0, 46: 1, 47: 1, 48: 1, 49: 1, 50: 3}
        # when
        result = euromillions_analysis.count_ball_played_in_games(self.twenty_draws[0:10])
        # debug
        print(result)
        # then: spot-check a few balls, then compare the full mapping
        self.assertEqual(expected_result.get(12), result.get(12))
        self.assertEqual(expected_result.get(50), result.get(50))
        self.assertEqual(expected_result.get(14), result.get(14))
        self.assertEqual(expected_result.get(1), result.get(1))
        self.assertEqual(expected_result, result)
    def test_count_ball_played_between_games_acceptance_test_with_single_hit(self):
        # given: gap histogram for ball 50 (key 99 = never seen again window)
        expected_result = {1: 1, 2: 1, 3: 0, 4: 1, 5: 1, 6: 1, 7: 0, 8: 0, 9: 0, 10: 0, 99: 0}
        # when
        result = euromillions_analysis.count_ball_played_between_games(50, self.twenty_draws)
        # debug
        print(result)
        # then
        self.assertEqual(expected_result, result)
    def test_count_ball_played_between_games_acceptance_test_with_first_and_99_hit(self):
        # given: ball 4 appears in the first draw and then only beyond the window (99)
        expected_result = {1: 1, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 99: 1}
        # when
        result = euromillions_analysis.count_ball_played_between_games(4, self.twenty_draws)
        # debug
        print(result)
        # then
        self.assertEqual(expected_result, result)
    def test_count_ball_played_between_games_acceptance_test_with_multi_hit(self):
        # given: ball 20 recurs at several different gap lengths
        expected_result = {1: 2, 2: 1, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 1, 9: 0, 10: 0, 99: 0}
        # when
        result = euromillions_analysis.count_ball_played_between_games(20, self.twenty_draws)
        # debug
        print(result)
        # then
        self.assertEqual(expected_result, result)
import os
import json
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Optional per-deployment overrides: sigec/mysettings.json (absent in dev).
try:
    with open(os.path.join(BASE_DIR, "sigec", "mysettings.json"), encoding='utf-8') as f:
        USER_SETTINGS = json.load(f)
except (OSError, ValueError):
    # Bug fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    # OSError covers a missing/unreadable file; json.JSONDecodeError (a
    # ValueError subclass) covers malformed JSON.
    USER_SETTINGS = {}
VERSION = "1.9.12"
'''
VERSION: 1 - SIGEC FUNCTIONS
RELEASE: 9 - SAAS
FIX: 12 - OFFER GENERATOR LINK
'''
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p3o3or6fsa-%!!go0=iae9=+c&zl3l@9(2p61$&5r*5e7fk&(1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = USER_SETTINGS.get('DEBUG', True)
ALLOWED_HOSTS = USER_SETTINGS.get('ALLOWED_HOSTS', ['*'])
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project-local apps
    'apps.data',
    'apps.cotizaciones',
    'apps.reparaciones',
    'apps.saas',
    # ... other apps
    'dynamic_raw_id',
    'crispy_forms',
    #'admin_footer', #django-admin-footer
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sigec.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'sigec.wsgi.application'
# ADMIN_FOOTER_DATA = {
#     'site_url': 'http://sv-pruebas/sigec',
#     'site_name': 'SIGeC',
#     'period': '{}'.format(datetime.now().year),
#     'version': 'v{} - '.format(VERSION)
# }
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# If a database config is present in the user settings file, use it;
# otherwise fall back to the default local SQLite database.
DATABASES = USER_SETTINGS.get('DATABASES') or {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = USER_SETTINGS.get('LANGUAGE_CODE', 'es-ar')
TIME_ZONE = USER_SETTINGS.get('TIME_ZONE', 'America/Argentina/Buenos_Aires')
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
    #'/var/www/static/',
]
# Collected static files and user uploads live beside the project directory.
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'statics_pub')
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'media_uploads')
MONEDA_DEFAULT = 'U$D'  # default currency label for quotes
# Portability fix: build the CSV import paths with os.path.join instead of
# hard-coded Windows '\\' separators so they also resolve on POSIX hosts
# (unchanged on Windows, where join uses '\\').
FILE_CLIENTES = os.path.join(BASE_DIR, "import", "clientes.csv")
FILE_PRODUCTOS = os.path.join(BASE_DIR, "import", "productos.csv")
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
#FOR SAAS OFFERS - porcentual values
FACTOR_TRANSFER_PRICE = 1.51
UE_TO_USD = 1.11
MANTENIMIENTO_ANUAL = 1.18
IMPLEMENTACION = 0.25
COMISION_VENTAS = 0.05
PORCENTAJE_COSTO = 20 #porcentual value
# Choice tuples used by the SAAS offer forms/models.
FINANCING = (
    ('36', '36 meses'),
    ('48', '48 meses'),
)
HARDWARE = (
    ('p', 'SPEC'),
    ('t', 'Terceros'),
)
SELLER = (
    ('0', 'End User'),
    ('1', 'Partner'),
    ('2', 'Mayorista'),
)
PRICING_MANAGEMENT = (
    ('vf', 'Valor fijo'),
    ('vm', 'Valor fijo Mensual'),
    ('rp', 'Rangos de precio'),
    ('rm', 'Rangos de precio Mensual'),
    ('pu', 'Variable por cantidad'),
    ('pm', 'Variable por cantidad Mensual'),
)
|
import os, re, argparse
import torch.optim as optim
import numpy as np
import pandas as pd
from gensim.models import Word2Vec
from tqdm import tqdm
from sklearn.utils import compute_class_weight
from DeepLineDP_model import *
from my_util import *
torch.manual_seed(0)  # fix the seed so weight init / shuffling is reproducible
arg = argparse.ArgumentParser()
arg.add_argument('-dataset',type=str, default='activemq', help='software project name (lowercase)')
arg.add_argument('-batch_size', type=int, default=32)
arg.add_argument('-num_epochs', type=int, default=10)
arg.add_argument('-embed_dim', type=int, default=50, help='word embedding size')
arg.add_argument('-word_gru_hidden_dim', type=int, default=64, help='word attention hidden size')
arg.add_argument('-sent_gru_hidden_dim', type=int, default=64, help='sentence attention hidden size')
arg.add_argument('-word_gru_num_layers', type=int, default=1, help='number of GRU layer at word level')
arg.add_argument('-sent_gru_num_layers', type=int, default=1, help='number of GRU layer at sentence level')
arg.add_argument('-dropout', type=float, default=0.2, help='dropout rate')
arg.add_argument('-lr', type=float, default=0.001, help='learning rate')
arg.add_argument('-exp_name',type=str,default='')
args = arg.parse_args()
# model setting
batch_size = args.batch_size
num_epochs = args.num_epochs
max_grad_norm = 5  # gradient clipping threshold
embed_dim = args.embed_dim
word_gru_hidden_dim = args.word_gru_hidden_dim
sent_gru_hidden_dim = args.sent_gru_hidden_dim
word_gru_num_layers = args.word_gru_num_layers
sent_gru_num_layers = args.sent_gru_num_layers
word_att_dim = 64
sent_att_dim = 64
use_layer_norm = True
dropout = args.dropout
lr = args.lr
save_every_epochs = 1
exp_name = args.exp_name
max_train_LOC = 900  # cap on lines (sentences) per file during training
# Output locations for predictions, checkpoints and preprocessed ground truth.
prediction_dir = '../output/prediction/DeepLineDP/'
save_model_dir = '../output/model/DeepLineDP/'
file_lvl_gt = '../datasets/preprocessed_data/'
# Filled by train_model() with class weights keyed 'clean'/'defect'.
weight_dict = {}
def get_loss_weight(labels):
    '''
    input
        labels: a PyTorch tensor that contains labels
    output
        weight_tensor: a PyTorch tensor that contains weight of defect/clean class
    '''
    label_list = labels.cpu().numpy().squeeze().tolist()
    # Bug fix: for a batch of size 1, squeeze() collapses to a 0-d array and
    # tolist() returns a plain number instead of a list; normalize so the
    # comprehension below always iterates.
    if not isinstance(label_list, list):
        label_list = [label_list]
    # Label 0 = clean, anything else = defect; weights come from the
    # module-level weight_dict populated by train_model().
    weight_list = [
        weight_dict['clean'] if lab == 0 else weight_dict['defect']
        for lab in label_list
    ]
    weight_tensor = torch.tensor(weight_list).reshape(-1, 1).cuda()
    return weight_tensor
def train_model(dataset_name):
    '''
    Train DeepLineDP's hierarchical attention network on one software project.
    input
        dataset_name: software project name (key of all_train_releases)
    side effects
        saves a checkpoint every save_every_epochs epochs under save_model_dir
        and writes a per-epoch train/valid loss CSV; resumes automatically
        from the newest checkpoint if one already exists.
    '''
    loss_dir = '../output/loss/DeepLineDP/'
    actual_save_model_dir = save_model_dir+dataset_name+'/'
    if not exp_name == '':
        actual_save_model_dir = actual_save_model_dir+exp_name+'/'
        # keep the experiment name as its own directory component so the CSV
        # file name below is not concatenated directly onto the experiment name
        loss_dir = loss_dir + exp_name + '/'
    if not os.path.exists(actual_save_model_dir):
        os.makedirs(actual_save_model_dir)
    if not os.path.exists(loss_dir):
        os.makedirs(loss_dir)
    train_rel = all_train_releases[dataset_name]
    valid_rel = all_eval_releases[dataset_name][0]
    train_df = get_df(train_rel)
    valid_df = get_df(valid_rel)
    train_code3d, train_label = get_code3d_and_label(train_df, True)
    valid_code3d, valid_label = get_code3d_and_label(valid_df, True)
    # balanced class weights for the imbalanced defect data: the defect
    # (minority) class receives the larger weight
    sample_weights = compute_class_weight(class_weight = 'balanced', classes = np.unique(train_label), y = train_label)
    weight_dict['defect'] = np.max(sample_weights)
    weight_dict['clean'] = np.min(sample_weights)
    w2v_dir = get_w2v_path()
    word2vec_file_dir = os.path.join(w2v_dir,dataset_name+'-'+str(embed_dim)+'dim.bin')
    word2vec = Word2Vec.load(word2vec_file_dir)
    print('load Word2Vec for',dataset_name,'finished')
    word2vec_weights = get_w2v_weight_for_deep_learning_models(word2vec, embed_dim)
    vocab_size = len(word2vec.wv.vocab) + 1 # for unknown tokens
    x_train_vec = get_x_vec(train_code3d, word2vec)
    x_valid_vec = get_x_vec(valid_code3d, word2vec)
    # cap the number of lines per file to bound padding and memory usage
    max_sent_len = min(max([len(sent) for sent in (x_train_vec)]), max_train_LOC)
    train_dl = get_dataloader(x_train_vec,train_label,batch_size,max_sent_len)
    valid_dl = get_dataloader(x_valid_vec, valid_label,batch_size,max_sent_len)
    model = HierarchicalAttentionNetwork(
        vocab_size=vocab_size,
        embed_dim=embed_dim,
        word_gru_hidden_dim=word_gru_hidden_dim,
        sent_gru_hidden_dim=sent_gru_hidden_dim,
        word_gru_num_layers=word_gru_num_layers,
        sent_gru_num_layers=sent_gru_num_layers,
        word_att_dim=word_att_dim,
        sent_att_dim=sent_att_dim,
        use_layer_norm=use_layer_norm,
        dropout=dropout)
    model = model.cuda()
    model.sent_attention.word_attention.freeze_embeddings(False)
    optimizer = optim.Adam(params=filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
    criterion = nn.BCELoss()
    checkpoint_files = os.listdir(actual_save_model_dir)
    if '.ipynb_checkpoints' in checkpoint_files:
        checkpoint_files.remove('.ipynb_checkpoints')
    total_checkpoints = len(checkpoint_files)
    if total_checkpoints == 0:
        # no model is trained yet: initialize embeddings from pre-trained Word2Vec
        model.sent_attention.word_attention.init_embeddings(word2vec_weights)
        current_checkpoint_num = 1
        train_loss_all_epochs = []
        val_loss_all_epochs = []
    else:
        # resume from the newest checkpoint (raw string avoids the invalid
        # '\d' escape warning)
        checkpoint_nums = [int(re.findall(r'\d+',s)[0]) for s in checkpoint_files]
        current_checkpoint_num = max(checkpoint_nums)
        checkpoint = torch.load(actual_save_model_dir+'checkpoint_'+str(current_checkpoint_num)+'epochs.pth')
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        loss_df = pd.read_csv(loss_dir+dataset_name+'-loss_record.csv')
        train_loss_all_epochs = list(loss_df['train_loss'])
        val_loss_all_epochs = list(loss_df['valid_loss'])
        current_checkpoint_num = current_checkpoint_num+1 # go to next epoch
        print('continue training model from epoch',current_checkpoint_num)
    for epoch in tqdm(range(current_checkpoint_num,num_epochs+1)):
        train_losses = []
        val_losses = []
        model.train()
        for inputs, labels in train_dl:
            inputs_cuda, labels_cuda = inputs.cuda(), labels.cuda()
            output, _, __, ___ = model(inputs_cuda)
            # per-sample class weights for this batch
            weight_tensor = get_loss_weight(labels)
            criterion.weight = weight_tensor
            # reshape(-1,1) instead of (batch_size,1): the final batch of the
            # dataloader may be smaller than batch_size
            loss = criterion(output, labels_cuda.reshape(-1,1))
            train_losses.append(loss.item())
            # clear the gradients of the previous batch; without this call the
            # gradients of every batch silently accumulate
            optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()
            torch.cuda.empty_cache()
        train_loss_all_epochs.append(np.mean(train_losses))
        with torch.no_grad():
            # validation uses the unweighted loss
            criterion.weight = None
            model.eval()
            for inputs, labels in valid_dl:
                inputs, labels = inputs.cuda(), labels.cuda()
                output, _, __, ___ = model(inputs)
                val_loss = criterion(output, labels.reshape(-1,1))
                val_losses.append(val_loss.item())
            val_loss_all_epochs.append(np.mean(val_losses))
        if epoch % save_every_epochs == 0:
            print(dataset_name,'- at epoch:',str(epoch))
            # single save path; only the file name depends on exp_name
            if exp_name == '':
                checkpoint_path = actual_save_model_dir+'checkpoint_'+str(epoch)+'epochs.pth'
            else:
                checkpoint_path = actual_save_model_dir+'checkpoint_'+exp_name+'_'+str(epoch)+'epochs.pth'
            torch.save({
                        'epoch': epoch,
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict()
                        },
                       checkpoint_path)
        loss_df = pd.DataFrame()
        loss_df['epoch'] = np.arange(1,len(train_loss_all_epochs)+1)
        loss_df['train_loss'] = train_loss_all_epochs
        loss_df['valid_loss'] = val_loss_all_epochs
        loss_df.to_csv(loss_dir+dataset_name+'-loss_record.csv',index=False)
# script entry point: train on the project selected via the -dataset flag
dataset_name = args.dataset
train_model(dataset_name)
import uuid
import datetime
from managment.tasks import Tasks
from managment.instances_repository import InstancesRepository
class Instances(object):
    """Facade over InstancesRepository that also records a creation task."""
    def __init__(self):
        self.instances_repository = InstancesRepository()
    def get_all_instances(self):
        """Return every stored instance."""
        return self.instances_repository.get_all_instances()
    def get_instance(self, name):
        """Return the instance identified by its name."""
        return self.instances_repository.get_instance(name)
    def get_instance_by_uuid(self, uuid):
        # parameter intentionally keeps the historical name `uuid` (callers may
        # pass it by keyword) even though it shadows the uuid module locally
        return self.instances_repository.get_instance_by_uuid(uuid)
    def create_instance(self, instance):
        """Assign a fresh UUID / pending status, record a creation task and
        persist the instance."""
        instance['uuid'] = str(uuid.uuid4())
        instance['status'] = 'pending'
        # the task result is intentionally not used here; the task system
        # tracks the creation independently
        self._create_task("Create a new instance %s using driver %s"
                          % (instance['name'], instance['driver']),
                          instance['uuid'])
        return self.instances_repository.create_instance(instance)
    def delete_instance(self, name):
        """Remove the instance identified by its name."""
        return self.instances_repository.delete_instance(name)
    def update_instance(self, name, instance):
        """Replace the stored instance identified by name."""
        return self.instances_repository.update_instance(name, instance)
    def _create_task(self, description, instance_uuid):
        """Record a 'create_instance' task; return the task UUID on success,
        or the raw result dict on failure."""
        task = {
            'description': description,
            'status': 'pending',
            # NOTE(review): only the time-of-day is stored, not the full date —
            # presumably intentional, but worth confirming
            'date': datetime.datetime.now().time().isoformat(),
            'status_history': [],
            'log': "received in tasks",
            'type': "create_instance",
            'object_uuid': instance_uuid,
        }
        result = Tasks().create_task(task)
        if result['status'] == 'ok':
            return result['uuid']
        return result
import re
from pandas import concat
from .helpers.utils import collect_codes_and_names
from . import bcb, ipea
def get_series(*codes, start=None, end=None, **kwargs):
    """
    Get multiple series from both BCB or IPEA.

    Purely numeric codes are fetched from BCB; any other
    code is treated as an IPEA series code.

    Parameters
    ----------
    codes : dict, str, int
        Dictionary like {"name1": cod1, "name2": cod2}
        or a bunch of code numbers, e.g. cod1, cod2.
    start : str, optional
        Initial date, month or day first.
    end : str, optional
        End date, month or day first.
    **kwargs
        Passed to pandas.concat.

    Returns
    -------
    pandas.DataFrame
        A DataFrame with one column per requested series.

    Examples
    --------
    >>> seriesbr.get_series("BM12_CRLIN12", 20786, start="2015", end="2015")
    """
    parsed_codes, parsed_names = collect_codes_and_names(*codes)

    def _fetch_one(code, name):
        # an all-digit code identifies a BCB series, anything else is IPEA
        if re.search(r"^\d+$", str(code)):
            return bcb.get_serie(code, name, start, end)
        return ipea.get_serie(code, name, start, end)

    frames = [_fetch_one(code, name) for code, name in zip(parsed_codes, parsed_names)]
    return concat(frames, axis="columns", **kwargs)
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Usuario(User):
    """Extra profile fields on top of django.contrib.auth.models.User."""
    observacion = models.CharField(max_length=20, default="no especificado", null=False)
    # NOTE(review): an IntegerField cannot preserve leading zeros or a '+'
    # prefix; a CharField would be safer for phone numbers, but the column
    # type is kept to preserve the existing schema. `max_length` was removed
    # because Django ignores it on IntegerField (system check W122), and the
    # odd literal `00` is written as plain 0 (same value).
    telefono = models.IntegerField(default=0, null=False)
    domicilio = models.CharField(max_length=20, default="no especificado", null=False)
    # counter of pending notifications for this user
    notificaciones = models.IntegerField(default=0)
    class Meta:
        # disable Django's auto-generated add/change/delete/view permissions;
        # only the custom ones below exist
        default_permissions = ()
        permissions = (
            ('gestion_usuario', 'gestion de usuario'),
            ('consulta_usuario', 'consultar usuarios'),
            ('modificar_usuario', 'modificar usuarios'),
            ('eliminar_usuario', 'eliminar usuarios'),
            ('crear_usuario', 'crear usuarios'),
            ('listar_usuario', 'listar usuarios'),
        )
|
import os
import time
import torch
import torch.nn.functional
from torch import nn, Tensor
import torchvision.models as models
import math
class Model(nn.Module):
    """ResNet-18 backbone with a 20-way multi-label classification head."""

    def __init__(self):
        super().__init__()
        # ImageNet-pretrained backbone; its 1000-dim output feeds a linear
        # head producing one logit per class
        self.resnet18 = models.resnet18(pretrained=True)
        self.fc = nn.Linear(1000, 20)

    def forward(self, images: Tensor) -> Tensor:
        """Return raw (unnormalized) logits of shape (batch, 20)."""
        features = self.resnet18(images)
        return self.fc(features)

    def loss(self, logits: Tensor, multilabels: Tensor) -> Tensor:
        """Multi-label binary cross-entropy between logits and 0/1 targets.

        binary_cross_entropy_with_logits fuses the sigmoid with the BCE in a
        numerically stable way (avoids log(0) for large-magnitude logits),
        replacing the previous explicit sigmoid + binary_cross_entropy pair.
        """
        return torch.nn.functional.binary_cross_entropy_with_logits(logits, multilabels)

    def save(self, path_to_checkpoints_dir: str, step: int) -> str:
        """Save the state dict as model-<timestamp>-<step>.pth; return the path."""
        path_to_checkpoint = os.path.join(
            path_to_checkpoints_dir,
            'model-{:s}-{:d}.pth'.format(time.strftime('%Y%m%d%H%M'), step))
        torch.save(self.state_dict(), path_to_checkpoint)
        return path_to_checkpoint

    def load(self, path_to_checkpoint: str) -> 'Model':
        """Load weights in place and return self for chaining."""
        self.load_state_dict(torch.load(path_to_checkpoint))
        return self
|
from klampt import *
from klampt import vectorops,so3,se3
from klampt import resource
import math
#import matrixops
# Toggle interactive visualization of the calibration frames; the vis /
# coordinates modules are only imported when it is enabled.
DO_VISUALIZATION = 1
if DO_VISUALIZATION:
    from klampt import vis
    from klampt.model import coordinates
def point_fit_rotation_3d(apts,bpts):
    """Computes a 3x3 rotation matrix that rotates the points apts to
    minimize the distance to bpts.  Return value is the klampt.so3
    element that minimizes the sum of squared errors ||R*ai-bi||^2."""
    assert len(apts)==len(bpts)
    import numpy as np
    # correlation matrix C = sum_i a_i b_i^T, built in one vectorized product
    A = np.asarray(apts, dtype=float)
    B = np.asarray(bpts, dtype=float)
    C = np.dot(A.T, B)
    # Orthogonal Procrustes: with C = U W Vt the orthogonal factor is U Vt
    U, W, Vt = np.linalg.svd(C)
    R = np.dot(U, Vt)
    if np.linalg.det(R) < 0:
        # mirror solution; only recoverable when the smallest singular value
        # is (near) zero, in which case the corresponding axis is flipped
        if abs(W[2]) > 1e-2:
            raise RuntimeError("point_fit_rotation_3d: Uhh... what do we do? SVD of rotation doesn't have a zero singular value")
        # negate the last row of Vt (= last column of V)
        Vt[2,:] *= -1
        R = np.dot(U, Vt)
    assert np.linalg.det(R) > 0
    return R.flatten().tolist()
def point_fit_xform_3d(apts,bpts):
    """Finds a 3D rigid transform that maps the list of points apts to the
    list of points bpts. Return value is a klampt.se3 element that
    minimizes the sum of squared errors ||T*ai-bi||^2.
    """
    assert len(apts)==len(bpts)
    # centroids of each point set.
    # (bug fix: reduce takes the function first; the original called
    # reduce(apts, vectorops.add), which is a TypeError)
    ca = vectorops.div(reduce(vectorops.add,apts),len(apts))
    cb = vectorops.div(reduce(vectorops.add,bpts),len(bpts))
    arel = [vectorops.sub(a,ca) for a in apts]
    brel = [vectorops.sub(b,cb) for b in bpts]
    R = point_fit_rotation_3d(arel,brel)
    #R minimizes sum_{i=1,...,n} ||R(ai-ca) - (bi-cb)||^2
    # translation aligns the rotated centroid of apts with the centroid of
    # bpts; this is plain vector subtraction
    # (bug fix: was so3.sub, but cb and R*ca are 3-vectors)
    t = vectorops.sub(cb,so3.apply(R,ca))
    return (R,t)
class VectorStats:
    """Running (pseudo-count weighted) mean of fixed-length vectors."""
    def __init__(self,zero=[0.0],prior=0.0):
        """zero: template vector giving the dimension and the initial average.
        prior: pseudo-count regularizing the mean, either a scalar applied to
        every element or a per-element list."""
        self.average = list(zero)
        if hasattr(prior,'__iter__'):
            assert len(zero)==len(prior)
            self.count = list(prior)
        else:
            self.count = [prior]*len(zero)
    def add(self,value,weight=1.0):
        """Accumulates a new value, with an optional weight factor. The
        weight can either be a float or a list. In the latter case it is
        a per-element weight."""
        assert len(value) == len(self.average)
        if not hasattr(weight,'__iter__'):
            weight = [weight]*len(value)
        updated_count = vectorops.add(self.count,weight)
        # incremental mean update: avg' = (1-w)*avg + w*value with w = 1/count'
        inv_count = [1.0/c for c in updated_count]
        self.average = [(1.0-w)*a + w*b for (a,b,w) in zip(self.average,value,inv_count)]
        self.count = updated_count
class TransformStats:
    """Running (pseudo-count weighted) average of klampt se3 transforms."""
    def __init__(self,zero=se3.identity(),prior=0.0):
        """zero: initial average transform.  prior: pseudo-count regularizing
        the average toward the initial transform."""
        self.average = zero
        self.count = prior
    def add(self,value,weight=1.0):
        """Accumulates a new value, with an optional weight factor.
        Only scalar weights are currently supported; per-axis 6-element
        weights raise NotImplementedError."""
        assert len(value)==2 and len(value[0])==9 and len(value[1])==3,"Value must be a klampt.se3 type"
        if hasattr(weight,'__iter__'):
            raise NotImplementedError("Non-uniform transform weights")
        total = self.count + weight
        # blend toward the new sample along the se3 interpolation curve
        self.average = se3.interpolate(self.average,value,1.0/total)
        self.count = total
def calibrate_xform_camera(camera_link_transforms,
marker_link_transforms,
marker_observations,
marker_ids,
observation_relative_errors=None,
camera_initial_guess=None,
marker_initial_guess=None,
regularizationFactor=0,
maxIters=100,
tolerance=1e-7):
"""Single camera calibration function for a camera and markers on some
set of rigid bodies.
Given body transforms and a list of estimated calibration marker observations
in the camera frame, estimates both the camera transform relative to the
camera link as well as the marker transforms relative to their links.
M: is the set of m markers. By default there is at most one marker per link.
Markers can either be point markers (e.g., a mocap ball), or transform
markers (e.g., an AR tag or checkerboard pattern).
O: is the set of n observations, consisting of a reading (Tc_i,Tm_i,o_i,l_i)
where Tc_i is the camera link's transform, Tm_i is the marker link's
transform, o_i is the reading which consists of either a point or transform
estimate in the camera frame, and l_i is the ID of the marker (by default,
just its link)
Output: a tuple (err,Tc,marker_dict) where err is the norm of the
reconstruction residual, Tc is the estimated camera transform relative to the
camera's link, and marker_dict is a dict mapping each marker id to its
estimated position or transform on the marker's link.
Arguments:
- camera_link: an integer index or a RobotModelLink instance on which
the camera lies.
- calibration_configs: a list of the RobotModel configurations q_1,...,q_n
that generated the marker_observations list.
- marker_observations: a list of estimated positions or transformations
of calibration markers o_1,...,o_n, given in the camera's reference
frame (z forward, x right, y down).
If o_i is a 3-vector, the marker is considered to be a point marker.
If a se3 element (R,t) is given, the marker is considered to be
a transform marker. You may not mix point and transform observations
for a single marker ID.
- marker_ids: a list of marker ID #'s l_1,...,l_n corresponding to
each observation, e.g., the link index on which each marker lies.
- observation_relative_errors: if you have an idea of the magnitude of
each observation error, it can be placed into this list. Must be
a list of n floats, 3-lists (point markers), or 6-lists (transform
markers). By default, errors will be set proportionally to the
observed distance between the camera and marker.
- camera_initial_guess: if not None, an initial guess for the camera transform
- marker_initial_guess: if not None, a dictionary containing initial guesses
for the marker transforms
- regularizationFactor: if nonzero, the optimization penalizes deviation
of the estimated camera transform and marker transforms from zero
proportionally to this factor.
- maxIters: maximum number of iterations for optimization.
- tolerance: optimization convergence tolerance. Stops when the change of
estimates falls below this threshold
"""
if len(camera_link_transforms) != len(marker_ids):
raise ValueError("Must provide the same number of marker IDs as camera transforms")
if len(marker_link_transforms) != len(marker_ids):
raise ValueError("Must provide the same number of marker IDs as marker transforms")
if len(marker_observations) != len(marker_ids):
raise ValueError("Must provide the same number of marker observations as marker transforms")
#get all unique marker ids
marker_id_list = list(set(marker_ids))
#detect marker types
marker_types = dict((v,None) for v in marker_id_list)
for i,(obs,id) in enumerate(zip(marker_observations,marker_ids)):
if len(obs)==3:
if marker_types[id] == 't':
raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
marker_types[id] = 'p'
elif len(obs)==2 and len(obs[0])==9 and len(obs[1])==3:
if marker_types[id] == 'p':
raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
marker_types[id] = 't'
else:
raise ValueError("Invalid observation for observation #%d, id %s\n"%(i,str(id)))
n = len(marker_observations)
m = len(marker_id_list)
#get all the observation weights
observation_weights = []
if observation_relative_errors is None:
#default weights: proportional to distance
for obs in marker_observations:
if len(obs) == 3:
observation_weights.append(1.0/vectorops.norm(obs))
else:
observation_weights.append(1.0/vectorops.norm(obs[1]))
observation_weights = [1.0]*len(observation_weights)
else:
if len(observation_relative_errors) != n:
raise ValueError("Invalid length of observation errors")
for err in observation_relative_errors:
if hasattr(err,'__iter__'):
observation_weights.append([1.0/v for v in err])
else:
observation_weights.append(1.0/err)
#initial guesses
if camera_initial_guess == None:
camera_initial_guess = se3.identity()
if any(v == 't' for v in marker_types.itervalues()):
#estimate camera rotation from point estimates because rotations are more prone to initialization failures
point_observations = []
marker_point_rel = []
for i,obs in enumerate(marker_observations):
if len(obs)==2:
point_observations.append(obs[1])
else:
point_observations.append(obs)
marker_point_rel.append(se3.mul(se3.inv(camera_link_transforms[i]),marker_link_transforms[i])[1])
camera_initial_guess = (point_fit_rotation_3d(point_observations,marker_point_rel),[0.0]*3)
print "Estimated camera rotation from points:",camera_initial_guess
if marker_initial_guess == None:
marker_initial_guess = dict((l,(se3.identity() if marker_types[l]=='t' else [0.0]*3)) for l in marker_id_list)
else:
marker_initial_guess = marker_initial_guess.copy()
for l in marker_id_list:
if l not in marker_initial_guess:
marker_initial_guess[l] = (se3.identity() if marker_types[l]=='t' else [0.0]*3)
camera_transform = camera_initial_guess
marker_transforms = marker_initial_guess.copy()
if DO_VISUALIZATION:
rgroup = coordinates.addGroup("calibration")
rgroup.addFrame("camera link",worldCoordinates=camera_link_transforms[-1])
rgroup.addFrame("marker link",worldCoordinates=marker_link_transforms[-1])
rgroup.addFrame("camera estimate",parent="camera link",relativeCoordinates=camera_transform)
rgroup.addFrame("marker estimate",parent="marker link",relativeCoordinates=marker_transforms.values()[0])
for i,obs in enumerate(marker_observations):
rgroup.addFrame("obs"+str(i)+" estimate",parent="camera estimate",relativeCoordinates=obs)
vis.add("coordinates",rgroup)
vis.dialog()
point_observations_only = all(marker_types[marker] == 'p' for marker in marker_id_list)
xform_observations_only = all(marker_types[marker] == 't' for marker in marker_id_list)
if not point_observations_only and not xform_observations_only:
raise NotImplementedError("Can't calibrate camera from mixed point/transform markers yet")
for iters in range(maxIters):
#attempt to minimize the error on the following over all observations i
#camera_link_transform(q_i)*camera_transform*observation_i = marker_link_transform(l_i,q_i)*marker_transform(l_i)
#first, we'll assume the camera transform is fixed and then optimize the marker transforms.
#then, we'll assume the marker transforms are fixed and then optimize the camera transform.
#finally we'll check the error to detect convergence
#1. Estimate marker transforms from current camera transform
new_marker_transforms = dict((l,(TransformStats(zero=marker_initial_guess[l],prior=regularizationFactor) if marker_types[l]=='t' else VectorStats(value,zero=[0.0]*3,prior=RegularizationFactor))) for l in marker_id_list)
for i in xrange(n):
marker = marker_ids[i]
Tclink = camera_link_transforms[i]
Tmlink = marker_link_transforms[i]
obs = marker_observations[i]
Trel = se3.mul(se3.inv(Tmlink),se3.mul(Tclink,camera_transform))
if marker_types[marker] == 't':
estimate = se3.mul(Trel,obs)
else:
estimate = se3.apply(Trel,obs)
new_marker_transforms[marker].add(estimate,observation_weights[i])
print "ITERATION",iters
#print " ESTIMATED MARKER TRANSFORMS:",dict((k,v.average) for (k,v) in new_marker_transforms.iteritems())
#2. Estimate camera transform from current marker transforms
new_camera_transform = TransformStats(zero=camera_initial_guess,prior=regularizationFactor)
if point_observations_only:
#TODO: weighted point fitting
relative_points = []
for i in xrange(n):
marker = marker_ids[i]
Tclink = camera_link_transforms[i]
Tmlink = marker_link_transforms[i]
obs = marker_observations[i]
pRel = se3.apply(se3.inv(Tclink),se3.apply(Tmlink,new_marker_transforms[marker].average))
relative_points.append(pRel)
new_camera_transform.add(point_fit_xform_3d(marker_observations,relative_points),sum(observation_weights))
else:
for i in xrange(n):
marker = marker_ids[i]
Tclink = camera_link_transforms[i]
Tmlink = marker_link_transforms[i]
obs = marker_observations[i]
Trel = se3.mul(se3.inv(Tclink),se3.mul(Tmlink,new_marker_transforms[marker].average))
estimate = se3.mul(Trel,se3.inv(obs))
new_camera_transform.add(estimate,observation_weights[i])
#print " ESTIMATED CAMERA TRANSFORMS:",new_camera_transform.average
#3. compute difference between last and current estimates
diff = 0.0
diff += vectorops.normSquared(se3.error(camera_transform,new_camera_transform.average))
for marker in marker_id_list:
if marker_types[marker]=='t':
diff += vectorops.normSquared(se3.error(marker_transforms[marker],new_marker_transforms[marker].average))
else:
diff += vectorops.distanceSquared(marker_transforms[marker],new_marker_transforms[marker].average)
camera_transform = new_camera_transform.average
for marker in marker_id_list:
marker_transforms[marker] = new_marker_transforms[marker].average
if math.sqrt(diff) < tolerance:
#converged!
print "Converged with diff %g on iteration %d"%(math.sqrt(diff),iters)
break
print " ESTIMATE CHANGE:",math.sqrt(diff)
error = 0.0
for i in xrange(n):
marker = marker_ids[i]
Tclink = camera_link_transforms[i]
Tmlink = marker_link_transforms[i]
obs = marker_observations[i]
Tc = se3.mul(Tclink,camera_transform)
if marker_types[marker] == 't':
Tm = se3.mul(Tmlink,marker_transforms[marker])
error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
else:
Tm = se3.apply(Tmlink,marker_transforms[marker])
error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
print " OBSERVATION ERROR:",math.sqrt(error)
#raw_input()
if DO_VISUALIZATION:
rgroup.setFrameCoordinates("camera estimate",camera_transform)
rgroup.setFrameCoordinates("marker estimate",marker_transforms.values()[0])
for i,obs in enumerate(marker_observations):
rgroup.setFrameCoordinates("obs"+str(i)+" estimate",obs)
vis.add("coordinates",rgroup)
vis.dialog()
if math.sqrt(diff) >= tolerance:
print "Max iters reached"
error = 0.0
for i in xrange(n):
marker = marker_ids[i]
Tclink = camera_link_transforms[i]
Tmlink = marker_link_transforms[i]
obs = marker_observations[i]
Tc = se3.mul(Tclink,camera_transform)
if marker_types[marker] == 't':
Tm = se3.mul(Tmlink,marker_transforms[marker])
error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
else:
Tm = se3.apply(Tmlink,marker_transforms[marker])
error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
return (math.sqrt(error),camera_transform,marker_transforms)
def calibrate_robot_camera(robot,
                           camera_link,
                           calibration_configs,
                           marker_observations,
                           marker_ids,
                           marker_links=None,
                           observation_relative_errors=None,
                           camera_initial_guess=None,
                           marker_initial_guess=None,
                           regularizationFactor=0,
                           maxIters=100,
                           tolerance=1e-7):
    """Single camera calibration function for a camera and markers on a robot.
    Given a robot and a list of estimated calibration marker observations
    in the camera frame, estimates both the camera transform relative to the
    robot's link as well as the marker transforms relative to their links.
    M: is the set of m markers. By default there is at most one marker per link.
       Markers can either be point markers (e.g., a mocap ball), or transform
       markers (e.g., an AR tag or checkerboard pattern).
    O: is the set of n observations, consisting of a reading (q_i,o_i,l_i) where
       q_i is the robot's (sensed) configuration, o_i is the reading which
       consists of either a point or transform estimate in the camera frame,
       and l_i is the ID of the marker (by default, just its link)
    Output: a tuple (err,Tc,marker_dict) where err is the norm of the
    reconstruction residual, Tc is the estimated camera transform relative to the
    camera's link, and marker_dict is a dict mapping each marker id to its
    estimated position or transform on the marker's link.
    Arguments:
    - robot: a RobotModel instance
    - camera_link: an integer index or a RobotModelLink instance on which
      the camera lies.
    - calibration_configs: a list of the RobotModel configurations q_1,...,q_n
      that generated the marker_observations list.
    - marker_observations: a list of estimated positions or transformations
      of calibration markers o_1,...,o_n, given in the camera's reference
      frame (z forward, x right, y down).
      If o_i is a 3-vector, the marker is considered to be a point marker.
      If a se3 element (R,t) is given, the marker is considered to be
      a transform marker. You may not mix point and transform observations
      for a single marker ID.
    - marker_ids: a list of marker ID #'s l_1,...,l_n corresponding to
      each observation, OR the link index on which each marker lies.
      If marker_links is given, there may be more than one marker per link,
      and these entries are marker ID's that index into the marker_links list.
    - marker_links: if provided, this is a dict that maps IDs into link
      indices (or RobotModelLink instances)
    - observation_relative_errors: if you have an idea of the magnitude of
      each observation error, it can be placed into this list. Must be
      a list of n floats, 3-lists (point markers), or 6-lists (transform
      markers).
    - camera_initial_guess: if not None, an initial guess for the camera transform
    - marker_initial_guess: if not None, a dictionary containing initial guesses
      for the marker transforms
    - regularizationFactor: if nonzero, the optimization penalizes deviation
      of the estimated camera transform and marker transforms from zero
      proportionally to this factor.
    - maxIters: maximum number of iterations for optimization.
    - tolerance: optimization convergence tolerance. Stops when the change of
      estimates falls below this threshold
    """
    #get the list of all marker IDs, convert all indices to RobotModelLinks
    if len(calibration_configs) != len(marker_observations):
        raise ValueError("Must provide the same number of calibration configs as observations")
    if len(calibration_configs) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as observations")
    if isinstance(camera_link,(int,str)):
        camera_link = robot.link(camera_link)
    marker_id_list = list(set(marker_ids))
    if marker_links is None:
        # default: each marker id doubles as the identifier of its link
        marker_links = dict((v,robot.link(v)) for v in marker_id_list)
    else:
        for i in marker_id_list:
            if i not in marker_links:
                raise ValueError("There is no marker_link provided for marker id "+str(i))
        # copy before mutating; normalize entries to RobotModelLink instances
        marker_links = marker_links.copy()
        for k,v in marker_links.iteritems():
            if isinstance(v,(int,str)):
                marker_links[k] = robot.link(v)
    #get all the transforms for each observation
    camera_link_transforms = []
    marker_link_transforms = []
    for q,m in zip(calibration_configs,marker_ids):
        # forward kinematics at each calibration configuration
        robot.setConfig(q)
        camera_link_transforms.append(camera_link.getTransform())
        marker_link_transforms.append(marker_links[m].getTransform())
    # delegate to the body-transform version of the solver
    return calibrate_xform_camera(camera_link_transforms,marker_link_transforms,
                                  marker_observations,marker_ids,
                                  observation_relative_errors=observation_relative_errors,
                                  camera_initial_guess=camera_initial_guess,
                                  marker_initial_guess=marker_initial_guess,
                                  regularizationFactor=regularizationFactor,
                                  maxIters=maxIters,
                                  tolerance=tolerance)
if __name__=="__main__":
"""
#testing rotation fitting
apts = [[5,0,0],[0,3,0],[0,0,1]]
R = so3.rotation((0,0,1),-math.pi/2)
Rest = point_fit_rotation_3d(apts,[so3.apply(R,a) for a in apts])
print Rest
print "Error:"
for a in apts:
print vectorops.distance(so3.apply(R,a),so3.apply(Rest,a))
raw_input()
"""
import sys
import random
robot_fn = "../data/robots/baxter_col.rob"
world = WorldModel()
if not world.readFile(robot_fn):
exit(1)
robot = world.robot(0)
camera_obs_link = "head_camera"
marker_obs_link = "left_gripper"
lc = robot.link(camera_obs_link)
lm = robot.link(marker_obs_link)
pc = robot.link(lc.getParent())
pm = robot.link(lm.getParent())
Tc0 = se3.mul(se3.inv(pc.getTransform()),lc.getTransform())
Tm0 = se3.mul(se3.inv(pm.getTransform()),lm.getTransform())
print "True camera transform",Tc0
print "True marker transform",Tm0
print
camera_link = pc.getName()
marker_link = pm.getName()
#generate synthetic data, corrupted with joint encoder and sensor measurement errors
qmin,qmax = robot.getJointLimits()
numObs = 10
jointEncoderError = 1e-5
sensorErrorRads = 1e-2
sensorErrorMeters = 2e-3
trueCalibrationConfigs = []
calibrationConfigs = []
trueObservations = []
observations = []
for obs in xrange(numObs):
q0 = [random.uniform(a,b) for (a,b) in zip(qmin,qmax)]
#don't move head
for i in range(13):
q0[i] = 0
trueCalibrationConfigs.append(q0)
trueCalibrationConfigs=resource.get("calibration.configs",default=trueCalibrationConfigs,type="Configs",description="Calibration configurations",world=world)
for q0 in trueCalibrationConfigs:
robot.setConfig(q0)
obs0 = se3.mul(se3.inv(lc.getTransform()),lm.getTransform())
dq = [random.uniform(-jointEncoderError,jointEncoderError) for i in range(len(q0))]
dobs = (so3.from_moment([random.uniform(-sensorErrorRads,sensorErrorRads) for i in range(3)]),[random.uniform(-sensorErrorMeters,sensorErrorMeters) for i in range(3)])
calibrationConfigs.append(vectorops.add(q0,dq))
observations.append(se3.mul(obs0,dobs))
trueObservations.append(obs0)
if DO_VISUALIZATION:
rgroup = coordinates.addGroup("calibration ground truth")
rgroup.addFrame("camera link",worldCoordinates=pc.getTransform())
rgroup.addFrame("marker link",worldCoordinates=pm.getTransform())
rgroup.addFrame("camera (ground truth)",parent="camera link",relativeCoordinates=Tc0)
rgroup.addFrame("marker (ground truth)",parent="marker link",relativeCoordinates=Tm0)
for i,(obs,obs0) in enumerate(zip(observations,trueObservations)):
rgroup.addFrame("obs"+str(i)+" (ground truth)",parent="camera (ground truth)",relativeCoordinates=obs0)
rgroup.addFrame("obs"+str(i)+" (from camera)",parent="camera (ground truth)",relativeCoordinates=obs)
vis.add("world",world)
for i,q in enumerate(calibrationConfigs):
vis.add("config"+str(i),q)
app = lc.appearance().clone()
app.setColor(0.5,0.5,0.5,0.1)
vis.setAppearance("config"+str(i),app)
vis.add("simulated coordinates",rgroup)
vis.dialog()
res = calibrate_robot_camera(robot,camera_link,
calibrationConfigs,
observations,
[marker_link]*len(calibrationConfigs))
print
print "Per-observation reconstruction error:",res[0]/numObs
print "Estimated camera transform:",res[1]
print " total error:",vectorops.norm(se3.error(res[1],Tc0))
print " rotation errors:",se3.error(res[1],Tc0)[:3]
print " translation errors:",se3.error(res[1],Tc0)[3:]
print "Estimated marker transform:",res[2][marker_link]
print " error:",vectorops.norm(se3.error(res[2][marker_link],Tm0))
print " rotation errors:",se3.error(res[2][marker_link],Tm0)[:3]
print " translation errors:",se3.error(res[2][marker_link],Tm0)[3:]
vis.kill()
|
"""
bioBakery Workflows: tasks.dadatwo module
A collection of tasks for DADA2 workflow with 16s amplicon sequences
Copyright (c) 2017 Harvard School of Public Health
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from anadama2.tracked import TrackedDirectory, TrackedExecutable
from biobakery_workflows import files, config, utilities
import os,fnmatch
def remove_primers(workflow,fwd_primer,rev_primer,input_folder,output_folder,pair_id,threads):
    """ Identifies primers and N filters samples
    Args:
        workflow (anadama2.workflow): an instance of the workflow class
        fwd_primer (string): forward primer
        rev_primer (string): reverse primer
        input_folder (string): path to input folder
        output_folder (string): path to output folder
        pair_id (string): pair identifier
        threads (string): number of threads
    Requires:
        dada2, Biostrings, ShortRead, tools r packages
    Returns:
        string: path to folder with primers removed files
    """
    script_path = utilities.get_package_file("identify_primers", "Rscript")
    # Working folders: N-filtered reads, detected primer files, and the
    # final cutadapt (primer-trimmed) output.
    filtN_folder = os.path.join(output_folder,"filtN")
    primers_folder = os.path.join(output_folder,"primers")
    fwd_primer_file = os.path.join(primers_folder,"fwd_primer_file.txt")
    rev_primer_file = os.path.join(primers_folder,"rev_primer_file.txt")
    cutadapt_folder = os.path.join(output_folder, "cutadapt")
    # run identify primers task
    workflow.add_task(
        "[vars[0]] \
        --input_dir=[args[3]] \
        --filtn_dir=[vars[1]] \
        --primers_dir=[vars[2]] \
        --threads=[args[4]] \
        --fwd_primer_file=[targets[0]] \
        --rev_primer_file=[targets[1]] \
        --fwd_primer=[args[0]] \
        --rev_primer=[args[1]] \
        --pair_id=[args[2]]",
        targets=[fwd_primer_file,rev_primer_file,
        TrackedDirectory(filtN_folder)],
        args=[fwd_primer, rev_primer, pair_id,input_folder,threads],
        vars=[script_path,filtN_folder,primers_folder,output_folder],
        name="identify_primers"
    )
    # Reverse-read identifier: replace the first '1' in the pair id with '2'
    # (e.g. "_R1" -> "_R2").
    pair_id2 = pair_id.replace("1", "2",1)
    fwd_files = sorted(fnmatch.filter(os.listdir(input_folder), "*"+pair_id+"*.fastq*"))
    rev_files = sorted(fnmatch.filter(os.listdir(input_folder), "*" + pair_id2 + "*.fastq*"))
    #run cutadapt to remove primers
    # One task per read pair; sorting keeps the fwd/rev lists aligned by index.
    # NOTE(review): assumes every forward file has a matching reverse file.
    for i in range(0,len(fwd_files)):
        fwd_file=os.path.join(input_folder,fwd_files[i])
        rev_file = os.path.join(input_folder, rev_files[i])
        workflow.add_task(
            cutadapt_do,
            depends=[fwd_primer_file,
                rev_primer_file,
                fwd_file,
                rev_file,
                TrackedDirectory(filtN_folder),
                TrackedExecutable("cutadapt",version_command="echo 'cutadapt' `cutadapt --version`")],
            targets=[TrackedDirectory(cutadapt_folder)],
            name="remove_primers"
        )
    return cutadapt_folder
def cutadapt_do(task):
    """Reads primers from the files and runs cutadapt task
    Args:
        task (anadama2.task): an instance of the task class; depends are
            [fwd primer file, rev primer file, fwd fastq, rev fastq,
            filtN folder, cutadapt executable], the single target is the
            cutadapt output folder."""
    from anadama2.util import get_name
    # Each primer file is expected to hold at least two lines; line 0 is the
    # primer, line 1 presumably its reverse complement — TODO confirm against
    # the identify_primers Rscript output.
    with open(get_name(task.depends[0])) as f:
        FWD = f.read().splitlines()
    with open(get_name(task.depends[1])) as f:
        REV = f.read().splitlines()
    cutadapt_folder=get_name(task.targets[0])
    filtN_folder = get_name(task.depends[4])
    fwd_filename=os.path.basename(get_name(task.depends[2]))
    rev_filename = os.path.basename(get_name(task.depends[3]))
    # Inputs are read from the N-filtered folder; outputs keep the basenames.
    fwd_reads_out=os.path.join(cutadapt_folder,fwd_filename)
    rev_reads_out = os.path.join(cutadapt_folder,rev_filename)
    fwd_reads_in = os.path.join(filtN_folder,fwd_filename)
    rev_reads_in = os.path.join(filtN_folder,rev_filename)
    if not os.path.exists(cutadapt_folder):
        os.mkdir(cutadapt_folder)
    # -g/-G trim 5' primers on fwd/rev reads; -a/-A trim read-through into the
    # opposite primer; -n 2 applies up to two removals per read.
    command="cutadapt -g "+FWD[0]+" -a "+REV[1]+" -G "+REV[0]+" -A "+FWD[1]+" -n 2 -o "+fwd_reads_out+\
        " -p "+rev_reads_out+" "+fwd_reads_in+" "+ rev_reads_in+" --minimum-length 10"
    #run task
    utilities.run_task(command, depends=task.depends, targets=task.targets)
def filter_trim(workflow,input_folder,output_folder,maxee,trunc_len_max,pair_id,threads,trunc_len_rev_offset):
    """ Filters samples by maxee and trims them, renders quality control plots
    of forward and reverse reads for each sample, creates read counts tsv and rds files.
    Args:
        workflow (anadama2.workflow): an instance of the workflow class
        input_folder (string): path to input folder
        output_folder (string): path to output folder
        maxee (string): maxee value to use for filtering
        trunc_len_max (string): max length for truncating reads
        pair_id (string): pair identifier
        threads (int): number of threads
        trunc_len_rev_offset (string): offset applied to the reverse-read
            truncation length — TODO confirm semantics against the
            filter_and_trim Rscript
    Requires:
        dada2, gridExtra,tools r packages
    Returns:
        string: path to file that contains read counts before and after filtering
        string: path to folder with filtered and trimmed sample files
    """
    reads_plotF_png = files.SixteenS.path("readF_qc", output_folder)
    reads_plotR_png = files.SixteenS.path("readR_qc", output_folder)
    readcounts_tsv_path = os.path.join(output_folder, "Read_counts_after_filtering.tsv")
    readcounts_rds_path = os.path.join(output_folder, "Read_counts_filt.rds")
    # Name of the folder (created by the Rscript under output_folder) that
    # receives the filtered/trimmed fastq files.
    filtered_dir = "filtered_input"
    script_path = utilities.get_package_file("filter_and_trim", "Rscript")
    # NOTE(review): the reverse-read plot is passed via args[7] rather than
    # targets, so anadama2 does not track it as an output of this task.
    workflow.add_task(
        "[vars[0]] \
        --input_dir=[args[0]]\
        --output_dir=[args[1]]\
        --filtered_dir=[vars[1]]\
        --maxee=[args[2]]\
        --trunc_len_max=[args[3]]\
        --readcounts_tsv_path=[targets[0]]\
        --readcounts_rds_path=[targets[1]]\
        --reads_plotF=[targets[2]]\
        --reads_plotR=[args[7]]\
        --pair_id=[args[4]]\
        --threads=[args[5]]\
        --trunc_len_rev_offset='[args[6]]'",
        depends =[TrackedDirectory(input_folder)],
        targets = [readcounts_tsv_path, readcounts_rds_path, reads_plotF_png],
        args = [input_folder, output_folder, maxee, trunc_len_max, pair_id, threads, trunc_len_rev_offset, reads_plotR_png],
        vars = [script_path,filtered_dir],
        name ="filter_and_trim"
    )
    return readcounts_tsv_path, filtered_dir
def learn_error(workflow, output_folder, filtered_dir, readcounts_tsv_path, threads):
    """ Learns error rates for each sample, renders error rates plots for forward and reverse reads
    Args:
        workflow (anadama2.workflow): an instance of the workflow class
        output_folder (string): path to output folder
        filtered_dir (string): path to directory with filtered files
        readcounts_tsv_path (string): path to read counts after filtering tsv file
            (used only as a dependency to order this task after filter_trim)
        threads (int): number of threads
    Requires:
        dada2, ggplot2 r packages
    Returns:
        string: path to file that contains error rates of forward reads
        string: path to file that contains error rates of reverse reads
    """
    error_ratesF_png = files.SixteenS.path("error_ratesF", output_folder)
    error_ratesR_png = files.SixteenS.path("error_ratesR", output_folder)
    error_ratesF_path= os.path.join(output_folder, "error_ratesFWD.rds")
    error_ratesR_path =os.path.join(output_folder, "error_ratesREV.rds")
    script_path = utilities.get_package_file("learn_error_rates", "Rscript")
    # NOTE(review): the reverse-read outputs are passed via args rather than
    # targets, so anadama2 tracks only the forward png/rds files.
    workflow.add_task(
        "[vars[0]] \
        --output_dir=[args[0]]\
        --filtered_dir=[args[1]]\
        --error_ratesF_png=[targets[0]]\
        --error_ratesR_png=[args[2]]\
        --error_ratesF_path=[targets[1]]\
        --error_ratesR_path=[args[3]]\
        --threads=[vars[1]]",
        depends = [readcounts_tsv_path],
        targets = [error_ratesF_png, error_ratesF_path],
        args = [output_folder, filtered_dir, error_ratesR_png, error_ratesR_path],
        vars = [script_path, threads],
        name = "learn_error_rates"
    )
    return error_ratesF_path, error_ratesR_path
def merge_paired_ends(workflow, output_dir, filtered_dir, error_ratesF_path, error_ratesR_path, threads, minoverlap, maxmismatch):
    """ Dereplicates and merges paired reads
    Args:
        workflow (anadama2.workflow): an instance of the workflow class
        output_dir (string): path to output folder
        filtered_dir (string): path to directory with filtered files
        error_ratesF_path (string): path to rds file that contains error rates of forward reads
        error_ratesR_path (string): path to rds file that contains error rates of reverse reads
        threads (int): number of threads
        minoverlap (int): the min number of pairs for overlap for the merge step
        maxmismatch (int): the max number of mismatch for pairs to merge
    Requires:
        dada2, tools r packages
    Returns:
        string: path to rds file that contains merged and dereplicated reads
    """
    mergers_file_path = os.path.join(output_dir, "mergers.rds")
    script_path = utilities.get_package_file("merge_paired_ends", "Rscript")
    # Depends only on the forward error-rates file; the reverse file is
    # produced by the same upstream task, so ordering is still correct.
    workflow.add_task(
        "[vars[0]] \
        --output_dir=[args[0]]\
        --filtered_dir=[args[1]]\
        --error_ratesF_path=[depends[0]]\
        --error_ratesR_path=[args[4]]\
        --mergers_file_path=[targets[0]]\
        --threads=[vars[1]]\
        --minoverlap=[args[2]]\
        --maxmismatch=[args[3]]",
        depends = [error_ratesF_path],
        targets = [mergers_file_path],
        args = [output_dir, filtered_dir, minoverlap, maxmismatch, error_ratesR_path],
        vars = [script_path, threads],
        name = "dereplicate_and_merge"
    )
    return mergers_file_path
def const_seq_table(workflow, output_folder, filtered_dir, mergers_file_path, threads):
    """ Builds ASV table, removes chimeras, creates read counts at each step, and fasta file with all sequences
    Args:
        workflow (anadama2.workflow): an instance of the workflow class
        output_folder (string): path to output folder
        filtered_dir (string): path to directory with filtered files
        mergers_file_path (string): path to rds file that contains merged reads
        threads (int): number of threads
    Requires:
        dada2, tools, seqinr r packages
    Returns:
        string: path to rds file that contains ASV data
        string: path to read counts at each step tsv file
        string: path to fasta file with all sequences
    """
    read_counts_steps_path = files.SixteenS.path("counts_each_step", output_folder)
    seqtab_file_path = os.path.join(output_folder, "seqtab_final.rds")
    seqs_fasta_path = os.path.join(output_folder, "sequences.fasta")
    # File names (relative to output_folder) passed through to the Rscript.
    readcounts_rds = "Read_counts_filt.rds"
    asv_tsv = "all_samples_SV_counts.tsv"
    script_path = utilities.get_package_file("const_seq_table", "Rscript")
    # The dada2 version is recorded by running this bundled helper script
    # through the TrackedExecutable below.
    # (Removed an unused local `version_command`; the actual version command
    # is built inline from version_script in the add_task call.)
    version_script = utilities.get_package_file("dada2_version", "Rscript")
    workflow.add_task(
        "[vars[0]] \
        --output_dir=[args[0]]\
        --filtered_dir=[args[1]]\
        --merged_file_path=[depends[0]]\
        --read_counts_steps_path=[targets[0]]\
        --readcounts_rds=[vars[2]]\
        --asv_tsv=[vars[3]]\
        --seqtab_file_path=[targets[1]]\
        --seqs_fasta_path=[targets[2]]\
        --threads=[vars[1]]",
        depends = [mergers_file_path,TrackedExecutable("R", version_command="echo '" + version_script + "' `" + version_script + "`")],
        targets = [read_counts_steps_path, seqtab_file_path, seqs_fasta_path],
        args = [output_folder, filtered_dir],
        vars = [script_path, threads, readcounts_rds, asv_tsv ],
        name = "construct_sequence_table"
    )
    return seqtab_file_path, read_counts_steps_path, seqs_fasta_path
def assign_taxonomy(workflow, output_folder, seqtab_file_path, ref_path, threads, tryRC):
    """ Assigns taxonomy using green genes, silva, or rdp database, creates closed reference file
    Args:
        workflow (anadama2.workflow): an instance of the workflow class
        output_folder (string): path to output folder
        seqtab_file_path (string): path to rds file that contains ASV data
        ref_path (string): reference database name ("unite", "silva", "rdp";
            anything else falls back to greengenes)
        threads (int): number of threads
        tryRC (string): whether dada2 should also try the reverse complement
            of each sequence when matching the reference — TODO confirm
            expected values against the assign_taxonomy Rscript
    Requires:
        dada2 r package
    Returns:
        string: path to closed reference file
    """
    otu_closed_ref_path = files.SixteenS.path("otu_table_closed_reference", output_folder)
    # check what reference db to use for taxonomy assignment
    if ref_path == "unite":
        refdb_path = config.SixteenS().unite
        refdb_species_path = "None"
    elif ref_path == "silva":
        refdb_path = config.SixteenS().silva_dada2
        refdb_species_path = config.SixteenS().silva_species_dada2
    elif ref_path == "rdp":
        refdb_path = config.SixteenS().rdp_dada2
        refdb_species_path = config.SixteenS().rdp_species_dada2
    else:
        refdb_path = config.SixteenS().greengenes_dada2
        refdb_species_path = "None"
    script_path = utilities.get_package_file("assign_taxonomy", "Rscript")
    # After taxonomy assignment, a helper script checks the resulting table
    # for reverse-orientation reads (chained with && so it only runs on success).
    workflow.add_task(
        "[vars[2]] \
        --output_dir=[args[0]]\
        --refdb_path=[vars[0]]\
        --refdb_species_path=[vars[1]]\
        --seqtab_file_path=[depends[0]]\
        --otu_closed_ref_path=[targets[0]]\
        --threads=[vars[3]]\
        --tryRC=[vars[4]] &&\
        check_for_reverse_reads.py --input [targets[0]]",
        depends = [seqtab_file_path],
        targets = [otu_closed_ref_path],
        args = [output_folder],
        vars =[refdb_path, refdb_species_path, script_path, threads, tryRC],
        name = "assign_taxonomy"
    )
    return otu_closed_ref_path
def remove_tmp_files(workflow, output_folder, otu_closed_ref_path, msa_fasta_path, fasttree_file_path):
    """ Removes temporary rds files
    Schedules a cleanup task that deletes the intermediate .rds files from
    the output folder once the final outputs listed as dependencies exist.
    Args:
        workflow (anadama2.workflow): an instance of the workflow class
        output_folder (string): path to output folder.
        otu_closed_ref_path (string): path to closed reference file
        msa_fasta_path (string): path to msa file
        fasttree_file_path (string): path to phylogenetic tree file
    Requires:
        None
    Returns:
        None
    """
    # rm's output is captured into this file so the task has a trackable target.
    # NOTE(review): the '&>' redirection assumes the task shell is bash.
    cleanup_log = os.path.join(output_folder, "tmp_rm.txt")
    workflow.add_task(
        "rm [args[0]]/*.rds &>[targets[0]] ",
        name = "rm_tmp_files",
        depends = [otu_closed_ref_path, msa_fasta_path, fasttree_file_path],
        targets = [cleanup_log],
        args = [output_folder]
    )
|
__author__ = "Silei Xiong"
import xml.etree.ElementTree as ET
import string
# from collections import namedtuple
# from urllib.request import urlopen
# from xml.etree.cElementTree import parse
import csv
def read_data(model):
    # Work-in-progress CSV loader: currently it only opens the icon-size
    # database and constructs a reader; all row processing is commented out
    # and the `model` argument is unused.
    # NOTE(review): returns None — presumably meant to return parsed rows;
    # confirm intended behavior before use.
    with open('Icon_size_data_base.csv') as f:
        f_csv = csv.reader(f, delimiter=',')
        # headers = next(f_csv)
        #
        # ftab_csv = csv.reader(f)
        # next(ftab_csv)
        # next(ftab_csv)
        # for row in ftab_csv:
        # print(row)
        # [a, b, c, d] = row
        # print(a, '\n', b, '\n', c, '\n', d)
# open file or read to string
# Read the whole gPROMS project file into memory and parse it as XML.
project_file = open('Buhler_mill_layout1.gPJ', 'r')
file_str = project_file.read()
project_file.close()
# file_str.encode(encoding='utf-8')
root = ET.fromstring(file_str)
# tree = ET.parse("test_flowsheet.gPJ")
# root = tree.getroot()
# Model definitions referenced by the flowsheets.
ModelReferenceCache = root.find('ModelReferenceCache')
ModelReference = ModelReferenceCache.findall('ModelReference')
# ModelEntity = root.find('ModelEntity')
# Locate the 'Models' group; its ModelEntity children are the flowsheets.
# NOTE(review): if no <Group name="Models"> exists, Group_Models and
# ModelEntity stay unbound and the loops at the bottom raise NameError.
for it in root.iter('Group'):
    if it.attrib['name'] == 'Models':
        Group_Models = it
        ModelEntity = it.findall('ModelEntity')
        break
# ModelEntity[0].attrib['name'] = 'mod_test'
# Round-trip check: serialise the (unmodified) tree back out to file 'new'.
a = ET.tostring(root)
new_file = open('new', 'wb')
new_file.write(a)
new_file.close()
# tree.write('output', encoding="us-ascii")
# models = namedtuple('models', 'port')
# ports = namedtuple('ports', 'name content subType Transform Labels')
#Transform = namedtuple('Transform', 'ScaleX ScaleY ScaleX ScaleY TranslateX TranslateY')
#Transform = namedtuple('Transform', 'ScaleX ScaleY ScaleX ScaleY TranslateX TranslateY')
class Port:
    """Wrapper around a flowsheet <Port> XML element (geometry + labels)."""
    def __init__(self, it):
        # Plain string attributes of the element (None when absent).
        for key in ('name', 'content', 'subType'):
            setattr(self, key, it.get(key))
        # 'layer' is optional in the schema; only set when present.
        if 'layer' in it.keys():
            self.layer = it.get(key='layer')
        # Placement transform of the port icon.
        self.Transform = it.find('Transform').attrib
        # NOTE(review): collects ALL <Transform> descendants (including the
        # icon's own) — presumably the label placements; verify schema.
        self.Labels = [sub.attrib for sub in it.iter('Transform')]
    def write(self, it):
        # Placeholder for serialising changes back to XML.
        pass
class Unit:
    """Wrapper around a flowsheet <Unit> XML element (a placed model instance)."""
    def __init__(self, it):
        # Identifying attributes of the placed unit (None when absent).
        for key in ('name', 'model', 'layer'):
            setattr(self, key, it.get(key))
        # Placement transform of the unit icon.
        self.Transform = it.find('Transform').attrib
        # NOTE(review): collects ALL <Transform> descendants (including the
        # icon's own) — presumably the label placements; verify schema.
        self.Labels = [sub.attrib for sub in it.iter('Transform')]
    def write(self, it):
        # Placeholder for serialising changes back to XML.
        pass
class Connection:
    """A <Connection> edge between two units/ports, with its drawn polyline."""
    def __init__(self, it):
        # Endpoints may be named via unit_* or port_* attributes.
        self.unit_1 = it.get('unit_1') if 'unit_1' in it.keys() else it.get('port_1')
        self.unit_2 = it.get('unit_2') if 'unit_2' in it.keys() else it.get('port_2')
        # Anchor point identifiers on each endpoint.
        self.point_1 = it.get('point_1')
        self.point_2 = it.get('point_2')
        # Polyline vertices as a flat list of floats (x1,y1,x2,y2,...).
        raw_points = it.find('LineSegment').get('points')
        self.points = [float(coord) for coord in raw_points.split(',')]
    def write(self, it):
        # Placeholder for serialising changes back to XML.
        pass
class Model:
    """A model definition from the ModelReferenceCache, with its ports."""
    def __init__(self, it):
        self.name = it.get('model')
        # Map port name -> Port wrapper for every <Port> child.
        self.port = dict()
        for child in it.findall('Port'):
            self.port[child.get('name')] = Port(child)
    def write(self, it):
        # Placeholder for serialising changes back to XML.
        pass
class Flowsheet:
    """A flowsheet <ModelEntity>: its layers, units, ports and connections."""
    def __init__(self, it):
        self.name = it.get('name')
        # Layer names, in document order.
        layers_element = it.find('FlowsheetLayers')
        self.layer = [ly.get('name') for ly in layers_element.findall('Layer')]
        # Units and stand-alone ports, keyed by element name.
        self.unit = {u.get('name'): Unit(u) for u in it.findall('Unit')}
        self.port = {p.get('name'): Port(p) for p in it.findall('Port')}
        # Combined lookup over both kinds of node (ports win on name clash).
        self.all_unit = dict(self.unit, **self.port)
        # All drawn connections on this sheet.
        self.connection = [Connection(c) for c in it.findall('Connection')]
    def write(self, it):
        # Placeholder for serialising changes back to XML.
        pass
# Build lookup tables from the parsed project: model definitions from the
# reference cache, and one Flowsheet per ModelEntity in the 'Models' group.
models = dict()
flowsheets = list()
for it in ModelReference:
    models[it.get('model')] = Model(it)
for it in ModelEntity:
    flowsheets.append(Flowsheet(it))
|
def count_letters(frase):
    """Count character occurrences in *frase*.
    Returns a dict mapping each character to the number of times it appears
    in the string (an empty dict for an empty string).
    """
    counts = {}
    for ch in frase:
        counts[ch] = counts.get(ch, 0) + 1
    return counts
|
#!/bin/python2.7
import sys
import subprocess
#TODO: Adding source folder
# Copy every file under / whose name matches the given pattern into the
# destination folder. Works under both Python 2.7 and 3.
if len(sys.argv) != 3:
    print("Srsly, you need 2 arguments or it won't work.")
    print("Usage: python2.7 snekbackup.py [regex] [destination folder] ")
else:
    # Fixes two defects of the original shell-string version:
    #  * the pattern/destination were interpolated into a shell command
    #    (shell injection; spaces in arguments also broke the command);
    #  * find's -exec was missing its required ';' terminator, so find
    #    would abort with "missing argument to -exec".
    subprocess.call(
        ["find", "/", "-type", "f", "-name", sys.argv[1],
         "-exec", "cp", "{}", sys.argv[2], ";"],
        shell=False)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2013 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
# This module is part of the package GTW.OMP.Auth.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# GTW.OMP.Auth.Group
#
# Purpose
# Model a group of accounts
#
# Revision Dates
# 16-Jan-2010 (CT) Creation
# 28-Feb-2010 (CT) `desc` added
# 18-Nov-2011 (CT) Import `unicode_literals` from `__future__`
# 17-Jun-2013 (CT) Derive `Group` from `MOM.Object`, not `MOM.Named_Object`
# ««revision-date»»···
#--
from _MOM.import_MOM import *
from _GTW import GTW
from _GTW._OMP._Auth import Auth
import _GTW._OMP._Auth.Entity
### `Group` derives from `Auth.Object` (see revision note 17-Jun-2013).
_Ancestor_Essence = Auth.Object
class _Auth_Group_ (_Ancestor_Essence) :
    """Model a group of accounts."""
    ### Exported essence name; MOM publishes this class as `Auth.Group`.
    _real_name = "Group"
    class _Attributes (_Ancestor_Essence._Attributes) :
        _Ancestor = _Ancestor_Essence._Attributes
        class name (A_String) :
            """Name of %(type_base_name.lower ())s."""
            ### Primary attribute: part of the essential key of a Group.
            kind = Attr.Primary
            max_length = 32
            ### Offer auto-completion once 2 characters have been typed.
            completer = Attr.Completer_Spec (2, Attr.Selector.primary)
        # end class name
        class desc (A_String) :
            """Description of group"""
            ### Optional short free-text description.
            kind = Attr.Optional
            max_length = 20
        # end class desc
    # end class _Attributes
Group = _Auth_Group_ # end class
if __name__ != "__main__" :
    ### Export `Group` into the GTW.OMP.Auth namespace on import.
    GTW.OMP.Auth._Export ("*")
### __END__ GTW.OMP.Auth.Group
|
#! /usr/bin/env python3
import os
import sys
from functools import reduce
from itertools import combinations
from time import process_time
def _checksum(ids):
    """Part 1: (# ids containing some letter exactly twice) multiplied by
    (# ids containing some letter exactly three times)."""
    twos, threes = (
        sum(1 for box_id in ids if any(box_id.count(c) == n for c in box_id))
        for n in (2, 3)
    )
    return twos * threes

def _common_letters(ids):
    """Part 2: the letters shared by the two ids that differ in exactly one
    position, or None if no such pair exists."""
    for first, second in combinations(ids, 2):
        paired = list(zip(first, second))
        if sum(1 for a, b in paired if a != b) == 1:
            return ''.join(a for a, b in paired if a == b)
    return None

def main():
    """Read the puzzle input and print the answers to both parts."""
    with open(os.path.join(sys.path[0], 'input.txt')) as f:
        ids = [line.strip() for line in f.readlines()]
    print('P1: {}'.format(_checksum(ids)))
    # The original bound p2 inside a loop and raised NameError when no pair
    # differed by exactly one letter; this version prints None in that case.
    print('P2: {}'.format(_common_letters(ids)))
if __name__ == "__main__":
start = process_time()
main()
print('--- {} seconds ---'.format(process_time() - start))
|
from random import randint
from data.dataloaders.bar_dataset import *
from data.dataloaders.bar_dataset_helpers import *
from MeasureVAE.measure_vae import MeasureVAE
from MeasureVAE.vae_trainer import VAETrainer
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.metrics import mutual_info_score
from sklearn.linear_model import LogisticRegression, LinearRegression
import matplotlib.pyplot as plt
class VAETester(object):
    def __init__(
            self,
            dataset,
            model: MeasureVAE,
            has_reg_loss=False,
            reg_type=None,
            reg_dim=0
    ):
        """
        Initializes the tester: puts the model in eval mode, reconstructs the
        trainer config string used to locate the saved weights, loads the
        weights, moves the model to the GPU and freezes the decoder.
        :param dataset: dataset object providing data_loaders and attribute
            metrics (e.g. a FolkNBarDataset)
        :param model: MeasureVAE, the (untrained-weights) model to load into
        :param has_reg_loss: bool, whether the model was trained with a
            latent regularization loss
        :param reg_type: str, regularization type (e.g. 'rhy_complexity',
            'joint', 'four_metrics') — only used when has_reg_loss is True
        :param reg_dim: int, regularized latent dimension; overridden with a
            fixed list for the joint/four_metrics reg types below
        """
        self.dataset = dataset
        self.model = model
        self.model.eval()
        self.has_reg_loss = has_reg_loss
        self.trainer_config = ''
        if self.has_reg_loss:
            if reg_type is not None:
                self.reg_type = reg_type
                self.reg_dim = reg_dim
                # Multi-attribute reg types use fixed dimension lists,
                # overriding the reg_dim argument.
                if self.reg_type == 'joint':
                    self.reg_dim = [0, 1]
                if self.reg_type == 'joint_rhycomp_noterange':
                    self.reg_dim = [0, 2]
                if self.reg_type == 'four_metrics':
                    self.reg_dim = [0, 1, 2, 3]
                # Config string must match the one used at training time so
                # that model.load() finds the right checkpoint.
                self.trainer_config = '[' + self.reg_type + ',' + str(self.reg_dim) + ']'
                self.model.update_trainer_config(self.trainer_config)
        # Load trained weights and move to GPU (assumes CUDA is available).
        self.model.load()
        self.model.cuda()
        self.filepath = os.path.join('models/',
                                     self.model.__repr__())
        self.decoder = self.model.decoder
        # freeze decoder
        self.train = False
        for param in self.decoder.parameters():
            param.requires_grad = False
        self.z_dim = self.decoder.z_dim
        self.batch_size = 1  # PREVIOUSLY IT WAS 1, 256 for testing
        self.measure_seq_len = 24  # TODO: remove this hardcoding
        self.dir_path = os.path.dirname(os.path.realpath(__file__))
    def test_interpretability(self, batch_size, attr_type):
        """
        Tests the interpretability of the latent space for a particular attribute
        :param batch_size: int, number of datapoints in mini-batch
        :param attr_type: str, attribute type: one of 'rhy_complexity',
            'num_notes', 'note_range', 'average_interval_jump'
        :return: tuple(int, float): index of dimension with highest mutual info, interpretability score
        """
        (_, gen_val, gen_test) = self.dataset.data_loaders(
            batch_size=batch_size,
            split=(0.01, 0.01)
        )
        # compute latent vectors and attribute values
        z_all = []
        attr_all = []
        for sample_id, (score_tensor, metadata_tensor) in tqdm(enumerate(gen_test)):
            if isinstance(self.dataset, FolkNBarDataset):
                # Flatten n-bar sequences into individual measures.
                batch_size = score_tensor.size(0)
                score_tensor = score_tensor.view(batch_size, self.dataset.n_bars, -1)
                score_tensor = score_tensor.view(batch_size * self.dataset.n_bars, -1)
                metadata_tensor = metadata_tensor.view(batch_size, self.dataset.n_bars, -1)
                metadata_tensor = metadata_tensor.view(batch_size * self.dataset.n_bars, -1)
            # convert input to torch Variables
            score_tensor, metadata_tensor = (
                to_cuda_variable_long(score_tensor),
                to_cuda_variable_long(metadata_tensor)
            )
            # compute encoder forward pass
            z_dist = self.model.encoder(score_tensor)
            # sample from distribution
            z_tilde = z_dist.rsample()
            # compute attributes
            # NOTE(review): an unrecognized attr_type silently reuses `attr`
            # from the previous iteration (NameError on the first) — consider
            # raising on unknown attr_type.
            if attr_type == 'rhy_complexity':
                attr = self.dataset.get_rhy_complexity(score_tensor)
            elif attr_type == 'num_notes':
                attr = self.dataset.get_notes_density_in_measure(score_tensor)
            elif attr_type == 'note_range':
                attr = self.dataset.get_note_range_of_measure(score_tensor)
            elif attr_type == 'average_interval_jump':
                attr = self.dataset.get_average_pitch_interval_of_measure(score_tensor)
            z_all.append(to_numpy(z_tilde.cpu()))
            attr_all.append(to_numpy(attr.cpu()))
        z_all = np.concatenate(z_all)
        attr_all = np.concatenate(attr_all)
        # compute mutual information
        # NOTE(review): sklearn's mutual_info_score expects discrete labels;
        # feeding continuous z values treats each float as its own category —
        # confirm this is intended.
        mutual_info = np.zeros(self.z_dim)
        for i in tqdm(range(self.z_dim)):
            mutual_info[i] = mutual_info_score(z_all[:, i], attr_all)
        dim = np.argmax(mutual_info)
        max_mutual_info = np.max(mutual_info)
        # Interpretability score: R^2 of a linear fit from the single most
        # informative dimension to the attribute.
        reg = LinearRegression().fit(z_all[:, dim:dim+1], attr_all)
        score = reg.score(z_all[:, dim:dim+1], attr_all)
        return dim, score
def test_model(self, batch_size):
"""
Runs the model on the test set
:param batch_size: int, number of datapoints in mini-batch
:return: tuple: mean_loss, mean_accuracy
"""
(_, gen_val, gen_test) = self.dataset.data_loaders(
batch_size=batch_size,
split=(0.01, 0.01)
)
print('Num Test Batches: ', len(gen_test))
mean_loss_test, mean_accuracy_test = self.loss_and_acc_test(gen_test)
print('Test Epoch:')
print(
'\tTest Loss: ', mean_loss_test, '\n'
'\tTest Accuracy: ', mean_accuracy_test * 100
)
def test_interp(self):
"""
Tests the interpolation capabilities of the latent space
:return: None
"""
(_, gen_val, gen_test) = self.dataset.data_loaders(
batch_size=1, # TODO: remove this hard coding
split=(0.01, 0.5)
)
gen_it_test = gen_test.__iter__()
for _ in range(randint(0, len(gen_test))):
tensor_score1, _ = next(gen_it_test)
gen_it_val = gen_val.__iter__()
for _ in range(randint(0, len(gen_val))):
tensor_score2, _ = next(gen_it_val)
tensor_score1 = to_cuda_variable(tensor_score1.long())
tensor_score2 = to_cuda_variable(tensor_score2.long())
self.test_interpolation(tensor_score1, tensor_score2, 10)
def test_interpolation(self, tensor_score1, tensor_score2, n=1):
"""
Tests the interpolation in the latent space for two random points in the
validation and test set
:param tensor_score1: torch tensor, (1, measure_seq_len)
:param tensor_score2: torch tensor, (1, measure_seq_len)
:param n: int, number of points for interpolation
:return:
"""
z_dist1 = self.model.encoder(tensor_score1)
z_dist2 = self.model.encoder(tensor_score2)
z1 = z_dist1.loc
z2 = z_dist2.loc
tensor_score = self.decode_mid_point(z1, z2, n)
# tensor_score = torch.cat((tensor_score1, tensor_score, tensor_score2), 1)
score = self.dataset.tensor_to_m21score(tensor_score.cpu())
score.show()
return score
def decode_mid_point(self, z1, z2, n):
"""
Decodes the mid-point of two latent vectors
:param z1: torch tensor, (1, self.z_dim)
:param z2: torch tensor, (1, self.z_dim)
:param n: int, number of points for interpolation
:return: torch tensor, (1, (n+2) * measure_seq_len)
"""
assert(n >= 1 and isinstance(n, int))
# compute the score_tensors for z1 and z2
dummy_score_tensor = to_cuda_variable(torch.zeros(self.batch_size, self.measure_seq_len))
_, sam1 = self.decoder(z1, dummy_score_tensor, self.train)
_, sam2 = self.decoder(z2, dummy_score_tensor, self.train)
# find the interpolation points and run through decoder
tensor_score = sam1
for i in range(n):
z_interp = z1 + (z2 - z1)*(i+1)/(n+1)
_, sam_interp = self.decoder(z_interp, dummy_score_tensor, self.train)
tensor_score = torch.cat((tensor_score, sam_interp), 1)
tensor_score = torch.cat((tensor_score, sam2), 1).view(1, -1)
# score = self.dataset.tensor_to_score(tensor_score.cpu())
return tensor_score
def test_attr_reg_interpolations(self, num_points=10, dim=0, num_interps=20):
for i in range(num_points):
z = torch.randn(1, self.model.latent_space_dim)
z1 = z.clone()
z2 = z.clone()
z1[:, dim] = -3.
z2[:, dim] = 3.
z1 = to_cuda_variable(z1)
z2 = to_cuda_variable(z2)
tensor_score = self.decode_mid_point(z1, z2, num_interps)
score = self.dataset.tensor_to_m21score(tensor_score.cpu())
score.show()
    def loss_and_acc_test(self, data_loader):
        """
        Computes loss and accuracy for test data
        :param data_loader: torch data loader object yielding
            (score_tensor, metadata_tensor) batches
        :return: (float, float): mean reconstruction loss and mean accuracy
            averaged over the number of batches
        """
        mean_loss = 0
        mean_accuracy = 0
        for sample_id, (score_tensor, metadata_tensor) in tqdm(enumerate(data_loader)):
            if isinstance(self.dataset, FolkNBarDataset):
                # Flatten n-bar sequences into individual measures.
                batch_size = score_tensor.size(0)
                score_tensor = score_tensor.view(batch_size, self.dataset.n_bars, -1)
                score_tensor = score_tensor.view(batch_size * self.dataset.n_bars, -1)
                metadata_tensor = metadata_tensor.view(batch_size, self.dataset.n_bars, -1)
                metadata_tensor = metadata_tensor.view(batch_size * self.dataset.n_bars, -1)
            # convert input to torch Variables
            score_tensor, metadata_tensor = (
                to_cuda_variable_long(score_tensor),
                to_cuda_variable_long(metadata_tensor)
            )
            # compute forward pass
            weights, samples, _, _, _, _ = self.model(
                measure_score_tensor=score_tensor,
                measure_metadata_tensor=metadata_tensor,
                train=False
            )
            # compute loss
            # Only the reconstruction term is reported here (no KL/reg terms).
            recons_loss = VAETrainer.mean_crossentropy_loss(
                weights=weights,
                targets=score_tensor
            )
            loss = recons_loss
            # compute mean loss and accuracy
            mean_loss += to_numpy(loss.mean())
            accuracy = VAETrainer.mean_accuracy(
                weights=weights,
                targets=score_tensor
            )
            mean_accuracy += to_numpy(accuracy)
        # Averages are per-batch, not per-sample.
        mean_loss /= len(data_loader)
        mean_accuracy /= len(data_loader)
        return (
            mean_loss,
            mean_accuracy
        )
    def _plot_data_attr_dist(self, gen_test, dim1, dim2, reg_type):
        """
        Encodes all batches from gen_test and plots the latent distribution
        over dimensions (dim1, dim2), colored by the given attribute.
        :param gen_test: torch data loader yielding (score, metadata) batches
        :param dim1: int, first latent dimension to plot
        :param dim2: int, second latent dimension to plot
        :param reg_type: str, attribute used for coloring: one of
            'rhy_complexity', 'num_notes', 'note_range'
        :return: None (plot is written under <dir_path>/plots/)
        """
        z_all = []
        attr_all = []
        for sample_id, (score_tensor, metadata_tensor) in tqdm(enumerate(gen_test)):
            if isinstance(self.dataset, FolkNBarDataset):
                # Flatten n-bar sequences into individual measures.
                batch_size = score_tensor.size(0)
                score_tensor = score_tensor.view(batch_size, self.dataset.n_bars, -1)
                score_tensor = score_tensor.view(batch_size * self.dataset.n_bars, -1)
                metadata_tensor = metadata_tensor.view(batch_size, self.dataset.n_bars, -1)
                metadata_tensor = metadata_tensor.view(batch_size * self.dataset.n_bars, -1)
            # convert input to torch Variables
            score_tensor, metadata_tensor = (
                to_cuda_variable_long(score_tensor),
                to_cuda_variable_long(metadata_tensor)
            )
            # compute encoder forward pass
            z_dist = self.model.encoder(score_tensor)
            # sample from distribution
            z_tilde = z_dist.rsample()
            # compute attributes
            # NOTE(review): an unrecognized reg_type reuses `attr` from the
            # previous iteration (NameError on the first).
            if reg_type == 'rhy_complexity':
                attr = self.dataset.get_rhy_complexity(score_tensor)
            elif reg_type == 'num_notes':
                attr = self.dataset.get_notes_density_in_measure(score_tensor)
            elif reg_type == 'note_range':
                attr = self.dataset.get_note_range_of_measure(score_tensor)
            z_all.append(z_tilde)
            attr_all.append(attr)
        z_all = to_numpy(torch.cat(z_all, 0))
        attr_all = to_numpy(torch.cat(attr_all, 0))
        # Encode the trainer config in the file name so plots from different
        # regularization settings do not overwrite each other.
        if self.trainer_config == '':
            reg_str = '[no_reg]'
        else:
            reg_str = self.trainer_config
        filename = self.dir_path + '/plots/' + reg_str + 'data_dist_' + reg_type + '_[' \
                   + str(dim1) + ',' + str(dim2) + '].png'
        self.plot_dim(z_all, attr_all, filename, dim1=dim1, dim2=dim2, xlim=6, ylim=6)
def plot_data_attr_dist(self, dim1=0, dim2=1):
"""
Plots the data distribution
:param dim1: int,
:param dim2: int,
:return:
"""
(_, _, gen_test) = self.dataset.data_loaders(
batch_size=16, # TODO: remove this hard coding
split=(0.7, 0.15)
)
print('Num Test Batches: ', len(gen_test))
self._plot_data_attr_dist(gen_test, dim1, dim2, 'rhy_complexity')
self._plot_data_attr_dist(gen_test, dim1, dim2, 'num_notes')
self._plot_data_attr_dist(gen_test, dim1, dim2, 'note_range')
def plot_attribute_surface(self, z_source, x_min = -3., x_max = 3., y_min = -5., y_max = 5., dim1=0, dim2=1, grid_res=0.5):
    """
    Plots the value of an attribute over a surface defined by the dimensions
    :param z_source: torch tensor, base latent vector; dims dim1/dim2 are swept
    :param x_min: float, sweep start along dim1
    :param x_max: float, sweep end along dim1
    :param y_min: float, sweep start along dim2
    :param y_max: float, sweep end along dim2
    :param dim1: int,
    :param dim2: int,
    :param grid_res: float,
    :return:
    """
    # create the dataspace: a 2-D grid over dims (dim1, dim2)
    x1 = torch.arange(x_min, x_max, grid_res)
    x2 = torch.arange(y_min, y_max, grid_res)
    z1, z2 = torch.meshgrid([x1, x2])
    num_points = z1.size(0) * z1.size(1)
    # z = torch.randn(1, self.model.latent_space_dim)
    z = z_source
    # One copy of the source latent per grid point, with the two swept
    # dimensions overwritten by the grid coordinates.
    z = z.repeat(num_points, 1)
    z[:, dim1] = z1.contiguous().view(1, -1)
    z[:, dim2] = z2.contiguous().view(1, -1)
    z = to_cuda_variable(z)
    # pass the points through the model decoder
    # NOTE(review): decoding one point per batch is slow — confirm whether a
    # larger mini_batch_size is unsafe here before changing it.
    mini_batch_size = 1
    num_mini_batches = num_points // mini_batch_size
    nd_all = []   # note density per grid point
    nr_all = []   # note range per grid point
    rc_all = []   # rhythmic complexity per grid point
    aij_all = []  # average pitch-interval jump per grid point
    # ie_all = []
    for i in tqdm(range(num_mini_batches)):
        # if i > 0:
        #     break
        z_batch = z[i*mini_batch_size:(i+1)*mini_batch_size, :]
        # Decoder needs a score tensor argument even when sampling.
        dummy_score_tensor = to_cuda_variable(torch.zeros(z_batch.size(0), self.measure_seq_len))
        _, samples = self.model.decoder(
            z=z_batch,
            score_tensor=dummy_score_tensor,
            train=self.train
        )
        samples = samples.view(z_batch.size(0), -1)
        note_density = self.dataset.get_notes_density_in_measure(samples)
        note_range = self.dataset.get_note_range_of_measure(samples)
        rhy_complexity = self.dataset.get_rhy_complexity(samples)
        avg_interval_jump = self.dataset.get_average_pitch_interval_of_measure(samples)
        # interval_entropy = self.dataset.get_interval_entropy(samples)
        nd_all.append(note_density)
        nr_all.append(note_range)
        rc_all.append(rhy_complexity)
        aij_all.append(avg_interval_jump)
        # ie_all.append(interval_entropy)
    nd_all = to_numpy(torch.cat(nd_all, 0))
    nr_all = to_numpy(torch.cat(nr_all, 0))
    rc_all = to_numpy(torch.cat(rc_all, 0))
    aij_all = to_numpy(torch.cat(aij_all, 0))
    print(nd_all.shape)
    print(nr_all.shape)
    print(rc_all.shape)
    print(aij_all.shape)
    # ie_all = to_numpy(torch.cat(ie_all, 0))
    z = to_numpy(z)
    if self.trainer_config == '':
        reg_str = '[no_reg]'
    else:
        reg_str = self.trainer_config
    # filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_rhy_complexity_[' \
    #     + str(dim1) + ',' + str(dim2) + '].png'
    # self.plot_dim(z, rc_all, filename, dim1=dim1, dim2=dim2)
    # filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_note_range_[' \
    #     + str(dim1) + ',' + str(dim2) + '].png'
    # self.plot_dim(z, nr_all, filename, dim1=dim1, dim2=dim2)
    filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_note_density_[' \
        + str(dim1) + ',' + str(dim2) + ']_3.png'
    self.plot_dim(z, nd_all, filename, dim1=dim1, dim2=dim2)
    filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_avg_interval_jump_[' \
        + str(dim1) + ',' + str(dim2) + ']_3.png'
    self.plot_dim(z, aij_all, filename, dim1=dim1, dim2=dim2)
def plot_attribute_dist(self, attribute='num_notes', plt_type='pca'):
    """
    Plots the distribution of a particular attribute in the latent space
    :param attribute: str,
        num_notes, note_range, rhy_entropy, beat_strength, rhy_complexity
    :param plt_type: str, 'tsne' or 'pca'
    :return:
    """
    (_, _, gen_test) = self.dataset.data_loaders(
        batch_size=64,  # TODO: remove this hard coding
        split=(0.01, 0.01)
    )
    z_all = []
    n_all = []
    # Only the first num_samples+1 batches are used, to keep the plot small.
    num_samples = 5
    for sample_id, (score_tensor, _) in tqdm(enumerate(gen_test)):
        # convert input to torch Variables
        if isinstance(self.dataset, FolkNBarDataset):
            # Flatten n-bar sequences so each bar becomes its own data point.
            batch_size = score_tensor.size(0)
            score_tensor = score_tensor.view(batch_size, self.dataset.n_bars, -1)
            score_tensor = score_tensor.view(batch_size * self.dataset.n_bars, -1)
        score_tensor = to_cuda_variable_long(score_tensor)
        # compute encoder forward pass; use the distribution mean (not a sample)
        z_dist = self.model.encoder(score_tensor)
        z_tilde = z_dist.loc
        z_all.append(z_tilde)
        if attribute == 'num_notes':
            attr = self.dataset.get_notes_density_in_measure(score_tensor)
        elif attribute == 'note_range':
            attr = self.dataset.get_note_range_of_measure(score_tensor)
        elif attribute == 'rhy_entropy':
            attr = self.dataset.get_rhythmic_entropy(score_tensor)
        elif attribute == 'beat_strength':
            attr = self.dataset.get_beat_strength(score_tensor)
        elif attribute == 'rhy_complexity':
            attr = self.dataset.get_rhy_complexity(score_tensor)
        else:
            raise ValueError('Invalid attribute type')
        for i in range(attr.size(0)):
            # Measures starting with a START/END symbol get sentinel attribute
            # values (-0.1 / -0.2) so they are visually distinct in the plot.
            tensor_score = score_tensor[i, :]
            start_idx = self.dataset.note2index_dicts[START_SYMBOL]
            end_idx = self.dataset.note2index_dicts[END_SYMBOL]
            if tensor_score[0] == start_idx:
                attr[i] = -0.1
            elif tensor_score[0] == end_idx:
                attr[i] = -0.2
        n_all.append(attr)
        if sample_id == num_samples:
            break
    z_all = torch.cat(z_all, 0)
    n_all = torch.cat(n_all, 0)
    z_all = to_numpy(z_all)
    n_all = to_numpy(n_all)
    filename = self.dir_path + '/plots/' + plt_type + '_' + attribute + '_' + \
        str(num_samples) + '_measure_vae.png'
    if plt_type == 'pca':
        self.plot_pca(z_all, n_all, filename)
    elif plt_type == 'tsne':
        self.plot_tsne(z_all, n_all, filename)
    elif plt_type == 'dim':
        self.plot_dim(z_all, n_all, filename)
    else:
        raise ValueError('Invalid plot type')
def plot_transposition_points(self, plt_type='pca'):
    """
    Plots a t-SNE/PCA plot for data-points comprising of transposed measures.

    A random tune is picked, every possible transposition of it is encoded,
    and the measures' latent means are plotted coloured by measure position.

    :param plt_type: str, 'tsne' or 'pca'
    :return:
    :raises ValueError: if plt_type is neither 'pca' nor 'tsne'
    """
    filepaths = self.dataset.valid_filepaths
    # BUG FIX: random.randint(0, len(filepaths)) is inclusive on both ends,
    # so it could return len(filepaths) and raise IndexError below.
    idx = random.randrange(len(filepaths))
    original_score = get_music21_score_from_path(filepaths[idx])
    possible_transpositions = self.dataset.all_transposition_intervals(original_score)
    z_all = []
    n_all = []
    for n, trans_int in enumerate(possible_transpositions, start=1):
        score_tensor = self.dataset.get_transposed_tensor(
            original_score,
            trans_int
        )
        score_tensor = self.dataset.split_tensor_to_bars(score_tensor)
        score_tensor = to_cuda_variable_long(score_tensor)
        z_dist = self.model.encoder(score_tensor)
        z_tilde = z_dist.loc
        z_all.append(z_tilde)
        # Colour each measure by its position within the tune.
        t = np.arange(0, z_tilde.size(0))
        n_all.append(torch.from_numpy(t))
        # n_all.append(torch.ones(z_tilde.size(0)) * n)
        print(n)
    z_all = to_numpy(torch.cat(z_all, 0))
    n_all = to_numpy(torch.cat(n_all, 0))
    filename = self.dir_path + '/plots/' + plt_type + '_transposition_measure_vae.png'
    if plt_type == 'pca':
        self.plot_pca(z_all, n_all, filename)
    elif plt_type == 'tsne':
        self.plot_tsne(z_all, n_all, filename)
    else:
        raise ValueError('Invalid plot type')
@staticmethod
def plot_pca(data, target, filename):
    """Project `data` to 2-D with PCA and save a scatter coloured by `target`."""
    projection = PCA(n_components=2, whiten=False).fit_transform(data)
    plt.scatter(
        x=projection[:, 0],
        y=projection[:, 1],
        c=target,
        cmap='viridis',
        alpha=0.3
    )
    plt.colorbar()
    plt.savefig(filename, format='png', dpi=300)
    plt.show()
    plt.close()
@staticmethod
def plot_tsne(data, target, filename):
    """Embed `data` in 2-D with t-SNE and save a scatter coloured by `target`."""
    embedder = TSNE(n_components=2, verbose=1., perplexity=40, n_iter=300)
    embedding = embedder.fit_transform(data)
    plt.scatter(
        x=embedding[:, 0],
        y=embedding[:, 1],
        c=target,
        cmap="viridis",
        alpha=0.3
    )
    plt.colorbar()
    plt.savefig(filename, format='png', dpi=300)
    plt.show()
    plt.close()
@staticmethod
def plot_dim(data, target, filename, dim1=0, dim2=1, xlim=None, ylim=None):
    """Scatter two latent dimensions of `data` coloured by `target`; save to file."""
    if xlim is not None:
        plt.xlim(-xlim, xlim)
    if ylim is not None:
        plt.ylim(-ylim, ylim)
    plt.scatter(
        x=data[:, dim1], y=data[:, dim2],
        c=target, s=12, linewidths=0,
        cmap="viridis", alpha=0.5
    )
    plt.colorbar()
    plt.savefig(filename, format='png', dpi=300)
    plt.close()
    print('saved: ' + filename)
@staticmethod
def get_cmap(n, name='hsv'):
    """Return a matplotlib colormap with `n` discrete colours.

    :param n: int, number of colours in the returned map
    :param name: str, any matplotlib colormap name (default 'hsv')
    """
    return plt.cm.get_cmap(name, n)
#!/usr/bin/python3
"""Append every command-line argument to a list persisted in add_item.json."""
from sys import argv
savejson = __import__('7-save_to_json_file').save_to_json_file
loadjson = __import__('8-load_from_json_file').load_from_json_file
filename = "add_item.json"
# Load the existing list, or start fresh if the file is not there yet.
try:
    items = loadjson(filename)
except FileNotFoundError:
    items = []
# argv[0] is the program name; only the real arguments are stored.
items.extend(argv[1:])
savejson(items, filename)
|
"""ResourceSpec classes for elbv2 resources."""
from altimeter.aws.resource.resource_spec import AWSResourceSpec
class ELBV2ResourceSpec(AWSResourceSpec):
    """Abstract base for ResourceSpec classes for elbv2 resources."""

    # AWS service name used by AWSResourceSpec machinery when building clients.
    service_name = "elbv2"
"""
Program to find the most optimal path to the center of a tumor to perform laparoscopy
Milestones:
1) Available dataset: https://wiki.cancerimagingarchive.net/display/Public/SPIE-AAPM-NCI+PROSTATEx+Challenges#7a2690e0c25948c69ddda9cc3b3905ec
Tumor locations are available
2) Find CNN trained with abdomenal MRIs
2.1) pixel intensities to vector
2.2) PCA and t-SNE
2.2) cluster analysis
3) Reconstruct optimal path
"""
import pandas as pd
import cv2
import numpy as np
import pydicom
from os import listdir
from os.path import isfile, join
import os
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import ggplot
if __name__ == "__main__":
df_all = pd.DataFrame()
root = '/Users/antonogorodnikov/Documents/Work/Python/Prostate_ADC/'
for path, subdirs, file in os.walk(root):
for name in file:
if ".dcm" in name:
file_path = os.path.join(path, name)
# read file
ds = pydicom.dcmread(file_path)
# make a flatten np array
ds_flat = ds.pixel_array.flatten()
# add to data frame
df_all['/'.join(file_path.split('/')[-2:])] = ds_flat
df_all = df_all/255.0
rndperm = np.random.permutation(df_all.shape[0])
pca_100 = PCA(n_components=100)
pca_result_100 = pca_100.fit_transform(df_all.values)
print('Cumulative explained variation for 50 principal components: {}'.format(
np.sum(pca_100.explained_variance_ratio_)))
time_start = time.time()
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_pca_results = tsne.fit_transform(pca_result_100[rndperm])
print
't-SNE done! Time elapsed: {} seconds'.format(time.time() - time_start)
|
from datetime import datetime
import pytest
from fdk_fulltext_search.ingest import create_index, init_info_doc, update_index_info
def mock_update_by_query_result(m_query_success):
    """Build a minimal update_by_query response: one hit on success, else zero."""
    return {"total": 1 if m_query_success else 0}
@pytest.fixture
def init_info_mock(mocker):
    # Patch init_info_doc so tests can assert whether/how often it was called.
    return mocker.patch("fdk_fulltext_search.ingest.init_info_doc")
@pytest.fixture
def update_index_info_mock(mocker):
    # Patch update_index_info so tests can assert whether it was called.
    return mocker.patch("fdk_fulltext_search.ingest.update_index_info")
# es_client.index(index="info", body=init_doc)
@pytest.mark.unit
def test_update_info_should_update_doc(mocker, init_info_mock):
    # if info indices exists and doc with index_name exists
    mocker.patch(
        "fdk_fulltext_search.ingest.es_client.indices.exists", return_value=True
    )
    mock_update_by_query = mocker.patch(
        "fdk_fulltext_search.ingest.es_client.update_by_query",
        return_value=mock_update_by_query_result(True),
    )
    update_index_info(index_name="concept")
    # Existing doc was updated in place: no re-init, exactly one query, and the
    # query must target the doc whose name matches the index.
    assert init_info_mock.call_count == 0
    assert mock_update_by_query.call_count == 1
    assert (
        mock_update_by_query.call_args[1]["body"]["query"]["term"]["name"] == "concept"
    )
@pytest.mark.unit
def test_update_info_should_init_doc_if_not_found(mocker, init_info_mock):
    # if info indices exists and doc with index_name does not exists
    mocker.patch(
        "fdk_fulltext_search.ingest.es_client.indices.exists", return_value=True
    )
    # update_by_query matches nothing (total == 0), so a new doc must be made.
    mock_update_by_query = mocker.patch(
        "fdk_fulltext_search.ingest.es_client.update_by_query",
        return_value=mock_update_by_query_result(False),
    )
    update_index_info(index_name="some_index")
    assert mock_update_by_query.call_count == 1
    assert init_info_mock.call_count == 1
@pytest.mark.unit
def test_init_info_doc_should_create_indices_and_doc(mocker, mock_single_create):
    # if indices does not exist
    mocker.patch(
        "fdk_fulltext_search.ingest.es_client.indices.exists", return_value=False
    )
    mock_index_doc = mocker.patch("fdk_fulltext_search.ingest.es_client.index")
    init_info_doc(index_name="informationmodels", now=datetime.now())
    # The "info" index is created first, then one doc named after the index.
    assert mock_single_create.call_count == 1
    assert mock_single_create.call_args[1]["index"] == "info"
    assert mock_index_doc.call_count == 1
    assert mock_index_doc.call_args[1]["body"]["name"] == "informationmodels"
@pytest.mark.unit
def test_create_index_should_abort_when_new_index_does_not_exist(
    mocker, mock_env, mock_single_create, update_index_info_mock
):
    # if indices does not exist
    mocker.patch(
        "fdk_fulltext_search.ingest.es_client.indices.exists", return_value=False
    )
    create_index(index_alias="dataservices", new_index_name="dataservices-123")
    # The index itself is created, but info bookkeeping must be skipped.
    assert mock_single_create.call_count == 1
    assert mock_single_create.call_args[1]["index"] == "dataservices-123"
    assert update_index_info_mock.call_count == 0
@pytest.mark.unit
def test_create_index_updates_info_index_when_successful(
    mocker, mock_env, mock_single_create, update_index_info_mock
):
    # if indices exist
    mocker.patch(
        "fdk_fulltext_search.ingest.es_client.indices.exists", return_value=True
    )
    create_index(index_alias="dataservices", new_index_name="dataservices-123")
    # The new index is created and the info bookkeeping doc is updated once.
    assert mock_single_create.call_args[1]["index"] == "dataservices-123"
    assert mock_single_create.call_count == 1
    assert mock_single_create.call_args[1]["index"] == "dataservices-123"
    assert update_index_info_mock.call_count == 1
|
from django.conf import global_settings
# In-memory SQLite keeps the test run self-contained and fast.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    },
}
DEBUG = True
SECRET_KEY = 'secret'  # test-only value; never hard-code a key in production
ROOT_URLCONF = "testproject.urls"
INSTALLED_APPS = ["log_request_id"]
# Request-id middleware first so everything downstream sees the generated id;
# Django's defaults (if any) are appended afterwards.
MIDDLEWARE = [
    'log_request_id.middleware.RequestIDMiddleware',
] + list(getattr(global_settings, "MIDDLEWARE", []))
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # Injects request_id into every log record for the formatter below.
        'request_id': {
            '()': 'log_request_id.filters.RequestIDFilter'
        }
    },
    'formatters': {
        'standard': {
            'format': '%(levelname)-8s [%(asctime)s] [%(request_id)s] %(name)s: %(message)s'
        },
    },
    'handlers': {
        # Captures records in memory so tests can assert on them.
        'mock': {
            'level': 'DEBUG',
            'class': 'testproject.handler.MockLoggingHandler',
            'filters': ['request_id'],
            'formatter': 'standard',
        },
    },
    'loggers': {
        'testproject': {
            'handlers': ['mock'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'log_request_id.middleware': {
            'handlers': ['mock'],
            'level': 'DEBUG',
            'propagate': False,
        },
    }
}
|
import argparse
import csv
import datetime
import logging
import multiprocessing
import os
import subprocess
import sys
import urllib
import torch
# Repository root (two levels above this file) and the local data cache dir.
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA = os.path.join(ROOT, "data")
# Hourly origin-destination ridership counts published by BART:
# https://www.bart.gov/about/reports/ridership
SOURCE_DIR = "http://64.111.127.166/origin-destination/"
SOURCE_FILES = [
    "date-hour-soo-dest-2011.csv.gz",
    "date-hour-soo-dest-2012.csv.gz",
    "date-hour-soo-dest-2013.csv.gz",
    "date-hour-soo-dest-2014.csv.gz",
    "date-hour-soo-dest-2015.csv.gz",
    "date-hour-soo-dest-2016.csv.gz",
    "date-hour-soo-dest-2017.csv.gz",
    "date-hour-soo-dest-2018.csv.gz",
]
def mkdir_p(path):
    """Create `path` (including parents) if it does not already exist.

    Uses exist_ok=True so there is no window between the existence check and
    the creation — the original check-then-create pattern could raise
    FileExistsError when called concurrently from the multiprocessing pool
    in _load_hourly_od.
    """
    os.makedirs(path, exist_ok=True)
def _load_hourly_od(args_basename):
    """Download, unzip and convert one yearly BART O-D csv.gz file.

    :param args_basename: tuple (args namespace, source-file basename)
    :return: dict with keys args/basename/start_date/stations/rows/schema,
        where rows is a (num_rows, 4) long tensor of
        [time_hours, origin, destin, trip_count]
    """
    # BUG FIX: the module only does `import urllib`, which does not load the
    # `urllib.request` submodule, so urlretrieve below raised AttributeError.
    import urllib.request

    args, basename = args_basename
    filename = os.path.join(DATA, basename.replace(".csv.gz", ".pkl"))
    if os.path.exists(filename):
        # Cached result from a previous run.
        return torch.load(filename)

    # Download source files.
    mkdir_p(DATA)
    gz_filename = os.path.join(DATA, basename)
    if not os.path.exists(gz_filename):
        url = SOURCE_DIR + basename
        logging.debug("downloading {}".format(url))
        urllib.request.urlretrieve(url, gz_filename)
    csv_filename = gz_filename[:-3]
    assert csv_filename.endswith(".csv")
    if not os.path.exists(csv_filename):
        logging.debug("unzipping {}".format(gz_filename))
        # -k keeps the .gz so the download check above stays valid.
        subprocess.check_call(["gunzip", "-k", gz_filename])
    assert os.path.exists(csv_filename)

    # Convert to PyTorch.
    logging.debug("converting {}".format(csv_filename))
    start_date = datetime.datetime.strptime("2000-01-01", "%Y-%m-%d")
    stations = {}
    num_rows = sum(1 for _ in open(csv_filename))
    logging.info("Formatting {} rows".format(num_rows))
    rows = torch.empty((num_rows, 4), dtype=torch.long)
    with open(csv_filename) as f:
        for i, (date, hour, origin, destin, trip_count) in enumerate(csv.reader(f)):
            date = datetime.datetime.strptime(date, "%Y-%m-%d")
            date += datetime.timedelta(hours=int(hour))
            rows[i, 0] = int((date - start_date).total_seconds() / 3600)
            # Station ids are assigned in order of first appearance.
            rows[i, 1] = stations.setdefault(origin, len(stations))
            rows[i, 2] = stations.setdefault(destin, len(stations))
            rows[i, 3] = int(trip_count)
            if i % 10000 == 0:
                sys.stderr.write(".")
                sys.stderr.flush()

    # Save data with metadata.
    dataset = {
        "args": args,
        "basename": basename,
        "start_date": start_date,
        "stations": stations,
        "rows": rows,
        "schema": ["time_hours", "origin", "destin", "trip_count"],
    }
    logging.debug("saving {}".format(filename))
    torch.save(dataset, filename)
    return dataset
def load_hourly_od(args=None):
    """Build (or load from cache) the merged hourly origin-destination tensor.

    :param args: optional argparse namespace, stored in the result for
        provenance only
    :return: dict with keys args/stations/start_date/counts, where counts is
        a (num_hours, num_stations, num_stations) float tensor
    """
    filename = os.path.join(DATA, "full-counts.pkl")
    if os.path.exists(filename):
        return torch.load(filename)

    # Convert each yearly file in parallel.
    datasets = multiprocessing.Pool().map(_load_hourly_od, [
        (args, basename)
        for basename in SOURCE_FILES
    ])
    stations = sorted(set().union(*(d["stations"].keys() for d in datasets)))
    min_time = min(int(d["rows"][:, 0].min()) for d in datasets)
    max_time = max(int(d["rows"][:, 0].max()) for d in datasets)
    num_rows = max_time - min_time + 1
    # BUG FIX: a trailing comma previously made start_date a 1-tuple instead
    # of a datetime.
    start_date = datasets[0]["start_date"] + datetime.timedelta(hours=min_time)
    logging.info("Loaded data from {} stations, {} hours"
                 .format(len(stations), num_rows))
    result = torch.zeros(num_rows, len(stations), len(stations))
    for dataset in datasets:
        # Remap each year's local station ids onto the global station list.
        part_stations = sorted(dataset["stations"], key=dataset["stations"].__getitem__)
        part_to_whole = torch.tensor(list(map(stations.index, part_stations)))
        time = dataset["rows"][:, 0] - min_time
        origin = part_to_whole[dataset["rows"][:, 1]]
        destin = part_to_whole[dataset["rows"][:, 2]]
        count = dataset["rows"][:, 3].float()
        result[time, origin, destin] = count
        dataset.clear()  # free the per-year rows as soon as they are merged
    logging.info("Loaded {} shaped data of mean {:0.3g}"
                 .format(result.shape, result.mean()))
    dataset = {
        "args": args,
        "stations": stations,
        "start_date": start_date,
        "counts": result,
    }
    torch.save(dataset, filename)
    return dataset
def main(args):
    """Entry point: build (or load from cache) the full hourly O-D dataset."""
    load_hourly_od(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="BART data preprocessor")
parser.add_argument("-v", "--verbose", action="store_true")
args = parser.parse_args()
logging.basicConfig(format='%(relativeCreated) 9d %(message)s',
level=logging.DEBUG if args.verbose else logging.INFO)
main(args)
|
#oggpnosn
#hkhr
import webapp2
from lib import BaseHandler, NGO, Project
import random
from google.appengine.api import mail
class CredibilityCheckHandler(BaseHandler):
    """Admin page listing NGOs still awaiting a credibility check."""

    def get(self):
        # Show up to 10 not-yet-verified NGOs.
        pending = NGO.query(NGO.credibility == False).fetch(10)
        self.render("adminCredibilityCheck.html", {"ngoList": pending})
class CredibilityCheckNGOHandler(BaseHandler):
    """Admin review page for a single NGO, with approve/reject mailing."""

    def get(self, userid):
        # Render the review form for the NGO with this userid.
        parameter = {}
        ngoQuery = NGO.query(NGO.userid == userid)
        ngo = ngoQuery.fetch(1)
        parameter["ngoList"] = ngo
        self.render("adminCredibilityCheckNGOPage.html", parameter)

    def post(self, userid):
        # Checkbox fields post "on" when ticked; absent fields come back "".
        descriptionAuthenticity = self.request.get("descriptionAuthenticity")
        eightygAuthenticity = self.request.get("eightygAuthenticity")
        ngoQuery = NGO.query(NGO.userid == userid)
        ngoList = ngoQuery.fetch(1)
        if descriptionAuthenticity == "on" and eightygAuthenticity == "on":
            # Both checks passed: send the approval email.
            for ngo in ngoList:
                mail.send_mail(sender=" <tanaygahlot@gmail.com>",
                    to= "<"+ngo.email+">",
                    subject="Your NGO has been approved",
                    body="""Dear :"""+ngo.name+"""\nYour csr.com account has been approved. You can now visit
http://www.csr.com/ and sign in using your Google Account to access new features.Please let us know if you have any questions.
The csr.com Team
""")
        else:
            failiureReport = "\nPlaces where your ngo failed\n"
            if descriptionAuthenticity != "on":
                failiureReport+=" The Description you provided isnt apt for a site like us.\n"
            # NOTE(review): unchecked checkboxes post "" (never "off"), so this
            # elif appends the 80G failure line whenever the description check
            # passed, regardless of the 80G checkbox — confirm intended logic.
            elif eightygAuthenticity != "off":
                failiureReport+=" Your 80G no isnt valid\n"
            for ngo in ngoList:
                mail.send_mail(sender=" <tanaygahlot@gmail.com>",
                    to= "<"+ngo.email+">",
                    subject="Your NGO has failed authentication test",
                    body="""Dear :"""+ ngo.name + failiureReport +"""Please let us know if you have any questions.
The csr.com Team
""")
class CreateFakeNGOAccount(BaseHandler):
    """Debug endpoint: seed the datastore with 100 random unverified NGOs."""

    def get(self):
        for _ in range(100):
            fake = NGO()
            fake.name = str(random.randrange(20,10000))
            fake.credibility = False
            fake.eightygRegistrationNumber = str(random.randrange(1,10000))
            fake.description = str(random.randrange(1,100000000000000000000))
            fake.userid = str(random.randrange(1,1000000))
            fake.email = "tanaygahlot@gmail.com"
            fake.put()
        self.response.write("Done!")
class CreateFakeProject(BaseHandler):
    """Debug endpoint: seed the datastore with 100 random unauthenticated projects."""

    def get(self):
        for _ in range(100):
            fake = Project()
            fake.title = str(random.randrange(20,10000))
            fake.authenticity = False
            fake.description = str(random.randrange(1,100000000000000000000))
            fake.ngo = str(random.randrange(1,1000000))
            fake.put()
        self.response.write("Done!")
class AdminHandler(BaseHandler):
    """Admin landing page."""

    def get(self):
        self.render("adminHomePage.html")
class AuthenticateHandler(BaseHandler):
    """Admin page listing projects that still need authentication."""

    def get(self):
        # Show up to 10 not-yet-authenticated projects.
        pending = Project.query(Project.authenticity == False).fetch(10)
        self.render("adminAuthenticate.html", {"projectList": pending})
class AuthenticateProjectHandler(BaseHandler):
    """Admin review page for a single project, with approve/reject mailing."""

    def get(self, userid):
        parameter = {}
        projectQuery = Project.query(Project.userid == userid)
        project = projectQuery.fetch(1)
        # BUG FIX: previously assigned the undefined name `ngo`, raising
        # NameError on every request; the fetched project list was intended.
        parameter["projectList"] = project
        self.render("adminCredibilityCheckNGOPage.html", parameter)

    def post(self, userid):
        # NOTE(review): this method looks copy-pasted from the NGO credibility
        # handler — it queries and emails NGOs rather than projects. Behaviour
        # is preserved here; confirm the intended model before rewriting.
        descriptionAuthenticity = self.request.get("descriptionAuthenticity")
        eightygAuthenticity = self.request.get("eightygAuthenticity")
        ngoQuery = NGO.query(NGO.userid == userid)
        ngoList = ngoQuery.fetch(1)
        if descriptionAuthenticity == "on" and eightygAuthenticity == "on":
            for ngo in ngoList:
                mail.send_mail(sender=" <tanaygahlot@gmail.com>",
                    to= "<"+ngo.email+">",
                    subject="Your NGO has been approved",
                    body="""Dear :"""+ngo.name+"""\nYour csr.com account has been approved. You can now visit
http://www.csr.com/ and sign in using your Google Account to access new features.Please let us know if you have any questions.
The csr.com Team
""")
        else:
            failiureReport = "\nPlaces where your ngo failed\n"
            if descriptionAuthenticity != "on":
                failiureReport+=" The Description you provided isnt apt for a site like us.\n"
            # NOTE(review): unchecked checkboxes post "", never "off" — see
            # CredibilityCheckNGOHandler for the same suspicious condition.
            elif eightygAuthenticity != "off":
                failiureReport+=" Your 80G no isnt valid\n"
            for ngo in ngoList:
                mail.send_mail(sender=" <tanaygahlot@gmail.com>",
                    to= "<"+ngo.email+">",
                    subject="Your NGO has failed authentication test",
                    body="""Dear :"""+ ngo.name + failiureReport +"""Please let us know if you have any questions.
The csr.com Team
""")
app = webapp2.WSGIApplication([('/admin/CredibilityCheck', CredibilityCheckHandler),('/admin/fake/NGO',CreateFakeNGOAccount),('/admin/fake/Project',CreateFakeProject),('/admin/CredibilityCheck/([0-9]+)', CredibilityCheckNGOHandler), ('/admin', AdminHandler ), ('/admin/Authenticate', AuthenticateHandler), ('/admin/Authenticate/([0-9]+)', AuthenticateProjectHandler)])
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create/drop the disk_user table (auto-generated)."""

    def forwards(self, orm):
        # Adding model 'User'
        db.create_table('disk_user', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('username', self.gf('django.db.models.fields.CharField')(max_length=30)),
            ('password', self.gf('django.db.models.fields.CharField')(default='', max_length=50)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
            ('headimg', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
            ('create_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('update_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('disk', ['User'])

    def backwards(self, orm):
        # Deleting model 'User'
        db.delete_table('disk_user')

    # Frozen model definitions used by South's ORM freezer; do not edit by hand.
    models = {
        'disk.user': {
            'Meta': {'object_name': 'User'},
            'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'headimg': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
            'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        }
    }

    complete_apps = ['disk']
# ['red', 'yellow', 'blue'].
# f(1) = 1
# f(n) = f(n - 1) + n
# term value colour
# 1 1 'red'
# 2 3 'blue'
# 3 6 'blue'
# 4 10 'red'
# 5 15 'blue'
# 6 21 'blue'
# 7 28 'red'
# (3, 3, 'blue') == [6, 15, 21]
# (100, 4, 'red') == [136, 190, 253, 325]
# 100 < val < 1000000
# 3 < k < 20
# Precompute triangular numbers T(i) = T(i-1) + i and bucket them by
# T(i) % 3, which determines their colour in the red/blue/yellow cycle
# (mod 1 -> red, mod 0 -> blue, mod 2 -> yellow).
D, R = {}, [[], [], []]
for i in range(10000):
    D[i] = D.get(i - 1, 0) + i
    R[D[i] % 3].append(D[i])


def same_col_seq(val, k, col):
    """Return the first k triangular numbers greater than val with colour col."""
    bucket = ['blue', 'red', 'yellow'].index(col)
    return [term for term in R[bucket] if term > val][:k]


print(same_col_seq(100, 4, 'red'))
# -*- coding: utf-8 -*-
# Date: 2020/3/17 14:21
"""
some command args
"""
__author__ = 'tianyu'
from argparse import ArgumentParser
import sys
class Parser(ArgumentParser):
    """ArgumentParser pre-loaded with common training/monitoring/snapshot flags."""

    def __init__(self, args_name='network'):
        """
        :param args_name: str, experiment/network name used in the description
        """
        self.work_name = args_name
        super(Parser, self).__init__(description=f"PyTorch implementation of {args_name}")
        self._add_default()

    def _add_default(self):
        """Register the default argument set shared by all experiments."""
        self.add_argument('--dataset', '-ds', type=str, default='', help='the dataset for code')
        self.add_argument('-bs', dest='batch_size', type=int, default=1, help='batch size of data loader')
        self.add_argument('-j', dest='workers', type=int, default=2, help='the number of worker of data loader')
        self.add_argument('-lr', type=float, default=0.01, help='learning rate')
        self.add_argument('-wd', dest='weight_decay', default=5e-4, type=float, help='weight decay (default: 5e-4)')
        self.add_argument('-mmt', dest='momentum', default=0.9, type=float, help='momentum')
        self.add_argument('-dp', dest='dropout', type=float, default=0.5, help='dropout')
        self.add_argument('--start_epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
        self.add_argument('--epochs', default=200, type=int, help='number of total epochs to run')
        self.add_argument('-gpu', type=int, default=None, help='the number of gpu id')
        self.add_argument('--exp_suffix', type=str, default='', help='some extensional information for experiment file name')
        self.add_argument('--ext', dest='extension', type=str, default='', help='some extensional information, as flag')
        self.add_argument('--resume', default='', type=str, nargs='+', metavar='PATH', help='pattern like experiment_name/models')
        self.add_argument('-eval', dest='evaluate', default='', type=str, nargs='+', help='checkpoint path on evaluation model')
        self.add_argument('--deterministic', action='store_true', help='fix pytorch framework seed to recurrent result')
        self.add_argument('--adjust_lr', action='store_true', help='ajust learning rate')
        # ========================= Monitor Configs ==========================
        self.add_argument('--print_freq', '-p', default=10, type=int, help='print frequency (default: 10)')
        self.add_argument('--logger_name', '-lname', default='log.txt', type=str, help='logger name')
        self.add_argument('--work_dir', '-wdir', default='', type=str, help='workspace directory')
        self.add_argument('--clean_up', default=5, type=int, help='save top-k best checkpoint')
        self.add_argument('--debug', action='store_true', help='open debug, setting workers of dataloaer 1')
        self.add_argument('--p_bar', action='store_true', help='open process bar')
        self.add_argument('--no_tb', action='store_false', help='close tensorboard visualization')
        self.add_argument('--nowtime_exp', '-orz', action='store_false', help='automatically add nowtime as the postfix of experiment directory')
        # ========================= Snapshot Repo Configs ==========================
        self.add_argument('--close_snapgit', dest='snapgit', action='store_false', help='close code snapshot when git tool isn\'t installed in your system or you don\'t want to open this function!')
        self.add_argument('--revert_snapgit', type=str, default='', help='commit id for reverting. Don\'t completed hash str but str length of 8 at least ')

    def add(self, name, value=''):
        """Append a flag/value pair to sys.argv before parsing; returns self for chaining."""
        sys.argv.extend([name, value])
        return self
if __name__ == '__main__':
    # Smoke test: build the default parser and echo the parsed namespace.
    p = Parser()
    args = p.parse_args()
    print(args)
|
def hammingWeight(n):
    """Return the number of set bits in the 32-bit integer n.

    Alternative (library) approach: bin(n).count("1").
    This version tests each of the 32 bit positions explicitly.
    """
    count = 0
    for shift in range(32):
        if (n >> shift) & 1:
            count += 1
    return count


print(hammingWeight(0b00000000000000000000000000001011))
import sys, getopt, datetime, codecs
import got3
import pandas as pd
from datetime import datetime
from datetime import timedelta
from time import sleep
def getTweets(Name="", Term="", Start="", End="", maxDayTweets=5000):
tweetCriteria = got3.manager.TweetCriteria()
twList = []
delta = datetime.strptime(End, "%Y-%m-%d")- datetime.strptime(Start, "%Y-%m-%d")
day_count = delta.days
for index in range(0,day_count, 1):
dateStart = (datetime.strptime(Start, "%Y-%m-%d") + timedelta(days=index)).strftime("%Y-%m-%d")
dateEnd = (datetime.strptime(Start, "%Y-%m-%d") + timedelta(days=index + 1)).strftime("%Y-%m-%d")
tweetCriteria = got3.manager.TweetCriteria().setQuerySearch(Term).setSince(dateStart).setUntil(
dateEnd).setMaxTweets(maxDayTweets)
tweets = got3.manager.TweetManager.getTweets(tweetCriteria)
for tweet in tweets:
twList.append([Name, tweet.text, tweet.id, tweet.date, tweet.retweets, tweet.favorites, tweet.mentions, tweet.hashtags])
twDf = pd.DataFrame(columns=["Company", "Text", "ID", "Exact Date", "Retweets", "Favorites", "Mentions", "Hashtags"], data=twList)
return twDf
# main loop
if __name__ == '__main__':
    # Each row of the schedule CSV names a company, its date range and the
    # Twitter search term to use for it.
    df = pd.read_csv("to_scrape_part_7.csv", sep=",")
    df = df[["Company", "Start", "End", "Lookup Term"]]
    print(df.head())
    # Map column names to positional indices for row access below.
    coldict = dict(zip(["Company", "Start", "End", "Lookup Term"], [0, 1, 2, 3]))
    for index, row in df.iterrows():
        starttime = datetime.now()
        outputFileName = row[coldict['Company']].lower() + "_tweets.csv"
        # NOTE(review): "_tweets.csv" is 11 characters but 14 are stripped
        # here, i.e. the folder name also loses the company name's last three
        # characters — confirm this matches the Done_Scrapes folder layout.
        folderName = outputFileName[:-14]
        tweetsDf = getTweets(row[coldict['Company']], row[coldict["Lookup Term"]], row[coldict["Start"]], row[coldict["End"]],
                             100000)
        print(outputFileName + " now contains " + str(len(tweetsDf)) + " Tweets scraped in " + str(datetime.now() - starttime))
        # print(tweetsDf.head())
        tweetsDf.to_csv("Done_Scrapes/" + folderName + "/" + outputFileName, header=True, index=False)
        # Pause so the operator can inspect each company's result.
        input("Press enter to exit")
        # if index%10 == 0:
        #     sleep(300)
# make sure that prints will be supported
import argparse, sys
import random
import time
sys.path.append("..")
print(sys.path)
from blossompy import Blossom
from time import sleep
import simpleaudio as sa #used for playing audio files to facilitate exercise
# seed time for better randomness
# seed time for better randomness
random.seed(time.time())

# Shared multi-robot state; not referenced by main() in this breathing demo.
master_robot = None
robots = []
last_cmd, last_args = 'rand', []
def _breathing_cycle(bl, reps, inhale_pause, exhale_pause):
    """Run `reps` inhale/exhale sequence pairs with the given pauses (seconds)."""
    for _ in range(reps):
        # get command string
        print("\ninhaling . . .\n")
        bl.do_sequence("breathing/inhale")
        time.sleep(inhale_pause)
        print("\nexhaling . . .\n")
        bl.do_sequence("breathing/exhale")
        time.sleep(exhale_pause)


def main(args):
    """Guide the user through a Blossom-led breathing exercise with audio.

    Connects to the robot, starts the facilitation audio, then runs three
    timed rounds of inhale/exhale motions separated by rest poses.
    (Refactored: the three near-identical breathing loops are now one helper.)
    """
    bl = Blossom(sequence_dir='../blossompy/src/sequences')
    bl.connect()  # safe init and connects to blossom and puts blossom in reset position
    bl.load_sequences()
    bl.do_sequence("breathing/startbreath")
    time.sleep(4)
    # Play the spoken facilitation audio alongside the motions.
    filename = "../blossompy/media/breathing_facilitation.wav"
    wave_obj = sa.WaveObject.from_wave_file(filename)
    play_obj = wave_obj.play()
    print("\nplaying audio\n")
    time.sleep(27)
    _breathing_cycle(bl, 2, 5, 4)
    bl.do_sequence("breathing/intermediate")
    time.sleep(27)
    bl.do_sequence("breathing/startbreath")
    _breathing_cycle(bl, 2, 5, 5)
    bl.do_sequence("breathing/intermediate")
    time.sleep(20)
    bl.do_sequence("breathing/startbreath")
    time.sleep(3)
    _breathing_cycle(bl, 3, 6.5, 4.5)
    print("\nFinished! Thanks for trying Blossom Breathing.")
# handle the command and arguments
def parse_args(args):
    """Build the CLI parser and parse the given argument list.

    args:
        args the arguments from terminal (typically sys.argv[1:])
    returns:
        argparse.Namespace with names, port, browser_disable, list_robots
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--names', '-n', type=str, nargs='+',
                        default=["woody"], help='Name of the robot.')
    parser.add_argument('--port', '-p', type=int, default=8000,
                        help='Port to start server on.')
    parser.add_argument('--browser-disable', '-b', action='store_true',
                        help='prevent a browser window from opening with the blossom UI')
    parser.add_argument('--list-robots', '-l', action='store_true',
                        help='list all robot names')
    return parser.parse_args(args)
"""
Generic main handler
"""
if __name__ == "__main__":
main(parse_args(sys.argv[1:]))
|
from __future__ import absolute_import, unicode_literals
from c8.utils import get_col_name
import json
__all__ = [
'StandardFabric',
'AsyncFabric',
'BatchFabric',
'TransactionFabric'
]
from datetime import datetime
from c8.api import APIWrapper
from c8.c8ql import C8QL
from c8 import constants
from c8.executor import (
DefaultExecutor,
AsyncExecutor,
BatchExecutor,
TransactionExecutor,
)
from c8.collection import StandardCollection
from c8.stream_collection import StreamCollection
from c8.exceptions import (
AsyncJobClearError,
AsyncJobListError,
CollectionCreateError,
CollectionDeleteError,
CollectionListError,
FabricDeleteError,
FabricCreateError,
FabricListError,
FabricPropertiesError,
GraphListError,
GraphCreateError,
GraphDeleteError,
ServerConnectionError,
ServerDetailsError,
ServerVersionError,
TransactionExecuteError,
TenantDcListError,
SpotRegionUpdateError
)
from c8 import exceptions as ex
from c8.graph import Graph
from c8.request import Request
import json
import random
import pulsar
from urllib.parse import urlparse
def printdata(event):
    """Default on-change callback: echo a real-time update to stdout.

    :param event: real-time update.
    :type event: str | unicode
    """
    print(event)
class Fabric(APIWrapper):
"""Base class for Fabric API wrappers.
:param connection: HTTP connection.
:type connection: c8.connection.Connection
:param executor: API executor.
:type executor: c8.executor.Executor
"""
    def enum(**enums):
        # Build a minimal enum-like class from keyword arguments
        # (predates reliance on the stdlib ``enum`` module).
        return type('Enum', (), enums)

    # Allowed spot-region assignment modes for geo-fabrics; consumed by
    # create_fabric().
    SPOT_CREATION_TYPES = enum(AUTOMATIC='automatic', NONE='none',
                               SPOT_REGION='spot_region')
    def __init__(self, connection, executor):
        """Initialize the fabric API wrapper.

        :param connection: HTTP connection.
        :type connection: c8.connection.Connection
        :param executor: API executor.
        :type executor: c8.executor.Executor
        """
        # Cache connection coordinates needed for C8Streams (pulsar) access.
        self.url = connection.url
        self.stream_port = connection.stream_port
        # Pulsar client is created lazily on first real-time subscription
        # (see on_change); None means "not yet connected".
        self.pulsar_client = None
        self.persistent = True
        super(Fabric, self).__init__(connection, executor)
def __getitem__(self, name):
"""Return the collection API wrapper.
:param name: Collection name.
:type name: str | unicode
:return: Collection API wrapper.
:rtype: c8.collection.StandardCollection
"""
return self.collection(name)
def _get_col_by_doc(self, document):
"""Return the collection of the given document.
:param document: Document ID or body with "_id" field.
:type document: str | unicode | dict
:return: Collection API wrapper.
:rtype: c8.collection.StandardCollection
:raise c8.exceptions.DocumentParseError: On malformed document.
"""
return self.collection(get_col_name(document))
@property
def name(self):
"""Return fabric name.
:return: Fabric name.
:rtype: str | unicode
"""
return self.fabric_name
@property
def c8ql(self):
"""Return C8QL (C8Db Query Language) API wrapper.
:return: C8QL API wrapper.
:rtype: c8.c8ql.C8QL
"""
return C8QL(self._conn, self._executor)
    def on_change(self, collection, callback=printdata):
        """Execute given input function on receiving a change.

        Blocks indefinitely: subscribes to the collection's local stream
        topic and invokes *callback* for every decoded JSON message.

        :param callback: Function to execute on a change
        :type callback: function
        :param collection: Collection name to listen on for realtime data.
        :type collection: str
        :raise ValueError: If no collection is given.
        """
        if not collection:
            raise ValueError('You must specify a collection on which to watch for realtime data!')
        namespace = constants.STREAM_LOCAL_NS_PREFIX + self.fabric_name
        # Topic layout: persistent://<tenant>/<namespace>/<collection>
        topic = "persistent://" + self.tenant_name + "/" + namespace + "/" + collection
        # Random suffix keeps concurrent subscribers from sharing one subscription.
        subscription_name = self.tenant_name + "-" + self.fabric_name + "-subscription-" + str(random.randint(1, 1000))
        print("pyC8 Realtime: Subscribing to topic: " + topic + " on Subscription name: " + subscription_name)
        # NOTE(review): the "Initialized" message prints when a client already
        # exists, while the branch that actually creates one is silent —
        # looks inverted; confirm intent.
        if self.pulsar_client:
            print("pyC8 Realtime: Initialized C8Streams connection to " + self.url + ":" + str(self.stream_port))
        else:
            # Lazily create the pulsar client against the local DC's stream URL.
            dcl_local = self.dclist_local()
            self.pulsar_client = pulsar.Client('pulsar://' + dcl_local['tags']['url'] + ":" + str(self.stream_port))
        consumer = self.pulsar_client.subscribe(topic, subscription_name)
        try:
            print("pyC8 Realtime: Begin monitoring realtime updates for " + topic)
            while True:
                msg = consumer.receive()
                data = msg.data().decode('utf-8')
                jdata = json.loads(data)
                # self.consumer.acknowledge(msg)
                callback(jdata)
        finally:
            # Always release the client, even if the callback raises or the
            # loop is interrupted.
            self.pulsar_client.close()
def properties(self):
"""Return fabric properties.
:return: Fabric properties.
:rtype: dict
:raise c8.exceptions.FabricPropertiesError: If retrieval fails.
"""
request = Request(
method='get',
endpoint='/database/current',
)
def response_handler(resp):
if not resp.is_success:
raise FabricPropertiesError(resp, request)
result = resp.body['result']
result['system'] = result.pop('isSystem')
return result
return self._execute(request, response_handler)
def execute_transaction(self,
command,
params=None,
read=None,
write=None,
sync=None,
timeout=None,
max_size=None,
allow_implicit=None,
intermediate_commit_count=None,
intermediate_commit_size=None):
"""Execute raw Javascript command in transaction.
:param command: Javascript command to execute.
:type command: str | unicode
:param read: Names of collections read during transaction. If parameter
**allow_implicit** is set to True, any undeclared read collections
are loaded lazily.
:type read: [str | unicode]
:param write: Names of collections written to during transaction.
Transaction fails on undeclared write collections.
:type write: [str | unicode]
:param params: Optional parameters passed into the Javascript command.
:type params: dict
:param sync: Block until operation is synchronized to disk.
:type sync: bool
:param timeout: Timeout for waiting on collection locks. If set to 0,
C8Db server waits indefinitely. If not set, system default
value is used.
:type timeout: int
:param max_size: Max transaction size limit in bytes. Applies only
to RocksDB storage engine.
:type max_size: int
:param allow_implicit: If set to True, undeclared read collections are
loaded lazily. If set to False, transaction fails on any undeclared
collections.
:type allow_implicit: bool
:param intermediate_commit_count: Max number of operations after which
an intermediate commit is performed automatically. Applies only to
RocksDB storage engine.
:type intermediate_commit_count: int
:param intermediate_commit_size: Max size of operations in bytes after
which an intermediate commit is performed automatically. Applies
only to RocksDB storage engine.
:type intermediate_commit_size: int
:return: Return value of **command**.
:rtype: str | unicode
:raise c8.exceptions.TransactionExecuteError: If execution fails.
"""
collections = {'allowImplicit': allow_implicit}
if read is not None:
collections['read'] = read
if write is not None:
collections['write'] = write
data = {'action': command}
if collections:
data['collections'] = collections
if params is not None:
data['params'] = params
if timeout is not None:
data['lockTimeout'] = timeout
if sync is not None:
data['waitForSync'] = sync
if max_size is not None:
data['maxTransactionSize'] = max_size
if intermediate_commit_count is not None:
data['intermediateCommitCount'] = intermediate_commit_count
if intermediate_commit_size is not None:
data['intermediateCommitSize'] = intermediate_commit_size
request = Request(
method='post',
endpoint='/transaction',
data=data
)
def response_handler(resp):
if not resp.is_success:
raise TransactionExecuteError(resp, request)
return resp.body.get('result')
return self._execute(request, response_handler)
    def update_spot_region(self, tenant, fabric, new_dc):
        """Updates spot primary region for the geo-fabric.

        :param tenant: tenant name
        :type tenant: str
        :param fabric: fabric name
        :type fabric: str
        :param new_dc: New spot region
        :type new_dc: str
        :return: True if request successful, false otherwise
        :rtype: bool
        :raise c8.exceptions.SpotRegionUpdateError: If updation fails.
        """
        request = Request(
            method='put',
            # NOTE(review): endpoint lacks the leading '/' every sibling method
            # uses — confirm the request layer normalizes this.
            endpoint='_tenant/{}/_fabric/{}/database/{}'.format(tenant, fabric, new_dc),
        )
        def response_handler(resp):
            if not resp.is_success:
                raise SpotRegionUpdateError(resp, request)
            return True
        return self._execute(request, response_handler)
    def fabrics_detail(self):
        """Return names and creation options of fabrics accessible to the user.

        :return: One dict per fabric with 'name' and 'options' keys.
        :rtype: [dict]
        :raise c8.exceptions.FabricListError: If retrieval fails.
        """
        request = Request(
            method='get',
            endpoint='/database/user'
        )
        def response_handler(resp):
            if not resp.is_success:
                raise FabricListError(resp, request)
            # Keep only the fields callers rely on.
            return [{
                'name': col['name'],
                'options': col['options']
            } for col in map(dict, resp.body['result'])]
        return self._execute(request, response_handler)
def version(self):
"""Return C8Db server version.
:return: Server version.
:rtype: str | unicode
:raise c8.exceptions.ServerVersionError: If retrieval fails.
"""
request = Request(
method='get',
endpoint='/_admin/version',
params={'details': False}
)
def response_handler(resp):
if not resp.is_success:
raise ServerVersionError(resp, request)
return resp.body['version']
return self._execute(request, response_handler)
def ping(self):
"""Ping the C8Db server by sending a test request.
:return: Response code from server.
:rtype: int
:raise c8.exceptions.ServerConnectionError: If ping fails.
"""
request = Request(
method='get',
endpoint='/collection',
)
def response_handler(resp):
code = resp.status_code
if code in {401, 403}:
raise ServerConnectionError('bad username and/or password')
if not resp.is_success:
raise ServerConnectionError(
resp.error_message or 'bad server response')
return code
return self._execute(request, response_handler)
#########################
# Datacenter Management #
#########################
def dclist(self):
"""Return the list of names of Datacenters
:return: DC List.
:rtype: [str | unicode ]
:raise c8.exceptions.TenantListError: If retrieval fails.
"""
request = Request(
method='get',
endpoint='/datacenter/all'
)
def response_handler(resp):
# print("dclist() : Response body: " + str(resp.body))
if not resp.is_success:
raise TenantDcListError(resp, request)
dc_list = []
for dc in resp.body:
dc_list.append(dc['name'])
return dc_list
return self._execute(request, response_handler)
def dclist_detail(self):
"""Return the list of details of Datacenters
:return: DC List.
:rtype: [str | unicode ]
:raise c8.exceptions.TenantListError: If retrieval fails.
"""
request = Request(
method='get',
endpoint='/datacenter/all'
)
def response_handler(resp):
# print("dclist() : Response body: " + str(resp.body))
if not resp.is_success:
raise TenantDcListError(resp, request)
return resp.body
return self._execute(request, response_handler)
def dclist_local(self):
"""Return the list of local Datacenters
:return: DC List.
:rtype: [str | unicode ]
:raise c8.exceptions.TenantListError: If retrieval fails.
"""
request = Request(
method='get',
endpoint='/datacenter/local'
)
def response_handler(resp):
# print("dclist() : Response body: " + str(resp.body))
if not resp.is_success:
raise TenantDcListError(resp, request)
return resp.body
return self._execute(request, response_handler)
#######################
# Fabric Management #
#######################
def fabrics(self):
"""Return the names all fabrics.
:return: Fabric names.
:rtype: [str | unicode]
:raise c8.exceptions.FabricListError: If retrieval fails.
"""
request = Request(
method='get',
endpoint='/database'
)
def response_handler(resp):
if not resp.is_success:
raise FabricListError(resp, request)
return resp.body['result']
return self._execute(request, response_handler)
def has_fabric(self, name):
"""Check if a fabric exists.
:param name: Fabric name.
:type name: str | unicode
:return: True if fabric exists, False otherwise.
:rtype: bool
"""
return name in self.fabrics()
def create_fabric(self, name, spot_creation_type=SPOT_CREATION_TYPES.AUTOMATIC, spot_dc=None, users=None,
dclist=None):
"""Create a new fabric.
:param name: Fabric name.
:type name: str | unicode
:param spot_creation_type: Specifying the mode of creating geo-fabric.If you use AUTOMATIC, a random spot region
will be assigned by the system. If you specify NONE, a geo-fabric is created without
the spot properties. If you specify SPOT_REGION,pass the corresponding spot region in
the spot_dc parameter.
:type name: Enum containing spot region creation types
:param name: Spot Region name, if spot_creation_type is set to SPOT_REGION
:type name: str
:param users: List of users with access to the new fabric, where each
user is a dictionary with fields "username", "password", "active"
and "extra" (see below for example). If not set, only the admin and
current user are granted access.
:type users: [dict]
:param dclist : list of strings of datacenters
:type dclist: [str | unicode]
:return: True if fabric was created successfully.
:rtype: bool
:raise c8.exceptions.FabricCreateError: If create fails.
Here is an example entry for parameter **users**:
.. code-block:: python
{
'username': 'john',
'password': 'password',
'active': True,
'extra': {'Department': 'IT'}
}
"""
data = {'name': name}
if users is not None:
data['users'] = [{
'username': user['username'],
'passwd': user['password'],
'active': user.get('active', True),
'extra': user.get('extra', {})
} for user in users]
options = {}
dcl = ''
if dclist:
# Process dclist param (type list) to build up comma-separated string of DCs
for dc in dclist:
if len(dcl) > 0:
dcl += ','
dcl += dc
options['dcList'] = dcl
if spot_creation_type == self.SPOT_CREATION_TYPES.NONE:
options['spotDc'] = ''
elif spot_creation_type == self.SPOT_CREATION_TYPES.SPOT_REGION and spot_dc:
options['spotDc'] = spot_dc
data['options'] = options
request = Request(
method='post',
endpoint='/database',
data=data
)
def response_handler(resp):
if not resp.is_success:
raise FabricCreateError(resp, request)
return True
return self._execute(request, response_handler)
def delete_fabric(self, name, ignore_missing=False):
"""Delete the fabric.
:param name: Fabric name.
:type name: str | unicode
:param ignore_missing: Do not raise an exception on missing fabric.
:type ignore_missing: bool
:return: True if fabric was deleted successfully, False if fabric
was not found and **ignore_missing** was set to True.
:rtype: bool
:raise c8.exceptions.FabricDeleteError: If delete fails.
"""
request = Request(
method='delete',
endpoint='/database/{}'.format(name)
)
def response_handler(resp):
if resp.error_code == 1228 and ignore_missing:
return False
if not resp.is_success:
raise FabricDeleteError(resp, request)
return resp.body['result']
return self._execute(request, response_handler)
#########################
# Collection Management #
#########################
def collection(self, name):
"""Return the standard collection API wrapper.
:param name: Collection name.
:type name: str | unicode
:return: Standard collection API wrapper.
:rtype: c8.collection.StandardCollection
"""
return StandardCollection(self._conn, self._executor, name)
def has_collection(self, name):
"""Check if collection exists in the fabric.
:param name: Collection name.
:type name: str | unicode
:return: True if collection exists, False otherwise.
:rtype: bool
"""
return any(col['name'] == name for col in self.collections())
def collections(self):
"""Return the collections in the fabric.
:return: Collections in the fabric and their details.
:rtype: [dict]
:raise c8.exceptions.CollectionListError: If retrieval fails.
"""
request = Request(
method='get',
endpoint='/collection'
)
def response_handler(resp):
if not resp.is_success:
raise CollectionListError(resp, request)
return [{
'id': col['id'],
'name': col['name'],
'system': col['isSystem'],
'type': StandardCollection.types[col['type']],
'status': StandardCollection.statuses[col['status']],
} for col in map(dict, resp.body['result'])]
return self._execute(request, response_handler)
def create_collection(self,
name,
sync=False,
edge=False,
user_keys=True,
key_increment=None,
key_offset=None,
key_generator='traditional',
shard_fields=None,
index_bucket_count=None,
sync_replication=None,
enforce_replication_factor=None,
spot_collection=False
):
"""Create a new collection.
:param name: Collection name.
:type name: str | unicode
:param sync: If set to True, document operations via the collection
will block until synchronized to disk by default.
:type sync: bool
:param edge: If set to True, an edge collection is created.
:type edge: bool
:param key_generator: Used for generating document keys. Allowed values
are "traditional" or "autoincrement".
:type key_generator: str | unicode
:param user_keys: If set to True, users are allowed to supply document
keys. If set to False, the key generator is solely responsible for
supplying the key values.
:type user_keys: bool
:param key_increment: Key increment value. Applies only when value of
**key_generator** is set to "autoincrement".
:type key_increment: int
:param key_offset: Key offset value. Applies only when value of
**key_generator** is set to "autoincrement".
:type key_offset: int
:param shard_fields: Field(s) used to determine the target shard.
:type shard_fields: [str | unicode]
:param index_bucket_count: Number of buckets into which indexes using
hash tables are split. The default is 16, and this number has to be
a power of 2 and less than or equal to 1024. For large collections,
one should increase this to avoid long pauses when the hash table
has to be initially built or re-sized, since buckets are re-sized
individually and can be initially built in parallel. For instance,
64 may be a sensible value for 100 million documents.
:type index_bucket_count: int
:param sync_replication: If set to True, server reports success only
when collection is created in all replicas. You can set this to
False for faster server response, and if full replication is not a
concern.
:type sync_replication: bool
:param enforce_replication_factor: Check if there are enough replicas
available at creation time, or halt the operation.
:type enforce_replication_factor: bool
:param spot_collection: If True, it is a spot collection
:type bool
:return: Standard collection API wrapper.
:rtype: c8.collection.StandardCollection
:raise c8.exceptions.CollectionCreateError: If create fails.
"""
key_options = {'type': key_generator, 'allowUserKeys': user_keys}
if key_increment is not None:
key_options['increment'] = key_increment
if key_offset is not None:
key_options['offset'] = key_offset
data = {
'name': name,
'waitForSync': sync,
'keyOptions': key_options,
'type': 3 if edge else 2,
'isSpot': spot_collection
}
if shard_fields is not None:
data['shardKeys'] = shard_fields
if index_bucket_count is not None:
data['indexBuckets'] = index_bucket_count
params = {}
if sync_replication is not None:
params['waitForSyncReplication'] = sync_replication
if enforce_replication_factor is not None:
params['enforceReplicationFactor'] = enforce_replication_factor
request = Request(
method='post',
endpoint='/collection',
params=params,
data=data
)
def response_handler(resp):
if resp.is_success:
return self.collection(name)
raise CollectionCreateError(resp, request)
return self._execute(request, response_handler)
def delete_collection(self, name, ignore_missing=False, system=None):
"""Delete the collection.
:param name: Collection name.
:type name: str | unicode
:param ignore_missing: Do not raise an exception on missing collection.
:type ignore_missing: bool
:param system: Whether the collection is a system collection.
:type system: bool
:return: True if collection was deleted successfully, False if
collection was not found and **ignore_missing** was set to True.
:rtype: bool
:raise c8.exceptions.CollectionDeleteError: If delete fails.
"""
params = {}
if system is not None:
params['isSystem'] = system
request = Request(
method='delete',
endpoint='/collection/{}'.format(name),
params=params
)
def response_handler(resp):
if resp.error_code == 1203 and ignore_missing:
return False
if not resp.is_success:
raise CollectionDeleteError(resp, request)
return True
return self._execute(request, response_handler)
####################
# Graph Management #
####################
def graph(self, name):
"""Return the graph API wrapper.
:param name: Graph name.
:type name: str | unicode
:return: Graph API wrapper.
:rtype: c8.graph.Graph
"""
return Graph(self._conn, self._executor, name)
def has_graph(self, name):
"""Check if a graph exists in the fabric.
:param name: Graph name.
:type name: str | unicode
:return: True if graph exists, False otherwise.
:rtype: bool
"""
for graph in self.graphs():
if graph['name'] == name:
return True
return False
    def graphs(self):
        """List all graphs in the fabric.

        :return: Graphs in the fabric.
        :rtype: [dict]
        :raise c8.exceptions.GraphListError: If retrieval fails.
        """
        request = Request(method='get', endpoint='/_api/graph')
        def response_handler(resp):
            if not resp.is_success:
                raise GraphListError(resp, request)
            # Translate server field names (camelCase, underscore-prefixed
            # metadata) to the client-side snake_case schema.
            return [
                {
                    'id': body['_id'],
                    'name': body['_key'],
                    'revision': body['_rev'],
                    'orphan_collections': body['orphanCollections'],
                    'edge_definitions': [
                        {
                            'edge_collection': definition['collection'],
                            'from_vertex_collections': definition['from'],
                            'to_vertex_collections': definition['to'],
                        }
                        for definition in body['edgeDefinitions']
                    ],
                    # Optional fields: absent on non-enterprise servers.
                    'shard_count': body.get('numberOfShards'),
                    'replication_factor': body.get('replicationFactor')
                } for body in resp.body['graphs']
            ]
        return self._execute(request, response_handler)
    def create_graph(self,
                     name,
                     edge_definitions=None,
                     orphan_collections=None,
                     shard_count=None):
        """Create a new graph.

        :param name: Graph name.
        :type name: str | unicode
        :param edge_definitions: List of edge definitions, where each edge
            definition entry is a dictionary with fields "edge_collection",
            "from_vertex_collections" and "to_vertex_collections" (see below
            for example).
        :type edge_definitions: [dict]
        :param orphan_collections: Names of additional vertex collections that
            are not in edge definitions.
        :type orphan_collections: [str | unicode]
        :param shard_count: Number of shards used for every collection in the
            graph. This number cannot be modified later once set. Applies only
            to the enterprise version of C8Db.
        :type shard_count: int
        :return: Graph API wrapper.
        :rtype: c8.graph.Graph
        :raise c8.exceptions.GraphCreateError: If create fails.

        Here is an example entry for parameter **edge_definitions**:

        .. code-block:: python

            {
                'edge_collection': 'teach',
                'from_vertex_collections': ['teachers'],
                'to_vertex_collections': ['lectures']
            }
        """
        data = {'name': name}
        if edge_definitions is not None:
            # Translate client-side snake_case keys to the server's schema.
            data['edgeDefinitions'] = [{
                'collection': definition['edge_collection'],
                'from': definition['from_vertex_collections'],
                'to': definition['to_vertex_collections']
            } for definition in edge_definitions]
        if orphan_collections is not None:
            data['orphanCollections'] = orphan_collections
        if shard_count is not None:  # pragma: no cover
            data['numberOfShards'] = shard_count
        request = Request(
            method='post',
            endpoint='/_api/graph',
            data=data
        )
        def response_handler(resp):
            if resp.is_success:
                return Graph(self._conn, self._executor, name)
            raise GraphCreateError(resp, request)
        return self._execute(request, response_handler)
def delete_graph(self, name, ignore_missing=False, drop_collections=None):
"""Drop the graph of the given name from the fabric.
:param name: Graph name.
:type name: str | unicode
:param ignore_missing: Do not raise an exception on missing graph.
:type ignore_missing: bool
:param drop_collections: Drop the collections of the graph also. This
is only if they are not in use by other graphs.
:type drop_collections: bool
:return: True if graph was deleted successfully, False if graph was not
found and **ignore_missing** was set to True.
:rtype: bool
:raise c8.exceptions.GraphDeleteError: If delete fails.
"""
params = {}
if drop_collections is not None:
params['dropCollections'] = drop_collections
request = Request(
method='delete',
endpoint='/_api/graph/{}'.format(name),
params=params
)
def response_handler(resp):
if resp.error_code == 1924 and ignore_missing:
return False
if not resp.is_success:
raise GraphDeleteError(resp, request)
return True
return self._execute(request, response_handler)
#######################
# Document Management #
#######################
def has_document(self, document, rev=None, check_rev=True):
"""Check if a document exists.
:param document: Document ID or body with "_id" field.
:type document: str | unicode | dict
:param rev: Expected document revision. Overrides value of "_rev" field
in **document** if present.
:type rev: str | unicode
:param check_rev: If set to True, revision of **document** (if given)
is compared against the revision of target document.
:type check_rev: bool
:return: True if document exists, False otherwise.
:rtype: bool
:raise c8.exceptions.DocumentInError: If check fails.
:raise c8.exceptions.DocumentRevisionError: If revisions mismatch.
"""
return self._get_col_by_doc(document).has(
document=document,
rev=rev,
check_rev=check_rev
)
def document(self, document, rev=None, check_rev=True):
"""Return a document.
:param document: Document ID or body with "_id" field.
:type document: str | unicode | dict
:param rev: Expected document revision. Overrides the value of "_rev"
field in **document** if present.
:type rev: str | unicode
:param check_rev: If set to True, revision of **document** (if given)
is compared against the revision of target document.
:type check_rev: bool
:return: Document, or None if not found.
:rtype: dict | None
:raise c8.exceptions.DocumentGetError: If retrieval fails.
:raise c8.exceptions.DocumentRevisionError: If revisions mismatch.
"""
return self._get_col_by_doc(document).get(
document=document,
rev=rev,
check_rev=check_rev
)
def insert_document(self,
collection,
document,
return_new=False,
sync=None,
silent=False):
"""Insert a new document.
:param collection: Collection name.
:type collection: str | unicode
:param document: Document to insert. If it contains the "_key" or "_id"
field, the value is used as the key of the new document (otherwise
it is auto-generated). Any "_rev" field is ignored.
:type document: dict
:param return_new: Include body of the new document in the returned
metadata. Ignored if parameter **silent** is set to True.
:type return_new: bool
:param sync: Block until operation is synchronized to disk.
:type sync: bool
:param silent: If set to True, no document metadata is returned. This
can be used to save resources.
:type silent: bool
:return: Document metadata (e.g. document key, revision) or True if
parameter **silent** was set to True.
:rtype: bool | dict
:raise c8.exceptions.DocumentInsertError: If insert fails.
"""
return self.collection(collection).insert(
document=document,
return_new=return_new,
sync=sync,
silent=silent
)
def update_document(self,
document,
check_rev=True,
merge=True,
keep_none=True,
return_new=False,
return_old=False,
sync=None,
silent=False):
"""Update a document.
:param document: Partial or full document with the updated values. It
must contain the "_id" field.
:type document: dict
:param check_rev: If set to True, revision of **document** (if given)
is compared against the revision of target document.
:type check_rev: bool
:param merge: If set to True, sub-dictionaries are merged instead of
the new one overwriting the old one.
:type merge: bool
:param keep_none: If set to True, fields with value None are retained
in the document. Otherwise, they are removed completely.
:type keep_none: bool
:param return_new: Include body of the new document in the result.
:type return_new: bool
:param return_old: Include body of the old document in the result.
:type return_old: bool
:param sync: Block until operation is synchronized to disk.
:type sync: bool
:param silent: If set to True, no document metadata is returned. This
can be used to save resources.
:type silent: bool
:return: Document metadata (e.g. document key, revision) or True if
parameter **silent** was set to True.
:rtype: bool | dict
:raise c8.exceptions.DocumentUpdateError: If update fails.
:raise c8.exceptions.DocumentRevisionError: If revisions mismatch.
"""
return self._get_col_by_doc(document).update(
document=document,
check_rev=check_rev,
merge=merge,
keep_none=keep_none,
return_new=return_new,
return_old=return_old,
sync=sync,
silent=silent
)
def replace_document(self,
document,
check_rev=True,
return_new=False,
return_old=False,
sync=None,
silent=False):
"""Replace a document.
:param document: New document to replace the old one with. It must
contain the "_id" field. Edge document must also have "_from" and
"_to" fields.
:type document: dict
:param check_rev: If set to True, revision of **document** (if given)
is compared against the revision of target document.
:type check_rev: bool
:param return_new: Include body of the new document in the result.
:type return_new: bool
:param return_old: Include body of the old document in the result.
:type return_old: bool
:param sync: Block until operation is synchronized to disk.
:type sync: bool
:param silent: If set to True, no document metadata is returned. This
can be used to save resources.
:type silent: bool
:return: Document metadata (e.g. document key, revision) or True if
parameter **silent** was set to True.
:rtype: bool | dict
:raise c8.exceptions.DocumentReplaceError: If replace fails.
:raise c8.exceptions.DocumentRevisionError: If revisions mismatch.
"""
return self._get_col_by_doc(document).replace(
document=document,
check_rev=check_rev,
return_new=return_new,
return_old=return_old,
sync=sync,
silent=silent
)
def delete_document(self,
document,
rev=None,
check_rev=True,
ignore_missing=False,
return_old=False,
sync=None,
silent=False):
"""Delete a document.
:param document: Document ID, key or body. Document body must contain
the "_id" field.
:type document: str | unicode | dict
:param rev: Expected document revision. Overrides the value of "_rev"
field in **document** if present.
:type rev: str | unicode
:param check_rev: If set to True, revision of **document** (if given)
is compared against the revision of target document.
:type check_rev: bool
:param ignore_missing: Do not raise an exception on missing document.
This parameter has no effect in transactions where an exception is
always raised on failures.
:type ignore_missing: bool
:param return_old: Include body of the old document in the result.
:type return_old: bool
:param sync: Block until operation is synchronized to disk.
:type sync: bool
:param silent: If set to True, no document metadata is returned. This
can be used to save resources.
:type silent: bool
:return: Document metadata (e.g. document key, revision), or True if
parameter **silent** was set to True, or False if document was not
found and **ignore_missing** was set to True (does not apply in
transactions).
:rtype: bool | dict
:raise c8.exceptions.DocumentDeleteError: If delete fails.
:raise c8.exceptions.DocumentRevisionError: If revisions mismatch.
"""
return self._get_col_by_doc(document).delete(
document=document,
rev=rev,
check_rev=check_rev,
ignore_missing=ignore_missing,
return_old=return_old,
sync=sync,
silent=silent
)
###################
# User Management #
###################
# See tenant.py
#########################
# Permission Management #
#########################
# See tenant.py
########################
# Async Job Management #
########################
# Pratik: APIs not supported in documentation. Waiting for verification
# def async_jobs(self, status, count=None):
# """Return IDs of async jobs with given status.
#
# :param status: Job status (e.g. "pending", "done").
# :type status: str | unicode
# :param count: Max number of job IDs to return.
# :type count: int
# :return: List of job IDs.
# :rtype: [str | unicode]
# :raise c8.exceptions.AsyncJobListError: If retrieval fails.
# """
# params = {}
# if count is not None:
# params['count'] = count
#
# request = Request(
# method='get',
# endpoint='/job/{}'.format(status),
# params=params
# )
#
# def response_handler(resp):
# if resp.is_success:
# return resp.body
# raise AsyncJobListError(resp, request)
#
# return self._execute(request, response_handler)
#
# def clear_async_jobs(self, threshold=None):
# """Clear async job results from the server.
#
# Async jobs that are still queued or running are not stopped.
#
# :param threshold: If specified, only the job results created prior to
# the threshold (a unix timestamp) are deleted. Otherwise, all job
# results are deleted.
# :type threshold: int
# :return: True if job results were cleared successfully.
# :rtype: bool
# :raise c8.exceptions.AsyncJobClearError: If operation fails.
# """
# if threshold is None:
# url = '/job/all'
# params = None
# else:
# url = '/job/expired'
# params = {'stamp': threshold}
#
# request = Request(
# method='delete',
# endpoint=url,
# params=params
# )
#
# def response_handler(resp):
# if resp.is_success:
# return True
# raise AsyncJobClearError(resp, request)
#
# return self._execute(request, response_handler)
########################
# Streams Management #
########################
def stream(self, operation_timeout_seconds=30):
"""Return the stream collection API wrapper.
:return: stream collection API wrapper.
:rtype: c8.stream_collection.StreamCollection
"""
return StreamCollection(self, self._conn, self._executor, self.url, self.stream_port, operation_timeout_seconds)
def streams(self):
"""Get list of all streams under given fabric
:return: List of streams under given fabric.
:rtype: json
:raise c8.exceptions.StreamListError: If retrieving streams fails.
"""
url_endpoint = '/streams'
request = Request(
method='get',
endpoint=url_endpoint
)
def response_handler(resp):
code = resp.status_code
if resp.is_success:
# NOTE: server API returns stream name as field 'topic' - we provide both here for user convenience
return [{
'name': col['topic'],
'topic': col['topic'],
'local': col['local'],
'db': col['db'],
'tenant': col['tenant'],
'type': StreamCollection.types[col['type']],
'status': 'terminated' if 'terminated' in col else 'active',
} for col in map(dict, resp.body['result'])]
elif code == 403:
raise ex.StreamPermissionError(resp, request)
raise ex.StreamConnectionError(resp, request)
return self._execute(request, response_handler)
def persistent_streams(self, local=False):
"""Get list of all streams under given fabric
:param local: Operate on a local stream instead of a global one. Default value: false
:return: List of streams under given fabric.
:rtype: json
:raise c8.exceptions.StreamListError: If retrieving streams fails.
"""
url_endpoint = '/streams/persistent?local={}'.format(local)
request = Request(
method='get',
endpoint=url_endpoint
)
def response_handler(resp):
code = resp.status_code
if resp.is_success:
# NOTE: server API returns stream name as field 'topic' - we provide both here for user convenience
return [{
'name': col['topic'],
'topic': col['topic'],
'local': col['local'],
'db': col['db'],
'tenant': col['tenant'],
'type': StreamCollection.types[col['type']],
'status': 'terminated' if 'terminated' in col else 'active',
} for col in map(dict, resp.body['result'])]
elif code == 403:
raise ex.StreamPermissionError(resp, request)
raise ex.StreamConnectionError(resp, request)
return self._execute(request, response_handler)
# def nonpersistent_streams(self, local=False):
# """Get list of all streams under given fabric
#
# :param persistent: persistent flag (if it is set to True, the API deletes persistent stream.
# If it is set to False, API deletes non-persistent stream)
# :param local: Operate on a local stream instead of a global one. Default value: false
# :return: List of streams under given fabric.
# :rtype: json
# :raise c8.exceptions.StreamListError: If retrieving streams fails.
# """
# url_endpoint = '/streams/non-persistent?local={}'.format(local)
#
# request = Request(
# method='get',
# endpoint=url_endpoint
# )
#
# def response_handler(resp):
# code = resp.status_code
# if resp.is_success:
# # NOTE: server API returns stream name as field 'topic' - we provide both here for user convenience
# return [{
# 'name': col['topic'],
# 'topic': col['topic'],
# 'local': col['local'],
# 'db': col['db'],
# 'tenant': col['tenant'],
# 'type': StreamCollection.types[col['type']],
# 'status': 'terminated' if 'terminated' in col else 'active',
# } for col in map(dict, resp.body['result'])]
#
# elif code == 403:
# raise ex.StreamPermissionError(resp, request)
# raise ex.StreamConnectionError(resp, request)
#
# return self._execute(request, response_handler)
def has_stream(self, stream):
""" Check if the list of streams has a stream with the given name.
:param stream: The name of the stream for which to check in the list of all streams.
:type stream: str | unicode
:return: True=stream found; False=stream not found.
:rtype: bool
"""
return any(mystream['name'] == stream for mystream in self.streams())
def has_persistent_stream(self, stream, local=False):
""" Check if the list of persistent streams has a stream with the given name
and local setting.
:param stream: The name of the stream for which to check in the list of persistent streams.
:type stream: str | unicode
:param local: if True, operate on a local stream instead of a global one. Default value: false
:type local: bool
:return: True=stream found; False=stream not found.
:rtype: bool
"""
return any(mystream['name'] == stream for mystream in self.persistent_streams(local))
# def has_nonpersistent_stream(self, stream, local=False):
# """ Check if the list of nonpersistent streams has a stream with the given name
# and local setting.
#
# :param stream: The name of the stream for which to check in the list of nonpersistent streams
# :type stream: str | unicode
# :param local: if True, operate on a local stream instead of a global one. Default value: false
# :type local: bool
# :return: True=stream found; False=stream not found.
# :rtype: bool
# """
# return any(mystream['name'] == stream for mystream in self.nonpersistent_streams(local))
def create_stream(self, stream, local=False):
"""
Create the stream under the given fabric
:param stream: name of stream
:param local: Operate on a local stream instead of a global one. Default value: false
:return: 200, OK if operation successful
:raise: c8.exceptions.StreamDeleteError: If creating streams fails.
"""
if self.persistent:
url_endpoint = '/streams/' + 'persistent/stream/{}?local={}'.format(stream, local)
# else:
# url_endpoint = '/streams/' + 'non-persistent/stream/{}?local={}'.format(stream, local)
request = Request(
method='post',
endpoint=url_endpoint
)
def response_handler(resp):
code = resp.status_code
if resp.is_success:
return resp.body['result']
elif code == 502:
raise ex.StreamCommunicationError(resp, request)
raise ex.StreamCreateError(resp, request)
return self._execute(request, response_handler)
def delete_stream(self, stream, force=False, local=False):
"""
Delete the streams under the given fabric
:param stream: name of stream
:param force:
:param local: Operate on a local stream instead of a global one. Default value: false
:return: 200, OK if operation successful
:raise: c8.exceptions.StreamDeleteError: If deleting streams fails.
"""
# KARTIK : 20181002 : Stream delete not supported.
# We still have some issues to work through for stream deletion on the
# pulsar side. So for v0.9.0 we only support terminate, and that too
# only for persistent streams.
if not self.persistent:
print(
"WARNING: Delete not yet implemented for nonpersistent streams. Returning 204. Stream will not be deleted.")
# 204 = No Content
return 204
# Persistent stream, let's terminate it instead.
print("WARNING: Delete not yet implemented for persistent streams, calling terminate instead.")
return self.terminate_stream(stream=stream, local=local)
######## HEY HEY DO THE ZOMBIE STOMP ########
# KARTIK : 20181002 : Stream delete not supported.
# TODO : When stream delete is implemented, enable below code and
# remove the above code.
# Below code is dead code for the moment, until delete stream is
# implemented on the server side. Consider it to be "#if 0"-ed out :-)
# (why, yes indeed, that was a C reference)
# if force and persistent:
# url_endpoint = '/streams/persistent/stream/{}?force=true&local={}'.format(stream, local)
# elif force and not persistent:
# url_endpoint = '/streams/non-persistent/stream/{}?force=true&local={}'.format(stream, local)
# elif not force and persistent:
# url_endpoint = '/streams/persistent/stream/{}?local={}'.format(stream, local)
# elif not force and not persistent:
# url_endpoint = '/streams/non-persistent/stream/{}?local={}'.format(stream, local)
#
# request = Request(
# method='delete',
# endpoint=url_endpoint
# )
#
# def response_handler(resp):
# code = resp.status_code
# if resp.is_success:
# return resp.body['result']
# elif code == 403:
# raise ex.StreamPermissionError(resp, request)
# elif code == 412:
# raise ex.StreamDeleteError(resp, request)
# raise ex.StreamConnectionError(resp, request)
#
# return self._execute(request, response_handler)
def terminate_stream(self, stream, local=False):
"""
Terminate a stream. A stream that is terminated will not accept any more messages to be published and will let consumer to drain existing messages in backlog
:param stream: name of stream
:param local: Operate on a local stream instead of a global one. Default value: false
:return: 200, OK if operation successful
:raise: c8.exceptions.StreamPermissionError: Dont have permission.
"""
if self.persistent:
url_endpoint = '/streams/persistent/stream/{}/terminate?local={}'.format(stream, local)
# else:
# # url_endpoint = '/streams/non-persistent/stream/{}/terminate?local={}'.format(stream, local)
# # KARTIK : 20181002 : terminate not supported for nonpersistent
# # streams. Just return 204 = No Content
# print("WARNING: Nonpersistent streams cannot be terminated. Returning 204.")
# return 204
request = Request(
method='post',
endpoint=url_endpoint
)
def response_handler(resp):
code = resp.status_code
if resp.is_success:
return resp.body['result']
elif code == 403:
raise ex.StreamPermissionError(resp, request)
raise ex.StreamConnectionError(resp, request)
return self._execute(request, response_handler)
class StandardFabric(Fabric):
    """Standard (synchronous) fabric API wrapper.

    :param connection: HTTP connection.
    :type connection: c8.connection.Connection
    """

    def __init__(self, connection):
        super(StandardFabric, self).__init__(
            connection=connection,
            executor=DefaultExecutor(connection))

    def __repr__(self):
        return '<StandardFabric {}>'.format(self.name)

    def begin_async_execution(self, return_result=True):
        """Begin async execution.

        :param return_result: If True, API calls return
            :class:`c8.job.AsyncJob` instances usable to fetch results from
            the server once available; if False, calls return None and no
            results are stored on the server.
        :type return_result: bool
        :return: Fabric API wrapper built specifically for async execution.
        :rtype: c8.fabric.AsyncFabric
        """
        return AsyncFabric(self._conn, return_result)

    def begin_batch_execution(self, return_result=True):
        """Begin batch execution.

        :param return_result: If True, API calls return
            :class:`c8.job.BatchJob` instances populated with results on
            commit; if False, calls return None and nothing is tracked
            client-side.
        :type return_result: bool
        :return: Fabric API wrapper built specifically for batch execution.
        :rtype: c8.fabric.BatchFabric
        """
        return BatchFabric(self._conn, return_result)

    def begin_transaction(self,
                          return_result=True,
                          timeout=None,
                          sync=None,
                          read=None,
                          write=None):
        """Begin a transaction.

        :param return_result: If True, API calls return
            :class:`c8.job.TransactionJob` instances populated with results
            on commit; if False, calls return None and nothing is tracked
            client-side.
        :type return_result: bool
        :param timeout: Timeout for waiting on collection locks. 0 waits
            indefinitely; None uses the system default.
        :type timeout: int
        :param sync: Block until the transaction is synchronized to disk.
        :type sync: bool
        :param read: Collections read during the transaction. If omitted,
            they are added automatically as jobs are queued.
        :type read: [str | unicode]
        :param write: Collections written to during the transaction. If
            omitted, they are added automatically as jobs are queued.
        :type write: [str | unicode]
        :return: Fabric API wrapper built specifically for transactions.
        :rtype: c8.fabric.TransactionFabric
        """
        return TransactionFabric(
            connection=self._conn,
            return_result=return_result,
            read=read,
            write=write,
            timeout=timeout,
            sync=sync)
class AsyncFabric(Fabric):
    """Fabric API wrapper tailored specifically for async execution.

    See :func:`c8.fabric.StandardFabric.begin_async_execution`.

    :param connection: HTTP connection.
    :type connection: c8.connection.Connection
    :param return_result: If True, API calls return :class:`c8.job.AsyncJob`
        instances from which results can be fetched once available; if False,
        calls return None and no results are stored on the server.
    :type return_result: bool
    """

    def __init__(self, connection, return_result):
        executor = AsyncExecutor(connection, return_result)
        super(AsyncFabric, self).__init__(
            connection=connection, executor=executor)

    def __repr__(self):
        return '<AsyncFabric {}>'.format(self.name)
class BatchFabric(Fabric):
    """Fabric API wrapper tailored specifically for batch execution.

    See :func:`c8.fabric.StandardFabric.begin_batch_execution`.

    :param connection: HTTP connection.
    :type connection: c8.connection.Connection
    :param return_result: If True, API calls return :class:`c8.job.BatchJob`
        instances that are populated with results on commit; if False, calls
        return None and nothing is tracked client-side.
    :type return_result: bool
    """

    def __init__(self, connection, return_result):
        executor = BatchExecutor(connection, return_result)
        super(BatchFabric, self).__init__(
            connection=connection, executor=executor)

    def __repr__(self):
        return '<BatchFabric {}>'.format(self.name)

    def __enter__(self):
        # Supports ``with fabric.begin_batch_execution() as batch:``.
        return self

    def __exit__(self, exception, *_):
        # Commit only on a clean exit; leave the batch untouched on error.
        if exception is None:
            self._executor.commit()

    def queued_jobs(self):
        """Return the queued batch jobs.

        :return: Queued batch jobs, or None if **return_result** was set to
            False during initialization.
        :rtype: [c8.job.BatchJob] | None
        """
        return self._executor.jobs

    def commit(self):
        """Execute the queued requests in a single batch API request.

        :return: Batch jobs (populated with results if **return_result** was
            True), or None otherwise.
        :rtype: [c8.job.BatchJob] | None
        :raise c8.exceptions.BatchStateError: If the batch state is invalid
            (e.g. already committed, or the response size did not match
            expected).
        :raise c8.exceptions.BatchExecuteError: If the commit fails.
        """
        return self._executor.commit()
class TransactionFabric(Fabric):
    """Fabric API wrapper tailored specifically for transactions.

    See :func:`c8.fabric.StandardFabric.begin_transaction`.

    :param connection: HTTP connection.
    :type connection: c8.connection.Connection
    :param return_result: If True, API calls return
        :class:`c8.job.TransactionJob` instances populated with results on
        commit; if False, calls return None and nothing is tracked
        client-side.
    :type return_result: bool
    :param read: Collections read during the transaction.
    :type read: [str | unicode]
    :param write: Collections written to during the transaction.
    :type write: [str | unicode]
    :param timeout: Timeout for waiting on collection locks. 0 waits
        indefinitely; None uses the system default.
    :type timeout: int
    :param sync: Block until the operation is synchronized to disk.
    :type sync: bool
    """

    def __init__(self, connection, return_result, read, write, timeout, sync):
        executor = TransactionExecutor(
            connection=connection,
            return_result=return_result,
            read=read,
            write=write,
            timeout=timeout,
            sync=sync)
        super(TransactionFabric, self).__init__(
            connection=connection, executor=executor)

    def __repr__(self):
        return '<TransactionFabric {}>'.format(self.name)

    def __enter__(self):
        # Supports ``with fabric.begin_transaction() as txn:``.
        return self

    def __exit__(self, exception, *_):
        # Commit only on a clean exit; do nothing if an exception escaped.
        if exception is None:
            self._executor.commit()

    def queued_jobs(self):
        """Return the queued transaction jobs.

        :return: Queued transaction jobs, or None if **return_result** was
            set to False during initialization.
        :rtype: [c8.job.TransactionJob] | None
        """
        return self._executor.jobs

    def commit(self):
        """Execute the queued requests in a single transaction API request.

        :return: Transaction jobs (populated with results if
            **return_result** was True), or None otherwise.
        :rtype: [c8.job.TransactionJob] | None
        :raise c8.exceptions.TransactionStateError: If the transaction was
            already committed.
        :raise c8.exceptions.TransactionExecuteError: If the commit fails.
        """
        return self._executor.commit()
|
# Should use a balanced tree to ensure O(log(n)) insert and rank.
class BinarySearchTree(object):
    """Unbalanced BST whose nodes carry subtree sizes for rank queries."""

    def __init__(self):
        self.root = None

    def insert(self, x):
        """Insert node *x*, bumping subtree sizes along the descent path."""
        parent = None
        cursor = self.root
        while cursor is not None:
            cursor.size += 1  # x will end up somewhere inside this subtree
            parent = cursor
            cursor = cursor.left if x.key < cursor.key else cursor.right
        x.parent = parent
        if parent is None:
            self.root = x
        elif x.key < parent.key:
            parent.left = x
        else:
            parent.right = x

    def rank(self, k):
        """Returns the number of nodes with key <= k."""
        count = 0
        cursor = self.root
        while cursor is not None:
            if cursor.key <= k:
                # cursor itself and its entire left subtree are all <= k.
                count += 1 if cursor.left is None else 1 + cursor.left.size
                cursor = cursor.right
            else:
                cursor = cursor.left
        return count
class Node(object):
    """A BST node that tracks the size of the subtree rooted at itself."""

    def __init__(self, key):
        self.key = key
        # Links are populated by BinarySearchTree.insert.
        self.left = None
        self.right = None
        self.parent = None
        # Node count of this subtree, itself included (always >= 1).
        self.size = 1
|
import smbus
import os
import sys
import time
import io
from datetime import datetime, timedelta
import statistics
#Third Party Modules
import pi_servo_hat # Pan/Tilt mast controller
import picamera
tf_width = 299   # Width required for Tensorflow
tf_height = 299  # Height required for Tensorflow
tf_bw = True     # Whether Tensorflow wants black and white

# BUGFIX: the camera class lives in the picamera module; the bare name
# PiCamera was a NameError at import time.
camera = picamera.PiCamera()
servo = pi_servo_hat.PiServoHat()
def take_picture(outFile):
    """Capture a single black-and-white frame in the dimensions required
    for TensorFlow.

    :param outFile: path (or file-like object) where the JPEG is stored
    """
    # BUGFIX: 'picamera' is the module; the camera class is picamera.PiCamera.
    with picamera.PiCamera() as camera:
        camera.vflip = True
        camera.hflip = True
        camera.contrast = 15
        camera.sharpness = 35
        camera.saturation = 20
        camera.shutter_speed = 0  # 0 = auto exposure
        camera.color_effects = (128, 128)  # fixed U/V -> black and white
        # BUGFIX: resolution is a property; camera.PiResolution(...) does
        # not exist in the picamera API.
        camera.resolution = (tf_width, tf_height)
        camera.capture(outFile, format="jpeg")
def take_picture_hd(outFile):
    """Capture a single high-resolution image from the Pi camera.

    Note: the result must later be downscaled to 299x299 and converted to
    black and white before being fed to TensorFlow.

    :param outFile: path (or file-like object) where the PNG is stored
    """
    # BUGFIX: 'picamera' is the module; the camera class is picamera.PiCamera.
    with picamera.PiCamera() as camera:
        camera.vflip = True
        camera.hflip = True
        # camera.iso = 400
        camera.contrast = 15
        camera.sharpness = 35
        camera.saturation = 35
        # time.sleep(2)
        # camera.shutter_speed = camera.exposure_speed
        camera.shutter_speed = 0  # 0 = auto exposure
        # camera.exposure_mode = 'off'
        camera.capture(outFile, format="png")
def convert_pic_to_tf(inFile, outFile, outWidth, outHeight, black_and_white=True):
    """Process a picture to TensorFlow input requirements.

    :param inFile: name and path of the input file
    :param outFile: name and path of the output file
    :param outWidth: width of the output image (pixels)
    :param outHeight: height of the output image (pixels)
    :param black_and_white: whether to convert the image to black and white
    """
    pass  # TODO: not yet implemented
# The point-camera helpers drive the pan/tilt mast servos:
#   servo channel 1 = up/down (tilt), servo channel 0 = left/right (pan)

def center_camera():
    """Point both tilt and pan servos to their neutral (0 degree) position."""
    for channel in (1, 0):
        servo.move_servo_position(channel, 0, 180)
def left_camera():
    """Level the tilt servo and pan the camera fully left."""
    servo.move_servo_position(1, 0, 180)    # tilt: level
    servo.move_servo_position(0, -90, 180)  # pan: left
def right_camera():
    """Level the tilt servo and pan the camera fully right."""
    servo.move_servo_position(1, 0, 180)   # tilt: level
    servo.move_servo_position(0, 90, 180)  # pan: right
def up_camera():
    """Tilt the camera fully up while keeping the pan centered."""
    servo.move_servo_position(1, 90, 180)  # tilt: up
    servo.move_servo_position(0, 0, 180)   # pan: center
def main():
    """Sweep the camera left then right, capturing one picture at each stop,
    and return it to center afterwards."""
    # BUGFIX: the original body did not parse ('for Look_left():' and
    # 'for Look_right():' are syntax errors) and referenced 'PicFile'
    # where 'picFile' had been assigned. The apparent intent -- look left,
    # shoot, look right, shoot -- is implemented below.
    center_camera()
    pic_count = 1  # keeps track of picture count
    for point_camera in (left_camera, right_camera):
        point_camera()
        pic_file = "rocko%0.3d.png" % pic_count
        time.sleep(1)  # let the servos settle before capturing
        take_picture(pic_file)
        pic_count += 1
    center_camera()
|
# Stacker model (Lasso + Ridge + XGB + KNN) using Linear Regression
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
# Function to load in all models.
def load_model(model_name, n_folds=10):
    ''' Input the model name to be loaded, and n_folds used.
    Returns the list of per-fold prediction arrays loaded from disk. '''
    return [np.loadtxt('../model_4/{}_pred_fold_{}.txt'.format(model_name, i))
            for i in range(n_folds)]
if __name__ == '__main__':
    # Load raw train/test data; the target is log-transformed and two known
    # outlier rows are dropped before any scoring.
    train = pd.read_csv('../train.csv')
    test = pd.read_csv('../test.csv')
    y_train = np.log1p(train.SalePrice)
    outliers_id = np.array([523, 1298])
    y_train = y_train.drop(outliers_id)
    ### Aggregate weights to be passed into layer 1 model
    # 1. This is Lasso predicted weights from Kfold training set
    train_lasso = load_model('ls', 10)
    # Concatenate the 10 out-of-fold prediction vectors into one full-length
    # training prediction (assumes the folds cover the rows in order -- TODO confirm).
    train_lasso_folds = np.hstack((train_lasso[0], train_lasso[1], train_lasso[2], train_lasso[3], \
                                   train_lasso[4], train_lasso[5], train_lasso[6], \
                                   train_lasso[7], train_lasso[8], train_lasso[9]))
    print "\nChecking Lasso trainin set size: ", train_lasso_folds.shape
    # NOTE(review): predictions are re-logged before computing RMSE against the
    # log target, implying the fold files store prices on the original scale --
    # verify against how model_4 saved them.
    rmse_check_1 = np.sqrt(mean_squared_error(np.log(train_lasso_folds), y_train))
    print "Lasso RMSE: ", rmse_check_1
    # 2. This is Ridge predicted weights from Kfold training set
    train_ridge = load_model('ridge', 10)
    train_ridge_folds = np.hstack((train_ridge[0], train_ridge[1], train_ridge[2], train_ridge[3], \
                                   train_ridge[4], train_ridge[5], train_ridge[6], \
                                   train_ridge[7], train_ridge[8], train_ridge[9]))
    print "\nChecking Ridge trainin set size: ", train_ridge_folds.shape
    rmse_check_2 = np.sqrt(mean_squared_error(np.log(train_ridge_folds), y_train))
    print "Ridge RMSE: ", rmse_check_2
    # 3. This is xgb predicted weights from Kfold training set
    train_xgb = load_model('xgb', 10)
    train_xgb_folds = np.hstack((train_xgb[0], train_xgb[1], train_xgb[2], train_xgb[3], \
                                 train_xgb[4], train_xgb[5], train_xgb[6], \
                                 train_xgb[7], train_xgb[8], train_xgb[9]))
    print "\nChecking XGB training set size: ", train_xgb_folds.shape
    rmse_check_3 = np.sqrt(mean_squared_error(np.log(train_xgb_folds), y_train))
    print "XGB RMSE: ", rmse_check_3
    # 4. This is knn predicted weights from Kfold training set
    train_knn = load_model('knn', 10)
    train_knn_folds = np.hstack((train_knn[0], train_knn[1], train_knn[2], train_knn[3], \
                                 train_knn[4], train_knn[5], train_knn[6], \
                                 train_knn[7], train_knn[8], train_knn[9]))
    print "\nChecking knn training set size: ", train_knn_folds.shape
    rmse_check_4 = np.sqrt(mean_squared_error(np.log(train_knn_folds), y_train))
    print "knn RMSE: ", rmse_check_4
    # 5. This is rf predicted weights from Kfold training set
    train_rf = load_model('rf', 10)
    train_rf_folds = np.hstack((train_rf[0], train_rf[1], train_rf[2], train_rf[3], \
                                train_rf[4], train_rf[5], train_rf[6], \
                                train_rf[7], train_rf[8], train_rf[9]))
    print "\nChecking rf training set size: ", train_rf_folds.shape
    rmse_check_5 = np.sqrt(mean_squared_error(np.log(train_rf_folds), y_train))
    print "rf RMSE: ", rmse_check_5
    ### Read in the prediction on test set. This will be test_x (test set features)
    xgb_pred = np.loadtxt('../model_4_test_set/xgb_test_pred.txt')
    lasso_pred = np.loadtxt('../model_4_test_set/lasso_test_pred.txt')
    ridge_pred = np.loadtxt('../model_4_test_set/ridge_test_pred.txt')
    rf_pred = np.loadtxt('../model_4_test_set/rf_test_pred.txt')
    knn_pred = np.loadtxt('../model_4_test_set/knn_test_pred.txt')
    # Resize the ridge and knn prediction so they can fit into the stacker.
    print '\n', type(xgb_pred), lasso_pred.shape, ridge_pred.shape, knn_pred.shape, rf_pred.shape
    # Stacking starts here.
    # Layer-1 feature matrices: one column per base model (rows x 5 models).
    layer_1_train_x = np.vstack((train_xgb_folds, train_lasso_folds, train_ridge_folds, train_rf_folds, train_knn_folds)).T
    layer_1_test_x = np.vstack((xgb_pred, lasso_pred, ridge_pred, rf_pred, knn_pred)).T
    ### Use Random Forest to do the stacking
    rf_stack = RandomForestRegressor()
    rf_stack_rmse = np.sqrt(-cross_val_score(rf_stack, np.log(layer_1_train_x), y_train, cv=5, scoring='neg_mean_squared_error'))
    print "\nRF Stacker CV RMSE: ", (rf_stack_rmse.mean())
    rf_stack.fit(np.log(layer_1_train_x), y_train)
    # NOTE(review): the stacker is fit on np.log(features) but predicts on the
    # raw layer_1_test_x -- this looks inconsistent; confirm intended scaling.
    rf_stack_final_pred = rf_stack.predict(layer_1_test_x)
    # Back-transform from log space and write the submission file.
    df_rf_stack_final_pred = pd.DataFrame(np.exp(rf_stack_final_pred), index=test["Id"], columns=["SalePrice"])
    df_rf_stack_final_pred.to_csv('submission13_rf_stack.csv', header=True, index_label='Id')
    print "\n", df_rf_stack_final_pred.head()
    ### User Linear Regression to do the stacking
    lr = LinearRegression()
    lr_rmse = np.sqrt(-cross_val_score(lr, np.log(layer_1_train_x), y_train, cv=5, scoring='neg_mean_squared_error'))
    print "\nLinear Stacker CV RMSE: ", (lr_rmse.mean())
    lr.fit(np.log(layer_1_train_x), y_train)
    # NOTE(review): same log/raw mismatch as the RF stacker above.
    final_pred = lr.predict(layer_1_test_x)
    df_final_pred = pd.DataFrame(np.exp(final_pred), index=test["Id"], columns=["SalePrice"])
    df_final_pred.to_csv('submission12_linear_stack.csv', header=True, index_label='Id')
    print "\n", df_final_pred.head()
    ###################################################################################################################
    # Stacker training features: predicted weights from layer_2 models (xgbost, lasso, ...) Kfold training set
    # Stacker training target: real training set target
    # Stacker testing features: predicted weights from layer_2 models test set
    # Stacker testing target: real testing set target (which is on Kaggle, we don't have it)
import imutils
import threading
import os
import glob
import numpy as np
import cv2
import csv
import time
import sys
from color_markers import *
# Redirect all prints to a log file.
# NOTE(review): the file handle is never closed/flushed explicitly.
sys.stdout = open('temp_log', 'w')
width = 5
# Working canvases (500x500, single channel).
img = np.zeros((500,500),dtype=np.uint8)
img_with_circles = np.zeros((500,500),dtype=np.uint8)
img_thick = np.zeros((500,500),dtype=np.uint8)
first_warped_frame = np.zeros((500,500),dtype=np.uint8)
# Accumulators shared across the processing functions below.
list_circles = []
list_export = []
features_result=[]
# Input/output file names for the "perfect trajectory" reference run.
name_akaze = "perfect_trajectory_akaze.png"
name_circle = "perfect_trajectory_circle.png"
name_thick = "perfect_trajectory_thick.png"
csv_file_name = "results_perfect.csv"
result_dic = []
circle_step = 25;
list_white = []
img_plot = []
adj_x = 0 #this variable is used to adjust the offset while evaluating the trajectory
adj_y = 0 #this variable is used to adjust the offset while evaluating the trajectory
flag_first = True
blue = (255,0,0)
def evaluation():
    """Count good feature matches between the two trajectory images using
    AKAZE, SIFT, SURF, ORB and BRISK (appended in that order) into the
    module-level ``features_result`` list, which is also returned.

    NOTE(review): both images load "perfect_trajectory_akaze.png", so this
    currently compares the image with itself -- confirm the second filename.

    :return: ``features_result`` with the five good-match counts appended.
    """
    def good_matches(matches, ratio=0.9):
        # Lowe's ratio test: keep a match only when the best candidate is
        # clearly better than the second best.
        return [[m] for m, n in matches if m.distance < ratio * n.distance]

    # Load the images and convert them to grayscale.
    im1 = cv2.imread("perfect_trajectory_akaze.png")
    im2 = cv2.imread("perfect_trajectory_akaze.png")
    gray1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Initialize the detectors/descriptor extractors.
    sift = cv2.xfeatures2d.SIFT_create()
    surf = cv2.xfeatures2d.SURF_create()
    akaze = cv2.AKAZE_create()
    brisk = cv2.BRISK_create()
    orb = cv2.ORB_create()

    # Brute-force matchers. NOTE(review): ORB/BRISK produce binary
    # descriptors normally matched with NORM_HAMMING; the original code
    # matched them with the L2 matcher, which is preserved here so the
    # reported counts do not change -- confirm intent.
    bfakaze = cv2.BFMatcher(cv2.NORM_HAMMING)
    bf = cv2.BFMatcher(cv2.NORM_L2)

    # (detector, matcher) pairs in the order the counts are appended:
    # AKAZE, SIFT, SURF, ORB, BRISK.  This replaces five copy-pasted
    # detect/match/ratio-test blocks; the unused drawMatchesKnn
    # visualizations (only ever fed to commented-out imshow calls) were
    # dropped.
    pipeline = [
        (akaze, bfakaze),
        (sift, bf),
        (surf, bf),
        (orb, bf),
        (brisk, bf),
    ]
    for detector, matcher in pipeline:
        kps1, descs1 = detector.detectAndCompute(gray1, None)
        kps2, descs2 = detector.detectAndCompute(gray2, None)
        matches = matcher.knnMatch(descs1, descs2, k=2)
        features_result.append(len(good_matches(matches)))

    return features_result
def warping(image,contours):
    """Perspective-warp the quadrilateral described by *contours* onto a
    500x500 canvas, show it in a window, and return it.

    The four points are classified into quad corners using coordinate-sum
    and coordinate-maximum heuristics (largest x+y -> one corner, smallest
    x+y -> the opposite corner, then the remaining two by x and y maxima).
    Chosen points are zeroed out so they are not selected twice.

    :param image: BGR frame (3-channel numpy array).
    :param contours: sequence of four contour points, i.e.
        contours[i][0] == (x, y) -- assumes exactly four points; TODO confirm
        callers guarantee this.
    :return: the 500x500 warped image (numpy array).
    """
    #print("I am in warping")
    # Unpack the four candidate corner points.
    x1 = contours[0][0][0]
    y1 = contours[0][0][1]
    x2 = contours[1][0][0]
    y2 = contours[1][0][1]
    x3 = contours[2][0][0]
    y3 = contours[2][0][1]
    x4 = contours[3][0][0]
    y4 = contours[3][0][1]
    # Coordinate sums used to find the extreme (top-left / bottom-right) corners.
    s1 = x1 + y1
    s2 = x2 + y2
    s3 = x3 + y3
    s4 = x4 + y4
    # (x2_main, y2_main) <- point with the largest x+y; zero it out afterwards.
    t = max(s1, s2, s3, s4)
    if t == s1:
        x2_main = x1
        y2_main = y1
        x1 = 0
        y1 = 0
    elif t == s2:
        x2_main = x2
        y2_main = y2
        x2 = 0
        y2 = 0
    elif t == s3:
        x2_main = x3
        y2_main = y3
        x3 = 0
        y3 = 0
    else:
        x2_main = x4
        y2_main = y4
        x4 = 0
        y4 = 0
    # print(x2_main, y2_main)
    # (x4_main, y4_main) <- point with the smallest x+y; zero it out afterwards.
    t = min(s1, s2, s3, s4)
    if t == s1:
        x4_main = x1
        y4_main = y1
        x1 = 0
        y1 = 0
    elif t == s2:
        x4_main = x2
        y4_main = y2
        x2 = 0
        y2 = 0
    elif t == s3:
        x4_main = x3
        y4_main = y3
        x3 = 0
        y3 = 0
    else:
        x4_main = x4
        y4_main = y4
        x4 = 0
        y4 = 0
    # print(x4_main, y4_main)
    # x3_main <- largest remaining x (the variable name 'index_min' is
    # misleading: np.argmax is used), then zero that point's x.
    t = max(x1, x2, x3, x4)
    x3_main = t
    index_min = np.argmax([x1, x2, x3, x4])
    if index_min == 0:
        x1 = 0
    elif index_min == 1:
        x2 = 0
    elif index_min == 2:
        x3 = 0
    else:
        x4 = 0
    # x1_main <- next-largest remaining x.
    t = max(x1, x2, x3, x4)
    x1_main = t
    # y1_main <- largest remaining y, then zero it; y3_main <- next largest.
    t = max(y1, y2, y3, y4)
    y1_main = t
    index_min = np.argmax([y1, y2, y3, y4])
    if index_min == 0:
        y1 = 0
    elif index_min == 1:
        y2 = 0
    elif index_min == 2:
        y3 = 0
    else:
        y4 = 0
    t = max(y1, y2, y3, y4)
    y3_main = t
    # Mask everything outside the detected quadrilateral.
    mask = np.zeros(image.shape, dtype=np.uint8)
    roi_corners = np.array([[(x1_main, y1_main), (x2_main, y2_main), (x3_main, y3_main), (x4_main, y4_main)]], dtype=np.int32)
    channel_count = image.shape[2]
    ignore_mask_color = (255,) * channel_count
    cv2.fillPoly(mask, roi_corners, ignore_mask_color)
    masked_image = cv2.bitwise_and(image, mask)
    # Map the four corners onto the fixed 500x500 output quad and warp.
    pts1 = np.float32([(x3_main, y3_main), (x2_main, y2_main), (x4_main, y4_main), (x1_main, y1_main)])
    pts2 = np.float32([[0, 0], [500, 0], [0, 500], [500, 500]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(masked_image, M, (500, 500))
    cv2.imshow("Warped", dst)
    cv2.waitKey(1)
    return dst
def filter_top_of_robot(isStandard, frame, x_first = 0, y_first = 0):
    """Locate the pink/red marker on top of the robot and plot its position.

    Thresholds *frame* in HSV, takes the largest edge contour and its
    minimum enclosing circle as the marker position.  The first accepted
    point defines an offset (adj_x, adj_y) aligning the live trajectory with
    the reference start (x_first, y_first); each subsequent point appends 1
    to ``list_white`` when it lands on the white region of the reference
    plot ``img_plot``, else 0, and is drawn into the trajectory image ``img``.

    Parameters:
        isStandard: when False, (re)load the reference thick plot from
            ``name_thick`` into the global ``img_plot``.
        frame: warped BGR frame of the arena.
        x_first, y_first: reference starting coordinates for offset alignment.

    Side effects: mutates globals img_plot, flag_first, adj_x, adj_y,
    list_white and img.  Returns nothing.
    """
    #print("I am in filter_top_of_robot")
    global img_plot, flag_first, adj_x, adj_y, list_white, img
    if not isStandard:
        img_plot = cv2.imread(name_thick)
    # Isolate the marker colour range in HSV space.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_red = np.array([140, 150, 150])
    upper_red = np.array([170, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    #cv2.imshow('Mask',mask)
    #cv2.imshow("Result",res)
    #cv2.imshow('Frame',frame)
    #print("Thick plot shape:", img_thick.shape)
    #cv2.waitKey()
    gray = cv2.cvtColor(res, cv2.COLOR_HSV2BGR)
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    gray = cv2.bilateralFilter(gray, 11, 17, 17)
    edged = cv2.Canny(gray, 30, 200)
    # OpenCV 3.x API: findContours returns (image, contours, hierarchy).
    (_, contours, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
    if contours.__len__() != 0:
        cnt = contours[0]
        (x, y), radius = cv2.minEnclosingCircle(cnt) # Making Minimum Enclosing Circle around the contour to get the coordinates of the centre
        center = (int(x), int(y))
        # coordinates.append([x,y])
        radius = int(radius)
        cv2.circle(res, center, radius, (0, 255, 0), 2)
        #cv2.imshow("Res", res)
        #cv2.waitKey(1)
        #print(center, radius)
        if (3.14) * (radius * radius) < 400: # This will filter out small contours which are found
            # print("small circle")
            x = 0
            y = 0
        if flag_first==True and (x,y)!=(0,0): # This if will be executed if it is the first pixel in the trajectory
            adj_x = x_first - x
            adj_y = y_first - y
            # print(adj_x,adj_y)
            flag_first = False
        if int(x) != 0 and int(y) != 0:
            # Shift the detected point into the reference plot's frame.
            x = x + adj_x
            y = y + adj_y
            #print("Image plot: ", img_plot[int(x),int(y),0]==255, center)
            # NOTE(review): img_plot is indexed [x, y] here while numpy images
            # index [row, col] == [y, x] -- confirm the axis order is intended.
            if img_plot[int(x),int(y),0]==255: # Check if the pixel is plotted on White Foreground or Black Background
                list_white.append(1)
            else:
                list_white.append(0)
            cv2.line(img, (int(x), int(y)), (int(x), int(y)), (255, 255, 255), 1)
        else:
            # Point was filtered as noise: draw a thick marker at (0, 0).
            cv2.line(img, (int(x), int(y)), (int(x), int(y)), (255, 255, 255), 20)
    '''
    else:
        #cv2.imshow('Frame',frame)
        #print("Thick plot shape:", img_thick.shape)
        #cv2.waitKey(100)
        #print("no contour found bro")
        cv2.imshow('Original',frame)
        cv2.imshow("Perfect Trajectory",img)
        cv2.imshow('Thick Trajectory ',img_thick)
        cv2.waitKey(1)
    '''
def deleteframes(isStandard, file_name, contours, team_id = 0, x_first = 0, y_first = 0):
    """Replay *file_name* frame by frame, plot the robot trajectory and score it.

    Each frame is warped onto the arena plane (``warping``) and the robot
    marker position plotted (``filter_top_of_robot``), which fills the
    module-level ``list_white`` with 1 (on reference path) / 0 (off path).
    The plot is saved as ``team_id_<team_id>.png`` and the follow accuracy is
    the percentage of on-path points.

    Parameters:
        isStandard: forwarded to filter_top_of_robot (reference vs team run).
        file_name: path of the video to evaluate.
        contours: 4-point arena contour used for perspective warping.
        team_id: identifier used in the output plot file name.
        x_first, y_first: reference starting point for trajectory alignment.

    Returns:
        dict with 'Team_ID', 'Plot Path' and 'Follow Accuracy' (percent).

    Side effects: writes the plot image and resets the module-level
    trajectory state (list_white, img, adj_x, adj_y, flag_first).
    """
    global list_white, img_plot, adj_y, adj_x, flag_first, list_circles, img
    isFirstFrameCaptured = False  # NOTE(review): never used -- candidate for removal
    cap = cv2.VideoCapture(file_name)
    while cap.isOpened():
        ret, image = cap.read()
        if ret == False:
            break
        warped_frame = warping(image, contours)
        filter_top_of_robot(isStandard, warped_frame, x_first, y_first)
        #cv2.imshow("Original", image)
        #cv2.waitKey(1)
    cap.release()
    cv2.destroyAllWindows()
    #print("line 374", list_white.__len__())
    plot_path = "team_id_" + str(team_id) + ".png"
    cv2.imwrite(plot_path, img)
    listlen = len(list_white)
    list_ones = list_white.count(1)
    # BUGFIX: the original divided unconditionally; when no marker was ever
    # detected (list_white empty) this raised ZeroDivisionError.  Report 0%.
    followaccuracy = (list_ones / listlen) * 100 if listlen else 0.0
    #print("ACUURACY MEASURE",followaccuracy)
    #print("inside ps line 379:", list_ones, listlen)
    result = {'Team_ID': team_id, 'Plot Path': plot_path, "Follow Accuracy": followaccuracy}
    # Reset the shared plotting state so the next run starts clean.
    list_white = []
    img = np.zeros((500, 500), dtype=np.uint8)
    adj_x = 0  # offset adjustment used while evaluating the trajectory
    adj_y = 0  # offset adjustment used while evaluating the trajectory
    flag_first = True
    return result
'''
if not isStandard:
list_step = int(list_circles.__len__()/circle_step)
print("List step = " + str(list_step))
print("list circle len = " + str(list_circles.__len__()))
print("image thick", img_thick.shape)
print("*****************")
print(list_circles)
print("*****************")
print("Lenght of circle list: ",list_circles.__len__())
img_with_circles = np.zeros((500,500),dtype=np.uint8)
list_circles = list(set(list(list_circles)))
print("Lenght of unique circle list: ",list_circles.__len__())
for i in range(0,list_circles.__len__(),200):
list_export.append(list_circles[i])
print((int(list_circles[i][0]), int(list_circles[i][1])))
cv2.circle(img_with_circles, (int(list_circles[i][0]), int(list_circles[i][1])), 3, blue, -1)
#cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]])
cv2.imwrite("first_warped_frame.png", first_warped_frame);
cv2.imwrite(name_akaze,img)
cv2.imwrite(name_circle,img_with_circles)
cv2.imwrite(name_thick,img_thick)
return (list_export)
'''
def getContours(frame):
    """Return the approximated 4-point polygon of the arena's outer frame.

    Thresholds a grayscale version of *frame* at 127 and runs
    cv2.findContours (OpenCV 3.x API: three return values), then approximates
    the third contour with approxPolyDP at 1% arc-length tolerance.

    NOTE(review): relies on contours[2] always being the outer arena frame --
    confirm this ordering holds for every input video.

    Parameters:
        frame: BGR image containing the arena.

    Returns:
        approx: the approximated polygon (expected to have 4 points).
    """
    area = (frame.shape)
    frame_area = area[0]*area[1]  # computed but unused
    #print(frame_area)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 127, 255, 0)
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #temp = cv2.drawContours(gray, contours, 2, (0,255,0), 30)
    #cv2.imshow("All Counter", temp)
    #cv2.waitKey(100000)
    # Approximate the contour to a polygon with 1% arc-length tolerance.
    peri = cv2.arcLength(contours[2], True)
    approx = cv2.approxPolyDP(contours[2], 0.01 * peri, True)
    #3rd element in contours corresponds to outer frame of arena
    return approx#contours[2]
    #return contours
def standard_feature(file_name):
    """Process the reference ("standard") run video and persist its features.

    Extracts the arena quadrilateral from the first frame, warps it, computes
    the standard features (``getStandardFeatures`` / ``evaluation``) and the
    corner colour points from a frame 100 before the end, appends everything
    to the module-level ``result_dic`` and writes two CSVs: the evaluation
    data under ``csv_file_name`` and "Results/Results.csv".

    Parameters:
        file_name: path of the reference video.

    Returns:
        The module-level ``csv_file_name`` that was written.

    NOTE(review): "Results/Results.csv" only ever receives its header row --
    writerows is never called for it; confirm data rows are written elsewhere.
    """
    #file_name = "output_correct_orientation.avi"#"new_video_final_9_ref.mov" #"output_correct_orientation.avi"
    cap = cv2.VideoCapture(file_name) # Capture video from camera
    ret, first_frame = cap.read()
    #cv2.imwrite("frame0.jpg" , frame)
    (coordinates) = getContours(first_frame)
    #print(coordinates ,"------------------")
    warped_frame = warping(first_frame, coordinates)
    cv2.imwrite("warped_first.jpg", warped_frame)
    (list_export) = getStandardFeatures(warped_frame)#deleteframes(isStandard = True,file_name = file_name, contours = coordinates)
    features_result = evaluation()
    # Seek to 100 frames before the end to grab a stable last frame.
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.set(1, length - 100)
    ret, last_frame = cap.read()
    cv2.imwrite("standard_last.jpg", last_frame)
    #img = cv2.imread('arena_correct_placed.jpg', 1)
    red_corners, green_corners, blue_corners= getColorPoints(last_frame, isCenter = False)
    result = {'plot_akaze': name_akaze,'plot_circle':name_circle,'plot_thick':name_thick,\
        'list_circles':list_export,'features_result':features_result,\
        'red_corners': red_corners, 'green_corners' : green_corners,\
        'blue_corners' : blue_corners
        }
    result_dic.append(result.copy())
    fields = ['plot_akaze', 'plot_circle', 'plot_thick', 'list_circles',\
        'features_result', 'red_corners', 'green_corners', 'blue_corners']
    with open(csv_file_name, 'w') as csvfile:
        # creating a csv dict writer object
        writer = csv.DictWriter(csvfile, fieldnames=fields)
        # writing headers (field names)
        writer.writeheader()
        # writing data rows
        writer.writerows(result_dic)
    fields = ['Team_ID', 'Plot Path', "Follow Accuracy", 'Red Matches','Green Matches', 'Blue Matches', 'Feature Matching', 'HOG', "Points Travelled"]
    with open("Results/Results.csv", 'w') as csvfile:
        # creating a csv dict writer object
        writer = csv.DictWriter(csvfile, fieldnames=fields)
        # writing headers (field names)
        writer.writeheader()
        # writing data rows
    print("Compleated!!!")
    # Our operations on the frame come here
    # Display the resulting frame
    #cv2.imshow('frame',gray)
    #cv2.waitKey(5000)
    return csv_file_name
# ---------------------------------------------------------------------------
# NOTE(review): stray '|' removed here -- it was a file-concatenation artifact
# separating two unrelated scripts and made the file syntactically invalid.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd
import random
import os
import argparse
import sys
import time
from datetime import date
###################################################
###################### ARGS #######################
###################################################
def str2bool(v):
    """Interpret *v* as a boolean, for use as an argparse ``type=`` converter.

    Booleans pass through unchanged; the usual yes/no spellings are accepted
    case-insensitively; anything else raises argparse.ArgumentTypeError.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def parse_args():
    """Build and parse the command-line arguments for a training/evaluation run.

    Argument groups: input (--path), model selection (--model,
    --damping_fsub), training-loop control (--max_iters, --quit_delta,
    --eval_freq, --item_per_user, --learning_rate, --mini_batch_size),
    model capacity (--num_dims, --max_lens, --L), dataset filtering
    (--user_min, --item_min, --min_count), regularisation (--emb_reg,
    --bias_reg), short/long-term weighting (--alpha, --gamma),
    reproducibility (--seed), and output/mode flags (--prediction_name_file,
    --prediction_TopN, --evaluation_name_file,
    --search_hyperparameters_name_file, --production, --cold_start_user).

    Returns:
        argparse.Namespace: the parsed arguments (also echoed to stdout).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--path',
                        help='Filename of the input dataset.',
                        required=True)
    parser.add_argument('--model',
                        help='Model to run.',
                        choices=['REBUS', 'REBUS_reg', 'REBUS_simple', 'REBUS_ST', 'REBUS_ST_simple', 'REBUS_LT'],
                        required=True)
    parser.add_argument('--damping_fsub',
                        help='Type of damping factor use for REBUS',
                        choices=['linear_softmax', 'linear'],
                        default="linear_softmax")
    parser.add_argument('--max_iters',
                        help='Max number of iterations to run',
                        default=10000,
                        type=int)
    parser.add_argument('--quit_delta',
                        help='Number of iterations at which to quit if no improvement.',
                        default=250,
                        type=int)
    parser.add_argument('--eval_freq',
                        help='Frequency at which to evaluate model.',
                        default=25,
                        type=int)
    parser.add_argument('--item_per_user',
                        help='Number of items test during validation .',
                        default=100,
                        type=int)
    parser.add_argument('--learning_rate',
                        help='Initial learning rate.',
                        default=0.001,
                        type=float)
    parser.add_argument('--num_dims',
                        help='Model dimensionality.',
                        default=10,
                        type=int)
    parser.add_argument('--mini_batch_size',
                        help='Size of the mini Batch.',
                        default=128,
                        type=int)
    parser.add_argument('--max_lens',
                        help='maximun lenght for long term history',
                        default=100,
                        type=int)
    parser.add_argument('--user_min',
                        help='Number of minimal actions for a user',
                        default=5,
                        type=int)
    parser.add_argument('--item_min',
                        help='Number of minimal interaction for a item',
                        default=5,
                        type=int)
    parser.add_argument('--min_count',
                        help='Minimun times that a sequence appears in all users history',
                        default=1,
                        type=int)
    parser.add_argument('--L',
                        help='Maximun size of a sequence',
                        default=1,
                        type=int)
    parser.add_argument('--emb_reg',
                        help='L2 regularization: embbeding regularization.',
                        default=0.001,
                        type=float)
    parser.add_argument('--bias_reg',
                        help='L2 regularization: Bias regularization.',
                        default=0.001,
                        type=float)
    parser.add_argument('--alpha',
                        help='Alpha for long term.',
                        default=-1.0,
                        type=float)
    parser.add_argument('--gamma',
                        help='Gamma to unified the short term and the long term. 0 equal to have only short term, 1 equal to have only long term',
                        default=0.5,
                        type=float)
    parser.add_argument('--seed',
                        help='Seed',
                        default=1,
                        type=int)
    parser.add_argument('--prediction_name_file',
                        help='Prediction\'s name file',
                        default=None)
    parser.add_argument('--prediction_TopN',
                        help='TopN recommandation to keep per users',
                        default=25,
                        type=int)
    parser.add_argument('--evaluation_name_file',
                        help='Evaluation\'s name file',
                        default=None)
    parser.add_argument('--search_hyperparameters_name_file',
                        help='Name of the search hyperparameters file (i.e. All evaluation of a grid serach)',
                        default=None)
    parser.add_argument('--production', type=str2bool, nargs='?', const=True,
                        help='Args to make prediction in all data available (Mode for production use cases not experimental study) ',
                        default=False)
    parser.add_argument('--cold_start_user', type=str2bool, nargs='?', const=True,
                        help='Args to make prediction in cold start data ',
                        default=False)
    args = parser.parse_args()
    print(args)
    print('')
    return(args)
###################################################
###### EVALUATIONS & PREDICTIONS FUNCTIONS ########
###################################################
# Column order of the one-row evaluation CSV written by save_results /
# save_results_cold_start: run metadata, dataset statistics,
# hyper-parameters, timings, then validation/test metrics.
HEADER_EVALUATION = ['model', 'dataset', 'date',
                     'nUsers_init', 'nUsers', 'nUsers_invalid', 'nItems_init', 'nItems', 'nClicks_init', 'nClicks', 'num_pos_events',
                     'damping_fsub', 'max_iters', 'quit_delta', 'eval_freq', 'item_per_user',
                     'learning_rate', 'num_dims', 'mini_batch_size', 'max_lens', 'user_min', 'item_min',
                     'min_count', 'L', 'emb_reg', 'bias_reg', 'alpha', 'gamma',
                     'timeToTrain', 'timeToEval', 'bestIte',
                     'best_AUC_val', 'best_AUC_test', 'AUC_val', 'AUC_test',
                     'HIT5_val', 'HIT5_test', 'HIT10_val', 'HIT10_test', 'HIT25_val', 'HIT25_test', 'HIT50_val', 'HIT50_test',
                     'NDCG5_val', 'NDCG5_test', 'NDCG10_val', 'NDCG10_test', 'NDCG25_val', 'NDCG25_test', 'NDCG50_val', 'NDCG50_test',
                     'MRR_val', 'MRR_test']
# Column order of the per-user top-K prediction CSV (one row per user x rank).
HEADER_PREDICTION = ['model', 'dataset', 'date',
                     'user', 'userID', 'item', 'itemID',
                     'ranking', 'prediction']
def save_results(model, results_model):
    """Write the evaluation metrics and per-user top-K test predictions to CSV.

    Produces two files under "02-Resultats": a one-row evaluation summary
    (columns HEADER_EVALUATION) and a prediction listing (columns
    HEADER_PREDICTION).  File names default to a hyper-parameter string
    unless overridden via --evaluation_name_file / --prediction_name_file.

    Parameters:
        model: trained model; its ``args`` and ``dataset`` attributes are read.
        results_model: dict produced by training, with timings, best-epoch
            info and "valid_metrics" / "test_metrics" metric dicts.

    Side effects: writes two CSV files; calls exit() when the top-K
    prediction arrays are inconsistent in length.
    """
    # Evaluation summary file name: explicit override or hyper-parameter string.
    if model.args.evaluation_name_file is None:
        results_name_file = (model.args.model + "_" + model.dataset.data_name +
                             "_fsubT_nextItem1_batch_" + str(model.args.mini_batch_size) +
                             "_userMin_" + str(model.args.user_min) + "_itemMin_" + str(model.args.item_min) +
                             "_dims_" + str(model.args.num_dims) + "_max_lens_" + str(model.args.max_lens) +
                             "_" + model.args.damping_fsub + ".csv")
    else:
        results_name_file = model.args.evaluation_name_file
    results_path = os.path.join("02-Resultats", "Evaluations", results_name_file)
    print("Creation of \"" + results_path + "\"")
    # One row in HEADER_EVALUATION's exact column order.
    df_results_model = pd.DataFrame(columns=HEADER_EVALUATION)
    df_results_model.loc[0] = [model.args.model, model.dataset.data_name, date.today().strftime("%d/%m/%Y"),
                               model.dataset.nb_users_init, model.dataset.nb_users, model.dataset.nb_users_invalid, model.dataset.nb_items_init, model.dataset.nb_items, model.dataset.nb_events_init, model.dataset.nb_events, model.dataset.nb_train_events,
                               model.args.damping_fsub, model.args.max_iters, model.args.quit_delta, model.args.eval_freq, model.args.item_per_user,
                               model.args.learning_rate, model.args.num_dims, model.args.mini_batch_size, model.args.max_lens, model.args.user_min, model.args.item_min,
                               model.args.min_count, model.args.L, model.args.emb_reg, model.args.bias_reg, model.args.alpha, model.args.gamma,
                               results_model["time_to_train"], results_model["time_to_eval"], results_model["best_epoch"],
                               results_model["best_val_auc"], results_model["best_test_auc"], results_model["valid_metrics"]['AUC'], results_model["test_metrics"]['AUC'],
                               results_model["valid_metrics"]['HIT_5'], results_model["test_metrics"]['HIT_5'], results_model["valid_metrics"]['HIT_10'], results_model["test_metrics"]['HIT_10'],
                               results_model["valid_metrics"]['HIT_25'], results_model["test_metrics"]['HIT_25'], results_model["valid_metrics"]['HIT_50'], results_model["test_metrics"]['HIT_50'],
                               results_model["valid_metrics"]['NDCG_5'], results_model["test_metrics"]['NDCG_5'], results_model["valid_metrics"]['NDCG_10'], results_model["test_metrics"]['NDCG_10'],
                               results_model["valid_metrics"]['NDCG_25'], results_model["test_metrics"]['NDCG_25'], results_model["valid_metrics"]['NDCG_50'], results_model["test_metrics"]['NDCG_50'],
                               results_model["valid_metrics"]['MRR'], results_model["test_metrics"]['MRR']]
    df_results_model.to_csv(results_path, encoding='utf-8', index=False)
    # Prediction file name: same naming convention as the evaluation file.
    if model.args.prediction_name_file is None:
        prediction_name_file = (model.args.model + "_" + model.dataset.data_name +
                                "_fsubT_nextItem1_batch_" + str(model.args.mini_batch_size) +
                                "_userMin_" + str(model.args.user_min) + "_itemMin_" + str(model.args.item_min) +
                                "_dims_" + str(model.args.num_dims) + "_max_lens_" + str(model.args.max_lens) +
                                "_" + model.args.damping_fsub + ".csv")
    else:
        prediction_name_file = model.args.prediction_name_file
    prediction_path = os.path.join("02-Resultats", "Predictions", prediction_name_file)
    print("Creation of \"" + prediction_path + "\"")
    # Sanity checks: the three top-K arrays must be aligned per user.
    if len(results_model["test_metrics"]['topK_predictions_users']) != len(results_model["test_metrics"]['topK_predictions_score']):
        print("Error : In lenght for prediction")
        exit()
    if len(results_model["test_metrics"]['topK_predictions_users']) != len(results_model["test_metrics"]['topK_predictions_items']):
        print("Error : In lenght for prediction")
        exit()
    # One output row per (user, rank) pair.
    list_prediction_model = []
    for u in range(len(results_model["test_metrics"]['topK_predictions_users'])):
        for k in range(model.dataset.args.prediction_TopN):
            if model.dataset.args.production: # production mode maps ids with the production dict, training mode with the train dict
                list_prediction_model.append([model.dataset.args.model, model.dataset.data_name, date.today().strftime("%d/%m/%Y"),
                                              results_model["test_metrics"]['topK_predictions_users'][u], model.dataset.dict_user_id_to_name_prod[results_model["test_metrics"]['topK_predictions_users'][u]],
                                              results_model["test_metrics"]['topK_predictions_items'][u][k], model.dataset.dict_item_id_to_name[results_model["test_metrics"]['topK_predictions_items'][u][k]],
                                              k, results_model["test_metrics"]['topK_predictions_score'][u][k]
                                              ])
            else:
                list_prediction_model.append([model.dataset.args.model, model.dataset.data_name, date.today().strftime("%d/%m/%Y"),
                                              results_model["test_metrics"]['topK_predictions_users'][u], model.dataset.dict_user_id_to_name[results_model["test_metrics"]['topK_predictions_users'][u]],
                                              results_model["test_metrics"]['topK_predictions_items'][u][k], model.dataset.dict_item_id_to_name[results_model["test_metrics"]['topK_predictions_items'][u][k]],
                                              k, results_model["test_metrics"]['topK_predictions_score'][u][k]
                                              ])
    df_prediction_model = pd.DataFrame(list_prediction_model)
    df_prediction_model.columns = HEADER_PREDICTION
    df_prediction_model.head(5)  # NOTE(review): no-op -- result is discarded
    df_prediction_model.to_csv(prediction_path, encoding='utf-8', index=False)
def save_results_cold_start(model, results_model):
    """Write evaluation metrics and cold-start top-K predictions to CSV.

    Same output layout as ``save_results`` but reads the predictions from
    "valid_metrics" and maps user ids through the cold-start user dictionary
    (``dict_user_id_to_name_cold_start_user``).

    Parameters:
        model: trained model; its ``args`` and ``dataset`` attributes are read.
        results_model: dict produced by training with "valid_metrics" /
            "test_metrics" metric dicts and timing info.

    Side effects: writes two CSV files; calls exit() when the top-K
    prediction arrays are inconsistent in length.
    """
    if model.args.evaluation_name_file is None:
        results_name_file = (model.args.model + "_" + model.dataset.data_name +
                             "_fsubT_nextItem1_batch_" + str(model.args.mini_batch_size) +
                             "_userMin_" + str(model.args.user_min) + "_itemMin_" + str(model.args.item_min) +
                             "_dims_" + str(model.args.num_dims) + "_max_lens_" + str(model.args.max_lens) +
                             "_" + model.args.damping_fsub + ".csv")
    else:
        results_name_file = model.args.evaluation_name_file
    results_path = os.path.join("02-Resultats", "Evaluations", results_name_file)
    print("Creation of \"" + results_path + "\"")
    # One row in HEADER_EVALUATION's exact column order.
    df_results_model = pd.DataFrame(columns=HEADER_EVALUATION)
    df_results_model.loc[0] = [model.args.model, model.dataset.data_name, date.today().strftime("%d/%m/%Y"),
                               model.dataset.nb_users_init, model.dataset.nb_users, model.dataset.nb_users_invalid, model.dataset.nb_items_init, model.dataset.nb_items, model.dataset.nb_events_init, model.dataset.nb_events, model.dataset.nb_train_events,
                               model.args.damping_fsub, model.args.max_iters, model.args.quit_delta, model.args.eval_freq, model.args.item_per_user,
                               model.args.learning_rate, model.args.num_dims, model.args.mini_batch_size, model.args.max_lens, model.args.user_min, model.args.item_min,
                               model.args.min_count, model.args.L, model.args.emb_reg, model.args.bias_reg, model.args.alpha, model.args.gamma,
                               results_model["time_to_train"], results_model["time_to_eval"], results_model["best_epoch"],
                               results_model["best_val_auc"], results_model["best_test_auc"], results_model["valid_metrics"]['AUC'], results_model["test_metrics"]['AUC'],
                               results_model["valid_metrics"]['HIT_5'], results_model["test_metrics"]['HIT_5'], results_model["valid_metrics"]['HIT_10'], results_model["test_metrics"]['HIT_10'],
                               results_model["valid_metrics"]['HIT_25'], results_model["test_metrics"]['HIT_25'], results_model["valid_metrics"]['HIT_50'], results_model["test_metrics"]['HIT_50'],
                               results_model["valid_metrics"]['NDCG_5'], results_model["test_metrics"]['NDCG_5'], results_model["valid_metrics"]['NDCG_10'], results_model["test_metrics"]['NDCG_10'],
                               results_model["valid_metrics"]['NDCG_25'], results_model["test_metrics"]['NDCG_25'], results_model["valid_metrics"]['NDCG_50'], results_model["test_metrics"]['NDCG_50'],
                               results_model["valid_metrics"]['MRR'], results_model["test_metrics"]['MRR']]
    df_results_model.to_csv(results_path, encoding='utf-8', index=False)
    if model.args.prediction_name_file is None:
        prediction_name_file = (model.args.model + "_" + model.dataset.data_name +
                                "_fsubT_nextItem1_batch_" + str(model.args.mini_batch_size) +
                                "_userMin_" + str(model.args.user_min) + "_itemMin_" + str(model.args.item_min) +
                                "_dims_" + str(model.args.num_dims) + "_max_lens_" + str(model.args.max_lens) +
                                "_" + model.args.damping_fsub + ".csv")
    else:
        prediction_name_file = model.args.prediction_name_file
    prediction_path = os.path.join("02-Resultats", "Predictions", prediction_name_file)
    print("Creation of \"" + prediction_path + "\"")
    # Sanity checks: the three top-K arrays must be aligned per user.
    if len(results_model["valid_metrics"]['topK_predictions_users']) != len(results_model["valid_metrics"]['topK_predictions_score']):
        print("Error : In lenght for prediction")
        exit()
    if len(results_model["valid_metrics"]['topK_predictions_users']) != len(results_model["valid_metrics"]['topK_predictions_items']):
        print("Error : In lenght for prediction")
        exit()
    # NOTE(review): the next two statements have no effect beyond raising
    # KeyError when id 0 is missing -- they look like leftover debug probes.
    model.dataset.dict_user_id_to_name_cold_start_user[0]
    model.dataset.dict_user_id_to_name[0]
    # One output row per (user, rank) pair, using the cold-start user mapping.
    list_prediction_model = []
    for u in range(len(results_model["valid_metrics"]['topK_predictions_users'])):
        for k in range(model.dataset.args.prediction_TopN):
            list_prediction_model.append([model.dataset.args.model, model.dataset.data_name, date.today().strftime("%d/%m/%Y"),
                                          results_model["valid_metrics"]['topK_predictions_users'][u], model.dataset.dict_user_id_to_name_cold_start_user[results_model["valid_metrics"]['topK_predictions_users'][u]],
                                          results_model["valid_metrics"]['topK_predictions_items'][u][k], model.dataset.dict_item_id_to_name[results_model["valid_metrics"]['topK_predictions_items'][u][k]],
                                          k, results_model["valid_metrics"]['topK_predictions_score'][u][k]
                                          ])
    df_prediction_model = pd.DataFrame(list_prediction_model)
    df_prediction_model.columns = HEADER_PREDICTION
    df_prediction_model.head(5)  # NOTE(review): no-op -- result is discarded
    df_prediction_model.to_csv(prediction_path, encoding='utf-8', index=False)
###################################################
################ EVALUATION FUNCTIONS #############
###################################################
def sample_evaluate_valid(model, d, sess):
    """Evaluate *model* on the validation split with sampled negatives.

    For each user whose validation pair is valid (neither field is -1), the
    true next item is ranked against ``d.args.item_per_user`` uniformly
    sampled items; AUC, MRR, NDCG@{5,10,25,50} and HIT@{5,10,25,50} are
    accumulated and averaged over evaluated users.

    NOTE(review): sampled negatives may collide with items the user already
    saw, so the metrics are approximate.  Raises ZeroDivisionError if no user
    is evaluable.

    Returns:
        dict of metric name -> averaged value.
    """
    item_to_eval = d.args.item_per_user + 1  # sampled negatives + the positive item
    AUC = 0.0
    MRR = 0.0
    NDCG_5 = 0.0
    NDCG_10 = 0.0
    NDCG_25 = 0.0
    NDCG_50 = 0.0
    HIT_5 = 0.0
    HIT_10 = 0.0
    HIT_25 = 0.0
    HIT_50 = 0.0
    valid_user = 0.0
    u = -1  # index into the pre-filtered d.valid_* arrays
    for u_temp in range(d.nb_users):
        # u = 0
        if d.valid_data[u_temp][0] == -1 or d.valid_data[u_temp][1] == -1:
            # print(u)
            continue
        else:
            u += 1
            valid_user += 1.0
            # Repeat the user context once per candidate item.
            users_tmp = np.repeat(d.valid_user_id[u], item_to_eval)
            prev_items_tmp = np.repeat(d.valid_prev_item_id[u].reshape((1, 1)), item_to_eval, axis=0)
            if d.args.model in d.REBUSMODEL:
                list_fsub_items_id_tmp = np.repeat(d.valid_fsub_items_id[u].reshape((1, d.args.L)), item_to_eval, axis=0)
                list_fsub_items_values_tmp = np.repeat(d.valid_fsub_items_value[u].reshape((1, d.args.L)), item_to_eval, axis=0)
            else:
                list_fsub_items_id_tmp = None
                list_fsub_items_values_tmp = None
            # Candidates: item_to_eval-1 random items, the positive item last.
            pos_items_tmp = np.append(np.random.randint(0, d.nb_items, size=item_to_eval-1, dtype=np.int32), d.valid_pos_item_id[u])
            list_prev_items_pos_tmp = np.append(np.repeat(d.valid_list_prev_items_id[u].reshape((1, d.args.max_lens)), item_to_eval-1, axis=0), d.valid_list_prev_items_id_pos[u].reshape((1, d.args.max_lens)), axis=0)
            # Mask each candidate out of its own history (d.nb_items = padding id).
            list_prev_items_pos_tmp[list_prev_items_pos_tmp == pos_items_tmp.reshape((pos_items_tmp.shape[0], 1))] = d.nb_items
            predictions = model.predict(sess, users_tmp, prev_items_tmp, list_fsub_items_id_tmp, list_fsub_items_values_tmp, pos_items_tmp, list_prev_items_pos_tmp)
            predictions = predictions[0].reshape(item_to_eval)
            # Number of negatives the positive (last) item outranks.
            count_auc_test = np.float32(predictions[item_to_eval-1] > predictions[0:item_to_eval-1]).sum()
            rank = item_to_eval - (count_auc_test + 1) # rank strat from 0 to item_to_eval-1
            AUC += count_auc_test / (item_to_eval - 1) # We take off one item that corresponding to pos_item
            MRR += 1.0 / (rank+1)
            if rank < 5:
                NDCG_5 += 1 / np.log2(rank + 2)
                HIT_5 += 1
            if rank < 10:
                NDCG_10 += 1 / np.log2(rank + 2)
                HIT_10 += 1
            if rank < 25:
                NDCG_25 += 1 / np.log2(rank + 2)
                HIT_25 += 1
            if rank < 50:
                NDCG_50 += 1 / np.log2(rank + 2)
                HIT_50 += 1
            # if valid_user % 500 == 0:
            #     print("user "+str(valid_user))
    # Average every accumulated metric over the evaluated users.
    AUC = AUC/valid_user
    MRR = MRR/valid_user
    NDCG_5 = NDCG_5/valid_user
    NDCG_10 = NDCG_10/valid_user
    NDCG_25 = NDCG_25/valid_user
    NDCG_50 = NDCG_50/valid_user
    HIT_5 = HIT_5/valid_user
    HIT_10 = HIT_10/valid_user
    HIT_25 = HIT_25/valid_user
    HIT_50 = HIT_50/valid_user
    return {
        'AUC': AUC, 'MRR': MRR,
        'NDCG_5': NDCG_5, 'NDCG_10': NDCG_10, 'NDCG_25': NDCG_25, 'NDCG_50': NDCG_50,
        'HIT_5': HIT_5, 'HIT_10': HIT_10, 'HIT_25': HIT_25, 'HIT_50': HIT_50,
    }
def sample_evaluate_valid_faster(model, d, sess):
    """Batched version of ``sample_evaluate_valid``.

    Buffers up to ``user_to_eval`` (500) users' candidate sets and scores them
    with a single ``model.predict`` call per batch, then unpacks the per-user
    rankings.  Sampling protocol and metrics (AUC, MRR, NDCG@k, HIT@k,
    averaged over users) are identical to ``sample_evaluate_valid``.

    Fixes vs. the original:
      * the user-validity test used ``!= -1 or != -1``, the incorrect
        De Morgan complement of the skip test ``== -1 or == -1`` in
        sample_evaluate_valid; with ``or`` a user with only one valid field
        was included, desynchronising ``u`` from the pre-filtered
        ``d.valid_*`` arrays.  Both fields must be valid.
      * guards against division by zero when no user is evaluable.

    Returns:
        dict of metric name -> averaged value (zeros when no user evaluated).
    """
    print("*** Launch sample_evaluate_valid_faster ****")
    item_to_eval = d.args.item_per_user + 1  # sampled negatives + positive item
    AUC = 0.0
    MRR = 0.0
    NDCG_5 = 0.0
    NDCG_10 = 0.0
    NDCG_25 = 0.0
    NDCG_50 = 0.0
    HIT_5 = 0.0
    HIT_10 = 0.0
    HIT_25 = 0.0
    HIT_50 = 0.0
    valid_user = 0.0
    u = -1       # index into the pre-filtered d.valid_* arrays
    u_eval = -1  # slot index within the current batch buffer
    user_to_eval = 500  # users per batched predict call
    users = np.zeros((item_to_eval*user_to_eval,))
    prev_items = np.zeros((item_to_eval*user_to_eval, 1))
    if d.args.model in d.REBUSMODEL:
        list_fsub_items_id = np.zeros((item_to_eval*user_to_eval, d.args.L))
        list_fsub_items_values = np.zeros((item_to_eval*user_to_eval, d.args.L))
    else:
        list_fsub_items_id = None
        list_fsub_items_values = None
    pos_items = np.zeros((item_to_eval*user_to_eval,))
    list_prev_items_pos = np.zeros((item_to_eval*user_to_eval, d.args.max_lens))
    for u_temp in range(d.nb_users):
        # BUGFIX: was `... != -1 or ... != -1`; both fields must be valid to
        # match the skip condition used by sample_evaluate_valid.
        if d.valid_data[u_temp][0] != -1 and d.valid_data[u_temp][1] != -1:
            u += 1
            u_eval += 1
            valid_user += 1.0
            users_tmp = np.repeat(d.valid_user_id[u], item_to_eval)
            prev_items_tmp = np.repeat(d.valid_prev_item_id[u].reshape((1, 1)), item_to_eval, axis=0)
            if d.args.model in d.REBUSMODEL:
                list_fsub_items_id_tmp = np.repeat(d.valid_fsub_items_id[u].reshape((1, d.args.L)), item_to_eval, axis=0)
                list_fsub_items_values_tmp = np.repeat(d.valid_fsub_items_value[u].reshape((1, d.args.L)), item_to_eval, axis=0)
            # Candidates: item_to_eval-1 random negatives, the positive item last.
            pos_items_tmp = np.append(np.random.randint(0, d.nb_items, size=item_to_eval-1, dtype=np.int32), d.valid_pos_item_id[u])
            list_prev_items_pos_tmp = np.append(np.repeat(d.valid_list_prev_items_id[u].reshape((1, d.args.max_lens)), item_to_eval-1, axis=0), d.valid_list_prev_items_id_pos[u].reshape((1, d.args.max_lens)), axis=0)
            # Mask each candidate out of its own history (d.nb_items = padding id).
            list_prev_items_pos_tmp[list_prev_items_pos_tmp == pos_items_tmp.reshape((pos_items_tmp.shape[0], 1))] = d.nb_items
            # Copy this user's candidate slice into the batch buffers.
            users[u_eval*item_to_eval:u_eval*item_to_eval+item_to_eval] = d.valid_user_id[u]
            prev_items[u_eval*item_to_eval:u_eval*item_to_eval+item_to_eval] = d.valid_prev_item_id[u]
            if d.args.model in d.REBUSMODEL:
                list_fsub_items_id[u_eval*item_to_eval:u_eval*item_to_eval+item_to_eval] = list_fsub_items_id_tmp
                list_fsub_items_values[u_eval*item_to_eval:u_eval*item_to_eval+item_to_eval] = list_fsub_items_values_tmp
            pos_items[u_eval*item_to_eval:u_eval*item_to_eval+item_to_eval] = pos_items_tmp
            list_prev_items_pos[u_eval*item_to_eval:u_eval*item_to_eval+item_to_eval] = list_prev_items_pos_tmp
        if u_eval == user_to_eval - 1 or u_temp == d.nb_users-1:
            # Buffer full (or last user reached): score the whole batch at once.
            predictions = model.predict(sess, users, prev_items, list_fsub_items_id, list_fsub_items_values, pos_items, list_prev_items_pos)
            predictions = predictions[0]
            # Shrink the next batch when fewer users remain than a full buffer.
            if user_to_eval > d.nb_users - 1 - u_temp:
                user_to_eval = d.nb_users - u_temp - 1
            u_eval = -1
            users = np.zeros((item_to_eval*user_to_eval,))
            prev_items = np.zeros((item_to_eval*user_to_eval, 1))
            if d.args.model in d.REBUSMODEL:
                list_fsub_items_id = np.zeros((item_to_eval*user_to_eval, d.args.L))
                list_fsub_items_values = np.zeros((item_to_eval*user_to_eval, d.args.L))
            else:
                list_fsub_items_id = None
                list_fsub_items_values = None
            pos_items = np.zeros((item_to_eval*user_to_eval,))
            list_prev_items_pos = np.zeros((item_to_eval*user_to_eval, d.args.max_lens))
            # Unpack the batch: one contiguous item_to_eval slice per user.
            for uu in range(int(predictions.shape[0]/item_to_eval)):
                count_auc_test = np.float32(predictions[uu*(item_to_eval)+item_to_eval-1] > predictions[uu*(item_to_eval):uu*(item_to_eval)+item_to_eval-1]).sum()
                rank = item_to_eval - (count_auc_test + 1)  # rank from 0 to item_to_eval-1
                AUC += count_auc_test / (item_to_eval - 1)  # exclude the pos_item itself
                MRR += 1.0 / (rank+1)
                if rank < 5:
                    NDCG_5 += 1 / np.log2(rank + 2)
                    HIT_5 += 1
                if rank < 10:
                    NDCG_10 += 1 / np.log2(rank + 2)
                    HIT_10 += 1
                if rank < 25:
                    NDCG_25 += 1 / np.log2(rank + 2)
                    HIT_25 += 1
                if rank < 50:
                    NDCG_50 += 1 / np.log2(rank + 2)
                    HIT_50 += 1
    # Average the metrics; guard against an empty validation set
    # (originally this divided by zero).
    if valid_user > 0:
        AUC = AUC/valid_user
        MRR = MRR/valid_user
        NDCG_5 = NDCG_5/valid_user
        NDCG_10 = NDCG_10/valid_user
        NDCG_25 = NDCG_25/valid_user
        NDCG_50 = NDCG_50/valid_user
        HIT_5 = HIT_5/valid_user
        HIT_10 = HIT_10/valid_user
        HIT_25 = HIT_25/valid_user
        HIT_50 = HIT_50/valid_user
    return {
        'AUC': AUC, 'MRR': MRR,
        'NDCG_5': NDCG_5, 'NDCG_10': NDCG_10, 'NDCG_25': NDCG_25, 'NDCG_50': NDCG_50,
        'HIT_5': HIT_5, 'HIT_10': HIT_10, 'HIT_25': HIT_25, 'HIT_50': HIT_50,
    }
def sample_evaluate_test(model, d, sess):
    """Evaluate *model* on the test split with sampled negatives.

    Mirrors ``sample_evaluate_valid`` but reads the ``d.test_*`` arrays:
    each user's true next item is ranked against ``d.args.item_per_user``
    uniformly sampled items and AUC / MRR / NDCG@k / HIT@k are averaged over
    evaluated users.  Raises ZeroDivisionError if no user is evaluable.

    Returns:
        dict of metric name -> averaged value.
    """
    item_to_eval = d.args.item_per_user + 1  # sampled negatives + positive item
    AUC = 0.0
    MRR = 0.0
    NDCG_5 = 0.0
    NDCG_10 = 0.0
    NDCG_25 = 0.0
    NDCG_50 = 0.0
    HIT_5 = 0.0
    HIT_10 = 0.0
    HIT_25 = 0.0
    HIT_50 = 0.0
    valid_user = 0.0
    u = -1  # index into the pre-filtered d.test_* arrays
    for u_temp in range(d.nb_users):
        if d.test_data[u_temp][0] == -1 or d.test_data[u_temp][1] == -1:
            continue
        else:
            u += 1
            valid_user += 1.0
            # Repeat the user context once per candidate item.
            users_tmp = np.repeat(d.test_user_id[u], item_to_eval)
            prev_items_tmp = np.repeat(d.test_prev_item_id[u].reshape((1, 1)), item_to_eval, axis=0)
            if d.args.model in d.REBUSMODEL:
                list_fsub_items_id_tmp = np.repeat(d.test_fsub_items_id[u].reshape((1, d.args.L)), item_to_eval, axis=0)
                list_fsub_items_values_tmp = np.repeat(d.test_fsub_items_value[u].reshape((1, d.args.L)), item_to_eval, axis=0)
            else:
                list_fsub_items_id_tmp = None
                list_fsub_items_values_tmp = None
            # Candidates: item_to_eval-1 random items, the positive item last.
            pos_items_tmp = np.append(np.random.randint(0, d.nb_items, size=item_to_eval-1, dtype=np.int32), d.test_pos_item_id[u])
            list_prev_items_pos_tmp = np.append(np.repeat(d.test_list_prev_items_id[u].reshape((1, d.args.max_lens)), item_to_eval-1, axis=0), d.test_list_prev_items_id_pos[u].reshape((1, d.args.max_lens)), axis=0)
            # Mask each candidate out of its own history (d.nb_items = padding id).
            list_prev_items_pos_tmp[list_prev_items_pos_tmp == pos_items_tmp.reshape((pos_items_tmp.shape[0], 1))] = d.nb_items
            predictions = model.predict(sess, users_tmp, prev_items_tmp, list_fsub_items_id_tmp, list_fsub_items_values_tmp, pos_items_tmp, list_prev_items_pos_tmp)
            predictions = predictions[0].reshape(item_to_eval)
            # Number of negatives the positive (last) item outranks.
            count_auc_test = np.float32(predictions[item_to_eval-1] > predictions[0:item_to_eval-1]).sum()
            rank = item_to_eval - (count_auc_test + 1) # rank strat from 0 to item_to_eval-1
            AUC += count_auc_test / (item_to_eval - 1) # We take off one item that corresponding to pos_item
            MRR += 1.0 / (rank+1)
            if rank < 5:
                NDCG_5 += 1 / np.log2(rank + 2)
                HIT_5 += 1
            if rank < 10:
                NDCG_10 += 1 / np.log2(rank + 2)
                HIT_10 += 1
            if rank < 25:
                NDCG_25 += 1 / np.log2(rank + 2)
                HIT_25 += 1
            if rank < 50:
                NDCG_50 += 1 / np.log2(rank + 2)
                HIT_50 += 1
    # Average every accumulated metric over the evaluated users.
    AUC = AUC/valid_user
    MRR = MRR/valid_user
    NDCG_5 = NDCG_5/valid_user
    NDCG_10 = NDCG_10/valid_user
    NDCG_25 = NDCG_25/valid_user
    NDCG_50 = NDCG_50/valid_user
    HIT_5 = HIT_5/valid_user
    HIT_10 = HIT_10/valid_user
    HIT_25 = HIT_25/valid_user
    HIT_50 = HIT_50/valid_user
    return {
        'AUC': AUC, 'MRR': MRR,
        'NDCG_5': NDCG_5, 'NDCG_10': NDCG_10, 'NDCG_25': NDCG_25, 'NDCG_50': NDCG_50,
        'HIT_5': HIT_5, 'HIT_10': HIT_10, 'HIT_25': HIT_25, 'HIT_50': HIT_50,
    }
def evaluate_valid(model, d, sess):
    """Evaluate *model* on the validation split against ALL unseen items.

    Unlike ``sample_evaluate_valid``, the candidate set per user is every
    item not already in that user's history (via ``np.setdiff1d``), so the
    metrics are exact rather than sampled.  Accumulates AUC, MRR,
    NDCG@{5,10,25,50} and HIT@{5,10,25,50} averaged over evaluated users.
    Raises ZeroDivisionError if no user is evaluable.

    Returns:
        dict of the averaged metrics plus 'valid_user', the number of users
        actually evaluated.
    """
    AUC = 0.0
    MRR = 0.0
    NDCG_5 = 0.0
    NDCG_10 = 0.0
    NDCG_25 = 0.0
    NDCG_50 = 0.0
    HIT_5 = 0.0
    HIT_10 = 0.0
    HIT_25 = 0.0
    HIT_50 = 0.0
    valid_user = 0.0
    u = -1  # index into the pre-filtered d.valid_* arrays
    for u_temp in range(d.nb_users):
        if d.valid_data[u_temp][0] == -1 or d.valid_data[u_temp][1] == -1:
            continue
        else:
            u += 1
            valid_user += 1.0
            valid_data_set_eval_np = np.setdiff1d(d.np_list_items, d.valid_data_set_np[u_temp]) # We exclude item already seen by user
            item_to_eval = len(valid_data_set_eval_np) + 1 # All items that are not in user history except the pos_item
            # Repeat the user context once per candidate item.
            users_tmp = np.repeat(d.valid_user_id[u], item_to_eval)
            prev_items_tmp = np.repeat(d.valid_prev_item_id[u].reshape((1, 1)), item_to_eval, axis=0)
            if d.args.model in d.REBUSMODEL:
                list_fsub_items_id_tmp = np.repeat(d.valid_fsub_items_id[u].reshape((1, d.args.L)), item_to_eval, axis=0)
                list_fsub_items_values_tmp = np.repeat(d.valid_fsub_items_value[u].reshape((1, d.args.L)), item_to_eval, axis=0)
            else:
                list_fsub_items_id_tmp = None
                list_fsub_items_values_tmp = None
            # Candidates: all unseen items, the positive item last.
            pos_items_tmp = np.append(valid_data_set_eval_np, d.valid_pos_item_id[u])
            list_prev_items_pos_tmp = np.append(np.repeat(d.valid_list_prev_items_id[u].reshape((1, d.args.max_lens)), item_to_eval-1, axis=0), d.valid_list_prev_items_id_pos[u].reshape((1, d.args.max_lens)), axis=0)
            # Mask each candidate out of its own history (d.nb_items = padding id).
            list_prev_items_pos_tmp[list_prev_items_pos_tmp == pos_items_tmp.reshape((pos_items_tmp.shape[0], 1))] = d.nb_items
            predictions = model.predict(sess, users_tmp, prev_items_tmp, list_fsub_items_id_tmp, list_fsub_items_values_tmp, pos_items_tmp, list_prev_items_pos_tmp)
            predictions = predictions[0].reshape(item_to_eval)
            # Number of candidates the positive (last) item outranks.
            count_auc_test = np.float32(predictions[item_to_eval-1] > predictions[0:item_to_eval-1]).sum()
            rank = item_to_eval - (count_auc_test + 1) # rank strat from 0 to item_to_eval-1
            AUC += count_auc_test / (item_to_eval - 1) # We take off one item that corresponding to pos_item
            MRR += 1.0 / (rank+1)
            if rank < 5:
                NDCG_5 += 1 / np.log2(rank + 2)
                HIT_5 += 1
            if rank < 10:
                NDCG_10 += 1 / np.log2(rank + 2)
                HIT_10 += 1
            if rank < 25:
                NDCG_25 += 1 / np.log2(rank + 2)
                HIT_25 += 1
            if rank < 50:
                NDCG_50 += 1 / np.log2(rank + 2)
                HIT_50 += 1
    # Average every accumulated metric over the evaluated users.
    AUC = AUC/valid_user
    MRR = MRR/valid_user
    NDCG_5 = NDCG_5/valid_user
    NDCG_10 = NDCG_10/valid_user
    NDCG_25 = NDCG_25/valid_user
    NDCG_50 = NDCG_50/valid_user
    HIT_5 = HIT_5/valid_user
    HIT_10 = HIT_10/valid_user
    HIT_25 = HIT_25/valid_user
    HIT_50 = HIT_50/valid_user
    # print('--- Validation Evaluation --- (AUC = {}), (NDCG_10 = {}), (HIT_10 = {})'.format(AUC, NDCG_10, HIT_10))
    return {
        'AUC': AUC, 'MRR': MRR,
        'NDCG_5': NDCG_5, 'NDCG_10': NDCG_10, 'NDCG_25': NDCG_25, 'NDCG_50': NDCG_50,
        'HIT_5': HIT_5, 'HIT_10': HIT_10, 'HIT_25': HIT_25, 'HIT_50': HIT_50,
        'valid_user': valid_user
    }
def evaluate_test(model, d, sess):
    """Rank each user's held-out test item against every item the user has not seen.

    Args:
        model: trained model exposing ``predict(sess, users, prev_items, fsub_ids,
            fsub_values, candidate_items, prev_items_pos)``.
        d: dataset/config object holding the test arrays and ``d.args``.
        sess: TensorFlow session passed through to ``model.predict``.

    Returns:
        Dict with averaged AUC, MRR, NDCG@{5,10,25,50}, HIT@{5,10,25,50},
        the number of evaluated users, and (when not grid-searching) the
        per-user top-N predictions.
    """
    AUC = 0.0
    MRR = 0.0
    NDCG_5 = 0.0
    NDCG_10 = 0.0
    NDCG_25 = 0.0
    NDCG_50 = 0.0
    HIT_5 = 0.0
    HIT_10 = 0.0
    HIT_25 = 0.0
    HIT_50 = 0.0
    valid_user = 0.0
    topK_predictions_score = []
    topK_predictions_items = []
    topK_predictions_users = []
    # u indexes the compacted per-user test arrays (valid users only);
    # u_temp indexes all users.
    u = -1
    for u_temp in range(d.nb_users):
        # u = 0
        # Users flagged with -1 have no usable test interaction; skip them.
        if d.test_data[u_temp][0] == -1 or d.test_data[u_temp][1] == -1:
            # print(u)
            continue
        else:
            u += 1
        valid_user += 1.0
        test_data_set_eval_np = np.setdiff1d(d.np_list_items, d.test_data_set_np[u_temp]) # We exclude item already seen by user
        item_to_eval = len(test_data_set_eval_np) + 1 # All items that are not in user history except the pos_item
        # Broadcast the user's features to one row per candidate item.
        users_tmp = np.repeat(d.test_user_id[u], item_to_eval)
        prev_items_tmp = np.repeat(d.test_prev_item_id[u].reshape((1, 1)), item_to_eval, axis=0)
        # Frequent-sub-sequence inputs only exist for REBUS-family models.
        if d.args.model in d.REBUSMODEL:
            list_fsub_items_id_tmp = np.repeat(d.test_fsub_items_id[u].reshape((1, d.args.L)), item_to_eval, axis=0)
            list_fsub_items_values_tmp = np.repeat(d.test_fsub_items_value[u].reshape((1, d.args.L)), item_to_eval, axis=0)
        else:
            list_fsub_items_id_tmp = None
            list_fsub_items_values_tmp = None
        # Candidates = all unseen items, with the true positive appended LAST.
        pos_items_tmp = np.append(test_data_set_eval_np, d.test_pos_item_id[u])
        list_prev_items_pos_tmp = np.append(np.repeat(d.test_list_prev_items_id[u].reshape((1, d.args.max_lens)), item_to_eval-1, axis=0), d.test_list_prev_items_id_pos[u].reshape((1, d.args.max_lens)), axis=0)
        # Mask any candidate appearing in its own history with the padding id.
        list_prev_items_pos_tmp[list_prev_items_pos_tmp == pos_items_tmp.reshape((pos_items_tmp.shape[0], 1))] = d.nb_items
        predictions = model.predict(sess, users_tmp, prev_items_tmp, list_fsub_items_id_tmp, list_fsub_items_values_tmp, pos_items_tmp, list_prev_items_pos_tmp)
        predictions = predictions[0].reshape(item_to_eval)
        # Evaluation
        # Number of negatives scored strictly below the positive (index -1).
        count_auc_test = np.float32(predictions[item_to_eval-1] > predictions[0:item_to_eval-1]).sum()
        rank = item_to_eval - (count_auc_test + 1) # rank strat from 0 to item_to_eval-1
        AUC += count_auc_test / (item_to_eval - 1) # We take off one item that corresponding to pos_item
        MRR += 1.0 / (rank+1)
        if rank < 5:
            NDCG_5 += 1 / np.log2(rank + 2)
            HIT_5 += 1
        if rank < 10:
            NDCG_10 += 1 / np.log2(rank + 2)
            HIT_10 += 1
        if rank < 25:
            NDCG_25 += 1 / np.log2(rank + 2)
            HIT_25 += 1
        if rank < 50:
            NDCG_50 += 1 / np.log2(rank + 2)
            HIT_50 += 1
        # Prediction
        if (not d.grid_search) and (not d.args.production):
            # argsort of negated scores = descending order; keep the top N.
            sort_index = np.argsort(-predictions)
            topK_predictions_score.append(predictions[sort_index][0:d.args.prediction_TopN])
            topK_predictions_items.append(pos_items_tmp[sort_index][0:d.args.prediction_TopN])
            topK_predictions_users.append(u_temp)
    # Average accumulated metrics over the number of evaluated users.
    AUC = AUC/valid_user
    MRR = MRR/valid_user
    NDCG_5 = NDCG_5/valid_user
    NDCG_10 = NDCG_10/valid_user
    NDCG_25 = NDCG_25/valid_user
    NDCG_50 = NDCG_50/valid_user
    HIT_5 = HIT_5/valid_user
    HIT_10 = HIT_10/valid_user
    HIT_25 = HIT_25/valid_user
    HIT_50 = HIT_50/valid_user
    print("--- evaluate_test - valid_user {} ---".format(valid_user))
    # In production mode, predictions come from the dedicated production path.
    if (not d.grid_search) and (d.args.production):
        topK_predictions_score, topK_predictions_items, topK_predictions_users = prediction_prod(model, d, sess)
    # print('--- Test Evaluation --- (AUC = {}), (NDCG_10 = {}), (HIT_10 = {})'.format(AUC, NDCG_10, HIT_10))
    return {
        'AUC': AUC, 'MRR': MRR,
        'NDCG_5': NDCG_5, 'NDCG_10': NDCG_10, 'NDCG_25': NDCG_25, 'NDCG_50': NDCG_50,
        'HIT_5': HIT_5, 'HIT_10': HIT_10, 'HIT_25': HIT_25, 'HIT_50': HIT_50,
        'valid_user': valid_user,
        'topK_predictions_score': topK_predictions_score,
        'topK_predictions_items': topK_predictions_items,
        'topK_predictions_users': topK_predictions_users,
    }
def evaluate_cold_start(model, d, sess):
    """Same ranking evaluation as ``evaluate_test`` but restricted to
    cold-start users (users held out of training).

    Returns the same metric dict shape as ``evaluate_test``.
    """
    AUC = 0.0
    MRR = 0.0
    NDCG_5 = 0.0
    NDCG_10 = 0.0
    NDCG_25 = 0.0
    NDCG_50 = 0.0
    HIT_5 = 0.0
    HIT_10 = 0.0
    HIT_25 = 0.0
    HIT_50 = 0.0
    valid_user = 0.0
    topK_predictions_score = []
    topK_predictions_items = []
    topK_predictions_users = []
    # u indexes the compacted cold-start arrays; u_temp indexes all cold-start users.
    u = -1
    for u_temp in range(d.nb_users_cold_start_user):
        # u = 0
        # -1 flags users without a usable test interaction; skip them.
        if d.cold_start_user_test_data[u_temp][0] == -1 or d.cold_start_user_test_data[u_temp][1] == -1:
            # print(u)
            continue
        else:
            u += 1
        valid_user += 1.0
        cold_start_user_test_data_set_eval_np = np.setdiff1d(d.np_list_items, d.cold_start_user_test_data_set_np[u_temp]) # We exclude item already seen by user
        item_to_eval = len(cold_start_user_test_data_set_eval_np) + 1 # All items that are not in user history except the pos_item
        # One row of user features per candidate item.
        users_tmp = np.repeat(d.cold_start_user_test_user_id[u], item_to_eval)
        prev_items_tmp = np.repeat(d.cold_start_user_test_prev_item_id[u].reshape((1, 1)), item_to_eval, axis=0)
        # Frequent-sub-sequence inputs only exist for REBUS-family models.
        if d.args.model in d.REBUSMODEL:
            list_fsub_items_id_tmp = np.repeat(d.cold_start_user_test_fsub_items_id[u].reshape((1, d.args.L)), item_to_eval, axis=0)
            list_fsub_items_values_tmp = np.repeat(d.cold_start_user_test_fsub_items_value[u].reshape((1, d.args.L)), item_to_eval, axis=0)
        else:
            list_fsub_items_id_tmp = None
            list_fsub_items_values_tmp = None
        # Candidates = all unseen items, with the true positive appended LAST.
        pos_items_tmp = np.append(cold_start_user_test_data_set_eval_np, d.cold_start_user_test_pos_item_id[u])
        list_prev_items_pos_tmp = np.append(np.repeat(d.cold_start_user_test_list_prev_items_id[u].reshape((1, d.args.max_lens)), item_to_eval-1, axis=0), d.cold_start_user_test_list_prev_items_id_pos[u].reshape((1, d.args.max_lens)), axis=0)
        # Mask candidates appearing in their own history with the padding id.
        list_prev_items_pos_tmp[list_prev_items_pos_tmp == pos_items_tmp.reshape((pos_items_tmp.shape[0], 1))] = d.nb_items
        predictions = model.predict(sess, users_tmp, prev_items_tmp, list_fsub_items_id_tmp, list_fsub_items_values_tmp, pos_items_tmp, list_prev_items_pos_tmp)
        predictions = predictions[0].reshape(item_to_eval)
        # Evaluation
        # Negatives scored strictly below the positive item (at index -1).
        count_auc_test = np.float32(predictions[item_to_eval-1] > predictions[0:item_to_eval-1]).sum()
        rank = item_to_eval - (count_auc_test + 1) # rank strat from 0 to item_to_eval-1
        AUC += count_auc_test / (item_to_eval - 1) # We take off one item that corresponding to pos_item
        MRR += 1.0 / (rank+1)
        if rank < 5:
            NDCG_5 += 1 / np.log2(rank + 2)
            HIT_5 += 1
        if rank < 10:
            NDCG_10 += 1 / np.log2(rank + 2)
            HIT_10 += 1
        if rank < 25:
            NDCG_25 += 1 / np.log2(rank + 2)
            HIT_25 += 1
        if rank < 50:
            NDCG_50 += 1 / np.log2(rank + 2)
            HIT_50 += 1
        if (not d.grid_search) and (not d.args.production):
            # Descending sort of scores; keep the top N predictions.
            sort_index = np.argsort(-predictions)
            topK_predictions_score.append(predictions[sort_index][0:d.args.prediction_TopN])
            topK_predictions_items.append(pos_items_tmp[sort_index][0:d.args.prediction_TopN])
            topK_predictions_users.append(u_temp)
    # Average accumulated metrics over the number of evaluated users.
    AUC = AUC/valid_user
    MRR = MRR/valid_user
    NDCG_5 = NDCG_5/valid_user
    NDCG_10 = NDCG_10/valid_user
    NDCG_25 = NDCG_25/valid_user
    NDCG_50 = NDCG_50/valid_user
    HIT_5 = HIT_5/valid_user
    HIT_10 = HIT_10/valid_user
    HIT_25 = HIT_25/valid_user
    HIT_50 = HIT_50/valid_user
    # NOTE(review): this log line says "evaluate_test" but we are in
    # evaluate_cold_start — the label is misleading in the logs.
    print("--- evaluate_test - valid_user {} ---".format(valid_user))
    # print('--- Test Evaluation --- (AUC = {}), (NDCG_10 = {}), (HIT_10 = {})'.format(AUC, NDCG_10, HIT_10))
    return {
        'AUC': AUC, 'MRR': MRR,
        'NDCG_5': NDCG_5, 'NDCG_10': NDCG_10, 'NDCG_25': NDCG_25, 'NDCG_50': NDCG_50,
        'HIT_5': HIT_5, 'HIT_10': HIT_10, 'HIT_25': HIT_25, 'HIT_50': HIT_50,
        'valid_user': valid_user,
        'topK_predictions_score': topK_predictions_score,
        'topK_predictions_items': topK_predictions_items,
        'topK_predictions_users': topK_predictions_users,
    }
def prediction_prod(model, d, sess):
    """Production scoring: for every production user, score all items not in
    the user's history and keep the top-N.

    Returns:
        Three parallel lists: per-user top-N scores, top-N item ids, user ids.
    """
    print("----- Start Prediction Production -----")
    topK_predictions_score = []
    topK_predictions_items = []
    topK_predictions_users = []
    for u in range(d.nb_users_prod):
        # Candidate set = all items minus the user's history (no held-out positive).
        prod_data_set_eval_np = np.setdiff1d(d.np_list_items, d.prod_data_set_np[u])
        item_to_eval = len(prod_data_set_eval_np) # All items that are not in user history
        users_tmp = np.repeat(d.prod_user_id[u], item_to_eval)
        prev_items_tmp = np.repeat(d.prod_prev_item_id[u].reshape((1, 1)), item_to_eval, axis=0)
        # Frequent-sub-sequence inputs only exist for REBUS-family models.
        if d.args.model in d.REBUSMODEL:
            list_fsub_items_id_tmp = np.repeat(d.prod_fsub_items_id[u].reshape((1, d.args.L)), item_to_eval, axis=0)
            list_fsub_items_values_tmp = np.repeat(d.prod_fsub_items_value[u].reshape((1, d.args.L)), item_to_eval, axis=0)
        else:
            list_fsub_items_id_tmp = None
            list_fsub_items_values_tmp = None
        pos_items_tmp = np.array(prod_data_set_eval_np)
        list_prev_items_pos_tmp = np.repeat(d.prod_list_prev_items_id[u].reshape((1, d.args.max_lens)), item_to_eval, axis=0)
        # Mask a candidate appearing in its own history with the padding id.
        list_prev_items_pos_tmp[list_prev_items_pos_tmp == pos_items_tmp.reshape((pos_items_tmp.shape[0], 1))] = d.nb_items
        predictions = model.predict(sess, users_tmp, prev_items_tmp, list_fsub_items_id_tmp, list_fsub_items_values_tmp, pos_items_tmp, list_prev_items_pos_tmp)
        predictions = predictions[0].reshape(item_to_eval)
        # Descending sort of scores; keep the top N.
        sort_index = np.argsort(-predictions)
        topK_predictions_score.append(predictions[sort_index][0:d.args.prediction_TopN])
        topK_predictions_items.append(pos_items_tmp[sort_index][0:d.args.prediction_TopN])
        topK_predictions_users.append(u)
    return topK_predictions_score, topK_predictions_items, topK_predictions_users
def find_path_stars(fsub_set, items_prev):
    """Greedily build the longest matching sub-sequence ending at the most
    recent item, consuming *items_prev* from its tail.

    Each step pops the next-older item and keeps it only if prepending it to
    the sequence matched so far (joined with '-') is still in *fsub_set*.
    Falls back to the single most-recent item when nothing matches.

    Side effect: *items_prev* is emptied by the pops, as in the original.
    """
    fallback = items_prev[-1]  # most recent item, returned alone when no match
    matched = ""
    path = []
    while items_prev:
        candidate = str(items_prev.pop())
        key = candidate if matched == "" else candidate + "-" + matched
        if key in fsub_set:
            matched = key
            path.append(int(candidate))
    return path if path else [fallback]
def find_markov_chains_k(items_prev, K):
    """Pop up to *K* items from the end of *items_prev* and return them as
    ints, most recent first (a length-K Markov-chain context).

    Side effect (kept from the original): the returned items are removed
    from *items_prev*.

    Fixes over the original: no longer raises IndexError on an empty
    *items_prev* (the old code read ``items_prev[-1]`` into an unused
    local), and the never-read ``sequence`` accumulator is removed.
    """
    path = []
    while items_prev and len(path) < K:
        path.append(int(items_prev.pop()))
    return path
def damping_linear(x, current_fsub_size):
    """Linear damping weight: 1 at x == 0, falling to 0 at x == current_fsub_size."""
    slope = -(1 / current_fsub_size)
    return slope * x + 1
def softmax(x):
    """Return the numerically stable softmax of *x*, normalized along axis 0."""
    shifted = x - np.max(x)  # subtract the global max so exp() cannot overflow
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=0)
|
def log_make_dir(dirname):
    """Create *dirname* if it does not already exist (single level, like os.mkdir).

    Uses EAFP instead of the original isdir()/mkdir() pair, which had a
    race window between the check and the creation.
    """
    try:
        os.mkdir(dirname)
    except FileExistsError:
        pass  # already present — same outcome the original's isdir() guard gave
def _reg_loss(x,w):
    # Evaluate the autoencoder regularization loss eagerly and return its mean.
    # NOTE(review): relies on module-level `K` (presumably keras.backend) and
    # `AE_reg_loss` defined elsewhere in this file — confirm before reuse.
    return np.mean(K.eval(AE_reg_loss(K.constant(x), K.constant(w))))
def log_update_loglik_recons_reg():
    """Append the current log-likelihood, reconstruction error and
    regularization error to their module-level history lists and print them.

    Reads/writes module globals: loglik, recons_error, reg_error, AE, x_test,
    w_all, v_all and the LDS parameters (A, b, H, C, d, Q, R, mu_0, Sig_0).
    """
    loglik.append(Kalman_tools.log_likelihood(w_all,A,b,H,v_all,C,d,Q,R,mu_0,Sig_0))
    print('')
    print('loglik = ')
    print(loglik)
    # AE.predict appears to return (reconstruction, latent code, _) — TODO confirm
    # against the autoencoder definition elsewhere in this file.
    [x_bar, w, _] = AE.predict(x_test)
    # Mean squared reconstruction error on the test set.
    tmp = np.mean((x_bar - x_test) ** 2)
    recons_error.append(tmp)
    print('recons_error = ')
    print(recons_error)
    reg_error.append(_reg_loss(x_test,w))
    print('reg_error = ')
    print(reg_error)
def log_save_weights(iter_EM, iter_CoorAsc):
    """Checkpoint the autoencoder, action map and LDS parameters.

    ``iter_CoorAsc == -1`` marks an end-of-EM-iteration checkpoint saved
    directly under ./tuned_params/ (history goes to ./loglikelihood.pkl);
    otherwise files go in a per-EM-iteration subfolder (history to
    ./results.pkl), matching the original behavior.

    Fixes over the original: pickle files are written through ``with`` so
    the handles from ``pickle.dump(..., open(...))`` are no longer leaked,
    and the else-branch LDS filename gains the missing '_' separator so it
    matches the '<iter>_LDS_params.pkl' pattern of the other branch.
    """
    if iter_CoorAsc == -1:
        prefix = './tuned_params/' + str(iter_EM) + '_'
        history_path = './loglikelihood.pkl'
    else:
        prefix = './tuned_params/' + str(iter_EM) + '/' + str(iter_CoorAsc) + '_'
        history_path = './results.pkl'
    AE.save_weights(prefix + 'AE_params.h5')
    act_map.save_weights(prefix + 'act_map_params.h5')
    with open(prefix + 'LDS_params.pkl', 'wb') as f:
        pickle.dump([A,b,H,C,d,Q,R,mu_0,Sig_0], f)
    with open(history_path, 'wb') as f:
        pickle.dump([loglik,recons_error], f)
    with open('./E_log.pkl', 'wb') as f:
        pickle.dump(E_log, f)
    with open('./fit_hists.pkl', 'wb') as f:
        pickle.dump([hist_0,hist_1,hist_2], f)
def log_print_fit_hists():
    """Print training-progress summaries from the module-level fit histories.

    For hist_0 and hist_1 (lists of Keras History.history dicts), prints the
    mean of the last 10 values of each dict's 2nd–4th metrics; then prints
    the latest entry of hist_2. Replaces the original's three copy-pasted
    print blocks with one loop (same output, same order).
    """
    for hist in (hist_0, hist_1):
        print('-------------------')
        latest = hist[-1]
        keys = list(latest.keys())
        # Skip keys[0] (typically the loss); summarize the next three metrics.
        for key in keys[1:4]:
            print(np.mean(latest[key][-10:]))
        print('-------------------')
        print('-------------------')
    print(hist_2[-1])
    print('-------------------')
def log_update_E_log():
    """Re-encode all observations/actions with the current networks, then
    append the expected complete-data log-probability to E_log.

    Reads/writes module globals: x_all, u_all, w_all, v_all, enc, act_map,
    E_log, Ezt, EztztT, Ezt_1ztT and the LDS parameters.
    """
    # Refresh latent encodings of the observations.
    for i in range(len(x_all)):
        w_all[i] = enc.predict(x_all[i])
    # Refresh mapped action representations.
    for i in range(len(u_all)):
        v_all[i] = act_map.predict(u_all[i])
    E_log.append(E_log_P_x_and_z(Ezt, EztztT, Ezt_1ztT,w_all,A,b,H,v_all,C,d,Q,R,mu_0,Sig_0))
    print('E[log...] = ', E_log)
"""Model store which provides pretrained models."""
from __future__ import print_function
import os
import zipfile
from ..utils.download import download, check_sha1
__all__ = ['get_model_file', 'get_resnet_file']
# _model_sha1 = {name: checksum for checksum, name in [
#     ('25c4b50959ef024fcc050213a06b614899f94b3d', 'resnet50'),
#     ('2a57e44de9c853fa015b172309a1ee7e2d0e4e2a', 'resnet101'),
#     ('0d43d698c66aceaa2bc0309f55efdd7ff4b143af', 'resnet152'),
# ]}
# Maps model name -> full SHA1 of the hosted weight file. Used to verify
# downloads (check_sha1) and to build the 8-char short-hash file suffix.
_model_sha1 = {name: checksum for checksum, name in [
    # resnest
    ('fb9de5b360976e3e8bd3679d3e93c5409a5eff3c', 'resnest50'),
    ('966fb78c22323b0c68097c5c1242bd16d3e07fd5', 'resnest101'),
    ('d7fd712f5a1fcee5b3ce176026fbb6d0d278454a', 'resnest200'),
    ('51ae5f19032e22af4ec08e695496547acdba5ce5', 'resnest269'),
    # rectified
    #('9b5dc32b3b36ca1a6b41ecd4906830fc84dae8ed', 'resnet101_rt'),
    # resnet other variants
    ('a75c83cfc89a56a4e8ba71b14f1ec67e923787b3', 'resnet50s'),
    ('03a0f310d6447880f1b22a83bd7d1aa7fc702c6e', 'resnet101s'),
    ('36670e8bc2428ecd5b7db1578538e2dd23872813', 'resnet152s'),
    # other segmentation backbones
    ('da4785cfc837bf00ef95b52fb218feefe703011f', 'wideresnet38'),
    ('b41562160173ee2e979b795c551d3c7143b1e5b5', 'wideresnet50'),
    # deepten paper
    ('1225f149519c7a0113c43a056153c1bb15468ac0', 'deepten_resnet50_minc'),
    # segmentation resnet models
    ('662e979de25a389f11c65e9f1df7e06c2c356381', 'fcn_resnet50s_ade'),
    ('4de91d5922d4d3264f678b663f874da72e82db00', 'encnet_resnet50s_pcontext'),
    ('9f27ea13d514d7010e59988341bcbd4140fcc33d', 'encnet_resnet101s_pcontext'),
    ('07ac287cd77e53ea583f37454e17d30ce1509a4a', 'encnet_resnet50s_ade'),
    ('3f54fa3b67bac7619cd9b3673f5c8227cf8f4718', 'encnet_resnet101s_ade'),
    # resnest segmentation models
    ('4aba491aaf8e4866a9c9981b210e3e3266ac1f2a', 'fcn_resnest50_ade'),
    ('2225f09d0f40b9a168d9091652194bc35ec2a5a9', 'deeplab_resnest50_ade'),
    ('06ca799c8cc148fe0fafb5b6d052052935aa3cc8', 'deeplab_resnest101_ade'),
    ('7b9e7d3e6f0e2c763c7d77cad14d306c0a31fe05', 'deeplab_resnest200_ade'),
    ('0074dd10a6e6696f6f521653fb98224e75955496', 'deeplab_resnest269_ade'),
    ('77a2161deeb1564e8b9c41a4bb7a3f33998b00ad', 'fcn_resnest50_pcontext'),
    ('08dccbc4f4694baab631e037a374d76d8108c61f', 'deeplab_resnest50_pcontext'),
    ('faf5841853aae64bd965a7bdc2cdc6e7a2b5d898', 'deeplab_resnest101_pcontext'),
    ('fe76a26551dd5dcf2d474fd37cba99d43f6e984e', 'deeplab_resnest200_pcontext'),
    ('b661fd26c49656e01e9487cd9245babb12f37449', 'deeplab_resnest269_pcontext'),
]}
# encoding_repo_url = 'https://hangzh.s3.amazonaws.com/'
# Default download host; can be overridden with the ENCODING_REPO env var
# (see get_resnet_file, which also appends a trailing '/' when missing).
encoding_repo_url = 'https://s3.us-west-1.wasabisys.com/encoding'
_url_format = '{repo_url}encoding/models/{file_name}.zip'
def short_hash(name):
    """Return the first 8 hex characters of the pretrained checksum for *name*."""
    try:
        return _model_sha1[name][:8]
    except KeyError:
        raise ValueError('Pretrained model for {name} is not available.'.format(name=name)) from None
def get_resnet_file(name, root='~/.torch/models'):
    """Return a local path to the pretrained weight file for *name*,
    downloading and unzipping it when missing or corrupted.

    Names not in ``_model_sha1`` fall back to torchvision's hosted URLs.

    Args:
        name: model identifier (e.g. 'resnest50').
        root: cache directory; '~' is expanded.

    Returns:
        Path to the verified ``.pth`` file.

    Raises:
        ValueError: unknown model name, or the downloaded file fails the
            SHA1 check.
    """
    if name not in _model_sha1:
        # Not one of ours — delegate to torchvision's URL table.
        from torchvision.models.resnet import model_urls
        if name not in model_urls:
            raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
        root = os.path.expanduser(root)
        return download(model_urls[name],
                        path=root,
                        overwrite=True)
    file_name = '{name}-{short_hash}'.format(name=name, short_hash=short_hash(name))
    root = os.path.expanduser(root)
    file_path = os.path.join(root, file_name + '.pth')
    sha1_hash = _model_sha1[name]
    if os.path.exists(file_path):
        if check_sha1(file_path, sha1_hash):
            return file_path
        else:
            # BUG FIX: the original wrote "'... {} ...' + '...'.format(file_path)",
            # which applied .format() only to the second literal and left the
            # '{}' placeholder unfilled. Format the whole message instead.
            print('Mismatch in the content of model file {} detected. '
                  'Downloading again.'.format(file_path))
    else:
        print('Model file {} is not found. Downloading.'.format(file_path))
    if not os.path.exists(root):
        os.makedirs(root)
    zip_file_path = os.path.join(root, file_name + '.zip')
    repo_url = os.environ.get('ENCODING_REPO', encoding_repo_url)
    if repo_url[-1] != '/':
        repo_url = repo_url + '/'
    download(_url_format.format(repo_url=repo_url, file_name=file_name),
             path=zip_file_path,
             overwrite=True)
    # The archive contains the .pth; extract then discard the zip.
    with zipfile.ZipFile(zip_file_path) as zf:
        zf.extractall(root)
    os.remove(zip_file_path)
    if check_sha1(file_path, sha1_hash):
        return file_path
    else:
        raise ValueError('Downloaded file has different hash. Please try again.')
def get_model_file(name, root='~/.torch/models'):
    """Return the path of a locally cached model file ``<root>/<name>.pth``.

    Unlike ``get_resnet_file`` this helper never downloads anything.

    Raises:
        ValueError: if the file does not exist locally.
    """
    root = os.path.expanduser(root)
    file_path = os.path.join(root, name + '.pth')
    if os.path.exists(file_path):
        return file_path
    # FIX: the original message ("Downloading or trainning.") was misleading —
    # this function never downloads — and misspelled; report the missing path.
    raise ValueError('Model file {} is not found.'.format(file_path))
|
# -*- coding: utf-8 -*-
import KBEngine
import random
import SCDefine
import time
import d_avatar_inittab
from KBEDebug import *
from interfaces.GameObject import GameObject
class Avatar(KBEngine.Proxy,GameObject):
    """Player avatar proxy entity (base part).

    Bridges the connected client and the avatar's cell entity: creates the
    cell on activation, delays destruction on client loss, and hands the
    client back to the account entity when the player switches characters.
    """
    def __init__(self):
        KBEngine.Proxy.__init__(self)
        GameObject.__init__(self)
        # Back-reference to the owning account entity (assigned externally).
        self.accountEntity = None
        self.cellData["dbid"] = self.databaseID
        self.nameB = self.cellData["name"]
        self.spaceUTypeB = self.cellData["spaceUType"]
        # Timer id for the delayed destroy scheduled in onClientDeath (0 = none).
        self._destroyTimer = 0
    def onClientEnabled(self):
        """
        KBEngine method.
        The entity is now officially usable: its client-side counterpart
        exists, so the cell part can be created here.
        """
        INFO_MSG("Avatar[%i-%s] entities enable. spaceUTypeB=%s, entityCall:%s" % (self.id, self.nameB, self.spaceUTypeB, self.client))
        # A pending delayed-destroy means the client reconnected in time; cancel it.
        if self._destroyTimer > 0:
            self.delTimer(self._destroyTimer)
            self._destroyTimer = 0
        if self.cell is None:
            DEBUG_MSG('Avatar::onClientEnabled:position: %s' % (str(self.cellData["position"])))
            # NOTE(review): self.SpaceKey is only assigned in createCell — confirm
            # the Spaces manager sets it (or tolerates it missing) on first login.
            KBEngine.globalData["Spaces"].loginToSpace(self,self.cellData["position"], self.cellData["direction"], self.SpaceKey)
    def onGetCell(self):
        """
        KBEngine method.
        The cell part of this entity was created successfully.
        """
        DEBUG_MSG('Avatar::onGetCell: %s' % self.cell)
    def createCell(self,space,spaceKey):
        """
        Defined method.
        Create the cell entity in the given space and remember its key.
        """
        self.SpaceKey = spaceKey
        self.createCellEntity(space)
        DEBUG_MSG('Avatar::createCell: spaceKey = %i' % spaceKey)
    def destroySelf(self):
        """
        Tear down this avatar: destroy the cell first, then (re-entered via
        engine callbacks) notify the account entity and destroy the base.
        Aborts if a client is still attached.
        """
        if self.client is not None:
            return
        if self.cell is not None:
            # Destroy the cell entity; destruction continues after the engine
            # confirms the cell is gone.
            self.destroyCellEntity()
            return
        # If the account entity exists, also ask it to destroy itself —
        # unless a relogin happened within the last second.
        if self.accountEntity != None:
            if time.time() - self.accountEntity.relogin > 1:
                self.accountEntity.destroy()
            else:
                DEBUG_MSG("Avatar[%i].destroySelf: relogin =%i" % (self.id, time.time() - self.accountEntity.relogin))
        KBEngine.globalData["Spaces"].logoutSpace(self.id, self.SpaceKey)
        # Destroy the base part.
        if not self.isDestroyed:
            self.destroy()
    def transAvatar(self):
        """
        Exposed method.
        Client requests switching to another character: hand the client
        connection back to the account entity, then destroy this avatar.
        """
        DEBUG_MSG("Avatar[%i].TransAvatar: self.activeAvatar=%s" % (self.id,self.accountEntity.activeAvatar))
        # Note: the entity receiving giveClientTo must live on the current baseapp.
        if self.accountEntity is None or self.client is None:
            return
        if self.accountEntity.activeAvatar is not None:
            self.accountEntity.activeAvatar = None
        self.giveClientTo(self.accountEntity)
        # Destroy the cell entity.
        if self.cell is not None:
            self.destroyCellEntity()
        if not self.isDestroyed:
            self.destroy()
    #--------------------------------------------------------------------------------------------
    # Callbacks
    #--------------------------------------------------------------------------------------------
    def onTimer(self, tid, userArg):
        """
        KBEngine method.
        Engine callback fired when one of this entity's timers expires.
        """
        #DEBUG_MSG("%s::onTimer: %i, tid:%i, arg:%i" % (self.getScriptName(), self.id, tid, userArg))
        if SCDefine.TIMER_TYPE_DESTROY == userArg:
            self.onDestroyTimer()
        GameObject.onTimer(self, tid, userArg)
    def onClientDeath(self):
        """
        KBEngine method.
        The entity lost its client counterpart (disconnect).
        """
        DEBUG_MSG("Avatar[%i].onClientDeath:" % self.id)
        # Guard against the client disconnecting while the cell is still being
        # created: delay destruction so a quick reconnect can re-activate the
        # entity (see onClientEnabled) instead of racing the teardown.
        self._destroyTimer = self.addTimer(10, 0, SCDefine.TIMER_TYPE_DESTROY)
    def onClientGetCell(self):
        """
        KBEngine method.
        The client has received the cell entity's data.
        """
        INFO_MSG("Avatar[%i].onClientGetCell:%s" % (self.id, self.client))
    def onDestroyTimer(self):
        # Delayed-destroy timer fired without a reconnect: tear down now.
        DEBUG_MSG("Avatar::onDestroyTimer: %i" % (self.id))
        self.destroySelf()
    def onDestroy(self):
        """
        KBEngine method.
        Called as the entity is being destroyed; detach from the account.
        """
        DEBUG_MSG("Avatar::onDestroy: %i." % self.id)
        if self.accountEntity != None:
            self.accountEntity.activeAvatar = None
            self.accountEntity = None
|
# https://leetcode.com/problems/angle-between-hands-of-a-clock/
class Solution:
    def angleClock(self, hour: int, minutes: int) -> float:
        """Return the smaller angle (in degrees) between the hour and minute hands."""
        minute_deg = minutes * 6.0                       # 360 deg / 60 min
        hour_deg = (hour % 12) * 30.0 + minutes * 0.5    # 30 deg/hour + drift
        diff = abs(hour_deg - minute_deg)
        # Always report the smaller of the two angles around the dial.
        return 360 - diff if diff > 180 else diff
|
# Thanks to https://github.com/PavelOstyakov/toxic
import sys
import numpy as np
import pandas as pd
import nltk
import tqdm
sentence_length = 256  # fixed length each tokenized sentence is truncated/padded to
def tokenize_sentences(sentences, word_to_token):
    """Tokenize each sentence into word-token ids, growing the vocabulary in place.

    Args:
        sentences: iterable of str (or bytes, which are UTF-8 decoded).
        word_to_token: dict mapping lower-cased word -> token id; new words are
            appended with the next free id (mutated in place).

    Returns:
        (list of per-sentence token-id lists, the updated word_to_token dict).
    """
    tokenized_sentences = []
    for sentence in tqdm.tqdm(sentences):
        # Some inputs arrive as bytes; normalize to str.
        if hasattr(sentence, "decode"):
            sentence = sentence.decode("utf-8")
        tokens = []
        for word in nltk.tokenize.word_tokenize(sentence):
            lowered = word.lower()
            if lowered not in word_to_token:
                word_to_token[lowered] = len(word_to_token)
            tokens.append(word_to_token[lowered])
        tokenized_sentences.append(tokens)
    return tokenized_sentences, word_to_token
def load_embedding_list(file_path, word_to_token):
    """Read a word2vec-style text file and keep embeddings for known words.

    Returns:
        (embedding_list, word_to_embed) where word_to_embed maps a word to
        its row index in embedding_list.
    """
    word_to_embed = {}
    embedding_list = []
    with open(file_path) as f:
        # NOTE(review): [1:-1] skips the header line AND drops the file's last
        # line — confirm the last line really is junk, otherwise this should
        # be [1:]. readlines() also loads the whole file into memory.
        for row in tqdm.tqdm(f.readlines()[1:-1]):
            data = row.split(' ')
            word = data[0]
            if word in word_to_token:
                # NOTE(review): data[1:-1] discards the last whitespace-split
                # field of every row; if rows end with "<num>\n" this silently
                # loses one embedding dimension — verify the .vec format.
                embedding = np.array([float(num) for num in data[1:-1]],
                                     dtype=np.float32)
                embedding_list.append(embedding)
                word_to_embed[word] = len(word_to_embed)
    return embedding_list, word_to_embed
def token_to_embedding(tokenized_sentences, token_to_word,
                       word_to_embed, sentence_length):
    """Convert token ids to embedding-row indices, truncated/padded to
    *sentence_length*.

    Tokens whose word has no embedding are skipped; short rows are padded
    with ``len(word_to_embed)`` (the index of the appended zero vector).
    """
    pad_idx = len(word_to_embed)  # row index of the padding (zero) embedding
    encoded = []
    for tokens in tokenized_sentences:
        row = []
        for tok in tokens:
            idx = word_to_embed.get(token_to_word[tok], -1)
            if idx == -1:
                continue  # unknown word: no embedding available
            row.append(idx)
            if len(row) == sentence_length:
                break  # truncate overly long sentences
        # Pad every sentence to the same fixed length.
        row.extend([pad_idx] * (sentence_length - len(row)))
        encoded.append(row)
    return encoded
# --- Script entry: build fixed-length embedding-index matrices for the train
# and test CSVs and save them for the downstream model. ---
train_csv = 'dataset/train.csv'
# Optional CLI override of the train CSV path.
if len(sys.argv) > 1:
    train_csv = sys.argv[1]
print('Train file: {}'.format(train_csv))
print('Tokenizing train set...')
df = pd.read_csv(train_csv)
list_sentences = df['comment_text'].fillna('').values
tokenized_sentences_train, word_to_token = tokenize_sentences(list_sentences,
                                                              {})
print('Tokenizing test set...')
df = pd.read_csv('dataset/test.csv')
list_sentences = df['comment_text'].fillna('').values
# Reuse the train vocabulary so token ids are shared across both sets.
tokenized_sentences_test, word_to_token = tokenize_sentences(list_sentences,
                                                             word_to_token)
print('Loading embeddings...')
embedding_list, word_to_embed = load_embedding_list('dataset/word.vec',
                                                    word_to_token)
embedding_size = len(embedding_list[0])
# Extra all-zero row used as the padding ("end") embedding by token_to_embedding.
embedding_list.append(np.zeros(embedding_size, dtype=np.float32)) # end
token_to_word = {token: word for word, token in word_to_token.items()}
del word_to_token
print('Coverting embeddings...')
train_embedding = token_to_embedding(
    tokenized_sentences_train,
    token_to_word,
    word_to_embed,
    sentence_length)
test_embedding = token_to_embedding(
    tokenized_sentences_test,
    token_to_word,
    word_to_embed,
    sentence_length)
np.savez('dataset/text-embedding.npz', train_embedding=train_embedding,
         test_embedding=test_embedding, embedding_list=embedding_list)
# NOTE(review): saving a Python set through np.savez stores it as a 0-d object
# array; loading it back requires allow_pickle=True — confirm the loader.
np.savez('dataset/valid-words.npz', valid_words=set(token_to_word.values()))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.