text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
"""This test module verifies all circuit operation, gate, and circuit
methods."""
from __future__ import annotations
import numpy as np
import pytest
from hypothesis import given
from bqskit.ir.circuit import Circuit
from bqskit.ir.gate import Gate
from bqskit.ir.gates import CNOTGate
from bqskit.ir.gates import ConstantUnitaryGate
from bqskit.ir.gates import CPIGate
from bqskit.ir.gates import HGate
from bqskit.ir.gates import XGate
from bqskit.ir.gates.constant.cx import CXGate
from bqskit.ir.operation import Operation
from bqskit.ir.point import CircuitPoint
from bqskit.ir.point import CircuitPointLike
from bqskit.utils.test.strategies import circuits
from bqskit.utils.test.strategies import operations
from bqskit.utils.test.types import invalid_type_test
from bqskit.utils.test.types import valid_type_test
def check_no_idle_cycles(circuit: Circuit) -> None:
    """Assert that every cycle in `circuit` contains at least one operation."""
    assert all(
        not circuit._is_cycle_idle(cycle)
        for cycle in range(circuit.num_cycles)
    )
class TestCheckValidOperation:
    """This tests `circuit.check_valid_operation`."""

    @valid_type_test(Circuit(1).check_valid_operation)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).check_valid_operation)
    def test_invalid_type(self) -> None:
        pass

    def test_location_mismatch_1(self, qubit_gate: Gate) -> None:
        """An op whose last qudit index is out of range must be rejected."""
        circuit = Circuit(qubit_gate.num_qudits)
        location = list(range(qubit_gate.num_qudits))
        location[-1] += 1  # push the last index past the circuit's edge
        params = [0] * qubit_gate.num_params
        op = Operation(qubit_gate, location, params)
        # pytest.raises replaces the try/except/assert-False pattern and
        # still fails the test on any exception other than ValueError.
        with pytest.raises(ValueError):
            circuit.check_valid_operation(op)

    def test_location_mismatch_2(self, qutrit_gate: Gate) -> None:
        """Same out-of-range check, but for a qutrit gate/circuit."""
        circuit = Circuit(qutrit_gate.num_qudits, qutrit_gate.radixes)
        location = list(range(qutrit_gate.num_qudits))
        location[-1] += 1
        params = [0] * qutrit_gate.num_params
        op = Operation(qutrit_gate, location, params)
        with pytest.raises(ValueError):
            circuit.check_valid_operation(op)

    def test_radix_mismatch_1(self, qubit_gate: Gate) -> None:
        """A qubit gate placed on an all-qutrit circuit must be rejected."""
        circuit = Circuit(qubit_gate.num_qudits, [3] * qubit_gate.num_qudits)
        location = list(range(qubit_gate.num_qudits))
        params = [0] * qubit_gate.num_params
        op = Operation(qubit_gate, location, params)
        with pytest.raises(ValueError):
            circuit.check_valid_operation(op)

    def test_radix_mismatch_2(self, qutrit_gate: Gate) -> None:
        """A qutrit gate placed on a default (qubit) circuit must be rejected."""
        circuit = Circuit(qutrit_gate.num_qudits)
        location = list(range(qutrit_gate.num_qudits))
        params = [0] * qutrit_gate.num_params
        op = Operation(qutrit_gate, location, params)
        with pytest.raises(ValueError):
            circuit.check_valid_operation(op)

    def test_valid_1(self, gate: Gate) -> None:
        """A gate on a matching circuit is accepted."""
        circuit = Circuit(gate.num_qudits, gate.radixes)
        location = list(range(gate.num_qudits))
        params = [0] * gate.num_params
        circuit.check_valid_operation(Operation(gate, location, params))

    def test_valid_2(self, gate: Gate) -> None:
        """A gate offset onto a strictly larger circuit is accepted."""
        circuit = Circuit(gate.num_qudits + 2, (2, 2) + gate.radixes)
        location = [x + 2 for x in list(range(gate.num_qudits))]
        params = [0] * gate.num_params
        circuit.check_valid_operation(Operation(gate, location, params))

    def test_valid_3(self) -> None:
        """A gate whose radixes permute the circuit's radixes is accepted."""
        circuit = Circuit(2, [3, 2])
        gate = ConstantUnitaryGate(np.identity(6), [2, 3])
        circuit.check_valid_operation(Operation(gate, [1, 0]))
class TestGetOperation:
    """This tests `circuit.get_operation`."""

    @valid_type_test(Circuit(1).get_operation)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).get_operation)
    def test_invalid_type(self) -> None:
        pass

    @pytest.mark.parametrize(
        'point', [
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
        ],
    )
    def test_return_type(self, point: CircuitPointLike) -> None:
        """Any occupied point returns an Operation instance."""
        circuit = Circuit(5)
        # Fill a 5x5 grid of H gates so every probed point is occupied.
        for i in range(5):
            circuit.append_gate(HGate(), [0])
            circuit.append_gate(HGate(), [1])
            circuit.append_gate(HGate(), [2])
            circuit.append_gate(HGate(), [3])
            circuit.append_gate(HGate(), [4])
        assert isinstance(circuit.get_operation(point), Operation)

    @pytest.mark.parametrize(
        'point', [
            (-1000, 0),
            (1, -100),
            (-8, -8),
            (-6, -6),
            (-7, 4),
            (1000, 0),
            (1, 100),
            (8, 8),
            (6, 6),
            (5, 4),
            (3, 8),
            (2, 9),
            (8, 2),
        ],
    )
    def test_index_error_out_of_bounds(self, point: CircuitPointLike) -> None:
        """Out-of-range points raise IndexError."""
        circuit = Circuit(5)
        for i in range(5):
            circuit.append_gate(HGate(), [0])
            circuit.append_gate(HGate(), [1])
            circuit.append_gate(HGate(), [2])
            circuit.append_gate(HGate(), [3])
            circuit.append_gate(HGate(), [4])
        # pytest.raises replaces the try/except/assert-False pattern.
        with pytest.raises(IndexError):
            circuit.get_operation(point)

    def test_correctness_1(self, r6_qudit_circuit: Circuit) -> None:
        """get_operation agrees with the internal grid representation."""
        for x in range(r6_qudit_circuit.num_cycles):
            for y in range(r6_qudit_circuit.num_qudits):
                correct = r6_qudit_circuit._circuit[x][y]
                if correct is not None:
                    assert correct is r6_qudit_circuit.get_operation((x, y))
                else:
                    # Empty grid cells must raise IndexError, nothing else.
                    with pytest.raises(IndexError):
                        r6_qudit_circuit.get_operation((x, y))

    def test_correctness_2(self) -> None:
        circuit = Circuit(2)
        circuit.append_gate(HGate(), [0])
        circuit.append_gate(CNOTGate(), [0, 1])
        assert circuit.get_operation((0, 0)).gate == HGate()
        assert circuit.get_operation((1, 0)).gate == CNOTGate()
        assert circuit.get_operation((1, 1)).gate == CNOTGate()

    def test_example(self) -> None:
        circuit = Circuit(2)
        circuit.append_gate(HGate(), [0])
        circuit.append_gate(CNOTGate(), [0, 1])
        # Bug fix: the comparison result was previously discarded (no
        # `assert`), so this test checked nothing.
        assert repr(circuit.get_operation((1, 0))) == 'CNOTGate@(0,1)'
class TestPoint:
    """This tests `circuit.point`."""

    @valid_type_test(Circuit(1).point)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).point, [IndexError])
    def test_invalid_type(self) -> None:
        pass

    def test_return_type(self) -> None:
        """point() returns a CircuitPoint."""
        circuit = Circuit(5)
        for i in range(5):
            circuit.append_gate(HGate(), [0])
            circuit.append_gate(HGate(), [1])
            circuit.append_gate(HGate(), [2])
            circuit.append_gate(HGate(), [3])
            circuit.append_gate(HGate(), [4])
        assert isinstance(circuit.point(HGate()), CircuitPoint)

    def test_correctness_1(self, r6_qudit_circuit: Circuit) -> None:
        """point() locates each op whether given the op or just its gate."""
        for x in range(r6_qudit_circuit.num_cycles):
            for y in range(r6_qudit_circuit.num_qudits):
                op = r6_qudit_circuit._circuit[x][y]
                if op is not None:
                    point = r6_qudit_circuit.point(op, (x, y))
                    assert r6_qudit_circuit.get_operation(point) is op
                    point = r6_qudit_circuit.point(op, (x, y), (x, y))
                    assert r6_qudit_circuit.get_operation(point) is op
                    point = r6_qudit_circuit.point(op.gate, (x, y))
                    assert r6_qudit_circuit.get_operation(point) is op
                    point = r6_qudit_circuit.point(op.gate, (x, y), (x, y))
                    assert r6_qudit_circuit.get_operation(point) is op

    def test_correctness_2(self) -> None:
        circuit = Circuit(2)
        circuit.append_gate(HGate(), [0])
        circuit.append_gate(CNOTGate(), [0, 1])
        assert circuit.point(HGate()) == (0, 0)
        assert circuit.point(CNOTGate()) == (1, 0)
        assert circuit.point(Operation(HGate(), [0])) == (0, 0)
        assert circuit.point(Operation(CNOTGate(), [0, 1])) == (1, 0)
        # The same gate on a different location is not present.
        with pytest.raises(ValueError):
            circuit.point(Operation(CNOTGate(), [1, 0]))

    def test_invalid_value_1(self) -> None:
        """Searching for a gate not in the circuit raises ValueError."""
        circuit = Circuit(2)
        circuit.append_gate(HGate(), [0])
        circuit.append_gate(CNOTGate(), [0, 1])
        with pytest.raises(ValueError):
            circuit.point(CPIGate())

    def test_invalid_value_2(self) -> None:
        circuit = Circuit(2)
        circuit.append_gate(HGate(), [0])
        circuit.append_gate(CNOTGate(), [0, 1])
        with pytest.raises(ValueError):
            circuit.point(XGate())

    def test_example(self) -> None:
        circuit = Circuit(1)
        opH = Operation(HGate(), [0])
        circuit.append(opH)
        # repr() is the idiomatic spelling of __repr__().
        assert repr(circuit.point(opH)) == '(0, 0)'
        opX = Operation(XGate(), [0])
        circuit.append(opX)
        assert repr(circuit.point(opX)) == '(1, 0)'
class TestAppend:
    """This tests `circuit.append`."""

    @valid_type_test(Circuit(1).append)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).append)
    def test_invalid_type(self) -> None:
        pass

    @given(circuits())
    def test_reconstruct(self, circuit: Circuit) -> None:
        # Rebuild the circuit one operation at a time and verify the
        # rebuilt circuit implements the same unitary with no idle cycles.
        rebuilt = Circuit(circuit.num_qudits, circuit.radixes)
        for operation in circuit:
            rebuilt.append(operation)
        check_no_idle_cycles(rebuilt)
        assert rebuilt.get_unitary() == circuit.get_unitary()
class TestAppendGate:
    """This tests `circuit.append_gate`."""

    @valid_type_test(Circuit(1).append_gate)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).append_gate, [ValueError])
    def test_invalid_type(self) -> None:
        pass

    @given(circuits())
    def test_reconstruct(self, circuit: Circuit) -> None:
        # Rebuild the circuit gate by gate and verify equivalence.
        rebuilt = Circuit(circuit.num_qudits, circuit.radixes)
        for operation in circuit:
            rebuilt.append_gate(
                operation.gate, operation.location, operation.params,
            )
        check_no_idle_cycles(rebuilt)
        assert rebuilt.get_unitary() == circuit.get_unitary()
class TestAppendCircuit:
    """This tests `circuit.append_circuit`."""

    @valid_type_test(Circuit(1).append_circuit)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).append_circuit)
    def test_invalid_type(self) -> None:
        pass

    @given(circuits())
    def test_reconstruct(self, circuit: Circuit) -> None:
        # Appending a whole circuit onto an empty same-shape circuit
        # must reproduce its unitary exactly.
        rebuilt = Circuit(circuit.num_qudits, circuit.radixes)
        rebuilt.append_circuit(circuit, list(range(circuit.num_qudits)))
        check_no_idle_cycles(rebuilt)
        assert rebuilt.get_unitary() == circuit.get_unitary()

    @given(circuits())
    def test_reconstruct_larger(self, circuit: Circuit) -> None:
        # Append into a circuit with one extra qubit; padding the source
        # with an idle qudit should then yield the same unitary.
        larger = Circuit(circuit.num_qudits + 1, circuit.radixes + (2,))
        larger.append_circuit(circuit, list(range(circuit.num_qudits)))
        check_no_idle_cycles(larger)
        circuit.append_qudit()
        assert larger.get_unitary() == circuit.get_unitary()
class TestExtend:
    """This tests `circuit.extend`."""

    @valid_type_test(Circuit(1).extend)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).extend)
    def test_invalid_type(self) -> None:
        pass

    @given(circuits())
    def test_reconstruct(self, circuit: Circuit) -> None:
        # Extending an empty same-shape circuit reproduces the original.
        clone = Circuit(circuit.num_qudits, circuit.radixes)
        clone.extend(circuit)
        check_no_idle_cycles(clone)
        assert clone.get_unitary() == circuit.get_unitary()
class TestInsert:
    """This tests `circuit.insert`."""

    @valid_type_test(Circuit(1).insert)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).insert, [ValueError])
    def test_invalid_type(self) -> None:
        pass

    def test_empty(self) -> None:
        # Inserting into an empty circuit places the op at cycle 0.
        circuit = Circuit(2)
        cnot = Operation(CXGate(), (0, 1))
        circuit.insert(0, cnot)
        assert circuit[0, 0] == cnot

    @given(circuits((2, 2, 2, 2)), operations(2, max_qudit=3))
    def test_insert(self, circuit: Circuit, op: Operation) -> None:
        # Insert at the front, then at the back, and verify both landed
        # where expected without creating idle cycles.
        circuit.insert(0, op)
        assert circuit[0, op.location[0]] == op
        circuit.insert(circuit.num_cycles, op)
        assert circuit[-1, op.location[0]] == op
        check_no_idle_cycles(circuit)
class TestInsertGate:
    """This tests `circuit.insert_gate`."""

    @valid_type_test(Circuit(1).insert_gate)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).insert_gate, [ValueError])
    def test_invalid_type(self) -> None:
        pass

    def test_empty(self) -> None:
        # Inserting into an empty circuit places the gate at cycle 0.
        circuit = Circuit(2)
        circuit.insert_gate(0, CXGate(), (0, 1))
        assert circuit[0, 0] == Operation(CXGate(), (0, 1))

    @given(circuits((2, 2, 2, 2)), operations(2, max_qudit=3))
    def test_insert(self, circuit: Circuit, op: Operation) -> None:
        # Insert at the front, then at the back, and verify both landed
        # where expected without creating idle cycles.
        circuit.insert_gate(0, op.gate, op.location, op.params)
        assert circuit[0, op.location[0]] == op
        circuit.insert_gate(
            circuit.num_cycles, op.gate, op.location, op.params,
        )
        assert circuit[-1, op.location[0]] == op
        check_no_idle_cycles(circuit)
class TestInsertCircuit:
    """This tests `circuit.insert_circuit`."""

    @valid_type_test(Circuit(1).insert_circuit)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).insert_circuit, [ValueError, AttributeError])
    def test_invalid_type(self) -> None:
        pass

    @given(circuits((2, 2, 2, 2)))
    def test_apply(self, circuit: Circuit) -> None:
        # Insert the same circuit three times (front, back, front); the
        # accumulated unitary should be U, then U @ U, then U @ U @ U.
        new_circuit = Circuit(circuit.num_qudits, circuit.radixes)
        location = list(range(circuit.num_qudits))
        new_circuit.insert_circuit(0, circuit, location)
        U = circuit.get_unitary()
        assert U == new_circuit.get_unitary()
        # NOTE(review): these idle-cycle checks inspect the *source*
        # `circuit`, not the mutated `new_circuit` — possibly a
        # copy-paste slip; confirm intent before changing.
        check_no_idle_cycles(circuit)
        new_circuit.insert_circuit(new_circuit.num_cycles, circuit, location)
        assert U @ U == new_circuit.get_unitary()
        check_no_idle_cycles(circuit)
        new_circuit.insert_circuit(
            0,
            circuit,
            location,
        )
        assert U @ U @ U == new_circuit.get_unitary()
        check_no_idle_cycles(circuit)
class TestRemove:
    """This tests `circuit.remove`."""

    @valid_type_test(Circuit(1).remove)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).remove)
    def test_invalid_type(self) -> None:
        pass

    @given(circuits((2, 2, 2, 2)))
    def test_remove(self, circuit: Circuit) -> None:
        # Remove the front operation repeatedly until the circuit is
        # empty, checking counts and idle cycles at every step.
        remaining = circuit.num_operations
        while remaining > 0:
            front = next(iter(circuit.operations()))
            before = circuit.count(front)
            circuit.remove(front)
            assert remaining - circuit.num_operations == 1
            assert before - circuit.count(front) == 1
            remaining = circuit.num_operations
            check_no_idle_cycles(circuit)
class TestRemoveAll:
    """This tests `circuit.remove_all`."""

    @valid_type_test(Circuit(1).remove_all)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).remove_all)
    def test_invalid_type(self) -> None:
        pass

    @given(circuits((2, 2, 2, 2)))
    def test_remove_all_op(self, circuit: Circuit) -> None:
        # Drain the circuit one operation-kind at a time.
        remaining = circuit.num_operations
        while remaining > 0:
            front = next(iter(circuit.operations()))
            occurrences = circuit.count(front)
            circuit.remove_all(front)
            assert remaining - circuit.num_operations == occurrences
            assert circuit.count(front) == 0
            with pytest.raises((ValueError, IndexError)):
                circuit.point(front)
            remaining = circuit.num_operations
            check_no_idle_cycles(circuit)

    @given(circuits((2, 2, 2, 2)))
    def test_remove_all_gate(self, circuit: Circuit) -> None:
        # Drain the circuit one gate-kind at a time.
        remaining = circuit.num_operations
        while remaining > 0:
            front = next(iter(circuit.operations()))
            occurrences = circuit.count(front.gate)
            circuit.remove_all(front.gate)
            assert remaining - circuit.num_operations == occurrences
            assert circuit.count(front.gate) == 0
            with pytest.raises((ValueError, IndexError)):
                circuit.point(front.gate)
            remaining = circuit.num_operations
            check_no_idle_cycles(circuit)
class TestCount:
    """This tests `circuit.count`."""

    @valid_type_test(Circuit(1).count)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).count)
    def test_invalid_type(self) -> None:
        pass

    @given(circuits((2, 2, 2, 2)))
    def test_count_op(self, circuit: Circuit) -> None:
        # Walk forward with point() exactly count-many times; one more
        # search past the last hit must fail.
        for op in circuit:
            total = circuit.count(op)
            cursor = (0, 0)
            for _ in range(total):
                cursor = circuit.point(op, cursor)
                cursor = (cursor[0] + 1, 0)
            with pytest.raises((ValueError, IndexError)):
                circuit.point(op, cursor)

    @given(circuits((2, 2, 2, 2)))
    def test_count_gate(self, circuit: Circuit) -> None:
        # Gate counts must agree with a direct scan over the circuit.
        for op in circuit:
            expected = sum(1 for other in circuit if other.gate == op.gate)
            assert circuit.count(op.gate) == expected
class TestPop:
    """This tests `circuit.pop`."""

    @valid_type_test(Circuit(1).pop)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).pop)
    def test_invalid_type(self) -> None:
        pass

    @given(circuits((2, 2, 2, 2)))
    def test_pop_all(self, circuit: Circuit) -> None:
        # Pop every operation; each pop returns an Operation and never
        # leaves an idle cycle behind.
        for _ in range(circuit.num_operations):
            popped = circuit.pop()
            assert isinstance(popped, Operation)
            check_no_idle_cycles(circuit)
        assert circuit.num_operations == 0
class TestBatchPop:
    """This tests `circuit.batch_pop`."""

    @valid_type_test(Circuit(1).batch_pop)
    def test_valid_type(self) -> None:
        pass

    @invalid_type_test(Circuit(1).batch_pop, [IndexError])
    def test_invalid_type(self) -> None:
        pass

    @given(circuits((2, 2, 2, 2)))
    def test_batch_pop_all(self, circuit: Circuit) -> None:
        # Popping every grid point at once empties the circuit and
        # returns the removed region as a new Circuit.
        if circuit.num_operations == 0:
            return
        every_point = [
            (cycle, qudit)
            for cycle in range(circuit.num_cycles)
            for qudit in range(circuit.num_qudits)
        ]
        popped = circuit.batch_pop(every_point)
        assert isinstance(popped, Circuit)
        check_no_idle_cycles(popped)
        assert circuit.num_operations == 0
class TestReplace:
    """This tests `circuit.replace`."""

    @valid_type_test(Circuit(1).replace)
    def test_valid_type(self) -> None:
        pass

    @given(circuits((2, 2, 2, 2)))
    def test_replace(self, circuit: Circuit) -> None:
        # Replacing an operation with itself must not change the unitary.
        if circuit.num_operations == 0:
            return
        target = next(iter(circuit.operations()))
        where = circuit.point(target)
        before = circuit.get_unitary()
        circuit.replace(where, target)
        assert circuit.get_unitary() == before
class TestBatchReplace:
    """This tests `circuit.batch_replace`."""

    @valid_type_test(Circuit(1).batch_replace)
    def test_valid_type(self) -> None:
        pass

    @given(circuits((2, 2, 2, 2)))
    def test_batch_replace(self, circuit: Circuit) -> None:
        # Replace operations with themselves; the unitary must not change.
        ops = list(circuit.operations())
        # NOTE(review): for circuits with more than two ops this keeps
        # only the first one — possibly intended to limit test size, but
        # the slice direction looks inverted; confirm.
        ops = ops[:1] if len(ops) > 2 else ops
        points = [circuit.point(op) for op in ops]
        U = circuit.get_unitary()
        circuit.batch_replace(points, ops)
        assert circuit.get_unitary() == U
class TestReplaceGate:
    """This tests `circuit.replace_gate`."""

    # Bug fix: the type test previously targeted `Circuit(1).replace`
    # (copy-paste from TestReplace); this class tests `replace_gate`.
    @valid_type_test(Circuit(1).replace_gate)
    def test_valid_type(self) -> None:
        pass

    @given(circuits((2, 2, 2, 2)))
    def test_replace(self, circuit: Circuit) -> None:
        # Replacing an op with its own gate/location/params is a no-op.
        if circuit.num_operations == 0:
            return
        op = list(circuit.operations())[0]
        point = circuit.point(op)
        U = circuit.get_unitary()
        circuit.replace_gate(point, op.gate, op.location, op.params)
        assert circuit.get_unitary() == U
class TestReplaceWithCircuit:
    """This tests `circuit.replace_with_circuit`."""

    # Bug fix: the type test previously targeted `Circuit(1).replace`
    # (copy-paste from TestReplace); this class tests
    # `replace_with_circuit`.
    @valid_type_test(Circuit(1).replace_with_circuit)
    def test_valid_type(self) -> None:
        pass

    @given(circuits((2, 2, 2, 2)))
    def test_replace(self, circuit: Circuit) -> None:
        # Replacing an op with the single-op circuit built from it must
        # preserve the unitary.
        if circuit.num_operations == 0:
            return
        op = list(circuit.operations())[0]
        circ = Circuit.from_operation(op)
        point = circuit.point(op)
        U = circuit.get_unitary()
        circuit.replace_with_circuit(point, circ)
        assert circuit.get_unitary() == U
class TestCopy:
    """This tests `circuit.copy`."""

    @given(circuits((2, 2, 2, 2)))
    def test_copy(self, circuit: Circuit) -> None:
        new_circuit = circuit.copy()
        # Bug fix: the comparison result was previously discarded (no
        # `assert`), so this test checked nothing.
        assert new_circuit.get_unitary() == circuit.get_unitary()
class TestBecome:
    """This tests `circuit.become`."""

    @given(circuits((2, 2, 2, 2)))
    def test_become(self, circuit: Circuit) -> None:
        new_circuit = Circuit(circuit.num_qudits, circuit.radixes)
        new_circuit.become(circuit)
        # Bug fix: the comparison result was previously discarded (no
        # `assert`), so this test checked nothing. Also fixed the class
        # docstring, which said `circuit.copy`.
        assert new_circuit.get_unitary() == circuit.get_unitary()
class TestClear:
    """This tests `circuit.clear`."""

    @given(circuits((2, 2, 2, 2)))
    def test_clear(self, circuit: Circuit) -> None:
        circuit.clear()
        # After clearing, every structural property must report empty.
        for observed in (
            circuit.num_operations,
            len(circuit.gate_set),
            circuit.depth,
            circuit.parallelism,
            circuit.num_cycles,
            len(circuit.active_qudits),
        ):
            assert observed == 0
|
{"hexsha": "530f60877c1187e365901f57c99067cae5d58b89", "size": 22840, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/ir/circuit/test_op_gate_circ_methods.py", "max_stars_repo_name": "jkalloor3/bqskit", "max_stars_repo_head_hexsha": "ad34a6eae3c0e62d2bd960cd4cd841ba8e845811", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-05-26T21:32:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T17:48:10.000Z", "max_issues_repo_path": "tests/ir/circuit/test_op_gate_circ_methods.py", "max_issues_repo_name": "jkalloor3/bqskit", "max_issues_repo_head_hexsha": "ad34a6eae3c0e62d2bd960cd4cd841ba8e845811", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2021-05-26T20:17:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T20:04:10.000Z", "max_forks_repo_path": "tests/ir/circuit/test_op_gate_circ_methods.py", "max_forks_repo_name": "jkalloor3/bqskit", "max_forks_repo_head_hexsha": "ad34a6eae3c0e62d2bd960cd4cd841ba8e845811", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-05T16:00:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-08T01:30:06.000Z", "avg_line_length": 32.2598870056, "max_line_length": 80, "alphanum_fraction": 0.6124343257, "include": true, "reason": "import numpy", "num_tokens": 5718}
|
import torch
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
import numpy as np
def torch_to_numpy(y):
    """Detach a tensor from the graph and return it as a numpy array.

    When CUDA is available the tensor is moved to host memory first.
    """
    tensor = y.detach()
    if torch.cuda.is_available():
        tensor = tensor.cpu()
    return tensor.numpy()
def cont_to_binary(y):
    """Threshold continuous scores at 0.5 into a list of 0/1 labels."""
    return [int(score >= 0.5) for score in y]
def recall(y_hat, y):
    """Recall of thresholded predictions `y_hat` against labels `y`."""
    labels = torch_to_numpy(y)
    predictions = cont_to_binary(torch_to_numpy(y_hat))
    return recall_score(labels, predictions)
def f1(y_hat, y):
    """F1 score of thresholded predictions `y_hat` against labels `y`."""
    labels = torch_to_numpy(y)
    predictions = cont_to_binary(torch_to_numpy(y_hat))
    return f1_score(labels, predictions)
def accuracy(y_hat, y):
    """Percentage of predictions (thresholded at > 0.5) matching labels.

    Both arguments are torch tensors; they are detached (and moved off
    the GPU when CUDA is available) before comparison.
    """
    if torch.cuda.is_available():
        y_hat = y_hat.detach().cpu().numpy()
        y = y.detach().cpu().numpy()
    else:
        y_hat = y_hat.detach().numpy()
        y = y.detach().numpy()
    predictions = [1 if value > 0.5 else 0 for value in y_hat]
    correct = sum(1 for pred, label in zip(predictions, y) if pred == label)
    return correct / float(len(predictions)) * 100
def cm(y_hat, y):
    """Confusion-matrix counts for thresholded predictions.

    Both tensors are detached (and moved off the GPU when CUDA is
    available), thresholded at > 0.5, and compared via sklearn's
    confusion_matrix.
    """
    final_y_hat = []
    final_y = []
    if torch.cuda.is_available():
        y_hat = y_hat.detach().cpu().numpy()
        y = y.detach().cpu().numpy()
    else:
        y_hat = y_hat.detach().numpy()
        y = y.detach().numpy()
    final_y_hat += [1 if x > 0.5 else 0 for x in y_hat]
    final_y += [1 if x > 0.5 else 0 for x in y]
    tn, fp, fn, tp = confusion_matrix(final_y, final_y_hat).ravel()
    # Return order is [true positive, true negative, false positive,
    # false negative] — the previous comment listed a different order.
    return [tp, tn, fp, fn]
def average_array(data):
    """Return the arithmetic mean of `data`, or 0 for an empty sequence.

    Bug fix: the previous `data == []` emptiness check only worked for
    lists (a tuple or numpy array never equals `[]`, and comparing a
    numpy array produces an element-wise result); `len` handles any
    sized sequence.
    """
    if len(data) == 0:
        return 0
    return sum(data) / len(data)
def average_arrays(data):
    """Element-wise mean across the first axis of `data`."""
    stacked = np.asarray(data)
    return np.average(stacked, axis=0)
|
{"hexsha": "d9f05f217bd5db29ba806d6fc952d8d43e6fe958", "size": 1550, "ext": "py", "lang": "Python", "max_stars_repo_path": "calc.py", "max_stars_repo_name": "josharnoldjosh/visdom-plot", "max_stars_repo_head_hexsha": "fde386a4c0dc6b842de4eb59487866b0ebf46706", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-15T15:44:04.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-15T15:44:04.000Z", "max_issues_repo_path": "calc.py", "max_issues_repo_name": "josharnoldjosh/visdom-plot", "max_issues_repo_head_hexsha": "fde386a4c0dc6b842de4eb59487866b0ebf46706", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "calc.py", "max_forks_repo_name": "josharnoldjosh/visdom-plot", "max_forks_repo_head_hexsha": "fde386a4c0dc6b842de4eb59487866b0ebf46706", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6785714286, "max_line_length": 88, "alphanum_fraction": 0.6780645161, "include": true, "reason": "import numpy", "num_tokens": 460}
|
# Based on https://colab.research.google.com/github/reiinakano/neural-painters/blob/master/notebooks/generate_stroke_examples.ipynb
from lib import surface, tiledsurface, brush
import torch
import numpy as np
from PIL import Image
def point_on_curve_1(t, cx, cy, sx, sy, x1, y1, x2, y2):
    """Evaluate one point on the stroke curve at parameter t (0..100)."""
    ratio = t / 100.0
    ax, ay = multiply_add(sx, sy, x1, y1, ratio)
    bx, by = multiply_add(cx, cy, x2, y2, ratio)
    dx, dy = difference(ax, ay, bx, by)
    return multiply_add(ax, ay, dx, dy, ratio)
def length_and_normal(x1, y1, x2, y2):
    """Return the distance between two points and the unit vector from
    the first to the second (zero vector when the points coincide)."""
    dx, dy = difference(x1, y1, x2, y2)
    length = np.sqrt(dx * dx + dy * dy)
    if length == 0.0:
        return length, 0.0, 0.0
    return length, dx / length, dy / length
def multiply_add(x1, y1, x2, y2, d):
    """Return (x1, y1) + d * (x2, y2)."""
    sx, sy = multiply(x2, y2, d)
    return add(x1, y1, sx, sy)
def multiply(x, y, d):
    """Scale the vector (x, y) by the scalar d."""
    return x * d, y * d
def add(x1, y1, x2, y2):
    """Component-wise sum of the vectors (x1, y1) and (x2, y2)."""
    return x1 + x2, y1 + y2
def difference(x1, y1, x2, y2):
    """Vector from point (x1, y1) to point (x2, y2)."""
    return x2 - x1, y2 - y1
def midpoint(x1, y1, x2, y2):
    """Midpoint of the segment joining (x1, y1) and (x2, y2)."""
    return (x1 + x2) / 2.0, (y1 + y2) / 2.0
class MyPaintImagesDataLoader:
    """Infinite iterator of randomly generated brush-stroke images.

    Each iteration draws one random curved stroke on a MyPaint tiled
    surface, rasterizes it to a grayscale H x W image, and yields the
    image together with the 9-element action vector that produced it.
    NOTE(review): brush/surface semantics come from the MyPaint `lib`
    package — confirm details against its documentation.
    """

    def __init__(self, H=32, W=32):
        # Seeded generator so the stream of random strokes is reproducible.
        self.rng = np.random.default_rng(42)
        # Fractions of the curve treated as its pressure ramp-up ("head")
        # and ramp-down ("tail") segments.
        self.head = 0.25
        self.tail = 0.75
        self.surface = tiledsurface.Surface()
        with open("gan_stroke_generator/brushes/classic/dry_brush.myb") as brush_file:
            self.brush_info = brush.BrushInfo(brush_file.read())
        self.brush = brush.Brush(self.brush_info)
        self.H = H
        self.W = W
        # Length of one action vector: start/end/control xy, two
        # pressures, and size (see __iter__).
        self.num_action = 9
        # Effectively unbounded dataset length.
        self.num_images = int(10e9)

    def _stroke_to(self, x, y, pressure):
        """Move the brush to (x, y) at the given pressure and commit."""
        duration = 0.1
        self.brush.stroke_to(
            self.surface.backend, x, y, pressure, 0.0, 0.0, duration, 0.0, 0.0, 0.0
        )
        # Commit the pending stroke segment and open a new atomic group.
        self.surface.end_atomic()
        self.surface.begin_atomic()

    def _line_settings(self, entry_pressure, pressure):
        """Midpoint pressure and the pressure deltas for head and tail."""
        p2 = (entry_pressure + pressure) / 2
        prange1 = p2 - entry_pressure
        prange2 = pressure - p2
        return p2, prange1, prange2

    def curve(
        self, control_x, control_y, start_x, start_y, ex, ey, entry_pressure, pressure
    ):
        """Trace a curved stroke from (start_x, start_y) to (ex, ey).

        Pressure ramps from entry_pressure over the head segment, holds
        at the midpoint pressure through the middle, then ramps toward
        `pressure` over the tail. Returns the final pressure applied.
        """
        (
            midpoint_p,
            prange1,
            prange2,
        ) = self._line_settings(entry_pressure, pressure)
        points_in_curve = 100
        # Reflect the control point through the segment midpoint to get
        # the effective curve control.
        mx, my = midpoint(start_x, start_y, ex, ey)
        length, nx, ny = length_and_normal(mx, my, control_x, control_y)
        cx, cy = multiply_add(mx, my, nx, ny, length * 2)
        x1, y1 = difference(start_x, start_y, cx, cy)
        x2, y2 = difference(cx, cy, ex, ey)
        head = points_in_curve * self.head
        head_range = int(head) + 1
        tail = points_in_curve * self.tail
        tail_range = int(tail) + 1
        tail_length = points_in_curve - tail
        # Beginning
        px, py = point_on_curve_1(1, cx, cy, start_x, start_y, x1, y1, x2, y2)
        length, nx, ny = length_and_normal(start_x, start_y, px, py)
        bx, by = multiply_add(start_x, start_y, nx, ny, 0.25)
        self._stroke_to(bx, by, entry_pressure)
        pressure = abs(1 / head * prange1 + entry_pressure)
        self._stroke_to(px, py, pressure)
        for i in range(2, head_range):
            px, py = point_on_curve_1(i, cx, cy, start_x, start_y, x1, y1, x2, y2)
            pressure = abs(i / head * prange1 + entry_pressure)
            self._stroke_to(px, py, pressure)
        # Middle
        for i in range(head_range, tail_range):
            px, py = point_on_curve_1(i, cx, cy, start_x, start_y, x1, y1, x2, y2)
            self._stroke_to(px, py, midpoint_p)
        # End
        for i in range(tail_range, points_in_curve + 1):
            px, py = point_on_curve_1(i, cx, cy, start_x, start_y, x1, y1, x2, y2)
            pressure = abs((i - tail) / tail_length * prange2 + midpoint_p)
            self._stroke_to(px, py, pressure)
        return pressure

    def draw_stroke(
        self,
        start_x,
        start_y,
        end_x,
        end_y,
        control_x,
        control_y,
        entry_pressure,
        pressure,
        size,
        color_rgb,
    ):
        """Draw one stroke; coordinates are given in [0, 1] and scaled
        to pixel space here."""
        start_x = start_x * self.H
        start_y = start_y * self.W
        end_x = end_x * self.H
        end_y = end_y * self.W
        control_x = control_x * self.H
        control_y = control_y * self.W
        self.brush.brushinfo.set_color_rgb(color_rgb)
        self.brush.brushinfo.set_base_value("radius_logarithmic", size)
        # Move brush to starting point without leaving it on the canvas.
        self._stroke_to(start_x, start_y, 0)
        self.curve(
            control_x,
            control_y,
            start_x,
            start_y,
            end_x,
            end_y,
            entry_pressure,
            pressure,
        )
        # Relieve brush pressure for next jump
        self._stroke_to(end_x, end_y, 0)
        self.surface.end_atomic()
        self.surface.begin_atomic()

    def get_mypaint_image(
        self,
        start_x,
        start_y,
        end_x,
        end_y,
        control_x,
        control_y,
        entry_pressure,
        pressure,
        size,
        color_rgb,
    ):
        """Draw one stroke, rasterize the surface, then clear it for the
        next stroke. Returns the rendered image."""
        self.draw_stroke(
            start_x,
            start_y,
            end_x,
            end_y,
            control_x,
            control_y,
            entry_pressure,
            pressure,
            size,
            color_rgb,
        )
        rect = [0, 0, self.H, self.W]
        scanline_strips = surface.scanline_strips_iter(self.surface, rect, single_tile_pattern=True)
        img = next(scanline_strips)
        # Reset the canvas so strokes do not accumulate between samples.
        self.surface.clear()
        self.surface.end_atomic()
        self.surface.begin_atomic()
        return img

    def random_action(self):
        """Sample one uniform action vector in [0, 1)^num_action."""
        return self.rng.uniform(size=[self.num_action])

    def __len__(self):
        return self.num_images

    def __iter__(self):
        # Yields dicts with a (1, 1, H, W) float stroke image in [0, 1]
        # and the (1, num_action) action that produced it.
        for _ in range(self.num_images):
            action = self.random_action()
            img = self.get_mypaint_image(
                start_x=action[0],
                start_y=action[1],
                end_x=action[2],
                end_y=action[3],
                control_x=action[4],
                control_y=action[5],
                pressure=action[6],
                entry_pressure=action[7],
                size=action[8],
                color_rgb=[1, 1, 1],
            )
            img = Image.fromarray(img).convert('L')
            # We need to create batch of size 1
            img = np.expand_dims(img, axis=0)
            # We need to create a channel for img
            img = np.expand_dims(img, axis=0)
            action = np.expand_dims(action, axis=0)
            yield {
                "stroke": torch.from_numpy(img.astype(float) / 255.0),
                "action": torch.from_numpy(action),
            }
|
{"hexsha": "0a6eb0aa97bb599028187806421d967b0b35d7f0", "size": 7085, "ext": "py", "lang": "Python", "max_stars_repo_path": "gan_stroke_generator/mypaint_images_data_loader.py", "max_stars_repo_name": "mxpoliakov/PaintTransformerGAN", "max_stars_repo_head_hexsha": "be845607ad1d839319ab9d11b9c6de3f7c11ded0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gan_stroke_generator/mypaint_images_data_loader.py", "max_issues_repo_name": "mxpoliakov/PaintTransformerGAN", "max_issues_repo_head_hexsha": "be845607ad1d839319ab9d11b9c6de3f7c11ded0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gan_stroke_generator/mypaint_images_data_loader.py", "max_forks_repo_name": "mxpoliakov/PaintTransformerGAN", "max_forks_repo_head_hexsha": "be845607ad1d839319ab9d11b9c6de3f7c11ded0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2768595041, "max_line_length": 131, "alphanum_fraction": 0.5525758645, "include": true, "reason": "import numpy", "num_tokens": 2014}
|
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Simulation/OP.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Simulation/OP
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from copy import deepcopy
from ._frozen import FrozenClass
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Simulation.OP.get_machine_from_parent import get_machine_from_parent
except ImportError as error:
get_machine_from_parent = error
from numpy import isnan
from ._check import InitUnKnowClassError
class OP(FrozenClass):
"""Define the Operating Point of the simulation"""
VERSION = 1
# cf Methods.Simulation.OP.get_machine_from_parent
if isinstance(get_machine_from_parent, ImportError):
get_machine_from_parent = property(
fget=lambda x: raise_(
ImportError(
"Can't use OP method get_machine_from_parent: "
+ str(get_machine_from_parent)
)
)
)
else:
get_machine_from_parent = get_machine_from_parent
# generic save method is available in all object
save = save
# get_logger method is available in all object
get_logger = get_logger
def __init__(
self,
N0=None,
felec=None,
Tem_av_ref=None,
Pem_av_ref=None,
Pem_av_in=None,
efficiency=None,
init_dict=None,
init_str=None,
):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
if init_str is not None: # Load from a file
init_dict = load_init_dict(init_str)[1]
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "N0" in list(init_dict.keys()):
N0 = init_dict["N0"]
if "felec" in list(init_dict.keys()):
felec = init_dict["felec"]
if "Tem_av_ref" in list(init_dict.keys()):
Tem_av_ref = init_dict["Tem_av_ref"]
if "Pem_av_ref" in list(init_dict.keys()):
Pem_av_ref = init_dict["Pem_av_ref"]
if "Pem_av_in" in list(init_dict.keys()):
Pem_av_in = init_dict["Pem_av_in"]
if "efficiency" in list(init_dict.keys()):
efficiency = init_dict["efficiency"]
# Set the properties (value check and convertion are done in setter)
self.parent = None
self.N0 = N0
self.felec = felec
self.Tem_av_ref = Tem_av_ref
self.Pem_av_ref = Pem_av_ref
self.Pem_av_in = Pem_av_in
self.efficiency = efficiency
# The class is frozen, for now it's impossible to add new properties
self._freeze()
def __str__(self):
"""Convert this object in a readeable string (for print)"""
OP_str = ""
if self.parent is None:
OP_str += "parent = None " + linesep
else:
OP_str += "parent = " + str(type(self.parent)) + " object" + linesep
OP_str += "N0 = " + str(self.N0) + linesep
OP_str += "felec = " + str(self.felec) + linesep
OP_str += "Tem_av_ref = " + str(self.Tem_av_ref) + linesep
OP_str += "Pem_av_ref = " + str(self.Pem_av_ref) + linesep
OP_str += "Pem_av_in = " + str(self.Pem_av_in) + linesep
OP_str += "efficiency = " + str(self.efficiency) + linesep
return OP_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
if other.N0 != self.N0:
return False
if other.felec != self.felec:
return False
if other.Tem_av_ref != self.Tem_av_ref:
return False
if other.Pem_av_ref != self.Pem_av_ref:
return False
if other.Pem_av_in != self.Pem_av_in:
return False
if other.efficiency != self.efficiency:
return False
return True
def compare(self, other, name="self", ignore_list=None, is_add_value=False):
"""Compare two objects and return list of differences"""
if ignore_list is None:
ignore_list = list()
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
if (
other._N0 is not None
and self._N0 is not None
and isnan(other._N0)
and isnan(self._N0)
):
pass
elif other._N0 != self._N0:
if is_add_value:
val_str = " (self=" + str(self._N0) + ", other=" + str(other._N0) + ")"
diff_list.append(name + ".N0" + val_str)
else:
diff_list.append(name + ".N0")
if (
other._felec is not None
and self._felec is not None
and isnan(other._felec)
and isnan(self._felec)
):
pass
elif other._felec != self._felec:
if is_add_value:
val_str = (
" (self=" + str(self._felec) + ", other=" + str(other._felec) + ")"
)
diff_list.append(name + ".felec" + val_str)
else:
diff_list.append(name + ".felec")
if (
other._Tem_av_ref is not None
and self._Tem_av_ref is not None
and isnan(other._Tem_av_ref)
and isnan(self._Tem_av_ref)
):
pass
elif other._Tem_av_ref != self._Tem_av_ref:
if is_add_value:
val_str = (
" (self="
+ str(self._Tem_av_ref)
+ ", other="
+ str(other._Tem_av_ref)
+ ")"
)
diff_list.append(name + ".Tem_av_ref" + val_str)
else:
diff_list.append(name + ".Tem_av_ref")
if (
other._Pem_av_ref is not None
and self._Pem_av_ref is not None
and isnan(other._Pem_av_ref)
and isnan(self._Pem_av_ref)
):
pass
elif other._Pem_av_ref != self._Pem_av_ref:
if is_add_value:
val_str = (
" (self="
+ str(self._Pem_av_ref)
+ ", other="
+ str(other._Pem_av_ref)
+ ")"
)
diff_list.append(name + ".Pem_av_ref" + val_str)
else:
diff_list.append(name + ".Pem_av_ref")
if (
other._Pem_av_in is not None
and self._Pem_av_in is not None
and isnan(other._Pem_av_in)
and isnan(self._Pem_av_in)
):
pass
elif other._Pem_av_in != self._Pem_av_in:
if is_add_value:
val_str = (
" (self="
+ str(self._Pem_av_in)
+ ", other="
+ str(other._Pem_av_in)
+ ")"
)
diff_list.append(name + ".Pem_av_in" + val_str)
else:
diff_list.append(name + ".Pem_av_in")
if (
other._efficiency is not None
and self._efficiency is not None
and isnan(other._efficiency)
and isnan(self._efficiency)
):
pass
elif other._efficiency != self._efficiency:
if is_add_value:
val_str = (
" (self="
+ str(self._efficiency)
+ ", other="
+ str(other._efficiency)
+ ")"
)
diff_list.append(name + ".efficiency" + val_str)
else:
diff_list.append(name + ".efficiency")
# Filter ignore differences
diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
return diff_list
def __sizeof__(self):
"""Return the size in memory of the object (including all subobject)"""
S = 0 # Full size of the object
S += getsizeof(self.N0)
S += getsizeof(self.felec)
S += getsizeof(self.Tem_av_ref)
S += getsizeof(self.Pem_av_ref)
S += getsizeof(self.Pem_av_in)
S += getsizeof(self.efficiency)
return S
def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):
"""
Convert this object in a json serializable dict (can be use in __init__).
type_handle_ndarray: int
How to handle ndarray (0: tolist, 1: copy, 2: nothing)
keep_function : bool
True to keep the function object, else return str
Optional keyword input parameter is for internal use only
and may prevent json serializability.
"""
OP_dict = dict()
OP_dict["N0"] = self.N0
OP_dict["felec"] = self.felec
OP_dict["Tem_av_ref"] = self.Tem_av_ref
OP_dict["Pem_av_ref"] = self.Pem_av_ref
OP_dict["Pem_av_in"] = self.Pem_av_in
OP_dict["efficiency"] = self.efficiency
# The class name is added to the dict for deserialisation purpose
OP_dict["__class__"] = "OP"
return OP_dict
def copy(self):
"""Creates a deepcopy of the object"""
# Handle deepcopy of all the properties
N0_val = self.N0
felec_val = self.felec
Tem_av_ref_val = self.Tem_av_ref
Pem_av_ref_val = self.Pem_av_ref
Pem_av_in_val = self.Pem_av_in
efficiency_val = self.efficiency
# Creates new object of the same type with the copied properties
obj_copy = type(self)(
N0=N0_val,
felec=felec_val,
Tem_av_ref=Tem_av_ref_val,
Pem_av_ref=Pem_av_ref_val,
Pem_av_in=Pem_av_in_val,
efficiency=efficiency_val,
)
return obj_copy
def _set_None(self):
"""Set all the properties to None (except pyleecan object)"""
self.N0 = None
self.felec = None
self.Tem_av_ref = None
self.Pem_av_ref = None
self.Pem_av_in = None
self.efficiency = None
def _get_N0(self):
"""getter of N0"""
return self._N0
def _set_N0(self, value):
"""setter of N0"""
check_var("N0", value, "float")
self._N0 = value
N0 = property(
fget=_get_N0,
fset=_set_N0,
doc=u"""Rotor speed
:Type: float
""",
)
def _get_felec(self):
"""getter of felec"""
return self._felec
def _set_felec(self, value):
"""setter of felec"""
check_var("felec", value, "float")
self._felec = value
felec = property(
fget=_get_felec,
fset=_set_felec,
doc=u"""Electrical Frequency
:Type: float
""",
)
def _get_Tem_av_ref(self):
"""getter of Tem_av_ref"""
return self._Tem_av_ref
def _set_Tem_av_ref(self, value):
"""setter of Tem_av_ref"""
check_var("Tem_av_ref", value, "float")
self._Tem_av_ref = value
Tem_av_ref = property(
fget=_get_Tem_av_ref,
fset=_set_Tem_av_ref,
doc=u"""Output average electromagnetic torque
:Type: float
""",
)
def _get_Pem_av_ref(self):
"""getter of Pem_av_ref"""
return self._Pem_av_ref
def _set_Pem_av_ref(self, value):
"""setter of Pem_av_ref"""
check_var("Pem_av_ref", value, "float")
self._Pem_av_ref = value
Pem_av_ref = property(
fget=_get_Pem_av_ref,
fset=_set_Pem_av_ref,
doc=u"""Output average Electromagnetic Power
:Type: float
""",
)
def _get_Pem_av_in(self):
"""getter of Pem_av_in"""
return self._Pem_av_in
def _set_Pem_av_in(self, value):
"""setter of Pem_av_in"""
check_var("Pem_av_in", value, "float")
self._Pem_av_in = value
Pem_av_in = property(
fget=_get_Pem_av_in,
fset=_set_Pem_av_in,
doc=u"""Input average power (e.g. for generator mode)
:Type: float
""",
)
def _get_efficiency(self):
"""getter of efficiency"""
return self._efficiency
def _set_efficiency(self, value):
"""setter of efficiency"""
check_var("efficiency", value, "float")
self._efficiency = value
efficiency = property(
fget=_get_efficiency,
fset=_set_efficiency,
doc=u"""Efficiency
:Type: float
""",
)
|
{"hexsha": "7b25b9f195638c9ec6fda5e889bbd21e50a01ac7", "size": 14008, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyleecan/Classes/OP.py", "max_stars_repo_name": "Eomys/Pyleecan", "max_stars_repo_head_hexsha": "4d7f0cbabf0311006963e7a2f435db2ecd901118", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-11-27T10:14:34.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-20T11:30:32.000Z", "max_issues_repo_path": "pyleecan/Classes/OP.py", "max_issues_repo_name": "Eomys/Pyleecan", "max_issues_repo_head_hexsha": "4d7f0cbabf0311006963e7a2f435db2ecd901118", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyleecan/Classes/OP.py", "max_forks_repo_name": "Eomys/Pyleecan", "max_forks_repo_head_hexsha": "4d7f0cbabf0311006963e7a2f435db2ecd901118", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.96, "max_line_length": 105, "alphanum_fraction": 0.5379069103, "include": true, "reason": "from numpy", "num_tokens": 3345}
|
import random
from itertools import product
from collections import namedtuple
import numpy as np
import tensorflow as tf
from neupy import layers
from neupy.utils import asfloat, shape_to_tuple
from neupy.layers.convolutions import conv_output_shape, deconv_output_shape
from neupy.exceptions import LayerConnectionError
from base import BaseTestCase
class ConvLayersTestCase(BaseTestCase):
    """Tests for ``layers.Convolution``: parameter shapes, output-shape
    inference, stride/padding validation, and exact forward-pass values
    on small hand-checked inputs."""

    def get_shape(self, value):
        """Evaluate a tensor's runtime shape and return it as a tuple."""
        shape = self.eval(tf.shape(value))
        return tuple(shape)

    def test_convolution_params(self):
        """Weight and bias variables get the expected shapes."""
        inp = layers.Input((5, 5, 1))
        conv = layers.Convolution((2, 2, 6))

        # Propagate data through the network in
        # order to trigger initialization
        (inp >> conv).outputs

        self.assertEqual((2, 2, 1, 6), self.get_shape(conv.weight))
        self.assertEqual((6,), self.get_shape(conv.bias))

    def test_conv_shapes(self):
        """Predicted output shape matches the actually propagated shape."""
        paddings = ['valid', 'same']
        strides = [(1, 1), (2, 1), (2, 2)]
        x = asfloat(np.random.random((20, 12, 11, 2)))

        for stride, padding in product(strides, paddings):
            network = layers.join(
                layers.Input((12, 11, 2)),
                layers.Convolution((3, 4, 5), padding=padding, stride=stride),
            )
            y = self.eval(network.output(x))
            self.assertShapesEqual(
                y.shape[1:],
                network.output_shape[1:],
                msg='padding={} and stride={}'.format(padding, stride),
            )

    def test_valid_strides(self):
        """Scalar and one-element strides are normalized to 2-tuples."""
        Case = namedtuple("Case", "stride expected_output")
        testcases = (
            Case(stride=(4, 4), expected_output=(4, 4)),
            Case(stride=(4,), expected_output=(4, 1)),
            Case(stride=4, expected_output=(4, 4)),
        )

        for testcase in testcases:
            conv = layers.Convolution(
                (2, 3, 1), stride=testcase.stride)
            msg = "Input stride size: {}".format(testcase.stride)
            self.assertEqual(
                testcase.expected_output, conv.stride, msg=msg)

    def test_conv_invalid_strides(self):
        """Negative, zero, or 3-element strides are rejected."""
        invalid_strides = (
            (4, 4, 4),
            -10,
            (-5, -5),
            (-5, 5),
            (-5, 0),
        )

        for stride in invalid_strides:
            msg = "Input stride size: {}".format(stride)
            with self.assertRaises(ValueError, msg=msg):
                layers.Convolution((2, 3, 1), stride=stride)

    def test_valid_padding(self):
        """Named paddings (any case), non-negative ints and pairs are accepted."""
        valid_paddings = ('VALID', 'SAME', 'same', 'valid', 10, 1, (7, 1))
        for padding in valid_paddings:
            layers.Convolution((2, 3, 1), padding=padding)

    def test_invalid_padding(self):
        """Unknown names and negative values are rejected."""
        invalid_paddings = ('invalid mode', -10, (10, -5))
        for padding in invalid_paddings:
            msg = "Padding: {}".format(padding)

            with self.assertRaises(ValueError, msg=msg):
                layers.Convolution((2, 3, 1), padding=padding)

    def test_conv_output_shape_func_exceptions(self):
        """conv_output_shape validates its stride, filter and padding args."""
        with self.assertRaises(ValueError):
            # Wrong stride value
            conv_output_shape(
                dimension_size=5, filter_size=5,
                padding='VALID', stride='not int')

        with self.assertRaises(ValueError):
            # Wrong filter size value
            conv_output_shape(
                dimension_size=5, filter_size='not int',
                padding='SAME', stride=5)

        # Raw string: \S is a regex escape, not a python string escape.
        with self.assertRaisesRegexp(ValueError, r"unknown \S+ padding value"):
            # Wrong padding value
            conv_output_shape(
                dimension_size=5, filter_size=5,
                padding=1.5, stride=5,
            )

    def test_conv_output_shape_int_padding(self):
        """Integer padding enters the output-size formula directly."""
        output_shape = conv_output_shape(
            dimension_size=10,
            padding=3,
            filter_size=5,
            stride=5,
        )
        self.assertEqual(output_shape, 3)

    def test_conv_unknown_dim_size(self):
        """Unknown input dimension propagates as None."""
        shape = conv_output_shape(
            dimension_size=None, filter_size=5,
            padding='VALID', stride=5,
        )
        self.assertEqual(shape, None)

    def test_conv_invalid_padding_exception(self):
        """Error messages for each invalid padding form are specific."""
        error_msg = "greater or equal to zero"
        with self.assertRaisesRegexp(ValueError, error_msg):
            layers.Convolution((1, 3, 3), padding=-1)

        error_msg = "Tuple .+ greater or equal to zero"
        with self.assertRaisesRegexp(ValueError, error_msg):
            layers.Convolution((1, 3, 3), padding=(2, -1))

        with self.assertRaisesRegexp(ValueError, "invalid string value"):
            layers.Convolution((1, 3, 3), padding='NOT_SAME')

        with self.assertRaisesRegexp(ValueError, "contains two elements"):
            layers.Convolution((1, 3, 3), padding=(3, 3, 3))

    def test_conv_invalid_input_shape(self):
        """Convolution cannot follow a rank-1 input."""
        with self.assertRaises(LayerConnectionError):
            layers.join(
                layers.Input(10),
                layers.Convolution((1, 3, 3)),
            )

    def test_conv_with_custom_int_padding(self):
        """Forward pass with int padding matches the hand-computed result."""
        network = layers.join(
            layers.Input((5, 5, 1)),
            layers.Convolution((3, 3, 1), bias=0, weight=1, padding=2),
        )

        x = asfloat(np.ones((1, 5, 5, 1)))
        expected_output = np.array([
            [1, 2, 3, 3, 3, 2, 1],
            [2, 4, 6, 6, 6, 4, 2],
            [3, 6, 9, 9, 9, 6, 3],
            [3, 6, 9, 9, 9, 6, 3],
            [3, 6, 9, 9, 9, 6, 3],
            [2, 4, 6, 6, 6, 4, 2],
            [1, 2, 3, 3, 3, 2, 1],
        ]).reshape((1, 7, 7, 1))

        actual_output = self.eval(network.output(x))
        np.testing.assert_array_almost_equal(expected_output, actual_output)

    def test_conv_with_custom_tuple_padding(self):
        """Asymmetric (height, width) padding pads only the chosen axes."""
        inp = layers.Input((5, 5, 1))
        conv = layers.Convolution((3, 3, 1), bias=0, weight=1, padding=(0, 2))
        network = (inp >> conv)
        network.outputs

        x = asfloat(np.ones((1, 5, 5, 1)))
        expected_output = np.array([
            [3, 6, 9, 9, 9, 6, 3],
            [3, 6, 9, 9, 9, 6, 3],
            [3, 6, 9, 9, 9, 6, 3],
        ]).reshape((1, 3, 7, 1))

        actual_output = self.eval(network.output(x))
        np.testing.assert_array_almost_equal(expected_output, actual_output)
        self.assertShapesEqual(network.output_shape, (None, 3, 7, 1))

    def test_conv_without_bias(self):
        """bias=None disables the bias term entirely."""
        inp = layers.Input((5, 5, 1))
        conv = layers.Convolution((3, 3, 1), bias=None, weight=1)
        network = inp >> conv
        network.outputs

        x = asfloat(np.ones((1, 5, 5, 1)))
        expected_output = 9 * np.ones((1, 3, 3, 1))
        actual_output = self.eval(network.output(x))
        np.testing.assert_array_almost_equal(expected_output, actual_output)

    def test_conv_unknown_input_width_and_height(self):
        """None spatial dims stay unknown statically but resolve at runtime."""
        network = layers.join(
            layers.Input((None, None, 3)),
            layers.Convolution((3, 3, 5)),
        )
        self.assertShapesEqual(network.output_shape, (None, None, None, 5))

        input_value = asfloat(np.ones((1, 12, 12, 3)))
        actual_output = self.eval(network.output(input_value))
        self.assertEqual(actual_output.shape, (1, 10, 10, 5))

        input_value = asfloat(np.ones((1, 21, 21, 3)))
        actual_output = self.eval(network.output(input_value))
        self.assertEqual(actual_output.shape, (1, 19, 19, 5))

    def test_dilated_convolution(self):
        """Dilation=2 samples every other pixel; values are hand-checked."""
        network = layers.join(
            layers.Input((6, 6, 1)),
            layers.Convolution((3, 3, 1), dilation=2, weight=1, bias=None),
        )

        input_value = asfloat(np.arange(36).reshape(1, 6, 6, 1))
        actual_output = self.eval(network.output(input_value))

        self.assertShapesEqual(actual_output.shape, (1, 2, 2, 1))
        self.assertShapesEqual(
            actual_output.shape[1:],
            network.output_shape[1:])

        actual_output = actual_output[0, :, :, 0]
        expected_output = np.array([
            [126, 135],  # every row value adds +1 per filter value (+9)
            [180, 189],  # every col value adds +6 per filter value (+54)
        ])
        np.testing.assert_array_almost_equal(actual_output, expected_output)

    def test_convolution_repr(self):
        """repr lists all configuration arguments."""
        layer = layers.Convolution((3, 3, 10), name='conv')
        self.assertEqual(
            str(layer),
            (
                "Convolution((3, 3, 10), padding='VALID', stride=(1, 1), "
                "dilation=(1, 1), weight=HeNormal(gain=2), bias=Constant(0), "
                "name='conv')"
            )
        )

    def test_conv_output_shape_when_input_unknown(self):
        """A block with no input layer still infers its channel count."""
        block = layers.join(
            layers.Convolution((3, 3, 32)),
            layers.Relu(),
            layers.BatchNorm(),
        )
        self.assertShapesEqual(block.input_shape, None)
        self.assertShapesEqual(block.output_shape, (None, None, None, 32))
class DeconvolutionTestCase(BaseTestCase):
    """Tests for ``layers.Deconvolution``: shape round-trips with the
    matching ``Convolution``, padding variants, output-shape helper,
    and randomized shape-consistency checks."""

    def test_deconvolution(self):
        """Deconvolution restores the spatial size of a valid-padded conv."""
        network = layers.join(
            layers.Input((10, 10, 3)),
            layers.Convolution((3, 3, 7)),
            layers.Deconvolution((3, 3, 4)),
        )

        shapes = network.output_shapes_per_layer
        shapes = {l: shape_to_tuple(s) for l, s in shapes.items()}
        self.assertDictEqual(
            shapes, {
                network.layers[0]: (None, 10, 10, 3),
                network.layers[1]: (None, 8, 8, 7),
                network.layers[2]: (None, 10, 10, 4),
            }
        )

        input_value = asfloat(np.random.random((1, 10, 10, 3)))
        actual_output = self.eval(network.output(input_value))
        self.assertEqual(actual_output.shape, (1, 10, 10, 4))

    def test_deconvolution_same_padding(self):
        """'same' padding keeps spatial dims through conv and deconv."""
        network = layers.join(
            layers.Input((10, 10, 3)),
            layers.Convolution((3, 3, 7), padding='same'),
            layers.Deconvolution((3, 3, 4), padding='same'),
        )

        shapes = network.output_shapes_per_layer
        shapes = {l: shape_to_tuple(s) for l, s in shapes.items()}
        self.assertDictEqual(
            shapes, {
                network.layers[0]: (None, 10, 10, 3),
                network.layers[1]: (None, 10, 10, 7),
                network.layers[2]: (None, 10, 10, 4),
            }
        )

        input_value = asfloat(np.random.random((1, 10, 10, 3)))
        actual_output = self.eval(network.output(input_value))
        self.assertEqual(actual_output.shape, (1, 10, 10, 4))

    def test_deconvolution_int_padding(self):
        """Symmetric int padding round-trips back to the input size."""
        network = layers.join(
            layers.Input((10, 10, 3)),
            layers.Convolution((3, 3, 7), padding=9),
            layers.Deconvolution((3, 3, 4), padding=9),
        )

        shapes = network.output_shapes_per_layer
        shapes = {l: shape_to_tuple(s) for l, s in shapes.items()}
        self.assertDictEqual(
            shapes, {
                network.layers[0]: (None, 10, 10, 3),
                network.layers[1]: (None, 26, 26, 7),
                network.layers[2]: (None, 10, 10, 4),
            }
        )

        input_value = asfloat(np.random.random((1, 10, 10, 3)))
        actual_output = self.eval(network.output(input_value))
        self.assertEqual(actual_output.shape, (1, 10, 10, 4))

    def test_deconvolution_tuple_padding(self):
        """Asymmetric (height, width) padding round-trips as well."""
        network = layers.join(
            layers.Input((10, 10, 3)),
            layers.Convolution((3, 3, 7), padding=(9, 3)),
            layers.Deconvolution((3, 3, 4), padding=(9, 3)),
        )

        shapes = network.output_shapes_per_layer
        shapes = {l: shape_to_tuple(s) for l, s in shapes.items()}
        # FIX: was assertSequenceEqual, which cannot compare two dicts
        # value-by-value; use assertDictEqual like the sibling tests.
        self.assertDictEqual(
            shapes, {
                network.layers[0]: (None, 10, 10, 3),
                network.layers[1]: (None, 26, 14, 7),
                network.layers[2]: (None, 10, 10, 4),
            }
        )

        input_value = asfloat(np.random.random((1, 10, 10, 3)))
        actual_output = self.eval(network.output(input_value))
        self.assertEqual(actual_output.shape, (1, 10, 10, 4))

    def test_deconv_unknown_input_width_and_height(self):
        """Unknown spatial dims stay unknown statically, resolve at runtime."""
        network = layers.join(
            layers.Input((None, None, 3)),
            layers.Convolution((3, 3, 7)),
            layers.Deconvolution((3, 3, 4)),
        )

        shapes = network.output_shapes_per_layer
        shapes = {l: shape_to_tuple(s) for l, s in shapes.items()}
        self.assertDictEqual(
            shapes, {
                network.layers[0]: (None, None, None, 3),
                network.layers[1]: (None, None, None, 7),
                network.layers[2]: (None, None, None, 4),
            }
        )

        input_value = asfloat(np.random.random((1, 10, 10, 3)))
        actual_output = self.eval(network.output(input_value))
        self.assertEqual(actual_output.shape, (1, 10, 10, 4))

        input_value = asfloat(np.random.random((1, 7, 7, 3)))
        actual_output = self.eval(network.output(input_value))
        self.assertEqual(actual_output.shape, (1, 7, 7, 4))

    def test_deconv_output_shape(self):
        """deconv_output_shape inverts the conv output-size formula."""
        self.assertEqual(None, deconv_output_shape(None, 3, 'same', 1))
        self.assertEqual(12, deconv_output_shape(10, 3, 'valid', 1))
        self.assertEqual(16, deconv_output_shape(10, 7, 'valid', 1))
        self.assertEqual(10, deconv_output_shape(10, 3, 'same', 1))

        self.assertEqual(14, deconv_output_shape(4, 5, 'valid', 3))
        self.assertEqual(12, deconv_output_shape(4, 3, 'same', 3))
        self.assertEqual(12, deconv_output_shape(4, 7, 'same', 3))

    def test_deconv_output_shape_exception(self):
        """Invalid padding and unsupported dilation are rejected."""
        # Raw string: \S is a regex escape, not a python string escape.
        with self.assertRaisesRegexp(ValueError, r"unknown \S+ padding"):
            deconv_output_shape(10, 3, padding='xxx', stride=1)

        with self.assertRaisesRegexp(ValueError, "doesn't support dilation"):
            deconv_output_shape(10, 3, padding='valid', stride=1, dilation=2)

    def test_deconvolution_for_random_cases(self):
        # A few random cases will check if output shape computed from
        # the network is the same as the shape that we get after we
        # propagated input through the network.
        for test_id in range(30):
            width = random.randint(7, 20)
            height = random.randint(7, 20)

            fh = random.randint(1, 7)
            fw = random.randint(1, 7)

            pad = random.choice([
                'valid',
                'same',
                random.randint(0, 10),
                (
                    random.randint(0, 10),
                    random.randint(0, 10),
                ),
            ])
            stride = random.choice([
                random.randint(1, 4),
                (
                    random.randint(1, 4),
                    random.randint(1, 4),
                ),
            ])

            print('\n------------')
            print("Test case #{}".format(test_id))
            print('------------')
            print("Image shape: {}x{}".format(height, width))
            print("Filter shape: {}x{}".format(fh, fw))
            print("Padding: {}".format(pad))
            print("Stride: {}".format(stride))

            network = layers.join(
                layers.Input((height, width, 1)),
                layers.Convolution((fh, fw, 2), padding=pad, stride=stride),
                layers.Deconvolution((fh, fw, 1), padding=pad, stride=stride),
            )

            input_value = asfloat(np.random.random((1, height, width, 1)))
            actual_output = self.eval(network.output(input_value))
            self.assertEqual(actual_output.shape[1:], network.output_shape[1:])

    def test_deconvolution_repr(self):
        """repr lists all configuration arguments."""
        layer = layers.Deconvolution((3, 3, 10), name='deconv')
        self.assertEqual(
            str(layer),
            (
                "Deconvolution((3, 3, 10), padding='VALID', stride=(1, 1), "
                "weight=HeNormal(gain=2), bias=Constant(0), name='deconv')"
            )
        )
|
{"hexsha": "31228390ffd504c6e9eabf6f98d46fa2f607a7d2", "size": 16048, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/layers/test_convolution.py", "max_stars_repo_name": "FrostByte266/neupy", "max_stars_repo_head_hexsha": "4b7127e5e4178b0cce023ba36542f5ad3f1d798c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 801, "max_stars_repo_stars_event_min_datetime": "2015-09-23T09:24:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T19:19:03.000Z", "max_issues_repo_path": "tests/layers/test_convolution.py", "max_issues_repo_name": "FrostByte266/neupy", "max_issues_repo_head_hexsha": "4b7127e5e4178b0cce023ba36542f5ad3f1d798c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 277, "max_issues_repo_issues_event_min_datetime": "2015-09-22T19:48:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:25:32.000Z", "max_forks_repo_path": "tests/layers/test_convolution.py", "max_forks_repo_name": "FrostByte266/neupy", "max_forks_repo_head_hexsha": "4b7127e5e4178b0cce023ba36542f5ad3f1d798c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 194, "max_forks_repo_forks_event_min_datetime": "2015-09-23T15:03:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T13:54:46.000Z", "avg_line_length": 35.8214285714, "max_line_length": 79, "alphanum_fraction": 0.55639332, "include": true, "reason": "import numpy", "num_tokens": 4106}
|
"""
    SphereGenerator()

Zero-argument convenience constructor: delegates to the existing
`SphereGenerator` method taking a single (empty tuple) argument.
"""
function SphereGenerator()
    return SphereGenerator(())
end
"""
    ball_on_support(obj::SphereGenerator, arg0::List)

JavaCall wrapper: invokes the Java method `ballOnSupport` on `obj`
with `arg0` and returns the resulting `EnclosingBall`.
"""
function ball_on_support(obj::SphereGenerator, arg0::List)
    return jcall(obj, "ballOnSupport", EnclosingBall, (List,), arg0)
end
|
{"hexsha": "07139665f09eaa3b3283e1f18e79b8c05de75337", "size": 196, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "gen/HipparchusWrapper/GeometryWrapper/EuclideanWrapper/ThreedWrapper/sphere_generator.jl", "max_stars_repo_name": "JuliaAstrodynamics/Orekit.jl", "max_stars_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-07T12:26:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T16:02:35.000Z", "max_issues_repo_path": "gen/HipparchusWrapper/GeometryWrapper/EuclideanWrapper/ThreedWrapper/sphere_generator.jl", "max_issues_repo_name": "JuliaSpace/Orekit.jl", "max_issues_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-09-05T10:16:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-30T05:17:19.000Z", "max_forks_repo_path": "gen/HipparchusWrapper/GeometryWrapper/EuclideanWrapper/ThreedWrapper/sphere_generator.jl", "max_forks_repo_name": "JuliaSpace/Orekit.jl", "max_forks_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7777777778, "max_line_length": 68, "alphanum_fraction": 0.75, "num_tokens": 48}
|
from collections import deque
from importlib import reload
import ddpg_agents
from ddpg_agents import Agent
import torch
import matplotlib.pyplot as plt
from unityagents import UnityEnvironment
import numpy as np
import pandas as pd
import datetime
# --- Environment setup (module-level side effects) ---
# The NoVis build can be swapped in for headless training.
#env = UnityEnvironment(file_name='./Reacher_single/Reacher_Linux_NoVis/Reacher.x86_64')
env = UnityEnvironment(file_name='./Reacher_single/Reacher_Linux/Reacher.x86_64')
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
# states is (num_agents, state_size); state_size feeds the network input
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
print('state size is {} action size is {}'.format(state_size,action_size))
def ddpg(n_episodes=1000, max_t=300, print_every=1, num_updates=10):
    """Train a DDPG agent on the (module-level) Unity Reacher environment.

    Parameters
    ----------
    n_episodes : int
        Number of training episodes.
    max_t : int
        Maximum number of timesteps per episode.
    print_every : int
        Episode interval between progress prints (also the window of the
        running-average deque).
    num_updates : int
        Passed to Agent; number of learning updates per step.

    Returns
    -------
    list of float
        Mean score (over agents) for each episode. Also saves actor/critic
        checkpoints after every episode.
    """
    agent = Agent(state_size=state_size, action_size=action_size,
                  random_seed=2, num_updates=num_updates)
    scores_deque = deque(maxlen=print_every)
    scores = []
    for i_episode in range(1, n_episodes + 1):
        env_info = env.reset(train_mode=True)[brain_name]
        states = env_info.vector_observations
        agent.reset()
        score = np.zeros(num_agents)
        for t in range(max_t):
            if i_episode == 1 and t == 1:
                print('training started successfully')
            actions = agent.act(states, add_noise=True)
            env_info = env.step(actions)[brain_name]
            next_states = env_info.vector_observations
            rewards = env_info.rewards
            dones = env_info.local_done
            if i_episode == 1 and t == 1:
                print('variables are rewards: {} actions: {}'.format(rewards, actions))
            # Store one experience per agent
            for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):
                agent.step(state, action, reward, next_state, done, t)
            states = next_states
            score += rewards
            if t % 10 == 0:
                print('episode {} action {}'.format(i_episode, t))
            # BUG FIX: was `np.any(done)`, which tested the stale loop
            # variable from the zip above (last agent only); check the
            # full per-agent list instead.
            if np.any(dones):
                print('completed episode {} at t of {}'.format(i_episode, t))
                break
        scores_deque.append(np.mean(score))
        scores.append(np.mean(score))
        # Checkpoint every episode so progress survives interruption
        torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')
        torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')
        if i_episode % print_every == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))
    return scores
if __name__=='__main__':
    # Sweep the number of learning updates per step; one full training run
    # (300 episodes, up to 1000 steps each) per setting.
    for i_update in [8,7,6]:
        scores = ddpg(300,1000,num_updates = i_update)
        # Timestamp distinguishes output files between runs.
        # NOTE(review): strftime pattern contains ':' which is invalid in
        # Windows filenames — confirm this script only targets Linux.
        dt = datetime.datetime.now()
        time_for_name = dt.strftime("%d_%H:%M")
        # Persist raw per-episode scores as CSV
        df = pd.DataFrame({'scores': scores })
        df.to_csv('results/training_result{}update{}.csv'.format(time_for_name,i_update))
        # Plot score vs. episode and save alongside the CSV
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plt.plot(np.arange(1, len(scores)+1), scores)
        plt.ylabel('Score')
        plt.xlabel('Episode #')
        plt.savefig('results/training_plot{}update{}.png'.format(time_for_name,i_update))
|
{"hexsha": "e9eaa0b6e7aa7a0421dfced4417e5dc77f210090", "size": 3824, "ext": "py", "lang": "Python", "max_stars_repo_path": "p2_continuous-control/run_single.py", "max_stars_repo_name": "thanakijwanavit/deep-reinforcement-learning", "max_stars_repo_head_hexsha": "af057d72c6262faa9bd8426082b1f70ea00c7b9c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "p2_continuous-control/run_single.py", "max_issues_repo_name": "thanakijwanavit/deep-reinforcement-learning", "max_issues_repo_head_hexsha": "af057d72c6262faa9bd8426082b1f70ea00c7b9c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2019-12-16T22:23:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:26:35.000Z", "max_forks_repo_path": "p2_continuous-control/run_single.py", "max_forks_repo_name": "thanakijwanavit/deep-reinforcement-learning", "max_forks_repo_head_hexsha": "af057d72c6262faa9bd8426082b1f70ea00c7b9c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.021978022, "max_line_length": 112, "alphanum_fraction": 0.6610878661, "include": true, "reason": "import numpy", "num_tokens": 900}
|
from napari_apr_viewer import napari_get_reader, napari_get_writer, napari_write_image
import pyapr
import numpy as np
import os
# tmp_path is a pytest fixture
def test_writer(tmp_path):
    """Round-trip test of the writer plugin: write one and two APR layers,
    then re-read them and compare against the original data."""
    here = os.path.dirname(os.path.abspath(__file__))
    sample_file = os.path.join(here, 'files', 'spheres_tiny.apr')

    # Load the same sample twice to get two layers
    reader = napari_get_reader(sample_file)
    layer_tuples = reader([sample_file, sample_file])
    first_layer = layer_tuples[0]

    # directory to write to
    out_dir = os.path.join(tmp_path, 'some_dir')
    kinds = [entry[2] for entry in layer_tuples]

    # get writer
    write_fn = napari_get_writer(out_dir, kinds)
    assert callable(write_fn)

    # Every layer needs a name before writing
    for idx, entry in enumerate(layer_tuples):
        if 'name' not in entry[1]:
            entry[1]['name'] = 'data{}'.format(idx)

    # write multiple
    written = write_fn(out_dir, layer_tuples)
    assert isinstance(written, list)
    assert len(written) == len(layer_tuples) == 2
    assert None not in written

    # check correctness against the original APR data
    apr_gt = first_layer[0].apr
    parts_gt = first_layer[0].parts
    assert _read_and_compare(written, apr_gt, parts_gt)

    # write single
    single_target = os.path.join(tmp_path, 'myfile.apr')
    single_path = napari_write_image(single_target, first_layer)
    assert single_path is not None

    # check correctness
    assert _read_and_compare(single_path, apr_gt, parts_gt)
def _read_and_compare(path, apr_gt: pyapr.APR, parts_gt: pyapr.ShortParticles):
    """Read APR file(s) at *path* (str or list of str) and assert the
    contents match the ground-truth APR structure and particle values.
    Returns True on success, False for unsupported *path* types."""
    if isinstance(path, list):
        # Recurse into each written file
        return all(_read_and_compare(p, apr_gt, parts_gt) for p in path)
    if not isinstance(path, str):
        return False
    apr, parts = pyapr.io.read(path)
    assert apr.total_number_particles() == apr_gt.total_number_particles() > 0
    assert all([apr.org_dims(i) == apr_gt.org_dims(i) for i in range(3)])
    assert len(parts) == len(parts_gt)
    np.testing.assert_allclose(np.array(parts, copy=False), np.array(parts_gt, copy=False))
    return True
|
{"hexsha": "74470c24d308ec96ff6152ae00d50a24cd38b797", "size": 2035, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/napari_apr_viewer/_tests/test_writer.py", "max_stars_repo_name": "AdaptiveParticles/napari-apr-viewer", "max_stars_repo_head_hexsha": "cc7089bc418dd5ff08f74dce7024920d387fd6c6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-12-01T07:59:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T11:24:19.000Z", "max_issues_repo_path": "src/napari_apr_viewer/_tests/test_writer.py", "max_issues_repo_name": "AdaptiveParticles/napari-apr-viewer", "max_issues_repo_head_hexsha": "cc7089bc418dd5ff08f74dce7024920d387fd6c6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-12-10T12:51:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-05T18:15:19.000Z", "max_forks_repo_path": "src/napari_apr_viewer/_tests/test_writer.py", "max_forks_repo_name": "AdaptiveParticles/napari-apr-viewer", "max_forks_repo_head_hexsha": "cc7089bc418dd5ff08f74dce7024920d387fd6c6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9166666667, "max_line_length": 95, "alphanum_fraction": 0.6987714988, "include": true, "reason": "import numpy", "num_tokens": 519}
|
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import numpy as np
from collections import OrderedDict
import torch
from inference.models.vgg import VGGRatioEstimator
from inference.models.resnet import ResNetRatioEstimator
from inference.trainer import RatioTrainer
from inference.utils import create_missing_folders, load_and_check, get_optimizer
from inference.utils import get_loss, clean_log_r, clean_t
from inference.utils import restrict_samplesize
logger = logging.getLogger(__name__)
class ParameterizedRatioEstimator(object):
theta_mean = np.array([0.1, -2.0])
theta_std = np.array([0.1, 0.5])
def __init__(
self,
resolution=64,
n_parameters=2,
n_aux=0,
architecture="resnet",
log_input=False,
rescale_inputs=True,
rescale_theta=True,
zero_bias=False,
):
self.resolution = resolution
self.n_parameters = n_parameters
self.n_aux = n_aux
self.log_input = log_input
self.rescale_inputs = rescale_inputs
self.rescale_theta = rescale_theta
self.architecture = architecture
self.x_scaling_mean = None
self.x_scaling_std = None
self.aux_scaling_mean = None
self.aux_scaling_std = None
self._create_model(zero_bias)
def train(
self,
method,
x,
theta,
theta_alt,
aux=None,
log_r_xz=None,
log_r_xz_alt=None,
t_xz=None,
t_xz_alt=None,
alpha=1.0,
optimizer="adam",
n_epochs=50,
batch_size=256,
initial_lr=0.001,
final_lr=0.0001,
nesterov_momentum=None,
validation_split=0.25,
validation_split_seed=None,
early_stopping=True,
limit_samplesize=None,
verbose="some",
update_input_rescaling=True,
validation_loss_before=None,
):
logger.info("Starting training")
logger.info(" Method: %s", method)
if method in ["cascal", "rascal", "alices"]:
logger.info(" alpha: %s", alpha)
logger.info(" Batch size: %s", batch_size)
logger.info(" Optimizer: %s", optimizer)
logger.info(" Epochs: %s", n_epochs)
logger.info(
" Learning rate: %s initially, decaying to %s",
initial_lr,
final_lr,
)
if optimizer == "sgd":
logger.info(" Nesterov momentum: %s", nesterov_momentum)
logger.info(" Validation split: %s", validation_split)
logger.info(" Early stopping: %s", early_stopping)
if limit_samplesize is None:
logger.info(" Samples: all")
else:
logger.info(" Samples: %s", limit_samplesize)
logger.info(" Update x rescaling: %s", update_input_rescaling)
# Load training data
logger.info("Loading training data")
theta = load_and_check(theta, memmap=False)
theta_alt = load_and_check(theta_alt, memmap=False)
x = load_and_check(x, memmap=True)
log_r_xz = load_and_check(log_r_xz, memmap=False)
log_r_xz_alt = load_and_check(log_r_xz_alt, memmap=False)
t_xz = load_and_check(t_xz, memmap=False)
t_xz_alt = load_and_check(t_xz_alt, memmap=False)
aux = load_and_check(aux, memmap=False)
self._check_required_data(method, log_r_xz, log_r_xz_alt, t_xz, t_xz_alt)
if update_input_rescaling:
self._initialize_input_transform(x, aux)
# Clean up input data
if log_r_xz is not None:
log_r_xz = log_r_xz.reshape((-1, 1))
log_r_xz_alt = log_r_xz_alt.reshape((-1, 1))
theta = theta.reshape((-1, 2))
theta_alt = theta_alt.reshape((-1, 2))
log_r_xz = clean_log_r(log_r_xz)
log_r_xz_alt = clean_log_r(log_r_xz_alt)
t_xz = clean_t(t_xz)
t_xz_alt = clean_t(t_xz_alt)
# Rescale aux, theta, and t_xz
aux = self._transform_aux(aux)
theta = self._transform_theta(theta)
theta_alt = self._transform_theta(theta_alt)
if t_xz is not None:
t_xz = self._transform_t_xz(t_xz)
t_xz_alt = self._transform_t_xz(t_xz_alt)
# Infer dimensions of problem
n_samples = x.shape[0]
n_parameters = theta.shape[1]
resolution_x = x.shape[1]
resolution_y = x.shape[2]
n_aux = 0 if aux is None else aux.shape[1]
logger.info(
"Found %s samples with %s parameters, image resolution %s x %s, and %s auxiliary parameters",
n_samples,
n_parameters,
resolution_x,
resolution_y,
n_aux,
)
if resolution_x != resolution_y:
raise RuntimeError(
"Currently only supports square images, but found resolution {} x {}".format(
resolution_x, resolution_y
)
)
resolution = resolution_x
if n_aux != self.n_aux:
raise RuntimeError(
"Number of auxiliary variables found in data ({}) does not match number of"
"auxiliary variables in model ({})".format(n_aux, self.n_aux)
)
if aux is not None and aux.shape[0] != n_samples:
raise RuntimeError(
"Number of samples in auxiliary variables does not match number of"
"samples ({})".format(aux.shape[0], n_samples)
)
# Limit sample size
if limit_samplesize is not None and limit_samplesize < n_samples:
logger.info(
"Only using %s of %s training samples", limit_samplesize, n_samples
)
x, theta, theta_alt, log_r_xz, log_r_xz_alt, t_xz, t_xz_alt, aux = restrict_samplesize(
limit_samplesize, x, theta, theta_alt, log_r_xz, log_r_xz_alt, t_xz, t_xz_alt, aux
)
# Check consistency of input with model
if n_parameters != self.n_parameters:
raise RuntimeError(
"Number of parameters does not match model: {} vs {}".format(
n_parameters, self.n_parameters
)
)
if resolution != self.resolution:
raise RuntimeError(
"Number of observables does not match model: {} vs {}".format(
resolution, self.resolution
)
)
# Data
data = self._package_training_data(method, x, theta, theta_alt, log_r_xz, log_r_xz_alt, t_xz, t_xz_alt, aux)
# Losses
loss_functions, loss_labels, loss_weights = get_loss(method, alpha)
# Optimizer
opt, opt_kwargs = get_optimizer(optimizer, nesterov_momentum)
# Train model
logger.info("Training model")
trainer = RatioTrainer(self.model, run_on_gpu=True)
result = trainer.train(
data=data,
loss_functions=loss_functions,
loss_weights=loss_weights,
loss_labels=loss_labels,
epochs=n_epochs,
batch_size=batch_size,
optimizer=opt,
optimizer_kwargs=opt_kwargs,
initial_lr=initial_lr,
final_lr=final_lr,
validation_split=validation_split,
validation_split_seed=validation_split_seed,
early_stopping=early_stopping,
verbose=verbose,
validation_loss_before=validation_loss_before,
)
return result
def log_likelihood_ratio(
self,
x,
theta,
aux=None,
test_all_combinations=True,
evaluate_score=False,
evaluate_grad_x=False,
batch_size=1000,
grad_x_theta_index=0,
):
if self.model is None:
raise ValueError("No model -- train or load model before evaluating it!")
# Load training data
logger.debug("Loading evaluation data")
x = load_and_check(x, memmap=True)
aux = load_and_check(aux)
theta = load_and_check(theta)
# Rescale theta and aux
aux = self._transform_aux(aux)
theta = self._transform_theta(theta)
# Evaluate
if test_all_combinations:
logger.debug("Starting ratio evaluation for all combinations")
all_log_r_hat = []
all_t_hat = []
all_grad_x = None
for i, this_theta in enumerate(theta):
logger.debug(
"Starting ratio evaluation for thetas %s / %s: %s",
i + 1,
len(theta),
this_theta,
)
_, log_r_hat, t_hat, x_grad = self._evaluate(
theta0s=[this_theta],
xs=x,
auxs=aux,
evaluate_score=evaluate_score,
evaluate_grad_x=evaluate_grad_x,
batch_size=batch_size,
)
all_log_r_hat.append(log_r_hat)
all_t_hat.append(t_hat)
if x_grad is not None and i == grad_x_theta_index:
all_grad_x = x_grad
all_log_r_hat = np.array(all_log_r_hat)
all_t_hat = np.array(all_t_hat)
else:
logger.debug("Starting ratio evaluation")
_, all_log_r_hat, all_t_hat, all_grad_x = self._evaluate(
theta0s=theta,
xs=x,
auxs=aux,
evaluate_score=evaluate_score,
evaluate_grad_x=evaluate_grad_x,
batch_size=batch_size,
)
logger.debug("Evaluation done")
return all_log_r_hat, all_t_hat, all_grad_x
def _evaluate(
self,
theta0s,
xs,
auxs=None,
evaluate_score=False,
evaluate_grad_x=False,
run_on_gpu=True,
double_precision=False,
batch_size=1000,
):
# Batches
n_xs = len(xs)
n_batches = (n_xs - 1) // batch_size + 1
# results
all_s, all_log_r, all_t, all_x_grad = [], [], [], []
for i_batch in range(n_batches):
x_batch = np.asarray(
np.copy(xs[i_batch * batch_size : (i_batch + 1) * batch_size])
)
if len(theta0s) == n_xs:
theta_batch = np.copy(
theta0s[i_batch * batch_size : (i_batch + 1) * batch_size]
)
else:
theta_batch = np.repeat(
np.copy(theta0s).reshape(1, -1), x_batch.shape[0], axis=0
)
if auxs is not None:
aux_batch = np.copy(
auxs[i_batch * batch_size : (i_batch + 1) * batch_size]
)
else:
aux_batch = None
s, log_r, t, x_grad = self._evaluate_batch(
theta_batch,
x_batch,
aux_batch,
evaluate_score,
evaluate_grad_x,
run_on_gpu,
double_precision,
)
all_s.append(s)
all_log_r.append(log_r)
if t is not None:
all_t.append(t)
if x_grad is not None:
all_x_grad.append(x_grad)
# mash together
all_s = np.concatenate(all_s, 0)
all_log_r = np.concatenate(all_log_r, 0)
if len(all_t) > 0:
all_t = np.concatenate(all_t, 0)
else:
all_t = None
if len(all_x_grad) > 0:
all_x_grad = np.concatenate(all_x_grad, 0)
else:
all_x_grad = None
return all_s, all_log_r, all_t, all_x_grad
    def _evaluate_batch(
        self,
        theta0s,
        xs,
        auxs,
        evaluate_score,
        evaluate_grad_x,
        run_on_gpu,
        double_precision,
    ):
        """Run the model on a single numpy batch and return numpy results.

        Moves the model and the batch to the chosen device/dtype, evaluates
        with or without gradient tracking depending on `evaluate_score` /
        `evaluate_grad_x`, and returns (s, log_r, t, x_grad) as numpy arrays
        (t and x_grad are None when not requested / not produced).
        """
        # CPU or GPU?
        run_on_gpu = run_on_gpu and torch.cuda.is_available()
        device = torch.device("cuda" if run_on_gpu else "cpu")
        dtype = torch.double if double_precision else torch.float
        # Prepare data
        self.model = self.model.to(device, dtype)
        theta0s = torch.from_numpy(theta0s).to(device, dtype)
        xs = torch.from_numpy(xs).to(device, dtype)
        if auxs is not None:
            auxs = torch.from_numpy(auxs).to(device, dtype)
        # Evaluate ratio estimator with score or x gradients:
        if evaluate_score or evaluate_grad_x:
            self.model.eval()
            # requires_grad must be set on the inputs whose gradients we want
            if evaluate_score:
                theta0s.requires_grad = True
            if evaluate_grad_x:
                xs.requires_grad = True
            s, log_r, t, x_grad = self.model(
                theta0s,
                xs,
                aux=auxs,
                track_score=evaluate_score,
                return_grad_x=evaluate_grad_x,
                create_gradient_graph=False,
            )
            # Copy back tensors to CPU
            if run_on_gpu:
                s = s.cpu()
                log_r = log_r.cpu()
                if t is not None:
                    t = t.cpu()
                if x_grad is not None:
                    x_grad = x_grad.cpu()
            # Get data and return
            s = s.detach().numpy().flatten()
            log_r = log_r.detach().numpy().flatten()
            if t is not None:
                t = t.detach().numpy()
            if x_grad is not None:
                x_grad = x_grad.detach().numpy()
        # Evaluate ratio estimator without score:
        else:
            # no_grad avoids building the autograd graph for plain evaluation
            with torch.no_grad():
                self.model.eval()
                s, log_r, _, _ = self.model(
                    theta0s,
                    xs,
                    aux=auxs,
                    track_score=False,
                    return_grad_x=False,
                    create_gradient_graph=False,
                )
                # Copy back tensors to CPU
                if run_on_gpu:
                    s = s.cpu()
                    log_r = log_r.cpu()
                # Get data and return
                s = s.detach().numpy().flatten()
                log_r = log_r.detach().numpy().flatten()
                t = None
                x_grad = None
        return s, log_r, t, x_grad
def save(self, filename, save_model=False):
if self.model is None:
raise ValueError("No model -- train or load model before saving!")
# Check paths
create_missing_folders([os.path.dirname(filename)])
# Save settings
logger.debug("Saving settings to %s_settings.json", filename)
settings = self._wrap_settings()
with open(filename + "_settings.json", "w") as f:
json.dump(settings, f)
# Save state dict
logger.debug("Saving state dictionary to %s_state_dict.pt", filename)
torch.save(self.model.state_dict(), filename + "_state_dict.pt")
# Save model
if save_model:
logger.debug("Saving model to %s_model.pt", filename)
torch.save(self.model, filename + "_model.pt")
def load(self, filename):
# Load settings and create model
logger.debug("Loading settings from %s_settings.json", filename)
with open(filename + "_settings.json", "r") as f:
settings = json.load(f)
self._unwrap_settings(settings)
self._create_model()
# Load state dict
logger.debug("Loading state dictionary from %s_state_dict.pt", filename)
self.model.load_state_dict(
torch.load(filename + "_state_dict.pt", map_location="cpu")
)
def _create_model(self, zero_bias=False):
logger.info("Creating model")
logger.info(" Architecture: %s", self.architecture)
logger.info(" Log input: %s", self.log_input)
logger.info(
" Rescale input: %s",
self.x_scaling_std is not None and self.x_scaling_mean is not None,
)
logger.info(
" Weight initialization: %s", "zero bias" if zero_bias else "default"
)
if self.architecture in ["resnet", "resnet18"]:
self.model = ResNetRatioEstimator(
n_parameters=self.n_parameters,
n_aux=self.n_aux,
n_hidden=512,
log_input=self.log_input,
input_mean=self.x_scaling_mean,
input_std=self.x_scaling_std,
zero_bias=zero_bias,
)
elif self.architecture == "resnet50":
self.model = ResNetRatioEstimator(
n_parameters=self.n_parameters,
n_aux=self.n_aux,
cfg=50,
n_hidden=1024,
log_input=self.log_input,
input_mean=self.x_scaling_mean,
input_std=self.x_scaling_std,
zero_bias=zero_bias,
)
elif self.architecture == "vgg":
self.model = VGGRatioEstimator(
n_parameters=self.n_parameters,
log_input=self.log_input,
input_mean=self.x_scaling_mean,
input_std=self.x_scaling_std,
)
else:
raise RuntimeError("Unknown architecture {}".format(self.architecture))
logger.info("Model has %s trainable parameters", self._count_model_parameters())
def _count_model_parameters(self):
return sum(p.numel() for p in self.model.parameters() if p.requires_grad)
def _initialize_input_transform(self, x, aux=None, n_eval=1000):
if self.rescale_inputs and self.log_input:
self.x_scaling_mean = np.mean(np.log(1. + x[:n_eval]))
self.x_scaling_std = np.maximum(np.std(np.log(1. + x[:n_eval])), 1.0e-6)
elif self.rescale_inputs and (not self.log_input):
self.x_scaling_mean = np.mean(x)
self.x_scaling_std = np.maximum(np.std(x), 1.0e-6)
else:
self.x_scaling_mean = None
self.x_scaling_std = None
if self.rescale_inputs and aux is not None:
self.aux_scaling_mean = np.mean(aux, axis=0)
self.aux_scaling_std = np.maximum(np.std(aux, axis=0), 1.0e-6)
else:
self.aux_scaling_mean = None
self.aux_scaling_std = None
self.model.input_mean = self.x_scaling_mean
self.model.input_std = self.x_scaling_std
def _transform_aux(self, aux):
if (
aux is not None
and self.aux_scaling_mean is not None
and self.aux_scaling_std is not None
):
aux = aux - self.aux_scaling_mean[np.newaxis, :]
aux = aux / self.aux_scaling_std[np.newaxis, :]
return aux
def _transform_theta(self, theta):
if self.rescale_theta:
theta = theta - self.theta_mean[np.newaxis, :]
theta = theta / self.theta_std[np.newaxis, :]
return theta
def _transform_t_xz(self, t_xz):
if self.rescale_theta:
t_xz = t_xz * self.theta_std[np.newaxis, :]
return t_xz
def _wrap_settings(self):
settings = {
"resolution": self.resolution,
"n_parameters": self.n_parameters,
"n_aux": self.n_aux,
"architecture": self.architecture,
"log_input": self.log_input,
"rescale_inputs": self.rescale_inputs,
"x_scaling_mean": self.x_scaling_mean,
"x_scaling_std": self.x_scaling_std,
"rescale_theta": self.rescale_theta,
"aux_scaling_mean": []
if self.aux_scaling_mean is None
else list(self.aux_scaling_mean),
"aux_scaling_std": []
if self.aux_scaling_std is None
else list(self.aux_scaling_std),
}
return settings
def _unwrap_settings(self, settings):
self.resolution = int(settings["resolution"])
self.n_parameters = int(settings["n_parameters"])
self.n_aux = int(settings["n_aux"])
self.architecture = str(settings["architecture"])
self.log_input = bool(settings["log_input"])
self.rescale_inputs = str(settings["rescale_inputs"])
self.x_scaling_mean = float(settings["x_scaling_mean"])
self.x_scaling_std = float(settings["x_scaling_std"])
self.rescale_theta = bool(settings["rescale_theta"])
self.aux_scaling_mean = list(settings["aux_scaling_mean"])
if len(self.aux_scaling_mean) == 0:
self.aux_scaling_mean = None
else:
self.aux_scaling_mean = np.array(self.aux_scaling_mean)
self.aux_scaling_std = list(settings["aux_scaling_std"])
if len(self.aux_scaling_std) == 0:
self.aux_scaling_std = None
else:
self.aux_scaling_std = np.array(self.aux_scaling_std)
@staticmethod
def _check_required_data(method, r_xz, r_xz_alt, t_xz, t_xz_alt):
if method in ["cascal", "alices", "rascal"] and (t_xz is None or t_xz_alt is None):
raise RuntimeError(
"Method {} requires joint score information".format(method)
)
if method in ["rolr", "alices", "rascal"] and (r_xz is None or r_xz_alt is None):
raise RuntimeError(
"Method {} requires joint likelihood ratio information".format(method)
)
@staticmethod
def _package_training_data(method, x, theta, theta_alt, log_r_xz, log_r_xz_alt, t_xz, t_xz_alt, aux=None):
data = OrderedDict()
data["x"] = x
data["theta"] = theta
data["theta_alt"] = theta_alt
if method in ["rolr", "alice", "alices", "rascal"]:
data["log_r_xz"] = log_r_xz
data["log_r_xz_alt"] = log_r_xz_alt
if method in ["cascal", "alices", "rascal"]:
data["t_xz"] = t_xz
data["t_xz_alt"] = t_xz_alt
if aux is not None:
data["aux"] = aux
return data
|
{"hexsha": "965bd7b66514a35aa02d9dd569447155e63bf799", "size": 22307, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference/estimator.py", "max_stars_repo_name": "matthewfeickert/mining-for-substructure-lens", "max_stars_repo_head_hexsha": "9360e678aac78b6b260dab55ce264bfddea0c206", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2019-09-06T02:55:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-26T19:55:44.000Z", "max_issues_repo_path": "inference/estimator.py", "max_issues_repo_name": "johannbrehmer/mining-for-substructure-lens", "max_issues_repo_head_hexsha": "a15a257051b68adad7ca88a388b0084abab1d97f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference/estimator.py", "max_forks_repo_name": "johannbrehmer/mining-for-substructure-lens", "max_forks_repo_head_hexsha": "a15a257051b68adad7ca88a388b0084abab1d97f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2019-09-04T15:42:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-01T21:24:13.000Z", "avg_line_length": 34.9092331768, "max_line_length": 116, "alphanum_fraction": 0.5589276909, "include": true, "reason": "import numpy", "num_tokens": 4957}
|
!-------------------------------------------------------------------------------------------------------------
!
!> \file CompExcessGibbsEnergyIDWZ.f90
!> \brief Compute the partial molar 'excess' Gibbs energy of solution phase constituents in an IDWZ
!! solution phase.
!> \author P. Bajpai
!> \sa CompExcessGibbsEnergy.f90
!> \sa CompExcessGibbsEnergyRKMP.f90
!> \sa CompExcessGibbsEnergySUBL.f90
!> \date December 3, 2018
!
!
! Revisions:
! ==========
!
! Date Programmer Description of change
! ---- ---------- ---------------------
! 12/03/2018 P.Bajpai Original code.
!
!
! Purpose:
! ========
!
!> \details The purpose of this subroutine is to compute the partial molar excess Gibbs energy of mixing
!! (dPartialExcessGibbs) of all constituents in a non-ideal solution phase designated as 'QKTO'
!! (Quasi-chemical Kohlter-TOop). The PolyRegular subroutine computes the excess Gibbs energy of mixing of
!! a regular solution sub-system (see PolyRegular for a definition) and the KohlerInterpolate subroutine
!! performs a Kohler interpolation of a sub-system to a phase.
!!
!! The molar excess Gibbs energy of mixing of a binary sub-system for a QKTO model is:
!!
!! \f$ g_{\lambda,z}^{ex} = L_z x_1^a x_2^b \f$
!!
!! where \f$ L_z \f$ is the mixing parameter, \f$ x_1 \f$ and \f$ x_2 \f$ are the mole fractions for
!! constituents 1 and 2 in the binary term and \f$ a \f$ and \f$ b \f$ are the exponents for constituents
!! 1 and 2, respectively.
!!
!! The molar excess Gibbs energy of mixing for solution phase \f$ \lambda \f$ using Kohler's interpolation
!! scheme gives
!!
!! \f$ g_{\lambda}^{ex} = (x_1 + x_2)^2 g_{\lambda,z}^{ex} \f$
!!
!! Similarly, the molar excess Gibbs energy of mixing of a ternary sub-system for a QKTO model is:
!!
!! \f$ g_{\lambda,z}^{ex} = L_z x_1^a x_2^b x_3^c \f$
!!
!! which is related to the molar excess Gibbs energy of mixing of the phase via Kohler's interpolation:
!!
!! \f$ g_{\lambda}^{ex} = (x_1 + x_2 + x_3)^2 g_{\lambda,z}^{ex} \f$
!!
!
! Pertinent variables:
! ====================
!
!> \param[in] iSolnIndex Absolute index of a solution phase
!
! nSpeciesPhase Highest index number of a species in a particular solution phase
! nParam Number of parameters
! iParam Index number of a parameter.
! dChemicalPotential The estimated chemical potential vector. To be precise, this is defined as the
! molar Gibbs energy of the pure species minus the proper chemical potential
! defined by the element potentials.
! dPartialExcessGibbs Partial molar excess Gibbs energy of mixing of species.
! dPartialExcessGibbsLast Partial molar excess Gibbs energy of mixing of species from the last iteration.
! dMolFraction Current estimated mole fraction.
!
!-------------------------------------------------------------------------------------------------------------
    subroutine CompExcessGibbsEnergyIDWZ(iSolnIndex)
        ! Placeholder: the IDWZ partial molar excess Gibbs energy computation
        ! is not yet implemented, so this routine intentionally does nothing.
        ! iSolnIndex is the absolute index of the solution phase (input).
        USE ModuleThermo
        USE ModuleGEMSolver
        implicit none
        ! The local declarations below are retained for the planned
        ! implementation and are currently unused.
        integer :: iParam, iSolnIndex
        real(8) :: dGParam, xT
        real(8), dimension(nMaxParam) :: dPartialGParam
        !dPartialExcessGibbs(iSolnIndex) = 0
        return
    end subroutine CompExcessGibbsEnergyIDWZ
|
{"hexsha": "81ba5338bfb47ece816586cd203f5cee384352d6", "size": 3760, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/CompExcessGibbsEnergyIDWZ.f90", "max_stars_repo_name": "parikshitbajpai/thermochimica", "max_stars_repo_head_hexsha": "34a3f3e8a92e9e93f8fc7a739d0c6c00bcddca12", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/CompExcessGibbsEnergyIDWZ.f90", "max_issues_repo_name": "parikshitbajpai/thermochimica", "max_issues_repo_head_hexsha": "34a3f3e8a92e9e93f8fc7a739d0c6c00bcddca12", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/CompExcessGibbsEnergyIDWZ.f90", "max_forks_repo_name": "parikshitbajpai/thermochimica", "max_forks_repo_head_hexsha": "34a3f3e8a92e9e93f8fc7a739d0c6c00bcddca12", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.2183908046, "max_line_length": 115, "alphanum_fraction": 0.5515957447, "num_tokens": 994}
|
import numpy as np
import trax
#from trax import layers as tl
#from trax.fastmath import numpy as fastnp
#from trax.supervised import training
# UNIT TEST for UNQ_C1
def test_get_conversation(target):
data = {'file1.json': {'log':[{'text': 'hi'},
{'text': 'hello'},
{'text': 'nice'}]},
'file2.json':{'log':[{'text': 'a b'},
{'text': ''},
{'text': 'good '},
{'text': 'no?'}]}}
res1 = target('file1.json', data)
res2 = target('file2.json', data)
expected1 = ' Person 1: hi Person 2: hello Person 1: nice'
expected2 = ' Person 1: a b Person 2: Person 1: good Person 2: no?'
success = 0
fails = 0
try:
assert res1 == expected1
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res1, 'x \nExpected: ', expected1)
fails += 1
try:
assert res2 == expected2
success += 1
except:
print('Error in test 2 \nResult : ', res2, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C2
def test_reversible_layer_forward(target):
f1 = lambda x: x + 2
g1 = lambda x: x * 3
f2 = lambda x: x + 1
g2 = lambda x: x * 2
input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
expected1 = np.array([8, 10, 12, 14, 29, 36, 43, 50])
input_vector2 = np.array([1] * 128)
expected2 = np.array([3] * 64 + [7] * 64)
success = 0
fails = 0
try:
res = target(input_vector1, f1, g1)
assert isinstance(res, np.ndarray)
success += 1
except:
print('Wrong type! Output is not of type np.ndarray')
fails += 1
try:
res = target(input_vector1, f1, g1)
assert np.allclose(res, expected1)
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res, 'x \nExpected: ', expected1)
fails += 1
try:
res = target(input_vector2, f2, g2)
assert np.allclose(res, expected2)
success += 1
except:
print('Error in test 2 \nResult : ', res, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C3
def test_reversible_layer_reverse(target):
f1 = lambda x: x + 2
g1 = lambda x: x * 3
f2 = lambda x: x + 1
g2 = lambda x: x * 2
input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
expected1 = np.array([-3, 0, 3, 6, 2, 0, -2, -4])
input_vector2 = np.array([1] * 128)
expected2 = np.array([1] * 64 + [-1] * 64)
success = 0
fails = 0
try:
res = target(input_vector1, f1, g1)
assert isinstance(res, np.ndarray)
success += 1
except:
print('Wrong type! Output is not of type np.ndarray')
fails += 1
try:
res = target(input_vector1, f1, g1)
assert np.allclose(res, expected1)
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res, 'x \nExpected: ', expected1)
fails += 1
try:
res = target(input_vector2, f2, g2)
assert np.allclose(res, expected2)
success += 1
except:
print('Error in test 2 \nResult : ', res, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C4
def test_ReformerLM(target):
    """Unit test for UNQ_C4 (ReformerLM).

    Builds the model in 'train' mode and checks (a) the number of top-level
    sublayers and (b) the whitespace-stripped repr of the whole model against
    a reference string. Prints a colored pass/fail summary; never raises.
    """
    test_cases = [
        {
            "name":"layer_len_check",
            "expected":11,
            "error":"We found {} layers in your model. It should be 11.\nCheck the LSTM stack before the dense layer"
        },
        {
            "name":"simple_test_check",
            # Reference repr of the expected architecture with all spaces and
            # newlines removed.
            "expected":"Serial[ShiftRight(1)Embedding_train_512DropoutPositionalEncodingDup_out2ReversibleSerial_in2_out2[ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2]Concatenate_in2LayerNormDropoutDense_trainLogSoftmax]",
            "error":"The ReformerLM is not defined properly."
        }
    ]
    temp_model = target('train')
    success = 0
    fails = 0
    for test_case in test_cases:
        try:
            if test_case['name'] == "simple_test_check":
                assert test_case["expected"] == str(temp_model).replace(' ', '').replace('\n','')
                success += 1
            if test_case['name'] == "layer_len_check":
                if test_case["expected"] == len(temp_model.sublayers):
                    success += 1
                else:
                    print(test_case["error"].format(len(temp_model.sublayers)))
                    fails += 1
        except:
            # NOTE(review): for layer_len_check failures reaching this branch,
            # the message is printed without .format(), so a literal "{}"
            # appears in the output — confirm whether that is intended.
            print(test_case['error'])
            fails += 1
    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success," Tests passed")
        print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C5
def test_tasks(train_task, eval_task):
    """Unit test for UNQ_C5 (training_loop tasks).

    Inspects private attributes of the trax TrainTask/EvalTask objects
    (labeled data generator, loss layer, optimizer, LR schedule, checkpoint
    frequency, eval metrics) and prints a colored pass/fail summary; each
    check is wrapped in try/except so the function itself never raises.
    """
    target = train_task
    success = 0
    fails = 0
    # Test the labeled data parameter for train_task
    try:
        strlabel = str(target._labeled_data)
        assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
        success += 1
    except:
        fails += 1
        print("Wrong labeled data parameter in train_task")
    # Test the cross entropy loss data parameter
    try:
        strlabel = str(target._loss_layer)
        assert(strlabel == "CrossEntropyLoss_in3")
        success += 1
    except:
        fails += 1
        print("Wrong loss functions. CrossEntropyLoss_in3 was expected")
    # Test the optimizer parameter
    try:
        assert(isinstance(target.optimizer, trax.optimizers.adam.Adam))
        success += 1
    except:
        fails += 1
        print("Wrong optimizer")
    # Test the schedule parameter
    try:
        assert(isinstance(target._lr_schedule,trax.supervised.lr_schedules._BodyAndTail))
        success += 1
    except:
        fails += 1
        print("Wrong learning rate schedule type")
    # Test the _n_steps_per_checkpoint parameter
    try:
        assert(target._n_steps_per_checkpoint==10)
        success += 1
    except:
        fails += 1
        print("Wrong checkpoint step frequency")
    # Switch to inspecting the evaluation task
    target = eval_task
    # Test the labeled data parameter for eval_task
    try:
        strlabel = str(target._labeled_data)
        assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
        success += 1
    except:
        fails += 1
        print("Wrong labeled data parameter in eval_task")
    # Test the metrics in eval_task
    try:
        strlabel = str(target._metrics).replace(' ', '')
        assert(strlabel == "[CrossEntropyLoss_in3,Accuracy_in3]")
        success += 1
    except:
        fails += 1
        # NOTE(review): if str(target._metrics) itself raised above, strlabel
        # may hold a stale value from an earlier check — the message below
        # could then be misleading. Confirm before relying on it.
        print(f"Wrong metrics. found {strlabel} but expected [CrossEntropyLoss_in3,Accuracy_in3]")
    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success," Tests passed")
        print('\033[91m', fails, " Tests failed")
|
{"hexsha": "f72ce252a89798bc51b81ba3b3a05a173b92e02c", "size": 8096, "ext": "py", "lang": "Python", "max_stars_repo_path": "Natural Language Processing with Attention Models/Week 4 - Chatbot/w4_unittest.py", "max_stars_repo_name": "meet-seth/Coursera-Deep-Learning", "max_stars_repo_head_hexsha": "6fbf9d406468c825ffa1ff2e177dbfd43084bace", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 362, "max_stars_repo_stars_event_min_datetime": "2020-10-08T07:34:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T05:11:30.000Z", "max_issues_repo_path": "NLP/Learn_by_deeplearning.ai/Course 4 - Attention Models /Labs/Week 4/w4_unittest.py", "max_issues_repo_name": "abcd1758323829/skills", "max_issues_repo_head_hexsha": "195fad43e99de5efe6491817ad2b79e12665cc2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-07-07T16:10:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-04T08:17:55.000Z", "max_forks_repo_path": "NLP/Learn_by_deeplearning.ai/Course 4 - Attention Models /Labs/Week 4/w4_unittest.py", "max_forks_repo_name": "abcd1758323829/skills", "max_forks_repo_head_hexsha": "195fad43e99de5efe6491817ad2b79e12665cc2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 238, "max_forks_repo_forks_event_min_datetime": "2020-10-08T12:01:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T08:10:42.000Z", "avg_line_length": 31.874015748, "max_line_length": 580, "alphanum_fraction": 0.5610177866, "include": true, "reason": "import numpy", "num_tokens": 2199}
|
[STATEMENT]
lemma singleDSourceEmpty_Acc:
assumes "DAcc i C = {S}"
and "isNotDSource i S"
shows "Acc i C = {S}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Acc i C = {S}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Acc i C = {S}
[PROOF STEP]
have AccC:"(Acc i C) = (DAcc i C) \<union> (\<Union> S \<in> (DAcc i C). (Acc i S))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Acc i C = DAcc i C \<union> \<Union> (Acc i ` DAcc i C)
[PROOF STEP]
by (rule AccDef)
[PROOF STATE]
proof (state)
this:
Acc i C = DAcc i C \<union> \<Union> (Acc i ` DAcc i C)
goal (1 subgoal):
1. Acc i C = {S}
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
DAcc i C = {S}
isNotDSource i S
[PROOF STEP]
have "Acc i S = {}"
[PROOF STATE]
proof (prove)
using this:
DAcc i C = {S}
isNotDSource i S
goal (1 subgoal):
1. Acc i S = {}
[PROOF STEP]
by (simp add: isNotDSource_EmptyAcc)
[PROOF STATE]
proof (state)
this:
Acc i S = {}
goal (1 subgoal):
1. Acc i C = {S}
[PROOF STEP]
with AccC
[PROOF STATE]
proof (chain)
picking this:
Acc i C = DAcc i C \<union> \<Union> (Acc i ` DAcc i C)
Acc i S = {}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Acc i C = DAcc i C \<union> \<Union> (Acc i ` DAcc i C)
Acc i S = {}
goal (1 subgoal):
1. Acc i C = {S}
[PROOF STEP]
by (metis SUP_empty UN_insert Un_commute Un_empty_left assms(1))
[PROOF STATE]
proof (state)
this:
Acc i C = {S}
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 678, "file": "ComponentDependencies_DataDependencies", "length": 10}
|
**==effn.spg processed by SPAG 4.50J at 14:50 on 30 Jun 1995
C
C     EFFN(I,Zeff,T) - piecewise empirical fit selected by ion index I,
C     effective charge Zeff and temperature T.  Three fit coefficients
C     f1,f2,f3 are chosen from the scaled temperature
C     t3 = T/(1000*Zeff**2), then combined per ion stage I.
C     NOTE(review): presumably an "effective number of electrons" table
C     for the Raymond-Smith plasma code (file effn.f) -- confirm against
C     that code's documentation before relying on the interpretation.
C
      FUNCTION EFFN(I,Zeff,T)
      IMPLICIT NONE
C*** Start of declarations inserted by SPAG
      REAL EFFN , eye , f1 , f2 , f3 , T , t3 , xx , Zeff
      INTEGER I , no
C*** End of declarations inserted by SPAG
C     Scaled temperature and its base-10 log (0.4342 ~= log10(e)).
      t3 = T/(Zeff*Zeff*1000.0)
      xx = 0.4342*LOG(t3)
C     Fit coefficients by temperature regime: constant below t3=1,
C     oscillatory fit up to t3=1e5, constant asymptote above.
      IF ( t3.LE.1.0 ) THEN
         f1 = 0.266
         f2 = 0.13
         f3 = 0.13
      ELSEIF ( t3.LT.10.**5 ) THEN
         f1 = 0.266 + 0.1068*xx - 0.074*SIN(1.2566*xx)
         f2 = 0.130 + 0.1160*xx - 0.074*SIN(1.2566*xx)
         f3 = 0.130 - 0.0120*xx + 0.050*EXP(-(xx-2.)*(xx-2.))
      ELSE
         f1 = 0.80
         f2 = 0.71
         f3 = 0.07
      ENDIF
C     Dispatch on ion index I: stages 2..17 use the table below,
C     I>=18 uses the 200 branch, I=0 and I=1 fall through to 100.
      IF ( I.NE.0 ) THEN
         IF ( I.GT.18 ) GOTO 200
         IF ( I.EQ.2 ) THEN
            EFFN = 8.0
         ELSEIF ( I.EQ.3 ) THEN
            EFFN = 8. - (4.*f1)
         ELSEIF ( I.EQ.4 ) THEN
            EFFN = 8.*(1.-f1)
         ELSEIF ( I.EQ.5 ) THEN
            EFFN = 6.6667*(1.-f1)
         ELSEIF ( I.EQ.6 ) THEN
            EFFN = 5.33333*(1.-f1)
         ELSEIF ( I.EQ.7 ) THEN
            EFFN = 4.*(1.-f1)
         ELSEIF ( I.EQ.8 ) THEN
            EFFN = 2.6667*(1.-f1)
         ELSEIF ( I.EQ.9 ) THEN
            EFFN = 1.33333*(1.-f1)
         ELSEIF ( I.EQ.10 ) THEN
            EFFN = 18.
         ELSEIF ( I.EQ.11 ) THEN
            EFFN = 18. - (9.*f2)
         ELSEIF ( I.EQ.12 ) THEN
            EFFN = 18.0*(1.-f2)
         ELSEIF ( I.EQ.13 ) THEN
            EFFN = 18.*(1.-f2) - 1.*(9.*f3)
         ELSEIF ( I.EQ.14 ) THEN
            EFFN = 18.*(1.-f2) - 2.*(9.*f3)
         ELSEIF ( I.EQ.15 ) THEN
            EFFN = 18.*(1.-f2) - 3.*(9.*f3)
         ELSEIF ( I.EQ.16 ) THEN
            EFFN = 18.*(1.-f2) - 4.*(9.*f3)
         ELSEIF ( I.EQ.17 ) THEN
            EFFN = 18.*(1.-f2) - 45.0*f3
         ELSEIF ( I.EQ.18 ) THEN
            GOTO 200
         ELSE
            GOTO 100
         ENDIF
         GOTO 300
      ENDIF
C     Fallback for I=0 and I=1: linear in I.
 100  eye = I
      EFFN = 2. - eye
      GOTO 300
C     High stages (I>=18): scaled by distance from 28, clamped positive.
 200  no = 28 - I
      EFFN = no*1.8*(1.-3.*f3-f2)
      IF ( EFFN.LT.0 ) THEN
         eye = I
         EFFN = 60. - eye
C  GUARD PACKAGE FOR I = 17
C  PROBABLY UNNECESSARY
         IF ( EFFN.LE.0 ) EFFN = 1.0
      ENDIF
 300  RETURN
      END
|
{"hexsha": "49b4c15fec074630557a01b5cd9623c2954a4b47", "size": 2281, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "xspec/tools/raysmith/effn.f", "max_stars_repo_name": "DougBurke/xspeclmodels", "max_stars_repo_head_hexsha": "4e9caf971af51ab88eb0f8cf678a11f014710013", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xspec/tools/raysmith/effn.f", "max_issues_repo_name": "DougBurke/xspeclmodels", "max_issues_repo_head_hexsha": "4e9caf971af51ab88eb0f8cf678a11f014710013", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xspec/tools/raysmith/effn.f", "max_forks_repo_name": "DougBurke/xspeclmodels", "max_forks_repo_head_hexsha": "4e9caf971af51ab88eb0f8cf678a11f014710013", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8734177215, "max_line_length": 62, "alphanum_fraction": 0.4234984656, "num_tokens": 944}
|
!==========================================================================
elemental function gsw_entropy_part (sa, t, p)
!==========================================================================
!
!   entropy minus the terms that are a function of only SA
!
!   sa     : Absolute Salinity                               [g/kg]
!   t      : in-situ temperature                             [deg C]
!   p      : sea pressure                                    [dbar]
!
!   gsw_entropy_part : entropy part
!
!   Implementation note: the result is -0.025*(g03 + g08), a polynomial
!   in the dimensionless variables x,y,z below.  g03 and g08 are the two
!   parts of the fitted polynomial (g08 carries the salinity-dependent
!   terms via x2/x; g03 depends on y,z only).  The numeric coefficients
!   are fit constants and must not be altered -- verify any change
!   against the TEOS-10 reference implementation.
!--------------------------------------------------------------------------
use gsw_mod_teos10_constants, only : gsw_sfac
use gsw_mod_kinds
implicit none
real (r8), intent(in) :: sa, t, p
real (r8) :: gsw_entropy_part
real (r8) :: x2, x, y, z, g03, g08
x2 = gsw_sfac*sa    ! scaled salinity
x = sqrt(x2)
y = t*0.025_r8      ! scaled temperature
z = p*1e-4_r8       ! scaled pressure
! Salinity-independent polynomial part (y,z only).
g03 = z*(-270.983805184062_r8 + &
    z*(776.153611613101_r8 + z*(-196.51255088122_r8 + (28.9796526294175_r8 - 2.13290083518327_r8*z)*z))) + &
    y*(-24715.571866078_r8 + z*(2910.0729080936_r8 + &
    z*(-1513.116771538718_r8 + z*(546.959324647056_r8 + z*(-111.1208127634436_r8 + 8.68841343834394_r8*z)))) + &
    y*(2210.2236124548363_r8 + z*(-2017.52334943521_r8 + &
    z*(1498.081172457456_r8 + z*(-718.6359919632359_r8 + (146.4037555781616_r8 - 4.9892131862671505_r8*z)*z))) + &
    y*(-592.743745734632_r8 + z*(1591.873781627888_r8 + &
    z*(-1207.261522487504_r8 + (608.785486935364_r8 - 105.4993508931208_r8*z)*z)) + &
    y*(290.12956292128547_r8 + z*(-973.091553087975_r8 + &
    z*(602.603274510125_r8 + z*(-276.361526170076_r8 + 32.40953340386105_r8*z))) + &
    y*(-113.90630790850321_r8 + y*(21.35571525415769_r8 - 67.41756835751434_r8*z) + &
    z*(381.06836198507096_r8 + z*(-133.7383902842754_r8 + 49.023632509086724_r8*z)))))))
! Salinity-dependent polynomial part (enters via x2 = gsw_sfac*sa).
g08 = x2*(z*(729.116529735046_r8 + &
    z*(-343.956902961561_r8 + z*(124.687671116248_r8 + z*(-31.656964386073_r8 + 7.04658803315449_r8*z)))) + &
    x*( x*(y*(-137.1145018408982_r8 + y*(148.10030845687618_r8 + y*(-68.5590309679152_r8 + 12.4848504784754_r8*y))) - &
    22.6683558512829_r8*z) + z*(-175.292041186547_r8 + (83.1923927801819_r8 - 29.483064349429_r8*z)*z) + &
    y*(-86.1329351956084_r8 + z*(766.116132004952_r8 + z*(-108.3834525034224_r8 + 51.2796974779828_r8*z)) + &
    y*(-30.0682112585625_r8 - 1380.9597954037708_r8*z + y*(3.50240264723578_r8 + 938.26075044542_r8*z)))) + &
    y*(1760.062705994408_r8 + y*(-675.802947790203_r8 + &
    y*(365.7041791005036_r8 + y*(-108.30162043765552_r8 + 12.78101825083098_r8*y) + &
    z*(-1190.914967948748_r8 + (298.904564555024_r8 - 145.9491676006352_r8*z)*z)) + &
    z*(2082.7344423998043_r8 + z*(-614.668925894709_r8 + (340.685093521782_r8 - 33.3848202979239_r8*z)*z))) + &
    z*(-1721.528607567954_r8 + z*(674.819060538734_r8 + &
    z*(-356.629112415276_r8 + (88.4080716616_r8 - 15.84003094423364_r8*z)*z)))))
gsw_entropy_part = -(g03 + g08)*0.025_r8
return
end function
!--------------------------------------------------------------------------
|
{"hexsha": "0160db551fd26a0d06aaedf74883922235f4f710", "size": 2980, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "MOM6-interface/MOM6/src/equation_of_state/TEOS10/gsw_entropy_part.f90", "max_stars_repo_name": "minsukji/ci-debug", "max_stars_repo_head_hexsha": "3e8bbbe6652b702b61d2896612f6aa8e4aa6c803", "max_stars_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MOM6-interface/MOM6/src/equation_of_state/TEOS10/gsw_entropy_part.f90", "max_issues_repo_name": "minsukji/ci-debug", "max_issues_repo_head_hexsha": "3e8bbbe6652b702b61d2896612f6aa8e4aa6c803", "max_issues_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-05-21T20:21:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-06T17:52:34.000Z", "max_forks_repo_path": "MOM6-interface/MOM6/src/equation_of_state/TEOS10/gsw_entropy_part.f90", "max_forks_repo_name": "minsukji/ci-debug", "max_forks_repo_head_hexsha": "3e8bbbe6652b702b61d2896612f6aa8e4aa6c803", "max_forks_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.3015873016, "max_line_length": 119, "alphanum_fraction": 0.5919463087, "num_tokens": 1132}
|
Require Import Lia Setoid Program.Basics.
From hahn Require Import Hahn.
From PromisingLib Require Import Basic Language.
From imm Require Import Events Prog Execution ProgToExecution.
Require Import AuxDef.
Require Import AuxRel.
Require Import EventStructure.
Require Import LblStep.
Require Import ProgLoc.
Require Import Consistency.
Require Import EventToAction.
Set Implicit Arguments.
Local Open Scope program_scope.
(* LTS of a single thread: the sequential-program semantics from
   ProgToExecution (state/init/is_terminal) packaged as a PromisingLib
   Language instance, with [ilbl_step t] as the labelled step relation. *)
Definition thread_lts (t : thread_id) : Language.t (list label) :=
  @Language.mk (list label)
    (list Instr.t) state
    init
    is_terminal
    (ilbl_step t).
(* Per-thread initial states of a program: every thread id is mapped to
   the initial state of its instruction list, packed with [thread_lts]. *)
Definition prog_init_threads (prog : Prog.t) :
  IdentMap.t {lang : Language.t (list label) & Language.state lang} :=
  IdentMap.mapi
    (fun tid (linstr : list Instr.t) =>
       existT _ (thread_lts tid) (ProgToExecution.init linstr))
    prog.
(* A program where every thread's instruction list carries a
   [stable_lprog] proof (see LblStep). *)
Definition stable_prog_type := IdentMap.t { linstr & stable_lprog linstr }.
(* Forget the stability proofs, keeping only the instruction lists. *)
Definition stable_prog_to_prog (prog : stable_prog_type) : Prog.t :=
  (IdentMap.map (fun x => projT1 x) prog).
(* Thread membership is unchanged by stripping the stability proofs. *)
Lemma stable_prog_to_prog_in prog thread :
  IdentMap.In thread (stable_prog_to_prog prog) <-> IdentMap.In thread prog.
Proof.
  unfold stable_prog_to_prog.
  eapply RegMap.Facts.map_in_iff.
Qed.
(* In particular, absence of the initial thread id is preserved. *)
Lemma stable_prog_to_prog_no_init prog
      (PROG_NINIT : ~ IdentMap.In tid_init prog) :
  ~ IdentMap.In tid_init (stable_prog_to_prog prog).
Proof. by rewrite stable_prog_to_prog_in. Qed.
(* Initial continuations: one [CInit tid] per program thread, whose state
   is the stable state obtained from the thread's initial state via
   [get_stable] (using the recorded stability proof STBL). *)
Definition prog_init_K
           (prog : stable_prog_type) :
  list (cont_label * {lang : Language.t (list label) & Language.state lang}) :=
  map
    (fun tidc =>
       let tid := fst tidc in
       let linstr := projT1 (snd tidc) in
       let STBL := projT2 (snd tidc) in
       let st' := proj1_sig (get_stable
                               tid (init linstr) STBL
                               (rt_refl _ _ (init linstr))) in
       (CInit tid, existT _ (thread_lts tid) st'))
    (RegMap.elements prog).
(* Initial event structure over an explicitly given location list. *)
Definition prog_l_es_init (prog : stable_prog_type) (locs : list location) :=
  ES.init (undup locs) (prog_init_K prog).
(* Initial ES over the locations occurring in the program itself. *)
Definition prog_es_init (prog : stable_prog_type) :=
  prog_l_es_init prog (prog_locs (stable_prog_to_prog prog)).
(* [undup] is a no-op here because [prog_locs] is duplicate-free. *)
Lemma prog_es_init_alt (prog : stable_prog_type) :
  prog_es_init prog = ES.init
                        (prog_locs (stable_prog_to_prog prog))
                        (prog_init_K prog).
Proof.
  unfold prog_es_init, prog_l_es_init, prog_locs.
  rewrite undup_nodup; eauto.
  apply NoDup_nodup.
Qed.
(* Locations of an execution graph: those of its [InitEvent]s. *)
Definition g_locs (G : execution) :=
  undup (flatten (map (fun e =>
                         match e with
                         | InitEvent l => [l]
                         | _ => []
                         end)
                      (acts G))).
(* Initial ES over the locations of an execution graph G. *)
Definition prog_g_es_init prog (G : execution) :=
  prog_l_es_init prog (g_locs G).
(* Same no-op-[undup] unfolding for the graph-based variant. *)
Lemma prog_g_es_init_alt prog (G : execution) :
  prog_g_es_init prog G = ES.init (g_locs G) (prog_init_K prog).
Proof.
  unfold prog_g_es_init, prog_l_es_init, g_locs.
  rewrite undup_nodup; auto.
Qed.
(* In the initial event structure every event is an initial event, so the
   non-init set and all derived relations (sb, jf, sw, hb, cf, psc, scb,
   rmw) are empty.  Each [_l_] lemma is stated over an explicit location
   list; the [_g_] corollaries specialize it to [g_locs G]. *)
Lemma prog_l_es_init_ninit locs prog :
  ES.acts_ninit_set (prog_l_es_init prog locs) ≡₁ ∅.
Proof.
  split; [|basic_solver].
  red. unfold prog_l_es_init, ES.init. intros x HH.
  apply HH. red. split; auto.
  apply HH.
Qed.
Lemma prog_g_es_init_ninit G prog :
  ES.acts_ninit_set (prog_g_es_init prog G) ≡₁ ∅.
Proof. apply prog_l_es_init_ninit. Qed.
Lemma prog_l_es_init_sb locs prog :
  ES.sb (prog_l_es_init prog locs) ≡ ∅₂.
Proof.
  split; [|basic_solver].
  unfold prog_l_es_init, ES.init. simpls.
Qed.
Lemma prog_g_es_init_sb G prog :
  ES.sb (prog_g_es_init prog G) ≡ ∅₂.
Proof. apply prog_l_es_init_sb. Qed.
Lemma prog_l_es_init_jf locs prog :
  ES.jf (prog_l_es_init prog locs) ≡ ∅₂.
Proof.
  split; [|basic_solver].
  unfold prog_l_es_init, ES.init. simpls.
Qed.
Lemma prog_g_es_init_jf G prog :
  ES.jf (prog_g_es_init prog G) ≡ ∅₂.
Proof. apply prog_l_es_init_jf. Qed.
(* sw is built from jf, hence also empty. *)
Lemma prog_l_es_init_sw locs prog :
  sw (prog_l_es_init prog locs) ≡ ∅₂.
Proof.
  split; [|basic_solver].
  unfold sw. rewrite prog_l_es_init_jf. basic_solver.
Qed.
Lemma prog_g_es_init_sw G prog :
  sw (prog_g_es_init prog G) ≡ ∅₂.
Proof. apply prog_l_es_init_sw. Qed.
(* hb is the transitive closure of sb ∪ sw, both empty. *)
Lemma prog_l_es_init_hb locs prog :
  hb (prog_l_es_init prog locs) ≡ ∅₂.
Proof.
  split; [|basic_solver].
  unfold hb.
  rewrite prog_l_es_init_sw, prog_l_es_init_sb.
  rewrite ct_no_step; basic_solver.
Qed.
Lemma prog_g_es_init_hb G prog :
  hb (prog_g_es_init prog G) ≡ ∅₂.
Proof. apply prog_l_es_init_hb. Qed.
(* cf is restricted to non-init events, which do not exist here. *)
Lemma prog_l_es_init_cf locs prog :
  ES.cf (prog_l_es_init prog locs) ≡ ∅₂.
Proof.
  split; [|basic_solver].
  unfold ES.cf. rewrite prog_l_es_init_ninit. basic_solver.
Qed.
Lemma prog_g_es_init_cf G prog :
  ES.cf (prog_g_es_init prog G) ≡ ∅₂.
Proof. apply prog_l_es_init_cf. Qed.
Lemma prog_l_es_init_psc_f locs prog :
  psc_f (prog_l_es_init prog locs) Weakestmo ≡ ∅₂.
Proof.
  unfold psc_f.
  rewrite prog_l_es_init_hb.
  basic_solver.
Qed.
Lemma prog_l_es_init_scb locs prog :
  scb (prog_l_es_init prog locs) ≡ ∅₂.
Proof.
  unfold scb.
  unfold ES.fr, ES.rf.
  rewrite prog_l_es_init_sb.
  rewrite prog_l_es_init_hb.
  rewrite prog_l_es_init_jf.
  basic_solver.
Qed.
Lemma prog_l_es_init_psc_base locs prog :
  psc_base (prog_l_es_init prog locs) ≡ ∅₂.
Proof.
  unfold psc_base.
  rewrite prog_l_es_init_scb.
  basic_solver.
Qed.
Lemma prog_l_es_init_rmw locs prog :
  ES.rmw (prog_l_es_init prog locs) ≡ ∅₂.
Proof.
  split; [|basic_solver].
  unfold prog_l_es_init, ES.init. simpls.
Qed.
(* Rewrite databases bundling the emptiness lemmas above, so that facts
   about the initial ES can be discharged by [autorewrite]. *)
Hint Rewrite prog_g_es_init_ninit
     prog_g_es_init_sb
     prog_g_es_init_jf
     prog_g_es_init_sw
     prog_g_es_init_hb
     prog_g_es_init_cf
  : prog_g_es_init_db.
Hint Rewrite prog_l_es_init_ninit
     prog_l_es_init_sb
     prog_l_es_init_jf
     prog_l_es_init_sw
     prog_l_es_init_hb
     prog_l_es_init_cf
     prog_l_es_init_psc_f
     prog_l_es_init_psc_base
     prog_l_es_init_rmw
  : prog_l_es_init_db.
(* The initial ES is trivially consistent: all relations involved in the
   consistency conditions are empty. *)
Lemma prog_l_es_init_consistent locs prog :
  @es_consistent (prog_l_es_init prog locs) Weakestmo.
Proof.
  constructor; unfold ecf, ES.jfe, ES.icf.
  all: autorewrite with prog_l_es_init_db; auto.
  (* 7: apply acyclic_disj. *)
  all: basic_solver.
Qed.
Lemma prog_g_es_init_consistent G prog :
  @es_consistent (prog_g_es_init prog G) Weakestmo.
Proof. apply prog_l_es_init_consistent. Qed.
Lemma prog_es_init_consistent prog :
  @es_consistent (prog_es_init prog) Weakestmo.
Proof. apply prog_l_es_init_consistent. Qed.
(* Every event of the initial ES corresponds to an entry of the indexed
   list of initial writes (one per location). *)
Lemma prog_l_es_init_act_in prog locs
      e (ACT : ES.acts_set (prog_l_es_init prog locs) e) :
  exists l,
    In (e, init_write l)
       (indexed_list
          (map init_write (undup locs))).
Proof.
  ins.
  assert
    (exists b,
        In (e, b) (indexed_list
                     (map init_write (undup locs))))
    as [b IN].
  { apply indexed_list_range. desf. }
  assert (In b (map init_write (undup locs)))
    as BIN.
  { clear -IN.
    apply In_map_snd in IN.
    rewrite <- indexed_list_map_snd; eauto. }
  apply in_map_iff in BIN. destruct BIN as [l [LB INL]].
  rewrite <- LB in *. simpls. desf.
  eauto.
Qed.
Lemma prog_g_es_init_act_in prog G
      e (ACT : ES.acts_set (prog_g_es_init prog G) e) :
  exists l,
    In (e, init_write l)
       (indexed_list
          (map init_write (g_locs G))).
Proof.
  apply prog_l_es_init_act_in in ACT.
  unfold g_locs in *.
  rewrite undup_nodup in ACT; auto.
Qed.
(* Hence every event of the initial ES is labelled by a plain store of 0. *)
Lemma prog_l_es_init_act_lab prog locs
      e (ACT : ES.acts_set (prog_l_es_init prog locs) e) :
  exists l, ES.lab (prog_l_es_init prog locs) e = Astore Xpln Opln l 0.
Proof.
  apply prog_l_es_init_act_in in ACT. destruct ACT as [l LL].
  exists l. unfold ES.lab, prog_g_es_init, ES.init.
  apply l2f_in; desf.
  apply indexed_list_fst_nodup.
Qed.
Lemma prog_g_es_init_act_lab prog G
      e (ACT : ES.acts_set (prog_g_es_init prog G) e) :
  exists l, ES.lab (prog_g_es_init prog G) e = Astore Xpln Opln l 0.
Proof. by apply prog_l_es_init_act_lab. Qed.
(* All events of the initial ES are writes. *)
Lemma prog_l_es_init_w locs prog :
  ES.acts_set (prog_l_es_init prog locs) ≡₁
  ES.acts_set (prog_l_es_init prog locs) ∩₁
  (fun a => is_true (is_w (ES.lab (prog_l_es_init prog locs)) a)).
Proof.
  split; [|basic_solver].
  unfolder. intros. split; auto.
  unfold is_w.
  apply prog_l_es_init_act_lab in H. desf.
Qed.
Lemma prog_g_es_init_w G prog :
  ES.acts_set (prog_g_es_init prog G) ≡₁
  ES.acts_set (prog_g_es_init prog G) ∩₁
  (fun a => is_true (is_w (ES.lab (prog_g_es_init prog G)) a)).
Proof. apply prog_l_es_init_w. Qed.
(* All sequence numbers in the initial ES are zero (sb is empty). *)
Lemma prog_l_es_seqn locs prog x : ES.seqn (prog_l_es_init prog locs) x = 0.
Proof.
  unfold ES.seqn. autorewrite with prog_l_es_init_db; eauto.
  relsf.
  apply countNatP_empty.
Qed.
Lemma prog_g_es_seqn G prog x : ES.seqn (prog_g_es_init prog G) x = 0.
Proof. apply prog_l_es_seqn. Qed.
(* The event set coincides with the initial-event set. *)
Lemma prog_l_es_init_init locs prog :
  ES.acts_set (prog_l_es_init prog locs) ≡₁
  ES.acts_init_set (prog_l_es_init prog locs).
Proof. unfold ES.acts_init_set. simpls. basic_solver. Qed.
Lemma prog_es_init_init prog :
  ES.acts_set (prog_es_init prog) ≡₁
  ES.acts_init_set (prog_es_init prog).
Proof. apply prog_l_es_init_init. Qed.
Lemma prog_g_es_init_init G prog :
  ES.acts_set (prog_g_es_init prog G) ≡₁
  ES.acts_init_set (prog_g_es_init prog G).
Proof. apply prog_l_es_init_init. Qed.
(* Auxiliary: a non-empty list has positive length. *)
Lemma length_nempty {A : Type} (l : list A) (nEmpty : l <> []) :
  0 < length l.
Proof.
  unfold length.
  destruct l.
  { intuition. }
  apply Nat.lt_0_succ.
Qed.
(* With at least one location, the initial-event set is non-empty. *)
Lemma prog_l_es_init_nempty locs prog
      (nInitProg : ~ IdentMap.In tid_init prog)
      (nLocsEmpty : locs <> []) :
  ~ ES.acts_init_set (prog_l_es_init prog locs) ≡₁ ∅.
Proof.
  intros HH. eapply HH.
  apply prog_l_es_init_init.
  unfold ES.acts_set.
  unfold prog_l_es_init, ES.init.
  simpls.
  erewrite map_length.
  eapply length_nempty.
  by apply undup_nonnil.
Qed.
Lemma prog_g_es_init_nempty G prog
      (nInitProg : ~ IdentMap.In tid_init prog)
      (nLocsEmpty : g_locs G <> []) :
  ~ ES.acts_init_set (prog_g_es_init prog G) ≡₁ ∅.
Proof. by apply prog_l_es_init_nempty. Qed.
(* Well-formedness of the initial event structure.  Most obligations
   follow from the emptiness lemmas via [autorewrite]; the remaining
   ones concern labels (all events are initial writes), uniqueness of
   locations (via NoDup of the indexed write list), non-emptiness, and
   the initial continuations. *)
Lemma prog_l_es_init_wf locs prog
      (nInitProg : ~ IdentMap.In tid_init prog)
      (nLocsEmpty : locs <> []) :
  ES.Wf (prog_l_es_init prog locs).
Proof.
  assert
    (NoDup (map init_write (undup locs)))
    as NNDD.
  { apply nodup_map.
    2: { ins. intros HH. inv HH. }
    unfold g_locs. apply nodup_undup. }
  constructor.
  all: autorewrite with prog_l_es_init_db; auto.
  all: simpls.
  all: try basic_solver.
  { ins. red. exists b.
    splits; auto.
    red. split; auto. }
  { intros e [AA BB].
    eapply prog_l_es_init_act_lab; eauto. }
  (* Distinct init events write to distinct locations. *)
  { red. ins.
    destruct SX as [SX _]. apply prog_l_es_init_act_in in SX.
    destruct SY as [SY _]. apply prog_l_es_init_act_in in SY.
    desf.
    assert (l0 = l); subst.
    { unfold loc, init_write in *.
      erewrite l2f_in in EQ; eauto.
      2: by apply indexed_list_fst_nodup.
      erewrite l2f_in in EQ; eauto.
      2: by apply indexed_list_fst_nodup.
      desf. }
    eapply indexed_list_snd_nodup; eauto. }
  { apply prog_l_es_init_nempty; eauto. }
  { red. basic_solver. }
  { unfolder. ins. eexists.
    splits; eauto.
    2: by red.
    apply prog_l_es_seqn. }
  { rewrite prog_l_es_init_w. type_solver. }
  { intros ol a b [[EA _] WA] [[EB _] WB].
    set (CA := EA). apply prog_l_es_init_act_in in CA. desf.
    set (CB := EB). apply prog_l_es_init_act_in in CB. desf.
    assert (l0 = l); subst.
    { unfold loc, init_write in *.
      erewrite l2f_in in WB; eauto.
      2: by apply indexed_list_fst_nodup.
      erewrite l2f_in in WB; eauto.
      2: by apply indexed_list_fst_nodup.
      desf. }
    unfolder. ins. exfalso. apply nEW. splits; auto.
    clear -CA CB NNDD.
    eapply indexed_list_snd_nodup; eauto. }
  { split; [|basic_solver].
    unfolder. ins. desf. splits; auto.
    all: eapply prog_l_es_init_w; eauto.
    Unshelve. all: auto. }
  (* No continuation is attached to the init thread (prog lacks it). *)
  { intros HH. desf.
    unfold prog_l_es_init, ES.init, ES.cont_thread, ES.cont_set in *.
    simpls.
    unfold prog_init_K in KK.
    apply in_map_iff in KK.
    desf. destruct x as [tid k]; simpls; desf.
    apply RegMap.elements_complete in KK0.
    apply nInitProg.
    apply RegMap.Facts.in_find_iff.
    rewrite KK0. desf. }
  { intros HH. desf. inv RMW. }
  (* Continuations are uniquely determined by their label. *)
  { unfold prog_l_es_init, ES.init, ES.cont_thread, ES.cont_set in *.
    simpls.
    unfold prog_init_K in *.
    ins.
    apply in_map_iff in CK. apply in_map_iff in CK'.
    desf.
    destruct x. destruct x0.
    apply RegMap.elements_complete in CK0.
    apply RegMap.elements_complete in CK'0.
    simpls; desf. }
  { ins. by apply prog_l_es_init_ninit in EE. }
  { ins. exfalso.
    red in inK.
    unfold prog_g_es_init, ES.init in *. simpls.
    unfold prog_init_K in *.
    apply in_map_iff in inK. desf. }
  (* No continuation is adjacent to another in the initial ES: all
     continuations are [CInit]. *)
  ins. exfalso.
  unfold ES.cont_adjacent
    in ADJ.
  desc.
  unfold ES.cont_set,
  ES.cont,
  prog_g_es_init,
  prog_init_K
    in KK'.
  simpl in KK'.
  apply in_map_iff in KK'.
  destruct KK' as [HA [HB HC]].
  inversion HB. congruence.
Qed.
Lemma prog_g_es_init_wf G prog
      (nInitProg : ~ IdentMap.In tid_init prog)
      (nLocsEmpty : g_locs G <> []) :
  ES.Wf (prog_g_es_init prog G).
Proof. by apply prog_l_es_init_wf. Qed.
Lemma prog_es_init_wf prog
      (nInitProg : ~ IdentMap.In tid_init prog)
      (nLocsEmpty : prog_locs (stable_prog_to_prog prog) <> []) :
  ES.Wf (prog_es_init prog).
Proof. by apply prog_l_es_init_wf. Qed.
(* On initial events, the ES labelling agrees with the graph labelling
   composed with the event-to-action map [e2a]. *)
Lemma prog_g_es_init_same_lab prog G (WF : Wf G) :
  eq_dom (ES.acts_set (prog_g_es_init prog G))
         (ES.lab (prog_g_es_init prog G))
         (Execution.lab G ∘ e2a (prog_g_es_init prog G)).
Proof.
  red. ins.
  arewrite (undup (g_locs G) = g_locs G).
  { unfold g_locs. rewrite undup_nodup; auto. }
  unfold compose.
  apply prog_g_es_init_act_in in DX. desf.
  rewrite prog_g_es_init_alt.
  unfold e2a, ES.init, ES.acts_set in *; simpls; desf.
  unfold Events.loc.
  erewrite l2f_in; [|by apply indexed_list_fst_nodup|by eauto].
  simpls. rewrite wf_init_lab; auto.
Qed.
(* Every continuation of the initial ES is a [CInit thread] holding a
   stable state reachable from the initial state of its program. *)
Lemma prog_l_es_init_K prog locs k state
      (INK : ES.cont_set
               (prog_l_es_init prog locs)
               (k, existT _
                          (thread_lts (ES.cont_thread (prog_l_es_init prog locs)
                                                      k))
                          state)) :
  exists thread,
    ⟪ KTID : k = CInit thread ⟫ /\
    ⟪ STEPS : (istep thread [])* (init (instrs state)) state ⟫ /\
    ⟪ STBL : stable_state state ⟫.
Proof.
  assert (forall A B (c : A) (a b : B)
                 (OO : (c, a) = (c, b)), a = b) as OO.
  { ins. inv OO. }
  ins. red in INK.
  unfold prog_l_es_init, ES.init, prog_init_K, ES.cont_thread in *.
  simpls.
  apply in_map_iff in INK. desc. inv INK.
  destruct x. simpls. desf.
  apply OO in INK.
  inv INK.
  destruct s; simpls.
  eexists; splits; eauto.
  all: pose (AA :=
               @proj2_sig
                 _ _
                 (get_stable t (init x) s
                             (rt_refl state (step t) (init x)))).
  arewrite
    (instrs
       (proj1_sig
          (get_stable t (init x) s (rt_refl state (step t) (init x)))) =
     instrs (init x)).
  all: red in AA; desf.
  eapply steps_same_instrs; eauto.
  apply eps_steps_in_steps. eauto.
Qed.
Lemma prog_g_es_init_K prog G k state
      (INK : ES.cont_set
               (prog_g_es_init prog G)
               (k, existT _
                          (thread_lts (ES.cont_thread (prog_g_es_init prog G)
                                                      k))
                          state)) :
  exists thread,
    ⟪ KTID : k = CInit thread ⟫ /\
    ⟪ STEPS : (istep thread [])* (init (instrs state)) state ⟫ /\
    ⟪ STBL : stable_state state ⟫.
Proof. by apply prog_l_es_init_K. Qed.
(* The labelling of the initial ES assigns either the default label
   (a relaxed fence, the default of [l2f]) or an initial write. *)
Lemma prog_l_es_init_lab prog locs e :
  << ELAB : ES.lab (prog_l_es_init prog locs) e = Afence Orlx >> \/
  exists l,
    << ELAB : ES.lab (prog_l_es_init prog locs) e = init_write l >>.
Proof.
  unfold prog_l_es_init, ES.init. simpls.
  unnw.
  edestruct @l2f_v with (A:=nat)
                        (l:=indexed_list (map init_write (undup locs)))
                        (a:=e)
                        (DEC:=Nat.eq_dec).
  { apply indexed_list_fst_nodup. }
  2: { desf. left. eauto. }
  desf. right.
  generalize dependent e.
  unfold indexed_list in *.
  remember 0 as n. clear Heqn.
  generalize dependent n.
  induction (undup locs); simpls.
  ins. desf; eauto.
Qed.
Lemma prog_g_es_init_lab prog G e :
  << ELAB : ES.lab (prog_g_es_init prog G) e = Afence Orlx >> \/
  exists l,
    << ELAB : ES.lab (prog_g_es_init prog G) e = init_write l >>.
Proof. apply prog_l_es_init_lab. Qed.
(* Indexing commutes with [map]: indexing a mapped list equals mapping
   over the indexed original while keeping the indices. *)
Lemma traverse_map_indexed_list {A B} (f : A -> B) l :
  indexed_list (map f l) =
  map (fun p : nat * A => let (a, b) := p in (a, f b))
      (indexed_list l).
Proof.
  unfold indexed_list in *.
  remember 0 as n. clear Heqn.
  generalize dependent n.
  induction l; simpls.
  congruence.
Qed.
(* The initial locations of the initial ES are exactly [locs]. *)
Lemma prog_l_es_init_init_loc prog locs :
  (fun l => In l locs) ≡₁ ES.init_loc (prog_l_es_init prog locs).
Proof.
  split.
  { intros l L_IN.
    apply in_undup_iff in L_IN.
    specialize (indexed_list_in_exists l (undup locs) L_IN) as [e Foo].
    exists e. splits.
    { apply prog_l_es_init_init.
      unfold prog_l_es_init, ES.init.
      unfold ES.acts_set, ES.next_act. rewrite length_map.
      apply indexed_list_range. eauto. }
    unfold prog_l_es_init, ES.init. simpl.
    unfold Events.loc.
    arewrite ((list_to_fun
                 Nat.eq_dec
                 (Afence Orlx)
                 (indexed_list (map init_write (undup locs)))) e =
              init_write l); [|done].
    apply l2f_in.
    { apply indexed_list_fst_nodup. }
    rewrite traverse_map_indexed_list.
    eapply in_map with
        (f := (fun p : nat * location => let (a, b) := p in (a, init_write b))) in Foo.
    auto. }
  intros l [a HH]. desf.
  unfold prog_l_es_init, ES.init, ES.lab in LOCA.
  specialize (l2f_codom (indexed_list (map init_write (undup locs)))
                        a
                        (Afence Orlx) Nat.eq_dec) as RR.
  desf; unfold loc in LOCA; desf.
  all: rewrite traverse_map_indexed_list in RR;
    apply in_map_iff in RR; desf.
  apply In_map_snd in RR0.
  rewrite indexed_list_map_snd in RR0.
  by apply in_undup_iff.
Qed.
Lemma prog_g_init_init_loc prog G :
  (fun l => In l (g_locs G)) ≡₁ ES.init_loc (prog_g_es_init prog G).
Proof. by apply prog_l_es_init_init_loc. Qed.
Lemma prog_es_init_init_loc prog :
  (fun l => In l (prog_locs (stable_prog_to_prog prog))) ≡₁ ES.init_loc (prog_es_init prog).
Proof. by apply prog_l_es_init_init_loc. Qed.
|
{"author": "weakmemory", "repo": "weakestmoToImm", "sha": "7061b6279887aa5777f13b5c5ed6a10fae6740a5", "save_path": "github-repos/coq/weakmemory-weakestmoToImm", "path": "github-repos/coq/weakmemory-weakestmoToImm/weakestmoToImm-7061b6279887aa5777f13b5c5ed6a10fae6740a5/src/construction/ProgES.v"}
|
#!/bin/python3
import sys
sys.path.append(".")
from adder_graph import adder_graph
from adder_graph import adder_node as node
import networkx as nx
import pydot
# Build a small 4-bit adder graph: two columns (x=0,1) over three rows
# (y=0..2) of nodes, then exercise the adjacency bookkeeping of
# adder_graph.add_edge via assertions.
g = adder_graph(4)
g.add_node(node(0,0,'buffer_node'),style='invis')
g.add_node(node(1,0,'buffer_node'))
g.add_node(node(0,1,'black'))
g.add_node(node(1,1,'black'))
g.add_node(node(0,2,'grey'))
g.add_node(node(1,2,'grey'))
# Edges: add_edge(src_node, (src_port, bit), dst_node, (dst_port, bit)).
g.add_edge(g[0,0],('gout',0),g[0,1],('gin',0))
g.add_edge(g[0,0],('gout',0),g[1,1],('gin',0))
g.add_edge(g[1,0],('gout',0),g[1,1],('gin',1))
g.add_edge(g[0,1],('pout',0),g[0,2],('gin',1))
g.add_edge(g[0,1],('gout',0),g[1,2],('gin',0))
g.add_edge(g[1,1],('gout',0),g[1,2],('gin',0))
g.add_edge(g[1,1],('pout',0),g[1,2],('gin',1))
# Node connecting to two separate children via different ports
adj_list=g.adj[g[0,1]]
assert(g[0,2] in adj_list)
assert(g[1,2] in adj_list)
assert(g[0,1].outs['pout'][0]==3)
assert(g[0,2].ins['gin'][1]==3)
assert(adj_list[g[0,2]][0]['ins']==('pout',0))
assert(adj_list[g[1,2]][0]['outs']==('gin',0))
# Node connecting to child via multiple ports
adj_list=g.adj[g[1,1]]
assert(g[1,2] in adj_list)
assert(len(adj_list.keys())==3)
assert(adj_list[g[1,2]][0]['outs']!=adj_list[g[1,2]][1]['outs'])
assert(adj_list[g[1,2]][0]['ins']!=adj_list[g[1,2]][1]['ins'])
# Node connecting to two separate children via same port
adj_list=g.adj[g[0,0]]
assert(g[0,1] in adj_list)
assert(g[1,1] in adj_list)
assert(adj_list[g[0,1]][0]['ins']==adj_list[g[1,1]][0]['ins'])
# Verilog flattening of a single node (exact string expected).
assert(g[1,1]._flat()==" assign n6=1'b0&1'b0;\n assign n5=n2|(1'b0&n1);\n")
# Optional rendering of the graph via pydot (kept disabled).
#pg=nx.drawing.nx_pydot.to_pydot(g)
#pg.write_png('output.png',prog='neato')
|
{"hexsha": "803ccf79d51b88ad6d95276c0596a6dc5a69863e", "size": 1635, "ext": "py", "lang": "Python", "max_stars_repo_path": "unit_tests/graph_test.py", "max_stars_repo_name": "tdene/synth_opt_adders", "max_stars_repo_head_hexsha": "c94ba6e61468e8867f7a3f8d5af252b0e42664a0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2021-04-19T21:07:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T19:33:38.000Z", "max_issues_repo_path": "unit_tests/graph_test.py", "max_issues_repo_name": "tdene/synth_opt_adders", "max_issues_repo_head_hexsha": "c94ba6e61468e8867f7a3f8d5af252b0e42664a0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2022-01-17T03:12:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T14:44:57.000Z", "max_forks_repo_path": "unit_tests/graph_test.py", "max_forks_repo_name": "tdene/synth_opt_adders", "max_forks_repo_head_hexsha": "c94ba6e61468e8867f7a3f8d5af252b0e42664a0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-06-09T23:05:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T22:01:46.000Z", "avg_line_length": 27.25, "max_line_length": 75, "alphanum_fraction": 0.6409785933, "include": true, "reason": "import networkx", "num_tokens": 641}
|
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import string
import numpy as np
# Load the corpus once at module import and build the retrieval structures.
# Use a context manager so the file handle is closed even if read() fails
# (the original open()/close() pair leaked the handle on error).
# NOTE(review): replacing '\n' with '' fuses the last word of one line with
# the first word of the next; a space separator may be intended -- confirm.
with open('nltk.txt', 'r', errors='ignore') as f:
    t = f.read()
t = t.replace('\n', '').lower()
st = nltk.sent_tokenize(t)  # sentence list: the retrieval corpus
wt = nltk.word_tokenize(t)  # word tokens (not used further in this script)
lemmer = nltk.stem.WordNetLemmatizer()
def lemTokens(tokens):
    """Lemmatize every token in *tokens*, returning a new list."""
    return list(map(lemmer.lemmatize, tokens))
# Translation table mapping every punctuation ordinal to None (deleted).
remove_punc_dict = dict((ord(punc),None) for punc in string.punctuation)
def lemNormalize(text):
    """Lowercase *text*, strip punctuation, tokenize, and lemmatize."""
    cleaned = text.lower().translate(remove_punc_dict)
    return lemTokens(nltk.word_tokenize(cleaned))
def response(input):
    """Print the corpus sentence most similar to *input* (TF-IDF cosine).

    The query is appended to the global corpus ``st`` only so it can be
    vectorized alongside the sentences; it is removed again in ``finally``.
    (Previously the query was left in ``st``, so every call permanently
    polluted the corpus and later queries could match earlier ones.)
    """
    st.append(input)
    try:
        tv = TfidfVectorizer(tokenizer=lemNormalize, stop_words='english')
        tfidf = tv.fit_transform(st)
        # Cosine similarity of the query (last row) against every row.
        vals = cosine_similarity(tfidf[-1], tfidf)
        # Second-highest score: best match other than the query itself.
        idx = vals.argsort()[0][-2]
        flat = vals.flatten()
        flat.sort()
        req_tfdif = flat[-2]
        if req_tfdif == 0:
            print("Response Error: Don't understand")
        else:
            print("\nANSWER: " + st[idx])
    finally:
        st.pop()  # restore the corpus to its pre-call state
# Demo query executed at import time.
response("who is jarvis?")
|
{"hexsha": "456a49fc26497b78dc236e864f90e09125ba4454", "size": 1095, "ext": "py", "lang": "Python", "max_stars_repo_path": "nltkTest.py", "max_stars_repo_name": "pranjal-joshi/Florence", "max_stars_repo_head_hexsha": "ffc022f0c0b4625b4236d78b42bf551c64690cfc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nltkTest.py", "max_issues_repo_name": "pranjal-joshi/Florence", "max_issues_repo_head_hexsha": "ffc022f0c0b4625b4236d78b42bf551c64690cfc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nltkTest.py", "max_forks_repo_name": "pranjal-joshi/Florence", "max_forks_repo_head_hexsha": "ffc022f0c0b4625b4236d78b42bf551c64690cfc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7073170732, "max_line_length": 83, "alphanum_fraction": 0.6757990868, "include": true, "reason": "import numpy", "num_tokens": 275}
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# One color per locality class (colorblind-safe palette).
color = sns.color_palette('colorblind', n_colors=4)
# dist - acc
# Load per-distance correctness data and attach human-readable columns.
dist_grouped = pd.read_csv('figures/wiki_dist_correctness_after.csv')
# Map the integer locality code (0..3) to a descriptive label.
conditions = [
    (dist_grouped['locality'] == 0),
    (dist_grouped['locality'] == 1),
    (dist_grouped['locality'] == 2),
    (dist_grouped['locality'] == 3)]
choices = ['no locality',
           'same category, different section',
           'same section, different category',
           'same section, same category']
dist_grouped['Locality'] = np.select(conditions, choices)
dist_grouped['Accuracy'] = dist_grouped['correctness']
dist_grouped['Neg. Distance'] = dist_grouped['dist_right']
fig, ax = plt.subplots(1, 1, figsize=(5, 4))
# sns.scatterplot(x='Neg. Distance', y='Accuracy', hue='Locality', data=dist_grouped, s=8,
#                 palette=color, ax=ax[0], legend=False)
# Load per-rank data, keeping only the top 200 ranks.
grouped = pd.read_csv('figures/wiki_rank_after.csv')
grouped = grouped.loc[grouped['rank'] <= 200]
conditions = [
    (grouped['locality'] == 0),
    (grouped['locality'] == 1),
    (grouped['locality'] == 2),
    (grouped['locality'] == 3)]
grouped['Locality'] = np.select(conditions, choices)
grouped['Rank'] = grouped['rank']
grouped['Accuracy'] = grouped['correctness']
grouped['Neg. Distance (Modified)'] = grouped['dist']
# rank - acc
# sns.scatterplot(x='Rank', y='Accuracy', hue='Locality', data=grouped, s=8,
#                 palette=color, ax=ax[1], legend=False)
# rank - dist
# Only the rank-vs-distance scatter is currently rendered.
sns.scatterplot(x='Rank', y='Neg. Distance (Modified)', hue='Locality', data=grouped, s=8,
                palette=color)
fig.tight_layout()
plt.savefig('figures/wiki_after.pdf')
|
{"hexsha": "2229cd515f05f8388592d6c8f8ec0b2d47d9d0a7", "size": 1697, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot_wiki_after.py", "max_stars_repo_name": "frankxu2004/knnlm", "max_stars_repo_head_hexsha": "7a668a916b08a0e82072c8f49eef4a10ad4a8505", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-11-04T01:29:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T13:46:52.000Z", "max_issues_repo_path": "plot_wiki_after.py", "max_issues_repo_name": "frankxu2004/knnlm-locality", "max_issues_repo_head_hexsha": "7a668a916b08a0e82072c8f49eef4a10ad4a8505", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plot_wiki_after.py", "max_forks_repo_name": "frankxu2004/knnlm-locality", "max_forks_repo_head_hexsha": "7a668a916b08a0e82072c8f49eef4a10ad4a8505", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0188679245, "max_line_length": 90, "alphanum_fraction": 0.6588096641, "include": true, "reason": "import numpy", "num_tokens": 469}
|
"""
match two list of stars, provided by ra/dec degree
"""
import numpy as np
import scipy.stats as ss
def star_match ( list_a, list_b, a_ra, a_dec, b_ra, b_dec, a_mag=-1, b_mag=-1,
                 dis_limit=0.002, mag_limit=-3, allow_dup=False ) :
    """Match two lists of stars by angular distance, with optional magnitude check.

    :param list_a: list a of stars, each item is a star, stars as list with property
    :param list_b: list b of stars
    :param a_ra: ra field index in list a
    :param a_dec: dec field index in list a
    :param b_ra: ra field index in list b
    :param b_dec: dec field index in list b
    :param a_mag: mag field index in list a, -1 means no mag, default is -1
    :param b_mag: mag field index in list b, -1 means no mag, default is -1
    :param dis_limit: distance limit when matching, default is 0.002 deg, 7.2 arcsec
    :param mag_limit: mag difference when checking, 0 means no check,
        minus means times of sigma, positive is mag difference, default is -3
    :param allow_dup: allow duplicate mapping or not, default is False
    :returns: 3 items tuple, index of a, index of b, distance
    """
    len_a = len(list_a)
    len_b = len(list_b)
    # Pull the coordinate (and optional magnitude) columns into numpy arrays.
    ra_a = np.array([k[a_ra] for k in list_a])
    dec_a = np.array([k[a_dec] for k in list_a])
    ra_b = np.array([k[b_ra] for k in list_b])
    dec_b = np.array([k[b_dec] for k in list_b])
    if a_mag >= 0 :
        mag_a = np.array([k[a_mag] for k in list_a])
    else :
        mag_a = np.zeros(len_a)
    if b_mag >= 0 :
        mag_b = np.array([k[b_mag] for k in list_b])
    else :
        mag_b = np.zeros(len_b)
    # RA separations shrink by cos(dec); a single median factor is used for
    # the whole field (fine for small fields, less accurate near the poles).
    ra_scale = np.cos(np.median(dec_a) / 180.0 * np.pi)
    # Walk both lists in dec order so candidate partners for each a star lie
    # in a sliding window of b stars.
    ix_a = np.argsort(dec_a)
    ix_b = np.argsort(dec_b)
    out_a , out_b = [] , []
    #dis_ra, dis_dec = [], [] #dis_ra/dec only used for debug, test residual
    dis_ab = []
    # Window [pbf, pbt) of b stars whose dec is within dis_limit of the
    # current a star; both pointers only ever move forward.
    pbf = pbt = 0 # point b from/to
    for pa in range(len_a) :
        ix_pa = ix_a[pa]
        ra_p, dec_p = ra_a[ix_pa], dec_a[ix_pa]
        # pb walk down to first position [pbf]>=[pa]-dis, [pbt]>=[pa]+dis
        while pbf < len_b and dec_b[ix_b[pbf]] < dec_p - dis_limit : pbf += 1
        while pbt < len_b and dec_b[ix_b[pbt]] < dec_p + dis_limit : pbt += 1
        # exit if p2f runout
        if pbf >= len_b : break
        # skip if no near star
        if pbt - pbf < 1 : continue
        # check real distance, include ra
        for ix_pb in ix_b[range(pbf, pbt)] :
            d_ra = ra_p - ra_b[ix_pb]
            d_dec = dec_p - dec_b[ix_pb]
            # Planar approximation of the angular separation, in degrees.
            dis = np.sqrt((d_ra * ra_scale) ** 2 + d_dec ** 2)
            if dis < dis_limit :
                out_a.append(ix_pa)
                out_b.append(ix_pb)
                #dis_ra.append(d_ra)
                #dis_dec.append(d_dec)
                dis_ab.append(dis)
    out_a = np.array(out_a)
    out_b = np.array(out_b)
    #dis_ra = np.array(dis_ra)
    #dis_dec = np.array(dis_dec)
    dis_ab = np.array(dis_ab)
    if a_mag >= 0 and b_mag >= 0 and mag_limit != 0 :
        # mag difference limit check
        mag_diff = mag_a[out_a] - mag_b[out_b]
        if mag_limit < 0 :
            # Negative limit: keep pairs within |mag_limit| sigma of the
            # 3-sigma-clipped mean magnitude offset between the catalogs.
            mag_diff_clip, ml, mh = ss.sigmaclip(mag_diff, 3, 3)
            std = mag_diff_clip.std()
            mea = mag_diff_clip.mean()
            mag_limit_x = - std * mag_limit
        else :
            # Positive limit: fixed magnitude tolerance around the mean offset.
            mea = mag_diff.mean()
            mag_limit_x = mag_limit
        ix_m = np.where(np.abs(mag_diff - mea) < mag_limit_x)
        out_a = out_a[ix_m]
        out_b = out_b[ix_m]
        dis_ab = dis_ab[ix_m]
    if not allow_dup :
        # For each a star matched more than once keep only its closest pair,
        # then repeat the same pruning for each b star.
        ix_keep = []
        uq_a = np.unique(out_a)
        for u in uq_a :
            ix_dup = np.where(out_a == u)
            ix_min = ix_dup[0][ dis_ab[ix_dup].argmin() ]
            ix_keep.append(ix_min)
        out_a = out_a[ix_keep]
        out_b = out_b[ix_keep]
        dis_ab = dis_ab[ix_keep]
        ix_keep = []
        uq_b = np.unique(out_b)
        for u in uq_b :
            ix_dup = np.where(out_b == u)
            ix_min = ix_dup[0][ dis_ab[ix_dup].argmin() ]
            ix_keep.append(ix_min)
        out_a = out_a[ix_keep]
        out_b = out_b[ix_keep]
        dis_ab = dis_ab[ix_keep]
    return (out_a, out_b, dis_ab)
|
{"hexsha": "f3baf89b5e3a23bbcab31cc7bc8a937811784be8", "size": 4203, "ext": "py", "lang": "Python", "max_stars_repo_path": "star_match.py", "max_stars_repo_name": "RapidLzj/201603", "max_stars_repo_head_hexsha": "dbcefad4a833a936f469186a7eb7106da9a91e74", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "star_match.py", "max_issues_repo_name": "RapidLzj/201603", "max_issues_repo_head_hexsha": "dbcefad4a833a936f469186a7eb7106da9a91e74", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "star_match.py", "max_forks_repo_name": "RapidLzj/201603", "max_forks_repo_head_hexsha": "dbcefad4a833a936f469186a7eb7106da9a91e74", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3193277311, "max_line_length": 84, "alphanum_fraction": 0.5667380443, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1244}
|
[STATEMENT]
lemma mult_mono_nonpos_nonpos: "a * b \<le> c * d"
if "a \<ge> c" "a \<le> 0" "b \<ge> d" "d \<le> 0" for a b c d::real
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a * b \<le> c * d
[PROOF STEP]
by (meson dual_order.trans mult_left_mono_neg mult_right_mono_neg that)
|
{"llama_tokens": 138, "file": null, "length": 1}
|
export name, email, time, time_offset

# A Signature or nothing (old-style Julia 0.4 union alias).
typealias MaybeSignature Union(Void, Signature)

#TODO: better date / time integration when this becomes available in Base

# Build a Signature for `name`/`email` via libgit2's git_signature_now
# (presumably stamped with the current time -- per libgit2 naming; confirm).
Signature(name::AbstractString, email::AbstractString) = begin
    # Out-parameter: libgit2 allocates the signature into sig_ptr[1].
    sig_ptr = Ptr{SignatureStruct}[0]
    @check ccall((:git_signature_now, libgit2), Cint,
                 (Ptr{Ptr{SignatureStruct}}, Ptr{UInt8}, Ptr{UInt8}), sig_ptr, name, email)
    # Copy the C struct into a Julia Signature, then free the C allocation.
    s = Signature(sig_ptr[1])
    ccall((:git_signature_free, libgit2), Void, (Ptr{SignatureStruct},), sig_ptr[1])
    return s
end
# Convert a raw libgit2 SignatureStruct pointer into a Julia Signature,
# copying name, email, commit time and timezone offset out of C memory.
# Does NOT free `ptr`; the caller keeps ownership of the C allocation.
Signature(ptr::Ptr{SignatureStruct}) = begin
    sig = unsafe_load(ptr)::SignatureStruct
    name = utf8(bytestring(sig.name))
    email = utf8(bytestring(sig.email))
    time = sig.when.time
    offset = sig.when.offset
    return Signature(name, email, time, offset)
end
# Human-readable form: Signature("name","email","YYYY-mm-dd HH:MM:SS TZ").
Base.show(io::IO, s::Signature) = begin
    time_str = strftime("%Y-%m-%d %H:%M:%S %Z", s.time)
    print(io, "Signature(\"$(name(s))\",\"$(email(s))\",\"$time_str\")")
end

# Two signatures are equal iff all four fields match.
Base.(:(==))(sig1::Signature, sig2::Signature) = (sig1.name == sig2.name &&
                                                  sig1.email == sig2.email &&
                                                  sig1.time == sig2.time &&
                                                  sig1.time_offset == sig2.time_offset)
# isequal delegates to == so Dict/Set behavior matches equality.
Base.isequal(sig1::Signature, sig2::Signature) = (sig1 == sig2)
# Allocate a fresh libgit2 signature from a Julia Signature via
# git_signature_new and return the raw pointer.
# NOTE(review): nothing here frees the allocation, so the caller appears
# responsible for git_signature_free -- confirm at call sites.
Base.convert(::Type{Ptr{SignatureStruct}}, sig::Signature) = begin
    sig_ptr = Ptr{SignatureStruct}[0]
    @check ccall((:git_signature_new, libgit2), Cint,
                 (Ptr{Ptr{SignatureStruct}}, Ptr{UInt8}, Ptr{UInt8}, Cint, Cint),
                 sig_ptr, sig.name, sig.email, sig.time, sig.time_offset)
    return sig_ptr[1]::Ptr{SignatureStruct}
end

# Plain field accessors.
name(s::Signature) = s.name
email(s::Signature) = s.email
#TODO: remove
Base.time(s::Signature) = s.time
time_offset(s::Signature) = s.time_offset
|
{"hexsha": "ec9d6f1a53b41d243112511168dcdc6010f05f4b", "size": 1892, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/signature.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/LibGit2.jl-76f85450-5226-5b5a-8eaa-529ad045b433", "max_stars_repo_head_hexsha": "5e0adc9e9b9b1bb1963169f7a8e59fb72b79a73f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/signature.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/LibGit2.jl-76f85450-5226-5b5a-8eaa-529ad045b433", "max_issues_repo_head_hexsha": "5e0adc9e9b9b1bb1963169f7a8e59fb72b79a73f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/signature.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/LibGit2.jl-76f85450-5226-5b5a-8eaa-529ad045b433", "max_forks_repo_head_hexsha": "5e0adc9e9b9b1bb1963169f7a8e59fb72b79a73f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.84, "max_line_length": 91, "alphanum_fraction": 0.616807611, "num_tokens": 503}
|
(* Title: HOL/Word/WordBitwise.thy
Authors: Thomas Sewell, NICTA and Sascha Boehme, TU Muenchen
*)
theory WordBitwise
imports Word
begin

text \<open>Helper constants used in defining addition\<close>

(* xor3 is the exclusive-or of three booleans: the sum bit of a full adder. *)
definition
  xor3 :: "bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> bool"
where
  "xor3 a b c = (a = (b = c))"

(* carry is the majority of three booleans: the carry bit of a full adder. *)
definition
  carry :: "bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> bool"
where
  "carry a b c = ((a \<and> (b \<or> c)) \<or> (b \<and> c))"

lemma carry_simps:
  "carry True a b = (a \<or> b)"
  "carry a True b = (a \<or> b)"
  "carry a b True = (a \<or> b)"
  "carry False a b = (a \<and> b)"
  "carry a False b = (a \<and> b)"
  "carry a b False = (a \<and> b)"
  by (auto simp add: carry_def)

lemma xor3_simps:
  "xor3 True a b = (a = b)"
  "xor3 a True b = (a = b)"
  "xor3 a b True = (a = b)"
  "xor3 False a b = (a \<noteq> b)"
  "xor3 a False b = (a \<noteq> b)"
  "xor3 a b False = (a \<noteq> b)"
  by (simp_all add: xor3_def)

text \<open>Breaking up word equalities into equalities on their
bit lists. Equalities are generated and manipulated in the
reverse order to to_bl.\<close>

lemma word_eq_rbl_eq:
  "(x = y) = (rev (to_bl x) = rev (to_bl y))"
  by simp

(* Bitwise operators act pointwise on the reversed bit lists. *)
lemma rbl_word_or:
  "rev (to_bl (x OR y)) = map2 op \<or> (rev (to_bl x)) (rev (to_bl y))"
  by (simp add: map2_def zip_rev bl_word_or rev_map)

lemma rbl_word_and:
  "rev (to_bl (x AND y)) = map2 op \<and> (rev (to_bl x)) (rev (to_bl y))"
  by (simp add: map2_def zip_rev bl_word_and rev_map)

lemma rbl_word_xor:
  "rev (to_bl (x XOR y)) = map2 op \<noteq> (rev (to_bl x)) (rev (to_bl y))"
  by (simp add: map2_def zip_rev bl_word_xor rev_map)

lemma rbl_word_not:
  "rev (to_bl (NOT x)) = map Not (rev (to_bl x))"
  by (simp add: bl_word_not rev_map)

(* Subtraction is rewritten to addition of the negation. *)
lemma bl_word_sub:
  "to_bl (x - y) = to_bl (x + (- y))"
  by simp

lemma rbl_word_1:
  "rev (to_bl (1 :: ('a :: len0) word))
    = takefill False (len_of TYPE('a)) [True]"
  apply (rule_tac s="rev (to_bl (word_succ (0 :: 'a word)))" in trans)
   apply simp
  apply (simp only: rtb_rbl_ariths(1)[OF refl])
  apply simp
  apply (case_tac "len_of TYPE('a)")
   apply simp
  apply (simp add: takefill_alt)
  done

lemma rbl_word_if:
  "rev (to_bl (if P then x else y))
    = map2 (If P) (rev (to_bl x)) (rev (to_bl y))"
  by (simp add: map2_def split_def)

(* Single step of the ripple-carry adder on little-endian bit lists. *)
lemma rbl_add_carry_Cons:
  "(if car then rbl_succ else id) (rbl_add (x # xs) (y # ys))
    = xor3 x y car # (if carry x y car then rbl_succ else id)
      (rbl_add xs ys)"
  by (simp add: carry_def xor3_def)

lemma rbl_add_suc_carry_fold:
  "length xs = length ys \<Longrightarrow>
    \<forall>car. (if car then rbl_succ else id) (rbl_add xs ys)
      = (foldr (\<lambda>(x, y) res car. xor3 x y car # res (carry x y car))
          (zip xs ys) (\<lambda>_. [])) car"
  apply (erule list_induct2)
   apply simp
  apply (simp only: rbl_add_carry_Cons)
  apply simp
  done

lemma to_bl_plus_carry:
  "to_bl (x + y)
    = rev (foldr (\<lambda>(x, y) res car. xor3 x y car # res (carry x y car))
        (rev (zip (to_bl x) (to_bl y))) (\<lambda>_. []) False)"
  using rbl_add_suc_carry_fold[where xs="rev (to_bl x)" and ys="rev (to_bl y)"]
  apply (simp add: word_add_rbl[OF refl refl])
  apply (drule_tac x=False in spec)
  apply (simp add: zip_rev)
  done

(* rbl_plus cin xs ys: ripple-carry addition of little-endian bit lists
   with carry-in cin. *)
definition
  "rbl_plus cin xs ys = foldr
    (\<lambda>(x, y) res car. xor3 x y car # res (carry x y car))
    (zip xs ys) (\<lambda>_. []) cin"

lemma rbl_plus_simps:
  "rbl_plus cin (x # xs) (y # ys)
    = xor3 x y cin # rbl_plus (carry x y cin) xs ys"
  "rbl_plus cin [] ys = []"
  "rbl_plus cin xs [] = []"
  by (simp_all add: rbl_plus_def)

lemma rbl_word_plus:
  "rev (to_bl (x + y)) = rbl_plus False (rev (to_bl x)) (rev (to_bl y))"
  by (simp add: rbl_plus_def to_bl_plus_carry zip_rev)

(* Conditional successor: apply rbl_succ only when b holds. *)
definition
  "rbl_succ2 b xs = (if b then rbl_succ xs else xs)"

lemma rbl_succ2_simps:
  "rbl_succ2 b [] = []"
  "rbl_succ2 b (x # xs) = (b \<noteq> x) # rbl_succ2 (x \<and> b) xs"
  by (simp_all add: rbl_succ2_def)

(* Negation as two's complement: - x = NOT x + 1. *)
lemma twos_complement:
  "- x = word_succ (NOT x)"
  using arg_cong[OF word_add_not[where x=x], where f="\<lambda>a. a - x + 1"]
  by (simp add: word_succ_p1 word_sp_01[unfolded word_succ_p1]
           del: word_add_not)

lemma rbl_word_neg:
  "rev (to_bl (- x)) = rbl_succ2 True (map Not (rev (to_bl x)))"
  by (simp add: twos_complement word_succ_rbl[OF refl]
                bl_word_not rev_map rbl_succ2_def)

lemma rbl_word_cat:
  "rev (to_bl (word_cat x y :: ('a :: len0) word))
    = takefill False (len_of TYPE('a)) (rev (to_bl y) @ rev (to_bl x))"
  by (simp add: word_cat_bl word_rev_tf)

lemma rbl_word_slice:
  "rev (to_bl (slice n w :: ('a :: len0) word))
    = takefill False (len_of TYPE('a)) (drop n (rev (to_bl w)))"
  apply (simp add: slice_take word_rev_tf rev_take)
  apply (cases "n < len_of TYPE('b)", simp_all)
  done

lemma rbl_word_ucast:
  "rev (to_bl (ucast x :: ('a :: len0) word))
    = takefill False (len_of TYPE('a)) (rev (to_bl x))"
  apply (simp add: to_bl_ucast takefill_alt)
  apply (simp add: rev_drop)
  apply (case_tac "len_of TYPE('a) < len_of TYPE('b)")
   apply simp_all
  done

(* Shifts become drop/prepend on the reversed bit list. *)
lemma rbl_shiftl:
  "rev (to_bl (w << n)) = takefill False (size w)
    (replicate n False @ rev (to_bl w))"
  by (simp add: bl_shiftl takefill_alt word_size rev_drop)

lemma rbl_shiftr:
  "rev (to_bl (w >> n)) = takefill False (size w)
    (drop n (rev (to_bl w)))"
  by (simp add: shiftr_slice rbl_word_slice word_size)

(* drop that never yields []: when n runs past the list, keep the last
   element -- used below so the top (sign) bit stays available for >>>. *)
definition
  "drop_nonempty v n xs
    = (if n < length xs then drop n xs else [last (v # xs)])"

lemma drop_nonempty_simps:
  "drop_nonempty v (Suc n) (x # xs) = drop_nonempty x n xs"
  "drop_nonempty v 0 (x # xs) = (x # xs)"
  "drop_nonempty v n [] = [v]"
  by (simp_all add: drop_nonempty_simps drop_nonempty_def)

(* takefill that pads with the list's own last element (sign extension). *)
definition
  "takefill_last x n xs = takefill (last (x # xs)) n xs"

lemma takefill_last_simps:
  "takefill_last z (Suc n) (x # xs) = x # takefill_last x n xs"
  "takefill_last z 0 xs = []"
  "takefill_last z n [] = replicate n z"
  apply (simp_all add: takefill_last_def)
  apply (simp_all add: takefill_alt)
  done

lemma rbl_sshiftr:
  "rev (to_bl (w >>> n)) =
    takefill_last False (size w)
      (drop_nonempty False n (rev (to_bl w)))"
  apply (cases "n < size w")
   apply (simp add: bl_sshiftr takefill_last_def word_size
                    takefill_alt rev_take last_rev
                    drop_nonempty_def)
  apply (subgoal_tac "(w >>> n) = of_bl (replicate (size w) (msb w))")
   apply (simp add: word_size takefill_last_def takefill_alt
                    last_rev word_msb_alt word_rev_tf
                    drop_nonempty_def take_Cons')
   apply (case_tac "len_of TYPE('a)", simp_all)
  apply (rule word_eqI)
  apply (simp add: nth_sshiftr word_size test_bit_of_bl
                   msb_nth)
  done

lemma nth_word_of_int:
  "(word_of_int x :: ('a :: len0) word) !! n
    = (n < len_of TYPE('a) \<and> bin_nth x n)"
  apply (simp add: test_bit_bl word_size to_bl_of_bin)
  apply (subst conj_cong[OF refl], erule bin_nth_bl)
  apply (auto)
  done

lemma nth_scast:
  "(scast (x :: ('a :: len) word) :: ('b :: len) word) !! n
    = (n < len_of TYPE('b) \<and>
       (if n < len_of TYPE('a) - 1 then x !! n
        else x !! (len_of TYPE('a) - 1)))"
  by (simp add: scast_def nth_word_of_int nth_sint)

lemma rbl_word_scast:
  "rev (to_bl (scast x :: ('a :: len) word))
    = takefill_last False (len_of TYPE('a))
        (rev (to_bl x))"
  apply (rule nth_equalityI)
   apply (simp add: word_size takefill_last_def)
  apply (clarsimp simp: nth_scast takefill_last_def
                        nth_takefill word_size nth_rev to_bl_nth)
  apply (cases "len_of TYPE('b)")
   apply simp
  apply (clarsimp simp: less_Suc_eq_le linorder_not_less
                        last_rev word_msb_alt[symmetric]
                        msb_nth)
  done

(* Shift-and-add multiplication on little-endian bit lists. *)
definition
  rbl_mul :: "bool list \<Rightarrow> bool list \<Rightarrow> bool list"
where
  "rbl_mul xs ys = foldr (\<lambda>x sm. rbl_plus False (map (op \<and> x) ys) (False # sm))
    xs []"

lemma rbl_mul_simps:
  "rbl_mul (x # xs) ys
    = rbl_plus False (map (op \<and> x) ys) (False # rbl_mul xs ys)"
  "rbl_mul [] ys = []"
  by (simp_all add: rbl_mul_def)

lemma takefill_le2:
  "length xs \<le> n \<Longrightarrow>
    takefill x m (takefill x n xs)
      = takefill x m xs"
  by (simp add: takefill_alt replicate_add[symmetric])

lemma take_rbl_plus:
  "\<forall>n b. take n (rbl_plus b xs ys)
    = rbl_plus b (take n xs) (take n ys)"
  apply (simp add: rbl_plus_def take_zip[symmetric])
  apply (rule_tac list="zip xs ys" in list.induct)
   apply simp
  apply (clarsimp simp: split_def)
  apply (case_tac n, simp_all)
  done

lemma word_rbl_mul_induct:
  fixes y :: "'a :: len word" shows
  "length xs \<le> size y
    \<Longrightarrow> rbl_mul xs (rev (to_bl y))
      = take (length xs) (rev (to_bl (of_bl (rev xs) * y)))"
proof (induct xs)
  case Nil
  show ?case
    by (simp add: rbl_mul_simps)
next
  case (Cons z zs)
  have rbl_word_plus':
    "\<And>(x :: 'a word) y.
      to_bl (x + y) = rev (rbl_plus False (rev (to_bl x)) (rev (to_bl y)))"
    by (simp add: rbl_word_plus[symmetric])
  have mult_bit: "to_bl (of_bl [z] * y) = map (op \<and> z) (to_bl y)"
    apply (cases z)
     apply (simp cong: map_cong)
    apply (simp add: map_replicate_const cong: map_cong)
    done
  have shiftl: "\<And>xs. of_bl xs * 2 * y = (of_bl xs * y) << 1"
    by (simp add: shiftl_t2n)
  have zip_take_triv: "\<And>xs ys n. n = length ys
    \<Longrightarrow> zip (take n xs) ys = zip xs ys"
    by (rule nth_equalityI, simp_all)
  show ?case
    using Cons
    apply (simp add: trans [OF of_bl_append add.commute]
                     rbl_mul_simps rbl_word_plus'
                     Cons.hyps distrib_right mult_bit
                     shiftl rbl_shiftl)
    apply (simp add: takefill_alt word_size rev_map take_rbl_plus
                     min_def)
    apply (simp add: rbl_plus_def zip_take_triv)
    done
qed

lemma rbl_word_mul:
  fixes x :: "'a :: len word"
  shows "rev (to_bl (x * y)) = rbl_mul (rev (to_bl x)) (rev (to_bl y))"
  using word_rbl_mul_induct[where xs="rev (to_bl x)" and y=y]
  by (simp add: word_size)

text \<open>Breaking up inequalities into bitlist properties.\<close>

(* Comparison on reversed bit lists; the flag F is the result when the
   lists are equal (so True encodes <= and False encodes <, see below). *)
definition
  "rev_bl_order F xs ys =
     (length xs = length ys \<and>
       ((xs = ys \<and> F)
          \<or> (\<exists>n < length xs. drop (Suc n) xs = drop (Suc n) ys
                   \<and> \<not> xs ! n \<and> ys ! n)))"

lemma rev_bl_order_simps:
  "rev_bl_order F [] [] = F"
  "rev_bl_order F (x # xs) (y # ys)
     = rev_bl_order ((y \<and> \<not> x) \<or> ((y \<or> \<not> x) \<and> F)) xs ys"
  apply (simp_all add: rev_bl_order_def)
  apply (rule conj_cong[OF refl])
  apply (cases "xs = ys")
   apply (simp add: nth_Cons')
   apply blast
  apply (simp add: nth_Cons')
  apply safe
   apply (rule_tac x="n - 1" in exI)
   apply simp
  apply (rule_tac x="Suc n" in exI)
  apply simp
  done

lemma rev_bl_order_rev_simp:
  "length xs = length ys \<Longrightarrow>
   rev_bl_order F (xs @ [x]) (ys @ [y])
     = ((y \<and> \<not> x) \<or> ((y \<or> \<not> x) \<and> rev_bl_order F xs ys))"
  apply (induct arbitrary: F rule: list_induct2)
   apply (auto simp add: rev_bl_order_simps)
  done

lemma rev_bl_order_bl_to_bin:
  "length xs = length ys
     \<Longrightarrow> rev_bl_order True xs ys
          = (bl_to_bin (rev xs) \<le> bl_to_bin (rev ys))
       \<and> rev_bl_order False xs ys
          = (bl_to_bin (rev xs) < bl_to_bin (rev ys))"
  apply (induct xs ys rule: list_induct2)
   apply (simp_all add: rev_bl_order_simps bl_to_bin_app_cat)
  apply (auto simp add: bl_to_bin_def Bit_B0 Bit_B1 add1_zle_eq Bit_def)
  done

lemma word_le_rbl:
  fixes x :: "('a :: len0) word"
  shows "(x \<le> y) = rev_bl_order True (rev (to_bl x)) (rev (to_bl y))"
  by (simp add: rev_bl_order_bl_to_bin word_le_def)

lemma word_less_rbl:
  fixes x :: "('a :: len0) word"
  shows "(x < y) = rev_bl_order False (rev (to_bl x)) (rev (to_bl y))"
  by (simp add: word_less_alt rev_bl_order_bl_to_bin)

lemma word_sint_msb_eq:
  "sint x = uint x - (if msb x then 2 ^ size x else 0)"
  apply (cases "msb x")
   apply (rule word_sint.Abs_eqD[where 'a='a], simp_all)
    apply (simp add: word_size wi_hom_syms
                     word_of_int_2p_len)
   apply (simp add: sints_num word_size)
   apply (rule conjI)
    apply (simp add: le_diff_eq')
    apply (rule order_trans[where y="2 ^ (len_of TYPE('a) - 1)"])
     apply (simp add: power_Suc[symmetric])
    apply (simp add: linorder_not_less[symmetric] mask_eq_iff[symmetric])
    apply (rule notI, drule word_eqD[where x="size x - 1"])
    apply (simp add: msb_nth word_ops_nth_size word_size)
   apply (simp add: order_less_le_trans[where y=0])
  apply (rule word_uint.Abs_eqD[where 'a='a], simp_all)
  apply (simp add: linorder_not_less uints_num word_msb_sint)
  apply (rule order_less_le_trans[OF sint_lt])
  apply simp
  done

(* Signed comparisons reduce to the unsigned ones plus msb tests. *)
lemma word_sle_msb_le:
  "(x <=s y) = ((msb y --> msb x) \<and>
                 ((msb x \<and> \<not> msb y) \<or> (x <= y)))"
  apply (simp add: word_sle_def word_sint_msb_eq word_size
                   word_le_def)
  apply safe
   apply (rule order_trans[OF _ uint_ge_0])
   apply (simp add: order_less_imp_le)
  apply (erule notE[OF leD])
  apply (rule order_less_le_trans[OF _ uint_ge_0])
  apply simp
  done

lemma word_sless_msb_less:
  "(x <s y) = ((msb y --> msb x) \<and>
                 ((msb x \<and> \<not> msb y) \<or> (x < y)))"
  by (auto simp add: word_sless_def word_sle_msb_le)

(* Apply f to the last element only; used to flip the sign bit when
   turning signed comparisons into rev_bl_order. *)
definition
  "map_last f xs = (if xs = [] then [] else butlast xs @ [f (last xs)])"

lemma map_last_simps:
  "map_last f [] = []"
  "map_last f [x] = [f x]"
  "map_last f (x # y # zs) = x # map_last f (y # zs)"
  by (simp_all add: map_last_def)

lemma word_sle_rbl:
  "(x <=s y) = rev_bl_order True (map_last Not (rev (to_bl x)))
     (map_last Not (rev (to_bl y)))"
  using word_msb_alt[where w=x] word_msb_alt[where w=y]
  apply (simp add: word_sle_msb_le word_le_rbl)
  apply (subgoal_tac "length (to_bl x) = length (to_bl y)")
   apply (cases "to_bl x", simp)
   apply (cases "to_bl y", simp)
   apply (clarsimp simp: map_last_def rev_bl_order_rev_simp)
   apply auto
  done

lemma word_sless_rbl:
  "(x <s y) = rev_bl_order False (map_last Not (rev (to_bl x)))
     (map_last Not (rev (to_bl y)))"
  using word_msb_alt[where w=x] word_msb_alt[where w=y]
  apply (simp add: word_sless_msb_less word_less_rbl)
  apply (subgoal_tac "length (to_bl x) = length (to_bl y)")
   apply (cases "to_bl x", simp)
   apply (cases "to_bl y", simp)
   apply (clarsimp simp: map_last_def rev_bl_order_rev_simp)
   apply auto
  done

text \<open>Lemmas for unpacking rev (to_bl n) for numerals n and also
for irreducible values and expressions.\<close>

lemma rev_bin_to_bl_simps:
  "rev (bin_to_bl 0 x) = []"
  "rev (bin_to_bl (Suc n) (numeral (num.Bit0 nm)))
    = False # rev (bin_to_bl n (numeral nm))"
  "rev (bin_to_bl (Suc n) (numeral (num.Bit1 nm)))
    = True # rev (bin_to_bl n (numeral nm))"
  "rev (bin_to_bl (Suc n) (numeral (num.One)))
    = True # replicate n False"
  "rev (bin_to_bl (Suc n) (- numeral (num.Bit0 nm)))
    = False # rev (bin_to_bl n (- numeral nm))"
  "rev (bin_to_bl (Suc n) (- numeral (num.Bit1 nm)))
    = True # rev (bin_to_bl n (- numeral (nm + num.One)))"
  "rev (bin_to_bl (Suc n) (- numeral (num.One)))
    = True # replicate n True"
  "rev (bin_to_bl (Suc n) (- numeral (num.Bit0 nm + num.One)))
    = True # rev (bin_to_bl n (- numeral (nm + num.One)))"
  "rev (bin_to_bl (Suc n) (- numeral (num.Bit1 nm + num.One)))
    = False # rev (bin_to_bl n (- numeral (nm + num.One)))"
  "rev (bin_to_bl (Suc n) (- numeral (num.One + num.One)))
    = False # rev (bin_to_bl n (- numeral num.One))"
  apply (simp_all add: bin_to_bl_def)
  apply (simp_all only: bin_to_bl_aux_alt)
  apply (simp_all)
  apply (simp_all add: bin_to_bl_zero_aux bin_to_bl_minus1_aux)
  done

lemma to_bl_upt:
  "to_bl x = rev (map (op !! x) [0 ..< size x])"
  apply (rule nth_equalityI)
   apply (simp add: word_size)
  apply (clarsimp simp: to_bl_nth word_size nth_rev)
  done

lemma rev_to_bl_upt:
  "rev (to_bl x) = map (op !! x) [0 ..< size x]"
  by (simp add: to_bl_upt)

lemma upt_eq_list_intros:
  "j <= i \<Longrightarrow> [i ..< j] = []"
  "\<lbrakk> i = x; x < j; [x + 1 ..< j] = xs \<rbrakk> \<Longrightarrow> [i ..< j] = (x # xs)"
  by (simp_all add: upt_eq_Nil_conv upt_eq_Cons_conv)

text \<open>Tactic definition\<close>

ML \<open>
structure Word_Bitwise_Tac =
struct

val word_ss = simpset_of @{theory_context Word};

fun mk_nat_clist ns = List.foldr
  (uncurry (Thm.mk_binop @{cterm "Cons :: nat => _"}))
  @{cterm "[] :: nat list"} ns;

(* Rewrites closed [i ..< j] terms to explicit nat lists. *)
fun upt_conv ctxt ct =
  case Thm.term_of ct of
    (@{const upt} $ n $ m) =>
      let
        val (i, j) = apply2 (snd o HOLogic.dest_number) (n, m);
        val ns = map (Numeral.mk_cnumber @{ctyp nat}) (i upto (j - 1))
          |> mk_nat_clist;
        val prop = Thm.mk_binop @{cterm "op = :: nat list => _"} ct ns
          |> Thm.apply @{cterm Trueprop};
      in
        try (fn () =>
          Goal.prove_internal ctxt [] prop
            (K (REPEAT_DETERM (resolve_tac ctxt @{thms upt_eq_list_intros} 1
                ORELSE simp_tac (put_simpset word_ss ctxt) 1))) |> mk_meta_eq) ()
      end
  | _ => NONE;

val expand_upt_simproc =
  Simplifier.make_simproc @{context} "expand_upt"
    {lhss = [@{term "upt x y"}], proc = K upt_conv};

(* Evaluates len_of TYPE('a) to its numeral value. *)
fun word_len_simproc_fn ctxt ct =
  case Thm.term_of ct of
    Const (@{const_name len_of}, _) $ t => (let
        val T = fastype_of t |> dest_Type |> snd |> the_single
        val n = Numeral.mk_cnumber @{ctyp nat} (Word_Lib.dest_binT T);
        val prop = Thm.mk_binop @{cterm "op = :: nat => _"} ct n
          |> Thm.apply @{cterm Trueprop};
      in Goal.prove_internal ctxt [] prop (K (simp_tac (put_simpset word_ss ctxt) 1))
        |> mk_meta_eq |> SOME end
      handle TERM _ => NONE | TYPE _ => NONE)
  | _ => NONE;

val word_len_simproc =
  Simplifier.make_simproc @{context} "word_len"
    {lhss = [@{term "len_of x"}], proc = K word_len_simproc_fn};

(* convert 5 or nat 5 to Suc 4 when n_sucs = 1, Suc (Suc 4) when n_sucs = 2,
   or just 5 (discarding nat) when n_sucs = 0 *)
fun nat_get_Suc_simproc_fn n_sucs ctxt ct =
  let
    val (f $ arg) = Thm.term_of ct;
    val n = (case arg of @{term nat} $ n => n | n => n)
      |> HOLogic.dest_number |> snd;
    val (i, j) = if n > n_sucs then (n_sucs, n - n_sucs)
      else (n, 0);
    val arg' = List.foldr (op $) (HOLogic.mk_number @{typ nat} j)
      (replicate i @{term Suc});
    val _ = if arg = arg' then raise TERM ("", []) else ();
    fun propfn g = HOLogic.mk_eq (g arg, g arg')
      |> HOLogic.mk_Trueprop |> Thm.cterm_of ctxt;
    val eq1 = Goal.prove_internal ctxt [] (propfn I)
      (K (simp_tac (put_simpset word_ss ctxt) 1));
  in Goal.prove_internal ctxt [] (propfn (curry (op $) f))
      (K (simp_tac (put_simpset HOL_ss ctxt addsimps [eq1]) 1))
    |> mk_meta_eq |> SOME end
  handle TERM _ => NONE;

fun nat_get_Suc_simproc n_sucs ts =
  Simplifier.make_simproc @{context} "nat_get_Suc"
    {lhss = map (fn t => t $ @{term "n :: nat"}) ts,
     proc = K (nat_get_Suc_simproc_fn n_sucs)};

val no_split_ss =
  simpset_of (put_simpset HOL_ss @{context}
    |> Splitter.del_split @{thm if_split});

(* First simpset turns word (in)equalities into rev_bl_order/bit-list goals;
   the remaining simpsets normalise the resulting bit-list expressions. *)
val expand_word_eq_sss =
  (simpset_of (put_simpset HOL_basic_ss @{context} addsimps
       @{thms word_eq_rbl_eq word_le_rbl word_less_rbl word_sle_rbl word_sless_rbl}),
  map simpset_of [
   put_simpset no_split_ss @{context} addsimps
    @{thms rbl_word_plus rbl_word_and rbl_word_or rbl_word_not
                                rbl_word_neg bl_word_sub rbl_word_xor
                                rbl_word_cat rbl_word_slice rbl_word_scast
                                rbl_word_ucast rbl_shiftl rbl_shiftr rbl_sshiftr
                                rbl_word_if},
   put_simpset no_split_ss @{context} addsimps
    @{thms to_bl_numeral to_bl_neg_numeral to_bl_0 rbl_word_1},
   put_simpset no_split_ss @{context} addsimps
    @{thms rev_rev_ident rev_replicate rev_map to_bl_upt word_size}
          addsimprocs [word_len_simproc],
   put_simpset no_split_ss @{context} addsimps
    @{thms list.simps split_conv replicate.simps list.map
                                zip_Cons_Cons zip_Nil drop_Suc_Cons drop_0 drop_Nil
                                foldr.simps map2_Cons map2_Nil takefill_Suc_Cons
                                takefill_Suc_Nil takefill.Z rbl_succ2_simps
                                rbl_plus_simps rev_bin_to_bl_simps append.simps
                                takefill_last_simps drop_nonempty_simps
                                rev_bl_order_simps}
          addsimprocs [expand_upt_simproc,
                       nat_get_Suc_simproc 4
                         [@{term replicate}, @{term "takefill x"},
                          @{term drop}, @{term "bin_to_bl"},
                          @{term "takefill_last x"},
                          @{term "drop_nonempty x"}]],
    put_simpset no_split_ss @{context} addsimps @{thms xor3_simps carry_simps if_bool_simps}
  ])

(* Apply the expansion simpset, then each normalisation simpset in turn. *)
fun tac ctxt =
  let
    val (ss, sss) = expand_word_eq_sss;
  in
    foldr1 (op THEN_ALL_NEW)
      ((CHANGED o safe_full_simp_tac (put_simpset ss ctxt)) ::
        map (fn ss => safe_full_simp_tac (put_simpset ss ctxt)) sss)
  end;

end
\<close>

method_setup word_bitwise =
  \<open>Scan.succeed (fn ctxt => Method.SIMPLE_METHOD (Word_Bitwise_Tac.tac ctxt 1))\<close>
  "decomposer for word equalities and inequalities into bit propositions"

end
|
{"author": "SEL4PROJ", "repo": "jormungand", "sha": "bad97f9817b4034cd705cd295a1f86af880a7631", "save_path": "github-repos/isabelle/SEL4PROJ-jormungand", "path": "github-repos/isabelle/SEL4PROJ-jormungand/jormungand-bad97f9817b4034cd705cd295a1f86af880a7631/case_study/isabelle/src/HOL/Word/WordBitwise.thy"}
|
# basic libs
import numpy as np
import json
import os
import random
from scipy import signal
# pytorch
import torch
from torch.utils.data import Dataset
np.random.seed(42)
class Dataset_train(Dataset):
def __init__(self, patients,aug):
self.patients = patients
self.aug = aug
def __len__(self):
return len(self.patients)
def __getitem__(self, idx):
X, y = self.load_data(idx)
X = torch.tensor(X, dtype=torch.float)
y = torch.tensor(y, dtype=torch.float)
return X, y
def load_data(self, id, train=True):
if self.patients[id][0] == 'A':
data_folder = 'A'
elif self.patients[id][0] == 'Q':
data_folder = 'B'
elif self.patients[id][0] == 'I':
data_folder = 'C'
elif self.patients[id][0] == 'S':
data_folder = 'D'
elif self.patients[id][0] == 'H':
data_folder = 'E'
elif self.patients[id][0] == 'E':
data_folder = 'F'
else:
a = self.patients[id]
print(1)
data_folder = f'./data/{data_folder}/formatted/'
# load waveforms
X = np.load(data_folder+self.patients[id] + '.npy')
# load annotation
y = json.load(open(data_folder + self.patients[id] + '.json'))
# Scale waveform amplitudes
X = (X - np.mean(X)) / np.std(X)
"""
Maybe try this (see method below).
X = apply_amplitude_scaling(X=X, y=y)
"""
# TODO: Seb's augmentation implementation point
# We need a way to inform this method of the sample rate for the dataset.
fs_training = 350
if self.aug is True:
X = self.apply_augmentation(waveform=X, meta_data=y, fs_training=fs_training, max_samples=19000)
#padding
if X.shape[0] < 38000:
padding = np.zeros((38000 - X.shape[0], X.shape[1]))
X = np.concatenate([X, padding], axis=0)
return X, y['labels_training_merged']
# if train:
# # load annotation
# y = json.load(open(data_folder+self.patients[id] + '.json'))
#
# return X, y['labels_training_merged']
# else:
# return X
@staticmethod
def apply_amplitude_scaling(X, y):
"""Get rpeaks for each channel and scale waveform amplitude by median rpeak amplitude of lead I."""
if y['rpeaks']:
for channel_rpeaks in y['rpeaks']:
if channel_rpeaks:
return X / np.median(X[y['rpeaks'][0], 0])
return (X - X.mean()) / X.std()
def apply_augmentation(self, waveform, meta_data, fs_training, max_samples):
# Random resample
waveform = self._random_resample(waveform=waveform, meta_data=meta_data,
fs_training=fs_training, probability=0.25, max_samples=max_samples)
# Random amplitude scale
waveform = self._random_scale(waveform=waveform, probability=0.5)
# Apply synthetic noise
waveform = self._add_synthetic_noise(waveform=waveform, fs_training=fs_training, probability=0.25)
return waveform
def _random_resample(self, waveform, meta_data, fs_training, probability, max_samples):
"""Randomly resample waveform.
bradycardia=3, sinus bradycardia=20, sinus tachycardia=22
"""
if (
meta_data['hr'] != 'nan' and
all(meta_data['labels_training_merged'][label] == 0 for label in [3, 20, 22]) and
self._coin_flip(probability=probability)
):
# Get waveform duration
duration = waveform.shape[0] / fs_training
# Physiological limits
hr_new = int(meta_data['hr'] * np.random.uniform(0.9, 1.1))
if hr_new > 300:
hr_new = 300
elif hr_new < 40:
hr_new = 40
else:
pass
# Get new duration
duration_new = duration * meta_data['hr'] / hr_new
# Get number of samples
samples = int(duration_new * fs_training)
if samples > max_samples:
samples = max_samples
# Resample waveform
waveform = signal.resample_poly(waveform, samples, waveform.shape[0], axis=0).astype(np.float32)
return waveform
else:
return waveform
def _random_scale(self, waveform, probability):
"""Apply random scale factor between 0.25 and 3 to the waveform amplitudes."""
# Get random scale factor
scale_factor = random.uniform(0.25, 3.)
if self._coin_flip(probability):
return waveform * scale_factor
return waveform
def _add_synthetic_noise(self, waveform, fs_training, probability):
"""Add different kinds of synthetic noise to the signal."""
waveform = waveform.squeeze()
for idx in range(waveform.shape[1]):
waveform[:, idx] = self._generate_baseline_wandering_noise(waveform=waveform[:, idx],
fs=fs_training, probability=probability)
waveform[:, idx] = self._generate_high_frequency_noise(waveform=waveform[:, idx],
fs=fs_training, probability=probability)
waveform[:, idx] = self._generate_gaussian_noise(waveform=waveform[:, idx], probability=probability)
waveform[:, idx] = self._generate_pulse_noise(waveform=waveform[:, idx], probability=probability)
return waveform
def _generate_baseline_wandering_noise(self, waveform, fs, probability):
"""Adds baseline wandering to the input signal."""
waveform = waveform.squeeze()
if self._coin_flip(probability):
# Generate time array
time = np.arange(len(waveform)) * 1 / fs
# Get number of baseline signals
baseline_signals = random.randint(1, 5)
# Loop through baseline signals
for baseline_signal in range(baseline_signals):
# Add noise
waveform += random.uniform(0.01, 0.75) * np.sin(2 * np.pi * random.uniform(0.001, 0.5) *
time + random.uniform(0, 60))
return waveform
def _generate_high_frequency_noise(self, waveform, fs, probability=0.5):
"""Adds high frequency sinusoidal noise to the input signal."""
waveform = waveform.squeeze()
if self._coin_flip(probability):
# Generate time array
time = np.arange(len(waveform)) * 1 / fs
# Add noise
waveform += random.uniform(0.001, 0.3) * np.sin(2 * np.pi * random.uniform(50, 200) *
time + random.uniform(0, 60))
return waveform
def _generate_gaussian_noise(self, waveform, probability=0.5):
"""Adds white noise noise to the input signal."""
waveform = waveform.squeeze()
if self._coin_flip(probability):
waveform += np.random.normal(loc=0.0, scale=random.uniform(0.01, 0.25), size=len(waveform))
return waveform
    def _generate_pulse_noise(self, waveform, probability=0.5):
        """Add a single Gaussian-derivative pulse artefact to the signal."""
        waveform = waveform.squeeze()
        if self._coin_flip(probability):
            # Pulse length is a random fraction of the waveform length.
            # NOTE(review): uniform(0.05, 0.010) has its bounds reversed -- it
            # yields fractions in [0.010, 0.05]; the intent was probably
            # uniform(0.05, 0.10). Confirm before changing.
            # NOTE(review): scipy.signal.gaussian moved to
            # scipy.signal.windows.gaussian in newer SciPy releases.
            pulse = signal.gaussian(int(len(waveform) * random.uniform(0.05, 0.010)), std=random.randint(50, 200))
            # Differentiate the Gaussian window to get a biphasic pulse shape
            pulse = np.diff(pulse)
            # Free samples left for positioning the pulse inside the waveform
            remainder = len(waveform) - len(pulse)
            if remainder >= 0:
                # Place the pulse at a random offset via asymmetric zero-padding;
                # if the pulse were longer than the waveform, nothing is added.
                left_pad = int(remainder * random.uniform(0., 1.))
                right_pad = remainder - left_pad
                pulse = np.pad(pulse, (left_pad, right_pad), 'constant', constant_values=0)
                pulse = pulse / pulse.max()
                # Scale relative to the signal's own peak amplitude
                waveform += pulse * random.uniform(waveform.max() * 1.5, waveform.max() * 2)
        return waveform
@staticmethod
def _coin_flip(probability):
if random.random() < probability:
return True
return False
def get_labels(self):
"""
:param ids: a list of ids for loading from the database
:return: y: numpy array of labels, shape(n_samples,n_labels)
"""
for index, record in enumerate(self.patients):
if record[0] == 'A':
data_folder = 'A'
elif record[0] == 'Q':
data_folder = 'B'
elif record[0] == 'I':
data_folder = 'C'
elif record[0] == 'S':
data_folder = 'D'
elif record[0] == 'H':
data_folder = 'E'
elif record[0] == 'E':
data_folder = 'F'
data_folder = f'./data/{data_folder}/formatted/'
if index == 0:
y = np.array([json.load(open(data_folder+record + '.json'))['labels_training_merged']])
y = np.reshape(y, (1, 27))
else:
temp = np.array([json.load(open(data_folder+record + '.json'))['labels_training_merged']])
temp = np.reshape(temp, (1, 27))
y = np.concatenate((y, temp), axis=0)
return y
def my_collate(self,batch):
"""
This function was created to handle a variable-length of the
:param batch: tuple(data,target)
:return: list[data_tensor(batch_size,n_samples_channels), target_tensor(batch_size,n_classes)]
"""
data = [item[0] for item in batch]
target = [item[1] for item in batch]
# define the max size of the batch
m_size = 0
for element in data:
if m_size < element.shape[0]:
m_size = element.shape[0]
# zero pooling
for index, element in enumerate(data):
if m_size > element.shape[0]:
padding = np.zeros((m_size-element.shape[0], element.shape[1]))
padding = torch.from_numpy(padding)
data[index] = torch.cat([element, padding], dim=0)
padding = padding.detach()
data = torch.stack(data)
target = torch.stack(target)
return [data, target]
class Dataset_test(Dataset_train):
    """Inference-time dataset: yields only the input waveform, no label."""

    def __init__(self, patients):
        super().__init__(patients=patients)

    def __getitem__(self, idx):
        # The label returned by load_data is intentionally discarded at test time
        waveform, _ = self.load_data(idx, train=False)
        return torch.tensor(waveform, dtype=torch.float)
|
{"hexsha": "9f26b1d0b181eeb219c726b796c6a784cee9d5c5", "size": 10724, "ext": "py", "lang": "Python", "max_stars_repo_path": "kardioml/models/deepecg/train/data_generator_pytorch.py", "max_stars_repo_name": "Seb-Good/physionet-challenge-2020", "max_stars_repo_head_hexsha": "c6f1648a148335babc0a26d8a589120616327548", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-12-18T08:09:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T04:51:46.000Z", "max_issues_repo_path": "kardioml/models/deepecg/train/data_generator_pytorch.py", "max_issues_repo_name": "Seb-Good/physionet-challenge-2020", "max_issues_repo_head_hexsha": "c6f1648a148335babc0a26d8a589120616327548", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kardioml/models/deepecg/train/data_generator_pytorch.py", "max_forks_repo_name": "Seb-Good/physionet-challenge-2020", "max_forks_repo_head_hexsha": "c6f1648a148335babc0a26d8a589120616327548", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-09-16T21:07:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T14:01:24.000Z", "avg_line_length": 34.0444444444, "max_line_length": 114, "alphanum_fraction": 0.5609847072, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2454}
|
from collections import defaultdict
from typing import Any, Dict, List, Optional, Type, Tuple, Mapping, Iterable
import math
from functools import total_ordering
import numpy as np
import yaml
import shapely.geometry
import shapely.ops
import conveyor_msgs.msg
# Type aliases for the conveyor-map geometry.
Range = Tuple[float, float]  # normalized (start, end) interval along a line
Position = Tuple[float, float, float]  # (x, y, z) coordinates -- units assumed meters; TODO confirm
LED = Tuple[str, Position]  # (uid, position) pair for a single LED
class Belt:
    """A conveyor belt, described by its centerline and physical width."""

    def __init__(self, uid: str, centerline: shapely.geometry.LineString, width: float):
        self.uid = uid
        self.centerline = centerline
        self.width = width

    def __hash__(self) -> int:
        # Hash by uid only; equality stays the default (identity), which is
        # consistent as long as uids are unique within a Map.
        return hash(self.uid)

    def __repr__(self) -> str:
        return repr(self.uid)

    @property
    def length(self) -> float:
        """Length of the belt's centerline."""
        return self.centerline.length

    @classmethod
    def load(cls: Type, uid: str, centerline: Any, width: float, **kwargs: Any) -> 'Belt':
        """Build a Belt from deserialized config data.

        ``centerline`` is a coordinate sequence accepted by
        ``shapely.geometry.LineString`` (it was previously mis-annotated as
        ``str``, and ``uid`` as ``int``). Extra config keys are swallowed by
        ``**kwargs``.
        """
        return cls(uid=uid, width=width, centerline=shapely.geometry.LineString(centerline))

    @classmethod
    def from_strip(cls: Type, strip: 'Strip', width: float = 0.1) -> 'Belt':
        """Emulate a belt from an LED strip (used when no belts are configured)."""
        return cls(uid=f'strip_{strip.uid}', width=width, centerline=strip.line)
@total_ordering
class Strip:
    """An addressable LED strip laid out along a line in space."""

    def __init__(self, uid: int, line: shapely.geometry.LineString, pixels: int, direction: int):
        self.uid = uid
        self.line = line
        self.pixels = pixels          # number of addressable LEDs
        self.direction = direction    # +1: pixel order follows the line; -1: reversed

    @property
    def length(self) -> float:
        """Length of the strip's line."""
        return self.line.length

    def draw(self, color: List[int], intervals: List[Range]) -> np.ndarray:
        """Render the strip as a (pixels, 3) RGB array.

        Each interval is a normalized (start, end) pair in [0, 1]; pixels whose
        index falls inside any interval get ``color``, all others stay black.
        """
        data: List[List[int]] = [[0, 0, 0]] * (self.pixels)
        le = self.pixels
        for a, b in intervals:
            # Round inward so only fully covered pixels are lit
            i0 = math.ceil(a * le)
            i1 = math.floor(b * le)
            data[i0:i1] = [color] * (i1 - i0)
        if self.direction == -1:
            # Physical pixel order runs opposite to the line direction
            data = data[::-1]
        return np.array(data)

    def __hash__(self) -> int:
        return hash(self.uid)

    def __repr__(self) -> str:
        return repr(self.uid)

    def __eq__(self, other: Any) -> bool:
        # Identity equality; total_ordering derives the remaining comparisons
        # from this and __lt__ below.
        return self is other

    def __ne__(self, other: Any) -> bool:
        return not (self == other)

    def __lt__(self, other: 'Strip') -> bool:
        return self.uid < other.uid

    @classmethod
    def load(cls, uid: int, line: Any, pixels: int, direction: int, **kwargs: Any) -> 'Strip':
        """Build a Strip from deserialized config data; extra keys are ignored.

        ``line`` is a coordinate sequence accepted by shapely's LineString
        (previously mis-annotated as ``str``).
        """
        return cls(uid=uid, pixels=pixels, line=shapely.geometry.LineString(line),
                   direction=direction)
# A decomposed belt maps normalized belt intervals onto strip intervals: each
# entry pairs a (start, end) range on the belt's centerline with the strip
# (and the normalized range on that strip) covering it.
DecomposedBelt = List[Tuple[Range, Tuple[Strip, Range]]]
Decomposition = Mapping[Belt, DecomposedBelt]
PositionOnBelt = Tuple[Belt, float]  # (belt, normalized position along its centerline)
IntervalOnStrip = Tuple[Strip, Range]  # (strip, normalized range on that strip)
def overlap(dec: DecomposedBelt, a: float, b: float, belt: Belt) -> DecomposedBelt:
    """Restrict a decomposed belt to the normalized interval [a, b].

    Entries disjoint from [a, b] are dropped; entries that straddle an
    endpoint are clipped, and the matching strip-range endpoint is recomputed
    by projecting the clipped belt point back onto the strip's line.
    """
    rs: DecomposedBelt = []
    for ((c, d), strip_interval) in dec:
        if b < c or a > d:
            # No overlap with this entry
            continue
        strip, (e, f) = strip_interval
        x, y = c, d
        if b < d:
            # Clip the right end to b and re-project it onto the strip
            y = b
            p = belt.centerline.interpolate(b, normalized=True)
            f = strip.line.project(p, normalized=True)
        if a > c:
            # Clip the left end to a and re-project it onto the strip
            x = a
            p = belt.centerline.interpolate(a, normalized=True)
            e = strip.line.project(p, normalized=True)
        rs.append(((x, y), (strip, (e, f))))
    return rs
class LEDs:
    """Lookup table of LED positions, keyed by LED uid."""

    def __init__(self, leds: 'List[LED]') -> None:
        self.leds = {uid: position for uid, position in leds}

    @classmethod
    def load(cls, data: 'Dict[str, Any]', **kwargs: 'Any') -> 'LEDs':
        """Build the table from a deserialized config mapping; a missing
        'leds' key yields an empty table."""
        entries = data.get('leds', {})
        return cls(leds=[(uid, value['position']) for uid, value in entries.items()])

    @classmethod
    def load_file(cls, path: str, **kwargs: 'Any') -> 'LEDs':
        """Read a YAML config file and build the LED table from it."""
        with open(path, 'r') as f:
            data = yaml.safe_load(f)
        return cls.load(data, **kwargs)
class Map:
    """Topology of conveyor belts and the LED strips mounted along them.

    On construction the map derives:
      * belt adjacency (which belt feeds into which) from endpoint proximity
        and/or explicit links, and
      * a decomposition ``dec`` mapping each belt to the strip intervals that
        run alongside it.
    """

    @property
    def belts(self) -> Iterable[Belt]:
        return self._belts.values()

    @property
    def bounding_box(self) -> Tuple[float, float, float, float]:
        """(minx, miny, maxx, maxy) bounds of the union of belt centerlines."""
        union = shapely.ops.unary_union([belt.centerline for belt in self.belts])
        return union.bounds

    @property
    def strips(self) -> Iterable[Strip]:
        return self._strips.values()

    def point_from_msg(self, msg: conveyor_msgs.msg.PositionOnStrip) -> Tuple[float, float, float]:
        """Resolve a (belt name, normalized position) message to xyz coordinates."""
        belt = self._belts[msg.name]
        x, y, z = belt.centerline.interpolate(msg.position, normalized=True).coords[0]
        return x, y, z

    def __init__(self, belts: List[Belt], strips: List[Strip], links: List[Tuple[str, str]],
                 tol: float = 0.01, tol_o: float = 1e-3) -> None:
        """Build the belt graph and the belt-to-strip decomposition.

        Args:
            belts: belt geometries; if empty, belts are emulated from strips.
            strips: LED strip geometries.
            links: explicit (from_uid, to_uid) belt connections added on top
                of the proximity-detected ones.
            tol: distance tolerance for adjacency/coverage tests (same length
                units as the geometry).
            tol_o: minimum normalized overlap for a belt/strip pairing.
        """
        if not belts:
            # There are only strips ... emulate belts with strips
            belts = [Belt.from_strip(strip) for strip in strips]
        self._belts = {b.uid: b for b in belts}
        self._strips = {s.uid: s for s in strips}
        self._next_belt: Dict[Belt, Belt] = {}
        self._previous_belt: Dict[Belt, Belt] = {}
        # Proximity-based adjacency: a belt endpoint lying within half a belt
        # width (plus tol) of another belt's centerline links the two belts.
        # NOTE(review): the threshold uses b1's width, not b2's -- confirm intended.
        for b1 in self.belts:
            p1 = b1.centerline.interpolate(0, normalized=True)   # start point
            p2 = b1.centerline.interpolate(1, normalized=True)   # end point
            m = b1.width * 0.5 + tol
            for b2 in self.belts:
                if b1 is b2:
                    continue
                if p1.distance(b2.centerline) < m:
                    self._previous_belt[b1] = b2
                if p2.distance(b2.centerline) < m:
                    self._next_belt[b1] = b2
        if links:
            # Explicit links extend/override the detected adjacency
            for b1_id, b2_id in links:
                b1, b2 = self._belts[b1_id], self._belts[b2_id]
                self._next_belt[b1] = b2
                self._previous_belt[b2] = b1
        # Pair each strip with the belts it runs alongside, recording which
        # normalized belt interval maps onto which strip interval.
        dec: Decomposition = defaultdict(list)
        for strip in self.strips:
            for belt in self.belts:
                l1 = strip.line
                z1 = l1.coords[0][2]
                l2 = belt.centerline
                z2 = l2.coords[0][2]
                d = l1.distance(l2)
                # Candidate pairing: similar height and laterally close.
                # NOTE(review): the 0.5 height threshold is hard-coded;
                # units presumed meters -- confirm.
                if (abs(z1 - z2) < 0.5 and d < 0.5 * belt.width + tol):
                    # Project belt endpoints onto the strip, then back onto
                    # the belt, to obtain matching normalized intervals.
                    a = l1.project(l2.interpolate(0, normalized=True), normalized=True)
                    b = l1.project(l2.interpolate(1, normalized=True), normalized=True)
                    c = l2.project(l1.interpolate(a, normalized=True), normalized=True)
                    d = l2.project(l1.interpolate(b, normalized=True), normalized=True)
                    # Keep only pairings with a non-degenerate overlap
                    if (b - a) > tol_o and (d - c) > tol_o:
                        dec[belt].append(((c, d), (strip, (a, b))))
        self.dec = dec

    def draw(self) -> None:
        """Plot belts (solid) and strips (dashed) with matplotlib."""
        from matplotlib import pyplot
        for belt in self.belts:
            pyplot.plot(*belt.centerline.xy, '-', label=f'Belt {belt}')
        for strip in self.strips:
            pyplot.plot(*strip.line.xy, '--', label=f'Strip {strip}')
        pyplot.legend()
        pyplot.axis('equal')

    @classmethod
    def load(cls, data: Dict[str, Any], **kwargs: Any) -> 'Map':
        """Build a Map from a deserialized config mapping."""
        return cls(
            belts=[Belt.load(uid=uid, **value) for uid, value in data.get('belts', {}).items()],
            strips=[Strip.load(uid=uid, **value) for uid, value in data.get('strips', {}).items()],
            links=[(link['from'], link['to']) for link in data.get('links', [])], **kwargs)

    @classmethod
    def load_file(cls, path: str, **kwargs: Any) -> 'Map':
        """Read a YAML map description and build a Map from it."""
        with open(path, 'r') as f:
            data = yaml.safe_load(f)
        return cls.load(data, **kwargs)

    def project_interval(self, a: PositionOnBelt, b: PositionOnBelt) -> Decomposition:
        """Project the belt interval from ``a`` to ``b`` onto the covering strips.

        The interval may span at most two belts: from ``a`` to the end of its
        belt, and from the start of ``b``'s belt to ``b``.
        """
        (belt_a, s_a) = a
        (belt_b, s_b) = b
        if belt_a is belt_b:
            return {belt_a: overlap(self.dec[belt_a], s_a, s_b, belt_a)}
        return {belt_a: overlap(self.dec[belt_a], s_a, 1, belt_a),
                belt_b: overlap(self.dec[belt_b], 0, s_b, belt_b)}

    def next_belt(self, belt: Belt) -> Optional[Belt]:
        """Belt fed by this belt's end, if any."""
        return self._next_belt.get(belt)

    def previous_belt(self, belt: Belt) -> Optional[Belt]:
        """Belt feeding this belt's start, if any."""
        return self._previous_belt.get(belt)

    def interval(self, belt: Belt, position: float, width: float
                 ) -> Tuple[PositionOnBelt, PositionOnBelt]:
        """Interval of ``width`` (length units) centered at normalized ``position``.

        The interval may spill over onto the previous/next belt; with no
        neighbour it is clamped to [0, 1] on this belt.
        """
        le = belt.centerline.length
        s = position * belt.centerline.length
        a = s - width / 2
        b = s + width / 2
        p1, p2 = (belt, a / le), (belt, b / le)  # defaults, overwritten below
        if a < 0:
            # Spills over the start: continue on the previous belt if any
            p_belt = self.previous_belt(belt)
            if p_belt:
                a += p_belt.centerline.length
                p1 = (p_belt, a / p_belt.centerline.length)
            else:
                p1 = (belt, 0)
        else:
            p1 = (belt, a / le)
        if b > le:
            # Spills over the end: continue on the next belt if any
            n_belt = self.next_belt(belt)
            if n_belt:
                b -= belt.centerline.length
                p2 = (n_belt, b / n_belt.centerline.length)
            else:
                p2 = (belt, 1)
        else:
            p2 = (belt, b / le)
        return p1, p2

    def strips_near(self, belt_uid: str, position: float, width: float
                    ) -> List[IntervalOnStrip]:
        """Strip intervals covering a width-wide window around a belt position."""
        belt = self._belts[belt_uid]
        a, b = self.interval(belt, position, width)
        projection = self.project_interval(a, b)
        return [strip_interval for intervals in projection.values()
                for _, strip_interval in intervals if strip_interval]
|
{"hexsha": "c8bec67319ad9f1fe2e6533a6f9200362f264920", "size": 9258, "ext": "py", "lang": "Python", "max_stars_repo_path": "docker/pointing-user-interface/code/conveyor_utils/conveyor_utils/utils.py", "max_stars_repo_name": "Gabry993/pointing-user-interface-hri", "max_stars_repo_head_hexsha": "187b1db496a30edcf606b4c0a4e9388556df946a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-02T18:37:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T10:07:52.000Z", "max_issues_repo_path": "docker/pointing-user-interface/code/conveyor_utils/conveyor_utils/utils.py", "max_issues_repo_name": "Gabry993/pointing-user-interface-hri", "max_issues_repo_head_hexsha": "187b1db496a30edcf606b4c0a4e9388556df946a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docker/pointing-user-interface/code/conveyor_utils/conveyor_utils/utils.py", "max_forks_repo_name": "Gabry993/pointing-user-interface-hri", "max_forks_repo_head_hexsha": "187b1db496a30edcf606b4c0a4e9388556df946a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2015209125, "max_line_length": 99, "alphanum_fraction": 0.5647007993, "include": true, "reason": "import numpy", "num_tokens": 2467}
|
#!/usr/bin/env python
# Deborah Pelacani Cruz
# https://github.com/dekape
import context
import fullwaveqc.inversion as inv
import numpy as np
import os
def test_thisfunction():
    """Smoke test: always passes; confirms the suite is collected and runs."""
    assert True
def test_functional():
    """Check inv.functional parses iteration indices and functional values
    from a recorded Fullwave job log."""
    dir_path = os.path.abspath(os.path.dirname(__file__))
    job_path = os.path.join(dir_path, "test_data/PARBASE25_8-job001.log")
    # 'iters' rather than 'iter' to avoid shadowing the builtin
    iters, func = inv.functional(job_path, plot=False)
    assert (iters == np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])).all()
    assert (func == np.array([38270., 36650., 36260., 36500., 36100., 36470., 36290., 36520., 36060., 35990., 33260.,
                              33420., 33280.])).all()
def test_steplen():
    """Check inv.steplen parses iteration indices and step lengths from a
    recorded Fullwave job log."""
    dir_path = os.path.abspath(os.path.dirname(__file__))
    job_path = os.path.join(dir_path, "test_data/PARBASE25_8-job001.log")
    # 'iters' rather than 'iter' to avoid shadowing the builtin
    iters, slen = inv.steplen(job_path, plot=False)
    assert (iters == np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])).all()
    assert (np.allclose(slen, np.array([6.27, 2.152, -1.833, 6.97, -1.678, 7.408, -2.136, 9.502,
                                        0.2, 6.58, -1.868, 3.464, -1.204])))
|
{"hexsha": "e1a7dd49468c628ac375ac4d970374d8798d6ebf", "size": 1125, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_inversion.py", "max_stars_repo_name": "msc-acse/acse-9-independent-research-project-dekape", "max_stars_repo_head_hexsha": "d3d2236e47e8604803850c7cacceb826c7649bcb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_inversion.py", "max_issues_repo_name": "msc-acse/acse-9-independent-research-project-dekape", "max_issues_repo_head_hexsha": "d3d2236e47e8604803850c7cacceb826c7649bcb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-07-22T08:47:34.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-22T08:47:34.000Z", "max_forks_repo_path": "tests/test_inversion.py", "max_forks_repo_name": "msc-acse/acse-9-independent-research-project-dekape", "max_forks_repo_head_hexsha": "d3d2236e47e8604803850c7cacceb826c7649bcb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2903225806, "max_line_length": 117, "alphanum_fraction": 0.6151111111, "include": true, "reason": "import numpy", "num_tokens": 424}
|
[STATEMENT]
lemma real_sqrt_sum_squares_less: "\<bar>x\<bar> < u / sqrt 2 \<Longrightarrow> \<bar>y\<bar> < u / sqrt 2 \<Longrightarrow> sqrt (x\<^sup>2 + y\<^sup>2) < u"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<bar>x\<bar> < u / sqrt 2; \<bar>y\<bar> < u / sqrt 2\<rbrakk> \<Longrightarrow> sqrt (x\<^sup>2 + y\<^sup>2) < u
[PROOF STEP]
apply (rule power2_less_imp_less)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>\<bar>x\<bar> < u / sqrt 2; \<bar>y\<bar> < u / sqrt 2\<rbrakk> \<Longrightarrow> (sqrt (x\<^sup>2 + y\<^sup>2))\<^sup>2 < u\<^sup>2
2. \<lbrakk>\<bar>x\<bar> < u / sqrt 2; \<bar>y\<bar> < u / sqrt 2\<rbrakk> \<Longrightarrow> 0 \<le> u
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>\<bar>x\<bar> < u / sqrt 2; \<bar>y\<bar> < u / sqrt 2\<rbrakk> \<Longrightarrow> x\<^sup>2 + y\<^sup>2 < u\<^sup>2
2. \<lbrakk>\<bar>x\<bar> < u / sqrt 2; \<bar>y\<bar> < u / sqrt 2\<rbrakk> \<Longrightarrow> 0 \<le> u
[PROOF STEP]
apply (drule power_strict_mono [OF _ abs_ge_zero pos2])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>\<bar>y\<bar> < u / sqrt 2; \<bar>x\<bar>\<^sup>2 < (u / sqrt 2)\<^sup>2\<rbrakk> \<Longrightarrow> x\<^sup>2 + y\<^sup>2 < u\<^sup>2
2. \<lbrakk>\<bar>x\<bar> < u / sqrt 2; \<bar>y\<bar> < u / sqrt 2\<rbrakk> \<Longrightarrow> 0 \<le> u
[PROOF STEP]
apply (drule power_strict_mono [OF _ abs_ge_zero pos2])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>\<bar>x\<bar>\<^sup>2 < (u / sqrt 2)\<^sup>2; \<bar>y\<bar>\<^sup>2 < (u / sqrt 2)\<^sup>2\<rbrakk> \<Longrightarrow> x\<^sup>2 + y\<^sup>2 < u\<^sup>2
2. \<lbrakk>\<bar>x\<bar> < u / sqrt 2; \<bar>y\<bar> < u / sqrt 2\<rbrakk> \<Longrightarrow> 0 \<le> u
[PROOF STEP]
apply (simp add: power_divide)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<bar>x\<bar> < u / sqrt 2; \<bar>y\<bar> < u / sqrt 2\<rbrakk> \<Longrightarrow> 0 \<le> u
[PROOF STEP]
apply (drule order_le_less_trans [OF abs_ge_zero])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<bar>y\<bar> < u / sqrt 2; 0 < u / sqrt 2\<rbrakk> \<Longrightarrow> 0 \<le> u
[PROOF STEP]
apply (simp add: zero_less_divide_iff)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 1058, "file": null, "length": 8}
|
#
# bias_experiment.py
#
# Experiment in Paper's Section 3.1.1
#
import collections
import json
import os
import shutil
import tempfile
from copy import deepcopy
import click
import numpy as np
import pandas as pd
import torch
from ceem import logger, utils
from ceem.dynamics import *
from ceem.learner import *
from ceem.opt_criteria import *
from ceem.ceem import CEEM
from ceem.smoother import *
from ceem.systems import LorenzAttractor, default_lorenz_attractor
@click.command()
@click.option('--sys-seed', default=4, type=int)
@click.option('--num-seeds', default=10, type=int)
@click.option('--logdir', default='./data/bias_experiment', type=click.Path())
def run(sys_seed, num_seeds, logdir):
    """Run the bias experiment (paper Section 3.1.1): sweep process-noise
    (wstd) and observation-noise (ystd) levels, fitting the Lorenz parameters
    num_seeds times per level.

    Results (recovered sigma/rho/beta per run) are checkpointed to
    <logdir>/results.pkl after every noise level.
    """
    # Start from a clean experiment directory
    if os.path.exists(logdir):
        shutil.rmtree(logdir)
    os.mkdir(logdir)

    results = collections.defaultdict(list)

    def _sweep(ystd, wstd):
        # One noise configuration: num_seeds independent training runs,
        # then checkpoint the accumulated results.
        for seed in range(num_seeds):
            # NOTE: per-run temp dirs are never removed (matches original behavior)
            tmpdir = tempfile.mkdtemp()
            sigma, rho, beta = train(seed, tmpdir, sys_seed, ystd, wstd)
            results['ystd'].append(ystd)
            results['wstd'].append(wstd)
            results['seed'].append(seed)
            results['sigma'].append(sigma)
            results['rho'].append(rho)
            results['beta'].append(beta)
        pd.DataFrame(results).to_pickle(os.path.join(logdir, 'results.pkl'))

    # Sweep process-noise levels at fixed observation noise
    ystd = 1e-2
    for wstd in [1e-1, 1e-2, 1e-3]:
        _sweep(ystd, wstd)

    # Sweep observation-noise levels at fixed process noise
    wstd = 1e-3
    for ystd in [1e-1, 5e-2]:
        _sweep(ystd, wstd)

    # Final save, kept for parity with the original script
    pd.DataFrame(results).to_pickle(os.path.join(logdir, 'results.pkl'))
def train(seed, logdir, sys_seed, ystd, wstd):
    """Fit Lorenz-attractor parameters with CEEM on one noisy simulated trajectory.

    Args:
        seed: RNG seed for trajectory noise and initial parameter perturbation.
        logdir: directory for this run's logs.
        sys_seed: seed used to instantiate the ground-truth system.
        ystd: observation-noise standard deviation.
        wstd: process (dynamics) noise standard deviation.

    Returns:
        (sigma, rho, beta): the learned Lorenz parameters as floats.
    """
    torch.set_default_dtype(torch.float64)
    logger.setup(logdir, action='d')
    # Number of timesteps in the trajectory
    T = 128
    n = 3
    # Batch size
    B = 1
    k = 1
    # Instantiate the ground-truth system deterministically from sys_seed
    utils.set_rng_seed(sys_seed)
    true_system = default_lorenz_attractor()
    dt = true_system._dt
    utils.set_rng_seed(43)
    # simulate the system
    x0mean = torch.tensor([[-6] * k + [-6] * k + [24.] * k]).unsqueeze(0)
    # seed for real now
    utils.set_rng_seed(seed)
    # Rollout with noise: randomized initial state, then per-step process noise
    xs = [x0mean]
    xs[0] += 5. * torch.randn_like(xs[0])
    with torch.no_grad():
        for t in range(T - 1):
            xs.append(
                true_system.step(torch.tensor([0.] * B), xs[-1]) + wstd * torch.randn_like(xs[-1]))
    xs = torch.cat(xs, dim=1)
    t = torch.tensor(range(T)).unsqueeze(0).to(torch.get_default_dtype())
    y = true_system.observe(t, xs).detach()
    y += ystd * torch.randn_like(y)  # Observation noise
    # prep system: copy the truth, then randomly perturb its parameters
    system = deepcopy(true_system)
    true_params = parameters_to_vector(true_system.parameters())
    params = true_params * ((torch.rand_like(true_params) - 0.5) / 5. + 1.)  # within 10%
    vector_to_parameters(params, system.parameters())
    params = list(system.parameters())
    # specify smoothing criteria: observation + dynamics residuals per batch
    B = 1
    smoothing_criteria = []
    for b in range(B):
        obscrit = GaussianObservationCriterion(torch.ones(2), t[b:b + 1], y[b:b + 1])
        # Dynamics residuals weighted by the (known) noise ratio wstd/ystd
        dyncrit = GaussianDynamicsCriterion(wstd / ystd * torch.ones(3), t[b:b + 1])
        smoothing_criteria.append(GroupSOSCriterion([obscrit, dyncrit]))
    smooth_solver_kwargs = {'verbose': 0, 'tr_rho': 0.001}
    # specify learning criteria (derivative-free Nelder-Mead over the dynamics fit)
    learning_criteria = [GaussianDynamicsCriterion(torch.ones(3), t)]
    learning_params = [params]
    learning_opts = ['scipy_minimize']
    learner_opt_kwargs = {'method': 'Nelder-Mead', 'tr_rho': 0.01}
    # instantiate CEEM
    def ecb(epoch):
        # Epoch callback: log current estimates and their log10 relative errors
        logger.logkv('test/rho', float(system._rho))
        logger.logkv('test/sigma', float(system._sigma))
        logger.logkv('test/beta', float(system._beta))
        logger.logkv('test/rho_pcterr_log10',
                     float(torch.log10((true_system._rho - system._rho).abs() / true_system._rho)))
        logger.logkv(
            'test/sigma_pcterr_log10',
            float(torch.log10((true_system._sigma - system._sigma).abs() / true_system._sigma)))
        logger.logkv(
            'test/beta_pcterr_log10',
            float(torch.log10((true_system._beta - system._beta).abs() / true_system._beta)))
        return
    epoch_callbacks = [ecb]
    class Last10Errors:
        def __init__(self):
            return
    # NOTE(review): missing parentheses -- last_10_errors is the class object
    # itself, not an instance, so _arr below is class-level state. It works for
    # this single-use script but looks unintentional; confirm before changing.
    last_10_errors = Last10Errors
    last_10_errors._arr = []
    def tcb(epoch):
        # Termination callback: stop when the log-error has plateaued
        # (spread of the last 10 errors below 1e-4).
        params = list(system.parameters())
        vparams = parameters_to_vector(params)
        error = (vparams - true_params).norm().item()
        last_10_errors._arr.append(float(error))
        logger.logkv('test/log10_error', np.log10(error))
        if len(last_10_errors._arr) > 10:
            last_10_errors._arr = last_10_errors._arr[-10:]
            l10err = torch.tensor(last_10_errors._arr)
            convcrit = float((l10err.min() - l10err.max()).abs())
            logger.logkv('test/log10_convcrit', np.log10(convcrit))
            if convcrit < 1e-4:
                return True
        return False
    termination_callback = tcb
    ceem = CEEM(smoothing_criteria, learning_criteria, learning_params, learning_opts,
                epoch_callbacks, termination_callback)
    # run CEEM from an all-zeros state initialization
    x0 = torch.zeros_like(xs)
    ceem.train(xs=x0, sys=system, nepochs=500, smooth_solver_kwargs=smooth_solver_kwargs,
               learner_opt_kwargs=learner_opt_kwargs)
    return float(system._sigma), float(system._rho), float(system._beta)
if __name__ == '__main__':
run()
|
{"hexsha": "77a0afb9a2b36a5df87a213bb9a692803a03eac8", "size": 5963, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/lorenz/bias_experiment.py", "max_stars_repo_name": "sisl/CEEM", "max_stars_repo_head_hexsha": "6154587fe3cdb92e8b7f70eedb1262caa1553cc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-06-21T16:50:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-14T04:02:01.000Z", "max_issues_repo_path": "experiments/lorenz/bias_experiment.py", "max_issues_repo_name": "sisl/CEEM", "max_issues_repo_head_hexsha": "6154587fe3cdb92e8b7f70eedb1262caa1553cc8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-13T07:46:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-16T05:14:47.000Z", "max_forks_repo_path": "experiments/lorenz/bias_experiment.py", "max_forks_repo_name": "sisl/CEEM", "max_forks_repo_head_hexsha": "6154587fe3cdb92e8b7f70eedb1262caa1553cc8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-30T12:08:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-30T12:08:20.000Z", "avg_line_length": 26.9819004525, "max_line_length": 99, "alphanum_fraction": 0.6288780815, "include": true, "reason": "import numpy", "num_tokens": 1557}
|
(* Title: HOL/Analysis/Gamma_Function.thy
Author: Manuel Eberl, TU München
*)
section \<open>The Gamma Function\<close>
theory Gamma_Function
imports
Equivalence_Lebesgue_Henstock_Integration
Summation_Tests
Harmonic_Numbers
"HOL-Library.Nonpos_Ints"
"HOL-Library.Periodic_Fun"
begin
text \<open>
Several equivalent definitions of the Gamma function and its
most important properties. Also contains the definition and some properties
of the log-Gamma function and the Digamma function and the other Polygamma functions.
Based on the Gamma function, we also prove the Weierstra{\ss} product form of the
sin function and, based on this, the solution of the Basel problem (the
sum over all \<^term>\<open>1 / (n::nat)^2\<close>).
\<close>
(* A vanishing Pochhammer symbol forces the base to be a non-positive integer. *)
lemma pochhammer_eq_0_imp_nonpos_Int:
  "pochhammer (x::'a::field_char_0) n = 0 \<Longrightarrow> x \<in> \<int>\<^sub>\<le>\<^sub>0"
  by (auto simp: pochhammer_eq_0_iff)

(* The non-positive integers form a closed set: they are the image of a
   closed set of integers under of_int. *)
lemma closed_nonpos_Ints [simp]: "closed (\<int>\<^sub>\<le>\<^sub>0 :: 'a :: real_normed_algebra_1 set)"
proof -
  have "\<int>\<^sub>\<le>\<^sub>0 = (of_int ` {n. n \<le> 0} :: 'a set)"
    by (auto elim!: nonpos_Ints_cases intro!: nonpos_Ints_of_int)
  also have "closed \<dots>" by (rule closed_of_int_image)
  finally show ?thesis .
qed

(* If z + 1 is a non-positive integer then so is z. *)
lemma plus_one_in_nonpos_Ints_imp: "z + 1 \<in> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> z \<in> \<int>\<^sub>\<le>\<^sub>0"
  using nonpos_Ints_diff_Nats[of "z+1" "1"] by simp_all

(* Membership of an embedded integer in the non-positive integers is n \<le> 0. *)
lemma of_int_in_nonpos_Ints_iff:
  "(of_int n :: 'a :: ring_char_0) \<in> \<int>\<^sub>\<le>\<^sub>0 \<longleftrightarrow> n \<le> 0"
  by (auto simp: nonpos_Ints_def)

(* Shifted variant: 1 + of_int n is a non-positive integer iff n \<le> -1. *)
lemma one_plus_of_int_in_nonpos_Ints_iff:
  "(1 + of_int n :: 'a :: ring_char_0) \<in> \<int>\<^sub>\<le>\<^sub>0 \<longleftrightarrow> n \<le> -1"
proof -
  have "1 + of_int n = (of_int (n + 1) :: 'a)" by simp
  also have "\<dots> \<in> \<int>\<^sub>\<le>\<^sub>0 \<longleftrightarrow> n + 1 \<le> 0" by (subst of_int_in_nonpos_Ints_iff) simp_all
  also have "\<dots> \<longleftrightarrow> n \<le> -1" by presburger
  finally show ?thesis .
qed

(* Natural-number variant: 1 - of_nat n is a non-positive integer iff n > 0. *)
lemma one_minus_of_nat_in_nonpos_Ints_iff:
  "(1 - of_nat n :: 'a :: ring_char_0) \<in> \<int>\<^sub>\<le>\<^sub>0 \<longleftrightarrow> n > 0"
proof -
  have "(1 - of_nat n :: 'a) = of_int (1 - int n)" by simp
  also have "\<dots> \<in> \<int>\<^sub>\<le>\<^sub>0 \<longleftrightarrow> n > 0" by (subst of_int_in_nonpos_Ints_iff) presburger
  finally show ?thesis .
qed
(* A non-trivial fraction of integers is never itself an integer. *)
lemma fraction_not_in_ints:
  assumes "\<not>(n dvd m)" "n \<noteq> 0"
  shows "of_int m / of_int n \<notin> (\<int> :: 'a :: {division_ring,ring_char_0} set)"
proof
  assume "of_int m / (of_int n :: 'a) \<in> \<int>"
  then obtain k where "of_int m / of_int n = (of_int k :: 'a)" by (elim Ints_cases)
  with assms have "of_int m = (of_int (k * n) :: 'a)" by (auto simp add: field_split_simps)
  hence "m = k * n" by (subst (asm) of_int_eq_iff)
  hence "n dvd m" by simp
  with assms(1) show False by contradiction
qed

(* Corollary via \<nat> \<subseteq> \<int>: such a fraction is not a natural number either. *)
lemma fraction_not_in_nats:
  assumes "\<not>n dvd m" "n \<noteq> 0"
  shows "of_int m / of_int n \<notin> (\<nat> :: 'a :: {division_ring,ring_char_0} set)"
proof
  assume "of_int m / of_int n \<in> (\<nat> :: 'a set)"
  also note Nats_subset_Ints
  finally have "of_int m / of_int n \<in> (\<int> :: 'a set)" .
  moreover have "of_int m / of_int n \<notin> (\<int> :: 'a set)"
    using assms by (intro fraction_not_in_ints)
  ultimately show False by contradiction
qed

(* Non-integers are in particular not non-positive integers. *)
lemma not_in_Ints_imp_not_in_nonpos_Ints: "z \<notin> \<int> \<Longrightarrow> z \<notin> \<int>\<^sub>\<le>\<^sub>0"
  by (auto simp: Ints_def nonpos_Ints_def)

(* If 2z is a non-positive integer, then z or z + 1/2 is, depending on parity. *)
lemma double_in_nonpos_Ints_imp:
  assumes "2 * (z :: 'a :: field_char_0) \<in> \<int>\<^sub>\<le>\<^sub>0"
  shows "z \<in> \<int>\<^sub>\<le>\<^sub>0 \<or> z + 1/2 \<in> \<int>\<^sub>\<le>\<^sub>0"
proof-
  from assms obtain k where k: "2 * z = - of_nat k" by (elim nonpos_Ints_cases')
  thus ?thesis by (cases "even k") (auto elim!: evenE oddE simp: field_simps)
qed
(* Power-series expansion of sin, re-indexed over the odd powers only. *)
lemma sin_series: "(\<lambda>n. ((-1)^n / fact (2*n+1)) *\<^sub>R z^(2*n+1)) sums sin z"
proof -
  from sin_converges[of z] have "(\<lambda>n. sin_coeff n *\<^sub>R z^n) sums sin z" .
  also have "(\<lambda>n. sin_coeff n *\<^sub>R z^n) sums sin z \<longleftrightarrow>
                 (\<lambda>n. ((-1)^n / fact (2*n+1)) *\<^sub>R z^(2*n+1)) sums sin z"
    by (subst sums_mono_reindex[of "\<lambda>n. 2*n+1", symmetric])
       (auto simp: sin_coeff_def strict_mono_def ac_simps elim!: oddE)
  finally show ?thesis .
qed

(* Power-series expansion of cos, re-indexed over the even powers only. *)
lemma cos_series: "(\<lambda>n. ((-1)^n / fact (2*n)) *\<^sub>R z^(2*n)) sums cos z"
proof -
  from cos_converges[of z] have "(\<lambda>n. cos_coeff n *\<^sub>R z^n) sums cos z" .
  also have "(\<lambda>n. cos_coeff n *\<^sub>R z^n) sums cos z \<longleftrightarrow>
                 (\<lambda>n. ((-1)^n / fact (2*n)) *\<^sub>R z^(2*n)) sums cos z"
    by (subst sums_mono_reindex[of "\<lambda>n. 2*n", symmetric])
       (auto simp: cos_coeff_def strict_mono_def ac_simps elim!: evenE)
  finally show ?thesis .
qed

(* Series for sin z / z, obtained by dividing the sin series by z. *)
lemma sin_z_over_z_series:
  fixes z :: "'a :: {real_normed_field,banach}"
  assumes "z \<noteq> 0"
  shows "(\<lambda>n. (-1)^n / fact (2*n+1) * z^(2*n)) sums (sin z / z)"
proof -
  from sin_series[of z] have "(\<lambda>n. z * ((-1)^n / fact (2*n+1)) * z^(2*n)) sums sin z"
    by (simp add: field_simps scaleR_conv_of_real)
  from sums_mult[OF this, of "inverse z"] and assms show ?thesis
    by (simp add: field_simps)
qed

(* Alternative form of the previous series, via shifted sin coefficients. *)
lemma sin_z_over_z_series':
  fixes z :: "'a :: {real_normed_field,banach}"
  assumes "z \<noteq> 0"
  shows "(\<lambda>n. sin_coeff (n+1) *\<^sub>R z^n) sums (sin z / z)"
proof -
  from sums_split_initial_segment[OF sin_converges[of z], of 1]
    have "(\<lambda>n. z * (sin_coeff (n+1) *\<^sub>R z ^ n)) sums sin z" by simp
  from sums_mult[OF this, of "inverse z"] assms show ?thesis by (simp add: field_simps)
qed

(* The function equal to sin z / z away from 0 and to 1 at 0 has derivative 0
   at 0, shown by termwise differentiation of its power series. *)
lemma has_field_derivative_sin_z_over_z:
  fixes A :: "'a :: {real_normed_field,banach} set"
  shows "((\<lambda>z. if z = 0 then 1 else sin z / z) has_field_derivative 0) (at 0 within A)"
      (is "(?f has_field_derivative ?f') _")
proof (rule has_field_derivative_at_within)
  have "((\<lambda>z::'a. \<Sum>n. of_real (sin_coeff (n+1)) * z^n)
            has_field_derivative (\<Sum>n. diffs (\<lambda>n. of_real (sin_coeff (n+1))) n * 0^n)) (at 0)"
  proof (rule termdiffs_strong)
    from summable_ignore_initial_segment[OF sums_summable[OF sin_converges[of "1::'a"]], of 1]
      show "summable (\<lambda>n. of_real (sin_coeff (n+1)) * (1::'a)^n)" by (simp add: of_real_def)
  qed simp
  also have "(\<lambda>z::'a. \<Sum>n. of_real (sin_coeff (n+1)) * z^n) = ?f"
  proof
    fix z
    show "(\<Sum>n. of_real (sin_coeff (n+1)) * z^n) = ?f z"
      by (cases "z = 0") (insert sin_z_over_z_series'[of z],
                          simp_all add: scaleR_conv_of_real sums_iff sin_coeff_def)
  qed
  also have "(\<Sum>n. diffs (\<lambda>n. of_real (sin_coeff (n + 1))) n * (0::'a) ^ n) =
                 diffs (\<lambda>n. of_real (sin_coeff (Suc n))) 0" by simp
  also have "\<dots> = 0" by (simp add: sin_coeff_def diffs_def)
  finally show "((\<lambda>z::'a. if z = 0 then 1 else sin z / z) has_field_derivative 0) (at 0)" .
qed
(* Among all integers m, the integer round (Re z) minimises the distance
   |z - of_int m| from the complex number z: the imaginary parts agree, so
   it suffices to minimise |Re z - m|, which round does. *)
lemma round_Re_minimises_norm:
"norm ((z::complex) - of_int m) \<ge> norm (z - of_int (round (Re z)))"
proof -
let ?n = "round (Re z)"
have "norm (z - of_int ?n) = sqrt ((Re z - of_int ?n)\<^sup>2 + (Im z)\<^sup>2)"
by (simp add: cmod_def)
(* round_diff_minimal: rounding minimises the real distance to an integer. *)
also have "\<bar>Re z - of_int ?n\<bar> \<le> \<bar>Re z - of_int m\<bar>" by (rule round_diff_minimal)
hence "sqrt ((Re z - of_int ?n)\<^sup>2 + (Im z)\<^sup>2) \<le> sqrt ((Re z - of_int m)\<^sup>2 + (Im z)\<^sup>2)"
by (intro real_sqrt_le_mono add_mono) (simp_all add: abs_le_square_iff)
also have "\<dots> = norm (z - of_int m)" by (simp add: cmod_def)
finally show ?thesis .
qed
(* Every point of the open ball of radius Re z / 2 around z still has
   positive real part: the real part can drop by at most the distance. *)
lemma Re_pos_in_ball:
assumes "Re z > 0" "t \<in> ball z (Re z/2)"
shows "Re t > 0"
proof -
have "Re (z - t) \<le> norm (z - t)" by (rule complex_Re_le_cmod)
also from assms have "\<dots> < Re z / 2" by (simp add: dist_complex_def)
finally show "Re t > 0" using assms by simp
qed
(* Consequently, a ball of radius Re z / 2 around a point with Re z > 0
   contains no non-positive integers (their real parts are \<le> 0). *)
lemma no_nonpos_Int_in_ball_complex:
assumes "Re z > 0" "t \<in> ball z (Re z/2)"
shows "t \<notin> \<int>\<^sub>\<le>\<^sub>0"
using Re_pos_in_ball[OF assms] by (force elim!: nonpos_Ints_cases)
(* The open ball around z whose radius is the distance from z to the nearest
   integer (realised by round (Re z), cf. round_Re_minimises_norm) contains
   no non-positive integers. *)
lemma no_nonpos_Int_in_ball:
assumes "t \<in> ball z (dist z (round (Re z)))"
shows "t \<notin> \<int>\<^sub>\<le>\<^sub>0"
proof
assume "t \<in> \<int>\<^sub>\<le>\<^sub>0"
then obtain n where "t = of_int n" by (auto elim!: nonpos_Ints_cases)
(* Triangle inequality forces t to be at positive distance from of_int n,
   contradicting t = of_int n. *)
have "dist z (of_int n) \<le> dist z t + dist t (of_int n)" by (rule dist_triangle)
also from assms have "dist z t < dist z (round (Re z))" by simp
also have "\<dots> \<le> dist z (of_int n)"
using round_Re_minimises_norm[of z] by (simp add: dist_complex_def)
finally have "dist t (of_int n) > 0" by simp
with \<open>t = of_int n\<close> show False by simp
qed
(* Abstract version: any point z outside the non-positive integers has some
   open ball around it avoiding them.  The radius is the set distance from
   {z} to \<int>\<^sub>\<le>\<^sub>0, positive since {z} is compact and \<int>\<^sub>\<le>\<^sub>0 is closed. *)
lemma no_nonpos_Int_in_ball':
assumes "(z :: 'a :: {euclidean_space,real_normed_algebra_1}) \<notin> \<int>\<^sub>\<le>\<^sub>0"
obtains d where "d > 0" "\<And>t. t \<in> ball z d \<Longrightarrow> t \<notin> \<int>\<^sub>\<le>\<^sub>0"
proof (rule that)
from assms show "setdist {z} \<int>\<^sub>\<le>\<^sub>0 > 0" by (subst setdist_gt_0_compact_closed) auto
next
fix t assume "t \<in> ball z (setdist {z} \<int>\<^sub>\<le>\<^sub>0)"
thus "t \<notin> \<int>\<^sub>\<le>\<^sub>0" using setdist_le_dist[of z "{z}" t "\<int>\<^sub>\<le>\<^sub>0"] by force
qed
(* A point z off the non-positive real axis has an explicit ball avoiding
   that axis: radius Re z / 2 if z is real (then Re z > 0), otherwise
   |Im z| / 2 (then the imaginary part cannot vanish inside the ball). *)
lemma no_nonpos_Real_in_ball:
assumes z: "z \<notin> \<real>\<^sub>\<le>\<^sub>0" and t: "t \<in> ball z (if Im z = 0 then Re z / 2 else abs (Im z) / 2)"
shows "t \<notin> \<real>\<^sub>\<le>\<^sub>0"
using z
proof (cases "Im z = 0")
(* Real case: Re z > 0 and the ball keeps real parts positive. *)
assume A: "Im z = 0"
with z have "Re z > 0" by (force simp add: complex_nonpos_Reals_iff)
with t A Re_pos_in_ball[of z t] show ?thesis by (force simp add: complex_nonpos_Reals_iff)
next
(* Non-real case: |Im t| stays bounded away from 0. *)
assume A: "Im z \<noteq> 0"
have "abs (Im z) - abs (Im t) \<le> abs (Im z - Im t)" by linarith
also have "\<dots> = abs (Im (z - t))" by simp
also have "\<dots> \<le> norm (z - t)" by (rule abs_Im_le_cmod)
also from A t have "\<dots> \<le> abs (Im z) / 2" by (simp add: dist_complex_def)
finally have "abs (Im t) > 0" using A by simp
thus ?thesis by (force simp add: complex_nonpos_Reals_iff)
qed
subsection \<open>The Euler form and the logarithmic Gamma function\<close>
text \<open>
We define the Gamma function by first defining its multiplicative inverse \<open>rGamma\<close>.
This is more convenient because \<open>rGamma\<close> is entire, so proofs of its
properties do not have to watch out for discontinuities.
(e.g. \<open>rGamma\<close> fulfils \<open>rGamma z = z * rGamma (z + 1)\<close> everywhere, whereas the \<open>\<Gamma>\<close> function
does not fulfil the analogous equation on the non-positive integers)
We define the \<open>\<Gamma>\<close> function (resp.\ its reciprocal) in the Euler form. This form has the advantage
that it is a relatively simple limit that converges everywhere. The limit at the poles is 0
(due to division by 0). The functional equation \<open>Gamma (z + 1) = z * Gamma z\<close> follows
immediately from the definition.
\<close>
(* Euler form of the Gamma function as a sequence of approximants:
   Gamma_series z n = n! * n^z / (z (z+1) \<cdots> (z+n)), written with
   exp (z * ln n) for the power and pochhammer for the rising product. *)
definition\<^marker>\<open>tag important\<close> Gamma_series :: "('a :: {banach,real_normed_field}) \<Rightarrow> nat \<Rightarrow> 'a" where
"Gamma_series z n = fact n * exp (z * of_real (ln (of_nat n))) / pochhammer z (n+1)"
(* Variant using (n-1)! and pochhammer z n; shown below to be asymptotically
   equivalent to Gamma_series. *)
definition Gamma_series' :: "('a :: {banach,real_normed_field}) \<Rightarrow> nat \<Rightarrow> 'a" where
"Gamma_series' z n = fact (n - 1) * exp (z * of_real (ln (of_nat n))) / pochhammer z n"
(* Approximants of the reciprocal Gamma function: pointwise inverse of
   Gamma_series. *)
definition rGamma_series :: "('a :: {banach,real_normed_field}) \<Rightarrow> nat \<Rightarrow> 'a" where
"rGamma_series z n = pochhammer z (n+1) / (fact n * exp (z * of_real (ln (of_nat n))))"
(* The two series are mutual pointwise inverses. *)
lemma Gamma_series_altdef: "Gamma_series z n = inverse (rGamma_series z n)"
and rGamma_series_altdef: "rGamma_series z n = inverse (Gamma_series z n)"
unfolding Gamma_series_def rGamma_series_def by simp_all
(* At a non-positive integer -k, all three series are eventually 0, because
   the pochhammer factor vanishes once n \<ge> k (resp. n > k for Gamma_series'). *)
lemma rGamma_series_minus_of_nat:
"eventually (\<lambda>n. rGamma_series (- of_nat k) n = 0) sequentially"
using eventually_ge_at_top[of k]
by eventually_elim (auto simp: rGamma_series_def pochhammer_of_nat_eq_0_iff)
lemma Gamma_series_minus_of_nat:
"eventually (\<lambda>n. Gamma_series (- of_nat k) n = 0) sequentially"
using eventually_ge_at_top[of k]
by eventually_elim (auto simp: Gamma_series_def pochhammer_of_nat_eq_0_iff)
lemma Gamma_series'_minus_of_nat:
"eventually (\<lambda>n. Gamma_series' (- of_nat k) n = 0) sequentially"
using eventually_gt_at_top[of k]
by eventually_elim (auto simp: Gamma_series'_def pochhammer_of_nat_eq_0_iff)
(* Hence each series tends to 0 at every non-positive integer. *)
lemma rGamma_series_nonpos_Ints_LIMSEQ: "z \<in> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> rGamma_series z \<longlonglongrightarrow> 0"
by (elim nonpos_Ints_cases', hypsubst, subst tendsto_cong, rule rGamma_series_minus_of_nat, simp)
lemma Gamma_series_nonpos_Ints_LIMSEQ: "z \<in> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> Gamma_series z \<longlonglongrightarrow> 0"
by (elim nonpos_Ints_cases', hypsubst, subst tendsto_cong, rule Gamma_series_minus_of_nat, simp)
lemma Gamma_series'_nonpos_Ints_LIMSEQ: "z \<in> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> Gamma_series' z \<longlonglongrightarrow> 0"
by (elim nonpos_Ints_cases', hypsubst, subst tendsto_cong, rule Gamma_series'_minus_of_nat, simp)
(* Away from the non-positive integers, the ratio of the two series variants
   tends to 1, so they have the same limit.  The ratio simplifies to
   (z + n)/n = z/n + 1. *)
lemma Gamma_series_Gamma_series':
assumes z: "z \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "(\<lambda>n. Gamma_series' z n / Gamma_series z n) \<longlonglongrightarrow> 1"
proof (rule Lim_transform_eventually)
from eventually_gt_at_top[of "0::nat"]
show "eventually (\<lambda>n. z / of_nat n + 1 = Gamma_series' z n / Gamma_series z n) sequentially"
proof eventually_elim
fix n :: nat assume n: "n > 0"
(* The pochhammer recurrence cancels everything but one factor. *)
from n z have "Gamma_series' z n / Gamma_series z n = (z + of_nat n) / of_nat n"
by (cases n, simp)
(auto simp add: Gamma_series_def Gamma_series'_def pochhammer_rec'
dest: pochhammer_eq_0_imp_nonpos_Int plus_of_nat_eq_0_imp)
also from n have "\<dots> = z / of_nat n + 1" by (simp add: field_split_simps)
finally show "z / of_nat n + 1 = Gamma_series' z n / Gamma_series z n" ..
qed
(* z/n \<longlonglongrightarrow> 0, hence z/n + 1 \<longlonglongrightarrow> 1. *)
have "(\<lambda>x. z / of_nat x) \<longlonglongrightarrow> 0"
by (rule tendsto_norm_zero_cancel)
(insert tendsto_mult[OF tendsto_const[of "norm z"] lim_inverse_n],
simp add: norm_divide inverse_eq_divide)
from tendsto_add[OF this tendsto_const[of 1]] show "(\<lambda>n. z / of_nat n + 1) \<longlonglongrightarrow> 1" by simp
qed
text \<open>
We now show that the series that defines the \<open>\<Gamma>\<close> function in the Euler form converges
and that the function defined by it is continuous on the complex halfspace with positive
real part.
We do this by showing that the logarithm of the Euler series is continuous and converges
locally uniformly, which means that the log-Gamma function defined by its limit is also
continuous.
This will later allow us to lift holomorphicity and continuity from the log-Gamma
function to the inverse of the Gamma function, and from that to the Gamma function itself.
\<close>
(* Logarithm of the Euler approximant:
   ln_Gamma_series z n = z ln n - ln z - \<Sum>_{k=1}^n ln (z/k + 1). *)
definition\<^marker>\<open>tag important\<close> ln_Gamma_series :: "('a :: {banach,real_normed_field,ln}) \<Rightarrow> nat \<Rightarrow> 'a" where
"ln_Gamma_series z n = z * ln (of_nat n) - ln z - (\<Sum>k=1..n. ln (z / of_nat k + 1))"
(* Weierstrass-style variant involving the Euler--Mascheroni constant. *)
definition\<^marker>\<open>tag unimportant\<close> ln_Gamma_series' :: "('a :: {banach,real_normed_field,ln}) \<Rightarrow> nat \<Rightarrow> 'a" where
"ln_Gamma_series' z n =
- euler_mascheroni*z - ln z + (\<Sum>k=1..n. z / of_nat n - ln (z / of_nat k + 1))"
(* The log-Gamma function as the limit of ln_Gamma_series (convergence is
   established below off the non-positive integers). *)
definition ln_Gamma :: "('a :: {banach,real_normed_field,ln}) \<Rightarrow> 'a" where
"ln_Gamma z = lim (ln_Gamma_series z)"
text \<open>
We now show that the log-Gamma series converges locally uniformly for all complex numbers except
the non-positive integers. We do this by proving that the series is locally Cauchy.
\<close>
context
begin
(* Key quantitative bound: for k \<ge> max (2 |z|) 2 and z \<noteq> 0, the combined
   term z ln(1 - 1/k) + ln(z/k + 1) is O(1/k\<^sup>2), with explicit constant
   2 (|z| + |z|\<^sup>2).  Both logarithms are compared with their linear
   approximations via Ln_approx_linear. *)
private lemma ln_Gamma_series_complex_converges_aux:
fixes z :: complex and k :: nat
assumes z: "z \<noteq> 0" and k: "of_nat k \<ge> 2*norm z" "k \<ge> 2"
shows "norm (z * ln (1 - 1/of_nat k) + ln (z/of_nat k + 1)) \<le> 2*(norm z + norm z^2) / of_nat k^2"
proof -
let ?k = "of_nat k :: complex" and ?z = "norm z"
(* Regroup so each summand is a logarithm minus its linear approximation. *)
have "z *ln (1 - 1/?k) + ln (z/?k+1) = z*(ln (1 - 1/?k :: complex) + 1/?k) + (ln (1+z/?k) - z/?k)"
by (simp add: algebra_simps)
also have "norm ... \<le> ?z * norm (ln (1-1/?k) + 1/?k :: complex) + norm (ln (1+z/?k) - z/?k)"
by (subst norm_mult [symmetric], rule norm_triangle_ineq)
(* First summand: |Ln(1 - 1/k) + 1/k| \<le> (1/k)\<^sup>2 / (1 - 1/k) \<le> 2/k\<^sup>2. *)
also have "norm (Ln (1 + -1/?k) - (-1/?k)) \<le> (norm (-1/?k))\<^sup>2 / (1 - norm(-1/?k))"
using k by (intro Ln_approx_linear) (simp add: norm_divide)
hence "?z * norm (ln (1-1/?k) + 1/?k) \<le> ?z * ((norm (1/?k))^2 / (1 - norm (1/?k)))"
by (intro mult_left_mono) simp_all
also have "... \<le> (?z * (of_nat k / (of_nat k - 1))) / of_nat k^2" using k
by (simp add: field_simps power2_eq_square norm_divide)
also have "... \<le> (?z * 2) / of_nat k^2" using k
by (intro divide_right_mono mult_left_mono) (simp_all add: field_simps)
(* Second summand: |Ln(1 + z/k) - z/k| \<le> |z/k|\<^sup>2 / (1 - |z/k|) \<le> 2|z|\<^sup>2/k\<^sup>2. *)
also have "norm (ln (1+z/?k) - z/?k) \<le> norm (z/?k)^2 / (1 - norm (z/?k))" using k
by (intro Ln_approx_linear) (simp add: norm_divide)
hence "norm (ln (1+z/?k) - z/?k) \<le> ?z^2 / of_nat k^2 / (1 - ?z / of_nat k)"
by (simp add: field_simps norm_divide)
also have "... \<le> (?z^2 * (of_nat k / (of_nat k - ?z))) / of_nat k^2" using k
by (simp add: field_simps power2_eq_square)
also have "... \<le> (?z^2 * 2) / of_nat k^2" using k
by (intro divide_right_mono mult_left_mono) (simp_all add: field_simps)
also note add_divide_distrib [symmetric]
finally show ?thesis by (simp only: distrib_left mult.commute)
qed
(* Uniform convergence of the log-Gamma series on a ball around z, provided
   the ball stays at distance > d from every non-positive integer.  The proof
   verifies the uniform Cauchy criterion: the difference of two partial sums
   telescopes into a sum of terms bounded by ln_Gamma_series_complex_converges_aux,
   which is dominated by a tail of the convergent series \<Sum> 1/k\<^sup>2. *)
lemma ln_Gamma_series_complex_converges:
assumes z: "z \<notin> \<int>\<^sub>\<le>\<^sub>0"
assumes d: "d > 0" "\<And>n. n \<in> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> norm (z - of_int n) > d"
shows "uniformly_convergent_on (ball z d) (\<lambda>n z. ln_Gamma_series z n :: complex)"
proof (intro Cauchy_uniformly_convergent uniformly_Cauchy_onI')
fix e :: real assume e: "e > 0"
(* e'' bounds |t| + |t|\<^sup>2 uniformly on the ball; e' is the tail tolerance
   for \<Sum> 1/k\<^sup>2 that makes the final estimate come out below e. *)
define e'' where "e'' = (SUP t\<in>ball z d. norm t + norm t^2)"
define e' where "e' = e / (2*e'')"
have "bounded ((\<lambda>t. norm t + norm t^2) ` cball z d)"
by (intro compact_imp_bounded compact_continuous_image) (auto intro!: continuous_intros)
hence "bounded ((\<lambda>t. norm t + norm t^2) ` ball z d)" by (rule bounded_subset) auto
hence bdd: "bdd_above ((\<lambda>t. norm t + norm t^2) ` ball z d)" by (rule bounded_imp_bdd_above)
with z d(1) d(2)[of "-1"] have e''_pos: "e'' > 0" unfolding e''_def
by (subst less_cSUP_iff) (auto intro!: add_pos_nonneg bexI[of _ z])
have e'': "norm t + norm t^2 \<le> e''" if "t \<in> ball z d" for t unfolding e''_def using that
by (rule cSUP_upper[OF _ bdd])
from e z e''_pos have e': "e' > 0" unfolding e'_def
by (intro divide_pos_pos mult_pos_pos add_pos_pos) (simp_all add: field_simps)
(* Choose M so that tails of \<Sum> 1/k\<^sup>2 beyond M are below e'. *)
have "summable (\<lambda>k. inverse ((real_of_nat k)^2))"
by (rule inverse_power_summable) simp
from summable_partial_sum_bound[OF this e']
obtain M where M: "\<And>m n. M \<le> m \<Longrightarrow> norm (\<Sum>k = m..n. inverse ((real k)\<^sup>2)) < e'"
by auto
(* N is large enough for the aux lemma's hypotheses and the tail bound. *)
define N where "N = max 2 (max (nat \<lceil>2 * (norm z + d)\<rceil>) M)"
{
from d have "\<lceil>2 * (cmod z + d)\<rceil> \<ge> \<lceil>0::real\<rceil>"
by (intro ceiling_mono mult_nonneg_nonneg add_nonneg_nonneg) simp_all
hence "2 * (norm z + d) \<le> of_nat (nat \<lceil>2 * (norm z + d)\<rceil>)" unfolding N_def
by (simp_all)
also have "... \<le> of_nat N" unfolding N_def
by (subst of_nat_le_iff) (rule max.coboundedI2, rule max.cobounded1)
finally have "of_nat N \<ge> 2 * (norm z + d)" .
moreover have "N \<ge> 2" "N \<ge> M" unfolding N_def by simp_all
moreover have "(\<Sum>k=m..n. 1/(of_nat k)\<^sup>2) < e'" if "m \<ge> N" for m n
using M[OF order.trans[OF \<open>N \<ge> M\<close> that]] unfolding real_norm_def
by (subst (asm) abs_of_nonneg) (auto intro: sum_nonneg simp: field_split_simps)
moreover note calculation
} note N = this
show "\<exists>M. \<forall>t\<in>ball z d. \<forall>m\<ge>M. \<forall>n>m. dist (ln_Gamma_series t m) (ln_Gamma_series t n) < e"
unfolding dist_complex_def
proof (intro exI[of _ N] ballI allI impI)
fix t m n assume t: "t \<in> ball z d" and mn: "m \<ge> N" "n > m"
(* t \<noteq> 0: the ball stays away from 0 by hypothesis d(2)[of 0]. *)
from d(2)[of 0] t have "0 < dist z 0 - dist z t" by (simp add: field_simps dist_complex_def)
also have "dist z 0 - dist z t \<le> dist 0 t" using dist_triangle[of 0 z t]
by (simp add: dist_commute)
finally have t_nz: "t \<noteq> 0" by auto
(* 2|t| < m, needed for the aux lemma on each summand. *)
have "norm t \<le> norm z + norm (t - z)" by (rule norm_triangle_sub)
also from t have "norm (t - z) < d" by (simp add: dist_complex_def norm_minus_commute)
also have "2 * (norm z + d) \<le> of_nat N" by (rule N)
also have "N \<le> m" by (rule mn)
finally have norm_t: "2 * norm t < of_nat m" by simp
(* Rewrite the difference of partial sums as a single sum over {Suc m..n}. *)
have "ln_Gamma_series t m - ln_Gamma_series t n =
(-(t * Ln (of_nat n)) - (-(t * Ln (of_nat m)))) +
((\<Sum>k=1..n. Ln (t / of_nat k + 1)) - (\<Sum>k=1..m. Ln (t / of_nat k + 1)))"
by (simp add: ln_Gamma_series_def algebra_simps)
also have "(\<Sum>k=1..n. Ln (t / of_nat k + 1)) - (\<Sum>k=1..m. Ln (t / of_nat k + 1)) =
(\<Sum>k\<in>{1..n}-{1..m}. Ln (t / of_nat k + 1))" using mn
by (simp add: sum_diff)
also from mn have "{1..n}-{1..m} = {Suc m..n}" by fastforce
(* The logarithm difference telescopes into \<Sum> t Ln((k-1)/k). *)
also have "-(t * Ln (of_nat n)) - (-(t * Ln (of_nat m))) =
(\<Sum>k = Suc m..n. t * Ln (of_nat (k - 1)) - t * Ln (of_nat k))" using mn
by (subst sum_telescope'' [symmetric]) simp_all
also have "... = (\<Sum>k = Suc m..n. t * Ln (of_nat (k - 1) / of_nat k))" using mn N
by (intro sum_cong_Suc)
(simp_all del: of_nat_Suc add: field_simps Ln_of_nat Ln_of_nat_over_of_nat)
also have "of_nat (k - 1) / of_nat k = 1 - 1 / (of_nat k :: complex)" if "k \<in> {Suc m..n}" for k
using that of_nat_eq_0_iff[of "Suc i" for i] by (cases k) (simp_all add: field_split_simps)
hence "(\<Sum>k = Suc m..n. t * Ln (of_nat (k - 1) / of_nat k)) =
(\<Sum>k = Suc m..n. t * Ln (1 - 1 / of_nat k))" using mn N
by (intro sum.cong) simp_all
also note sum.distrib [symmetric]
(* Bound each summand by the aux lemma, then sum the dominating series. *)
also have "norm (\<Sum>k=Suc m..n. t * Ln (1 - 1/of_nat k) + Ln (t/of_nat k + 1)) \<le>
(\<Sum>k=Suc m..n. 2 * (norm t + (norm t)\<^sup>2) / (real_of_nat k)\<^sup>2)" using t_nz N(2) mn norm_t
by (intro order.trans[OF norm_sum sum_mono[OF ln_Gamma_series_complex_converges_aux]]) simp_all
also have "... \<le> 2 * (norm t + norm t^2) * (\<Sum>k=Suc m..n. 1 / (of_nat k)\<^sup>2)"
by (simp add: sum_distrib_left)
also have "... < 2 * (norm t + norm t^2) * e'" using mn z t_nz
by (intro mult_strict_left_mono N mult_pos_pos add_pos_pos) simp_all
also from e''_pos have "... = e * ((cmod t + (cmod t)\<^sup>2) / e'')"
by (simp add: e'_def field_simps power2_eq_square)
also from e''[OF t] e''_pos e
have "\<dots> \<le> e * 1" by (intro mult_left_mono) (simp_all add: field_simps)
finally show "norm (ln_Gamma_series t m - ln_Gamma_series t n) < e" by simp
qed
qed
end
(* Existence form: around any z off the non-positive integers there is some
   ball on which the log-Gamma series converges uniformly.  The radius is
   chosen explicitly: half of Re z when Re z > 0, otherwise half of the
   distance from z to the nearest integer. *)
lemma ln_Gamma_series_complex_converges':
assumes z: "(z :: complex) \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "\<exists>d>0. uniformly_convergent_on (ball z d) (\<lambda>n z. ln_Gamma_series z n)"
proof -
define d' where "d' = Re z"
define d where "d = (if d' > 0 then d' / 2 else norm (z - of_int (round d')) / 2)"
have "of_int (round d') \<in> \<int>\<^sub>\<le>\<^sub>0" if "d' \<le> 0" using that
by (intro nonpos_Ints_of_int) (simp_all add: round_def)
with assms have d_pos: "d > 0" unfolding d_def by (force simp: not_less)
(* d keeps the ball away from every non-positive integer n. *)
have "d < cmod (z - of_int n)" if "n \<in> \<int>\<^sub>\<le>\<^sub>0" for n
proof (cases "Re z > 0")
case True
from nonpos_Ints_nonpos[OF that] have n: "n \<le> 0" by simp
from True have "d = Re z/2" by (simp add: d_def d'_def)
also from n True have "\<dots> < Re (z - of_int n)" by simp
also have "\<dots> \<le> norm (z - of_int n)" by (rule complex_Re_le_cmod)
finally show ?thesis .
next
case False
with assms nonpos_Ints_of_int[of "round (Re z)"]
have "z \<noteq> of_int (round d')" by (auto simp: not_less)
with False have "d < norm (z - of_int (round d'))" by (simp add: d_def d'_def)
also have "\<dots> \<le> norm (z - of_int n)" unfolding d'_def by (rule round_Re_minimises_norm)
finally show ?thesis .
qed
hence conv: "uniformly_convergent_on (ball z d) (\<lambda>n z. ln_Gamma_series z n)"
by (intro ln_Gamma_series_complex_converges d_pos z) simp_all
from d_pos conv show ?thesis by blast
qed
(* Pointwise convergence of the log-Gamma series off the non-positive
   integers, and hence convergence to ln_Gamma (which is defined as the lim). *)
lemma ln_Gamma_series_complex_converges'': "(z :: complex) \<notin> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> convergent (ln_Gamma_series z)"
by (drule ln_Gamma_series_complex_converges') (auto intro: uniformly_convergent_imp_convergent)
theorem ln_Gamma_complex_LIMSEQ: "(z :: complex) \<notin> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> ln_Gamma_series z \<longlonglongrightarrow> ln_Gamma z"
using ln_Gamma_series_complex_converges'' by (simp add: convergent_LIMSEQ_iff ln_Gamma_def)
(* Exponentiating the log-Gamma series recovers the Gamma series:
   exp (ln_Gamma_series z n) = Gamma_series z n for n > 0 and z off the
   non-positive integers.  The product of exponentials is collapsed into the
   pochhammer factor. *)
lemma exp_ln_Gamma_series_complex:
assumes "n > 0" "z \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "exp (ln_Gamma_series z n :: complex) = Gamma_series z n"
proof -
from assms obtain m where m: "n = Suc m" by (cases n) blast
from assms have "z \<noteq> 0" by (intro notI) auto
with assms have "exp (ln_Gamma_series z n) =
(of_nat n) powr z / (z * (\<Prod>k=1..n. exp (Ln (z / of_nat k + 1))))"
unfolding ln_Gamma_series_def powr_def by (simp add: exp_diff exp_sum)
(* exp \<circ> Ln is the identity on each factor since z/k + 1 \<noteq> 0. *)
also from assms have "(\<Prod>k=1..n. exp (Ln (z / of_nat k + 1))) = (\<Prod>k=1..n. z / of_nat k + 1)"
by (intro prod.cong[OF refl], subst exp_Ln) (auto simp: field_simps plus_of_nat_eq_0_imp)
also have "... = (\<Prod>k=1..n. z + k) / fact n"
by (simp add: fact_prod)
(subst prod_dividef [symmetric], simp_all add: field_simps)
also from m have "z * ... = (\<Prod>k=0..n. z + k) / fact n"
by (simp add: prod.atLeast0_atMost_Suc_shift prod.atLeast_Suc_atMost_Suc_shift del: prod.cl_ivl_Suc)
also have "(\<Prod>k=0..n. z + k) = pochhammer z (Suc n)"
unfolding pochhammer_prod
by (simp add: prod.atLeast0_atMost_Suc atLeastLessThanSuc_atLeastAtMost)
also have "of_nat n powr z / (pochhammer z (Suc n) / fact n) = Gamma_series z n"
unfolding Gamma_series_def using assms by (simp add: field_split_simps powr_def)
finally show ?thesis .
qed
(* Series representation used for differentiating ln_Gamma:
   \<Sum>_k (z/(k+1) - ln(1 + z/(k+1))) = ln_Gamma z + \<gamma> z + ln z.
   The partial sums differ from ln_Gamma_series z n by the harmonic-number
   correction (harm n - ln n) z, which converges to \<gamma> z. *)
lemma ln_Gamma_series'_aux:
assumes "(z::complex) \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "(\<lambda>k. z / of_nat (Suc k) - ln (1 + z / of_nat (Suc k))) sums
(ln_Gamma z + euler_mascheroni * z + ln z)" (is "?f sums ?s")
unfolding sums_def
proof (rule Lim_transform)
show "(\<lambda>n. ln_Gamma_series z n + of_real (harm n - ln (of_nat n)) * z + ln z) \<longlonglongrightarrow> ?s"
(is "?g \<longlonglongrightarrow> _")
by (intro tendsto_intros ln_Gamma_complex_LIMSEQ euler_mascheroni_LIMSEQ_of_real assms)
(* The partial sums of ?f agree with ?g exactly for n > 0. *)
have A: "eventually (\<lambda>n. (\<Sum>k<n. ?f k) - ?g n = 0) sequentially"
using eventually_gt_at_top[of "0::nat"]
proof eventually_elim
fix n :: nat assume n: "n > 0"
have "(\<Sum>k<n. ?f k) = (\<Sum>k=1..n. z / of_nat k - ln (1 + z / of_nat k))"
by (subst atLeast0LessThan [symmetric], subst sum.shift_bounds_Suc_ivl [symmetric],
subst atLeastLessThanSuc_atLeastAtMost) simp_all
also have "\<dots> = z * of_real (harm n) - (\<Sum>k=1..n. ln (1 + z / of_nat k))"
by (simp add: harm_def sum_subtractf sum_distrib_left divide_inverse)
also from n have "\<dots> - ?g n = 0"
by (simp add: ln_Gamma_series_def sum_subtractf algebra_simps)
finally show "(\<Sum>k<n. ?f k) - ?g n = 0" .
qed
show "(\<lambda>n. (\<Sum>k<n. ?f k) - ?g n) \<longlonglongrightarrow> 0" by (subst tendsto_cong[OF A]) simp_all
qed
(* The termwise derivative of the log-Gamma series,
   \<Sum>_i (1/(i+1) - 1/(z+i+1)), converges uniformly on a ball of radius
   d \<le> |z|/2 around z \<noteq> 0.  Proof via the Weierstrass M-test with the
   dominating series 4 |z| / (i+1)\<^sup>2. *)
lemma uniformly_summable_deriv_ln_Gamma:
assumes z: "(z :: 'a :: {real_normed_field,banach}) \<noteq> 0" and d: "d > 0" "d \<le> norm z/2"
shows "uniformly_convergent_on (ball z d)
(\<lambda>k z. \<Sum>i<k. inverse (of_nat (Suc i)) - inverse (z + of_nat (Suc i)))"
(is "uniformly_convergent_on _ (\<lambda>k z. \<Sum>i<k. ?f i z)")
proof (rule Weierstrass_m_test'_ev)
{
(* On the ball: |z|/2 < |t| < 2|z|. *)
fix t assume t: "t \<in> ball z d"
have "norm z = norm (t + (z - t))" by simp
have "norm (t + (z - t)) \<le> norm t + norm (z - t)" by (rule norm_triangle_ineq)
also from t d have "norm (z - t) < norm z / 2" by (simp add: dist_norm)
finally have A: "norm t > norm z / 2" using z by (simp add: field_simps)
have "norm t = norm (z + (t - z))" by simp
also have "\<dots> \<le> norm z + norm (t - z)" by (rule norm_triangle_ineq)
also from t d have "norm (t - z) \<le> norm z / 2" by (simp add: dist_norm norm_minus_commute)
also from z have "\<dots> < norm z" by simp
finally have B: "norm t < 2 * norm z" by simp
note A B
} note ball = this
show "eventually (\<lambda>n. \<forall>t\<in>ball z d. norm (?f n t) \<le> 4 * norm z * inverse (of_nat (Suc n)^2)) sequentially"
using eventually_gt_at_top apply eventually_elim
proof safe
fix t :: 'a assume t: "t \<in> ball z d"
from z ball[OF t] have t_nz: "t \<noteq> 0" by auto
fix n :: nat assume n: "n > nat \<lceil>4 * norm z\<rceil>"
from ball[OF t] t_nz have "4 * norm z > 2 * norm t" by simp
also from n have "\<dots> < of_nat n" by linarith
finally have n: "of_nat n > 2 * norm t" .
hence "of_nat n > norm t" by simp
hence t': "t \<noteq> -of_nat (Suc n)" by (intro notI) (simp del: of_nat_Suc)
(* Rewrite the summand as a single fraction, then bound its norm. *)
with t_nz have "?f n t = 1 / (of_nat (Suc n) * (1 + of_nat (Suc n)/t))"
by (simp add: field_split_simps eq_neg_iff_add_eq_0 del: of_nat_Suc)
also have "norm \<dots> = inverse (of_nat (Suc n)) * inverse (norm (of_nat (Suc n)/t + 1))"
by (simp add: norm_divide norm_mult field_split_simps del: of_nat_Suc)
also {
from z t_nz ball[OF t] have "of_nat (Suc n) / (4 * norm z) \<le> of_nat (Suc n) / (2 * norm t)"
by (intro divide_left_mono mult_pos_pos) simp_all
also have "\<dots> < norm (of_nat (Suc n) / t) - norm (1 :: 'a)"
using t_nz n by (simp add: field_simps norm_divide del: of_nat_Suc)
also have "\<dots> \<le> norm (of_nat (Suc n)/t + 1)" by (rule norm_diff_ineq)
finally have "inverse (norm (of_nat (Suc n)/t + 1)) \<le> 4 * norm z / of_nat (Suc n)"
using z by (simp add: field_split_simps norm_divide mult_ac del: of_nat_Suc)
}
also have "inverse (real_of_nat (Suc n)) * (4 * norm z / real_of_nat (Suc n)) =
4 * norm z * inverse (of_nat (Suc n)^2)"
by (simp add: field_split_simps power2_eq_square del: of_nat_Suc)
finally show "norm (?f n t) \<le> 4 * norm z * inverse (of_nat (Suc n)^2)"
by (simp del: of_nat_Suc)
qed
next
(* The dominating series \<Sum> 4|z|/(n+1)\<^sup>2 is summable. *)
show "summable (\<lambda>n. 4 * norm z * inverse ((of_nat (Suc n))^2))"
by (subst summable_Suc_iff) (simp add: summable_mult inverse_power_summable)
qed
subsection \<open>The Polygamma functions\<close>
(* Pointwise summability of the derivative series for z \<noteq> 0, obtained by
   specialising the uniform result to a single point with radius |z|/2. *)
lemma summable_deriv_ln_Gamma:
"z \<noteq> (0 :: 'a :: {real_normed_field,banach}) \<Longrightarrow>
summable (\<lambda>n. inverse (of_nat (Suc n)) - inverse (z + of_nat (Suc n)))"
unfolding summable_iff_convergent
by (rule uniformly_convergent_imp_convergent,
rule uniformly_summable_deriv_ln_Gamma[of z "norm z/2"]) simp_all
(* The Polygamma functions.  Digamma (n = 0) is given by the series
   \<Sum>_k (1/(k+1) - 1/(z+k)) minus the Euler--Mascheroni constant; for n > 0,
   Polygamma n z = (-1)^{n+1} n! \<Sum>_k 1/(z+k)^{n+1}. *)
definition\<^marker>\<open>tag important\<close> Polygamma :: "nat \<Rightarrow> ('a :: {real_normed_field,banach}) \<Rightarrow> 'a" where
"Polygamma n z = (if n = 0 then
(\<Sum>k. inverse (of_nat (Suc k)) - inverse (z + of_nat k)) - euler_mascheroni else
(-1)^Suc n * fact n * (\<Sum>k. inverse ((z + of_nat k)^Suc n)))"
abbreviation\<^marker>\<open>tag important\<close> Digamma :: "('a :: {real_normed_field,banach}) \<Rightarrow> 'a" where
"Digamma \<equiv> Polygamma 0"
(* Unfolded form of Digamma, eliminating the case distinction. *)
lemma Digamma_def:
"Digamma z = (\<Sum>k. inverse (of_nat (Suc k)) - inverse (z + of_nat k)) - euler_mascheroni"
by (simp add: Polygamma_def)
(* Summability of the Digamma series for z \<noteq> 0: it is the sum of the
   (summable) derivative series and a telescoping series converging to
   -1/z. *)
lemma summable_Digamma:
assumes "(z :: 'a :: {real_normed_field,banach}) \<noteq> 0"
shows "summable (\<lambda>n. inverse (of_nat (Suc n)) - inverse (z + of_nat n))"
proof -
have sums: "(\<lambda>n. inverse (z + of_nat (Suc n)) - inverse (z + of_nat n)) sums
(0 - inverse (z + of_nat 0))"
by (intro telescope_sums filterlim_compose[OF tendsto_inverse_0]
tendsto_add_filterlim_at_infinity[OF tendsto_const] tendsto_of_nat)
from summable_add[OF summable_deriv_ln_Gamma[OF assms] sums_summable[OF sums]]
show "summable (\<lambda>n. inverse (of_nat (Suc n)) - inverse (z + of_nat n))" by simp
qed
(* If the shifted series \<Sum> f(n+k) is summable, so is \<Sum> f(n): prepending the
   finitely many initial terms preserves convergence of the partial sums. *)
lemma summable_offset:
assumes "summable (\<lambda>n. f (n + k) :: 'a :: real_normed_vector)"
shows "summable f"
proof -
from assms have "convergent (\<lambda>m. \<Sum>n<m. f (n + k))"
using summable_iff_convergent by blast
hence "convergent (\<lambda>m. (\<Sum>n<k. f n) + (\<Sum>n<m. f (n + k)))"
by (intro convergent_add convergent_const)
(* The augmented partial sums are exactly the partial sums of f up to m+k. *)
also have "(\<lambda>m. (\<Sum>n<k. f n) + (\<Sum>n<m. f (n + k))) = (\<lambda>m. \<Sum>n<m+k. f n)"
proof
fix m :: nat
have "{..<m+k} = {..<k} \<union> {k..<m+k}" by auto
also have "(\<Sum>n\<in>\<dots>. f n) = (\<Sum>n<k. f n) + (\<Sum>n=k..<m+k. f n)"
by (rule sum.union_disjoint) auto
also have "(\<Sum>n=k..<m+k. f n) = (\<Sum>n=0..<m+k-k. f (n + k))"
using sum.shift_bounds_nat_ivl [of f 0 k m] by simp
finally show "(\<Sum>n<k. f n) + (\<Sum>n<m. f (n + k)) = (\<Sum>n<m+k. f n)" by (simp add: atLeast0LessThan)
qed
(* A shifted convergent sequence of partial sums yields convergence of f. *)
finally have "(\<lambda>a. sum f {..<a}) \<longlonglongrightarrow> lim (\<lambda>m. sum f {..<m + k})"
by (auto simp: convergent_LIMSEQ_iff dest: LIMSEQ_offset)
thus ?thesis by (auto simp: summable_iff_convergent convergent_def)
qed
(* Uniform convergence of the Polygamma series \<Sum>_i 1/(z+i)^n (n \<ge> 2) on any
   ball around z \<noteq> 0.  M-test with dominating series 1/(k-m)^n for a suitable
   shift m depending on |z| and the radius d. *)
lemma Polygamma_converges:
fixes z :: "'a :: {real_normed_field,banach}"
assumes z: "z \<noteq> 0" and n: "n \<ge> 2"
shows "uniformly_convergent_on (ball z d) (\<lambda>k z. \<Sum>i<k. inverse ((z + of_nat i)^n))"
proof (rule Weierstrass_m_test'_ev)
(* e scales |z| to cover the ball; m is the corresponding index shift. *)
define e where "e = (1 + d / norm z)"
define m where "m = nat \<lceil>norm z * e\<rceil>"
{
fix t assume t: "t \<in> ball z d"
have "norm t = norm (z + (t - z))" by simp
also have "\<dots> \<le> norm z + norm (t - z)" by (rule norm_triangle_ineq)
also from t have "norm (t - z) < d" by (simp add: dist_norm norm_minus_commute)
finally have "norm t < norm z * e" using z by (simp add: divide_simps e_def)
} note ball = this
(* Eventual bound: |1/(t + k)^n| \<le> 1/(k - m)^n for k > m. *)
show "eventually (\<lambda>k. \<forall>t\<in>ball z d. norm (inverse ((t + of_nat k)^n)) \<le>
inverse (of_nat (k - m)^n)) sequentially"
using eventually_gt_at_top[of m] apply eventually_elim
proof (intro ballI)
fix k :: nat and t :: 'a assume k: "k > m" and t: "t \<in> ball z d"
from k have "real_of_nat (k - m) = of_nat k - of_nat m" by (simp add: of_nat_diff)
also have "\<dots> \<le> norm (of_nat k :: 'a) - norm z * e"
unfolding m_def by (subst norm_of_nat) linarith
also from ball[OF t] have "\<dots> \<le> norm (of_nat k :: 'a) - norm t" by simp
also have "\<dots> \<le> norm (of_nat k + t)" by (rule norm_diff_ineq)
finally have "inverse ((norm (t + of_nat k))^n) \<le> inverse (real_of_nat (k - m)^n)" using k n
by (intro le_imp_inverse_le power_mono) (simp_all add: add_ac del: of_nat_Suc)
thus "norm (inverse ((t + of_nat k)^n)) \<le> inverse (of_nat (k - m)^n)"
by (simp add: norm_inverse norm_power power_inverse)
qed
(* The dominating series is a shifted \<Sum> 1/k^n, summable via summable_offset. *)
have "summable (\<lambda>k. inverse ((real_of_nat k)^n))"
using inverse_power_summable[of n] n by simp
hence "summable (\<lambda>k. inverse ((real_of_nat (k + m - m))^n))" by simp
thus "summable (\<lambda>k. inverse ((real_of_nat (k - m))^n))" by (rule summable_offset)
qed
(* Pointwise summability of the Polygamma series for z \<noteq> 0 and n \<ge> 2,
   specialising the uniform convergence to the single point z. *)
lemma Polygamma_converges':
fixes z :: "'a :: {real_normed_field,banach}"
assumes z: "z \<noteq> 0" and n: "n \<ge> 2"
shows "summable (\<lambda>k. inverse ((z + of_nat k)^n))"
using uniformly_convergent_imp_convergent[OF Polygamma_converges[OF assms, of 1], of z]
by (simp add: summable_iff_convergent)
(* Classical limit formula for Digamma:
   Digamma z = lim_m (ln m - \<Sum>_{n<m} 1/(z+n)) for z \<noteq> 0.
   Combines the Digamma series with the Euler--Mascheroni sum; the leftover
   ln-telescoping term tends to 0. *)
theorem Digamma_LIMSEQ:
fixes z :: "'a :: {banach,real_normed_field}"
assumes z: "z \<noteq> 0"
shows "(\<lambda>m. of_real (ln (real m)) - (\<Sum>n<m. inverse (z + of_nat n))) \<longlonglongrightarrow> Digamma z"
proof -
(* ln n - ln (n+1) \<longlonglongrightarrow> 0, the correction between the two formulations. *)
have "(\<lambda>n. of_real (ln (real n / (real (Suc n))))) \<longlonglongrightarrow> (of_real (ln 1) :: 'a)"
by (intro tendsto_intros LIMSEQ_n_over_Suc_n) simp_all
hence "(\<lambda>n. of_real (ln (real n / (real n + 1)))) \<longlonglongrightarrow> (0 :: 'a)" by (simp add: add_ac)
hence lim: "(\<lambda>n. of_real (ln (real n)) - of_real (ln (real n + 1))) \<longlonglongrightarrow> (0::'a)"
proof (rule Lim_transform_eventually)
show "eventually (\<lambda>n. of_real (ln (real n / (real n + 1))) =
of_real (ln (real n)) - (of_real (ln (real n + 1)) :: 'a)) at_top"
using eventually_gt_at_top[of "0::nat"] by eventually_elim (simp add: ln_div)
qed
(* Subtract the Euler--Mascheroni series from the Digamma series. *)
from summable_Digamma[OF z]
have "(\<lambda>n. inverse (of_nat (n+1)) - inverse (z + of_nat n))
sums (Digamma z + euler_mascheroni)"
by (simp add: Digamma_def summable_sums)
from sums_diff[OF this euler_mascheroni_sum]
have "(\<lambda>n. of_real (ln (real (Suc n) + 1)) - of_real (ln (real n + 1)) - inverse (z + of_nat n))
sums Digamma z" by (simp add: add_ac)
hence "(\<lambda>m. (\<Sum>n<m. of_real (ln (real (Suc n) + 1)) - of_real (ln (real n + 1))) -
(\<Sum>n<m. inverse (z + of_nat n))) \<longlonglongrightarrow> Digamma z"
by (simp add: sums_def sum_subtractf)
(* The log terms telescope to ln (m + 1). *)
also have "(\<lambda>m. (\<Sum>n<m. of_real (ln (real (Suc n) + 1)) - of_real (ln (real n + 1)))) =
(\<lambda>m. of_real (ln (m + 1)) :: 'a)"
by (subst sum_lessThan_telescope) simp_all
finally show ?thesis by (rule Lim_transform) (insert lim, simp)
qed
(* Sums form of the Polygamma definition for n > 0:
   \<Sum>_k 1/(z+k)^{n+1} = (-1)^{n+1} Polygamma n z / n!. *)
theorem Polygamma_LIMSEQ:
fixes z :: "'a :: {banach,real_normed_field}"
assumes "z \<noteq> 0" and "n > 0"
shows "(\<lambda>k. inverse ((z + of_nat k)^Suc n)) sums ((-1) ^ Suc n * Polygamma n z / fact n)"
using Polygamma_converges'[OF assms(1), of "Suc n"] assms(2)
by (simp add: sums_iff Polygamma_def)
(* ln_Gamma is complex differentiable off the non-positive real axis, with
   derivative Digamma z.  The series representation from ln_Gamma_series'_aux
   is differentiated termwise (uniform convergence of the derivative series),
   and the result is identified with Digamma via the telescoping sum. *)
theorem has_field_derivative_ln_Gamma_complex [derivative_intros]:
fixes z :: complex
assumes z: "z \<notin> \<real>\<^sub>\<le>\<^sub>0"
shows "(ln_Gamma has_field_derivative Digamma z) (at z)"
proof -
have not_nonpos_Int [simp]: "t \<notin> \<int>\<^sub>\<le>\<^sub>0" if "Re t > 0" for t
using that by (auto elim!: nonpos_Ints_cases')
from z have z': "z \<notin> \<int>\<^sub>\<le>\<^sub>0" and z'': "z \<noteq> 0" using nonpos_Ints_subset_nonpos_Reals nonpos_Reals_zero_I
by blast+
let ?f' = "\<lambda>z k. inverse (of_nat (Suc k)) - inverse (z + of_nat (Suc k))"
let ?f = "\<lambda>z k. z / of_nat (Suc k) - ln (1 + z / of_nat (Suc k))" and ?F' = "\<lambda>z. \<Sum>n. ?f' z n"
(* d: a radius on which z stays away from 0 and from the nonpositive reals. *)
define d where "d = min (norm z/2) (if Im z = 0 then Re z / 2 else abs (Im z) / 2)"
from z have d: "d > 0" "norm z/2 \<ge> d" by (auto simp add: complex_nonpos_Reals_iff d_def)
have ball: "Im t = 0 \<longrightarrow> Re t > 0" if "dist z t < d" for t
using no_nonpos_Real_in_ball[OF z, of t] that unfolding d_def by (force simp add: complex_nonpos_Reals_iff)
have sums: "(\<lambda>n. inverse (z + of_nat (Suc n)) - inverse (z + of_nat n)) sums
(0 - inverse (z + of_nat 0))"
by (intro telescope_sums filterlim_compose[OF tendsto_inverse_0]
tendsto_add_filterlim_at_infinity[OF tendsto_const] tendsto_of_nat)
(* Termwise differentiation of the series \<Sum> ?f, valid on ball z d. *)
have "((\<lambda>z. \<Sum>n. ?f z n) has_field_derivative ?F' z) (at z)"
using d z ln_Gamma_series'_aux[OF z']
apply (intro has_field_derivative_series'(2)[of "ball z d" _ _ z] uniformly_summable_deriv_ln_Gamma)
apply (auto intro!: derivative_eq_intros add_pos_pos mult_pos_pos dest!: ball
simp: field_simps sums_iff nonpos_Reals_divide_of_nat_iff
simp del: of_nat_Suc)
apply (auto simp add: complex_nonpos_Reals_iff)
done
with z have "((\<lambda>z. (\<Sum>k. ?f z k) - euler_mascheroni * z - Ln z) has_field_derivative
?F' z - euler_mascheroni - inverse z) (at z)"
by (force intro!: derivative_eq_intros simp: Digamma_def)
(* Identify the derivative with Digamma z using the telescoping sum. *)
also have "?F' z - euler_mascheroni - inverse z = (?F' z + -inverse z) - euler_mascheroni" by simp
also from sums have "-inverse z = (\<Sum>n. inverse (z + of_nat (Suc n)) - inverse (z + of_nat n))"
by (simp add: sums_iff)
also from sums summable_deriv_ln_Gamma[OF z'']
have "?F' z + \<dots> = (\<Sum>n. inverse (of_nat (Suc n)) - inverse (z + of_nat n))"
by (subst suminf_add) (simp_all add: add_ac sums_iff)
also have "\<dots> - euler_mascheroni = Digamma z" by (simp add: Digamma_def)
finally have "((\<lambda>z. (\<Sum>k. ?f z k) - euler_mascheroni * z - Ln z)
has_field_derivative Digamma z) (at z)" .
(* The differentiated expression agrees with ln_Gamma on a neighbourhood. *)
moreover from eventually_nhds_ball[OF d(1), of z]
have "eventually (\<lambda>z. ln_Gamma z = (\<Sum>k. ?f z k) - euler_mascheroni * z - Ln z) (nhds z)"
proof eventually_elim
fix t assume "t \<in> ball z d"
hence "t \<notin> \<int>\<^sub>\<le>\<^sub>0" by (auto dest!: ball elim!: nonpos_Ints_cases)
from ln_Gamma_series'_aux[OF this]
show "ln_Gamma t = (\<Sum>k. ?f t k) - euler_mascheroni * t - Ln t" by (simp add: sums_iff)
qed
ultimately show ?thesis by (subst DERIV_cong_ev[OF refl _ refl])
qed
declare has_field_derivative_ln_Gamma_complex[THEN DERIV_chain2, derivative_intros]
(* Digamma at 1 equals the negated Euler-Mascheroni constant (the telescoping
   series in Digamma_def collapses at z = 1). *)
lemma Digamma_1 [simp]: "Digamma (1 :: 'a :: {real_normed_field,banach}) = - euler_mascheroni"
by (simp add: Digamma_def)
(* The recurrence of the Digamma function: Digamma (z+1) = Digamma z + 1/z,
   proven by splitting the defining series with a telescoping sum. *)
lemma Digamma_plus1:
assumes "z \<noteq> 0"
shows "Digamma (z+1) = Digamma z + 1/z"
proof -
have sums: "(\<lambda>k. inverse (z + of_nat k) - inverse (z + of_nat (Suc k)))
sums (inverse (z + of_nat 0) - 0)"
by (intro telescope_sums'[OF filterlim_compose[OF tendsto_inverse_0]]
tendsto_add_filterlim_at_infinity[OF tendsto_const] tendsto_of_nat)
have "Digamma (z+1) = (\<Sum>k. inverse (of_nat (Suc k)) - inverse (z + of_nat (Suc k))) -
euler_mascheroni" (is "_ = suminf ?f - _") by (simp add: Digamma_def add_ac)
also have "suminf ?f = (\<Sum>k. inverse (of_nat (Suc k)) - inverse (z + of_nat k)) +
(\<Sum>k. inverse (z + of_nat k) - inverse (z + of_nat (Suc k)))"
using summable_Digamma[OF assms] sums by (subst suminf_add) (simp_all add: add_ac sums_iff)
also have "(\<Sum>k. inverse (z + of_nat k) - inverse (z + of_nat (Suc k))) = 1/z"
using sums by (simp add: sums_iff inverse_eq_divide)
finally show ?thesis by (simp add: Digamma_def[of z])
qed
(* Recurrence for the higher Polygamma functions:
   Polygamma n (z+1) = Polygamma n z + (-1)^n * n! / z^(n+1).
   The n = 0 case is delegated to Digamma_plus1; for n > 0 the defining
   series is re-indexed by splitting off its first term. *)
theorem Polygamma_plus1:
assumes "z \<noteq> 0"
shows "Polygamma n (z + 1) = Polygamma n z + (-1)^n * fact n / (z ^ Suc n)"
proof (cases "n = 0")
assume n: "n \<noteq> 0"
let ?f = "\<lambda>k. inverse ((z + of_nat k) ^ Suc n)"
have "Polygamma n (z + 1) = (-1) ^ Suc n * fact n * (\<Sum>k. ?f (k+1))"
using n by (simp add: Polygamma_def add_ac)
also have "(\<Sum>k. ?f (k+1)) + (\<Sum>k<1. ?f k) = (\<Sum>k. ?f k)"
using Polygamma_converges'[OF assms, of "Suc n"] n
by (subst suminf_split_initial_segment [symmetric]) simp_all
hence "(\<Sum>k. ?f (k+1)) = (\<Sum>k. ?f k) - inverse (z ^ Suc n)" by (simp add: algebra_simps)
also have "(-1) ^ Suc n * fact n * ((\<Sum>k. ?f k) - inverse (z ^ Suc n)) =
Polygamma n z + (-1)^n * fact n / (z ^ Suc n)" using n
by (simp add: inverse_eq_divide algebra_simps Polygamma_def)
finally show ?thesis .
qed (insert assms, simp add: Digamma_plus1 inverse_eq_divide)
(* Digamma at positive integers in terms of harmonic numbers:
   Digamma (n+1) = H_n - euler_mascheroni, by induction using the recurrence. *)
theorem Digamma_of_nat:
"Digamma (of_nat (Suc n) :: 'a :: {real_normed_field,banach}) = harm n - euler_mascheroni"
proof (induction n)
case (Suc n)
have "Digamma (of_nat (Suc (Suc n)) :: 'a) = Digamma (of_nat (Suc n) + 1)" by simp
also have "\<dots> = Digamma (of_nat (Suc n)) + inverse (of_nat (Suc n))"
by (subst Digamma_plus1) (simp_all add: inverse_eq_divide del: of_nat_Suc)
also have "Digamma (of_nat (Suc n) :: 'a) = harm n - euler_mascheroni " by (rule Suc)
also have "\<dots> + inverse (of_nat (Suc n)) = harm (Suc n) - euler_mascheroni"
by (simp add: harm_Suc)
finally show ?case .
qed (simp add: harm_def)
(* Numeral form of the previous fact, convenient for simp. *)
lemma Digamma_numeral: "Digamma (numeral n) = harm (pred_numeral n) - euler_mascheroni"
by (subst of_nat_numeral[symmetric], subst numeral_eq_Suc, subst Digamma_of_nat) (rule refl)
(* Polygamma commutes with the embedding of the reals, hence maps reals to reals. *)
lemma Polygamma_of_real: "x \<noteq> 0 \<Longrightarrow> Polygamma n (of_real x) = of_real (Polygamma n x)"
unfolding Polygamma_def using summable_Digamma[of x] Polygamma_converges'[of x "Suc n"]
by (simp_all add: suminf_of_real)
lemma Polygamma_Real: "z \<in> \<real> \<Longrightarrow> z \<noteq> 0 \<Longrightarrow> Polygamma n z \<in> \<real>"
by (elim Reals_cases, hypsubst, subst Polygamma_of_real) simp_all
(* Digamma at half-integers: Digamma (n + 1/2) is a finite sum of odd
   reciprocals minus euler_mascheroni minus 2 ln 2.  The base case n = 0
   reduces to the alternating harmonic series; the step uses Digamma_plus1. *)
lemma Digamma_half_integer:
"Digamma (of_nat n + 1/2 :: 'a :: {real_normed_field,banach}) =
(\<Sum>k<n. 2 / (of_nat (2*k+1))) - euler_mascheroni - of_real (2 * ln 2)"
proof (induction n)
case 0
have "Digamma (1/2 :: 'a) = of_real (Digamma (1/2))" by (simp add: Polygamma_of_real [symmetric])
also have "Digamma (1/2::real) =
(\<Sum>k. inverse (of_nat (Suc k)) - inverse (of_nat k + 1/2)) - euler_mascheroni"
by (simp add: Digamma_def add_ac)
also have "(\<Sum>k. inverse (of_nat (Suc k) :: real) - inverse (of_nat k + 1/2)) =
(\<Sum>k. inverse (1/2) * (inverse (2 * of_nat (Suc k)) - inverse (2 * of_nat k + 1)))"
by (simp_all add: add_ac inverse_mult_distrib[symmetric] ring_distribs del: inverse_divide)
(* The inner series is (twice) the alternating harmonic series, summing to ln 2. *)
also have "\<dots> = - 2 * ln 2" using sums_minus[OF alternating_harmonic_series_sums']
by (subst suminf_mult) (simp_all add: algebra_simps sums_iff)
finally show ?case by simp
next
case (Suc n)
have nz: "2 * of_nat n + (1:: 'a) \<noteq> 0"
using of_nat_neq_0[of "2*n"] by (simp only: of_nat_Suc) (simp add: add_ac)
hence nz': "of_nat n + (1/2::'a) \<noteq> 0" by (simp add: field_simps)
have "Digamma (of_nat (Suc n) + 1/2 :: 'a) = Digamma (of_nat n + 1/2 + 1)" by simp
also from nz' have "\<dots> = Digamma (of_nat n + 1/2) + 1 / (of_nat n + 1/2)"
by (rule Digamma_plus1)
also from nz nz' have "1 / (of_nat n + 1/2 :: 'a) = 2 / (2 * of_nat n + 1)"
by (subst divide_eq_eq) simp_all
also note Suc
finally show ?case by (simp add: add_ac)
qed
(* Special case n = 0 of the previous lemma. *)
lemma Digamma_one_half: "Digamma (1/2) = - euler_mascheroni - of_real (2 * ln 2)"
using Digamma_half_integer[of 0] by simp
(* Digamma (3/2) > 0, established by bounding 2 ln 2 + euler_mascheroni - 2
   from above using rational bounds on ln 2 and the Euler-Mascheroni constant. *)
lemma Digamma_real_three_halves_pos: "Digamma (3/2 :: real) > 0"
proof -
have "-Digamma (3/2 :: real) = -Digamma (of_nat 1 + 1/2)" by simp
also have "\<dots> = 2 * ln 2 + euler_mascheroni - 2" by (subst Digamma_half_integer) simp
also note euler_mascheroni_less_13_over_22
also note ln2_le_25_over_36
finally show ?thesis by simp
qed
(* The derivative of Polygamma n is Polygamma (Suc n), away from the
   non-positive integers.  Both cases (n = 0, i.e. Digamma, and n > 0)
   are handled by differentiating the respective defining series termwise
   on a ball that avoids the non-positive integers. *)
theorem has_field_derivative_Polygamma [derivative_intros]:
fixes z :: "'a :: {real_normed_field,euclidean_space}"
assumes z: "z \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "(Polygamma n has_field_derivative Polygamma (Suc n) z) (at z within A)"
proof (rule has_field_derivative_at_within, cases "n = 0")
assume n: "n = 0"
let ?f = "\<lambda>k z. inverse (of_nat (Suc k)) - inverse (z + of_nat k)"
let ?F = "\<lambda>z. \<Sum>k. ?f k z" and ?f' = "\<lambda>k z. inverse ((z + of_nat k)\<^sup>2)"
from no_nonpos_Int_in_ball'[OF z] obtain d where d: "0 < d" "\<And>t. t \<in> ball z d \<Longrightarrow> t \<notin> \<int>\<^sub>\<le>\<^sub>0"
by auto
from z have summable: "summable (\<lambda>k. inverse (of_nat (Suc k)) - inverse (z + of_nat k))"
by (intro summable_Digamma) force
from z have conv: "uniformly_convergent_on (ball z d) (\<lambda>k z. \<Sum>i<k. inverse ((z + of_nat i)\<^sup>2))"
by (intro Polygamma_converges) auto
with d have "summable (\<lambda>k. inverse ((z + of_nat k)\<^sup>2))" unfolding summable_iff_convergent
by (auto dest!: uniformly_convergent_imp_convergent simp: summable_iff_convergent )
(* Termwise differentiation of the Digamma series. *)
have "(?F has_field_derivative (\<Sum>k. ?f' k z)) (at z)"
proof (rule has_field_derivative_series'[of "ball z d" _ _ z])
fix k :: nat and t :: 'a assume t: "t \<in> ball z d"
from t d(2)[of t] show "((\<lambda>z. ?f k z) has_field_derivative ?f' k t) (at t within ball z d)"
by (auto intro!: derivative_eq_intros simp: power2_eq_square simp del: of_nat_Suc
dest!: plus_of_nat_eq_0_imp elim!: nonpos_Ints_cases)
qed (insert d(1) summable conv, (assumption|simp)+)
with z show "(Polygamma n has_field_derivative Polygamma (Suc n) z) (at z)"
unfolding Digamma_def [abs_def] Polygamma_def [abs_def] using n
by (force simp: power2_eq_square intro!: derivative_eq_intros)
next
assume n: "n \<noteq> 0"
from z have z': "z \<noteq> 0" by auto
from no_nonpos_Int_in_ball'[OF z] obtain d where d: "0 < d" "\<And>t. t \<in> ball z d \<Longrightarrow> t \<notin> \<int>\<^sub>\<le>\<^sub>0"
by auto
define n' where "n' = Suc n"
from n have n': "n' \<ge> 2" by (simp add: n'_def)
(* Termwise differentiation of the series of inverse n'-th powers. *)
have "((\<lambda>z. \<Sum>k. inverse ((z + of_nat k) ^ n')) has_field_derivative
(\<Sum>k. - of_nat n' * inverse ((z + of_nat k) ^ (n'+1)))) (at z)"
proof (rule has_field_derivative_series'[of "ball z d" _ _ z])
fix k :: nat and t :: 'a assume t: "t \<in> ball z d"
with d have t': "t \<notin> \<int>\<^sub>\<le>\<^sub>0" "t \<noteq> 0" by auto
show "((\<lambda>a. inverse ((a + of_nat k) ^ n')) has_field_derivative
- of_nat n' * inverse ((t + of_nat k) ^ (n'+1))) (at t within ball z d)" using t'
by (fastforce intro!: derivative_eq_intros simp: divide_simps power_diff dest: plus_of_nat_eq_0_imp)
next
have "uniformly_convergent_on (ball z d)
(\<lambda>k z. (- of_nat n' :: 'a) * (\<Sum>i<k. inverse ((z + of_nat i) ^ (n'+1))))"
using z' n by (intro uniformly_convergent_mult Polygamma_converges) (simp_all add: n'_def)
thus "uniformly_convergent_on (ball z d)
(\<lambda>k z. \<Sum>i<k. - of_nat n' * inverse ((z + of_nat i :: 'a) ^ (n'+1)))"
by (subst (asm) sum_distrib_left) simp
qed (insert Polygamma_converges'[OF z' n'] d, simp_all)
also have "(\<Sum>k. - of_nat n' * inverse ((z + of_nat k) ^ (n' + 1))) =
(- of_nat n') * (\<Sum>k. inverse ((z + of_nat k) ^ (n' + 1)))"
using Polygamma_converges'[OF z', of "n'+1"] n' by (subst suminf_mult) simp_all
finally have "((\<lambda>z. \<Sum>k. inverse ((z + of_nat k) ^ n')) has_field_derivative
- of_nat n' * (\<Sum>k. inverse ((z + of_nat k) ^ (n' + 1)))) (at z)" .
from DERIV_cmult[OF this, of "(-1)^Suc n * fact n :: 'a"]
show "(Polygamma n has_field_derivative Polygamma (Suc n) z) (at z)"
unfolding n'_def Polygamma_def[abs_def] using n by (simp add: algebra_simps)
qed
declare has_field_derivative_Polygamma[THEN DERIV_chain2, derivative_intros]
(* Continuity of Polygamma and complex ln_Gamma, as corollaries of their
   differentiability on the respective domains. *)
lemma isCont_Polygamma [continuous_intros]:
fixes f :: "_ \<Rightarrow> 'a :: {real_normed_field,euclidean_space}"
shows "isCont f z \<Longrightarrow> f z \<notin> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> isCont (\<lambda>x. Polygamma n (f x)) z"
by (rule isCont_o2[OF _ DERIV_isCont[OF has_field_derivative_Polygamma]])
lemma continuous_on_Polygamma:
"A \<inter> \<int>\<^sub>\<le>\<^sub>0 = {} \<Longrightarrow> continuous_on A (Polygamma n :: _ \<Rightarrow> 'a :: {real_normed_field,euclidean_space})"
by (intro continuous_at_imp_continuous_on isCont_Polygamma[OF continuous_ident] ballI) blast
lemma isCont_ln_Gamma_complex [continuous_intros]:
fixes f :: "'a::t2_space \<Rightarrow> complex"
shows "isCont f z \<Longrightarrow> f z \<notin> \<real>\<^sub>\<le>\<^sub>0 \<Longrightarrow> isCont (\<lambda>z. ln_Gamma (f z)) z"
by (rule isCont_o2[OF _ DERIV_isCont[OF has_field_derivative_ln_Gamma_complex]])
lemma continuous_on_ln_Gamma_complex [continuous_intros]:
fixes A :: "complex set"
shows "A \<inter> \<real>\<^sub>\<le>\<^sub>0 = {} \<Longrightarrow> continuous_on A ln_Gamma"
by (intro continuous_at_imp_continuous_on ballI isCont_ln_Gamma_complex[OF continuous_ident])
fastforce
(* The 'deriv' operator applied to Polygamma m yields Polygamma (Suc m). *)
lemma deriv_Polygamma:
assumes "z \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "deriv (Polygamma m) z =
Polygamma (Suc m) (z :: 'a :: {real_normed_field,euclidean_space})"
by (intro DERIV_imp_deriv has_field_derivative_Polygamma assms)
(* Removed stray diagnostic command "thm has_field_derivative_Polygamma":
   it was leftover development residue that only prints the theorem during
   processing and has no effect on the theory. *)
(* Iterated derivatives of Polygamma: (deriv ^^ n) (Polygamma m) = Polygamma (m+n).
   Proven by induction via an 'eventually in a neighbourhood' argument, since
   deriv_Polygamma holds on the open set UNIV - nonpositive integers. *)
lemma higher_deriv_Polygamma:
assumes "z \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "(deriv ^^ n) (Polygamma m) z =
Polygamma (m + n) (z :: 'a :: {real_normed_field,euclidean_space})"
proof -
have "eventually (\<lambda>u. (deriv ^^ n) (Polygamma m) u = Polygamma (m + n) u) (nhds z)"
proof (induction n)
case (Suc n)
from Suc.IH have "eventually (\<lambda>z. eventually (\<lambda>u. (deriv ^^ n) (Polygamma m) u = Polygamma (m + n) u) (nhds z)) (nhds z)"
by (simp add: eventually_eventually)
hence "eventually (\<lambda>z. deriv ((deriv ^^ n) (Polygamma m)) z =
deriv (Polygamma (m + n)) z) (nhds z)"
by eventually_elim (intro deriv_cong_ev refl)
moreover have "eventually (\<lambda>z. z \<in> UNIV - \<int>\<^sub>\<le>\<^sub>0) (nhds z)" using assms
by (intro eventually_nhds_in_open open_Diff open_UNIV) auto
ultimately show ?case by eventually_elim (simp_all add: deriv_Polygamma)
qed simp_all
thus ?thesis by (rule eventually_nhds_x_imp_x)
qed
(* 'deriv' of complex ln_Gamma is Digamma; its iterated derivatives are
   the Polygamma functions (shifted by one). *)
lemma deriv_ln_Gamma_complex:
assumes "z \<notin> \<real>\<^sub>\<le>\<^sub>0"
shows "deriv ln_Gamma z = Digamma (z :: complex)"
by (intro DERIV_imp_deriv has_field_derivative_ln_Gamma_complex assms)
lemma higher_deriv_ln_Gamma_complex:
assumes "(x::complex) \<notin> \<real>\<^sub>\<le>\<^sub>0"
shows "(deriv ^^ j) ln_Gamma x = (if j = 0 then ln_Gamma x else Polygamma (j - 1) x)"
proof (cases j)
case (Suc j')
have "(deriv ^^ j') (deriv ln_Gamma) x = (deriv ^^ j') Digamma x"
using eventually_nhds_in_open[of "UNIV - \<real>\<^sub>\<le>\<^sub>0" x] assms
by (intro higher_deriv_cong_ev refl)
(auto elim!: eventually_mono simp: open_Diff deriv_ln_Gamma_complex)
also have "\<dots> = Polygamma j' x" using assms
by (subst higher_deriv_Polygamma)
(auto elim!: nonpos_Ints_cases simp: complex_nonpos_Reals_iff)
finally show ?thesis using Suc by (simp del: funpow.simps add: funpow_Suc_right)
qed simp_all
text \<open>
We define a type class that captures all the fundamental properties of the inverse of the Gamma
function and defines the Gamma function in terms of it. This allows us to instantiate the type class
for both the reals and the complex numbers with a minimal amount of proof duplication.
\<close>
(* The axiomatic type class characterising rGamma (the reciprocal Gamma
   function): it vanishes exactly at the non-positive integers, is
   differentiable everywhere (with the appropriate derivative at regular
   points and at the non-positive integers), and is the limit of the
   Gauss/Euler product series.  The axioms avoid referring to Digamma,
   exp, etc. directly by spelling out their defining limits. *)
class\<^marker>\<open>tag unimportant\<close> Gamma = real_normed_field + complete_space +
fixes rGamma :: "'a \<Rightarrow> 'a"
assumes rGamma_eq_zero_iff_aux: "rGamma z = 0 \<longleftrightarrow> (\<exists>n. z = - of_nat n)"
assumes differentiable_rGamma_aux1:
"(\<And>n. z \<noteq> - of_nat n) \<Longrightarrow>
let d = (THE d. (\<lambda>n. \<Sum>k<n. inverse (of_nat (Suc k)) - inverse (z + of_nat k))
\<longlonglongrightarrow> d) - scaleR euler_mascheroni 1
in filterlim (\<lambda>y. (rGamma y - rGamma z + rGamma z * d * (y - z)) /\<^sub>R
norm (y - z)) (nhds 0) (at z)"
assumes differentiable_rGamma_aux2:
"let z = - of_nat n
in filterlim (\<lambda>y. (rGamma y - rGamma z - (-1)^n * (prod of_nat {1..n}) * (y - z)) /\<^sub>R
norm (y - z)) (nhds 0) (at z)"
assumes rGamma_series_aux: "(\<And>n. z \<noteq> - of_nat n) \<Longrightarrow>
let fact' = (\<lambda>n. prod of_nat {1..n});
exp = (\<lambda>x. THE e. (\<lambda>n. \<Sum>k<n. x^k /\<^sub>R fact k) \<longlonglongrightarrow> e);
pochhammer' = (\<lambda>a n. (\<Prod>n = 0..n. a + of_nat n))
in filterlim (\<lambda>n. pochhammer' z n / (fact' n * exp (z * (ln (of_nat n) *\<^sub>R 1))))
(nhds (rGamma z)) sequentially"
begin
subclass banach ..
end
(* The Gamma function itself, defined as the inverse of rGamma. *)
definition "Gamma z = inverse (rGamma z)"
subsection \<open>Basic properties\<close>
(* Gamma and rGamma vanish exactly at the non-positive integers; these
   variants unpack the class axiom rGamma_eq_zero_iff_aux in several forms. *)
lemma Gamma_nonpos_Int: "z \<in> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> Gamma z = 0"
and rGamma_nonpos_Int: "z \<in> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> rGamma z = 0"
using rGamma_eq_zero_iff_aux[of z] unfolding Gamma_def by (auto elim!: nonpos_Ints_cases')
lemma Gamma_nonzero: "z \<notin> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> Gamma z \<noteq> 0"
and rGamma_nonzero: "z \<notin> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> rGamma z \<noteq> 0"
using rGamma_eq_zero_iff_aux[of z] unfolding Gamma_def by (auto elim!: nonpos_Ints_cases')
lemma Gamma_eq_zero_iff: "Gamma z = 0 \<longleftrightarrow> z \<in> \<int>\<^sub>\<le>\<^sub>0"
and rGamma_eq_zero_iff: "rGamma z = 0 \<longleftrightarrow> z \<in> \<int>\<^sub>\<le>\<^sub>0"
using rGamma_eq_zero_iff_aux[of z] unfolding Gamma_def by (auto elim!: nonpos_Ints_cases')
lemma rGamma_inverse_Gamma: "rGamma z = inverse (Gamma z)"
unfolding Gamma_def by simp
(* Convergence of the defining series: rGamma_series tends to rGamma and
   Gamma_series tends to Gamma, with the non-positive-integer case handled
   separately (both sides are 0 there). *)
lemma rGamma_series_LIMSEQ [tendsto_intros]:
"rGamma_series z \<longlonglongrightarrow> rGamma z"
proof (cases "z \<in> \<int>\<^sub>\<le>\<^sub>0")
case False
hence "z \<noteq> - of_nat n" for n by auto
from rGamma_series_aux[OF this] show ?thesis
by (simp add: rGamma_series_def[abs_def] fact_prod pochhammer_Suc_prod
exp_def of_real_def[symmetric] suminf_def sums_def[abs_def] atLeast0AtMost)
qed (insert rGamma_eq_zero_iff[of z], simp_all add: rGamma_series_nonpos_Ints_LIMSEQ)
theorem Gamma_series_LIMSEQ [tendsto_intros]:
"Gamma_series z \<longlonglongrightarrow> Gamma z"
proof (cases "z \<in> \<int>\<^sub>\<le>\<^sub>0")
case False
hence "(\<lambda>n. inverse (rGamma_series z n)) \<longlonglongrightarrow> inverse (rGamma z)"
by (intro tendsto_intros) (simp_all add: rGamma_eq_zero_iff)
also have "(\<lambda>n. inverse (rGamma_series z n)) = Gamma_series z"
by (simp add: rGamma_series_def Gamma_series_def[abs_def])
finally show ?thesis by (simp add: Gamma_def)
qed (insert Gamma_eq_zero_iff[of z], simp_all add: Gamma_series_nonpos_Ints_LIMSEQ)
(* Gamma as the limit of its defining series. *)
lemma Gamma_altdef: "Gamma z = lim (Gamma_series z)"
using Gamma_series_LIMSEQ[of z] by (simp add: limI)
(* rGamma 1 = 1: the series at z = 1 simplifies to (n+1)/n, which tends to 1. *)
lemma rGamma_1 [simp]: "rGamma 1 = 1"
proof -
have A: "eventually (\<lambda>n. rGamma_series 1 n = of_nat (Suc n) / of_nat n) sequentially"
using eventually_gt_at_top[of "0::nat"]
by (force elim!: eventually_mono simp: rGamma_series_def exp_of_real pochhammer_fact
field_split_simps pochhammer_rec' dest!: pochhammer_eq_0_imp_nonpos_Int)
have "rGamma_series 1 \<longlonglongrightarrow> 1" by (subst tendsto_cong[OF A]) (rule LIMSEQ_Suc_n_over_n)
moreover have "rGamma_series 1 \<longlonglongrightarrow> rGamma 1" by (rule tendsto_intros)
ultimately show ?thesis by (intro LIMSEQ_unique)
qed
(* The functional equation of the reciprocal Gamma function:
   z * rGamma (z + 1) = rGamma z.  Obtained by relating the partial products
   of the two series and passing to the limit (uniqueness of limits). *)
lemma rGamma_plus1: "z * rGamma (z + 1) = rGamma z"
proof -
let ?f = "\<lambda>n. (z + 1) * inverse (of_nat n) + 1"
have "eventually (\<lambda>n. ?f n * rGamma_series z n = z * rGamma_series (z + 1) n) sequentially"
using eventually_gt_at_top[of "0::nat"]
proof eventually_elim
fix n :: nat assume n: "n > 0"
hence "z * rGamma_series (z + 1) n = inverse (of_nat n) *
pochhammer z (Suc (Suc n)) / (fact n * exp (z * of_real (ln (of_nat n))))"
by (subst pochhammer_rec) (simp add: rGamma_series_def field_simps exp_add exp_of_real)
also from n have "\<dots> = ?f n * rGamma_series z n"
by (subst pochhammer_rec') (simp_all add: field_split_simps rGamma_series_def)
finally show "?f n * rGamma_series z n = z * rGamma_series (z + 1) n" ..
qed
moreover have "(\<lambda>n. ?f n * rGamma_series z n) \<longlonglongrightarrow> ((z+1) * 0 + 1) * rGamma z"
by (intro tendsto_intros lim_inverse_n)
hence "(\<lambda>n. ?f n * rGamma_series z n) \<longlonglongrightarrow> rGamma z" by simp
ultimately have "(\<lambda>n. z * rGamma_series (z + 1) n) \<longlonglongrightarrow> rGamma z"
by (blast intro: Lim_transform_eventually)
moreover have "(\<lambda>n. z * rGamma_series (z + 1) n) \<longlonglongrightarrow> z * rGamma (z + 1)"
by (intro tendsto_intros)
ultimately show "z * rGamma (z + 1) = rGamma z" using LIMSEQ_unique by blast
qed
(* Iterating the functional equation: rGamma z = pochhammer z n * rGamma (z + n),
   and its consequences for Gamma (the classic recurrence and the
   pochhammer-as-Gamma-quotient identity). *)
lemma pochhammer_rGamma: "rGamma z = pochhammer z n * rGamma (z + of_nat n)"
proof (induction n arbitrary: z)
case (Suc n z)
have "rGamma z = pochhammer z n * rGamma (z + of_nat n)" by (rule Suc.IH)
also note rGamma_plus1 [symmetric]
finally show ?case by (simp add: add_ac pochhammer_rec')
qed simp_all
theorem Gamma_plus1: "z \<notin> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> Gamma (z + 1) = z * Gamma z"
using rGamma_plus1[of z] by (simp add: rGamma_inverse_Gamma field_simps Gamma_eq_zero_iff)
theorem pochhammer_Gamma: "z \<notin> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> pochhammer z n = Gamma (z + of_nat n) / Gamma z"
using pochhammer_rGamma[of z]
by (simp add: rGamma_inverse_Gamma Gamma_eq_zero_iff field_simps)
(* Special values: Gamma/rGamma vanish at non-positive integers, Gamma 1 = 1,
   and Gamma at positive integers gives factorials. *)
lemma Gamma_0 [simp]: "Gamma 0 = 0"
and rGamma_0 [simp]: "rGamma 0 = 0"
and Gamma_neg_1 [simp]: "Gamma (- 1) = 0"
and rGamma_neg_1 [simp]: "rGamma (- 1) = 0"
and Gamma_neg_numeral [simp]: "Gamma (- numeral n) = 0"
and rGamma_neg_numeral [simp]: "rGamma (- numeral n) = 0"
and Gamma_neg_of_nat [simp]: "Gamma (- of_nat m) = 0"
and rGamma_neg_of_nat [simp]: "rGamma (- of_nat m) = 0"
by (simp_all add: rGamma_eq_zero_iff Gamma_eq_zero_iff)
lemma Gamma_1 [simp]: "Gamma 1 = 1" unfolding Gamma_def by simp
theorem Gamma_fact: "Gamma (1 + of_nat n) = fact n"
by (simp add: pochhammer_fact pochhammer_Gamma of_nat_in_nonpos_Ints_iff flip: of_nat_Suc)
lemma Gamma_numeral: "Gamma (numeral n) = fact (pred_numeral n)"
by (subst of_nat_numeral[symmetric], subst numeral_eq_Suc,
subst of_nat_Suc, subst Gamma_fact) (rule refl)
(* Gamma and rGamma at integer arguments: factorial of (n-1) for positive n,
   zero otherwise. *)
lemma Gamma_of_int: "Gamma (of_int n) = (if n > 0 then fact (nat (n - 1)) else 0)"
proof (cases "n > 0")
case True
hence "Gamma (of_int n) = Gamma (of_nat (Suc (nat (n - 1))))" by (subst of_nat_Suc) simp_all
with True show ?thesis by (subst (asm) of_nat_Suc, subst (asm) Gamma_fact) simp
qed (simp_all add: Gamma_eq_zero_iff nonpos_Ints_of_int)
lemma rGamma_of_int: "rGamma (of_int n) = (if n > 0 then inverse (fact (nat (n - 1))) else 0)"
by (simp add: Gamma_of_int rGamma_inverse_Gamma)
(* Limit-introduction rules: a sequence asymptotically equivalent to
   Gamma_series (quotient tending to 1) converges to Gamma z.  The second
   variant works through an auxiliary sequence converging to rGamma z. *)
lemma Gamma_seriesI:
assumes "(\<lambda>n. g n / Gamma_series z n) \<longlonglongrightarrow> 1"
shows "g \<longlonglongrightarrow> Gamma z"
proof (rule Lim_transform_eventually)
(* Eventually the quotient is nonzero, so multiplying back recovers g. *)
have "1/2 > (0::real)" by simp
from tendstoD[OF assms, OF this]
show "eventually (\<lambda>n. g n / Gamma_series z n * Gamma_series z n = g n) sequentially"
by (force elim!: eventually_mono simp: dist_real_def)
from assms have "(\<lambda>n. g n / Gamma_series z n * Gamma_series z n) \<longlonglongrightarrow> 1 * Gamma z"
by (intro tendsto_intros)
thus "(\<lambda>n. g n / Gamma_series z n * Gamma_series z n) \<longlonglongrightarrow> Gamma z" by simp
qed
lemma Gamma_seriesI':
assumes "f \<longlonglongrightarrow> rGamma z"
assumes "(\<lambda>n. g n * f n) \<longlonglongrightarrow> 1"
assumes "z \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "g \<longlonglongrightarrow> Gamma z"
proof (rule Lim_transform_eventually)
have "1/2 > (0::real)" by simp
from tendstoD[OF assms(2), OF this] show "eventually (\<lambda>n. g n * f n / f n = g n) sequentially"
by (force elim!: eventually_mono simp: dist_real_def)
from assms have "(\<lambda>n. g n * f n / f n) \<longlonglongrightarrow> 1 / rGamma z"
by (intro tendsto_divide assms) (simp_all add: rGamma_eq_zero_iff)
thus "(\<lambda>n. g n * f n / f n) \<longlonglongrightarrow> Gamma z" by (simp add: Gamma_def divide_inverse)
qed
(* The alternative series Gamma_series' also converges to Gamma. *)
lemma Gamma_series'_LIMSEQ: "Gamma_series' z \<longlonglongrightarrow> Gamma z"
by (cases "z \<in> \<int>\<^sub>\<le>\<^sub>0") (simp_all add: Gamma_nonpos_Int Gamma_seriesI[OF Gamma_series_Gamma_series']
Gamma_series'_nonpos_Ints_LIMSEQ[of z])
subsection \<open>Differentiability\<close>
(* Differentiability of rGamma, extracted from the class axioms: the
   derivative is -rGamma z * Digamma z at regular points and (-1)^n * n!
   at z = -n.  The combined rule dispatches on membership in the
   non-positive integers. *)
lemma has_field_derivative_rGamma_no_nonpos_int:
assumes "z \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "(rGamma has_field_derivative -rGamma z * Digamma z) (at z within A)"
proof (rule has_field_derivative_at_within)
from assms have "z \<noteq> - of_nat n" for n by auto
from differentiable_rGamma_aux1[OF this]
show "(rGamma has_field_derivative -rGamma z * Digamma z) (at z)"
unfolding Digamma_def suminf_def sums_def[abs_def]
has_field_derivative_def has_derivative_def netlimit_at
by (simp add: Let_def bounded_linear_mult_right mult_ac of_real_def [symmetric])
qed
lemma has_field_derivative_rGamma_nonpos_int:
"(rGamma has_field_derivative (-1)^n * fact n) (at (- of_nat n) within A)"
apply (rule has_field_derivative_at_within)
using differentiable_rGamma_aux2[of n]
unfolding Let_def has_field_derivative_def has_derivative_def netlimit_at
by (simp only: bounded_linear_mult_right mult_ac of_real_def [symmetric] fact_prod) simp
lemma has_field_derivative_rGamma [derivative_intros]:
"(rGamma has_field_derivative (if z \<in> \<int>\<^sub>\<le>\<^sub>0 then (-1)^(nat \<lfloor>norm z\<rfloor>) * fact (nat \<lfloor>norm z\<rfloor>)
else -rGamma z * Digamma z)) (at z within A)"
using has_field_derivative_rGamma_no_nonpos_int[of z A]
has_field_derivative_rGamma_nonpos_int[of "nat \<lfloor>norm z\<rfloor>" A]
by (auto elim!: nonpos_Ints_cases')
declare has_field_derivative_rGamma_no_nonpos_int [THEN DERIV_chain2, derivative_intros]
declare has_field_derivative_rGamma [THEN DERIV_chain2, derivative_intros]
declare has_field_derivative_rGamma_nonpos_int [derivative_intros]
declare has_field_derivative_rGamma_no_nonpos_int [derivative_intros]
declare has_field_derivative_rGamma [derivative_intros]
(* The derivative of Gamma is Gamma z * Digamma z, away from the poles. *)
theorem has_field_derivative_Gamma [derivative_intros]:
"z \<notin> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> (Gamma has_field_derivative Gamma z * Digamma z) (at z within A)"
unfolding Gamma_def [abs_def]
by (fastforce intro!: derivative_eq_intros simp: rGamma_eq_zero_iff)
declare has_field_derivative_Gamma[THEN DERIV_chain2, derivative_intros]
(* TODO: Hide ugly facts properly *)
(* Note: the duplicate occurrence of differentiable_rGamma_aux2 in this list
   was removed; hiding a fact twice is redundant. *)
hide_fact rGamma_eq_zero_iff_aux differentiable_rGamma_aux1 differentiable_rGamma_aux2
rGamma_series_aux Gamma_class.rGamma_eq_zero_iff_aux
(* Continuity of rGamma (everywhere) and Gamma (away from the poles),
   as corollaries of the derivative rules above. *)
lemma continuous_on_rGamma [continuous_intros]: "continuous_on A rGamma"
by (rule DERIV_continuous_on has_field_derivative_rGamma)+
lemma continuous_on_Gamma [continuous_intros]: "A \<inter> \<int>\<^sub>\<le>\<^sub>0 = {} \<Longrightarrow> continuous_on A Gamma"
by (rule DERIV_continuous_on has_field_derivative_Gamma)+ blast
lemma isCont_rGamma [continuous_intros]:
"isCont f z \<Longrightarrow> isCont (\<lambda>x. rGamma (f x)) z"
by (rule isCont_o2[OF _ DERIV_isCont[OF has_field_derivative_rGamma]])
lemma isCont_Gamma [continuous_intros]:
"isCont f z \<Longrightarrow> f z \<notin> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> isCont (\<lambda>x. Gamma (f x)) z"
by (rule isCont_o2[OF _ DERIV_isCont[OF has_field_derivative_Gamma]])
subsection\<^marker>\<open>tag unimportant\<close> \<open>The complex Gamma function\<close>
instantiation\<^marker>\<open>tag unimportant\<close> complex :: Gamma
begin
(* The complex rGamma, defined as the limit of the defining series;
   convergence is established in the following lemma. *)
definition\<^marker>\<open>tag unimportant\<close> rGamma_complex :: "complex \<Rightarrow> complex" where
"rGamma_complex z = lim (rGamma_series z)"
(* Convergence of the complex rGamma series, together with the closed form
   rGamma z = exp (-ln_Gamma z) off the poles (and 0 at the poles).
   Both facts are proven simultaneously by case distinction on whether z
   is a non-positive integer. *)
lemma rGamma_series_complex_converges:
"convergent (rGamma_series (z :: complex))" (is "?thesis1")
and rGamma_complex_altdef:
"rGamma z = (if z \<in> \<int>\<^sub>\<le>\<^sub>0 then 0 else exp (-ln_Gamma z))" (is "?thesis2")
proof -
have "?thesis1 \<and> ?thesis2"
proof (cases "z \<in> \<int>\<^sub>\<le>\<^sub>0")
case False
have "rGamma_series z \<longlonglongrightarrow> exp (- ln_Gamma z)"
proof (rule Lim_transform_eventually)
from ln_Gamma_series_complex_converges'[OF False]
obtain d where "0 < d" "uniformly_convergent_on (ball z d) (\<lambda>n z. ln_Gamma_series z n)"
by auto
from this(1) uniformly_convergent_imp_convergent[OF this(2), of z]
have "ln_Gamma_series z \<longlonglongrightarrow> lim (ln_Gamma_series z)" by (simp add: convergent_LIMSEQ_iff)
thus "(\<lambda>n. exp (-ln_Gamma_series z n)) \<longlonglongrightarrow> exp (- ln_Gamma z)"
unfolding convergent_def ln_Gamma_def by (intro tendsto_exp tendsto_minus)
from eventually_gt_at_top[of "0::nat"] exp_ln_Gamma_series_complex False
show "eventually (\<lambda>n. exp (-ln_Gamma_series z n) = rGamma_series z n) sequentially"
by (force elim!: eventually_mono simp: exp_minus Gamma_series_def rGamma_series_def)
qed
with False show ?thesis
by (auto simp: convergent_def rGamma_complex_def intro!: limI)
next
(* At a pole -k the series tends to 0, matching the altdef's 0 branch. *)
case True
then obtain k where "z = - of_nat k" by (erule nonpos_Ints_cases')
also have "rGamma_series \<dots> \<longlonglongrightarrow> 0"
by (subst tendsto_cong[OF rGamma_series_minus_of_nat]) (simp_all add: convergent_const)
finally show ?thesis using True
by (auto simp: rGamma_complex_def convergent_def intro!: limI)
qed
thus "?thesis1" "?thesis2" by blast+
qed
context\<^marker>\<open>tag unimportant\<close>
begin
(* TODO: duplication *)
(* The functional equation z * rGamma (z+1) = rGamma z for the complex
   instance, proven directly from the series (mirrors the proof of
   rGamma_plus1 in the class context; the class instance is not yet
   available here). *)
private lemma rGamma_complex_plus1: "z * rGamma (z + 1) = rGamma (z :: complex)"
proof -
let ?f = "\<lambda>n. (z + 1) * inverse (of_nat n) + 1"
have "eventually (\<lambda>n. ?f n * rGamma_series z n = z * rGamma_series (z + 1) n) sequentially"
using eventually_gt_at_top[of "0::nat"]
proof eventually_elim
fix n :: nat assume n: "n > 0"
hence "z * rGamma_series (z + 1) n = inverse (of_nat n) *
pochhammer z (Suc (Suc n)) / (fact n * exp (z * of_real (ln (of_nat n))))"
by (subst pochhammer_rec) (simp add: rGamma_series_def field_simps exp_add exp_of_real)
also from n have "\<dots> = ?f n * rGamma_series z n"
by (subst pochhammer_rec') (simp_all add: field_split_simps rGamma_series_def add_ac)
finally show "?f n * rGamma_series z n = z * rGamma_series (z + 1) n" ..
qed
moreover have "(\<lambda>n. ?f n * rGamma_series z n) \<longlonglongrightarrow> ((z+1) * 0 + 1) * rGamma z"
using rGamma_series_complex_converges
by (intro tendsto_intros lim_inverse_n)
(simp_all add: convergent_LIMSEQ_iff rGamma_complex_def)
hence "(\<lambda>n. ?f n * rGamma_series z n) \<longlonglongrightarrow> rGamma z" by simp
ultimately have "(\<lambda>n. z * rGamma_series (z + 1) n) \<longlonglongrightarrow> rGamma z"
by (blast intro: Lim_transform_eventually)
moreover have "(\<lambda>n. z * rGamma_series (z + 1) n) \<longlonglongrightarrow> z * rGamma (z + 1)"
using rGamma_series_complex_converges
by (auto intro!: tendsto_mult simp: rGamma_complex_def convergent_LIMSEQ_iff)
ultimately show "z * rGamma (z + 1) = rGamma z" using LIMSEQ_unique by blast
qed
(* Differentiability of the complex rGamma off the non-positive integers.
   First proven for Re z > 0 via the closed form exp (-ln_Gamma z), then
   extended leftwards by induction using the functional equation
   rGamma_complex_plus1 (each step shifts the argument by +1). *)
private lemma has_field_derivative_rGamma_complex_no_nonpos_Int:
assumes "(z :: complex) \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "(rGamma has_field_derivative - rGamma z * Digamma z) (at z)"
proof -
have diff: "(rGamma has_field_derivative - rGamma z * Digamma z) (at z)" if "Re z > 0" for z
proof (subst DERIV_cong_ev[OF refl _ refl])
from that have "eventually (\<lambda>t. t \<in> ball z (Re z/2)) (nhds z)"
by (intro eventually_nhds_in_nhd) simp_all
thus "eventually (\<lambda>t. rGamma t = exp (- ln_Gamma t)) (nhds z)"
using no_nonpos_Int_in_ball_complex[OF that]
by (auto elim!: eventually_mono simp: rGamma_complex_altdef)
next
have "z \<notin> \<real>\<^sub>\<le>\<^sub>0" using that by (simp add: complex_nonpos_Reals_iff)
with that show "((\<lambda>t. exp (- ln_Gamma t)) has_field_derivative (-rGamma z * Digamma z)) (at z)"
by (force elim!: nonpos_Ints_cases intro!: derivative_eq_intros simp: rGamma_complex_altdef)
qed
(* Induction on nat |1 - Re z| extends the result to all of the domain. *)
from assms show "(rGamma has_field_derivative - rGamma z * Digamma z) (at z)"
proof (induction "nat \<lfloor>1 - Re z\<rfloor>" arbitrary: z)
case (Suc n z)
from Suc.prems have z: "z \<noteq> 0" by auto
from Suc.hyps have "n = nat \<lfloor>- Re z\<rfloor>" by linarith
hence A: "n = nat \<lfloor>1 - Re (z + 1)\<rfloor>" by simp
from Suc.prems have B: "z + 1 \<notin> \<int>\<^sub>\<le>\<^sub>0" by (force dest: plus_one_in_nonpos_Ints_imp)
have "((\<lambda>z. z * (rGamma \<circ> (\<lambda>z. z + 1)) z) has_field_derivative
-rGamma (z + 1) * (Digamma (z + 1) * z - 1)) (at z)"
by (rule derivative_eq_intros DERIV_chain Suc refl A B)+ (simp add: algebra_simps)
also have "(\<lambda>z. z * (rGamma \<circ> (\<lambda>z. z + 1 :: complex)) z) = rGamma"
by (simp add: rGamma_complex_plus1)
also from z have "Digamma (z + 1) * z - 1 = z * Digamma z"
by (subst Digamma_plus1) (simp_all add: field_simps)
also have "-rGamma (z + 1) * (z * Digamma z) = -rGamma z * Digamma z"
by (simp add: rGamma_complex_plus1[of z, symmetric])
finally show ?case .
qed (intro diff, simp)
qed
(* rGamma 1 = 1 for the complex instance (same argument as the class-level
   rGamma_1, which is not yet available before the instance proof). *)
private lemma rGamma_complex_1: "rGamma (1 :: complex) = 1"
proof -
have A: "eventually (\<lambda>n. rGamma_series 1 n = of_nat (Suc n) / of_nat n) sequentially"
using eventually_gt_at_top[of "0::nat"]
by (force elim!: eventually_mono simp: rGamma_series_def exp_of_real pochhammer_fact
field_split_simps pochhammer_rec' dest!: pochhammer_eq_0_imp_nonpos_Int)
have "rGamma_series 1 \<longlonglongrightarrow> 1" by (subst tendsto_cong[OF A]) (rule LIMSEQ_Suc_n_over_n)
thus "rGamma 1 = (1 :: complex)" unfolding rGamma_complex_def by (rule limI)
qed
(* At the poles of Gamma (the non-positive integers -n), rGamma has a simple zero
   with derivative (-1)^n * n!.  Proved by induction on n using the functional
   equation rGamma z = z * rGamma (z + 1) (rGamma_complex_plus1). *)
private lemma has_field_derivative_rGamma_complex_nonpos_Int:
"(rGamma has_field_derivative (-1)^n * fact n) (at (- of_nat n :: complex))"
proof (induction n)
case 0
(* Base case at 0: differentiate z * rGamma (z + 1); at z = 0 the derivative is
   rGamma 1 = 1, using the no-nonpos-Int derivative rule at 0 + 1. *)
have A: "(0::complex) + 1 \<notin> \<int>\<^sub>\<le>\<^sub>0" by simp
have "((\<lambda>z. z * (rGamma \<circ> (\<lambda>z. z + 1 :: complex)) z) has_field_derivative 1) (at 0)"
by (rule derivative_eq_intros DERIV_chain refl
has_field_derivative_rGamma_complex_no_nonpos_Int A)+ (simp add: rGamma_complex_1)
thus ?case by (simp add: rGamma_complex_plus1)
next
case (Suc n)
(* Step: the IH gives the derivative at -(n+1) + 1 = -n; the chain rule through
   z * rGamma (z + 1) pushes it down to -(n+1) with an extra factor -(n+1). *)
hence A: "(rGamma has_field_derivative (-1)^n * fact n)
(at (- of_nat (Suc n) + 1 :: complex))" by simp
have "((\<lambda>z. z * (rGamma \<circ> (\<lambda>z. z + 1 :: complex)) z) has_field_derivative
(- 1) ^ Suc n * fact (Suc n)) (at (- of_nat (Suc n)))"
by (rule derivative_eq_intros refl A DERIV_chain)+
(simp add: algebra_simps rGamma_complex_altdef)
thus ?case by (simp add: rGamma_complex_plus1)
qed
(* Discharge the Gamma type-class obligations for the complex numbers:
   (1) rGamma vanishes exactly at the non-positive integers;
   (2) away from them, rGamma is differentiable with the stated derivative
       (expressed via the raw has_derivative/Landau-style characterisation);
   (3) at -n the derivative is (-1)^n * n!;
   (4) rGamma_series converges to rGamma (spelled out with the class's
       primitive 'exp'/'pochhammer'' formulations). *)
instance proof
fix z :: complex show "(rGamma z = 0) \<longleftrightarrow> (\<exists>n. z = - of_nat n)"
by (auto simp: rGamma_complex_altdef elim!: nonpos_Ints_cases')
next
fix z :: complex assume "\<And>n. z \<noteq> - of_nat n"
hence "z \<notin> \<int>\<^sub>\<le>\<^sub>0" by (auto elim!: nonpos_Ints_cases')
(* Unfold the class axiom's THE-description of Digamma into the derivative
   statement already proved for non-pole points. *)
from has_field_derivative_rGamma_complex_no_nonpos_Int[OF this]
show "let d = (THE d. (\<lambda>n. \<Sum>k<n. inverse (of_nat (Suc k)) - inverse (z + of_nat k))
\<longlonglongrightarrow> d) - euler_mascheroni *\<^sub>R 1 in (\<lambda>y. (rGamma y - rGamma z +
rGamma z * d * (y - z)) /\<^sub>R cmod (y - z)) \<midarrow>z\<rightarrow> 0"
by (simp add: has_field_derivative_def has_derivative_def Digamma_def sums_def [abs_def]
of_real_def[symmetric] suminf_def)
next
fix n :: nat
from has_field_derivative_rGamma_complex_nonpos_Int[of n]
show "let z = - of_nat n in (\<lambda>y. (rGamma y - rGamma z - (- 1) ^ n * prod of_nat {1..n} *
(y - z)) /\<^sub>R cmod (y - z)) \<midarrow>z\<rightarrow> 0"
by (simp add: has_field_derivative_def has_derivative_def fact_prod Let_def)
next
fix z :: complex
from rGamma_series_complex_converges[of z] have "rGamma_series z \<longlonglongrightarrow> rGamma z"
by (simp add: convergent_LIMSEQ_iff rGamma_complex_def)
(* Rewrite the class's primitive fact'/exp/pochhammer' formulation back into
   rGamma_series and apply the convergence fact above. *)
thus "let fact' = \<lambda>n. prod of_nat {1..n};
exp = \<lambda>x. THE e. (\<lambda>n. \<Sum>k<n. x ^ k /\<^sub>R fact k) \<longlonglongrightarrow> e;
pochhammer' = \<lambda>a n. \<Prod>n = 0..n. a + of_nat n
in (\<lambda>n. pochhammer' z n / (fact' n * exp (z * ln (real_of_nat n) *\<^sub>R 1))) \<longlonglongrightarrow> rGamma z"
by (simp add: fact_prod pochhammer_Suc_prod rGamma_series_def [abs_def] exp_def
of_real_def [symmetric] suminf_def sums_def [abs_def] atLeast0AtMost)
qed
end
end
(* Gamma as exp of the log-Gamma function away from its poles. *)
lemma Gamma_complex_altdef:
"Gamma z = (if z \<in> \<int>\<^sub>\<le>\<^sub>0 then 0 else exp (ln_Gamma (z :: complex)))"
unfolding Gamma_def rGamma_complex_altdef by (simp add: exp_minus)
(* rGamma commutes with complex conjugation: conjugate the defining series
   termwise and pass to the limit. *)
lemma cnj_rGamma: "cnj (rGamma z) = rGamma (cnj z)"
proof -
have "rGamma_series (cnj z) = (\<lambda>n. cnj (rGamma_series z n))"
by (intro ext) (simp_all add: rGamma_series_def exp_cnj)
also have "... \<longlonglongrightarrow> cnj (rGamma z)" by (intro tendsto_cnj tendsto_intros)
finally show ?thesis unfolding rGamma_complex_def by (intro sym[OF limI])
qed
(* Gamma commutes with conjugation, by the previous lemma. *)
lemma cnj_Gamma: "cnj (Gamma z) = Gamma (cnj z)"
unfolding Gamma_def by (simp add: cnj_rGamma)
(* Gamma and rGamma map real arguments to real values (self-conjugacy). *)
lemma Gamma_complex_real:
"z \<in> \<real> \<Longrightarrow> Gamma z \<in> (\<real> :: complex set)" and rGamma_complex_real: "z \<in> \<real> \<Longrightarrow> rGamma z \<in> \<real>"
by (simp_all add: Reals_cnj_iff cnj_Gamma cnj_rGamma)
(* rGamma is entire: differentiable everywhere, hence holomorphic and analytic
   on any set.  The primed variant lifts holomorphy through composition. *)
lemma field_differentiable_rGamma: "rGamma field_differentiable (at z within A)"
using has_field_derivative_rGamma[of z] unfolding field_differentiable_def by blast
lemma holomorphic_rGamma [holomorphic_intros]: "rGamma holomorphic_on A"
unfolding holomorphic_on_def by (auto intro!: field_differentiable_rGamma)
lemma holomorphic_rGamma' [holomorphic_intros]:
assumes "f holomorphic_on A"
shows "(\<lambda>x. rGamma (f x)) holomorphic_on A"
proof -
have "rGamma \<circ> f holomorphic_on A" using assms
by (intro holomorphic_on_compose assms holomorphic_rGamma)
thus ?thesis by (simp only: o_def)
qed
lemma analytic_rGamma: "rGamma analytic_on A"
unfolding analytic_on_def by (auto intro!: exI[of _ 1] holomorphic_rGamma)
(* Same differentiability/holomorphy/analyticity facts for Gamma itself, but
   only away from the poles at the non-positive integers. *)
lemma field_differentiable_Gamma: "z \<notin> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> Gamma field_differentiable (at z within A)"
using has_field_derivative_Gamma[of z] unfolding field_differentiable_def by auto
lemma holomorphic_Gamma [holomorphic_intros]: "A \<inter> \<int>\<^sub>\<le>\<^sub>0 = {} \<Longrightarrow> Gamma holomorphic_on A"
unfolding holomorphic_on_def by (auto intro!: field_differentiable_Gamma)
lemma holomorphic_Gamma' [holomorphic_intros]:
assumes "f holomorphic_on A" and "\<And>x. x \<in> A \<Longrightarrow> f x \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "(\<lambda>x. Gamma (f x)) holomorphic_on A"
proof -
have "Gamma \<circ> f holomorphic_on A" using assms
by (intro holomorphic_on_compose assms holomorphic_Gamma) auto
thus ?thesis by (simp only: o_def)
qed
lemma analytic_Gamma: "A \<inter> \<int>\<^sub>\<le>\<^sub>0 = {} \<Longrightarrow> Gamma analytic_on A"
by (rule analytic_on_subset[of _ "UNIV - \<int>\<^sub>\<le>\<^sub>0"], subst analytic_on_open)
(auto intro!: holomorphic_Gamma)
(* ln_Gamma is differentiable/holomorphic/analytic away from the non-positive
   real axis (the branch cut of the complex logarithm). *)
lemma field_differentiable_ln_Gamma_complex:
"z \<notin> \<real>\<^sub>\<le>\<^sub>0 \<Longrightarrow> ln_Gamma field_differentiable (at (z::complex) within A)"
by (rule field_differentiable_within_subset[of _ _ UNIV])
(force simp: field_differentiable_def intro!: derivative_intros)+
lemma holomorphic_ln_Gamma [holomorphic_intros]: "A \<inter> \<real>\<^sub>\<le>\<^sub>0 = {} \<Longrightarrow> ln_Gamma holomorphic_on A"
unfolding holomorphic_on_def by (auto intro!: field_differentiable_ln_Gamma_complex)
lemma analytic_ln_Gamma: "A \<inter> \<real>\<^sub>\<le>\<^sub>0 = {} \<Longrightarrow> ln_Gamma analytic_on A"
by (rule analytic_on_subset[of _ "UNIV - \<real>\<^sub>\<le>\<^sub>0"], subst analytic_on_open)
(auto intro!: holomorphic_ln_Gamma)
(* Combined derivative rule for complex rGamma, covering pole and non-pole cases
   in one statement; registered for automated derivative reasoning, also in the
   chained (DERIV_chain2) form. *)
lemma has_field_derivative_rGamma_complex' [derivative_intros]:
"(rGamma has_field_derivative (if z \<in> \<int>\<^sub>\<le>\<^sub>0 then (-1)^(nat \<lfloor>-Re z\<rfloor>) * fact (nat \<lfloor>-Re z\<rfloor>) else
-rGamma z * Digamma z)) (at z within A)"
using has_field_derivative_rGamma[of z] by (auto elim!: nonpos_Ints_cases')
declare has_field_derivative_rGamma_complex'[THEN DERIV_chain2, derivative_intros]
(* Polygamma is differentiable/holomorphic/analytic away from the non-positive
   integers. *)
lemma field_differentiable_Polygamma:
fixes z :: complex
shows
"z \<notin> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> Polygamma n field_differentiable (at z within A)"
using has_field_derivative_Polygamma[of z n] unfolding field_differentiable_def by auto
lemma holomorphic_on_Polygamma [holomorphic_intros]: "A \<inter> \<int>\<^sub>\<le>\<^sub>0 = {} \<Longrightarrow> Polygamma n holomorphic_on A"
unfolding holomorphic_on_def by (auto intro!: field_differentiable_Polygamma)
lemma analytic_on_Polygamma: "A \<inter> \<int>\<^sub>\<le>\<^sub>0 = {} \<Longrightarrow> Polygamma n analytic_on A"
by (rule analytic_on_subset[of _ "UNIV - \<int>\<^sub>\<le>\<^sub>0"], subst analytic_on_open)
(auto intro!: holomorphic_on_Polygamma)
subsection\<^marker>\<open>tag unimportant\<close> \<open>The real Gamma function\<close>
(* For real x, the real rGamma series eventually agrees with the real part of
   the complex series at of_real x; used to transfer convergence to the reals. *)
lemma rGamma_series_real:
"eventually (\<lambda>n. rGamma_series x n = Re (rGamma_series (of_real x) n)) sequentially"
using eventually_gt_at_top[of "0 :: nat"]
proof eventually_elim
fix n :: nat assume n: "n > 0"
(* Push of_real through pochhammer, fact and exp so the whole term is of_real
   of the real series value; then Re extracts exactly that value. *)
have "Re (rGamma_series (of_real x) n) =
Re (of_real (pochhammer x (Suc n)) / (fact n * exp (of_real (x * ln (real_of_nat n)))))"
using n by (simp add: rGamma_series_def powr_def pochhammer_of_real)
also from n have "\<dots> = Re (of_real ((pochhammer x (Suc n)) /
(fact n * (exp (x * ln (real_of_nat n))))))"
by (subst exp_of_real) simp
also from n have "\<dots> = rGamma_series x n"
by (subst Re_complex_of_real) (simp add: rGamma_series_def powr_def)
finally show "rGamma_series x n = Re (rGamma_series (of_real x) n)" ..
qed
(* Instantiate the Gamma type class for the reals by defining rGamma on \<real>
   as the real part of the complex rGamma at of_real x, and transferring each
   class axiom from the complex instance. *)
instantiation\<^marker>\<open>tag unimportant\<close> real :: Gamma
begin
definition "rGamma_real x = Re (rGamma (of_real x :: complex))"
instance proof
fix x :: real
(* Zero locus: rGamma x = 0 iff x is a non-positive integer, via the complex
   characterisation and the fact that complex rGamma is real on \<real>. *)
have "rGamma x = Re (rGamma (of_real x))" by (simp add: rGamma_real_def)
also have "of_real \<dots> = rGamma (of_real x :: complex)"
by (intro of_real_Re rGamma_complex_real) simp_all
also have "\<dots> = 0 \<longleftrightarrow> x \<in> \<int>\<^sub>\<le>\<^sub>0" by (simp add: rGamma_eq_zero_iff of_real_in_nonpos_Ints_iff)
also have "\<dots> \<longleftrightarrow> (\<exists>n. x = - of_nat n)" by (auto elim!: nonpos_Ints_cases')
finally show "(rGamma x) = 0 \<longleftrightarrow> (\<exists>n. x = - real_of_nat n)" by simp
next
fix x :: real assume "\<And>n. x \<noteq> - of_nat n"
hence x: "complex_of_real x \<notin> \<int>\<^sub>\<le>\<^sub>0"
by (subst of_real_in_nonpos_Ints_iff) (auto elim!: nonpos_Ints_cases')
then have "x \<noteq> 0" by auto
(* Derivative away from poles: taken from the complex derivative of Re \<circ>
   rGamma \<circ> of_real, using that Digamma of a real is real. *)
with x have "(rGamma has_field_derivative - rGamma x * Digamma x) (at x)"
by (fastforce intro!: derivative_eq_intros has_vector_derivative_real_field
simp: Polygamma_of_real rGamma_real_def [abs_def])
thus "let d = (THE d. (\<lambda>n. \<Sum>k<n. inverse (of_nat (Suc k)) - inverse (x + of_nat k))
\<longlonglongrightarrow> d) - euler_mascheroni *\<^sub>R 1 in (\<lambda>y. (rGamma y - rGamma x +
rGamma x * d * (y - x)) /\<^sub>R norm (y - x)) \<midarrow>x\<rightarrow> 0"
by (simp add: has_field_derivative_def has_derivative_def Digamma_def sums_def [abs_def]
of_real_def[symmetric] suminf_def)
next
fix n :: nat
(* Derivative at the poles -n: again transferred from the complex case. *)
have "(rGamma has_field_derivative (-1)^n * fact n) (at (- of_nat n :: real))"
by (fastforce intro!: derivative_eq_intros has_vector_derivative_real_field
simp: Polygamma_of_real rGamma_real_def [abs_def])
thus "let x = - of_nat n in (\<lambda>y. (rGamma y - rGamma x - (- 1) ^ n * prod of_nat {1..n} *
(y - x)) /\<^sub>R norm (y - x)) \<midarrow>x::real\<rightarrow> 0"
by (simp add: has_field_derivative_def has_derivative_def fact_prod Let_def)
next
fix x :: real
(* Series convergence: the real series eventually equals the real part of the
   complex one (rGamma_series_real), which converges to rGamma x. *)
have "rGamma_series x \<longlonglongrightarrow> rGamma x"
proof (rule Lim_transform_eventually)
show "(\<lambda>n. Re (rGamma_series (of_real x) n)) \<longlonglongrightarrow> rGamma x" unfolding rGamma_real_def
by (intro tendsto_intros)
qed (insert rGamma_series_real, simp add: eq_commute)
thus "let fact' = \<lambda>n. prod of_nat {1..n};
exp = \<lambda>x. THE e. (\<lambda>n. \<Sum>k<n. x ^ k /\<^sub>R fact k) \<longlonglongrightarrow> e;
pochhammer' = \<lambda>a n. \<Prod>n = 0..n. a + of_nat n
in (\<lambda>n. pochhammer' x n / (fact' n * exp (x * ln (real_of_nat n) *\<^sub>R 1))) \<longlonglongrightarrow> rGamma x"
by (simp add: fact_prod pochhammer_Suc_prod rGamma_series_def [abs_def] exp_def
of_real_def [symmetric] suminf_def sums_def [abs_def] atLeast0AtMost)
qed
end
(* Transfer lemmas: rGamma and Gamma commute with the embedding of_real, and
   both real functions can be described as limits of their defining series. *)
lemma rGamma_complex_of_real: "rGamma (complex_of_real x) = complex_of_real (rGamma x)"
unfolding rGamma_real_def using rGamma_complex_real by simp
lemma Gamma_complex_of_real: "Gamma (complex_of_real x) = complex_of_real (Gamma x)"
unfolding Gamma_def by (simp add: rGamma_complex_of_real)
lemma rGamma_real_altdef: "rGamma x = lim (rGamma_series (x :: real))"
by (rule sym, rule limI, rule tendsto_intros)
lemma Gamma_real_altdef1: "Gamma x = lim (Gamma_series (x :: real))"
by (rule sym, rule limI, rule tendsto_intros)
(* Real Gamma as the real part of the complex Gamma at of_real x. *)
lemma Gamma_real_altdef2: "Gamma x = Re (Gamma (of_real x))"
using rGamma_complex_real[OF Reals_of_real[of x]]
by (elim Reals_cases)
(simp only: Gamma_def rGamma_real_def of_real_inverse[symmetric] Re_complex_of_real)
(* For positive real x the complex ln_Gamma series at of_real x equals the
   embedding of the real series; the key point is Ln agreeing with ln on the
   positive real terms x/k + 1. *)
lemma ln_Gamma_series_complex_of_real:
"x > 0 \<Longrightarrow> n > 0 \<Longrightarrow> ln_Gamma_series (complex_of_real x) n = of_real (ln_Gamma_series x n)"
proof -
assume xn: "x > 0" "n > 0"
have "Ln (complex_of_real x / of_nat k + 1) = of_real (ln (x / of_nat k + 1))" if "k \<ge> 1" for k
using that xn by (subst Ln_of_real [symmetric]) (auto intro!: add_nonneg_pos simp: field_simps)
with xn show ?thesis by (simp add: ln_Gamma_series_def Ln_of_real)
qed
(* Convergence of the real ln_Gamma series for x > 0, obtained by taking real
   parts of the convergent complex series. *)
lemma ln_Gamma_real_converges:
assumes "(x::real) > 0"
shows "convergent (ln_Gamma_series x)"
proof -
have "(\<lambda>n. ln_Gamma_series (complex_of_real x) n) \<longlonglongrightarrow> ln_Gamma (of_real x)" using assms
by (intro ln_Gamma_complex_LIMSEQ) (auto simp: of_real_in_nonpos_Ints_iff)
moreover from eventually_gt_at_top[of "0::nat"]
have "eventually (\<lambda>n. complex_of_real (ln_Gamma_series x n) =
ln_Gamma_series (complex_of_real x) n) sequentially"
by eventually_elim (simp add: ln_Gamma_series_complex_of_real assms)
ultimately have "(\<lambda>n. complex_of_real (ln_Gamma_series x n)) \<longlonglongrightarrow> ln_Gamma (of_real x)"
by (subst tendsto_cong) assumption+
from tendsto_Re[OF this] show ?thesis by (auto simp: convergent_def)
qed
(* Hence the real series converges to ln_Gamma x (which is defined as its limit). *)
lemma ln_Gamma_real_LIMSEQ: "(x::real) > 0 \<Longrightarrow> ln_Gamma_series x \<longlonglongrightarrow> ln_Gamma x"
using ln_Gamma_real_converges[of x] unfolding ln_Gamma_def by (simp add: convergent_LIMSEQ_iff)
(* ln_Gamma commutes with of_real on the positive reals: both are limits of
   the same (eventually equal) series. *)
lemma ln_Gamma_complex_of_real: "x > 0 \<Longrightarrow> ln_Gamma (complex_of_real x) = of_real (ln_Gamma x)"
proof (unfold ln_Gamma_def, rule limI, rule Lim_transform_eventually)
assume x: "x > 0"
show "eventually (\<lambda>n. of_real (ln_Gamma_series x n) =
ln_Gamma_series (complex_of_real x) n) sequentially"
using eventually_gt_at_top[of "0::nat"]
by eventually_elim (simp add: ln_Gamma_series_complex_of_real x)
qed (intro tendsto_of_real, insert ln_Gamma_real_LIMSEQ[of x], simp add: ln_Gamma_def)
(* For positive real x, Gamma x = exp (ln_Gamma x); consequently ln_Gamma is the
   genuine logarithm of Gamma there. *)
lemma Gamma_real_pos_exp: "x > (0 :: real) \<Longrightarrow> Gamma x = exp (ln_Gamma x)"
by (auto simp: Gamma_real_altdef2 Gamma_complex_altdef of_real_in_nonpos_Ints_iff
ln_Gamma_complex_of_real exp_of_real)
lemma ln_Gamma_real_pos: "x > 0 \<Longrightarrow> ln_Gamma x = ln (Gamma x :: real)"
unfolding Gamma_real_pos_exp by simp
(* At positive integers, ln_Gamma n = ln ((n-1)!), in both the complex and the
   real version; and Gamma is strictly positive (hence non-negative) on the
   positive reals since it is exp of ln_Gamma there. *)
lemma ln_Gamma_complex_conv_fact: "n > 0 \<Longrightarrow> ln_Gamma (of_nat n :: complex) = ln (fact (n - 1))"
using ln_Gamma_complex_of_real[of "real n"] Gamma_fact[of "n - 1", where 'a = real]
by (simp add: ln_Gamma_real_pos of_nat_diff Ln_of_real [symmetric])
lemma ln_Gamma_real_conv_fact: "n > 0 \<Longrightarrow> ln_Gamma (real n) = ln (fact (n - 1))"
using Gamma_fact[of "n - 1", where 'a = real]
by (simp add: ln_Gamma_real_pos of_nat_diff Ln_of_real [symmetric])
lemma Gamma_real_pos [simp, intro]: "x > (0::real) \<Longrightarrow> Gamma x > 0"
by (simp add: Gamma_real_pos_exp)
lemma Gamma_real_nonneg [simp, intro]: "x > (0::real) \<Longrightarrow> Gamma x \<ge> 0"
by (simp add: Gamma_real_pos_exp)
(* The derivative of the real ln_Gamma on (0, \<infinity>) is Digamma, obtained by
   differentiating Re \<circ> ln_Gamma \<circ> of_real and using that ln_Gamma agrees
   with that composition on a neighbourhood of x. *)
lemma has_field_derivative_ln_Gamma_real [derivative_intros]:
assumes x: "x > (0::real)"
shows "(ln_Gamma has_field_derivative Digamma x) (at x)"
proof (subst DERIV_cong_ev[OF refl _ refl])
from assms show "((Re \<circ> ln_Gamma \<circ> complex_of_real) has_field_derivative Digamma x) (at x)"
by (auto intro!: derivative_eq_intros has_vector_derivative_real_field
simp: Polygamma_of_real o_def)
(* Local agreement: near x > 0 every y is positive, so ln_Gamma y is the real
   part of the complex ln_Gamma at of_real y. *)
from eventually_nhds_in_nhd[of x "{0<..}"] assms
show "eventually (\<lambda>y. ln_Gamma y = (Re \<circ> ln_Gamma \<circ> of_real) y) (nhds x)"
by (auto elim!: eventually_mono simp: ln_Gamma_complex_of_real interior_open)
qed
lemma field_differentiable_ln_Gamma_real:
"x > 0 \<Longrightarrow> ln_Gamma field_differentiable (at (x::real) within A)"
by (rule field_differentiable_within_subset[of _ _ UNIV])
(auto simp: field_differentiable_def intro!: derivative_intros)+
declare has_field_derivative_ln_Gamma_real[THEN DERIV_chain2, derivative_intros]
(* The deriv operator therefore returns Digamma on the positive reals. *)
lemma deriv_ln_Gamma_real:
assumes "z > 0"
shows "deriv ln_Gamma z = Digamma (z :: real)"
by (intro DERIV_imp_deriv has_field_derivative_ln_Gamma_real assms)
(* Iterated derivatives of real ln_Gamma: the j-th derivative (j > 0) is
   Polygamma (j - 1), since deriv ln_Gamma agrees with Digamma on the open
   positive half-line. *)
lemma higher_deriv_ln_Gamma_real:
assumes "(x::real) > 0"
shows "(deriv ^^ j) ln_Gamma x = (if j = 0 then ln_Gamma x else Polygamma (j - 1) x)"
proof (cases j)
case (Suc j')
have "(deriv ^^ j') (deriv ln_Gamma) x = (deriv ^^ j') Digamma x"
using eventually_nhds_in_open[of "{0<..}" x] assms
by (intro higher_deriv_cong_ev refl)
(auto elim!: eventually_mono simp: open_Diff deriv_ln_Gamma_real)
also have "\<dots> = Polygamma j' x" using assms
by (subst higher_deriv_Polygamma)
(auto elim!: nonpos_Ints_cases simp: complex_nonpos_Reals_iff)
finally show ?thesis using Suc by (simp del: funpow.simps add: funpow_Suc_right)
qed simp_all
(* The same iterated derivatives transfer through of_real to the complex side. *)
lemma higher_deriv_ln_Gamma_complex_of_real:
assumes "(x :: real) > 0"
shows "(deriv ^^ j) ln_Gamma (complex_of_real x) = of_real ((deriv ^^ j) ln_Gamma x)"
using assms
by (auto simp: higher_deriv_ln_Gamma_real higher_deriv_ln_Gamma_complex
ln_Gamma_complex_of_real Polygamma_of_real)
(* Combined derivative rule for real rGamma (pole and non-pole cases), the real
   analogue of has_field_derivative_rGamma_complex'; registered for derivative
   automation, also in chained form. *)
lemma has_field_derivative_rGamma_real' [derivative_intros]:
"(rGamma has_field_derivative (if x \<in> \<int>\<^sub>\<le>\<^sub>0 then (-1)^(nat \<lfloor>-x\<rfloor>) * fact (nat \<lfloor>-x\<rfloor>) else
-rGamma x * Digamma x)) (at x within A)"
using has_field_derivative_rGamma[of x] by (force elim!: nonpos_Ints_cases')
declare has_field_derivative_rGamma_real'[THEN DERIV_chain2, derivative_intros]
(* Sign of real Polygamma: odd-order Polygamma is positive away from the
   non-positive integers; even-order Polygamma (order > 0) is negative on the
   positive reals.  Both follow from the series representation, whose terms
   have the corresponding sign. *)
lemma Polygamma_real_odd_pos:
assumes "(x::real) \<notin> \<int>\<^sub>\<le>\<^sub>0" "odd n"
shows "Polygamma n x > 0"
proof -
from assms have "x \<noteq> 0" by auto
with assms show ?thesis
unfolding Polygamma_def using Polygamma_converges'[of x "Suc n"]
by (auto simp: zero_less_power_eq simp del: power_Suc
dest: plus_of_nat_eq_0_imp intro!: mult_pos_pos suminf_pos)
qed
lemma Polygamma_real_even_neg:
assumes "(x::real) > 0" "n > 0" "even n"
shows "Polygamma n x < 0"
using assms unfolding Polygamma_def using Polygamma_converges'[of x "Suc n"]
by (auto intro!: mult_pos_pos suminf_pos)
(* Monotonicity of Polygamma on the positive reals via the mean value theorem:
   even-order Polygamma is strictly increasing (its derivative, the next
   odd-order Polygamma, is positive) and odd-order Polygamma is strictly
   decreasing (its derivative, the next even-order Polygamma, is negative). *)
lemma Polygamma_real_strict_mono:
assumes "x > 0" "x < (y::real)" "even n"
shows "Polygamma n x < Polygamma n y"
proof -
have "\<exists>\<xi>. x < \<xi> \<and> \<xi> < y \<and> Polygamma n y - Polygamma n x = (y - x) * Polygamma (Suc n) \<xi>"
using assms by (intro MVT2 derivative_intros impI allI) (auto elim!: nonpos_Ints_cases)
then obtain \<xi>
where \<xi>: "x < \<xi>" "\<xi> < y"
and Polygamma: "Polygamma n y - Polygamma n x = (y - x) * Polygamma (Suc n) \<xi>"
by auto
note Polygamma
also from \<xi> assms have "(y - x) * Polygamma (Suc n) \<xi> > 0"
by (intro mult_pos_pos Polygamma_real_odd_pos) (auto elim!: nonpos_Ints_cases)
finally show ?thesis by simp
qed
lemma Polygamma_real_strict_antimono:
assumes "x > 0" "x < (y::real)" "odd n"
shows "Polygamma n x > Polygamma n y"
proof -
have "\<exists>\<xi>. x < \<xi> \<and> \<xi> < y \<and> Polygamma n y - Polygamma n x = (y - x) * Polygamma (Suc n) \<xi>"
using assms by (intro MVT2 derivative_intros impI allI) (auto elim!: nonpos_Ints_cases)
then obtain \<xi>
where \<xi>: "x < \<xi>" "\<xi> < y"
and Polygamma: "Polygamma n y - Polygamma n x = (y - x) * Polygamma (Suc n) \<xi>"
by auto
note Polygamma
also from \<xi> assms have "(y - x) * Polygamma (Suc n) \<xi> < 0"
by (intro mult_pos_neg Polygamma_real_even_neg) simp_all
finally show ?thesis by simp
qed
(* Weak form, and the n = 0 specialisations for Digamma. *)
lemma Polygamma_real_mono:
assumes "x > 0" "x \<le> (y::real)" "even n"
shows "Polygamma n x \<le> Polygamma n y"
using Polygamma_real_strict_mono[OF assms(1) _ assms(3), of y] assms(2)
by (cases "x = y") simp_all
lemma Digamma_real_strict_mono: "(0::real) < x \<Longrightarrow> x < y \<Longrightarrow> Digamma x < Digamma y"
by (rule Polygamma_real_strict_mono) simp_all
lemma Digamma_real_mono: "(0::real) < x \<Longrightarrow> x \<le> y \<Longrightarrow> Digamma x \<le> Digamma y"
by (rule Polygamma_real_mono) simp_all
(* Digamma is positive from 3/2 onwards (it is positive at 3/2 and monotone),
   so by the MVT ln_Gamma and hence Gamma are strictly increasing on [3/2, \<infinity>).
   Finally, ln \<circ> Gamma is convex on (0, \<infinity>) because its derivative Digamma is
   monotone there (log-convexity of Gamma). *)
lemma Digamma_real_ge_three_halves_pos:
assumes "x \<ge> 3/2"
shows "Digamma (x :: real) > 0"
proof -
have "0 < Digamma (3/2 :: real)" by (fact Digamma_real_three_halves_pos)
also from assms have "\<dots> \<le> Digamma x" by (intro Polygamma_real_mono) simp_all
finally show ?thesis .
qed
lemma ln_Gamma_real_strict_mono:
assumes "x \<ge> 3/2" "x < y"
shows "ln_Gamma (x :: real) < ln_Gamma y"
proof -
have "\<exists>\<xi>. x < \<xi> \<and> \<xi> < y \<and> ln_Gamma y - ln_Gamma x = (y - x) * Digamma \<xi>"
using assms by (intro MVT2 derivative_intros impI allI) (auto elim!: nonpos_Ints_cases)
then obtain \<xi> where \<xi>: "x < \<xi>" "\<xi> < y"
and ln_Gamma: "ln_Gamma y - ln_Gamma x = (y - x) * Digamma \<xi>"
by auto
note ln_Gamma
also from \<xi> assms have "(y - x) * Digamma \<xi> > 0"
by (intro mult_pos_pos Digamma_real_ge_three_halves_pos) simp_all
finally show ?thesis by simp
qed
lemma Gamma_real_strict_mono:
assumes "x \<ge> 3/2" "x < y"
shows "Gamma (x :: real) < Gamma y"
proof -
from Gamma_real_pos_exp[of x] assms have "Gamma x = exp (ln_Gamma x)" by simp
also have "\<dots> < exp (ln_Gamma y)" by (intro exp_less_mono ln_Gamma_real_strict_mono assms)
also from Gamma_real_pos_exp[of y] assms have "\<dots> = Gamma y" by simp
finally show ?thesis .
qed
theorem log_convex_Gamma_real: "convex_on {0<..} (ln \<circ> Gamma :: real \<Rightarrow> real)"
by (rule convex_on_realI[of _ _ Digamma])
(auto intro!: derivative_eq_intros Polygamma_real_mono Gamma_real_pos
simp: o_def Gamma_eq_zero_iff elim!: nonpos_Ints_cases')
subsection \<open>The uniqueness of the real Gamma function\<close>
text \<open>
The following is a proof of the Bohr--Mollerup theorem, which states that
any log-convex function \<open>G\<close> on the positive reals that fulfils \<open>G(1) = 1\<close> and
satisfies the functional equation \<open>G(x + 1) = x G(x)\<close> must be equal to the
Gamma function.
In principle, if \<open>G\<close> is a holomorphic complex function, one could then extend
this from the positive reals to the entire complex plane (minus the non-positive
integers, where the Gamma function is not defined).
\<close>
(* Bohr--Mollerup context: G is any function on the positive reals with
   G 1 = 1, the Gamma functional equation G (x+1) = x * G x, positivity, and
   log-convexity.  The final theorem shows G coincides with Gamma on (0, \<infinity>). *)
context\<^marker>\<open>tag unimportant\<close>
fixes G :: "real \<Rightarrow> real"
assumes G_1: "G 1 = 1"
assumes G_plus1: "x > 0 \<Longrightarrow> G (x + 1) = x * G x"
assumes G_pos: "x > 0 \<Longrightarrow> G x > 0"
assumes log_convex_G: "convex_on {0<..} (ln \<circ> G)"
begin
(* G interpolates the factorials, by induction from G 1 = 1 and the functional
   equation. *)
private lemma G_fact: "G (of_nat n + 1) = fact n"
using G_plus1[of "real n + 1" for n]
by (induction n) (simp_all add: G_1 G_plus1)
(* Slope of ln \<circ> G between two points; convexity makes these slopes monotone. *)
private definition S :: "real \<Rightarrow> real \<Rightarrow> real" where
"S x y = (ln (G y) - ln (G x)) / (y - x)"
private lemma S_eq:
"n \<ge> 2 \<Longrightarrow> S (of_nat n) (of_nat n + x) = (ln (G (real n + x)) - ln (fact (n - 1))) / x"
by (subst G_fact [symmetric]) (simp add: S_def add_ac of_nat_diff)
(* Lower bound: Gamma_series x n \<le> G x, from the slope inequality
   S n (n+1) \<le> S (n+1) (n+1+x) supplied by log-convexity, combined with the
   factorial values and the functional equation (pochhammer form). *)
private lemma G_lower:
assumes x: "x > 0" and n: "n \<ge> 1"
shows "Gamma_series x n \<le> G x"
proof -
have "(ln \<circ> G) (real (Suc n)) \<le> ((ln \<circ> G) (real (Suc n) + x) -
(ln \<circ> G) (real (Suc n) - 1)) / (real (Suc n) + x - (real (Suc n) - 1)) *
(real (Suc n) - (real (Suc n) - 1)) + (ln \<circ> G) (real (Suc n) - 1)"
using x n by (intro convex_onD_Icc' convex_on_subset[OF log_convex_G]) auto
hence "S (of_nat n) (of_nat (Suc n)) \<le> S (of_nat (Suc n)) (of_nat (Suc n) + x)"
unfolding S_def using x by (simp add: field_simps)
also have "S (of_nat n) (of_nat (Suc n)) = ln (fact n) - ln (fact (n-1))"
unfolding S_def using n
by (subst (1 2) G_fact [symmetric]) (simp_all add: add_ac of_nat_diff)
also have "\<dots> = ln (fact n / fact (n-1))" by (subst ln_div) simp_all
also from n have "fact n / fact (n - 1) = n" by (cases n) simp_all
finally have "x * ln (real n) + ln (fact n) \<le> ln (G (real (Suc n) + x))"
using x n by (subst (asm) S_eq) (simp_all add: field_simps)
also have "x * ln (real n) + ln (fact n) = ln (exp (x * ln (real n)) * fact n)"
using x by (simp add: ln_mult)
finally have "exp (x * ln (real n)) * fact n \<le> G (real (Suc n) + x)" using x
by (subst (asm) ln_le_cancel_iff) (simp_all add: G_pos)
also have "G (real (Suc n) + x) = pochhammer x (Suc n) * G x"
using G_plus1[of "real (Suc n) + x" for n] G_plus1[of x] x
by (induction n) (simp_all add: pochhammer_Suc add_ac)
finally show "Gamma_series x n \<le> G x"
using x by (simp add: field_simps pochhammer_pos Gamma_series_def)
qed
(* Upper bound (for 0 < x \<le> 1): G x \<le> Gamma_series x n * (1 + x/n), from the
   reverse slope inequality S n (n+x) \<le> S n (n+1). *)
private lemma G_upper:
assumes x: "x > 0" "x \<le> 1" and n: "n \<ge> 2"
shows "G x \<le> Gamma_series x n * (1 + x / real n)"
proof -
have "(ln \<circ> G) (real n + x) \<le> ((ln \<circ> G) (real n + 1) -
(ln \<circ> G) (real n)) / (real n + 1 - (real n)) *
((real n + x) - real n) + (ln \<circ> G) (real n)"
using x n by (intro convex_onD_Icc' convex_on_subset[OF log_convex_G]) auto
hence "S (of_nat n) (of_nat n + x) \<le> S (of_nat n) (of_nat n + 1)"
unfolding S_def using x by (simp add: field_simps)
also from n have "S (of_nat n) (of_nat n + 1) = ln (fact n) - ln (fact (n-1))"
by (subst (1 2) G_fact [symmetric]) (simp add: S_def add_ac of_nat_diff)
also have "\<dots> = ln (fact n / (fact (n-1)))" using n by (subst ln_div) simp_all
also from n have "fact n / fact (n - 1) = n" by (cases n) simp_all
finally have "ln (G (real n + x)) \<le> x * ln (real n) + ln (fact (n - 1))"
using x n by (subst (asm) S_eq) (simp_all add: field_simps)
also have "\<dots> = ln (exp (x * ln (real n)) * fact (n - 1))" using x
by (simp add: ln_mult)
finally have "G (real n + x) \<le> exp (x * ln (real n)) * fact (n - 1)" using x
by (subst (asm) ln_le_cancel_iff) (simp_all add: G_pos)
also have "G (real n + x) = pochhammer x n * G x"
using G_plus1[of "real n + x" for n] x
by (induction n) (simp_all add: pochhammer_Suc add_ac)
finally have "G x \<le> exp (x * ln (real n)) * fact (n- 1) / pochhammer x n"
using x by (simp add: field_simps pochhammer_pos)
also from n have "fact (n - 1) = fact n / n" by (cases n) simp_all
also have "exp (x * ln (real n)) * \<dots> / pochhammer x n =
Gamma_series x n * (1 + x / real n)" using n x
by (simp add: Gamma_series_def divide_simps pochhammer_Suc)
finally show ?thesis .
qed
(* Squeeze: on (0, 1] both bounds converge to Gamma x, so G x = Gamma x there. *)
private lemma G_eq_Gamma_aux:
assumes x: "x > 0" "x \<le> 1"
shows "G x = Gamma x"
proof (rule antisym)
show "G x \<ge> Gamma x"
proof (rule tendsto_upperbound)
from G_lower[of x] show "eventually (\<lambda>n. Gamma_series x n \<le> G x) sequentially"
using x by (auto intro: eventually_mono[OF eventually_ge_at_top[of "1::nat"]])
qed (simp_all add: Gamma_series_LIMSEQ)
next
show "G x \<le> Gamma x"
proof (rule tendsto_lowerbound)
have "(\<lambda>n. Gamma_series x n * (1 + x / real n)) \<longlonglongrightarrow> Gamma x * (1 + 0)"
by (rule tendsto_intros real_tendsto_divide_at_top
Gamma_series_LIMSEQ filterlim_real_sequentially)+
thus "(\<lambda>n. Gamma_series x n * (1 + x / real n)) \<longlonglongrightarrow> Gamma x" by simp
next
from G_upper[of x] show "eventually (\<lambda>n. Gamma_series x n * (1 + x / real n) \<ge> G x) sequentially"
using x by (auto intro: eventually_mono[OF eventually_ge_at_top[of "2::nat"]])
qed simp_all
qed
(* Bohr--Mollerup: extend from (0, 1] to all positive x by induction with the
   shared functional equation, splitting x into integer and fractional part. *)
theorem Gamma_pos_real_unique:
assumes x: "x > 0"
shows "G x = Gamma x"
proof -
have G_eq: "G (real n + x) = Gamma (real n + x)" if "x \<in> {0<..1}" for n x using that
proof (induction n)
case (Suc n)
from Suc have "x + real n > 0" by simp
hence "x + real n \<notin> \<int>\<^sub>\<le>\<^sub>0" by auto
with Suc show ?case using G_plus1[of "real n + x"] Gamma_plus1[of "real n + x"]
by (auto simp: add_ac)
qed (simp_all add: G_eq_Gamma_aux)
show ?thesis
proof (cases "frac x = 0")
case True
hence "x = of_int (floor x)" by (simp add: frac_def)
with x have x_eq: "x = of_nat (nat (floor x) - 1) + 1" by simp
show ?thesis by (subst (1 2) x_eq, rule G_eq) simp_all
next
case False
from assms have x_eq: "x = of_nat (nat (floor x)) + frac x"
by (simp add: frac_def)
have frac_le_1: "frac x \<le> 1" unfolding frac_def by linarith
show ?thesis
by (subst (1 2) x_eq, rule G_eq, insert False frac_le_1) simp_all
qed
qed
end
subsection \<open>The Beta function\<close>
(* The Beta function B(a, b) = \<Gamma>(a)\<Gamma>(b)/\<Gamma>(a+b), with the division also
   expressed via rGamma.  Basic facts: symmetry, vanishing whenever one of the
   three Gamma arguments is a pole, and partial derivatives in each argument
   expressed through Digamma. *)
definition Beta where "Beta a b = Gamma a * Gamma b / Gamma (a + b)"
lemma Beta_altdef: "Beta a b = Gamma a * Gamma b * rGamma (a + b)"
by (simp add: inverse_eq_divide Beta_def Gamma_def)
lemma Beta_commute: "Beta a b = Beta b a"
unfolding Beta_def by (simp add: ac_simps)
lemma has_field_derivative_Beta1 [derivative_intros]:
assumes "x \<notin> \<int>\<^sub>\<le>\<^sub>0" "x + y \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "((\<lambda>x. Beta x y) has_field_derivative (Beta x y * (Digamma x - Digamma (x + y))))
(at x within A)" unfolding Beta_altdef
by (rule DERIV_cong, (rule derivative_intros assms)+) (simp add: algebra_simps)
lemma Beta_pole1: "x \<in> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> Beta x y = 0"
by (auto simp add: Beta_def elim!: nonpos_Ints_cases')
lemma Beta_pole2: "y \<in> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> Beta x y = 0"
by (auto simp add: Beta_def elim!: nonpos_Ints_cases')
lemma Beta_zero: "x + y \<in> \<int>\<^sub>\<le>\<^sub>0 \<Longrightarrow> Beta x y = 0"
by (auto simp add: Beta_def elim!: nonpos_Ints_cases')
(* Derivative in the second argument, by symmetry from the first. *)
lemma has_field_derivative_Beta2 [derivative_intros]:
assumes "y \<notin> \<int>\<^sub>\<le>\<^sub>0" "x + y \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "((\<lambda>y. Beta x y) has_field_derivative (Beta x y * (Digamma y - Digamma (x + y))))
(at y within A)"
using has_field_derivative_Beta1[of y x A] assms by (simp add: Beta_commute add_ac)
(* Recurrences for Beta, all derived from the Gamma/rGamma shift identities:
   B(x+1, y) + B(x, y+1) = B(x, y), and the one-sided shifts
   (x+y) B(x+1, y) = x B(x, y) and its mirror image. *)
theorem Beta_plus1_plus1:
assumes "x \<notin> \<int>\<^sub>\<le>\<^sub>0" "y \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "Beta (x + 1) y + Beta x (y + 1) = Beta x y"
proof -
have "Beta (x + 1) y + Beta x (y + 1) =
(Gamma (x + 1) * Gamma y + Gamma x * Gamma (y + 1)) * rGamma ((x + y) + 1)"
by (simp add: Beta_altdef add_divide_distrib algebra_simps)
also have "\<dots> = (Gamma x * Gamma y) * ((x + y) * rGamma ((x + y) + 1))"
by (subst assms[THEN Gamma_plus1])+ (simp add: algebra_simps)
also from assms have "\<dots> = Beta x y" unfolding Beta_altdef by (subst rGamma_plus1) simp
finally show ?thesis .
qed
theorem Beta_plus1_left:
assumes "x \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "(x + y) * Beta (x + 1) y = x * Beta x y"
proof -
have "(x + y) * Beta (x + 1) y = Gamma (x + 1) * Gamma y * ((x + y) * rGamma ((x + y) + 1))"
unfolding Beta_altdef by (simp only: ac_simps)
also have "\<dots> = x * Beta x y" unfolding Beta_altdef
by (subst assms[THEN Gamma_plus1] rGamma_plus1)+ (simp only: ac_simps)
finally show ?thesis .
qed
theorem Beta_plus1_right:
assumes "y \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "(x + y) * Beta x (y + 1) = y * Beta x y"
using Beta_plus1_left[of y x] assms by (simp_all add: Beta_commute add.commute)
(* Multiplying out the definition: \<Gamma>(x)\<Gamma>(y) = B(x, y)\<Gamma>(x+y) when x+y is
   not a pole. *)
lemma Gamma_Gamma_Beta:
assumes "x + y \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "Gamma x * Gamma y = Beta x y * Gamma (x + y)"
unfolding Beta_altdef using assms Gamma_eq_zero_iff[of "x+y"]
by (simp add: rGamma_inverse_Gamma)
subsection \<open>Legendre duplication theorem\<close>
context
begin
(* Auxiliary form of the Legendre duplication theorem:
   \<Gamma>(z) \<Gamma>(z + 1/2) = 2^(1-2z) \<Gamma>(1/2) \<Gamma>(2z), stated with exp/ln instead of
   powers so it works in any Gamma-class field.  Strategy: for a general z the
   combination 2^(2z) * \<Gamma>'_n(z) \<Gamma>'_n(z+1/2) / \<Gamma>'_{2n}(2z) of Gamma_series'
   terms eventually equals a z-INDEPENDENT sequence ?h; evaluating the common
   limit once at z = 1/2 and once at the given z and equating them yields the
   identity. *)
private lemma Gamma_legendre_duplication_aux:
fixes z :: "'a :: Gamma"
assumes "z \<notin> \<int>\<^sub>\<le>\<^sub>0" "z + 1/2 \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "Gamma z * Gamma (z + 1/2) = exp ((1 - 2*z) * of_real (ln 2)) * Gamma (1/2) * Gamma (2*z)"
proof -
(* ?powr b a abbreviates b powr a via exp/ln; ?h is the z-free comparison
   sequence. *)
let ?powr = "\<lambda>b a. exp (a * of_real (ln (of_nat b)))"
let ?h = "\<lambda>n. (fact (n-1))\<^sup>2 / fact (2*n-1) * of_nat (2^(2*n)) *
exp (1/2 * of_real (ln (real_of_nat n)))"
{
fix z :: 'a assume z: "z \<notin> \<int>\<^sub>\<le>\<^sub>0" "z + 1/2 \<notin> \<int>\<^sub>\<le>\<^sub>0"
let ?g = "\<lambda>n. ?powr 2 (2*z) * Gamma_series' z n * Gamma_series' (z + 1/2) n /
Gamma_series' (2*z) (2*n)"
(* Pointwise (eventually) identity ?g n = ?h n: expand both Gamma_series'
   products, cancel the pochhammer factors (nonzero by the pole-freeness
   assumptions) and recombine the exponentials. *)
have "eventually (\<lambda>n. ?g n = ?h n) sequentially" using eventually_gt_at_top
proof eventually_elim
fix n :: nat assume n: "n > 0"
let ?f = "fact (n - 1) :: 'a" and ?f' = "fact (2*n - 1) :: 'a"
have A: "exp t * exp t = exp (2*t :: 'a)" for t by (subst exp_add [symmetric]) simp
have A: "Gamma_series' z n * Gamma_series' (z + 1/2) n = ?f^2 * ?powr n (2*z + 1/2) /
(pochhammer z n * pochhammer (z + 1/2) n)"
by (simp add: Gamma_series'_def exp_add ring_distribs power2_eq_square A mult_ac)
have B: "Gamma_series' (2*z) (2*n) =
?f' * ?powr 2 (2*z) * ?powr n (2*z) /
(of_nat (2^(2*n)) * pochhammer z n * pochhammer (z+1/2) n)" using n
by (simp add: Gamma_series'_def ln_mult exp_add ring_distribs pochhammer_double)
from z have "pochhammer z n \<noteq> 0" by (auto dest: pochhammer_eq_0_imp_nonpos_Int)
moreover from z have "pochhammer (z + 1/2) n \<noteq> 0" by (auto dest: pochhammer_eq_0_imp_nonpos_Int)
ultimately have "?powr 2 (2*z) * (Gamma_series' z n * Gamma_series' (z + 1/2) n) / Gamma_series' (2*z) (2*n) =
?f^2 / ?f' * of_nat (2^(2*n)) * (?powr n ((4*z + 1)/2) * ?powr n (-2*z))"
using n unfolding A B by (simp add: field_split_simps exp_minus)
also have "?powr n ((4*z + 1)/2) * ?powr n (-2*z) = ?powr n (1/2)"
by (simp add: algebra_simps exp_add[symmetric] add_divide_distrib)
finally show "?g n = ?h n" by (simp only: mult_ac)
qed
(* Limit of ?g from Gamma_series' convergence (the 2n-subsequence handles the
   denominator); transfer it to ?h via the eventual equality. *)
moreover from z double_in_nonpos_Ints_imp[of z] have "2 * z \<notin> \<int>\<^sub>\<le>\<^sub>0" by auto
hence "?g \<longlonglongrightarrow> ?powr 2 (2*z) * Gamma z * Gamma (z+1/2) / Gamma (2*z)"
using LIMSEQ_subseq_LIMSEQ[OF Gamma_series'_LIMSEQ, of "(*)2" "2*z"]
by (intro tendsto_intros Gamma_series'_LIMSEQ)
(simp_all add: o_def strict_mono_def Gamma_eq_zero_iff)
ultimately have "?h \<longlonglongrightarrow> ?powr 2 (2*z) * Gamma z * Gamma (z+1/2) / Gamma (2*z)"
by (blast intro: Lim_transform_eventually)
} note lim = this
(* Evaluate the shared limit of ?h at z = 1/2 (where it is 2 \<Gamma>(1/2)) and at
   the assumed z; uniqueness of limits gives the duplication identity. *)
from assms double_in_nonpos_Ints_imp[of z] have z': "2 * z \<notin> \<int>\<^sub>\<le>\<^sub>0" by auto
from fraction_not_in_ints[of 2 1] have "(1/2 :: 'a) \<notin> \<int>\<^sub>\<le>\<^sub>0"
by (intro not_in_Ints_imp_not_in_nonpos_Ints) simp_all
with lim[of "1/2 :: 'a"] have "?h \<longlonglongrightarrow> 2 * Gamma (1/2 :: 'a)" by (simp add: exp_of_real)
from LIMSEQ_unique[OF this lim[OF assms]] z' show ?thesis
by (simp add: field_split_simps Gamma_eq_zero_iff ring_distribs exp_diff exp_of_real)
qed
text \<open>
The following lemma is somewhat annoying. With a little bit of complex analysis
(Cauchy's integral theorem, to be exact), this would be completely trivial. However,
we want to avoid depending on the complex analysis session at this point, so we prove it
the hard way.
\<close>
(* Auxiliary result for the reflection formula: the logarithmic-derivative function
     h z = pi * cot (pi z) + Digamma z - Digamma (1 - z)   (patched to 0 at integers)
   has a derivative h' that is continuous on all of \<complex>.
   Proof outline: near the strip |Re z| < 1, h equals an explicit quotient of power
   series h2 (built from the Taylor coefficients of sin and cos), which is
   differentiable with continuous derivative h2'.  Since h is 1-periodic, these
   local derivatives are glued into a global one h2'' by translating each point
   back into the strip. *)
private lemma Gamma_reflection_aux:
defines "h \<equiv> \<lambda>z::complex. if z \<in> \<int> then 0 else
(of_real pi * cot (of_real pi*z) + Digamma z - Digamma (1 - z))"
defines "a \<equiv> complex_of_real pi"
obtains h' where "continuous_on UNIV h'" "\<And>z. (h has_field_derivative (h' z)) (at z)"
proof -
(* F z = (cos (a z) - sin (a z)/(a z)) / z and G z = sin (a z)/(a z) (with their
   removable singularities filled in); f and g are the corresponding power-series
   coefficients, so that a*cot (a z) - 1/z = F z / G z on the strip. *)
define f where "f n = a * of_real (cos_coeff (n+1) - sin_coeff (n+2))" for n
define F where "F z = (if z = 0 then 0 else (cos (a*z) - sin (a*z)/(a*z)) / z)" for z
define g where "g n = complex_of_real (sin_coeff (n+1))" for n
define G where "G z = (if z = 0 then 1 else sin (a*z)/(a*z))" for z
have a_nz: "a \<noteq> 0" unfolding a_def by simp
have "(\<lambda>n. f n * (a*z)^n) sums (F z) \<and> (\<lambda>n. g n * (a*z)^n) sums (G z)"
if "abs (Re z) < 1" for z
proof (cases "z = 0"; rule conjI)
assume "z \<noteq> 0"
note z = this that
from z have sin_nz: "sin (a*z) \<noteq> 0" unfolding a_def by (auto simp: sin_eq_0)
have "(\<lambda>n. of_real (sin_coeff n) * (a*z)^n) sums (sin (a*z))" using sin_converges[of "a*z"]
by (simp add: scaleR_conv_of_real)
from sums_split_initial_segment[OF this, of 1]
have "(\<lambda>n. (a*z) * of_real (sin_coeff (n+1)) * (a*z)^n) sums (sin (a*z))" by (simp add: mult_ac)
from sums_mult[OF this, of "inverse (a*z)"] z a_nz
have A: "(\<lambda>n. g n * (a*z)^n) sums (sin (a*z)/(a*z))"
by (simp add: field_simps g_def)
with z show "(\<lambda>n. g n * (a*z)^n) sums (G z)" by (simp add: G_def)
from A z a_nz sin_nz have g_nz: "(\<Sum>n. g n * (a*z)^n) \<noteq> 0" by (simp add: sums_iff g_def)
have [simp]: "sin_coeff (Suc 0) = 1" by (simp add: sin_coeff_def)
from sums_split_initial_segment[OF sums_diff[OF cos_converges[of "a*z"] A], of 1]
have "(\<lambda>n. z * f n * (a*z)^n) sums (cos (a*z) - sin (a*z) / (a*z))"
by (simp add: mult_ac scaleR_conv_of_real ring_distribs f_def g_def)
from sums_mult[OF this, of "inverse z"] z assms
show "(\<lambda>n. f n * (a*z)^n) sums (F z)" by (simp add: divide_simps mult_ac f_def F_def)
next
assume z: "z = 0"
have "(\<lambda>n. f n * (a * z) ^ n) sums f 0" using powser_sums_zero[of f] z by simp
with z show "(\<lambda>n. f n * (a * z) ^ n) sums (F z)"
by (simp add: f_def F_def sin_coeff_def cos_coeff_def)
have "(\<lambda>n. g n * (a * z) ^ n) sums g 0" using powser_sums_zero[of g] z by simp
with z show "(\<lambda>n. g n * (a * z) ^ n) sums (G z)"
by (simp add: g_def G_def sin_coeff_def cos_coeff_def)
qed
note sums = conjunct1[OF this] conjunct2[OF this]
(* h2 is the explicit local representation of h on the strip; h2' its derivative,
   obtained by term-wise differentiation of the two power series (POWSER'). *)
define h2 where [abs_def]:
"h2 z = (\<Sum>n. f n * (a*z)^n) / (\<Sum>n. g n * (a*z)^n) + Digamma (1 + z) - Digamma (1 - z)" for z
define POWSER where [abs_def]: "POWSER f z = (\<Sum>n. f n * (z^n :: complex))" for f z
define POWSER' where [abs_def]: "POWSER' f z = (\<Sum>n. diffs f n * (z^n))" for f and z :: complex
define h2' where [abs_def]:
"h2' z = a * (POWSER g (a*z) * POWSER' f (a*z) - POWSER f (a*z) * POWSER' g (a*z)) /
(POWSER g (a*z))^2 + Polygamma 1 (1 + z) + Polygamma 1 (1 - z)" for z
have h_eq: "h t = h2 t" if "abs (Re t) < 1" for t
proof -
from that have t: "t \<in> \<int> \<longleftrightarrow> t = 0" by (auto elim!: Ints_cases)
hence "h t = a*cot (a*t) - 1/t + Digamma (1 + t) - Digamma (1 - t)"
unfolding h_def using Digamma_plus1[of t] by (force simp: field_simps a_def)
also have "a*cot (a*t) - 1/t = (F t) / (G t)"
using t by (auto simp add: divide_simps sin_eq_0 cot_def a_def F_def G_def)
also have "\<dots> = (\<Sum>n. f n * (a*t)^n) / (\<Sum>n. g n * (a*t)^n)"
using sums[of t] that by (simp add: sums_iff)
finally show "h t = h2 t" by (simp only: h2_def)
qed
let ?A = "{z. abs (Re z) < 1}"
have "open ({z. Re z < 1} \<inter> {z. Re z > -1})"
using open_halfspace_Re_gt open_halfspace_Re_lt by auto
also have "({z. Re z < 1} \<inter> {z. Re z > -1}) = {z. abs (Re z) < 1}" by auto
finally have open_A: "open ?A" .
hence [simp]: "interior ?A = ?A" by (simp add: interior_open)
(* Both series converge everywhere (radius of convergence is infinite), so their
   sums are differentiable with continuous derivatives. *)
have summable_f: "summable (\<lambda>n. f n * z^n)" for z
by (rule powser_inside, rule sums_summable, rule sums[of "\<i> * of_real (norm z + 1) / a"])
(simp_all add: norm_mult a_def del: of_real_add)
have summable_g: "summable (\<lambda>n. g n * z^n)" for z
by (rule powser_inside, rule sums_summable, rule sums[of "\<i> * of_real (norm z + 1) / a"])
(simp_all add: norm_mult a_def del: of_real_add)
have summable_fg': "summable (\<lambda>n. diffs f n * z^n)" "summable (\<lambda>n. diffs g n * z^n)" for z
by (intro termdiff_converges_all summable_f summable_g)+
have "(POWSER f has_field_derivative (POWSER' f z)) (at z)"
"(POWSER g has_field_derivative (POWSER' g z)) (at z)" for z
unfolding POWSER_def POWSER'_def
by (intro termdiffs_strong_converges_everywhere summable_f summable_g)+
note derivs = this[THEN DERIV_chain2[OF _ DERIV_cmult[OF DERIV_ident]], unfolded POWSER_def]
have "isCont (POWSER f) z" "isCont (POWSER g) z" "isCont (POWSER' f) z" "isCont (POWSER' g) z"
for z unfolding POWSER_def POWSER'_def
by (intro isCont_powser_converges_everywhere summable_f summable_g summable_fg')+
note cont = this[THEN isCont_o2[rotated], unfolded POWSER_def POWSER'_def]
(* On the open strip: h is differentiable with derivative h2', and h2' is continuous. *)
{
fix z :: complex assume z: "abs (Re z) < 1"
define d where "d = \<i> * of_real (norm z + 1)"
have d: "abs (Re d) < 1" "norm z < norm d" by (simp_all add: d_def norm_mult del: of_real_add)
have "eventually (\<lambda>z. h z = h2 z) (nhds z)"
using eventually_nhds_in_nhd[of z ?A] using h_eq z
by (auto elim!: eventually_mono)
moreover from sums(2)[OF z] z have nz: "(\<Sum>n. g n * (a * z) ^ n) \<noteq> 0"
unfolding G_def by (auto simp: sums_iff sin_eq_0 a_def)
have A: "z \<in> \<int> \<longleftrightarrow> z = 0" using z by (auto elim!: Ints_cases)
have no_int: "1 + z \<in> \<int> \<longleftrightarrow> z = 0" using z Ints_diff[of "1+z" 1] A
by (auto elim!: nonpos_Ints_cases)
have no_int': "1 - z \<in> \<int> \<longleftrightarrow> z = 0" using z Ints_diff[of 1 "1-z"] A
by (auto elim!: nonpos_Ints_cases)
from no_int no_int' have no_int: "1 - z \<notin> \<int>\<^sub>\<le>\<^sub>0" "1 + z \<notin> \<int>\<^sub>\<le>\<^sub>0" by auto
have "(h2 has_field_derivative h2' z) (at z)" unfolding h2_def
by (rule DERIV_cong, (rule derivative_intros refl derivs[unfolded POWSER_def] nz no_int)+)
(auto simp: h2'_def POWSER_def field_simps power2_eq_square)
ultimately have deriv: "(h has_field_derivative h2' z) (at z)"
by (subst DERIV_cong_ev[OF refl _ refl])
from sums(2)[OF z] z have "(\<Sum>n. g n * (a * z) ^ n) \<noteq> 0"
unfolding G_def by (auto simp: sums_iff a_def sin_eq_0)
hence "isCont h2' z" using no_int unfolding h2'_def[abs_def] POWSER_def POWSER'_def
by (intro continuous_intros cont
continuous_on_compose2[OF _ continuous_on_Polygamma[of "{z. Re z > 0}"]]) auto
note deriv and this
} note A = this
(* h is 1-periodic, by the periodicity of cot and the Digamma recurrence. *)
interpret h: periodic_fun_simple' h
proof
fix z :: complex
show "h (z + 1) = h z"
proof (cases "z \<in> \<int>")
assume z: "z \<notin> \<int>"
hence A: "z + 1 \<notin> \<int>" "z \<noteq> 0" using Ints_diff[of "z+1" 1] by auto
hence "Digamma (z + 1) - Digamma (-z) = Digamma z - Digamma (-z + 1)"
by (subst (1 2) Digamma_plus1) simp_all
with A z show "h (z + 1) = h z"
by (simp add: h_def sin_plus_pi cos_plus_pi ring_distribs cot_def)
qed (simp add: h_def)
qed
(* By uniqueness of derivatives and periodicity of h, h2' is itself 1-periodic
   where defined, so translating back into the strip gives a global derivative. *)
have h2'_eq: "h2' (z - 1) = h2' z" if z: "Re z > 0" "Re z < 1" for z
proof -
have "((\<lambda>z. h (z - 1)) has_field_derivative h2' (z - 1)) (at z)"
by (rule DERIV_cong, rule DERIV_chain'[OF _ A(1)])
(insert z, auto intro!: derivative_eq_intros)
hence "(h has_field_derivative h2' (z - 1)) (at z)" by (subst (asm) h.minus_1)
moreover from z have "(h has_field_derivative h2' z) (at z)" by (intro A) simp_all
ultimately show "h2' (z - 1) = h2' z" by (rule DERIV_unique)
qed
define h2'' where "h2'' z = h2' (z - of_int \<lfloor>Re z\<rfloor>)" for z
have deriv: "(h has_field_derivative h2'' z) (at z)" for z
proof -
fix z :: complex
have B: "\<bar>Re z - real_of_int \<lfloor>Re z\<rfloor>\<bar> < 1" by linarith
have "((\<lambda>t. h (t - of_int \<lfloor>Re z\<rfloor>)) has_field_derivative h2'' z) (at z)"
unfolding h2''_def by (rule DERIV_cong, rule DERIV_chain'[OF _ A(1)])
(insert B, auto intro!: derivative_intros)
thus "(h has_field_derivative h2'' z) (at z)" by (simp add: h.minus_of_int)
qed
(* Continuity of the glued derivative: h2'' is continuous on each open unit strip
   around an integer, and these strips cover the plane. *)
have cont: "continuous_on UNIV h2''"
proof (intro continuous_at_imp_continuous_on ballI)
fix z :: complex
define r where "r = \<lfloor>Re z\<rfloor>"
define A where "A = {t. of_int r - 1 < Re t \<and> Re t < of_int r + 1}"
have "continuous_on A (\<lambda>t. h2' (t - of_int r))" unfolding A_def
by (intro continuous_at_imp_continuous_on isCont_o2[OF _ A(2)] ballI continuous_intros)
(simp_all add: abs_real_def)
moreover have "h2'' t = h2' (t - of_int r)" if t: "t \<in> A" for t
proof (cases "Re t \<ge> of_int r")
case True
from t have "of_int r - 1 < Re t" "Re t < of_int r + 1" by (simp_all add: A_def)
with True have "\<lfloor>Re t\<rfloor> = \<lfloor>Re z\<rfloor>" unfolding r_def by linarith
thus ?thesis by (auto simp: r_def h2''_def)
next
case False
from t have t: "of_int r - 1 < Re t" "Re t < of_int r + 1" by (simp_all add: A_def)
with False have t': "\<lfloor>Re t\<rfloor> = \<lfloor>Re z\<rfloor> - 1" unfolding r_def by linarith
moreover from t False have "h2' (t - of_int r + 1 - 1) = h2' (t - of_int r + 1)"
by (intro h2'_eq) simp_all
ultimately show ?thesis by (auto simp: r_def h2''_def algebra_simps t')
qed
ultimately have "continuous_on A h2''" by (subst continuous_on_cong[OF refl])
moreover {
have "open ({t. of_int r - 1 < Re t} \<inter> {t. of_int r + 1 > Re t})"
by (intro open_Int open_halfspace_Re_gt open_halfspace_Re_lt)
also have "{t. of_int r - 1 < Re t} \<inter> {t. of_int r + 1 > Re t} = A"
unfolding A_def by blast
finally have "open A" .
}
ultimately have C: "isCont h2'' t" if "t \<in> A" for t using that
by (subst (asm) continuous_on_eq_continuous_at) auto
have "of_int r - 1 < Re z" "Re z < of_int r + 1" unfolding r_def by linarith+
thus "isCont h2'' z" by (intro C) (simp_all add: A_def)
qed
from that[OF cont deriv] show ?thesis .
qed
(* Euler's reflection formula:  Gamma z * Gamma (1 - z) = pi / sin (pi z).
   Proof sketch: define g z = Gamma z * Gamma (1 - z) * sin (pi z), patched with
   the value pi at the integers.  g is 1-periodic and entire with logarithmic
   derivative h (from Gamma_reflection_aux); the Legendre-duplication identity
   yields the self-similarity g(z/2) * g((z+1)/2) = Gamma(1/2)^2 * g z, whence
   h z = (h(z/2) + h((z+1)/2)) / 2 and h' z = (h'(z/2) + h'((z+1)/2)) / 4.
   A sup-norm bound on compact boxes then forces h' = 0, so h is constant,
   in fact 0, so g' = 0 and g is the constant g 0 = pi. *)
lemma Gamma_reflection_complex:
fixes z :: complex
shows "Gamma z * Gamma (1 - z) = of_real pi / sin (of_real pi * z)"
proof -
let ?g = "\<lambda>z::complex. Gamma z * Gamma (1 - z) * sin (of_real pi * z)"
define g where [abs_def]: "g z = (if z \<in> \<int> then of_real pi else ?g z)" for z :: complex
let ?h = "\<lambda>z::complex. (of_real pi * cot (of_real pi*z) + Digamma z - Digamma (1 - z))"
define h where [abs_def]: "h z = (if z \<in> \<int> then 0 else ?h z)" for z :: complex
\<comment> \<open>@{term g} is periodic with period 1.\<close>
interpret g: periodic_fun_simple' g
proof
fix z :: complex
show "g (z + 1) = g z"
proof (cases "z \<in> \<int>")
case False
hence "z * g z = z * Beta z (- z + 1) * sin (of_real pi * z)" by (simp add: g_def Beta_def)
also have "z * Beta z (- z + 1) = (z + 1 + -z) * Beta (z + 1) (- z + 1)"
using False Ints_diff[of 1 "1 - z"] nonpos_Ints_subset_Ints
by (subst Beta_plus1_left [symmetric]) auto
also have "\<dots> * sin (of_real pi * z) = z * (Beta (z + 1) (-z) * sin (of_real pi * (z + 1)))"
using False Ints_diff[of "z+1" 1] Ints_minus[of "-z"] nonpos_Ints_subset_Ints
by (subst Beta_plus1_right) (auto simp: ring_distribs sin_plus_pi)
also from False have "Beta (z + 1) (-z) * sin (of_real pi * (z + 1)) = g (z + 1)"
using Ints_diff[of "z+1" 1] by (auto simp: g_def Beta_def)
finally show "g (z + 1) = g z" using False by (subst (asm) mult_left_cancel) auto
qed (simp add: g_def)
qed
\<comment> \<open>@{term g} is entire.\<close>
have g_g': "(g has_field_derivative (h z * g z)) (at z)" for z :: complex
proof (cases "z \<in> \<int>")
let ?h' = "\<lambda>z. Beta z (1 - z) * ((Digamma z - Digamma (1 - z)) * sin (z * of_real pi) +
of_real pi * cos (z * of_real pi))"
case False
from False have "eventually (\<lambda>t. t \<in> UNIV - \<int>) (nhds z)"
by (intro eventually_nhds_in_open) (auto simp: open_Diff)
hence "eventually (\<lambda>t. g t = ?g t) (nhds z)" by eventually_elim (simp add: g_def)
moreover {
from False Ints_diff[of 1 "1-z"] have "1 - z \<notin> \<int>" by auto
hence "(?g has_field_derivative ?h' z) (at z)" using nonpos_Ints_subset_Ints
by (auto intro!: derivative_eq_intros simp: algebra_simps Beta_def)
also from False have "sin (of_real pi * z) \<noteq> 0" by (subst sin_eq_0) auto
hence "?h' z = h z * g z"
using False unfolding g_def h_def cot_def by (simp add: field_simps Beta_def)
finally have "(?g has_field_derivative (h z * g z)) (at z)" .
}
ultimately show ?thesis by (subst DERIV_cong_ev[OF refl _ refl])
next
case True
then obtain n where z: "z = of_int n" by (auto elim!: Ints_cases)
let ?t = "(\<lambda>z::complex. if z = 0 then 1 else sin z / z) \<circ> (\<lambda>z. of_real pi * z)"
have deriv_0: "(g has_field_derivative 0) (at 0)"
proof (subst DERIV_cong_ev[OF refl _ refl])
show "eventually (\<lambda>z. g z = of_real pi * Gamma (1 + z) * Gamma (1 - z) * ?t z) (nhds 0)"
using eventually_nhds_ball[OF zero_less_one, of "0::complex"]
proof eventually_elim
fix z :: complex assume z: "z \<in> ball 0 1"
show "g z = of_real pi * Gamma (1 + z) * Gamma (1 - z) * ?t z"
proof (cases "z = 0")
assume z': "z \<noteq> 0"
with z have z'': "z \<notin> \<int>\<^sub>\<le>\<^sub>0" "z \<notin> \<int>" by (auto elim!: Ints_cases)
from Gamma_plus1[OF this(1)] have "Gamma z = Gamma (z + 1) / z" by simp
with z'' z' show ?thesis by (simp add: g_def ac_simps)
qed (simp add: g_def)
qed
have "(?t has_field_derivative (0 * of_real pi)) (at 0)"
using has_field_derivative_sin_z_over_z[of "UNIV :: complex set"]
by (intro DERIV_chain) simp_all
thus "((\<lambda>z. of_real pi * Gamma (1 + z) * Gamma (1 - z) * ?t z) has_field_derivative 0) (at 0)"
by (auto intro!: derivative_eq_intros simp: o_def)
qed
have "((g \<circ> (\<lambda>x. x - of_int n)) has_field_derivative 0 * 1) (at (of_int n))"
using deriv_0 by (intro DERIV_chain) (auto intro!: derivative_eq_intros)
also have "g \<circ> (\<lambda>x. x - of_int n) = g" by (intro ext) (simp add: g.minus_of_int)
finally show "(g has_field_derivative (h z * g z)) (at z)" by (simp add: z h_def)
qed
\<comment> \<open>Key functional equation, first on the strip \<open>-1 < Re z < 2\<close> via Legendre duplication.\<close>
have g_eq: "g (z/2) * g ((z+1)/2) = Gamma (1/2)^2 * g z" if "Re z > -1" "Re z < 2" for z
proof (cases "z \<in> \<int>")
case True
with that have "z = 0 \<or> z = 1" by (force elim!: Ints_cases)
moreover have "g 0 * g (1/2) = Gamma (1/2)^2 * g 0"
using fraction_not_in_ints[where 'a = complex, of 2 1] by (simp add: g_def power2_eq_square)
moreover have "g (1/2) * g 1 = Gamma (1/2)^2 * g 1"
using fraction_not_in_ints[where 'a = complex, of 2 1]
by (simp add: g_def power2_eq_square Beta_def algebra_simps)
ultimately show ?thesis by force
next
case False
hence z: "z/2 \<notin> \<int>" "(z+1)/2 \<notin> \<int>" using Ints_diff[of "z+1" 1] by (auto elim!: Ints_cases)
hence z': "z/2 \<notin> \<int>\<^sub>\<le>\<^sub>0" "(z+1)/2 \<notin> \<int>\<^sub>\<le>\<^sub>0" by (auto elim!: nonpos_Ints_cases)
from z have "1-z/2 \<notin> \<int>" "1-((z+1)/2) \<notin> \<int>"
using Ints_diff[of 1 "1-z/2"] Ints_diff[of 1 "1-((z+1)/2)"] by auto
hence z'': "1-z/2 \<notin> \<int>\<^sub>\<le>\<^sub>0" "1-((z+1)/2) \<notin> \<int>\<^sub>\<le>\<^sub>0" by (auto elim!: nonpos_Ints_cases)
from z have "g (z/2) * g ((z+1)/2) =
(Gamma (z/2) * Gamma ((z+1)/2)) * (Gamma (1-z/2) * Gamma (1-((z+1)/2))) *
(sin (of_real pi * z/2) * sin (of_real pi * (z+1)/2))"
by (simp add: g_def)
also from z' Gamma_legendre_duplication_aux[of "z/2"]
have "Gamma (z/2) * Gamma ((z+1)/2) = exp ((1-z) * of_real (ln 2)) * Gamma (1/2) * Gamma z"
by (simp add: add_divide_distrib)
also from z'' Gamma_legendre_duplication_aux[of "1-(z+1)/2"]
have "Gamma (1-z/2) * Gamma (1-(z+1)/2) =
Gamma (1-z) * Gamma (1/2) * exp (z * of_real (ln 2))"
by (simp add: add_divide_distrib ac_simps)
finally have "g (z/2) * g ((z+1)/2) = Gamma (1/2)^2 * (Gamma z * Gamma (1-z) *
(2 * (sin (of_real pi*z/2) * sin (of_real pi*(z+1)/2))))"
by (simp add: add_ac power2_eq_square exp_add ring_distribs exp_diff exp_of_real)
also have "sin (of_real pi*(z+1)/2) = cos (of_real pi*z/2)"
using cos_sin_eq[of "- of_real pi * z/2", symmetric]
by (simp add: ring_distribs add_divide_distrib ac_simps)
also have "2 * (sin (of_real pi*z/2) * cos (of_real pi*z/2)) = sin (of_real pi * z)"
by (subst sin_times_cos) (simp add: field_simps)
also have "Gamma z * Gamma (1 - z) * sin (complex_of_real pi * z) = g z"
using \<open>z \<notin> \<int>\<close> by (simp add: g_def)
finally show ?thesis .
qed
\<comment> \<open>\<dots> then extended to all \<open>z\<close> by periodicity of \<open>g\<close>.\<close>
have g_eq: "g (z/2) * g ((z+1)/2) = Gamma (1/2)^2 * g z" for z
proof -
define r where "r = \<lfloor>Re z / 2\<rfloor>"
have "Gamma (1/2)^2 * g z = Gamma (1/2)^2 * g (z - of_int (2*r))" by (simp only: g.minus_of_int)
also have "of_int (2*r) = 2 * of_int r" by simp
also have "Re z - 2 * of_int r > -1" "Re z - 2 * of_int r < 2" unfolding r_def by linarith+
hence "Gamma (1/2)^2 * g (z - 2 * of_int r) =
g ((z - 2 * of_int r)/2) * g ((z - 2 * of_int r + 1)/2)"
unfolding r_def by (intro g_eq[symmetric]) simp_all
also have "(z - 2 * of_int r) / 2 = z/2 - of_int r" by simp
also have "g \<dots> = g (z/2)" by (rule g.minus_of_int)
also have "(z - 2 * of_int r + 1) / 2 = (z + 1)/2 - of_int r" by simp
also have "g \<dots> = g ((z+1)/2)" by (rule g.minus_of_int)
finally show ?thesis ..
qed
have g_nz [simp]: "g z \<noteq> 0" for z :: complex
unfolding g_def using Ints_diff[of 1 "1 - z"]
by (auto simp: Gamma_eq_zero_iff sin_eq_0 dest!: nonpos_Ints_Int)
\<comment> \<open>The logarithmic derivative \<open>h\<close> inherits the averaged functional equation.\<close>
have h_eq: "h z = (h (z/2) + h ((z+1)/2)) / 2" for z
proof -
have "((\<lambda>t. g (t/2) * g ((t+1)/2)) has_field_derivative
(g (z/2) * g ((z+1)/2)) * ((h (z/2) + h ((z+1)/2)) / 2)) (at z)"
by (auto intro!: derivative_eq_intros g_g'[THEN DERIV_chain2] simp: field_simps)
hence "((\<lambda>t. Gamma (1/2)^2 * g t) has_field_derivative
Gamma (1/2)^2 * g z * ((h (z/2) + h ((z+1)/2)) / 2)) (at z)"
by (subst (1 2) g_eq[symmetric]) simp
from DERIV_cmult[OF this, of "inverse ((Gamma (1/2))^2)"]
have "(g has_field_derivative (g z * ((h (z/2) + h ((z+1)/2))/2))) (at z)"
using fraction_not_in_ints[where 'a = complex, of 2 1]
by (simp add: divide_simps Gamma_eq_zero_iff not_in_Ints_imp_not_in_nonpos_Ints)
moreover have "(g has_field_derivative (g z * h z)) (at z)"
using g_g'[of z] by (simp add: ac_simps)
ultimately have "g z * h z = g z * ((h (z/2) + h ((z+1)/2))/2)"
by (intro DERIV_unique)
thus "h z = (h (z/2) + h ((z+1)/2)) / 2" by simp
qed
obtain h' where h'_cont: "continuous_on UNIV h'" and
h_h': "\<And>z. (h has_field_derivative h' z) (at z)"
unfolding h_def by (erule Gamma_reflection_aux)
have h'_eq: "h' z = (h' (z/2) + h' ((z+1)/2)) / 4" for z
proof -
have "((\<lambda>t. (h (t/2) + h ((t+1)/2)) / 2) has_field_derivative
((h' (z/2) + h' ((z+1)/2)) / 4)) (at z)"
by (fastforce intro!: derivative_eq_intros h_h'[THEN DERIV_chain2])
hence "(h has_field_derivative ((h' (z/2) + h' ((z+1)/2))/4)) (at z)"
by (subst (asm) h_eq[symmetric])
from h_h' and this show "h' z = (h' (z/2) + h' ((z+1)/2)) / 4" by (rule DERIV_unique)
qed
\<comment> \<open>The self-similarity of \<open>h'\<close> forces its sup \<open>M\<close> on a compact box to satisfy
    \<open>M \<le> M/2\<close>, hence \<open>h' = 0\<close> everywhere.\<close>
have h'_zero: "h' z = 0" for z
proof -
define m where "m = max 1 \<bar>Re z\<bar>"
define B where "B = {t. abs (Re t) \<le> m \<and> abs (Im t) \<le> abs (Im z)}"
have "closed ({t. Re t \<ge> -m} \<inter> {t. Re t \<le> m} \<inter>
{t. Im t \<ge> -\<bar>Im z\<bar>} \<inter> {t. Im t \<le> \<bar>Im z\<bar>})"
(is "closed ?B") by (intro closed_Int closed_halfspace_Re_ge closed_halfspace_Re_le
closed_halfspace_Im_ge closed_halfspace_Im_le)
also have "?B = B" unfolding B_def by fastforce
finally have "closed B" .
moreover have "bounded B" unfolding bounded_iff
proof (intro ballI exI)
fix t assume t: "t \<in> B"
have "norm t \<le> \<bar>Re t\<bar> + \<bar>Im t\<bar>" by (rule cmod_le)
also from t have "\<bar>Re t\<bar> \<le> m" unfolding B_def by blast
also from t have "\<bar>Im t\<bar> \<le> \<bar>Im z\<bar>" unfolding B_def by blast
finally show "norm t \<le> m + \<bar>Im z\<bar>" by - simp
qed
ultimately have compact: "compact B" by (subst compact_eq_bounded_closed) blast
define M where "M = (SUP z\<in>B. norm (h' z))"
have "compact (h' ` B)"
by (intro compact_continuous_image continuous_on_subset[OF h'_cont] compact) blast+
hence bdd: "bdd_above ((\<lambda>z. norm (h' z)) ` B)"
using bdd_above_norm[of "h' ` B"] by (simp add: image_comp o_def compact_imp_bounded)
have "norm (h' z) \<le> M" unfolding M_def by (intro cSUP_upper bdd) (simp_all add: B_def m_def)
also have "M \<le> M/2"
proof (subst M_def, subst cSUP_le_iff)
have "z \<in> B" unfolding B_def m_def by simp
thus "B \<noteq> {}" by auto
next
show "\<forall>z\<in>B. norm (h' z) \<le> M/2"
proof
fix t :: complex assume t: "t \<in> B"
from h'_eq[of t] t have "h' t = (h' (t/2) + h' ((t+1)/2)) / 4" by (simp)
also have "norm \<dots> = norm (h' (t/2) + h' ((t+1)/2)) / 4" by simp
also have "norm (h' (t/2) + h' ((t+1)/2)) \<le> norm (h' (t/2)) + norm (h' ((t+1)/2))"
by (rule norm_triangle_ineq)
also from t have "abs (Re ((t + 1)/2)) \<le> m" unfolding m_def B_def by auto
with t have "t/2 \<in> B" "(t+1)/2 \<in> B" unfolding B_def by auto
hence "norm (h' (t/2)) + norm (h' ((t+1)/2)) \<le> M + M" unfolding M_def
by (intro add_mono cSUP_upper bdd) (auto simp: B_def)
also have "(M + M) / 4 = M / 2" by simp
finally show "norm (h' t) \<le> M/2" by - simp_all
qed
qed (insert bdd, auto)
hence "M \<le> 0" by simp
finally show "h' z = 0" by simp
qed
have h_h'_2: "(h has_field_derivative 0) (at z)" for z
using h_h'[of z] h'_zero[of z] by simp
have g_real: "g z \<in> \<real>" if "z \<in> \<real>" for z
unfolding g_def using that by (auto intro!: Reals_mult Gamma_complex_real)
have h_real: "h z \<in> \<real>" if "z \<in> \<real>" for z
unfolding h_def using that by (auto intro!: Reals_mult Reals_add Reals_diff Polygamma_Real)
have g_nz: "g z \<noteq> 0" for z unfolding g_def using Ints_diff[of 1 "1-z"]
by (auto simp: Gamma_eq_zero_iff sin_eq_0)
\<comment> \<open>\<open>h' = 0\<close> makes \<open>h\<close> a constant; evaluating on the reals shows the constant is \<open>0\<close>,
    so \<open>g' = 0\<close> and \<open>g\<close> is the constant \<open>g 0 = pi\<close>.\<close>
from h'_zero h_h'_2 have "\<exists>c. \<forall>z\<in>UNIV. h z = c"
by (intro has_field_derivative_zero_constant) (simp_all add: dist_0_norm)
then obtain c where c: "\<And>z. h z = c" by auto
have "\<exists>u. u \<in> closed_segment 0 1 \<and> Re (g 1) - Re (g 0) = Re (h u * g u * (1 - 0))"
by (intro complex_mvt_line g_g')
then obtain u where u: "u \<in> closed_segment 0 1" "Re (g 1) - Re (g 0) = Re (h u * g u)"
by auto
from u(1) have u': "u \<in> \<real>" unfolding closed_segment_def
by (auto simp: scaleR_conv_of_real)
from u' g_real[of u] g_nz[of u] have "Re (g u) \<noteq> 0" by (auto elim!: Reals_cases)
with u(2) c[of u] g_real[of u] g_nz[of u] u'
have "Re c = 0" by (simp add: complex_is_Real_iff g.of_1)
with h_real[of 0] c[of 0] have "c = 0" by (auto elim!: Reals_cases)
with c have A: "h z * g z = 0" for z by simp
hence "(g has_field_derivative 0) (at z)" for z using g_g'[of z] by simp
hence "\<exists>c'. \<forall>z\<in>UNIV. g z = c'" by (intro has_field_derivative_zero_constant) simp_all
then obtain c' where c: "\<And>z. g z = c'" by (force)
from this[of 0] have "c' = pi" unfolding g_def by simp
with c have "g z = pi" by simp
\<comment> \<open>Unfold \<open>g = pi\<close> into the reflection formula, treating integer \<open>z\<close> separately.\<close>
show ?thesis
proof (cases "z \<in> \<int>")
case False
with \<open>g z = pi\<close> show ?thesis by (auto simp: g_def divide_simps)
next
case True
then obtain n where n: "z = of_int n" by (elim Ints_cases)
with sin_eq_0[of "of_real pi * z"] have "sin (of_real pi * z) = 0" by force
moreover have "of_int (1 - n) \<in> \<int>\<^sub>\<le>\<^sub>0" if "n > 0" using that by (intro nonpos_Ints_of_int) simp
ultimately show ?thesis using n
by (cases "n \<le> 0") (auto simp: Gamma_eq_zero_iff nonpos_Ints_of_int)
qed
qed
(* Reflection formula for the reciprocal Gamma function:
   rGamma z * rGamma (1 - z) = sin (pi z) / pi. *)
lemma rGamma_reflection_complex:
"rGamma z * rGamma (1 - z :: complex) = sin (of_real pi * z) / of_real pi"
using Gamma_reflection_complex[of z]
by (simp add: Gamma_def field_split_simps split: if_split_asm)
(* Variant of the reflection formula with -z instead of 1 - z, using the
   recurrence rGamma (-z + 1) = -z * rGamma (-z). *)
lemma rGamma_reflection_complex':
"rGamma z * rGamma (- z :: complex) = -z * sin (of_real pi * z) / of_real pi"
proof -
have "rGamma z * rGamma (-z) = -z * (rGamma z * rGamma (1 - z))"
using rGamma_plus1[of "-z", symmetric] by simp
also have "rGamma z * rGamma (1 - z) = sin (of_real pi * z) / of_real pi"
by (rule rGamma_reflection_complex)
finally show ?thesis by simp
qed
(* Reflection formula with -z: Gamma z * Gamma (-z) = -pi / (z * sin (pi z)). *)
lemma Gamma_reflection_complex':
"Gamma z * Gamma (- z :: complex) = - of_real pi / (z * sin (of_real pi * z))"
using rGamma_reflection_complex'[of z] by (force simp add: Gamma_def field_split_simps)
(* Gamma (1/2) = sqrt pi on the reals: instantiate the reflection formula at
   z = 1/2 to get Gamma(1/2)^2 = pi, then pick the nonnegative square root
   (Gamma is positive on the positive reals). *)
lemma Gamma_one_half_real: "Gamma (1/2 :: real) = sqrt pi"
proof -
from Gamma_reflection_complex[of "1/2"] fraction_not_in_ints[where 'a = complex, of 2 1]
have "Gamma (1/2 :: complex)^2 = of_real pi" by (simp add: power2_eq_square)
hence "of_real pi = Gamma (complex_of_real (1/2))^2" by simp
also have "\<dots> = of_real ((Gamma (1/2))^2)" by (subst Gamma_complex_of_real) simp_all
finally have "Gamma (1/2)^2 = pi" by (subst (asm) of_real_eq_iff) simp_all
moreover have "Gamma (1/2 :: real) \<ge> 0" using Gamma_real_pos[of "1/2"] by simp
ultimately show ?thesis by (rule real_sqrt_unique [symmetric])
qed
(* Transfer Gamma (1/2) = sqrt pi from the reals to the complex numbers. *)
lemma Gamma_one_half_complex: "Gamma (1/2 :: complex) = of_real (sqrt pi)"
proof -
have "Gamma (1/2 :: complex) = Gamma (of_real (1/2))" by simp
also have "\<dots> = of_real (sqrt pi)" by (simp only: Gamma_complex_of_real Gamma_one_half_real)
finally show ?thesis .
qed
(* Legendre duplication theorem in final form:
   Gamma z * Gamma (z + 1/2) = 2^(1 - 2z) * sqrt pi * Gamma (2z);
   the auxiliary version plus Gamma (1/2) = sqrt pi. *)
theorem Gamma_legendre_duplication:
fixes z :: complex
assumes "z \<notin> \<int>\<^sub>\<le>\<^sub>0" "z + 1/2 \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "Gamma z * Gamma (z + 1/2) =
exp ((1 - 2*z) * of_real (ln 2)) * of_real (sqrt pi) * Gamma (2*z)"
using Gamma_legendre_duplication_aux[OF assms] by (simp add: Gamma_one_half_complex)
end
subsection\<^marker>\<open>tag unimportant\<close> \<open>Limits and residues\<close>
text \<open>
The inverse of the Gamma function has simple zeros:
\<close>
(* 1/Gamma has a simple zero at -n: the difference quotient rGamma z / (z + n)
   tends to (-1)^n * n! as z approaches -n.  Proved by rewriting via the
   Pochhammer recurrence to a function that is continuous at -n. *)
lemma rGamma_zeros:
"(\<lambda>z. rGamma z / (z + of_nat n)) \<midarrow> (- of_nat n) \<rightarrow> ((-1)^n * fact n :: 'a :: Gamma)"
proof (subst tendsto_cong)
let ?f = "\<lambda>z. pochhammer z n * rGamma (z + of_nat (Suc n)) :: 'a"
from eventually_at_ball'[OF zero_less_one, of "- of_nat n :: 'a" UNIV]
show "eventually (\<lambda>z. rGamma z / (z + of_nat n) = ?f z) (at (- of_nat n))"
by (subst pochhammer_rGamma[of _ "Suc n"])
(auto elim!: eventually_mono simp: field_split_simps pochhammer_rec' eq_neg_iff_add_eq_0)
have "isCont ?f (- of_nat n)" by (intro continuous_intros)
thus "?f \<midarrow> (- of_nat n) \<rightarrow> (- 1) ^ n * fact n" unfolding isCont_def
by (simp add: pochhammer_same)
qed
text \<open>
The simple zeros of the inverse of the Gamma function correspond to simple poles of the Gamma function,
and their residues can easily be computed from the limit we have just proven:
\<close>
(* Gamma tends to infinity at each nonpositive integer (its poles):
   rGamma is continuous, nonzero near -n, and zero at -n, so its inverse
   (which is Gamma) goes to infinity there. *)
lemma Gamma_poles: "filterlim Gamma at_infinity (at (- of_nat n :: 'a :: Gamma))"
proof -
from eventually_at_ball'[OF zero_less_one, of "- of_nat n :: 'a" UNIV]
have "eventually (\<lambda>z. rGamma z \<noteq> (0 :: 'a)) (at (- of_nat n))"
by (auto elim!: eventually_mono nonpos_Ints_cases'
simp: rGamma_eq_zero_iff dist_of_nat dist_minus)
with isCont_rGamma[of "- of_nat n :: 'a", OF continuous_ident]
have "filterlim (\<lambda>z. inverse (rGamma z) :: 'a) at_infinity (at (- of_nat n))"
unfolding isCont_def by (intro filterlim_compose[OF filterlim_inverse_at_infinity])
(simp_all add: filterlim_at)
moreover have "(\<lambda>z. inverse (rGamma z) :: 'a) = Gamma"
by (intro ext) (simp add: rGamma_inverse_Gamma)
ultimately show ?thesis by (simp only: )
qed
(* The residue of Gamma at its pole -n is (-1)^n / n!:
   Gamma z * (z + n) tends to that value, obtained by inverting the limit
   from rGamma_zeros. *)
lemma Gamma_residues:
"(\<lambda>z. Gamma z * (z + of_nat n)) \<midarrow> (- of_nat n) \<rightarrow> ((-1)^n / fact n :: 'a :: Gamma)"
proof (subst tendsto_cong)
let ?c = "(- 1) ^ n / fact n :: 'a"
from eventually_at_ball'[OF zero_less_one, of "- of_nat n :: 'a" UNIV]
show "eventually (\<lambda>z. Gamma z * (z + of_nat n) = inverse (rGamma z / (z + of_nat n)))
(at (- of_nat n))"
by (auto elim!: eventually_mono simp: field_split_simps rGamma_inverse_Gamma)
have "(\<lambda>z. inverse (rGamma z / (z + of_nat n))) \<midarrow> (- of_nat n) \<rightarrow>
inverse ((- 1) ^ n * fact n :: 'a)"
by (intro tendsto_intros rGamma_zeros) simp_all
also have "inverse ((- 1) ^ n * fact n) = ?c"
by (simp_all add: field_simps flip: power_mult_distrib)
finally show "(\<lambda>z. inverse (rGamma z / (z + of_nat n))) \<midarrow> (- of_nat n) \<rightarrow> ?c" .
qed
subsection \<open>Alternative definitions\<close>
subsubsection \<open>Variant of the Euler form\<close>
(* Variant of Euler's product form of the Gamma function:
   Gamma z = lim_n (1/z) * prod_{k=1..n} (1 + 1/k)^z / (1 + z/k),
   with the power (1 + 1/k)^z written via exp and ln. *)
definition Gamma_series_euler' where
"Gamma_series_euler' z n =
inverse z * (\<Prod>k=1..n. exp (z * of_real (ln (1 + inverse (of_nat k)))) / (1 + z / of_nat k))"
context
begin
(* Telescoping identity: (n+1)^z = prod_{k=1..n} (1 + 1/k)^z, with powers
   expressed as exp (z * ln _); rests on prod_{k=1..n} (k+1)/k = n + 1. *)
private lemma Gamma_euler'_aux1:
fixes z :: "'a :: {real_normed_field,banach}"
assumes n: "n > 0"
shows "exp (z * of_real (ln (of_nat n + 1))) = (\<Prod>k=1..n. exp (z * of_real (ln (1 + 1 / of_nat k))))"
proof -
have "(\<Prod>k=1..n. exp (z * of_real (ln (1 + 1 / of_nat k)))) =
exp (z * of_real (\<Sum>k = 1..n. ln (1 + 1 / real_of_nat k)))"
by (subst exp_sum [symmetric]) (simp_all add: sum_distrib_left)
also have "(\<Sum>k=1..n. ln (1 + 1 / of_nat k) :: real) = ln (\<Prod>k=1..n. 1 + 1 / real_of_nat k)"
by (subst ln_prod [symmetric]) (auto intro!: add_pos_nonneg)
also have "(\<Prod>k=1..n. 1 + 1 / of_nat k :: real) = (\<Prod>k=1..n. (of_nat k + 1) / of_nat k)"
by (intro prod.cong) (simp_all add: field_split_simps)
also have "(\<Prod>k=1..n. (of_nat k + 1) / of_nat k :: real) = of_nat n + 1"
by (induction n) (simp_all add: prod.nat_ivl_Suc' field_split_simps)
finally show ?thesis ..
qed
(* Convergence of the Euler' variant to Gamma z, for z not a nonpositive integer.
   Via Gamma_seriesI it suffices to show that the quotient against the standard
   Gamma_series tends to 1; that quotient is eventually (Suc n / n)^z, which
   tends to 1^z = 1. *)
theorem Gamma_series_euler':
assumes z: "(z :: 'a :: Gamma) \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "(\<lambda>n. Gamma_series_euler' z n) \<longlonglongrightarrow> Gamma z"
proof (rule Gamma_seriesI, rule Lim_transform_eventually)
let ?f = "\<lambda>n. fact n * exp (z * of_real (ln (of_nat n + 1))) / pochhammer z (n + 1)"
let ?r = "\<lambda>n. ?f n / Gamma_series z n"
let ?r' = "\<lambda>n. exp (z * of_real (ln (of_nat (Suc n) / of_nat n)))"
from z have z': "z \<noteq> 0" by auto
have "eventually (\<lambda>n. ?r' n = ?r n) sequentially"
using z by (auto simp: field_split_simps Gamma_series_def ring_distribs exp_diff ln_div
intro: eventually_mono eventually_gt_at_top[of "0::nat"] dest: pochhammer_eq_0_imp_nonpos_Int)
moreover have "?r' \<longlonglongrightarrow> exp (z * of_real (ln 1))"
by (intro tendsto_intros LIMSEQ_Suc_n_over_n) simp_all
ultimately show "?r \<longlonglongrightarrow> 1" by (force intro: Lim_transform_eventually)
from eventually_gt_at_top[of "0::nat"]
show "eventually (\<lambda>n. ?r n = Gamma_series_euler' z n / Gamma_series z n) sequentially"
proof eventually_elim
fix n :: nat assume n: "n > 0"
from n z' have "Gamma_series_euler' z n =
exp (z * of_real (ln (of_nat n + 1))) / (z * (\<Prod>k=1..n. (1 + z / of_nat k)))"
by (subst Gamma_euler'_aux1)
(simp_all add: Gamma_series_euler'_def prod.distrib
prod_inversef[symmetric] divide_inverse)
also have "(\<Prod>k=1..n. (1 + z / of_nat k)) = pochhammer (z + 1) n / fact n"
proof (cases n)
case (Suc n')
then show ?thesis
unfolding pochhammer_prod fact_prod
by (simp add: atLeastLessThanSuc_atLeastAtMost field_simps prod_dividef
prod.atLeast_Suc_atMost_Suc_shift del: prod.cl_ivl_Suc)
qed auto
also have "z * \<dots> = pochhammer z (Suc n) / fact n" by (simp add: pochhammer_rec)
finally show "?r n = Gamma_series_euler' z n / Gamma_series z n" by simp
qed
qed
end
subsubsection \<open>Weierstrass form\<close>
(* Weierstrass product form of the Gamma function as a sequence:
   exp(-gamma*z)/z * PROD_{k=1..n} exp(z/k) / (1 + z/k),
   where gamma is the Euler–Mascheroni constant. *)
definition Gamma_series_Weierstrass :: "'a :: {banach,real_normed_field} \<Rightarrow> nat \<Rightarrow> 'a" where
"Gamma_series_Weierstrass z n =
exp (-euler_mascheroni * z) / z * (\<Prod>k=1..n. exp (z / of_nat k) / (1 + z / of_nat k))"
(* Reciprocal counterpart of Gamma_series_Weierstrass: termwise inverse of
   the Weierstrass product, converging to 1/Gamma. *)
definition\<^marker>\<open>tag unimportant\<close>
rGamma_series_Weierstrass :: "'a :: {banach,real_normed_field} \<Rightarrow> nat \<Rightarrow> 'a" where
"rGamma_series_Weierstrass z n =
exp (euler_mascheroni * z) * z * (\<Prod>k=1..n. (1 + z / of_nat k) * exp (-z / of_nat k))"
(* At a non-positive integer -n the Weierstrass series is eventually 0:
   once k >= n, the factor (1 + z/n') with n' = n vanishes. *)
lemma Gamma_series_Weierstrass_nonpos_Ints:
"eventually (\<lambda>k. Gamma_series_Weierstrass (- of_nat n) k = 0) sequentially"
using eventually_ge_at_top[of n] by eventually_elim (auto simp: Gamma_series_Weierstrass_def)
(* Same vanishing property for the reciprocal Weierstrass series. *)
lemma rGamma_series_Weierstrass_nonpos_Ints:
"eventually (\<lambda>k. rGamma_series_Weierstrass (- of_nat n) k = 0) sequentially"
using eventually_ge_at_top[of n] by eventually_elim (auto simp: rGamma_series_Weierstrass_def)
(* The Weierstrass product converges to Gamma on all of the complex plane.
   At the poles both sides are eventually/identically 0; elsewhere the
   partial sums of z/k - ln(1 + z/k) converge (ln_Gamma_series'_aux), and
   exponentiating yields the claim. *)
theorem Gamma_Weierstrass_complex: "Gamma_series_Weierstrass z \<longlonglongrightarrow> Gamma (z :: complex)"
proof (cases "z \<in> \<int>\<^sub>\<le>\<^sub>0")
case True
then obtain n where "z = - of_nat n" by (elim nonpos_Ints_cases')
also from True have "Gamma_series_Weierstrass \<dots> \<longlonglongrightarrow> Gamma z"
by (simp add: tendsto_cong[OF Gamma_series_Weierstrass_nonpos_Ints] Gamma_nonpos_Int)
finally show ?thesis .
next
case False
hence z: "z \<noteq> 0" by auto
let ?f = "(\<lambda>x. \<Prod>x = Suc 0..x. exp (z / of_nat x) / (1 + z / of_nat x))"
(* exp(ln(1 + z/n)) = 1 + z/n, legitimate since 1 + z/n is never 0 here *)
have A: "exp (ln (1 + z / of_nat n)) = (1 + z / of_nat n)" if "n \<ge> 1" for n :: nat
using False that by (subst exp_Ln) (auto simp: field_simps dest!: plus_of_nat_eq_0_imp)
have "(\<lambda>n. \<Sum>k=1..n. z / of_nat k - ln (1 + z / of_nat k)) \<longlonglongrightarrow> ln_Gamma z + euler_mascheroni * z + ln z"
using ln_Gamma_series'_aux[OF False]
by (simp only: atLeastLessThanSuc_atLeastAtMost [symmetric] One_nat_def
sum.shift_bounds_Suc_ivl sums_def atLeast0LessThan)
(* exponentiate the limit and divide off the constant prefactor *)
from tendsto_exp[OF this] False z have "?f \<longlonglongrightarrow> z * exp (euler_mascheroni * z) * Gamma z"
by (simp add: exp_add exp_sum exp_diff mult_ac Gamma_complex_altdef A)
from tendsto_mult[OF tendsto_const[of "exp (-euler_mascheroni * z) / z"] this] z
show "Gamma_series_Weierstrass z \<longlonglongrightarrow> Gamma z"
by (simp add: exp_minus field_split_simps Gamma_series_Weierstrass_def [abs_def])
qed
(* Convergence of a real function equals convergence of its complex embedding;
   a direct instance of the generic tendsto_of_real_iff. *)
lemma tendsto_complex_of_real_iff: "((\<lambda>x. complex_of_real (f x)) \<longlongrightarrow> of_real c) F = (f \<longlongrightarrow> c) F"
by (rule tendsto_of_real_iff)
(* Real version of the Weierstrass convergence, obtained by transferring the
   complex result along the embedding of_real. *)
lemma Gamma_Weierstrass_real: "Gamma_series_Weierstrass x \<longlonglongrightarrow> Gamma (x :: real)"
using Gamma_Weierstrass_complex[of "of_real x"] unfolding Gamma_series_Weierstrass_def[abs_def]
by (subst tendsto_complex_of_real_iff [symmetric])
(simp_all add: exp_of_real[symmetric] Gamma_complex_of_real)
(* The reciprocal Weierstrass series converges to rGamma.  At the poles the
   series is eventually 0 and rGamma vanishes; elsewhere the series is the
   termwise inverse of Gamma_series_Weierstrass, so inversion of its limit
   applies (Gamma z is nonzero off the poles). *)
lemma rGamma_Weierstrass_complex: "rGamma_series_Weierstrass z \<longlonglongrightarrow> rGamma (z :: complex)"
proof (cases "z \<in> \<int>\<^sub>\<le>\<^sub>0")
case True
then obtain n where "z = - of_nat n" by (elim nonpos_Ints_cases')
also from True have "rGamma_series_Weierstrass \<dots> \<longlonglongrightarrow> rGamma z"
by (simp add: tendsto_cong[OF rGamma_series_Weierstrass_nonpos_Ints] rGamma_nonpos_Int)
finally show ?thesis .
next
case False
have "rGamma_series_Weierstrass z = (\<lambda>n. inverse (Gamma_series_Weierstrass z n))"
by (simp add: rGamma_series_Weierstrass_def[abs_def] Gamma_series_Weierstrass_def
exp_minus divide_inverse prod_inversef[symmetric] mult_ac)
also from False have "\<dots> \<longlonglongrightarrow> inverse (Gamma z)"
by (intro tendsto_intros Gamma_Weierstrass_complex) (simp add: Gamma_eq_zero_iff)
finally show ?thesis by (simp add: Gamma_def)
qed
subsubsection \<open>Binomial coefficient form\<close>
(* Binomial-coefficient characterisation of 1/Gamma:
   ((z + n) gchoose n) * n^{-z}  tends to  rGamma (z + 1).
   For z = 0 the statement is trivial; otherwise the left-hand side is shown
   to coincide with rGamma_series z n / z, whose limit is rGamma z / z =
   rGamma (z + 1) by the recurrence for rGamma. *)
lemma Gamma_gbinomial:
"(\<lambda>n. ((z + of_nat n) gchoose n) * exp (-z * of_real (ln (of_nat n)))) \<longlonglongrightarrow> rGamma (z+1)"
proof (cases "z = 0")
case False
show ?thesis
proof (rule Lim_transform_eventually)
let ?powr = "\<lambda>a b. exp (b * of_real (ln (of_nat a)))"
show "eventually (\<lambda>n. rGamma_series z n / z =
((z + of_nat n) gchoose n) * ?powr n (-z)) sequentially"
proof (intro always_eventually allI)
fix n :: nat
(* express the generalised binomial coefficient via Pochhammer symbols *)
from False have "((z + of_nat n) gchoose n) = pochhammer z (Suc n) / z / fact n"
by (simp add: gbinomial_pochhammer' pochhammer_rec)
also have "pochhammer z (Suc n) / z / fact n * ?powr n (-z) = rGamma_series z n / z"
by (simp add: rGamma_series_def field_split_simps exp_minus)
finally show "rGamma_series z n / z = ((z + of_nat n) gchoose n) * ?powr n (-z)" ..
qed
from False have "(\<lambda>n. rGamma_series z n / z) \<longlonglongrightarrow> rGamma z / z" by (intro tendsto_intros)
(* rGamma z / z = rGamma (z+1) by the functional equation *)
also from False have "rGamma z / z = rGamma (z + 1)" using rGamma_plus1[of z]
by (simp add: field_simps)
finally show "(\<lambda>n. rGamma_series z n / z) \<longlonglongrightarrow> rGamma (z+1)" .
qed
qed (simp_all add: binomial_gbinomial [symmetric])
(* Reflection formula for generalised binomial coefficients:
   (a + b) gchoose b = (-1)^b * (-(a+1) gchoose b). *)
lemma gbinomial_minus': "(a + of_nat b) gchoose b = (- 1) ^ b * (- (a + 1) gchoose b)"
by (subst gbinomial_minus) (simp add: power_mult_distrib [symmetric])
(* Asymptotics of (z gchoose n): dividing by (-1)^n n^{-(z+1)} yields
   1 / Gamma(-z) in the limit.  Follows from Gamma_gbinomial applied to
   -z - 1 together with the reflection formula gbinomial_minus'. *)
lemma gbinomial_asymptotic:
fixes z :: "'a :: Gamma"
shows "(\<lambda>n. (z gchoose n) / ((-1)^n / exp ((z+1) * of_real (ln (real n))))) \<longlonglongrightarrow>
inverse (Gamma (- z))"
unfolding rGamma_inverse_Gamma [symmetric] using Gamma_gbinomial[of "-z-1"]
by (subst (asm) gbinomial_minus')
(simp add: add_ac mult_ac divide_inverse power_inverse [symmetric])
(* (k+n choose n) / n^k tends to 1/k!.  Specialise Gamma_gbinomial to
   z = of_nat k and identify Gamma (Suc k) with fact k; the eventual
   rewriting replaces exp (k * ln n) by n^k for n > 0. *)
lemma fact_binomial_limit:
"(\<lambda>n. of_nat ((k + n) choose n) / of_nat (n ^ k) :: 'a :: Gamma) \<longlonglongrightarrow> 1 / fact k"
proof (rule Lim_transform_eventually)
have "(\<lambda>n. of_nat ((k + n) choose n) / of_real (exp (of_nat k * ln (real_of_nat n))))
\<longlonglongrightarrow> 1 / Gamma (of_nat (Suc k) :: 'a)" (is "?f \<longlonglongrightarrow> _")
using Gamma_gbinomial[of "of_nat k :: 'a"]
by (simp add: binomial_gbinomial Gamma_def field_split_simps exp_of_real [symmetric] exp_minus)
also have "Gamma (of_nat (Suc k)) = fact k" by (simp add: Gamma_fact)
finally show "?f \<longlonglongrightarrow> 1 / fact k" .
show "eventually (\<lambda>n. ?f n = of_nat ((k + n) choose n) / of_nat (n ^ k)) sequentially"
using eventually_gt_at_top[of "0::nat"]
proof eventually_elim
fix n :: nat assume n: "n > 0"
from n have "exp (real_of_nat k * ln (real_of_nat n)) = real_of_nat (n^k)"
by (simp add: exp_of_nat_mult)
thus "?f n = of_nat ((k + n) choose n) / of_nat (n ^ k)" by simp
qed
qed
(* Equivalent reformulation: (k+n choose n) / (n^k / k!) tends to 1. *)
lemma binomial_asymptotic':
"(\<lambda>n. of_nat ((k + n) choose n) / (of_nat (n ^ k) / fact k) :: 'a :: Gamma) \<longlonglongrightarrow> 1"
using tendsto_mult[OF fact_binomial_limit[of k] tendsto_const[of "fact k :: 'a"]] by simp
(* Binomial coefficients in terms of the Beta function:
   (z gchoose n) = 1 / ((z+1) * Beta (z - n + 1) (n + 1)), provided z + 1 is
   not a non-positive integer.  Proof by induction on n; the induction step
   separately handles the degenerate case z = 0 (where a Beta pole occurs)
   and otherwise uses the Beta recurrence Beta_plus1_right. *)
lemma gbinomial_Beta:
assumes "z + 1 \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "((z::'a::Gamma) gchoose n) = inverse ((z + 1) * Beta (z - of_nat n + 1) (of_nat n + 1))"
using assms
proof (induction n arbitrary: z)
case 0
hence "z + 2 \<notin> \<int>\<^sub>\<le>\<^sub>0"
using plus_one_in_nonpos_Ints_imp[of "z+1"] by (auto simp: add.commute)
with 0 show ?case
by (auto simp: Beta_def Gamma_eq_zero_iff Gamma_plus1 [symmetric] add.commute)
next
case (Suc n z)
show ?case
proof (cases "z \<in> \<int>\<^sub>\<le>\<^sub>0")
case True
(* with z+1 not a non-positive integer, z itself must then be 0 *)
with Suc.prems have "z = 0"
by (auto elim!: nonpos_Ints_cases simp: algebra_simps one_plus_of_int_in_nonpos_Ints_iff)
show ?thesis
proof (cases "n = 0")
case True
with \<open>z = 0\<close> show ?thesis
by (simp add: Beta_def Gamma_eq_zero_iff Gamma_plus1 [symmetric])
next
case False
with \<open>z = 0\<close> show ?thesis
by (simp_all add: Beta_pole1 one_minus_of_nat_in_nonpos_Ints_iff)
qed
next
case False
(* step the coefficient down via gbinomial_factors and apply the IH at z-1 *)
have "(z gchoose (Suc n)) = ((z - 1 + 1) gchoose (Suc n))" by simp
also have "\<dots> = (z - 1 gchoose n) * ((z - 1) + 1) / of_nat (Suc n)"
by (subst gbinomial_factors) (simp add: field_simps)
also from False have "\<dots> = inverse (of_nat (Suc n) * Beta (z - of_nat n) (of_nat (Suc n)))"
(is "_ = inverse ?x") by (subst Suc.IH) (simp_all add: field_simps Beta_pole1)
also have "of_nat (Suc n) \<notin> (\<int>\<^sub>\<le>\<^sub>0 :: 'a set)" by (subst of_nat_in_nonpos_Ints_iff) simp_all
hence "?x = (z + 1) * Beta (z - of_nat (Suc n) + 1) (of_nat (Suc n) + 1)"
by (subst Beta_plus1_right [symmetric]) simp_all
finally show ?thesis .
qed
qed
(* The classical Gamma formula for binomial coefficients:
   (z gchoose n) = Gamma (z+1) / (n! * Gamma (z - n + 1)).
   Unfolds gbinomial_Beta into Gamma quotients and absorbs the extra (z+1)
   factor via the functional equation Gamma (z+2) = (z+1) * Gamma (z+1). *)
theorem gbinomial_Gamma:
assumes "z + 1 \<notin> \<int>\<^sub>\<le>\<^sub>0"
shows "(z gchoose n) = Gamma (z + 1) / (fact n * Gamma (z - of_nat n + 1))"
proof -
have "(z gchoose n) = Gamma (z + 2) / (z + 1) / (fact n * Gamma (z - of_nat n + 1))"
by (subst gbinomial_Beta[OF assms]) (simp_all add: Beta_def Gamma_fact [symmetric] add_ac)
also from assms have "Gamma (z + 2) / (z + 1) = Gamma (z + 1)"
using Gamma_plus1[of "z+1"] by (auto simp add: field_split_simps)
finally show ?thesis .
qed
subsubsection \<open>Integral form\<close>
(* Variant of integrable_on_powr_from_0 over the half-open interval {0<..c}:
   removing the single point 0 does not affect integrability (spike set). *)
lemma integrable_on_powr_from_0':
assumes a: "a > (-1::real)" and c: "c \<ge> 0"
shows "(\<lambda>x. x powr a) integrable_on {0<..c}"
proof -
from c have *: "{0<..c} - {0..c} = {}" "{0..c} - {0<..c} = {0}" by auto
show ?thesis
by (rule integrable_spike_set [OF integrable_on_powr_from_0[OF a c]]) (simp_all add: *)
qed
(* Absolute integrability of the Gamma integrand t^{z-1} e^{-a t} on {0<..}
   for Re z > 0 and a > 0.  Strategy: choose x0 > 0 such that for x >= x0
   the integrand is bounded by exp(-(a/2) x); then split the domain into
   {0<..x0} (dominated by x^{Re z - 1}, integrable near 0 since Re z > 0)
   and {x0..} (dominated by the integrable exponential tail). *)
lemma absolutely_integrable_Gamma_integral:
assumes "Re z > 0" "a > 0"
shows "(\<lambda>t. complex_of_real t powr (z - 1) / of_real (exp (a * t)))
absolutely_integrable_on {0<..}" (is "?f absolutely_integrable_on _")
proof -
(* ln x / x -> 0 at infinity gives the threshold x0 *)
have "((\<lambda>x. (Re z - 1) * (ln x / x)) \<longlongrightarrow> (Re z - 1) * 0) at_top"
by (intro tendsto_intros ln_x_over_x_tendsto_0)
hence "((\<lambda>x. ((Re z - 1) * ln x) / x) \<longlongrightarrow> 0) at_top" by simp
from order_tendstoD(2)[OF this, of "a/2"] and \<open>a > 0\<close>
have "eventually (\<lambda>x. (Re z - 1) * ln x / x < a/2) at_top" by simp
from eventually_conj[OF this eventually_gt_at_top[of 0]]
obtain x0 where "\<forall>x\<ge>x0. (Re z - 1) * ln x / x < a/2 \<and> x > 0"
by (auto simp: eventually_at_top_linorder)
hence "x0 > 0" by simp
(* beyond x0 the integrand is below exp(-(a/2) x) *)
have "x powr (Re z - 1) / exp (a * x) < exp (-(a/2) * x)" if "x \<ge> x0" for x
proof -
from that and \<open>\<forall>x\<ge>x0. _\<close> have x: "(Re z - 1) * ln x / x < a / 2" "x > 0" by auto
have "x powr (Re z - 1) = exp ((Re z - 1) * ln x)"
using \<open>x > 0\<close> by (simp add: powr_def)
also from x have "(Re z - 1) * ln x < (a * x) / 2" by (simp add: field_simps)
finally show ?thesis by (simp add: field_simps exp_add [symmetric])
qed
note x0 = \<open>x0 > 0\<close> this
have "?f absolutely_integrable_on ({0<..x0} \<union> {x0..})"
proof (rule set_integrable_Un)
(* near 0: dominate by the power function, integrable since Re z - 1 > -1 *)
show "?f absolutely_integrable_on {0<..x0}"
unfolding set_integrable_def
proof (rule Bochner_Integration.integrable_bound [OF _ _ AE_I2])
show "integrable lebesgue (\<lambda>x. indicat_real {0<..x0} x *\<^sub>R x powr (Re z - 1))"
using x0(1) assms
by (intro nonnegative_absolutely_integrable_1 [unfolded set_integrable_def] integrable_on_powr_from_0') auto
show "(\<lambda>x. indicat_real {0<..x0} x *\<^sub>R (x powr (z - 1) / exp (a * x))) \<in> borel_measurable lebesgue"
by (intro measurable_completion)
(auto intro!: borel_measurable_continuous_on_indicator continuous_intros)
fix x :: real
have "x powr (Re z - 1) / exp (a * x) \<le> x powr (Re z - 1) / 1" if "x \<ge> 0"
using that assms by (intro divide_left_mono) auto
thus "norm (indicator {0<..x0} x *\<^sub>R ?f x) \<le>
norm (indicator {0<..x0} x *\<^sub>R x powr (Re z - 1))"
by (simp_all add: norm_divide norm_powr_real_powr indicator_def)
qed
next
(* tail: dominate by the integrable exponential *)
show "?f absolutely_integrable_on {x0..}"
unfolding set_integrable_def
proof (rule Bochner_Integration.integrable_bound [OF _ _ AE_I2])
show "integrable lebesgue (\<lambda>x. indicat_real {x0..} x *\<^sub>R exp (- (a / 2) * x))" using assms
by (intro nonnegative_absolutely_integrable_1 [unfolded set_integrable_def] integrable_on_exp_minus_to_infinity) auto
show "(\<lambda>x. indicat_real {x0..} x *\<^sub>R (x powr (z - 1) / exp (a * x))) \<in> borel_measurable lebesgue" using x0(1)
by (intro measurable_completion)
(auto intro!: borel_measurable_continuous_on_indicator continuous_intros)
fix x :: real
show "norm (indicator {x0..} x *\<^sub>R ?f x) \<le>
norm (indicator {x0..} x *\<^sub>R exp (-(a/2) * x))" using x0
by (auto simp: norm_divide norm_powr_real_powr indicator_def less_imp_le)
qed
qed auto
also have "{0<..x0} \<union> {x0..} = {0<..}" using x0(1) by auto
finally show ?thesis .
qed
(* An integrable dominating function for the Gamma integrand on {0..}:
   x^a on {0..c} glued to exp(-x/2) on {c..}.  Each piece is integrable;
   the spike lemmas absorb the disagreement at the single point c. *)
lemma integrable_Gamma_integral_bound:
fixes a c :: real
assumes a: "a > -1" and c: "c \<ge> 0"
defines "f \<equiv> \<lambda>x. if x \<in> {0..c} then x powr a else exp (-x/2)"
shows "f integrable_on {0..}"
proof -
have "f integrable_on {0..c}"
by (rule integrable_spike_finite[of "{}", OF _ _ integrable_on_powr_from_0[of a c]])
(insert a c, simp_all add: f_def)
moreover have A: "(\<lambda>x. exp (-x/2)) integrable_on {c..}"
using integrable_on_exp_minus_to_infinity[of "1/2"] by simp
have "f integrable_on {c..}"
by (rule integrable_spike_finite[of "{c}", OF _ _ A]) (simp_all add: f_def)
ultimately show "f integrable_on {0..}"
by (rule integrable_Un') (insert c, auto simp: max_def)
qed
(* Euler's integral representation: for Re z > 0,
   Gamma z = integral over {0..} of t^{z-1} e^{-t}.
   Outline of the proof:
     A: exact value of the truncated Beta-like integral
        int_0^1 t^{z-1} (1-t)^n dt = n! / pochhammer z (n+1), by induction
        on n with integration by parts;
     B: rescaling A to {0..n} gives
        int_0^n t^{z-1} (1 - t/n)^n dt = n^z n! / pochhammer z (n+1);
     C: the right-hand side of B converges to Gamma z (Gamma_series_LIMSEQ);
     D: pointwise, (1 - t/n)^n -> e^{-t}, so the integrands of B converge
        to the Gamma integrand;
     E/F: the integrands are uniformly dominated by the integrable function
        h from integrable_Gamma_integral_bound;
   dominated convergence (has_integral_dominated_convergence) concludes. *)
theorem Gamma_integral_complex:
assumes z: "Re z > 0"
shows "((\<lambda>t. of_real t powr (z - 1) / of_real (exp t)) has_integral Gamma z) {0..}"
proof -
(* A: closed form of the truncated integral on {0..1}, by induction *)
have A: "((\<lambda>t. (of_real t) powr (z - 1) * of_real ((1 - t) ^ n))
has_integral (fact n / pochhammer z (n+1))) {0..1}"
if "Re z > 0" for n z using that
proof (induction n arbitrary: z)
case 0
have "((\<lambda>t. complex_of_real t powr (z - 1)) has_integral
(of_real 1 powr z / z - of_real 0 powr z / z)) {0..1}" using 0
by (intro fundamental_theorem_of_calculus_interior)
(auto intro!: continuous_intros derivative_eq_intros has_vector_derivative_real_field)
thus ?case by simp
next
case (Suc n)
let ?f = "\<lambda>t. complex_of_real t powr z / z"
let ?f' = "\<lambda>t. complex_of_real t powr (z - 1)"
let ?g = "\<lambda>t. (1 - complex_of_real t) ^ Suc n"
let ?g' = "\<lambda>t. - ((1 - complex_of_real t) ^ n) * of_nat (Suc n)"
(* integration by parts reduces the exponent of (1 - t) by one *)
have "((\<lambda>t. ?f' t * ?g t) has_integral
(of_nat (Suc n)) * fact n / pochhammer z (n+2)) {0..1}"
(is "(_ has_integral ?I) _")
proof (rule integration_by_parts_interior[where f' = ?f' and g = ?g])
from Suc.prems show "continuous_on {0..1} ?f" "continuous_on {0..1} ?g"
by (auto intro!: continuous_intros)
next
fix t :: real assume t: "t \<in> {0<..<1}"
show "(?f has_vector_derivative ?f' t) (at t)" using t Suc.prems
by (auto intro!: derivative_eq_intros has_vector_derivative_real_field)
show "(?g has_vector_derivative ?g' t) (at t)"
by (rule has_vector_derivative_real_field derivative_eq_intros refl)+ simp_all
next
from Suc.prems have [simp]: "z \<noteq> 0" by auto
from Suc.prems have A: "Re (z + of_nat n) > 0" for n by simp
have [simp]: "z + of_nat n \<noteq> 0" "z + 1 + of_nat n \<noteq> 0" for n
using A[of n] A[of "Suc n"] by (auto simp add: add.assoc simp del: plus_complex.sel)
(* apply the induction hypothesis at z + 1 and rescale *)
have "((\<lambda>x. of_real x powr z * of_real ((1 - x) ^ n) * (- of_nat (Suc n) / z)) has_integral
fact n / pochhammer (z+1) (n+1) * (- of_nat (Suc n) / z)) {0..1}"
(is "(?A has_integral ?B) _")
using Suc.IH[of "z+1"] Suc.prems by (intro has_integral_mult_left) (simp_all add: add_ac pochhammer_rec)
also have "?A = (\<lambda>t. ?f t * ?g' t)" by (intro ext) (simp_all add: field_simps)
also have "?B = - (of_nat (Suc n) * fact n / pochhammer z (n+2))"
by (simp add: field_split_simps pochhammer_rec
prod.shift_bounds_cl_Suc_ivl del: of_nat_Suc)
finally show "((\<lambda>t. ?f t * ?g' t) has_integral (?f 1 * ?g 1 - ?f 0 * ?g 0 - ?I)) {0..1}"
by simp
qed (simp_all add: bounded_bilinear_mult)
thus ?case by simp
qed
(* B: substitute t = n*x in A to get the integral over {0..n} *)
have B: "((\<lambda>t. if t \<in> {0..of_nat n} then
of_real t powr (z - 1) * (1 - of_real t / of_nat n) ^ n else 0)
has_integral (of_nat n powr z * fact n / pochhammer z (n+1))) {0..}" for n
proof (cases "n > 0")
case [simp]: True
hence [simp]: "n \<noteq> 0" by auto
with has_integral_affinity01[OF A[OF z, of n], of "inverse (of_nat n)" 0]
have "((\<lambda>x. (of_nat n - of_real x) ^ n * (of_real x / of_nat n) powr (z - 1) / of_nat n ^ n)
has_integral fact n * of_nat n / pochhammer z (n+1)) ((\<lambda>x. real n * x)`{0..1})"
(is "(?f has_integral ?I) ?ivl") by (simp add: field_simps scaleR_conv_of_real)
also from True have "((\<lambda>x. real n*x)`{0..1}) = {0..real n}"
by (subst image_mult_atLeastAtMost) simp_all
also have "?f = (\<lambda>x. (of_real x / of_nat n) powr (z - 1) * (1 - of_real x / of_nat n) ^ n)"
using True by (intro ext) (simp add: field_simps)
finally have "((\<lambda>x. (of_real x / of_nat n) powr (z - 1) * (1 - of_real x / of_nat n) ^ n)
has_integral ?I) {0..real n}" (is ?P) .
(* rewrite (x/n)^{z-1} step by step into x^{z-1} * n^{-(z-1)} *)
also have "?P \<longleftrightarrow> ((\<lambda>x. exp ((z - 1) * of_real (ln (x / of_nat n))) * (1 - of_real x / of_nat n) ^ n)
has_integral ?I) {0..real n}"
by (intro has_integral_spike_finite_eq[of "{0}"]) (auto simp: powr_def Ln_of_real [symmetric])
also have "\<dots> \<longleftrightarrow> ((\<lambda>x. exp ((z - 1) * of_real (ln x - ln (of_nat n))) * (1 - of_real x / of_nat n) ^ n)
has_integral ?I) {0..real n}"
by (intro has_integral_spike_finite_eq[of "{0}"]) (simp_all add: ln_div)
finally have \<dots> .
note B = has_integral_mult_right[OF this, of "exp ((z - 1) * ln (of_nat n))"]
have "((\<lambda>x. exp ((z - 1) * of_real (ln x)) * (1 - of_real x / of_nat n) ^ n)
has_integral (?I * exp ((z - 1) * ln (of_nat n)))) {0..real n}" (is ?P)
by (insert B, subst (asm) mult.assoc [symmetric], subst (asm) exp_add [symmetric])
(simp add: algebra_simps)
also have "?P \<longleftrightarrow> ((\<lambda>x. of_real x powr (z - 1) * (1 - of_real x / of_nat n) ^ n)
has_integral (?I * exp ((z - 1) * ln (of_nat n)))) {0..real n}"
by (intro has_integral_spike_finite_eq[of "{0}"]) (simp_all add: powr_def Ln_of_real)
also have "fact n * of_nat n / pochhammer z (n+1) * exp ((z - 1) * Ln (of_nat n)) =
(of_nat n powr z * fact n / pochhammer z (n+1))"
by (auto simp add: powr_def algebra_simps exp_diff exp_of_real)
finally show ?thesis by (subst has_integral_restrict) simp_all
next
case False
thus ?thesis by (subst has_integral_restrict) (simp_all add: has_integral_refl)
qed
(* C: the values of B converge to Gamma z *)
have "eventually (\<lambda>n. Gamma_series z n =
of_nat n powr z * fact n / pochhammer z (n+1)) sequentially"
using eventually_gt_at_top[of "0::nat"]
by eventually_elim (simp add: powr_def algebra_simps Gamma_series_def)
from this and Gamma_series_LIMSEQ[of z]
have C: "(\<lambda>k. of_nat k powr z * fact k / pochhammer z (k+1)) \<longlonglongrightarrow> Gamma z"
by (blast intro: Lim_transform_eventually)
(* D: pointwise convergence of the truncated integrands *)
{
fix x :: real assume x: "x \<ge> 0"
have lim_exp: "(\<lambda>k. (1 - x / real k) ^ k) \<longlonglongrightarrow> exp (-x)"
using tendsto_exp_limit_sequentially[of "-x"] by simp
have "(\<lambda>k. of_real x powr (z - 1) * of_real ((1 - x / of_nat k) ^ k))
\<longlonglongrightarrow> of_real x powr (z - 1) * of_real (exp (-x))" (is ?P)
by (intro tendsto_intros lim_exp)
also from eventually_gt_at_top[of "nat \<lceil>x\<rceil>"]
have "eventually (\<lambda>k. of_nat k > x) sequentially" by eventually_elim linarith
hence "?P \<longleftrightarrow> (\<lambda>k. if x \<le> of_nat k then
of_real x powr (z - 1) * of_real ((1 - x / of_nat k) ^ k) else 0)
\<longlonglongrightarrow> of_real x powr (z - 1) * of_real (exp (-x))"
by (intro tendsto_cong) (auto elim!: eventually_mono)
finally have \<dots> .
}
hence D: "\<forall>x\<in>{0..}. (\<lambda>k. if x \<in> {0..real k} then
of_real x powr (z - 1) * (1 - of_real x / of_nat k) ^ k else 0)
\<longlonglongrightarrow> of_real x powr (z - 1) / of_real (exp x)"
by (simp add: exp_minus field_simps cong: if_cong)
(* construct the dominating function h as in the a = 1 case *)
have "((\<lambda>x. (Re z - 1) * (ln x / x)) \<longlongrightarrow> (Re z - 1) * 0) at_top"
by (intro tendsto_intros ln_x_over_x_tendsto_0)
hence "((\<lambda>x. ((Re z - 1) * ln x) / x) \<longlongrightarrow> 0) at_top" by simp
from order_tendstoD(2)[OF this, of "1/2"]
have "eventually (\<lambda>x. (Re z - 1) * ln x / x < 1/2) at_top" by simp
from eventually_conj[OF this eventually_gt_at_top[of 0]]
obtain x0 where "\<forall>x\<ge>x0. (Re z - 1) * ln x / x < 1/2 \<and> x > 0"
by (auto simp: eventually_at_top_linorder)
hence x0: "x0 > 0" "\<And>x. x \<ge> x0 \<Longrightarrow> (Re z - 1) * ln x < x / 2" by auto
define h where "h = (\<lambda>x. if x \<in> {0..x0} then x powr (Re z - 1) else exp (-x/2))"
have le_h: "x powr (Re z - 1) * exp (-x) \<le> h x" if x: "x \<ge> 0" for x
proof (cases "x > x0")
case True
from True x0(1) have "x powr (Re z - 1) * exp (-x) = exp ((Re z - 1) * ln x - x)"
by (simp add: powr_def exp_diff exp_minus field_simps exp_add)
also from x0(2)[of x] True have "\<dots> < exp (-x/2)"
by (simp add: field_simps)
finally show ?thesis using True by (auto simp add: h_def)
next
case False
from x have "x powr (Re z - 1) * exp (- x) \<le> x powr (Re z - 1) * 1"
by (intro mult_left_mono) simp_all
with False show ?thesis by (auto simp add: h_def)
qed
(* E: all truncated integrands are bounded by h *)
have E: "\<forall>x\<in>{0..}. cmod (if x \<in> {0..real k} then of_real x powr (z - 1) *
(1 - complex_of_real x / of_nat k) ^ k else 0) \<le> h x"
(is "\<forall>x\<in>_. ?f x \<le> _") for k
proof safe
fix x :: real assume x: "x \<ge> 0"
{
fix x :: real and n :: nat assume x: "x \<le> of_nat n"
have "(1 - complex_of_real x / of_nat n) = complex_of_real ((1 - x / of_nat n))" by simp
also have "norm \<dots> = \<bar>(1 - x / real n)\<bar>" by (subst norm_of_real) (rule refl)
also from x have "\<dots> = (1 - x / real n)" by (intro abs_of_nonneg) (simp_all add: field_split_simps)
finally have "cmod (1 - complex_of_real x / of_nat n) = 1 - x / real n" .
} note D = this
from D[of x k] x
have "?f x \<le> (if of_nat k \<ge> x \<and> k > 0 then x powr (Re z - 1) * (1 - x / real k) ^ k else 0)"
by (auto simp: norm_mult norm_powr_real_powr norm_power intro!: mult_nonneg_nonneg)
also have "\<dots> \<le> x powr (Re z - 1) * exp (-x)"
by (auto intro!: mult_left_mono exp_ge_one_minus_x_over_n_power_n)
also from x have "\<dots> \<le> h x" by (rule le_h)
finally show "?f x \<le> h x" .
qed
(* F: the dominating function is integrable *)
have F: "h integrable_on {0..}" unfolding h_def
by (rule integrable_Gamma_integral_bound) (insert assms x0(1), simp_all)
show ?thesis
by (rule has_integral_dominated_convergence[OF B F E D C])
qed
(* Real version of Euler's integral: transfer the complex result along the
   embedding and project with Re (a bounded linear map). *)
lemma Gamma_integral_real:
assumes x: "x > (0 :: real)"
shows "((\<lambda>t. t powr (x - 1) / exp t) has_integral Gamma x) {0..}"
proof -
have A: "((\<lambda>t. complex_of_real t powr (complex_of_real x - 1) /
complex_of_real (exp t)) has_integral complex_of_real (Gamma x)) {0..}"
using Gamma_integral_complex[of x] assms by (simp_all add: Gamma_complex_of_real powr_of_real)
have "((\<lambda>t. complex_of_real (t powr (x - 1) / exp t)) has_integral of_real (Gamma x)) {0..}"
by (rule has_integral_eq[OF _ A]) (simp_all add: powr_of_real [symmetric])
from has_integral_linear[OF this bounded_linear_Re] show ?thesis by (simp add: o_def)
qed
(* Special case a = 1 of absolutely_integrable_Gamma_integral: the standard
   Gamma integrand t^{z-1} e^{-t} is absolutely integrable on {0<..}. *)
lemma absolutely_integrable_Gamma_integral':
assumes "Re z > 0"
shows "(\<lambda>t. complex_of_real t powr (z - 1) / of_real (exp t)) absolutely_integrable_on {0<..}"
using absolutely_integrable_Gamma_integral [OF assms zero_less_one] by simp
(* Euler's integral over the open half-line {0<..}: dropping the single
   endpoint 0 (a null spike) does not change the integral. *)
lemma Gamma_integral_complex':
assumes z: "Re z > 0"
shows "((\<lambda>t. of_real t powr (z - 1) / of_real (exp t)) has_integral Gamma z) {0<..}"
proof -
have "((\<lambda>t. of_real t powr (z - 1) / of_real (exp t)) has_integral Gamma z) {0..}"
by (rule Gamma_integral_complex) fact+
hence "((\<lambda>t. if t \<in> {0<..} then of_real t powr (z - 1) / of_real (exp t) else 0)
has_integral Gamma z) {0..}"
by (rule has_integral_spike [of "{0}", rotated 2]) auto
also have "?this = ?thesis"
by (subst has_integral_restrict) auto
finally show ?thesis .
qed
(* Measure-theoretic form of Euler's integral: Gamma s as a Lebesgue
   non-negative integral over lborel of the indicator-restricted integrand. *)
lemma Gamma_conv_nn_integral_real:
assumes "s > (0::real)"
shows "Gamma s = nn_integral lborel (\<lambda>t. ennreal (indicator {0..} t * t powr (s - 1) / exp t))"
using nn_integral_has_integral_lebesgue[OF _ Gamma_integral_real[OF assms]] by simp
(* Integrability of the Beta integrand t^{a-1} (1-t)^{b-1} on {0..1} for
   a, b > 0.  Split at 1/2: on {0..1/2} the factor (1-t)^{b-1} is bounded
   by the constant C and t^{a-1} is integrable near 0; symmetrically on
   {1/2..1} with the constant D and (1-t)^{b-1} integrable near 1. *)
lemma integrable_Beta:
assumes "a > 0" "b > (0::real)"
shows "set_integrable lborel {0..1} (\<lambda>t. t powr (a - 1) * (1 - t) powr (b - 1))"
proof -
(* C bounds (1-x)^{b-1} on {0..1/2}; D bounds x^{a-1} on {1/2..1} *)
define C where "C = max 1 ((1/2) powr (b - 1))"
define D where "D = max 1 ((1/2) powr (a - 1))"
have C: "(1 - x) powr (b - 1) \<le> C" if "x \<in> {0..1/2}" for x
proof (cases "b < 1")
case False
with that have "(1 - x) powr (b - 1) \<le> (1 powr (b - 1))" by (intro powr_mono2) auto
thus ?thesis by (auto simp: C_def)
qed (insert that, auto simp: max.coboundedI1 max.coboundedI2 powr_mono2' powr_mono2 C_def)
have D: "x powr (a - 1) \<le> D" if "x \<in> {1/2..1}" for x
proof (cases "a < 1")
case False
with that have "x powr (a - 1) \<le> (1 powr (a - 1))" by (intro powr_mono2) auto
thus ?thesis by (auto simp: D_def)
next
(* NOTE(review): this 'case True' is closed by the terminal 'qed (insert ...)'
   with no explicit 'show' — unusual Isar structure compared to the C proof
   above, which has no 'next' at all.  Confirm this is intended and checks. *)
case True
qed (insert that, auto simp: max.coboundedI1 max.coboundedI2 powr_mono2' powr_mono2 D_def)
have [simp]: "C \<ge> 0" "D \<ge> 0" by (simp_all add: C_def D_def)
(* left half: dominate by C * x^{a-1} *)
have I1: "set_integrable lborel {0..1/2} (\<lambda>t. t powr (a - 1) * (1 - t) powr (b - 1))"
unfolding set_integrable_def
proof (rule Bochner_Integration.integrable_bound[OF _ _ AE_I2])
have "(\<lambda>t. t powr (a - 1)) integrable_on {0..1/2}"
by (rule integrable_on_powr_from_0) (use assms in auto)
hence "(\<lambda>t. t powr (a - 1)) absolutely_integrable_on {0..1/2}"
by (subst absolutely_integrable_on_iff_nonneg) auto
from integrable_mult_right[OF this [unfolded set_integrable_def], of C]
show "integrable lborel (\<lambda>x. indicat_real {0..1/2} x *\<^sub>R (C * x powr (a - 1)))"
by (subst (asm) integrable_completion) (auto simp: mult_ac)
next
fix x :: real
have "x powr (a - 1) * (1 - x) powr (b - 1) \<le> x powr (a - 1) * C" if "x \<in> {0..1/2}"
using that by (intro mult_left_mono powr_mono2 C) auto
thus "norm (indicator {0..1/2} x *\<^sub>R (x powr (a - 1) * (1 - x) powr (b - 1))) \<le>
norm (indicator {0..1/2} x *\<^sub>R (C * x powr (a - 1)))"
by (auto simp: indicator_def abs_mult mult_ac)
qed (auto intro!: AE_I2 simp: indicator_def)
(* right half: dominate by D * (1-x)^{b-1}, via reflection of the left case *)
have I2: "set_integrable lborel {1/2..1} (\<lambda>t. t powr (a - 1) * (1 - t) powr (b - 1))"
unfolding set_integrable_def
proof (rule Bochner_Integration.integrable_bound[OF _ _ AE_I2])
have "(\<lambda>t. t powr (b - 1)) integrable_on {0..1/2}"
by (rule integrable_on_powr_from_0) (use assms in auto)
hence "(\<lambda>t. t powr (b - 1)) integrable_on (cbox 0 (1/2))" by simp
from integrable_affinity[OF this, of "-1" 1]
have "(\<lambda>t. (1 - t) powr (b - 1)) integrable_on {1/2..1}" by simp
hence "(\<lambda>t. (1 - t) powr (b - 1)) absolutely_integrable_on {1/2..1}"
by (subst absolutely_integrable_on_iff_nonneg) auto
from integrable_mult_right[OF this [unfolded set_integrable_def], of D]
show "integrable lborel (\<lambda>x. indicat_real {1/2..1} x *\<^sub>R (D * (1 - x) powr (b - 1)))"
by (subst (asm) integrable_completion) (auto simp: mult_ac)
next
fix x :: real
have "x powr (a - 1) * (1 - x) powr (b - 1) \<le> D * (1 - x) powr (b - 1)" if "x \<in> {1/2..1}"
using that by (intro mult_right_mono powr_mono2 D) auto
thus "norm (indicator {1/2..1} x *\<^sub>R (x powr (a - 1) * (1 - x) powr (b - 1))) \<le>
norm (indicator {1/2..1} x *\<^sub>R (D * (1 - x) powr (b - 1)))"
by (auto simp: indicator_def abs_mult mult_ac)
qed (auto intro!: AE_I2 simp: indicator_def)
have "set_integrable lborel ({0..1/2} \<union> {1/2..1}) (\<lambda>t. t powr (a - 1) * (1 - t) powr (b - 1))"
by (intro set_integrable_Un I1 I2) auto
also have "{0..1/2} \<union> {1/2..1} = {0..(1::real)}" by auto
finally show ?thesis .
qed
(* Gauge-integral (Henstock) form of integrable_Beta on {0..1}. *)
lemma integrable_Beta':
assumes "a > 0" "b > (0::real)"
shows "(\<lambda>t. t powr (a - 1) * (1 - t) powr (b - 1)) integrable_on {0..1}"
using integrable_Beta[OF assms] by (rule set_borel_integral_eq_integral)
theorem has_integral_Beta_real:
assumes a: "a > 0" and b: "b > (0 :: real)"
shows "((\<lambda>t. t powr (a - 1) * (1 - t) powr (b - 1)) has_integral Beta a b) {0..1}"
proof -
define B where "B = integral {0..1} (\<lambda>x. x powr (a - 1) * (1 - x) powr (b - 1))"
have [simp]: "B \<ge> 0" unfolding B_def using a b
by (intro integral_nonneg integrable_Beta') auto
from a b have "ennreal (Gamma a * Gamma b) =
(\<integral>\<^sup>+ t. ennreal (indicator {0..} t * t powr (a - 1) / exp t) \<partial>lborel) *
(\<integral>\<^sup>+ t. ennreal (indicator {0..} t * t powr (b - 1) / exp t) \<partial>lborel)"
by (subst ennreal_mult') (simp_all add: Gamma_conv_nn_integral_real)
also have "\<dots> = (\<integral>\<^sup>+t. \<integral>\<^sup>+u. ennreal (indicator {0..} t * t powr (a - 1) / exp t) *
ennreal (indicator {0..} u * u powr (b - 1) / exp u) \<partial>lborel \<partial>lborel)"
by (simp add: nn_integral_cmult nn_integral_multc)
also have "\<dots> = (\<integral>\<^sup>+t. \<integral>\<^sup>+u. ennreal (indicator ({0..}\<times>{0..}) (t,u) * t powr (a - 1) * u powr (b - 1)
/ exp (t + u)) \<partial>lborel \<partial>lborel)"
by (intro nn_integral_cong)
(auto simp: indicator_def divide_ennreal ennreal_mult' [symmetric] exp_add)
also have "\<dots> = (\<integral>\<^sup>+t. \<integral>\<^sup>+u. ennreal (indicator ({0..}\<times>{t..}) (t,u) * t powr (a - 1) *
(u - t) powr (b - 1) / exp u) \<partial>lborel \<partial>lborel)"
proof (rule nn_integral_cong, goal_cases)
case (1 t)
have "(\<integral>\<^sup>+u. ennreal (indicator ({0..}\<times>{0..}) (t,u) * t powr (a - 1) *
u powr (b - 1) / exp (t + u)) \<partial>distr lborel borel ((+) (-t))) =
(\<integral>\<^sup>+u. ennreal (indicator ({0..}\<times>{t..}) (t,u) * t powr (a - 1) *
(u - t) powr (b - 1) / exp u) \<partial>lborel)"
by (subst nn_integral_distr) (auto intro!: nn_integral_cong simp: indicator_def)
thus ?case by (subst (asm) lborel_distr_plus)
qed
also have "\<dots> = (\<integral>\<^sup>+u. \<integral>\<^sup>+t. ennreal (indicator ({0..}\<times>{t..}) (t,u) * t powr (a - 1) *
(u - t) powr (b - 1) / exp u) \<partial>lborel \<partial>lborel)"
by (subst lborel_pair.Fubini')
(auto simp: case_prod_unfold indicator_def cong: measurable_cong_sets)
also have "\<dots> = (\<integral>\<^sup>+u. \<integral>\<^sup>+t. ennreal (indicator {0..u} t * t powr (a - 1) * (u - t) powr (b - 1)) *
ennreal (indicator {0..} u / exp u) \<partial>lborel \<partial>lborel)"
by (intro nn_integral_cong) (auto simp: indicator_def ennreal_mult' [symmetric])
also have "\<dots> = (\<integral>\<^sup>+u. (\<integral>\<^sup>+t. ennreal (indicator {0..u} t * t powr (a - 1) * (u - t) powr (b - 1))
\<partial>lborel) * ennreal (indicator {0..} u / exp u) \<partial>lborel)"
by (subst nn_integral_multc [symmetric]) auto
also have "\<dots> = (\<integral>\<^sup>+u. (\<integral>\<^sup>+t. ennreal (indicator {0..u} t * t powr (a - 1) * (u - t) powr (b - 1))
\<partial>lborel) * ennreal (indicator {0<..} u / exp u) \<partial>lborel)"
by (intro nn_integral_cong_AE eventually_mono[OF AE_lborel_singleton[of 0]])
(auto simp: indicator_def)
also have "\<dots> = (\<integral>\<^sup>+u. ennreal B * ennreal (indicator {0..} u / exp u * u powr (a + b - 1)) \<partial>lborel)"
proof (intro nn_integral_cong, goal_cases)
case (1 u)
show ?case
proof (cases "u > 0")
case True
have "(\<integral>\<^sup>+t. ennreal (indicator {0..u} t * t powr (a - 1) * (u - t) powr (b - 1)) \<partial>lborel) =
(\<integral>\<^sup>+t. ennreal (indicator {0..1} t * (u * t) powr (a - 1) * (u - u * t) powr (b - 1))
\<partial>distr lborel borel ((*) (1 / u)))" (is "_ = nn_integral _ ?f")
using True
by (subst nn_integral_distr) (auto simp: indicator_def field_simps intro!: nn_integral_cong)
also have "distr lborel borel ((*) (1 / u)) = density lborel (\<lambda>_. u)"
using \<open>u > 0\<close> by (subst lborel_distr_mult) auto
also have "nn_integral \<dots> ?f = (\<integral>\<^sup>+x. ennreal (indicator {0..1} x * (u * (u * x) powr (a - 1) *
(u * (1 - x)) powr (b - 1))) \<partial>lborel)" using \<open>u > 0\<close>
by (subst nn_integral_density) (auto simp: ennreal_mult' [symmetric] algebra_simps)
also have "\<dots> = (\<integral>\<^sup>+x. ennreal (u powr (a + b - 1)) *
ennreal (indicator {0..1} x * x powr (a - 1) *
(1 - x) powr (b - 1)) \<partial>lborel)" using \<open>u > 0\<close> a b
by (intro nn_integral_cong)
(auto simp: indicator_def powr_mult powr_add powr_diff mult_ac ennreal_mult' [symmetric])
also have "\<dots> = ennreal (u powr (a + b - 1)) *
(\<integral>\<^sup>+x. ennreal (indicator {0..1} x * x powr (a - 1) *
(1 - x) powr (b - 1)) \<partial>lborel)"
by (subst nn_integral_cmult) auto
also have "((\<lambda>x. x powr (a - 1) * (1 - x) powr (b - 1)) has_integral
integral {0..1} (\<lambda>x. x powr (a - 1) * (1 - x) powr (b - 1))) {0..1}"
using a b by (intro integrable_integral integrable_Beta')
from nn_integral_has_integral_lebesgue[OF _ this] a b
have "(\<integral>\<^sup>+x. ennreal (indicator {0..1} x * x powr (a - 1) *
(1 - x) powr (b - 1)) \<partial>lborel) = B" by (simp add: mult_ac B_def)
finally show ?thesis using \<open>u > 0\<close> by (simp add: ennreal_mult' [symmetric] mult_ac)
qed auto
qed
also have "\<dots> = ennreal B * ennreal (Gamma (a + b))"
using a b by (subst nn_integral_cmult) (auto simp: Gamma_conv_nn_integral_real)
also have "\<dots> = ennreal (B * Gamma (a + b))"
by (subst (1 2) mult.commute, intro ennreal_mult' [symmetric]) (use a b in auto)
finally have "B = Beta a b" using a b Gamma_real_pos[of "a + b"]
by (subst (asm) ennreal_inj) (auto simp: field_simps Beta_def Gamma_eq_zero_iff)
moreover have "(\<lambda>t. t powr (a - 1) * (1 - t) powr (b - 1)) integrable_on {0..1}"
by (intro integrable_Beta' a b)
ultimately show ?thesis by (simp add: has_integral_iff B_def)
qed
subsection \<open>The Weierstra{\ss} product formula for the sine\<close>
(* Weierstrass product formula for the sine on the complex plane:
   pi * z * prod_{k=1..n} (1 - z^2/k^2) tends to sin (pi * z).
   Proof idea: combine the Weierstrass product for 1/Gamma at z and -z
   with the reflection formula for rGamma. *)
theorem sin_product_formula_complex:
fixes z :: complex
shows "(\<lambda>n. of_real pi * z * (\<Prod>k=1..n. 1 - z^2 / of_nat k^2)) \<longlonglongrightarrow> sin (of_real pi * z)"
proof -
let ?f = "rGamma_series_Weierstrass"
have "(\<lambda>n. (- of_real pi * inverse z) * (?f z n * ?f (- z) n))
\<longlonglongrightarrow> (- of_real pi * inverse z) * (rGamma z * rGamma (- z))"
by (intro tendsto_intros rGamma_Weierstrass_complex)
also have "(\<lambda>n. (- of_real pi * inverse z) * (?f z n * ?f (-z) n)) =
(\<lambda>n. of_real pi * z * (\<Prod>k=1..n. 1 - z^2 / of_nat k ^ 2))"
proof
fix n :: nat
(* Pair the factors of the two Weierstrass products: the exponentials cancel. *)
have "(- of_real pi * inverse z) * (?f z n * ?f (-z) n) =
of_real pi * z * (\<Prod>k=1..n. (of_nat k - z) * (of_nat k + z) / of_nat k ^ 2)"
by (simp add: rGamma_series_Weierstrass_def mult_ac exp_minus
divide_simps prod.distrib[symmetric] power2_eq_square)
also have "(\<Prod>k=1..n. (of_nat k - z) * (of_nat k + z) / of_nat k ^ 2) =
(\<Prod>k=1..n. 1 - z^2 / of_nat k ^ 2)"
by (intro prod.cong) (simp_all add: power2_eq_square field_simps)
finally show "(- of_real pi * inverse z) * (?f z n * ?f (-z) n) = of_real pi * z * \<dots>"
by (simp add: field_split_simps)
qed
also have "(- of_real pi * inverse z) * (rGamma z * rGamma (- z)) = sin (of_real pi * z)"
by (subst rGamma_reflection_complex') (simp add: field_split_simps)
finally show ?thesis .
qed
(* Real version of the sine product formula, obtained by embedding into
   the complex numbers and taking real parts. *)
lemma sin_product_formula_real:
"(\<lambda>n. pi * (x::real) * (\<Prod>k=1..n. 1 - x^2 / of_nat k^2)) \<longlonglongrightarrow> sin (pi * x)"
proof -
from sin_product_formula_complex[of "of_real x"]
have "(\<lambda>n. of_real pi * of_real x * (\<Prod>k=1..n. 1 - (of_real x)^2 / (of_nat k)^2))
\<longlonglongrightarrow> sin (of_real pi * of_real x :: complex)" (is "?f \<longlonglongrightarrow> ?y") .
also have "?f = (\<lambda>n. of_real (pi * x * (\<Prod>k=1..n. 1 - x^2 / (of_nat k^2))))" by simp
also have "?y = of_real (sin (pi * x))" by (simp only: sin_of_real [symmetric] of_real_mult)
finally show ?thesis by (subst (asm) tendsto_of_real_iff)
qed
(* Normalised form: for x \<noteq> 0 the partial products alone tend to
   sin(pi x) / (pi x). *)
lemma sin_product_formula_real':
assumes "x \<noteq> (0::real)"
shows "(\<lambda>n. (\<Prod>k=1..n. 1 - x^2 / of_nat k^2)) \<longlonglongrightarrow> sin (pi * x) / (pi * x)"
using tendsto_divide[OF sin_product_formula_real[of x] tendsto_const[of "pi * x"]] assms
by simp
(* Wallis product: prod_{k=1..n} 4k^2/(4k^2 - 1) tends to pi/2.
   This is the instance x = 1/2 of the sine product formula, inverted. *)
theorem wallis: "(\<lambda>n. \<Prod>k=1..n. (4*real k^2) / (4*real k^2 - 1)) \<longlonglongrightarrow> pi / 2"
proof -
from tendsto_inverse[OF tendsto_mult[OF
sin_product_formula_real[of "1/2"] tendsto_const[of "2/pi"]]]
have "(\<lambda>n. (\<Prod>k=1..n. inverse (1 - (1/2)\<^sup>2 / (real k)\<^sup>2))) \<longlonglongrightarrow> pi/2"
by (simp add: prod_inversef [symmetric])
also have "(\<lambda>n. (\<Prod>k=1..n. inverse (1 - (1/2)\<^sup>2 / (real k)\<^sup>2))) =
(\<lambda>n. (\<Prod>k=1..n. (4*real k^2)/(4*real k^2 - 1)))"
by (intro ext prod.cong refl) (simp add: field_split_simps)
finally show ?thesis .
qed
subsection \<open>The Solution to the Basel problem\<close>
(* The Basel problem: sum 1/n^2 = pi^2/6, derived from the sine product
   formula.  Strategy: define f x = sum_n P x n / (n+1)^2 with P the
   partial sine products; f is continuous at 0 with f 0 = K (the Basel
   sum), and f also tends to pi^2/6 at 0 via the power series of sin,
   so K = pi^2/6 by uniqueness of limits. *)
theorem inverse_squares_sums: "(\<lambda>n. 1 / (n + 1)\<^sup>2) sums (pi\<^sup>2 / 6)"
proof -
define P where "P x n = (\<Prod>k=1..n. 1 - x^2 / of_nat k^2)" for x :: real and n
define K where "K = (\<Sum>n. inverse (real_of_nat (Suc n))^2)"
define f where [abs_def]: "f x = (\<Sum>n. P x n / of_nat (Suc n)^2)" for x
define g where [abs_def]: "g x = (1 - sin (pi * x) / (pi * x))" for x
(* Telescoping: the series defining f x sums to g x / x^2 (K at x = 0). *)
have sums: "(\<lambda>n. P x n / of_nat (Suc n)^2) sums (if x = 0 then K else g x / x^2)" for x
proof (cases "x = 0")
assume x: "x = 0"
have "summable (\<lambda>n. inverse ((real_of_nat (Suc n))\<^sup>2))"
using inverse_power_summable[of 2] by (subst summable_Suc_iff) simp
thus ?thesis by (simp add: x g_def P_def K_def inverse_eq_divide power_divide summable_sums)
next
assume x: "x \<noteq> 0"
have "(\<lambda>n. P x n - P x (Suc n)) sums (P x 0 - sin (pi * x) / (pi * x))"
unfolding P_def using x by (intro telescope_sums' sin_product_formula_real')
also have "(\<lambda>n. P x n - P x (Suc n)) = (\<lambda>n. (x^2 / of_nat (Suc n)^2) * P x n)"
unfolding P_def by (simp add: prod.nat_ivl_Suc' algebra_simps)
also have "P x 0 = 1" by (simp add: P_def)
finally have "(\<lambda>n. x\<^sup>2 / (of_nat (Suc n))\<^sup>2 * P x n) sums (1 - sin (pi * x) / (pi * x))" .
from sums_divide[OF this, of "x^2"] x show ?thesis unfolding g_def by simp
qed
(* Continuity of f near 0 via the Weierstrass M-test. *)
have "continuous_on (ball 0 1) f"
proof (rule uniform_limit_theorem; (intro always_eventually allI)?)
show "uniform_limit (ball 0 1) (\<lambda>n x. \<Sum>k<n. P x k / of_nat (Suc k)^2) f sequentially"
proof (unfold f_def, rule Weierstrass_m_test)
fix n :: nat and x :: real assume x: "x \<in> ball 0 1"
{
fix k :: nat assume k: "k \<ge> 1"
from x have "x^2 < 1" by (auto simp: abs_square_less_1)
also from k have "\<dots> \<le> of_nat k^2" by simp
finally have "(1 - x^2 / of_nat k^2) \<in> {0..1}" using k
by (simp_all add: field_simps del: of_nat_Suc)
}
hence "(\<Prod>k=1..n. abs (1 - x^2 / of_nat k^2)) \<le> (\<Prod>k=1..n. 1)" by (intro prod_mono) simp
thus "norm (P x n / (of_nat (Suc n)^2)) \<le> 1 / of_nat (Suc n)^2"
unfolding P_def by (simp add: field_simps abs_prod del: of_nat_Suc)
qed (subst summable_Suc_iff, insert inverse_power_summable[of 2], simp add: inverse_eq_divide)
qed (auto simp: P_def intro!: continuous_intros)
hence "isCont f 0" by (subst (asm) continuous_on_eq_continuous_at) simp_all
hence "(f \<midarrow> 0 \<rightarrow> f 0)" by (simp add: isCont_def)
also have "f 0 = K" unfolding f_def P_def K_def by (simp add: inverse_eq_divide power_divide)
finally have "f \<midarrow> 0 \<rightarrow> K" .
(* Second evaluation of the same limit, via the power series of sin. *)
moreover have "f \<midarrow> 0 \<rightarrow> pi^2 / 6"
proof (rule Lim_transform_eventually)
define f' where [abs_def]: "f' x = (\<Sum>n. - sin_coeff (n+3) * pi ^ (n+2) * x^n)" for x
have "eventually (\<lambda>x. x \<noteq> (0::real)) (at 0)"
by (auto simp add: eventually_at intro!: exI[of _ 1])
thus "eventually (\<lambda>x. f' x = f x) (at 0)"
proof eventually_elim
fix x :: real assume x: "x \<noteq> 0"
have "sin_coeff 1 = (1 :: real)" "sin_coeff 2 = (0::real)" by (simp_all add: sin_coeff_def)
with sums_split_initial_segment[OF sums_minus[OF sin_converges], of 3 "pi*x"]
have "(\<lambda>n. - (sin_coeff (n+3) * (pi*x)^(n+3))) sums (pi * x - sin (pi*x))"
by (simp add: eval_nat_numeral)
from sums_divide[OF this, of "x^3 * pi"] x
have "(\<lambda>n. - (sin_coeff (n+3) * pi^(n+2) * x^n)) sums ((1 - sin (pi*x) / (pi*x)) / x^2)"
by (simp add: field_split_simps eval_nat_numeral)
with x have "(\<lambda>n. - (sin_coeff (n+3) * pi^(n+2) * x^n)) sums (g x / x^2)"
by (simp add: g_def)
hence "f' x = g x / x^2" by (simp add: sums_iff f'_def)
also have "\<dots> = f x" using sums[of x] x by (simp add: sums_iff g_def f_def)
finally show "f' x = f x" .
qed
have "isCont f' 0" unfolding f'_def
proof (intro isCont_powser_converges_everywhere)
fix x :: real show "summable (\<lambda>n. -sin_coeff (n+3) * pi^(n+2) * x^n)"
proof (cases "x = 0")
assume x: "x \<noteq> 0"
from summable_divide[OF sums_summable[OF sums_split_initial_segment[OF
sin_converges[of "pi*x"]], of 3], of "-pi*x^3"] x
show ?thesis by (simp add: field_split_simps eval_nat_numeral)
qed (simp only: summable_0_powser)
qed
hence "f' \<midarrow> 0 \<rightarrow> f' 0" by (simp add: isCont_def)
also have "f' 0 = pi * pi / fact 3" unfolding f'_def
by (subst powser_zero) (simp add: sin_coeff_def)
finally show "f' \<midarrow> 0 \<rightarrow> pi^2 / 6" by (simp add: eval_nat_numeral)
qed
ultimately have "K = pi^2 / 6" by (rule LIM_unique)
moreover from inverse_power_summable[of 2]
have "summable (\<lambda>n. (inverse (real_of_nat (Suc n)))\<^sup>2)"
by (subst summable_Suc_iff) (simp add: power_inverse)
ultimately show ?thesis unfolding K_def
by (auto simp add: sums_iff power_divide inverse_eq_divide)
qed
end
|
{"author": "seL4", "repo": "isabelle", "sha": "e1ab32a3bb41728cd19541063283e37919978a4c", "save_path": "github-repos/isabelle/seL4-isabelle", "path": "github-repos/isabelle/seL4-isabelle/isabelle-e1ab32a3bb41728cd19541063283e37919978a4c/src/HOL/Analysis/Gamma_Function.thy"}
|
# Re-exported imports
import pandas as pd
from numpy import nan, where
from re import sub
# Hidden imports
import builtins as _builtins
from inspect import stack as _stack
from keyword import iskeyword as _iskeyword
from pkg_resources import get_distribution as _get_distribution
from sys import stderr as _logfile
__version__ = _get_distribution("unitable").version
# Global data frame
# Singleton state: every public function in this module reads or replaces
# this frame, Stata-style. User code never touches it directly.
_df = pd.DataFrame()
# Utility functions for manipulating caller's locals
# Snapshot of the builtin names; deliberately rebinds the `_builtins`
# module alias — the module object is not needed after this point.
_builtins = frozenset(dir(_builtins))
def _generate(name):
    """
    Expose column *name* of the global data frame as a variable in the
    end user's local namespace, after validating the name.

    Raises ValueError if *name* is a Python keyword, shadows a builtin,
    is already bound in that namespace, or is not a valid identifier.

    NOTE(review): _stack()[2] assumes exactly one public API frame
    between the user's code and this helper; calling _generate from any
    other depth targets the wrong frame. Do not wrap calls to it in
    additional helpers or comprehensions (which add a frame).
    NOTE(review): writing to f_locals only persists when the caller's
    frame is module level (f_locals is then the module's globals);
    inside a function the write is discarded on CPython < 3.13.
    """
    _locals = _stack()[2][0].f_locals
    if _iskeyword(name):
        raise ValueError("cannot name variable '{}' because it is a Python keyword".format(name))
    if name in _builtins:
        raise ValueError("cannot name variable '{}' because it is a Python builtin".format(name))
    if name in _locals:
        raise ValueError("cannot name variable '{}' because that name is already in use".format(name))
    if not name.isidentifier():
        raise ValueError("cannot name variable '{}' because it is an invalid Python variable name".format(name))
    # Bind the pandas Series for this column under the column's name.
    _locals[name] = _df[name]
def _drop(name):
    """
    Remove variable *name* from the end user's local namespace.

    Only unbinds when the name is both bound in that namespace AND still
    a column of the global frame; otherwise raises ValueError. The
    column itself is NOT removed here — callers handle the frame.

    NOTE(review): same _stack()[2] depth assumption as _generate — must
    be called directly from a public API function invoked by user code.
    """
    global _df
    _locals = _stack()[2][0].f_locals
    if name in _locals and name in _df.columns:
        _locals.pop(name)
    else:
        raise ValueError("cannot drop variable '{}' because it is not currently loaded".format(name))
def _get_name(obj):
if isinstance(obj, str) or isinstance(obj, bytes):
return str(obj)
elif hasattr(obj, "name"):
return obj.name
else:
raise ValueError("unknown variable '{}'".format(str(obj)))
# DataFrame
def input(values):
    """
    Load *values* (anything the pandas DataFrame constructor accepts) as
    the global data frame, replacing whatever was loaded before, and
    bind each column as a variable in the caller's namespace.

    NOTE: deliberately shadows the ``input`` builtin — this module is a
    Stata-like DSL; ``data_frame`` is provided as a non-shadowing alias.
    """
    global _df
    # Unbind the previous columns first (while they are still in _df.columns).
    for name in _df.columns: _drop(name)
    _df = pd.DataFrame(values)
    for name in _df.columns: _generate(name)
    print("inputted", len(_df.columns), "variables and", len(_df), "observations", file=_logfile)
# Alias that avoids shadowing the builtin.
data_frame = input
def clear():
    """
    Drop every loaded variable and reset the global data frame to empty.

    Bug fix: the variables must be dropped *before* the frame is
    replaced — _drop() refuses to unbind a name that is no longer a
    column of the current frame (``name in _df.columns``), so the old
    order (reset first, drop second) raised ValueError for every loaded
    variable instead of clearing them.
    """
    global _df
    unkept = _df.columns.tolist()
    # Unbind while the columns still exist, then reset the frame.
    for name in unkept: _drop(name)
    _df = pd.DataFrame()
    print("dropped", len(unkept), "variables", file=_logfile)
# Input/Output
def _sanitize_name(name):
return sub(r"[^A-Za-z0-9]", "_", name)
def read_csv(filename, **kwargs):
    """
    Load a comma-separated file as the data frame; column names are
    sanitized to identifier-safe form, and each column is bound as a
    variable in the caller's namespace.

    NOTE(review): the four read_* functions repeat the same body on
    purpose — _drop/_generate read the caller's frame at a fixed stack
    depth (_stack()[2]), so extracting a shared helper would change the
    call depth and target the wrong frame. Keep them duplicated.
    """
    global _df
    for name in _df.columns: _drop(name)
    _df = pd.read_csv(filename, **kwargs)
    _df.columns = list(map(_sanitize_name, _df.columns))
    for name in _df.columns: _generate(name)
    print("read", len(_df.columns), "variables from", filename, file=_logfile)
def read_tsv(filename, **kwargs):
    """Load a tab-separated file as the data frame (see read_csv)."""
    global _df
    for name in _df.columns: _drop(name)
    _df = pd.read_csv(filename, sep="\t", **kwargs)
    _df.columns = list(map(_sanitize_name, _df.columns))
    for name in _df.columns: _generate(name)
    print("read", len(_df.columns), "variables from", filename, file=_logfile)
def read_fwf(filename, **kwargs):
    """Load a fixed-width file as the data frame (see read_csv)."""
    global _df
    for name in _df.columns: _drop(name)
    _df = pd.read_fwf(filename, **kwargs)
    _df.columns = list(map(_sanitize_name, _df.columns))
    for name in _df.columns: _generate(name)
    print("read", len(_df.columns), "variables from", filename, file=_logfile)
def read_excel(filename, **kwargs):
    """Load an Excel sheet as the data frame (see read_csv)."""
    global _df
    for name in _df.columns: _drop(name)
    _df = pd.read_excel(filename, **kwargs)
    _df.columns = list(map(_sanitize_name, _df.columns))
    for name in _df.columns: _generate(name)
    print("read", len(_df.columns), "variables from", filename, file=_logfile)
# Stata-style alias.
import_delimited = read_csv
def write_csv(filename, index=False, **kwargs):
    """Write the loaded variables to *filename* as CSV.

    The row index is omitted by default; floats are rendered with the
    compact %g format. Extra kwargs pass through to DataFrame.to_csv.
    """
    _df.to_csv(filename, float_format="%g", index=index, **kwargs)
    print("wrote", len(_df.columns), "variables to", filename, file=_logfile)
def write_tsv(filename, index=False, **kwargs):
    """Write the loaded variables to *filename* as TSV (see write_csv)."""
    _df.to_csv(filename, sep="\t", float_format="%g", index=index, **kwargs)
    print("wrote", len(_df.columns), "variables to", filename, file=_logfile)
# Stata-style alias.
export_delimited = write_csv
# Column Operations
def generate(name, value):
    """
    Create a new variable *name* with *value* (scalar or sequence) as a
    column of the data frame and bind it in the caller's namespace.
    Raises ValueError (from _generate) if the name is invalid or taken.
    """
    global _df
    _df.loc[:, name] = value
    _generate(name)
def replace(variable, value):
    """
    Overwrite an existing variable's values with *value* and rebind the
    caller's name to the new column contents. *variable* may be a name
    or a Series.
    """
    global _df
    name = _get_name(variable)
    # Unbind first so _generate's "already in use" check passes.
    _drop(name)
    _df.loc[:, name] = value
    _generate(name)
def drop(variable):
    """
    Remove a variable entirely: unbind it from the caller's namespace
    and delete its column from the data frame.
    """
    global _df
    name = _get_name(variable)
    # Unbind before deleting: _drop validates against _df.columns.
    _drop(name)
    del _df[name]
def rename(variable, name):
    """
    Rename a variable, rebinding the caller's namespace entry to the
    new name. A no-op when the new name equals the old one.
    """
    global _df
    old_name = _get_name(variable)
    if old_name != name:
        _drop(old_name)
        _df.rename(columns={old_name: name}, inplace=True)
        _generate(name)
# Filtering
def list_if(condition):
    """Return the observations where the boolean mask *condition* holds,
    without modifying any loaded state."""
    matching = _df[condition]
    return matching
def keep_if(condition):
    """
    Keep only the observations satisfying the boolean mask *condition*,
    rebinding every variable in the caller's namespace to the filtered
    columns.
    """
    global _df
    n = len(_df)
    # These loops must stay direct calls: _drop/_generate inspect the
    # caller's frame at a fixed stack depth.
    for name in _df.columns: _drop(name)
    _df = _df.loc[condition, :]
    for name in _df.columns: _generate(name)
    print("kept", len(_df), "of", n, "observations", file=_logfile)
# Shadows the `filter` builtin by design (Stata-like vocabulary).
filter = keep_if
def drop_if(condition):
    """
    Drop the observations satisfying *condition* (complement of
    keep_if) and rebind the surviving variables.
    """
    global _df
    n = len(_df)
    for name in _df.columns: _drop(name)
    _df = _df.loc[~condition, :]
    for name in _df.columns: _generate(name)
    print("kept", len(_df), "of", n, "observations", file=_logfile)
def drop_duplicates(*args, **kwargs):
    """
    Drop duplicate observations (arguments pass through to
    pandas.DataFrame.drop_duplicates) and rebind the variables.
    """
    global _df
    n = len(_df)
    for name in _df.columns: _drop(name)
    _df = _df.drop_duplicates(*args, **kwargs)
    for name in _df.columns: _generate(name)
    print("kept", len(_df), "of", n, "observations", file=_logfile)
def keep(*variables):
    """
    Keep only the named variables (in the given order); every other
    column is removed from the frame and the caller's namespace.
    """
    global _df
    kept = list(map(_get_name, variables))
    kept_set = frozenset(kept)
    for name in _df.columns:
        if name not in kept_set: _drop(name)
    _df = _df[kept]
    print("kept", len(_df.columns), "variables", file=_logfile)
## Sorting by Values
def sort(*variables):
    """
    Sort observations ascending by the given variables and rebind every
    variable to the sorted columns. Shadows the `sorted`-adjacent
    builtin name `sort` by design.
    """
    global _df
    for name in _df.columns: _drop(name)
    _df = _df.sort_values(list(map(_get_name, variables)))
    for name in _df.columns: _generate(name)
# String Functions
## Finding Length of String
def strlen(variable):
    """Element-wise character count of a string variable (Series)."""
    lengths = variable.str.len()
    return lengths
## Finding Position of Substring
def strpos(variable, substr):
    """Element-wise lowest index of *substr* in each string (-1 when absent)."""
    positions = variable.str.find(substr)
    return positions
## Extracting Substring by Position
def substr(variable, start, end):
    """Element-wise slice [start:end) of each string (Python slicing rules)."""
    sliced = variable.str.slice(start, end)
    return sliced
## Extracting nth Word
def word(variable, n):
    """Element-wise n-th (0-based) space-separated word of each string."""
    tokens = variable.str.split(" ", expand=True)
    return tokens[n]
## Changing Case
def strupper(variable):
    """Element-wise uppercase conversion."""
    result = variable.str.upper()
    return result
def strlower(variable):
    """Element-wise lowercase conversion."""
    result = variable.str.lower()
    return result
def strproper(variable):
    """Element-wise title-casing (first letter of each word capitalised)."""
    result = variable.str.title()
    return result
# Merging
def merge(df, **kwargs):
    """
    Merge another DataFrame into the loaded one (kwargs pass through to
    pandas.DataFrame.merge) and rebind the resulting columns.
    """
    global _df
    for name in _df.columns: _drop(name)
    _df = _df.merge(df, **kwargs)
    for name in _df.columns: _generate(name)
def append(df, **kwargs):
    """
    Append the rows of *df* to the loaded data frame (index is reset)
    and rebind the resulting columns.

    Fixed: ``DataFrame.append`` was deprecated in pandas 1.4 and removed
    in pandas 2.0; ``pd.concat`` is the supported equivalent and
    produces the same result (extra kwargs such as ``sort`` still pass
    through).
    """
    global _df
    for name in _df.columns: _drop(name)
    _df = pd.concat([_df, df], ignore_index=True, **kwargs)
    for name in _df.columns: _generate(name)
# Missing Data
def dropna(**kwargs):
    """
    Drop observations containing missing values (kwargs pass through to
    pandas.DataFrame.dropna) and rebind the surviving variables.

    Fixed: log-message typo "observrations" -> "observations".
    """
    global _df
    n = len(_df)
    for name in _df.columns: _drop(name)
    _df = _df.dropna(**kwargs)
    for name in _df.columns: _generate(name)
    print("dropped", n - len(_df), "of", n, "observations", file=_logfile)
# Aggregation
def groupby(*variables):
    """Group the data frame by the given variables (names or Series)."""
    keys = [_get_name(v) for v in variables]
    return _df.groupby(keys)
# Dimensions
def nrow():
    """Number of observations currently loaded."""
    return _df.shape[0]
def ncol():
    """Number of variables currently loaded."""
    return _df.shape[1]
def col_names():
    """Names of the loaded variables, as a plain list."""
    return list(_df.columns)
def col_types():
    """pandas dtypes of the loaded variables."""
    return _df.dtypes
|
{"hexsha": "aed82bdb6d5e7bb1e53b6985e46629517a63277f", "size": 7699, "ext": "py", "lang": "Python", "max_stars_repo_path": "unitable/__init__.py", "max_stars_repo_name": "mhowison/unitable", "max_stars_repo_head_hexsha": "d0841c907b897ea5c77f488cd01b54c57b39f0b2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-20T21:38:02.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-20T21:38:02.000Z", "max_issues_repo_path": "unitable/__init__.py", "max_issues_repo_name": "mhowison/unitable", "max_issues_repo_head_hexsha": "d0841c907b897ea5c77f488cd01b54c57b39f0b2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "unitable/__init__.py", "max_forks_repo_name": "mhowison/unitable", "max_forks_repo_head_hexsha": "d0841c907b897ea5c77f488cd01b54c57b39f0b2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.098540146, "max_line_length": 113, "alphanum_fraction": 0.6550201325, "include": true, "reason": "from numpy", "num_tokens": 1931}
|
\documentclass[a4paper]{article}
\usepackage[left = .85in, right = .5in, top = 1in, bottom = 1in]{geometry}
\usepackage{listings}
\title{\Huge Assignment 8 \\
\Large Implementation of DLL Flow Control \\
Stop and Wait Protocol Using Java}
\begin{document}
\maketitle
\section{Abstract}
\subsection{}
\section{Algorithm}
\subsection{Sender}
\begin{enumerate}
\item Create Socket to connect to server.
\item Create object of ObjectOutputStream and ObjectInputStream to send and receive data from receiver.
\item When frameNoCount < msgSize
\begin{enumerate}
\item Create packet of data length 1, including seqNo.
\item Write frame in invoking Stream.
\item Store frame and increase seqNo then send the frame.
\item Wait for acknowledgement. if(ack == seqNo), send next frame.
\item When frameNoCount = msgSize, end msg.
\item if(ack != seqNo), resend data.
\end{enumerate}
\end{enumerate}
\subsection{Receiver}
\begin{enumerate}
\item Create socket and objects of ObjectOutputStream and ObjectInputStream.
\item Read frame from invoking stream.
\item If sequence number of frame is 1 which is expected, frame is accepted and acknowledgement is sent to sender, else this is duplicate.
\end{enumerate}
\end{document}
|
{"hexsha": "16436f08c3567fc220e7fc462c6e840c9e5005ba", "size": 1224, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Networking/assignment8/assignment8.tex", "max_stars_repo_name": "ANSEduGroup/6th-sem-labs", "max_stars_repo_head_hexsha": "705f3041190fba4fdb49a7dc18f1f1d8e10c1dbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Networking/assignment8/assignment8.tex", "max_issues_repo_name": "ANSEduGroup/6th-sem-labs", "max_issues_repo_head_hexsha": "705f3041190fba4fdb49a7dc18f1f1d8e10c1dbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-04-10T09:15:30.000Z", "max_issues_repo_issues_event_max_datetime": "2017-04-10T09:16:28.000Z", "max_forks_repo_path": "Networking/assignment8/assignment8.tex", "max_forks_repo_name": "ANSEduGroup/6th-sem-labs", "max_forks_repo_head_hexsha": "705f3041190fba4fdb49a7dc18f1f1d8e10c1dbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0, "max_line_length": 140, "alphanum_fraction": 0.7638888889, "num_tokens": 311}
|
! @@name: fort_sp_common.4f
! @@type: F-fixed
! @@compilable: no
! @@linkable: no
! @@expect: failure
! NOTE: this example is *deliberately* invalid (compiler test case): a
! common block may not appear in PRIVATE while one of its members is
! also listed in SHARED.  Do not "fix" it.
      SUBROUTINE COMMON_WRONG()
      COMMON /C/ X,Y
! Incorrect because X is a constituent element of C
!$OMP PARALLEL PRIVATE(/C/), SHARED(X) ! { error "PGF90-S-0155-x is used in multiple data sharing clauses" }
! do work here
!$OMP END PARALLEL
      END SUBROUTINE COMMON_WRONG
|
{"hexsha": "de2d0186a2774ee057f0983abbbb332b442a6097", "size": 400, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "test/openmp_examples/sources/Example_fort_sp_common.4f.f", "max_stars_repo_name": "kammerdienerb/flang", "max_stars_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 716, "max_stars_repo_stars_event_min_datetime": "2017-05-17T17:58:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T11:20:58.000Z", "max_issues_repo_path": "test/openmp_examples/sources/Example_fort_sp_common.4f.f", "max_issues_repo_name": "kammerdienerb/flang", "max_issues_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 794, "max_issues_repo_issues_event_min_datetime": "2017-05-18T19:27:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:22:11.000Z", "max_forks_repo_path": "test/openmp_examples/sources/Example_fort_sp_common.4f.f", "max_forks_repo_name": "kammerdienerb/flang", "max_forks_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 157, "max_forks_repo_forks_event_min_datetime": "2017-05-17T18:50:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:06:45.000Z", "avg_line_length": 30.7692307692, "max_line_length": 110, "alphanum_fraction": 0.655, "num_tokens": 125}
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import cv2
import numpy as np
from libs.label_name_dict.label_dict import NAME_LABEL_MAP
from libs.configs import cfgs
def max_length_limitation(length, length_limitation):
    """Clamp *length* to at most *length_limitation*.

    ``tf.minimum`` computes exactly what the original
    ``tf.cond(tf.less(length, limit), length, limit)`` expressed, while
    avoiding a conditional subgraph in the TF-1.x graph.
    """
    return tf.minimum(length, length_limitation)
def short_side_resize(img_tensor, gtboxes_and_label, target_shortside_len, length_limitation=1200):
    '''
    Resize so the shorter image side equals target_shortside_len while
    capping the longer side at length_limitation; the 8 corner
    coordinates of each ground-truth box are rescaled to match.

    :param img_tensor: [h, w, c], gtboxes_and_label: [-1, 9].
    :param target_shortside_len:
    :param length_limitation: set max length to avoid OUT OF MEMORY
    :return: (resized image [new_h, new_w, c], rescaled boxes [-1, 9], new_h, new_w)
    '''
    img_h, img_w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
    new_h, new_w = tf.cond(tf.less(img_h, img_w),
                           true_fn=lambda: (target_shortside_len,
                                            max_length_limitation(target_shortside_len * img_w // img_h, length_limitation)),
                           false_fn=lambda: (max_length_limitation(target_shortside_len * img_h // img_w, length_limitation),
                                             target_shortside_len))
    img_tensor = tf.expand_dims(img_tensor, axis=0)  # resize_bilinear expects a batch dim
    img_tensor = tf.image.resize_bilinear(img_tensor, [new_h, new_w])
    x1, y1, x2, y2, x3, y3, x4, y4, label = tf.unstack(gtboxes_and_label, axis=1)
    # Integer // truncates, so rescaled coordinates can shift by < 1 px.
    x1, x2, x3, x4 = x1 * new_w // img_w, x2 * new_w // img_w, x3 * new_w // img_w, x4 * new_w // img_w
    y1, y2, y3, y4 = y1 * new_h // img_h, y2 * new_h // img_h, y3 * new_h // img_h, y4 * new_h // img_h
    img_tensor = tf.squeeze(img_tensor, axis=0)  # ensure image tensor rank is 3
    return img_tensor, tf.transpose(tf.stack([x1, y1, x2, y2, x3, y3, x4, y4, label], axis=0)), new_h, new_w
def short_side_resize_for_inference_data(img_tensor, target_shortside_len, length_limitation=1200, is_resize=True):
    """
    Inference-time variant of short_side_resize: scales the image only
    (no boxes), short side to target_shortside_len, long side capped at
    length_limitation. Returns the input unchanged when is_resize is
    False.
    """
    if is_resize:
        img_h, img_w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
        new_h, new_w = tf.cond(tf.less(img_h, img_w),
                               true_fn=lambda: (target_shortside_len,
                                                max_length_limitation(target_shortside_len * img_w // img_h, length_limitation)),
                               false_fn=lambda: (max_length_limitation(target_shortside_len * img_h // img_w, length_limitation),
                                                 target_shortside_len))
        img_tensor = tf.expand_dims(img_tensor, axis=0)  # add batch dim for resize_bilinear
        img_tensor = tf.image.resize_bilinear(img_tensor, [new_h, new_w])
        img_tensor = tf.squeeze(img_tensor, axis=0)  # ensure image tensor rank is 3
    return img_tensor
def flip_left_to_right(img_tensor, gtboxes_and_label):
    """
    Mirror the image horizontally and reflect the box x-coordinates.

    NOTE(review): x is mirrored as ``w - x``, not ``w - 1 - x``;
    confirm the corner-based (rather than pixel-center) coordinate
    convention is intended, otherwise boxes shift by one pixel.
    """
    h, w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
    img_tensor = tf.image.flip_left_right(img_tensor)
    x1, y1, x2, y2, x3, y3, x4, y4, label = tf.unstack(gtboxes_and_label, axis=1)
    new_x1 = w - x1
    new_x2 = w - x2
    new_x3 = w - x3
    new_x4 = w - x4
    return img_tensor, tf.transpose(tf.stack([new_x1, y1, new_x2, y2, new_x3, y3, new_x4, y4, label], axis=0))
def random_flip_left_right(img_tensor, gtboxes_and_label):
    """With probability 0.5, mirror the image and its boxes horizontally."""
    coin = tf.random_uniform(shape=[], minval=0, maxval=1)
    result = tf.cond(tf.less(coin, 0.5),
                     lambda: flip_left_to_right(img_tensor, gtboxes_and_label),
                     lambda: (img_tensor, gtboxes_and_label))
    return result
def aspect_ratio_jittering(img_tensor, gtboxes_and_label, aspect_ratio=(0.8, 1.5)):
    """
    Resize the image to a randomly chosen aspect ratio in
    [aspect_ratio[0], aspect_ratio[1]) (step 0.025) while preserving
    the original pixel area; boxes are rescaled accordingly.

    :return: (image, boxes [-1, 9], new_h, new_w)
    """
    ratio_list = tf.range(aspect_ratio[0], aspect_ratio[1], delta=0.025)
    # Uniform random choice via shuffle-and-take-first.
    ratio = tf.random_shuffle(ratio_list)[0]
    img_h, img_w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
    areas = img_h * img_w
    areas = tf.cast(areas, tf.float32)
    # Solve short * long = area with long / short = ratio.
    short_side = tf.sqrt(areas / ratio)
    long_side = short_side * ratio
    short_side = tf.cast(short_side, tf.int32)
    long_side = tf.cast(long_side, tf.int32)
    # Preserve orientation: the originally longer side stays the longer one.
    image, gtbox, new_h, new_w = tf.cond(tf.less(img_w, img_h),
                                         true_fn=lambda: tf_resize_image(img_tensor, gtboxes_and_label, short_side,
                                                                         long_side),
                                         false_fn=lambda: tf_resize_image(img_tensor, gtboxes_and_label, long_side,
                                                                          short_side))
    return image, gtbox, new_h, new_w
def tf_resize_image(image, gtbox, rw, rh):
    """
    Bilinearly resize *image* to (rh, rw) and rescale the 4-corner
    boxes with integer (floor) division.

    :return: (image [rh, rw, c], boxes [-1, 9], rh, rw)
    """
    img_h, img_w = tf.shape(image)[0], tf.shape(image)[1]
    image = tf.image.resize_bilinear(tf.expand_dims(image, axis=0), (rh, rw))
    x1, y1, x2, y2, x3, y3, x4, y4, label = tf.unstack(gtbox, axis=1)
    # x-coordinates scale with the width ratio, y with the height ratio.
    new_x1 = x1 * rw // img_w
    new_x2 = x2 * rw // img_w
    new_x3 = x3 * rw // img_w
    new_x4 = x4 * rw // img_w
    new_y1 = y1 * rh // img_h
    new_y2 = y2 * rh // img_h
    new_y3 = y3 * rh // img_h
    new_y4 = y4 * rh // img_h
    gtbox = tf.transpose(tf.stack([new_x1, new_y1, new_x2, new_y2, new_x3, new_y3, new_x4, new_y4, label], axis=0))
    return tf.squeeze(image, axis=0), gtbox, rh, rw
def flip_up_down(img_tensor, gtboxes_and_label):
    """
    Flip the image vertically and reflect the box y-coordinates.

    NOTE(review): y is mirrored as ``h - y``, not ``h - 1 - y`` — same
    coordinate-convention caveat as flip_left_to_right.
    """
    h, w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
    img_tensor = tf.image.flip_up_down(img_tensor)
    x1, y1, x2, y2, x3, y3, x4, y4, label = tf.unstack(gtboxes_and_label, axis=1)
    new_y1 = h - y1
    new_y2 = h - y2
    new_y3 = h - y3
    new_y4 = h - y4
    return img_tensor, tf.transpose(tf.stack([x1, new_y1, x2, new_y2, x3, new_y3, x4, new_y4, label], axis=0))
def random_flip_up_down(img_tensor, gtboxes_and_label):
    """With probability 0.5, flip the image and its boxes vertically."""
    coin = tf.random_uniform(shape=[], minval=0, maxval=1)
    result = tf.cond(tf.less(coin, 0.5),
                     lambda: flip_up_down(img_tensor, gtboxes_and_label),
                     lambda: (img_tensor, gtboxes_and_label))
    return result
def random_rgb2gray(img_tensor, gtboxes_and_label):
    '''
    With probability 0.3, convert the image to 3-channel grayscale using
    the ITU-R BT.601 luma weights (0.299, 0.587, 0.114); otherwise
    return it unchanged. Boxes are passed only for the (disabled)
    dataset-specific skip logic.

    :param img_tensor: tf.float32
    :return: image tensor, same shape, tf.float32
    '''
    def rgb2gray(img, gtboxes_and_label):
        label = gtboxes_and_label[:, -1]
        #if cfgs.DATASET_NAME.startswith('DOTA'):
        #    if NAME_LABEL_MAP['swimming-pool'] in label:
        #        # do not change color, because swimming-pool need color
        #        return img
        # Randomness lives in numpy inside the py_func, not in the graph.
        coin = np.random.rand()
        if coin < 0.3:
            img = np.asarray(img, dtype=np.float32)
            r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
            gray = r * 0.299 + g * 0.587 + b * 0.114
            img = np.stack([gray, gray, gray], axis=2)
            return img
        else:
            return img
    h, w, c = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1], tf.shape(img_tensor)[2]
    img_tensor = tf.py_func(rgb2gray,
                            inp=[img_tensor, gtboxes_and_label],
                            Tout=tf.float32)
    # py_func drops static shape info; restore the rank-3 layout.
    img_tensor = tf.reshape(img_tensor, shape=[h, w, c])
    return img_tensor
def rotate_img_np(img, gtboxes_and_label, r_theta):
    """
    Rotate an image by r_theta degrees about its center, enlarging the
    canvas so nothing is clipped, and apply the same affine transform
    to the 4-corner ground-truth boxes.

    :param img: [h, w, c] numpy image
    :param gtboxes_and_label: [-1, 9] array (x1, y1, ..., x4, y4, label)
    :param r_theta: rotation angle in degrees
    :return: (rotated image, transformed int32 boxes [-1, 9])
    """
    h, w, c = img.shape
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, r_theta, 1.0)
    cos, sin = np.abs(M[0, 0]), np.abs(M[0, 1])
    nW, nH = int(h*sin + w*cos), int(h*cos + w*sin)  # new W and new H
    # Translate so the rotated content is centered in the enlarged canvas.
    M[0, 2] += (nW/2) - center[0]
    M[1, 2] += (nH/2) - center[1]
    rotated_img = cv2.warpAffine(img, M, (nW, nH))
    # -------
    # Apply the same 2x3 affine M to every corner (x, y, 1).
    new_points_list = []
    obj_num = len(gtboxes_and_label)
    for st in range(0, 7, 2):
        points = gtboxes_and_label[:, st:st+2]
        expand_points = np.concatenate((points, np.ones(shape=(obj_num, 1))), axis=1)
        new_points = np.dot(M, expand_points.T)
        new_points = new_points.T
        new_points_list.append(new_points)
    gtboxes = np.concatenate(new_points_list, axis=1)
    gtboxes_and_label = np.concatenate((gtboxes, gtboxes_and_label[:, -1].reshape(-1, 1)), axis=1)
    gtboxes_and_label = np.asarray(gtboxes_and_label, dtype=np.int32)
    return rotated_img, gtboxes_and_label
def rotate_img(img_tensor, gtboxes_and_label):
    """Rotate the image and boxes by a random angle from
    {-90, -75, ..., 75, 90} degrees.

    Bug fix: the range limit was ``90 + 16`` = 106, so the candidate
    list also contained 105 (= -90 + 15*13), one angle beyond the set
    enumerated in the comment. ``90 + 15`` = 105 is exclusive, making
    the last candidate exactly 90 as intended.
    """
    # thetas = tf.constant([-30, -60, -90, 30, 60, 90])
    thetas = tf.range(-90, 90 + 15, delta=15)
    # -90, -75, -60, -45, -30, -15, 0, 15, 30, 45, 60, 75, 90
    theta = tf.random_shuffle(thetas)[0]
    img_tensor, gtboxes_and_label = tf.py_func(rotate_img_np,
                                               inp=[img_tensor, gtboxes_and_label, theta],
                                               Tout=[tf.float32, tf.int32])
    # py_func drops static shape info; restore rank and box layout.
    h, w, c = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1], tf.shape(img_tensor)[2]
    img_tensor = tf.reshape(img_tensor, [h, w, c])
    gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 9])
    return img_tensor, gtboxes_and_label
def random_rotate_img(img_tensor, gtboxes_and_label):
    """With probability 0.6, rotate the image and boxes by a random
    multiple of 15 degrees (see rotate_img)."""
    coin = tf.random_uniform(shape=[], minval=0, maxval=1)
    result = tf.cond(tf.less(coin, 0.6),
                     lambda: rotate_img(img_tensor, gtboxes_and_label),
                     lambda: (img_tensor, gtboxes_and_label))
    return result
|
{"hexsha": "863c17d66d3cacc5f614095e275b6c16c59da6ee", "size": 9345, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/io/image_preprocess_multi_gpu.py", "max_stars_repo_name": "RomStriker/R3Det_Tensorflow", "max_stars_repo_head_hexsha": "34bad1a99d4472281f2653448cdd43378f06f753", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/io/image_preprocess_multi_gpu.py", "max_issues_repo_name": "RomStriker/R3Det_Tensorflow", "max_issues_repo_head_hexsha": "34bad1a99d4472281f2653448cdd43378f06f753", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/io/image_preprocess_multi_gpu.py", "max_forks_repo_name": "RomStriker/R3Det_Tensorflow", "max_forks_repo_head_hexsha": "34bad1a99d4472281f2653448cdd43378f06f753", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5974576271, "max_line_length": 127, "alphanum_fraction": 0.6071696094, "include": true, "reason": "import numpy", "num_tokens": 2641}
|
#ifndef TYPELIB_IOPLUGINS_HH
#define TYPELIB_IOPLUGINS_HH
#include <boost/type_traits/is_base_and_derived.hpp>
#include <boost/mpl/if.hpp>
namespace Typelib
{
class ExportPlugin;
class ImportPlugin;
class Exporter;
class Importer;
    /** Maps an importer/exporter implementation type to its plugin and
     *  object base classes.
     *
     *  If @c Type derives from Exporter, the plugin base is
     *  ExportPlugin and the object base Exporter; otherwise
     *  ImportPlugin / Importer are selected (any non-exporter is
     *  treated as an importer).
     */
    template<typename Type>
    struct plugin_traits
    {
        // Plugin wrapper base: ExportPlugin for exporters, ImportPlugin otherwise.
        typedef typename boost::mpl::if_
            < boost::is_base_and_derived<Exporter, Type>
            , ExportPlugin
            , ImportPlugin >::type plugin_base;
        // Object base instantiated by the plugin: Exporter or Importer.
        typedef typename boost::mpl::if_
            < boost::is_base_and_derived<Exporter, Type>
            , Exporter
            , Importer >::type object_base;
    };
template<typename Type>
class GenericIOPlugin
: public plugin_traits<Type>::plugin_base
{
public:
GenericIOPlugin(char const* name)
: plugin_traits<Type>::plugin_base(name) {}
typename plugin_traits<Type>::object_base* create()
{ return new Type; }
};
class TypeDefinitionPlugin
{
public:
virtual ~TypeDefinitionPlugin() {}
virtual void registerTypes(Typelib::Registry& registry) = 0;
};
}
#define TYPELIB_REGISTER_IO2(name, klass1, klass2) extern "C" void registerPlugins(Typelib::PluginManager& manager) {\
manager.add(new Typelib::GenericIOPlugin<klass1>(#name)); \
manager.add(new Typelib::GenericIOPlugin<klass2>(#name)); \
}
#define TYPELIB_REGISTER_IO1(name, klass1) extern "C" void registerPlugins(Typelib::PluginManager& manager) {\
manager.add(new Typelib::GenericIOPlugin<klass1>(#name)); \
}
#endif
|
{"hexsha": "0ff481cb0bf4992123da31753c49408d80016d96", "size": 1584, "ext": "hh", "lang": "C++", "max_stars_repo_path": "typelib/plugins.hh", "max_stars_repo_name": "maltewi/tools-typelib", "max_stars_repo_head_hexsha": "c0a28415b6cea4d5500a00d6e7003554d684748e", "max_stars_repo_licenses": ["CECILL-B"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2015-05-29T09:59:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-15T13:41:18.000Z", "max_issues_repo_path": "typelib/plugins.hh", "max_issues_repo_name": "maltewi/tools-typelib", "max_issues_repo_head_hexsha": "c0a28415b6cea4d5500a00d6e7003554d684748e", "max_issues_repo_licenses": ["CECILL-B"], "max_issues_count": 68.0, "max_issues_repo_issues_event_min_datetime": "2015-01-06T17:02:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-27T23:54:05.000Z", "max_forks_repo_path": "typelib/plugins.hh", "max_forks_repo_name": "rock-core/tools-typelib", "max_forks_repo_head_hexsha": "b15a6c8500db4add544a8ffd6d1d795c65c5283b", "max_forks_repo_licenses": ["CECILL-B"], "max_forks_count": 18.0, "max_forks_repo_forks_event_min_datetime": "2015-03-20T10:57:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-27T18:53:14.000Z", "avg_line_length": 27.3103448276, "max_line_length": 118, "alphanum_fraction": 0.663510101, "num_tokens": 375}
|
using TiledIteration: TileIterator
using FFTW: fft, dct
# Zero-insertion upsampling: place the samples of `x` every `factor` steps
# (starting at `offset .+ 1`) into a zero array of size `size(x) .* factor`.
function upsample(x::AbstractArray{T,D}, factor::NTuple{D}, offset::NTuple{D} = (fill(0,D)...,)) where {T,D}
    @assert all(0 .<= offset .< factor) "offset is out of range"
    outsize = factor .* size(x)
    out = zeros(T, outsize)
    out[StepRange.(offset .+ 1, factor, outsize)...] = x
    return out
end
# Decimation: keep every `factor`-th sample of `x`, starting at `offset .+ 1`.
function downsample(x::AbstractArray{T,D}, factor::NTuple{D}, offset::NTuple{D}=(fill(0,D)...,)) where {T,D}
    @assert all(0 .<= offset .< factor) "offset is out of range"
    idxs = StepRange.(offset .+ 1, factor, size(x))
    return x[idxs...]
end
# function downsampledview(x::AbstractArray{T,D}, factor::NTuple{D}, offset::NTuple{D}=(fill(0,D)...,)) where {T,D}
# @assert all(0 .<= offset .< factor) "offset is out of range"
# @view x[StepRange.(offset .+ 1, factor, size(x))...]
# end
# Dense matrix representation of a linear operator `f` on arrays of size `sz`:
# column j is vec(f(e_j)) where e_j is the j-th standard basis array.
representationmatrix(f, sz::NTuple) = representationmatrix(f, sz...)
function representationmatrix(f::Function, sz::Integer...)
    hcat([ setindex!(zeros(sz), 1, idx) |> f |> vec for idx in 1:prod(sz) ]...)
end
# Basis of 2^d Walsh atoms, each reshaped to a d-dimensional 2x...x2 array.
Base.@pure function haarbasis(d::Integer)
    w = walsh(d)
    @views [ reshape(w[p,:], fill(2,d)...) |> Array for p in 1:size(w, 1)]
end
# 2^n x 2^n Walsh (Hadamard-ordered) matrix; errors for negative n.
Base.@pure function walsh(n::Integer)
    # ifelse(n >= 0, sub_walsh(Val(n)), error("n must to be a positive")) # this code is not work correctly.
    if n >= 0; sub_walsh(Val(n)) else error("n must to be a positive") end
end
# Sylvester recursion: W(n) = [ W(n-1) W(n-1); W(n-1) -W(n-1) ].
function sub_walsh(::Val{N}) where {N}
    w = sub_walsh(Val(N-1))
    return [ w w ; w -w ]
end
# Base case of the recursion.
sub_walsh(::Val{0}) = 1
# matrix-formed CDFT operator for D-dimensional signal
# Matrix-formed centered DFT (CDFT) operator for a D-dimensional signal of
# size `sz`. Convenience dispatchers funnel into the AbstractFloat method.
cdftmtx(sz::NTuple) = cdftmtx(sz...)
cdftmtx(sz::Integer...) = cdftmtx(Float64, sz...)
cdftmtx(T::Type, sz::NTuple) = cdftmtx(T, sz...)
# NOTE(review): this fallback discards its first argument and always uses
# Float64 (e.g. cdftmtx(Int, 4)) -- confirm this is the intended behavior.
cdftmtx(::Type, sz::Integer...) = cdftmtx(Float64, sz...)
cdftmtx(::Type{Complex{T}}, sz...) where {T} = cdftmtx(T, sz...)
Base.@pure function cdftmtx(::Type{T}, sz::Integer...) where T<:AbstractFloat
    len = prod(sz)
    # DFT matrix built column-by-column from impulse responses.
    mtx = representationmatrix(x->fft(T.(x)), sz)
    # Half-phase rotation per row makes the operator "centered".
    rm = Diagonal(Complex{T}[ cis(-angle(mtx[n,end])/2) for n in 1:len ])
    # Normalize so the operator is unitary.
    complex(T).(rm * mtx / sqrt(len))
end
# Matrix-formed DCT operator with rows permuted so that even-symmetric
# (checkerboard-even) basis indices come first.
permdctmtx(sz::NTuple) = permdctmtx(sz...)
permdctmtx(sz::Integer...) = permdctmtx(Float64, sz...)
permdctmtx(T::Type, sz::NTuple) = permdctmtx(T, sz...)
# NOTE(review): like the cdftmtx fallback, this discards its first argument
# and always uses Float64 -- confirm intended.
permdctmtx(::Type, sz::Integer...) = permdctmtx(Float64, sz...)
Base.@pure function permdctmtx(::Type{T}, sz::Integer...) where T<:AbstractFloat
    mtx = representationmatrix(x->dct(T.(x)), sz)
    # true where the Cartesian index parity (sum of zero-based coords) is even.
    isevenids = map(ci->iseven(sum(ci.I .- 1)), CartesianIndices(sz)) |> vec
    # stable sort keeps relative order within the even and odd groups.
    permids = sortperm(isevenids; rev=true, alg=Base.DEFAULT_STABLE)
    @views vcat([ transpose(mtx[pi,:]) for pi in permids ]...)
end
# Build the P x P butterfly matrix B parameterized by fld(P,4) angles `angs`,
# assembled from 2x2 complex rotation blocks (block-diagonal C and S parts).
function getMatrixB(P::Integer, angs::AbstractVector{T}) where T
    @assert (length(angs) == fld(P,4)) "mismatch number of channels"
    hP = fld(P,2)
    # pre-shifted angles: (2a + pi) / 4
    psangs = (2 .* angs .+ pi) ./ 4
    ss, cs = sin.(psangs), cos.(psangs)
    LC = map(ss, cs) do s, c
        [ (-1im*c) (-1im*s); c (-s) ]
    end
    LS = map(ss, cs) do s, c
        [ s c; (1im*s) (-1im*c) ]
    end
    # 1x1 padding block present only when hP is odd.
    pbm = ones(fill(hP % 2,2)...)
    C = cat(LC..., pbm; dims=[1,2])
    S = cat(LS..., 1im*pbm; dims=[1,2])
    # 1/sqrt(2) normalization keeps the butterfly orthonormal.
    [ C conj(C); S conj(S) ] / sqrt(convert(T,2))
end
# Assemble the analysis filter bank matrix of an NSOLT by passing an impulse
# matrix through the initial step and the order-extension stages.
function analysisbank(nsolt::AbstractNsolt)
    M = prod(decimations(nsolt))
    ord = orders(nsolt)
    # create impulse signal matrix
    mtx0 = reverse(Matrix(I, M, M .* prod(ord .+ 1) ), dims=1)
    krncenter = initialStep(nsolt, mtx0 )
    # cumulative strides per dimension: (M, M*(ord1+1), ...)
    nStrides = (cumprod([ M, (ord[1:end-1] .+ 1)... ])...,)
    rotdimsfcns = (fill(identity, ndims(nsolt))...,)
    # :circular_traditional matches the SaivDr reference implementation.
    krnsym = extendAtoms(nsolt, krncenter, nStrides, rotdimsfcns, border=:circular_traditional)
    return shiftFilterSymmetry(nsolt, krnsym)
end
# compatible mode for SaivDr
# function analysisbank_compatible(nsolt::AbstractNsolt)
# #function analysisbank(nsolt::AbstractNsolt)
# M = prod(decimations(nsolt))
# ord = orders(nsolt)
#
# # create inpulse signal matrix
# mtx0 = reverse(Matrix(I, M, M .* prod(ord .+ 1) ), dims=1)
# # mtx0 = circshift(mtx0, (0, -M))
# krncenter = initialStep(nsolt, mtx0 )
#
# nStrides = (cumprod([ M, (ord[1:end-1] .+ 1)... ])...,)
# rotdimsfcns = (fill(identity, ndims(nsolt))...,)
# krnsym = extendAtoms(nsolt, krncenter, nStrides, rotdimsfcns, border=:circular_traditional)
#
# return shiftFilterSymmetry(nsolt, krnsym)
# end
# Pair of (analysis, synthesis) kernel sets for a polyphase filter bank.
kernels(pfb::PolyphaseFB) = (analysiskernels(pfb), synthesiskernels(pfb))
# Reshape each row of the analysis bank into a spatial-domain filter kernel,
# tiling the polyphase components back onto the kernel support.
function analysiskernels(pfb::PolyphaseFB)
    df = decimations(pfb)
    afb = analysisbank(pfb)
    @views map([ reshape(afb[p,:], prod(df), :) for p in 1:size(afb, 1)]) do vf
        out = similar(vf, kernelsize(pfb)...)
        for (idx, tile) in enumerate(TileIterator(axes(out), df))
            out[tile...] = reshape(vf[:, idx], df...)
        end
        out
    end
end
# Synthesis kernels are the conjugated, fully reversed analysis kernels
# (time reversal + conjugation of each filter).
function synthesiskernels(cc::AbstractNsolt)
    map(analysiskernels(cc)) do af
        reshape(af .|> conj |> vec |> reverse, size(af))
    end
end
# Convert a D-dimensional array into polyphase form: each non-overlapping
# block of size `szBlock` becomes one column of the data matrix.
function mdarray2polyphase(x::AbstractArray{TX,D}, szBlock::NTuple{D,TS}) where {TX,D,TS<:Integer}
    nBlocks = fld.(size(x), szBlock)
    @assert all(size(x) .% szBlock .== 0) "size error. input data: $(size(x)), block size: $(szBlock)."
    # outdata = hcat([ vec(@view x[tile...]) for tile in TileIterator(axes(x), szBlock)]...)
    outdata = similar(x, prod(szBlock), prod(nBlocks))
    @views for (idx, tile) in enumerate(TileIterator(axes(x), szBlock))
        outdata[:,idx] = vec(x[tile...])
    end
    PolyphaseVector(outdata, nBlocks)
end
# Inverse of mdarray2polyphase: scatter each column back onto its block tile.
function polyphase2mdarray(x::PolyphaseVector{TX,D}, szBlock::NTuple{D,TS}) where {TX,D,TS<:Integer}
    @assert (size(x.data, 1) == prod(szBlock)) "size mismatch! 'prod(szBlock)' must be equal to $(size(x.data,1))."
    out = similar(x.data, (x.nBlocks .* szBlock)...)
    @views for (idx, tile) in enumerate(TileIterator(axes(out), szBlock))
        out[tile...] = reshape(x.data[:,idx], szBlock...)
    end
    out
end
# Cyclically rotate the block dimensions of a PolyphaseVector forward:
# the first block dimension becomes the last, and the data columns are
# permuted accordingly.
function rotatedimspv(x::PolyphaseVector{T,D}) where {T,D}
    data = rotatedimspv(x.data, x.nBlocks[1])
    nBlocks = (x.nBlocks[2:end]..., x.nBlocks[1])
    return PolyphaseVector(data, nBlocks)
end
# Inverse of `rotatedimspv`: cyclically rotate the block dimensions backward,
# moving the last block dimension back to the front.
function irotatedimspv(x::PolyphaseVector{T,D}) where {T,D}
    # NOTE(review): the column permutation uses nBlocks[1]; confirm this
    # matches the forward rotation it is meant to undo.
    data = irotatedimspv(x.data, x.nBlocks[1])
    # BUGFIX: the rotated size tuple must move the last entry to the front
    # and keep the remaining entries in order. The previous code used
    # `x.nBlocks[2:end]`, which dropped nBlocks[1] and duplicated
    # nBlocks[end], so the tuple no longer multiplied out to the number of
    # data columns.
    nBlocks = (x.nBlocks[end], x.nBlocks[1:end-1]...)
    return PolyphaseVector(data, nBlocks)
end
# Stride (perfect-shuffle) permutation of the matrix columns: gather every
# nBlocks-th column starting at offsets 0..nBlocks-1.
function rotatedimspv(x::AbstractMatrix, nBlocks::Integer)
    @views hcat([ x[:, (1:nBlocks:end) .+ idx] for idx = 0:nBlocks-1 ]...)
end
# Inverse shuffle: the stride permutation with the complementary stride.
irotatedimspv(x::AbstractMatrix, nBlocks::Integer) = rotatedimspv(x, fld(size(x, 2), nBlocks))
# In-place butterfly: (xu, xl) <- (xu + xl, xu - xl), without normalization.
@inline function unnormalized_butterfly!(xu::T, xl::T) where {T<:AbstractMatrix}
    tu, tl = (xu + xl, xu - xl)
    xu .= tu
    xl .= tl
    nothing
end
# In-place butterfly with 1/2 scaling: inverse of unnormalized_butterfly!.
@inline function half_butterfly!(xu::T, xl::T) where {T<:AbstractMatrix}
    tu, tl = (xu + xl, xu - xl) ./ 2
    xu .= tu
    xl .= tl
    nothing
end
# Circular delay step of the order-extension: odd stages shift the lower
# coefficient half forward, even stages shift the upper half backward.
function shiftcoefs!(V::Val{:circular}, k::Integer, mtxup::AbstractMatrix, mtxlw::AbstractMatrix, nShift::Integer)
    if isodd(k)
        shiftcoefs_odd!(V, mtxlw, nShift)
    else
        shiftcoefs_even!(V, mtxup, nShift)
    end
    nothing
end
# Circularly shift the matrix columns by nShift.
function shiftcoefs_odd!(::Val{:circular}, mtx::AbstractMatrix, nShift::Integer)
    mtx .= circshift(mtx, (0, nShift))
end
# Even stages shift in the opposite direction.
shiftcoefs_even!(V::Val{:circular}, mtx, nShift) = shiftcoefs_odd!(V, mtx, -nShift)
# Adjoint (synthesis-side) step: same shifts with the sign reversed.
adjshiftcoefs!(v::Val{:circular}, k, mtxup, mtxlw, nShift::Integer) = shiftcoefs!(v, k, mtxup, mtxlw, -nShift)
# function shiftcoefs!(::Val{:zero}, k::Integer, mtxup::AbstractMatrix, mtxlw::AbstractMatrix, nShift::Integer)
# if isodd(k)
# mtxlw[:, 1+nShift:end] .= @view mtxlw[:, 1:end-nShift]
# mtxlw[:, 1:nShift] .= 0
# else
# mtxup[:, 1:end-nShift] .= @view mtxup[:, 1+nShift:end]
# mtxup[:, end-nShift+1:end] .= 0
# end
# nothing
# end
#
# function adjshiftcoefs!(::Val{:zero}, k::Integer, mtxup::AbstractMatrix, mtxlw::AbstractMatrix, nShift::Integer)
# if isodd(k)
# mtxlw[:, 1:end-nShift] .= @view mtxlw[:, 1+nShift:end]
# mtxlw[:, end-nShift+1:end] .= 0
# else
# mtxup[:, 1+nShift:end] .= @view mtxup[:, 1:end-nShift]
# mtxup[:, 1:nShift] .= 0
# end
# nothing
# end
# SaivDr-compatible variant: every stage circularly shifts only the lower
# half, regardless of stage parity (k and the upper half are ignored).
function shiftcoefs!(::Val{:circular_traditional}, ::Integer, ::Any, mtxlw::AbstractMatrix, nShift::Integer)
    mtxlw .= circshift(mtxlw, (0, nShift))
end
# Adjoint step: same shift with the sign reversed.
adjshiftcoefs!(v::Val{:circular_traditional}, k, mtxup, mtxlw, nShift::Integer) = shiftcoefs!(v, k, mtxup, mtxlw, -nShift)
|
{"hexsha": "a9893e5670fea416f40bbf6ecbcff7ccdb912996", "size": 8522, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/polyphaseMatrices.jl", "max_stars_repo_name": "nixir/MDCDL", "max_stars_repo_head_hexsha": "db176ca1f9adb775168cab3a6c7eeafa0757124c", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/polyphaseMatrices.jl", "max_issues_repo_name": "nixir/MDCDL", "max_issues_repo_head_hexsha": "db176ca1f9adb775168cab3a6c7eeafa0757124c", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/polyphaseMatrices.jl", "max_forks_repo_name": "nixir/MDCDL", "max_forks_repo_head_hexsha": "db176ca1f9adb775168cab3a6c7eeafa0757124c", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6422764228, "max_line_length": 122, "alphanum_fraction": 0.6286083079, "num_tokens": 2832}
|
'''
This script holds general meta data & configuration paths required for pipeline operation
'''
import os
import numpy as np
# NOTE(review): the template version of this file ships with the assert below
# active; it is commented out here because this is an experiment-specific copy.
#assert False, 'you are importing the template config.py file, import your local experiment specific file'
####################################################################################
############################## experiment details ##################################
####################################################################################
## path parameters -- These MUST be adjusted to your specific dataset
myloc= os.getcwd() # location of this script (WSL file structure)
data_dir= './pre-registered_imgs/' # otherwise use this
# data_dir = '/mnt/z/Marilyne/Axioscan/6-Dhivya/New_folder/Test_D1/' # use this if examining images before pre-registration via tutorial.ipynb
# Specify the experiment here
slide_name= 'D1'
scene_name= 'None' # use 'None' to specify no scene name (cap sensitive)
# These paths probably don't need to be adjusted
lib_dir= '/mnt/c/Users/Public/cyclicIF_processing/cyclicIF_registration/workflow/libs'
script_dir= '/mnt/c/Users/Public/cyclicIF_processing/cyclicIF_registration/workflow/scripts'
output_dir= '/mnt/d/cyclicIF_outputs/6_Dhivya/D1/registered_imgs/' # this is path to registered cores
####################################################################################
############################## core segmentation ###################################
####################################################################################
# image downsampling factor for core segmentation
# default ~10
downsample_proportion = 10
# remove connected components that are smaller than this (in downsampled pixels)
# REMEMBER THIS SCALES WITH `downsample_proportion`
# NOTE: this may need to be adjusted for different core sizes or types
# default ~ 2000
min_obj_size = int( 4e4 / downsample_proportion )
# threshold value used to select core regions (after a gaussian blur)
# default ~ 0.75
core_seg_quantile = 0.74 # (Dhiva D1) #0.785 (Pejovic)
# padding used when selecting a core, eg selects core bounding box + 2*padding
# default ~ 10
padding = 20
# segmentation params
# larger values will create more blur
gaussian_blur_variance = 4000
# core matching clustering method
# options: 'k-means-constrained', 'dbscan'
# note: k-means-constrained has issues if later rounds have more identified cores
# default ~ dbscan
clustering_method = 'dbscan'
# IF DBSCAN
# minimum distance between points to be considered within the same neighborhood
eps = 0.12 # (pejovic~0.15)
# minimum number of neighbors for a core point (DBSCAN min_samples)
min_samples = 2
# features used for core matching across rounds
feats = ['center_x', 'center_y']
# relative weighting of the features above
feat_importance = np.array([1,1])
####################################################################################
################################### GENERAL ########################################
####################################################################################
# this isn't used anywhere - YET - SimpleITK does use spacing, but I haven't changed it yet - worried how it might change results
pixel_width = 0.65 # microns
pixel_height = 0.65 # microns
####################################################################################
############################## REGISTRATION ########################################
####################################################################################
num_hist_bins = 256
learning_rate = 1e-1 # deprecated - not used in powell optimizer
min_step = 1e-10 # deprecated - not used in powell optimizer
iterations = 500
sampling_percentage = 1.0 # x100%
# Powell optimizer parameters
stepLength=1
stepTolerance = 1e-7
valueTolerance = 1e-7
####################################################################################
############################## QUALITY CONTROL #####################################
####################################################################################
QC_dice_coef = 0.4 # this is the one used in `generate_QC_file.py` 12/30/2020
FPR_threshold = 0.5
FNR_threshold = 0.5
hausdorff_distance_threshold = 0.2
####################################################################################
############################## DEDUST PARAMS #####################################
####################################################################################
dedust_gaussian_var = 1e-1
dust_thresh_quantile = 0.999
|
{"hexsha": "80fc4173756463158815a1f71a051e3232bebcf1", "size": 4475, "ext": "py", "lang": "Python", "max_stars_repo_path": "workflow/libs/_config.py", "max_stars_repo_name": "nathanieljevans/cyclicIF_registration", "max_stars_repo_head_hexsha": "0483b8354d0e2b666ca1b47848dca3222c5ddb69", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "workflow/libs/_config.py", "max_issues_repo_name": "nathanieljevans/cyclicIF_registration", "max_issues_repo_head_hexsha": "0483b8354d0e2b666ca1b47848dca3222c5ddb69", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-06-04T16:04:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-25T17:08:18.000Z", "max_forks_repo_path": "workflow/libs/_config.py", "max_forks_repo_name": "nathanieljevans/cyclicIF_registration", "max_forks_repo_head_hexsha": "0483b8354d0e2b666ca1b47848dca3222c5ddb69", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2169811321, "max_line_length": 143, "alphanum_fraction": 0.5027932961, "include": true, "reason": "import numpy", "num_tokens": 870}
|
import numpy as np
import scipy, os
from scipy.signal import butter,lfilter
from scipy.ndimage.filters import gaussian_filter1d
import matplotlib.pyplot as plt
from matplotlib.pyplot import mlab
import xml.etree.ElementTree
# Default acquisition rate (Hz); several functions below read this module-level value.
samplingRate=30000.
#=================================================================================================
#------------operations on continuous traces-------------------------------------
#=================================================================================================
# Presumably the on-probe reference channel indices for Neuropixels phase 3 /
# phase 2 probes (per the variable names) -- confirm against probe docs.
npix_p3_reference_channels = np.array([ 36, 75, 112, 151, 188, 227, 264, 303, 340, 379])
npix_p2_reference_channels = np.array([1,18,33,50,65,82,97,114,99])
skip_channels = npix_p3_reference_channels #default to phase 3 reference channels
def get_chunk(mm, start, end, channels, sampling_rate=30000):
    """Slice [start, end) seconds out of a flat interleaved recording.

    `mm` is a 1-D (memmapped) int16 array with `channels` interleaved
    channels. Returns a (channels, samples) float array scaled by 0.195
    (presumably 0.195 uV per bit -- confirm against the acquisition system).
    """
    nch = int(channels)
    first = int(start * sampling_rate * nch)
    last = int(np.floor(end * sampling_rate * nch))
    flat = mm[first:last]
    # Fortran order: consecutive flat samples fill down the channel axis.
    return np.reshape(flat, (nch, -1), order='F') * 0.195
#filter a bit of continuous data. uses butterworth filter.
def filterTrace(trace, low, high, sampleHz, order):
    """Band-pass filter a trace with a Butterworth filter.

    `low`/`high` are the band edges in Hz; `sampleHz` is the sampling rate;
    `order` is the filter order. Returns the causally filtered trace
    (scipy.signal.lfilter, so there is a phase delay).
    """
    nyquist = 0.5 * sampleHz
    lo_norm = float(low) / nyquist
    hi_norm = float(high) / nyquist
    b, a = butter(order, [lo_norm, hi_norm], btype='band')
    return lfilter(b, a, trace)
#developmental filter version. not used.
def filterTrace_hard(trace, low, high, sampleHz, order):
    """Broken developmental variant of filterTrace -- do not call.

    NOTE(review): `scipy.signal.band_stop_obj()` and the trailing
    `scipy.signal.lfilter()` are invoked without their required arguments,
    so this function raises TypeError if executed. The comment above the
    definition marks it as unused.
    """
    low = float(low)
    high = float(high)
    nyq = 0.5 * sampleHz
    low = low / nyq
    high = high / nyq
    scipy.signal.band_stop_obj()
    b, a = butter(order, [low, high], btype='band')
    filtered = lfilter(b, a, trace)
    scipy.signal.lfilter()
    return filtered
#wrapper for filtering continous data of different forms.
#data can be a single continuous trace, a dictionary containing a key called 'data' whose value is a continous trace, or a dictionary of traces, or a dicit
def filtr(data, low, high, sampleHz, order):
    """Band-pass filter a trace, or every trace in a dict.

    `data` may be a plain trace, a dict with a 'data' key holding a trace,
    or a dict of such dicts (one per channel). Dict-of-dict inputs are
    updated in place and also returned.
    """
    if type(data) is not dict:
        return filterTrace(data, low, high, sampleHz, order)
    if 'data' in data.keys():
        return filterTrace(data['data'], low, high, sampleHz, order)
    out = {}
    for key in data.keys():
        out[key] = data[key]
        out[key]['data'] = filterTrace(data[key]['data'], low, high, sampleHz, order)
    return out
#notch filter a continous trace by filtering in a narrow range and subtracting that from the input trace.
def notch(data, freq, sampleHz):
    """Notch filter at `freq` Hz by subtracting a narrow band-passed copy.

    The notch is implemented as the original signal minus a first-order
    band-pass over (freq-2, freq+2) Hz. Accepts the same trace / dict /
    dict-of-dicts inputs as `filtr`.
    """
    band_low = freq - 2
    band_high = freq + 2

    def _apply(trace):
        # Remove the narrow-band component around `freq`.
        return trace - filterTrace(trace, band_low, band_high, sampleHz, 1)

    if type(data) is not dict:
        return _apply(data)
    if 'data' in data.keys():
        return _apply(data['data'])
    out = {}
    for key in data.keys():
        out[key] = data[key]
        out[key]['data'] = _apply(data[key]['data'])
    return out
#average a continuous trace around a set of timestamps
def average_trials(data, timestamps, window, sampleFreq=25000.):
    """Average a continuous trace around a set of event timestamps.

    Parameters:
        data: 1-D continuous trace (indexable by sample number).
        timestamps: iterable of trial-onset sample indices.
        window: trial duration in seconds.
        sampleFreq: sampling rate in Hz.
    Returns (alltrials, average): a (n_trials, n_samples) array of the raw
    trial segments and their mean trace.

    Fixes relative to the original:
    - `window * sampleFreq` is a float; it is now cast to int so the array
      shape and slice indices are valid under Python 3 / modern numpy;
    - the divisor was `len(timestamps - skipped)` (elementwise subtraction,
      then len) instead of `len(timestamps) - skipped`.
    """
    nsamples = int(window * sampleFreq)
    alltrials = np.zeros((len(timestamps), nsamples))
    average = np.zeros(nsamples)
    skipped = 0  # retained for the divisor; no trials are skipped currently
    for i, onset in enumerate(timestamps):
        segment = data[int(onset):int(onset) + nsamples]
        average += segment
        alltrials[i, :] = segment
    return alltrials, average / float(len(timestamps) - skipped)
#average all continuous traces in an array around a set of timestamps
def average_trials_array(data, timestamps, window, output='avg'):
    """Trial-average every channel in a dict of continuous traces.

    `data` maps channel keys to dicts with a 'data' trace; entries without
    a 'data' key are skipped. `output` selects the return value:
    'avg' (default), 'trials', or 'both' -- any other value returns None.
    """
    averages = {}
    trials = {}
    for key in data.keys():
        if 'data' not in data[key].keys():
            continue
        per_trial, mean_trace = average_trials(data[key]['data'], timestamps, window)
        trials[key] = {'data': per_trial}
        averages[key] = {'data': mean_trace}
    if output == 'trials':
        return trials
    if output == 'both':
        return (trials, averages)
    if output == 'avg':
        return averages
#note: this CSD code does not work! -dan
def CSD_1D(data, channelmap=[], prefix='100_CH', point=1000):
    """One-dimensional current source density at a single time point.

    NOTE(review): marked "does not work" by the author; `KCSD` is not
    defined or imported anywhere in this module, so calling this raises
    NameError. Also uses a mutable default argument for `channelmap`.
    """
    if channelmap == []:
        channelmap = data.keys()
    elec_pos = []
    pots = []
    # one potential sample per channel at time index `point`
    for i,key in enumerate(channelmap[0]):
        key = prefix+str(key).replace(prefix,'')
        pots.append([data[key]['data'][point]])
        elec_pos.append([(i+i)/2])
    pots=np.array(pots)
    elec_pos=np.array(elec_pos)
    params = {
        'xmin': 0,
        'xmax': 65.0,
        'source_type': 'step',
        'n_sources': 64,
        'sigma': 0.1
    }
    k = KCSD(elec_pos, pots, params)
    k.estimate_pots()
    k.estimate_csd()
    k.plot_all()
#note: this CSD code does not work! -dan
def CSD_1D_time(data, channelmap=[], prefix='100_CH', point=1000):
    """Time-resolved 1-D current source density over all samples.

    NOTE(review): marked "does not work" by the author; `KCSD` is undefined
    in this module (NameError on call), and `data.keys()[0]` indexing is
    Python-2-only. Mutable default argument for `channelmap`.
    Returns (out_csd, out_pots) arrays of shape (n_channels, n_samples).
    """
    if channelmap == []:
        channelmap = data.keys()
    numPoints = len(data[data.keys()[0]]['data'])
    out_csd = np.zeros((len(data.keys()),numPoints))
    out_pots = np.zeros((len(data.keys()),numPoints))
    # re-fits the KCSD model independently at every time sample
    for point in range(numPoints):
        print(point)
        elec_pos = []
        pots=[]
        for i,key in enumerate(channelmap[0]):
            key = prefix+str(key).replace(prefix,'')
            pots.append([data[key]['data'][point]])
            elec_pos.append([i+i])
        pots=np.array(pots)
        elec_pos=np.array(elec_pos)
        params = {
            'xmin': 0,
            'xmax': 130.0,
            'source_type': 'step',
            'n_sources': 128,
            'sigma': 0.2,
        }
        k = KCSD(elec_pos, pots, params)
        k.estimate_pots()
        k.estimate_csd()
        out_csd[0:np.shape(k.solver.estimated_csd)[0],point]= k.solver.estimated_csd[:,0]
        out_pots[0:np.shape(k.solver.estimated_pots)[0],point]= k.solver.estimated_pots[:,0]
    return out_csd,out_pots
    #k.plot_all()
#k.plot_all()
def etree_to_dict(t):
    """Recursively convert an ElementTree element into a nested dict.

    The result maps the element's tag to a list of converted children,
    adds one '@'-prefixed entry per XML attribute, and stores the element
    text under 'text'.

    Fixes relative to the original (Python-2 era code):
    - `t.getchildren()` was removed in Python 3.9 -- iterate the element
      directly instead;
    - `attrib.iteritems()` does not exist in Python 3 -- use `.items()`;
    - `map(...)` is lazy in Python 3; a list comprehension restores the
      eager list the Python 2 code produced.
    """
    d = {t.tag: [etree_to_dict(child) for child in t]}
    d.update(('@' + k, v) for k, v in t.attrib.items())
    d['text'] = t.text
    return d
def get_channel_count(path, from_channel_map=True, from_templates=False):
    """Count recorded channels from an Open Ephys `settings.xml` in `path`.

    Counts CHANNEL entries with @record == '1' in the first signal chain.
    With `from_templates=True`, reads the channel count from the last axis
    of `templates.npy` instead (NOTE(review): the file handle opened there
    is never closed). With `from_channel_map=True` on Rhythm FPGA data,
    counts channels listed under the 'Filters/Channel Map' processor.
    Returns None implicitly if neither source name matches.
    """
    d = etree_to_dict(xml.etree.ElementTree.parse(os.path.join(path,'settings.xml')).getroot())
    chs =0
    if from_templates:
        return np.load(open(os.path.join(path,'templates.npy'))).shape[-1]
    if d['SETTINGS'][1]['SIGNALCHAIN'][0]['@name'] == 'Sources/Neuropix':
        # cap at 385 entries: Neuropixels has at most 384 channels + sync
        for info in d['SETTINGS'][1]['SIGNALCHAIN'][0]['PROCESSOR'][:385]:
            if 'CHANNEL' in info.keys():
                if info['CHANNEL'][0]['@record'] == '1':
                    chs +=1
        return chs
    if d['SETTINGS'][1]['SIGNALCHAIN'][0]['@name'] == 'Sources/Rhythm FPGA':
        if from_channel_map:
            for nm in d['SETTINGS'][1]['SIGNALCHAIN']:
                name = nm['@name']
                if name == 'Filters/Channel Map':
                    #chs = np.shape(d['SETTINGS'][1]['SIGNALCHAIN'][0]['PROCESSOR'][0]['CHANNEL_INFO'])[0]
                    for info in nm['PROCESSOR']:
                        if 'CHANNEL' in info.keys():
                            if info['CHANNEL'][0]['@record'] == '1':
                                chs +=1
        else:
            for info in d['SETTINGS'][1]['SIGNALCHAIN'][0]['PROCESSOR'][:385]:
                if 'CHANNEL' in info.keys():
                    if info['CHANNEL'][0]['@record'] == '1':
                        chs +=1
        return chs
#returns the root mean squared of the input data
def RMS(data, start=0, window=0, despike=False):
    """Root-mean-square of a (mean-subtracted) window of a trace.

    `start` and `window` are in seconds, converted with the module-level
    `samplingRate`; `window=0` means the whole trace. With `despike=True`
    threshold crossings are zeroed out first (see despike_trace).
    """
    first = int(start * samplingRate)
    if window == 0:
        span = len(data)
    else:
        span = int(window * samplingRate)
    #chunk = filterTrace(data[start:start+window], 70, 6000, 25000, 3)[200:window]
    chunk = data[first:first + span] - np.mean(data[first:first + span])
    if despike:
        chunk = despike_trace(chunk, threshold=180)
    return np.sqrt(sum(chunk ** 2) / float(len(chunk)))
def despike_trace(trace, threshold_sd=2.5, **kwargs):
    """Zero out 40-sample windows around threshold crossings (in place).

    The threshold is mean + threshold_sd * SD unless an absolute
    `threshold=` keyword is given. Crossings too close to the trace edges
    (within 30 samples) are left untouched. Returns the modified trace.

    NOTE(review): `mlab.cross_from_below` was removed from matplotlib.mlab
    in recent releases -- confirm the pinned matplotlib version. Also, the
    negative-going detection uses cross_from_below on -threshold, which
    finds upward crossings of -threshold rather than downward spikes --
    confirm intended.
    """
    if 'threshold' in kwargs.keys():
        threshold = kwargs['threshold']
    else:
        threshold = np.mean(trace)+threshold_sd*np.std(trace)
    spike_times_a = mlab.cross_from_below(trace,threshold)
    spike_times_b = mlab.cross_from_below(trace,-1*threshold)
    for spike_time in np.concatenate((spike_times_b,spike_times_a)):
        if spike_time > 30 and spike_time < len(trace)-30:
            trace[spike_time - 20:spike_time + 20] = 0#np.random.uniform(-1*threshold,threshold,60)
    return trace
def spikeamplitudes_trace(trace, threshold_sd=3.0, percentile=0.9, **kwargs):
    """Return the `percentile`-th largest spike amplitude in a trace.

    Threshold crossings (mean + threshold_sd * SD, or an absolute
    `threshold=` keyword) are located, the peak absolute value in a
    +/-30-sample window around each is collected, and the amplitude at
    the requested percentile of the sorted list is returned. If fewer
    than 11 events are found, returns 0.

    NOTE(review): relies on `mlab.cross_from_below`, removed from recent
    matplotlib -- confirm the pinned version.
    """
    if 'threshold' in kwargs.keys():
        threshold = kwargs['threshold']
    else:
        threshold = np.mean(trace)+threshold_sd*np.std(trace)
    spike_times_a = mlab.cross_from_below(trace,threshold)
    amps=[]
    for spike_time in spike_times_a:
        if spike_time > 30 and spike_time < len(trace)-30:
            amps.extend([np.max(np.abs(trace[spike_time-30:spike_time+30]))])
    if not len(amps) > 10:
        amps= [0]
    return np.sort(amps)[int(len(amps)*percentile)]# / 5.0
#returns the peak to peak range of the input data
def p2p(data, start=0, window=0):
    """Peak-to-peak range of a window of a trace.

    `start` and `window` are in seconds, converted with the module-level
    `samplingRate`; `window=0` means the whole trace.

    Fix: `start * samplingRate` and `window * samplingRate` are floats
    (samplingRate is 30000.), and float slice indices raise TypeError under
    Python 3 -- cast to int, matching the pattern used in RMS().
    """
    first = int(start * samplingRate)
    if window == 0:
        span = len(data)
    else:
        span = int(window * samplingRate)
    chunk = data[first:first + span]
    return np.max(chunk) - np.min(chunk)
#computes a power spectrum of the input data
#optionally, plot the computed spectrum
def b(data, start=0, window=0, plot=False, ymin=1e-24, ymax=1e8, title='', samplingRate=2500):
    """Smoothed FFT power spectrum of a window of a trace.

    `start`/`window` are in seconds (window=0 means the whole trace); the
    chunk is divided by 1e6 (presumably uV -> V -- confirm) before the FFT.
    Returns (frequencies, power) sorted by frequency. When `plot` is set,
    draws a log-log power-density plot.

    NOTE(review): a non-zero float `start` produces float slice indices,
    which raise TypeError under Python 3; works with the integer defaults.
    """
    start = start * samplingRate# sampling rate
    if window == 0:
        window = len(data)
    else:
        window = window * samplingRate # sampling rate
    chunk = data[start:start+window]/1e6
    ps = np.abs(np.fft.fft(chunk))**2
    time_step = 1. / samplingRate
    freqs = np.fft.fftfreq(chunk.size, time_step)
    idx = np.argsort(freqs)
    # light Savitzky-Golay smoothing of the raw spectrum
    ps = scipy.signal.savgol_filter(ps,5,3)
    if plot:
        plt.plot(freqs[idx], ps[idx]);
        plt.xlim(xmin=0.01);
        plt.ylim(ymin=ymin,ymax=ymax)
        plt.xscale('log')
        plt.yscale('log')
        plt.ylabel(r'$power\/density\/\frac{V^2}{Hz}$',color='k',fontsize=18)
        plt.xlabel(r'$frequency,\/ Hz$',color='k',fontsize=24)
        plt.tick_params(axis='both', which='major', labelsize=24)#;plt.locator_params(axis='y',nbins=6)
        plt.title(title)
    return (freqs[idx], ps[idx])
def periodogram(data, start=0, window=0, plot=False, ymin=1e-24, ymax=1e8, title='', samplingRate=2500):
    """Smoothed periodogram PSD estimate of a window of a trace.

    `start`/`window` are in seconds (window=0 means the whole trace).
    Returns (frequencies, power); optionally draws a log-log plot.
    """
    first = start * samplingRate# sampling rate
    span = len(data) if window == 0 else window * samplingRate
    chunk = data[first:first + span]
    f, pXX = scipy.signal.periodogram(chunk, samplingRate, nfft=samplingRate)
    # light Savitzky-Golay smoothing of the raw periodogram
    pXX = scipy.signal.savgol_filter(pXX, 3, 1)
    if plot:
        plt.plot(f, pXX)
        plt.xlim(xmin=0.5)
        plt.ylim(ymin=ymin, ymax=ymax)
        plt.xscale('log')
        plt.yscale('log')
        plt.ylabel(r'$power\/density\/\frac{V^2}{Hz}$', color='k', fontsize=18)
        plt.xlabel(r'$frequency,\/ Hz$', color='k', fontsize=24)
        plt.tick_params(axis='both', which='major', labelsize=24)
        plt.title(title)
    return (f, pXX)
def welch_power(data,samplingRate=2500,start=0,window=0,plot=False,ymin=1e-24,ymax=1e8,title=''):
start = start * samplingRate# sampling rate
if window == 0:
window = len(data);print(window)
else:
window = window * samplingRate # sampling rate
chunk = data[start:start+window]
f,pXX = scipy.signal.welch(chunk,samplingRate,nfft=samplingRate/2)
#pXX = scipy.signal.savgol_filter(pXX,3,1)
if plot:
plt.plot(f, pXX);
plt.xlim(xmin=0.01);
plt.ylim(ymin=ymin,ymax=ymax)
plt.xscale('log')
plt.yscale('log')
plt.ylabel(r'$power\/density\/\frac{V^2}{Hz}$',color='k',fontsize=18)
plt.xlabel(r'$frequency,\/ Hz$',color='k',fontsize=24)
plt.tick_params(axis='both', which='major', labelsize=24)#;plt.locator_params(axis='y',nbins=6)
plt.title(title)
return (f, pXX)
#measure the cross-spectral coherence between two traces.
def coherence(x, y, samplingRate=30000, returnval=None):
    """Magnitude-squared coherence between two traces (matplotlib.mlab.cohere).

    With `returnval=None` returns (spectrum, frequencies). A float
    `returnval` interpolates the coherence at that frequency; a tuple
    integrates it between the two frequencies. Any other truthy
    `returnval` falls through and returns None.

    Fix: `NFFT=int(samplingRate)/5` is a float under Python 3 and NFFT
    must be an int -- use floor division.
    """
    spectrum, frequencies = mlab.cohere(x, y, Fs=float(samplingRate), NFFT=int(samplingRate) // 5)
    if returnval:
        if type(returnval) is float:
            # coherence at a single frequency, by linear interpolation
            return np.interp(returnval, frequencies, spectrum)
        if type(returnval) is tuple:
            # integrated coherence between returnval[0] and returnval[1] Hz
            return np.trapz(spectrum[np.where(frequencies==returnval[0])[0]:np.where(frequencies==returnval[1])[0]], dx=5.0)
    else:
        return (spectrum, frequencies)
def get_surface_channel_spikeband(path, start=2., end=10., sampling_rate=30000, plot=False, filter_size=2, sigma=1., filter=False, probemap=None):
    """Estimate the channel at the brain surface from spike-band RMS.

    Loads [start, end) seconds of the int16 recording at `path`, computes
    per-channel RMS (optionally 300-6000 Hz band-passed), smooths the RMS
    profile, and returns the first channel where it crosses below
    mean + sigma*SD of the last 5 channels (assumed out of the brain).
    Returns None if no crossing is found.

    NOTE(review): `mlab.cross_from_above` was removed from recent
    matplotlib -- confirm the pinned version; the bare `except` also hides
    any other failure as a None return.
    """
    mm = np.memmap(path, dtype=np.int16, mode='r')
    print(os.path.dirname(path))
    num_channels = get_channel_count(os.path.dirname(path),from_channel_map=False)
    print(num_channels)
    chunk = get_chunk(mm,start,end,num_channels,sampling_rate)
    if probemap is not None:
        chunk = chunk[probemap,:]
        # quick visual sanity check of the remapped data
        plt.imshow(chunk[:,:30000]);plt.gca().set_aspect(100)
        plt.figure()
    rms = []
    good_channels = []
    for ch in range(np.shape(chunk)[0]):
        if ch not in skip_channels:
            if filter:
                data = filtr(chunk[ch,:],300,6000,sampling_rate,3)
            else:
                data = chunk[ch,:]
            rms.extend([RMS(data)])
            good_channels.extend([ch])
    threshold = np.mean(gaussian_filter1d(rms,filter_size)[::-1][:5])+np.std(gaussian_filter1d(rms,filter_size)[::-1][:5])*sigma #assumes the last 5 are out of the brain; uses the mean + sd of these 5 as the threshold for pial surface
    # print(np.where(np.array(rms)<8.))
    # print(good_channels[np.where(np.array(rms)<8.)[0].astype(int)])
    if plot:
        plt.plot(good_channels,gaussian_filter1d(rms,filter_size))
        plt.gca().axhline(threshold,color='r')
        plt.xlabel('channel number')
        plt.ylabel('spike band RMS')
    #print(np.where(np.array(rms)<6.))
    del mm
    try:
        surface_channel = good_channels[mlab.cross_from_above(gaussian_filter1d(rms,filter_size),threshold)[0]]
        return surface_channel
    except:
        return None
def get_surface_channel_gamma(path, start=2., end=10., sampling_rate=2500, plot=False):
    """Estimate the surface channel from gamma-band (>40 Hz) power.

    Loads [start, end) seconds from `path`, takes the Welch power at the
    first frequency above 40 Hz per channel, thresholds at the maximum of
    the last 5 channels (assumed out of the brain), and returns the first
    channel crossing below threshold.

    NOTE(review): unlike the spikeband variant there is no try/except here,
    so an absent crossing raises IndexError; `mlab.cross_from_above` was
    removed from recent matplotlib -- confirm the pinned version.
    """
    mm = np.memmap(path, dtype=np.int16, mode='r')
    num_channels = get_channel_count(os.path.dirname(path))
    chunk = get_chunk(mm,start,end,num_channels,sampling_rate)
    gm = []
    good_channels = []
    for ch in range(np.shape(chunk)[0]):
        if ch not in skip_channels:
            f,pXX = welch_power(chunk[ch,:],start=2,window=8)
            # power at the first frequency bin above 40 Hz
            gm.extend([pXX[np.where(f>40.)[0][0]]])
            good_channels.extend([ch])
    threshold = np.max(gm[::-1][:5]) #assumes the last 5 are out of the brain; uses the max gamma on these channels as the threshold
    surface_channel = good_channels[mlab.cross_from_above(gaussian_filter1d(gm,0),threshold)[0]]
    if plot:
        plt.plot(good_channels,gaussian_filter1d(gm,2))
        plt.gca().axhline(threshold,color='r')
    del mm
    return surface_channel
def get_surface_channel_freq(path, frequency_range=(1, 5), start=2., end=10., sampling_rate=2500, filter_size=2, sigma=2., plot=False, filter=False, probemap=None):
    """Estimate the surface channel from band-limited LFP power.

    Loads [start, end) seconds from `path`, computes mean Welch power in
    `frequency_range` per channel (optionally 0.1-300 Hz band-passed),
    smooths the profile, thresholds at mean + sigma*SD of the last 5
    channels (assumed out of the brain), and returns the last channel
    crossing below threshold, or None if there is no crossing.

    Fixes relative to the original:
    - the optionally filtered `data` is now actually passed to welch_power
      (it was computed and then ignored);
    - the memmap is released before returning (the old `del mm` sat after
      the try/except's returns and was unreachable);
    - mutable default argument replaced with a tuple (read-only use, so
      callers are unaffected).
    """
    mm = np.memmap(path, dtype=np.int16, mode='r')
    num_channels = get_channel_count(os.path.dirname(path), from_channel_map=False)
    chunk = get_chunk(mm, start, end, num_channels, sampling_rate)
    if probemap is not None:
        chunk = chunk[probemap, :]
    gm = []
    good_channels = []
    for ch in range(np.shape(chunk)[0]):
        if ch not in skip_channels:
            if filter:
                data = filtr(chunk[ch, :], 0.1, 300, sampling_rate, 3)
            else:
                data = chunk[ch, :]
            f, pXX = welch_power(data, start=2, window=8)
            gm.extend([np.mean(pXX[np.where((f > frequency_range[0]) & (f < frequency_range[1]))[0]])])
            good_channels.extend([ch])
    del mm  # release the memmap before any return path
    # threshold: mean + sigma*SD of the last 5 channels, assumed out of the brain
    threshold = np.mean(gaussian_filter1d(gm, filter_size)[::-1][:5]) + np.std(gaussian_filter1d(gm, filter_size)[::-1][:5]) * sigma
    if plot:
        plt.plot(good_channels, gaussian_filter1d(gm, filter_size))
        plt.gca().axhline(threshold, color='r')
        plt.xlabel('channel number')
        plt.ylabel('power in ' + str(frequency_range[0]) + ' to ' + str(frequency_range[1]) + ' band')
    try:
        surface_channel = good_channels[mlab.cross_from_above(gaussian_filter1d(gm, filter_size), threshold)[-1]]
        return surface_channel
    except Exception:
        # no crossing found (probe fully inserted, or profile too flat)
        return None
def get_probe_freq(path, frequency_range=(1, 5), start=2., end=10., sampling_rate=2500, filter=False, probemap=None):
    """Return per-channel mean Welch power within ``frequency_range`` (Hz).

    Parameters
    ----------
    path : str
        Path to the raw int16 binary file (memory-mapped, not loaded).
    frequency_range : sequence of two floats
        (low, high) band edges in Hz over which power is averaged.
    start, end : float
        Time window (seconds) of data to analyze.
    sampling_rate : int
        Samples per second of the recording.
    filter : False or (low, high)
        If not False, band-pass corner frequencies applied per channel
        before the power estimate.
    probemap : array-like or None
        Optional channel reordering applied to the chunk.

    Returns
    -------
    list of float
        Mean band power per (non-skipped) channel.
    """
    mm = np.memmap(path, dtype=np.int16, mode='r')
    num_channels = get_channel_count(os.path.dirname(path), from_channel_map=False)
    chunk = get_chunk(mm, start, end, num_channels, sampling_rate)
    if probemap is not None:
        chunk = chunk[probemap, :]
    gm = []
    good_channels = []
    for ch in range(np.shape(chunk)[0]):
        # skip_channels is assumed to be a module-level list of known-bad
        # channels — TODO confirm where it is defined.
        if ch not in skip_channels:
            if filter != False:
                data = filtr(chunk[ch, :], filter[0], filter[1], sampling_rate, 3)
            else:
                data = chunk[ch, :]
            # BUG FIX: welch_power was previously always computed on the raw
            # chunk, so the `filter` argument had no effect.
            f, pXX = welch_power(data, start=2, window=8)
            gm.append(np.mean(pXX[np.where((f > frequency_range[0]) & (f < frequency_range[1]))[0]]))
            good_channels.append(ch)
    del mm
    return gm
def get_probe_spikeband(path, start=2., end=10., sampling_rate=30000, plot=False, filter_size=2, sigma=1., filter=False, probemap=None):
    """Return per-channel RMS of the (optionally spike-band filtered) signal.

    Parameters
    ----------
    path : str
        Path to the raw int16 binary file (memory-mapped, not loaded).
    start, end : float
        Time window (seconds) of data to analyze.
    sampling_rate : int
        Samples per second of the recording.
    plot : bool
        If True, show an image of the first 30000 samples of the chunk.
    filter_size, sigma : float
        Unused; kept for signature compatibility with the sibling
        get_surface_channel_* functions.
    filter : bool
        If True, band-pass 300-6000 Hz per channel before the RMS.
    probemap : array-like or None
        Optional channel reordering applied to the chunk.

    Returns
    -------
    list of float
        RMS per (non-skipped) channel.
    """
    mm = np.memmap(path, dtype=np.int16, mode='r')
    num_channels = get_channel_count(os.path.dirname(path), from_channel_map=False)
    chunk = get_chunk(mm, start, end, num_channels, sampling_rate)
    if probemap is not None:
        chunk = chunk[probemap, :]
    if plot:
        # BUG FIX: plotting was previously unconditional, ignoring `plot`.
        plt.imshow(chunk[:, :30000])
        plt.gca().set_aspect(100)
        plt.figure()
    rms = []
    good_channels = []
    for ch in range(np.shape(chunk)[0]):
        # skip_channels is assumed to be a module-level list of known-bad
        # channels — TODO confirm where it is defined.
        if ch not in skip_channels:
            if filter:
                data = filtr(chunk[ch, :], 300, 6000, sampling_rate, 3)
            else:
                data = chunk[ch, :]
            rms.append(RMS(data))
            good_channels.append(ch)
    del mm
    return rms
#=================================================================================================
|
{"hexsha": "54bf33623288a303f516cf288873cfa29717e9e6", "size": 18401, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/continuous_traces.py", "max_stars_repo_name": "danieljdenman/neuropixels_invitro", "max_stars_repo_head_hexsha": "22ba9f0c7ce9cd562e6e351bf96a312b757df1b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/continuous_traces.py", "max_issues_repo_name": "danieljdenman/neuropixels_invitro", "max_issues_repo_head_hexsha": "22ba9f0c7ce9cd562e6e351bf96a312b757df1b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/continuous_traces.py", "max_forks_repo_name": "danieljdenman/neuropixels_invitro", "max_forks_repo_head_hexsha": "22ba9f0c7ce9cd562e6e351bf96a312b757df1b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1510638298, "max_line_length": 232, "alphanum_fraction": 0.648606054, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 5173}
|
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import sympy
import cirq
import cirq.testing
# Shared qubit fixtures reused by every test case in this module.
q0, q1, q2, q3 = cirq.LineQubit.range(4)
def test_raises_for_non_commuting_paulis():
    """Constructing a PauliSumExponential from non-commuting terms must fail."""
    non_commuting_sum = cirq.X(q0) + cirq.Z(q0)
    with pytest.raises(ValueError, match='commuting'):
        cirq.PauliSumExponential(non_commuting_sum, np.pi / 2)
def test_raises_for_non_hermitian_pauli():
    """A sum mixing real and imaginary coefficients is rejected."""
    mixed_sum = cirq.X(q0) + 1j * cirq.Z(q1)
    with pytest.raises(ValueError, match='hermitian'):
        cirq.PauliSumExponential(mixed_sum, np.pi / 2)
@pytest.mark.parametrize(
    'psum_exp, expected_qubits',
    (
        # Single-term sum: only the one qubit it acts on.
        (cirq.PauliSumExponential(cirq.Z(q1), np.pi / 2), (q1,)),
        # Anti-Hermitian two-term sum with a symbolic exponent.
        (
            cirq.PauliSumExponential(2j * cirq.X(q0) + 3j * cirq.Y(q2), sympy.Symbol("theta")),
            (q0, q2),
        ),
        # Products of Paulis: every factor's qubit is reported.
        (
            cirq.PauliSumExponential(cirq.X(q0) * cirq.Y(q1) + cirq.Y(q2) * cirq.Z(q3), np.pi),
            (q0, q1, q2, q3),
        ),
    ),
)
def test_pauli_sum_exponential_qubits(psum_exp, expected_qubits):
    """qubits returns the tuple of qubits the exponential acts on."""
    assert psum_exp.qubits == expected_qubits
@pytest.mark.parametrize(
    'psum_exp, expected_psum_exp',
    (
        # Single term shifted from q0 to q1.
        (
            cirq.PauliSumExponential(cirq.Z(q0), np.pi / 2),
            cirq.PauliSumExponential(cirq.Z(q1), np.pi / 2),
        ),
        # Symbolic exponent is preserved through the remap.
        (
            cirq.PauliSumExponential(2j * cirq.X(q0) + 3j * cirq.Y(q2), sympy.Symbol("theta")),
            cirq.PauliSumExponential(2j * cirq.X(q1) + 3j * cirq.Y(q3), sympy.Symbol("theta")),
        ),
        # Multi-qubit products remapped term by term.
        (
            cirq.PauliSumExponential(cirq.X(q0) * cirq.Y(q1) + cirq.Y(q1) * cirq.Z(q3), np.pi),
            cirq.PauliSumExponential(cirq.X(q1) * cirq.Y(q2) + cirq.Y(q2) * cirq.Z(q3), np.pi),
        ),
    ),
)
def test_pauli_sum_exponential_with_qubits(psum_exp, expected_psum_exp):
    """with_qubits remaps the exponential onto new qubits, keeping
    coefficients and the exponent unchanged."""
    assert psum_exp.with_qubits(*expected_psum_exp.qubits) == expected_psum_exp
@pytest.mark.parametrize(
    'psum, exp',
    (
        (cirq.Z(q0), np.pi / 2),
        (2 * cirq.X(q0) + 3 * cirq.Y(q2), 1),
        (cirq.X(q0) * cirq.Y(q1) + cirq.Y(q1) * cirq.Z(q3), np.pi),
    ),
)
def test_with_parameters_resolved_by(psum, exp):
    """Resolving the symbolic exponent yields the numeric exponential."""
    parameterized = cirq.PauliSumExponential(psum, sympy.Symbol("theta"))
    resolved = cirq.resolve_parameters(parameterized, cirq.ParamResolver({"theta": exp}))
    assert resolved == cirq.PauliSumExponential(psum, exp)
def test_pauli_sum_exponential_parameterized_matrix_raises():
    """matrix() is undefined while the exponent is still symbolic."""
    symbolic = cirq.PauliSumExponential(cirq.X(q0) + cirq.Z(q1), sympy.Symbol("theta"))
    with pytest.raises(ValueError, match='parameterized'):
        symbolic.matrix()
@pytest.mark.parametrize(
    'psum_exp, expected_unitary',
    (
        # exp(j * pi/2 * X) == j*X == [[0, j], [j, 0]].
        (cirq.PauliSumExponential(cirq.X(q0), np.pi / 2), np.array([[0, 1j], [1j, 0]])),
        (
            # Anti-Hermitian sum of terms on distinct (hence commuting) qubits.
            cirq.PauliSumExponential(2j * cirq.X(q0) + 3j * cirq.Z(q1), np.pi / 2),
            np.array([[1j, 0, 0, 0], [0, -1j, 0, 0], [0, 0, 1j, 0], [0, 0, 0, -1j]]),
        ),
    ),
)
def test_pauli_sum_exponential_has_correct_unitary(psum_exp, expected_unitary):
    """The exponential is unitary and matches the analytic matrix."""
    assert cirq.has_unitary(psum_exp)
    assert np.allclose(cirq.unitary(psum_exp), expected_unitary)
@pytest.mark.parametrize(
    'psum_exp, power, expected_psum',
    (
        # Numeric exponent scaled by the power.
        (
            cirq.PauliSumExponential(cirq.Z(q1), np.pi / 2),
            5,
            cirq.PauliSumExponential(cirq.Z(q1), 5 * np.pi / 2),
        ),
        # Symbolic exponent scaled the same way.
        (
            cirq.PauliSumExponential(2j * cirq.X(q0) + 3j * cirq.Y(q2), sympy.Symbol("theta")),
            5,
            cirq.PauliSumExponential(2j * cirq.X(q0) + 3j * cirq.Y(q2), 5 * sympy.Symbol("theta")),
        ),
        # Multi-qubit products behave identically.
        (
            cirq.PauliSumExponential(cirq.X(q0) * cirq.Y(q1) + cirq.Y(q2) * cirq.Z(q3), np.pi),
            5,
            cirq.PauliSumExponential(cirq.X(q0) * cirq.Y(q1) + cirq.Y(q2) * cirq.Z(q3), 5 * np.pi),
        ),
    ),
)
def test_pauli_sum_exponential_pow(psum_exp, power, expected_psum):
    """Raising to a power multiplies the exponent: (e^(j*t*P))^k == e^(j*k*t*P)."""
    assert psum_exp**power == expected_psum
@pytest.mark.parametrize(
    'psum_exp',
    (
        # Degenerate zero sum and a two-term anti-Hermitian sum.
        (cirq.PauliSumExponential(0, np.pi / 2)),
        (cirq.PauliSumExponential(2j * cirq.X(q0) + 3j * cirq.Z(q1), np.pi / 2)),
    ),
)
def test_pauli_sum_exponential_repr(psum_exp):
    """cirq.testing helper: repr should evaluate back to an equal object."""
    cirq.testing.assert_equivalent_repr(psum_exp)
@pytest.mark.parametrize(
    'psum_exp, expected_str',
    (
        # Zero sum still renders with the j-factor form.
        (cirq.PauliSumExponential(0, np.pi / 2), 'exp(j * 1.5707963267948966 * (0.000))'),
        # Anti-Hermitian sum: the j factor is absorbed into the coefficients.
        (
            cirq.PauliSumExponential(2j * cirq.X(q0) + 4j * cirq.Y(q1), 2),
            'exp(2 * (2.000j*X(q(0))+4.000j*Y(q(1))))',
        ),
        # Symbolic exponent appears verbatim in the string.
        (
            cirq.PauliSumExponential(0.5 * cirq.X(q0) + 0.6 * cirq.Y(q1), sympy.Symbol("theta")),
            'exp(j * theta * (0.500*X(q(0))+0.600*Y(q(1))))',
        ),
    ),
)
def test_pauli_sum_exponential_formatting(psum_exp, expected_str):
    """str() renders as exp(<factor> * (<pauli sum>))."""
    assert str(psum_exp) == expected_str
|
{"hexsha": "50c4720ca1f44d616f06d6810259cd68d6738bb3", "size": 5370, "ext": "py", "lang": "Python", "max_stars_repo_path": "cirq-core/cirq/ops/pauli_sum_exponential_test.py", "max_stars_repo_name": "Nexuscompute/Cirq", "max_stars_repo_head_hexsha": "640ef8f82d6a56ec95361388ce7976e096cca906", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-05T22:17:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T22:17:39.000Z", "max_issues_repo_path": "cirq-core/cirq/ops/pauli_sum_exponential_test.py", "max_issues_repo_name": "pavoljuhas/Cirq", "max_issues_repo_head_hexsha": "b6d6577be61d216ce2f29f8c64ae5879cf3087d5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2022-01-16T14:12:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T03:58:46.000Z", "max_forks_repo_path": "cirq-core/cirq/ops/pauli_sum_exponential_test.py", "max_forks_repo_name": "Nexuscompute/Cirq", "max_forks_repo_head_hexsha": "640ef8f82d6a56ec95361388ce7976e096cca906", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1481481481, "max_line_length": 99, "alphanum_fraction": 0.6143389199, "include": true, "reason": "import numpy,import sympy", "num_tokens": 1739}
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import itertools
import abc
import logging
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.inference as paddle_infer
from paddle import compat as cpt
from typing import *
from program_config import TensorConfig, OpConfig, ProgramConfig
from auto_scan_test import AutoScanTest
# Bare-message log format so the TEST_CASE lines emitted below read cleanly.
logging.basicConfig(level=logging.INFO, format="%(message)s")
class TrtLayerAutoScanTest(AutoScanTest):
    """Auto-scan test base that builds program configs from the cartesian
    product of op attributes and yields predictor configs for plain GPU
    inference and for the TensorRT subgraph engine."""

    class TensorRTParam:
        '''
        TensorRT subgraph engine parameters.

        Fields mirror the arguments of
        paddle_infer.Config.enable_tensorrt_engine and are forwarded
        verbatim.
        '''

        def __init__(self, workspace_size, max_batch_size, min_subgraph_size,
                     precision, use_static, use_calib_mode):
            self.workspace_size = workspace_size
            self.max_batch_size = max_batch_size
            self.min_subgraph_size = min_subgraph_size
            self.precision = precision
            self.use_static = use_static
            self.use_calib_mode = use_calib_mode

    class DynamicShapeParam:
        '''
        Prepare TensorRT subgraph engine dynamic shape parameters.

        Each *_input_shape is a dict mapping input names to shapes; all
        three dicts must share the same keys for dynamic shape to be used.
        '''

        def __init__(self, min_input_shape, max_input_shape, optim_input_shape,
                     disable_trt_plugin_fp16):
            self.min_input_shape = min_input_shape
            self.max_input_shape = max_input_shape
            self.optim_input_shape = optim_input_shape
            self.disable_trt_plugin_fp16 = disable_trt_plugin_fp16

    def __init__(self, methodName='runTest'):
        super(TrtLayerAutoScanTest, self).__init__(methodName)
        # Defaults: FP32 TensorRT engine, no dynamic-shape info configured.
        self.trt_param = self.TensorRTParam(
            workspace_size=0,
            max_batch_size=4,
            min_subgraph_size=0,
            precision=paddle_infer.PrecisionType.Float32,
            use_static=False,
            use_calib_mode=False)
        self.dynamic_shape = self.DynamicShapeParam({}, {}, {}, False)

    def _dynamic_shape_ready(self):
        """True when all three dynamic-shape dicts are non-empty and keyed
        by the same input names (the precondition for enabling TRT dynamic
        shapes).

        BUG FIX: these checks previously read `opt_input_shape`, an
        attribute DynamicShapeParam never defines (it stores
        `optim_input_shape`), so configuring dynamic shapes raised
        AttributeError."""
        min_shape = self.dynamic_shape.min_input_shape
        return (len(min_shape) != 0
                and min_shape.keys() == self.dynamic_shape.max_input_shape.keys()
                and min_shape.keys() == self.dynamic_shape.optim_input_shape.keys())

    def update_program_input_and_weight_with_attr(self, op_attr_list):
        """Hook for subclasses: populate self.program_inputs /
        self.program_weights / self.program_outputs for the sampled attrs."""
        raise NotImplementedError

    @abc.abstractmethod
    def sample_program_configs(self):
        """Yield a ProgramConfig for every combination of op attributes
        declared in self.ops_config."""
        all_op_attrs_keys = []
        all_op_attrs_values = []
        for op_config in self.ops_config:
            all_op_attrs_keys.append(list(op_config["op_attrs"].keys()))
            all_op_attrs_values.extend(list(op_config["op_attrs"].values()))
        if len(all_op_attrs_values) == 0:
            # No attributes at all: still produce one (empty) sample.
            all_op_attrs_values.append([None])
        for attrs_sample in itertools.product(*all_op_attrs_values):
            op_attr_list = []
            index = 0
            ops = []
            log_str = 'TEST_CASE: '
            for i in range(len(self.ops_config)):
                op_config = self.ops_config[i]
                # Slice this op's share of the flattened attribute sample.
                op_attr = dict(
                    zip(
                        list(op_config["op_attrs"].keys()), attrs_sample[
                            index:index + len(op_config["op_attrs"])]))
                if i != len(self.ops_config) - 1:
                    log_str += op_config['op_type'] + str(op_attr) + ' + '
                else:
                    log_str += op_config['op_type'] + str(op_attr)
                op_attr_list.append(op_attr)
                index = index + len(op_config["op_attrs"])
                ops.append(
                    OpConfig(
                        type=op_config["op_type"],
                        inputs=op_config["op_inputs"],
                        outputs=op_config["op_outputs"],
                        attrs=op_attr))
            logging.info(log_str)
            self.update_program_input_and_weight_with_attr(op_attr_list)
            # if no weight need to save, we create a place_holder to help
            # serialize params.
            if not self.program_weights:
                self.program_weights = {
                    "place_holder_weight": TensorConfig(
                        shape=[1], data=np.array([1]).astype(np.float32))
                }
            program_config = ProgramConfig(
                ops=ops,
                weights=self.program_weights,
                inputs=self.program_inputs,
                outputs=self.program_outputs)
            yield program_config

    def create_program_config(
            self, use_trt=True,
            precision_mode=paddle_infer.PrecisionType.Float32):
        """Build a paddle_infer.Config for GPU inference, optionally with
        the TensorRT subgraph engine (and dynamic shapes if configured).

        NOTE(review): despite the name this creates a *predictor* config,
        not a program config; the name is kept for API compatibility.
        """
        config = paddle_infer.Config()
        config.disable_glog_info()
        config.enable_use_gpu(100, 0)
        if use_trt:
            config.switch_ir_debug()
            config.enable_tensorrt_engine(
                max_batch_size=self.trt_param.max_batch_size,
                workspace_size=self.trt_param.workspace_size,
                min_subgraph_size=self.trt_param.min_subgraph_size,
                precision_mode=precision_mode,
                use_static=self.trt_param.use_static,
                use_calib_mode=self.trt_param.use_calib_mode)
            if self._dynamic_shape_ready():
                config.set_trt_dynamic_shape_info(
                    self.dynamic_shape.min_input_shape,
                    self.dynamic_shape.max_input_shape,
                    self.dynamic_shape.optim_input_shape,
                    self.dynamic_shape.disable_trt_plugin_fp16)
        return config

    @abc.abstractmethod
    def sample_predictor_configs(self):
        """Yield a plain-GPU predictor config followed by a TensorRT one,
        logging which shape mode / precision the TRT config uses."""

        def precision_to_str(p):
            # Human-readable name for the logged test header.
            if p == paddle_infer.PrecisionType.Float32:
                return 'float32'
            elif p == paddle_infer.PrecisionType.Half:
                return 'half'
            elif p == paddle_infer.PrecisionType.Int8:
                return 'int8'
            else:
                raise NotImplementedError('not supported type.')

        trt_log_str = ''
        if self._dynamic_shape_ready():
            trt_log_str += 'dynamic_shape '
        else:
            trt_log_str += 'static_shape '
        trt_log_str += precision_to_str(self.trt_param.precision)
        logging.info(' --------- gpu inference ---------')
        yield self.create_program_config(use_trt=False)
        logging.info(' --------- trt ' + trt_log_str +
                     ' inference ---------')
        yield self.create_program_config(
            use_trt=True, precision_mode=self.trt_param.precision)
|
{"hexsha": "715006771878795674d9391b926be40d2ed27bc1", "size": 7338, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/paddle/fluid/tests/unittests/ir/inference/trt_layer_auto_scan_test.py", "max_stars_repo_name": "shiyutang/Paddle", "max_stars_repo_head_hexsha": "5c27c2c00bdb69619fa2bf715f6a0e0708579569", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-03T14:11:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-03T14:11:48.000Z", "max_issues_repo_path": "python/paddle/fluid/tests/unittests/ir/inference/trt_layer_auto_scan_test.py", "max_issues_repo_name": "shiyutang/Paddle", "max_issues_repo_head_hexsha": "5c27c2c00bdb69619fa2bf715f6a0e0708579569", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/paddle/fluid/tests/unittests/ir/inference/trt_layer_auto_scan_test.py", "max_forks_repo_name": "shiyutang/Paddle", "max_forks_repo_head_hexsha": "5c27c2c00bdb69619fa2bf715f6a0e0708579569", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.7666666667, "max_line_length": 91, "alphanum_fraction": 0.6113382393, "include": true, "reason": "import numpy", "num_tokens": 1457}
|
# coding: utf-8
# 2021/3/23 @ tongshiwei
import logging
import numpy as np
import torch
from tqdm import tqdm
from torch import nn
from EduCDM import CDM
from sklearn.metrics import roc_auc_score, accuracy_score
class MFNet(nn.Module):
    """Matrix factorization network.

    Embeds users and items, concatenates the two embeddings, and maps
    them through a linear layer plus sigmoid to a response probability.
    """

    def __init__(self, user_num, item_num, latent_dim):
        """user_num/item_num: vocabulary sizes; latent_dim: embedding width."""
        super(MFNet, self).__init__()
        self.user_num = user_num
        self.item_num = item_num
        self.latent_dim = latent_dim
        self.user_embedding = nn.Embedding(user_num, latent_dim)
        self.item_embedding = nn.Embedding(item_num, latent_dim)
        # Input is the user and item embeddings concatenated, hence 2x width.
        self.response = nn.Linear(2 * latent_dim, 1)

    def forward(self, user_id, item_id):
        """Return predicted response probabilities, same shape as user_id."""
        u_vec = self.user_embedding(user_id)
        i_vec = self.item_embedding(item_id)
        logits = self.response(torch.cat([u_vec, i_vec], dim=-1))
        return torch.squeeze(torch.sigmoid(logits), dim=-1)
class MCD(CDM):
    """Matrix factorization based Cognitive Diagnosis Model.

    Wraps an MFNet and implements the EduCDM CDM interface:
    train / eval / save / load.
    """

    def __init__(self, user_num, item_num, latent_dim):
        super(MCD, self).__init__()
        self.mf_net = MFNet(user_num, item_num, latent_dim)

    def train(self, train_data, test_data=None, *, epoch: int, device="cpu", lr=0.001) -> ...:
        """Fit the network with BCE loss using Adam.

        train_data / test_data yield (user_id, item_id, response) batches.
        Prints the mean loss per epoch and, when test_data is given, the
        AUC/accuracy after each epoch.
        """
        # BUG FIX: the network itself was never moved to `device`, so
        # training with device="cuda" failed (inputs on GPU, weights on
        # CPU). This is a no-op for the default device="cpu".
        self.mf_net = self.mf_net.to(device)
        loss_function = nn.BCELoss()
        trainer = torch.optim.Adam(self.mf_net.parameters(), lr)
        for e in range(epoch):
            losses = []
            for batch_data in tqdm(train_data, "Epoch %s" % e):
                user_id, item_id, response = batch_data
                user_id: torch.Tensor = user_id.to(device)
                item_id: torch.Tensor = item_id.to(device)
                predicted_response: torch.Tensor = self.mf_net(user_id, item_id)
                response: torch.Tensor = response.to(device)
                loss = loss_function(predicted_response, response)
                # back propagation
                trainer.zero_grad()
                loss.backward()
                trainer.step()
                losses.append(loss.mean().item())
            print("[Epoch %d] LogisticLoss: %.6f" % (e, float(np.mean(losses))))
            if test_data is not None:
                auc, accuracy = self.eval(test_data, device=device)
                print("[Epoch %d] auc: %.6f, accuracy: %.6f" % (e, auc, accuracy))

    def eval(self, test_data, device="cpu") -> tuple:
        """Return (AUC, accuracy) over test_data; 0.5 is the decision
        threshold for accuracy."""
        self.mf_net = self.mf_net.to(device)
        self.mf_net.eval()
        y_pred = []
        y_true = []
        # no_grad: inference only — avoids building autograd graphs.
        with torch.no_grad():
            for batch_data in tqdm(test_data, "evaluating"):
                user_id, item_id, response = batch_data
                user_id: torch.Tensor = user_id.to(device)
                item_id: torch.Tensor = item_id.to(device)
                pred: torch.Tensor = self.mf_net(user_id, item_id)
                y_pred.extend(pred.tolist())
                y_true.extend(response.tolist())
        # Restore training mode so a subsequent train() epoch behaves normally.
        self.mf_net.train()
        return roc_auc_score(y_true, y_pred), accuracy_score(y_true, np.array(y_pred) >= 0.5)

    def save(self, filepath):
        """Serialize network weights to `filepath`."""
        torch.save(self.mf_net.state_dict(), filepath)
        logging.info("save parameters to %s" % filepath)

    def load(self, filepath):
        """Load network weights previously written by save()."""
        self.mf_net.load_state_dict(torch.load(filepath))
        logging.info("load parameters from %s" % filepath)
|
{"hexsha": "4233812bc509df9fef7c9a1ef10c37d25915b5dd", "size": 3211, "ext": "py", "lang": "Python", "max_stars_repo_path": "EduCDM/MCD/MCD.py", "max_stars_repo_name": "24miaoge/EduCDM", "max_stars_repo_head_hexsha": "49f7cc28dcef748624fbd7cc7a524826abc5f37e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-02T05:44:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-02T05:44:43.000Z", "max_issues_repo_path": "EduCDM/MCD/MCD.py", "max_issues_repo_name": "24miaoge/EduCDM", "max_issues_repo_head_hexsha": "49f7cc28dcef748624fbd7cc7a524826abc5f37e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EduCDM/MCD/MCD.py", "max_forks_repo_name": "24miaoge/EduCDM", "max_forks_repo_head_hexsha": "49f7cc28dcef748624fbd7cc7a524826abc5f37e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.908045977, "max_line_length": 99, "alphanum_fraction": 0.6178760511, "include": true, "reason": "import numpy", "num_tokens": 747}
|
from sklearn.decomposition import PCA
import load_data
import numpy as np
"""Using PCA(principle component analisys) to reduce the dimensions of data"""
#loading mnist data
x_scaled, y = load_data.fetch_data()
#separating training and testing datas
train_x = x_scaled[:60000, :]
train_y = y[:60000]
test_x = x_scaled[60000:, :]
test_y = y[60000:]
#creating a pca object
pca = PCA(.95) #0.95 percentage of information will be preserved.
pca.fit(train_x)
#resampling the data to new dimensions
train_x = pca.transform(train_x)
test_x = pca.transform(test_x)
#saving processed data into .npy file
np.save('train_x', train_x )
np.save('train_y', train_y )
np.save('test_x', test_x)
np.save('test_y', test_y)
|
{"hexsha": "9e31031190f5f704d254e0a67fcc980d394c2a62", "size": 713, "ext": "py", "lang": "Python", "max_stars_repo_path": "pca.py", "max_stars_repo_name": "ShafeekSaleem/MNIST", "max_stars_repo_head_hexsha": "f738ce3f43483bdba93d120b0684f53e300718f6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pca.py", "max_issues_repo_name": "ShafeekSaleem/MNIST", "max_issues_repo_head_hexsha": "f738ce3f43483bdba93d120b0684f53e300718f6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pca.py", "max_forks_repo_name": "ShafeekSaleem/MNIST", "max_forks_repo_head_hexsha": "f738ce3f43483bdba93d120b0684f53e300718f6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5862068966, "max_line_length": 78, "alphanum_fraction": 0.747545582, "include": true, "reason": "import numpy", "num_tokens": 196}
|
MODULE mecih_I
! Explicit interface for SUBROUTINE mecih so callers that USE this module
! get compile-time argument checking.
INTERFACE
!...Generated by Pacific-Sierra Research 77to90 4.4G 10:47:25 03/09/06
SUBROUTINE mecih (DIAG, CIMAT, NMOS, LAB, XY)
USE vast_kind_param,ONLY: DOUBLE
! DIAG : assumed-size real input array
! CIMAT: assumed-size real output array, filled by mecih
! XY   : assumed-size real input array
! LAB  : integer input
! NMOS : not declared here — relies on implicit typing (INTEGER by the
!        I-N rule, since there is no IMPLICIT NONE)
REAL(DOUBLE), DIMENSION(*), INTENT(IN) :: DIAG
REAL(DOUBLE), DIMENSION(*), INTENT(out) :: cimat
REAL(DOUBLE), DIMENSION(*), INTENT(in) :: xy
INTEGER, INTENT(IN) :: LAB
END SUBROUTINE
END INTERFACE
END MODULE
|
{"hexsha": "6ed0fe2359f1aec46a3fb25e0100b9ab93dd0aee", "size": 479, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "2006_MOPAC7.1/src_interfaces/mecih_I.f90", "max_stars_repo_name": "openmopac/MOPAC-archive", "max_stars_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-16T20:53:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T20:54:11.000Z", "max_issues_repo_path": "2006_MOPAC7.1/src_interfaces/mecih_I.f90", "max_issues_repo_name": "openmopac/MOPAC-archive", "max_issues_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2006_MOPAC7.1/src_interfaces/mecih_I.f90", "max_forks_repo_name": "openmopac/MOPAC-archive", "max_forks_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8461538462, "max_line_length": 76, "alphanum_fraction": 0.5866388309, "num_tokens": 152}
|
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import pickle
import re
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
# Fetch the stopword corpus up front so stopwords.words() below works on a
# fresh machine (no-op if already downloaded).
nltk.download('stopwords')
from nltk.corpus import stopwords
def process_text(review):
    """Normalize a raw HTML review into a cleaned, space-joined token string.

    Strips markup, drops non-letter characters, lowercases, and removes
    English stopwords.
    """
    # Extract the visible text from the HTML markup.
    text = BeautifulSoup(review, features="html.parser").get_text()
    # Keep letters only; everything else becomes a space.
    letters_only = re.sub("[^a-zA-Z]", " ", text)
    tokens = letters_only.lower().split()
    # Drop common English stopwords.
    stopword_set = set(stopwords.words("english"))
    kept_tokens = [tok for tok in tokens if tok not in stopword_set]
    return " ".join(kept_tokens)
def train(X_train, y_train):
    """Fit a TF-IDF + random forest sentiment model and persist both the
    model and the vectorizer to ./models/.

    Parameters
    ----------
    X_train : pandas.Series of str
        Raw review texts.
    y_train : pandas.Series
        Binary sentiment labels aligned with X_train.
    """
    # Pre-process text
    X = X_train.apply(lambda x: process_text(x))
    # Transform text to features
    vect = TfidfVectorizer()
    vect.fit(X)
    X_featurized = vect.transform(X)
    # Train model
    model = RandomForestClassifier()
    model.fit(X_featurized, y_train)
    # Save model and vectorizer.
    # BUG FIX: previously the file objects passed to pickle.dump were never
    # closed; `with` guarantees the handles are flushed and released.
    with open('./models/model.pkl', 'wb') as f:
        pickle.dump(model, f)
    with open('./models/vect.pkl', 'wb') as f:
        pickle.dump(vect, f)
def predict(review, model, vect):
    """Return (label, probability) for a single raw review string.

    `model` is a fitted classifier and `vect` the matching fitted
    vectorizer (as produced by train()).
    """
    label = {0: 'negative', 1: 'positive'}
    cleaned = process_text(review)
    features = vect.transform([cleaned])
    predicted_class = model.predict(features)[0]
    # Confidence = highest class probability.
    confidence = np.max(model.predict_proba(features))
    return label[predicted_class], confidence
if __name__ == '__main__':
    # Run model training
    # quoting=3 is csv.QUOTE_NONE: quote characters in the reviews are
    # treated as ordinary text, not field delimiters.
    train_data = pd.read_csv('data/labeledTrainData.tsv',header=0,delimiter='\t',quoting=3)
    X_train = train_data['review']
    y_train = train_data['sentiment']
    train(X_train,y_train)
    print('Training complete')
|
{"hexsha": "46aae082f26230ef84e66583b4868e42ff0ea6b8", "size": 1743, "ext": "py", "lang": "Python", "max_stars_repo_path": "5_deployment/streamlit_deploy_example/model.py", "max_stars_repo_name": "DukeAIPI/AIPI540-Deep-Learning-Applications", "max_stars_repo_head_hexsha": "1f8786ef45dd0405608a8782d15e2498153e67a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2022-01-03T22:25:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:49:38.000Z", "max_issues_repo_path": "5_deployment/streamlit_deploy_example/model.py", "max_issues_repo_name": "DukeAIPI/AIPI540-Deep-Learning-Applications", "max_issues_repo_head_hexsha": "1f8786ef45dd0405608a8782d15e2498153e67a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-01-10T14:44:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T23:25:20.000Z", "max_forks_repo_path": "5_deployment/streamlit_deploy_example/model.py", "max_forks_repo_name": "DukeAIPI/AIPI540-Deep-Learning-Applications", "max_forks_repo_head_hexsha": "1f8786ef45dd0405608a8782d15e2498153e67a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2022-01-04T02:02:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T16:36:33.000Z", "avg_line_length": 30.5789473684, "max_line_length": 91, "alphanum_fraction": 0.6953528399, "include": true, "reason": "import numpy", "num_tokens": 429}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 25 16:26:32 2021
@author: kibong
"""
# In[]
from AAA import Wav2Vec2Tokenizer, Wav2Vec2ForCTC
from datasets import load_dataset
import soundfile as sf
import sounddevice as sd
import torch
import numpy as np
# load model and tokenizer
# Both come from the same pretrained checkpoint id; the tokenizer prepares
# raw waveforms and the CTC head produces per-frame token logits.
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
# define function to read in sound file
def map_to_array(batch):
speech, fs = sf.read(batch["file"])
batch["speech"] = speech
batch["fs"] = fs
return batch
# load dummy dataset and read soundfiles
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
ds = ds.map(map_to_array)
# Play back the first clip and report its sample rate / duration.
waveform = np.array(ds["speech"][0])
fs = np.array(ds["fs"][0])
sd.play(waveform, fs)
print('Sample rate:',fs,'Hz')
print('Total time:',len(waveform)/fs,'s')
# tokenize
input_values = tokenizer(ds["speech"][:2], return_tensors="pt", padding="longest").input_values # batch of the first 2 clips, padded to equal length
# retrieve logits
logits = model(input_values).logits
# take argmax and decode
# Greedy decoding: pick the most likely token id per frame, then let
# batch_decode map the id sequences back to text.
predicted_ids = torch.argmax(logits, dim=-1)
transcription = tokenizer.batch_decode(predicted_ids)
# In[]
# from datasets import load_dataset
# from AAA import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
# import soundfile as sf
# import torch
# from jiwer import wer
# librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
# model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
# tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
# def map_to_array(batch):
# speech, _ = sf.read(batch["file"])
# batch["speech"] = speech
# return batch
# librispeech_eval = librispeech_eval.map(map_to_array)
# def map_to_pred(batch):
# input_values = tokenizer(batch["speech"], return_tensors="pt", padding="longest").input_values
# with torch.no_grad():
# logits = model(input_values.to("cuda")).logits
# predicted_ids = torch.argmax(logits, dim=-1)
# transcription = tokenizer.batch_decode(predicted_ids)
# batch["transcription"] = transcription
# return batch
# result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1, remove_columns=["speech"])
# print("WER:", wer(result["text"], result["transcription"]))
|
{"hexsha": "eea706ca676425299c07f5fd5c4faf98ec462d66", "size": 2438, "ext": "py", "lang": "Python", "max_stars_repo_path": "wav2vec2_libribase_example.py", "max_stars_repo_name": "kkb131/radar2txt", "max_stars_repo_head_hexsha": "4deb26db596c314233299c40684c4f84876dfd6d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-10-03T07:42:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-12T04:55:30.000Z", "max_issues_repo_path": "wav2vec2_libribase_example.py", "max_issues_repo_name": "kkb131/radar2txt", "max_issues_repo_head_hexsha": "4deb26db596c314233299c40684c4f84876dfd6d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wav2vec2_libribase_example.py", "max_forks_repo_name": "kkb131/radar2txt", "max_forks_repo_head_hexsha": "4deb26db596c314233299c40684c4f84876dfd6d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7317073171, "max_line_length": 112, "alphanum_fraction": 0.7034454471, "include": true, "reason": "import numpy", "num_tokens": 653}
|
import numpy as np
import matplotlib.pyplot as plt
#---------------------Import coordinate file-------------------------#
# x and l are integer 2-D arrays; as used by img_gen below, x[ii][jj] is the
# band center and l[ii][jj] the half-width for row jj of column ii.
f_x = 'simple_bulk/img/subdataset1_geometry/x.txt'
f_l = 'simple_bulk/img/subdataset1_geometry/l.txt'
x = np.loadtxt(f_x, dtype = int)
l = np.loadtxt(f_l, dtype = int)
#-------------------Column Parameters---------------------------------#
L = 40 # length of column
w = 5 # width of column
#-------------------Generate Image-------------------------------------#
def img_gen(L, w, x, l, ii):
    """Generate the boolean image of column ``ii``.

    Parameters
    ----------
    L : int
        Column length (image height, rows).
    w : int
        Column width (image width, columns).
    x : 2-D int array
        Per-column, per-row band-center coordinates (indexed x[ii][jj]).
    l : 2-D int array
        Per-column, per-row half-widths (indexed l[ii][jj]).
    ii : int
        Index of the column geometry to render.

    Returns
    -------
    numpy.ndarray of bool, shape (L, w)
        True where material is present; top and bottom rows fully solid.
    """
    # (Removed the no-op self-assignments `x = x; l = l` from the original.)
    img = np.zeros((L, w), dtype=bool)
    # Top and bottom rows are fully solid end caps.
    img[0, 0:w] = 1
    img[-1, 0:w] = 1
    # Interior rows: fill a band of half-width l centered at x.
    for jj in range(1, L - 1):
        img[jj, x[ii][jj] - l[ii][jj]:x[ii][jj] + l[ii][jj]] = 1
    return img
img = []
# BUG FIX: the original `range(0, x.shape)` passed a tuple to range(),
# raising TypeError; iterate over the number of column geometries instead.
for ii in range(x.shape[0]):
    img.append(img_gen(L,w,x,l,ii)) #img ouput, save as array of images if want to convert to graph
img = np.asarray(img)
np.save('subdataset1/img/img.npy',img)
|
{"hexsha": "813004577d0875d8992c9b8d236857fe6cfa7fdc", "size": 1069, "ext": "py", "lang": "Python", "max_stars_repo_path": "Domain/subdataset1_domain/subdataset1_img.py", "max_stars_repo_name": "pprachas/ABC_dataset", "max_stars_repo_head_hexsha": "61c915853c0229295e728f869b11b113ee59f098", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Domain/subdataset1_domain/subdataset1_img.py", "max_issues_repo_name": "pprachas/ABC_dataset", "max_issues_repo_head_hexsha": "61c915853c0229295e728f869b11b113ee59f098", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Domain/subdataset1_domain/subdataset1_img.py", "max_forks_repo_name": "pprachas/ABC_dataset", "max_forks_repo_head_hexsha": "61c915853c0229295e728f869b11b113ee59f098", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4102564103, "max_line_length": 99, "alphanum_fraction": 0.5378858746, "include": true, "reason": "import numpy", "num_tokens": 302}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 00:42:53 2020
@author: kai
"""
import time
start = time.time()  # wall-clock start; elapsed time is printed at script end
import numpy as np
import os
import sys
import tensorflow as tf
import cv2
from PIL import Image
import pandas as pd
#if tf.__version__ < '1.4.0':
#  raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')
# NOTE(review): hard-coded Windows checkout path -- the script only runs from
# this exact directory layout.
os.chdir('C:\\tensorflow_models\\research\\object_detection')
#Env setup
# This is needed to display the images.
#%matplotlib inline
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
#Object detection imports
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
#Model preparation
MODEL_NAME = 'cuccumber_saved_model'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'cuccumber.pbtxt')
NUM_CLASSES = 2
#Load a (frozen) Tensorflow model into memory.
# Uses the TF1 graph/session API (tf.GraphDef / tf.gfile) -- requires
# TensorFlow 1.x; these symbols were removed from the TF2 top-level namespace.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# Loading label map: maps the numeric class ids predicted by the model to
# human-readable category names for the visualization step.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Helper code
def load_image_into_numpy_array(image):
    """Return the pixel data of *image* as an (height, width, 3) uint8 array."""
    width, height = image.size
    pixels = np.array(image.getdata())
    return pixels.reshape((height, width, 3)).astype(np.uint8)
#Detection
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_DIRS = os.listdir('C:\\tensorflow_models\\research\\object_detection\\test_images')
os.chdir('C:\\tensorflow_models\\research\\object_detection\\test_images')
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
output_image_path = ("C:\\tensorflow_models\\research\\object_detection\\_result")
# Additionally the coordinates of the detected boxes are written to a .csv file.
output_csv_path = ("C:\\tensorflow_models\\research\\object_detection\\_result")
# One pass per sub-folder of test_images; a fresh TF session is opened for each.
for image_folder in TEST_IMAGE_DIRS:
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Definite input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represent how level of confidence for each of the objects.
            # Score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            TEST_IMAGE_PATHS = os.listdir(os.path.join(image_folder))
            # NOTE(review): plain string concatenation, not os.path.join --
            # this only works because output_image_path has no trailing
            # separator and image_folder is a bare directory name.
            os.makedirs(output_image_path+image_folder)
            data = pd.DataFrame()
            for image_path in TEST_IMAGE_PATHS:
                image = Image.open(image_folder + '//'+image_path)
                width, height = image.size
                # the array based representation of the image will be used later in order to prepare the
                # result image with boxes and labels on it.
                image_np = load_image_into_numpy_array(image)
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                # Actual detection.
                (boxes, scores, classes, num) = sess.run(
                    [detection_boxes, detection_scores, detection_classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                # Visualization of the results of a detection (draws boxes
                # and labels in-place on image_np).
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=8)
                #write images
                # Save the annotated image (convert RGB -> BGR for OpenCV).
                cv2.imwrite(output_image_path+image_folder+'\\'+image_path.split('\\')[-1],cv2.cvtColor(image_np , cv2.COLOR_RGB2BGR))
                # Keep only detections above the 0.5 confidence threshold.
                s_boxes = boxes[scores > 0.5]
                s_classes = classes[scores > 0.5]
                s_scores=scores[scores>0.5]
                #write table
                # Append one csv row per kept detection; box coordinates are
                # normalized [0, 1] values, rescaled here to pixels.
                for i in range(len(s_classes)):
                    newdata= pd.DataFrame(0, index=range(1), columns=range(7))
                    newdata.iloc[0,0] = image_path.split("\\")[-1].split('.')[0]
                    newdata.iloc[0,1] = s_boxes[i][0]*height #ymin
                    newdata.iloc[0,2] = s_boxes[i][1]*width #xmin
                    newdata.iloc[0,3] = s_boxes[i][2]*height #ymax
                    newdata.iloc[0,4] = s_boxes[i][3]*width #xmax
                    newdata.iloc[0,5] = s_scores[i]
                    newdata.iloc[0,6] = s_classes[i]
                    # NOTE(review): DataFrame.append was removed in pandas 2.0;
                    # this script needs an older pandas (or pd.concat).
                    data = data.append(newdata)
            data.to_csv(output_csv_path+image_folder+'.csv',index = False)
end = time.time()
print("Execution Time: ", end - start)
|
{"hexsha": "762f0ba328927034542b6b12e4e6f00fb49ee8f0", "size": 5960, "ext": "py", "lang": "Python", "max_stars_repo_path": "research/object_detection/testing.py", "max_stars_repo_name": "kailliang/Object_Detection", "max_stars_repo_head_hexsha": "34a6592980c09d021994f51e297556bc98e1461a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "research/object_detection/testing.py", "max_issues_repo_name": "kailliang/Object_Detection", "max_issues_repo_head_hexsha": "34a6592980c09d021994f51e297556bc98e1461a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "research/object_detection/testing.py", "max_forks_repo_name": "kailliang/Object_Detection", "max_forks_repo_head_hexsha": "34a6592980c09d021994f51e297556bc98e1461a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.25, "max_line_length": 129, "alphanum_fraction": 0.6661073826, "include": true, "reason": "import numpy", "num_tokens": 1399}
|
! DESCRIPTION:
!  Use a procedure pointer to invoke different subprograms possessing identical interfaces.
!  Compare it with a function pointer in C.
module Calc_mod
  implicit none
  private
  public :: Calc_debug, Calc_normal, Calc_proc
  ! Abstract interface shared by both implementations below; procedure
  ! pointers declared with this interface may target either one.
  interface
     function Calc_proc(real_arg, opt_format) result (ret_val)
       real, intent(in) :: real_arg
       character (*), intent(in), optional :: opt_format
       real :: ret_val
     end function Calc_proc
  end interface
contains
  ! Debug variant: prints a marker and returns 0.0.
  ! (arg1 and opt_format are accepted to satisfy the interface but unused.)
  function Calc_debug(arg1, opt_format) result(ret_val)
    real, intent(in) :: arg1
    character (*), intent(in), optional :: opt_format
    real :: ret_val
    ret_val = 0.0
    print *,"WITH DEBUG"
  end function Calc_debug
  ! Normal variant: prints a marker and returns 0.0.
  function Calc_normal(arg1, opt_format) result(ret_val)
    real, intent(in) :: arg1
    character (*), intent(in), optional :: opt_format
    real :: ret_val
    ret_val = 0.0
    print *,"NORMAL"
  end function Calc_normal
end module Calc_mod
program Proc_pointer_test
  use Calc_mod, only: Calc_debug, Calc_normal, Calc_proc
  implicit none
  ! Procedure pointer conforming to the Calc_proc interface; retargeted on
  ! every pass of the loop below.
  procedure (Calc_proc), pointer :: calc_func_ptr => null ()
  real :: func_value = 0.0, real_arg = 0.0
  integer :: i_two_pass
  logical :: debug_on = .false.
  ! Two passes: pass 1 calls the "normal" target without the optional
  ! argument, pass 2 calls the "debug" target with it.
  do i_two_pass = 1, 2
     if (debug_on) then
        calc_func_ptr => Calc_debug
     else
        calc_func_ptr => Calc_normal
     end if
     select case(i_two_pass)
     case (1)
        func_value = calc_func_ptr(real_arg)
        debug_on = .not. debug_on ! make sure next time it will select the other case.
     case(2)
        ! Fortran names are case-insensitive: Calc_func_ptr == calc_func_ptr.
        func_value = Calc_func_ptr (real_arg, "WM")
     end select
  end do
end program Proc_pointer_test
|
{"hexsha": "ec5dac217d85ab73b804921824873e7d6821b470", "size": 1778, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "modern_fotran/NewAttribute/procedure_pointer.f90", "max_stars_repo_name": "ComplicatedPhenomenon/Fortran_Takeoff", "max_stars_repo_head_hexsha": "a13180050367e59a91973af96ab680c2b76097be", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modern_fotran/NewAttribute/procedure_pointer.f90", "max_issues_repo_name": "ComplicatedPhenomenon/Fortran_Takeoff", "max_issues_repo_head_hexsha": "a13180050367e59a91973af96ab680c2b76097be", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modern_fotran/NewAttribute/procedure_pointer.f90", "max_forks_repo_name": "ComplicatedPhenomenon/Fortran_Takeoff", "max_forks_repo_head_hexsha": "a13180050367e59a91973af96ab680c2b76097be", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5373134328, "max_line_length": 97, "alphanum_fraction": 0.6361079865, "num_tokens": 439}
|
# --------------
#Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#path of the data file- path
# NOTE(review): `path` is not defined in this file -- presumably injected by
# the grading environment before this script runs; verify before reuse.
data = pd.read_csv(path, sep=',', delimiter=None)
# Normalize the "-" placeholder gender into an explicit category.
data['Gender'].replace(to_replace="-", value="Agender", inplace=True)
#print(data)
# Bar chart of character counts per gender.
gender_count = data['Gender'].value_counts()
gender_count.plot(kind='bar', stacked=False, figsize=(5,5))
plt.ylabel("No. of SHeros")
plt.xticks(rotation=45)
#Code starts here
# --------------
#Code starts here
# Pie chart of the character-alignment distribution.
alignment = data['Alignment'].value_counts()
print(alignment)
alignment.plot(kind='pie', label="Character Alignment" ,autopct="%1.1f%%")
# --------------
#Code starts here
# Pearson correlation computed by hand: cov(a, b) / (std(a) * std(b)),
# once for Strength/Combat and once for Intelligence/Combat.
sc_df = data[['Strength', 'Combat']]
ic_df = data[['Intelligence', 'Combat']]
sc_covariance = sc_df.Strength.cov(sc_df.Combat)
ic_covariance = ic_df.Intelligence.cov(ic_df.Combat)
sc_strength = sc_df['Strength'].std()
sc_combat = sc_df['Combat'].std()
ic_intelligence = ic_df['Intelligence'].std()
ic_combat = ic_df['Combat'].std()
sc_pearson = sc_covariance / (sc_strength * sc_combat)
print(sc_pearson)
ic_pearson = ic_covariance / (ic_intelligence * ic_combat)
print(ic_pearson)
# --------------
#Code starts here
# Characters whose Total score lies in the top 1% of the distribution.
total_high = data['Total'].quantile(q=0.99)
super_best = data[data['Total'] > total_high]
super_best_names = list(super_best['Name'])
print(super_best_names)
print(type(super_best_names))
# --------------
#Code starts here
# Side-by-side boxplots of three attributes for the top characters.
fig, (ax_1, ax_2, ax_3) = plt.subplots(1,3)
ax_1.boxplot(super_best['Intelligence'])
ax_2.boxplot(super_best['Speed'])
ax_3.boxplot(super_best['Power'])
ax_1.set_title('Intelligence')
ax_2.set_title('Speed')
ax_3.set_title('Power')
#fig.subplots_adjust(hspace=0.7)
plt.tight_layout()
|
{"hexsha": "fc06d3f0921a2f7b6b47cc36398dccc74985fd90", "size": 1750, "ext": "py", "lang": "Python", "max_stars_repo_path": "Statistics/code.py", "max_stars_repo_name": "hn1201/ga-learner-dsmp-repo", "max_stars_repo_head_hexsha": "c42656551e930d13df98f6631bb63fe179035ed8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Statistics/code.py", "max_issues_repo_name": "hn1201/ga-learner-dsmp-repo", "max_issues_repo_head_hexsha": "c42656551e930d13df98f6631bb63fe179035ed8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Statistics/code.py", "max_forks_repo_name": "hn1201/ga-learner-dsmp-repo", "max_forks_repo_head_hexsha": "c42656551e930d13df98f6631bb63fe179035ed8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0263157895, "max_line_length": 75, "alphanum_fraction": 0.6805714286, "include": true, "reason": "import numpy", "num_tokens": 469}
|
//==================================================================================================
/*!
@file
@copyright 2015 NumScale SAS
@copyright 2015 J.T. Lapreste
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_ARCH_COMMON_GENERIC_FUNCTION_REC_HPP_INCLUDED
#define BOOST_SIMD_ARCH_COMMON_GENERIC_FUNCTION_REC_HPP_INCLUDED
#include <boost/simd/function/rec.hpp>
#include <boost/dispatch/function/overload.hpp>
#include <boost/config.hpp>
namespace boost { namespace simd { namespace ext
{
  namespace bd = boost::dispatch;

  // Dispatch overload of rec_ (the reciprocal, 1/x) for any generic value
  // type T when called with the fast_tag decorator: this generic fallback
  // simply forwards to the regular rec() implementation.  Architectures
  // with a fast approximate-reciprocal instruction provide their own
  // specializations elsewhere.
  BOOST_DISPATCH_OVERLOAD ( rec_
                          , (typename T)
                          , bd::cpu_
                          , bd::generic_<bd::unspecified_<T>>
                          , boost::simd::fast_tag
                          )
  {
    BOOST_FORCEINLINE T operator()(T const& a
                                  , fast_tag const& ) const BOOST_NOEXCEPT
    {
      return rec(a);
    }
  };
} } }
#endif
|
{"hexsha": "88aef82445affc87262c9f6ba98f5767eabd5e03", "size": 1184, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/simd/arch/common/generic/function/rec.hpp", "max_stars_repo_name": "yaeldarmon/boost.simd", "max_stars_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/simd/arch/common/generic/function/rec.hpp", "max_issues_repo_name": "yaeldarmon/boost.simd", "max_issues_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/simd/arch/common/generic/function/rec.hpp", "max_forks_repo_name": "yaeldarmon/boost.simd", "max_forks_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1578947368, "max_line_length": 100, "alphanum_fraction": 0.5084459459, "num_tokens": 224}
|
using DataFrames
using Gadfly
using Colors
include("theory.jl")
include("transitions.jl")
# Fixed-parameter wrappers around the chain transitions and the theoretical
# formulas (defined in transitions.jl / theory.jl): the trailing integer pins
# the chain-length parameter d, so each wrapper has a uniform 4/5-argument
# signature and can be passed around as a `transition` callable.
tr_chains_d1(θ, N_H, N_E, p_H, p_E) = tr_chains(θ, N_H, N_E, p_H, p_E, 1)
tr_chains_d2(θ, N_H, N_E, p_H, p_E) = tr_chains(θ, N_H, N_E, p_H, p_E, 2)
tr_chains_d10(θ, N_H, N_E, p_H, p_E) = tr_chains(θ, N_H, N_E, p_H, p_E, 10)
tr_chains_d50(θ, N_H, N_E, p_H, p_E) = tr_chains(θ, N_H, N_E, p_H, p_E, 50)
tr_chains_d100(θ, N_H, N_E, p_H, p_E) = tr_chains(θ, N_H, N_E, p_H, p_E, 100)
conj_chains_d1(λ_E, λ_H, p_H, p_E) = conj_chains(λ_E, λ_H, p_H, p_E, 1)
theory_chains_d1(λ_E, λ_H, p_H, p_E) = theory_chains_upper(λ_E, λ_H, p_H, p_E, 1)
conj_chains_d50(λ_E, λ_H, p_H, p_E) = conj_chains(λ_E, λ_H, p_H, p_E, 50)
theory_chains_d50(λ_E, λ_H, p_H, p_E) = theory_chains_upper(λ_E, λ_H, p_H, p_E, 50)
function run_sim(θ, p_E, p_H, T, transition)
    # Simulate the Markov chain for T steps starting from the empty state.
    # `transition` maps the current state (N_H, N_E) to the next one; the
    # full (time, N_E, N_H) trajectory is returned as a DataFrame.
    N_H = 0
    N_E = 0
    # NOTE(review): `Array{Float64}(0)` is pre-1.0 Julia syntax -- this file
    # targets Julia 0.6-era semantics throughout.
    hist = DataFrame(time = Array{Float64}(0),
                    N_E = Array{Float64}(0),
                    N_H = Array{Float64}(0))
    for i in 1:T
        (N_H, N_E) = transition(θ, N_H, N_E, p_H, p_E)
        push!(hist, (i, N_E, N_H))
    end
    return hist #mean(hist[convert(3*T/4):T,:N_H])
end
#Runs markov chain, not the Jump Process.
function run_sim(λ_E, λ_H, p_E, p_H, T, transition)
θ= λ_H/(λ_H + λ_E)
if transition in [theory_prioE_lower, theory_prioE_upper,
conj_prioE, theory_chains_d1,
conj_chains_d1, theory_chains_d50,
conj_chains_d50]
return transition(λ_E, λ_H, p_E, p_H)
else
h = run_sim(θ, p_E, p_H, T, transition)
h[:w_H] = h[:N_H]/λ_H
h[:w_E] = h[:N_E]/λ_E
return mean(h[Int(3*T/4):T, :w_H])
end
end
function run_sim_E(λ_E, λ_H, p_E, p_H, T, transition)
    # Same as the simulated branch of run_sim above, but returns the
    # easy-arrival measure w_E = N_E/λ_E averaged over the last quarter
    # of the horizon.
    θ= λ_H/(λ_H + λ_E)
    h = run_sim(θ, p_E, p_H, T, transition)
    h[:w_H] = h[:N_H]/λ_H
    h[:w_E] = h[:N_E]/λ_E
    return mean(h[Int(3*T/4):T, :w_E])
end
function run_multiple_sim(λ_E_range, λ_H_range, p_E_range, p_H_range, transitions_list; w_E = false)
    # Sweep the full cartesian product of the parameter ranges, evaluating
    # every transition in transitions_list, and collect the results in a
    # DataFrame with one column per transition.
    # NOTE(review): the horizon `T` is read from global scope (not a
    # parameter) -- it must be defined before calling this function.
    df = DataFrame(λ_H = Array{Float64}(0),
                    λ_E = Array{Float64}(0),
                    θ = Array{Float64}(0),
                    p_H = Array{Float64}(0),
                    p_E = Array{Float64}(0))
    for transition in transitions_list
        df[Symbol(transition)] = Array{Float64}(0)
    end
    # Reusable row buffer: 5 parameter columns + one value per transition.
    v = zeros(5 + length(transitions_list))
    for λ_H in λ_H_range
        v[1] = λ_H
        for λ_E in λ_E_range
            v[2] = λ_E
            θ = λ_H/(λ_H + λ_E)
            v[3] = θ
            for p_H in p_H_range
                v[4] = p_H
                for p_E in p_E_range
                    v[5] = p_E
                    for (k, transition) in enumerate(transitions_list)
                        v[5 + k] = run_sim(λ_E, λ_H, p_E, p_H, T, transition)
                        # When w_E is set, report the easy-arrival measure
                        # instead (the run_sim result above is discarded).
                        if w_E
                            v[5 + k] = run_sim_E(λ_E, λ_H, p_E, p_H, T, transition)
                        end
                    end
                    push!(df, v)
                end
            end
        end
    end
    return df
end
|
{"hexsha": "7edc7746ced57591c4886cf3946c60ca6e05a620", "size": 3086, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/run_sim.jl", "max_stars_repo_name": "mburq/matching_thickness", "max_stars_repo_head_hexsha": "7f0c17e9d97be80d4af242e81d4f78b0aac930db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/run_sim.jl", "max_issues_repo_name": "mburq/matching_thickness", "max_issues_repo_head_hexsha": "7f0c17e9d97be80d4af242e81d4f78b0aac930db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/run_sim.jl", "max_forks_repo_name": "mburq/matching_thickness", "max_forks_repo_head_hexsha": "7f0c17e9d97be80d4af242e81d4f78b0aac930db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0681818182, "max_line_length": 100, "alphanum_fraction": 0.546338302, "num_tokens": 1130}
|
string_1 = abcd
efg
string_2 = abc" $\?M
string_3 = \?\\'"
|
{"hexsha": "9ea0b3236e6c43a8f01324a98fc05c68498b2908", "size": 62, "ext": "r", "lang": "R", "max_stars_repo_path": "test/unittest/types/tst.stringconstants.r", "max_stars_repo_name": "alan-maguire/dtrace-utils", "max_stars_repo_head_hexsha": "53b33a89ef7eaeba5ce06d50a4c73fe91c1fa99e", "max_stars_repo_licenses": ["UPL-1.0"], "max_stars_count": 66, "max_stars_repo_stars_event_min_datetime": "2018-04-16T14:28:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T14:36:05.000Z", "max_issues_repo_path": "test/unittest/types/tst.stringconstants.r", "max_issues_repo_name": "tjfontaine/dtrace-utils", "max_issues_repo_head_hexsha": "1bd5b3825ca0dd641694f795734b9bbbfd3f2ebb", "max_issues_repo_licenses": ["UPL-1.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-01-06T16:28:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-10T18:46:58.000Z", "max_forks_repo_path": "test/unittest/types/tst.stringconstants.r", "max_forks_repo_name": "tjfontaine/dtrace-utils", "max_forks_repo_head_hexsha": "1bd5b3825ca0dd641694f795734b9bbbfd3f2ebb", "max_forks_repo_licenses": ["UPL-1.0"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2018-07-23T22:35:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T01:04:36.000Z", "avg_line_length": 7.75, "max_line_length": 20, "alphanum_fraction": 0.564516129, "num_tokens": 30}
|
""" usage: kfold_partition_dataset.py [-h] [-i IMAGEDIR] [-o OUTPUTDIR] [-k KFOLDS] [-x] [-s SEED]
Partition dataset of images into training and testing sets
optional arguments:
-h, --help show this help message and exit
-i IMAGEDIR, --imageDir IMAGEDIR
Path to the folder where the image dataset is stored. If not specified, the CWD will be used.
-o OUTPUTDIR, --outputDir OUTPUTDIR
Path to the output folder where the train and test dirs should be created. Defaults to the same directory as IMAGEDIR.
-k KFOLDS, --kfolds KFOLDS
The number of folds over the total number of images, used for k-fold cross validation. The default is 10.
-x, --xml Set this flag if you want the xml annotation files to be processed and copied over.
-s, --seed SEED Set the seed for shuffling randomization.
"""
import argparse
import math
import os
import random
import re
import sys  # needed by sys.exit() in main()
from shutil import copyfile

# Imports for k-fold cross-validation
from sklearn.model_selection import KFold
import numpy as np
def iterate_dir(source, dest, k, copy_xml):
    """Partition the images in *source* into k cross-validation folds.

    For every fold produced by sklearn's KFold, (k-1)/k of the images are
    copied into ``dest/train_<fold>fold`` and the remaining 1/k into
    ``dest/test_<fold>fold``.  Optionally also copies each image's
    PASCAL-VOC ``.xml`` annotation from ``source/../Annotations``.

    Args:
        source: Directory containing the image files.
        dest: Directory under which the per-fold train/test dirs are created.
        k: Number of folds (n_splits for KFold).
        copy_xml: If True, copy the matching .xml annotation files too.
    """
    source = source.replace('\\', '/')
    dest = dest.replace('\\', '/')

    # Shuffle images first so we get a good test.
    # (KFold below uses shuffle=False, so this shuffle is what randomizes the
    # fold assignment; the seed is set in main().)
    images = [f for f in os.listdir(source)
              if re.search(r'([a-zA-Z0-9\s_\\.\-\(\):])+(.JPG|.jpg|.jpeg|.png)$', f)]
    random.shuffle(images)
    # BUG FIX: was `images == None or len(images) == 0` -- os.listdir never
    # returns None, and `== None` is unidiomatic; an emptiness check suffices.
    if not images:
        print("Error: No images found.")
        sys.exit(1)  # BUG FIX: exit() is the interactive-shell helper; sys.exit is correct here

    # Fold data set into [k] folds.
    # For each fold, use 1/[k] as validation data and [k]-1/[k] as test data.
    np_images = np.array(images)
    kf = KFold(n_splits=k, shuffle=False)
    print(kf)

    # Count keeps track of which fold we are in.
    kfold_count = 0
    for train_index, test_index in kf.split(np_images):
        print(f"Current fold: {kfold_count}")
        print(f"Size of training data: {train_index.size} Size of testing data: {test_index.size}")

        # Make directories.
        train_dir = os.path.join(dest, f'train_{kfold_count}fold')
        test_dir = os.path.join(dest, f'test_{kfold_count}fold')
        if not os.path.exists(train_dir):
            os.makedirs(train_dir)
        if not os.path.exists(test_dir):
            os.makedirs(test_dir)

        # Copy each split's images (and annotations) into its directory.
        # Deduplicates the previously copy-pasted train/test loops.
        for subset_index, subset_dir in ((train_index, train_dir), (test_index, test_dir)):
            for filename in np.take(np_images, subset_index):
                copyfile(os.path.join(source, filename),
                         os.path.join(subset_dir, filename))
                if copy_xml:
                    xml_filename = os.path.splitext(filename)[0] + '.xml'
                    copyfile(os.path.join(source, "../Annotations", xml_filename),
                             os.path.join(subset_dir, xml_filename))

        # Update count.
        kfold_count += 1
# Example command: python3 scripts/preprocessing/kfold_partition_dataset.py -i ../ECUSTFD-resized-/JPEGImages/ -o workspace/ -k 5
def main() -> None:
    """Parse command-line arguments and run the k-fold partitioning.

    Seeds the `random` module (which drives the image shuffle in
    iterate_dir) before delegating to iterate_dir.
    """
    # Initiate argument parser
    parser = argparse.ArgumentParser(description="Partition dataset of images into training and testing sets based on k-folds.",
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '-i', '--imageDir',
        help='Path to the folder where the image dataset is stored. If not specified, the CWD will be used.',
        type=str,
        default=os.getcwd()
    )
    parser.add_argument(
        '-o', '--outputDir',
        help='Path to the output folder where the train and test dirs should be created. '
             'Defaults to the same directory as IMAGEDIR.',
        type=str,
        default=None
    )
    parser.add_argument(
        '-k', '--kfolds',
        help='The number of folds over the total number of images used for cross-validation. The default is 10.',
        default=10,
        type=int)
    parser.add_argument(
        '-x', '--xml',
        help='Set this flag if you want the xml annotation files to be processed and copied over.',
        action='store_true'
    )
    parser.add_argument(
        '-s', '--seed',
        help='Set the seed for shuffling randomization.',
        default=10,
        type=int)
    args = parser.parse_args()

    if args.outputDir is None:
        args.outputDir = args.imageDir

    # NOTE(review): this branch is unreachable -- --imageDir defaults to
    # os.getcwd(), so args.imageDir is never None.  Also requires `import sys`
    # at module top for sys.exit to resolve.
    if args.imageDir is None:
        print("Error: Need image directory.")
        sys.exit(1)

    # Seed random
    random.seed(args.seed)
    print(f"Random seed is {args.seed}.")

    # Now we are ready to start the iteration
    iterate_dir(args.imageDir, args.outputDir, args.kfolds, args.xml)

if __name__ == '__main__':
    main()
|
{"hexsha": "2877f1f9f56dfe1998bf4c5990a19c38a3966486", "size": 5034, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/preprocessing/kfold_partition_dataset.py", "max_stars_repo_name": "kallentu/chowdr", "max_stars_repo_head_hexsha": "47efd86025836e04c251c06f86c32d5519b2e0a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-10-03T02:12:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-09T20:48:46.000Z", "max_issues_repo_path": "scripts/preprocessing/kfold_partition_dataset.py", "max_issues_repo_name": "kallentu/chowdr", "max_issues_repo_head_hexsha": "47efd86025836e04c251c06f86c32d5519b2e0a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2020-10-03T02:17:47.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-20T03:50:04.000Z", "max_forks_repo_path": "scripts/preprocessing/kfold_partition_dataset.py", "max_forks_repo_name": "kallentu/chowdr", "max_forks_repo_head_hexsha": "47efd86025836e04c251c06f86c32d5519b2e0a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-31T08:09:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T10:43:29.000Z", "avg_line_length": 36.4782608696, "max_line_length": 142, "alphanum_fraction": 0.6324990068, "include": true, "reason": "import numpy", "num_tokens": 1188}
|
# Authors: Soledad Galli <solegalli@protonmail.com>
# License: BSD 3 clause
import numpy as np
import pandas as pd
from feature_engine.outliers import Winsorizer
class OutlierTrimmer(Winsorizer):
    """The OutlierTrimmer() removes observations with outliers from the dataset.

    The OutlierTrimmer() first calculates the maximum and /or minimum values
    beyond which a value will be considered an outlier, and thus removed.

    Limits are determined using:

    - a Gaussian approximation
    - the inter-quantile range proximity rule
    - percentiles.

    **Gaussian limits:**

    - right tail: mean + 3* std
    - left tail: mean - 3* std

    **IQR limits:**

    - right tail: 75th quantile + 3* IQR
    - left tail: 25th quantile - 3* IQR

    where IQR is the inter-quartile range: 75th quantile - 25th quantile.

    **percentiles or quantiles:**

    - right tail: 95th percentile
    - left tail: 5th percentile

    You can select how far out to cap the maximum or minimum values with the
    parameter `'fold'`.

    If `capping_method='gaussian'` fold gives the value to multiply the std.

    If `capping_method='iqr'` fold is the value to multiply the IQR.

    If `capping_method='quantile'`, fold is the percentile on each tail that should
    be censored. For example, if fold=0.05, the limits will be the 5th and 95th
    percentiles. If fold=0.1, the limits will be the 10th and 90th percentiles.

    The OutlierTrimmer() works only with numerical variables. A list of variables can
    be indicated. Alternatively, it will select all numerical variables.

    The transformer first finds the values at one or both tails of the distributions
    (fit). The transformer then removes observations with outliers from the dataframe
    (transform).

    More details in the :ref:`User Guide <outlier_trimmer>`.

    Parameters
    ----------
    capping_method: str, default='gaussian'
        Desired capping method. Can take 'gaussian', 'iqr' or 'quantiles'.

        **'gaussian'**: the transformer will find the maximum and / or minimum values
        to cap the variables using the Gaussian approximation.

        **'iqr'**: the transformer will find the boundaries using the IQR proximity
        rule.

        **'quantiles'**: the limits are given by the percentiles.

    tail: str, default='right'
        Whether to cap outliers on the right, left or both tails of the distribution.
        Can take 'left', 'right' or 'both'.

    fold: int or float, default=3
        How far out to to place the capping values. The number that will multiply
        the std or IQR to calculate the capping values. Recommended values, 2
        or 3 for the gaussian approximation, or 1.5 or 3 for the IQR proximity
        rule.

        If `capping_method='quantile'`, then `'fold'` indicates the percentile. So if
        `fold=0.05`, the limits will be the 95th and 5th percentiles.

        **Note**: Outliers will be removed up to a maximum of the 20th percentiles on
        both sides. Thus, when `capping_method='quantile'`, then `'fold'` takes values
        between 0 and 0.20.

    variables: list, default=None
        The list of variables for which the outliers will be removed. If None,
        the transformer will find and select all numerical variables.

    missing_values: string, default='raise'
        Indicates if missing values should be ignored or raised. Sometimes we want to
        remove outliers in the raw, original data, sometimes, we may want to remove
        outliers in the already pre-transformed data. If missing_values='ignore', the
        transformer will ignore missing data when learning the capping parameters or
        transforming the data. If missing_values='raise' the transformer will return
        an error if the training or the datasets to transform contain missing values.

    Attributes
    ----------
    right_tail_caps_:
        Dictionary with the maximum values above which values will be removed.

    left_tail_caps_ :
        Dictionary with the minimum values below which values will be removed.

    variables_:
        The group of variables that will be transformed.

    n_features_in_:
        The number of features in the train set used in fit.

    Methods
    -------
    fit:
        Find maximum and minimum values.

    transform:
        Remove outliers.

    fit_transform:
        Fit to the data. Then transform it.
    """

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Remove observations with outliers from the dataframe.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The data to be transformed.

        Returns
        -------
        X_new: pandas dataframe of shape = [n_samples, n_features]
            The dataframe without outlier observations.
        """
        X = self._check_transform_input_and_state(X)

        # The comparisons below yield boolean masks directly; the original
        # redundant `np.where(cond, True, False)` wrapper has been dropped.
        # A NaN never compares greater/less than a cap, so rows with missing
        # values in a capped feature are retained, matching the behaviour of
        # missing_values='ignore'.
        for feature, cap in self.right_tail_caps_.items():
            X = X.loc[~(X[feature] > cap)]

        for feature, cap in self.left_tail_caps_.items():
            X = X.loc[~(X[feature] < cap)]

        return X
|
{"hexsha": "0719241ff61a300d8a928a61ae9664764ba7b856", "size": 5332, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature_engine/outliers/trimmer.py", "max_stars_repo_name": "david-cortes/feature_engine", "max_stars_repo_head_hexsha": "702328d1a072d0911441e10b4eb98b3bfbf19215", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-02T19:35:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-02T19:35:50.000Z", "max_issues_repo_path": "feature_engine/outliers/trimmer.py", "max_issues_repo_name": "david-cortes/feature_engine", "max_issues_repo_head_hexsha": "702328d1a072d0911441e10b4eb98b3bfbf19215", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "feature_engine/outliers/trimmer.py", "max_forks_repo_name": "david-cortes/feature_engine", "max_forks_repo_head_hexsha": "702328d1a072d0911441e10b4eb98b3bfbf19215", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8496732026, "max_line_length": 88, "alphanum_fraction": 0.6701050263, "include": true, "reason": "import numpy", "num_tokens": 1244}
|
#####################################################
## librealsense T265 streams test ##
#####################################################
# This assumes the pyrealsense2 .so file is found in the same directory.
import pyrealsense2 as rs
# Prettier prints for reverse-engineering the rs API
from pprint import pprint
import numpy as np

# Handle to the realsense pipeline.
pipe = rs.pipeline()

# List every connected RealSense device; the T265 should be among them.
for dev in rs.context().devices:
    print('Found device:', dev.get_info(rs.camera_info.name), ', with serial number: ', dev.get_info(rs.camera_info.serial_number))

# Build the stream configuration.
cfg = rs.config()

# Dump the stream identifiers the API knows about (not all are supported by each device).
print('Available streams:')
pprint(dir(rs.stream))

# Subscribe to pose (translation, rotation, velocity etc.) and both fisheye imagers.
cfg.enable_stream(rs.stream.pose)
cfg.enable_stream(rs.stream.fisheye, 1)  # left camera
cfg.enable_stream(rs.stream.fisheye, 2)  # right camera

# Run the configured pipeline until interrupted; always stop it on exit.
pipe.start(cfg)
try:
    while True:
        frames = pipe.wait_for_frames()

        # Grab both fisheye frames as numpy arrays.
        left = frames.get_fisheye_frame(1)
        left_data = np.asanyarray(left.get_data())
        right = frames.get_fisheye_frame(2)
        right_data = np.asanyarray(right.get_data())
        print('Left frame', left_data.shape)
        print('Right frame', right_data.shape)

        # Positional data frame (may be absent on a given iteration).
        pose = frames.get_pose_frame()
        if pose:
            pose_data = pose.get_pose_data()
            print("\nFrame number: %5.0f" % (pose.frame_number))
            print("Position xyz: % 2.4f % 2.4f % 2.4f" % (pose_data.translation.x, pose_data.translation.y, pose_data.translation.z))
            print("Velocity xyz: % 2.4f % 2.4f % 2.4f" % (pose_data.velocity.x, pose_data.velocity.y, pose_data.velocity.z))
            print("Accelera xyz: % 2.4f % 2.4f % 2.4f" % (pose_data.acceleration.x, pose_data.acceleration.y, pose_data.acceleration.z))
            print("Quatern xyzw: % 2.4f % 2.4f % 2.4f % 2.4f" % (pose_data.rotation.x, pose_data.rotation.y, pose_data.rotation.z, pose_data.rotation.w))
finally:
    pipe.stop()
|
{"hexsha": "6e84ae94c14ebfb605193bbe24901c111acf28e5", "size": 2293, "ext": "py", "lang": "Python", "max_stars_repo_path": "robot/src/vision_to_mavros/scripts/t265_test_streams.py", "max_stars_repo_name": "mikobski/Robot-Inspekcyjny", "max_stars_repo_head_hexsha": "925491fc43b71bdaa54dccf60d38da59d244181d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "robot/src/vision_to_mavros/scripts/t265_test_streams.py", "max_issues_repo_name": "mikobski/Robot-Inspekcyjny", "max_issues_repo_head_hexsha": "925491fc43b71bdaa54dccf60d38da59d244181d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-08-19T21:21:58.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-05T13:33:19.000Z", "max_forks_repo_path": "robot/src/vision_to_mavros/scripts/t265_test_streams.py", "max_forks_repo_name": "mikobski/Critbot", "max_forks_repo_head_hexsha": "925491fc43b71bdaa54dccf60d38da59d244181d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2166666667, "max_line_length": 153, "alphanum_fraction": 0.6506759703, "include": true, "reason": "import numpy", "num_tokens": 579}
|
from gym_torcs import TorcsEnv
import numpy as np
img_dim = [64,64,3]
action_dim = 1
steps = 1000
batch_size = 32
nb_epoch = 100
def get_teacher_action(ob):
    """Proportional steering 'teacher' used to label observations.

    Steers using the car/track angle (scaled by 10/pi) minus a correction
    proportional to the lateral track position.
    """
    angle_term = ob.angle * 10 / np.pi
    position_correction = ob.trackPos * 0.10
    return np.array([angle_term - position_correction])
def img_reshape(input_img, dim=None):
    """Convert a channels-first TORCS frame into a batch-of-one image tensor.

    Args:
        input_img: raw frame with channels first, shape (C, H, W).
        dim: target (H, W, C) shape. Defaults to the module-level ``img_dim``,
            so existing single-argument calls behave exactly as before; the
            parameter generalizes the previously hard-coded global dependency.

    Returns:
        Array of shape (1, H, W, C), flipped vertically.
    """
    if dim is None:
        dim = img_dim
    img = np.transpose(input_img, (1, 2, 0))  # (C, H, W) -> (H, W, C)
    img = np.flipud(img)  # vertical flip, as in the original pipeline
    return np.reshape(img, (1, dim[0], dim[1], dim[2]))
# Replay buffers for the aggregated DAgger dataset (grown incrementally).
images_all = np.zeros((0, img_dim[0], img_dim[1], img_dim[2]))
actions_all = np.zeros((0,action_dim))
rewards_all = np.zeros((0,))

# Per-episode scratch lists filled while driving with the teacher policy.
img_list = []
action_list = []
reward_list = []

# vision=True streams camera frames; throttle=False means steering-only control.
env = TorcsEnv(vision=True, throttle=False)
ob = env.reset(relaunch=True)

print('Collecting data...')
for i in range(steps):
    if i == 0:
        # No observation available before the first step: steer straight ahead.
        act = np.array([0.0])
    else:
        act = get_teacher_action(ob)
    if i%100 == 0:
        # progress indicator
        print(i)
    ob, reward, done, _ = env.step(act)
    img_list.append(ob.img)
    action_list.append(act)
    reward_list.append(np.array([reward]))
env.end()

# Concatenate the per-step lists into the (N, ...) training arrays.
print('Packing data into arrays...')
for img, act, rew in zip(img_list, action_list, reward_list):
    images_all = np.concatenate([images_all, img_reshape(img)], axis=0)
    actions_all = np.concatenate([actions_all, np.reshape(act, [1,action_dim])], axis=0)
    rewards_all = np.concatenate([rewards_all, rew], axis=0)
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import Adam

#model from https://github.com/fchollet/keras/blob/master/examples/cifar10_cnn.py
# CNN policy: image in, single tanh-squashed steering command out.
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
                        input_shape=img_dim))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(action_dim))
# tanh keeps the predicted steering command in [-1, 1]
model.add(Activation('tanh'))

model.compile(loss='mean_squared_error',
              optimizer=Adam(lr=1e-4),
              metrics=['mean_squared_error'])

# Behavioral cloning: fit on the teacher's demonstrations first.
model.fit(images_all, actions_all,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          shuffle=True)

output_file = open('results.txt', 'w')

#aggregate and retrain
# DAgger loop: drive with the learned policy, collect the visited states,
# label them with the teacher, aggregate into the dataset, retrain.
dagger_itr = 5
for itr in range(dagger_itr):
    ob_list = []

    env = TorcsEnv(vision=True, throttle=False)
    ob = env.reset(relaunch=True)
    reward_sum = 0.0

    for i in range(steps):
        # act with the learned policy, not the teacher
        act = model.predict(img_reshape(ob.img))
        ob, reward, done, _ = env.step(act)
        if done is True:
            break
        else:
            ob_list.append(ob)
        reward_sum += reward
        print(i, reward, reward_sum, done, str(act[0]))
    print('Episode done ', itr, i, reward_sum)
    output_file.write('Number of Steps: %02d\t Reward: %0.04f\n'%(i, reward_sum))
    env.end()
    # full-length episode: policy already drives the whole course, stop early
    if i==(steps-1):
        break

    # Label the on-policy states with the teacher and grow the dataset.
    for ob in ob_list:
        images_all = np.concatenate([images_all, img_reshape(ob.img)], axis=0)
        actions_all = np.concatenate([actions_all, np.reshape(get_teacher_action(ob), [1,action_dim])], axis=0)

    # Retrain on the aggregated dataset.
    model.fit(images_all, actions_all,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              shuffle=True)
|
{"hexsha": "5828ceda2b923cfac53fd92f6d112a5caa0246d0", "size": 3599, "ext": "py", "lang": "Python", "max_stars_repo_path": "dagger.py", "max_stars_repo_name": "havefun28/imitation-dagger", "max_stars_repo_head_hexsha": "6460b53ae3bdfc9801c5ea621ccc1da4e575c9c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 67, "max_stars_repo_stars_event_min_datetime": "2017-01-23T05:06:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T12:57:27.000Z", "max_issues_repo_path": "dagger.py", "max_issues_repo_name": "RuihanGao/imitation-dagger", "max_issues_repo_head_hexsha": "6460b53ae3bdfc9801c5ea621ccc1da4e575c9c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-03-28T16:40:59.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-07T22:15:38.000Z", "max_forks_repo_path": "dagger.py", "max_forks_repo_name": "RuihanGao/imitation-dagger", "max_forks_repo_head_hexsha": "6460b53ae3bdfc9801c5ea621ccc1da4e575c9c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2017-03-09T17:24:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-25T13:59:27.000Z", "avg_line_length": 28.3385826772, "max_line_length": 111, "alphanum_fraction": 0.6574048347, "include": true, "reason": "import numpy", "num_tokens": 992}
|
      SUBROUTINE TG_QRNG ( gdatim, rngtyp, gtype, iret )
C************************************************************************
C* TG_QRNG                                                              *
C*                                                                      *
C* This subroutine determines whether a GDATIM is a singular time,      *
C* multiple times based on forecast hour, or multiple times based on    *
C* cycles.                                                              *
C*                                                                      *
C* TG_QRNG ( GDATIM, RNGTYP, GTYPE, IRET )                              *
C*                                                                      *
C* Input parameters:                                                    *
C*      GDATIM          CHAR*           Input time                      *
C*                                                                      *
C* Output parameters:                                                   *
C*      RNGTYP          INTEGER         Type of time indicator          *
C*                                        = 0 - not a range             *
C*                                        = 1 - range as forecast hours *
C*                                        = 2 - range as cycle hours    *
C*      GTYPE           LOGICAL         Grid time indicator             *
C*                                        .TRUE.  - start with a letter *
C*                                        .FALSE. - start with a numeric*
C*      IRET            INTEGER         Return code                     *
C*                                        As TG_RANG                    *
C**                                                                     *
C* Log:                                                                 *
C* D.W.Plummer/NCEP      7/98   From TG_RANG                            *
C* T. Lee/GSC            7/99   Checked grid time type                  *
C************************************************************************
      CHARACTER*(*)   gdatim
      INTEGER         rngtyp
C*
      CHARACTER       tstart*20, tstop*20, tinc*20, ctype*1, ginput*48
C
      CHARACTER       xxxx*1
      LOGICAL         qtype, gtype
C
C*    Statement function: true if the first character is one of the
C*    grid time type prefixes F, A, G, V or I.
C
      QTYPE (xxxx) = ( ( xxxx (1:1) .eq. 'F' ) .or.
     +                 ( xxxx (1:1) .eq. 'A' ) .or.
     +                 ( xxxx (1:1) .eq. 'G' ) .or.
     +                 ( xxxx (1:1) .eq. 'V' ) .or.
     +                 ( xxxx (1:1) .eq. 'I' ) )
C------------------------------------------------------------------------
      iret = 0
      rngtyp = 0
      gtype = .false.
C
C*    Convert to upper case before any character comparisons; the grid
C*    time indicator also accepts a leading 'L' (last).
C
      CALL ST_LCUC ( gdatim, ginput, ier )
      gtype = QTYPE ( ginput (1:1) ) .or. ( ginput (1:1) .eq. 'L' )
C
C*    Break range into parts.
C
      CALL ST_RANG ( ginput, tstart, tstop, tinc, itype, ier )
C
C*    The returned itype from ST_RANG should not be confused with
C*    rngtyp... itype indicates whether ginput is a range ( =0,
C*    not a range), a range w/o and increment (=1), or a range w/
C*    increment (=2).
C
C*    If this is not a range, check for "ALL".
C
      IF ( itype .eq. 0 ) THEN
          indall = INDEX ( ginput, 'ALL' )
C
C*        If ALL is not included somewhere (ALL or FALL or GALL, etc.),
C*        this is not a time range, return w/ rngtyp = 0.
C
          IF ( indall .eq. 0 ) THEN
              RETURN
C
C*        If the entire string is "ALL", assume this is a forecast
C*        hour range, return w/ rngtyp = 1.
C
          ELSE IF ( ginput .eq. 'ALL' ) THEN
              rngtyp = 1
              RETURN
C
C*        If ALL is first, but not entire string, assume this is a
C*        cycle range, return w/ rngtyp = 2.
C
          ELSE IF ( indall .eq. 1 ) THEN
              rngtyp = 2
              RETURN
C
C*        Check for FALL, AALL, GALL, IALL.
C
          ELSE
              ctype = ginput ( indall-1 : indall-1 )
              IF ( ( ctype .eq. 'F' ) .or. ( ctype .eq. 'A' ) .or.
     +             ( ctype .eq. 'G' ) .or. ( ctype .eq. 'I' ) ) THEN
                  rngtyp = 1
                  RETURN
              ELSE
C
C*                Unrecognized character ahead of "ALL" -- bad input.
C
                  iret = -7
                  RETURN
              END IF
          END IF
      END IF
C
C*    An open-ended range endpoint (FIRST or LAST alone) is treated as
C*    a forecast hour range.
C
      IF ( tstart .eq. 'FIRST' .or. tstop .eq. 'LAST' ) THEN
          rngtyp = 1
          RETURN
      END IF
C
C*    Check for FIRST or LAST followed by forecast type.
C*    Assume this format indicates a cycle range.
C
      IF ( ( tstart ( : 5 ) .eq. 'FIRST' ) .and.
     +     ( QTYPE ( tstart (6:6) ) ) ) THEN
          rngtyp = 2
          RETURN
      ELSE IF ( ( tstop ( : 4 ) .eq. 'LAST' ) .and.
     +     ( QTYPE ( tstop (5:5) ) ) ) THEN
          rngtyp = 2
          RETURN
      END IF
C
C*    Check for first character being valid.
C*    Assume this format indicates a forecast range.
C
      IF ( QTYPE ( tstart (1:1) ) .or.
     +     QTYPE ( tstop (1:1) ) ) THEN
          rngtyp = 1
          RETURN
      END IF
C*
      RETURN
      END
|
{"hexsha": "02687df18c665e4e1c17033f9f1a160e2b39a70e", "size": 3714, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/gemlib/tg/tgqrng.f", "max_stars_repo_name": "oxelson/gempak", "max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/gemlib/tg/tgqrng.f", "max_issues_repo_name": "oxelson/gempak", "max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/gemlib/tg/tgqrng.f", "max_forks_repo_name": "oxelson/gempak", "max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 29.2440944882, "max_line_length": 73, "alphanum_fraction": 0.4981152396, "num_tokens": 1310}
|
from __future__ import division
from __future__ import print_function
import os
import time
import math
import numpy as np
import pyopencl as cl
class CLWrapper:
    """Holds the OpenCL state (platform, device, queue, buffers, kernels) for best-path decoding."""

    def __init__(self, batchSize, maxT, maxC, kernelVariant=1, enableGPUDebug=False):
        """Set up the OpenCL pipeline.

        Args:
            batchSize: number of batch elements.
            maxT: number of time-steps.
            maxC: number of characters (including the CTC blank).
            kernelVariant: 1 = single-pass kernel, 2 = two-pass kernels.
            enableGPUDebug: True to debug the kernel via CodeXL (forces a
                program rebuild so the debugger can attach).
        """
        # force rebuild of program such that GPU debugger can attach to kernel
        self.enableGPUDebug = enableGPUDebug
        if enableGPUDebug:
            os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
            os.environ['PYOPENCL_NO_CACHE'] = '1'

        # consts
        self.batchSize = batchSize
        self.maxT = maxT
        self.maxC = maxC
        assert kernelVariant in [1, 2]
        self.kernelVariant = kernelVariant

        # platform, context, queue
        platforms = cl.get_platforms()
        assert platforms
        self.platform = platforms[0]  # take first platform
        devices = self.platform.get_devices(cl.device_type.GPU)  # get GPU devices
        assert devices
        self.device = devices[0]  # take first GPU
        self.context = cl.Context([self.device])  # context contains the first GPU
        self.queue = cl.CommandQueue(self.context, self.device)  # command queue to first GPU

        # buffers: batch input (read-only), result and intermediate path (write-only)
        sizeOfFloat32 = 4
        batchBufSize = batchSize * maxC * maxT * sizeOfFloat32
        self.batchBuf = cl.Buffer(self.context, cl.mem_flags.READ_ONLY, size=batchBufSize, hostbuf=None)
        self.res = np.zeros([batchSize, maxT]).astype(np.int32)
        self.resBuf = cl.Buffer(self.context, cl.mem_flags.WRITE_ONLY, self.res.nbytes)
        self.tmpBuf = cl.Buffer(self.context, cl.mem_flags.WRITE_ONLY, self.res.nbytes)

        # compile program and use defines for program-constants to avoid passing private variables
        buildOptions = '-D STEP_BEGIN={} -D MAX_T={} -D MAX_C={}'.format(2 ** math.ceil(math.log2(maxT)), maxT, maxC)
        self.program = cl.Program(self.context, open('BestPathCL.cl').read()).build(buildOptions)

        if kernelVariant == 1:
            # variant 1: single pass computes best path and collapses it in one kernel
            self.kernel1 = cl.Kernel(self.program, 'bestPathAndCollapse')
            self.kernel1.set_arg(0, self.batchBuf)
            self.kernel1.set_arg(1, self.resBuf)

            # all time-steps must fit into a work-group
            assert maxT <= self.kernel1.get_work_group_info(cl.kernel_work_group_info.WORK_GROUP_SIZE, self.device)
        else:
            # variant 2, kernel1: calculate best path
            self.kernel1 = cl.Kernel(self.program, 'bestPath')
            self.kernel1.set_arg(0, self.batchBuf)
            self.kernel1.set_arg(1, self.tmpBuf)

            # variant 2, kernel2: collapse best path
            self.kernel2 = cl.Kernel(self.program, 'collapsePath')
            self.kernel2.set_arg(0, self.tmpBuf)
            self.kernel2.set_arg(1, self.resBuf)

            # all chars must fit into a work-group
            assert maxC <= self.kernel1.get_work_group_info(cl.kernel_work_group_info.WORK_GROUP_SIZE, self.device)

    def compute(self, batch):
        """Compute best path for each batch element.

        Returns:
            int32 array [batchSize, maxT] of blank-terminated label strings.
        """
        # measure time in GPU debug mode
        if self.enableGPUDebug:
            t0 = time.time()

        # copy batch to device.
        # FIX: cl.enqueue_write_buffer/cl.enqueue_read_buffer were deprecated
        # and removed from PyOpenCL; cl.enqueue_copy is the supported API.
        # Bind the converted host array to a local so it stays alive until the
        # (in-order) queue completes the non-blocking write.
        hostBatch = batch.astype(np.float32)
        cl.enqueue_copy(self.queue, self.batchBuf, hostBatch, is_blocking=False)

        if self.kernelVariant == 1:
            # one pass
            cl.enqueue_nd_range_kernel(self.queue, self.kernel1, (self.batchSize, self.maxT), (1, self.maxT))
        else:
            # two passes
            cl.enqueue_nd_range_kernel(self.queue, self.kernel1, (self.batchSize, self.maxT, self.maxC), (1, 1, self.maxC))
            cl.enqueue_nd_range_kernel(self.queue, self.kernel2, (self.batchSize,), None)

        # blocking read back from GPU; this also guarantees the write and the
        # kernels above have finished before we touch self.res
        cl.enqueue_copy(self.queue, self.res, self.resBuf, is_blocking=True)

        # measure time in GPU debug mode
        if self.enableGPUDebug:
            t1 = time.time()
            print('BestPathCL.compute(...) time: ', t1-t0)
        return self.res
def ctcBestPathCL(batch, classes, clWrapper):
    "implements best path decoding on the GPU with OpenCL"
    # best labeling per batch element (blank-terminated label sequences)
    labelStrBatch = clWrapper.compute(batch)

    # map each label sequence to characters, stopping at the first blank
    blank = len(classes)
    charStrBatch = []
    for b in range(clWrapper.batchSize):
        chars = []
        for label in labelStrBatch[b]:
            if label == blank:
                break
            chars.append(classes[label])
        charStrBatch.append(''.join(chars))
    return charStrBatch
def testBestPathCL():
    "test decoder"
    classes = 'ab'
    # both time-steps favour index 2 (the blank), so decoding collapses to ''
    probs = np.array([[0.4, 0, 0.6], [0.4, 0, 0.6]])
    numTimesteps, numChars = probs.shape
    wrapper = CLWrapper(1, numTimesteps, numChars, enableGPUDebug=True)

    print('Test best path decoding (CL)')
    expected = ''
    actual = ctcBestPathCL(np.stack([probs]), classes, wrapper)[0]
    print('Expected: "' + expected + '"')
    print('Actual: "' + actual + '"')
    print('OK' if expected == actual else 'ERROR')


if __name__ == '__main__':
    testBestPathCL()
|
{"hexsha": "8a3e200e8fa4ee6120626e660d715562af57377f", "size": 4768, "ext": "py", "lang": "Python", "max_stars_repo_path": "ctc_decoder/BestPathCL.py", "max_stars_repo_name": "markschoene/CTCDecoder", "max_stars_repo_head_hexsha": "fbb21853c0b38b6d9a7ba5f86401547f8f655b4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-30T12:42:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T16:15:48.000Z", "max_issues_repo_path": "src/BestPathCL.py", "max_issues_repo_name": "YongGuCheng/CTCDecoder", "max_issues_repo_head_hexsha": "bdd9ab0c87650c611829714760b57d8ba36d52da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/BestPathCL.py", "max_forks_repo_name": "YongGuCheng/CTCDecoder", "max_forks_repo_head_hexsha": "bdd9ab0c87650c611829714760b57d8ba36d52da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-09T16:04:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-09T16:04:51.000Z", "avg_line_length": 33.5774647887, "max_line_length": 178, "alphanum_fraction": 0.7277684564, "include": true, "reason": "import numpy", "num_tokens": 1317}
|
\subsubsection{FX Option}
The \lstinline!FXOptionData! node is the trade data container for the \emph{FxOption} trade type. FX options with exercise styles \emph{European} or \emph{American} are supported.
The \lstinline!FXOptionData! node includes one and only one \lstinline!OptionData! trade
component sub-node plus elements specific to the FX Option. The structure of an \lstinline!FXOptionData! node
for an FX Option is shown in Listing \ref{lst:fxoption_data}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<FxOptionData>
<OptionData>
...
</OptionData>
<BoughtCurrency>...</BoughtCurrency>
<BoughtAmount>...</BoughtAmount>
<SoldCurrency>...</SoldCurrency>
<SoldAmount>...</SoldAmount>
<FXIndex>...</FXIndex>
</FxOptionData>
\end{minted}
\caption{FX Option data}
\label{lst:fxoption_data}
\end{listing}
The meanings and allowable values of the elements in the \lstinline!FXOptionData! node follow below.
\begin{itemize}
\item OptionData: This is a trade component sub-node outlined in section \ref{ss:option_data}. Note that the
FX option type allows for \emph{European} and \emph{American} option styles only. For option type \emph{Put}, Bought and Sold currencies/amounts are switched compared to the trade data node.
For example, a holder of BoughtCurrency EUR SoldCurrency JPY FX Call Option has the right to buy EUR using JPY, while
holder of the Put counterpart has the right to buy JPY using EUR, or equivalently sell EUR for JPY.
\item BoughtCurrency: The bought currency of the FX option. See OptionData above for more details.
Allowable values: See Currency in Table \ref{tab:allow_stand_data}.
\item BoughtAmount: The amount in the BoughtCurrency.
Allowable values: Any positive real number.
\item SoldCurrency: The sold currency of the FX option. See OptionData above for more details.
Allowable values: See Currency in Table \ref{tab:allow_stand_data}.
\item SoldAmount: The amount in the SoldCurrency.
Allowable values: Any positive real number.
\item FXIndex [Optional]: If the option is \textit{European}, has cash settlement and is subject to \textit{Automatic Exercise}, as indicated by the \lstinline!AutomaticExercise! node under \lstinline!OptionData!, this node must be populated with a valid FX index. The FX index is used to retrieve an FX rate on the expiry date that is in turn used to determine the payoff on the cash settlement date. The payoff is in the \lstinline!SoldCurrency! i.e.\ the domestic currency.
Allowable values: A valid FX index from the Table \ref{tab:fxindex_data}.
\end{itemize}
Note that FX Options also cover Precious Metals Options, i.e. with currencies XAU, XAG, XPT, XPD.
|
{"hexsha": "9c6012e11c31d57fc1e0a0ebedc51fcfeb364f16", "size": 2692, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Docs/UserGuide/tradedata/fxoption.tex", "max_stars_repo_name": "nvolfango/Engine", "max_stars_repo_head_hexsha": "a5ee0fc09d5a50ab36e50d55893b6e484d6e7004", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-24T20:43:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-24T20:43:38.000Z", "max_issues_repo_path": "Docs/UserGuide/tradedata/fxoption.tex", "max_issues_repo_name": "zhangjiayin/Engine", "max_issues_repo_head_hexsha": "a5ee0fc09d5a50ab36e50d55893b6e484d6e7004", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Docs/UserGuide/tradedata/fxoption.tex", "max_forks_repo_name": "zhangjiayin/Engine", "max_forks_repo_head_hexsha": "a5ee0fc09d5a50ab36e50d55893b6e484d6e7004", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.0714285714, "max_line_length": 473, "alphanum_fraction": 0.7771173848, "num_tokens": 687}
|
"""Data transformations supporting using Prophet on bounded data."""
import abc
import numpy as np
import pandas as pd
from scipy import special
class Transform(abc.ABC):
    """Abstract interface to data transformation used to help Prophet forecast in bounded domains.

    A concrete Transform maps bounded real-space data into Prophet's unbounded
    working space and back again. A Prophet model is trained and queried
    entirely in the working space; its forecasts are then mapped back to real
    space by the same Transform instance.

    Implementations must guarantee that transforming finite real data into the
    working space never produces NaNs or infinities, and that the mapping from
    real to working space is strictly order-preserving.
    """

    @abc.abstractmethod
    def to_work_series(self, data: pd.Series) -> pd.Series:
        """Maps `data` from real space into the working space.

        Raises:
            ValueError if `data` do not respect the lower and upper bound.
        """
        ...

    @abc.abstractmethod
    def to_real_series(self, data: pd.Series) -> pd.Series:
        """Maps `data` from the working space back into real space."""
        ...

    @property
    @abc.abstractmethod
    def lower_bound(self) -> float:
        """Smallest admissible real-space value."""
        ...

    @property
    @abc.abstractmethod
    def upper_bound(self) -> float:
        """Largest admissible real-space value."""
        ...
class Logarithmic(Transform):
    """Shifted log-transform between non-negative real data and an unbounded working space.

    For a fixed eps > 0:

        Y_work = ln(eps + Y_real)
        Y_real = max(exp(Y_work) - eps, 0)
    """

    def __init__(self, eps: float):
        """Constructor.

        Args:
            eps: Positive shift applied to real data before the logarithm, so
                that finite inputs can never produce -Infinity.
        """
        super().__init__()
        # `not (eps > 0)` (rather than `eps <= 0`) also rejects NaN
        if not (eps > 0):
            raise ValueError(f'Epsilon must be positive, got {eps}')
        self._eps = eps

    def to_work_series(self, data: pd.Series) -> pd.Series:
        if not (np.amin(data) >= self.lower_bound):
            raise ValueError('Real data out of bounds')
        shifted = self._eps + data
        return np.log(shifted)

    def to_real_series(self, data: pd.Series) -> pd.Series:
        unshifted = np.exp(data) - self._eps
        return unshifted.clip(self.lower_bound, None)

    @property
    def lower_bound(self) -> float:
        return 0

    @property
    def upper_bound(self) -> float:
        return np.inf
class Logit(Transform):
    """Compressed logit transform between data in [0, 1] and an unbounded working space.

    For a fixed 0 < eps << 1/2:

        Y_work = logit( eps + Y_real * (1 - 2 * eps) )
        Y_real = min( max( (expit(Y_work) - eps) / (1 - 2 * eps), 0), 1)

    where logit(p) = ln( p / (1 - p) ) and expit(x) = 1 / (1 + exp(-x)).
    """

    def __init__(self, eps: float):
        """Constructor.

        Args:
            eps: Compresses the data range from [0, 1] to [eps, 1 - eps]
                before the logit, so valid data never map to +/- Infinity.
        """
        super().__init__()
        # `not (...)` comparisons (rather than `<=` / `>=`) also reject NaN
        if not (eps > 0):
            raise ValueError(f'Epsilon must be positive, got {eps}')
        if not (eps < 0.5):
            raise ValueError(f'Epsilon must be < 1/2, got {eps}')
        self._eps = eps
        self._width = 1 - 2 * eps

    def to_work_series(self, data: pd.Series) -> pd.Series:
        within = np.amin(data) >= self.lower_bound and np.amax(data) <= self.upper_bound
        if not within:
            raise ValueError('Real data out of bounds')
        return special.logit(self._eps + data * self._width)

    def to_real_series(self, data: pd.Series) -> pd.Series:
        uncompressed = (special.expit(data) - self._eps) / self._width
        return uncompressed.clip(self.lower_bound, self.upper_bound)

    @property
    def lower_bound(self) -> float:
        return 0

    @property
    def upper_bound(self) -> float:
        return 1
|
{"hexsha": "851343e80e2aaef3bffddd9738b7e9a9c296c56e", "size": 4063, "ext": "py", "lang": "Python", "max_stars_repo_path": "prophet_utils/transforms.py", "max_stars_repo_name": "WilliamHo1999/prophet-utils", "max_stars_repo_head_hexsha": "13a165d50ec3280215aefc980c60d60859cc9b05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-14T11:31:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T09:09:59.000Z", "max_issues_repo_path": "prophet_utils/transforms.py", "max_issues_repo_name": "Hey-Savvie/prophet-utils", "max_issues_repo_head_hexsha": "13a165d50ec3280215aefc980c60d60859cc9b05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "prophet_utils/transforms.py", "max_forks_repo_name": "Hey-Savvie/prophet-utils", "max_forks_repo_head_hexsha": "13a165d50ec3280215aefc980c60d60859cc9b05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0152671756, "max_line_length": 122, "alphanum_fraction": 0.6207236032, "include": true, "reason": "import numpy,from scipy", "num_tokens": 989}
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import scipy as sp
from sklearn.preprocessing import MinMaxScaler
def _neutralize(df, columns, by, proportion=1.0):
scores = df[columns]
exposures = df[by].values
scores = scores - proportion * \
exposures.dot(np.linalg.pinv(exposures).dot(scores))
return scores / scores.std()
def _normalize(df):
X = (df.rank(method="first") - 0.5) / len(df)
return sp.stats.norm.ppf(X)
def normalize_and_neutralize(df, columns, by, proportion=1.0):
    """Gaussianize `columns` in place, then neutralize them against the `by` features."""
    # First convert the scores to a normal distribution ...
    df[columns] = _normalize(df[columns])
    # ... then strip out (a fraction of) their exposure to `by`.
    df[columns] = _neutralize(df, columns, by, proportion)
    return df[columns]
# Tournament/column configuration used throughout this script.
TOURNAMENT_NAME = ""
TARGET_NAME = f"target"
PREDICTION_NAME = f"prediction"

# Payout parameters: correlation scores are centered on BENCHMARK and
# scaled by BAND before being clipped to [-1, 1].
BENCHMARK = 0
BAND = 0.2
def score(df):
    """Correlation between the target and percentile-ranked predictions."""
    # method="first" breaks ties based on order in array
    ranked_preds = df[PREDICTION_NAME].rank(pct=True, method="first")
    return np.corrcoef(df[TARGET_NAME], ranked_preds)[0, 1]
# The payout function: benchmark-centered, band-scaled, clipped score.
def payout(scores):
    """Map correlation scores to payouts in [-1, 1]."""
    scaled = (scores - BENCHMARK) / BAND
    return scaled.clip(lower=-1, upper=1)
def Output(p):
    """Logistic sigmoid squashing the raw GP score into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-p))
def GPI(data):
    """First machine-generated (genetic-programming) feature combination.

    `data` is indexed by feature name (e.g. a DataFrame or dict of Series);
    each term is a fixed-weight tanh of an evolved feature expression, and the
    weighted sum is squashed through the logistic `Output`.  The expressions
    are machine-generated and deliberately left untouched.
    """
    return Output(0.097947 * np.tanh((((data["feature_strength19"]) + (((((data["feature_charisma63"]) - (((data["feature_dexterity6"]) - (((data["feature_wisdom36"]) + (((((-((data["feature_constitution56"])))) + (((((data["feature_charisma75"]) - (data["feature_dexterity14"]))) - (data["feature_wisdom3"])))) / 2.0)))))))) - (((data["feature_dexterity14"]) - (((data["feature_charisma63"]) - (data["feature_dexterity7"])))))))) / 2.0)) +
                  0.099022 * np.tanh((((((((data["feature_charisma55"]) * (data["feature_strength34"]))) * (((data["feature_constitution42"]) * (data["feature_charisma76"]))))) + (((data["feature_strength1"]) - (((data["feature_constitution38"]) - (((((data["feature_charisma76"]) * (((data["feature_charisma55"]) * (data["feature_strength34"]))))) - ((((data["feature_constitution24"]) + (((data["feature_dexterity12"]) - (data["feature_charisma19"])))) / 2.0))))))))) / 2.0)) +
                  0.093451 * np.tanh(((((-((((((data["feature_constitution62"]) - (((data["feature_constitution18"]) + (((data["feature_charisma58"]) + ((((-((np.tanh((data["feature_constitution79"])))))) - (data["feature_constitution6"]))))))))) / 2.0))))) + ((((data["feature_wisdom20"]) + (((data["feature_wisdom26"]) * (((data["feature_charisma41"]) - (data["feature_charisma43"])))))) / 2.0))) / 2.0)) +
                  0.100000 * np.tanh(((data["feature_constitution66"]) * (((data["feature_dexterity2"]) * (((data["feature_constitution94"]) * ((((-((((((-((((((data["feature_constitution111"]) * (np.tanh((data["feature_dexterity4"]))))) * (((data["feature_dexterity2"]) * (((data["feature_strength30"]) + (data["feature_dexterity4"])))))))))) + (((data["feature_constitution16"]) - (data["feature_charisma46"])))) / 2.0))))) * 2.0)))))))) +
                  0.100000 * np.tanh(((data["feature_intelligence2"]) * ((((data["feature_dexterity14"]) + ((((-1.0)) + (((((((data["feature_wisdom44"]) + (data["feature_charisma10"])) / 2.0)) + (((((((data["feature_dexterity9"]) + (((data["feature_wisdom35"]) - (data["feature_charisma69"]))))) - (((data["feature_intelligence2"]) * (data["feature_intelligence2"]))))) - (data["feature_charisma69"])))) / 2.0))))) / 2.0)))) +
                  0.100000 * np.tanh((((((data["feature_intelligence6"]) + (((data["feature_wisdom23"]) * (((data["feature_wisdom23"]) - (data["feature_charisma10"])))))) / 2.0)) * ((((((data["feature_wisdom23"]) - (data["feature_charisma16"]))) + ((-((((data["feature_wisdom37"]) - ((((((((data["feature_charisma10"]) + (((data["feature_charisma3"]) - (data["feature_constitution50"]))))) * 2.0)) + (data["feature_wisdom23"])) / 2.0)))))))) / 2.0)))) +
                  0.099804 * np.tanh(((data["feature_charisma50"]) * ((-((((((((data["feature_charisma50"]) - (np.tanh(((((((data["feature_wisdom18"]) + (data["feature_wisdom18"])) / 2.0)) - (data["feature_wisdom41"]))))))) * (data["feature_wisdom8"]))) * ((-((((((data["feature_charisma28"]) - (np.tanh((((data["feature_wisdom45"]) - (data["feature_strength1"]))))))) * (data["feature_wisdom18"]))))))))))))) +
                  0.099804 * np.tanh(((((data["feature_dexterity11"]) * (np.tanh((((data["feature_dexterity11"]) * (np.tanh((((((data["feature_constitution2"]) - (((data["feature_dexterity11"]) - ((((((data["feature_constitution65"]) + (((data["feature_intelligence9"]) / 2.0))) / 2.0)) * 2.0)))))) / 2.0)))))))))) * ((-((np.tanh((data["feature_dexterity11"])))))))) +
                  0.099609 * np.tanh(((((data["feature_dexterity9"]) * (((data["feature_wisdom13"]) * (((data["feature_charisma85"]) * ((((((((data["feature_wisdom13"]) + (((data["feature_constitution50"]) - (data["feature_constitution91"])))) / 2.0)) * (((data["feature_wisdom13"]) * (((((data["feature_dexterity9"]) * (((((data["feature_charisma81"]) * (data["feature_wisdom13"]))) * 2.0)))) * 2.0)))))) * 2.0)))))))) / 2.0)) +
                  0.100000 * np.tanh((-((((data["feature_dexterity4"]) * ((((data["feature_wisdom2"]) + (((data["feature_wisdom36"]) * (((data["feature_wisdom36"]) * (((((data["feature_wisdom1"]) - (((data["feature_wisdom36"]) + (np.tanh(((-((data["feature_strength10"])))))))))) - (((((data["feature_dexterity4"]) + (np.tanh(((-((data["feature_strength10"])))))))) + (data["feature_constitution55"])))))))))) / 2.0))))))))
# GPII: one member of this file's ensemble of auto-generated genetic-programming
# (GP) scoring functions.  Each term below is "weight * np.tanh(<arithmetic over
# data[feature_*] columns>)" and the terms are summed inside Output() (defined
# elsewhere in the file; presumably a clipping/rescaling helper -- confirm).
# `data` is indexed by string column names, so it is presumably a pandas
# DataFrame or dict of numeric arrays -- verify against the caller.
# NOTE(review): expression tree left byte-identical -- hand-editing
# machine-generated arithmetic is not safe.
def GPII(data):
return Output(0.099902 * np.tanh((((((data["feature_charisma37"]) - (data["feature_dexterity11"]))) + (((data["feature_constitution81"]) - (((data["feature_dexterity3"]) - (((((((((((data["feature_charisma37"]) - (data["feature_dexterity11"]))) - (((data["feature_charisma69"]) - (data["feature_charisma19"]))))) + (((data["feature_charisma6"]) - (data["feature_constitution81"])))) / 2.0)) + (((data["feature_charisma6"]) - (data["feature_charisma69"])))) / 2.0))))))) / 2.0)) +
0.097556 * np.tanh(((((((data["feature_strength4"]) + (((((data["feature_charisma63"]) * (((data["feature_charisma63"]) * (((data["feature_strength34"]) + (((((((((data["feature_strength34"]) + (data["feature_strength36"]))) + (data["feature_strength36"]))) * (data["feature_strength36"]))) * (data["feature_dexterity1"]))))))))) - (((((data["feature_dexterity7"]) / 2.0)) * 2.0)))))) - (data["feature_constitution38"]))) / 2.0)) +
0.099902 * np.tanh((((((((data["feature_wisdom35"]) - (np.tanh((((((data["feature_wisdom16"]) * 2.0)) + (data["feature_constitution110"]))))))) / 2.0)) + (((((((-((((data["feature_constitution110"]) - (((data["feature_charisma58"]) + (data["feature_wisdom42"])))))))) - (data["feature_dexterity12"]))) + ((((data["feature_wisdom8"]) + ((-((((data["feature_strength2"]) - (data["feature_wisdom32"]))))))) / 2.0))) / 2.0))) / 2.0)) +
0.095112 * np.tanh(((data["feature_strength19"]) * ((((((((data["feature_charisma85"]) * (data["feature_strength19"]))) + (((data["feature_wisdom23"]) - (data["feature_strength19"])))) / 2.0)) + ((((data["feature_charisma78"]) + ((((((data["feature_constitution31"]) * (data["feature_charisma45"]))) + (((((data["feature_constitution31"]) * (data["feature_strength19"]))) - (((data["feature_constitution11"]) - ((-((data["feature_strength19"]))))))))) / 2.0))) / 2.0)))))) +
0.089736 * np.tanh((-((((data["feature_wisdom2"]) * (((data["feature_constitution46"]) * ((((-((((data["feature_wisdom2"]) - (((((((data["feature_charisma57"]) * 2.0)) / 2.0)) - ((-((((data["feature_constitution27"]) + (data["feature_charisma34"]))))))))))))) / 2.0))))))))) +
0.099511 * np.tanh(((data["feature_charisma50"]) * (((((((data["feature_charisma45"]) + (data["feature_charisma5"]))) + (data["feature_intelligence5"]))) * (((data["feature_wisdom10"]) * (((((data["feature_wisdom42"]) * (data["feature_wisdom27"]))) * ((((data["feature_constitution90"]) + (((((data["feature_charisma79"]) + (data["feature_intelligence5"]))) * (data["feature_wisdom42"])))) / 2.0)))))))))) +
0.100000 * np.tanh(((((((data["feature_charisma35"]) - (np.tanh((data["feature_constitution19"]))))) - (np.tanh((data["feature_strength13"]))))) * ((((data["feature_dexterity13"]) + ((((data["feature_dexterity13"]) + ((-((((((data["feature_constitution15"]) * (((data["feature_charisma74"]) + (data["feature_strength18"]))))) + (data["feature_constitution19"]))))))) / 2.0))) / 2.0)))) +
0.100000 * np.tanh(((((data["feature_dexterity2"]) + (((data["feature_dexterity8"]) + (((data["feature_intelligence8"]) * (((((-3.0)) + ((-((data["feature_constitution40"]))))) / 2.0)))))))) * ((((data["feature_constitution108"]) + ((-(((((data["feature_wisdom24"]) + (np.tanh((((((data["feature_dexterity2"]) * (data["feature_dexterity2"]))) * (data["feature_wisdom33"])))))) / 2.0)))))) / 2.0)))) +
0.099804 * np.tanh(((data["feature_wisdom22"]) * ((-((((((data["feature_charisma69"]) * (((((data["feature_constitution91"]) / 2.0)) + (((((data["feature_charisma35"]) + (((((-((data["feature_charisma69"])))) + (((((((data["feature_dexterity12"]) - (data["feature_strength12"]))) / 2.0)) * 2.0))) / 2.0)))) - (data["feature_constitution91"]))))))) / 2.0))))))) +
0.100000 * np.tanh(((((data["feature_constitution108"]) * (((((-((data["feature_constitution6"])))) + (((((data["feature_wisdom11"]) * 2.0)) + (((np.tanh((((data["feature_constitution108"]) - (data["feature_charisma11"]))))) * 2.0))))) / 2.0)))) * (((((data["feature_wisdom1"]) * (((((data["feature_constitution96"]) - (data["feature_charisma11"]))) + (data["feature_wisdom39"]))))) * ((-((data["feature_constitution6"])))))))))
# GPIII: auto-generated genetic-programming ensemble member (same shape as the
# sibling GP* functions in this file): a weighted sum of np.tanh() terms over
# data["feature_*"] columns, wrapped in Output() (defined elsewhere -- confirm
# its semantics there).  `data` presumably supports string-key column access
# (DataFrame-like) -- TODO confirm.
# NOTE(review): code left byte-identical; the expression tree is machine
# generated and not safely hand-editable.
def GPIII(data):
return Output(0.099902 * np.tanh((((((((data["feature_charisma76"]) * (data["feature_strength1"]))) + (((data["feature_charisma19"]) - (data["feature_dexterity11"]))))) + (((((((-((data["feature_dexterity7"])))) + ((((((data["feature_charisma54"]) - (data["feature_dexterity7"]))) + (((((((data["feature_charisma76"]) * (data["feature_charisma67"]))) * 2.0)) * (data["feature_dexterity7"])))) / 2.0))) / 2.0)) * 2.0))) / 2.0)) +
0.100000 * np.tanh(((((((((((((((np.tanh((data["feature_charisma28"]))) - (data["feature_constitution114"]))) + (((((data["feature_charisma5"]) * (data["feature_charisma5"]))) / 2.0)))) + (((data["feature_wisdom35"]) - ((((data["feature_dexterity12"]) + (data["feature_constitution102"])) / 2.0)))))) / 2.0)) - ((((data["feature_dexterity12"]) + (data["feature_constitution102"])) / 2.0)))) + (data["feature_charisma63"]))) / 2.0)) +
0.100000 * np.tanh((((((data["feature_strength34"]) + (np.tanh((np.tanh(((-((((((((data["feature_dexterity3"]) * 2.0)) * 2.0)) - (((data["feature_charisma77"]) - (((data["feature_constitution84"]) - (((data["feature_constitution34"]) - (((data["feature_wisdom16"]) - (((data["feature_constitution34"]) - ((((data["feature_charisma83"]) + (((data["feature_dexterity4"]) * 2.0))) / 2.0)))))))))))))))))))))) / 2.0)) / 2.0)) +
0.100000 * np.tanh(((data["feature_charisma85"]) * (((data["feature_strength19"]) * (((data["feature_strength19"]) * (((((data["feature_constitution42"]) * (data["feature_dexterity9"]))) * (((data["feature_strength19"]) * (((((((data["feature_strength1"]) * (((data["feature_strength19"]) + (data["feature_charisma2"]))))) + ((-((((data["feature_constitution42"]) - (data["feature_dexterity9"])))))))) + (data["feature_charisma2"]))))))))))))) +
0.100000 * np.tanh(np.tanh(((((((((((data["feature_constitution101"]) / 2.0)) / 2.0)) + (((data["feature_wisdom10"]) * (np.tanh((((np.tanh(((((((-((data["feature_strength9"])))) * 2.0)) * 2.0)))) * 2.0))))))) / 2.0)) * (((data["feature_dexterity6"]) - (((data["feature_wisdom42"]) + ((((data["feature_strength10"]) + (((data["feature_wisdom42"]) - (data["feature_dexterity6"])))) / 2.0)))))))))) +
0.100000 * np.tanh(((((0.0)) + ((((data["feature_wisdom23"]) + ((-(((((((data["feature_constitution7"]) + ((-((((data["feature_charisma10"]) - (data["feature_intelligence4"]))))))) / 2.0)) + ((((data["feature_charisma69"]) + ((((data["feature_constitution47"]) + (np.tanh((((data["feature_constitution46"]) - (((data["feature_charisma10"]) - (data["feature_constitution47"])))))))) / 2.0))) / 2.0)))))))) / 2.0))) / 2.0)) +
0.100000 * np.tanh(((data["feature_dexterity1"]) * ((((-((((data["feature_charisma6"]) * (((data["feature_wisdom43"]) - (((((((-(((-((((data["feature_strength3"]) + (data["feature_charisma45"]))))))))) * (((data["feature_charisma45"]) * (((data["feature_constitution70"]) * 2.0)))))) + (((((data["feature_strength3"]) * 2.0)) / 2.0))) / 2.0))))))))) / 2.0)))) +
0.094135 * np.tanh((((-(((((-((data["feature_wisdom4"])))) * ((((data["feature_charisma29"]) + (((data["feature_strength24"]) * (((data["feature_strength15"]) + (((((data["feature_constitution63"]) + (data["feature_constitution54"]))) * 2.0))))))) / 2.0))))))) * (((data["feature_constitution85"]) * (((data["feature_constitution4"]) * ((-((((((data["feature_strength15"]) * (data["feature_constitution46"]))) * 2.0))))))))))) +
0.097165 * np.tanh((-(((((((((((data["feature_charisma13"]) + (((data["feature_wisdom26"]) + (data["feature_charisma79"]))))) * (data["feature_wisdom26"]))) + (data["feature_intelligence3"])) / 2.0)) * ((((((data["feature_intelligence3"]) + (((data["feature_wisdom42"]) * ((-((((data["feature_charisma13"]) + (((data["feature_charisma45"]) + (data["feature_charisma79"]))))))))))) / 2.0)) / 2.0))))))) +
0.097165 * np.tanh(((data["feature_constitution108"]) * (np.tanh((((data["feature_constitution108"]) * ((((data["feature_constitution39"]) + (((data["feature_dexterity8"]) - (((data["feature_intelligence8"]) + (((data["feature_constitution63"]) - (((data["feature_dexterity8"]) * ((((data["feature_dexterity5"]) + (((data["feature_dexterity2"]) - (((data["feature_constitution108"]) + (data["feature_constitution39"])))))) / 2.0))))))))))) / 2.0)))))))))
# GPIV: machine-generated GP ensemble member.  Computes a weighted sum of
# np.tanh() expressions over data["feature_*"] columns and passes it through
# Output() (helper defined elsewhere in this file; its exact transform is not
# visible here -- confirm before relying on the output range).
# NOTE(review): expression left byte-identical -- auto-generated arithmetic,
# unsafe to restyle by hand.
def GPIV(data):
return Output(0.099902 * np.tanh(((data["feature_charisma63"]) - ((((((data["feature_dexterity6"]) + (data["feature_dexterity14"])) / 2.0)) + ((((((((-2.0)) * (((data["feature_wisdom23"]) - (((data["feature_charisma9"]) - ((-((((data["feature_dexterity4"]) - (((data["feature_strength34"]) - (((data["feature_dexterity6"]) - ((((data["feature_charisma58"]) + (data["feature_dexterity6"])) / 2.0))))))))))))))))) / 2.0)) / 2.0)))))) +
0.100000 * np.tanh((((((((data["feature_charisma13"]) * (data["feature_charisma54"]))) + ((-((((data["feature_constitution32"]) + (((data["feature_constitution91"]) + ((-((((((data["feature_dexterity1"]) * (data["feature_charisma37"]))) + (((data["feature_strength4"]) * (((data["feature_charisma37"]) + (((data["feature_constitution59"]) * (((data["feature_charisma54"]) + (data["feature_constitution18"])))))))))))))))))))))) / 2.0)) / 2.0)) +
0.094330 * np.tanh(((data["feature_wisdom7"]) * ((-((((data["feature_intelligence3"]) - ((((data["feature_strength22"]) + (((np.tanh((((data["feature_constitution104"]) * (data["feature_strength19"]))))) * (((((((data["feature_strength22"]) - ((-((data["feature_strength19"])))))) - ((-((data["feature_wisdom7"])))))) - ((-((data["feature_wisdom26"]))))))))) / 2.0))))))))) +
0.100000 * np.tanh(((data["feature_constitution101"]) * (((((((((data["feature_charisma53"]) + ((((((data["feature_charisma5"]) + (((((data["feature_constitution12"]) * ((((((data["feature_dexterity13"]) - (data["feature_constitution114"]))) + (data["feature_dexterity13"])) / 2.0)))) - (data["feature_constitution114"])))) / 2.0)) * 2.0)))) * (data["feature_wisdom42"]))) - (np.tanh((data["feature_wisdom16"]))))) / 2.0)))) +
0.093157 * np.tanh(((((((((((data["feature_dexterity4"]) * (data["feature_wisdom36"]))) * (data["feature_wisdom36"]))) + (np.tanh(((-3.0)))))) + (data["feature_charisma29"]))) * (np.tanh((((data["feature_dexterity7"]) + (np.tanh(((-((((((data["feature_wisdom36"]) - (((data["feature_constitution85"]) - (data["feature_strength30"]))))) * (data["feature_constitution78"])))))))))))))) +
0.099707 * np.tanh(((((data["feature_strength3"]) * ((((data["feature_charisma66"]) + (((((((((data["feature_intelligence11"]) + (data["feature_dexterity5"]))) * ((-((((data["feature_dexterity5"]) * ((-((data["feature_dexterity8"]))))))))))) - (data["feature_constitution4"]))) - (((data["feature_intelligence11"]) * (((data["feature_constitution16"]) * (data["feature_intelligence11"])))))))) / 2.0)))) * (data["feature_strength1"]))) +
0.099902 * np.tanh((((((data["feature_wisdom43"]) + ((((data["feature_constitution78"]) + (((data["feature_wisdom33"]) - (((data["feature_wisdom40"]) + (((data["feature_constitution108"]) + (data["feature_wisdom20"])))))))) / 2.0))) / 2.0)) * (((((-((((data["feature_charisma63"]) + (data["feature_dexterity11"])))))) + (((data["feature_wisdom20"]) * (((data["feature_wisdom33"]) * (((data["feature_wisdom33"]) * 2.0))))))) / 2.0)))) +
0.099609 * np.tanh(np.tanh((((((data["feature_charisma35"]) * ((-((((((data["feature_intelligence4"]) * ((((data["feature_intelligence4"]) + (((((data["feature_dexterity3"]) * (((data["feature_charisma35"]) * ((-((data["feature_charisma35"])))))))) * (((data["feature_dexterity3"]) * (((data["feature_intelligence5"]) * (((data["feature_charisma35"]) * 2.0))))))))) / 2.0)))) * 2.0))))))) / 2.0)))) +
0.099804 * np.tanh((((((data["feature_wisdom22"]) + (((data["feature_constitution16"]) * (((((np.tanh(((((data["feature_wisdom41"]) + (((data["feature_constitution97"]) - (((data["feature_constitution81"]) * (data["feature_constitution70"])))))) / 2.0)))) + ((-((((data["feature_constitution16"]) * (data["feature_dexterity12"])))))))) + ((-((data["feature_dexterity12"]))))))))) / 2.0)) * (((data["feature_constitution100"]) * (data["feature_constitution91"]))))) +
0.088270 * np.tanh(((data["feature_charisma75"]) * (((data["feature_strength30"]) * (((((np.tanh((((data["feature_intelligence12"]) + (((data["feature_intelligence12"]) - (((((data["feature_strength15"]) + (((((data["feature_constitution4"]) + (((data["feature_wisdom3"]) - (data["feature_constitution113"]))))) - (data["feature_constitution34"]))))) * 2.0)))))))) * (((data["feature_constitution113"]) * 2.0)))) / 2.0)))))))
# GPV: machine-generated GP ensemble member (sibling of GPII..GPIX).  Weighted
# sum of np.tanh() expressions over data["feature_*"] columns, fed to Output()
# (defined elsewhere in this file -- confirm its transform there).
# NOTE(review): body left byte-identical; the arithmetic is program-generated
# and must not be hand-edited.
def GPV(data):
return Output(0.099902 * np.tanh((((data["feature_charisma18"]) + ((-(((((((data["feature_dexterity11"]) + (((((((((data["feature_dexterity14"]) + ((-((((data["feature_constitution81"]) - (((data["feature_charisma69"]) - (data["feature_strength22"]))))))))) / 2.0)) * 2.0)) + ((((data["feature_charisma69"]) + ((-((((data["feature_constitution89"]) - (((data["feature_dexterity14"]) - (data["feature_charisma42"]))))))))) / 2.0))) / 2.0))) / 2.0)) * 2.0)))))) / 2.0)) +
0.097556 * np.tanh(((((data["feature_dexterity7"]) * (((((data["feature_dexterity1"]) * (((((((data["feature_strength19"]) * (data["feature_charisma63"]))) * (data["feature_charisma63"]))) * 2.0)))) * (data["feature_strength4"]))))) - ((((data["feature_dexterity7"]) + (((((((((data["feature_dexterity7"]) - (data["feature_strength34"]))) * 2.0)) - (data["feature_charisma63"]))) / 2.0))) / 2.0)))) +
0.100000 * np.tanh((((data["feature_wisdom23"]) + ((-((((((((np.tanh((data["feature_intelligence2"]))) + ((-((((data["feature_constitution42"]) - ((((((data["feature_constitution110"]) + (np.tanh((data["feature_constitution62"])))) / 2.0)) * 2.0)))))))) / 2.0)) + ((((np.tanh((np.tanh((((((((data["feature_constitution62"]) * 2.0)) * 2.0)) * 2.0)))))) + (data["feature_intelligence8"])) / 2.0))) / 2.0)))))) / 2.0)) +
0.099902 * np.tanh(((data["feature_dexterity7"]) * ((((data["feature_wisdom42"]) + ((-(((((((data["feature_wisdom7"]) - ((-(((((((data["feature_dexterity7"]) + (data["feature_wisdom7"])) / 2.0)) - (data["feature_wisdom42"])))))))) + (((((data["feature_constitution78"]) - (data["feature_charisma53"]))) - (((data["feature_charisma85"]) - (data["feature_wisdom7"])))))) / 2.0)))))) / 2.0)))) +
0.099707 * np.tanh(((((data["feature_constitution85"]) * ((((((((data["feature_constitution50"]) + (data["feature_constitution38"])) / 2.0)) * (data["feature_constitution50"]))) * 2.0)))) * ((-((((((((data["feature_constitution38"]) + (data["feature_intelligence4"])) / 2.0)) + ((-((((data["feature_charisma55"]) - ((((((data["feature_constitution38"]) - (data["feature_dexterity12"]))) + (np.tanh((np.tanh((data["feature_intelligence4"])))))) / 2.0)))))))) / 2.0))))))) +
0.100000 * np.tanh((-(((-((((((((((((data["feature_wisdom26"]) + (((data["feature_charisma5"]) - (data["feature_strength32"])))) / 2.0)) * (data["feature_wisdom3"]))) + ((((((((((data["feature_wisdom35"]) * (data["feature_wisdom3"]))) + (((data["feature_charisma5"]) - (data["feature_wisdom25"])))) / 2.0)) * (data["feature_wisdom20"]))) - (np.tanh((data["feature_charisma16"])))))) / 2.0)) * (data["feature_wisdom3"]))))))))) +
0.098729 * np.tanh(((data["feature_dexterity4"]) * (((data["feature_dexterity4"]) * (((data["feature_dexterity4"]) * ((((((data["feature_charisma12"]) * (((((data["feature_constitution52"]) + (data["feature_wisdom11"]))) * (data["feature_charisma76"]))))) + (((data["feature_constitution18"]) - (np.tanh((((((data["feature_constitution98"]) + (data["feature_constitution16"]))) * 2.0))))))) / 2.0)))))))) +
0.099120 * np.tanh((((((data["feature_charisma10"]) / 2.0)) + ((((-((((((data["feature_constitution12"]) * (data["feature_constitution68"]))) - (((((data["feature_charisma13"]) - (((data["feature_constitution46"]) - (((data["feature_charisma6"]) - (((data["feature_intelligence3"]) * 2.0)))))))) / 2.0))))))) / 2.0))) / 2.0)) +
0.098925 * np.tanh(((data["feature_constitution54"]) * ((((-((((data["feature_strength15"]) - ((((data["feature_charisma3"]) + ((((((((((data["feature_charisma19"]) * (data["feature_strength15"]))) * 2.0)) * (((((data["feature_charisma19"]) * (data["feature_strength15"]))) * 2.0)))) + (((((data["feature_charisma19"]) * 2.0)) * (np.tanh((data["feature_wisdom19"])))))) / 2.0))) / 2.0))))))) / 2.0)))) +
0.099902 * np.tanh(((data["feature_charisma32"]) * (((data["feature_charisma58"]) * (((((data["feature_strength30"]) + (data["feature_wisdom29"]))) * (((data["feature_charisma77"]) * (((((data["feature_strength15"]) + (((((data["feature_wisdom29"]) + (data["feature_constitution30"]))) * (data["feature_strength1"]))))) * (((data["feature_intelligence7"]) * (data["feature_intelligence7"]))))))))))))))
# GPVI: machine-generated GP ensemble member.  Same contract as the other GP*
# functions here: takes `data` (string-key column access over feature_* names,
# presumably a DataFrame -- confirm) and returns Output(weighted sum of
# np.tanh terms).
# NOTE(review): auto-generated expression tree kept byte-identical.
def GPVI(data):
return Output(0.100000 * np.tanh((((((data["feature_charisma37"]) * (((data["feature_charisma54"]) * (data["feature_charisma63"]))))) + (((((data["feature_charisma76"]) * (data["feature_strength34"]))) - (((((data["feature_dexterity14"]) - (((data["feature_charisma63"]) - (np.tanh((data["feature_dexterity6"]))))))) + (((data["feature_dexterity4"]) - (data["feature_constitution30"])))))))) / 2.0)) +
0.100000 * np.tanh((((data["feature_strength1"]) + (((((data["feature_wisdom23"]) - ((((data["feature_wisdom7"]) + (((data["feature_constitution114"]) - (np.tanh((np.tanh((data["feature_charisma10"])))))))) / 2.0)))) - (((data["feature_charisma69"]) + (((((data["feature_constitution75"]) - (((data["feature_wisdom36"]) / 2.0)))) / 2.0))))))) / 2.0)) +
0.100000 * np.tanh((((((((data["feature_dexterity13"]) * (((((data["feature_strength19"]) + ((-((np.tanh((np.tanh((data["feature_constitution110"])))))))))) * 2.0)))) + (((data["feature_charisma5"]) + ((-((np.tanh((((np.tanh((((((data["feature_dexterity13"]) * 2.0)) * (((data["feature_dexterity4"]) + (data["feature_charisma5"]))))))) + (data["feature_dexterity4"]))))))))))) / 2.0)) / 2.0)) +
0.099511 * np.tanh((((-((((((data["feature_constitution93"]) - (((((data["feature_charisma53"]) * ((-(((((data["feature_strength9"]) + (np.tanh((data["feature_wisdom22"])))) / 2.0))))))) * ((((-((((((((data["feature_constitution93"]) * (data["feature_wisdom35"]))) + (data["feature_wisdom35"]))) + (((data["feature_constitution93"]) * (data["feature_wisdom36"])))))))) * 2.0)))))) / 2.0))))) / 2.0)) +
0.096090 * np.tanh(((data["feature_dexterity4"]) * ((-((((data["feature_dexterity4"]) * (((((((data["feature_wisdom46"]) - (((((data["feature_wisdom42"]) * (data["feature_dexterity4"]))) - (np.tanh((((data["feature_wisdom46"]) - (((data["feature_constitution39"]) - (data["feature_wisdom46"]))))))))))) - (data["feature_wisdom42"]))) / 2.0))))))))) +
0.099902 * np.tanh(((data["feature_wisdom43"]) * (np.tanh((((((data["feature_charisma58"]) - (np.tanh((((((data["feature_intelligence2"]) + (((((data["feature_intelligence2"]) + (((data["feature_intelligence4"]) - (data["feature_charisma59"]))))) - (data["feature_constitution104"]))))) + (((data["feature_dexterity7"]) + (((data["feature_intelligence2"]) + (((data["feature_intelligence2"]) - (data["feature_constitution104"]))))))))))))) / 2.0)))))) +
0.100000 * np.tanh(((data["feature_dexterity14"]) * (((data["feature_charisma47"]) * ((((data["feature_dexterity9"]) + ((((((data["feature_constitution71"]) + (((((data["feature_wisdom10"]) * 2.0)) * (((((((data["feature_wisdom10"]) * 2.0)) - (((np.tanh((data["feature_constitution82"]))) * 2.0)))) * (data["feature_strength3"])))))) / 2.0)) - (((data["feature_strength13"]) * 2.0))))) / 2.0)))))) +
0.100000 * np.tanh(((((data["feature_wisdom10"]) * (((((data["feature_intelligence12"]) + (((((data["feature_dexterity6"]) * (((((data["feature_constitution63"]) - (((data["feature_charisma50"]) - (data["feature_constitution24"]))))) * ((-((((((data["feature_constitution63"]) - (((((data["feature_intelligence12"]) * (data["feature_wisdom8"]))) - (data["feature_constitution63"]))))) * 2.0))))))))) / 2.0)))) / 2.0)))) / 2.0)) +
0.100000 * np.tanh(((data["feature_constitution103"]) * (((data["feature_wisdom21"]) * (((data["feature_constitution2"]) * (((data["feature_constitution54"]) * (((data["feature_wisdom21"]) * (((data["feature_wisdom8"]) + (((((data["feature_constitution2"]) + (((((data["feature_constitution105"]) - (data["feature_wisdom13"]))) + (data["feature_intelligence9"]))))) * ((-((((data["feature_intelligence9"]) - (data["feature_wisdom13"])))))))))))))))))))) +
0.099804 * np.tanh(((((((data["feature_constitution86"]) * (data["feature_strength36"]))) * (data["feature_strength36"]))) * (((data["feature_strength36"]) * (((data["feature_strength36"]) * (np.tanh((((data["feature_charisma70"]) - ((((((((data["feature_constitution12"]) + (data["feature_constitution40"]))) + (np.tanh((data["feature_constitution6"])))) / 2.0)) + (((data["feature_wisdom38"]) - (((data["feature_charisma63"]) / 2.0)))))))))))))))))
# GPVII: machine-generated GP ensemble member; weighted np.tanh() terms over
# data["feature_*"] columns summed inside Output() (helper defined elsewhere
# in the file -- confirm what it does before interpreting the return value).
# NOTE(review): expression kept byte-identical; generated code, do not
# hand-edit.
def GPVII(data):
return Output(0.099902 * np.tanh(((((((((((((data["feature_wisdom42"]) * (data["feature_wisdom44"]))) - (((data["feature_dexterity6"]) + ((-((((data["feature_charisma63"]) - ((-((((data["feature_constitution102"]) * (((data["feature_charisma81"]) - (((data["feature_dexterity12"]) * 2.0)))))))))))))))))) + (((data["feature_charisma81"]) * (data["feature_charisma63"])))) / 2.0)) * 2.0)) + ((-((data["feature_dexterity4"]))))) / 2.0)) +
0.100000 * np.tanh(((((data["feature_strength19"]) - (((data["feature_constitution85"]) + ((-(((((((((data["feature_strength1"]) * (data["feature_strength19"]))) * 2.0)) + (data["feature_charisma1"])) / 2.0))))))))) * ((((((((data["feature_strength1"]) * (data["feature_dexterity9"]))) * (data["feature_charisma85"]))) + (((data["feature_constitution81"]) * (data["feature_charisma54"])))) / 2.0)))) +
0.099902 * np.tanh((-(((((data["feature_charisma69"]) + (((((-((((((data["feature_charisma69"]) + (((data["feature_strength34"]) + (((data["feature_charisma6"]) - (data["feature_intelligence2"]))))))) + (((data["feature_charisma79"]) + (((data["feature_wisdom32"]) - (data["feature_intelligence2"])))))))))) + (data["feature_constitution7"])) / 2.0))) / 2.0))))) +
0.099511 * np.tanh(((((data["feature_strength19"]) * (((data["feature_constitution26"]) * (data["feature_constitution50"]))))) * (((data["feature_charisma46"]) - (((data["feature_constitution15"]) - (((data["feature_dexterity2"]) + (((((data["feature_constitution114"]) * (((data["feature_dexterity2"]) + (((data["feature_strength19"]) - (data["feature_constitution38"]))))))) - (data["feature_constitution114"]))))))))))) +
0.100000 * np.tanh(((((((-((data["feature_dexterity7"])))) + (((data["feature_wisdom20"]) - ((((((((data["feature_constitution46"]) - (((((data["feature_wisdom12"]) - (data["feature_strength1"]))) * ((-((data["feature_dexterity7"])))))))) - (data["feature_intelligence5"]))) + (((data["feature_wisdom12"]) - (data["feature_strength1"])))) / 2.0))))) / 2.0)) / 2.0)) +
0.099902 * np.tanh(((data["feature_charisma57"]) * ((-((((((((((data["feature_charisma83"]) * (data["feature_charisma57"]))) + ((-((data["feature_charisma36"]))))) / 2.0)) + (((((((((((data["feature_wisdom30"]) * (((data["feature_intelligence9"]) * 2.0)))) * (data["feature_charisma83"]))) * (data["feature_intelligence9"]))) * (data["feature_intelligence9"]))) * (data["feature_intelligence9"])))) / 2.0))))))) +
0.099511 * np.tanh(((data["feature_wisdom8"]) * ((-(((-((((((((((data["feature_constitution97"]) - (data["feature_charisma35"]))) + (data["feature_wisdom41"]))) * (data["feature_strength14"]))) * (((data["feature_strength9"]) * ((((((((data["feature_wisdom8"]) + (data["feature_constitution97"])) / 2.0)) + (data["feature_strength9"]))) * ((((data["feature_charisma28"]) + (data["feature_wisdom22"])) / 2.0)))))))))))))))) +
0.100000 * np.tanh((((-((((data["feature_constitution69"]) * (((((data["feature_strength10"]) * (((((((data["feature_charisma34"]) + (((((data["feature_constitution6"]) * (data["feature_constitution6"]))) - (data["feature_intelligence4"]))))) + (data["feature_constitution69"]))) * (data["feature_intelligence4"]))))) * (((((data["feature_constitution69"]) - (data["feature_dexterity9"]))) / 2.0))))))))) * 2.0)) +
0.100000 * np.tanh(np.tanh((((((data["feature_strength13"]) - (((((((((((((data["feature_charisma11"]) * (data["feature_dexterity7"]))) - (data["feature_strength13"]))) + (data["feature_charisma11"])) / 2.0)) - (data["feature_constitution65"]))) + (data["feature_dexterity11"])) / 2.0)))) * (((((((data["feature_charisma11"]) * (data["feature_charisma45"]))) - (data["feature_dexterity11"]))) / 2.0)))))) +
0.099316 * np.tanh(((((np.tanh((data["feature_wisdom42"]))) * (((((data["feature_wisdom19"]) * (((data["feature_constitution92"]) * 2.0)))) * (data["feature_wisdom42"]))))) * (((((data["feature_charisma50"]) * (((data["feature_charisma50"]) * ((((((data["feature_constitution102"]) + (((((data["feature_charisma37"]) * 2.0)) * 2.0)))) + (((data["feature_charisma37"]) + (data["feature_constitution102"])))) / 2.0)))))) / 2.0)))))
# GPVIII: machine-generated GP ensemble member, same pattern as the other GP*
# functions: Output() applied to a weighted sum of np.tanh() expressions over
# data["feature_*"] columns (`data` presumably DataFrame-like -- confirm).
# NOTE(review): byte-identical preservation of the generated arithmetic.
def GPVIII(data):
return Output(0.099902 * np.tanh((((data["feature_charisma46"]) + ((-((((np.tanh((((data["feature_charisma69"]) * ((((((data["feature_dexterity7"]) + (((data["feature_dexterity7"]) - (data["feature_charisma11"]))))) + (((((data["feature_dexterity14"]) + (((data["feature_dexterity11"]) - (data["feature_wisdom5"]))))) + (data["feature_dexterity6"])))) / 2.0)))))) * 2.0)))))) / 2.0)) +
0.100000 * np.tanh((((((data["feature_constitution30"]) * (data["feature_charisma18"]))) + ((((((((((((data["feature_dexterity1"]) * 2.0)) * (((((data["feature_charisma63"]) * 2.0)) * (data["feature_strength1"]))))) + (data["feature_dexterity12"])) / 2.0)) * (((((((data["feature_strength3"]) * (data["feature_charisma63"]))) * ((((data["feature_charisma63"]) + (data["feature_strength14"])) / 2.0)))) * 2.0)))) - (data["feature_dexterity12"])))) / 2.0)) +
0.099902 * np.tanh((((((data["feature_dexterity3"]) * (((data["feature_wisdom36"]) - (((data["feature_wisdom12"]) - (((((data["feature_wisdom23"]) * (((((data["feature_charisma79"]) * 2.0)) / 2.0)))) - (data["feature_strength13"]))))))))) + (((((((((data["feature_strength14"]) / 2.0)) * 2.0)) - (((data["feature_constitution114"]) - (((data["feature_wisdom23"]) * (data["feature_charisma79"]))))))) / 2.0))) / 2.0)) +
0.100000 * np.tanh((((((np.tanh((((data["feature_strength9"]) * (data["feature_wisdom20"]))))) + ((-((((data["feature_constitution20"]) - (((data["feature_charisma28"]) * ((((((((((((((data["feature_charisma13"]) * (((data["feature_wisdom20"]) * 2.0)))) + (data["feature_strength21"])) / 2.0)) * 2.0)) * (data["feature_charisma13"]))) * (data["feature_intelligence6"]))) * 2.0)))))))))) / 2.0)) / 2.0)) +
0.100000 * np.tanh((((((data["feature_charisma85"]) + ((-((((data["feature_dexterity7"]) + (((((data["feature_intelligence4"]) + (((((((data["feature_dexterity6"]) - (data["feature_constitution39"]))) + (((data["feature_intelligence4"]) * ((((((data["feature_constitution58"]) + ((-((((data["feature_dexterity6"]) - (data["feature_constitution39"]))))))) / 2.0)) - (data["feature_charisma61"]))))))) * 2.0)))) / 2.0)))))))) / 2.0)) / 2.0)) +
0.099707 * np.tanh(((((data["feature_constitution41"]) * (((data["feature_charisma35"]) + (data["feature_constitution41"]))))) * (np.tanh((((data["feature_charisma35"]) * ((-(((((data["feature_constitution4"]) + ((-((((data["feature_dexterity2"]) * (((data["feature_constitution81"]) + ((((((data["feature_constitution81"]) + (data["feature_constitution20"])) / 2.0)) * (((data["feature_charisma67"]) + (data["feature_strength3"]))))))))))))) / 2.0))))))))))) +
0.096188 * np.tanh(((data["feature_wisdom24"]) * (((np.tanh((((((data["feature_constitution90"]) / 2.0)) - (np.tanh((((((((data["feature_intelligence2"]) - ((((data["feature_dexterity12"]) + (data["feature_intelligence2"])) / 2.0)))) + (((data["feature_intelligence2"]) + (np.tanh((np.tanh(((-((data["feature_strength13"])))))))))))) * 2.0)))))))) / 2.0)))) +
0.100000 * np.tanh(((data["feature_intelligence4"]) * (((data["feature_intelligence4"]) * ((-(((((((data["feature_intelligence11"]) * (data["feature_constitution75"]))) + (((((data["feature_constitution50"]) + (((data["feature_intelligence4"]) - (((((data["feature_constitution19"]) + (data["feature_intelligence4"]))) + (data["feature_charisma10"]))))))) / 2.0))) / 2.0))))))))) +
0.099316 * np.tanh((-(((((((((((((((data["feature_constitution64"]) * (data["feature_charisma6"]))) * (data["feature_wisdom7"]))) * 2.0)) + (data["feature_dexterity9"])) / 2.0)) * (((data["feature_wisdom7"]) - ((((data["feature_constitution113"]) + (((((data["feature_intelligence12"]) * ((((data["feature_constitution113"]) + (data["feature_constitution67"])) / 2.0)))) * 2.0))) / 2.0)))))) * (data["feature_charisma71"])))))) +
0.099902 * np.tanh(((((data["feature_dexterity7"]) * (((((((((data["feature_wisdom42"]) * (data["feature_wisdom22"]))) - (((data["feature_constitution78"]) + (((((data["feature_constitution56"]) + (((data["feature_constitution65"]) - (data["feature_charisma9"]))))) - (np.tanh((((data["feature_charisma82"]) * (data["feature_wisdom42"]))))))))))) + (data["feature_wisdom42"]))) / 2.0)))) / 2.0)))
# GPIX: machine-generated GP ensemble member; weighted sum of np.tanh() terms
# over data["feature_*"] columns, wrapped in Output() (defined elsewhere in
# this file -- confirm its semantics).  One term uses the literal (0.318310),
# i.e. approximately 1/pi -- a constant chosen by the GP search, not a tunable.
# NOTE(review): generated expression kept byte-identical.
def GPIX(data):
return Output(0.099902 * np.tanh((((data["feature_charisma76"]) + ((-(((((data["feature_constitution110"]) + (((((((data["feature_dexterity11"]) + (((data["feature_dexterity4"]) - (data["feature_charisma85"]))))) * 2.0)) - (((((((((data["feature_charisma46"]) - (data["feature_dexterity14"]))) + (data["feature_constitution42"]))) - (data["feature_dexterity14"]))) + (data["feature_constitution42"])))))) / 2.0)))))) / 2.0)) +
0.100000 * np.tanh((((((data["feature_charisma28"]) + ((((((((data["feature_dexterity9"]) + (data["feature_charisma1"]))) * (((((((data["feature_charisma10"]) - (np.tanh((data["feature_charisma69"]))))) + (data["feature_strength34"]))) * 2.0)))) + (((data["feature_wisdom23"]) - ((((((((0.318310)) + (data["feature_charisma69"]))) + (data["feature_intelligence2"]))) * 2.0))))) / 2.0))) / 2.0)) / 2.0)) +
0.099902 * np.tanh(((data["feature_dexterity8"]) * (((data["feature_dexterity8"]) * (((data["feature_wisdom36"]) - (((data["feature_wisdom2"]) + ((((data["feature_constitution37"]) + ((-((((((data["feature_strength3"]) * (data["feature_strength1"]))) * 2.0)))))) / 2.0)))))))))) +
0.100000 * np.tanh(np.tanh(((((((data["feature_charisma79"]) + ((((((((data["feature_wisdom20"]) + (((data["feature_strength22"]) - ((((data["feature_wisdom1"]) + ((((((data["feature_constitution114"]) - (data["feature_charisma31"]))) + (((data["feature_constitution62"]) + (data["feature_strength22"])))) / 2.0))) / 2.0))))) / 2.0)) - ((((data["feature_charisma31"]) + ((((data["feature_constitution114"]) + (data["feature_intelligence4"])) / 2.0))) / 2.0)))) * 2.0))) / 2.0)) / 2.0)))) +
0.095112 * np.tanh((((data["feature_wisdom23"]) + (np.tanh((np.tanh((((((((((((data["feature_charisma36"]) + ((((-(((((data["feature_dexterity11"]) + (data["feature_intelligence4"])) / 2.0))))) * 2.0))) / 2.0)) + ((((-((((data["feature_dexterity2"]) * (data["feature_wisdom23"])))))) * 2.0))) / 2.0)) * 2.0)) * 2.0))))))) / 2.0)) +
0.100000 * np.tanh(((data["feature_dexterity11"]) * (((data["feature_dexterity5"]) * (((((((data["feature_dexterity4"]) * (data["feature_constitution55"]))) - (((data["feature_wisdom21"]) - (((data["feature_charisma63"]) * (((((data["feature_charisma63"]) * (data["feature_charisma13"]))) - (((data["feature_wisdom21"]) - (((data["feature_charisma63"]) * (data["feature_constitution41"]))))))))))))) / 2.0)))))) +
0.100000 * np.tanh((((((((((data["feature_strength1"]) * (((data["feature_wisdom8"]) * 2.0)))) + (((data["feature_charisma3"]) - (((data["feature_constitution12"]) + (((data["feature_strength13"]) - (((((((data["feature_charisma50"]) * (data["feature_wisdom37"]))) * (data["feature_wisdom37"]))) * (((((((data["feature_wisdom8"]) * 2.0)) * 2.0)) * (data["feature_charisma61"])))))))))))) / 2.0)) / 2.0)) / 2.0)) +
0.099804 * np.tanh(((((data["feature_charisma54"]) - (data["feature_constitution42"]))) * ((((((np.tanh(((((((-((((data["feature_charisma54"]) - (data["feature_constitution21"])))))) * (data["feature_constitution21"]))) * 2.0)))) + ((((data["feature_constitution80"]) + ((-((((((data["feature_charisma54"]) - (data["feature_constitution55"]))) - (((data["feature_constitution21"]) - (data["feature_constitution5"]))))))))) / 2.0))) / 2.0)) / 2.0)))) +
0.086217 * np.tanh(((data["feature_wisdom21"]) * (((data["feature_constitution88"]) * (((data["feature_constitution88"]) * (((((((((data["feature_constitution88"]) * (((data["feature_wisdom34"]) + ((((data["feature_wisdom34"]) + (data["feature_wisdom22"])) / 2.0)))))) * (data["feature_wisdom21"]))) - (((data["feature_constitution75"]) * (data["feature_charisma26"]))))) - (((data["feature_constitution88"]) * (data["feature_dexterity7"]))))))))))) +
0.099902 * np.tanh(((data["feature_charisma63"]) * ((((((((((data["feature_wisdom3"]) * ((-((data["feature_constitution6"])))))) * 2.0)) + (((((data["feature_constitution113"]) - (((data["feature_constitution58"]) - ((((((((data["feature_charisma63"]) * (np.tanh((data["feature_strength19"]))))) + (data["feature_charisma82"])) / 2.0)) * 2.0)))))) * (data["feature_dexterity1"])))) / 2.0)) / 2.0)))))
def GPX(data):
    """Tenth machine-generated (genetic-programming) sub-model.

    Evaluates a fixed blend of ``np.tanh`` terms over ``feature_*`` columns of
    ``data`` (a DataFrame-like with those columns), each scaled by a small
    fitted coefficient (~0.1), and wraps the sum in ``Output`` (defined
    earlier in this file).  The expression is generated code -- do not edit
    by hand.
    """
    return Output(0.097947 * np.tanh(((((data["feature_charisma19"]) - ((((data["feature_dexterity12"]) + (((((data["feature_dexterity7"]) - (data["feature_constitution42"]))) - (((((data["feature_charisma37"]) - (((data["feature_dexterity4"]) - ((((((data["feature_strength4"]) - (data["feature_charisma9"]))) + (((((data["feature_charisma67"]) - (data["feature_dexterity11"]))) - (data["feature_constitution110"])))) / 2.0)))))) * 2.0))))) / 2.0)))) / 2.0)) +
        0.099804 * np.tanh(((data["feature_charisma85"]) * (((((((((data["feature_constitution97"]) + (data["feature_strength19"]))) * (((((data["feature_charisma46"]) + (data["feature_wisdom42"]))) / 2.0)))) * (((((data["feature_strength14"]) + (((data["feature_dexterity1"]) * (((data["feature_strength19"]) * (data["feature_constitution39"]))))))) * 2.0)))) * (((data["feature_strength19"]) * (((data["feature_charisma85"]) / 2.0)))))))) +
        0.099902 * np.tanh((((data["feature_wisdom23"]) + (((((((((data["feature_charisma79"]) - (data["feature_dexterity14"]))) + ((-2.0)))) - ((((-(((((((((data["feature_constitution89"]) + (((data["feature_strength7"]) - (data["feature_wisdom23"]))))) * 2.0)) + (((((data["feature_wisdom34"]) * 2.0)) + (((np.tanh((data["feature_charisma86"]))) * 2.0))))) / 2.0))))) / 2.0)))) / 2.0))) / 2.0)) +
        0.100000 * np.tanh(((((((data["feature_charisma63"]) * (((data["feature_charisma63"]) * (data["feature_dexterity9"]))))) + (np.tanh((((((-((data["feature_constitution114"])))) + ((((-((((data["feature_dexterity6"]) - (((((((data["feature_strength9"]) + ((((np.tanh(((-((data["feature_dexterity9"])))))) + (data["feature_charisma58"])) / 2.0))) / 2.0)) + (data["feature_charisma58"])) / 2.0))))))) * 2.0))) / 2.0)))))) / 2.0)) +
        0.099902 * np.tanh((-((((((data["feature_constitution31"]) * (((((-1.0)) + ((((((((data["feature_dexterity10"]) + (data["feature_charisma81"])) / 2.0)) - (((data["feature_wisdom22"]) * (((((data["feature_wisdom5"]) - ((((data["feature_charisma81"]) + (data["feature_intelligence3"])) / 2.0)))) / 2.0)))))) * 2.0))) / 2.0)))) * (((data["feature_constitution8"]) - (((data["feature_dexterity6"]) - (data["feature_wisdom22"])))))))))) +
        0.099804 * np.tanh(((((data["feature_constitution50"]) * ((((-((((data["feature_constitution50"]) * ((((data["feature_intelligence4"]) + (((((data["feature_wisdom7"]) + ((((-((((data["feature_wisdom42"]) * (((data["feature_wisdom42"]) * (data["feature_dexterity4"])))))))) - (((data["feature_wisdom42"]) * (data["feature_dexterity4"]))))))) - (data["feature_dexterity4"])))) / 2.0))))))) * 2.0)))) / 2.0)) +
        0.100000 * np.tanh(((data["feature_wisdom8"]) * (((((data["feature_strength34"]) - (np.tanh(((((data["feature_constitution62"]) + ((((((data["feature_constitution53"]) + (((((data["feature_intelligence2"]) * 2.0)) + (((((data["feature_charisma19"]) + (((((data["feature_dexterity4"]) - (data["feature_constitution97"]))) * 2.0)))) * 2.0))))) / 2.0)) - (data["feature_strength3"])))) / 2.0)))))) / 2.0)))) +
        0.099511 * np.tanh(((data["feature_strength28"]) * (((((data["feature_constitution44"]) / 2.0)) * (((data["feature_dexterity4"]) - ((((((((((data["feature_constitution7"]) + (((data["feature_constitution16"]) * (data["feature_strength28"]))))) * (((data["feature_constitution68"]) + (data["feature_constitution24"]))))) * (((data["feature_strength15"]) + (data["feature_constitution24"]))))) + (data["feature_constitution16"])) / 2.0)))))))) +
        0.100000 * np.tanh(((data["feature_constitution113"]) * (((data["feature_charisma13"]) * (((((data["feature_intelligence5"]) * (((((data["feature_charisma47"]) - (data["feature_dexterity4"]))) + (((data["feature_charisma54"]) + (((((data["feature_charisma86"]) * (((((data["feature_wisdom35"]) * (data["feature_wisdom35"]))) * (data["feature_wisdom35"]))))) * 2.0)))))))) * (((data["feature_charisma77"]) * (data["feature_charisma13"]))))))))) +
        0.100000 * np.tanh((((((data["feature_strength1"]) + ((((-((data["feature_dexterity7"])))) - (((((((((((((((data["feature_constitution46"]) * (data["feature_strength1"]))) * (((data["feature_constitution54"]) * 2.0)))) * (data["feature_charisma67"]))) * (((data["feature_constitution54"]) * 2.0)))) * (data["feature_constitution54"]))) * (data["feature_strength1"]))) * (((data["feature_strength1"]) * 2.0))))))) / 2.0)) / 2.0)))
def GPXI(data):
    """Eleventh machine-generated (genetic-programming) sub-model.

    Same shape as GPI..GPX: a fixed blend of coefficient-scaled ``np.tanh``
    terms over ``feature_*`` columns of ``data``, wrapped in ``Output``.
    NOTE(review): GPXI is defined but the ensemble ``GP`` below only sums
    GPI..GPX -- confirm whether its exclusion is intentional.
    """
    return Output(0.100000 * np.tanh(((((((((data["feature_strength4"]) * (data["feature_charisma19"]))) + (((data["feature_strength34"]) + (((((((data["feature_charisma76"]) * (((data["feature_strength4"]) + (data["feature_wisdom8"]))))) * (((data["feature_charisma18"]) * (((data["feature_constitution42"]) + (data["feature_dexterity7"]))))))) - (((data["feature_dexterity7"]) * 2.0)))))))) / 2.0)) / 2.0)) +
        0.099707 * np.tanh((-(((((data["feature_dexterity5"]) + ((((((data["feature_dexterity14"]) + ((((data["feature_constitution38"]) + (((((((data["feature_charisma85"]) * (data["feature_charisma69"]))) - (data["feature_charisma46"]))) - (((data["feature_dexterity5"]) * 2.0))))) / 2.0))) / 2.0)) - (((((data["feature_charisma85"]) * 2.0)) / 2.0))))) / 2.0))))) +
        0.100000 * np.tanh((((-(((((((((((data["feature_constitution102"]) + (data["feature_constitution102"])) / 2.0)) + (((((np.tanh((((data["feature_constitution40"]) - (data["feature_charisma58"]))))) - (((data["feature_charisma10"]) - (data["feature_constitution10"]))))) - (data["feature_wisdom23"]))))) * ((((data["feature_strength19"]) + (data["feature_wisdom23"])) / 2.0)))) / 2.0))))) / 2.0)) +
        0.099511 * np.tanh((-((((data["feature_wisdom3"]) * (((data["feature_constitution78"]) * (np.tanh((((data["feature_dexterity6"]) - (((data["feature_wisdom35"]) * ((((data["feature_charisma77"]) + (((((data["feature_intelligence5"]) + (data["feature_constitution78"]))) * (data["feature_constitution78"])))) / 2.0))))))))))))))) +
        0.099902 * np.tanh((((((data["feature_constitution108"]) + (((data["feature_strength3"]) * (data["feature_constitution106"])))) / 2.0)) * ((((((data["feature_constitution52"]) + (data["feature_constitution50"])) / 2.0)) * (np.tanh((((data["feature_dexterity13"]) - ((((data["feature_constitution52"]) + (np.tanh((((((data["feature_constitution108"]) * (((data["feature_constitution82"]) * (data["feature_constitution108"]))))) / 2.0))))) / 2.0)))))))))) +
        0.099511 * np.tanh(((((np.tanh((((((-((data["feature_constitution62"])))) + (((data["feature_wisdom10"]) * (((((((((data["feature_wisdom42"]) * (((((data["feature_charisma18"]) * 2.0)) + (data["feature_intelligence12"]))))) + (((data["feature_intelligence12"]) + (data["feature_constitution97"]))))) * (data["feature_wisdom42"]))) - (((data["feature_wisdom10"]) * (data["feature_constitution65"])))))))) / 2.0)))) / 2.0)) / 2.0)) +
        0.100000 * np.tanh(((data["feature_charisma67"]) * (((((((((data["feature_wisdom36"]) / 2.0)) + (((data["feature_strength15"]) * ((-((((data["feature_wisdom46"]) - ((((-((((data["feature_wisdom46"]) - (data["feature_constitution9"])))))) * (((data["feature_constitution9"]) * (((data["feature_strength30"]) * (((data["feature_intelligence12"]) + (data["feature_intelligence12"])))))))))))))))))) / 2.0)) / 2.0)))) +
        0.099707 * np.tanh(np.tanh((np.tanh((((np.tanh((((data["feature_charisma6"]) * (((data["feature_constitution33"]) * (((data["feature_charisma6"]) * (np.tanh((((data["feature_strength19"]) / 2.0)))))))))))) * (((data["feature_constitution1"]) + (((data["feature_constitution18"]) * (((data["feature_strength19"]) * (((data["feature_constitution18"]) * (((data["feature_constitution18"]) * (data["feature_strength19"]))))))))))))))))) +
        0.094233 * np.tanh(((((data["feature_constitution31"]) * (((((((((data["feature_constitution31"]) - (((data["feature_charisma34"]) - (np.tanh(((-((((data["feature_intelligence2"]) * 2.0))))))))))) / 2.0)) / 2.0)) * (((data["feature_wisdom38"]) + (((((data["feature_intelligence2"]) + (data["feature_intelligence8"]))) + (((data["feature_wisdom38"]) - (data["feature_charisma34"]))))))))))) / 2.0)) +
        0.090616 * np.tanh(((((((data["feature_wisdom9"]) * (((((((data["feature_charisma41"]) * (data["feature_wisdom41"]))) * ((((data["feature_strength7"]) + ((((data["feature_constitution90"]) + ((-((data["feature_intelligence3"]))))) / 2.0))) / 2.0)))) * 2.0)))) * ((((((data["feature_constitution90"]) + ((-((data["feature_intelligence3"]))))) / 2.0)) + (data["feature_wisdom42"]))))) * (data["feature_charisma41"]))))
def GP(data):
    """Ensemble prediction: the mean of the ten GP sub-models.

    Computes 0.1 * (GPI(data) + GPII(data) + ... + GPX(data)), i.e. a simple
    average of the ten generated sub-models.  GPXI is intentionally not part
    of this sum (it was in the original code as well).
    """
    sub_models = (GPI, GPII, GPIII, GPIV, GPV,
                  GPVI, GPVII, GPVIII, GPIX, GPX)
    return .1 * sum(model(data) for model in sub_models)
# Load the Numerai training and tournament data sets (pd, PREDICTION_NAME,
# score and payout are defined earlier in this file).
tr = pd.read_csv('numerai_training_data.csv')
te = pd.read_csv('numerai_tournament_data.csv')
# Feature columns start after the leading id/era/data_type columns.
cols = tr.columns[3:]
cols  # bare expression: a no-op outside a notebook, kept for fidelity

# Evaluate each GP sub-model on the training data.  The original code
# repeated this stanza verbatim for GPI through GPX; a loop produces
# byte-identical output while removing the duplication.  After the loop,
# tr[PREDICTION_NAME] holds the predictions of the last model (GPX),
# exactly as before.
for _gp_model in (GPI, GPII, GPIII, GPIV, GPV, GPVI, GPVII, GPVIII, GPIX, GPX):
    tr[PREDICTION_NAME] = _gp_model(tr)
    train_correlations = tr.groupby("era").apply(score)
    print(
        f"On training the correlation has mean {train_correlations.mean()} and std {train_correlations.std()}")
    print(
        f"On training the average per-era payout is {payout(train_correlations).mean()}")
    print(f"Sharpe {train_correlations.mean()/train_correlations.std()}")
# --- Out-of-sample evaluation of the full ensemble on the validation slice ---
validation_data = te[te.data_type == "validation"].copy()
validation_data[PREDICTION_NAME] = GP(validation_data)
validation_correlations = validation_data.groupby("era").apply(score)
print(
    f"On validation the correlation has mean {validation_correlations.mean()} and std {validation_correlations.std()}")
print(
    f"On validation the average per-era payout is {payout(validation_correlations).mean()}")
print(f"Sharpe {validation_correlations.mean()/validation_correlations.std()}")
# Write an (un-neutralized) submission over the whole tournament file.
# NOTE(review): assumes example_predictions.csv rows align positionally with
# the tournament rows in `te` -- confirm ordering and length match.
ex = pd.read_csv('example_predictions.csv')
ex.prediction = GP(te)
ex.to_csv('standard.csv', index=False)
# Feature columns used by normalize_and_neutralize below (the neutralization
# basis).  This is a literal list rather than `cols` -- presumably a fixed
# subset/ordering captured when the models were generated; confirm it matches
# the tournament file's feature columns before reuse.
features = ['feature_dexterity4', 'feature_charisma63', 'feature_strength19',
            'feature_dexterity7', 'feature_wisdom42', 'feature_strength1',
            'feature_intelligence4', 'feature_dexterity11', 'feature_wisdom23',
            'feature_intelligence2', 'feature_charisma69',
            'feature_dexterity6', 'feature_dexterity12', 'feature_dexterity9',
            'feature_dexterity14', 'feature_strength34', 'feature_wisdom36',
            'feature_constitution114', 'feature_dexterity2',
            'feature_charisma35', 'feature_wisdom35', 'feature_charisma10',
            'feature_wisdom8', 'feature_charisma54', 'feature_charisma85',
            'feature_wisdom7', 'feature_charisma13', 'feature_wisdom22',
            'feature_charisma37', 'feature_constitution50',
            'feature_strength3', 'feature_charisma5', 'feature_charisma79',
            'feature_strength15', 'feature_wisdom20', 'feature_charisma19',
            'feature_constitution42', 'feature_charisma76',
            'feature_charisma50', 'feature_strength36', 'feature_dexterity1',
            'feature_strength13', 'feature_intelligence9',
            'feature_constitution16', 'feature_constitution81',
            'feature_constitution108', 'feature_charisma58',
            'feature_constitution6', 'feature_constitution110',
            'feature_wisdom21', 'feature_charisma6', 'feature_strength9',
            'feature_constitution113', 'feature_charisma45',
            'feature_constitution46', 'feature_constitution54',
            'feature_dexterity13', 'feature_dexterity8',
            'feature_constitution39', 'feature_charisma11',
            'feature_constitution38', 'feature_wisdom13', 'feature_wisdom3',
            'feature_charisma81', 'feature_intelligence12', 'feature_wisdom26',
            'feature_constitution62', 'feature_constitution63',
            'feature_charisma46', 'feature_intelligence3',
            'feature_strength22', 'feature_wisdom10', 'feature_intelligence5',
            'feature_constitution97', 'feature_constitution102',
            'feature_charisma28', 'feature_constitution91',
            'feature_dexterity3', 'feature_constitution88',
            'feature_charisma67', 'feature_dexterity5', 'feature_wisdom33',
            'feature_strength4', 'feature_strength10', 'feature_strength14',
            'feature_constitution4', 'feature_constitution12',
            'feature_constitution78', 'feature_intelligence11',
            'feature_constitution85', 'feature_strength30',
            'feature_constitution24', 'feature_wisdom2', 'feature_wisdom46',
            'feature_constitution18', 'feature_constitution7',
            'feature_wisdom34', 'feature_charisma83', 'feature_constitution34',
            'feature_charisma77', 'feature_wisdom43', 'feature_constitution21',
            'feature_constitution93', 'feature_charisma9',
            'feature_constitution41', 'feature_constitution69',
            'feature_charisma53', 'feature_wisdom12', 'feature_constitution30',
            'feature_constitution75', 'feature_constitution104',
            'feature_charisma55', 'feature_wisdom1', 'feature_constitution55',
            'feature_wisdom37', 'feature_charisma57', 'feature_wisdom41',
            'feature_charisma3', 'feature_constitution19',
            'feature_constitution2', 'feature_constitution31',
            'feature_constitution65', 'feature_wisdom16', 'feature_wisdom18',
            'feature_intelligence8', 'feature_charisma61',
            'feature_charisma75', 'feature_charisma18',
            'feature_constitution68', 'feature_constitution89',
            'feature_constitution90', 'feature_constitution58',
            'feature_constitution20', 'feature_wisdom5', 'feature_wisdom19',
            'feature_wisdom29', 'feature_intelligence7', 'feature_charisma36',
            'feature_wisdom32', 'feature_charisma47', 'feature_charisma1',
            'feature_charisma82', 'feature_charisma34',
            'feature_constitution56', 'feature_charisma86',
            'feature_constitution47', 'feature_constitution101',
            'feature_charisma2', 'feature_charisma31', 'feature_wisdom24',
            'feature_constitution70', 'feature_charisma29',
            'feature_constitution40', 'feature_constitution15',
            'feature_wisdom44', 'feature_intelligence6', 'feature_strength28',
            'feature_charisma16', 'feature_wisdom11', 'feature_constitution26',
            'feature_constitution8', 'feature_constitution53',
            'feature_dexterity10', 'feature_wisdom38', 'feature_charisma70',
            'feature_constitution37', 'feature_wisdom30', 'feature_strength7',
            'feature_wisdom45', 'feature_constitution92', 'feature_strength21',
            'feature_constitution5', 'feature_constitution80',
            'feature_constitution64', 'feature_constitution67',
            'feature_charisma71', 'feature_constitution86',
            'feature_charisma26', 'feature_charisma41',
            'feature_constitution105', 'feature_constitution103',
            'feature_wisdom27', 'feature_constitution59',
            'feature_constitution32', 'feature_charisma74',
            'feature_strength18', 'feature_constitution111',
            'feature_strength24', 'feature_wisdom4', 'feature_constitution94',
            'feature_constitution79', 'feature_strength12',
            'feature_constitution66', 'feature_constitution96',
            'feature_constitution84', 'feature_wisdom39',
            'feature_constitution27', 'feature_charisma66', 'feature_wisdom40',
            'feature_charisma43', 'feature_constitution82',
            'feature_constitution71', 'feature_charisma59',
            'feature_strength2', 'feature_charisma78', 'feature_charisma32',
            'feature_constitution98', 'feature_constitution11',
            'feature_constitution52', 'feature_charisma12', 'feature_wisdom25',
            'feature_strength32', 'feature_charisma42',
            'feature_constitution100', 'feature_constitution44']
# --- Feature-neutralized submission over the whole tournament set ---
validation_data = te.copy()
validation_data["preds"] = GP(validation_data)
# NOTE(review): groupby().apply() assumes normalize_and_neutralize (defined
# earlier in this file) returns values that align back to validation_data's
# index -- verify against its definition before changing.
validation_data["preds_neutralized"] = validation_data.groupby("era").apply(
    # neutralize by 50% within each era
    lambda x: normalize_and_neutralize(x, ["preds"], features, 0.5)
)
# Rescale neutralized predictions into [0, 1] for submission.
scaler = MinMaxScaler()
validation_data[PREDICTION_NAME] = scaler.fit_transform(
    validation_data[["preds_neutralized"]])  # transform back to 0-1
validation_correlations = validation_data.groupby("era").apply(score)
print(
    f"On validation the correlation has mean {validation_correlations.mean()} and std {validation_correlations.std()}")
print(
    f"On validation the average per-era payout is {payout(validation_correlations).mean()}")
print(f"Sharpe {validation_correlations.mean()/validation_correlations.std()}")
ex = pd.read_csv('example_predictions.csv')
# NOTE(review): relies on PREDICTION_NAME == "prediction" and on positional
# alignment between `ex` and `validation_data` -- confirm both.
ex.prediction = validation_data.prediction
ex.to_csv('weaksauce.csv', index=False, float_format='%.6f')
|
{"hexsha": "2cc2ed5f7b2427d7bc96cb72b5365ca0d63a3a33", "size": 60370, "ext": "py", "lang": "Python", "max_stars_repo_path": "allornothing.py", "max_stars_repo_name": "scirpus/numer", "max_stars_repo_head_hexsha": "05c46b6f267c5b7651b79fa009dfe74194a7a0e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-07-20T18:19:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T14:51:08.000Z", "max_issues_repo_path": "allornothing.py", "max_issues_repo_name": "scirpus/numer", "max_issues_repo_head_hexsha": "05c46b6f267c5b7651b79fa009dfe74194a7a0e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "allornothing.py", "max_forks_repo_name": "scirpus/numer", "max_forks_repo_head_hexsha": "05c46b6f267c5b7651b79fa009dfe74194a7a0e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2021-07-20T18:19:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T01:24:36.000Z", "avg_line_length": 140.7226107226, "max_line_length": 507, "alphanum_fraction": 0.6046380653, "include": true, "reason": "import numpy,import scipy", "num_tokens": 17900}
|
// rocks_sorted_data_impl_test.cpp
/**
* Copyright (C) 2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the GNU Affero General Public License in all respects for
* all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also delete
* it in the license file.
*/
#include <memory>
#include <boost/shared_ptr.hpp>
#include <boost/filesystem/operations.hpp>
#include <rocksdb/comparator.h>
#include <rocksdb/db.h>
#include <rocksdb/options.h>
#include <rocksdb/slice.h>
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/storage/rocks/rocks_engine.h"
#include "mongo/db/storage/rocks/rocks_sorted_data_impl.h"
#include "mongo/db/storage/rocks/rocks_record_store.h"
#include "mongo/db/storage/rocks/rocks_recovery_unit.h"
#include "mongo/unittest/temp_dir.h"
#include "mongo/unittest/unittest.h"
using namespace mongo;
namespace mongo {
    // Test-only operation context that installs a real RocksRecoveryUnit
    // bound to the given DB, so each scope below gets its own recovery unit.
    // The second RocksRecoveryUnit ctor argument is false -- presumably the
    // durability flag; confirm against rocks_recovery_unit.h.
    class MyOperationContext : public OperationContextNoop {
    public:
        MyOperationContext( rocksdb::DB* db )
            : OperationContextNoop( new RocksRecoveryUnit( db, false ) ) {
        }
    };
    // to be used in testing
    // Comparator for an ascending single-field index ({a: 1}); shared by the
    // column-family helper below and kept alive for the whole test binary.
    static std::unique_ptr<rocksdb::Comparator> _rocksComparator(
        RocksSortedDataImpl::newRocksComparator( Ordering::make( BSON( "a" << 1 ) ) ) );

    // Base name for per-test temporary directories.
    string _rocksSortedDataTestDir = "mongo-rocks-test";
rocksdb::DB* getDB( string path ) {
boost::filesystem::remove_all( path );
rocksdb::Options options = RocksEngine::dbOptions();
// open DB
rocksdb::DB* db;
rocksdb::Status s = rocksdb::DB::Open(options, path, &db);
ASSERT(s.ok());
return db;
}
    // Ordering over an empty key pattern; passed to RocksSortedDataImpl by
    // most tests below.
    const Ordering dummyOrdering = Ordering::make( BSONObj() );
    // Insert/unindex basics: unindex of a missing entry is a no-op, insert
    // succeeds, unindex of an existing entry succeeds, and a second unindex
    // is again a no-op.
    TEST( RocksSortedDataTest, BrainDead ) {
        unittest::TempDir td( _rocksSortedDataTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
        {
            RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );

            BSONObj key = BSON( "" << 1 );
            DiskLoc loc( 5, 16 );

            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    // Nothing indexed yet, so unindex reports false.
                    ASSERT( !sortedData.unindex( &opCtx, key, loc ) );
                    uow.commit();
                }
            }

            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    // Fourth argument true -- presumably dupsAllowed; confirm
                    // against the SortedDataInterface::insert contract.
                    Status res = sortedData.insert( &opCtx, key, loc, true );
                    ASSERT_OK( res );
                    uow.commit();
                }
            }

            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    // The entry exists now, so unindex reports true.
                    ASSERT( sortedData.unindex( &opCtx, key, loc ) );
                    uow.commit();
                }
            }

            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    // Already removed; return value deliberately ignored.
                    sortedData.unindex( &opCtx, key, loc );
                    uow.commit();
                }
            }
        }
    }
TEST( RocksSortedDataTest, Locate1 ) {
unittest::TempDir td( _rocksSortedDataTestDir );
scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
{
RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );
BSONObj key = BSON( "" << 1 );
DiskLoc loc( 5, 16 );
{
MyOperationContext opCtx( db.get() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, 1 ) );
ASSERT( !cursor->locate( key, loc ) );
}
{
MyOperationContext opCtx( db.get() );
{
WriteUnitOfWork uow( &opCtx );
Status res = sortedData.insert( &opCtx, key, loc, true );
ASSERT_OK( res );
uow.commit();
}
}
{
MyOperationContext opCtx( db.get() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, 1 ) );
ASSERT( cursor->locate( key, loc ) );
ASSERT_EQUALS( key, cursor->getKey() );
ASSERT_EQUALS( loc, cursor->getDiskLoc() );
}
}
}
    // Locate in the middle of a populated index, then advance to the end.
    TEST( RocksSortedDataTest, Locate2 ) {
        unittest::TempDir td( _rocksSortedDataTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
        {
            RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );

            // Populate keys 1, 2, 3.
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                    uow.commit();
                }
            }

            {
                MyOperationContext opCtx( db.get() );
                scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, 1 ) );
                // Search key uses field name "a" while stored keys use "" --
                // presumably the comparison ignores field names; locate with a
                // DiskLoc of (0,0) returns false (inexact) but positions on key 2.
                ASSERT( !cursor->locate( BSON( "a" << 2 ), DiskLoc(0,0) ) );
                ASSERT( !cursor->isEOF() );
                ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
                ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
                cursor->advance();
                ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
                ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
                cursor->advance();
                ASSERT( cursor->isEOF() );
            }
        }
    }
boost::shared_ptr<rocksdb::ColumnFamilyHandle> makeColumnFamily( rocksdb::DB* db ) {
rocksdb::ColumnFamilyOptions options;
options.comparator = _rocksComparator.get();
rocksdb::ColumnFamilyHandle* cfh;
rocksdb::Status s = db->CreateColumnFamily( options, "simpleColumnFamily", &cfh );
ASSERT( s.ok() );
return boost::shared_ptr<rocksdb::ColumnFamilyHandle>( cfh );
}
TEST( RocksSortedDataTest, LocateInexact ) {
unittest::TempDir td( _rocksSortedDataTestDir );
scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
{
boost::shared_ptr<rocksdb::ColumnFamilyHandle> cfh = makeColumnFamily( db.get() );
RocksSortedDataImpl sortedData( db.get(), cfh.get(), dummyOrdering );
{
MyOperationContext opCtx( db.get() );
{
WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
uow.commit();
}
}
{
MyOperationContext opCtx( db.get() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, 1 ) );
ASSERT_FALSE( cursor->locate( BSON( "a" << 2 ), DiskLoc(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
}
}
}
    // A cursor must see a stable snapshot: entries committed after the cursor
    // was created are invisible to it.
    TEST( RocksSortedDataTest, Snapshots ) {
        unittest::TempDir td( _rocksSortedDataTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
        {
            RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );

            // Only key 2 exists before the cursor is created.
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                    uow.commit();
                }
            }

            {
                MyOperationContext opCtx( db.get() );

                // get a cursor
                scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, 1 ) );

                // insert some more stuff
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                    uow.commit();
                }

                // NOTE(review): getKey() is asserted without a prior locate()
                // -- presumably a fresh cursor starts positioned on the first
                // entry of its snapshot; confirm against newCursor's contract.
                ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
                ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );

                cursor->advance();

                // make sure that the cursor can't "see" anything added after it was created.
                ASSERT( cursor-> isEOF() );
                ASSERT_FALSE( cursor->locate( BSON( "" << 3 ), DiskLoc(1,3) ) );
                ASSERT( cursor->isEOF() );
            }
        }
    }
    // savePosition/restorePosition round-trips while positioned on an entry:
    // the cursor comes back on the same key/loc, for two different positions.
    TEST( RocksSortedDataTest, SaveAndRestorePositionSimple ) {
        unittest::TempDir td( _rocksSortedDataTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
        {
            RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );

            // Populate keys 1, 2, 3.
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                    uow.commit();
                }
            }

            {
                MyOperationContext opCtx( db.get() );
                scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, 1 ) );
                ASSERT( !cursor->locate( BSON( "a" << 1 ), DiskLoc(0,0) ) );
                ASSERT( !cursor->isEOF() );
                ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
                ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );

                // save the position
                cursor->savePosition();

                // restore position
                cursor->restorePosition( &opCtx );
                ASSERT( !cursor->isEOF() );
                ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
                ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );

                // repeat, with a different value
                ASSERT( !cursor->locate( BSON( "a" << 2 ), DiskLoc(0,0) ) );
                ASSERT( !cursor->isEOF() );
                ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
                ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );

                // save the position
                cursor->savePosition();

                // restore position
                cursor->restorePosition( &opCtx );
                ASSERT( !cursor->isEOF() );
                ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
                ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
            }
        }
    }
    // savePosition/restorePosition at EOF: a cursor saved past the last entry
    // restores still at EOF.
    TEST( RocksSortedDataTest, SaveAndRestorePositionEOF ) {
        unittest::TempDir td( _rocksSortedDataTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
        {
            RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );

            // Populate keys 1, 2, 3.
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                    uow.commit();
                }
            }

            {
                MyOperationContext opCtx( db.get() );
                scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, 1 ) );
                ASSERT( !cursor->locate( BSON( "a" << 1 ), DiskLoc(0,0) ) );
                ASSERT( !cursor->isEOF() );
                ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
                ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );

                // advance to the end
                while ( !cursor->isEOF() ) {
                    cursor->advance();
                }
                ASSERT( cursor->isEOF() );

                // save the position
                cursor->savePosition();

                // restore position, make sure we're at the end
                cursor->restorePosition( &opCtx );
                ASSERT( cursor->isEOF() );
            }
        }
    }
    // savePosition/restorePosition across a concurrent insert: the restored
    // cursor keeps its position and (per its snapshot) does not see the entry
    // committed by another operation context while it was saved.
    TEST( RocksSortedDataTest, SaveAndRestorePositionInsert ) {
        unittest::TempDir td( _rocksSortedDataTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
        {
            RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );

            // Populate keys 1, 2, 3.
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                    ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                    uow.commit();
                }
            }

            {
                MyOperationContext opCtx( db.get() );
                scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, 1 ) );
                ASSERT( !cursor->locate( BSON( "" << 3 ), DiskLoc(0,0) ) );
                ASSERT( !cursor->isEOF() );
                ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
                ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );

                // save the position
                cursor->savePosition();

                // Insert key 4 under a second context (this inner opCtx
                // deliberately shadows the outer one).
                {
                    MyOperationContext opCtx( db.get() );
                    {
                        WriteUnitOfWork uow( &opCtx );
                        ASSERT_OK(
                            sortedData.insert( &opCtx, BSON( "" << 4 ), DiskLoc(1,4), true ) );
                        uow.commit();
                    }
                }

                // restore position, make sure we don't see the newly inserted value
                cursor->restorePosition( &opCtx );
                ASSERT( !cursor->isEOF() );
                ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
                ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
                cursor->advance();
                ASSERT( cursor->isEOF() );
            }
        }
    }
// Deleting a key BEFORE the saved position (key 1, while the cursor sits on
// key 2) must not disturb the cursor after restorePosition().
TEST( RocksSortedDataTest, SaveAndRestorePositionDelete2 ) {
    unittest::TempDir td( _rocksSortedDataTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
    {
        RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( &opCtx );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                uow.commit();
            }
        }
        {
            MyOperationContext opCtx( db.get() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, 1 ) );
            ASSERT( !cursor->locate( BSON( "" << 2 ), DiskLoc(0,0) ) );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
            // save the position
            cursor->savePosition();
            {
                // Delete key 1 in an independent operation context.
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT( sortedData.unindex( &opCtx, BSON( "" << 1 ), DiskLoc(1,1) ) );
                    uow.commit();
                }
            }
            // restore position
            cursor->restorePosition( &opCtx );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
        }
    }
}
// Deleting a key AFTER the saved position (key 3, while the cursor sits on
// key 2) must not be observable after restorePosition(): the cursor still
// advances onto the deleted key because it reads from a snapshot.
TEST( RocksSortedDataTest, SaveAndRestorePositionDelete3 ) {
    unittest::TempDir td( _rocksSortedDataTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
    {
        RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( &opCtx );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                uow.commit();
            }
        }
        {
            MyOperationContext opCtx( db.get() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, 1 ) );
            ASSERT( !cursor->locate( BSON( "" << 2 ), DiskLoc(0,0) ) );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
            // save the position
            cursor->savePosition();
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT( sortedData.unindex( &opCtx, BSON( "" << 3 ), DiskLoc(1,3) ) );
                    uow.commit();
                }
            }
            // restore position
            cursor->restorePosition( &opCtx );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
            // make sure that we can still see the unindexed data, since we're working on
            // a snapshot
            cursor->advance();
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
            cursor->advance();
            ASSERT( cursor->isEOF() );
        }
    }
}
// Basic reverse-cursor (direction -1) locate: false on an empty index,
// true (exact match) after the key/DiskLoc pair has been inserted.
TEST( RocksSortedDataTest, Locate1Reverse ) {
    unittest::TempDir td( _rocksSortedDataTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
    {
        RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );
        BSONObj key = BSON( "" << 1 );
        DiskLoc loc( 5, 16 );
        {
            // Empty index: locate() finds nothing.
            MyOperationContext opCtx( db.get() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, -1 ) );
            ASSERT( !cursor->locate( key, loc ) );
        }
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( &opCtx );
                Status res = sortedData.insert( &opCtx, key, loc, true );
                ASSERT_OK( res );
                uow.commit();
            }
        }
        {
            // After insert: exact key+DiskLoc match, locate() returns true.
            MyOperationContext opCtx( db.get() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, -1 ) );
            ASSERT( cursor->locate( key, loc ) );
            ASSERT_EQUALS( key, cursor->getKey() );
            ASSERT_EQUALS( loc, cursor->getDiskLoc() );
        }
    }
}
// Inexact locate on a reverse cursor: searching for a key between two
// existing keys must position the cursor on the next LOWER key.
TEST( RocksSortedDataTest, LocateInexactReverse ) {
    unittest::TempDir td( _rocksSortedDataTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
    {
        // This test uses a dedicated column family rather than the default one.
        boost::shared_ptr<rocksdb::ColumnFamilyHandle> cfh = makeColumnFamily( db.get() );
        RocksSortedDataImpl sortedData( db.get(), cfh.get(), dummyOrdering );
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( &opCtx );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "a" << 1 ), DiskLoc(1,1), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "a" << 3 ), DiskLoc(1,1), true ) );
                uow.commit();
            }
        }
        {
            MyOperationContext opCtx( db.get() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, -1 ) );
            // Key 2 is absent, so locate() is false; the reverse cursor lands
            // on key 1 (the next key in reverse order).
            ASSERT_FALSE( cursor->locate( BSON( "a" << 2 ), DiskLoc(1,1) ) );
            ASSERT_FALSE( cursor->isEOF() );
            // getKey() returns the stored key with an empty field name.
            ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );
        }
    }
}
// savePosition()/restorePosition() round-trips on a reverse cursor with no
// intervening writes must be no-ops, for two different positions.
TEST( RocksSortedDataTest, SaveAndRestorePositionReverseSimple ) {
    unittest::TempDir td( _rocksSortedDataTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
    {
        RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( &opCtx );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                uow.commit();
            }
        }
        {
            MyOperationContext opCtx( db.get() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, -1 ) );
            // NOTE(review): query keys use field name "a" while stored keys use
            // "" — the asserts below show the comparison ignores the field name.
            ASSERT( !cursor->locate( BSON( "a" << 1 ), DiskLoc(2,0) ) );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );
            // save the position
            cursor->savePosition();
            // restore position
            cursor->restorePosition( &opCtx );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );
            // repeat, with a different value
            ASSERT( !cursor->locate( BSON( "a" << 2 ), DiskLoc(2,0) ) );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
            // save the position
            cursor->savePosition();
            // restore position
            cursor->restorePosition( &opCtx );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
        }
    }
}
// Saving and restoring a reverse cursor that has reached EOF must leave it
// at EOF.
TEST( RocksSortedDataTest, SaveAndRestorePositionEOFReverse ) {
    unittest::TempDir td( _rocksSortedDataTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
    {
        RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( &opCtx );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 4 ), DiskLoc(1,4), true ) );
                uow.commit();
            }
        }
        {
            MyOperationContext opCtx( db.get() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx, -1 ) );
            // Key 2 is absent: reverse cursor lands on the next lower key, 1.
            ASSERT_FALSE( cursor->locate( BSON( "" << 2 ), DiskLoc(1,2) ) );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );
            // advance to the end
            while ( !cursor->isEOF() ) {
                cursor->advance();
            }
            ASSERT( cursor->isEOF() );
            // save the position
            cursor->savePosition();
            // restore position, make sure we're at the end
            cursor->restorePosition( &opCtx );
            ASSERT( cursor->isEOF() );
        }
    }
}
// Reverse-cursor analogue of SaveAndRestorePositionInsert: a key inserted
// between save and restore (key 2, which would fall between the existing
// keys in iteration order) must not be visible through the snapshot.
TEST( RocksSortedDataTest, SaveAndRestorePositionInsertReverse ) {
    unittest::TempDir td( _rocksSortedDataTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
    {
        RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( &opCtx );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                uow.commit();
            }
        }
        {
            MyOperationContext opCtx( db.get() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx,
                                                                                  -1 ) );
            ASSERT( !cursor->locate( BSON( "" << 3 ), DiskLoc(2,0) ) );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
            // save the position
            cursor->savePosition();
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT_OK(
                        sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                    uow.commit();
                }
            }
            // restore position, make sure we don't see the newly inserted value
            cursor->restorePosition( &opCtx );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
            // Next key in reverse order is 1 — key 2 is skipped (snapshot).
            cursor->advance();
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );
            cursor->advance();
            ASSERT( cursor->isEOF() );
        }
    }
}
// Deleting the key the reverse cursor is POSITIONED ON between save and
// restore: the cursor must still report that key afterwards (snapshot).
TEST( RocksSortedDataTest, SaveAndRestorePositionDelete1Reverse ) {
    unittest::TempDir td( _rocksSortedDataTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
    {
        RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( &opCtx );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                uow.commit();
            }
        }
        {
            MyOperationContext opCtx( db.get() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx,
                                                                                  -1 ) );
            ASSERT( !cursor->locate( BSON( "" << 3 ), DiskLoc(2,0) ) );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
            // save the position
            cursor->savePosition();
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT( sortedData.unindex( &opCtx, BSON( "" << 3 ), DiskLoc(1,3) ) );
                    uow.commit();
                }
            }
            // restore position, make sure we still see the deleted key and value, because
            // we're using a snapshot
            cursor->restorePosition( &opCtx );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
        }
    }
}
// Deleting a key the reverse cursor has not yet reached (key 1, ahead of the
// cursor in reverse order) must not disturb the restored position.
TEST( RocksSortedDataTest, SaveAndRestorePositionDelete2Reverse ) {
    unittest::TempDir td( _rocksSortedDataTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
    {
        RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( &opCtx );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                uow.commit();
            }
        }
        {
            MyOperationContext opCtx( db.get() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx,
                                                                                  -1 ) );
            ASSERT( !cursor->locate( BSON( "" << 2 ), DiskLoc(2,0) ) );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
            // save the position
            cursor->savePosition();
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT( sortedData.unindex( &opCtx, BSON( "" << 1 ), DiskLoc(1,1) ) );
                    uow.commit();
                }
            }
            // restore position
            cursor->restorePosition( &opCtx );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
        }
    }
}
// Same setup as Delete2Reverse, but additionally advances past the restored
// position to verify the deleted key 1 is STILL visible via the snapshot.
TEST( RocksSortedDataTest, SaveAndRestorePositionDelete3Reverse ) {
    unittest::TempDir td( _rocksSortedDataTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
    {
        RocksSortedDataImpl sortedData( db.get(), db->DefaultColumnFamily(), dummyOrdering );
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( &opCtx );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
                ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
                uow.commit();
            }
        }
        {
            MyOperationContext opCtx( db.get() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sortedData.newCursor( &opCtx,
                                                                                  -1 ) );
            ASSERT( !cursor->locate( BSON( "" << 2 ), DiskLoc(2,0) ) );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
            // save the position
            cursor->savePosition();
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( &opCtx );
                    ASSERT( sortedData.unindex( &opCtx, BSON( "" << 1 ), DiskLoc(1,1) ) );
                    uow.commit();
                }
            }
            // restore position
            cursor->restorePosition( &opCtx );
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
            // make sure that we can still see the unindexed data, since we're working on
            // a snapshot
            cursor->advance();
            ASSERT( !cursor->isEOF() );
            ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
            ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );
            cursor->advance();
            ASSERT( cursor->isEOF() );
        }
    }
}
}
|
{"hexsha": "78628a668cc7847ed1ca20253a891a7312135503", "size": 36081, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/mongo/db/storage/rocks/rocks_sorted_data_impl_test.cpp", "max_stars_repo_name": "cyxddgithub/mongo", "max_stars_repo_head_hexsha": "6eb296a66e1f71f12d5483b7144f96d506b055a3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2015-07-17T04:37:51.000Z", "max_stars_repo_stars_event_max_datetime": "2015-07-17T04:37:51.000Z", "max_issues_repo_path": "src/mongo/db/storage/rocks/rocks_sorted_data_impl_test.cpp", "max_issues_repo_name": "cyxddgithub/mongo", "max_issues_repo_head_hexsha": "6eb296a66e1f71f12d5483b7144f96d506b055a3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mongo/db/storage/rocks/rocks_sorted_data_impl_test.cpp", "max_forks_repo_name": "cyxddgithub/mongo", "max_forks_repo_head_hexsha": "6eb296a66e1f71f12d5483b7144f96d506b055a3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.880387931, "max_line_length": 101, "alphanum_fraction": 0.4795875946, "num_tokens": 8224}
|
function output = calc_traversal_dist(ai)
% CALC_TRAVERSAL_DIST Cumulative (x, y) positions along a Freeman chain code.
%   output = CALC_TRAVERSAL_DIST(ai) returns an N-by-2 matrix whose i-th row
%   is the position reached after traversing the first i links of the
%   8-connected chain code ai (a 1-by-N vector of codes 0..7).
%
%   The per-link displacement is computed with the standard sign() identities
%   for Freeman codes:
%       dx = sign(6 - a) * sign(2 - a)
%       dy = sign(4 - a) * sign(a)
%   e.g. a = 0 -> (1, 0), a = 2 -> (0, 1), a = 4 -> (-1, 0), a = 6 -> (0, -1).
n = size(ai, 2);
p = zeros(n, 2);   % preallocate: avoids quadratic growth and makes the
                   % empty-input case return a well-defined 0-by-2 result
x_ = 0;
y_ = 0;
for i = 1 : n
    x_ = x_ + sign(6 - ai(i)) * sign(2 - ai(i));
    y_ = y_ + sign(4 - ai(i)) * sign(ai(i));
    p(i, 1) = x_;
    p(i, 2) = y_;
end
output = p;
end
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/32800-elliptic-fourier-for-shape-analysis/calc_traversal_dist.m"}
|
/* Copyright (c) 2021, the adamantine authors.
*
* This file is subject to the Modified BSD License and may not be distributed
* without copyright and license information. Please refer to the file LICENSE
* for the text and further information on this license.
*/
#define BOOST_TEST_MODULE DataAssimilator
#include <DataAssimilator.hh>
#include <Geometry.hh>
#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/la_parallel_vector.h>
#include "main.cc"
namespace adamantine
{
// Friend-style test harness for DataAssimilator; each method exercises one
// piece of the ensemble Kalman filter implementation.
//
// Bug fix vs. the previous revision: in test_update_ensemble the four
// sim_at_expt_pt_* vectors were constructed with an initial size of 3 and
// then push_back-ed, so elements [0..2] were 0.0 and the final comparison
// loop (which indexes [0..2]) compared zeros against zeros — the assertions
// passed vacuously. The vectors are now default-constructed (with reserve)
// so push_back fills the indices the loop actually reads.
class DataAssimilatorTester
{
public:
  // Verify solver settings: deal.II defaults first, then explicit values
  // read from the property tree.
  void test_constructor()
  {
    boost::property_tree::ptree database;
    // First checking the dealii default values
    DataAssimilator da0(database);
    double tol = 1.0e-12;
    BOOST_CHECK_SMALL(da0._solver_control.tolerance() - 1.0e-10, tol);
    BOOST_CHECK(da0._solver_control.max_steps() == 100);
    BOOST_CHECK(da0._additional_data.max_n_tmp_vectors == 30);

    // Now explicitly setting them
    database.put("solver.convergence_tolerance", 1.0e-6);
    database.put("solver.max_iterations", 25);
    database.put("solver.max_number_of_temp_vectors", 4);
    DataAssimilator da1(database);
    BOOST_CHECK_SMALL(da1._solver_control.tolerance() - 1.0e-6, tol);
    BOOST_CHECK(da1._solver_control.max_steps() == 25);
    BOOST_CHECK(da1._additional_data.max_n_tmp_vectors == 4);
  };

  // Apply the Kalman gain to a 3-member ensemble and compare the forecast
  // shift against a reference solution computed in Python.
  void test_calc_kalman_gain()
  {
    // Create the DoF mapping
    MPI_Comm communicator = MPI_COMM_WORLD;
    boost::property_tree::ptree database;
    database.put("import_mesh", false);
    database.put("length", 1);
    database.put("length_divisions", 2);
    database.put("height", 1);
    database.put("height_divisions", 2);
    adamantine::Geometry<2> geometry(communicator, database);
    dealii::parallel::distributed::Triangulation<2> const &tria =
        geometry.get_triangulation();
    dealii::FE_Q<2> fe(1);
    dealii::DoFHandler<2> dof_handler(tria);
    dof_handler.distribute_dofs(fe);

    unsigned int sim_size = 5;
    unsigned int expt_size = 2;

    dealii::Vector<double> expt_vec(2);
    expt_vec(0) = 2.5;
    expt_vec(1) = 9.5;

    // Experimental points observe simulation DoFs 1 and 3.
    std::pair<std::vector<int>, std::vector<int>> indices_and_offsets;
    indices_and_offsets.first.resize(2);
    indices_and_offsets.second.resize(3); // Offset vector is one longer
    indices_and_offsets.first[0] = 1;
    indices_and_offsets.first[1] = 3;
    indices_and_offsets.second[0] = 0;
    indices_and_offsets.second[1] = 1;
    indices_and_offsets.second[2] = 2;

    boost::property_tree::ptree solver_settings_database;
    DataAssimilator da(solver_settings_database);
    da._sim_size = sim_size;
    da._expt_size = expt_size;
    da._num_ensemble_members = 3;
    da.update_dof_mapping<2>(dof_handler, indices_and_offsets);

    // Create the simulation data
    std::vector<dealii::LA::distributed::Vector<double>> data(3);
    data[0].reinit(5);
    data[0](0) = 1.0;
    data[0](1) = 3.0;
    data[0](2) = 6.0;
    data[0](3) = 9.0;
    data[0](4) = 11.0;
    data[1].reinit(5);
    data[1](0) = 1.5;
    data[1](1) = 3.2;
    data[1](2) = 6.3;
    data[1](3) = 9.7;
    data[1](4) = 11.9;
    data[2].reinit(5);
    data[2](0) = 1.1;
    data[2](1) = 3.1;
    data[2](2) = 6.1;
    data[2](3) = 9.1;
    data[2](4) = 11.1;

    // Build the sparse experimental covariance matrix
    dealii::SparsityPattern pattern(expt_size, expt_size, 1);
    pattern.add(0, 0);
    pattern.add(1, 1);
    pattern.compress();
    dealii::SparseMatrix<double> R(pattern);
    R.add(0, 0, 0.002);
    R.add(1, 1, 0.001);

    // Create the (perturbed) innovation
    std::vector<dealii::Vector<double>> perturbed_innovation(3);
    for (unsigned int sample = 0; sample < perturbed_innovation.size();
         ++sample)
    {
      perturbed_innovation[sample].reinit(expt_size);
      dealii::Vector<double> temp = da.calc_Hx(data[sample]);
      for (unsigned int i = 0; i < expt_size; ++i)
      {
        perturbed_innovation[sample][i] = expt_vec[i] - temp[i];
      }
    }

    // Fixed perturbations (instead of random noise) so the reference
    // solution below stays reproducible.
    perturbed_innovation[0][0] = perturbed_innovation[0][0] + 0.0008;
    perturbed_innovation[0][1] = perturbed_innovation[0][1] - 0.0005;
    perturbed_innovation[1][0] = perturbed_innovation[1][0] - 0.001;
    perturbed_innovation[1][1] = perturbed_innovation[1][1] + 0.0002;
    perturbed_innovation[2][0] = perturbed_innovation[2][0] + 0.0002;
    perturbed_innovation[2][1] = perturbed_innovation[2][1] - 0.0009;

    // Apply the Kalman gain
    std::vector<dealii::LA::distributed::Vector<double>> forecast_shift =
        da.apply_kalman_gain(data, R, perturbed_innovation);

    double tol = 1.0e-4;

    // Reference solution calculated using Python
    BOOST_CHECK_CLOSE(forecast_shift[0][0], 0.21352564, tol);
    BOOST_CHECK_CLOSE(forecast_shift[0][1], -0.14600986, tol);
    BOOST_CHECK_CLOSE(forecast_shift[0][2], -0.02616469, tol);
    BOOST_CHECK_CLOSE(forecast_shift[0][3], 0.45321598, tol);
    BOOST_CHECK_CLOSE(forecast_shift[0][4], 0.69290631, tol);
    BOOST_CHECK_CLOSE(forecast_shift[1][0], -0.27786325, tol);
    BOOST_CHECK_CLOSE(forecast_shift[1][1], -0.32946285, tol);
    BOOST_CHECK_CLOSE(forecast_shift[1][2], -0.31226298, tol);
    BOOST_CHECK_CLOSE(forecast_shift[1][3], -0.24346351, tol);
    BOOST_CHECK_CLOSE(forecast_shift[1][4], -0.20906377, tol);
    BOOST_CHECK_CLOSE(forecast_shift[2][0], 0.12767094, tol);
    BOOST_CHECK_CLOSE(forecast_shift[2][1], -0.20319395, tol);
    BOOST_CHECK_CLOSE(forecast_shift[2][2], -0.09290565, tol);
    BOOST_CHECK_CLOSE(forecast_shift[2][3], 0.34824753, tol);
    BOOST_CHECK_CLOSE(forecast_shift[2][4], 0.56882413, tol);
  };

  // Check that update_dof_mapping turns the indices/offsets pairs into the
  // expected experiment-to-DoF mapping.
  void test_update_dof_mapping()
  {
    MPI_Comm communicator = MPI_COMM_WORLD;
    boost::property_tree::ptree database;
    database.put("import_mesh", false);
    database.put("length", 1);
    database.put("length_divisions", 2);
    database.put("height", 1);
    database.put("height_divisions", 2);
    adamantine::Geometry<2> geometry(communicator, database);
    dealii::parallel::distributed::Triangulation<2> const &tria =
        geometry.get_triangulation();
    dealii::FE_Q<2> fe(1);
    dealii::DoFHandler<2> dof_handler(tria);
    dof_handler.distribute_dofs(fe);

    unsigned int sim_size = 4;
    unsigned int expt_size = 3;

    std::pair<std::vector<int>, std::vector<int>> indices_and_offsets;
    indices_and_offsets.first.resize(3);
    indices_and_offsets.second.resize(4); // offset vector is one longer
    indices_and_offsets.first[0] = 0;
    indices_and_offsets.first[1] = 1;
    indices_and_offsets.first[2] = 3;
    indices_and_offsets.second[0] = 0;
    indices_and_offsets.second[1] = 1;
    indices_and_offsets.second[2] = 2;
    indices_and_offsets.second[3] = 3;

    boost::property_tree::ptree solver_settings_database;
    DataAssimilator da(solver_settings_database);
    da._sim_size = sim_size;
    da._expt_size = expt_size;
    da.update_dof_mapping<2>(dof_handler, indices_and_offsets);

    BOOST_CHECK(da._expt_to_dof_mapping.first[0] == 0);
    BOOST_CHECK(da._expt_to_dof_mapping.first[1] == 1);
    BOOST_CHECK(da._expt_to_dof_mapping.first[2] == 2);
    BOOST_CHECK(da._expt_to_dof_mapping.second[0] == 0);
    BOOST_CHECK(da._expt_to_dof_mapping.second[1] == 1);
    BOOST_CHECK(da._expt_to_dof_mapping.second[2] == 3);
  };

  // The observation matrix H must have a single 1.0 per observed DoF
  // (columns 0, 1 and 3) and zeros everywhere else.
  void test_calc_H()
  {
    MPI_Comm communicator = MPI_COMM_WORLD;
    boost::property_tree::ptree database;
    database.put("import_mesh", false);
    database.put("length", 1);
    database.put("length_divisions", 2);
    database.put("height", 1);
    database.put("height_divisions", 2);
    adamantine::Geometry<2> geometry(communicator, database);
    dealii::parallel::distributed::Triangulation<2> const &tria =
        geometry.get_triangulation();
    dealii::FE_Q<2> fe(1);
    dealii::DoFHandler<2> dof_handler(tria);
    dof_handler.distribute_dofs(fe);

    unsigned int sim_size = 4;
    unsigned int expt_size = 3;

    std::pair<std::vector<int>, std::vector<int>> indices_and_offsets;
    indices_and_offsets.first.resize(3);
    indices_and_offsets.second.resize(4); // offset vector is one longer
    indices_and_offsets.first[0] = 0;
    indices_and_offsets.first[1] = 1;
    indices_and_offsets.first[2] = 3;
    indices_and_offsets.second[0] = 0;
    indices_and_offsets.second[1] = 1;
    indices_and_offsets.second[2] = 2;
    indices_and_offsets.second[3] = 3;

    boost::property_tree::ptree solver_settings_database;
    DataAssimilator da(solver_settings_database);
    da._sim_size = sim_size;
    da._expt_size = expt_size;
    da.update_dof_mapping<2>(dof_handler, indices_and_offsets);

    dealii::SparsityPattern pattern(expt_size, sim_size, expt_size);
    dealii::SparseMatrix<double> H = da.calc_H(pattern);

    double tol = 1e-12;
    for (unsigned int i = 0; i < expt_size; ++i)
    {
      for (unsigned int j = 0; j < sim_size; ++j)
      {
        if (i == 0 && j == 0)
          BOOST_CHECK_CLOSE(H(i, j), 1.0, tol);
        else if (i == 1 && j == 1)
          BOOST_CHECK_CLOSE(H(i, j), 1.0, tol);
        else if (i == 2 && j == 3)
          BOOST_CHECK_CLOSE(H(i, j), 1.0, tol);
        else
          // el() returns 0 for entries outside the sparsity pattern.
          BOOST_CHECK_CLOSE(H.el(i, j), 0.0, tol);
      }
    }
  };

  // calc_Hx must pick simulation entries 0, 1 and 3 out of the state vector.
  void test_calc_Hx()
  {
    MPI_Comm communicator = MPI_COMM_WORLD;
    boost::property_tree::ptree database;
    database.put("import_mesh", false);
    database.put("length", 1);
    database.put("length_divisions", 2);
    database.put("height", 1);
    database.put("height_divisions", 2);
    adamantine::Geometry<2> geometry(communicator, database);
    dealii::parallel::distributed::Triangulation<2> const &tria =
        geometry.get_triangulation();
    dealii::FE_Q<2> fe(1);
    dealii::DoFHandler<2> dof_handler(tria);
    dof_handler.distribute_dofs(fe);

    int sim_size = 4;
    int expt_size = 3;

    dealii::LA::distributed::Vector<double> sim_vec(dof_handler.n_dofs());
    sim_vec(0) = 2.0;
    sim_vec(1) = 4.0;
    sim_vec(2) = 5.0;
    sim_vec(3) = 7.0;

    dealii::Vector<double> expt_vec(3);
    expt_vec(0) = 2.5;
    expt_vec(1) = 4.5;
    expt_vec(2) = 8.5;

    std::pair<std::vector<int>, std::vector<int>> indices_and_offsets;
    indices_and_offsets.first.resize(3);
    indices_and_offsets.second.resize(4); // Offset vector is one longer
    indices_and_offsets.first[0] = 0;
    indices_and_offsets.first[1] = 1;
    indices_and_offsets.first[2] = 3;
    indices_and_offsets.second[0] = 0;
    indices_and_offsets.second[1] = 1;
    indices_and_offsets.second[2] = 2;
    indices_and_offsets.second[3] = 3;

    boost::property_tree::ptree solver_settings_database;
    DataAssimilator da(solver_settings_database);
    da._sim_size = sim_size;
    da._expt_size = expt_size;
    da.update_dof_mapping<2>(dof_handler, indices_and_offsets);

    dealii::Vector<double> Hx = da.calc_Hx(sim_vec);

    double tol = 1e-10;
    BOOST_CHECK_CLOSE(Hx(0), 2.0, tol);
    BOOST_CHECK_CLOSE(Hx(1), 4.0, tol);
    BOOST_CHECK_CLOSE(Hx(2), 7.0, tol);
  };

  // Sample covariance: zero matrix for identical ensemble members, and a
  // NumPy reference solution for a non-trivial 3-member ensemble.
  void test_calc_sample_covariance_dense()
  {
    double tol = 1e-10;

    // Trivial case of identical vectors, covariance should be the zero matrix
    std::vector<dealii::LA::distributed::Vector<double>> data1(3);
    data1[0].reinit(4);
    data1[0](0) = 1.0;
    data1[0](1) = 3.0;
    data1[0](2) = 6.0;
    data1[0](3) = 9.0;
    data1[1].reinit(4);
    data1[1](0) = 1.0;
    data1[1](1) = 3.0;
    data1[1](2) = 6.0;
    data1[1](3) = 9.0;
    data1[2].reinit(4);
    data1[2](0) = 1.0;
    data1[2](1) = 3.0;
    data1[2](2) = 6.0;
    data1[2](3) = 9.0;

    boost::property_tree::ptree solver_settings_database;
    DataAssimilator da(solver_settings_database);
    dealii::FullMatrix<double> cov = da.calc_sample_covariance_dense(data1);

    // Check results
    for (unsigned int i = 0; i < 4; ++i)
    {
      for (unsigned int j = 0; j < 4; ++j)
      {
        BOOST_CHECK_SMALL(std::abs(cov(i, j)), tol);
      }
    }

    // Non-trivial case, using NumPy solution as the reference
    std::vector<dealii::LA::distributed::Vector<double>> data2(3);
    data2[0].reinit(5);
    data2[0](0) = 1.0;
    data2[0](1) = 3.0;
    data2[0](2) = 6.0;
    data2[0](3) = 9.0;
    data2[0](4) = 11.0;
    data2[1].reinit(5);
    data2[1](0) = 1.5;
    data2[1](1) = 3.2;
    data2[1](2) = 6.3;
    data2[1](3) = 9.7;
    data2[1](4) = 11.9;
    data2[2].reinit(5);
    data2[2](0) = 1.1;
    data2[2](1) = 3.1;
    data2[2](2) = 6.1;
    data2[2](3) = 9.1;
    data2[2](4) = 11.1;

    da._sim_size = 5;
    dealii::FullMatrix<double> cov2 = da.calc_sample_covariance_dense(data2);

    BOOST_CHECK_CLOSE(cov2(0, 0), 0.07, tol);
    BOOST_CHECK_CLOSE(cov2(1, 0), 0.025, tol);
    BOOST_CHECK_CLOSE(cov2(2, 0), 0.04, tol);
    BOOST_CHECK_CLOSE(cov2(3, 0), 0.1, tol);
    BOOST_CHECK_CLOSE(cov2(4, 0), 0.13, tol);
    BOOST_CHECK_CLOSE(cov2(0, 1), 0.025, tol);
    BOOST_CHECK_CLOSE(cov2(1, 1), 0.01, tol);
    BOOST_CHECK_CLOSE(cov2(2, 1), 0.015, tol);
    BOOST_CHECK_CLOSE(cov2(3, 1), 0.035, tol);
    BOOST_CHECK_CLOSE(cov2(4, 1), 0.045, tol);
    BOOST_CHECK_CLOSE(cov2(0, 2), 0.04, tol);
    BOOST_CHECK_CLOSE(cov2(1, 2), 0.015, tol);
    BOOST_CHECK_CLOSE(cov2(2, 2), 0.02333333333333, tol);
    BOOST_CHECK_CLOSE(cov2(3, 2), 0.05666666666667, tol);
    BOOST_CHECK_CLOSE(cov2(4, 2), 0.07333333333333, tol);
    BOOST_CHECK_CLOSE(cov2(0, 3), 0.1, tol);
    BOOST_CHECK_CLOSE(cov2(1, 3), 0.035, tol);
    BOOST_CHECK_CLOSE(cov2(2, 3), 0.05666666666667, tol);
    BOOST_CHECK_CLOSE(cov2(3, 3), 0.14333333333333, tol);
    BOOST_CHECK_CLOSE(cov2(4, 3), 0.18666666666667, tol);
    BOOST_CHECK_CLOSE(cov2(0, 4), 0.13, tol);
    BOOST_CHECK_CLOSE(cov2(1, 4), 0.045, tol);
    BOOST_CHECK_CLOSE(cov2(2, 4), 0.07333333333333, tol);
    BOOST_CHECK_CLOSE(cov2(3, 4), 0.18666666666667, tol);
    BOOST_CHECK_CLOSE(cov2(4, 4), 0.24333333333333, tol);
  };

  // Statistical check: the sample covariance of 1000 noise draws should
  // approximately reproduce the target covariance matrix R.
  void test_fill_noise_vector()
  {
    boost::property_tree::ptree solver_settings_database;
    DataAssimilator da(solver_settings_database);

    dealii::SparsityPattern pattern(3, 3, 3);
    pattern.add(0, 0);
    pattern.add(1, 0);
    pattern.add(1, 1);
    pattern.add(0, 1);
    pattern.add(2, 2);
    pattern.compress();
    dealii::SparseMatrix<double> R(pattern);
    R.add(0, 0, 0.1);
    R.add(1, 0, 0.3);
    R.add(1, 1, 1.0);
    R.add(0, 1, 0.3);
    R.add(2, 2, 0.2);

    std::vector<dealii::Vector<double>> data;
    dealii::Vector<double> ensemble_member(3);
    for (unsigned int i = 0; i < 1000; ++i)
    {
      da.fill_noise_vector(ensemble_member, R);
      data.push_back(ensemble_member);
    }

    dealii::FullMatrix<double> Rtest = da.calc_sample_covariance_dense(data);

    double tol = 20.; // Loose 20% tolerance because this is a statistical check
    BOOST_CHECK_CLOSE(R(0, 0), Rtest(0, 0), tol);
    BOOST_CHECK_CLOSE(R(1, 0), Rtest(1, 0), tol);
    BOOST_CHECK_CLOSE(R(1, 1), Rtest(1, 1), tol);
    BOOST_CHECK_CLOSE(R(0, 1), Rtest(0, 1), tol);
    BOOST_CHECK_CLOSE(R(2, 2), Rtest(2, 2), tol);
  };

  // End-to-end check: after update_ensemble, each member's value at the
  // observed DoFs (1 and 3) must be at least as close to the experiment
  // as before the assimilation step.
  void test_update_ensemble()
  {
    // Create the DoF mapping
    MPI_Comm communicator = MPI_COMM_WORLD;
    boost::property_tree::ptree database;
    database.put("import_mesh", false);
    database.put("length", 1);
    database.put("length_divisions", 2);
    database.put("height", 1);
    database.put("height_divisions", 2);
    adamantine::Geometry<2> geometry(communicator, database);
    dealii::parallel::distributed::Triangulation<2> const &tria =
        geometry.get_triangulation();
    dealii::FE_Q<2> fe(1);
    dealii::DoFHandler<2> dof_handler(tria);
    dof_handler.distribute_dofs(fe);

    int sim_size = 5;
    int expt_size = 2;

    std::vector<double> expt_vec(2);
    expt_vec[0] = 2.5;
    expt_vec[1] = 9.5;

    std::pair<std::vector<int>, std::vector<int>> indices_and_offsets;
    indices_and_offsets.first.resize(2);
    indices_and_offsets.second.resize(3); // Offset vector is one longer
    indices_and_offsets.first[0] = 1;
    indices_and_offsets.first[1] = 3;
    indices_and_offsets.second[0] = 0;
    indices_and_offsets.second[1] = 1;
    indices_and_offsets.second[2] = 2;

    boost::property_tree::ptree solver_settings_database;
    DataAssimilator da(solver_settings_database);
    da._sim_size = sim_size;
    da._expt_size = expt_size;
    da._num_ensemble_members = 3;
    da.update_dof_mapping<2>(dof_handler, indices_and_offsets);

    // Create the simulation data
    std::vector<dealii::LA::distributed::Vector<double>> data(3);
    data[0].reinit(5);
    data[0](0) = 1.0;
    data[0](1) = 3.0;
    data[0](2) = 6.0;
    data[0](3) = 9.0;
    data[0](4) = 11.0;
    data[1].reinit(5);
    data[1](0) = 1.5;
    data[1](1) = 3.2;
    data[1](2) = 6.3;
    data[1](3) = 9.7;
    data[1](4) = 11.9;
    data[2].reinit(5);
    data[2](0) = 1.1;
    data[2](1) = 3.1;
    data[2](2) = 6.1;
    data[2](3) = 9.1;
    data[2](4) = 11.1;

    // Build the sparse experimental covariance matrix
    dealii::SparsityPattern pattern(expt_size, expt_size, 1);
    pattern.add(0, 0);
    pattern.add(1, 1);
    pattern.compress();
    dealii::SparseMatrix<double> R(pattern);
    R.add(0, 0, 0.002);
    R.add(1, 1, 0.001);

    // Save the data at the observation points before assimilation.
    // NOTE: these vectors must start empty (previously they were sized 3 and
    // then push_back-ed, so the indices read below were always 0.0 and the
    // checks at the end of this test passed vacuously).
    std::vector<double> sim_at_expt_pt_1_before;
    sim_at_expt_pt_1_before.reserve(3);
    sim_at_expt_pt_1_before.push_back(data[0][1]);
    sim_at_expt_pt_1_before.push_back(data[1][1]);
    sim_at_expt_pt_1_before.push_back(data[2][1]);

    std::vector<double> sim_at_expt_pt_2_before;
    sim_at_expt_pt_2_before.reserve(3);
    sim_at_expt_pt_2_before.push_back(data[0][3]);
    sim_at_expt_pt_2_before.push_back(data[1][3]);
    sim_at_expt_pt_2_before.push_back(data[2][3]);

    // Update the simulation data
    da.update_ensemble(data, expt_vec, R);

    // Save the data at the observation points after assimilation
    std::vector<double> sim_at_expt_pt_1_after;
    sim_at_expt_pt_1_after.reserve(3);
    sim_at_expt_pt_1_after.push_back(data[0][1]);
    sim_at_expt_pt_1_after.push_back(data[1][1]);
    sim_at_expt_pt_1_after.push_back(data[2][1]);

    std::vector<double> sim_at_expt_pt_2_after;
    sim_at_expt_pt_2_after.reserve(3);
    sim_at_expt_pt_2_after.push_back(data[0][3]);
    sim_at_expt_pt_2_after.push_back(data[1][3]);
    sim_at_expt_pt_2_after.push_back(data[2][3]);

    // Check the solution
    // The observed points should get closer to the experimental values
    // Large entries in R could make these fail spuriously
    for (int member = 0; member < 3; ++member)
    {
      BOOST_CHECK(std::abs(expt_vec[0] - sim_at_expt_pt_1_after[member]) <=
                  std::abs(expt_vec[0] - sim_at_expt_pt_1_before[member]));
      BOOST_CHECK(std::abs(expt_vec[1] - sim_at_expt_pt_2_after[member]) <=
                  std::abs(expt_vec[1] - sim_at_expt_pt_2_before[member]));
    }
  };
};
// Boost.Test entry point for the data assimilation suite: instantiates the
// tester declared above and runs each DataAssimilator check in sequence.
BOOST_AUTO_TEST_CASE(data_assimilator)
{
DataAssimilatorTester dat;
// Construction and DoF bookkeeping
dat.test_constructor();
dat.test_update_dof_mapping();
// Statistical helpers
dat.test_calc_sample_covariance_dense();
dat.test_fill_noise_vector();
// Observation operator (H), its application (Hx), and the EnKF update
dat.test_calc_H();
dat.test_calc_Hx();
dat.test_calc_kalman_gain();
dat.test_update_ensemble();
}
} // namespace adamantine
|
{"hexsha": "9e68687847ee7bab0d68841e312fadb6259e7944", "size": 19040, "ext": "cc", "lang": "C++", "max_stars_repo_path": "tests/test_data_assimilator.cc", "max_stars_repo_name": "Rombur/adamantine", "max_stars_repo_head_hexsha": "45dd37397680fad1eaa64dbb311724c4f727a675", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2016-09-03T02:08:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-03T01:26:41.000Z", "max_issues_repo_path": "tests/test_data_assimilator.cc", "max_issues_repo_name": "Rombur/adamantine", "max_issues_repo_head_hexsha": "45dd37397680fad1eaa64dbb311724c4f727a675", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 74.0, "max_issues_repo_issues_event_min_datetime": "2016-08-31T18:10:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-08T01:51:44.000Z", "max_forks_repo_path": "tests/test_data_assimilator.cc", "max_forks_repo_name": "Rombur/adamantine", "max_forks_repo_head_hexsha": "45dd37397680fad1eaa64dbb311724c4f727a675", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2019-11-12T15:43:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-19T02:58:56.000Z", "avg_line_length": 33.2286212914, "max_line_length": 80, "alphanum_fraction": 0.6592436975, "num_tokens": 6254}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 9 10:54:14 2016
@author: yaric
"""
import numpy as np
import pandas as pd
from sklearn import decomposition
import utils
# the input file prefix of data sets
input_file_prefix = '../../data/training-' # '../../data/training-small-'
output_file_prefix = '../../data/training-preprocessed-'
max_pca_components = 19
def createDataFrame(X, y, y_missing):
    """
    Build a pandas DataFrame from the given numpy arrays.

    The frame holds the three targets (y1..y3) first, then one column per
    feature (X0, X1, ...), then the three boolean missing-target flags.
    """
    combined = np.concatenate((y, X), axis=1)
    columns = ['y1', 'y2', 'y3'] + ['X{}'.format(k) for k in range(X.shape[1])]
    frame = pd.DataFrame(combined, columns=columns)
    flags = pd.DataFrame(
        y_missing,
        columns=['COVAR_y1_MISSING', 'COVAR_y2_MISSING', 'COVAR_y3_MISSING'])
    return frame.join(flags)
# import data
train_df = pd.read_csv(input_file_prefix + 'train.csv')
validate_df = pd.read_csv(input_file_prefix + 'validate.csv')
# keep missing flags for both training and validation
ytr_missing = np.array(train_df.loc[ :,'COVAR_y1_MISSING':'COVAR_y3_MISSING'], dtype=bool)
yvl_missing = np.array(validate_df.loc[ :,'COVAR_y1_MISSING':'COVAR_y3_MISSING'], dtype=bool)
# read data
# Tag rows before concatenating -- presumably so utils.format_data can split
# train/validate again (verify against utils.format_data).
train_df['train_flag'] = True
validate_df['train_flag'] = False
data = pd.concat((train_df, validate_df))
# remove temporary data
del train_df
del validate_df
# basic formatting
Xtr, ytr, Xvl, yvl = utils.format_data(data, preprocessing=False)
del data
#
# do preprocessing
#
# NOTE(review): RandomizedPCA() is constructed WITHOUT n_components, so all
# components are kept and `max_pca_components` is only used in the print
# below; pass n_components=max_pca_components if truncation is intended.
# NOTE(review): RandomizedPCA was removed from modern scikit-learn (use
# PCA(svd_solver='randomized')) -- confirm the pinned sklearn version.
scaler = decomposition.RandomizedPCA()
#scaler = decomposition.SparsePCA(n_components=max_pca_components)
#scaler = decomposition.PCA(n_components='mle')
print 'PCA max features to keep: %d' % (max_pca_components)
Xtr = scaler.fit_transform(Xtr) # fit only for train data (http://cs231n.github.io/neural-networks-2/#datapre)
Xvl = scaler.transform(Xvl)
#
# write result
#
train_df = createDataFrame(Xtr, ytr, ytr_missing)
validate_df = createDataFrame(Xvl, yvl, yvl_missing)
train_df.to_csv(output_file_prefix + 'train.csv', header=True, index=False)
validate_df.to_csv(output_file_prefix + 'validate.csv', header=True, index=False)
print '\n---------------------\nResult train:\n%s\n' % (train_df.describe())
print '\n---------------------\nResult validate:\n%s\n' % (validate_df.describe())
|
{"hexsha": "0cccf7d70575a20bc9b3c90adb7f7f33b358ae05", "size": 2309, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils/offline_preprocessor.py", "max_stars_repo_name": "yaricom/timeserieslearning", "max_stars_repo_head_hexsha": "6c6c5dc253b47bd6a22a2a97030adba5c5e7512a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2017-03-08T01:22:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T05:43:16.000Z", "max_issues_repo_path": "src/utils/offline_preprocessor.py", "max_issues_repo_name": "yaricom/timeserieslearning", "max_issues_repo_head_hexsha": "6c6c5dc253b47bd6a22a2a97030adba5c5e7512a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils/offline_preprocessor.py", "max_forks_repo_name": "yaricom/timeserieslearning", "max_forks_repo_head_hexsha": "6c6c5dc253b47bd6a22a2a97030adba5c5e7512a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-03-13T03:04:11.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-16T21:47:27.000Z", "avg_line_length": 28.1585365854, "max_line_length": 110, "alphanum_fraction": 0.708531832, "include": true, "reason": "import numpy", "num_tokens": 613}
|
import numpy as np
from ase.dft import kpoints
import pyglib.gutz.ginput as ginput
import pyglib.model.tbASE as tb
# The following is a simple test for the 1-d Hubbard model.
a = tb.AtomsTB("N", [(0, 0, 0)], cell=(1, 1, 1))
a.set_orbitals_spindeg()
aTB = tb.TB(a)
# Nearest-neighbour hopping -1 along +/-x, zero on-site term: a 1-d chain.
aTB.set_hop([
    ((1, 0, 0), 0, 0, -1),
    ((-1, 0, 0), 0, 0, -1),
    ((0, 0, 0), 0, 0, 0), ])
# k-point sampling along the chain direction only.
kps_size = (100, 1, 1)
kps = kpoints.monkhorst_pack(kps_size)
num_k = len(kps)
kps_wt = 1.0 / num_k * np.ones((num_k))
if aTB.Atoms.spindeg:
    kps_wt *= 2
num_e = 1.0
num_band_max = 1
# GPARAMBANDS.h5
# Fix: `np.complex` was a deprecated alias of the builtin `complex`
# (removed in NumPy 1.24); the builtin produces the identical dtype.
h1e_list = [np.array([[0, 0], [0, 0]], dtype=complex)]
ginput.save_gparambands(kps_wt, num_e, num_band_max, h1e_list=h1e_list)
sigma_list = [np.identity(2, dtype=np.int32)]
# Local Coulomb interaction U = 6 in the density-density channel.
v2e = np.zeros((2, 2, 2, 2), dtype=complex)
v2e[0, 0, 0, 0] = v2e[0, 0, 1, 1] = v2e[1, 1, 0, 0] = v2e[1, 1, 1, 1] = 6.0
sz = np.asarray(np.diag((1, -1)), dtype=complex)
# GPARAM.h5
ginput.save_gparam(sigma_list=sigma_list, iembeddiag=-1, v2e_list=[v2e],
                   sz_list=[sz])
# BAREHAM_0.h5
aTB.save_bareham(kps)
|
{"hexsha": "27bd915d00fef66f697707a37d10b3d46379110d", "size": 1067, "ext": "py", "lang": "Python", "max_stars_repo_path": "ComRISB/pyglib/pyglib/model/test/test_1band_model/REF/test_tb.py", "max_stars_repo_name": "comscope/comsuite", "max_stars_repo_head_hexsha": "d51c43cad0d15dc3b4d1f45e7df777cdddaa9d6c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2019-06-15T18:08:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T05:01:29.000Z", "max_issues_repo_path": "ComRISB/pyglib/pyglib/model/test/test_1band_model/REF/test_tb.py", "max_issues_repo_name": "comscope/Comsuite", "max_issues_repo_head_hexsha": "b80ca9f34c519757d337487c489fb655f7598cc2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ComRISB/pyglib/pyglib/model/test/test_1band_model/REF/test_tb.py", "max_forks_repo_name": "comscope/Comsuite", "max_forks_repo_head_hexsha": "b80ca9f34c519757d337487c489fb655f7598cc2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2019-06-05T02:57:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T02:54:25.000Z", "avg_line_length": 26.0243902439, "max_line_length": 75, "alphanum_fraction": 0.6344892221, "include": true, "reason": "import numpy", "num_tokens": 484}
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
from matplotlib import gridspec
from scipy.optimize import curve_fit
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
# --------------------------------------------------------------------------- Approximations
def g(x):
    """``Benchmark function.``

    Args:
        x (int, float): Input.

    Returns:
        float: Output. :math:`sin(\\frac{1}{2}x) - 2 cos(2x)`
    """
    slow_wave = np.sin(0.5 * x)
    fast_wave = np.cos(2 * x)
    return slow_wave - 2 * fast_wave
def five_interp(x, a0, a1, a2, a3, a4):
    """``Approximation degree = 5``

    Evaluates a0 + a1*x + a2*x**2 + a3*x**3 + a4*x**4.
    """
    total = a0
    for power, weight in enumerate((a1, a2, a3, a4), start=1):
        total = total + weight * x ** power
    return total
def six_interp(x, a0, a1, a2, a3, a4, a5):
    """``Approximation degree = 6``

    Evaluates a0 + a1*x + ... + a5*x**5.
    """
    total = a0
    for power, weight in enumerate((a1, a2, a3, a4, a5), start=1):
        total = total + weight * x ** power
    return total
def seven_interp(x, a0, a1, a2, a3, a4, a5, a6):
    """``Approximation degree = 7``

    Evaluates a0 + a1*x + ... + a6*x**6.
    """
    total = a0
    for power, weight in enumerate((a1, a2, a3, a4, a5, a6), start=1):
        total = total + weight * x ** power
    return total
def eight_interp(x, a0, a1, a2, a3, a4, a5, a6, a7):
    """``Approximation degree = 8``

    Evaluates a0 + a1*x + ... + a7*x**7.
    """
    total = a0
    for power, weight in enumerate((a1, a2, a3, a4, a5, a6, a7), start=1):
        total = total + weight * x ** power
    return total
def nine_interp(x, a0, a1, a2, a3, a4, a5, a6, a7, a8):
    """``Approximation degree = 9``

    Evaluates a0 + a1*x + ... + a8*x**8.
    """
    total = a0
    for power, weight in enumerate((a1, a2, a3, a4, a5, a6, a7, a8), start=1):
        total = total + weight * x ** power
    return total
def ten_interp(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9):
    """``Approximation degree = 10``

    Evaluates a0 + a1*x + ... + a9*x**9.
    """
    total = a0
    for power, weight in enumerate((a1, a2, a3, a4, a5, a6, a7, a8, a9),
                                   start=1):
        total = total + weight * x ** power
    return total
# --------------------------------------------------------------------------- 3.2.1 Benchmark Exercise: Naive Approximation SciPy
class FCMethod:
    """This object uses the scipy ``curve fit`` method to naively approximate a specific
    function. It plots the interpolation and the real function for different nodes and
    tables the approximation accuracy.

    Args:
        a (int): Lower bound of interval.
        b (int): Upper bound of interval.
        n (int): Number of interpolation nodes.
        func (function): Benchmark function.
        degree (int): Degree of approximation (must lie between 5 and 10).
    """

    def __init__(self, a, b, n, func, degree):
        """Constructor method. It uses the exact same arguments as the :class:`PMethod`
        constructor method. This could be achieved by inheritance as well.
        """
        self.a = a
        self.b = b
        self.n = n
        self.func = func
        self.degree = degree

    def check_degree(self, increase):
        """Increases approximation degree if degree lies between 5 and 10.

        Args:
            increase (bool): Increases approximation degree by one unit if True.
        """
        if increase and self.degree < 10:
            self.degree += 1
        elif self.degree < 5 or self.degree > 10:
            print("Degree must be between 5 and 10!")

    def choose_approx(self):
        """Chooses the approximation basis function matching ``self.degree``.

        Leaves ``self.approx`` untouched when the degree is outside [5, 10],
        matching the original fall-through behaviour.
        """
        # Built at call time so the class body itself has no dependency on
        # the module-level *_interp helpers being defined first.
        approx_by_degree = {
            5: five_interp,
            6: six_interp,
            7: seven_interp,
            8: eight_interp,
            9: nine_interp,
            10: ten_interp,
        }
        if self.degree in approx_by_degree:
            self.approx = approx_by_degree[self.degree]

    def fit_curve(self):
        """Implementation of scipy curve fit method. Uses the least squares method
        to find optimal weights for ``self.approx`` on n, 3n and 9n equidistant
        nodes; stores the node grids (xa/xb/xc) and weights (popta/poptb/poptc).
        """
        self.xa = np.linspace(self.a, self.b, self.n)
        self.xb = np.linspace(self.a, self.b, 3 * self.n)
        self.xc = np.linspace(self.a, self.b, 9 * self.n)
        self.popta = curve_fit(self.approx, self.xa, self.func(self.xa))[0]
        self.poptb = curve_fit(self.approx, self.xb, self.func(self.xb))[0]
        self.poptc = curve_fit(self.approx, self.xc, self.func(self.xc))[0]

    def plot_naive_interp(self, N, fs, number_1, number_2):
        """Plots true function and approximation as well as approximation error.

        Also stores the N evaluation nodes in ``self.x``, which
        :meth:`table_error` reuses afterwards.

        Args:
            N (int): Number of evaluation nodes.
            fs (tuple): Figuresize.
            number_1 (int, float): Number of first figure.
            number_2 (int, float): Number of second figure.
        """
        self.x = np.linspace(self.a, self.b, N)
        fig = plt.figure(figsize=fs)
        gs = gridspec.GridSpec(2, 1, height_ratios=[1.8, 1])
        # Top panel: the real function against the three fitted curves.
        ax0 = plt.subplot(gs[0])
        ax0.plot(self.x, self.func(self.x), label="Real Function")
        ax0.plot(
            self.x,
            self.approx(self.x, *self.popta),
            label=str(self.n) + " Nodes Approximation",
        )
        ax0.plot(
            self.x,
            self.approx(self.x, *self.poptb),
            label=str(3 * self.n) + " Nodes Approximation",
        )
        ax0.plot(
            self.x,
            self.approx(self.x, *self.poptc),
            label=str(9 * self.n) + " Nodes Approximation",
        )
        ax0.set_title(
            f"Figure {number_1}: Naive Approximation Output "
            + str(self.degree)
            + " Degree"
        )
        plt.grid()
        plt.legend(
            title="Naive Approximation for different Nodes",
            bbox_to_anchor=(1.04, 0.5),
            loc="center left",
            shadow=True,
            fancybox=True,
            borderaxespad=0,
            title_fontsize=12,
        )
        plt.setp(ax0.get_xticklabels(), visible=False)
        # Bottom panel: pointwise approximation error, sharing the x axis.
        ax1 = plt.subplot(gs[1], sharex=ax0)
        ax1.plot(
            self.x,
            self.approx(self.x, *self.popta) - self.func(self.x),
            label=str(self.n) + " Nodes Error",
        )
        ax1.plot(
            self.x,
            self.approx(self.x, *self.poptb) - self.func(self.x),
            label=str(3 * self.n) + " Nodes Error",
        )
        ax1.plot(
            self.x,
            self.approx(self.x, *self.poptc) - self.func(self.x),
            label=str(9 * self.n) + " Nodes Error",
        )
        ax1.set_title(
            f"Figure {number_2}: Naive Approximation Error "
            + str(self.degree)
            + " Degree"
        )
        plt.subplots_adjust(hspace=0.0)
        plt.grid()
        plt.legend(
            title="Error for different Nodes",
            bbox_to_anchor=(1.04, 0.5),
            loc="center left",
            shadow=True,
            fancybox=True,
            borderaxespad=0,
            title_fontsize=12,
        )
        plt.tight_layout()
        plt.show()

    def _accuracy_column(self, popt, label):
        """Return one single-column DataFrame of accuracy metrics for ``popt``.

        Args:
            popt (np.ndarray): Fitted polynomial weights.
            label (str): Column label (e.g. "10 Nodes").
        """
        pred = self.approx(self.x, *popt)
        true = self.func(self.x)
        # NOTE(review): the prediction is passed as the FIRST argument, i.e.
        # as ``y_true`` for the asymmetric scores (explained variance, R^2).
        # Kept as-is to preserve the original numbers -- confirm intent.
        return pd.DataFrame(
            [
                mean_absolute_error(pred, true),
                mean_squared_error(pred, true),
                explained_variance_score(pred, true),
                r2_score(pred, true),
            ],
            # Bug fix: the first two row labels were swapped relative to the
            # values (MAE was labelled "Mean Squared Error" and vice versa).
            index=[
                "Mean Absolute Error",
                "Mean Squared Error",
                "Explained Variance",
                "$R^2$ Score",
            ],
            columns=[label],
        )

    def table_error(self, number):
        """Returns approximation accuracy for the n, 3n and 9n node fits.

        Requires :meth:`plot_naive_interp` (which sets ``self.x``) and
        :meth:`fit_curve` to have been called first.

        Args:
            number (int): Number of table.

        Returns:
            pd.io.formats.style.Styler: Captioned accuracy table.
        """
        frames = [
            self._accuracy_column(self.popta, str(self.n) + " Nodes"),
            self._accuracy_column(self.poptb, str(3 * self.n) + " Nodes"),
            self._accuracy_column(self.poptc, str(9 * self.n) + " Nodes"),
        ]
        return pd.concat(frames, axis=1).style.set_caption(
            f"Table {number}: Accuracy of Naive Approximation for "
            + str(self.degree)
            + " Degrees"
        )
|
{"hexsha": "9605fef62c277fb8f992db78d056b0d9b7124675", "size": 9842, "ext": "py", "lang": "Python", "max_stars_repo_path": "auxiliary/scipy_implementation.py", "max_stars_repo_name": "ArbiKodraj/ML-Approximation", "max_stars_repo_head_hexsha": "e8696fe13e1e8e63f9eb27c68a77b81d578c1a27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "auxiliary/scipy_implementation.py", "max_issues_repo_name": "ArbiKodraj/ML-Approximation", "max_issues_repo_head_hexsha": "e8696fe13e1e8e63f9eb27c68a77b81d578c1a27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "auxiliary/scipy_implementation.py", "max_forks_repo_name": "ArbiKodraj/ML-Approximation", "max_forks_repo_head_hexsha": "e8696fe13e1e8e63f9eb27c68a77b81d578c1a27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8526645768, "max_line_length": 129, "alphanum_fraction": 0.4953261532, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2627}
|
import math
import numpy as np
import numpy.polynomial as poly
from .errors import ColorIndexError, ParamRangeError, MissingParamError
def get_BC(**kwargs):
    """Get bolometric correction (BC) using a variety of calibration relations.

    Available calibration relations:

    * `Alonso1995`: returns *BC* in *V* and *K* bands using (*V* − *K*) and
      [Fe/H] for dwarfs.
    * `Alonso1999`: returns *BC* using *T*:sub:`eff` and [Fe/H] for giants.
    * `Flower1996`: returns *BC* using *T*:sub:`eff`.
    * `Masana2006`: returns *BC* using *T*:sub:`eff`.

    Args:
        ref (str): Name of the calibration relation (one of the above).
        **kwargs: Forwarded unchanged to the selected calibration function.

    Returns:
        float or tuple: The bolometric correction(s).

    Raises:
        ValueError: If ``ref`` does not name a known calibration relation.
    """
    ref = kwargs.pop('ref', None)
    if ref == 'Alonso1995':
        bc = _get_dwarf_BC_Alonso1995(**kwargs)
    elif ref == 'Alonso1999':
        bc = _get_giant_BC_Alonso1999(**kwargs)
    elif ref == 'Masana2006':
        bc = _get_dwarf_BC_Masana2006(**kwargs)
    elif ref == 'Flower1996':
        bc = _get_BC_Flower1996(**kwargs)
    else:
        # Fix: previously an unknown `ref` fell through to `return bc` and
        # raised an opaque NameError; fail fast with a descriptive error.
        raise ValueError('unknown BC calibration relation: {!r}'.format(ref))
    return bc
def _get_BC_Flower1996(**kwargs):
"""Get *BC* in *V* band according to *T*:sub:`eff` using the relation given
by `Flower 1996 <http://adsabs.harvard.edu/abs/1996ApJ...469..355F>`_.
Args:
teff (int or float): Effective temperature (*T*:sub:`eff`).
Returns:
float: *BC* in *V* band.
The coefficients given in Table 6 of `Flower 1996
<http://adsabs.harvard.edu/abs/1996ApJ...469..355F>`_ missed powers of ten.
`Torres 2010 <http://adsabs.harvard.edu/abs/2010AJ....140.1158T>`_ gave the
corrent version in Table 1.
"""
coeff1 = [-0.118115450538963E+06,
0.137145973583929E+06,
-0.636233812100225E+05,
0.147412923562646E+05,
-0.170587278406872E+04,
0.788731721804990E+02,
]
coeff2 = [-0.370510203809015E+05,
0.385672629965804E+05,
-0.150651486316025E+05,
0.261724637119416E+04,
-0.170623810323864E+03,
]
coeff3 = [-0.190537291496456E+05,
0.155144866764412E+05,
-0.421278819301717E+04,
0.381476328422343E+03,
]
teff = kwargs.pop('Teff')
logt = math.log10(teff)
if logt >= 3.9:
coeff = coeff1
elif logt >= 3.7:
coeff = coeff2
else:
coeff = coeff3
p = poly.Polynomial(coeff)
return p(logt)
def _get_dwarf_BC_Alonso1995(**kwargs):
"""Get *BC* in *V* or *K* for dwarfs using the calibration relations given
by `Alonso+ 1995 <http://adsabs.harvard.edu/abs/1995A&A...297..197A>`_.
Parameters:
V_K (float): (*V* − *K*) color
FeH (float): [Fe/H] ratio
band (str, optional): Either "V" or "K"
Returns:
float or dict: *BC*:sub:`V` or *BC*:sub:`K`, if `band` is given; or
(*BC*:sub:`V`, *BC*:sub:`K`), if `band` is not given
Notes:
The empirical zero points of the Sun are adopted in Johnson system:
* (*V* − *K*)\ :sub:`⊙` = 1.486
* *BC*:sub:`⊙`\ (*V*) = −0.12
* *BC*:sub:`⊙`\ (*K*) = 1.366
Examples:
.. code-block:: python
from stella.parameter.bc import get_BC
# find BC in V band
bc_v, bc_k = get_BC(V_K=1.733, FeH=-0.22, ref='Alonso1995')
# or
bc_v, bc_k = get_BC(index='V-K', color=1.733, FeH=-0.22, ref='Alonso1995')
References:
* `Alonso et al., 1995, A&A, 297, 197 <http://adsabs.harvard.edu/abs/1995A&A...297..197A>`_
"""
reference = 'Alonso, 1995, A&A, 297, 197'
V_K = kwargs.pop('V_K', None)
if V_K is None:
index = kwargs.pop('index', None)
if index is not None and index == 'V-K':
V_K = kwargs.pop('color', None)
FeH = kwargs.pop('FeH', None)
band = kwargs.pop('band', None)
extrapolation = kwargs.pop('extrapolation',False)
if FeH == None:
raise MissingParamError('[Fe/H]', reference)
if not extrapolation:
if FeH < -3.0 or FeH > +0.2:
raise ParamRangeError('[Fe/H]', FeH, reference)
elif (-0.5 < FeH <= +0.2 and 0.8 < V_K < 3.0) or \
(-1.5 < FeH <= -0.5 and 0.9 < V_K < 2.6) or \
(-2.5 < FeH <= -1.5 and 1.1 < V_K < 2.3) or \
(-3.0 <=FeH <= -2.5 and 1.2 < V_K < 2.0):
pass
else:
raise ParamRangeError('V-K', V_K, reference)
# coefficients coming from equation 9
coeff1 = np.array([+2.38619e-4, -1.93659e-4, +6.52621e-5, -7.95862e-6,
-1.01449e-5, +8.17345e-6, -2.87876e-6, +5.40944e-7])
# coefficients coming from equation 10
coeff2 = np.array([+2.23403e-4, -1.71897e-4, +5.51085e-5, -6.41071e-6,
-3.71945e-5, +4.99847e-5, -2.41517e-5, +4.10655e-6])
# coefficients coming from equation 9
coeff1 = np.array([[+2.38619e-4, -1.93659e-4, +6.52621e-5, -7.95862e-6],
[-1.01449e-5, +8.17345e-6, -2.87876e-6, +5.40944e-7]])
# coefficients coming from equation 10
coeff2 = np.array([[+2.23403e-4, -1.71897e-4, +5.51085e-5, -6.41071e-6],
[-3.71945e-5, +4.99847e-5, -2.41517e-5, +4.10655e-6]])
phi = lambda coeff: poly.polynomial.polyval2d(FeH, V_K, coeff)
VK_sun = 1.486
phi_sun = poly.polynomial.polyval(VK_sun, coeff1[0])
if extrapolation:
if V_K <= 1.7:
bc_v = -2.5*math.log10(phi(coeff1)/phi_sun) - 0.12
bc_k = -2.5*math.log10(phi(coeff1)/phi_sun) + 1.366
else:
bc_v = -2.5*math.log10(phi(coeff2)/phi_sun) - 0.12
bc_k = -2.5*math.log10(phi(coeff2)/phi_sun) + 1.366
else:
if 0.9 < V_K <= 1.7:
bc_v = -2.5*math.log10(phi(coeff1)/phi_sun) - 0.12
bc_k = -2.5*math.log10(phi(coeff1)/phi_sun) + 1.366
elif 1.7 < V_K <= 2.9:
bc_v = -2.5*math.log10(phi(coeff2)/phi_sun) - 0.12
bc_k = -2.5*math.log10(phi(coeff2)/phi_sun) + 1.366
if band is None:
return (bc_v, bc_k)
elif band == 'V':
return bc_v
elif band == 'K':
return bc_k
else:
return None
def _get_giant_BC_Alonso1999(**kwargs):
"""Get BC for giants using the calibrations relations given by `Alonso+ 1999
<http://adsabs.harvard.edu/abs/1999A&AS..140..261A>`_.
Args:
Teff (float or int): *T*:sub:`eff` of the star
FeH (float): [Fe/H] abundance ratio
extrapolation (bool): use extrapolation of True
Returns:
float: *BC*:sub:`V` for the star
References:
* `Alonso et al., 1999, A&AS, 140, 261 <http://adsabs.harvard.edu/abs/1999A&AS..140..261A>`_
"""
teff = kwargs.pop('Teff', None)
FeH = kwargs.pop('FeH', 0.0)
extrapolation = kwargs.pop('extrapolation', False)
logt = math.log10(teff)
if extrapolation:
if logt <= 3.66: choose = 17
else: choose = 18
else:
if 3.50 <= logt <= 3.67 and +0.2 >= FeH > -0.5: choose = 17
elif 3.56 <= logt <= 3.67 and -0.5 >= FeH > -1.5: choose = 17
elif 3.58 <= logt <= 3.67 and -1.5 >= FeH > -2.5: choose = 17
elif 3.61 <= logt <= 3.67 and -2.5 >= FeH > -3.0: choose = 17
elif 3.65 <= logt <= 3.96 and +0.2 >= FeH > -0.5: choose = 18
elif 3.65 <= logt <= 3.83 and -0.5 >= FeH > -1.5: choose = 18
elif 3.65 <= logt <= 3.80 and -1.5 >= FeH > -2.5: choose = 18
elif 3.65 <= logt <= 3.74 and -2.5 >= FeH > -3.0: choose = 18
else: raise ValueError
x = logt - 3.52
if choose == 17:
bc = -5.531e-2/x - 0.6177 + 4.420*x - 2.669*x**2 + 0.6943*x*FeH \
-0.1071*FeH - 8.612e-3*FeH**2
elif choose == 18:
bc = -9.930e-2/x + 2.887e-2 + 2.275*x - 4.425*x**2 + 0.3505*x*FeH \
-5.558e-2*FeH - 5.375e-3*FeH**2
return bc
def _get_dwarf_BC_Masana2006(**kwargs):
"""Get BC for dwarfs using the calibration relations given by `Masana+ 2006
<http://adsabs.harvard.edu/abs/2006A&A...450..735M>`_.
References
----------
* `Masana et al. 2006, A&A, 450, 735 <http://adsabs.harvard.edu/abs/2006A&A...450..735M>`_
"""
index = kwargs.pop('index')
color = kwargs.pop('color')
FeH = kwargs.pop('FeH', 0.0)
logg = kwargs.pop('logg', 4.2)
extrapolation = kwargs.pop('extrapolation', False)
if index == 'V-K':
if extrapolation or \
(-3.0 < FeH < -1.5 and 1.0 < color < 2.9) or \
(-1.5 <= FeH < -0.5 and 0.5 < color < 2.9) or \
(-0.5 <= FeH < 0.0 and 0.4 < color < 3.0) or \
( 0.5 <= FeH < 0.5 and 0.35 < color < 2.8):
if (extrapolation and color < 1.15) or \
(not extrapolation and
0.35 < color < 1.15 and 3.25 <= logg <= 4.75):
bc = 0.1275 + 0.9907*color - 0.0395*color**2 + 0.0693*FeH + \
0.0140*FeH**2 + 0.0120*color*FeH - 0.0253*logg
elif (extrapolation and color >= 1.15) or \
(not extrapolation and
1.15 <= color < 3.0 and 3.75 <= logg <= 4.75):
bc = -0.1041 + 1.2600*color - 0.1570*color**2 + 0.1460*FeH + \
0.0010*FeH**2 - 0.0631*color*FeH - 0.0079*logg
else:
raise ValueError
return bc
else:
raise ValueError
else:
raise ValueError
|
{"hexsha": "c9610e8a2058a27d94f96d404fe796f7479bb0dd", "size": 9379, "ext": "py", "lang": "Python", "max_stars_repo_path": "stellarlab/parameter/bc.py", "max_stars_repo_name": "wangleon/stella", "max_stars_repo_head_hexsha": "3942f8e687065bb96760140596747cbbe6dad04b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "stellarlab/parameter/bc.py", "max_issues_repo_name": "wangleon/stella", "max_issues_repo_head_hexsha": "3942f8e687065bb96760140596747cbbe6dad04b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stellarlab/parameter/bc.py", "max_forks_repo_name": "wangleon/stella", "max_forks_repo_head_hexsha": "3942f8e687065bb96760140596747cbbe6dad04b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-02T02:18:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-02T02:18:28.000Z", "avg_line_length": 35.5265151515, "max_line_length": 100, "alphanum_fraction": 0.5333191172, "include": true, "reason": "import numpy", "num_tokens": 3400}
|
import warnings
from typing import Tuple, Any, Dict
import numpy
import openslide
import wx
from PIL import Image
from antilles.utils.io import DAO
def get_screen_size() -> Tuple[int, int]:
    """Return the display size in pixels as (width, height).

    A temporary wx.App is created so the display query can run, and it is
    discarded immediately afterwards.
    """
    app = wx.App(False)
    try:
        return wx.GetDisplaySize()
    finally:
        del app
# Display size queried once at import time and reused by calc_downsample_factor.
screen_size = get_screen_size()
def get_slide_dims(path: str) -> Tuple[int, int]:
    """Return the (width, height) pixel dimensions of the slide at *path*."""
    with openslide.OpenSlide(DAO.abs(path)) as slide:
        return slide.dimensions
def get_mpp_from_openslide(obj) -> float:
    """Return the microns-per-pixel (MPP) value of an open slide.

    When the x and y MPP values differ, a warning is emitted and their
    average is returned; otherwise the common value is returned directly.
    """
    mpp_x = float(obj.properties[openslide.PROPERTY_NAME_MPP_X])
    mpp_y = float(obj.properties[openslide.PROPERTY_NAME_MPP_Y])

    if numpy.equal(mpp_x, mpp_y):
        return mpp_x

    warnings.warn(
        "MPP values are not equal in x and y directions! "
        "x: {x}, y: {y}".format(x=mpp_x, y=mpp_y)
    )
    return numpy.average((mpp_x, mpp_y))
def calc_downsample_factor(dims: Tuple[int, int]) -> float:
    """Return how much *dims* must be shrunk to fit the display.

    Only about 75% of the screen is usable for showing the image, so the
    screen size is scaled down before comparing against the image size.
    """
    usable = [0.75 * s for s in screen_size]
    width, height = dims
    return max(float(width) / float(usable[0]),
               float(height) / float(usable[1]))
def get_thumbnail(path: str) -> Dict[str, Any]:
    """Load a screen-sized preview of the image at *path*.

    OpenSlide is tried first; when the file is not a supported whole-slide
    format, PIL is used as a fallback. Returns a dict with the downsample
    ``factor`` and the thumbnail ``image``.
    """
    def shrink(dims):
        # Downsample factor plus the integer thumbnail dimensions it implies.
        factor = calc_downsample_factor(dims)
        thumb_dims = tuple(int(round(float(s) / factor)) for s in dims)
        return factor, thumb_dims

    try:
        with openslide.OpenSlide(DAO.abs(path)) as slide:
            factor, thumb_dims = shrink(slide.dimensions)
            image = slide.get_thumbnail(thumb_dims)
    except openslide.lowlevel.OpenSlideUnsupportedFormatError:
        with Image.open(DAO.abs(path)) as pil_image:
            factor, thumb_dims = shrink(pil_image.size)
            image = pil_image.resize(thumb_dims, Image.LANCZOS)
    return {"factor": factor, "image": image}
|
{"hexsha": "e2eb243d968cff80d80b0382dbb4c5a426363cbe", "size": 1896, "ext": "py", "lang": "Python", "max_stars_repo_path": "antilles/utils/image.py", "max_stars_repo_name": "biomicrodev/antilles", "max_stars_repo_head_hexsha": "38f1d16494fae750b95d4e9a654038b9aba8e248", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "antilles/utils/image.py", "max_issues_repo_name": "biomicrodev/antilles", "max_issues_repo_head_hexsha": "38f1d16494fae750b95d4e9a654038b9aba8e248", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "antilles/utils/image.py", "max_forks_repo_name": "biomicrodev/antilles", "max_forks_repo_head_hexsha": "38f1d16494fae750b95d4e9a654038b9aba8e248", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9726027397, "max_line_length": 82, "alphanum_fraction": 0.6418776371, "include": true, "reason": "import numpy", "num_tokens": 495}
|
Require Import ZArith.
(* 2^p as an integer: apply xO (doubling of a binary positive) p times to xH (= 1). *)
Definition pow2_p p := Zpos (iter_pos positive xO xH p).
(* The p-th Mersenne number M_p = 2^p - 1. *)
Definition mersenne p := (pow2_p p - 1)%Z.
(* One Lucas-Lehmer step: s |-> s^2 - 2 (mod M_p). *)
Definition next_s mp s := (((s*s) - 2) mod mp)%Z.
(* Lucas-Lehmer residue: iterate next_s (p - 2) times starting from s_0 = 4. *)
Definition lucas_residue p :=
  let mp := mersenne p in
  let pm2 := (p-2)%positive in
  iter_pos Z (next_s mp) 4%Z pm2.
(* Lucas-Lehmer primality test: M_p is prime iff the residue is 0 (for odd prime p). *)
Definition lucas_test p :=
  Zeq_bool (lucas_residue p) 0.
(* Sample exponents; both 2^89 - 1 and 2^521 - 1 are known Mersenne primes. *)
Definition p89 := 89%positive.
Definition p521 := 521%positive.
Definition res := lucas_test p521.
|
{"author": "maximedenes", "repo": "native-compute-bench", "sha": "ac7891508239f9cc1f7ba0190b1814fb152d2126", "save_path": "github-repos/coq/maximedenes-native-compute-bench", "path": "github-repos/coq/maximedenes-native-compute-bench/native-compute-bench-ac7891508239f9cc1f7ba0190b1814fb152d2126/src/Lucas.v"}
|
[STATEMENT]
lemma finite_Update:
"finite TS \<Longrightarrow> finite ((\<lambda> F. (Rep_pupdate F) (Value ST)) ` (PUpdate (Label TS)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite TS \<Longrightarrow> finite ((\<lambda>F. Rep_pupdate F (Value ST)) ` Expr.PUpdate (Label TS))
[PROOF STEP]
by (rule finite_imageI, auto)
|
{"llama_tokens": 124, "file": "Statecharts_HASem", "length": 1}
|
import matplotlib
import matplotlib.pyplot as plt
# NOTE(review): selecting the backend AFTER importing pyplot may have no
# effect on some matplotlib versions -- matplotlib.use() normally belongs
# before the pyplot import; confirm against the installed version.
matplotlib.use('TKAgg')
import numpy as np
import time, random, math
# Number of bars; the data start in strictly descending order (the worst
# case for bubble sort). `xrange` makes this a Python 2 script.
size=11
#array = random.sample((range(1, size + 1)), size)
array = list(xrange(size, 0, -1))
def bubble_sort(arr, rects):
    """Animate one full bubble pass over ``arr``, recursing until sorted.

    Each comparison highlights the current pair in yellow; swapped pairs
    flash green. When a pass makes no swaps, all bars are painted green.

    arr   -- list being sorted in place
    rects -- matplotlib bar artists mirroring ``arr`` (heights are updated
             via the module-level update_plot helper)
    """
    # Fix: the original named this flag `sorted`, shadowing the builtin.
    is_sorted = True
    for x in range(0, size - 1):
        update_plot(arr, '#000000', x-1, x+1, 0, rects)
        update_plot(arr, '#f3f315', x, x + 2, .3, rects)
        if arr[x] > arr[x + 1]:
            # Idiomatic tuple swap instead of a temporary variable.
            arr[x], arr[x + 1] = arr[x + 1], arr[x]
            is_sorted = False
            update_plot(arr, '#32cd32', x, x + 2, .3, rects)
    update_plot(arr, '#000000', size - 2, size, 0, rects)
    if not is_sorted:
        # One recursion level per pass; fine for the small module-level size.
        bubble_sort(arr, rects)
    else:
        update_plot(arr, '#32cd32', 0, size, 0, rects)
def update_plot(arr, color, first, last, nsecs, rects):
    """Redraw the bars with indices [first, last): sync their heights with
    ``arr``, recolor them, repaint the canvas, then pause ``nsecs`` seconds
    (no pause when nsecs is 0)."""
    for idx in range(first, last):
        bar = rects[idx]
        bar.set_height(arr[idx])
        bar.set_facecolor(color)
    fig.canvas.draw()
    if nsecs != 0:
        time.sleep(nsecs)
def animated_barplot():
    """Draw the initial bar chart for the module-level `array` and start the
    animated bubble sort (blocks until sorting finishes)."""
    # One bar per element, centred on its index, initially black.
    width = 1
    rects = plt.bar(range(size), array, width, align = 'center', color='k')
    plt.title("Bubble Sort")
    plt.xlabel("Index")
    # Hide all tick marks and tick labels on both axes.
    plt.tick_params(axis='both', labelbottom='off', labeltop='off', top='off', labelleft='off', left='off', labelright='off', right='off')
    plt.xticks(np.arange(size), tuple(map(str, range(size))))
    # Run the sort; it mutates `array` and updates `rects` as it goes.
    bubble_sort(array, rects)
fig = plt.figure()
win = fig.canvas.manager.window
# Schedule the animation 10 ms after the Tk window appears, then enter the
# blocking GUI main loop.
win.after(10, animated_barplot)
plt.show()
|
{"hexsha": "e5ef937a7364aa6fad9938559bf46e279126a130", "size": 1373, "ext": "py", "lang": "Python", "max_stars_repo_path": "57/bubblesort.py", "max_stars_repo_name": "Chutzpah7/Challenges", "max_stars_repo_head_hexsha": "7481eaf49dca9e8f68b8efa58cde778aa20449a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "57/bubblesort.py", "max_issues_repo_name": "Chutzpah7/Challenges", "max_issues_repo_head_hexsha": "7481eaf49dca9e8f68b8efa58cde778aa20449a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "57/bubblesort.py", "max_forks_repo_name": "Chutzpah7/Challenges", "max_forks_repo_head_hexsha": "7481eaf49dca9e8f68b8efa58cde778aa20449a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6041666667, "max_line_length": 135, "alphanum_fraction": 0.6605972323, "include": true, "reason": "import numpy", "num_tokens": 440}
|
import math
import importlib
import functools
def generate_inputs(size):
    """Create deterministic pseudo-random input arrays for the isoneutral
    mixing benchmark.

    The 3-d grid is sized so it holds on the order of ``size`` points
    (2x : 2x : 0.25x along the three axes, x = size**(1/3)).  Seeding NumPy's
    global RNG with 17 makes every call with the same ``size`` reproducible.

    Returns a 23-tuple: four masks, ten 1-d metric arrays, two 4-d tracer
    fields, ``zt``, five 3-d diffusivities, and four zero 5-d weight arrays.
    """
    import numpy as np

    np.random.seed(17)
    edge = size ** (1 / 3)
    shape = (
        math.ceil(2 * edge),
        math.ceil(2 * edge),
        math.ceil(0.25 * edge),
    )

    def randn_batch(count, *dims):
        # Draws happen strictly in call order, preserving the RNG stream.
        return [np.random.randn(*dims) for _ in range(count)]

    # Land/sea masks: 0.0 or 1.0, ~80% ones.
    maskT, maskU, maskV, maskW = (
        (np.random.rand(*shape) < 0.8).astype('float64') for _ in range(4)
    )
    # 1-d grid spacings / metric terms.
    dxt, dxu = randn_batch(2, shape[0])
    dyt, dyu = randn_batch(2, shape[1])
    dzt, dzw, zt = randn_batch(3, shape[2])
    cost, cosu = randn_batch(2, shape[1])
    # 3-d diffusivities.  K_iso_steep is drawn but not returned: the draw is
    # kept so the RNG stream (and hence every later array) stays unchanged.
    K_iso, K_iso_steep, K_11, K_22, K_33 = randn_batch(5, *shape)
    # 4-d tracer fields with three time levels.
    salt, temp = randn_batch(2, *shape, 3)
    # 5-d interpolation weights, zero-initialised.
    Ai_ez, Ai_nz, Ai_bx, Ai_by = (np.zeros((*shape, 2, 2)) for _ in range(4))

    return (
        maskT, maskU, maskV, maskW,
        dxt, dxu, dyt, dyu, dzt, dzw,
        cost, cosu,
        salt, temp, zt,
        K_iso, K_11, K_22, K_33, Ai_ez, Ai_nz, Ai_bx, Ai_by,
    )
def try_import(backend):
    """Import this package's implementation module for ``backend``.

    Returns the module, or None when the backend (or one of its
    dependencies) is not installed.
    """
    module_name = f'.isoneutral_{backend}'
    try:
        return importlib.import_module(module_name, __name__)
    except ImportError:
        # Missing optional dependency — caller treats None as "unavailable".
        return None
def get_callable(backend, size, device='cpu'):
    """Return a zero-argument callable that runs the benchmark for ``backend``.

    Inputs are generated once up front and bound into the returned partial.
    Backends exposing ``prepare_inputs`` get a chance to convert the raw
    NumPy arrays first (e.g. moving them onto ``device``).
    """
    module = try_import(backend)
    inputs = generate_inputs(size)
    if hasattr(module, 'prepare_inputs'):
        inputs = module.prepare_inputs(*inputs, device=device)
    return functools.partial(module.run, *inputs, device=device)
# Backend names that may provide an `isoneutral_<name>` implementation module;
# `try_import` resolves each lazily, so missing dependencies are tolerated.
__implementations__ = (
    'bohrium',
    'cupy',
    'numba',
    'numpy',
    'jax',
    'pytorch',
    'theano',
)
|
{"hexsha": "c6a56b7377d1abe6a5f7f13caf169f6a21612220", "size": 1714, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmarks/isoneutral_mixing/__init__.py", "max_stars_repo_name": "mdanatg/pyhpc-benchmarks", "max_stars_repo_head_hexsha": "710d0ab484cae28beab99ddd1167d33574c83b53", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 230, "max_stars_repo_stars_event_min_datetime": "2019-11-07T07:58:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T14:47:58.000Z", "max_issues_repo_path": "benchmarks/isoneutral_mixing/__init__.py", "max_issues_repo_name": "mdanatg/pyhpc-benchmarks", "max_issues_repo_head_hexsha": "710d0ab484cae28beab99ddd1167d33574c83b53", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2019-12-15T19:05:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-17T20:19:51.000Z", "max_forks_repo_path": "benchmarks/isoneutral_mixing/__init__.py", "max_forks_repo_name": "mdanatg/pyhpc-benchmarks", "max_forks_repo_head_hexsha": "710d0ab484cae28beab99ddd1167d33574c83b53", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2020-02-05T17:30:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-08T08:40:05.000Z", "avg_line_length": 25.5820895522, "max_line_length": 101, "alphanum_fraction": 0.6044340723, "include": true, "reason": "import numpy", "num_tokens": 551}
|
(* Title: Extension Orders
Author: Heiko Becker <heikobecker92@gmail.com>, 2016
Author: Jasmin Blanchette <jasmin.blanchette at inria.fr>, 2016
Author: Dmitriy Traytel <traytel@inf.ethz.ch>, 2014
Maintainer: Jasmin Blanchette <jasmin.blanchette at inria.fr>
*)
section \<open>Extension Orders\<close>
theory Extension_Orders
imports Lambda_Free_Util Infinite_Chain "HOL-Cardinals.Wellorder_Extension"
begin
text \<open>
This theory defines locales for categorizing extension orders used for orders on
\<open>\<lambda>\<close>-free higher-order terms and defines variants of the lexicographic and
multiset orders.
\<close>
subsection \<open>Locales\<close>
(* Base locale for list extensions of a binary relation: "ext gt ys xs" lifts
   the element order gt to lists.  The two axioms say the extension is
   monotone in gt, and commutes with mapping an order-preserving function
   (under irreflexivity/transitivity side conditions on a finite carrier A). *)
locale ext =
  fixes ext :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool"
  assumes
    mono_strong: "(\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt' y x) \<Longrightarrow> ext gt ys xs \<Longrightarrow> ext gt' ys xs" and
    map: "finite A \<Longrightarrow> ys \<in> lists A \<Longrightarrow> xs \<in> lists A \<Longrightarrow> (\<forall>x \<in> A. \<not> gt (f x) (f x)) \<Longrightarrow>
      (\<forall>z \<in> A. \<forall>y \<in> A. \<forall>x \<in> A. gt (f z) (f y) \<longrightarrow> gt (f y) (f x) \<longrightarrow> gt (f z) (f x)) \<Longrightarrow>
      (\<forall>y \<in> A. \<forall>x \<in> A. gt y x \<longrightarrow> gt (f y) (f x)) \<Longrightarrow> ext gt ys xs \<Longrightarrow> ext gt (map f ys) (map f xs)"
begin

(* Unconditional monotonicity, registered with the [mono] attribute so the
   inductive package can use it. *)
lemma mono[mono]: "gt \<le> gt' \<Longrightarrow> ext gt \<le> ext gt'"
  using mono_strong by blast

end
(* Locales adding irreflexivity and transitivity of the extension, in several
   strengths: assumed over a carrier set A, derivable one from the other, or
   with hypotheses only on the elements actually occurring in the lists. *)
locale ext_irrefl = ext +
  assumes irrefl: "(\<forall>x \<in> set xs. \<not> gt x x) \<Longrightarrow> \<not> ext gt xs xs"

locale ext_trans = ext +
  assumes trans: "zs \<in> lists A \<Longrightarrow> ys \<in> lists A \<Longrightarrow> xs \<in> lists A \<Longrightarrow>
    (\<forall>z \<in> A. \<forall>y \<in> A. \<forall>x \<in> A. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x) \<Longrightarrow> ext gt zs ys \<Longrightarrow> ext gt ys xs \<Longrightarrow>
    ext gt zs xs"

(* Transitivity obtainable from irreflexivity (on a finite carrier). *)
locale ext_irrefl_before_trans = ext_irrefl +
  assumes trans_from_irrefl: "finite A \<Longrightarrow> zs \<in> lists A \<Longrightarrow> ys \<in> lists A \<Longrightarrow> xs \<in> lists A \<Longrightarrow>
    (\<forall>x \<in> A. \<not> gt x x) \<Longrightarrow> (\<forall>z \<in> A. \<forall>y \<in> A. \<forall>x \<in> A. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x) \<Longrightarrow> ext gt zs ys \<Longrightarrow>
    ext gt ys xs \<Longrightarrow> ext gt zs xs"

(* ... and irreflexivity obtainable from transitivity. *)
locale ext_trans_before_irrefl = ext_trans +
  assumes irrefl_from_trans: "(\<forall>z \<in> set xs. \<forall>y \<in> set xs. \<forall>x \<in> set xs. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x) \<Longrightarrow>
    (\<forall>x \<in> set xs. \<not> gt x x) \<Longrightarrow> \<not> ext gt xs xs"

(* Strongest variant: transitivity with per-element hypotheses only. *)
locale ext_irrefl_trans_strong = ext_irrefl +
  assumes trans_strong: "(\<forall>z \<in> set zs. \<forall>y \<in> set ys. \<forall>x \<in> set xs. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x) \<Longrightarrow>
    ext gt zs ys \<Longrightarrow> ext gt ys xs \<Longrightarrow> ext gt zs xs"

(* The strong locale subsumes all of the weaker ones. *)
sublocale ext_irrefl_trans_strong < ext_irrefl_before_trans
  by standard (erule irrefl, metis in_listsD trans_strong)

sublocale ext_irrefl_trans_strong < ext_trans
  by standard (metis in_listsD trans_strong)

sublocale ext_irrefl_trans_strong < ext_trans_before_irrefl
  by standard (rule irrefl)
(* Compatibility locales: appending a fresh element makes a list greater
   (snoc); consing/snoccing a shared element preserves the order; replacing
   one element by a strictly greater one yields a greater list; and the
   singleton case characterises ext in terms of gt. *)
locale ext_snoc = ext +
  assumes snoc: "ext gt (xs @ [x]) xs"

locale ext_compat_cons = ext +
  assumes compat_cons: "ext gt ys xs \<Longrightarrow> ext gt (x # ys) (x # xs)"
begin

(* Iterating compat_cons gives compatibility with a common left context. *)
lemma compat_append_left: "ext gt ys xs \<Longrightarrow> ext gt (zs @ ys) (zs @ xs)"
  by (induct zs) (auto intro: compat_cons)

end

locale ext_compat_snoc = ext +
  assumes compat_snoc: "ext gt ys xs \<Longrightarrow> ext gt (ys @ [x]) (xs @ [x])"
begin

(* Dually, compatibility with a common right context. *)
lemma compat_append_right: "ext gt ys xs \<Longrightarrow> ext gt (ys @ zs) (xs @ zs)"
  by (induct zs arbitrary: xs ys rule: rev_induct)
    (auto intro: compat_snoc simp del: append_assoc simp: append_assoc[symmetric])

end

locale ext_compat_list = ext +
  assumes compat_list: "y \<noteq> x \<Longrightarrow> gt y x \<Longrightarrow> ext gt (xs @ y # xs') (xs @ x # xs')"

locale ext_singleton = ext +
  assumes singleton: "y \<noteq> x \<Longrightarrow> ext gt [y] [x] \<longleftrightarrow> gt y x"

locale ext_compat_list_strong = ext_compat_cons + ext_compat_snoc + ext_singleton
begin

(* compat_list is derivable from cons/snoc compatibility together with the
   singleton characterisation. *)
lemma compat_list: "y \<noteq> x \<Longrightarrow> gt y x \<Longrightarrow> ext gt (xs @ y # xs') (xs @ x # xs')"
  using compat_append_left[of gt "y # xs'" "x # xs'" xs]
    compat_append_right[of gt, of "[y]" "[x]" xs'] singleton[of y x gt]
  by fastforce

end

sublocale ext_compat_list_strong < ext_compat_list
  by standard (fact compat_list)
(* Totality on a carrier A lifts to totality of the extension; wf lifts
   well-foundedness; hd_or_tl decomposes a comparison of equal-length
   nonempty lists into a head comparison or a tail comparison. *)
locale ext_total = ext +
  assumes total: "(\<forall>y \<in> A. \<forall>x \<in> A. gt y x \<or> gt x y \<or> y = x) \<Longrightarrow> ys \<in> lists A \<Longrightarrow> xs \<in> lists A \<Longrightarrow>
    ext gt ys xs \<or> ext gt xs ys \<or> ys = xs"

locale ext_wf = ext +
  assumes wf: "wfP (\<lambda>x y. gt y x) \<Longrightarrow> wfP (\<lambda>xs ys. ext gt ys xs)"

locale ext_hd_or_tl = ext +
  assumes hd_or_tl: "(\<forall>z y x. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x) \<Longrightarrow> (\<forall>y x. gt y x \<or> gt x y \<or> y = x) \<Longrightarrow>
    length ys = length xs \<Longrightarrow> ext gt (y # ys) (x # xs) \<Longrightarrow> gt y x \<or> ext gt ys xs"
(* Main result of the locale hierarchy: for any extension satisfying
   irreflexivity-before-transitivity and hd_or_tl, the extension restricted
   to lists of length at most n is well-founded whenever gt is.  The proof
   first assumes gt total (first context), then lifts the totality
   assumption away via a well-order extension (second context). *)
locale ext_wf_bounded = ext_irrefl_before_trans + ext_hd_or_tl
begin

context
  fixes gt :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
  assumes
    gt_irrefl: "\<And>z. \<not> gt z z" and
    gt_trans: "\<And>z y x. gt z y \<Longrightarrow> gt y x \<Longrightarrow> gt z x" and
    gt_total: "\<And>y x. gt y x \<or> gt x y \<or> y = x" and
    gt_wf: "wfP (\<lambda>x y. gt y x)"
begin

lemma irrefl_gt: "\<not> ext gt xs xs"
  using irrefl gt_irrefl by simp

lemma trans_gt: "ext gt zs ys \<Longrightarrow> ext gt ys xs \<Longrightarrow> ext gt zs xs"
  by (rule trans_from_irrefl[of "set zs \<union> set ys \<union> set xs" zs ys xs gt])
    (auto intro: gt_trans simp: gt_irrefl)

lemma hd_or_tl_gt: "length ys = length xs \<Longrightarrow> ext gt (y # ys) (x # xs) \<Longrightarrow> gt y x \<or> ext gt ys xs"
  by (rule hd_or_tl) (auto intro: gt_trans simp: gt_total)

(* Well-foundedness at each fixed length n, by induction on n.  The step
   case refutes an infinite descending chain by splitting each link into a
   head decrease (impossible along a minimal "worst" chain) or a tail
   decrease (contradicting the induction hypothesis at length n). *)
lemma wf_same_length_if_total: "wfP (\<lambda>xs ys. length ys = n \<and> length xs = n \<and> ext gt ys xs)"
proof (induct n)
  case 0
  thus ?case
    unfolding wfP_def wf_def using irrefl by auto
next
  case (Suc n)
  note ih = this(1)
  define gt_hd where "\<And>ys xs. gt_hd ys xs \<longleftrightarrow> gt (hd ys) (hd xs)"
  define gt_tl where "\<And>ys xs. gt_tl ys xs \<longleftrightarrow> ext gt (tl ys) (tl xs)"
  have hd_tl: "gt_hd ys xs \<or> gt_tl ys xs"
    if len_ys: "length ys = Suc n" and len_xs: "length xs = Suc n" and ys_gt_xs: "ext gt ys xs"
    for n ys xs
    using len_ys len_xs ys_gt_xs unfolding gt_hd_def gt_tl_def
    by (cases xs; cases ys) (auto simp: hd_or_tl_gt)
  show ?case
    unfolding wfP_iff_no_inf_chain
  proof (intro notI)
    let ?gtsn = "\<lambda>ys xs. length ys = n \<and> length xs = n \<and> ext gt ys xs"
    let ?gtsSn = "\<lambda>ys xs. length ys = Suc n \<and> length xs = Suc n \<and> ext gt ys xs"
    let ?gttlSn = "\<lambda>ys xs. length ys = Suc n \<and> length xs = Suc n \<and> gt_tl ys xs"
    assume "\<exists>f. inf_chain ?gtsSn f"
    then obtain xs where xs_bad: "bad ?gtsSn xs"
      unfolding inf_chain_def bad_def by blast
    let ?ff = "worst_chain ?gtsSn gt_hd"
    have wf_hd: "wf {(xs, ys). gt_hd ys xs}"
      unfolding gt_hd_def by (rule wfP_app[OF gt_wf, of hd, unfolded wfP_def])
    have "inf_chain ?gtsSn ?ff"
      by (rule worst_chain_bad[OF wf_hd xs_bad])
    moreover have "\<not> gt_hd (?ff i) (?ff (Suc i))" for i
      by (rule worst_chain_not_gt[OF wf_hd xs_bad]) (blast intro: trans_gt)
    ultimately have tl_bad: "inf_chain ?gttlSn ?ff"
      unfolding inf_chain_def using hd_tl by blast
    have "\<not> inf_chain ?gtsn (tl \<circ> ?ff)"
      using wfP_iff_no_inf_chain[THEN iffD1, OF ih] by blast
    hence tl_good: "\<not> inf_chain ?gttlSn ?ff"
      unfolding inf_chain_def gt_tl_def by force
    show False
      using tl_bad tl_good by sat
  qed
qed

(* Bounded-length well-foundedness, by strong induction on the bound n.
   A hypothetical infinite chain either stays strictly below length n
   (contradiction with the IH at n - 1) or eventually stabilises at
   length exactly n (contradiction with wf_same_length_if_total). *)
lemma wf_bounded_if_total: "wfP (\<lambda>xs ys. length ys \<le> n \<and> length xs \<le> n \<and> ext gt ys xs)"
  unfolding wfP_iff_no_inf_chain
proof (intro notI, induct n rule: less_induct)
  case (less n)
  note ih = this(1) and ex_bad = this(2)
  let ?gtsle = "\<lambda>ys xs. length ys \<le> n \<and> length xs \<le> n \<and> ext gt ys xs"
  obtain xs where xs_bad: "bad ?gtsle xs"
    using ex_bad unfolding inf_chain_def bad_def by blast
  let ?ff = "worst_chain ?gtsle (\<lambda>ys xs. length ys > length xs)"
  note wf_len = wf_app[OF wellorder_class.wf, of length, simplified]
  have ff_bad: "inf_chain ?gtsle ?ff"
    by (rule worst_chain_bad[OF wf_len xs_bad])
  have ffi_bad: "\<And>i. bad ?gtsle (?ff i)"
    by (rule inf_chain_bad[OF ff_bad])
  have len_le_n: "\<And>i. length (?ff i) \<le> n"
    using worst_chain_pred[OF wf_len xs_bad] by simp
  have len_le_Suc: "\<And>i. length (?ff i) \<le> length (?ff (Suc i))"
    using worst_chain_not_gt[OF wf_len xs_bad] not_le_imp_less by (blast intro: trans_gt)
  show False
  proof (cases "\<exists>k. length (?ff k) = n")
    case False
    hence len_lt_n: "\<And>i. length (?ff i) < n"
      using len_le_n by (blast intro: le_neq_implies_less)
    hence nm1_le: "n - 1 < n"
      by fastforce
    let ?gtslt = "\<lambda>ys xs. length ys \<le> n - 1 \<and> length xs \<le> n - 1 \<and> ext gt ys xs"
    have "inf_chain ?gtslt ?ff"
      using ff_bad len_lt_n unfolding inf_chain_def
      by (metis (no_types, lifting) Suc_diff_1 le_antisym nat_neq_iff not_less0 not_less_eq_eq)
    thus False
      using ih[OF nm1_le] by blast
  next
    case True
    then obtain k where len_eq_n: "length (?ff k) = n"
      by blast
    let ?gtssl = "\<lambda>ys xs. length ys = n \<and> length xs = n \<and> ext gt ys xs"
    have len_eq_n: "length (?ff (i + k)) = n" for i
      by (induct i) (simp add: len_eq_n,
        metis (lifting) len_le_n len_le_Suc add_Suc dual_order.antisym)
    have "inf_chain ?gtsle (\<lambda>i. ?ff (i + k))"
      by (rule inf_chain_offset[OF ff_bad])
    hence "inf_chain ?gtssl (\<lambda>i. ?ff (i + k))"
      unfolding inf_chain_def using len_eq_n by presburger
    hence "\<not> wfP (\<lambda>xs ys. ?gtssl ys xs)"
      using wfP_iff_no_inf_chain by blast
    thus False
      using wf_same_length_if_total[of n] by sat
  qed
qed

end

context
  fixes gt :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
  assumes
    gt_irrefl: "\<And>z. \<not> gt z z" and
    gt_wf: "wfP (\<lambda>x y. gt y x)"
begin

(* Totality is dispensable: extend gt to a total well-founded order gt' via
   a well-order extension, apply wf_bounded_if_total to gt', and transfer
   the result back along gt \<le> gt' using monotonicity. *)
lemma wf_bounded: "wfP (\<lambda>xs ys. length ys \<le> n \<and> length xs \<le> n \<and> ext gt ys xs)"
proof -
  obtain Ge' where
    gt_sub_Ge': "{(x, y). gt y x} \<subseteq> Ge'" and
    Ge'_wo: "Well_order Ge'" and
    Ge'_fld: "Field Ge' = UNIV"
    using total_well_order_extension[OF gt_wf[unfolded wfP_def]] by blast
  define gt' where "\<And>y x. gt' y x \<longleftrightarrow> y \<noteq> x \<and> (x, y) \<in> Ge'"
  have gt_imp_gt': "gt \<le> gt'"
    by (auto simp: gt'_def gt_irrefl intro: gt_sub_Ge'[THEN subsetD])
  have gt'_irrefl: "\<And>z. \<not> gt' z z"
    unfolding gt'_def by simp
  have gt'_trans: "\<And>z y x. gt' z y \<Longrightarrow> gt' y x \<Longrightarrow> gt' z x"
    using Ge'_wo
    unfolding gt'_def well_order_on_def linear_order_on_def partial_order_on_def preorder_on_def
      trans_def antisym_def
    by blast
  have "wf {(x, y). (x, y) \<in> Ge' \<and> x \<noteq> y}"
    by (rule Ge'_wo[unfolded well_order_on_def set_diff_eq
      case_prod_eta[symmetric, of "\<lambda>xy. xy \<in> Ge' \<and> xy \<notin> Id"] pair_in_Id_conv, THEN conjunct2])
  moreover have "\<And>y x. (x, y) \<in> Ge' \<and> x \<noteq> y \<longleftrightarrow> y \<noteq> x \<and> (x, y) \<in> Ge'"
    by auto
  ultimately have gt'_wf: "wfP (\<lambda>x y. gt' y x)"
    unfolding wfP_def gt'_def by simp
  have gt'_total: "\<And>x y. gt' y x \<or> gt' x y \<or> y = x"
    using Ge'_wo unfolding gt'_def well_order_on_def linear_order_on_def total_on_def Ge'_fld
    by blast
  have "wfP (\<lambda>xs ys. length ys \<le> n \<and> length xs \<le> n \<and> ext gt' ys xs)"
    using wf_bounded_if_total gt'_total gt'_irrefl gt'_trans gt'_wf by blast
  thus ?thesis
    by (rule wfP_subset) (auto intro: mono[OF gt_imp_gt', THEN predicate2D])
qed

end

end
subsection \<open>Lexicographic Extension\<close>
(* Left-to-right lexicographic extension of gt.  Note lexext_Nil: any
   nonempty list is greater than the empty list, so a proper "extension to
   the right" also counts as greater. *)
inductive lexext :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool" for gt where
  lexext_Nil: "lexext gt (y # ys) []"
| lexext_Cons: "gt y x \<Longrightarrow> lexext gt (y # ys) (x # xs)"
| lexext_Cons_eq: "lexext gt ys xs \<Longrightarrow> lexext gt (x # ys) (x # xs)"

(* Recursive characterisation used as default simp rules. *)
lemma lexext_simps[simp]:
  "lexext gt ys [] \<longleftrightarrow> ys \<noteq> []"
  "\<not> lexext gt [] xs"
  "lexext gt (y # ys) (x # xs) \<longleftrightarrow> gt y x \<or> x = y \<and> lexext gt ys xs"
proof
  show "lexext gt ys [] \<Longrightarrow> (ys \<noteq> [])"
    by (metis lexext.cases list.distinct(1))
next
  show "ys \<noteq> [] \<Longrightarrow> lexext gt ys []"
    by (metis lexext_Nil list.exhaust)
next
  show "\<not> lexext gt [] xs"
    using lexext.cases by auto
next
  show "lexext gt (y # ys) (x # xs) = (gt y x \<or> x = y \<and> lexext gt ys xs)"
  proof -
    (* Both implications are shown separately: the forward direction by
       rule inversion, the backward one by the introduction rules. *)
    have fwdd: "lexext gt (y # ys) (x # xs) \<longrightarrow> gt y x \<or> x = y \<and> lexext gt ys xs"
    proof
      assume "lexext gt (y # ys) (x # xs)"
      thus "gt y x \<or> x = y \<and> lexext gt ys xs"
        using lexext.cases by blast
    qed
    have backd: "gt y x \<or> x = y \<and> lexext gt ys xs \<longrightarrow> lexext gt (y # ys) (x # xs)"
      by (simp add: lexext_Cons lexext_Cons_eq)
    show "lexext gt (y # ys) (x # xs) = (gt y x \<or> x = y \<and> lexext gt ys xs)"
      using fwdd backd by blast
  qed
qed
(* lexext satisfies the ext locale axioms: monotonicity, map-compatibility,
   and irreflexivity — each by a straightforward list induction. *)
lemma lexext_mono_strong:
  assumes
    "\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt' y x" and
    "lexext gt ys xs"
  shows "lexext gt' ys xs"
  using assms by (induct ys xs rule: list_induct2') auto

lemma lexext_map_strong:
  "(\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt (f y) (f x)) \<Longrightarrow> lexext gt ys xs \<Longrightarrow>
   lexext gt (map f ys) (map f xs)"
  by (induct ys xs rule: list_induct2') auto

lemma lexext_irrefl:
  assumes "\<forall>x \<in> set xs. \<not> gt x x"
  shows "\<not> lexext gt xs xs"
  using assms by (induct xs) auto
(* Transitivity of lexext assuming transitivity of gt only on the elements
   occurring in the three lists.  Nested induction on zs and ys; the inner
   case analysis distinguishes the four ways the two comparisons can hold
   (head-decrease vs. head-equality on each side). *)
lemma lexext_trans_strong:
  assumes
    "\<forall>z \<in> set zs. \<forall>y \<in> set ys. \<forall>x \<in> set xs. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x" and
    "lexext gt zs ys" and "lexext gt ys xs"
  shows "lexext gt zs xs"
  using assms
proof (induct zs arbitrary: ys xs)
  case (Cons z zs)
  note zs_trans = this(1)
  show ?case
    using Cons(2-4)
  proof (induct ys arbitrary: xs rule: list.induct)
    case (Cons y ys)
    note ys_trans = this(1) and gt_trans = this(2) and zzs_gt_yys = this(3) and yys_gt_xs = this(4)
    show ?case
    proof (cases xs)
      case xs: (Cons x xs)
      thus ?thesis
      proof (unfold xs)
        note yys_gt_xxs = yys_gt_xs[unfolded xs]
        note gt_trans = gt_trans[unfolded xs]
        let ?case = "lexext gt (z # zs) (x # xs)"
        (* Case 1: both heads strictly decrease — combine via gt-transitivity. *)
        {
          assume "gt z y" and "gt y x"
          hence ?case
            using gt_trans by simp
        }
        moreover
        (* Case 2: z > y and x = y — head decrease carries over directly. *)
        {
          assume "gt z y" and "x = y"
          hence ?case
            by simp
        }
        moreover
        (* Case 3: y = z and y > x — symmetric to case 2. *)
        {
          assume "y = z" and "gt y x"
          hence ?case
            by simp
        }
        moreover
        (* Case 4: both heads equal — recurse into the tails via the outer IH. *)
        {
          assume
            y_eq_z: "y = z" and
            zs_gt_ys: "lexext gt zs ys" and
            x_eq_y: "x = y" and
            ys_gt_xs: "lexext gt ys xs"
          have "lexext gt zs xs"
            by (rule zs_trans[OF _ zs_gt_ys ys_gt_xs]) (meson gt_trans[simplified])
          hence ?case
            by (simp add: x_eq_y y_eq_z)
        }
        ultimately show ?case
          using zzs_gt_yys yys_gt_xxs by force
      qed
    qed auto
  qed auto
qed auto
(* Remaining locale obligations for lexext: snoc growth, compatibility with
   a shared head or context, singleton characterisation, totality, and the
   head-or-tail decomposition. *)
lemma lexext_snoc: "lexext gt (xs @ [x]) xs"
  by (induct xs) auto

lemmas lexext_compat_cons = lexext_Cons_eq

(* Snoc-compatibility needs equal lengths: otherwise the longer-prefix rule
   (lexext_Nil) could be the reason for lexext gt ys xs. *)
lemma lexext_compat_snoc_if_same_length:
  assumes "length ys = length xs" and "lexext gt ys xs"
  shows "lexext gt (ys @ [x]) (xs @ [x])"
  using assms(2,1) by (induct rule: lexext.induct) auto

lemma lexext_compat_list: "gt y x \<Longrightarrow> lexext gt (xs @ y # xs') (xs @ x # xs')"
  by (induct xs) auto

lemma lexext_singleton: "lexext gt [y] [x] \<longleftrightarrow> gt y x"
  by simp

lemma lexext_total: "(\<forall>y \<in> B. \<forall>x \<in> A. gt y x \<or> gt x y \<or> y = x) \<Longrightarrow> ys \<in> lists B \<Longrightarrow> xs \<in> lists A \<Longrightarrow>
  lexext gt ys xs \<or> lexext gt xs ys \<or> ys = xs"
  by (induct ys xs rule: list_induct2') auto

lemma lexext_hd_or_tl: "lexext gt (y # ys) (x # xs) \<Longrightarrow> gt y x \<or> lexext gt ys xs"
  by auto
(* Register lexext as an instance of the applicable locales.  Note that
   ext_compat_snoc and ext_wf are absent: lexext is not snoc-compatible in
   general and not well-founded without a length bound. *)
interpretation lexext: ext lexext
  by standard (fact lexext_mono_strong, rule lexext_map_strong, metis in_listsD)

interpretation lexext: ext_irrefl_trans_strong lexext
  by standard (fact lexext_irrefl, fact lexext_trans_strong)

interpretation lexext: ext_snoc lexext
  by standard (fact lexext_snoc)

interpretation lexext: ext_compat_cons lexext
  by standard (fact lexext_compat_cons)

interpretation lexext: ext_compat_list lexext
  by standard (rule lexext_compat_list)

interpretation lexext: ext_singleton lexext
  by standard (rule lexext_singleton)

interpretation lexext: ext_total lexext
  by standard (fact lexext_total)

interpretation lexext: ext_hd_or_tl lexext
  by standard (rule lexext_hd_or_tl)

interpretation lexext: ext_wf_bounded lexext
  by standard
subsection \<open>Reverse (Right-to-Left) Lexicographic Extension\<close>
(* Right-to-left lexicographic extension, defined by comparing the reversed
   lists; rightmost positions are the most significant. *)
abbreviation lexext_rev :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool" where
  "lexext_rev gt ys xs \<equiv> lexext gt (rev ys) (rev xs)"

lemma lexext_rev_simps[simp]:
  "lexext_rev gt ys [] \<longleftrightarrow> ys \<noteq> []"
  "\<not> lexext_rev gt [] xs"
  "lexext_rev gt (ys @ [y]) (xs @ [x]) \<longleftrightarrow> gt y x \<or> x = y \<and> lexext_rev gt ys xs"
  by simp+

(* Cons on the left end is the LEAST significant position here, hence the
   equal-length side condition and the reversed-looking right-hand side. *)
lemma lexext_rev_cons_cons:
  assumes "length ys = length xs"
  shows "lexext_rev gt (y # ys) (x # xs) \<longleftrightarrow> lexext_rev gt ys xs \<or> ys = xs \<and> gt y x"
  using assms
proof (induct arbitrary: y x rule: rev_induct2)
  case Nil
  thus ?case
    by simp
next
  case (snoc y' ys x' xs)
  show ?case
    using snoc(2) by auto
qed
(* Locale obligations for lexext_rev, mostly transferred from the lexext
   versions through rev (set (rev xs) = set xs, map/rev commute, etc.). *)
lemma lexext_rev_mono_strong:
  assumes
    "\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt' y x" and
    "lexext_rev gt ys xs"
  shows "lexext_rev gt' ys xs"
  using assms by (simp add: lexext_mono_strong)

lemma lexext_rev_map_strong:
  "(\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt (f y) (f x)) \<Longrightarrow> lexext_rev gt ys xs \<Longrightarrow>
   lexext_rev gt (map f ys) (map f xs)"
  by (simp add: lexext_map_strong rev_map)

lemma lexext_rev_irrefl:
  assumes "\<forall>x \<in> set xs. \<not> gt x x"
  shows "\<not> lexext_rev gt xs xs"
  using assms by (simp add: lexext_irrefl)

lemma lexext_rev_trans_strong:
  assumes
    "\<forall>z \<in> set zs. \<forall>y \<in> set ys. \<forall>x \<in> set xs. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x" and
    "lexext_rev gt zs ys" and "lexext_rev gt ys xs"
  shows "lexext_rev gt zs xs"
  using assms(1) lexext_trans_strong[OF _ assms(2,3), unfolded set_rev] by sat

(* Cons/snoc compatibility swap roles relative to plain lexext. *)
lemma lexext_rev_compat_cons_if_same_length:
  assumes "length ys = length xs" and "lexext_rev gt ys xs"
  shows "lexext_rev gt (x # ys) (x # xs)"
  using assms by (simp add: lexext_compat_snoc_if_same_length)

lemma lexext_rev_compat_snoc: "lexext_rev gt ys xs \<Longrightarrow> lexext_rev gt (ys @ [x]) (xs @ [x])"
  by (simp add: lexext_compat_cons)

lemma lexext_rev_compat_list: "gt y x \<Longrightarrow> lexext_rev gt (xs @ y # xs') (xs @ x # xs')"
  by (induct xs' rule: rev_induct) auto

lemma lexext_rev_singleton: "lexext_rev gt [y] [x] \<longleftrightarrow> gt y x"
  by simp

lemma lexext_rev_total:
  "(\<forall>y \<in> B. \<forall>x \<in> A. gt y x \<or> gt x y \<or> y = x) \<Longrightarrow> ys \<in> lists B \<Longrightarrow> xs \<in> lists A \<Longrightarrow>
   lexext_rev gt ys xs \<or> lexext_rev gt xs ys \<or> ys = xs"
  by (rule lexext_total[of _ _ _ "rev ys" "rev xs", simplified])

lemma lexext_rev_hd_or_tl:
  assumes
    "length ys = length xs" and
    "lexext_rev gt (y # ys) (x # xs)"
  shows "gt y x \<or> lexext_rev gt ys xs"
  using assms lexext_rev_cons_cons by fastforce
(* Locale instances for lexext_rev.  Compared with lexext, ext_snoc and
   ext_compat_cons are missing (they fail for the reversed orientation);
   ext_compat_snoc holds instead. *)
interpretation lexext_rev: ext lexext_rev
  by standard (fact lexext_rev_mono_strong, rule lexext_rev_map_strong, metis in_listsD)

interpretation lexext_rev: ext_irrefl_trans_strong lexext_rev
  by standard (fact lexext_rev_irrefl, fact lexext_rev_trans_strong)

interpretation lexext_rev: ext_compat_snoc lexext_rev
  by standard (fact lexext_rev_compat_snoc)

interpretation lexext_rev: ext_compat_list lexext_rev
  by standard (rule lexext_rev_compat_list)

interpretation lexext_rev: ext_singleton lexext_rev
  by standard (rule lexext_rev_singleton)

interpretation lexext_rev: ext_total lexext_rev
  by standard (fact lexext_rev_total)

interpretation lexext_rev: ext_hd_or_tl lexext_rev
  by standard (rule lexext_rev_hd_or_tl)

interpretation lexext_rev: ext_wf_bounded lexext_rev
  by standard
subsection \<open>Generic Length Extension\<close>
(* Generic length-first combinator: a longer list is greater outright;
   equal-length lists are compared by the supplied list order gts.  All the
   locale obligations reduce to the corresponding property of gts at equal
   lengths, proved uniformly by unfolding the definition. *)
definition lenext :: "('a list \<Rightarrow> 'a list \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool" where
  "lenext gts ys xs \<longleftrightarrow> length ys > length xs \<or> length ys = length xs \<and> gts ys xs"

lemma
  lenext_mono_strong: "(gts ys xs \<Longrightarrow> gts' ys xs) \<Longrightarrow> lenext gts ys xs \<Longrightarrow> lenext gts' ys xs" and
  lenext_map_strong: "(length ys = length xs \<Longrightarrow> gts ys xs \<Longrightarrow> gts (map f ys) (map f xs)) \<Longrightarrow>
    lenext gts ys xs \<Longrightarrow> lenext gts (map f ys) (map f xs)" and
  lenext_irrefl: "\<not> gts xs xs \<Longrightarrow> \<not> lenext gts xs xs" and
  lenext_trans: "(gts zs ys \<Longrightarrow> gts ys xs \<Longrightarrow> gts zs xs) \<Longrightarrow> lenext gts zs ys \<Longrightarrow> lenext gts ys xs \<Longrightarrow>
    lenext gts zs xs" and
  lenext_snoc: "lenext gts (xs @ [x]) xs" and
  lenext_compat_cons: "(length ys = length xs \<Longrightarrow> gts ys xs \<Longrightarrow> gts (x # ys) (x # xs)) \<Longrightarrow>
    lenext gts ys xs \<Longrightarrow> lenext gts (x # ys) (x # xs)" and
  lenext_compat_snoc: "(length ys = length xs \<Longrightarrow> gts ys xs \<Longrightarrow> gts (ys @ [x]) (xs @ [x])) \<Longrightarrow>
    lenext gts ys xs \<Longrightarrow> lenext gts (ys @ [x]) (xs @ [x])" and
  lenext_compat_list: "gts (xs @ y # xs') (xs @ x # xs') \<Longrightarrow>
    lenext gts (xs @ y # xs') (xs @ x # xs')" and
  lenext_singleton: "lenext gts [y] [x] \<longleftrightarrow> gts [y] [x]" and
  lenext_total: "(gts ys xs \<or> gts xs ys \<or> ys = xs) \<Longrightarrow>
    lenext gts ys xs \<or> lenext gts xs ys \<or> ys = xs" and
  lenext_hd_or_tl: "(length ys = length xs \<Longrightarrow> gts (y # ys) (x # xs) \<Longrightarrow> gt y x \<or> gts ys xs) \<Longrightarrow>
    lenext gts (y # ys) (x # xs) \<Longrightarrow> gt y x \<or> lenext gts ys xs"
  unfolding lenext_def by auto
subsection \<open>Length-Lexicographic Extension\<close>
(* Length-lexicographic extension: length first, then left-to-right
   lexicographic comparison.  Unlike plain lexext, this extension is
   well-founded whenever gt is (len_lexext_wf below, via HOL's lenlex). *)
abbreviation len_lexext :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool" where
  "len_lexext gt \<equiv> lenext (lexext gt)"

lemma len_lexext_mono_strong:
  "(\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt' y x) \<Longrightarrow> len_lexext gt ys xs \<Longrightarrow> len_lexext gt' ys xs"
  by (rule lenext_mono_strong[OF lexext_mono_strong])

lemma len_lexext_map_strong:
  "(\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt (f y) (f x)) \<Longrightarrow> len_lexext gt ys xs \<Longrightarrow>
   len_lexext gt (map f ys) (map f xs)"
  by (rule lenext_map_strong) (metis lexext_map_strong)

lemma len_lexext_irrefl: "(\<forall>x \<in> set xs. \<not> gt x x) \<Longrightarrow> \<not> len_lexext gt xs xs"
  by (rule lenext_irrefl[OF lexext_irrefl])

lemma len_lexext_trans_strong:
  "(\<forall>z \<in> set zs. \<forall>y \<in> set ys. \<forall>x \<in> set xs. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x) \<Longrightarrow> len_lexext gt zs ys \<Longrightarrow>
   len_lexext gt ys xs \<Longrightarrow> len_lexext gt zs xs"
  by (rule lenext_trans[OF lexext_trans_strong])

lemma len_lexext_snoc: "len_lexext gt (xs @ [x]) xs"
  by (rule lenext_snoc)

lemma len_lexext_compat_cons: "len_lexext gt ys xs \<Longrightarrow> len_lexext gt (x # ys) (x # xs)"
  by (intro lenext_compat_cons lexext_compat_cons)

lemma len_lexext_compat_snoc: "len_lexext gt ys xs \<Longrightarrow> len_lexext gt (ys @ [x]) (xs @ [x])"
  by (intro lenext_compat_snoc lexext_compat_snoc_if_same_length)

lemma len_lexext_compat_list: "gt y x \<Longrightarrow> len_lexext gt (xs @ y # xs') (xs @ x # xs')"
  by (intro lenext_compat_list lexext_compat_list)

lemma len_lexext_singleton[simp]: "len_lexext gt [y] [x] \<longleftrightarrow> gt y x"
  by (simp only: lenext_singleton lexext_singleton)

lemma len_lexext_total: "(\<forall>y \<in> B. \<forall>x \<in> A. gt y x \<or> gt x y \<or> y = x) \<Longrightarrow> ys \<in> lists B \<Longrightarrow> xs \<in> lists A \<Longrightarrow>
  len_lexext gt ys xs \<or> len_lexext gt xs ys \<or> ys = xs"
  by (rule lenext_total[OF lexext_total])

(* Agreement with HOL's lenlex yields well-foundedness for free. *)
lemma len_lexext_iff_lenlex: "len_lexext gt ys xs \<longleftrightarrow> (xs, ys) \<in> lenlex {(x, y). gt y x}"
proof -
  {
    assume "length xs = length ys"
    hence "lexext gt ys xs \<longleftrightarrow> (xs, ys) \<in> lex {(x, y). gt y x}"
      by (induct xs ys rule: list_induct2) auto
  }
  thus ?thesis
    unfolding lenext_def lenlex_conv by auto
qed

lemma len_lexext_wf: "wfP (\<lambda>x y. gt y x) \<Longrightarrow> wfP (\<lambda>xs ys. len_lexext gt ys xs)"
  unfolding wfP_def len_lexext_iff_lenlex by (simp add: wf_lenlex)

lemma len_lexext_hd_or_tl: "len_lexext gt (y # ys) (x # xs) \<Longrightarrow> gt y x \<or> len_lexext gt ys xs"
  using lenext_hd_or_tl lexext_hd_or_tl by metis
(* len_lexext satisfies every locale in the hierarchy, including ext_wf. *)
interpretation len_lexext: ext len_lexext
  by standard (fact len_lexext_mono_strong, rule len_lexext_map_strong, metis in_listsD)

interpretation len_lexext: ext_irrefl_trans_strong len_lexext
  by standard (fact len_lexext_irrefl, fact len_lexext_trans_strong)

interpretation len_lexext: ext_snoc len_lexext
  by standard (fact len_lexext_snoc)

interpretation len_lexext: ext_compat_cons len_lexext
  by standard (fact len_lexext_compat_cons)

interpretation len_lexext: ext_compat_snoc len_lexext
  by standard (fact len_lexext_compat_snoc)

interpretation len_lexext: ext_compat_list len_lexext
  by standard (rule len_lexext_compat_list)

interpretation len_lexext: ext_singleton len_lexext
  by standard (rule len_lexext_singleton)

interpretation len_lexext: ext_total len_lexext
  by standard (fact len_lexext_total)

interpretation len_lexext: ext_wf len_lexext
  by standard (fact len_lexext_wf)

interpretation len_lexext: ext_hd_or_tl len_lexext
  by standard (rule len_lexext_hd_or_tl)

interpretation len_lexext: ext_wf_bounded len_lexext
  by standard
subsection \<open>Reverse (Right-to-Left) Length-Lexicographic Extension\<close>
(* Length-first comparison with right-to-left lexicographic tie-breaking.
   Properties transfer from lenext and lexext_rev; well-foundedness goes
   through the rev isomorphism with len_lexext. *)
abbreviation len_lexext_rev :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool" where
  "len_lexext_rev gt \<equiv> lenext (lexext_rev gt)"

lemma len_lexext_rev_mono_strong:
  "(\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt' y x) \<Longrightarrow> len_lexext_rev gt ys xs \<Longrightarrow> len_lexext_rev gt' ys xs"
  by (rule lenext_mono_strong) (rule lexext_rev_mono_strong)

lemma len_lexext_rev_map_strong:
  "(\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt (f y) (f x)) \<Longrightarrow> len_lexext_rev gt ys xs \<Longrightarrow>
   len_lexext_rev gt (map f ys) (map f xs)"
  by (rule lenext_map_strong) (rule lexext_rev_map_strong)

lemma len_lexext_rev_irrefl: "(\<forall>x \<in> set xs. \<not> gt x x) \<Longrightarrow> \<not> len_lexext_rev gt xs xs"
  by (rule lenext_irrefl) (rule lexext_rev_irrefl)

lemma len_lexext_rev_trans_strong:
  "(\<forall>z \<in> set zs. \<forall>y \<in> set ys. \<forall>x \<in> set xs. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x) \<Longrightarrow> len_lexext_rev gt zs ys \<Longrightarrow>
   len_lexext_rev gt ys xs \<Longrightarrow> len_lexext_rev gt zs xs"
  by (rule lenext_trans) (rule lexext_rev_trans_strong)

lemma len_lexext_rev_snoc: "len_lexext_rev gt (xs @ [x]) xs"
  by (rule lenext_snoc)

(* The length guard makes BOTH cons and snoc compatibility available here. *)
lemma len_lexext_rev_compat_cons: "len_lexext_rev gt ys xs \<Longrightarrow> len_lexext_rev gt (x # ys) (x # xs)"
  by (intro lenext_compat_cons lexext_rev_compat_cons_if_same_length)

lemma len_lexext_rev_compat_snoc: "len_lexext_rev gt ys xs \<Longrightarrow> len_lexext_rev gt (ys @ [x]) (xs @ [x])"
  by (intro lenext_compat_snoc lexext_rev_compat_snoc)

lemma len_lexext_rev_compat_list: "gt y x \<Longrightarrow> len_lexext_rev gt (xs @ y # xs') (xs @ x # xs')"
  by (intro lenext_compat_list lexext_rev_compat_list)

lemma len_lexext_rev_singleton[simp]: "len_lexext_rev gt [y] [x] \<longleftrightarrow> gt y x"
  by (simp only: lenext_singleton lexext_rev_singleton)

lemma len_lexext_rev_total: "(\<forall>y \<in> B. \<forall>x \<in> A. gt y x \<or> gt x y \<or> y = x) \<Longrightarrow> ys \<in> lists B \<Longrightarrow>
  xs \<in> lists A \<Longrightarrow> len_lexext_rev gt ys xs \<or> len_lexext_rev gt xs ys \<or> ys = xs"
  by (rule lenext_total[OF lexext_rev_total])

lemma len_lexext_rev_iff_len_lexext: "len_lexext_rev gt ys xs \<longleftrightarrow> len_lexext gt (rev ys) (rev xs)"
  unfolding lenext_def by simp

lemma len_lexext_rev_wf: "wfP (\<lambda>x y. gt y x) \<Longrightarrow> wfP (\<lambda>xs ys. len_lexext_rev gt ys xs)"
  unfolding len_lexext_rev_iff_len_lexext
  by (rule wfP_app[of "\<lambda>xs ys. len_lexext gt ys xs" rev, simplified]) (rule len_lexext_wf)

lemma len_lexext_rev_hd_or_tl:
  "len_lexext_rev gt (y # ys) (x # xs) \<Longrightarrow> gt y x \<or> len_lexext_rev gt ys xs"
  using lenext_hd_or_tl lexext_rev_hd_or_tl by metis
(* Register len_lexext_rev as an instance of the abstract term-order-extension
   locales, discharging each locale assumption with the lemma proved above. *)
interpretation len_lexext_rev: ext len_lexext_rev
by standard (fact len_lexext_rev_mono_strong, rule len_lexext_rev_map_strong, metis in_listsD)
interpretation len_lexext_rev: ext_irrefl_trans_strong len_lexext_rev
by standard (fact len_lexext_rev_irrefl, fact len_lexext_rev_trans_strong)
interpretation len_lexext_rev: ext_snoc len_lexext_rev
by standard (fact len_lexext_rev_snoc)
interpretation len_lexext_rev: ext_compat_cons len_lexext_rev
by standard (fact len_lexext_rev_compat_cons)
interpretation len_lexext_rev: ext_compat_snoc len_lexext_rev
by standard (fact len_lexext_rev_compat_snoc)
interpretation len_lexext_rev: ext_compat_list len_lexext_rev
by standard (rule len_lexext_rev_compat_list)
interpretation len_lexext_rev: ext_singleton len_lexext_rev
by standard (rule len_lexext_rev_singleton)
interpretation len_lexext_rev: ext_total len_lexext_rev
by standard (fact len_lexext_rev_total)
interpretation len_lexext_rev: ext_wf len_lexext_rev
by standard (fact len_lexext_rev_wf)
interpretation len_lexext_rev: ext_hd_or_tl len_lexext_rev
by standard (rule len_lexext_rev_hd_or_tl)
(* ext_wf_bounded has no extra assumptions beyond the locales above. *)
interpretation len_lexext_rev: ext_wf_bounded len_lexext_rev
by standard
subsection \<open>Dershowitz--Manna Multiset Extension\<close>
(* Dershowitz--Manna multiset extension, phrased on lists via their multisets:
   ys > xs iff some nonempty sub-multiset Y of (mset ys) can be removed and
   replaced by a multiset X in which every element is dominated by some
   element of Y, yielding (mset xs). *)
definition msetext_dersh where
"msetext_dersh gt ys xs = (let N = mset ys; M = mset xs in
(\<exists>Y X. Y \<noteq> {#} \<and> Y \<subseteq># N \<and> M = (N - Y) + X \<and> (\<forall>x. x \<in># X \<longrightarrow> (\<exists>y. y \<in># Y \<and> gt y x))))"
text \<open>
The following proof is based on that of @{thm[source] less_multiset\<^sub>D\<^sub>M_imp_mult}.
\<close>
(* Soundness w.r.t. the standard multiset order: msetext_dersh over elements
   of A implies membership in mult of the base relation restricted to A. *)
lemma msetext_dersh_imp_mult_rel:
assumes
ys_a: "ys \<in> lists A" and xs_a: "xs \<in> lists A" and
ys_gt_xs: "msetext_dersh gt ys xs"
shows "(mset xs, mset ys) \<in> mult {(x, y). x \<in> A \<and> y \<in> A \<and> gt y x}"
proof -
(* Unpack the Dershowitz--Manna witnesses Y (removed) and X (added). *)
obtain Y X where y_nemp: "Y \<noteq> {#}" and y_sub_ys: "Y \<subseteq># mset ys" and
xs_eq: "mset xs = mset ys - Y + X" and ex_y: "\<forall>x. x \<in># X \<longrightarrow> (\<exists>y. y \<in># Y \<and> gt y x)"
using ys_gt_xs[unfolded msetext_dersh_def Let_def] by blast
(* Strengthen the domination condition with membership in A. *)
have ex_y': "\<forall>x. x \<in># X \<longrightarrow> (\<exists>y. y \<in># Y \<and> x \<in> A \<and> y \<in> A \<and> gt y x)"
using ex_y y_sub_ys xs_eq ys_a xs_a by (metis in_listsD mset_subset_eqD set_mset_mset union_iff)
(* one_step_implies_mult turns the witness pair into a mult step. *)
hence "(mset ys - Y + X, mset ys - Y + Y) \<in> mult {(x, y). x \<in> A \<and> y \<in> A \<and> gt y x}"
using y_nemp y_sub_ys by (intro one_step_implies_mult) (auto simp: Bex_def trans_def)
thus ?thesis
using xs_eq y_sub_ys by (simp add: subset_mset.diff_add)
qed
(* Unrestricted corollary: instantiate the carrier set A with UNIV. *)
lemma msetext_dersh_imp_mult: "msetext_dersh gt ys xs \<Longrightarrow> (mset xs, mset ys) \<in> mult {(x, y). gt y x}"
using msetext_dersh_imp_mult_rel[of _ UNIV] by auto
(* Completeness w.r.t. the multiset order: a mult step chain (over a carrier A
   on which gt is transitive) yields msetext_dersh. Proved by induction on
   the transitive closure underlying mult. *)
lemma mult_imp_msetext_dersh_rel:
assumes
ys_a: "set_mset (mset ys) \<subseteq> A" and xs_a: "set_mset (mset xs) \<subseteq> A" and
in_mult: "(mset xs, mset ys) \<in> mult {(x, y). x \<in> A \<and> y \<in> A \<and> gt y x}" and
trans: "\<forall>z \<in> A. \<forall>y \<in> A. \<forall>x \<in> A. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x"
shows "msetext_dersh gt ys xs"
using in_mult ys_a xs_a unfolding mult_def msetext_dersh_def Let_def
proof induct
(* Base case: a single mult1 step directly provides Y = {#y#} and X. *)
case (base Ys)
then obtain y M0 X where "Ys = M0 + {#y#}" and "mset xs = M0 + X" and "\<forall>a. a \<in># X \<longrightarrow> gt y a"
unfolding mult1_def by auto
thus ?case
by (auto intro: exI[of _ "{#y#}"] exI[of _ X])
next
(* Step case: combine the inductive witnesses (Y, X) for Ys with the mult1
   step Ys -> Zs that replaces a sub-multiset Ya by a dominating element z. *)
case (step Ys Zs)
note ys_zs_in_mult1 = this(2) and ih = this(3) and zs_a = this(4) and xs_a = this(5)
have Ys_a: "set_mset Ys \<subseteq> A"
using ys_zs_in_mult1 zs_a unfolding mult1_def by auto
obtain Y X where y_nemp: "Y \<noteq> {#}" and y_sub_ys: "Y \<subseteq># Ys" and xs_eq: "mset xs = Ys - Y + X" and
ex_y: "\<forall>x. x \<in># X \<longrightarrow> (\<exists>y. y \<in># Y \<and> gt y x)"
using ih[OF Ys_a xs_a] by blast
obtain z M0 Ya where zs_eq: "Zs = M0 + {#z#}" and ys_eq: "Ys = M0 + Ya" and
z_gt: "\<forall>y. y \<in># Ya \<longrightarrow> y \<in> A \<and> z \<in> A \<and> gt z y"
using ys_zs_in_mult1[unfolded mult1_def] by auto
(* New witnesses for Zs: remove ?Za, add ?Xa. *)
let ?Za = "Y - Ya + {#z#}"
let ?Xa = "X + Ya + (Y - Ya) - Y"
have xa_sub_x_ya: "set_mset ?Xa \<subseteq> set_mset (X + Ya)"
by (metis diff_subset_eq_self in_diffD subsetI subset_mset.diff_diff_right)
have x_a: "set_mset X \<subseteq> A"
using xs_a xs_eq by auto
have ya_a: "set_mset Ya \<subseteq> A"
by (simp add: subsetI z_gt)
(* Each element of X + Ya is dominated by some element of ?Za, using
   transitivity through z where the old witness lies inside Ya. *)
have ex_y': "\<exists>y. y \<in># Y - Ya + {#z#} \<and> gt y x" if x_in: "x \<in># X + Ya" for x
proof (cases "x \<in># X")
case True
then obtain y where y_in: "y \<in># Y" and y_gt_x: "gt y x"
using ex_y by blast
show ?thesis
proof (cases "y \<in># Ya")
case False
hence "y \<in># Y - Ya + {#z#}"
using y_in by fastforce
thus ?thesis
using y_gt_x by blast
next
case True
hence "y \<in> A" and "z \<in> A" and "gt z y"
using z_gt by blast+
hence "gt z x"
using trans y_gt_x x_a ya_a x_in by (meson subsetCE union_iff)
thus ?thesis
by auto
qed
next
case False
hence "x \<in># Ya"
using x_in by auto
hence "x \<in> A" and "z \<in> A" and "gt z x"
using z_gt by blast+
thus ?thesis
by auto
qed
show ?case
proof (rule exI[of _ ?Za], rule exI[of _ ?Xa], intro conjI)
show "Y - Ya + {#z#} \<subseteq># Zs"
using mset_subset_eq_mono_add subset_eq_diff_conv y_sub_ys ys_eq zs_eq by fastforce
next
show "mset xs = Zs - (Y - Ya + {#z#}) + (X + Ya + (Y - Ya) - Y)"
unfolding xs_eq ys_eq zs_eq by (auto simp: multiset_eq_iff)
next
show "\<forall>x. x \<in># X + Ya + (Y - Ya) - Y \<longrightarrow> (\<exists>y. y \<in># Y - Ya + {#z#} \<and> gt y x)"
using ex_y' xa_sub_x_ya by blast
qed auto
qed
(* Stability under mapping: applying an order-preserving f to both lists
   preserves msetext_dersh; the witnesses are simply mapped through f. *)
lemma msetext_dersh_map_strong:
assumes
compat_f: "\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt (f y) (f x)" and
ys_gt_xs: "msetext_dersh gt ys xs"
shows "msetext_dersh gt (map f ys) (map f xs)"
proof -
obtain Y X where
y_nemp: "Y \<noteq> {#}" and y_sub_ys: "Y \<subseteq># mset ys" and xs_eq: "mset xs = mset ys - Y + X" and
ex_y: "\<forall>x. x \<in># X \<longrightarrow> (\<exists>y. y \<in># Y \<and> gt y x)"
using ys_gt_xs[unfolded msetext_dersh_def Let_def mset_map] by blast
have x_sub_xs: "X \<subseteq># mset xs"
using xs_eq by simp
(* The mapped witnesses for the image lists. *)
let ?fY = "image_mset f Y"
let ?fX = "image_mset f X"
show ?thesis
unfolding msetext_dersh_def Let_def mset_map
proof (intro exI conjI)
show "image_mset f (mset xs) = image_mset f (mset ys) - ?fY + ?fX"
using xs_eq[THEN arg_cong, of "image_mset f"] y_sub_ys by (metis image_mset_Diff image_mset_union)
next
(* Skolemize the domination condition into a choice function y. *)
obtain y where y: "\<forall>x. x \<in># X \<longrightarrow> y x \<in># Y \<and> gt (y x) x"
using ex_y by moura
show "\<forall>fx. fx \<in># ?fX \<longrightarrow> (\<exists>fy. fy \<in># ?fY \<and> gt fy fx)"
proof (intro allI impI)
fix fx
assume "fx \<in># ?fX"
then obtain x where fx: "fx = f x" and x_in: "x \<in># X"
by auto
hence y_in: "y x \<in># Y" and y_gt: "gt (y x) x"
using y[rule_format, OF x_in] by blast+
(* compat_f transports the domination through f. *)
hence "f (y x) \<in># ?fY \<and> gt (f (y x)) (f x)"
using compat_f y_sub_ys x_sub_xs x_in
by (metis image_eqI in_image_mset mset_subset_eqD set_mset_mset)
thus "\<exists>fy. fy \<in># ?fY \<and> gt fy fx"
unfolding fx by auto
qed
qed (auto simp: y_nemp y_sub_ys image_mset_subseteq_mono)
qed
(* Transitivity of msetext_dersh over a carrier A on which gt is transitive.
   Proved by translating both hypotheses into the multiset order mult
   (which is transitive by construction) and back. *)
lemma msetext_dersh_trans:
assumes
zs_a: "zs \<in> lists A" and
ys_a: "ys \<in> lists A" and
xs_a: "xs \<in> lists A" and
trans: "\<forall>z \<in> A. \<forall>y \<in> A. \<forall>x \<in> A. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x" and
zs_gt_ys: "msetext_dersh gt zs ys" and
ys_gt_xs: "msetext_dersh gt ys xs"
shows "msetext_dersh gt zs xs"
proof (rule mult_imp_msetext_dersh_rel[OF _ _ _ trans])
show "set_mset (mset zs) \<subseteq> A"
using zs_a by auto
next
show "set_mset (mset xs) \<subseteq> A"
using xs_a by auto
next
let ?Gt = "{(x, y). x \<in> A \<and> y \<in> A \<and> gt y x}"
have "(mset xs, mset ys) \<in> mult ?Gt"
by (rule msetext_dersh_imp_mult_rel[OF ys_a xs_a ys_gt_xs])
moreover have "(mset ys, mset zs) \<in> mult ?Gt"
by (rule msetext_dersh_imp_mult_rel[OF zs_a ys_a zs_gt_ys])
(* mult is the transitive closure of mult1, hence closed under composition. *)
ultimately show "(mset xs, mset zs) \<in> mult ?Gt"
unfolding mult_def by simp
qed
(* Irreflexivity from transitivity and element-wise irreflexivity: if
   msetext_dersh gt xs xs held, the removed multiset Y would equal the added
   X, and the domination condition would force a cycle inside the finite,
   irreflexive, transitive — hence acyclic — restriction of gt to Y. *)
lemma msetext_dersh_irrefl_from_trans:
assumes
trans: "\<forall>z \<in> set xs. \<forall>y \<in> set xs. \<forall>x \<in> set xs. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x" and
irrefl: "\<forall>x \<in> set xs. \<not> gt x x"
shows "\<not> msetext_dersh gt xs xs"
unfolding msetext_dersh_def Let_def
proof clarify
fix Y X
assume y_nemp: "Y \<noteq> {#}" and y_sub_xs: "Y \<subseteq># mset xs" and xs_eq: "mset xs = mset xs - Y + X" and
ex_y: "\<forall>x. x \<in># X \<longrightarrow> (\<exists>y. y \<in># Y \<and> gt y x)"
(* Removing Y and adding X preserves the multiset, so X = Y. *)
have x_eq_y: "X = Y"
using y_sub_xs xs_eq by (metis diff_union_cancelL subset_mset.diff_add)
let ?Gt = "{(y, x). y \<in># Y \<and> x \<in># Y \<and> gt y x}"
have "?Gt \<subseteq> set_mset Y \<times> set_mset Y"
by auto
hence fin: "finite ?Gt"
by (auto dest!: infinite_super)
moreover have "irrefl ?Gt"
unfolding irrefl_def using irrefl y_sub_xs by (fastforce dest!: set_mset_mono)
moreover have "trans ?Gt"
unfolding trans_def using trans y_sub_xs by (fastforce dest!: set_mset_mono)
(* Finite + irreflexive + transitive implies well-founded, hence acyclic. *)
ultimately have acyc: "acyclic ?Gt"
by (rule finite_irrefl_trans_imp_wf[THEN wf_acyclic])
have fin_y: "finite (set_mset Y)"
using y_sub_xs by simp
(* But every element of Y has a strictly greater element in Y, so the finite
   nonempty Y contains a cycle — contradiction. *)
hence cyc: "\<not> acyclic ?Gt"
proof (rule finite_nonempty_ex_succ_imp_cyclic)
show "\<forall>x \<in># Y. \<exists>y \<in># Y. (y, x) \<in> ?Gt"
using ex_y[unfolded x_eq_y] by auto
qed (auto simp: y_nemp)
show False
using acyc cyc by sat
qed
(* Appending an element yields a strictly larger list: take Y = {#x#}, X = {#}. *)
lemma msetext_dersh_snoc: "msetext_dersh gt (xs @ [x]) xs"
unfolding msetext_dersh_def Let_def
proof (intro exI conjI)
show "mset xs = mset (xs @ [x]) - {#x#} + {#}"
by simp
qed auto
(* Compatibility with prepending the same element: the same witnesses (Y, X)
   for ys > xs also work for (x # ys) > (x # xs). *)
lemma msetext_dersh_compat_cons:
assumes ys_gt_xs: "msetext_dersh gt ys xs"
shows "msetext_dersh gt (x # ys) (x # xs)"
proof -
obtain Y X where
y_nemp: "Y \<noteq> {#}" and y_sub_ys: "Y \<subseteq># mset ys" and xs_eq: "mset xs = mset ys - Y + X" and
ex_y: "\<forall>x. x \<in># X \<longrightarrow> (\<exists>y. y \<in># Y \<and> gt y x)"
using ys_gt_xs[unfolded msetext_dersh_def Let_def mset_map] by blast
show ?thesis
unfolding msetext_dersh_def Let_def
proof (intro exI conjI)
show "Y \<subseteq># mset (x # ys)"
using y_sub_ys
by (metis add_mset_add_single mset.simps(2) mset_subset_eq_add_left
subset_mset.add_increasing2)
next
(* Adding x to both sides commutes with the remove-Y-add-X decomposition. *)
show "mset (x # xs) = mset (x # ys) - Y + X"
proof -
have "X + (mset ys - Y) = mset xs"
by (simp add: union_commute xs_eq)
hence "mset (x # xs) = X + (mset (x # ys) - Y)"
by (metis add_mset_add_single mset.simps(2) mset_subset_eq_multiset_union_diff_commute
union_mset_add_mset_right y_sub_ys)
thus ?thesis
by (simp add: union_commute)
qed
qed (auto simp: y_nemp ex_y)
qed
(* Compatibility with appending: msetext_dersh only depends on the multisets,
   so this follows from the cons case. *)
lemma msetext_dersh_compat_snoc: "msetext_dersh gt ys xs \<Longrightarrow> msetext_dersh gt (ys @ [x]) (xs @ [x])"
using msetext_dersh_compat_cons[of gt ys xs x] unfolding msetext_dersh_def by simp

(* Replacing one element by a larger one: witnesses Y = {#y#}, X = {#x#}. *)
lemma msetext_dersh_compat_list:
assumes y_gt_x: "gt y x"
shows "msetext_dersh gt (xs @ y # xs') (xs @ x # xs')"
unfolding msetext_dersh_def Let_def
proof (intro exI conjI)
show "mset (xs @ x # xs') = mset (xs @ y # xs') - {#y#} + {#x#}"
by auto
qed (auto intro: y_gt_x)

(* On singletons, the extension coincides with the base order. *)
lemma msetext_dersh_singleton: "msetext_dersh gt [y] [x] \<longleftrightarrow> gt y x"
unfolding msetext_dersh_def Let_def
by (auto dest: nonempty_subseteq_mset_eq_singleton simp: nonempty_subseteq_mset_iff_singleton)
(* Well-foundedness, inherited from the well-founded multiset order mult via
   msetext_dersh_imp_mult and the mset embedding. *)
lemma msetext_dersh_wf:
assumes wf_gt: "wfP (\<lambda>x y. gt y x)"
shows "wfP (\<lambda>xs ys. msetext_dersh gt ys xs)"
proof (rule wfP_subset, rule wfP_app[of "\<lambda>xs ys. (xs, ys) \<in> mult {(x, y). gt y x}" mset])
show "wfP (\<lambda>xs ys. (xs, ys) \<in> mult {(x, y). gt y x})"
using wf_gt unfolding wfP_def by (auto intro: wf_mult)
next
show "(\<lambda>xs ys. msetext_dersh gt ys xs) \<le> (\<lambda>x y. (mset x, mset y) \<in> mult {(x, y). gt y x})"
using msetext_dersh_imp_mult by blast
qed
(* Register msetext_dersh as an instance of the abstract extension locales,
   discharging each assumption with the corresponding lemma above. *)
interpretation msetext_dersh: ext msetext_dersh
by standard (fact msetext_dersh_mono_strong, rule msetext_dersh_map_strong, metis in_listsD)
interpretation msetext_dersh: ext_trans_before_irrefl msetext_dersh
by standard (fact msetext_dersh_trans, fact msetext_dersh_irrefl_from_trans)
interpretation msetext_dersh: ext_snoc msetext_dersh
by standard (fact msetext_dersh_snoc)
interpretation msetext_dersh: ext_compat_cons msetext_dersh
by standard (fact msetext_dersh_compat_cons)
interpretation msetext_dersh: ext_compat_snoc msetext_dersh
by standard (fact msetext_dersh_compat_snoc)
interpretation msetext_dersh: ext_compat_list msetext_dersh
by standard (rule msetext_dersh_compat_list)
interpretation msetext_dersh: ext_singleton msetext_dersh
by standard (rule msetext_dersh_singleton)
interpretation msetext_dersh: ext_wf msetext_dersh
by standard (fact msetext_dersh_wf)
subsection \<open>Huet--Oppen Multiset Extension\<close>
(* Huet--Oppen multiset extension, phrased on lists via their multisets:
   ys > xs iff the multisets differ and every element occurring more often
   in xs is dominated by some element occurring more often in ys. *)
definition msetext_huet where
"msetext_huet gt ys xs = (let N = mset ys; M = mset xs in
M \<noteq> N \<and> (\<forall>x. count M x > count N x \<longrightarrow> (\<exists>y. gt y x \<and> count N y > count M y)))"
(* A Huet--Oppen-larger list has some element with strictly larger count:
   the multisets differ at some element, and if xs had the surplus there,
   the definition supplies a dominating element with surplus in ys. *)
lemma msetext_huet_imp_count_gt:
assumes ys_gt_xs: "msetext_huet gt ys xs"
shows "\<exists>x. count (mset ys) x > count (mset xs) x"
proof -
obtain x where "count (mset ys) x \<noteq> count (mset xs) x"
using ys_gt_xs[unfolded msetext_huet_def Let_def] by (fastforce intro: multiset_eqI)
moreover
{
(* Surplus on the xs side: the definition yields a ys-surplus witness. *)
assume "count (mset ys) x < count (mset xs) x"
hence ?thesis
using ys_gt_xs[unfolded msetext_huet_def Let_def] by blast
}
moreover
{
(* Surplus on the ys side: x itself is the witness. *)
assume "count (mset ys) x > count (mset xs) x"
hence ?thesis
by fast
}
ultimately show ?thesis
by fastforce
qed
(* Huet--Oppen implies Dershowitz--Manna: take the symmetric multiset
   differences as witnesses (Y = ys - xs removed, X = xs - ys added). *)
lemma msetext_huet_imp_dersh:
assumes huet: "msetext_huet gt ys xs"
shows "msetext_dersh gt ys xs"
proof (unfold msetext_dersh_def Let_def, intro exI conjI)
let ?X = "mset xs - mset ys"
let ?Y = "mset ys - mset xs"
(* Nonemptiness of Y comes from the count surplus guaranteed above. *)
show "?Y \<noteq> {#}"
by (metis msetext_huet_imp_count_gt[OF huet] empty_iff in_diff_count set_mset_empty)
show "?Y \<subseteq># mset ys"
by auto
show "mset xs = mset ys - ?Y + ?X"
by (metis add.commute diff_intersect_right_idem multiset_inter_def subset_mset.inf.cobounded2
subset_mset.le_imp_diff_is_add)
(* Domination of X by Y is exactly the second conjunct of msetext_huet. *)
show "\<forall>x. x \<in># ?X \<longrightarrow> (\<exists>y. y \<in># ?Y \<and> gt y x)"
using huet[unfolded msetext_huet_def Let_def, THEN conjunct2] by (meson in_diff_count)
qed
text \<open>
The following proof is based on that of @{thm[source] mult_imp_less_multiset\<^sub>H\<^sub>O}.
\<close>
(* Completeness of Huet--Oppen w.r.t. the multiset order, for an irreflexive
   and transitive base order. Induction on the trancl underlying mult; the
   step case tracks counts through the mult1 replacement Ys -> Zs = M0 + {#a#}
   with Ys = M0 + K and a dominating K. *)
lemma mult_imp_msetext_huet:
assumes
irrefl: "irreflp gt" and trans: "transp gt" and
in_mult: "(mset xs, mset ys) \<in> mult {(x, y). gt y x}"
shows "msetext_huet gt ys xs"
using in_mult unfolding mult_def msetext_huet_def Let_def
proof (induct rule: trancl_induct)
case (base Ys)
thus ?case
using irrefl unfolding irreflp_def msetext_huet_def Let_def mult1_def
by (auto 0 3 split: if_splits)
next
case (step Ys Zs)
(* Antisymmetry is derived once and reused for count comparisons below. *)
have asym[unfolded antisym_def, simplified]: "antisymp gt"
by (rule irreflp_transp_imp_antisymP[OF irrefl trans])
from step(3) have "mset xs \<noteq> Ys" and
**: "\<And>x. count Ys x < count (mset xs) x \<Longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Ys y)"
by blast+
from step(2) obtain M0 a K where
*: "Zs = M0 + {#a#}" "Ys = M0 + K" "a \<notin># K" "\<And>b. b \<in># K \<Longrightarrow> gt a b"
using irrefl unfolding mult1_def irreflp_def by force
(* First conjunct of the goal: the multisets still differ after the step. *)
have "mset xs \<noteq> Zs"
proof (cases "K = {#}")
case True
thus ?thesis
using \<open>mset xs \<noteq> Ys\<close> ** *(1,2) irrefl[unfolded irreflp_def]
by (metis One_nat_def add.comm_neutral count_single diff_union_cancelL lessI
minus_multiset.rep_eq not_add_less2 plus_multiset.rep_eq union_commute zero_less_diff)
next
case False
thus ?thesis
proof -
obtain aa :: "'a \<Rightarrow> 'a" where
f1: "\<forall>a. \<not> count Ys a < count (mset xs) a \<or> gt (aa a) a \<and>
count (mset xs) (aa a) < count Ys (aa a)"
using "**" by moura
have f2: "K + M0 = Ys"
using "*"(2) union_ac(2) by blast
have f3: "\<And>aa. count Zs aa = count M0 aa + count {#a#} aa"
by (simp add: "*"(1))
have f4: "\<And>a. count Ys a = count K a + count M0 a"
using f2 by auto
have f5: "count K a = 0"
by (meson "*"(3) count_inI)
have "Zs - M0 = {#a#}"
using "*"(1) add_diff_cancel_left' by blast
then have f6: "count M0 a < count Zs a"
by (metis in_diff_count union_single_eq_member)
have "\<And>m. count m a = 0 + count m a"
by simp
moreover
{ assume "aa a \<noteq> a"
then have "mset xs = Zs \<and> count Zs (aa a) < count K (aa a) + count M0 (aa a) \<longrightarrow>
count K (aa a) + count M0 (aa a) < count Zs (aa a)"
using f5 f3 f2 f1 "*"(4) asym by (auto dest!: antisympD) }
ultimately show ?thesis
using f6 f5 f4 f1 by (metis less_imp_not_less)
qed
qed
moreover
(* If a has no surplus in Zs over xs, a dominating witness z for a exists
   whose ys-surplus is preserved in Zs. *)
{
assume "count Zs a \<le> count (mset xs) a"
with \<open>a \<notin># K\<close> have "count Ys a < count (mset xs) a" unfolding *(1,2)
by (auto simp add: not_in_iff)
with ** obtain z where z: "gt z a" "count (mset xs) z < count Ys z"
by blast
with * have "count Ys z \<le> count Zs z"
using asym
by (auto simp: intro: count_inI dest: antisympD)
with z have "\<exists>z. gt z a \<and> count (mset xs) z < count Zs z" by auto
}
note count_a = this
(* Second conjunct: every xs-surplus element y has a Zs-surplus dominator,
   by case analysis on y = a, y \<in># K, and the witness's membership in K. *)
{
fix y
assume count_y: "count Zs y < count (mset xs) y"
have "\<exists>x. gt x y \<and> count (mset xs) x < count Zs x"
proof (cases "y = a")
case True
with count_y count_a show ?thesis by auto
next
case False
show ?thesis
proof (cases "y \<in># K")
case True
with *(4) have "gt a y" by simp
then show ?thesis
by (cases "count Zs a \<le> count (mset xs) a",
blast dest: count_a trans[unfolded transp_def, rule_format], auto dest: count_a)
next
case False
with \<open>y \<noteq> a\<close> have "count Zs y = count Ys y" unfolding *(1,2)
by (simp add: not_in_iff)
with count_y ** obtain z where z: "gt z y" "count (mset xs) z < count Ys z" by auto
show ?thesis
proof (cases "z \<in># K")
case True
with *(4) have "gt a z" by simp
with z(1) show ?thesis
by (cases "count Zs a \<le> count (mset xs) a")
(blast dest: count_a not_le_imp_less trans[unfolded transp_def, rule_format])+
next
case False
with \<open>a \<notin># K\<close> have "count Ys z \<le> count Zs z" unfolding *
by (auto simp add: not_in_iff)
with z show ?thesis by auto
qed
qed
qed
}
ultimately show ?case
unfolding msetext_huet_def Let_def by blast
qed
(* For irreflexive and transitive base orders, the two multiset extensions
   coincide (Huet--Oppen implies Dershowitz--Manna unconditionally; the
   converse goes through the multiset order mult). *)
theorem msetext_huet_eq_dersh: "irreflp gt \<Longrightarrow> transp gt \<Longrightarrow> msetext_dersh gt = msetext_huet gt"
using msetext_huet_imp_dersh msetext_dersh_imp_mult mult_imp_msetext_huet by fast

(* Monotonicity: enlarging the base order on the relevant elements preserves
   the Huet--Oppen comparison. *)
lemma msetext_huet_mono_strong:
"(\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt' y x) \<Longrightarrow> msetext_huet gt ys xs \<Longrightarrow> msetext_huet gt' ys xs"
unfolding msetext_huet_def
by (metis less_le_trans mem_Collect_eq not_le not_less0 set_mset_mset[unfolded set_mset_def])
(* Stability under an order-preserving map f over a finite carrier A.
   Unlike the Dershowitz--Manna case, counts of images can collide, so the
   witness must be chosen maximal w.r.t. the (well-founded, since A is
   finite) image order before it can be transported through f. *)
lemma msetext_huet_map:
assumes
fin: "finite A" and
ys_a: "ys \<in> lists A" and xs_a: "xs \<in> lists A" and
irrefl_f: "\<forall>x \<in> A. \<not> gt (f x) (f x)" and
trans_f: "\<forall>z \<in> A. \<forall>y \<in> A. \<forall>x \<in> A. gt (f z) (f y) \<longrightarrow> gt (f y) (f x) \<longrightarrow> gt (f z) (f x)" and
compat_f: "\<forall>y \<in> A. \<forall>x \<in> A. gt y x \<longrightarrow> gt (f y) (f x)" and
ys_gt_xs: "msetext_huet gt ys xs"
shows "msetext_huet gt (map f ys) (map f xs)" (is "msetext_huet _ ?fys ?fxs")
proof -
have irrefl: "\<forall>x \<in> A. \<not> gt x x"
using irrefl_f compat_f by blast
have
ms_xs_ne_ys: "mset xs \<noteq> mset ys" and
ex_gt: "\<forall>x. count (mset ys) x < count (mset xs) x \<longrightarrow>
(\<exists>y. gt y x \<and> count (mset xs) y < count (mset ys) y)"
using ys_gt_xs[unfolded msetext_huet_def Let_def] by blast+
(* Key lemma: any xs-surplus element x has an image-dominating witness whose
   image also has a surplus in map f ys. *)
have ex_y: "\<exists>y. gt (f y) (f x) \<and> count (mset ?fxs) (f y) < count (mset (map f ys)) (f y)"
if cnt_x: "count (mset xs) x > count (mset ys) x" for x
proof -
have x_in_a: "x \<in> A"
using cnt_x xs_a dual_order.strict_trans2 by fastforce
obtain y where y_gt_x: "gt y x" and cnt_y: "count (mset ys) y > count (mset xs) y"
using cnt_x ex_gt by blast
have y_in_a: "y \<in> A"
using cnt_y ys_a dual_order.strict_trans2 by fastforce
(* The image order restricted to the finite A is well-founded, enabling a
   minimal-counterexample (i.e. maximal-witness) choice. *)
have wf_gt_f: "wfP (\<lambda>y x. y \<in> A \<and> x \<in> A \<and> gt (f y) (f x))"
by (rule finite_irreflp_transp_imp_wfp)
(auto elim: trans_f[rule_format] simp: fin irrefl_f Collect_case_prod_Sigma irreflp_def
transp_def)
obtain yy where
fyy_gt_fx: "gt (f yy) (f x)" and
cnt_yy: "count (mset ys) yy > count (mset xs) yy" and
max_yy: "\<forall>y \<in> A. yy \<in> A \<longrightarrow> gt (f y) (f yy) \<longrightarrow> gt (f y) (f x) \<longrightarrow>
count (mset xs) y \<ge> count (mset ys) y"
using wfP_eq_minimal[THEN iffD1, OF wf_gt_f, rule_format,
of y "{y. gt (f y) (f x) \<and> count (mset xs) y < count (mset ys) y}", simplified]
y_gt_x cnt_y
by (metis compat_f not_less x_in_a y_in_a)
have yy_in_a: "yy \<in> A"
using cnt_yy ys_a dual_order.strict_trans2 by fastforce
(* Maximality of yy rules out an image-count collision cancelling its surplus. *)
{
assume "count (mset ?fxs) (f yy) \<ge> count (mset ?fys) (f yy)"
then obtain u where fu_eq_fyy: "f u = f yy" and cnt_u: "count (mset xs) u > count (mset ys) u"
using count_image_mset_le_imp_lt cnt_yy mset_map by (metis (mono_tags))
have u_in_a: "u \<in> A"
using cnt_u xs_a dual_order.strict_trans2 by fastforce
obtain v where v_gt_u: "gt v u" and cnt_v: "count (mset ys) v > count (mset xs) v"
using cnt_u ex_gt by blast
have v_in_a: "v \<in> A"
using cnt_v ys_a dual_order.strict_trans2 by fastforce
have fv_gt_fu: "gt (f v) (f u)"
using v_gt_u compat_f v_in_a u_in_a by blast
hence fv_gt_fyy: "gt (f v) (f yy)"
by (simp only: fu_eq_fyy)
have "gt (f v) (f x)"
using fv_gt_fyy fyy_gt_fx v_in_a yy_in_a x_in_a trans_f by blast
hence False
using max_yy[rule_format, of v] fv_gt_fyy v_in_a yy_in_a cnt_v by linarith
}
thus ?thesis
using fyy_gt_fx leI by blast
qed
show ?thesis
unfolding msetext_huet_def Let_def
proof (intro conjI allI impI)
(* Inequality of image multisets: maps preserve length, so the multisets
   differ at an element whose image witness has a strict surplus. *)
{
assume len_eq: "length xs = length ys"
obtain x where cnt_x: "count (mset xs) x > count (mset ys) x"
using len_eq ms_xs_ne_ys by (metis size_eq_ex_count_lt size_mset)
hence "mset ?fxs \<noteq> mset ?fys"
using ex_y by fastforce
}
thus "mset ?fxs \<noteq> mset (map f ys)"
by (metis length_map size_mset)
next
fix fx
assume cnt_fx: "count (mset ?fxs) fx > count (mset ?fys) fx"
then obtain x where fx: "fx = f x" and cnt_x: "count (mset xs) x > count (mset ys) x"
using count_image_mset_lt_imp_lt mset_map by (metis (mono_tags))
thus "\<exists>fy. gt fy fx \<and> count (mset ?fxs) fy < count (mset (map f ys)) fy"
using ex_y[OF cnt_x] by blast
qed
qed
(* Irreflexivity is immediate: msetext_huet requires the multisets to differ. *)
lemma msetext_huet_irrefl: "(\<forall>x \<in> set xs. \<not> gt x x) \<Longrightarrow> \<not> msetext_huet gt xs xs"
unfolding msetext_huet_def by simp
(* Transitivity of the Huet--Oppen extension over a finite carrier A on which
   gt is irreflexive and transitive. The finiteness makes gt well-founded on
   A, which is used repeatedly to pick count-surplus witnesses that are
   maximal w.r.t. gt, so their surplus survives the middle list ys. *)
lemma msetext_huet_trans_from_irrefl:
assumes
fin: "finite A" and
zs_a: "zs \<in> lists A" and ys_a: "ys \<in> lists A" and xs_a: "xs \<in> lists A" and
irrefl: "\<forall>x \<in> A. \<not> gt x x" and
trans: "\<forall>z \<in> A. \<forall>y \<in> A. \<forall>x \<in> A. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x" and
zs_gt_ys: "msetext_huet gt zs ys" and
ys_gt_xs: "msetext_huet gt ys xs"
shows "msetext_huet gt zs xs"
proof -
(* Finite + irreflexive + transitive on A implies well-founded on A. *)
have wf_gt: "wfP (\<lambda>y x. y \<in> A \<and> x \<in> A \<and> gt y x)"
by (rule finite_irreflp_transp_imp_wfp)
(auto elim: trans[rule_format] simp: fin irrefl Collect_case_prod_Sigma irreflp_def
transp_def)
show ?thesis
unfolding msetext_huet_def Let_def
proof (intro conjI allI impI)
(* First conjunct: mset xs \<noteq> mset zs, via a gt-maximal element xx with a
   zs-over-ys surplus whose count cannot be restored in xs. *)
obtain x where cnt_x: "count (mset zs) x > count (mset ys) x"
using msetext_huet_imp_count_gt[OF zs_gt_ys] by blast
have x_in_a: "x \<in> A"
using cnt_x zs_a dual_order.strict_trans2 by fastforce
obtain xx where
cnt_xx: "count (mset zs) xx > count (mset ys) xx" and
max_xx: "\<forall>y \<in> A. xx \<in> A \<longrightarrow> gt y xx \<longrightarrow> count (mset ys) y \<ge> count (mset zs) y"
using wfP_eq_minimal[THEN iffD1, OF wf_gt, rule_format,
of x "{y. count (mset ys) y < count (mset zs) y}", simplified]
cnt_x
by force
have xx_in_a: "xx \<in> A"
using cnt_xx zs_a dual_order.strict_trans2 by fastforce
show "mset xs \<noteq> mset zs"
proof (cases "count (mset ys) xx \<ge> count (mset xs) xx")
case True
thus ?thesis
using cnt_xx by fastforce
next
case False
hence "count (mset ys) xx < count (mset xs) xx"
by fastforce
then obtain z where z_gt_xx: "gt z xx" and cnt_z: "count (mset ys) z > count (mset xs) z"
using ys_gt_xs[unfolded msetext_huet_def Let_def] by blast
have z_in_a: "z \<in> A"
using cnt_z ys_a dual_order.strict_trans2 by fastforce
have "count (mset zs) z \<le> count (mset ys) z"
using max_xx[rule_format, of z] z_in_a xx_in_a z_gt_xx by blast
moreover
(* Maximality of xx excludes z having a strict zs deficit over ys. *)
{
assume "count (mset zs) z < count (mset ys) z"
then obtain u where u_gt_z: "gt u z" and cnt_u: "count (mset ys) u < count (mset zs) u"
using zs_gt_ys[unfolded msetext_huet_def Let_def] by blast
have u_in_a: "u \<in> A"
using cnt_u zs_a dual_order.strict_trans2 by fastforce
have u_gt_xx: "gt u xx"
using trans u_in_a z_in_a xx_in_a u_gt_z z_gt_xx by blast
have False
using max_xx[rule_format, of u] u_in_a xx_in_a u_gt_xx cnt_u by fastforce
}
ultimately have "count (mset zs) z = count (mset ys) z"
by fastforce
thus ?thesis
using cnt_z by fastforce
qed
next
(* Second conjunct: every xs-over-zs surplus element x has a dominating
   element with a zs-over-xs surplus. Split on how x compares in ys. *)
fix x
assume cnt_x_xz: "count (mset zs) x < count (mset xs) x"
have x_in_a: "x \<in> A"
using cnt_x_xz xs_a dual_order.strict_trans2 by fastforce
let ?case = "\<exists>y. gt y x \<and> count (mset zs) y > count (mset xs) y"
(* Case 1: the surplus already appears between zs and ys. *)
{
assume cnt_x: "count (mset zs) x < count (mset ys) x"
then obtain y where y_gt_x: "gt y x" and cnt_y: "count (mset zs) y > count (mset ys) y"
using zs_gt_ys[unfolded msetext_huet_def Let_def] by blast
have y_in_a: "y \<in> A"
using cnt_y zs_a dual_order.strict_trans2 by fastforce
obtain yy where
yy_gt_x: "gt yy x" and
cnt_yy: "count (mset zs) yy > count (mset ys) yy" and
max_yy: "\<forall>y \<in> A. yy \<in> A \<longrightarrow> gt y yy \<longrightarrow> gt y x \<longrightarrow> count (mset ys) y \<ge> count (mset zs) y"
using wfP_eq_minimal[THEN iffD1, OF wf_gt, rule_format,
of y "{y. gt y x \<and> count (mset ys) y < count (mset zs) y}", simplified]
y_gt_x cnt_y
by force
have yy_in_a: "yy \<in> A"
using cnt_yy zs_a dual_order.strict_trans2 by fastforce
have ?case
proof (cases "count (mset ys) yy \<ge> count (mset xs) yy")
case True
thus ?thesis
using yy_gt_x cnt_yy by fastforce
next
case False
hence "count (mset ys) yy < count (mset xs) yy"
by fastforce
then obtain z where z_gt_yy: "gt z yy" and cnt_z: "count (mset ys) z > count (mset xs) z"
using ys_gt_xs[unfolded msetext_huet_def Let_def] by blast
have z_in_a: "z \<in> A"
using cnt_z ys_a dual_order.strict_trans2 by fastforce
have z_gt_x: "gt z x"
using trans z_in_a yy_in_a x_in_a z_gt_yy yy_gt_x by blast
have "count (mset zs) z \<le> count (mset ys) z"
using max_yy[rule_format, of z] z_in_a yy_in_a z_gt_yy z_gt_x by blast
moreover
{
assume "count (mset zs) z < count (mset ys) z"
then obtain u where u_gt_z: "gt u z" and cnt_u: "count (mset ys) u < count (mset zs) u"
using zs_gt_ys[unfolded msetext_huet_def Let_def] by blast
have u_in_a: "u \<in> A"
using cnt_u zs_a dual_order.strict_trans2 by fastforce
have u_gt_yy: "gt u yy"
using trans u_in_a z_in_a yy_in_a u_gt_z z_gt_yy by blast
have u_gt_x: "gt u x"
using trans u_in_a z_in_a x_in_a u_gt_z z_gt_x by blast
have False
using max_yy[rule_format, of u] u_in_a yy_in_a u_gt_yy u_gt_x cnt_u by fastforce
}
ultimately have "count (mset zs) z = count (mset ys) z"
by fastforce
thus ?thesis
using z_gt_x cnt_z by fastforce
qed
}
moreover
(* Case 2: the surplus appears between ys and xs; symmetric argument with
   the roles of zs_gt_ys and ys_gt_xs exchanged. *)
{
assume "count (mset zs) x \<ge> count (mset ys) x"
hence "count (mset ys) x < count (mset xs) x"
using cnt_x_xz by fastforce
then obtain y where y_gt_x: "gt y x" and cnt_y: "count (mset ys) y > count (mset xs) y"
using ys_gt_xs[unfolded msetext_huet_def Let_def] by blast
have y_in_a: "y \<in> A"
using cnt_y ys_a dual_order.strict_trans2 by fastforce
obtain yy where
yy_gt_x: "gt yy x" and
cnt_yy: "count (mset ys) yy > count (mset xs) yy" and
max_yy: "\<forall>y \<in> A. yy \<in> A \<longrightarrow> gt y yy \<longrightarrow> gt y x \<longrightarrow> count (mset xs) y \<ge> count (mset ys) y"
using wfP_eq_minimal[THEN iffD1, OF wf_gt, rule_format,
of y "{y. gt y x \<and> count (mset xs) y < count (mset ys) y}", simplified]
y_gt_x cnt_y
by force
have yy_in_a: "yy \<in> A"
using cnt_yy ys_a dual_order.strict_trans2 by fastforce
have ?case
proof (cases "count (mset zs) yy \<ge> count (mset ys) yy")
case True
thus ?thesis
using yy_gt_x cnt_yy by fastforce
next
case False
hence "count (mset zs) yy < count (mset ys) yy"
by fastforce
then obtain z where z_gt_yy: "gt z yy" and cnt_z: "count (mset zs) z > count (mset ys) z"
using zs_gt_ys[unfolded msetext_huet_def Let_def] by blast
have z_in_a: "z \<in> A"
using cnt_z zs_a dual_order.strict_trans2 by fastforce
have z_gt_x: "gt z x"
using trans z_in_a yy_in_a x_in_a z_gt_yy yy_gt_x by blast
have "count (mset ys) z \<le> count (mset xs) z"
using max_yy[rule_format, of z] z_in_a yy_in_a z_gt_yy z_gt_x by blast
moreover
{
assume "count (mset ys) z < count (mset xs) z"
then obtain u where u_gt_z: "gt u z" and cnt_u: "count (mset xs) u < count (mset ys) u"
using ys_gt_xs[unfolded msetext_huet_def Let_def] by blast
have u_in_a: "u \<in> A"
using cnt_u ys_a dual_order.strict_trans2 by fastforce
have u_gt_yy: "gt u yy"
using trans u_in_a z_in_a yy_in_a u_gt_z z_gt_yy by blast
have u_gt_x: "gt u x"
using trans u_in_a z_in_a x_in_a u_gt_z z_gt_x by blast
have False
using max_yy[rule_format, of u] u_in_a yy_in_a u_gt_yy u_gt_x cnt_u by fastforce
}
ultimately have "count (mset ys) z = count (mset xs) z"
by fastforce
thus ?thesis
using z_gt_x cnt_z by fastforce
qed
}
ultimately show "\<exists>y. gt y x \<and> count (mset xs) y < count (mset zs) y"
by fastforce
qed
qed
(* Appending an element makes the list strictly larger. *)
lemma msetext_huet_snoc: "msetext_huet gt (xs @ [x]) xs"
unfolding msetext_huet_def Let_def by simp

(* Compatibility with prepending/appending the same element on both sides. *)
lemma msetext_huet_compat_cons: "msetext_huet gt ys xs \<Longrightarrow> msetext_huet gt (x # ys) (x # xs)"
unfolding msetext_huet_def Let_def by auto
lemma msetext_huet_compat_snoc: "msetext_huet gt ys xs \<Longrightarrow> msetext_huet gt (ys @ [x]) (xs @ [x])"
unfolding msetext_huet_def Let_def by auto

(* Context compatibility and the singleton case need y \<noteq> x, because
   msetext_huet requires the multisets to differ. *)
lemma msetext_huet_compat_list: "y \<noteq> x \<Longrightarrow> gt y x \<Longrightarrow> msetext_huet gt (xs @ y # xs') (xs @ x # xs')"
unfolding msetext_huet_def Let_def by auto
lemma msetext_huet_singleton: "y \<noteq> x \<Longrightarrow> msetext_huet gt [y] [x] \<longleftrightarrow> gt y x"
unfolding msetext_huet_def by simp

(* Well-foundedness, via the inclusion in the Dershowitz--Manna extension. *)
lemma msetext_huet_wf: "wfP (\<lambda>x y. gt y x) \<Longrightarrow> wfP (\<lambda>xs ys. msetext_huet gt ys xs)"
by (erule wfP_subset[OF msetext_dersh_wf]) (auto intro: msetext_huet_imp_dersh)
(* Head-or-tail property: for a transitive and total base order gt, if the
   cons lists y # ys and x # xs (of equal tail length) are related by the Huet
   extension, then either the heads are related by gt or the tails are again
   related by the extension.  The proof case-splits on how y and x compare. *)
lemma msetext_huet_hd_or_tl:
assumes
trans: "\<forall>z y x. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x" and
total: "\<forall>y x. gt y x \<or> gt x y \<or> y = x" and
len_eq: "length ys = length xs" and
yys_gt_xxs: "msetext_huet gt (y # ys) (x # xs)"
shows "gt y x \<or> msetext_huet gt ys xs"
proof -
let ?Y = "mset (y # ys)"
let ?X = "mset (x # xs)"
let ?Ya = "mset ys"
let ?Xa = "mset xs"
have Y_ne_X: "?Y \<noteq> ?X" and
ex_gt_Y: "\<And>xa. count ?X xa > count ?Y xa \<Longrightarrow> \<exists>ya. gt ya xa \<and> count ?Y ya > count ?X ya"
using yys_gt_xxs[unfolded msetext_huet_def Let_def] by auto
(* Skolemize the existential witness into a function yy. *)
obtain yy where
yy: "\<And>xa. count ?X xa > count ?Y xa \<Longrightarrow> gt (yy xa) xa \<and> count ?Y (yy xa) > count ?X (yy xa)"
using ex_gt_Y by metis
(* Count comparisons survive dropping the heads, except at the heads y and x. *)
have cnt_Y_pres: "count ?Ya xa > count ?Xa xa" if "count ?Y xa > count ?X xa" and "xa \<noteq> y" for xa
using that by (auto split: if_splits)
have cnt_X_pres: "count ?Xa xa > count ?Ya xa" if "count ?X xa > count ?Y xa" and "xa \<noteq> x" for xa
using that by (auto split: if_splits)
(* Case 1: equal heads -- the witnesses transfer directly to the tails. *)
{
assume y_eq_x: "y = x"
have "?Xa \<noteq> ?Ya"
using y_eq_x Y_ne_X by simp
moreover have "\<And>xa. count ?Xa xa > count ?Ya xa \<Longrightarrow> \<exists>ya. gt ya xa \<and> count ?Ya ya > count ?Xa ya"
proof -
fix xa :: 'a
assume a1: "count (mset ys) xa < count (mset xs) xa"
from ex_gt_Y obtain aa :: "'a \<Rightarrow> 'a" where
f3: "\<forall>a. \<not> count (mset (y # ys)) a < count (mset (x # xs)) a \<or> gt (aa a) a \<and>
count (mset (x # xs)) (aa a) < count (mset (y # ys)) (aa a)"
by (metis (full_types))
then have f4: "\<And>a. count (mset (x # xs)) (aa a) < count (mset (x # ys)) (aa a) \<or>
\<not> count (mset (x # ys)) a < count (mset (x # xs)) a"
using y_eq_x by meson
have "\<And>a as aa. count (mset ((a::'a) # as)) aa = count (mset as) aa \<or> aa = a"
by fastforce
then have "xa = x \<or> count (mset (x # xs)) (aa xa) < count (mset (x # ys)) (aa xa)"
using f4 a1 by (metis (no_types))
then show "\<exists>a. gt a xa \<and> count (mset xs) a < count (mset ys) a"
using f3 y_eq_x a1 by (metis (no_types) Suc_less_eq count_add_mset mset.simps(2))
qed
ultimately have "msetext_huet gt ys xs"
unfolding msetext_huet_def Let_def by simp
}
moreover
(* Case 2: x strictly greater than y -- repair the witnesses, replacing any
   witness that was the removed head y by the witness for x itself. *)
{
assume x_gt_y: "gt x y" and y_ngt_x: "\<not> gt y x"
hence y_ne_x: "y \<noteq> x"
by fast
obtain z where z_cnt: "count ?X z > count ?Y z"
using size_eq_ex_count_lt[of ?Y ?X] size_mset size_mset len_eq Y_ne_X by auto
have Xa_ne_Ya: "?Xa \<noteq> ?Ya"
proof (cases "z = x")
case True
hence "yy z \<noteq> y"
using y_ngt_x yy z_cnt by blast
hence "count ?Ya (yy z) > count ?Xa (yy z)"
using cnt_Y_pres yy z_cnt by blast
thus ?thesis
by auto
next
case False
hence "count ?Xa z > count ?Ya z"
using z_cnt cnt_X_pres by blast
thus ?thesis
by auto
qed
have "\<exists>ya. gt ya xa \<and> count ?Ya ya > count ?Xa ya"
if xa_cnta: "count ?Xa xa > count ?Ya xa" for xa
proof (cases "xa = y")
case xa_eq_y: True
{
assume "count ?Ya x > count ?Xa x"
moreover have "gt x xa"
unfolding xa_eq_y by (rule x_gt_y)
ultimately have ?thesis
by fast
}
moreover
{
assume "count ?Xa x \<ge> count ?Ya x"
hence x_cnt: "count ?X x > count ?Y x"
by (simp add: y_ne_x)
hence yyx_gt_x: "gt (yy x) x" and yyx_cnt: "count ?Y (yy x) > count ?X (yy x)"
using yy by blast+
have yyx_ne_y: "yy x \<noteq> y"
using y_ngt_x yyx_gt_x by auto
have "gt (yy x) xa"
unfolding xa_eq_y using trans yyx_gt_x x_gt_y by blast
moreover have "count ?Ya (yy x) > count ?Xa (yy x)"
using cnt_Y_pres yyx_cnt yyx_ne_y by blast
ultimately have ?thesis
by blast
}
ultimately show ?thesis
by fastforce
next
case False
hence xa_cnt: "count ?X xa > count ?Y xa"
using xa_cnta by fastforce
show ?thesis
proof (cases "yy xa = y \<and> count ?Ya y \<le> count ?Xa y")
case yyxa_ne_y_or: False
have yyxa_gt_xa: "gt (yy xa) xa" and yyxa_cnt: "count ?Y (yy xa) > count ?X (yy xa)"
using yy[OF xa_cnt] by blast+
have "count ?Ya (yy xa) > count ?Xa (yy xa)"
using cnt_Y_pres yyxa_cnt yyxa_ne_y_or by fastforce
thus ?thesis
using yyxa_gt_xa by blast
next
case True
note yyxa_eq_y = this[THEN conjunct1] and y_cnt = this[THEN conjunct2]
{
assume "count ?Ya x > count ?Xa x"
moreover have "gt x xa"
using trans x_gt_y xa_cnt yy yyxa_eq_y by blast
ultimately have ?thesis
by fast
}
moreover
{
assume "count ?Xa x \<ge> count ?Ya x"
hence x_cnt: "count ?X x > count ?Y x"
by (simp add: y_ne_x)
hence yyx_gt_x: "gt (yy x) x" and yyx_cnt: "count ?Y (yy x) > count ?X (yy x)"
using yy by blast+
have yyx_ne_y: "yy x \<noteq> y"
using y_ngt_x yyx_gt_x by auto
have "gt (yy x) xa"
using trans x_gt_y xa_cnt yy yyx_gt_x yyxa_eq_y by blast
moreover have "count ?Ya (yy x) > count ?Xa (yy x)"
using cnt_Y_pres yyx_cnt yyx_ne_y by blast
ultimately have ?thesis
by blast
}
ultimately show ?thesis
by fastforce
qed
qed
hence "msetext_huet gt ys xs"
unfolding msetext_huet_def Let_def using Xa_ne_Ya by fast
}
(* Totality closes the remaining case gt y x, which satisfies the goal
   directly. *)
ultimately show ?thesis
using total by blast
qed
(* Register the Huet extension in the abstract locales for list-extension
   orders; each interpretation discharges its axioms with the corresponding
   lemma proved above. *)
interpretation msetext_huet: ext msetext_huet
by standard (fact msetext_huet_mono_strong, fact msetext_huet_map)
interpretation msetext_huet: ext_irrefl_before_trans msetext_huet
by standard (fact msetext_huet_irrefl, fact msetext_huet_trans_from_irrefl)
interpretation msetext_huet: ext_snoc msetext_huet
by standard (fact msetext_huet_snoc)
interpretation msetext_huet: ext_compat_cons msetext_huet
by standard (fact msetext_huet_compat_cons)
interpretation msetext_huet: ext_compat_snoc msetext_huet
by standard (fact msetext_huet_compat_snoc)
interpretation msetext_huet: ext_compat_list msetext_huet
by standard (fact msetext_huet_compat_list)
interpretation msetext_huet: ext_singleton msetext_huet
by standard (fact msetext_huet_singleton)
interpretation msetext_huet: ext_wf msetext_huet
by standard (fact msetext_huet_wf)
interpretation msetext_huet: ext_hd_or_tl msetext_huet
by standard (rule msetext_huet_hd_or_tl)
(* ext_wf_bounded has no additional axioms beyond the locales above. *)
interpretation msetext_huet: ext_wf_bounded msetext_huet
by standard
subsection \<open>Componentwise Extension\<close>
(* Componentwise extension: ys is greater than xs iff the lists have equal
   length, every component of ys is greater than or equal to the matching
   component of xs, and at least one component is strictly greater. *)
definition cwiseext :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool" where
"cwiseext gt ys xs \<longleftrightarrow> length ys = length xs
\<and> (\<forall>i < length ys. gt (ys ! i) (xs ! i) \<or> ys ! i = xs ! i)
\<and> (\<exists>i < length ys. gt (ys ! i) (xs ! i))"
(* The componentwise extension is contained in the length-lexicographic
   extension: locate the first strictly greater position j0 (all earlier
   positions are then equal) and peel off the common prefix by induction. *)
lemma cwiseext_imp_len_lexext:
assumes cw: "cwiseext gt ys xs"
shows "len_lexext gt ys xs"
proof -
have len_eq: "length ys = length xs"
using cw[unfolded cwiseext_def] by sat
moreover have "lexext gt ys xs"
proof -
obtain j where
j_len: "j < length ys" and
j_gt: "gt (ys ! j) (xs ! j)"
using cw[unfolded cwiseext_def] by blast
(* Minimize j to the earliest strictly greater index j0. *)
then obtain j0 where
j0_len: "j0 < length ys" and
j0_gt: "gt (ys ! j0) (xs ! j0)" and
j0_min: "\<And>i. i < j0 \<Longrightarrow> \<not> gt (ys ! i) (xs ! i)"
using wf_eq_minimal[THEN iffD1, OF wf_less, rule_format, of _ "{i. gt (ys ! i) (xs ! i)}",
simplified, OF j_gt]
by (metis less_trans nat_neq_iff)
have j0_eq: "\<And>i. i < j0 \<Longrightarrow> ys ! i = xs ! i"
using cw[unfolded cwiseext_def] by (metis j0_len j0_min less_trans)
have "lexext gt (drop j0 ys) (drop j0 xs)"
using lexext_Cons[of gt _ _ "drop (Suc j0) ys" "drop (Suc j0) xs", OF j0_gt]
by (metis Cons_nth_drop_Suc j0_len len_eq)
(* Extend from the suffixes to the whole lists by stepping the drop index
   down from j0 to 0; the dropped elements are pairwise equal. *)
thus ?thesis
using cw len_eq j0_len j0_min
proof (induct j0 arbitrary: ys xs)
case (Suc k)
note ih0 = this(1) and gts_dropSk = this(2) and cw = this(3) and len_eq = this(4) and
Sk_len = this(5) and Sk_min = this(6)
have Sk_eq: "\<And>i. i < Suc k \<Longrightarrow> ys ! i = xs ! i"
using cw[unfolded cwiseext_def] by (metis Sk_len Sk_min less_trans)
have k_len: "k < length ys"
using Sk_len by simp
have k_min: "\<And>i. i < k \<Longrightarrow> \<not> gt (ys ! i) (xs ! i)"
using Sk_min by simp
have k_eq: "\<And>i. i < k \<Longrightarrow> ys ! i = xs ! i"
using Sk_eq by simp
note ih = ih0[OF _ cw len_eq k_len k_min]
show ?case
proof (cases "k < length ys")
case k_lt_ys: True
note k_lt_xs = k_lt_ys[unfolded len_eq]
obtain x where x: "x = xs ! k"
by simp
hence y: "x = ys ! k"
using Sk_eq[of k] by simp
have dropk_xs: "drop k xs = x # drop (Suc k) xs"
using k_lt_xs x by (simp add: Cons_nth_drop_Suc)
have dropk_ys: "drop k ys = x # drop (Suc k) ys"
using k_lt_ys y by (simp add: Cons_nth_drop_Suc)
show ?thesis
by (rule ih, unfold dropk_xs dropk_ys, rule lexext_Cons_eq[OF gts_dropSk])
next
case False
hence "drop k xs = []" and "drop k ys = []"
using len_eq by simp_all
hence "lexext gt [] []"
using gts_dropSk by simp
hence "lexext gt (drop k ys) (drop k xs)"
by simp
thus ?thesis
by (rule ih)
qed
qed simp
qed
ultimately show ?thesis
unfolding lenext_def by sat
qed
(* Monotonicity: strengthening the base order on the involved elements
   preserves the componentwise extension. *)
lemma cwiseext_mono_strong:
"(\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt' y x) \<Longrightarrow> cwiseext gt ys xs \<Longrightarrow> cwiseext gt' ys xs"
unfolding cwiseext_def by (induct, force, fast)
(* Mapping a function that is order-preserving on the involved elements
   preserves the extension. *)
lemma cwiseext_map_strong:
"(\<forall>y \<in> set ys. \<forall>x \<in> set xs. gt y x \<longrightarrow> gt (f y) (f x)) \<Longrightarrow> cwiseext gt ys xs \<Longrightarrow>
cwiseext gt (map f ys) (map f xs)"
unfolding cwiseext_def by auto
(* Irreflexivity lifts from the base order to the extension. *)
lemma cwiseext_irrefl: "(\<forall>x \<in> set xs. \<not> gt x x) \<Longrightarrow> \<not> cwiseext gt xs xs"
unfolding cwiseext_def by (blast intro: nth_mem)
(* Transitivity lifts from the base order, assumed only on the elements of
   the three lists involved. *)
lemma cwiseext_trans_strong:
assumes
"\<forall>z \<in> set zs. \<forall>y \<in> set ys. \<forall>x \<in> set xs. gt z y \<longrightarrow> gt y x \<longrightarrow> gt z x" and
"cwiseext gt zs ys" and "cwiseext gt ys xs"
shows "cwiseext gt zs xs"
using assms unfolding cwiseext_def by (metis (mono_tags) nth_mem)
(* Cons compatibility: prepending the same element to both lists preserves
   the extension; the index witnesses simply shift by one. *)
lemma cwiseext_compat_cons: "cwiseext gt ys xs \<Longrightarrow> cwiseext gt (x # ys) (x # xs)"
unfolding cwiseext_def
proof (elim conjE, intro conjI)
assume
"length ys = length xs" and
"\<forall>i < length ys. gt (ys ! i) (xs ! i) \<or> ys ! i = xs ! i"
thus "\<forall>i < length (x # ys). gt ((x # ys) ! i) ((x # xs) ! i) \<or> (x # ys) ! i = (x # xs) ! i"
by (simp add: nth_Cons')
next
assume "\<exists>i < length ys. gt (ys ! i) (xs ! i)"
thus "\<exists>i < length (x # ys). gt ((x # ys) ! i) ((x # xs) ! i)"
by fastforce
qed auto
(* Snoc compatibility: appending the same element at the end preserves the
   extension; the index witnesses are unchanged. *)
lemma cwiseext_compat_snoc: "cwiseext gt ys xs \<Longrightarrow> cwiseext gt (ys @ [x]) (xs @ [x])"
unfolding cwiseext_def
proof (elim conjE, intro conjI)
assume
"length ys = length xs" and
"\<forall>i < length ys. gt (ys ! i) (xs ! i) \<or> ys ! i = xs ! i"
thus "\<forall>i < length (ys @ [x]).
gt ((ys @ [x]) ! i) ((xs @ [x]) ! i) \<or> (ys @ [x]) ! i = (xs @ [x]) ! i"
by (simp add: nth_append)
next
assume
"length ys = length xs" and
"\<exists>i < length ys. gt (ys ! i) (xs ! i)"
thus "\<exists>i < length (ys @ [x]). gt ((ys @ [x]) ! i) ((xs @ [x]) ! i)"
by (metis length_append_singleton less_Suc_eq nth_append)
qed auto
(* Context compatibility: replacing a single element by a strictly greater
   one (same prefix and suffix) yields a strictly greater list. *)
lemma cwiseext_compat_list:
assumes y_gt_x: "gt y x"
shows "cwiseext gt (xs @ y # xs') (xs @ x # xs')"
unfolding cwiseext_def
proof (intro conjI)
show "\<forall>i < length (xs @ y # xs'). gt ((xs @ y # xs') ! i) ((xs @ x # xs') ! i)
\<or> (xs @ y # xs') ! i = (xs @ x # xs') ! i"
using y_gt_x by (simp add: nth_Cons' nth_append)
next
show "\<exists>i < length (xs @ y # xs'). gt ((xs @ y # xs') ! i) ((xs @ x # xs') ! i)"
using y_gt_x by (metis add_diff_cancel_right' append_is_Nil_conv diff_less length_append
length_greater_0_conv list.simps(3) nth_append_length)
qed auto
(* On singletons, the extension coincides with the base order. *)
lemma cwiseext_singleton: "cwiseext gt [y] [x] \<longleftrightarrow> gt y x"
unfolding cwiseext_def by auto
(* Well-foundedness transfers via the inclusion in the length-lexicographic
   extension established above. *)
lemma cwiseext_wf: "wfP (\<lambda>x y. gt y x) \<Longrightarrow> wfP (\<lambda>xs ys. cwiseext gt ys xs)"
by (auto intro: cwiseext_imp_len_lexext wfP_subset[OF len_lexext_wf])
(* Head-or-tail property: a strict witness either sits at the head or at
   some tail position. *)
lemma cwiseext_hd_or_tl: "cwiseext gt (y # ys) (x # xs) \<Longrightarrow> gt y x \<or> cwiseext gt ys xs"
unfolding cwiseext_def
proof (elim conjE, intro disj_imp[THEN iffD2, rule_format] conjI)
assume
"\<exists>i < length (y # ys). gt ((y # ys) ! i) ((x # xs) ! i)" and
"\<not> gt y x"
thus "\<exists>i < length ys. gt (ys ! i) (xs ! i)"
by (metis (no_types) One_nat_def diff_le_self diff_less dual_order.strict_trans2
length_Cons less_Suc_eq linorder_neqE_nat not_less0 nth_Cons')
qed auto
(* Any extension operator that is compatible with single-element replacement
   and with consing subsumes the componentwise extension, provided the base
   order is irreflexive and the extension is transitive.  The proof proceeds
   by simultaneous induction on the two equal-length lists. *)
locale ext_cwiseext = ext_compat_list + ext_compat_cons
begin
context
fixes gt :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
assumes
gt_irrefl: "\<not> gt x x" and
trans_gt: "ext gt zs ys \<Longrightarrow> ext gt ys xs \<Longrightarrow> ext gt zs xs"
begin
lemma
assumes ys_gtcw_xs: "cwiseext gt ys xs"
shows "ext gt ys xs"
proof -
have "length ys = length xs"
by (rule ys_gtcw_xs[unfolded cwiseext_def, THEN conjunct1])
thus ?thesis
using ys_gtcw_xs
proof (induct rule: list_induct2)
case Nil
thus ?case
unfolding cwiseext_def by simp
next
case (Cons y ys x xs)
note len_ys_eq_xs = this(1) and ih = this(2) and yys_gtcw_xxs = this(3)
(* If the tails differ, the induction hypothesis applies to them and cons
   compatibility lifts the result to x # ys vs. x # xs. *)
have xys_gts_xxs: "ext gt (x # ys) (x # xs)" if ys_ne_xs: "ys \<noteq> xs"
proof -
have ys_gtcw_xs: "cwiseext gt ys xs"
using yys_gtcw_xxs unfolding cwiseext_def
proof (elim conjE, intro conjI)
assume
"\<forall>i < length (y # ys). gt ((y # ys) ! i) ((x # xs) ! i) \<or> (y # ys) ! i = (x # xs) ! i"
hence ge: "\<forall>i < length ys. gt (ys ! i) (xs ! i) \<or> ys ! i = xs ! i"
by auto
thus "\<exists>i < length ys. gt (ys ! i) (xs ! i)"
using ys_ne_xs len_ys_eq_xs nth_equalityI by blast
qed auto
hence "ext gt ys xs"
by (rule ih)
thus "ext gt (x # ys) (x # xs)"
by (rule compat_cons)
qed
have "gt y x \<or> y = x"
using yys_gtcw_xxs unfolding cwiseext_def by fastforce
moreover
(* Equal heads: the tails must differ (else irreflexivity is violated). *)
{
assume y_eq_x: "y = x"
have ?case
proof (cases "ys = xs")
case True
hence False
using y_eq_x gt_irrefl yys_gtcw_xxs unfolding cwiseext_def by presburger
thus ?thesis
by sat
next
case False
thus ?thesis
using y_eq_x xys_gts_xxs by simp
qed
}
moreover
(* Strictly greater head: chain the head step with the tail step (if any)
   using transitivity of the extension. *)
{
assume "y \<noteq> x" and "gt y x"
hence yys_gts_xys: "ext gt (y # ys) (x # ys)"
using compat_list[of _ _ gt "[]"] by simp
have ?case
proof (cases "ys = xs")
case ys_eq_xs: True
thus ?thesis
using yys_gts_xys by simp
next
case False
thus ?thesis
using yys_gts_xys xys_gts_xxs trans_gt by blast
qed
}
ultimately show ?case
by sat
qed
qed
end
end
(* Register the componentwise extension in the abstract locales; each
   interpretation is discharged by the corresponding lemma above. *)
interpretation cwiseext: ext cwiseext
by standard (fact cwiseext_mono_strong, rule cwiseext_map_strong, metis in_listsD)
interpretation cwiseext: ext_irrefl_trans_strong cwiseext
by standard (fact cwiseext_irrefl, fact cwiseext_trans_strong)
interpretation cwiseext: ext_compat_cons cwiseext
by standard (fact cwiseext_compat_cons)
interpretation cwiseext: ext_compat_snoc cwiseext
by standard (fact cwiseext_compat_snoc)
interpretation cwiseext: ext_compat_list cwiseext
by standard (rule cwiseext_compat_list)
interpretation cwiseext: ext_singleton cwiseext
by standard (rule cwiseext_singleton)
interpretation cwiseext: ext_wf cwiseext
by standard (rule cwiseext_wf)
interpretation cwiseext: ext_hd_or_tl cwiseext
by standard (rule cwiseext_hd_or_tl)
(* ext_wf_bounded needs no further axioms beyond the locales above. *)
interpretation cwiseext: ext_wf_bounded cwiseext
by standard
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/Lambda_Free_RPOs/Extension_Orders.thy"}
|
import numpy as np
def iou_metric(y_true_in, y_pred_in):
    """Return the foreground intersection-over-union of a binary mask pair.

    Both inputs are arrays of values in [0, 1]; pixels are binarized at a
    0.5 threshold. The background class is dropped, so the result is a
    1x1 array holding the foreground IoU. Zero entries in the numerator
    and denominator are replaced by a tiny epsilon to avoid division by
    zero (and 0/0 ambiguity).
    """
    truth = y_true_in
    prediction = y_pred_in

    # Joint 2-bin histogram (threshold at 0.5) acts as a confusion matrix:
    # entry [i, j] counts pixels with true class i and predicted class j.
    confusion, _, _ = np.histogram2d(
        truth.flatten(), prediction.flatten(), bins=([0, 0.5, 1], [0, 0.5, 1]))
    overlap = confusion

    # Per-class pixel counts, shaped as column/row vectors for broadcasting.
    per_class_true = np.expand_dims(np.histogram(truth, bins=[0, 0.5, 1])[0], -1)
    per_class_pred = np.expand_dims(np.histogram(prediction, bins=[0, 0.5, 1])[0], 0)

    # Union by inclusion-exclusion: |A| + |B| - |A ∩ B|.
    union = per_class_true + per_class_pred - overlap

    # Drop the background row/column, then guard both terms against zeros.
    overlap = overlap[1:, 1:]
    overlap[overlap == 0] = 1e-9
    union = union[1:, 1:]
    union[union == 0] = 1e-9

    return overlap / union
def dice_metric(y_true_in, y_pred_in):
    """Return the foreground Dice coefficient of a binary mask pair.

    Both inputs are arrays of values in [0, 1]; pixels are binarized at a
    0.5 threshold. The background class is dropped, so the result is a
    1x1 array holding the foreground Dice score 2|A ∩ B| / (|A| + |B|).
    Zero entries in numerator and denominator are replaced by a tiny
    epsilon to avoid division by zero.
    """
    truth = y_true_in
    prediction = y_pred_in

    # Joint 2-bin histogram (threshold at 0.5) acts as a confusion matrix:
    # entry [i, j] counts pixels with true class i and predicted class j.
    confusion, _, _ = np.histogram2d(
        truth.flatten(), prediction.flatten(), bins=([0, 0.5, 1], [0, 0.5, 1]))
    overlap = confusion

    # Per-class pixel counts, shaped as column/row vectors for broadcasting.
    per_class_true = np.expand_dims(np.histogram(truth, bins=[0, 0.5, 1])[0], -1)
    per_class_pred = np.expand_dims(np.histogram(prediction, bins=[0, 0.5, 1])[0], 0)

    # Dice denominator: the plain sum of the two areas (not the set union).
    denom = per_class_true + per_class_pred

    # Drop the background row/column, then guard both terms against zeros.
    overlap = overlap[1:, 1:]
    overlap[overlap == 0] = 1e-9
    denom = denom[1:, 1:]
    denom[denom == 0] = 1e-9

    return (2 * overlap) / denom
|
{"hexsha": "233375fc49d41fffd61d9a1802d0c40cd3587f79", "size": 1496, "ext": "py", "lang": "Python", "max_stars_repo_path": "scores_comp.py", "max_stars_repo_name": "DevikalyanDas/NucleiSegnet-Paper-with-Code", "max_stars_repo_head_hexsha": "d8fbf9cc13160e21166fff905688c93d4900bdd2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-04-27T13:53:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T01:48:02.000Z", "max_issues_repo_path": "scores_comp.py", "max_issues_repo_name": "DevikalyanDas/NucleiSegnet-Paper-with-Code", "max_issues_repo_head_hexsha": "d8fbf9cc13160e21166fff905688c93d4900bdd2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-10T11:11:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-18T09:01:50.000Z", "max_forks_repo_path": "scores_comp.py", "max_forks_repo_name": "DevikalyanDas/NucleiSegnet-Paper-with-Code", "max_forks_repo_head_hexsha": "d8fbf9cc13160e21166fff905688c93d4900bdd2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-12T14:15:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-12T14:15:33.000Z", "avg_line_length": 28.2264150943, "max_line_length": 93, "alphanum_fraction": 0.618315508, "include": true, "reason": "import numpy", "num_tokens": 459}
|
-- Boehm-Berarducci (Church-style) encodings of natural numbers and lists,
-- with round-trip correctness proofs for the Nat encoding.
module BoehmBerarducci
%default total
-- A Church numeral: for any carrier A, iterate a step function over a base.
NatQ : Type
NatQ = (A : Type) -> (A -> A) -> A -> A
-- Eliminate an encoded numeral by supplying the step and base.
unNatQ : {A : Type} -> (A -> A) -> A -> NatQ -> A
unNatQ f a q = q _ f a
-- Successor: apply the step once more than the argument does.
succQ : NatQ -> NatQ
succQ q = \_, f, a => f (q _ f a)
-- Zero: ignore the step and return the base.
zeroQ : NatQ
zeroQ = \_, f, a => a
-- Decode to a native Nat by instantiating with S and Z.
fromNatQ : NatQ -> Nat
fromNatQ q = unNatQ S Z q
-- Encode a native Nat by structural recursion.
toNatQ : Nat -> NatQ
toNatQ (S n) = succQ (toNatQ n)
toNatQ Z = zeroQ
-- n-fold application of f, used as the reference semantics in the proofs.
iterated : Nat -> (a -> a) -> a -> a
iterated (S n) f a = f (iterated n f a)
iterated Z f a = a
-- Iterating S over Z n times yields n itself.
test_iterated : (n : Nat) -> iterated n S Z = n
test_iterated (S n) = rewrite test_iterated n in Refl
test_iterated Z = Refl
-- Decoding the canonical encoding of n recovers n.
test_fromNatQ : (n : Nat) -> fromNatQ (iterated n succQ zeroQ) = n
test_fromNatQ (S n) = rewrite test_fromNatQ n in Refl
test_fromNatQ Z = Refl
-- Encoding agrees with n-fold application of succQ to zeroQ.
test_toNatQ : (n : Nat) -> toNatQ n = iterated n succQ zeroQ
test_toNatQ (S n) = rewrite test_toNatQ n in Refl
test_toNatQ Z = Refl
-- A Church-encoded list: fold over an arbitrary result type B.
ListQ : Type -> Type
ListQ A = (B : Type) -> (A -> B -> B) -> B -> B
-- Eliminate an encoded list by supplying the cons and nil cases.
unListQ : {A, B : Type} -> (A -> B -> B) -> B -> ListQ A -> B
unListQ f b q = q _ f b
-- Cons: apply the cons case once more than the tail does.
consQ : {A : Type} -> A -> ListQ A -> ListQ A
consQ a q = \_, f, b => f a (q _ f b)
-- Nil: ignore the cons case and return the nil case.
nilQ : {A : Type} -> ListQ A
nilQ = \_, f, b => b
-- Decode to a native List by instantiating with (::) and [].
fromListQ : {A : Type} -> ListQ A -> List A
fromListQ q = unListQ (::) [] q
-- Encode a native List by structural recursion.
toListQ : {A : Type} -> List A -> ListQ A
toListQ (a :: aa) = consQ a (toListQ aa)
toListQ [] = nilQ
|
{"hexsha": "278477ce2148da6d965dc4290414ac975bb485b9", "size": 1377, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "idris-explicit/BoehmBerarducci.idr", "max_stars_repo_name": "mietek/scott-encoding", "max_stars_repo_head_hexsha": "14e819383dd8730e1c3cbd9c2ce53335bd95188b", "max_stars_repo_licenses": ["X11", "MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2015-05-27T19:33:52.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-08T16:31:59.000Z", "max_issues_repo_path": "idris-explicit/BoehmBerarducci.idr", "max_issues_repo_name": "mietek/scott-encoding", "max_issues_repo_head_hexsha": "14e819383dd8730e1c3cbd9c2ce53335bd95188b", "max_issues_repo_licenses": ["X11", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "idris-explicit/BoehmBerarducci.idr", "max_forks_repo_name": "mietek/scott-encoding", "max_forks_repo_head_hexsha": "14e819383dd8730e1c3cbd9c2ce53335bd95188b", "max_forks_repo_licenses": ["X11", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.95, "max_line_length": 66, "alphanum_fraction": 0.5664488017, "num_tokens": 556}
|
#include <algorithm>
#include <fstream>
#include <boost/assert.hpp>
#include "nlohmann/json.hpp"
#include "utility/type/XY.hpp"
#include "utility/type/RowColumn.hpp"
#include "utility/wrapper/sfVector2.hpp"
#include "utility/wrapper/sfMakeColor.hpp"
#include "Menu.hpp"
namespace nemo
{
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Construct a menu laid out as a grid of selectable options.
//
// \param pos            Top-left position of the menu box (window coords).
// \param dim            Width and height of the menu box.
// \param rows           Number of option rows per page; must be > 0.
// \param cols           Number of option columns per page; must be > 0.
// \param outer_margins  Space between the box border and the option grid.
// \param inner_margins  Space between neighboring option cells.
// \param align_center   True to center option text horizontally in its cell.
// \param char_sz        Character size of option text; must be > 0.
// \param option_color   Default text/background/border colors for options.
// \param cursor_color   Colors used for the option under the cursor.
// \param box_color      Background and border colors of the menu box itself.
// \param font_file      Path to the font file; loading it must succeed.
Menu::Menu(
	const XYPair pos,
	const XYPair dim,
	const Row rows,
	const Column cols,
	const XYPair outer_margins,
	const XYPair inner_margins,
	const bool align_center,
	const size_t char_sz,
	const TextBoxColor option_color,
	const TextBoxColor cursor_color,
	const TextBoxColor box_color,
	const std::string& font_file
)
	: align_center_(align_center)
	, rows_(rows)
	, cols_(cols)
	, rc1d_conv_(cols)
	, option_color_(option_color)
	, cursor_color_(cursor_color)
	, cursor_rc_({ Row(0), Column(0) })  // cursor starts at the top-left slot
	, char_sz_(char_sz)
{
	// Sanity-check all geometry arguments before building anything.
	const auto x0y0 = XYPair(XValue(0.f), YValue(0.f));
	BOOST_ASSERT(pos >= x0y0);
	BOOST_ASSERT(dim >= x0y0);
	BOOST_ASSERT(rows > 0);
	BOOST_ASSERT(cols > 0);
	BOOST_ASSERT(char_sz > 0);
	BOOST_ASSERT(outer_margins >= x0y0);
	BOOST_ASSERT(inner_margins >= x0y0);

	// Load the font file.
	BOOST_VERIFY(font_.loadFromFile(font_file));

	// Create the menu box. A negative outline thickness draws the border
	// inward so the box's footprint stays exactly `dim`.
	box_.setSize(sfVector2(dim));
	box_.setPosition(sfVector2(pos));
	box_.setOutlineThickness(-1.f);
	box_.setFillColor(box_color.backgnd_);
	box_.setOutlineColor(box_color.border_);

	// The area of the menu inside the margins is reserved for menu options. From
	// there, the number of rows and columns of options determine each option's
	// width and height. For now, inner margins are included in the calculated
	// width and height.
	const auto option_dim_v = sfVector2(
		(dim.x_ - XValue(2.f) * outer_margins.x_) / XValue(int(cols_)),
		(dim.y_ - YValue(2.f) * outer_margins.y_) / YValue(int(rows_))
	);

	// Create background cells to contain the menu options in one page. To save
	// memory, only one page worth is needed since the options outside of them
	// won't be drawn on screen and all cells' positions stayed the same from
	// page to page. We can decide the color of a cell later based on the
	// contained option's colorset.
	const auto noptions_per_page = int(rows_) * int(cols_);
	options_.reserve(noptions_per_page);
	cells_.reserve(noptions_per_page);

	for (auto i = 0; i < noptions_per_page; ++i) {
		// Adjust cell cize so that inner margins can be inserted between them.
		const auto inner_margins_v = sfVector2(inner_margins);
		const auto cell_dim_v = option_dim_v - 2.f * inner_margins_v;
		// Each cell must at least fit one line of text at char_sz_.
		BOOST_ASSERT(cell_dim_v.x > char_sz_ && cell_dim_v.y > char_sz_);
		sf::RectangleShape cell(cell_dim_v);

		// Insert inner margins: a negative origin shifts the drawn cell
		// right/down by the margin amount relative to its position.
		cell.setOrigin(-inner_margins_v);

		// Place the cell in the appropriate spot in the menu. Cells are lined
		// from left to right, down across rows.
		const auto rc_i = rc1d_conv_.toRowColumn(i);
		cell.setPosition(
			inner_margins_v
			+ sfVector2(
				XValue(option_dim_v.x * int(rc_i.c_)),
				YValue(option_dim_v.y * int(rc_i.r_))
			)
			+ sfVector2(pos)
		);

		// The colors of the cells are decided in drawOption() since they can
		// change depending on whether a cursor is hovering over one or if the
		// client has requested a specific menu option to be colored differently.
		cell.setOutlineThickness(1.f);
		cells_.push_back(cell);
	}
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Construct a menu from a configuration file by parsing it into the full
// argument set and delegating to the main constructor.
// \param file Path to the menu configuration file read by parseFile().
Menu::Menu(const std::string& file)
	: Menu(parseFile(file))
{
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Append a new option to the end of the menu.
// \param id  Unique identifier for the option; must not already be in use.
// \param txt Text label displayed for the option.
// \return This menu, to allow call chaining.
Menu&
Menu::add(const int id, const std::string& txt)
{
	// IDs must be unique across the whole menu.
	const auto existing = find(id);
	BOOST_ASSERT(existing == options_.cend());

	// Build the option's graphical text and append the option, using the
	// menu's default colorset.
	sf::Text label(txt, font_, char_sz_);
	options_.push_back({ id, label, option_color_ });

	// Precompute where the new option's text will be rendered; it occupies
	// the last slot of the container.
	presetTextPosition(options_.size() - 1);
	return *this;
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Remove the option with the given ID from the menu.
// \param id Identifier of an existing option; asserts if absent.
// \return This menu, to allow call chaining.
Menu&
Menu::remove(const int id)
{
	// Delete the option.
	auto iter = find(id);
	BOOST_ASSERT(iter != options_.cend());
	iter = options_.erase(iter);

	// All options that followed the deleted one need to have their text's render
	// positions shifted frontward one slot. presetTextPosition() already
	// accounts for this when called after the option is deleted.
	for (auto it = iter; it != options_.cend(); ++it) {
		presetTextPosition(it - options_.cbegin());
	}

	if (const auto cur_idx = rc1d_conv_.to1D(cursor_rc_);
		cur_idx == static_cast<decltype(cur_idx)>(options_.size()))
	{
		// The cursor was on the last option before one of the options was
		// removed. Move the cursor frontward because it's now hovering over
		// an invalidated space.
		moveLeft();
	}

	return *this;
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Replace the display text of an existing option.
// \param id  Identifier of the option; asserts if absent.
// \param txt New label text.
// \return This menu, to allow call chaining.
Menu&
Menu::changeOptionText(const int id, const std::string& txt)
{
	// Locate the option carrying this ID; it must exist.
	auto option_it = find(id);
	BOOST_ASSERT(option_it != options_.cend());

	// Swap in the new label.
	option_it->txt_.setString(txt);
	return *this;
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Give an existing option its own colorset, overriding the menu default.
// \param id    Identifier of the option; asserts if absent.
// \param color Replacement text/background/border colors.
// \return This menu, to allow call chaining.
Menu&
Menu::changeOptionColor(const int id, const TextBoxColor color)
{
	// Locate the option carrying this ID; it must exist.
	const auto option_it = find(id);
	BOOST_ASSERT(option_it != options_.cend());

	// Recolor by container index via the shared helper.
	setOptionColor(option_it - options_.cbegin(), color);
	return *this;
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// \return True if the menu currently has no options.
bool
Menu::empty()
	const noexcept
{
	return options_.empty();
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Move the cursor up one option, wrapping as appropriate.
// In a horizontal (single-row) menu, "up" is remapped to a leftward step.
void
Menu::moveUp()
	noexcept
{
	// The menu is horizontal exactly when its last option sits on row 0.
	// move() itself handles the empty-menu case, so no extra guard here.
	const auto last_rc = rc1d_conv_.toRowColumn(options_.size() - 1);
	move(last_rc.r_ == 0 ? Direction::Left : Direction::Up);
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Move the cursor down one option, wrapping as appropriate.
// In a horizontal (single-row) menu, "down" is remapped to a rightward step.
void
Menu::moveDown()
	noexcept
{
	// The menu is horizontal exactly when its last option sits on row 0.
	// move() itself handles the empty-menu case, so no extra guard here.
	const auto last_rc = rc1d_conv_.toRowColumn(options_.size() - 1);
	move(last_rc.r_ == 0 ? Direction::Right : Direction::Down);
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Move the cursor right one option, wrapping as appropriate.
// In a vertical (single-column) menu, "right" is remapped to a downward
// step. Unlike the row count used by moveUp()/moveDown(), the column count
// is always capped at the per-page column count, so cols_ == 1 suffices.
void
Menu::moveRight()
	noexcept
{
	// move() itself handles the empty-menu case.
	move(cols_ == 1 ? Direction::Down : Direction::Right);
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Move the cursor left one option, wrapping as appropriate.
// In a vertical (single-column) menu, "left" is remapped to an upward step.
void
Menu::moveLeft()
	noexcept
{
	// move() itself handles the empty-menu case.
	move(cols_ == 1 ? Direction::Up : Direction::Left);
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Render the menu: the box, the page of options under the cursor, and, if
// the options span multiple pages, the page indicator.
// \param window Render target to draw into.
void
Menu::draw(sf::RenderWindow& window)
{
	// Draw the menu box.
	window.draw(box_);

	// All the menu options aren't going to be drawned on screen. Only the page
	// of options that has the cursor needs to be drawn.
	const auto page_sz = cells_.size();
	const auto idx = rc1d_conv_.to1D(cursor_rc_);
	const auto cur_page = idx / page_sz;

	// Draw from the first to the last option of that page. In case that page
	// happens to be the last one, since the page doesn't necessarily have all
	// its rows and columns filled, be sure to stop at the very last option.
	const auto start = cur_page * page_sz;
	const auto n = options_.size();
	const auto end = std::min(start + page_sz, n);

	for (auto i = start; i < end; ++i) {
		drawOption(i, window);
	}

	if (n > page_sz) {
		// The options fill up the menu past one page, so draw the current page
		// number out of the total. This would let the player would know where
		// they are. drawPageRef() draws the navigation arrow indicators as well.
		drawPageRef(window);
	}
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Report which option the cursor is on.
// \return The ID of the hovered option, or an empty optional for an empty
// menu.
std::optional<int>
Menu::cursorAt()
	const
{
	// No options means no cursor to report.
	if (options_.empty()) {
		return std::nullopt;
	}

	// Translate the cursor's (row, column) into an index into the options.
	return options_[rc1d_conv_.to1D(cursor_rc_)].id_;
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Compute and store the render position of one option's text inside its
// background cell.
// \param idx Index into options_; must be in range.
void
Menu::presetTextPosition(const int idx)
{
	BOOST_ASSERT(idx >= 0 && idx < static_cast<decltype(idx)>(options_.size()));

	// Menu options are positioned from left to right down across rows. After
	// a page is filled, the graphical positions start over from the top left
	// for a new page.
	const auto& cell = cells_[idx % cells_.size()];
	auto& txt = options_[idx].txt_;
	txt.setOrigin(cell.getOrigin());
	txt.setPosition(cell.getPosition());

	// Vertically center this option in the cell it is placed in. Horizontal
	// alignment is center if requested during the menu's construction, left
	// otherwise.
	// center_pt is slightly below 0.5 to visually compensate for font metrics.
	constexpr auto center_pt = .475f;
	const auto cell_size = cell.getSize();
	const auto txt_width = txt.getLocalBounds().width;
	const auto vtalign = center_pt * (cell_size.y - char_sz_);
	// Left alignment uses a fixed 10-pixel padding from the cell edge.
	const auto hzalign = align_center_
		? center_pt * (cell_size.x - txt_width)
		: 10.f;

	txt.move(sfVector2( XValue(hzalign), YValue(vtalign) ));
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Assign a colorset to the option at a container index.
// \param idx   Index into options_; must be in range.
// \param color Replacement text/background/border colors.
void
Menu::setOptionColor(const int idx, const TextBoxColor color)
{
	// Index must refer to an existing option.
	BOOST_ASSERT(idx >= 0 && idx < static_cast<decltype(idx)>(options_.size()));
	auto& target = options_[idx];
	target.color_ = color;
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Render one option: its background cell, then its text, using the cursor
// colorset when the cursor is over it.
// \param idx    Index into options_; must be in range.
// \param window Render target to draw into.
void
Menu::drawOption(const int idx, sf::RenderWindow& window)
{
	BOOST_ASSERT(idx >= 0 && idx < static_cast<decltype(idx)>(options_.size()));

	// Although we can have pages of menu options, to save memory, we have only a
	// page worth of cells to use. Grab the one the menu option would be drawn
	// on.
	auto& cell = cells_[idx % cells_.size()];
	auto& option = options_[idx];

	// If the cursor is over this menu option, then use the cursor's colorset
	// instead of the option's normal set.
	const auto cursor_idx = rc1d_conv_.to1D(cursor_rc_);
	const auto color = idx != cursor_idx ? option.color_ : cursor_color_;
	option.txt_.setFillColor(color.txt_);
	cell.setFillColor(color.backgnd_);
	cell.setOutlineColor(color.border_);

	// Draw the cell first, then the text over it.
	window.draw(cell);
	window.draw(option.txt_);
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Render the page reference widget below the menu's bottom-right corner:
// "current / total" page numbers plus, when there are multiple pages,
// up/down navigation arrow indicators.
// \param window Render target to draw into.
void
Menu::drawPageRef(sf::RenderWindow& window)
	const
{
	// Get the page number the cursor is on as well as the total number of pages
	// of options the menu has.
	const auto page_sz = cells_.size();
	const auto npages = (options_.size() - 1) / page_sz + 1;
	const auto cur_page = rc1d_conv_.to1D(cursor_rc_) / page_sz;
	const auto atpage_txt = std::to_string(cur_page + 1)
		+ " / "
		+ std::to_string(npages);

	// Draw a small box that will contain the page numbers and maybe the
	// navigation arrow indicators.
	constexpr auto atpage_box_height = 25.f;
	constexpr auto atpage_box_width = 5.f * atpage_box_height;

	sf::RectangleShape atpage_box(sfVector2(
		XValue(atpage_box_width),
		YValue(atpage_box_height)
	));

	// The box should have the same background layer as the menu box's since it
	// will be appended to the menu.
	atpage_box.setFillColor(box_.getFillColor());
	atpage_box.setOutlineColor(box_.getOutlineColor());
	atpage_box.setOutlineThickness(box_.getOutlineThickness());

	// Place it directly below the bottom right corner of the menu.
	atpage_box.setPosition(box_.getPosition() + box_.getSize());
	atpage_box.move(sfVector2(
		XValue(-atpage_box_width),
		YValue(box_.getOutlineThickness())
	));

	window.draw(atpage_box);

	// Draw the page numbers on the right half of the box.
	constexpr auto atpage_txt_height = float(atpage_box_height) - 9.f;
	sf::Text atpage(atpage_txt, font_, atpage_txt_height);
	atpage.setOrigin(sfVector2( XValue(0.f), YValue(-2.f) ));
	atpage.setPosition(
		atpage_box.getPosition()
		+ sfVector2(XValue(.5f * atpage_box_width), YValue(0.f))
	);

	// Use the menu options' default text color for the page number and
	// navigation arrow indicators.
	atpage.setFillColor(option_color_.txt_);
	window.draw(atpage);

	if (npages > 1) {
		// Draw the navigation arrow indicators on the left half of the box.
		constexpr auto arrow_sz = float(atpage_box_height) - 7.f;
		constexpr auto arrow_radius = .5f * arrow_sz;
		constexpr auto arrow_padding = .5f * arrow_radius;

		// Up arrow: a 3-sided circle shape renders as a triangle.
		sf::CircleShape up(arrow_radius, 3);
		up.setFillColor(option_color_.txt_);
		up.setOrigin(-arrow_padding, -arrow_padding);
		up.setPosition(
			atpage_box.getPosition()
			+ sfVector2(XValue(arrow_padding), YValue(2.f))
		);
		window.draw(up);

		// Down arrow right next to the up arrow: the up arrow flipped
		// vertically via a negative y-scale.
		sf::CircleShape down(up);
		down.scale(sfVector2( XValue(1.f), YValue(-1.f) ));
		down.move(sfVector2(
			XValue(2.f * arrow_radius),
			YValue(2.5f * arrow_radius)
		));
		window.draw(down);
	}
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
void
Menu::move(const Direction dir)
noexcept
{
    // Move the cursor one option in direction `dir`, wrapping at the edges.
    if (options_.empty()) {
        // No menu options => no cursor => no movement.
        return;
    }
    // The cursor should be able to wrap around the ends of the menu. Moving the
    // cursor left when it is at the leftmost option should take it to the
    // rightmost option at the same row, and vice versa. Similarly, moving it up
    // when it is at the topmost option should take it to the bottomost option at
    // the same column, and vice versa. The wrapping should take into account
    // that the bottomost row may be partially filled, limiting which column the
    // cursor can move to in the last row.
    // Get the row and column indices of the last option.
    const auto last = rc1d_conv_.toRowColumn(options_.size() - 1);
    // Get the rightmost column at the current row the cursor is on. It's needed
    // when moving left and right.
    auto& [r, c] = cursor_rc_;
    const auto right_c = r < last.r_ ? cols_ - Column(1) : last.c_;
    switch (dir) {
    // Up/down changes the row index.
    // If the cursor will move to the bottom row but there's no option exactly
    // below it, move it to the last option.
    case Direction::Up:
        r = r > 0 ? r - Row(1) : last.r_;
        c = r < last.r_ ? c : std::min(c, last.c_);
        break;
    case Direction::Down:
        r = r < last.r_ ? r + Row(1) : Row(0);
        c = r < last.r_ ? c : std::min(c, last.c_);
        break;
    // Left/right changes the column index
    case Direction::Right:
        c = c < right_c ? c + Column(1) : Column(0);
        break;
    case Direction::Left:
        c = c > 0 ? c - Column(1) : right_c;
        break;
    default:
        break;
    }
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
auto
Menu::find(const int id)
-> decltype(options_.begin())
{
    // Locate the menu option carrying identifier `id`. Returns an iterator
    // to that option, or options_.end() when no option has that identifier.
    const auto sentinel = options_.end();
    for (auto it = options_.begin(); it != sentinel; ++it) {
        if (it->id_ == id) {
            return it;
        }
    }
    return sentinel;
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Delegating constructor: unpack the aggregated CtorArgs (typically produced
// by parseFile) and forward each field to the main constructor.
Menu::Menu(CtorArgs args)
    : Menu(
        args.pos_,
        args.dim_,
        args.rows_,
        args.cols_,
        args.outer_margins_,
        args.inner_margins_,
        args.align_center_,
        args.char_sz_,
        args.option_color_,
        args.cursor_color_,
        args.box_color_,
        args.font_file_
    )
{
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Parse menu customizations (geometry, grid shape, colors, font) from a json
// file and package them as CtorArgs. If any expected key is missing, a
// default-constructed CtorArgs is returned instead of propagating the error.
typename Menu::CtorArgs
Menu::parseFile(const std::string& file)
const
{
    // Load json file.
    std::ifstream ifs(file);
    BOOST_ASSERT(ifs.is_open());
    nlohmann::json js;
    ifs >> js;
    // Key names expected in the json file.
    constexpr auto position = "position";
    constexpr auto dimensions = "dimensions";
    constexpr auto margins = "margins";
    constexpr auto horizontal = "horizontal";
    constexpr auto vertical = "vertical";
    constexpr auto options = "options";
    constexpr auto cursor = "cursor";
    constexpr auto box = "box";
    constexpr auto colors = "colors";
    constexpr auto text = "text";
    constexpr auto background = "background";
    constexpr auto border = "border";
    try {
        // Extract the customizations. json::at throws out_of_range on a
        // missing key, which is caught below.
        const auto pos = XYPair(
            XValue(js.at(position).at("x")),
            YValue(js.at(position).at("y"))
        );
        const auto dim = XYPair(
            XValue(js.at(dimensions).at("width")),
            YValue(js.at(dimensions).at("height"))
        );
        const auto outer_margins = XYPair(
            XValue(js.at(box).at(margins).at(horizontal)),
            YValue(js.at(box).at(margins).at(vertical))
        );
        const auto inner_margins = XYPair(
            XValue(js.at(options).at(margins).at(horizontal)),
            YValue(js.at(options).at(margins).at(vertical))
        );
        const auto rows = Row(js.at(options).at("rows"));
        const auto cols = Column(js.at(options).at("columns"));
        const auto align_center = js.at(options).at("center");
        const auto char_sz = js.at(options).at("size");
        const auto font_file = js.at("font");
        const auto option_color = TextBoxColor(
            sfMakeColor(js.at(options).at(colors).at(text)),
            sfMakeColor(js.at(options).at(colors).at(background)),
            sfMakeColor(js.at(options).at(colors).at(border))
        );
        const auto cursor_color = TextBoxColor(
            sfMakeColor(js.at(cursor).at(colors).at(text)),
            sfMakeColor(js.at(cursor).at(colors).at(background)),
            sfMakeColor(js.at(cursor).at(colors).at(border))
        );
        // The menu box has no text of its own, so its text color is fixed.
        const auto box_color = TextBoxColor(
            sf::Color::Black,
            sfMakeColor(js.at(box).at(colors).at(background)),
            sfMakeColor(js.at(box).at(colors).at(border))
        );
        return {
            pos,
            dim,
            rows,
            cols,
            outer_margins,
            inner_margins,
            align_center,
            char_sz,
            option_color,
            cursor_color,
            box_color,
            font_file
        };
    }
    catch (const nlohmann::json::out_of_range& e) {
        // BOOST_LOG_TRIVIAL(error) << file << " parsing failed. " << e.what();
        return CtorArgs();
    }
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
// Aggregate all menu construction parameters into one bundle; each argument
// is stored as-is in the corresponding member.
Menu::CtorArgs::CtorArgs(
    const XYPair pos,
    const XYPair dim,
    const Row rows,
    const Column cols,
    const XYPair outer_margins,
    const XYPair inner_margins,
    const bool align_center,
    const size_t char_sz,
    const TextBoxColor option_color,
    const TextBoxColor cursor_color,
    const TextBoxColor box_color,
    const std::string& font_file
)
    : pos_(pos)
    , dim_(dim)
    , rows_(rows)
    , cols_(cols)
    , outer_margins_(outer_margins)
    , inner_margins_(inner_margins)
    , align_center_(align_center)
    , char_sz_(char_sz)
    , option_color_(option_color)
    , cursor_color_(cursor_color)
    , box_color_(box_color)
    , font_file_(font_file)
{
}
////////////////////////////////////////////////////////////////////////////////
// //
////////////////////////////////////////////////////////////////////////////////
}
|
{"hexsha": "84a52b96ef5e4034f03838adec5525fa272c2515", "size": 23075, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/menu/Menu.cpp", "max_stars_repo_name": "zhec9/nemo", "max_stars_repo_head_hexsha": "b719b89933ce722a14355e7ed825a76dea680501", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/menu/Menu.cpp", "max_issues_repo_name": "zhec9/nemo", "max_issues_repo_head_hexsha": "b719b89933ce722a14355e7ed825a76dea680501", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/menu/Menu.cpp", "max_forks_repo_name": "zhec9/nemo", "max_forks_repo_head_hexsha": "b719b89933ce722a14355e7ed825a76dea680501", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6964285714, "max_line_length": 80, "alphanum_fraction": 0.523900325, "num_tokens": 4867}
|
# Electro I. Bonus question 2: graphical DC analysis of a diode circuit.
# Author : Rafael Moreno
# Date   : 24/01/20
# Prof   : Anibal Carpio
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np

# Load the simulated diode sweep exported as CSV.
filename = 'Grafica Diodo 1-n4004 GP.csv'
data = pd.read_csv(filename)
# Diode I-V characteristic (column names as exported by the simulator).
Vd = data['X--Trace 1::[V_pn]']
Id = data['Y--Trace 1::[V_pn]']
# Load line: Il = Vps / R - Vd / R, with Vps = 5 V and R = 2 kOhm.
Il = 5 / 2000 - Vd / 2000
# Convert currents from (A) to (mA).
Id = Id * 1000
Il = Il * 1000
# Find the Q-point: a sign change of (Il - Id) marks where the load line
# crosses the diode curve; argwhere/flatten yields the crossing indices.
idx = np.argwhere(np.diff(np.sign(Il - Id))).flatten()
# BUG FIX: `idx` is an array, so the original int(idx) / float(Id[idx])
# raised TypeError whenever there was more (or less) than exactly one
# crossing. Use the first crossing explicitly (IndexError here would mean
# the load line never crosses the measured curve).
q = int(idx[0])
# Plotting
plt.plot(Vd, Id, label = 'Característica I-V del Diodo')
plt.plot(Vd, Il, label = 'Linea de carga')
plt.ylim(0, 3)
plt.grid()
plt.ylabel('Id (mA)')
plt.xlabel('Vd (V)')
plt.title('Análisis en DC Gráfico')
plt.plot(Vd[q], Id[q], 'ro', label = f'Q-point ({Vd[q]} V,{float(Id[q])} mA)')
plt.legend(loc = 'best')
plt.show()
|
{"hexsha": "c277c7554670717fe7570f08d58e4bbfa1da4997", "size": 1177, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pregunta bono 2.py", "max_stars_repo_name": "ImArcangel/Q-Point-Diode", "max_stars_repo_head_hexsha": "b8c8825493ffe7f286c07a9e212ac5b18ec3bcb6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Pregunta bono 2.py", "max_issues_repo_name": "ImArcangel/Q-Point-Diode", "max_issues_repo_head_hexsha": "b8c8825493ffe7f286c07a9e212ac5b18ec3bcb6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pregunta bono 2.py", "max_forks_repo_name": "ImArcangel/Q-Point-Diode", "max_forks_repo_head_hexsha": "b8c8825493ffe7f286c07a9e212ac5b18ec3bcb6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.75, "max_line_length": 91, "alphanum_fraction": 0.6703483432, "include": true, "reason": "import numpy", "num_tokens": 398}
|
section "Invariant Context Simplifications"

theory invContext_simps
  imports repliss_sem
begin

text "Here we prove various simplifications for the invariant contexts."

(* Extending happens-before with edges into a call c of a still-uncommitted
   transaction does not change the invariant context (such calls are filtered
   out via committedCallsH). *)
lemma invContext_unchanged_happensBefore:
  assumes "co c \<triangleq> t" and "ts t \<triangleq> Uncommitted"
  shows "invContextH co to ts (hbOld \<union> vis \<times> {c}) cs ki io ir
     = invContextH co to ts hbOld cs ki io ir "
  using assms by (auto simp add: invContextH_def restrict_relation_def committedCallsH_def isCommittedH_def)

(* Same as above for a call that has no transaction origin at all. *)
lemma invContext_unchanged_happensBefore2:
  assumes "co c = None"
  shows "invContextH co to ts (hbOld \<union> vis \<times> {c}) cs ki io ir
     = invContextH co to ts hbOld cs ki io ir "
  using assms by (auto simp add: invContextH_def restrict_relation_def committedCallsH_def isCommittedH_def)

(* A call without a transaction origin is never a committed call. *)
lemma committedCallsH_notin:
  assumes "co c = None"
  shows "c \<notin> committedCallsH co ts"
  by (simp add: assms committedCallsH_def isCommittedH_def)

(* Membership in committedCallsH unfolded into a case distinction on the
   call's transaction origin. *)
lemma committedCallsH_in:
  shows "(c \<in> committedCallsH co ts) \<longleftrightarrow> (case co c of None \<Rightarrow> False | Some t \<Rightarrow> ts t \<triangleq> Committed) "
  by (auto simp add: committedCallsH_def isCommittedH_def split: option.splits)

(* Assigning a fresh call to an uncommitted transaction leaves the set of
   committed calls unchanged. *)
lemma committedCalls_unchanged_callOrigin:
  assumes a1: "ts t \<triangleq> Uncommitted"
    and a2: "co c = None"
  shows "committedCallsH (co(c \<mapsto> t)) ts = committedCallsH co ts"
  using a1 a2 by (auto simp add: committedCallsH_def isCommittedH_def)

(* Hence updating the call-origin map for such a call preserves the context. *)
lemma invContextH_map_update_all:
  assumes "co c = None" and "ts t \<triangleq> Uncommitted"
  shows "invContextH (co(c \<mapsto> t)) to ts hb cs ki io ir =
       invContextH co to ts hb cs ki io ir "
  using assms by (auto simp add: invContextH_def committedCallsH_notin committedCalls_unchanged_callOrigin)

(* Updating the data of a call belonging to an uncommitted transaction
   preserves the context as well. *)
lemma invContextH_update_calls:
  assumes "co c \<triangleq> t" and "ts t \<triangleq> Uncommitted"
  shows "invContextH co to ts hb (cs(c \<mapsto> newCall)) ki io ir =
       invContextH co to ts hb cs ki io ir "
  using assms by (auto simp add: invContextH_def committedCallsH_in)

(* Registering a new transaction as Uncommitted adds no committed calls. *)
lemma committedCallsH_update_uncommitted:
  assumes "ts t = None"
  shows "committedCallsH co (ts(t \<mapsto> Uncommitted))
     = committedCallsH co ts"
  using assms by (auto simp add: committedCallsH_def isCommittedH_def, force)

lemma invContextH_update_txstatus:
  assumes "ts t = None"
  shows "invContextH co to (ts(t\<mapsto>Uncommitted)) hb cs ki io ir =
       invContextH co to ts hb cs ki io ir "
  using assms by (auto simp add: invContextH_def restrict_map_def committedCallsH_update_uncommitted)

(* Bundle of the context-preservation lemmas for use as a simp set. *)
lemmas invContextH_simps = invContextH_update_calls invContextH_map_update_all invContextH_update_txstatus

end
|
{"author": "peterzeller", "repo": "repliss-isabelle", "sha": "f43744678cc9c5a4684e8bd0e9c83510bae1d9a4", "save_path": "github-repos/isabelle/peterzeller-repliss-isabelle", "path": "github-repos/isabelle/peterzeller-repliss-isabelle/repliss-isabelle-f43744678cc9c5a4684e8bd0e9c83510bae1d9a4/invContext_simps.thy"}
|
from controlSBML.control_sbml import ControlSBML
from controlSBML import control_sbml
import helpers
import numpy as np
import pandas as pd
import os
import unittest
import tellurium as te
IGNORE_TEST = False
IS_PLOT = False
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
ANTIMONY_FILE = os.path.join(TEST_DIR, "Model_antimony.ant")
#############################
# Tests
#############################
class TestControlSBML(unittest.TestCase):
    """Unit tests for the ControlSBML wrapper."""

    def setUp(self):
        # Cannot modify self.control
        # Fresh wrapper per test, built from the bundled Antimony model file.
        self.ctlsb = ControlSBML(ANTIMONY_FILE)

    def testConstructor(self):
        """Constructing from a file path should expose a RoadRunner model."""
        if IGNORE_TEST:
            return
        self.assertTrue("RoadRunner" in str(type(self.ctlsb.roadrunner)))

    def testConstructWithRoadrunner(self):
        """Constructing from a loaded roadrunner model and from its file path
        should yield equivalent wrappers (identical get() values)."""
        if IGNORE_TEST:
            return
        model = te.loada(helpers.TEST_PATH_1)
        ctlsb1 = ControlSBML(model)
        ctlsb2 = ControlSBML(helpers.TEST_PATH_1)
        # Symmetric difference is empty iff both report the same values.
        diff = set(ctlsb1.get().values()).symmetric_difference(
            ctlsb2.get().values())
        self.assertEqual(len(diff), 0)
# Allow running this test module directly (outside a pytest/unittest runner).
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "ccae3a660c1f6642e6914411c9b26ebd9748a502", "size": 1114, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_control_sbml.py", "max_stars_repo_name": "ModelEngineering/controlSBML", "max_stars_repo_head_hexsha": "64587a7b22961a52aaf2b4e4a04fd1a0e3bc7a6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_control_sbml.py", "max_issues_repo_name": "ModelEngineering/controlSBML", "max_issues_repo_head_hexsha": "64587a7b22961a52aaf2b4e4a04fd1a0e3bc7a6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_control_sbml.py", "max_forks_repo_name": "ModelEngineering/controlSBML", "max_forks_repo_head_hexsha": "64587a7b22961a52aaf2b4e4a04fd1a0e3bc7a6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2173913043, "max_line_length": 73, "alphanum_fraction": 0.6570915619, "include": true, "reason": "import numpy", "num_tokens": 264}
|
from dataclasses import dataclass
from typing import List, Literal
from numpy import positive
from xarray_dataclasses import Attr
from datetime import datetime
from toolz import curry
@dataclass
class VariableAttrs:
    """CF-style attributes for a generic data variable (no defaults)."""

    standard_name: str
    long_name: str
    units: str
@dataclass
class AltitudeAttrs:
    """CF attribute defaults for an altitude (height above surface) coordinate."""

    standard_name: str = "height"
    long_name: str = "vertical distance above the surface"
    units: str = "m"
    # CF convention: values increase upward.
    positive: str = "up"
    axis: str = "Z"
@dataclass
class LatitudeAttrs:
    """CF attribute defaults for a latitude coordinate (WGS 84 / EPSG:4326)."""

    standard_name: str = "latitude"
    units: str = "degree_north"
    valid_min: float = -90.0
    valid_max: float = 90.0
    axis: str = "Y"
    grid_mapping: str = "crs"
    coordinate_reference_frame: str = "urn:ogc:def:crs:EPSG::4326"
@dataclass
class LongitudeAttrs:
    """CF attribute defaults for a longitude coordinate (WGS 84 / EPSG:4326)."""

    standard_name: str = "longitude"
    units: str = "degree_east"
    valid_min: float = -180.0
    valid_max: float = 180.0
    axis: str = "X"
    grid_mapping: str = "crs"
    coordinate_reference_frame: str = "urn:ogc:def:crs:EPSG::4326"
@dataclass
class TimeAttrs:
    """Specs for the Time axis."""

    standard_name: str = "time"
    long_name: str = "Time of measurement"
    axis: str = "T"
    # units is filled by xarray, based on time interval
@dataclass
class DepthAttrs:
    """Specs for the Z axis."""

    standard_name: str = "depth"
    long_name: str = "Depth of measurement"
    # CF convention: depth values increase downward from sea level.
    positive: str = "down"
    units: str = "m"
    axis: str = "Z"
    reference: str = "sea_level"
    coordinate_reference_frame: str = "urn:ogc:def:crs:EPSG::CRF 5831"
@dataclass
class DatasetAttrs:
    """ACDD/CF global attributes for a dataset; required fields first,
    NIVA-specific defaults after."""

    title: str
    date_created: datetime
    keywords: List[str]
    time_coverage_start: str
    time_coverage_end: str
    # Geospatial bounding box, in degrees.
    geospatial_lat_min: float
    geospatial_lat_max: float
    geospatial_lon_min: float
    geospatial_lon_max: float
    featureType: str
    keywords_vocabulary: str = "GCM:GCMD Keywords"
    data_owner: str = "Norwegian Institute for Water Research"
    summary: str = ""
    geospatial_vertical_positive: str = "down"
    processing_level: str = "Missing data has been filled with fillValue."
    Conventions: str = "CF-1.6, ACDD-1.3"
    netcdf_version: str = "4"
    publisher_name: str = "NIVA"
    # NOTE(review): "[..]" looks like an obfuscated "@" -- confirm the
    # intended publisher address before publishing datasets.
    publisher_email: str = "post[..]niva.no"
    publisher_url: str = "niva.no"
    licence: str = 'Freely distributed. Must credit the source of data, e.g. "Data fra Norsk Institut for Vannforskning", "Based on data from the Norwegian Institute for Water Research". Data and products are licensed under Norwegian license for public data (NLOD) and Creative Commons Attribution 3.0 Norway.'
    history: str = "Initial data"
|
{"hexsha": "38f02f9a484bdbd012a489716faa48ac6761e589", "size": 2638, "ext": "py", "lang": "Python", "max_stars_repo_path": "hello-xarray/cfxarray/attributes.py", "max_stars_repo_name": "NIVANorge/s-enda-playground", "max_stars_repo_head_hexsha": "56ae0a8978f0ba8a5546330786c882c31e17757a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hello-xarray/cfxarray/attributes.py", "max_issues_repo_name": "NIVANorge/s-enda-playground", "max_issues_repo_head_hexsha": "56ae0a8978f0ba8a5546330786c882c31e17757a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hello-xarray/cfxarray/attributes.py", "max_forks_repo_name": "NIVANorge/s-enda-playground", "max_forks_repo_head_hexsha": "56ae0a8978f0ba8a5546330786c882c31e17757a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7684210526, "max_line_length": 310, "alphanum_fraction": 0.6899166035, "include": true, "reason": "from numpy", "num_tokens": 742}
|
from screeninfo import get_monitors
import pygame
from pygame.locals import *
import os
import sys
from flick import Flick
import time
from record_data import RecordData
from live_recorder import LiveRecorder
from sklearn.externals import joblib
import numpy as np
from preprocess import preprocess_recordings
from subprocess import Popen
pygame.init()
def get_display_resolution():
    """Return half of the primary monitor's width and height, in pixels.

    Returns:
        tuple: (width / 2, height / 2) of the first monitor reported by
        screeninfo.

    The previous implementation parsed ``str(get_monitors()[0])`` with ad-hoc
    string surgery, which breaks whenever screeninfo changes its repr format.
    The ``Monitor`` object exposes ``width`` and ``height`` attributes
    directly, so use those instead.
    """
    monitor = get_monitors()[0]
    return (monitor.width / 2, monitor.height / 2)
def time_str():
    """Current UTC time formatted as HH_MM_DD_MM_YYYY, for use in filenames."""
    stamp_format = "%H_%M_%d_%m_%Y"
    return time.strftime(stamp_format, time.gmtime())
def render_waiting_screen(text_string=None, time_black = 0.):
    """Show a fullscreen waiting screen until dismissed.

    text_string -- optional message shown above the "press any key" prompt
    time_black  -- if > 0, show a black screen that auto-dismisses after
                   that many seconds instead of waiting for a key press
    Returns False when dismissed; exits the program on ESC or window close.
    """
    pygame.font.init()
    # get_display_resolution() returns half-dimensions, so double them back.
    display_x, display_y = get_display_resolution()
    display_x, display_y = (2 * display_x, 2 * display_y)
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    window = pygame.display.set_mode((display_x, display_y), pygame.NOFRAME, 32)
    pygame.display.set_caption("SSVEP")
    if time_black > 0:
        # Timed black pause: schedule a one-shot user event for dismissal.
        window.fill((0., 0., 0.))
        timer_event = USEREVENT + 1
        pygame.time.set_timer(timer_event, int(time_black)*1000)
    else:
        # Interactive prompt: render the message and the key-press hint.
        myfont = pygame.font.SysFont("arial", 50)
        press_string = "Please press the Any-Key to continue..."
        textsurface1 = myfont.render(press_string, False, (0, 0, 0))
        text_rect1 = textsurface1.get_rect(center=(display_x/2, display_y/2+100))
        if text_string:
            textsurface2 = myfont.render(text_string, False, (0, 0, 0))
            text_rect2 = textsurface2.get_rect(center=(display_x/2, display_y/2-100))
        window.fill((100, 100, 150))
        window.blit(textsurface1, text_rect1)
        if text_string:
            window.blit(textsurface2, text_rect2)
        pygame.display.update()
    # Event loop: ESC/close quits the program; any other key (or the timer
    # in the timed variant) dismisses the screen.
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    exit()
                else:
                    pygame.quit()
                    return False
            if not (time_black > 0.):
                window.blit(textsurface1, text_rect1)
                if text_string:
                    window.blit(textsurface2, text_rect2)
            else:
                if event.type == timer_event:
                    pygame.quit()
                    return False
        pygame.display.update()
def begin_experiment_1(freq, trials=20):
    """Run the SSVEP recording experiment for a single flicker frequency.

    freq   -- flicker frequency in Hz; also used as the trial label
    trials -- number of flicker trials to record (15 s each)
    Saves the recording to REC/<timestamp>_freq_<freq>.mat and exits the
    process when finished.
    """
    if not os.path.isdir("REC"):
        os.mkdir("REC")
    render_waiting_screen("Welcome to this experiment")
    render_waiting_screen("The experiment will start now... there will be breaks between the flickering tiles!")
    # NOTE(review): the meaning of RecordData(256., 20., freq) arguments is
    # not visible here -- presumably sample rate and duration; confirm in
    # record_data.py.
    recorder = RecordData(256., 20., freq)
    recorder.start_recording()
    for i in range(0, int(trials)):
        # Label the flicker period with the stimulus frequency.
        recorder.add_trial(int(freq))
        Flick(float(freq)).flicker(15.)
        # Label the rest period with 0 and pause on a black screen.
        recorder.add_trial(0.)
        render_waiting_screen(text_string=None, time_black=5.)
    filename = "REC/%s_freq_%s.mat" % (time_str(), freq)
    recorder.stop_recording_and_dump(filename)
    recorder.killswitch.terminate = True
    recorder = None
    render_waiting_screen("That was the last one, thank you for participation!")
    sys.exit()
def begin_experiment_2(str_list):
    """Black out the screen and launch the given commands as child processes.

    str_list -- list of command strings (one per flicker process to spawn).
    On Windows each command is started via Popen; on POSIX the commands are
    joined with "|" and handed to the shell.
    """
    display_x, display_y = get_display_resolution()
    display_x, display_y = (2 * display_x, 2 * display_y)
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    window = pygame.display.set_mode((display_x, display_y), pygame.NOFRAME, 32)
    pygame.display.set_caption("SSVEP")
    window.fill((0, 0, 0))
    pygame.display.update()
    if os.name == 'nt':
        for command in str_list:
            command_parts = command.split(" ")
            #print("start /d "+command)
            #os.system("start /d "+command)
            Popen(command_parts)
    elif os.name == 'posix':
        # NOTE(review): "|" pipes the commands into each other; if the intent
        # was to run them concurrently, "&" would be expected -- confirm.
        os.system("|".join(str_list))
    else:
        print("Could not get OS-name!")
def start_live_classifier():
    """Live SSVEP classification loop.

    Opens a small status window, streams features from a LiveRecorder, runs
    three pre-trained classifiers (QDA/LDA/MLP) once per second, majority-votes
    over the last 10 rounds, and displays an arrow for the winning frequency
    (13/15/17/19 Hz map to up/right/down/left). ESC stops the loop.
    NOTE(review): indentation was reconstructed from a whitespace-stripped
    dump -- verify nesting against the original before relying on it.
    """
    window_metrics = (200, 200)
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    window = pygame.display.set_mode(window_metrics, pygame.NOFRAME, 0)
    pygame.display.set_caption("classifier window")
    pygame.mouse.set_visible(False)
    arrow = pygame.transform.scale(pygame.image.load("src/res/arrow.png"), window_metrics)
    stop = pygame.transform.scale(pygame.image.load("src/res/stop.png"), window_metrics)
    arrow_metrics = window_metrics
    window.blit(stop, (0, 0))
    pygame.display.update()
    # Start Recording
    recorder = LiveRecorder()
    recorder.start_recording()
    time.sleep(1)
    #labels, features = getData(np.load('19_06_05_07_2017_freq_19.mat.npy'))
    do_run = True
    model_file_QDA, model_file_LDA, model_file_MLP = ('src/QDA.pkl', 'src/LDA.pkl', 'src/MLP.pkl')
    QDA = joblib.load(model_file_QDA)
    LDA = joblib.load(model_file_LDA)
    MLP = joblib.load(model_file_MLP)
    label = None
    time.sleep(5)
    label_list = []
    while do_run:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            # Handle either a real key press or a classifier-produced label.
            if event.type == KEYDOWN or label:
                try:
                    a = event.key
                    label = None
                except AttributeError:
                    # Event carries no key attribute; treat as "no key".
                    event.key = None
                if event.key == K_ESCAPE:
                    do_run = False
                elif event.key == K_UP or label == 13.0: # UP is 13 Hz
                    window.fill((0., 0., 0.))
                    window.blit(rot_center(arrow, 180), (0, 0))
                    # TODO move robot up
                elif event.key == K_DOWN or label == 17.0: # DOWN is 17 Hz
                    window.fill((0., 0., 0.))
                    window.blit(arrow, (0, 0))
                    # TODO move robot down
                elif event.key == K_RIGHT or label == 15.0: # RIGHT is 15 Hz
                    window.fill((0., 0., 0.))
                    window.blit(rot_center(arrow, 90), (0, 0))
                    # TODO move robot right
                elif event.key == K_LEFT or label == 19.0: # LEFT is 19 Hz:
                    window.fill((0., 0., 0.))
                    window.blit(rot_center(arrow, 270), (0, 0))
                    # TODO move robot left
                elif event.key == K_SPACE or label == 0.0: # No frequency
                    window.fill((0., 0., 0.))
                    window.blit(stop, (0, 0))
                    # TODO stop robot
                label = None
            elif event.type == KEYUP:
                window.fill((0., 0., 0.))
                window.blit(stop, (0, 0))
                # TODO stop robot
        pygame.display.flip()
        # Classify the latest feature vector with all three models.
        features = recorder.get_features()
        #print(features)
        label_LDA = LDA.predict([features])[0]
        label_QDA = QDA.predict([features])[0]
        label_MLP = MLP.predict([features])[0]
        print("LDA: %s QDA: %s MLP: %s" %(label_LDA, label_QDA, label_MLP))
        for tmp_label in [label_LDA, label_QDA, label_MLP]:
            label_list.append(tmp_label)
        # After 10 rounds (3 predictions each) pick the majority frequency.
        if len(label_list) >= 10*3:
            count_list = [label_list.count(13.), label_list.count(15.)]
            count_list.append(label_list.count(17.))
            count_list.append(label_list.count(19.))
            index = np.argmax(count_list)
            label = [13., 15., 17., 19.][index]
            print("Mayor Label: %s" % label)
            label_list = []
        #print("Recognized Label: %s" % label)
        time.sleep(1.)
    # May dump labeled data after recording?
    # NOTE(review): BUG -- `freq` is not defined in this function, so this
    # line raises NameError when the loop exits via ESC; the filename is
    # also never used. Confirm intent and fix or remove.
    filename = "REC/live_%s_freq_%s.mat" % (time_str(), freq)
def rot_center(image, angle):
    """Rotate *image* by *angle* degrees about its center, keeping the
    original surface size (the rotated surface is cropped back to the
    original rect, centered)."""
    original_rect = image.get_rect()
    rotated = pygame.transform.rotate(image, angle)
    crop_rect = original_rect.copy()
    crop_rect.center = rotated.get_rect().center
    return rotated.subsurface(crop_rect).copy()
if __name__ == "__main__":
    # BUG FIX: begin_experiment_1 requires a flicker frequency (it has no
    # default), so the original bare call always raised TypeError. Take the
    # frequency in Hz from the command line, defaulting to 15 Hz.
    freq = sys.argv[1] if len(sys.argv) > 1 else 15
    begin_experiment_1(freq)
    exit()
|
{"hexsha": "b31eb948baa84b8041cdc6ef2c36f2f6296ef6ac", "size": 8231, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ssvep/utils.py", "max_stars_repo_name": "gumpy-hybridBCI/gumpy-Realtime", "max_stars_repo_head_hexsha": "163f72ff4d8734cbfd13848e21ce7d4cafc6e8e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-04-07T17:30:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-23T09:50:53.000Z", "max_issues_repo_path": "src/utils.py", "max_issues_repo_name": "swxie/SSVEP-Brain-Computer-Interface", "max_issues_repo_head_hexsha": "1c4a0c899475d484f4427a94e65cfbd8b71c6904", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-02-09T03:52:20.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-09T03:52:20.000Z", "max_forks_repo_path": "src/utils.py", "max_forks_repo_name": "swxie/SSVEP-Brain-Computer-Interface", "max_forks_repo_head_hexsha": "1c4a0c899475d484f4427a94e65cfbd8b71c6904", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-01-12T02:48:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T20:07:51.000Z", "avg_line_length": 35.3261802575, "max_line_length": 112, "alphanum_fraction": 0.5821892844, "include": true, "reason": "import numpy", "num_tokens": 2006}
|
[STATEMENT]
lemma dbm_entry_dbm_min3:
assumes "dbm_entry_val u (Some c) None (min a b)"
shows "dbm_entry_val u (Some c) None b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. dbm_entry_val u (Some c) None b
[PROOF STEP]
using dbm_entry_val_mono_3[folded less_eq, OF assms]
[PROOF STATE]
proof (prove)
using this:
min a b \<le> ?b' \<Longrightarrow> dbm_entry_val u (Some c) None ?b'
goal (1 subgoal):
1. dbm_entry_val u (Some c) None b
[PROOF STEP]
by auto
|
{"llama_tokens": 206, "file": "Timed_Automata_DBM_Operations", "length": 2}
|
[STATEMENT]
lemma additive_wp_PC:
"\<lbrakk> additive (wp a); additive (wp b) \<rbrakk> \<Longrightarrow> additive (wp (a \<^bsub>P\<^esub>\<oplus> b))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Transformers.additive (wp a); Transformers.additive (wp b)\<rbrakk> \<Longrightarrow> Transformers.additive (wp (a \<^bsub>P\<^esub>\<oplus> b))
[PROOF STEP]
by(rule additiveI, simp add:additiveD field_simps wp_eval)
|
{"llama_tokens": 171, "file": "pGCL_Determinism", "length": 1}
|
#!/usr/bin/env python
"""Compare the master surface fits (tck.pickle) against bootstrap fits
(bs_tck.pickle) and plot the bootstrap dispersion per photometric band."""
import sys,os,string
from numpy import *
from scipy.interpolate import *
from myplotlib import PanelPlot
from matplotlib import pyplot
import pickle

tck_file0 = 'tck.pickle'
tck_file1 = 'bs_tck.pickle'
# BUG FIX: pickle files must be opened in binary mode (text mode fails on
# Python 3); `with` also guarantees the handles are closed on error.
with open(tck_file0, 'rb') as f:
    all_tck0 = pickle.load(f)
with open(tck_file1, 'rb') as f:
    all_tcks = pickle.load(f)

# Evaluation grid: dm15 decline-rate values and epochs (days from B max).
dm15_low = 0.6
dm15_high = 2.0
t_low = -10.0
t_high = 70
dm15s = [0.9, 1.1, 1.3, 1.5, 1.7, 1.9]
ts = arange(81)/80.0*(t_high - t_low) + t_low
#mp = PanelPlot(3,3)
mp2 = PanelPlot(3,3)
bands = ['u','B','V','g','r','i','Y','J','H']
for j in range(len(bands)):
    band = bands[j]
    tck0 = all_tck0[band]
    tcks = all_tcks[band]
    #mp.axes[j].text(0.5, 0.9, band, transform=mp.axes[j].transAxes,
    #   verticalalignment='top', horizontalalignment='center')
    mp2.axes[j].text(0.5, 0.9, band, transform=mp2.axes[j].transAxes,
        verticalalignment='top', horizontalalignment='center')
    for k in range(len(dm15s)):
        dzs = []
        # Master-fit light curve at this dm15.
        z0 = bisplev(ts, dm15s[k], tck0)[:,0]
        for i in range(len(tcks)):
            z1 = bisplev(ts, dm15s[k], tcks[i])[:,0]
            # Skip wildly deviant bootstrap realizations (> 0.15 mag).
            # BUG FIX: numpy.sometrue was deprecated and removed in
            # NumPy 2.0; ndarray.any() is the supported equivalent.
            if (absolute(z1 - z0) > 0.15).any():
                continue
            dzs.append(z1 - z0)
            if k == 1:
                mp2.axes[j].plot(ts, dzs[-1], color='0.65')
        dzs = array(dzs)
        # RMS and median absolute deviation of the bootstrap differences.
        rms = sqrt(mean(power(dzs, 2), axis=0))
        mads = median(absolute(dzs), axis=0)
        #mp.axes[j].plot(ts, 1.49*mads, '-', label='%.1f' % (dm15s[k]))
        if k == 1:
            mp2.axes[j].plot(ts, rms, '-', color='red', linewidth=2)
#mp.axes[0].legend(prop={'size':8})
#mp.xlabel('$t - t_{max}(B)$ (days)')
mp2.xlabel('$t - t_{max}(B)$ (days)')
#mp.ylabel('Median Absolute Deviation')
mp2.ylabel('Master - Bootstrap')
#mp.set_limits(all_equal=1)
mp2.set_limits(all_equal=1)
#mp.draw()
mp2.draw()
pyplot.show()
#mp.close()
mp2.close()
|
{"hexsha": "264f452106edc5a2b7227625d5f9aebc95b80ce9", "size": 1852, "ext": "py", "lang": "Python", "max_stars_repo_path": "snpy/CSPtemp/plot_bs_disps.py", "max_stars_repo_name": "emirkmo/snpy", "max_stars_repo_head_hexsha": "2a0153c84477ba8a30310d7dbca3d5a8f24de3c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-01-14T19:40:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-05T12:19:39.000Z", "max_issues_repo_path": "snpy/CSPtemp/plot_bs_disps.py", "max_issues_repo_name": "emirkmo/snpy", "max_issues_repo_head_hexsha": "2a0153c84477ba8a30310d7dbca3d5a8f24de3c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-04-25T20:06:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-09T20:46:41.000Z", "max_forks_repo_path": "snpy/CSPtemp/plot_bs_disps.py", "max_forks_repo_name": "emirkmo/snpy", "max_forks_repo_head_hexsha": "2a0153c84477ba8a30310d7dbca3d5a8f24de3c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2017-04-25T19:57:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-12T11:54:19.000Z", "avg_line_length": 26.0845070423, "max_line_length": 69, "alphanum_fraction": 0.6020518359, "include": true, "reason": "from numpy,from scipy", "num_tokens": 659}
|
% Author: Cristian Gonzales
% Created for Physical Time, 2018
\documentclass[11pt]{article}
\usepackage[margin=1in]{geometry}
\usepackage[utf8]{inputenc}
\usepackage[english]{babel}
\usepackage[document]{ragged2e}
\newcommand\tab[1][1cm]{\hspace*{#1}}
\begin{document}
\Large{\textbf{Sprint 2 Plan}}\\
\Large{\textbf{Product: Physical Time iOS Application}}\\
\Large{\textbf{Team: The Physical Time Team}}\\
\Large{\textbf{Date: February 4, 2018}}\\
%\Large{\textbf{Revised: March 7, 2018}}\\
\vspace{-3mm}
\section{Goal}
\vspace{-3mm}
\tab \normalsize{In short, for this sprint we aim to present a [hardcoded] augmented clock with a decent user interface to navigate to that feature, and other features.}
\section{Task Listing}
\vspace{-3mm}
\begin{itemize}
\item As a user, I want to see an augmented clock that displays a 24-hour time format in one clock rotation (usually 12 hours), with 4-hour divisions.
\begin{itemize}
\item Task 1: Research and find a pluggable clock or d3 plugin to easily visualize a regular clock (3 hours)
\item Task 2: Tinker with the values in the clock to augment the value (3 hours)
\end{itemize}
Total: 6 hours
\item As a user, I want to see a decent UI in the works so that I may easily navigate the application and know the exact purpose it serves (with no prior knowledge of the application).
\begin{itemize}
\item Task 1: Learn about jQuery (2 hours)
\item Task 2: Write some boiler plate code for the application skeleton (6 hours)
\end{itemize}
Total: 8 hours
\item As a developer, I want to integrate an easy-to-use visualization framework to create the clock (e.g. d3.js).
\begin{itemize}
\item Task 1: Research about d3.js (1 hour)
\item Task 2: Take a look at other alternatives to d3 and tradeoffs in terms of user experience (UX) (3 hours)
\end{itemize}
Total: 4 hours
\end{itemize}
\section{Team Roles}
\vspace{-3mm}
\begin{itemize}
\item Khai Hua, developer
\item Cristian Gonzales, developer
\item Stephen Ouyang, developer (Scrum master) (Product Owner)
\item George Somers, developer
\end{itemize}
\section{Initial Task Assignment}
\vspace{-3mm}
\begin{itemize}
\item George Somers: story 1, task 1
\item Khai Hua: story 1, task 1 \& story 2, task 1
\item Stephen Ouyang: story 1, task 2 \& story 2, task 2
\item Cristian Gonzales: story 2, task 2 \& story 3, task 1 \& 2
\end{itemize}
\section{Burnup chart included separately}
\section{Scrum board found on Trello}
\section{Scrum Times}
Wednesday and Friday at noon, and Tuesdays at around 10:30AM with TA.
\end{document}
|
{"hexsha": "e03746de59a2c7daec0c21962bdf10963c3203c6", "size": 2713, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "scrum/sprint2/PTSprint2Plan.tex", "max_stars_repo_name": "Physical-TIme/Physical-Time", "max_stars_repo_head_hexsha": "3563c21d0c34503dcb4e82975e20c82621f9efef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scrum/sprint2/PTSprint2Plan.tex", "max_issues_repo_name": "Physical-TIme/Physical-Time", "max_issues_repo_head_hexsha": "3563c21d0c34503dcb4e82975e20c82621f9efef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scrum/sprint2/PTSprint2Plan.tex", "max_forks_repo_name": "Physical-TIme/Physical-Time", "max_forks_repo_head_hexsha": "3563c21d0c34503dcb4e82975e20c82621f9efef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3188405797, "max_line_length": 190, "alphanum_fraction": 0.6984887578, "num_tokens": 853}
|
import numpy as np
import math
from scipy import signal, fft, interpolate
def lpfilter_sos(data, dt, cutoff, zero_phase=True):
    """Low-pass filter using the second-order-sections (SOS) Butterworth implementation.

    Inputs:
        data - 2D numpy array, of shape [channels,samples]
        dt - sampling interval (seconds)
        cutoff - cutoff frequency (Hz)
        zero_phase - Boolean flag for applying filter twice (in forward and backward directions)
    Output:
        filtered version of data (float32)
    """
    sos = signal.iirfilter(N=4, Wn=[cutoff], btype='lowpass', fs=1/dt, output='sos')
    if zero_phase:
        # BUG FIX: the branches were inverted -- zero-phase filtering is the
        # forward-backward pass (sosfiltfilt), matching bpfilter_sos below.
        return np.float32(signal.sosfiltfilt(sos, data, axis=-1))
    else:
        return np.float32(signal.sosfilt(sos, data, axis=-1))
def lpfilter(data, dt, cutoff, zero_phase=True):
    """Low-pass filter using the transfer-function (b, a) Butterworth implementation.

    Inputs:
        data - 2D numpy array, of shape [channels,samples]
        dt - sampling interval (seconds)
        cutoff - cutoff frequency (Hz)
        zero_phase - Boolean flag for applying filter twice (in forward and backward directions)
    Output:
        filtered version of data (float32)
    """
    b, a = signal.butter(N=4, Wn=cutoff, btype='low', fs=1/dt)
    if zero_phase:
        # Forward-backward filtering cancels the phase delay.
        return np.float32(signal.filtfilt(b, a, data, axis=-1, padtype='odd'))
    else:
        # BUG FIX: the original said `else` without a colon (SyntaxError) and
        # called the non-existent signal.filt; single-pass filtering is
        # signal.lfilter, which takes no padtype argument.
        return np.float32(signal.lfilter(b, a, data, axis=-1))
def bpfilter_sos(data, dt, bp_low, bp_high, zero_phase=True):
    """Band-pass Butterworth filter in second-order-sections (SOS) form.

    Inputs:
        data - 2D numpy array, of shape [channels,samples]
        dt - sampling interval (seconds)
        bp_low - minimal frequency in the passband (Hz)
        bp_high - maximal frequency in the passband (Hz)
        zero_phase - Boolean flag for applying filter twice (in forward and backward directions)
    Output:
        filtered version of data (float32)
    """
    sample_rate = 1 / dt
    sections = signal.iirfilter(
        N=4, Wn=[bp_low, bp_high], btype='bandpass', fs=sample_rate, output='sos')
    # Zero-phase output comes from the forward-backward pass; otherwise a
    # single causal pass is applied.
    apply_filter = signal.sosfiltfilt if zero_phase else signal.sosfilt
    return np.float32(apply_filter(sections, data, axis=-1))
def bpfilter(data, dt, bp_low, bp_high, zero_phase=True):
    """Band-pass filter using the transfer-function (b, a) Butterworth implementation.

    Inputs:
        data - 2D numpy array, of shape [channels,samples]
        dt - sampling interval (seconds)
        bp_low - minimal frequency in the passband (Hz)
        bp_high - maximal frequency in the passband (Hz)
        zero_phase - Boolean flag for applying filter twice (in forward and backward directions)
    Output:
        filtered version of data (float32)
    """
    b, a = signal.butter(N=4, Wn=[bp_low, bp_high], btype='bandpass', fs=1/dt)
    if zero_phase:
        # Forward-backward filtering cancels the phase delay.
        return np.float32(signal.filtfilt(b, a, data, axis=-1, padtype='odd'))
    else:
        # BUG FIX: scipy.signal has no 'filt' function; single-pass filtering
        # is signal.lfilter, which takes no padtype argument.
        return np.float32(signal.lfilter(b, a, data, axis=-1))
def remove_median(data):
    """Subtract the sample-by-sample median across channels (in place).

    Inputs:
        data - 2D numpy array, of shape [channels,samples]; modified in place
    Output:
        data after median removal (same array object)
    """
    # keepdims preserves the channel axis so the subtraction broadcasts.
    channel_median = np.median(data, axis=0, keepdims=True)
    data -= channel_median
    return data
def clip(data, clip_perc_val):
    """Clip data symmetrically by percentile.

    Inputs:
        data - 2D numpy array, of shape [channels,samples]
        clip_perc_val - percentile of data that defines the upper clipping
            value; the lower clipping value is taken at 100 - clip_perc_val
            to handle negative samples
    Output:
        clipped copy of data
    """
    upper = np.percentile(data, clip_perc_val)
    lower = np.percentile(data, 100.0 - clip_perc_val)
    return np.clip(data, lower, upper)
def normalization(data, mode):
    """Trace-by-trace normalization (modifies data in place).

    Inputs:
        data - 2D numpy array, of shape [channels,samples]
        mode - normalization type
               'std'  : standard deviation of each channel
               'max'  : maximum absolute value of each channel
               'L2'   : L2 norm of each channel (no mean removal)
               'none' : nothing happens
    Output:
        data after normalization (same array object)
    Raises:
        NameError - if mode is not one of the recognized options.
    """
    # Only normalize channels with non-zero content, so dead (all-zero)
    # traces do not trigger a divide-by-zero.
    live_traces = np.nonzero(np.sum(np.abs(data), axis=-1))
    if mode == 'std':
        data[live_traces, :] = np.divide(data[live_traces, :], np.std(data[live_traces, :], axis=-1, keepdims=True))
    elif mode == 'max':
        data[live_traces, :] = np.divide(data[live_traces, :], np.amax(np.abs(data[live_traces, :]), axis=-1, keepdims=True))
    elif mode == 'L2':
        data[live_traces, :] = np.divide(data[live_traces, :], np.sqrt(np.sum(np.power(data[live_traces, :], 2.0),
                                                                              axis=-1, keepdims=True)))
    elif mode == 'none':
        pass
    else:
        # BUG FIX: name the offending mode instead of raising a bare
        # NameError (exception type kept for backward compatibility).
        raise NameError('unknown normalization mode: %s' % mode)
    return data
def linear_fv(data, dx, dt, freqs, vels):
    """" Transform data into a frequency-phase velocity image.
    Note: works correctly for 2D (line) data.
    Inputs:
        data - 2D numpy array, of shape [channels,samples]
        dx - distance between channels
        dt - time sampling interval
        freqs - frequencies (Hz) at which to estimate the transformation
        vels - phase velocities (m/s) at which to estimate the transformation
    Output:
        frequency-phase velocity image at desired [f,v] values
    """
    (nch, nt) = np.shape(data)
    nscanv = np.size(vels)
    # Pad both axes up to the next power of two for the 2D FFT.
    nf = 2**(math.ceil(math.log(nt, 2)))
    nk = 2**(math.ceil(math.log(nch, 2)))
    # Physical frequency (Hz) and wavenumber (1/m) axes matching the
    # fftshift-ed spectrum below.
    fft_f = np.arange(-nf/2, nf/2)/nf/dt
    fft_k = np.arange(-nk/2, nk/2)/nk/dx
    # Amplitude of the zero-centred f-k spectrum.
    fk_res = fft.fftshift(fft.fft2(data, s=[nk, nf]))
    fk_res = np.absolute(fk_res)
    ones_arr = np.ones(shape=(nscanv,))
    fv_map = np.zeros(shape=(len(freqs), len(vels)), dtype=np.float32)
    # NOTE(review): scipy.interpolate.interp2d was deprecated in SciPy 1.10
    # and removed in 1.14 -- migrate to RegularGridInterpolator (verify
    # against the project's pinned SciPy version).
    interp_fun = interpolate.interp2d(fft_k, fft_f, fk_res.T)
    for ind, fr in enumerate(freqs):
        # For each frequency, sample the f-k amplitude along the line
        # k = f / v, one wavenumber per scanned phase velocity.
        fv_map[ind, :] = np.squeeze(interp_fun(np.divide(ones_arr*fr, vels), fr))/(nch*nt)
    return fv_map.T
def template_matching(data, template, threshold=0.0):
    """" Applies template matching with approximated normalization
    Important : this is not a true normalized cross-correlation, which is significantly slower.
    The autocorrelation of the data is computed as an average value.
    Inputs:
        data - 2D numpy array, of shape [channels,samples]
        template - 2D numpy array, of shape [template_channels,template_samples].
            Each dimension has to be smaller than the matching one in data.
        threshold - minimal cross-correlation value for the function to return a result. Otherwise, returns None
    Output:
        List including [channel,sample,cross-correlation value] obtained at the point of maximal cross-correlation
    """
    # FFT-based 2D correlation: multiplying spectra after flipping the
    # template is equivalent to cross-correlating in the space/time domain.
    corr = np.fft.irfft2(np.fft.rfft2(data) * np.fft.rfft2(np.flip(template), data.shape))
    temp_autocorr = np.sum(template*template)
    # Approximate data energy: total energy scaled down to a template-sized
    # window (this is the "approximated normalization" noted above).
    data_autocorr = np.sum(data*data) * np.prod(template.shape) / np.prod(data.shape)
    (nxtemp, nttemp) = template.shape
    corr = corr/np.sqrt(temp_autocorr*data_autocorr)
    max_val = np.amax(corr)
    if max_val >= threshold:
        # Flat argmax -> (channel, sample); subtracting the template extent
        # compensates for the offset introduced by the flipped template.
        ind = np.unravel_index(np.argmax(corr, axis=None), corr.shape)
        return [ind[0]-nxtemp+1, ind[1]-nttemp+1, max_val]
    else:
        return None
|
{"hexsha": "bf1ea0a82e2f7bf6a9a9788f9bcf6167f94cf14d", "size": 7308, "ext": "py", "lang": "Python", "max_stars_repo_path": "signal_processing.py", "max_stars_repo_name": "DAS-RCN/continuous_data_handling", "max_stars_repo_head_hexsha": "cf1f2140c33bbd3c9f164c5c9fb1ff1d1573700e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-14T00:55:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-30T13:22:06.000Z", "max_issues_repo_path": "signal_processing.py", "max_issues_repo_name": "DAS-RCN/continuous_data_handling", "max_issues_repo_head_hexsha": "cf1f2140c33bbd3c9f164c5c9fb1ff1d1573700e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "signal_processing.py", "max_forks_repo_name": "DAS-RCN/continuous_data_handling", "max_forks_repo_head_hexsha": "cf1f2140c33bbd3c9f164c5c9fb1ff1d1573700e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9344262295, "max_line_length": 125, "alphanum_fraction": 0.6469622332, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1879}
|
#pragma once
#include <boost/predef.h>
#if BOOST_ARCH_X86
#include <emmintrin.h>
#endif
namespace emr { namespace detail {
// Backoff policy that performs no waiting at all.
struct no_backoff
{
  void operator()() {}
};
// Backoff policy that spins for an exponentially growing number of CPU
// pause hints on each successive invocation.
class exponential_backoff
{
public:
  void operator()()
  {
    for (unsigned i = 0; i < count; ++i)
      do_backoff();
    // Double the spin count so each retry waits longer than the last.
    count *= 2;
  }

private:
  void do_backoff()
  {
#if BOOST_ARCH_X86
    // Spin-wait hint; reduces power use and pipeline contention while spinning.
    _mm_pause();
#else
  #warning "No backoff implementation available."
#endif
  }

  // Number of pause iterations for the next call; starts at one.
  unsigned count = 1;
};

// Backoff strategy used by the rest of the library.
using backoff = no_backoff;
}}
|
{"hexsha": "c9c8c4805cb1b1c90eb246cc46adf87ff57251a7", "size": 550, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/emr/detail/backoff.hpp", "max_stars_repo_name": "mpoeter/emr", "max_stars_repo_head_hexsha": "390ee0c3b92b8ad0adb897177202e1dd2c53a1b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43.0, "max_stars_repo_stars_event_min_datetime": "2017-12-07T13:28:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T13:51:11.000Z", "max_issues_repo_path": "include/emr/detail/backoff.hpp", "max_issues_repo_name": "mpoeter/emr", "max_issues_repo_head_hexsha": "390ee0c3b92b8ad0adb897177202e1dd2c53a1b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-07-22T17:08:17.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-24T04:58:09.000Z", "max_forks_repo_path": "include/emr/detail/backoff.hpp", "max_forks_repo_name": "mpoeter/emr", "max_forks_repo_head_hexsha": "390ee0c3b92b8ad0adb897177202e1dd2c53a1b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-02-26T08:26:53.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-17T04:06:16.000Z", "avg_line_length": 13.75, "max_line_length": 52, "alphanum_fraction": 0.6, "num_tokens": 144}
|
C LAST UPDATE 16/03/89
C+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
C
      SUBROUTINE GETHDR (ITERM,IPRINT,IUNIT,HFNAM,ISPEC,LSPEC,INCR,MEM,
     &                   IFFR,ILFR,IFINC,IHFMAX,IFRMAX,NCHAN,IRC)
      IMPLICIT NONE
C
C Purpose: Get header file information.  Prompts for a header file name,
C          reads and validates the header, and derives the file/frame
C          sequence counts.  Loops back to re-prompt after any error.
C
      INTEGER ISPEC,LSPEC,INCR,MEM,IFFR,ILFR,IFINC,IHFMAX,IFRMAX,IRC
      INTEGER ITERM,IPRINT,IUNIT,NCHAN
      CHARACTER*(*) HFNAM
C
C ITERM  : Terminal input stream
C IPRINT : Terminal output stream
C IUNIT  : Header I/O stream
C HFNAM  : Header filename
C ISPEC  : Frame nos. part of filename
C LSPEC  : Last frame part of file name
C INCR   : Header file increment
C MEM    : Memory nos.
C IFFR   : First frame in sequence
C ILFR   : Last frame in sequence
C IFINC  : Frame increment
C IHFMAX : Nos. of header file in sequence
C IFRMAX : Nos. of frames/file
C NCHAN  : Nos. of channels
C IRC    : Return code 0 - successful
C                      1 - <ctrl-z>
C
C Calls 4:   ERRMSG , FRDATA , GETFIL , RDHDR
C
C-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
C Local variables:
C
      INTEGER JRC,NFRAME,NDUM
      CHARACTER*13 FNAM
C
C JRC    : Return code
C NFRAME : Nos. of frames
C NDUM   : Dummy (used in 2d implementation)
C FNAM   : Binary dataset name
C
C-----------------------------------------------------------------------
C Default to failure; IRC is cleared only when every check succeeds.
      IRC=1
C Re-entry point: prompt for the file again after any error below.
   10 CALL GETFIL (ITERM,IPRINT,HFNAM,MEM,ISPEC,LSPEC,INCR,IFFR,ILFR,
     &             IFINC,JRC)
      IF (JRC.EQ.0) THEN
         CALL RDHDR (HFNAM,FNAM,ISPEC,MEM,IUNIT,NCHAN,NFRAME,NDUM,JRC)
         IF (JRC.EQ.1) THEN
            CALL ERRMSG ('Error: Missing values in header file')
            GOTO 10
         ELSEIF (JRC.EQ.2) THEN
            CALL ERRMSG ('Error: Header file not found')
            GOTO 10
         ELSE
C Ask for the frame range interactively when none was given (IFFR=0).
            IF (IFFR.EQ.0) CALL FRDATA (ITERM,IPRINT,IFFR,ILFR,IFINC,
     &                                  NCHAN,NFRAME,MEM,JRC)
            IF (JRC.EQ.0) THEN
C Derive the number of header files and frames in the sequence.
               IHFMAX=1
               IFRMAX=1
               IF (INCR.NE.0) IHFMAX=((LSPEC-ISPEC)/INCR)+1
               IF (IFINC.NE.0) IFRMAX=((ILFR-IFFR)/IFINC)+1
C Reject combined multi-file AND multi-frame sequences, and frame
C numbers beyond the count recorded in the header.
               IF (IHFMAX.GT.1.AND.IFRMAX.GT.1) THEN
                  CALL ERRMSG ('Error: Invalid operation')
                  GOTO 10
               ENDIF
               IF (IFFR.GT.NFRAME.OR.ILFR.GT.NFRAME) THEN
                  CALL ERRMSG ('Error: Invalid operation')
                  GOTO 10
               ENDIF
               IRC=0
            ENDIF
         ENDIF
      ENDIF
      RETURN
      END
|
{"hexsha": "e22b63acfe96ef12c061db926157643373a5e8a2", "size": 2563, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "software/libs/otoko/gethdr.f", "max_stars_repo_name": "scattering-central/CCP13", "max_stars_repo_head_hexsha": "e78440d34d0ac80d2294b131ca17dddcf7505b01", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "software/libs/otoko/gethdr.f", "max_issues_repo_name": "scattering-central/CCP13", "max_issues_repo_head_hexsha": "e78440d34d0ac80d2294b131ca17dddcf7505b01", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "software/libs/otoko/gethdr.f", "max_forks_repo_name": "scattering-central/CCP13", "max_forks_repo_head_hexsha": "e78440d34d0ac80d2294b131ca17dddcf7505b01", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-09-05T15:15:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-15T11:13:45.000Z", "avg_line_length": 32.858974359, "max_line_length": 72, "alphanum_fraction": 0.5228248147, "num_tokens": 782}
|
# Deep Learning - Assignment 1
## Outline (15 points)
#### In this assignment, you will learn:
* How to generate random data using python.
* Building linear models for simple regression problem on the generated data.
* Training the linear models with gradient descent algorithm.
* How to alleviate over-fitting for your model.
* **Concepts** you will learn: ***Regularization***, ***Model selection***, ***Gradient descent***, ***Over-fitting***, ***Weight decay***, ***Training/Validation/Testing***.
#### Tasks
In this assignment, we are going to solve a basic linear regression problem by fitting a polynomial function, which we shall use as a running example to motivate a number of key concepts mentioned above.
* **Part 1.** Generate training and testing data using python.
* **Part 2.** Linear regression with polynomials without regularization.
* **Part 3.** Linear regression with polynomials with regularization.
#### Environment
Python 3.<br>
Other libraries should be installed correctly such as numpy, matplotlib, *etc*., according to the dependencies of the assignment.
#### Database
* Randomly synthesized data.
## Part 1. Generate training and testing data using python
Suppose we observe a real-valued input variable $x$ and we wish to use this observation to predict the value of a real-valued target variable $t$. For the present purposes, it is instructive to consider an artificial example using synthetically generated data because we then know the precise process that generated the data for comparison against any learned model.
We therefore generate our training data which consists of 10 pairs of $\{x_i, y_i\}$ where $\{x_i\}$ are generated uniformly in range (0, 1), and the corresponding target values $\{y_i\}$ are obtained by first computing the corresponding values of the function $sin(2\pi x)$ and then adding random noise with a Gaussian distribution having standard deviation 0.3.
\begin{align*}
y_i = sin(2\pi x_i) + \varepsilon, \varepsilon\sim \mathcal{N}(0,\,0.3^{2}).
\end{align*}
The testing data is created the same way as training data but the number of pairs is 100, in order to give a more complete evaluation of the model to be trained.
Please create the required training and testing data below and plot them. You could use numpy to create the data and matplotlib.pyplot to show them.
```python
# import libraries
import numpy as np
import matplotlib.pyplot as plt
# your code here
x_train = np.random.rand(10, 1)
y_train = np.sin(2 * np.pi * x_train) + np.random.normal(0, 0.3, (10, 1))
x_test = np.random.rand(100, 1)
y_test = np.sin(2 * np.pi * x_test) + np.random.normal(0, 0.3, (100, 1))
plt.plot(x_train, y_train, marker='o', linestyle='none')
plt.plot(x_test, y_test, fillstyle='none', marker='o', color='g', linestyle='none')
plt.show()
```
## Part 2. Linear regression with polynomials without regularization
Congratulations if you generate the data correctly! However, to make sure all of you use the same data, we have generated it in advance.
```python
import pickle
with open('data/rawdata.pickle', 'rb') as f:
data = pickle.load(f)
x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']
```
The task in this part is to train a linear model to fit the curve on the training data. Now that we have $\{x, y\}$, we consider using a polynomial function of the form
\begin{align}
f(x, \pmb{w}) = \omega_0 + \omega_1x + \omega_2x^2 + ... + \omega_Mx^M = \sum_{j=0}^{M}\omega_jx^j,
\tag{1}
\end{align}
where $M$ is the *order* of the polynomial, and $x^j$ denotes $x$ raised to the power of j. The values of the coefficients will be determined by fitting the polynomial to the training data. To do so, we introduce the error function to be minimized:
\begin{equation}
E(\pmb{w}) = \frac{1}{2}\sum_{i=1}^{N}\{f(x_i, \pmb{w}) - y_i\}^2,
\tag{2}
\end{equation}
where, $N$ is 10 in our case.
First of all, to use matrix operation in this case, we re-write the polynomial function (1) in the form of matrix multiplication:
$$
\begin{bmatrix}
f(x_1, \pmb{w}) \\ f(x_2, \pmb{w}) \\ . \\ . \\ . \\ f(x_N, \pmb{w})
\end{bmatrix}
=
\begin{bmatrix}
1 & x_1 & x_1^2 & . & . & . & x_1^M \\ 1 & x_2 & x_2^2 & . & . & . & x_2^M \\ . & . & . & . & . & . & . \\ . & . & . & . & . & . & . \\ . & . & . & . & . & . & . \\ 1 & x_N & x_N^2 & . & . & . & x_N^M
\end{bmatrix}
\begin{bmatrix}
\omega_0 \\ \omega_1 \\ . \\ . \\ . \\ \omega_M
\end{bmatrix}
$$
We call the first matrix on the right hand side feature matrix. We next build the feature matrix and try to solve above minimization problem using [`Gradient Descent Algorithm`](https://towardsdatascience.com/an-introduction-to-gradient-descent-c9cca5739307) and the closed-form analytical solution. Because the result from the closed-form solution is theoretically optimal for the problem, by doing this, we could know how well our gradient descent algorithm performs by comparing the results using gradient descent and closed-form methods respectively.
Anyway, I'll give guidance using $M=3$ as follows.
### Part 2.1. Method based on closed-form analysis
First, let's quickly solve this using the closed-form analytical solution. In this case, we luckily have this solution to solve the polynomial regression problem. However, in practical problems using deep learning models, which are non-linear and often very complex, a closed-form solution either does not exist or is extremely difficult to find analytically, so the most common approach is the gradient descent algorithm.
In our case, we will first solve the regression problem using closed-form analysis to get the optimal solution, which can also be seen as the upper bound that the gradient descent algorithm can reach (because the gradient descent algorithm will always find an approximate solution which is close to the optimal).
First, we calculate the feature matrix based on the matrix formulation and apply [`sklearn.linear_model.LinearRegression`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) to solve the regression problem.
```python
feature_matrix = np.ones_like(x_train)
M = 3
for i in range(1, M+1):
feature_matrix = np.concatenate((feature_matrix, x_train ** i), axis=1)
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=False).fit(feature_matrix, y_train)
```
Then, show the original training data points and the draw the curve of the function obtained on a single figure.
```python
x_curve = np.linspace(0, 1, 10000).reshape(-1, 1)
features_curve = np.ones_like(x_curve)
for i in range(1, M+1):
features_curve = np.concatenate((features_curve, x_curve ** i), axis=1)
y_curve = model.predict(features_curve)
plt.plot(x_train, y_train, marker='o', linestyle='None')
plt.plot(x_curve, y_curve, 'r')
plt.show()
```
Now, we have got all the coefficients (noted as $\pmb{w}^*$) of the polynomial function by minimizing the error function (2). Therefore, we can evaluate the residual value of $E(\pmb{w}^*)$ given by (2) for the training and testing data. However, it is sometimes more convenient to use the *root-mean-square* (RMS) error defined by
$$
E_{RMS} = \sqrt{2E(\pmb{w}^*)/N}
\tag{3}
$$
in which the division by $N$ allows us to compare different sizes of data sets on an equal footing, also with the same scale (and in the same units) as the target variable $y$.
In order to evaluate the polynomial function we obtained, we do the following:
* Calculate and print the RMS error on training data based on equation (3),
* Calculate and print the RMS error on testing data based on equation (3),
* Get and print the coefficients of the trained function $\omega_0$ ~ $\omega_M$ using [`LinearRegression.coef_`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression).
```python
prediction_train = model.predict(feature_matrix)
rms_train = np.sqrt(np.sum((prediction_train - y_train) ** 2) / len(prediction_train))
feature_matrix_test = np.ones_like(x_test)
for i in range(1, M+1):
feature_matrix_test = np.concatenate((feature_matrix_test, x_test ** i), axis=1)
prediction_test = model.predict(feature_matrix_test)
rms_test = np.sqrt(np.sum((prediction_test - y_test) ** 2) / len(prediction_test))
print('rms error on training data: ', rms_train)
print('rms error on testing data: ', rms_test)
print('coefficients of the trained function: ', model.coef_)
```
rms error on training data: 0.16066897173745506
rms error on testing data: 0.30943910311337297
coefficients of the trained function: [[ -0.16311694 10.9380811 -30.08577056 19.11555865]]
### Part 2.2. Method based on Gradient Descent Algorithm
Once we get the closed-form solution, which is also the optimal solution, the next thing we are supposed to do is to re-solve the problem by gradient descent algorithm, because gradient descent matters a lot in practical applications since we cannot have a closed-form solution for most of the practical problems. Generally, the ingredients make up a gradient descent algorithm are:
* defining the learning rate to update our parameters,
* defining a loss function or objective function which you should minimize (here we use the error function based on equation (3)),
* calculating the gradients of parameters w.r.t. the loss,
* update parameters according to the learning rate and gradients to a direction that makes the loss smaller.
So, let's do this in that way.
```python
# defining a learning rate, you could change it
learning_rate = 0.1
# initialize the parameters, here we simply assign each parameter to 1
# because M=3, we have 4 parameters in total (w0, w1, w2, w3)
W = np.ones((M+1, 1))
# calculate the feature matrix as before, in order to compute the loss
feature_matrix = np.ones_like(x_train)
for i in range(1, 4):
feature_matrix = np.concatenate((feature_matrix, x_train ** i), axis=1)
# iteratively do gradient descent, here we just iterate it 50 times
for i in range(50):
# calculate the values of the polynomial function
F = np.matmul(feature_matrix, W)
# calculate the loss, print it every 10 iterations
if i % 10 ==0:
loss = 0.5 * np.sum((F - y_train) ** 2)
print('loss: ', loss)
    # calculate the gradients of the parameters
    # here we have 10 data samples, so we calculate the mean.
    # please understand this formulation by deriving the gradients
    # yourself with what you learnt in undergraduate school.
G = np.mean((F - y_train) * feature_matrix, axis=0).reshape(-1, 1)
# update parameters to a direction that makes the loss smaller
# so we use '-' here, it is also why we call gradient "descent"
W = W - learning_rate * G
```
loss: 34.380066478803926
loss: 4.012740605942455
loss: 2.436307391471592
loss: 1.958278267953077
loss: 1.6626874106866916
Again, we show the original training data points and the draw the curve of the function obtained on a single figure.
```python
# dense grid on [0, 1] used only to draw a smooth curve of the fitted polynomial
x_curve = np.linspace(0, 1, 10000).reshape(-1, 1)
features_curve = np.ones_like(x_curve)
for i in range(1, M+1):
    features_curve = np.concatenate((features_curve, x_curve ** i), axis=1)
#y_curve = model.predict(features_curve)
# evaluate the gradient-descent parameters W directly (no sklearn model here)
y_curve = features_curve.dot(W)
plt.plot(x_train, y_train, marker='o', linestyle='None')
plt.plot(x_curve, y_curve, 'r')
plt.show()
```
Also, in order to evaluate the polynomial function we obtained, we do the following:
* Calculate and print the RMS error on training data based on equation (3),
* Calculate and print the RMS error on testing data based on equation (3),
* Print the coefficients (parameters).
```python
# RMS error (equation (3)) on the training split, using the GD parameters W
prediction_train = np.matmul(feature_matrix, W)
rms_train = np.sqrt(np.sum((prediction_train - y_train) ** 2) / len(prediction_train))
# feature_matrix_test is assumed to hold the degree-3 test features built in an
# earlier cell -- this cell depends on notebook execution order (TODO confirm)
prediction_test = np.matmul(feature_matrix_test, W)
rms_test = np.sqrt(np.sum((prediction_test - y_test) ** 2) / len(prediction_test))
print('rms error on training data: ', rms_train)
print('rms error on testing data: ', rms_test)
print('coefficients of the trained function: ', W)
```
rms error on training data: 0.5422276467732126
rms error on testing data: 0.5918422558573816
coefficients of the trained function: [[ 0.23861297]
[-0.26124322]
[-0.24765614]
[-0.13137266]]
#### Question 1. What's the difference between closed-form method and gradient descent method?
#### Your answer:
### Part 2.3. Model selection with closed-form solution
There remains the problem of choosing the order $M$ of the polynomial, this is actually an example of the concept *model selection*. To see how changing $M$ affects the regression problem being solved, please change $M=$ from 1 to 9, and get the following objectives based on **closed-form analysis**:
* Draw all the 9 figures as done above (make sure the function curve and training data points are in the same figure for each case, using plt.subplots),
* Calculate all the error values using equation (3) with respect to training and testing data for all the 9 cases, and plot these values on a single figure (that means 18 points would be drawn in the figure, with x axis representing $M$ and y axis representing the error value, please use different colors to distinguish training and testing),
* Print the coefficients for the 9 polynomial functions.
```python
# your code here
rmss_train = []
rmss_test = []
fig, ax = plt.subplots(3, 3, figsize=(15, 15))
for M in range(1, 10):
feature_matrix = np.ones_like(x_train)
for i in range(1, M + 1):
feature_matrix = np.concatenate((feature_matrix, x_train ** i), axis=1)
model = LinearRegression(fit_intercept=False).fit(feature_matrix, y_train)
x_curve = np.linspace(0, 1, 10000).reshape(-1, 1)
features_curve = np.ones_like(x_curve)
for i in range(1, M + 1):
features_curve = np.concatenate((features_curve, x_curve ** i), axis=1)
# your code here
y_curve = model.predict(features_curve)
row = (M-1) // 3
col = (M-1) % 3
ax[row,col].plot(x_train, y_train, marker='o', linestyle='None')
ax[row,col].plot(x_curve, y_curve, 'r')
prediction_train = model.predict(feature_matrix)
rms_train = np.sqrt(np.sum((prediction_train - y_train) ** 2) / len(prediction_train))
feature_matrix_test = np.ones_like(x_test)
for i in range(1, M + 1):
feature_matrix_test = np.concatenate((feature_matrix_test, x_test ** i), axis=1)
prediction_test = model.predict(feature_matrix_test)
rms_test = np.sqrt(np.sum((prediction_test - y_test) ** 2) / len(prediction_test))
rmss_train.append(rms_train)
rmss_test.append(rms_test)
print('\n Results when M = %d' % M)
print('rms error on training / testing data: %f / %f' % (rms_train, rms_test))
print('coefficients of the trained function: ', model.coef_)
fig.tight_layout()
plt.show()
M_x = np.linspace(1, 9, 9)
```
```python
# RMS error versus model order M: blue = training, red = testing
plt.plot(M_x, rmss_train, 'b', marker='o', linestyle='None')
plt.plot(M_x, rmss_test, 'r', marker='o', linestyle='None')
plt.show()
```
In fact, when $M = 9$, the 10 training points would be exactly fitted, and the training loss becomes zero, since the degree of freedom for this regression problem is 10. However, the fitted curve oscillates wildly and gives a very poor representation of the function $\sin(2\pi x)$. This behavior is known as **over-fitting**.
#### Question 2. Which one is the best one and why? Why the over-fitting happens when $M$ is big?
#### Your answer:
### Part 2.4. Gradient descent practice
Set $M=9$, based on **gradient descent algorithm**, do the following (During training, print the loss every $n$ iterations to show you are running the gradient descent algorithm to train the model, you could select a $n$ as you like):
* Change learning rate and see how fast the training converges,
* Change number of iteration to see whether the training loss would be smaller,
* Draw the figure as done above when you feel the training is good enough (make sure the function curve and training data points are in the same figure),
* Calculate the error values using equation (3) with respect to training and testing data,
* Print the coefficients (parameters) for the polynomial function.
```python
# Gradient descent for the degree-M polynomial (M = 9 here).
# defining a learning rate
learning_rate = 0.1
# initialize the parameters, here we simply assign each parameter to 1
# because M=9 here, we have M+1 = 10 parameters (w0, ..., w9)
W = np.ones((M+1, 1))
# calculate the feature matrix as before, in order to compute the loss
feature_matrix = np.ones_like(x_train)
for i in range(1, M+1):
    feature_matrix = np.concatenate((feature_matrix, x_train ** i), axis=1)
# iteratively do gradient descent
for i in range(50):
    # calculate the values of the polynomial function
    F = np.matmul(feature_matrix, W)
    # calculate the sum-of-squares loss, print every 10 iterations
    if i % 10 ==0:
        loss = 0.5 * np.sum((F - y_train) ** 2)
        print('loss: ', loss)
    # calculate the gradients of the parameters
    # here we have 10 data samples, so we calculate the mean.
    # please understand this formulation by deriving the gradients
    # yourself with what you learnt in undergraduate school.
    G = np.mean((F - y_train) * feature_matrix, axis=0).reshape(-1, 1)
    # update parameters in a direction that makes the loss smaller,
    # so we use '-' here; it is also why we call gradient "descent"
    W = W - learning_rate * G
```
loss: 108.9576867114645
loss: 4.306569760761631
loss: 2.1385030210300267
loss: 1.5563558849819616
loss: 1.312244278798215
```python
# draw the degree-M polynomial learned by gradient descent over the data
x_curve = np.linspace(0, 1, 10000).reshape(-1, 1)
features_curve = np.ones_like(x_curve)
for i in range(1, M+1):
    features_curve = np.concatenate((features_curve, x_curve ** i), axis=1)
#y_curve = model.predict(features_curve)
# evaluate with the gradient-descent parameters W directly
y_curve = features_curve.dot(W)
plt.plot(x_train, y_train, marker='o', linestyle='None')
plt.plot(x_curve, y_curve, 'r')
plt.show()
```
```python
# RMS errors for the gradient-descent fit.
# NOTE(review): feature_matrix_test is reused from the Part 2.3 cell and is
# only correct here if that cell last ran with the same M -- confirm order.
prediction_train = np.matmul(feature_matrix, W)
rms_train = np.sqrt(np.sum((prediction_train - y_train) ** 2) / len(prediction_train))
prediction_test = np.matmul(feature_matrix_test, W)
rms_test = np.sqrt(np.sum((prediction_test - y_test) ** 2) / len(prediction_test))
print('rms error on training data: ', rms_train)
print('rms error on testing data: ', rms_test)
print('coefficients of the trained function: ', W)
```
rms error on training data: 0.48981780339698056
rms error on testing data: 0.5338034958265429
coefficients of the trained function: [[ 0.35652297]
[-0.29036022]
[-0.34839046]
[-0.27086086]
[-0.17482634]
[-0.08679106]
[-0.01139232]
[ 0.05213066]
[ 0.10563873]
[ 0.15094612]]
#### Question 3. Actually, when using gradient descent algorithm, even in the case that $M=9$, it is hard to observe the over-fitting phenomenon. It is because it's difficult to reach the theoretically optimal solution that makes the training loss zero. Regardless of over-fitting, please give your idea to make the training loss smaller when using gradient descent algorithm in this case.
#### Your answer:
## Part 3. Linear regression with polynomials with regularization
Before introducing regularization, let's think about a way to solve the over-fitting problem. <br>
Definitely, through the experiments done above, you somewhat have found that the most direct way is reducing $M$. However, what if we use more training data?
### Part 3.1. Re-visit polynomial regression with extra training data and $M=9$
Finish this step as in Part 2.1 based on closed-form analysis. (*note*: please calculate the RMS error on the original training and testing data)
```python
# Part 3.1: refit the degree-9 polynomial after enlarging the training set.
# load the extra training points supplied with the assignment
with open('data/extradata.pickle', 'rb') as f:
    extra_data = pickle.load(f)
x_extra = extra_data['x_extra']
y_extra = extra_data['y_extra']
# enlarge the training set: original 10 points + extra points
x_extra_train = np.concatenate((x_train, x_extra), axis=0)
y_extra_train = np.concatenate((y_train, y_extra), axis=0)
# degree-9 polynomial features of the enlarged training set
feature_extra_matrix = np.ones_like(x_extra_train)
for i in range(1, 10):
    feature_extra_matrix = np.concatenate((feature_extra_matrix, x_extra_train ** i), axis=1)
# closed-form fit; the all-ones feature column plays the role of the intercept
model = LinearRegression(fit_intercept=False).fit(feature_extra_matrix, y_extra_train)
# draw the fitted curve together with the 10 original training points
x_curve = np.linspace(0, 1, 10000).reshape(-1, 1)
features_curve = np.ones_like(x_curve)
for i in range(1, 10):
    features_curve = np.concatenate((features_curve, x_curve ** i), axis=1)
y_curve = model.predict(features_curve)
plt.plot(x_train, y_train, marker='o', linestyle='None')
plt.plot(x_curve, y_curve, 'r')
plt.show()
# BUG FIX: rebuild the degree-9 features of the ORIGINAL training data here
# instead of relying on a stale `feature_matrix` left over from a previous
# cell (whose degree depended on notebook execution order).
feature_matrix = np.ones_like(x_train)
for i in range(1, 10):
    feature_matrix = np.concatenate((feature_matrix, x_train ** i), axis=1)
prediction_train = model.predict(feature_matrix)
rms_train = np.sqrt(np.sum((prediction_train - y_train) ** 2) / len(prediction_train))
feature_matrix_test = np.ones_like(x_test)
for i in range(1, 10):
    feature_matrix_test = np.concatenate((feature_matrix_test, x_test ** i), axis=1)
prediction_test = model.predict(feature_matrix_test)
rms_test = np.sqrt(np.sum((prediction_test - y_test) ** 2) / len(prediction_test))
print('rms error on training data: ', rms_train)
print('rms error on testing data: ', rms_test)
print('coefficients of the trained function: ', model.coef_)
```
#### Question 4. Does introducing more training data in this case alleviate over-fitting? Why?
#### Your answer:
### Part 3.2. Re-visit polynomial regression with regularization and $M=9$
We may alleviate the over-fitting problem through introducing more training data, but what if there is no extra training data? In fact, we do not want to limit the number of parameters in a model according to the size of the available training set, because it also limits the capacity and flexibility of the model. One technique that is often used to control the over-fitting phenomenon in such cases is that of **regularization**, which involves adding a penalty term to the error function (2) in order to discourage the coefficients from reaching large values. The simplest such penalty term takes the form of a sum of squares of all of the coefficients, leading to a modified error function:
$$
\widetilde{E}(\pmb{w})=\frac{1}{2}\sum_{i=1}^{N}\{f(x_i, \pmb{w}) - y_i\}^2 + \frac{\lambda}{2}||\pmb{w}||^2
\tag{4}
$$
where $||\pmb{w}||^2\equiv \pmb{w}^T\pmb{w} = \omega_0^2 + \omega_1^2 + ... + \omega_M^2$, and the coefficient $\lambda$ governs the relative importance of the regularization term compared with the sum-of-squares error term.
Again, this error function can be minimized exactly in closed form. The regression with such a quadratic regularizer is called *ridge regression*. In the context of neural networks, this approach is known as **weight decay**.
Apart from training and testing data, we also need another data set to test how suitable the $\lambda$ is before we use the model on the testing data. We call this data set the **validation** set. Usually, when we have a large amount of training data, we create a validation set split from the original training data and use the rest of the training data as the new training data. In our case, we only have the training data consisting of 10 data points, so here we use the extra data from Part 3.1 as the validation data.
Please solve the ridge regression problem with the polynomial function using **training** data in the following 7 conditions:
* $M=9$, $ln\lambda=-35$ ($\lambda=e^{-35}$)
* $M=9$, $ln\lambda=-25$
* $M=9$, $ln\lambda=-20$
* $M=9$, $ln\lambda=-15$
* $M=9$, $ln\lambda=-10$
* $M=9$, $ln\lambda=-5$
* $M=9$, $ln\lambda=0$
with the following tasks:
1. Calculate the root-mean-square error based on (3) (not the modified one) on the **training** and **validation** set. Plot the curves of RMS error versus $\ln\lambda$ for training and validation set in a single figure (i.e., in this figure, two curves will be plotted, one for training data and one for validation data. x axis represents $\ln\lambda$ and y axis represents RMS error).
2. Print the coefficients for the obtained 7 polynomial functions.
3. Find the best value for $\lambda$ among these conditions (`best` means its corresponding RMS on the **validation** set is the smallest), calculate the corresponding RMS error on **testing** data and print it.
4. Draw the curve of the polynomial function in a figure w.r.t the best $\lambda$, in which the 10 original training data points and the 100 testing data points should also be plotted.
To solve the ridge regression problem, we could use [`sklearn.linear_model.Ridge`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html).
```python
# Ridge-regression sweep over 7 regularization strengths (M = 9 throughout).
# degree-9 polynomial features of the 10 training points
feature_matrix = np.ones_like(x_train)
for i in range(1, 10):
    feature_matrix = np.concatenate((feature_matrix, x_train ** i), axis=1)
# the extra data from Part 3.1 serves as the validation set
feature_matrix_valid = np.ones_like(x_extra)
for i in range(1, 10):
    feature_matrix_valid = np.concatenate((feature_matrix_valid, x_extra ** i), axis=1)
from sklearn.linear_model import Ridge
rmss_train = []
rmss_valid = []
best_model = None
best_valid = np.finfo(float).max
ln_lambdas = [-35, -25, -20, -15, -10, -5, 0]
for ln_lambda in ln_lambdas:
    # ridge regression with regularization strength lambda = e^{ln_lambda}
    model = Ridge(alpha=np.exp(ln_lambda), fit_intercept=False).fit(feature_matrix, y_train)
    prediction_train = model.predict(feature_matrix)
    rms_train = np.sqrt(np.sum((prediction_train - y_train) ** 2) / len(prediction_train))
    prediction_valid = model.predict(feature_matrix_valid)
    rms_valid = np.sqrt(np.sum((prediction_valid - y_extra) ** 2) / len(prediction_valid))
    rmss_train.append(rms_train)
    rmss_valid.append(rms_valid)
    # BUG FIX: report the regularization strength of this iteration; the old
    # message printed the constant M, so all 7 printouts looked identical.
    print('\n Results when ln(lambda) = %d' % ln_lambda)
    print('coefficients of the trained function: ', model.coef_)
    # keep the model with the smallest validation RMS error
    if rms_valid < best_valid:
        best_valid = rms_valid
        best_model = model
# RMS error versus ln(lambda): blue = training, red = validation
plt.plot(ln_lambdas, rmss_train, 'b', marker='o')
plt.plot(ln_lambdas, rmss_valid, 'r', marker='o')
plt.xlabel('ln(lambda)')
plt.ylabel('RMS error')
plt.show()
print(rmss_valid)
```
```python
# Evaluate the best ridge model (chosen on validation data) on the test set.
feature_matrix_test = np.ones_like(x_test)
for i in range(1, 10):
    feature_matrix_test = np.concatenate((feature_matrix_test, x_test ** i), axis=1)
prediction_test = best_model.predict(feature_matrix_test)
rms_test = np.sqrt(np.sum((prediction_test - y_test) ** 2) / len(prediction_test))
print('rms error on testing data: ', rms_test)
# curve of the best polynomial, plotted with training (blue) and test (green) points
x_curve = np.linspace(0, 1, 10000).reshape(-1, 1)
features_curve = np.ones_like(x_curve)
for i in range(1, 10):
    features_curve = np.concatenate((features_curve, x_curve ** i), axis=1)
y_curve = best_model.predict(features_curve)
plt.plot(x_train, y_train, marker='o', linestyle='None')
plt.plot(x_test, y_test, fillstyle='none', marker='o', color='g', linestyle='None')
plt.plot(x_curve, y_curve, 'r')
plt.show()
```
#### Question 5. Does regularization alleviate over-fitting? Why?
#### Your answer:
### Part 3.3. Gradient descent practice
Choose a $\lambda$ as you like and set $M=9$, calculate and print its rms error on training, validation and testing data respectively, draw the function curve with all the training and testing data on it, based on **gradient descent algorithm**.
***Note***: the loss function is equation (4); when calculating the gradients, remember to also consider the regularization component.
```python
# Gradient descent for ridge regression (loss is equation (4)), M = 9.
# defining a learning rate
M = 9
learning_rate = 0.1
lmda = np.exp(-10)
# initialize the parameters, here we simply assign each parameter to 1
# because M=9 here, we have M+1 = 10 parameters (w0, ..., w9)
W = np.ones((M+1, 1))
# calculate the feature matrix as before, in order to compute the loss
feature_matrix = np.ones_like(x_train)
for i in range(1, M+1):
    feature_matrix = np.concatenate((feature_matrix, x_train ** i), axis=1)
# iteratively do gradient descent
for i in range(50):
    # calculate the values of the polynomial function
    F = np.matmul(feature_matrix, W)
    # calculate the regularized loss (equation (4)), print every 10 iterations
    if i % 10 ==0:
        loss = 0.5 * np.sum((F - y_train) ** 2) + 0.5 * lmda * np.sum(W ** 2)
        print('loss: ', loss)
    # calculate the gradients of the parameters
    # here we have 10 data samples, so we calculate the mean.
    # NOTE(review): the data term uses np.mean while the printed loss uses
    # np.sum, so the effective weight of lmda is scaled by the sample count
    # relative to equation (4) -- confirm this matches the intended recipe.
    G = np.mean((F - y_train) * feature_matrix, axis=0).reshape(-1, 1) + lmda * W
    # update parameters in a direction that makes the loss smaller,
    # so we use '-' here; it is also why we call gradient "descent"
    W = W - learning_rate * G
# draw the fitted curve over the training points
x_curve = np.linspace(0, 1, 10000).reshape(-1, 1)
features_curve = np.ones_like(x_curve)
for i in range(1, M+1):
    features_curve = np.concatenate((features_curve, x_curve ** i), axis=1)
#y_curve = model.predict(features_curve)
y_curve = features_curve.dot(W)
plt.plot(x_train, y_train, marker='o', linestyle='None')
plt.plot(x_curve, y_curve, 'r')
plt.show()
# RMS errors (equation (3)); feature_matrix_test comes from a previous cell
prediction_train = np.matmul(feature_matrix, W)
rms_train = np.sqrt(np.sum((prediction_train - y_train) ** 2) / len(prediction_train))
prediction_test = np.matmul(feature_matrix_test, W)
rms_test = np.sqrt(np.sum((prediction_test - y_test) ** 2) / len(prediction_test))
print('rms error on training data: ', rms_train)
print('rms error on testing data: ', rms_test)
print('coefficients of the trained function: ', W)
```
```python
```
|
{"hexsha": "c743873b7adbb013929f34af0bef140df85e4044", "size": 268557, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "assignment1/Assignment1_v2_answer.ipynb", "max_stars_repo_name": "Haoban/Deep-Learning-Lectures-Exercises-Slides", "max_stars_repo_head_hexsha": "da9141fb9da9fde25d1d3edd4efa13f3f8566e70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignment1/Assignment1_v2_answer.ipynb", "max_issues_repo_name": "Haoban/Deep-Learning-Lectures-Exercises-Slides", "max_issues_repo_head_hexsha": "da9141fb9da9fde25d1d3edd4efa13f3f8566e70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignment1/Assignment1_v2_answer.ipynb", "max_forks_repo_name": "Haoban/Deep-Learning-Lectures-Exercises-Slides", "max_forks_repo_head_hexsha": "da9141fb9da9fde25d1d3edd4efa13f3f8566e70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 227.3979678239, "max_line_length": 100432, "alphanum_fraction": 0.9014250234, "converted": true, "num_tokens": 7717}
|
import numpy as np
import cv2
import os.path
# File Searching: convert every text pixel dump in `folders` into a JPEG image
folders = r"C:\Users\심재윤\PycharmProjects\RGB detection" ### Change Directory with your choice
filename = os.listdir(folders)
for names in filename :
    # skip this converter script itself
    if (names == "makejpg.py") :
        continue
    file = folders + "\\" + names
    # assumes each file is a whitespace-separated integer matrix -- TODO confirm
    a = np.loadtxt(file, dtype='int')
    # Image Processing Part
    ### implement these lines
    # stretch values to the full 16-bit range, then drop the low byte to get 8-bit
    cv2.normalize(a, a, 0, 65535, cv2.NORM_MINMAX)
    np.right_shift(a, 8, a)
    ### until here
    # Image Write
    # NOTE(review): names[:-3] assumes a 3-character extension (e.g. "txt")
    cv2.imwrite(folders + "\\" + names[:-3] + "jpg", np.uint8(a))
exit(0)
|
{"hexsha": "74bda2082ac139cdd4523db51169ef77204dd37c", "size": 572, "ext": "py", "lang": "Python", "max_stars_repo_path": "Detector_1/makejpg.py", "max_stars_repo_name": "JaeyoonSSim/Design-Project", "max_stars_repo_head_hexsha": "8a0037bec50b44b3f5d92da5254e79964fdaf9cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Detector_1/makejpg.py", "max_issues_repo_name": "JaeyoonSSim/Design-Project", "max_issues_repo_head_hexsha": "8a0037bec50b44b3f5d92da5254e79964fdaf9cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Detector_1/makejpg.py", "max_forks_repo_name": "JaeyoonSSim/Design-Project", "max_forks_repo_head_hexsha": "8a0037bec50b44b3f5d92da5254e79964fdaf9cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8695652174, "max_line_length": 94, "alphanum_fraction": 0.6503496503, "include": true, "reason": "import numpy", "num_tokens": 168}
|
[STATEMENT]
lemma joule_alt_def: "joule \<cong>\<^sub>Q newton \<^bold>\<cdot> metre"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. joule \<cong>\<^sub>Q newton \<^bold>\<cdot> metre
[PROOF STEP]
by si_calc
|
{"llama_tokens": 88, "file": "Physical_Quantities_SI_Derived", "length": 1}
|
import pytest
numpy = pytest.importorskip('numpy')
npt = pytest.importorskip('numpy.testing')
scipy = pytest.importorskip('scipy')
import networkx as nx
from networkx.generators.degree_seq import havel_hakimi_graph
class TestBetheHessian(object):
    """Tests for the Bethe Hessian matrix construction."""

    @classmethod
    def setup_class(cls):
        # A Havel-Hakimi graph from a fixed degree sequence, plus a 3-node path.
        degree_sequence = [3, 2, 2, 1, 0]
        cls.G = havel_hakimi_graph(degree_sequence)
        cls.P = nx.path_graph(3)

    def test_bethe_hessian(self):
        "Bethe Hessian matrix"
        expected = numpy.array([
            [4, -2, 0],
            [-2, 5, -2],
            [0, -2, 4],
        ])
        perm = [2, 0, 1]
        # Bethe Hessian gives expected form
        actual = nx.bethe_hessian_matrix(self.P, r=2).todense()
        npt.assert_equal(actual, expected)
        # nodelist is correctly implemented
        permuted = nx.bethe_hessian_matrix(self.P, r=2, nodelist=perm).todense()
        npt.assert_equal(permuted, expected[numpy.ix_(perm, perm)])
        # Equal to Laplacian matrix when r=1
        npt.assert_equal(nx.bethe_hessian_matrix(self.G, r=1).todense(),
                         nx.laplacian_matrix(self.G).todense())
        # Correct default for the regularizer r
        npt.assert_equal(nx.bethe_hessian_matrix(self.G).todense(),
                         nx.bethe_hessian_matrix(self.G, r=1.25).todense())
|
{"hexsha": "2dccae492f2b11d37bcd1b975772a0b4e666bb70", "size": 1309, "ext": "py", "lang": "Python", "max_stars_repo_path": "networkx/linalg/tests/test_bethehessian.py", "max_stars_repo_name": "jmmcd/networkx", "max_stars_repo_head_hexsha": "207ff7d1e9bfaff013ac77c8d6bb79619892c994", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-03T14:58:04.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-03T14:58:04.000Z", "max_issues_repo_path": "networkx/linalg/tests/test_bethehessian.py", "max_issues_repo_name": "jmmcd/networkx", "max_issues_repo_head_hexsha": "207ff7d1e9bfaff013ac77c8d6bb79619892c994", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-11-13T03:48:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-15T16:52:09.000Z", "max_forks_repo_path": "networkx/linalg/tests/test_bethehessian.py", "max_forks_repo_name": "jmmcd/networkx", "max_forks_repo_head_hexsha": "207ff7d1e9bfaff013ac77c8d6bb79619892c994", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-13T10:33:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-09T07:59:26.000Z", "avg_line_length": 37.4, "max_line_length": 94, "alphanum_fraction": 0.6088617265, "include": true, "reason": "import networkx,from networkx", "num_tokens": 351}
|
# !/usr/bin/env python
import random
import sys
import os
import rospkg
import networkx as nx
from cbm_pop_lib.common.chromosome import Chromosome
from copy import deepcopy
def init_result(tasks, mdvrp, prec, params):
    """Create an empty chromosome with one (empty) route per vehicle."""
    chromosome = Chromosome(tasks, mdvrp.max_vehicle_load, prec,
                            mdvrp.sliding_time_windows, mdvrp.n, params)
    for vehicle in range(mdvrp.k):
        chromosome.add_route(vehicle)
    return chromosome
def node_predecessors(node, prec):
    """Return all transitive predecessors of ``node`` in ``prec``, deduplicated."""
    direct = list(prec.predecessors(node))
    ancestors = list(direct)
    for parent in direct:
        ancestors.extend(node_predecessors(parent, prec))
    return list(set(ancestors))
def node_successors(node, prec):
    """Return all transitive successors of ``node`` in ``prec``, deduplicated."""
    direct = list(prec.successors(node))
    descendants = list(direct)
    for child in direct:
        descendants.extend(node_successors(child, prec))
    return list(set(descendants))
def greedy_insertion(mdvrp, problem_params):
"""Gradually builds the routes by selecting randomly an unserved customer
and by inserting it at minimum cost in existing routes.
Returns:
MDVRP: MDVRP problem instance
"""
# init prec
prec = deepcopy(mdvrp.precedence_graph)
for node in mdvrp.precedence_graph:
for pred in node_predecessors(node, mdvrp.precedence_graph):
if (pred, node) not in prec.edges():
prec.add_edge(pred, node)
for succ in node_successors(node, mdvrp.precedence_graph):
if (node, succ) not in prec.edges():
prec.add_edge(node, succ)
all_tasks = range(1, mdvrp.n + 1)
result = init_result(all_tasks, mdvrp, prec, problem_params)
# all_tasks = deepcopy(temp)
_constr = list(nx.topological_sort(mdvrp.precedence_graph))
constr = [x for x in _constr if x in all_tasks]
ord_tasks = [x for x in all_tasks if x not in constr]
random.shuffle(ord_tasks)
ord_tasks = constr + ord_tasks
check_recursion = 0
while len(ord_tasks) > 0:
success = result.insertion_minimal_cost(
ord_tasks[0], mdvrp.quality_matrix, mdvrp.duration_matrix,
mdvrp.setup_duration_matrix, mdvrp.demand_matrix,
mdvrp.setup_cost_matrix)
if success:
del ord_tasks[0]
try:
c = nx.find_cycle(result.all_constraints)
print c
# print self.population[-1].routes
raw_input("cycle")
except nx.exception.NetworkXUnfeasible:
pass
else:
x = ord_tasks.pop(0)
if len(ord_tasks) == 0 or check_recursion > len(all_tasks):
print result.routes
print "couldn't do it ........"
print check_recursion
raw_input()
ord_tasks = deepcopy(all_tasks)
random.shuffle(ord_tasks)
result = init_result(all_tasks, mdvrp, prec)
check_recursion = 0
continue
ord_tasks.append(x)
check_recursion += 1
# raw_input()
return result
|
{"hexsha": "f3981695594ab3c3b99b667195ed37574d0ef363", "size": 3003, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/cbm_pop_lib/operators/generational_functions.py", "max_stars_repo_name": "barbara0811/cbm_pop_mdvrp_optimization", "max_stars_repo_head_hexsha": "10bfc55a21f48f93ed87ec4c48f07e315795efcd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-18T14:49:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-18T14:49:05.000Z", "max_issues_repo_path": "src/cbm_pop_lib/operators/generational_functions.py", "max_issues_repo_name": "barbara0811/cbm_pop_mdvrp_optimization", "max_issues_repo_head_hexsha": "10bfc55a21f48f93ed87ec4c48f07e315795efcd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/cbm_pop_lib/operators/generational_functions.py", "max_forks_repo_name": "barbara0811/cbm_pop_mdvrp_optimization", "max_forks_repo_head_hexsha": "10bfc55a21f48f93ed87ec4c48f07e315795efcd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-28T16:31:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-28T16:31:58.000Z", "avg_line_length": 32.6413043478, "max_line_length": 77, "alphanum_fraction": 0.6217116217, "include": true, "reason": "import networkx", "num_tokens": 709}
|
"""
compute_laplace_eig(mesh, matrices, pde, eiglim, neig_max)
Compute the Laplace eigenvalues, eigenfunctions and first order moments of products of pairs of eigenfunctions.
"""
function compute_laplace_eig(model, matrices, eiglim = Inf, neig_max = Inf)
# Measure function evaluation time
starttime = Base.time()
# Extract parameters
@unpack mesh, D, T₂ = model
@unpack M, S, R, Mx, Q = matrices
ncompartment = length(mesh.points)
# Compute at most all eigenvalues in the given domain
neig = Int(min(neig_max, size(M, 1)))
println("Solving Laplace eigenvalue problem, computing $neig eigenvalues.")
println("Problem size: $(size(M, 1)) points.")
# Solve generalized eigenproblem, computing the smallest eigenvalues only.
# If 2 * neig_max_domain >= nnode, a full decomposition is performed,
# calling the eig function inside eigs
λ, ϕ = eigs(S + Q, M, nev = neig, which = :SR)
# λ, ϕ = eigs(Hermitian(S + Q), Hermitian(M), nev = neig, which = :SR);
# λ, ϕ = eigen(Hermitian(Matrix(S + Q)), Hermitian(Matrix(M)))
# All Laplace eigenvalues are nonnegative
all(0 .≤ λ) ||
@warn "Obtained negative eigenvalues for Laplace operator." findall(λ .< 0) λ[λ.<0]
# Only keep modes with length scales larger than minimum
inds = λ .≤ eiglim
λ = λ[inds]
ϕ = ϕ[:, inds]
isinf(eiglim) ||
length(λ) < neig ||
@warn "No eigenvalues were outside the interval. Consider increasing `neig_max`." eiglim neig_max
# Normalize eigenfunctions with mass weighting
ϕ ./= .√sum(ϕ .* (M * ϕ), dims = 1)
# Compute first order moments of product of pairs of eigenfunctions
moments = cat([ϕ' * Mx[dim] * ϕ for dim = 1:3]..., dims = 3)
# Compute Laplace relaxation matrix
massrelax = ϕ' * R * ϕ
values = λ
funcs = ϕ
totaltime = Base.time() - starttime
(; values, funcs, moments, massrelax, totaltime)
end
|
{"hexsha": "15f373c9ca0ee7c791c3220a618f6a9e9b7976f7", "size": 1946, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/matrix_formalism/compute_laplace_eig.jl", "max_stars_repo_name": "tapudodo/SpinDoctor.jl", "max_stars_repo_head_hexsha": "11ebc8095f988e52f14e89a2c0bef9fc0c4e63c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/matrix_formalism/compute_laplace_eig.jl", "max_issues_repo_name": "tapudodo/SpinDoctor.jl", "max_issues_repo_head_hexsha": "11ebc8095f988e52f14e89a2c0bef9fc0c4e63c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/matrix_formalism/compute_laplace_eig.jl", "max_forks_repo_name": "tapudodo/SpinDoctor.jl", "max_forks_repo_head_hexsha": "11ebc8095f988e52f14e89a2c0bef9fc0c4e63c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.75, "max_line_length": 111, "alphanum_fraction": 0.6526207605, "num_tokens": 577}
|
import numpy as np
import odrive
import random
import time
'''
Random controller for physical pendulum
'''
cpr = 8192          # encoder counts per revolution
p0 = 0              # initial position setpoint (counts)
t_run = 5           # total run time (seconds)
c_max = 3.0         # max commanded current magnitude -- units presumably amps, TODO confirm
v_max = 3 * cpr     # velocity safety limit (counts per second)
dt = 0.05           # control loop period (seconds)
def p2r(p):
    """Convert an encoder position in counts to an angle in radians."""
    fraction_of_rev = p / cpr
    return 2 * np.pi * fraction_of_rev
def v2rs(v):
    # velocity in counts/s converts to rad/s with the same counts->radians factor
    return p2r(v)
# copied from gym env for model continuity
# it handles wrap, turning pi in to -pi
def angle_normalize(x):
    """Map an encoder count position to an angle wrapped into [-pi, pi)."""
    theta = p2r(x)
    return (theta + np.pi) % (2 * np.pi) - np.pi
def action_rand():
    # uniformly random current command in [-c_max, c_max]
    return random.uniform(-c_max, c_max)
print("Connecting...")
d = odrive.find_any()
print("Connected")
x = d.axis0
x.controller.config.control_mode = 3
x.controller.pos_setpoint = p0
x.controller.config.control_mode = 1
t_start = time.time()
t_last = t_start
while t_last - t_start < t_run:
t_now = time.time()
t_diff = t_now - t_last
if t_diff < dt:
time.sleep(dt - t_diff)
p = x.encoder.pos_cpr
v = x.encoder.vel_estimate
if abs(v) > v_max:
print("Max velocity exceeded: %f" % (v))
c = 0
else:
c = action_rand()
x.controller.current_setpoint = c
t_last = t_now
x.controller.current_setpoint = 0
|
{"hexsha": "34636f9859aa64011dcb4ed4f5ad22c640ab4bb5", "size": 1049, "ext": "py", "lang": "Python", "max_stars_repo_path": "simple/real_pendulum_simple.py", "max_stars_repo_name": "rravenel/furuta_pendulum", "max_stars_repo_head_hexsha": "b2f2a3bb8c6f2676671a24c6f9ea4d8e6479835f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-23T16:29:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-30T19:55:44.000Z", "max_issues_repo_path": "simple/real_pendulum_simple.py", "max_issues_repo_name": "rravenel/furuta_pendulum", "max_issues_repo_head_hexsha": "b2f2a3bb8c6f2676671a24c6f9ea4d8e6479835f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simple/real_pendulum_simple.py", "max_forks_repo_name": "rravenel/furuta_pendulum", "max_forks_repo_head_hexsha": "b2f2a3bb8c6f2676671a24c6f9ea4d8e6479835f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.9193548387, "max_line_length": 42, "alphanum_fraction": 0.6796949476, "include": true, "reason": "import numpy", "num_tokens": 334}
|
import numpy as np
from logging import getLogger
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.utils import to_categorical
from typing import Tuple
logger = getLogger(__name__)
def get_fasion_mnist() -> Tuple[
    Tuple[np.ndarray, np.ndarray, np.ndarray],
    Tuple[np.ndarray, np.ndarray],
]:
    """Build a one-class (sneaker) anomaly-detection split from Fashion-MNIST.

    Fix: the original return annotation was a bare tuple of types
    ``(Tuple[...], Tuple[...])``, which is not a valid type expression;
    it is now a proper ``Tuple[Tuple[...], Tuple[...]]``.

    Returns:
        ``((x_train_s, x_ref, y_ref), (x_test_s, x_test_b))`` where
        x_train_s: training images with label 7, shape (N, 28, 28, 1)
        x_ref: 6000 randomly chosen training images with other labels
        y_ref: one-hot labels for x_ref
        x_test_s: test images with label 7
        x_test_b: test images with label 9
        (labels 7/9 are sneaker/ankle boot in Fashion-MNIST — see the
        dataset's class list)
    """
    width, height, channels = 28, 28, 1
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    # Add the trailing channel axis and scale pixels to [0, 1].
    x_train = x_train.reshape(x_train.shape[0], width, height, channels)
    x_test = x_test.reshape(x_test.shape[0], width, height, channels)
    x_train = x_train.astype("float32") / 255
    x_test = x_test.astype("float32") / 255
    x_train_s, x_test_s, x_test_b = [], [], []
    x_ref, y_ref = [], []
    x_train_shape = x_train.shape
    # Split training data: label 7 (sneaker) vs. everything else.
    for i in range(len(x_train)):
        if y_train[i] == 7:  # sneaker class ID
            temp = x_train[i]
            x_train_s.append(temp.reshape((x_train_shape[1:])))
        else:
            temp = x_train[i]
            x_ref.append(temp.reshape((x_train_shape[1:])))
            y_ref.append(y_train[i])
    logger.info(f"x_train_s: length = {len(x_train_s)}, shape = {x_train_s[0].shape}")
    logger.info(f"x_ref: length = {len(x_ref)}, shape = {x_ref[0].shape}")
    logger.info(f"y_ref: length = {len(y_ref)}, shape = {y_ref[0].shape}")
    # Fixed seed so the 6000-image reference subsample is reproducible.
    np.random.seed(0)
    x_ref = np.array(x_ref)
    number = np.random.choice(np.arange(0, x_ref.shape[0]), 6000, replace=False)
    x, y = [], []
    x_ref_shape = x_ref.shape
    for i in number:
        temp = x_ref[i]
        x.append(temp.reshape((x_ref_shape[1:])))
        y.append(y_ref[i])
    logger.info(f"x: length = {len(x)}, shape = {x[0].shape}")
    logger.info(f"y: length = {len(y)}, shape = {y[0].shape}")
    x_train_s = np.array(x_train_s)
    x_ref = np.array(x)
    y_ref = to_categorical(y)
    # Test split: sneakers (7) as "same" class, ankle boots (9) as "both"/other.
    for i in range(len(x_test)):
        if y_test[i] == 7:  # sneaker class ID
            temp = x_test[i, :, :, :]
            x_test_s.append(temp.reshape((x_train_shape[1:])))
        if y_test[i] == 9:
            temp = x_test[i, :, :, :]
            x_test_b.append(temp.reshape((x_train_shape[1:])))
    x_test_s = np.array(x_test_s)
    x_test_b = np.array(x_test_b)
    return (x_train_s, x_ref, y_ref), (x_test_s, x_test_b)
if __name__ == "__main__":
    # Smoke run: enable DEBUG logging and build the dataset splits once.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    get_fasion_mnist()
|
{"hexsha": "e75905f591f611248b156a35c60c100006ed76bc", "size": 2388, "ext": "py", "lang": "Python", "max_stars_repo_path": "machine_learning/tf_doc/dataset.py", "max_stars_repo_name": "iimuz/til", "max_stars_repo_head_hexsha": "b100438e8ce2f369331b3be215a4b9cdce9ffda5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-07-25T01:20:08.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-03T12:58:15.000Z", "max_issues_repo_path": "machine_learning/tf_doc/dataset.py", "max_issues_repo_name": "iimuz/til", "max_issues_repo_head_hexsha": "b100438e8ce2f369331b3be215a4b9cdce9ffda5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2019-09-30T08:04:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T13:51:08.000Z", "max_forks_repo_path": "machine_learning/tf_doc/dataset.py", "max_forks_repo_name": "iimuz/til", "max_forks_repo_head_hexsha": "b100438e8ce2f369331b3be215a4b9cdce9ffda5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-14T05:15:51.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-14T05:15:51.000Z", "avg_line_length": 32.2702702703, "max_line_length": 86, "alphanum_fraction": 0.6164154104, "include": true, "reason": "import numpy", "num_tokens": 684}
|
# This file takes the C-sin-10-shot pickle and converts it into the
# ball-bouncing state-data format.
import pickle
import numpy as np
# Input pickle with the tasks to convert.
filename = "C-sin_10-shot_legit_2.p"
#filename = "bounce-states_100-shot_2.p"
# Output path for the converted tasks.
new_file = "C-sin_10-shot_legit_stateform.p"
tasks = pickle.load(open(filename, "rb"))
# Now convert it
def restructure(indice="tasks_train"):
    """Convert every task under ``tasks[indice]`` into state form.

    Each task is ``[[[inputa, labela], [inputb, labelb]], infoTask]``.
    Inputs are tiled 6x and reshaped to (-1, 3, 2); labels are tiled
    200x and reshaped to (-1, 100, 2).  Only the first 1000 tasks are
    converted.

    Fix: the progress report used Python 2 ``print j``, a SyntaxError
    under Python 3 (the rest of the file uses ``print(...)``).

    Returns:
        list: the converted tasks.
    """
    print("number of tasks: ", len(tasks[indice]))
    newList = []
    for j in range(0, len(tasks[indice])):
        if j % 100 == 0:
            # Progress report every 100 tasks.
            print(j)
            print(float(j) / len(tasks[indice]))
        # Limit to the first 1000 tasks.
        if j > 999:
            break
        firstTask = tasks[indice][j]
        dataTask = firstTask[0]
        infoTask = firstTask[1]
        traina = dataTask[0]
        trainb = dataTask[1]
        inputa = traina[0]
        labela = traina[1]
        inputb = trainb[0]
        labelb = trainb[1]
        # Duplicate each flattened sample 6x (inputs) / 200x (labels) and
        # fold the copies into the state-form shapes expected downstream.
        ina_new = np.tile(inputa.ravel(), (6, 1)).transpose().reshape(-1, 3, 2)
        laa_new = np.tile(labela.ravel(), (200, 1)).transpose().reshape(-1, 100, 2)
        inb_new = np.tile(inputb.ravel(), (6, 1)).transpose().reshape(-1, 3, 2)
        lab_new = np.tile(labelb.ravel(), (200, 1)).transpose().reshape(-1, 100, 2)
        n_dataTask = [[[ina_new, laa_new], [inb_new, lab_new]], infoTask]
        newList.append(n_dataTask)
    return newList
def pullVals():
    """Restructure both splits and pickle the result to ``new_file``."""
    print("Adding train....")
    train_tasks = restructure("tasks_train")
    print("Adding test ....")
    test_tasks = restructure("tasks_test")
    converted = {'tasks_train': train_tasks, 'tasks_test': test_tasks}
    with open(new_file, "wb") as out_fh:
        pickle.dump(converted, out_fh)
pullVals()
|
{"hexsha": "0fdaf648e82a502f6ee87dbccd6ac35b8d26d1b3", "size": 1815, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/duplicateData.py", "max_stars_repo_name": "iguanaus/maml-auto", "max_stars_repo_head_hexsha": "833ae74f821279c0eddfcaff2ff2ede3c9fc6dc6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/duplicateData.py", "max_issues_repo_name": "iguanaus/maml-auto", "max_issues_repo_head_hexsha": "833ae74f821279c0eddfcaff2ff2ede3c9fc6dc6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/duplicateData.py", "max_forks_repo_name": "iguanaus/maml-auto", "max_forks_repo_head_hexsha": "833ae74f821279c0eddfcaff2ff2ede3c9fc6dc6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.4074074074, "max_line_length": 90, "alphanum_fraction": 0.6628099174, "include": true, "reason": "import numpy", "num_tokens": 619}
|
import pandas as pd
import numpy as np
from sklearn.impute import KNNImputer
from sklearn.preprocessing import LabelEncoder
import pickle
from imblearn.over_sampling import RandomOverSampler
class Preprocessor:
    """
    This class shall be used to clean and transform the data before training.

    Attributes set in __init__:
        file_object: open log-file handle that the logger writes into.
        logger_object: logger exposing a ``log(file_object, message)`` method.
    """
    def __init__(self, file_object, logger_object):
        self.file_object = file_object
        self.logger_object = logger_object
    def remove_columns(self, data, columns):
        """
        Method Name: remove_columns
        Description: This method removes the given columns from a pandas dataframe.
        Output: A pandas DataFrame after removing the specified columns.
        On Failure: Raise Exception
        """
        self.logger_object.log(self.file_object, 'Entered the remove_columns method of the Preprocessor class')
        self.data = data
        self.columns = columns
        try:
            # Drop the labels specified in `columns` (column axis).
            self.useful_data = self.data.drop(labels=self.columns, axis=1)
            self.logger_object.log(self.file_object,
                                   'Column removal Successful.Exited the remove_columns method of the Preprocessor class')
            return self.useful_data
        except Exception as e:
            self.logger_object.log(self.file_object, 'Exception occured in remove_columns method of the Preprocessor class. Exception message: ' + str(e))
            self.logger_object.log(self.file_object,
                                   'Column removal Unsuccessful. Exited the remove_columns method of the Preprocessor class')
            raise Exception()
    def separate_label_feature(self, data, label_column_name):
        """
        Method Name: separate_label_feature
        Description: This method separates the features and a Label Coulmns.
        Output: Returns two separate Dataframes, one containing features and the other containing Labels .
        On Failure: Raise Exception
        """
        self.logger_object.log(self.file_object, 'Entered the separate_label_feature method of the Preprocessor class')
        try:
            # Features = everything except the label column; Y = label only.
            self.X = data.drop(labels=label_column_name, axis=1)
            self.Y = data[label_column_name]
            self.logger_object.log(self.file_object,
                                   'Label Separation Successful. Exited the separate_label_feature method of the Preprocessor class')
            return self.X, self.Y
        except Exception as e:
            self.logger_object.log(self.file_object, 'Exception occured in separate_label_feature method of the Preprocessor class. Exception message: ' + str(e))
            self.logger_object.log(self.file_object, 'Label Separation Unsuccessful. Exited the separate_label_feature method of the Preprocessor class')
            raise Exception()
    def dropUnnecessaryColumns(self, data, columnNameList):
        """
        Method Name: dropUnnecessaryColumns
        Description: This method drops the unwanted columns as discussed in EDA section.
        Output: The DataFrame without the columns in columnNameList.
        """
        data = data.drop(columnNameList, axis=1)
        return data
    def replaceInvalidValuesWithNull(self, data):
        """
        Method Name: replaceInvalidValuesWithNull
        Description: This method replaces invalid values i.e. '?' with null, as discussed in EDA.
        Output: The DataFrame with every '?' cell replaced by np.nan.
        """
        for column in data.columns:
            count = data[column][data[column] == '?'].count()
            if count != 0:
                data[column] = data[column].replace('?', np.nan)
        return data
    def is_null_present(self, data):
        """
        Method Name: is_null_present
        Description: This method checks whether there are null values present in the pandas Dataframe or not.
        Output: Returns a Boolean Value. True if null values are present in the DataFrame, False if they are not present.
                Side effect: when nulls are found, writes the per-column counts
                to 'preprocessing_data/null_values.csv'.
        On Failure: Raise Exception
        """
        self.logger_object.log(self.file_object, 'Entered the is_null_present method of the Preprocessor class')
        self.null_present = False
        try:
            # Per-column count of null values.
            self.null_counts = data.isna().sum()
            for i in self.null_counts:
                if i > 0:
                    self.null_present = True
                    break
            if (self.null_present):  # record which columns have null values
                dataframe_with_null = pd.DataFrame()
                dataframe_with_null['columns'] = data.columns
                dataframe_with_null['missing values count'] = np.asarray(data.isna().sum())
                dataframe_with_null.to_csv('preprocessing_data/null_values.csv')
            self.logger_object.log(self.file_object, 'Finding missing values is a success.Data written to the null values file. Exited the is_null_present method of the Preprocessor class')
            return self.null_present
        except Exception as e:
            self.logger_object.log(self.file_object, 'Exception occured in is_null_present method of the Preprocessor class. Exception message: ' + str(e))
            self.logger_object.log(self.file_object, 'Finding missing values failed. Exited the is_null_present method of the Preprocessor class')
            raise Exception()
    def encodeCategoricalValues(self, data):
        """
        Method Name: encodeCategoricalValues
        Description: This method encodes all the categorical values in the training set.
        Output: A Dataframe which has all the categorical values encoded.
                Side effect: pickles the fitted Class-label encoder to
                'EncoderPickle/enc.pickle' for use at prediction time.
        On Failure: Raise Exception
        """
        # Map 'sex' explicitly; F -> 0, M -> 1.
        data['sex'] = data['sex'].map({'F': 0, 'M': 1})
        # All other two-category columns share the values 'f'/'t', so map them
        # uniformly instead of one by one.
        for column in data.columns:
            if len(data[column].unique()) == 2:
                data[column] = data[column].map({'f': 0, 't': 1})
        # Columns with more than two categories are one-hot encoded.
        data = pd.get_dummies(data, columns=['referral_source'])
        encode = LabelEncoder().fit(data['Class'])
        data['Class'] = encode.transform(data['Class'])
        # Persist the encoder so predictions can be decoded back to the
        # original class labels.
        with open('EncoderPickle/enc.pickle', 'wb') as file:
            pickle.dump(encode, file)
        return data
    def encodeCategoricalValuesPrediction(self, data):
        """
        Method Name: encodeCategoricalValuesPrediction
        Description: This method encodes all the categorical values in the prediction set.
        Output: A Dataframe which has all the categorical values encoded.
        On Failure: Raise Exception
        """
        # Map 'sex' explicitly; F -> 0, M -> 1.
        data['sex'] = data['sex'].map({'F': 0, 'M': 1})
        # Exclude numeric columns: only categorical ones are encoded.
        cat_data = data.drop(['age', 'T3', 'TT4', 'T4U', 'FTI', 'sex'], axis=1)
        for column in cat_data.columns:
            if (data[column].nunique()) == 1:
                # A single observed category: map it to the same code used in
                # training ('f'/'F' -> 0, anything else -> 1).
                if data[column].unique()[0] == 'f' or data[column].unique()[0] == 'F':
                    data[column] = data[column].map({data[column].unique()[0]: 0})
                else:
                    data[column] = data[column].map({data[column].unique()[0]: 1})
            # Fix: the original had a stray line-continuation backslash after
            # this colon ("== 2:\"), which accidentally joined the two lines.
            elif (data[column].nunique()) == 2:
                data[column] = data[column].map({'f': 0, 't': 1})
        # One-hot encode 'referral_source' as in training.
        data = pd.get_dummies(data, columns=['referral_source'])
        return data
    def handleImbalanceDataset(self, X, Y):
        """
        Method Name: handleImbalanceDataset
        Description: This method handles the imbalance in the dataset by oversampling.
        Output: (x_sampled, y_sampled) - the oversampled, balanced data.
        On Failure: Raise Exception
        """
        rdsmple = RandomOverSampler()
        # Fix: imbalanced-learn renamed fit_sample -> fit_resample (the old
        # name was removed in imblearn 0.8).
        x_sampled, y_sampled = rdsmple.fit_resample(X, Y)
        return x_sampled, y_sampled
    def impute_missing_values(self, data):
        """
        Method Name: impute_missing_values
        Description: This method replaces all the missing values in the Dataframe using KNN Imputer.
        Output: A Dataframe which has all the missing values imputed.
        On Failure: Raise Exception
        """
        self.logger_object.log(self.file_object, 'Entered the impute_missing_values method of the Preprocessor class')
        self.data = data
        try:
            imputer = KNNImputer(n_neighbors=3, weights='uniform', missing_values=np.nan)
            self.new_array = imputer.fit_transform(self.data)  # impute the missing values
            # Convert the nd-array back to a DataFrame.  Values are rounded
            # because KNNImputer returns values between 0 and 1, but the
            # encoded columns need exactly 0 or 1.
            self.new_data = pd.DataFrame(data=np.round(self.new_array), columns=self.data.columns)
            self.logger_object.log(self.file_object, 'Imputing missing values Successful. Exited the impute_missing_values method of the Preprocessor class')
            return self.new_data
        except Exception as e:
            self.logger_object.log(self.file_object, 'Exception occured in impute_missing_values method of the Preprocessor class. Exception message: ' + str(e))
            self.logger_object.log(self.file_object, 'Imputing missing values failed. Exited the impute_missing_values method of the Preprocessor class')
            raise Exception()
    def get_columns_with_zero_std_deviation(self, data):
        """
        Method Name: get_columns_with_zero_std_deviation
        Description: This method finds out the columns which have a standard deviation of zero.
        Output: List of the columns with standard deviation of zero
        On Failure: Raise Exception
        """
        self.logger_object.log(self.file_object, 'Entered the get_columns_with_zero_std_deviation method of the Preprocessor class')
        self.columns = data.columns
        self.data_n = data.describe()
        self.col_to_drop = []
        try:
            for x in self.columns:
                if (self.data_n[x]['std'] == 0):  # constant column: carries no information
                    self.col_to_drop.append(x)
            self.logger_object.log(self.file_object, 'Column search for Standard Deviation of Zero Successful. Exited the get_columns_with_zero_std_deviation method of the Preprocessor class')
            return self.col_to_drop
        except Exception as e:
            self.logger_object.log(self.file_object, 'Exception occured in get_columns_with_zero_std_deviation method of the Preprocessor class. Exception message: ' + str(e))
            self.logger_object.log(self.file_object, 'Column search for Standard Deviation of Zero Failed. Exited the get_columns_with_zero_std_deviation method of the Preprocessor class')
            raise Exception()
|
{"hexsha": "c73382e2f1e9999f3a6030517181db37b8df667b", "size": 12757, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_preprocessing/preprocessing.py", "max_stars_repo_name": "dipesg/Thyroid-Classification", "max_stars_repo_head_hexsha": "b5f1a7ef1b8a6c3af6bf188529ed16471e82d8dd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_preprocessing/preprocessing.py", "max_issues_repo_name": "dipesg/Thyroid-Classification", "max_issues_repo_head_hexsha": "b5f1a7ef1b8a6c3af6bf188529ed16471e82d8dd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_preprocessing/preprocessing.py", "max_forks_repo_name": "dipesg/Thyroid-Classification", "max_forks_repo_head_hexsha": "b5f1a7ef1b8a6c3af6bf188529ed16471e82d8dd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.1541666667, "max_line_length": 192, "alphanum_fraction": 0.6052363408, "include": true, "reason": "import numpy", "num_tokens": 2394}
|
# MIT License
#
# Copyright (c) 2021 Aditya Shridhar Hegde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from cluster.score import Score
from cluster.dataset import Dataset
import argparse
import os
import numpy as np
from pathlib import Path
def encode_label(v):
    """Pack a 0/1 vector into an integer: bit i is set iff v[i] == 1."""
    return sum(2 ** position for position, bit in enumerate(v) if bit == 1)
def evaluate(ds, pred_dir, threshold):
    """Score every rep_*.npy prediction in *pred_dir* against *ds*.

    Each prediction matrix is binarized at *threshold*, its rows are packed
    into integer labels, and the Score results are averaged per groundtruth
    labelling across all repetitions.
    """
    score_list = []
    for rep_file in Path(pred_dir).glob("rep_*.npy"):
        pred = np.load(rep_file.resolve())
        # Binarize in place: >= threshold becomes 1, the rest 0.
        pred[pred >= threshold] = 1
        pred[pred < threshold] = 0
        labels = np.array([encode_label(row) for row in pred])
        score_list.append(Score.evaluate_on_dataset(ds, labels))
    # Transpose to group scores per groundtruth labelling, then average.
    return [Score.mean(group).all() for group in zip(*score_list)]
def cli_args():
    """Build and evaluate the command-line interface; return parsed args."""
    parser = argparse.ArgumentParser(
        description="Evaluate output of CKP19 meanshift."
    )
    # Required positional arguments.
    positionals = [
        ("data_dir", "Path to directory where dataset and labels are stored."),
        ("dataset", "Name of dataset. Expects a file by name 'dataset.gz' and 'labels*.gz' containing numpy matrices in text format."),
        ("pred_dir", "Path to directory containing output of clustering."),
    ]
    for arg_name, help_text in positionals:
        parser.add_argument(arg_name, help=help_text)
    # Optional flags.
    parser.add_argument(
        "--output_dir", default="", help="Path to output directory."
    )
    parser.add_argument(
        "--threshold", default=0.1, type=float, help="Threshold for converting labels to 0 and 1."
    )
    return parser.parse_args()
if __name__ == "__main__":
    args = cli_args()
    # Load the named dataset from data_dir and print its statistics.
    ds = Dataset.load_gz(args.data_dir, args.dataset)
    ds.print_stats()
    print()
    output = {}
    output["scores"] = evaluate(ds, args.pred_dir, args.threshold)
    pred_dir = Path(args.pred_dir)
    # Print the mean score dict for each groundtruth labelling.
    for i, score in enumerate(output["scores"]):
        print("--- Mean Scores for groundtruth label", i, "---")
        for k in score:
            print(k, ":", score[k])
        print()
    # Persist all scores as <pred_dir name>.json in the output directory.
    with open(os.path.join(args.output_dir, f"{pred_dir.name}.json"), "w") as f:
        json.dump(output, f)
|
{"hexsha": "feccc4c6f08e978045883da79a18c472701adf10", "size": 3234, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/he_meanshift_evaluate.py", "max_stars_repo_name": "encryptogroup/SoK_ppClustering", "max_stars_repo_head_hexsha": "6b008a09bfe3f3b8074e24059ac3e2aa6b87f227", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-18T08:09:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T05:41:24.000Z", "max_issues_repo_path": "utils/he_meanshift_evaluate.py", "max_issues_repo_name": "encryptogroup/SoK_ppClustering", "max_issues_repo_head_hexsha": "6b008a09bfe3f3b8074e24059ac3e2aa6b87f227", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/he_meanshift_evaluate.py", "max_forks_repo_name": "encryptogroup/SoK_ppClustering", "max_forks_repo_head_hexsha": "6b008a09bfe3f3b8074e24059ac3e2aa6b87f227", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.34, "max_line_length": 127, "alphanum_fraction": 0.6765615337, "include": true, "reason": "import numpy", "num_tokens": 751}
|
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
#import pyrealsense2 as rs
#import numpy as np
from classes.realsense import RealSense
from classes.objloader import *
import copy
import numpy as np
import cv2
import os
#import screeninfo
CV_PI = 3.1415926535897932384626433832795
def main():
    """Continuously grab RealSense color frames, mask three color ranges,
    circle square-ish contours on the live image, and display a 2x2 mosaic
    (annotated frame + the three masked views) fullscreen until interrupted.
    """
    # NOTE(review): the argument looks like a device serial number — confirm
    # against the RealSense wrapper's constructor.
    device = RealSense(21312312312)
    print("Color intrinsics: ", device.getcolorintrinsics())
    print("Depth intrinsics: ", device.getdepthintrinsics())
    try:
        while True:
            #image2 = device.getdepthstream()
            #image2 = cv2.applyColorMap(cv2.convertScaleAbs(image2, alpha=0.03), cv2.COLORMAP_BONE)
            image1 = device.getcolorstream()
            cv2.imwrite("../raw_output.png", image1)
            # Independent copies so each color pass masks a clean frame.
            image2 = copy.deepcopy(image1)
            image3 = copy.deepcopy(image1)
            image4 = copy.deepcopy(image1)
            # Color Extraction + Shape identification
            # https://stackoverflow.com/questions/10948589/choosing-the-correct-upper-and-lower-hsv-boundaries-for-color-detection-withcv
            # Pass 1 — labeled ORANGE.  NOTE(review): these HSV bounds
            # (any hue, S<=150, V<=150) select dark/low-saturation pixels
            # rather than orange — confirm the intended range.
            hsv = cv2.cvtColor(image2, cv2.COLOR_BGR2HSV)
            lower_orange = np.array([0, 0, 0],np.uint8)
            upper_orange = np.array([255, 150, 150],np.uint8)
            mask = cv2.inRange(hsv, lower_orange, upper_orange)
            res = cv2.bitwise_and(image2, image2, mask=mask)
            imgray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
            blurred = cv2.GaussianBlur(imgray, (5, 5), 0) # TODO: VERY BASIC, TRY OTHER FILTERS
            #sharpen_kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
            #sharpen = cv2.filter2D(blur, -1, sharpen_kernel)
            ret, thresholded = cv2.threshold(blurred, 50, 255, 0) # TODO: VERY BASIC, TRY OTHER THRESHHOLDS
            #thresh = cv2.threshold(sharpen, 160, 255, cv2.THRESH_BINARY_INV)[1]
            contours, h = cv2.findContours(thresholded, 1, 2)
            #src_gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
            #src_gray = cv2.blur(src_gray, (3, 3))
            #canny_output = cv2.Canny(src_gray, 100, 100 * 2)
            #contours, _ = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            squares = []
            # Keep 4-vertex contours above a minimum area; mark each with a
            # yellow circle on the annotated frame.
            for cnt in contours:
                approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
                if (len(approx) == 4) & (cv2.contourArea(cnt)>25):
                    #x,y,w,h = cv2.boundingRect(cnt)
                    #cv2.rectangle(image1, (x, y), (x + w, y + h), (36, 255, 12), 2)
                    #cv2.drawContours(image1, [cnt], 0, (0, 0, 255), -1)
                    contour_poly = cv2.approxPolyDP(cnt, 3, True)
                    #boundRect = cv2.boundingRect(contour_poly)
                    center, radius = cv2.minEnclosingCircle(contour_poly)
                    color=(0,255,255)  # BGR yellow
                    #cv2.drawContours(image1, contour_poly, 1, color)
                    #cv2.rectangle(image1, (int(boundRect[0]), int(boundRect[1])), \ (int(boundRect[0] + boundRect[2]), int(boundRect[1] + boundRect[3])), color, 2)
                    cv2.circle(image1, (int(center[0]), int(center[1])), int(radius), color, 2)
                    squares.append(center)
            #if len(squares) == 3:
            #    cv2.putText(image1, "CALIBRATED: Detected 3 squares", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
            #else:
            #    cv2.putText(image1, "ERROR: NOT-CALIBRATED", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
            image2 = res
            # Pass 2 — RED (hue 0-5).
            hsv = cv2.cvtColor(image3, cv2.COLOR_BGR2HSV)
            lower_yellow = np.array([0, 50, 50])
            upper_yellow = np.array([5, 255, 255])
            mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
            res = cv2.bitwise_and(image3, image3, mask=mask)
            imgray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
            #blurred = cv2.GaussianBlur(imgray, (5, 5), 0)
            #ret, thresholded = cv2.threshold(blurred, 50, 255, 0)
            ret, thresholded = cv2.threshold(imgray, 0, 255, cv2.THRESH_BINARY)
            opening = cv2.morphologyEx(thresholded, cv2.MORPH_OPEN, (12,12))
            blurred = cv2.blur(opening,(4,4))
            contours, h = cv2.findContours(blurred, 1, 2)
            # NOTE(review): edges/lines are computed but never used below.
            edges = cv2.Canny(thresholded, 66, 133, 3)
            lines = cv2.HoughLines(edges, 1, CV_PI/180, 50, 0, 0)
            # no big difference
            #kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
            #close = cv2.morphologyEx(thresholded, cv2.MORPH_CLOSE, kernel, iterations=2)
            #contours, h = cv2.findContours(close, 1, 2)
            contours, h = cv2.findContours(thresholded, 1, 2)
            for cnt in contours:
                approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
                if (len(approx) == 4) & (cv2.contourArea(cnt)>25):
                    contour_poly = cv2.approxPolyDP(cnt, 3, True)
                    center, radius = cv2.minEnclosingCircle(contour_poly)
                    color=(0,0,255)  # BGR red
                    cv2.circle(image1, (int(center[0]), int(center[1])), int(radius), color, 2)
            image3 = res
            # Pass 3 — labeled green.  NOTE(review): OpenCV hue 100-140 is the
            # blue range, not green — confirm the intended color.
            hsv = cv2.cvtColor(image4, cv2.COLOR_BGR2HSV)
            lower_green = np.array([100, 50, 50], np.uint8)
            upper_green = np.array([140, 255, 255], np.uint8)
            mask = cv2.inRange(hsv, lower_green, upper_green)
            res = cv2.bitwise_and(image4, image4, mask=mask)
            imgray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
            blurred = cv2.GaussianBlur(imgray, (5, 5), 0)
            ret, thresholded = cv2.threshold(blurred, 50, 255, 0)
            contours, h = cv2.findContours(thresholded, 1, 2)
            for cnt in contours:
                approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
                if (len(approx) == 4) & (cv2.contourArea(cnt)>25):
                    contour_poly = cv2.approxPolyDP(cnt, 3, True)
                    center, radius = cv2.minEnclosingCircle(contour_poly)
                    color=(255,0,0)  # BGR blue
                    cv2.circle(image1, (int(center[0]), int(center[1])), int(radius), color, 2)
            image4 = res
            # Show images: 2x2 mosaic (annotated + three masked views).
            #cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.namedWindow('RealSense', cv2.WND_PROP_FULLSCREEN)
            #screen_id = 2
            #screen = screeninfo.get_monitors()[1]
            #cv2.moveWindow('RealSense', screen.x - 1, screen.y - 1)
            cv2.setWindowProperty("RealSense", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
            images_H1 = np.hstack((image1, image2))
            images_H2 = np.hstack((image3, image4))
            images = np.vstack((images_H1, images_H2))
            cv2.imshow('RealSense', images)
            cv2.waitKey(1)
    finally:
        # Stop streaming
        device.stop()
# Run the live capture/detection loop when executed as a script.
if __name__ == '__main__':
    main()
|
{"hexsha": "6de7124dc9c738da55dacea4a39b68aceb1aa12e", "size": 7112, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/testSquares.py", "max_stars_repo_name": "snavas/PyMote", "max_stars_repo_head_hexsha": "9ac51251abbc943fcd36fbb58ff5c3031d375c14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-09T13:51:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T20:32:34.000Z", "max_issues_repo_path": "tests/testSquares.py", "max_issues_repo_name": "snavas/GECCO", "max_issues_repo_head_hexsha": "9ac51251abbc943fcd36fbb58ff5c3031d375c14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-11-20T11:06:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:54:17.000Z", "max_forks_repo_path": "tests/testSquares.py", "max_forks_repo_name": "snavas/PyMote", "max_forks_repo_head_hexsha": "9ac51251abbc943fcd36fbb58ff5c3031d375c14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.3888888889, "max_line_length": 164, "alphanum_fraction": 0.5762092238, "include": true, "reason": "import numpy", "num_tokens": 2038}
|
C Copyright(C) 1999-2020 National Technology & Engineering Solutions
C of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
C NTESS, the U.S. Government retains certain rights in this software.
C
C See packages/seacas/LICENSE for details
      SUBROUTINE CLOSEG (MSNAP, SNAPDX, NSNAP, X, Y, II, INDEX, XBOT,
     & XTOP, YBOT, YTOP)
C***********************************************************************
C  SUBROUTINE CLOSEG = SUBROUTINE TO RETURN CLOSEST GRID LINE
C***********************************************************************
C  SUBROUTINE CALLED BY:
C     DIGIT = A SUBROUTINE TO INPUT GEOMETRY
C***********************************************************************
C  VARIABLES USED:
C     X      = THE X LOCATION IN USER COORDINATES
C     Y      = THE Y LOCATION IN USER COORDINATES
C  OUTPUTS:
C     INDEX  = 1 IF THE NEAREST GRID LINE IS VERTICAL (X = CONST),
C              2 IF IT IS HORIZONTAL (Y = CONST)
C     II     = INDEX INTO SNAPDX(INDEX,*) OF THE CHOSEN GRID LINE
C     XBOT, XTOP, YBOT, YTOP = ENDPOINTS OF THE CHOSEN GRID LINE,
C              SPANNING THE FULL GRID EXTENT IN THE FREE DIRECTION
C***********************************************************************
      DIMENSION SNAPDX(2, MSNAP), NSNAP(2)
C  FIND CLOSEST GRID CROSSING IN X OR Y
C  (SNAPPT SNAPS THE POINT; WHICHEVER COORDINATE MOVED LESS WINS)
      XHOLD = X
      YHOLD = Y
      CALL SNAPPT (MSNAP, SNAPDX, NSNAP, XHOLD, YHOLD)
      IF (ABS(XHOLD - X) .LT. ABS(YHOLD - Y)) THEN
         INDEX = 1
      ELSE
         INDEX = 2
         XHOLD = YHOLD
      END IF
C  FIND INDEX TO GRID LINE
C  (FIRST SNAP VALUE .GE. THE SNAPPED COORDINATE; CLAMPED TO THE LAST)
      DO 100 I = 1, NSNAP(INDEX)
         IF (SNAPDX(INDEX, I) .GE. XHOLD) THEN
            II = I
            GO TO 110
         END IF
  100 CONTINUE
      II = NSNAP(INDEX)
  110 CONTINUE
C  SET GRID LINE LIMITS
      IF (INDEX .EQ. 1) THEN
         XBOT = SNAPDX(1, II)
         XTOP = XBOT
         YBOT = SNAPDX(2, 1)
         YTOP = SNAPDX(2, NSNAP(2))
      ELSE
         XBOT = SNAPDX(1, 1)
         XTOP = SNAPDX(1, NSNAP(1))
         YBOT = SNAPDX(2, II)
         YTOP = YBOT
      END IF
      RETURN
      END
|
{"hexsha": "40cb450277526fcf6258dfdefcdfa8619363a29e", "size": 1780, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/seacas/applications/fastq/closeg.f", "max_stars_repo_name": "jschueller/seacas", "max_stars_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_stars_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2016-02-04T18:38:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T03:01:49.000Z", "max_issues_repo_path": "packages/seacas/applications/fastq/closeg.f", "max_issues_repo_name": "jschueller/seacas", "max_issues_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_issues_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_issues_count": 206, "max_issues_repo_issues_event_min_datetime": "2015-11-20T01:57:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:12:04.000Z", "max_forks_repo_path": "packages/seacas/applications/fastq/closeg.f", "max_forks_repo_name": "jschueller/seacas", "max_forks_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_forks_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_forks_count": 68, "max_forks_repo_forks_event_min_datetime": "2016-01-13T22:46:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:25:05.000Z", "avg_line_length": 26.1764705882, "max_line_length": 75, "alphanum_fraction": 0.4853932584, "num_tokens": 525}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.