import numpy as np
import tensorflow as tf
class MDN_Load:
def __init__(self, name):
self.name = name
if self.name == 'sample':
(self.x_train, self.y_train), (self.x_test, self.y_test) = self.get_sample(10000)
self.output_dim = 3
else:
            raise NotImplementedError
def load(self, inputs, answer, batch_size, buffer_size=1000, is_training=False):
        with tf.variable_scope('{}_dataset'.format('training' if is_training else 'validation')):
def preprocess_fn(inputs, answer):
'''A transformation function to preprocess raw data
into trainable input. '''
x = tf.cast(inputs, tf.float32)
y = tf.cast(answer, tf.float32)
return x, y
if is_training: # training dataset
self.x_train, self.y_train = inputs, answer
self.features_placeholder = tf.placeholder(self.x_train.dtype, self.x_train.shape, name='input_images')
self.labels_placeholder = tf.placeholder(self.y_train.dtype, self.y_train.shape, name='labels')
dataset = tf.data.Dataset.from_tensor_slices((self.features_placeholder, self.labels_placeholder))
else: # validation dataset
self.x_test, self.y_test = inputs, answer
self.valid_placeholder = tf.placeholder(self.x_test.dtype, self.x_test.shape, name='valid_inputs')
self.valid_labels_placeholder = tf.placeholder(self.y_test.dtype, self.y_test.shape, name='valid_labels')
dataset = tf.data.Dataset.from_tensor_slices((self.valid_placeholder, self.valid_labels_placeholder))
            # Transform and batch data at the same time
            dataset = dataset.apply(tf.data.experimental.map_and_batch(
                preprocess_fn, batch_size,
                num_parallel_batches=4,  # roughly the number of CPU cores
                drop_remainder=is_training))
            dataset = dataset.shuffle(buffer_size).repeat()  # note: this shuffles whole batches; size buffer_size relative to the sample count
            dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
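    # Toy data for a mixture density network: x is a noisy function of y, so
    # the inverse mapping x -> y is multimodal.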
def get_sample(self, NSAMPLE):
y_data = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T
y_data = np.random.permutation(y_data)
r_data = np.float32(np.random.normal(size=(NSAMPLE,1))) # random noise
x_data = np.float32(np.sin(0.75*y_data)*7.0+y_data*0.5+r_data*1.0)
return (x_data[:8000], y_data[:8000]), (x_data[8000:], y_data[8000:])
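# A minimal usage sketch (assumed, not part of the original file): load()
# builds the dataset from placeholders, so the raw arrays must be fed when the
# iterator is initialized (TF 1.x style).
if __name__ == '__main__':
    loader = MDN_Load('sample')
    dataset = loader.load(loader.x_train, loader.y_train, batch_size=32, is_training=True)
    iterator = dataset.make_initializable_iterator()
    next_x, next_y = iterator.get_next()
    with tf.Session() as sess:
        sess.run(iterator.initializer, feed_dict={
            loader.features_placeholder: loader.x_train,
            loader.labels_placeholder: loader.y_train,
        })
        x_batch, y_batch = sess.run([next_x, next_y])
        print(x_batch.shape, y_batch.shape)  # (32, 1) (32, 1)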
struct SnailfishNumber
triplets::Vector{Tuple{Int, Int, Int}} # value, depth, weight
end
SnailfishNumber(str::String) = parse(SnailfishNumber, str)
function Base.parse(::Type{SnailfishNumber}, str::String)
elements = eval(Meta.parse(str))
triplets = tripletize!(Tuple{Int, Int, Int}[], elements, 0, 1)
SnailfishNumber(triplets)
end
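# Flatten nested pairs into (value, depth, weight) triplets; weight is the
# product of 3s (left branches) and 2s (right branches), used by magnitude.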
function tripletize!(triplets, elements, depth, weight)
left = (elements[1], depth + 1, weight * 3)
right = (elements[2], depth + 1, weight * 2)
first(left) isa Int ? push!(triplets, left) : tripletize!(triplets, left...)
first(right) isa Int ? push!(triplets, right) : tripletize!(triplets, right...)
triplets
end
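# Adding two numbers nests both under a new pair: every depth grows by 1 and
# every weight picks up a factor of 3 (left operand) or 2 (right operand).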
function Base.:+(x::SnailfishNumber, y::SnailfishNumber)
triplets = vcat(
[(value, depth + 1, weight * 3) for (value, depth, weight) in x.triplets],
[(value, depth + 1, weight * 2) for (value, depth, weight) in y.triplets],
)
reduce!(SnailfishNumber(triplets))
end
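# Explode the leftmost pair at depth 5: add its values to the nearest
# neighbors on either side and replace the pair with a 0.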
function explode!(x::SnailfishNumber)
for i in 1:length(x.triplets) - 1
lvalue, ldepth, lweight = x.triplets[i]
rvalue, rdepth, rweight = x.triplets[i + 1]
is_exploding_pair = ldepth == rdepth == 5 && lweight > rweight
if is_exploding_pair
i > 1 && splice!(x.triplets, i - 1, [x.triplets[i - 1] .+ (lvalue, 0, 0)])
i < length(x.triplets) - 1 && splice!(x.triplets, i + 2, [x.triplets[i + 2] .+ (rvalue, 0, 0)])
deleteat!(x.triplets, i + 1)
deleteat!(x.triplets, i)
insert!(x.triplets, i, (0, ldepth - 1, div(lweight, 3)))
return true
end
end
false
end
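# Split the leftmost value greater than 9 into the pair [floor(v/2), ceil(v/2)].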
function split!(x::SnailfishNumber)
for i in 1:length(x.triplets)
value, depth, weight = x.triplets[i]
if value > 9
deleteat!(x.triplets, i)
insert!(x.triplets, i, (ceil(Int, value / 2), depth + 1, weight * 2))
insert!(x.triplets, i, (floor(Int, value / 2), depth + 1, weight * 3))
return true
end
end
false
end
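# Keep applying explode (which takes priority) and then split until neither rule fires.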
function reduce!(x::SnailfishNumber)
while true
explode!(x) && continue
split!(x) && continue
break
end
x
end
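# With the path products stored as weights, the magnitude is just a dot product.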
magnitude(x::SnailfishNumber) = sum(first.(x.triplets) .* last.(x.triplets))
numbers = SnailfishNumber.(readlines("day18/numbers.txt"))
p1 = magnitude(sum(numbers))
@show p1
p2 = maximum(magnitude.(x + y for x in numbers for y in numbers if x != y))
@show p2
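# Hypothetical Python sketch of the same trick (not part of the Julia solution
# above): flatten a snailfish number into (value, weight) pairs, where weight
# is the product of 3s (left turns) and 2s (right turns) along the path; the
# magnitude is then a plain dot product.
def flatten(elements, weight=1):
    if isinstance(elements, int):
        return [(elements, weight)]
    left, right = elements
    return flatten(left, weight * 3) + flatten(right, weight * 2)

def magnitude(elements):
    return sum(value * weight for value, weight in flatten(elements))

assert magnitude([[1, 2], [[3, 4], 5]]) == 143  # 3*(3*1 + 2*2) + 2*(3*(3*3 + 2*4) + 2*5)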
[STATEMENT]
theorem sup_state_Cons1:
"(G \<turnstile> (x#xt, a) <=s (yt, b)) =
(\<exists>y yt'. yt=y#yt' \<and> (G \<turnstile> x \<preceq> y) \<and> (G \<turnstile> (xt,a) <=s (yt',b)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. G \<turnstile> (x # xt, a) <=s (yt, b) = (\<exists>y yt'. yt = y # yt' \<and> G \<turnstile> x \<preceq> y \<and> G \<turnstile> (xt, a) <=s (yt', b))
[PROOF STEP]
by (auto simp add: sup_state_def stk_convert lesub_def Product.le_def)
"""
The tests here test the webapp by sending fake requests through a fake GH
object and checking that the right API calls were made.
Each fake request has just the API information currently needed by the webapp,
so if more API information is used, it will need to be added.
The GitHub API docs are useful:
- Pull request event (the main input to the webapp):
https://developer.github.com/v3/activity/events/types/#pullrequestevent
- Pull request object (the 'pull_request' key to the pull request event):
https://developer.github.com/v3/pulls/
- Commit objects (the output from the 'commits_url'):
https://developer.github.com/v3/pulls/#list-commits-on-a-pull-request
- Comment objects (the output from the 'comments_url'):
https://developer.github.com/v3/issues/comments/
- Contents objects (the output from the 'version_url'):
https://developer.github.com/v3/repos/contents/
- Statuses objects (the output from the 'statuses_url'):
https://developer.github.com/v3/repos/statuses/
"""
import datetime
import base64
from subprocess import CalledProcessError
import os
from gidgethub import sansio
from ..webapp import router
# These plugins are required for the tests to run properly; the bare module
# references below keep linters from flagging the imports as unused.
import pytest_aiohttp
pytest_aiohttp
import pytest_mock
pytest_mock
from pytest import mark, raises
parametrize = mark.parametrize
class FakeRateLimit:
def __init__(self, *, remaining=5000, limit=5000, reset_datetime=None):
self.remaining = remaining
self.limit = limit
now = datetime.datetime.now(datetime.timezone.utc)
self.reset_datetime = reset_datetime or now + datetime.timedelta(hours=1)
class FakeGH:
"""
Faked gh object
Arguments:
- getitem: dictionary mapping {url: result}, or None
- getiter: dictionary mapping {url: result}, or None
- rate_limit: FakeRateLimit object, or None
- post: dictionary mapping {url: result}, or None
- patch: dictionary mapping {url: result}, or None
- delete: dictionary mapping {url: result}, or None
The calls made against the object are recorded in the attributes
- getiter_urls: list of urls called with getiter
- getitem_urls: list of urls called with getitem
- post_urls: list of urls called with post
- post_data: list of the data input for each post
- patch_urls: list of urls called with patch
- patch_data: list of the data input for each patch
- delete_urls: list of urls called with delete
- rate_limit: the FakeRateLimit object
Note that GET requests are cached in the code and may be called multiple
times.
"""
def __init__(self, *, getitem=None, getiter=None, rate_limit=None,
post=None, patch=None, delete=None):
self._getitem_return = getitem
self._getiter_return = getiter
self._post_return = post
self._patch_return = patch
self._delete_return = delete
self.getiter_urls = []
self.getitem_urls = []
self.post_urls = []
self.post_data = []
self.patch_urls = []
self.patch_data = []
self.delete_urls = []
self.rate_limit = rate_limit or FakeRateLimit()
async def getitem(self, url):
self.getitem_urls.append(url)
return self._getitem_return[url]
async def getiter(self, url):
self.getiter_urls.append(url)
for item in self._getiter_return[url]:
yield item
async def post(self, url, *, data):
self.post_urls.append(url)
self.post_data.append(data)
return self._post_return[url]
async def patch(self, url, *, data):
self.patch_urls.append(url)
self.patch_data.append(data)
return self._patch_return[url]
async def delete(self, url):
self.delete_urls.append(url)
return self._delete_return[url]
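# Minimal sketch of driving FakeGH directly (hypothetical URL, illustration
# only; the tests below exercise it through the router):
#
#     gh = FakeGH(getitem={'https://api.example/item': {'ok': True}})
#     result = await gh.getitem('https://api.example/item')  # inside a coroutine
#     assert result == {'ok': True}
#     assert gh.getitem_urls == ['https://api.example/item']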
def _assert_gh_is_empty(gh):
    assert gh._getitem_return is None
    assert gh._getiter_return is None
    assert gh._post_return is None
    assert gh._patch_return is None
    assert gh._delete_return is None
    assert gh.getiter_urls == []
    assert gh.getitem_urls == []
    assert gh.post_urls == []
    assert gh.post_data == []
    assert gh.patch_urls == []
    assert gh.patch_data == []
    assert gh.delete_urls == []
def _event(data):
return sansio.Event(data, event='pull_request', delivery_id='1')
version = '1.2.1'
release_notes_file = 'Release-Notes-for-1.2.1.md'
comments_url = 'https://api.github.com/repos/sympy/sympy/pulls/1/comments'
commits_url = 'https://api.github.com/repos/sympy/sympy/pulls/1/commits'
contents_url = 'https://api.github.com/repos/sympy/sympy/contents/{+path}'
sha = 'a109f824f4cb2b1dd97cf832f329d59da00d609a'
commit_url_template = 'https://api.github.com/repos/sympy/sympy/commits/{sha}'
commit_url = commit_url_template.format(sha=sha)
version_url_template = 'https://api.github.com/repos/sympy/sympy/contents/sympy/release.py?ref={ref}'
version_url = version_url_template.format(ref='master')
html_url = "https://github.com/sympy/sympy"
wiki_url = "https://github.com/sympy/sympy.wiki"
comment_html_url = 'https://github.com/sympy/sympy/pulls/1#issuecomment-1'
comment_html_url2 = 'https://github.com/sympy/sympy/pulls/1#issuecomment-2'
statuses_url = "https://api.github.com/repos/sympy/sympy/statuses/4a09f9f253c7372ec857774b1fe114b1266013fe"
existing_comment_url = "https://api.github.com/repos/sympy/sympy/issues/comments/1"
existing_added_deleted_comment_url = "https://api.github.com/repos/sympy/sympy/issues/comments/2"
pr_number = 1
valid_PR_description = """
<!-- BEGIN RELEASE NOTES -->
* solvers
* new trig solvers
<!-- END RELEASE NOTES -->
"""
valid_PR_description_no_entry = """
<!-- BEGIN RELEASE NOTES -->
NO ENTRY
<!-- END RELEASE NOTES -->
"""
invalid_PR_description = """
<!-- BEGIN RELEASE NOTES -->
<!-- END RELEASE NOTES -->
"""
release_notes_comment_body = """\
:white_check_mark:
Hi, I am the [SymPy bot](https://github.com/sympy/sympy-bot) (version not found!). I'm here to help you write a release notes entry. Please read the [guide on how to write release notes](https://github.com/sympy/sympy/wiki/Writing-Release-Notes).
Your release notes are in good order.
Here is what the release notes will look like:
* solvers
* new trig solvers ([#1](https://github.com/sympy/sympy/pull/1) by [@asmeurer](https://github.com/asmeurer) and [@certik](https://github.com/certik))
This will be added to https://github.com/sympy/sympy/wiki/Release-Notes-for-1.2.1.
<details><summary>Click here to see the pull request description that was parsed.</summary>
<!-- BEGIN RELEASE NOTES -->
* solvers
* new trig solvers
<!-- END RELEASE NOTES -->
</details><p>
"""
added_deleted_comment_body = """\
### \U0001f7e0
Hi, I am the [SymPy bot](https://github.com/sympy/sympy-bot) (version not found!). I've noticed that some of your commits add or delete files. Since this is sometimes done unintentionally, I wanted to alert you about it.
This is an experimental feature of SymPy Bot. If you have any feedback on it, please comment at https://github.com/sympy/sympy-bot/issues/75.
The following commits **add new files**:
* 174b8b37bc33e9eb29e710a233190d02a13bdb54:
- `file1`
The following commits **delete files**:
* a109f824f4cb2b1dd97cf832f329d59da00d609a:
- `file1`
If these files were added/deleted on purpose, you can ignore this message.
"""
@parametrize('action', ['closed', 'synchronize', 'edited'])
@parametrize('merged', [True, False])
async def test_no_action_on_closed_prs(action, merged):
    if action == 'closed' and merged:
return
gh = FakeGH()
event_data = {
'pull_request': {
'number': 1,
'state': 'closed',
'merged': merged,
},
}
event_data['action'] = action
event = _event(event_data)
res = await router.dispatch(event, gh)
assert res is None
_assert_gh_is_empty(gh)
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
async def test_status_good_new_comment(action):
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
commit = {
'files': [
{
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
# No comment from sympy-bot
comments = [
{
'user': {
'login': 'asmeurer',
},
},
{
'user': {
'login': 'certik',
},
},
]
version_file = {
'content': base64.b64encode(b'__version__ = "1.2.1.dev"\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
version_url: version_file,
commit_url: commit,
}
post = {
comments_url: {
'html_url': comment_html_url,
},
statuses_url: {},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter)
assert set(getitem_urls) == set(getitem)
assert post_urls == [comments_url, statuses_url]
assert len(post_data) == 2
# Comments data
assert post_data[0].keys() == {"body"}
comment = post_data[0]["body"]
assert ":white_check_mark:" in comment
assert ":x:" not in comment
assert "new trig solvers" in comment
assert "error" not in comment
assert "https://github.com/sympy/sympy-bot" in comment
    for line in valid_PR_description.splitlines():
        assert line in comment
assert "good order" in comment
# Statuses data
assert post_data[1] == {
"state": "success",
"target_url": comment_html_url,
"description": "The release notes look OK",
"context": "sympy-bot/release-notes",
}
assert patch_urls == []
assert patch_data == []
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
async def test_status_good_existing_comment(action):
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
commit = {
'files': [
{
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
# Has comment from sympy-bot
comments = [
{
'user': {
'login': 'sympy-bot',
},
'url': existing_comment_url,
'body': release_notes_comment_body,
},
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
"body": "comment",
},
]
version_file = {
'content': base64.b64encode(b'__version__ = "1.2.1.dev"\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
version_url: version_file,
commit_url: commit,
}
post = {
statuses_url: {},
}
patch = {
existing_comment_url: {
'html_url': comment_html_url,
},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post, patch=patch)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter)
assert set(getitem_urls) == set(getitem)
assert post_urls == [statuses_url]
# Statuses data
assert post_data == [{
"state": "success",
"target_url": comment_html_url,
"description": "The release notes look OK",
"context": "sympy-bot/release-notes",
}]
# Comments data
assert patch_urls == [existing_comment_url]
assert len(patch_data) == 1
assert patch_data[0].keys() == {"body"}
comment = patch_data[0]["body"]
assert ":white_check_mark:" in comment
assert ":x:" not in comment
assert "new trig solvers" in comment
assert "error" not in comment
assert "https://github.com/sympy/sympy-bot" in comment
    for line in valid_PR_description.splitlines():
        assert line in comment
assert "good order" in comment
@parametrize('action', ['closed'])
async def test_closed_with_merging(mocker, action):
# Based on test_status_good_existing_comment
update_wiki_called_kwargs = {}
def mocked_update_wiki(*args, **kwargs):
nonlocal update_wiki_called_kwargs
assert not args # All args are keyword-only
update_wiki_called_kwargs = kwargs
mocker.patch('sympy_bot.webapp.update_wiki', mocked_update_wiki)
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': True,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
# Has comment from sympy-bot
comments = [
{
'user': {
'login': 'sympy-bot',
},
'url': existing_comment_url,
'body': release_notes_comment_body,
},
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
'body': "comment",
},
]
version_file = {
'content': base64.b64encode(b'__version__ = "1.2.1.dev"\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
version_url: version_file,
}
post = {
statuses_url: {},
}
patch = {
existing_comment_url: {
'html_url': comment_html_url,
'body': release_notes_comment_body,
'url': existing_comment_url,
},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post, patch=patch)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter), getiter_urls
assert set(getitem_urls) == set(getitem)
assert post_urls == [statuses_url]
# Statuses data
assert post_data == [{
"state": "success",
"target_url": comment_html_url,
"description": "The release notes look OK",
"context": "sympy-bot/release-notes",
}]
# Comments data
assert patch_urls == [existing_comment_url, existing_comment_url]
assert len(patch_data) == 2
assert patch_data[0].keys() == {"body"}
comment = patch_data[0]["body"]
assert comment == release_notes_comment_body
assert ":white_check_mark:" in comment
assert ":x:" not in comment
assert "new trig solvers" in comment
assert "error" not in comment
assert "https://github.com/sympy/sympy-bot" in comment
    for line in valid_PR_description.splitlines():
        assert line in comment
assert "good order" in comment
updated_comment = patch_data[1]['body']
assert updated_comment.startswith(comment)
assert "have been updated" in updated_comment
assert update_wiki_called_kwargs == {
'wiki_url': wiki_url,
'release_notes_file': release_notes_file,
'changelogs': {'solvers': ['* new trig solvers']},
'pr_number': pr_number,
'authors': ['asmeurer', 'certik'],
}
@parametrize('action', ['closed'])
async def test_closed_with_merging_no_entry(mocker, action):
# Based on test_status_good_existing_comment
update_wiki_called_kwargs = {}
def mocked_update_wiki(*args, **kwargs):
nonlocal update_wiki_called_kwargs
assert not args # All args are keyword-only
update_wiki_called_kwargs = kwargs
mocker.patch('sympy_bot.webapp.update_wiki', mocked_update_wiki)
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': True,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description_no_entry,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
# Has comment from sympy-bot
comments = [
{
'user': {
'login': 'sympy-bot',
},
'url': existing_comment_url,
'body': release_notes_comment_body,
},
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
'body': "comment",
},
]
version_file = {
'content': base64.b64encode(b'__version__ = "1.2.1.dev"\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
version_url: version_file,
}
post = {
statuses_url: {},
}
patch = {
existing_comment_url: {
'html_url': comment_html_url,
'body': release_notes_comment_body,
'url': existing_comment_url,
},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post, patch=patch)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter), getiter_urls
assert set(getitem_urls) == set(getitem)
assert post_urls == [statuses_url]
# Statuses data
assert post_data == [{
"state": "success",
"target_url": comment_html_url,
"description": "The release notes look OK",
"context": "sympy-bot/release-notes",
}]
# Comments data
assert patch_urls == [existing_comment_url]
assert len(patch_data) == 1
assert patch_data[0].keys() == {"body"}
comment = patch_data[0]["body"]
assert "No release notes entry will be added for this pull request." in comment
assert ":white_check_mark:" in comment
assert ":x:" not in comment
assert "error" not in comment
assert "https://github.com/sympy/sympy-bot" in comment
    for line in valid_PR_description_no_entry.splitlines():
        assert line in comment
assert update_wiki_called_kwargs == {}
@parametrize('action', ['closed'])
@parametrize('exception', [RuntimeError('error message'),
CalledProcessError(1, 'cmd')])
async def test_closed_with_merging_update_wiki_error(mocker, action, exception):
# Based on test_closed_with_merging
update_wiki_called_kwargs = {}
def mocked_update_wiki(*args, **kwargs):
nonlocal update_wiki_called_kwargs
assert not args # All args are keyword-only
update_wiki_called_kwargs = kwargs
raise exception
mocker.patch('sympy_bot.webapp.update_wiki', mocked_update_wiki)
mocker.patch.dict(os.environ, {"GH_AUTH": "TESTING TOKEN"})
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': True,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
# Has comment from sympy-bot
comments = [
{
'user': {
'login': 'sympy-bot',
},
'url': existing_comment_url,
'body': release_notes_comment_body,
},
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
'body': "comment",
},
]
version_file = {
'content': base64.b64encode(b'__version__ = "1.2.1.dev"\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
version_url: version_file,
}
post = {
statuses_url: {},
comments_url: {
'html_url': comment_html_url,
},
}
patch = {
existing_comment_url: {
'html_url': comment_html_url,
'body': release_notes_comment_body,
'url': existing_comment_url,
},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post, patch=patch)
with raises(type(exception)):
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter), getiter_urls
assert set(getitem_urls) == set(getitem)
assert post_urls == [statuses_url, comments_url, statuses_url]
# Statuses data
assert len(post_data) == 3
assert post_data[0] == {
"state": "success",
"target_url": comment_html_url,
"description": "The release notes look OK",
"context": "sympy-bot/release-notes",
}
assert post_data[1].keys() == {'body'}
error_message = post_data[1]['body']
assert ':rotating_light:' in error_message
assert 'ERROR' in error_message
assert 'https://github.com/sympy/sympy-bot/issues' in error_message
if isinstance(exception, RuntimeError):
assert 'error message' in error_message
else:
assert "Command 'cmd' returned non-zero exit status 1." in error_message
assert post_data[2] == {
"state": "error",
"target_url": comment_html_url,
"description": "There was an error updating the release notes on the wiki.",
"context": "sympy-bot/release-notes",
}
# Comments data
assert patch_urls == [existing_comment_url]
assert len(patch_data) == 1
assert patch_data[0].keys() == {"body"}
comment = patch_data[0]["body"]
assert comment == release_notes_comment_body
assert ":white_check_mark:" in comment
assert ":x:" not in comment
assert "new trig solvers" in comment
assert "error" not in comment
assert "https://github.com/sympy/sympy-bot" in comment
    for line in valid_PR_description.splitlines():
        assert line in comment
assert "good order" in comment
assert update_wiki_called_kwargs == {
'wiki_url': wiki_url,
'release_notes_file': release_notes_file,
'changelogs': {'solvers': ['* new trig solvers']},
'pr_number': pr_number,
'authors': ['asmeurer', 'certik'],
}
@parametrize('action', ['closed'])
async def test_closed_with_merging_bad_status_error(mocker, action):
# Based on test_closed_with_merging
update_wiki_called_kwargs = {}
def mocked_update_wiki(*args, **kwargs):
nonlocal update_wiki_called_kwargs
assert not args # All args are keyword-only
update_wiki_called_kwargs = kwargs
mocker.patch('sympy_bot.webapp.update_wiki', mocked_update_wiki)
mocker.patch.dict(os.environ, {"GH_AUTH": "TESTING TOKEN"})
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': True,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': invalid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
# Has comment from sympy-bot
comments = [
{
'user': {
'login': 'sympy-bot',
},
'url': existing_comment_url,
'body': release_notes_comment_body,
},
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
'body': "comment",
},
]
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {}
post = {
statuses_url: {},
comments_url: {
'html_url': comment_html_url,
},
}
patch = {
existing_comment_url: {
'html_url': comment_html_url,
'body': release_notes_comment_body,
'url': existing_comment_url,
},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post, patch=patch)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter), getiter_urls
assert set(getitem_urls) == set(getitem)
assert post_urls == [statuses_url, comments_url, statuses_url]
# Statuses data
assert len(post_data) == 3
assert post_data[0] == {
"state": "failure",
"target_url": comment_html_url,
"description": "The release notes check failed",
"context": "sympy-bot/release-notes",
}
assert post_data[1].keys() == {'body'}
error_message = post_data[1]['body']
assert ':rotating_light:' in error_message
assert 'ERROR' in error_message
assert 'https://github.com/sympy/sympy-bot/issues' in error_message
assert "The pull request was merged even though the release notes bot had a failing status." in error_message
assert post_data[2] == {
"state": "error",
"target_url": comment_html_url,
"description": "There was an error updating the release notes on the wiki.",
"context": "sympy-bot/release-notes",
}
# Comments data
assert patch_urls == [existing_comment_url]
assert len(patch_data) == 1
assert patch_data[0].keys() == {"body"}
comment = patch_data[0]["body"]
assert ":white_check_mark:" not in comment
assert ":x:" in comment
assert "new trig solvers" not in comment
assert "error" not in comment
assert "There was an issue" in comment
assert "https://github.com/sympy/sympy-bot" in comment
    for line in invalid_PR_description.splitlines():
        assert line in comment
assert "good order" not in comment
assert "No release notes were found" in comment, comment
assert update_wiki_called_kwargs == {}
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
async def test_status_bad_new_comment(action):
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': invalid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
commit = {
'files': [
{
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
# No comment from sympy-bot
comments = [
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
'body': "comment",
},
]
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
commit_url: commit,
}
post = {
comments_url: {
'html_url': comment_html_url,
},
statuses_url: {},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter)
assert set(getitem_urls) == set(getitem)
assert post_urls == [comments_url, statuses_url]
assert len(post_data) == 2
# Comments data
assert post_data[0].keys() == {"body"}
comment = post_data[0]["body"]
assert ":white_check_mark:" not in comment
assert ":x:" in comment
assert "new trig solvers" not in comment
assert "error" not in comment
assert "There was an issue" in comment
assert "https://github.com/sympy/sympy-bot" in comment
    for line in invalid_PR_description.splitlines():
        assert line in comment
assert "good order" not in comment
assert "No release notes were found" in comment
# Statuses data
assert post_data[1] == {
"state": "failure",
"target_url": comment_html_url,
"description": "The release notes check failed",
"context": "sympy-bot/release-notes",
}
assert patch_urls == []
assert patch_data == []
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
async def test_status_bad_existing_comment(action):
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': invalid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
commit = {
'files': [
{
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
# Has comment from sympy-bot
comments = [
{
'user': {
'login': 'sympy-bot',
},
'url': existing_comment_url,
'body': release_notes_comment_body,
},
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
'body': "comment",
},
]
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
commit_url: commit,
}
post = {
statuses_url: {},
}
patch = {
existing_comment_url: {
'html_url': comment_html_url,
},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post, patch=patch)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter)
assert set(getitem_urls) == set(getitem)
assert post_urls == [statuses_url]
# Statuses data
assert post_data == [{
"state": "failure",
"target_url": comment_html_url,
"description": "The release notes check failed",
"context": "sympy-bot/release-notes",
}]
# Comments data
assert patch_urls == [existing_comment_url]
assert len(patch_data) == 1
assert patch_data[0].keys() == {"body"}
comment = patch_data[0]["body"]
assert ":white_check_mark:" not in comment
assert ":x:" in comment
assert "new trig solvers" not in comment
assert "error" not in comment
assert "There was an issue" in comment
assert "https://github.com/sympy/sympy-bot" in comment
    for line in invalid_PR_description.splitlines():
        assert line in comment
assert "good order" not in comment
assert "No release notes were found" in comment, comment
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
async def test_rate_limit_comment(action):
# Based on test_status_good_new_comment
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
commit = {
'files': [
{
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
# No comment from sympy-bot
comments = [
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
"body": "comment",
},
]
version_file = {
'content': base64.b64encode(b'__version__ = "1.2.1.dev"\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
version_url: version_file,
commit_url: commit,
}
post = {
comments_url: {
'html_url': comment_html_url,
},
statuses_url: {},
}
event = _event(event_data)
now = datetime.datetime.now(datetime.timezone.utc)
reset_datetime = now + datetime.timedelta(hours=1)
rate_limit = FakeRateLimit(remaining=5, limit=1000, reset_datetime=reset_datetime)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post, rate_limit=rate_limit)
await router.dispatch(event, gh)
# Everything else is already tested in test_status_good_new_comment()
# above
post_urls = gh.post_urls
post_data = gh.post_data
assert post_urls == [comments_url, statuses_url, comments_url]
assert len(post_data) == 3
assert post_data[2].keys() == {"body"}
comment = post_data[2]["body"]
assert ":warning:" in comment
assert "5" in comment
assert "1000" in comment
assert str(reset_datetime) in comment
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
async def test_header_in_message(action):
# Based on test_status_good_new_comment
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
sha_1 = '174b8b37bc33e9eb29e710a233190d02a13bdb54'
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': """
<!-- BEGIN RELEASE NOTES -->
* solvers
* solver change
<!-- END RELEASE NOTES -->
"""
},
'sha': sha_1,
'url': commit_url_template.format(sha=sha_1)
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
commit = {
'files': [
{
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
# No comment from sympy-bot
comments = [
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
'body': "comment",
},
]
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
commit_url: commit,
commit_url_template.format(sha=sha_1): commit,
}
post = {
comments_url: {
'html_url': comment_html_url,
},
statuses_url: {},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
# The rest is already tested in test_status_good_new_comment
assert set(getiter_urls) == set(getiter)
assert set(getitem_urls) == set(getitem)
assert post_urls == [comments_url, statuses_url]
assert len(post_data) == 2
# Comments data
assert post_data[0].keys() == {"body"}
comment = post_data[0]["body"]
assert ":white_check_mark:" not in comment
assert ":x:" in comment
assert "error" not in comment
assert "https://github.com/sympy/sympy-bot" in comment
assert "good order" not in comment
assert sha_1 in comment
assert "<!-- BEGIN RELEASE NOTES -->" in comment
assert "<!-- END RELEASE NOTES -->" in comment
# Statuses data
assert post_data[1] == {
"state": "failure",
"target_url": comment_html_url,
"description": "The release notes check failed",
"context": "sympy-bot/release-notes",
}
assert patch_urls == []
assert patch_data == []
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
async def test_bad_version_file(action):
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
commit = {
'files': [
{
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
# No comment from sympy-bot
comments = [
{
'user': {
'login': 'asmeurer',
},
},
{
'user': {
'login': 'certik',
},
},
]
version_file = {
'content': base64.b64encode(b'\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
version_url: version_file,
commit_url: commit,
}
post = {
comments_url: {
'html_url': comment_html_url,
},
statuses_url: {},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter)
assert set(getitem_urls) == set(getitem)
assert post_urls == [comments_url, statuses_url]
assert len(post_data) == 2
# Comments data
assert post_data[0].keys() == {"body"}
comment = post_data[0]["body"]
assert ":white_check_mark:" not in comment
assert ":x:" in comment
assert "error" in comment
assert "https://github.com/sympy/sympy-bot" in comment
assert "sympy/release.py" in comment
assert "There was an error getting the version" in comment
assert "https://github.com/sympy/sympy-bot/issues" in comment
    for line in valid_PR_description.splitlines():
        assert line in comment
assert "good order" not in comment
# Statuses data
assert post_data[1] == {
"state": "error",
"target_url": comment_html_url,
"description": "The release notes check failed",
"context": "sympy-bot/release-notes",
}
assert patch_urls == []
assert patch_data == []
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
@parametrize('include_extra', [True, False])
async def test_no_user_logins_in_commits(action, include_extra):
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
commit = {
'files': [
{
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
if include_extra:
commits += [
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
# No comment from sympy-bot
comments = [
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
'body': "comment",
},
]
version_file = {
'content': base64.b64encode(b'__version__ = "1.2.1.dev"\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
version_url: version_file,
commit_url: commit,
}
post = {
comments_url: {
'html_url': comment_html_url,
},
statuses_url: {},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter)
assert set(getitem_urls) == set(getitem)
assert post_urls == [comments_url, statuses_url]
assert len(post_data) == 2
# Comments data
assert post_data[0].keys() == {"body"}
comment = post_data[0]["body"]
assert ":white_check_mark:" in comment
assert ":x:" not in comment
assert "new trig solvers" in comment
assert "error" not in comment
assert "https://github.com/sympy/sympy-bot" in comment
    for line in valid_PR_description.splitlines():
        assert line in comment
assert "good order" in comment
assert "@asmeurer" in comment
if include_extra:
assert "@certik" in comment
# Statuses data
assert post_data[1] == {
"state": "success",
"target_url": comment_html_url,
"description": "The release notes look OK",
"context": "sympy-bot/release-notes",
}
assert patch_urls == []
assert patch_data == []
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
async def test_status_good_new_comment_other_base(action):
# Based on test_status_good_new_comment
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': '1.4',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
{
'author': {
'login': 'certik',
},
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
# Test commits without a login
{
'author': None,
'commit': {
'message': "A good commit",
},
'sha': sha,
'url': commit_url,
},
]
commit = {
'files': [
{
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
# No comment from sympy-bot
comments = [
{
'user': {
'login': 'asmeurer',
},
},
{
'user': {
'login': 'certik',
},
},
]
version_file = {
'content': base64.b64encode(b'__version__ = "1.4rc1"\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
version_url_template.format(ref='1.4'): version_file,
commit_url: commit,
}
post = {
comments_url: {
'html_url': comment_html_url,
},
statuses_url: {},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter)
assert set(getitem_urls) == set(getitem)
assert post_urls == [comments_url, statuses_url]
assert len(post_data) == 2
# Comments data
assert post_data[0].keys() == {"body"}
comment = post_data[0]["body"]
assert ":white_check_mark:" in comment
assert ":x:" not in comment
assert "new trig solvers" in comment
assert "error" not in comment
assert "https://github.com/sympy/sympy-bot" in comment
assert '1.2.1' not in comment
assert '1.4' in comment
    for line in valid_PR_description.splitlines():
        assert line in comment
assert "good order" in comment
# Statuses data
assert post_data[1] == {
"state": "success",
"target_url": comment_html_url,
"description": "The release notes look OK",
"context": "sympy-bot/release-notes",
}
assert patch_urls == []
assert patch_data == []
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
async def test_added_deleted_new_comment(action):
# Based on test_status_good_existing_comment
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
sha_merge = '61697bd7249381b27a4b5d449a8061086effd381'
sha_1 = '174b8b37bc33e9eb29e710a233190d02a13bdb54'
sha_2 = 'aef484a1d46bb5389f1709d78e39126d9cb8599f'
sha_3 = sha
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "Merge"
},
'sha': sha_merge,
'url': commit_url_template.format(sha=sha_merge)
},
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "Adds file1"
},
'sha': sha_1,
'url': commit_url_template.format(sha=sha_1)
},
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "Modifies file1",
},
'sha': sha_2,
'url': commit_url_template.format(sha=sha_2),
},
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "Deletes file1",
},
'sha': sha_3,
'url': commit_url_template.format(sha=sha_3),
},
]
commit_merge = {
'sha': sha_1,
'files': [
{
'filename': 'file1',
'status': 'added',
},
{
'filename': 'file2',
'status': 'deleted',
},
],
'parents': [
{
"url": commit_url,
"sha": sha_2,
},
{
"url": commit_url,
"sha": sha,
},
],
}
commit_add = {
'sha': sha_1,
'files': [
{
'filename': 'file1',
'status': 'added',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
commit_modify = {
'sha': sha_2,
'files': [
{
'filename': 'file1',
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
commit_delete = {
'sha': sha_3,
'files': [
{
'filename': 'file1',
'status': 'removed',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
comments = [
{
'user': {
'login': 'sympy-bot',
},
'url': existing_comment_url,
'body': release_notes_comment_body,
},
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
'body': "comment",
},
]
version_file = {
'content': base64.b64encode(b'__version__ = "1.2.1.dev"\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
commit_url_template.format(sha=sha_merge): commit_merge,
commit_url_template.format(sha=sha_1): commit_add,
commit_url_template.format(sha=sha_2): commit_modify,
commit_url_template.format(sha=sha_3): commit_delete,
version_url: version_file,
}
post = {
comments_url: {
'html_url': comment_html_url,
},
statuses_url: {},
}
patch = {
existing_comment_url: {
'html_url': comment_html_url,
},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post, patch=patch)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
# The rest is already tested in test_status_good_new_comment
assert set(getiter_urls) == set(getiter)
assert set(getitem_urls) == set(getitem)
assert post_urls == [statuses_url, comments_url]
assert patch_urls == [existing_comment_url]
assert len(post_data) == 2
# Comments data
assert post_data[1].keys() == {"body"}
comment = post_data[1]["body"]
assert ":white_check_mark:" not in comment
assert ":x:" not in comment
assert "\U0001f7e0" in comment
assert "error" not in comment
assert "add new files" in comment
assert "delete files" in comment
assert "https://github.com/sympy/sympy-bot" in comment
assert sha_1 in comment
assert sha_2 not in comment
assert sha_3 in comment
assert sha_merge not in comment
assert "`file1`" in comment
assert "<!-- BEGIN RELEASE NOTES -->" not in comment
assert "<!-- END RELEASE NOTES -->" not in comment
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
async def test_added_deleted_existing_comment(action):
# Based on test_status_good_existing_comment
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
sha_merge = '61697bd7249381b27a4b5d449a8061086effd381'
sha_1 = '174b8b37bc33e9eb29e710a233190d02a13bdb54'
sha_2 = 'aef484a1d46bb5389f1709d78e39126d9cb8599f'
sha_3 = sha
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "Merge"
},
'sha': sha_merge,
'url': commit_url_template.format(sha=sha_merge)
},
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "Adds file1"
},
'sha': sha_1,
'url': commit_url_template.format(sha=sha_1)
},
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "Modifies file1",
},
'sha': sha_2,
'url': commit_url_template.format(sha=sha_2),
},
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "Deletes file1",
},
'sha': sha_3,
'url': commit_url_template.format(sha=sha_3),
},
]
commit_merge = {
'sha': sha_1,
'files': [
{
'filename': 'file1',
'status': 'added',
},
{
'filename': 'file2',
'status': 'deleted',
},
],
'parents': [
{
"url": commit_url,
"sha": sha_2,
},
{
"url": commit_url,
"sha": sha,
},
],
}
commit_add = {
'sha': sha_1,
'files': [
{
'filename': 'file1',
'status': 'added',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
commit_modify = {
'sha': sha_2,
'files': [
{
'filename': 'file1',
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
commit_delete = {
'sha': sha_3,
'files': [
{
'filename': 'file1',
'status': 'removed',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
comments = [
{
'user': {
'login': 'sympy-bot',
},
'url': existing_comment_url,
'body': release_notes_comment_body,
},
{
'user': {
'login': 'sympy-bot',
},
'url': existing_added_deleted_comment_url,
'body': added_deleted_comment_body,
},
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
'body': "comment",
},
]
version_file = {
'content': base64.b64encode(b'__version__ = "1.2.1.dev"\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
commit_url_template.format(sha=sha_merge): commit_merge,
commit_url_template.format(sha=sha_1): commit_add,
commit_url_template.format(sha=sha_2): commit_modify,
commit_url_template.format(sha=sha_3): commit_delete,
version_url: version_file,
}
post = {
comments_url: {
'html_url': comment_html_url,
},
statuses_url: {},
}
patch = {
existing_comment_url: {
'html_url': comment_html_url,
},
existing_added_deleted_comment_url: {
'html_url': comment_html_url2,
},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post, patch=patch)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
assert set(getiter_urls) == set(getiter)
assert set(getitem_urls) == set(getitem)
assert post_urls == [statuses_url]
assert patch_urls == list(patch)
assert len(post_data) == 1
assert len(patch_data) == 2
# Comments data.
assert patch_data[1].keys() == {"body"}
comment = patch_data[1]["body"]
assert ":white_check_mark:" not in comment
assert ":x:" not in comment
assert "\U0001f7e0" in comment
assert "error" not in comment
assert "add new files" in comment
assert "delete files" in comment
assert "https://github.com/sympy/sympy-bot" in comment
assert sha_1 in comment
assert sha_2 not in comment
assert sha_3 in comment
assert sha_merge not in comment
assert "`file1`" in comment
assert "<!-- BEGIN RELEASE NOTES -->" not in comment
assert "<!-- END RELEASE NOTES -->" not in comment
@parametrize('action', ['opened', 'reopened', 'synchronize', 'edited'])
async def test_added_deleted_remove_existing_comment(action):
# Based on test_status_good_existing_comment
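# Scenario: the PR no longer adds or deletes files, so the stale added/deleted
# comment is removed (DELETE) while the release-notes comment is updated in place.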
event_data = {
'pull_request': {
'number': 1,
'state': 'open',
'merged': False,
'comments_url': comments_url,
'commits_url': commits_url,
'head': {
'user': {
'login': 'asmeurer',
},
},
'base': {
'repo': {
'contents_url': contents_url,
'html_url': html_url,
},
'ref': 'master',
},
'body': valid_PR_description,
'statuses_url': statuses_url,
},
'action': action,
}
commits = [
{
'author': {
'login': 'asmeurer',
},
'commit': {
'message': "Modifies file1"
},
'sha': sha,
'url': commit_url,
},
]
commit = {
'sha': sha,
'files': [
{
'filename': 'file1',
'status': 'modified',
},
],
'parents': [
{
"url": commit_url,
"sha": sha,
},
],
}
comments = [
{
'user': {
'login': 'sympy-bot',
},
'url': existing_comment_url,
'body': release_notes_comment_body,
},
{
'user': {
'login': 'sympy-bot',
},
'url': existing_added_deleted_comment_url,
'body': added_deleted_comment_body,
},
{
'user': {
'login': 'asmeurer',
},
'body': "comment",
},
{
'user': {
'login': 'certik',
},
'body': "comment",
},
]
version_file = {
'content': base64.b64encode(b'__version__ = "1.2.1.dev"\n'),
}
getiter = {
commits_url: commits,
comments_url: comments,
}
getitem = {
commit_url: commit,
version_url: version_file,
}
post = {
comments_url: {
'html_url': comment_html_url,
},
statuses_url: {},
}
patch = {
existing_comment_url: {
'html_url': comment_html_url,
},
}
delete = {
existing_added_deleted_comment_url: {
'html_url': comment_html_url2,
},
}
event = _event(event_data)
gh = FakeGH(getiter=getiter, getitem=getitem, post=post, patch=patch, delete=delete)
await router.dispatch(event, gh)
getitem_urls = gh.getitem_urls
getiter_urls = gh.getiter_urls
post_urls = gh.post_urls
post_data = gh.post_data
patch_urls = gh.patch_urls
patch_data = gh.patch_data
delete_urls = gh.delete_urls
assert set(getiter_urls) == set(getiter)
assert set(getitem_urls) == set(getitem)
assert post_urls == [statuses_url]
assert patch_urls == list(patch)
assert delete_urls == list(delete)
assert len(post_data) == 1
assert len(patch_data) == 1
# Comments data
assert patch_data[0].keys() == {"body"}
comment = patch_data[0]["body"]
assert "release notes" in comment
assert "\U0001f7e0" not in comment
assert "add new files" not in comment
assert "delete files" not in comment
assert sha not in comment
assert "`file1`" not in comment
|
{"hexsha": "da9fbc6148d5f7d295049c7b9048a20e3659a952", "size": 75709, "ext": "py", "lang": "Python", "max_stars_repo_path": "sympy_bot/tests/test_webapp.py", "max_stars_repo_name": "asmeurer/sympy-bot-1", "max_stars_repo_head_hexsha": "08e16763f7c15f70366af91b8fb022aa6c962115", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2015-10-06T18:50:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T07:36:25.000Z", "max_issues_repo_path": "sympy_bot/tests/test_webapp.py", "max_issues_repo_name": "asmeurer/sympy-bot-1", "max_issues_repo_head_hexsha": "08e16763f7c15f70366af91b8fb022aa6c962115", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 73, "max_issues_repo_issues_event_min_datetime": "2018-07-14T17:34:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-23T03:54:10.000Z", "max_forks_repo_path": "sympy_bot/tests/test_webapp.py", "max_forks_repo_name": "asmeurer/sympy-bot-1", "max_forks_repo_head_hexsha": "08e16763f7c15f70366af91b8fb022aa6c962115", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2015-10-30T06:01:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-16T00:59:46.000Z", "avg_line_length": 26.3152589503, "max_line_length": 246, "alphanum_fraction": 0.4919362295, "include": true, "reason": "from sympy", "num_tokens": 17596}
|
from torch.nn import functional as F
from torch import nn
import torch
import numpy as np
from utils import layer
from radam import RAdam
from vpn import MVProp
import utils
from torch_critic import Critic as ClassicCritic
class CriticModel(nn.Module):
def __init__(self, env, layer_number, FLAGS):
super().__init__()
self.q_limit = -FLAGS.time_scale
# Set parameters to give critic optimistic initialization near q_init
self.q_init = -0.067
self.q_offset = -np.log(self.q_limit/self.q_init - 1)
self.no_target_net = FLAGS.no_target_net
self.time_scale = FLAGS.time_scale
self.offset = 2
# Dimensions of goal placeholder will differ depending on layer level
if layer_number == FLAGS.layers - 1 or (layer_number == FLAGS.layers -2 and FLAGS.oracle):
self.goal_dim = env.end_goal_dim
else:
self.goal_dim = env.subgoal_dim
self.loss_val = 0
self.state_dim = env.state_dim
# Dimensions of action placeholder will differ depending on layer level
if layer_number == 0:
action_dim = env.action_dim
else:
action_dim = env.subgoal_dim
def forward(self, v_image, actor_pixel_selection):
# v_image shape [batch_size, height, width]
x_coords = actor_pixel_selection[:, 0]
y_coords = actor_pixel_selection[:, 1]
assert (x_coords >= 0).all()
assert (x_coords < v_image.shape[-1]).all(), (torch.min(x_coords), torch.max(x_coords), v_image.shape)
assert (y_coords >= 0).all(), y_coords.min()
assert (y_coords < v_image.shape[-2]).all(), (y_coords.max(), v_image.shape[-2])
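# Two-step gather: first select column x along the width axis for every row,
# then select row y along the height axis, leaving V(y, x) for each batch element.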
x_slice = x_coords.long().unsqueeze(1).unsqueeze(2).expand(-1, v_image.shape[1], -1)
value = v_image.gather(2, x_slice)
y_slice = y_coords.long().unsqueeze(1).unsqueeze(2)
values = value.gather(1, y_slice)
return values * self.time_scale
class Critic():
def __init__(self, device, env, layer_number, FLAGS, learning_rate=0.001, gamma=0.98, tau=0.05):
self.device = device # Session in its TF equivalent
self.critic_name = 'vpn_critic_' + str(layer_number)
self.learning_rate = learning_rate
self.q_limit = -FLAGS.time_scale
self.gamma = gamma
self.tau = tau
self.sac = FLAGS.sac
self.td3 = FLAGS.td3
self.vpn = MVProp(self.gamma, FLAGS, env).to(self.device)
self.no_target_net = FLAGS.no_target_net
# Create critic network graph
self.infer_net = CriticModel(env, layer_number, FLAGS).to(device=self.device)
self.no_weights = FLAGS.no_vpn_weights
self.vpn_masking = FLAGS.vpn_masking
self.classic_critic = None
if FLAGS.boost_vpn:
self.classic_critic = ClassicCritic(device, env, layer_number, FLAGS, learning_rate, gamma, tau)
if not self.no_weights:
opt_class = RAdam if FLAGS.radam else torch.optim.Adam
self.optimizer = opt_class(self.vpn.parameters(), learning_rate)
if FLAGS.no_target_net:
self.target_net = self.infer_net
self.vpn_target = self.vpn
else:
self.target_net = self.infer_net
self.vpn_target = MVProp(self.gamma, FLAGS, env).to(self.device)
self.vpn_target.load_state_dict(self.vpn.state_dict())
self.get_pos_image = lambda states, images: env.pos_image(states[..., :2], images[:, 0])
self.get_image_pos = lambda states, images: torch.stack(env.get_image_position(states[..., :2], images), dim=-1)
def get_Q_value(self,state, goal, action, image):
with torch.no_grad():
q = self.infer_net(self.vpn.critic(image), self.get_image_pos(action, image))
return q
def get_target_Q_value(self,state, goal, action, image):
assert not self.no_target_net
with torch.no_grad():
q = self.infer_net(self.vpn_target.critic(image), self.get_image_pos(action, image))
return q
def update_target_weights(self):
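# Polyak (soft) update: target <- tau * source + (1 - tau) * target.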
for source, target in zip(self.vpn.parameters(), self.vpn_target.parameters()):
target.data.copy_(self.tau * source + (1.0 - self.tau) * target)
def _value(self, net, vpn_net, images, states, actions, get_extra_loss=False):
pos_images = self.get_pos_image(states, images)
action_image_position = self.get_image_pos(actions, images)
agent_image_position = self.get_image_pos(states, images)
vpn_values, vpn_probs = vpn_net.actor(images, pos_images)
extra_loss = 0
if self.vpn_masking:
vpn_values, extra_loss = vpn_net.mask_image(vpn_values, vpn_probs, pos_images, agent_image_position)
if get_extra_loss:
return net(vpn_values, action_image_position).squeeze(), extra_loss
return net(vpn_values, action_image_position).squeeze()
def update(self, old_states, old_actions, rewards, new_states, old_goals, new_goals, new_actions, is_terminals, is_weights, next_entropy, images, metrics, total_steps_taken=None):
if self.no_weights:
return torch.ones_like(rewards)
if self.classic_critic is not None:
self.classic_critic.update(old_states, old_actions, rewards, new_states, old_goals, new_actions, is_terminals, is_weights, next_entropy, None, metrics)
with torch.no_grad():
wanted_qs = self._value(self.target_net, self.vpn_target, images, new_states, new_actions)
if self.classic_critic is not None:
alpha = 1 - (min(total_steps_taken, 1e6) / 1e6)  # anneal from the classic critic to the VPN target over the first 1e6 steps
wanted_qs_classic = torch.stack([net(new_states, new_goals, new_actions) for net in self.classic_critic.target_nets], dim=0)
wanted_qs_classic = torch.min(wanted_qs_classic, dim=0)[0].detach().squeeze()
wanted_qs = alpha * wanted_qs_classic + (1 - alpha) * wanted_qs
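# TD(0) target: r + gamma * Q_target(s', a'), with the bootstrap term zeroed at terminal states.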
wanted_qs = rewards + (1 - is_terminals) * (self.gamma * wanted_qs)
if next_entropy is not None:
wanted_qs -= next_entropy
wanted_qs = torch.clamp(wanted_qs, max=0, min=self.q_limit)
infered_Qs, extra_loss = self._value(self.infer_net, self.vpn, images, old_states, old_actions, get_extra_loss=True)
if is_weights is None:
is_weights = torch.ones_like(wanted_qs)
abs_errors = torch.abs(wanted_qs - infered_Qs).detach()
self.optimizer.zero_grad()
difference = (wanted_qs - infered_Qs)
loss = torch.mean(is_weights * torch.mul(difference, difference), dim=0) + extra_loss
loss.backward()
self.optimizer.step()
metrics[self.critic_name + '/Q_loss'] = loss.item()
metrics[self.critic_name + '/Q_val'] = torch.mean(wanted_qs).item()
return abs_errors
def get_gradients_for_actions(self, state, goal, actor, images):
return None
def state_dict(self):
result = {}
if self.no_weights: return result
result['target_net'] = self.target_net.state_dict()
result['infer_net'] = self.infer_net.state_dict()
result['optimizer'] = self.optimizer.state_dict()
result['vpn'] = self.vpn.state_dict()
result['vpn_target'] = self.vpn_target.state_dict()
return result
def load_state_dict(self, state_dict):
if self.no_weights: return
self.target_net.load_state_dict(state_dict['target_net'])
self.infer_net.load_state_dict(state_dict['infer_net'])
self.optimizer.load_state_dict(state_dict['optimizer'])
self.vpn.load_state_dict(state_dict['vpn'])
self.vpn_target.load_state_dict(state_dict['vpn_target'])
|
{"hexsha": "57bc3a2a3181a83d1587e9b900724bcc8fa9fdc5", "size": 7752, "ext": "py", "lang": "Python", "max_stars_repo_path": "vpn_dqn_critic.py", "max_stars_repo_name": "christsa/hide-rl", "max_stars_repo_head_hexsha": "47dc3dfd93b817831473c07137a6a6e7f2eda549", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-09-17T15:16:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T14:24:39.000Z", "max_issues_repo_path": "vpn_dqn_critic.py", "max_issues_repo_name": "christsa/hide-rl", "max_issues_repo_head_hexsha": "47dc3dfd93b817831473c07137a6a6e7f2eda549", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vpn_dqn_critic.py", "max_forks_repo_name": "christsa/hide-rl", "max_forks_repo_head_hexsha": "47dc3dfd93b817831473c07137a6a6e7f2eda549", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.6, "max_line_length": 183, "alphanum_fraction": 0.6608617131, "include": true, "reason": "import numpy", "num_tokens": 1837}
|
import matplotlib.pyplot as plt
import scipy.optimize as opt
import numpy as np
# Function
def func_exponential(x,g,n_0):
return n_0*np.power(1+g,x)
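# Discrete compound-growth model: n(x) = n_0 * (1 + g)**x, where g is the
# per-step growth rate and n_0 the initial value.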
if __name__=="__main__":
# Data
x_samp = np.array([1,2,3,4,5,6])
y_samp = np.array([3,4,5,6,6,11])
# Estimate
w, _ = opt.curve_fit(func_exponential, x_samp, y_samp)
# Print
print('Estimated Parameters', w)
print('Growth rate: ',w[0])
print('Initial value: ',w[1])
# Model
x_lin = np.linspace(0, x_samp.max(), 50) # 50 evenly spaced points between 0 and the max of x_samp
y_model = func_exponential(x_lin, *w)
# Plot
plt.plot(x_samp, y_samp, "ko", label="Data")
plt.plot(x_lin, y_model, "k--", label="Fit")
plt.title("Least squares regression")
plt.legend(loc="upper left")
plt.show()
|
{"hexsha": "ab4997310a543a92be5052bb61d90d3bbc71b0e2", "size": 804, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/func_exponential.py", "max_stars_repo_name": "yasirroni/myNafiun", "max_stars_repo_head_hexsha": "70f1eb56eb344d88fa6ea7cafe2d0925bfccc1d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/func_exponential.py", "max_issues_repo_name": "yasirroni/myNafiun", "max_issues_repo_head_hexsha": "70f1eb56eb344d88fa6ea7cafe2d0925bfccc1d6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-04T00:58:51.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-06T08:51:30.000Z", "max_forks_repo_path": "lib/func_exponential.py", "max_forks_repo_name": "yasirroni/myNafiun", "max_forks_repo_head_hexsha": "70f1eb56eb344d88fa6ea7cafe2d0925bfccc1d6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-03T03:53:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-03T03:53:40.000Z", "avg_line_length": 24.3636363636, "max_line_length": 103, "alphanum_fraction": 0.6293532338, "include": true, "reason": "import numpy,import scipy", "num_tokens": 264}
|
import os.path
import matplotlib.pyplot as plt
import scanpy as sc
import pandas as pd
import seaborn as sns
from outer_spacem.io import convert_name
import numpy as np
from pathlib import Path
from outer_spacem.pl import plot_distributions, plot_umap_top_n, volcano_plot
from singlecelltools.various import get_molecules_names
well = "Well_8"
postfix = "_log_transform"
if well == "Well_8":
# -------- WELL 8 -----------------------:
adata = sc.read(
"/Users/alberto-mac/EMBL_ATeam/projects/gastrosome/Drug_W8/spatiomolecular_adata.h5ad") # assuming you only have 1 dataset
# filter out extracell stuff
intracell_ions = pd.read_csv("/Users/alberto-mac/EMBL_ATeam/projects/gastrosome/molecules_databases/reannotated/AB_Gastrosome_DrugW8_intra_ions_v2.tsv",
sep="\t", index_col=0)
# adata = adata[:, adata.var.formula.isin(intracell_ions["name"])].copy()
data_dir = Path(r"/Users/alberto-mac/EMBL_ATeam/projects/gastrosome/Drug_W8")
proj_dir = "new_processing" + postfix
elif well == "Well_3":
# -------- WELL 3 -----------------------:
adata = sc.read("/Users/alberto-mac/EMBL_ATeam/projects/gastrosome/Feeding_W3/spatiomolecular_adata.h5ad") # assuming you only have 1 dataset
# filter out extracell stuff
intracell_ions = pd.read_csv("/Users/alberto-mac/EMBL_ATeam/projects/gastrosome/molecules_databases/reannotated/AB_Gastrosome_FeedingW3_intra_ions_v1.tsv", sep="\t", index_col=0)
intracell_ions.head()
# adata = adata[:, adata.var.formula.isin(intracell_ions["name"])].copy()
data_dir = Path(r"/Users/alberto-mac/EMBL_ATeam/projects/gastrosome/Feeding_W3")
proj_dir = "new_processing" + postfix
else:
raise ValueError
plots_path = data_dir / proj_dir / "plots"
plots_path.mkdir(parents=True, exist_ok=True)
sc.settings.figdir = plots_path
cond_col = "cell_type"
adata.obs[cond_col] = np.where(adata.obs["max_intensity-Annotations"] > 0., "Gastrosomes", "Other cells")
adata.obs = adata.obs.astype({cond_col: "category"})
# ------------------------------------
# # Try to select only 172 non-gastrosomes:
# other_cells_ids = np.argwhere((adata.obs[cond_col] == "Other cells").values)[:,0]
# chosen_other_cells_ids = np.random.choice(other_cells_ids, size=172, replace=False)
# mask = adata.obs[cond_col] == "Gastrosomes"
# mask[chosen_other_cells_ids] = True
# print(adata.obs.shape)
# adata = adata[mask, :]
# print(adata.obs.shape)
# ------------------------------------
# ------------------------------------
# # Resample fake gastrosomes
# random_choice = np.random.randint(2, size=adata.obs["max_intensity-Annotations"].shape)
# adata.obs[cond_col] = np.where(random_choice, "Gastrosomes", "Other cells")
# adata.obs = adata.obs.astype({cond_col: "category"})
# ------------------------------------
nb_marked_cells = (adata.obs[cond_col] == "Gastrosomes").sum()
total_nb_cells = adata.obs[cond_col].shape[0]
print("Gastrosomes: {}/{} cells".format(nb_marked_cells, total_nb_cells))
# # ---------------------------
# # Fix the mess with the molecules names:
# # ---------------------------
#
# # Get original databases properly annotated:
# CM = pd.read_csv(
# "/Users/alberto-mac/EMBL_ATeam/projects/gastrosome/molecules_databases/core_metabolome_v3.csv",
# sep="\t", index_col=0)
#
# SwissLip = pd.read_csv(
# "/Users/alberto-mac/EMBL_ATeam/projects/gastrosome/molecules_databases/swisslipids_2018-02-02-v2.tsv",
# sep="\t", index_col=0)
#
# CM.to_csv("/Users/alberto-mac/EMBL_ATeam/projects/gastrosome/Drug_W8/test_data.csv")
#
# combined_databases = pd.concat([CM, SwissLip])
# merged_database = combined_databases.merge(adata.var, on='formula')
# filter out low ion cells
sc.pp.filter_cells(adata, min_genes=5)
# TIC norm
# sc.pp.normalize_total(adata, key_added='tic', target_sum=1.)
# Alternative norm method by Alyona:
adata.obs["tic"] = adata.X.sum(axis=1)
# adata.X = np.divide(adata.X, np.array(adata.obs["tic"])[:, None])
# --------------------------------
# FILTERING!
# --------------------------------
# Filtering low and high TIC
sns.histplot(adata.obs, x="tic", hue=cond_col)
plt.title("TIC, unfiltred dataset")
plt.show()
plt.savefig(plots_path / ("unfiltered_tic_%s.png"%cond_col), dpi=300)
lower_thresh = np.quantile(adata.obs["tic"], 0.1)
higher_thresh = np.quantile(adata.obs["tic"], 0.9)
print(lower_thresh, higher_thresh)
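# Keep only cells whose TIC falls between the 10th and 90th percentiles to drop outliers.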
adata = adata[(adata.obs["tic"] > lower_thresh) & (adata.obs["tic"] < higher_thresh)]
# Filter not abundant ions
adata.var["log_total_intensity"] = np.log(adata.X.sum(axis=0))
adata.var["nonzero"] = np.count_nonzero(adata.X, axis=0)
adata.var["nonzero_ratio"] = np.count_nonzero(adata.X, axis=0) / adata.X.shape[0]
adata.layers["masked"]= np.ma.masked_less(adata.X, 1)
adata.var["median_nonzero_I"] = np.ma.median(adata.layers["masked"], axis=0)
sns.histplot(adata.var, x="nonzero_ratio")
plt.title("nonzero_ratio, unfiltred dataset")
plt.show()
plt.savefig(plots_path / ("unfiltered_nonzero_ratio_%s.png"%cond_col), dpi=300)
thresh = 0.1
adata = adata[:, adata.var["nonzero_ratio"] > thresh]
# Filter ions not in marked cells:
adata_marked = adata[adata.obs[cond_col] == "Gastrosomes"]
adata_marked.var["log_total_intensity_marked"] = np.log(adata_marked.X.sum(axis=0))
adata_marked.var["nonzero_marked"] = np.count_nonzero(adata_marked.X, axis=0)
adata_marked.var["nonzero_ratio_marked"] = np.count_nonzero(adata_marked.X, axis=0) / adata_marked.X.shape[0]
sns.histplot(adata_marked.var, x="nonzero_ratio_marked")
plt.title("nonzero_ratio_marked, unfiltred dataset")
plt.show()
plt.savefig(plots_path / ("unfiltered_nonzero_ratio_marked_%s.png"%cond_col), dpi=300)
thresh = 0.05
adata = adata[:, adata_marked.var["nonzero_ratio_marked"] > thresh]
sns.histplot(adata.obs, x="tic", hue=cond_col)
plt.title("TIC, unfiltred dataset")
plt.show()
# NORMALIZATION:
# TIC norm
sc.pp.normalize_total(adata, target_sum=1., key_added='tic')
# sc.pp.normalize_total(adata, key_added='tic')
# Alternative norm method by Alyona:
# adata.obs["tic"] = adata.X.sum(axis=1)
# adata.X = np.divide(adata.X, np.array(adata.obs["tic"])[:, None])
# --------------------------
# DE analysis:
# --------------------------
# adata.X = np.log1p(adata.X)
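# Differential expression: per-ion Wilcoxon rank-sum test of Gastrosomes vs. other cells.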
sc.tl.rank_genes_groups(adata, cond_col, method='wilcoxon', key_added="wilcoxon", gene_symbols="var_names")
# sc.pl.rank_genes_groups(adata, n_genes=25, sharey=False, key="wilcoxon", gene_symbols="var_names")
selected = volcano_plot(adata, "wilcoxon", plots_path, pval_thresh=0.05, foldch_thresh=2, gene_symbols="var_names")
# Export results to csv:
diff_expr_df = sc.get.rank_genes_groups_df(adata, None, key="wilcoxon", gene_symbols="var_names")
diff_expr_df = diff_expr_df.sort_values("pvals_adj", ascending=True)
diff_expr_df = diff_expr_df[diff_expr_df["group"] == "Gastrosomes"]
diff_expr_df.to_csv(os.path.join(plots_path, "DE_results.csv"))
# # --------------------------
# # Plot distributions:
# # --------------------------
# dist_plots_path = plots_path / "intensity_distributions"
# dist_plots_path.mkdir(parents=True, exist_ok=True)
# plot_distributions(adata, cond_col, dist_plots_path, gene_symbols="var_names")
# # --------------------------
# # Plot distributions for selected:
# # --------------------------
adata_filtered = adata.copy()
adata_filtered = adata_filtered[:, np.isin(adata_filtered.var["annotation_id"], selected)]
dist_plots_path = plots_path / "intensity_distributions_volcano_selected"
dist_plots_path.mkdir(parents=True, exist_ok=True)
plot_distributions(adata_filtered, cond_col, dist_plots_path, gene_symbols="var_names")
|
{"hexsha": "96ab967b14d01d4a9a44badb1bfdac6ce54de9a3", "size": 7615, "ext": "py", "lang": "Python", "max_stars_repo_path": "projects/gastrosome_processing/diff_express_analysis_old.py", "max_stars_repo_name": "abailoni/single-cell-analysis", "max_stars_repo_head_hexsha": "3fb68992a22249ab96178e173ceb552c037f25ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "projects/gastrosome_processing/diff_express_analysis_old.py", "max_issues_repo_name": "abailoni/single-cell-analysis", "max_issues_repo_head_hexsha": "3fb68992a22249ab96178e173ceb552c037f25ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "projects/gastrosome_processing/diff_express_analysis_old.py", "max_forks_repo_name": "abailoni/single-cell-analysis", "max_forks_repo_head_hexsha": "3fb68992a22249ab96178e173ceb552c037f25ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.698019802, "max_line_length": 182, "alphanum_fraction": 0.6946815496, "include": true, "reason": "import numpy", "num_tokens": 2121}
|
@testset "Parsimonious flux balance analysis with StandardModel" begin
model = test_toyModel()
d = parsimonious_flux_balance_analysis_dict(
model,
Tulip.Optimizer;
modifications = [
change_constraint("EX_m1(e)", lb = -10.0),
change_optimizer_attribute("IPM_IterationsLimit", 500),
],
qp_modifications = [
change_optimizer(OSQP.Optimizer),
change_optimizer_attribute("polish", true),
silence,
],
)
# The optimizer used here does not converge to exactly the same answer
# every time, so we tolerate a wide range of results.
@test isapprox(d["biomass1"], 10.0, atol = QP_TEST_TOLERANCE)
end
|
{"hexsha": "341eb6c43787009e7d1150b74e88b9046e6a886b", "size": 726, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/analysis/parsimonious_flux_balance_analysis.jl", "max_stars_repo_name": "LCSB-BioCore/COBREXA.jl", "max_stars_repo_head_hexsha": "cfe20e2a9d5e98cd097cf9f62c5d32f07c1199b0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2021-05-11T15:33:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T12:16:39.000Z", "max_issues_repo_path": "test/analysis/parsimonious_flux_balance_analysis.jl", "max_issues_repo_name": "LCSB-BioCore/COBREXA.jl", "max_issues_repo_head_hexsha": "cfe20e2a9d5e98cd097cf9f62c5d32f07c1199b0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 326, "max_issues_repo_issues_event_min_datetime": "2021-05-11T14:20:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T00:28:12.000Z", "max_forks_repo_path": "test/analysis/parsimonious_flux_balance_analysis.jl", "max_forks_repo_name": "LCSB-BioCore/COBREXA.jl", "max_forks_repo_head_hexsha": "cfe20e2a9d5e98cd097cf9f62c5d32f07c1199b0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-05-13T15:47:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T21:40:55.000Z", "avg_line_length": 33.0, "max_line_length": 77, "alphanum_fraction": 0.6391184573, "num_tokens": 170}
|
# -*- coding: utf-8 -*-
"""
@author: Quoc-Tuan Truong <tuantq.vnu@gmail.com>
"""
from scipy.sparse import csr_matrix, find
from collections import OrderedDict
import numpy as np
class TrainSet:
def __init__(self, uid_map, iid_map):
self._uid_map = uid_map
self._iid_map = iid_map
@property
def num_users(self):
return len(self._uid_map)
@property
def num_items(self):
return len(self._iid_map)
def is_unk_user(self, mapped_uid):
return mapped_uid >= self.num_users
def is_unk_item(self, mapped_iid):
return mapped_iid >= self.num_items
def get_uid(self, raw_uid):
return self._uid_map[raw_uid]
def get_iid(self, raw_iid):
return self._iid_map[raw_iid]
def get_uid_list(self):
return self._uid_map.values()
def get_raw_uid_list(self):
return self._uid_map.keys()
def get_iid_list(self):
return self._iid_map.values()
def get_raw_iid_list(self):
return self._iid_map.keys()
@staticmethod
def idx_iter(idx_range, batch_size=1, shuffle=False):
""" Create an iterator over batch of indices
Parameters
----------
batch_size : int, optional, default = 1
shuffle : bool, optional
If True, orders of triplets will be randomized. If False, default orders kept
Returns
-------
iterator : batch of indices (array of np.int)
"""
indices = np.arange(idx_range)
if shuffle:
np.random.shuffle(indices)
n_batches = int(np.ceil(len(indices) / batch_size))
for b in range(n_batches):
start_offset = batch_size * b
end_offset = batch_size * b + batch_size
end_offset = min(end_offset, len(indices))
batch_ids = indices[start_offset:end_offset]
yield batch_ids
class MatrixTrainSet(TrainSet):
def __init__(self, matrix, max_rating, min_rating, global_mean, uid_map, iid_map):
TrainSet.__init__(self, uid_map, iid_map)
self.matrix = matrix
self.max_rating = max_rating
self.min_rating = min_rating
self.global_mean = global_mean
self.item_ppl_rank = self._rank_items_by_popularity(matrix)
self.triplets = None
@property
def num_users(self):
return self.matrix.shape[0]
@property
def num_items(self):
return self.matrix.shape[1]
@staticmethod
def _rank_items_by_popularity(rating_matrix):
item_ppl_scores = rating_matrix.sum(axis=0)
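# .A1 flattens the 1 x n matrix of column sums; argsort()[::-1] ranks items
# from most to least popular.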
item_rank = np.argsort(item_ppl_scores.A1)[::-1]
return item_rank
@classmethod
def from_uir_triplets(cls, triplet_data, pre_uid_map=None, pre_iid_map=None,
pre_ui_set=None, verbose=False):
if pre_uid_map is None:
pre_uid_map = OrderedDict()
if pre_iid_map is None:
pre_iid_map = OrderedDict()
if pre_ui_set is None:
pre_ui_set = set()
uid_map = OrderedDict()
iid_map = OrderedDict()
u_indices = []
i_indices = []
r_values = []
rating_sum = 0.
rating_count = 0
max_rating = float('-inf')
min_rating = float('inf')
for raw_uid, raw_iid, rating in triplet_data:
if (raw_uid, raw_iid) in pre_ui_set: # duplicate rating
continue
pre_ui_set.add((raw_uid, raw_iid))
mapped_uid = pre_uid_map.setdefault(raw_uid, len(pre_uid_map))
mapped_iid = pre_iid_map.setdefault(raw_iid, len(pre_iid_map))
uid_map[raw_uid] = mapped_uid
iid_map[raw_iid] = mapped_iid
rating = float(rating)
rating_sum += rating
rating_count += 1
if rating > max_rating:
max_rating = rating
if rating < min_rating:
min_rating = rating
u_indices.append(mapped_uid)
i_indices.append(mapped_iid)
r_values.append(rating)
# csr_matrix is more efficient for row (user) slicing
csr_mat = csr_matrix((r_values, (u_indices, i_indices)), shape=(len(uid_map), len(iid_map)))
global_mean = rating_sum / rating_count
if verbose:
print('Number of training users = {}'.format(len(uid_map)))
print('Number of training items = {}'.format(len(iid_map)))
print('Max rating = {:.1f}'.format(max_rating))
print('Min rating = {:.1f}'.format(min_rating))
print('Global mean = {:.1f}'.format(global_mean))
return cls(csr_mat, max_rating, min_rating, global_mean, uid_map, iid_map)
def uir_iter(self, batch_size=1, shuffle=False):
""" Create an iterator over data yielding batch of users, items, and rating values
Parameters
----------
batch_size : int, optional, default = 1
shuffle : bool, optional
If True, orders of triplets will be randomized. If False, default orders kept
Returns
-------
iterator : batch of users (array of np.int), batch of items (array of np.int),
batch of ratings (array of np.float)
"""
if self.triplets is None:
self.triplets = find(self.matrix)
for batch_ids in self.idx_iter(len(self.triplets[0]), batch_size, shuffle):
batch_users = self.triplets[0][batch_ids]
batch_items = self.triplets[1][batch_ids]
batch_ratings = self.triplets[2][batch_ids]
yield batch_users, batch_items, batch_ratings
def uij_iter(self, batch_size=1, shuffle=False):
""" Create an iterator over data yielding batch of users, positive items, and negative items
Parameters
----------
batch_size : int, optional, default = 1
shuffle : bool, optional
If True, orders of triplets will be randomized. If False, default orders kept
Returns
-------
iterator : batch of users (array of np.int), batch of positive items (array of np.int),
batch of negative items (array of np.int)
"""
if self.triplets is None:
self.triplets = find(self.matrix)
for batch_ids in self.idx_iter(len(self.triplets[0]), batch_size, shuffle):
batch_users = self.triplets[0][batch_ids]
batch_pos_items = self.triplets[1][batch_ids]
batch_pos_ratings = self.triplets[2][batch_ids]
batch_neg_items = np.zeros_like(batch_pos_items)
for i, (user, pos_rating) in enumerate(zip(batch_users, batch_pos_ratings)):
neg_item = np.random.randint(0, self.num_items - 1)
while self.matrix[user, neg_item] >= pos_rating:
neg_item = np.random.randint(0, self.num_items - 1)
batch_neg_items[i] = neg_item
yield batch_users, batch_pos_items, batch_neg_items
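# Hedged usage sketch (ids and shapes illustrative, not from the original module):
# data = [('u1', 'i1', 4.0), ('u1', 'i2', 3.0), ('u2', 'i1', 5.0)]
# train_set = MatrixTrainSet.from_uir_triplets(data, verbose=True)
# for users, pos_items, neg_items in train_set.uij_iter(batch_size=2, shuffle=True):
#     pass  # feed each (user, positive, negative) batch to a pairwise-ranking model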
|
{"hexsha": "1d8298560e21eec4a177353d582986e8d3111be2", "size": 7012, "ext": "py", "lang": "Python", "max_stars_repo_path": "cornac/data/trainset.py", "max_stars_repo_name": "Andrew-DungLe/cornac", "max_stars_repo_head_hexsha": "199ab9181f8b6387cc8748ccf8ee3e5c9df087fb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cornac/data/trainset.py", "max_issues_repo_name": "Andrew-DungLe/cornac", "max_issues_repo_head_hexsha": "199ab9181f8b6387cc8748ccf8ee3e5c9df087fb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cornac/data/trainset.py", "max_forks_repo_name": "Andrew-DungLe/cornac", "max_forks_repo_head_hexsha": "199ab9181f8b6387cc8748ccf8ee3e5c9df087fb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6139534884, "max_line_length": 100, "alphanum_fraction": 0.6096691386, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1595}
|
@testset "sparse_constructor" begin
A = sprand(10,10,0.1)
s = SparseTensor(A)
@test run(sess, s)≈A
I = [1;2;4;2;3;5]
J = [1;3;2;2;2;1]
V = rand(6)
A = sparse(I,J,V,6,5)
s = SparseTensor(I,J,V,6,5)
@test run(sess, s)≈A
indices = [I J]
s = SparseTensor(I,J,V,6,5)
@test run(sess, s)≈A
S = Array(s)
@test run(sess, S)≈Array(A)
@test size(s)==(6,5)
@test size(s,1)==6
@test size(s,2)==5
end
@testset "sparse_arithmetic" begin
A1 = rand(10,5); B1 = sprand(10,5,0.3)
A = constant(A1)
B = SparseTensor(B1)
@test run(sess, -B)≈-B1
for op in [+,-]
C = op(A, B)
C1 = op(A1, B1)
@test run(sess, C)≈C1
end
@test run(sess, B-A)≈B1-A1
end
@testset "sparse_adjoint" begin
A = sprand(10,5,0.3)
A1 = SparseTensor(A)
@test run(sess, A1')≈sparse(A')
end
@testset "sparse_mul" begin
A1 = rand(10,10); B1 = sprand(10,10,0.3)
C1 = rand(10)
A = constant(A1)
B = SparseTensor(B1)
C = constant(C1)
@test run(sess, B*A) ≈ B1*A1
@test run(sess, A*B) ≈ A1*B1
@test run(sess, B*A1) ≈ B1*A1
@test run(sess, A1*B) ≈ A1*B1
@test run(sess, B*C) ≈ B1*C1
@test run(sess, B*C1) ≈ B1*C1
end
@testset "sparse_vcat_hcat" begin
B1 = sprand(10,3,0.3)
B = SparseTensor(B1)
@test run(sess, [B;B])≈[B1;B1]
@test run(sess, [B B])≈[B1 B1]
end
@testset "sparse_indexing" begin
B1 = sprand(10,10,0.3)
B = SparseTensor(B1)
@test run(sess, B[2:3,2:3])≈B1[2:3,2:3]
@test run(sess, B[2:3,:])≈B1[2:3,:]
@test run(sess, B[:,2:3])≈B1[:,2:3]
end
@testset "sparse_solve" begin
A = sparse(I, 10,10) + sprand(10,10,0.1)
b = rand(10)
A1 = SparseTensor(A)
b1 = constant(b)
u = A1\b1
@test run(sess, u) ≈ A\b
end
@testset "sparse_assembler" begin
m = 20
n = 100
handle = SparseAssembler(100, m, 0.0)
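# Accumulate per-row (column, value) contributions through the handle, then
# assemble all ops into an m x n sparse matrix; duplicate entries are summed.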
op = PyObject[]
A = zeros(m, n)
for i = 1:1
ncol = rand(1:n, 10)
row = rand(1:m)
v = rand(10)
for (k,val) in enumerate(v)
@show k
A[row, ncol[k]] += val
end
@show v
push!(op, accumulate(handle, row, ncol, v))
end
op = vcat(op...)
J = assemble(m, n, op)
B = run(sess, J)
@test norm(A-B)<1e-8
handle = SparseAssembler(100, 5, 1.0)
op1 = accumulate(handle, 1, [1;2;3], [2.0;0.5;0.5])
J = assemble(5, 5, op1)
B = run(sess, J)
@test norm(B-[2.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0])<1e-8
handle = SparseAssembler(100, 5, 0.0)
op1 = accumulate(handle, 1, [1;1], [1.0;1.0])
op2 = accumulate(handle, 1, [1;2], [1.0;1.0])
J = assemble(5, 5, [op1;op2])
B = run(sess, J)
@test norm(B-[3.0 1.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0])<1e-8
end
@testset "sparse_least_square" begin
ii = Int32[1;1;2;2;3;3]
jj = Int32[1;2;1;2;1;2]
vv = Float64[1;2;3;4;5;6]
ff = Float64[1;1;1]
A = SparseTensor(ii, jj, vv, 3, 2)
o = A\ff
@test norm(run(sess, o)-[-1;1])<1e-6
end
@testset "sparse mat mul" begin
A = sprand(10,5,0.3)
B = sprand(5,20,0.3)
C = A*B
CC = SparseTensor(A)*SparseTensor(B)
C_ = run(sess, CC)
@test C_≈C
A = spdiagm(0=>[1.;2.;3;4;5])
B = sprand(5,20,0.3)
C = A*B
CC = SparseTensor(A)*SparseTensor(B)
C_ = run(sess, CC)
@test C_≈C
A = sprand(10,5,0.5)
B = spdiagm(0=>[1.;2.;3;4;5])
C = A*B
CC = SparseTensor(A)*SparseTensor(B)
C_ = run(sess, CC)
@test C_≈C
end
@testset "spdiag" begin
p = rand(10)
A = spdiagm(0=>p)
B = spdiag(constant(p))
C = spdiag(10)
@test run(sess, B)≈A
@test B._diag
@test run(sess, C)≈spdiagm(0=>ones(10))
end
@testset "spzero" begin
q = spzero(10)
@test run(sess, q)≈sparse(zeros(10,10))
q = spzero(10,20)
@test run(sess, q)≈sparse(zeros(10,20))
end
@testset "sparse indexing" begin
i1 = unique(rand(1:20,3))
j1 = unique(rand(1:30,3))
A = sprand(20,30,0.3)
Ad = Array(A[i1, j1])
B = SparseTensor(A)
Bd = Array(B[i1, j1])
Bd_ = run(sess, Bd)
@test Ad≈Bd_
end
@testset "sum" begin
s = sprand(10,20,0.2)
S = SparseTensor(s)
@test run(sess, sum(S)) ≈ sum(s)
@test run(sess, sum(S,dims=1)) ≈ sum(Array(s),dims=1)[:]
@test run(sess, sum(S,dims=2)) ≈ sum(Array(s),dims=2)[:]
end
@testset "dense_to_sparse" begin
A = sprand(10,20,0.3)
B = Array(A)
@test run(sess, dense_to_sparse((B))) ≈ A
@test run(sess, dense_to_sparse(constant(B))) ≈ A
end
@testset "spdiagm" begin
a = rand(10)
b = rand(9)
A = spdiag(
10,
0=>a,
-1=>b
)
B = diagm(
0=>a,
-1=>b
)
@test Array(run(sess, A)) ≈ B
b = rand(7)
A = spdiag(
10,
0=>a,
-3=>b
)
B = diagm(
0=>a,
-3=>b
)
@test Array(run(sess, A)) ≈ B
b = rand(7)
A = spdiag(
10,
0=>a,
-3=>b,
3=>4b
)
B = diagm(
0=>a,
-3=>b,
3=>4b
)
@test Array(run(sess, A)) ≈ B
b = rand(7)
A = spdiag(
10,
0=>a,
-3=>b
)
B = diagm(
0=>a,
-3=>b
)
@test Array(run(sess, A)) ≈ B
end
@testset "hvcat" begin
A = sprand(10,5,0.3)
B = sprand(10,5,0.2)
C = sprand(5,10,0.4)
D = [A B;C]
D_ = [SparseTensor(A) SparseTensor(B); SparseTensor(C)]
@test run(sess, D_)≈D
end
@testset "find" begin
A = sprand(10,10, 0.3)
ii = Int64[]
jj = Int64[]
vv = Float64[]
for i = 1:10
for j = 1:10
if A[i,j]!=0
push!(ii, i)
push!(jj, j)
push!(vv, A[i,j])
end
end
end
a = SparseTensor(A)
i, j, v = find(a)
@test run(sess, i)≈ii
@test run(sess, j)≈jj
@test run(sess, v)≈vv
@test run(sess, rows(a))≈ii
@test run(sess, cols(a))≈jj
@test run(sess, values(a))≈vv
end
@testset "sparse scatter update add" begin
A = sprand(10,10,0.3)
B = sprand(3,3,0.6)
ii = [1;4;5]
jj = [2;4;6]
u = scatter_update(A, ii, jj, B)
C = copy(A)
C[ii,jj] = B
@test run(sess, u)≈C
u = scatter_add(A, ii, jj, B)
C = copy(A)
C[ii,jj] += B
@test run(sess, u)≈C
end
@testset "constant sparse" begin
A = sprand(10,10,0.3)
B = constant(A)
@test run(sess, B)≈A
end
@testset "get index" begin
idof = [false;true]
M = spdiag(constant(ones(2)))
Md = M[idof, idof]
@test run(sess, Md) ≈ sparse(reshape([1.0],1,1))
end
@testset "sparse_factorization_and_solve" begin
A = sprand(10,10,0.7)
rhs1 = rand(10)
rhs2 = rand(10)
Afac = factorize(constant(A))
v1 = Afac\rhs1
v2 = Afac\rhs2
@test norm(run(sess, v1) - A\rhs1)<1e-10
@test norm(run(sess, v2) - A\rhs2)<1e-10
end
|
{"hexsha": "ea3fc023013e994e576fcdd1e76f0e1785ffcf5b", "size": 7112, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/sparse.jl", "max_stars_repo_name": "EricDarve/ADCME.jl", "max_stars_repo_head_hexsha": "7eb334354e3ba5427a3f13a4a60e0f6ca5eec006", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-12T21:14:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-12T21:14:41.000Z", "max_issues_repo_path": "test/sparse.jl", "max_issues_repo_name": "EricDarve/ADCME.jl", "max_issues_repo_head_hexsha": "7eb334354e3ba5427a3f13a4a60e0f6ca5eec006", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/sparse.jl", "max_forks_repo_name": "EricDarve/ADCME.jl", "max_forks_repo_head_hexsha": "7eb334354e3ba5427a3f13a4a60e0f6ca5eec006", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.421686747, "max_line_length": 60, "alphanum_fraction": 0.4981721035, "num_tokens": 3079}
|
#!/usr/bin/env python3
import numpy as np
from org.mk.training.dl.rnn_cell import LSTMCell
from org.mk.training.dl.rnn import dynamic_rnn
#from org.mk.training.dl.rnn import compute_gradients
from org.mk.training.dl.rnn import print_gradients
from org.mk.training.dl.rnn import zero_state_initializer
from org.mk.training.dl.rnn import LSTMStateTuple
from org.mk.training.dl.common import loss
from org.mk.training.dl.common import softmax
from org.mk.training.dl.common import input_one_hot
from org.mk.training.dl.common import cross_entropy_loss
from org.mk.training.dl.common import WeightsInitializer
from org.mk.training.dl import init_ops
from org.mk.training.dl.optimizer import BatchGradientDescent
from org.mk.training.dl.core import Dense
from org.mk.training.dl.rnn import MultiRNNCell
from org.mk.training.dl.rnn import bidirectional_dynamic_rnn
import sys
import collections
# data I/O
train_file = sys.argv[1]
data = open(train_file, 'r').read()
out_weights = np.array([[-0.09588283, -2.2044923 , -0.74828255, 0.14180686, -0.32083616,
-0.9444244 , 0.06826905, -0.9728962 , -0.18506959, 1.0618515 ],
[ 1.156649 , 3.2738173 , -1.2556943 , -0.9079511 , -0.82127047,
-1.1448543 , -0.60807484, -0.5885713 , 1.0378786 , -0.7088431 ],
[ 1.006477 , 0.28033388, -0.1804534 , 0.8093307 , -0.36991575,
0.29115433, -0.01028167, -0.7357091 , 0.92254084, -0.10753923],
[ 0.19266959, 0.6108299 , 2.2495654 , 1.5288974 , 1.0172302 ,
1.1311738 , 0.2666629 , -0.30611828, -0.01412263, 0.44799015],
[ 0.19266959, 0.6108299 , 2.2495654 , 1.5288974 , 1.0172302 ,
1.1311738 , 0.2666629 , -0.30611828, -0.01412263, 0.44799015],
[-0.09588283, -2.2044923 , -0.74828255, 0.14180686, -0.32083616,
-0.9444244 , 0.06826905, -0.9728962 , -0.18506959, 1.0618515 ],
[ 1.156649 , 3.2738173 , -1.2556943 , -0.9079511 , -0.82127047,
-1.1448543 , -0.60807484, -0.5885713 , 1.0378786 , -0.7088431 ],
[ 1.006477 , 0.28033388, -0.1804534 , 0.8093307 , -0.36991575,
0.29115433, -0.01028167, -0.7357091 , 0.92254084, -0.10753923],
[ 0.19266959, 0.6108299 , 2.2495654 , 1.5288974 , 1.0172302 ,
1.1311738 , 0.2666629 , -0.30611828, -0.01412263, 0.44799015],
[ 0.19266959, 0.6108299 , 2.2495654 , 1.5288974 , 1.0172302 ,
1.1311738 , 0.2666629 , -0.30611828, -0.01412263, 0.44799015]]
)
out_biases = np.array([[0.1458478, -0.3660951, -2.1647317, -1.9633691, -0.24532059,
0.14005205, -1.0961286, -0.43737876, 0.7028531, -1.8481724]]
)
def read_data(fname):
with open(fname) as f:
data = f.readlines()
data = [x.strip() for x in data]
data = [data[i].lower().split() for i in range(len(data))]
data = np.array(data)
data = np.reshape(data, [-1, ])
print(data)
return data
def build_dataset(train_data):
count = collections.Counter(train_data).most_common()
print("count:", count)
dictionary = dict()
print("dictionary:", dictionary)
sortedwords = sorted(set(train_data))
print("sortedword:", sortedwords)
for word in sortedwords:
print("word:", word)
dictionary[word] = len(dictionary)
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return dictionary, reverse_dictionary
train_data = read_data(train_file)
dictionary, reverse_dictionary = build_dataset(train_data)
vocab_size = len(dictionary)
print("dictionary:", dictionary)
print("reverse_dictionary:", reverse_dictionary)
print("vocab_size", vocab_size)
learning_rate = 0.001
# training_iters = 50000
training_iters = 200
display_step = 100
n_input = 3
n_hidden = 5
rnd = np.random.RandomState(42)
step = 0
#offset = rnd.randint(0, n_input + 1)
offset=2
end_offset = n_input + 1
acc_total = 0
loss_total = 0
print("offset:", offset)
#with WeightsInitializer(initializer=init_ops.Constant(0.1)) as vs:
#cell = LSTMCell(n_hidden,debug=True)
gdo=BatchGradientDescent(learning_rate)
#out_l = Dense(10,kernel_initializer=init_ops.Constant(out_weights),bias_initializer=init_ops.Constant(out_biases))
def RNN(x, weights, biases):
with WeightsInitializer(initializer=init_ops.Constant(0.1)) as vs:
bw_cell = LSTMCell(n_hidden)
fw_cell = LSTMCell(n_hidden)
result, state = bidirectional_dynamic_rnn(fw_cell, bw_cell, x)
"Dense in this case should be out of WeightsInitializer scope because we are passing constants"
out_l = Dense(10,kernel_initializer=init_ops.Constant(out_weights),bias_initializer=init_ops.Constant(out_biases))
fw_result,bw_result=result
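# Concatenate forward and backward outputs along the feature axis, so each
# timestep carries 2*n_hidden features.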
h=np.concatenate((fw_result,bw_result),-1)
pred=out_l(h[0][-1].reshape(1,vocab_size))
return pred
def LOSS(X,target):
pred=RNN(X,out_weights,out_biases)
return cross_entropy_loss(pred.reshape([1,1,vocab_size]),np.array([[target]]))
while step < training_iters:
if offset > (len(train_data) - end_offset):
offset = rnd.randint(0, n_input + 1)
print("offset:", offset)
symbols_in_keys = [input_one_hot(dictionary[str(train_data[i])],vocab_size) for i in range(offset, offset + n_input)]
symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, vocab_size])
print("symbols_in_keys:",symbols_in_keys)
target=dictionary[str(train_data[offset + n_input])]
"""with WeightsInitializer(initializer=init_ops.Constant(0.1)) as vs:
cell = LSTMCell(n_hidden,debug=True)
result, state = dynamic_rnn(cell, symbols_in_keys)
(c, h) = state.c,state.h
print("final:", repr(result),state,h.shape)
#last layer of Feed Forward to compare to transform result to the shape of target
out_l = Dense(10,kernel_initializer=init_ops.Constant(out_weights),bias_initializer=init_ops.Constant(out_biases))
pred=out_l(h)
print("pred:",pred)"""
#cross_entropy_loss internally calculates the same softmax as and then the loss as above but for a batch and sequence
#pred- batch,seq,input_size
#labels-batch,seq(has to be transformed before comparision with preds(line-43).)
#yhat,cel=cross_entropy_loss(pred.reshape([1,1,vocab_size]),np.array([[target]]))
yhat,cel=LOSS(symbols_in_keys,target)
print("yhat:",yhat)
print("CEL:",cel)
#yhat-Size of yhat should be batch,seq,size
#target-Size of target should be batch,seq
gradients=gdo.compute_gradients(yhat,np.array([[target]]))
gdo.apply_gradients(gradients)
print_gradients(gradients)
step += 1
offset += (n_input + 1)
print("Optimization Finished!")
|
{"hexsha": "16a659b48667cb0626aea2381d7ff798e8923788", "size": 6654, "ext": "py", "lang": "Python", "max_stars_repo_path": "org/mk/training/dl/LSTMMainGraphbi.py", "max_stars_repo_name": "slowbreathing/Deep-Breathe", "max_stars_repo_head_hexsha": "bcc97cadfc53d3297317764ecfb2223e5e715fd1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-05-01T03:49:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T12:41:38.000Z", "max_issues_repo_path": "org/mk/training/dl/LSTMMainGraphbi.py", "max_issues_repo_name": "slowbreathing/Deep-Breathe", "max_issues_repo_head_hexsha": "bcc97cadfc53d3297317764ecfb2223e5e715fd1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "org/mk/training/dl/LSTMMainGraphbi.py", "max_forks_repo_name": "slowbreathing/Deep-Breathe", "max_forks_repo_head_hexsha": "bcc97cadfc53d3297317764ecfb2223e5e715fd1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5731707317, "max_line_length": 121, "alphanum_fraction": 0.6931169222, "include": true, "reason": "import numpy", "num_tokens": 2090}
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
def generate_seedmap(shape, speckle_density, speckle_size, randomseeds):
np.random.seed(randomseeds[0])
SpeckleSeedMap = np.random.rand(shape[0], shape[1]) < speckle_density
np.random.seed(randomseeds[1])
SpeckleDirectionMap = np.random.rand(shape[0], shape[1]) * 2 * np.pi
np.random.seed(randomseeds[2])
radius = np.abs(np.random.normal(speckle_size, speckle_size / 10, size=shape)) + 0.1
if speckle_size < 4:
radius += (np.random.rand(shape[0], shape[1]) < 0.01) * np.random.normal(speckle_size + 1, 0.5)
else:
radius -= (np.random.rand(shape[0], shape[1]) < 0.02) * np.random.normal(speckle_size + 1.5, 0.5)
np.random.seed(randomseeds[3])
Rx = radius * np.random.normal(1, 0.08, size=shape) # long axis of the elliptical speckle, perturbed so the axis ratio varies around 1
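# Ry is chosen so that Rx * Ry == radius**2, keeping the speckle area close to
# that of a circle of the given radius while the aspect ratio varies.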
Ry = radius ** 2 / Rx
return SpeckleSeedMap, SpeckleDirectionMap, Rx, Ry
def calculatedisp(Xs, Ys, dispinfo):
us = np.zeros_like(Xs)
vs = np.zeros_like(Xs)
for i in range(len(dispinfo)):
type = dispinfo[i][0]
info = dispinfo[i][1:]
if type == "planer": # Us = AXs0 + BYs0 + C; Vs = DXs0 + EYs0 + F
us += info[0] * Xs + info[1] * Ys + info[2]
vs += info[3] * Xs + info[4] * Ys + info[5]
elif type == "sin": # Zs = Asin(BXs + C) * sin(DYs + E); Zcr = (Asin(BXs + C) * sin(DYs + E) + minus) / btm
us += info[0] * np.sin(info[1] * Xs + info[2]) * np.sin(info[3] * Ys + info[4])
vs += info[5] * np.sin(info[6] * Xs + info[7]) * np.sin(info[8] * Ys + info[9])
return us, vs
def calculatedisp_ws(Xs, Ys, dispinfo):
ws = np.zeros_like(Xs)
for i in range(len(dispinfo)):
type = dispinfo[i][0]
info = dispinfo[i][1:]
if type == "planer": # Us = AXs0 + BYs0 + C; Vs = DXs0 + EYs0 + F
ws += info[0] * Xs + info[1] * Ys + info[2]
elif type == "sin": # Zs = Asin(BXs + C) * sin(DYs + E); Zcr = (Asin(BXs + C) * sin(DYs + E) + minus) / btm
ws += info[0] * np.sin(info[1] * Xs + info[2]) * np.sin(info[3] * Ys + info[4])
return ws
def array2img(array, background, noise):
Gaussian_map = np.random.normal(0.0, 1, size=array.shape) * noise / 256
array += Gaussian_map
img = array * 0.6 * (array > 0)
img = (img < background / 255) * background / 255 + (img >= background / 255) * img
img = (((img - 1) * (img < 1) + 1) * 255).astype("uint8")
return img
def img_flip(imgs):
flipd_img = []
for i in range(len(imgs)):
temp_img = cv2.flip(imgs[i], 0)
flipd_img.append(cv2.flip(temp_img, 1))
return flipd_img
if __name__ == '__main__':
# line = [3, 2, [(["planer", 0.01, 0.02, 0.1, 0.01, 0.02, 0.05], ["planer", -0.03, 0.002, 0.1, 0.01, 0.002, 0.05]), (["planer", 0.01, 0.02, 0.05], ["sin", 0.1, 3, 0.1, 3, 0.1])], 100]
# f = open("./test.txt", "w")
# for i in range(3):
# f.write(str(line)+"\n")
# f.close()
#
# f1 = open("./test.txt", "r")
# for line in f1:
# print(line)
# line = line.split()
U = np.loadtxt("../data/0_LWU.csv")
V = np.loadtxt("../data/0_LWV.csv")
W = np.loadtxt("../data/0_LWW.csv")
DX = np.loadtxt("../data/0_Disparity_DX.csv")
DY = np.loadtxt("../data/0_Disparity_DY.csv")
beta_sw = np.linalg.inv(np.array([[0, 1, 0], [0.8660254, 0, 0.5], [0.5, 0, -0.8660254]]))
beta_lr = np.array([[0.5, 0, 0.8660254], [0, 1, 0], [-0.8660254, 0, 0.5]])
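# beta_sw (inverse basis) presumably maps sensor displacements to world
# coordinates; beta_lr is a 60-degree rotation about the y-axis
# (cos 60 = 0.5, sin 60 = 0.8660254) into the right-camera frame.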
Uw = beta_sw[0][0] * U + beta_sw[0][1] * V + beta_sw[0][2] * W
Vw = beta_sw[1][0] * U + beta_sw[1][1] * V + beta_sw[1][2] * W
Ww = beta_sw[2][0] * U + beta_sw[2][1] * V + beta_sw[2][2] * W
Ur = beta_lr[0][0] * Uw + beta_lr[0][1] * Vw + beta_lr[0][2] * Ww
Vr = beta_lr[1][0] * Uw + beta_lr[1][1] * Vw + beta_lr[1][2] * Ww
Wr = beta_lr[2][0] * Uw + beta_lr[2][1] * Vw + beta_lr[2][2] * Ww
plt.figure()
plt.subplot(3, 2, 1)
plt.imshow(U)
plt.title("U")
plt.colorbar()
plt.subplot(3, 2, 2)
plt.imshow(V)
plt.title("V")
plt.colorbar()
plt.subplot(3, 2, 3)
plt.imshow(W)
plt.title("W")
plt.colorbar()
plt.subplot(3, 2, 5)
plt.imshow(DX)
plt.title("DX")
plt.colorbar()
plt.subplot(3, 2, 6)
plt.imshow(DY)
plt.title("DY")
plt.colorbar()
plt.savefig("disp.png")
plt.show()
plt.close()
|
{"hexsha": "bed46c383a347c060a2f84a413e04fa9ffd641d2", "size": 4465, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/utils.py", "max_stars_repo_name": "GW-Wang-thu/Generator-of-Stereo-Speckle-images-with-displacement-labels", "max_stars_repo_head_hexsha": "6a920827bd7bbba3019c97c02c7382523b790449", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-11T01:49:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-11T01:49:51.000Z", "max_issues_repo_path": "utils/utils.py", "max_issues_repo_name": "GW-Wang-thu/Generator-of-Stereo-Speckle-images-with-displacement-labels", "max_issues_repo_head_hexsha": "6a920827bd7bbba3019c97c02c7382523b790449", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/utils.py", "max_forks_repo_name": "GW-Wang-thu/Generator-of-Stereo-Speckle-images-with-displacement-labels", "max_forks_repo_head_hexsha": "6a920827bd7bbba3019c97c02c7382523b790449", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-19T02:38:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-19T02:38:00.000Z", "avg_line_length": 34.6124031008, "max_line_length": 187, "alphanum_fraction": 0.5460246361, "include": true, "reason": "import numpy", "num_tokens": 1722}
|
import mdtraj
import numpy as np
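# Debye equation: I(q) = sum_i sum_j f_i f_j * sin(q * r_ij) / (q * r_ij),
# with the atomic form factors f approximated here by atomic numbers.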
def gmx_saxs(q, trajectory, topology):
intensity = np.zeros_like(q)
for chunk in mdtraj.iterload(trajectory, top=topology):
for c in chunk:
c1 = c.remove_solvent()
for i in range(c1.n_atoms):
rhoi = c1.topology.atom(i).element.number
intensity += rhoi ** 2  # self term (i == j), since sin(x)/x -> 1
for j in range(i + 1, c1.n_atoms):
dist = np.sum((c1.xyz[0, i, :] - c1.xyz[0, j, :]) ** 2) ** 0.5
intensity += 2 * rhoi * c1.topology.atom(j).element.number * np.sin(dist * q) / (dist * q)
return intensity
q = np.linspace(0.01, 100, 300)
trajectory = '/home/wachaandras/gromacs/cm15_folding/helix_unfold_gromos/analysis/cm15_unfold_gromos_nopbc.xtc'
topology = '/home/wachaandras/gromacs/cm15_folding/helix_unfold_gromos/production/cm15_npt.gro'
# I=gmx_saxs(q,trj,top)
intensity = np.zeros_like(q)
idx = 0
maxidx = 30
for chunk in mdtraj.iterload(trajectory, top=topology):
if idx > maxidx:
break
for c in chunk:
if idx > maxidx:
break
idx += 1
print(idx)
c1 = c.remove_solvent()
for i in range(c1.n_atoms):
rhoi = c1.topology.atom(i).element.number
intensity += rhoi ** 2
for j in range(i + 1, c1.n_atoms):
dist = np.sum((c1.xyz[0, i, :] - c1.xyz[0, j, :]) ** 2) ** 0.5
intensity += 2 * rhoi * c1.topology.atom(j).element.number * np.sin(dist * q) / (dist * q)
|
{"hexsha": "37232836318fdc40c84c02d1f1583d7f1d253c44", "size": 1519, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/mdscripts/saxs/saxs.py", "max_stars_repo_name": "awacha/mdscripts", "max_stars_repo_head_hexsha": "831bda06557fa2d5f0899fc2f6552c9e49146cef", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mdscripts/saxs/saxs.py", "max_issues_repo_name": "awacha/mdscripts", "max_issues_repo_head_hexsha": "831bda06557fa2d5f0899fc2f6552c9e49146cef", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mdscripts/saxs/saxs.py", "max_forks_repo_name": "awacha/mdscripts", "max_forks_repo_head_hexsha": "831bda06557fa2d5f0899fc2f6552c9e49146cef", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3255813953, "max_line_length": 111, "alphanum_fraction": 0.5694535879, "include": true, "reason": "import numpy", "num_tokens": 470}
|
import numpy as np
def compute_errors_over_time(Xtrain,
ytrain,
Xtest,
ytest,
theta,
feature_inds,
thresholds):
"""
The function ``plt_errors_over_time`` Plots train and test error from boosting.
It plots the training and testing error of a decision-stump based boosting
algorithm over iterations of the boosting algorithm.
"""
num_thresholds = thresholds.shape[0]
train_errors = np.zeros(num_thresholds)
test_errors = np.zeros(num_thresholds)
mtrain = Xtrain.shape[0]
mtest = Xtest.shape[0]
# Predicted margins for train and test
train_predictions = np.zeros(mtrain)
test_predictions = np.zeros(mtest)
# Iteratively compute the margin predicted by the thresholded classifier,
# updating both test and training predictions.
for i in range(num_thresholds):
train_predictions = train_predictions + \
theta[i] * np.sign(Xtrain[:, feature_inds[i]] - thresholds[i])
test_predictions = test_predictions + \
theta[i] * np.sign(Xtest[:, feature_inds[i]] - thresholds[i])
train_errors[i] = (1 / mtrain) * np.sum((ytrain * train_predictions) <= 0)
test_errors[i] = (1 / mtest) * np.sum((ytest * test_predictions) <= 0)
return train_errors, test_errors
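# Hedged usage sketch (variable names illustrative, not from this file):
# train_err, test_err = compute_errors_over_time(Xtrain, ytrain, Xtest, ytest,
#                                                theta, feature_inds, thresholds)
# train_err[i] and test_err[i] are the 0-1 errors of the first i+1 weighted stumps.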
|
{"hexsha": "77ddf377612c64fc56a94ea27635c9d5a18809dd", "size": 1454, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/homework2/q5/errors_over_time.py", "max_stars_repo_name": "skymarshal/cs229-machine-learning-stanford-fall-2016", "max_stars_repo_head_hexsha": "3f9e0f4ea7d4fe73a50dc12c84fe47131bb0622a", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-08-22T03:15:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-29T15:50:34.000Z", "max_issues_repo_path": "src/homework2/q5/errors_over_time.py", "max_issues_repo_name": "skymarshal/cs229-machine-learning-stanford-fall-2016", "max_issues_repo_head_hexsha": "3f9e0f4ea7d4fe73a50dc12c84fe47131bb0622a", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/homework2/q5/errors_over_time.py", "max_forks_repo_name": "skymarshal/cs229-machine-learning-stanford-fall-2016", "max_forks_repo_head_hexsha": "3f9e0f4ea7d4fe73a50dc12c84fe47131bb0622a", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-09-29T13:12:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-21T14:15:31.000Z", "avg_line_length": 36.35, "max_line_length": 83, "alphanum_fraction": 0.6038514443, "include": true, "reason": "import numpy", "num_tokens": 310}
|
[STATEMENT]
lemma weakPsiCongSym:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
assumes "\<Psi> \<rhd> P \<doteq> Q"
shows "\<Psi> \<rhd> Q \<doteq> P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Psi> \<rhd> Q \<doteq> P
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<doteq> Q
goal (1 subgoal):
1. \<Psi> \<rhd> Q \<doteq> P
[PROOF STEP]
by(auto simp add: weakPsiCongruence_def weakBisimE)
|
{"llama_tokens": 237, "file": "Psi_Calculi_Weak_Psi_Congruence", "length": 2}
|
r"""
Graded Hopf algebras
"""
#*****************************************************************************
# Copyright (C) 2008 Teresa Gomez-Diaz (CNRS) <Teresa.Gomez-Diaz@univ-mlv.fr>
# Nicolas M. Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from category_types import Category_over_base_ring
from sage.categories.all import HopfAlgebras, GradedBialgebras
from sage.misc.cachefunc import cached_method
class GradedHopfAlgebras(Category_over_base_ring):
"""
    The category of graded Hopf algebras
EXAMPLES::
sage: GradedHopfAlgebras(ZZ)
Category of graded hopf algebras over Integer Ring
sage: GradedHopfAlgebras(ZZ).super_categories()
[Category of graded bialgebras over Integer Ring, Category of hopf algebras over Integer Ring]
TESTS::
sage: TestSuite(GradedHopfAlgebras(ZZ)).run()
"""
@cached_method
def super_categories(self):
"""
EXAMPLES::
sage: GradedHopfAlgebras(QQ).super_categories()
[Category of graded bialgebras over Rational Field, Category of hopf algebras over Rational Field]
"""
R = self.base_ring()
return [GradedBialgebras(R), HopfAlgebras(R)]
class ParentMethods:
pass
class ElementMethods:
pass
|
{"hexsha": "e8f4b9dc24d0cc2ee2ff6237631842b38eac1021", "size": 1512, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sage/categories/graded_hopf_algebras.py", "max_stars_repo_name": "bopopescu/sage-5", "max_stars_repo_head_hexsha": "9d85b34956ca2edd55af307f99c5d3859acd30bf", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2015-01-04T07:15:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T15:15:18.000Z", "max_issues_repo_path": "src/sage/categories/graded_hopf_algebras.py", "max_issues_repo_name": "bopopescu/sage-5", "max_issues_repo_head_hexsha": "9d85b34956ca2edd55af307f99c5d3859acd30bf", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sage/categories/graded_hopf_algebras.py", "max_forks_repo_name": "bopopescu/sage-5", "max_forks_repo_head_hexsha": "9d85b34956ca2edd55af307f99c5d3859acd30bf", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2016-09-28T13:12:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-12T09:28:34.000Z", "avg_line_length": 31.5, "max_line_length": 110, "alphanum_fraction": 0.6051587302, "include": true, "reason": "from sage", "num_tokens": 352}
|
\vspace{-20pt}
\section{Procedure}
\label{sec:Procedure}
The first step is to find the minimal current at which the laser setup is
still lasing. For this measurement the setup is changed to the configuration shown in
figure~\ref{fig:setup_current}. A voltmeter is connected to the laser current
monitor for a precise voltage measurement. The laser diode has a resistance
of $\SI{100}{\ohm}$, hence the applied current can be calculated.
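Explicitly, with $U$ denoting the monitored voltage (our notation), Ohm's law gives
\begin{equation*}
  I = \frac{U}{R} = \frac{U}{\SI{100}{\ohm}}.
\end{equation*}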
The minimum threshold can be found by an iterative procedure with the following
steps. First pick a current at which the laser is lasing.
Then lower the current slightly below the lasing threshold and try to bring the system back to lasing,
by adjusting the cavity with the two knobs.
If this is possible, lower the current
until it is slightly below the threshold and repeat the procedure.
\begin{figure}
\vspace{-10pt}
\centering
\includegraphics[width=0.75\textwidth]{Pics/setup_threshold.png}
\caption{Setup for the minimum current measurement.\cite{anleitung}}
\label{fig:setup_current}
\end{figure}
The minimum current below the threshold $I_{below}$ and the current above the threshold $I_{above}$
are given in~\eqref{eqn:nolase} and~\eqref{eqn:lase}.
The associated images of the laser spots are shown in figure~\ref{fig:no_lase}
and~\ref{fig:lase}.
\vspace{-10pt}
\begin{align}
\label{eqn:nolase}
I_{below} &= \SI{33.2}{\milli\ampere}\\
\label{eqn:lase}
I_{above} &= \SI{33.4}{\milli\ampere}
\end{align}
\begin{figure}[h!]
\centering
\begin{subfigure}{0.48\textwidth}
\centering
\includegraphics[width=\textwidth]{Pics/threshold_no_lase.jpg}
\caption{Current below threshold $I_{below}$.}
\label{fig:no_lase}
\end{subfigure}
\begin{subfigure}{0.48\textwidth}
\centering
\includegraphics[width=\textwidth]{Pics/threshold_lase.jpg}
\caption{Current above threshold $I_{above}$.}
\label{fig:lase}
\end{subfigure}
\end{figure}
\FloatBarrier
The setup is now changed to the configuration shown in figure~\ref{fig:setup_fluorescence},
so that the image depicted in figure~\ref{fig:fluorescence} can be taken. The image
shows the Rubidium fluorescence in the gas chamber.
\begin{figure}
\centering
\includegraphics[width=0.8\textwidth]{Pics/setup_fluorescence.png}
\caption{Setup to observe the Rubidium fluorescence line.\cite{anleitung}}
\label{fig:setup_fluorescence}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=0.5\textwidth]{Pics/Rb_fluorescence.jpg}
\caption{Rubidium fluorescence line.}
\label{fig:fluorescence}
\end{figure}
\FloatBarrier
Now the setup shown in figure~\ref{fig:setup_spectrum} is built up.
The current applied to the piezo is connected to an oscilloscope
for the following measurements. The piezo current is swept with a triangular
voltage, which leads to a change in the piezo dimensions due to the inverse piezo
effect. The piezo is attached to the grating, hence the cavity length
is varied. The variation of the cavity length causes a variation of
the wavelength (see equation~\eqref{eqn:frequdiff}),
so that the laser gets tuned over a broad spectrum.
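For orientation, the resonance condition of a cavity of length $L$ with longitudinal
mode number $m$ is $\lambda = 2L/m$, so a length change $\Delta L$ shifts the
wavelength by $\Delta\lambda = 2\Delta L/m$ (a general relation, stated here in
addition to equation~\eqref{eqn:frequdiff}).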
Furthermore, the current emitted by the photo diode is likewise connected to the oscilloscope.
Figure~\ref{fig:example} shows an example spectrum for Rubidium. Whenever
the laser beam has the right energy to excite a Rubidium atom, the
measured intensity drops to a minimum. Figure~\ref{fig:example} contains
mode hops, which lead to an imprecise spectrum.
Therefore the setup is slightly changed. The internal cavity and the external cavity
are now varied simultaneously, which prevents mode hops.
The simultaneous variation of the laser current and the piezo modulation leads to a linear background.
The absorption spectrum of Rubidium can now be measured precisely. The observed spectrum
is shown in figure~\ref{fig:spectrum}. The identification of the Rubidium isotopes
is made by the comparison with reference~\cite{anleitung}.
\begin{figure}
\centering
\includegraphics[width=0.8\textwidth]{Pics/setup_spectrum.png}
\caption{Setup for the Rubidium spectrum measurement.\cite{anleitung}}
\label{fig:setup_spectrum}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=0.7\textwidth]{Pics/example_spectrum_hop.pdf}
\caption{Example Rubidium spectrum. The mode hops are indicated by the orange markers.}
\label{fig:example}
\end{figure}
\FloatBarrier
\begin{figure}
\centering
\includegraphics[width=0.7\textwidth]{Pics/Rb_spectrum.pdf}
\caption{Rubidium spectrum.}
\label{fig:spectrum}
\end{figure}
\FloatBarrier
The linear background in figure~\ref{fig:spectrum} can be avoided by the
setup shown in figure~\ref{fig:setup_substraction}.
This technique is called the subtraction technique.
The resulting spectrum is shown in figure~\ref{fig:spectrum_sub}.
\begin{figure}
\centering
\includegraphics[width=\textwidth]{Pics/setup_substraction.png}
\caption{Setup for the subtraction technique measurement.\cite{anleitung}}
\label{fig:setup_substraction}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=0.7\textwidth]{Pics/Rb_spectrum_subst.pdf}
\caption{Rubidium spectrum with the subtraction technique.}
\label{fig:spectrum_sub}
\end{figure}
|
{"hexsha": "aedf8f2ee879c9985bf1385ec9c8e6d3f32ab753", "size": 5242, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Fortgeschrittenenpraktikum/Protokolle/V60_Diodenlaser/Auswertung.tex", "max_stars_repo_name": "smjhnits/Praktikum", "max_stars_repo_head_hexsha": "92c9df3ee7dfa2417f464036d18ac33b70765fdd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-07T08:55:36.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-22T18:13:03.000Z", "max_issues_repo_path": "Fortgeschrittenenpraktikum/Protokolle/V60_Diodenlaser/Auswertung.tex", "max_issues_repo_name": "smjhnits/Praktikum", "max_issues_repo_head_hexsha": "92c9df3ee7dfa2417f464036d18ac33b70765fdd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Fortgeschrittenenpraktikum/Protokolle/V60_Diodenlaser/Auswertung.tex", "max_forks_repo_name": "smjhnits/Praktikum", "max_forks_repo_head_hexsha": "92c9df3ee7dfa2417f464036d18ac33b70765fdd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-10-27T13:26:43.000Z", "max_forks_repo_forks_event_max_datetime": "2018-01-13T09:12:24.000Z", "avg_line_length": 38.5441176471, "max_line_length": 102, "alphanum_fraction": 0.7773750477, "num_tokens": 1443}
|
import numpy as np
import pandas as pd
from dtwknn import DtwKnn
min_window_size = 30
max_window_size = 100
threshold_mean = 0
threshold_std = 0.5
threshold_change = 1.5
model = DtwKnn(n_neighbors=1)
def segment_window(data, array, min_window_size, max_window_size):
index_segment = list()
prev_point = array[0]
start_index_window = 0
for i, value in enumerate(array[1:]):
if value >= 0 > prev_point or value < 0 <= prev_point:
if max_window_size >= i - start_index_window >= min_window_size:
# print(start_index_window, i)
index_segment.append((start_index_window, i))
start_index_window = i
prev_point = value
result = list()
for value in index_segment:
window = data[value[0]:value[1]].values
result.append(window)
return result
def get_features(data):
features = list()
for window in data:
window = window[:, 1:]
print(len(window))
max_win = np.amax(window, axis=0)
min_win = np.amin(window, axis=0)
mean_win = np.mean(window, axis=0)
std_win = np.std(window, axis=0)
        mad_win = np.median(window, axis=0)  # NOTE: plain median, not the median absolute deviation
feature = np.concatenate([max_win, min_win, mean_win, std_win, mad_win], axis=0)
features.append(feature)
return np.array(features)
def get_gesture(feature_window, field):
mapping = {
'ax': [15, 21, 'left', 'right'],
'ay': [16, 22, 'left', 'right'],
'az': [17, 23, 'up', 'down']
}
print(feature_window[15:24], field)
if feature_window[mapping[field][1]] < threshold_std:
return None
if feature_window[mapping[field][0]] > threshold_mean:
return mapping[field][2]
else:
return mapping[field][3]
def get_gestures(data):
data_segmented_x = segment_window(data, data[['ax']].values,
min_window_size=min_window_size, max_window_size=max_window_size)
data_segmented_y = segment_window(data, data[['ay']].values,
min_window_size=min_window_size, max_window_size=max_window_size)
data_segmented_z = segment_window(data, data[['az']].values,
min_window_size=min_window_size, max_window_size=max_window_size)
features_x = get_features(data_segmented_x)
features_y = get_features(data_segmented_y)
features_z = get_features(data_segmented_z)
predictions_x = list()
predictions_y = list()
predictions_z = list()
for feature in features_x:
predict = get_gesture(feature, 'ax')
if predict is not None:
predictions_x.append(predict)
for feature in features_y:
predict = get_gesture(feature, 'ay')
if predict is not None:
predictions_y.append(predict)
for feature in features_z:
predict = get_gesture(feature, 'az')
if predict is not None:
predictions_z.append(predict)
print(predictions_x, predictions_y, predictions_z)
if len(predictions_z) > 0:
return predictions_z[0]
elif len(predictions_y) > 0:
return predictions_y[0]
elif len(predictions_x) > 0:
return predictions_x[0]
# return predictions_x, predictions_y, predictions_z
# NOTE: this second definition intentionally overrides the segmentation-based
# get_gestures above; only this amplitude/threshold version is used at runtime.
def get_gestures(data):
ax = data[['ax']].values
ay = data[['ay']].values
az = data[['az']].values
ax_change = np.amax(ax) - np.amin(ax)
ay_change = np.amax(ay) - np.amin(ay)
az_change = np.amax(az) - np.amin(az)
if ax_change > ay_change and ax_change > az_change:
if ax_change > threshold_change:
sequence_max_min = np.argmax(ax) - np.argmin(ax)
if sequence_max_min > 0:
return 'in'
else:
return 'out'
else:
return 'fixedly'
elif ay_change > ax_change and ay_change > az_change:
if ay_change > threshold_change:
sequence_max_min = np.argmax(ay) - np.argmin(ay)
if sequence_max_min > 0:
return 'left'
else:
return 'right'
else:
return 'fixedly'
elif az_change > ax_change and az_change > ay_change:
if az_change > threshold_change:
sequence_max_min = np.argmax(az) - np.argmin(az)
if sequence_max_min > 0:
return 'down'
else:
return 'up'
else:
return 'fixedly'
def get_gesture(data):
return model.predict(data[['ax', 'ay', 'az']].values)
def nomalize(data):
    # NOTE: the name is a typo for "normalize"; what is actually returned is
    # the squared deviation of each sample from its own per-sample mean.
    if data.shape[1] != 3:
        return data
    mean = np.mean(data, axis=1)
    result = np.copy(data)
    for i in range(len(data)):
        result[i] = (data[i] - mean[i]) ** 2
    return result
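# A minimal usage sketch (synthetic accelerometer frame; the column names follow
# the ax/ay/az convention used above, all values are made up for illustration):
if __name__ == '__main__':
    t = np.linspace(0, 1, 100)
    demo = pd.DataFrame({'ax': 2.0 * np.sin(2 * np.pi * t),
                         'ay': np.zeros(100),
                         'az': np.zeros(100)})
    print(get_gestures(demo))  # -> 'out' (the max of ax comes before its min)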
|
{"hexsha": "264341a134db52878d1f65474f2659ac3fd387b8", "size": 4818, "ext": "py", "lang": "Python", "max_stars_repo_path": "bkm3t_DHBKHN/Cau4/service_predict/gesture.py", "max_stars_repo_name": "atheros98/OLP-FOSS-2018", "max_stars_repo_head_hexsha": "c3ba261a60e80a6e355da34b6015c767a4d69fba", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-11-29T09:17:22.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-07T09:11:14.000Z", "max_issues_repo_path": "bkm3t_DHBKHN/Cau4/service_predict/gesture.py", "max_issues_repo_name": "atheros98/OLP-FOSS-2018", "max_issues_repo_head_hexsha": "c3ba261a60e80a6e355da34b6015c767a4d69fba", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bkm3t_DHBKHN/Cau4/service_predict/gesture.py", "max_forks_repo_name": "atheros98/OLP-FOSS-2018", "max_forks_repo_head_hexsha": "c3ba261a60e80a6e355da34b6015c767a4d69fba", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2018-11-29T00:44:26.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-04T06:34:11.000Z", "avg_line_length": 30.6878980892, "max_line_length": 103, "alphanum_fraction": 0.604607721, "include": true, "reason": "import numpy", "num_tokens": 1170}
|
import numpy as np
class Config:
MEMORY_START_ADDRESS = 0x200
FONT_SET_START_ADDRESS = 0x50
    # Standard CHIP-8 font set: 16 sprites (hex digits 0-F), 5 bytes each
    FONT_SET = np.array([
        0xF0, 0x90, 0x90, 0x90, 0xF0,  # 0
        0x20, 0x60, 0x20, 0x20, 0x70,  # 1
        0xF0, 0x10, 0xF0, 0x80, 0xF0,  # 2
        0xF0, 0x10, 0xF0, 0x10, 0xF0,  # 3
        0x90, 0x90, 0xF0, 0x10, 0x10,  # 4
        0xF0, 0x80, 0xF0, 0x10, 0xF0,  # 5
        0xF0, 0x80, 0xF0, 0x90, 0xF0,  # 6
        0xF0, 0x10, 0x20, 0x40, 0x40,  # 7
        0xF0, 0x90, 0xF0, 0x90, 0xF0,  # 8
        0xF0, 0x90, 0xF0, 0x10, 0xF0,  # 9
        0xF0, 0x90, 0xF0, 0x90, 0x90,  # A
        0xE0, 0x90, 0xE0, 0x90, 0xE0,  # B
        0xF0, 0x80, 0x80, 0x80, 0xF0,  # C
        0xE0, 0x90, 0x90, 0x90, 0xE0,  # D
        0xF0, 0x80, 0xF0, 0x80, 0xF0,  # E
        0xF0, 0x80, 0xF0, 0x80, 0x80   # F
    ], dtype=np.uint8)
|
{"hexsha": "1c5b39d6b1c3cd61c5995ed3bf62415a34b72de6", "size": 759, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/cpu/config/memory_config.py", "max_stars_repo_name": "rafael-junio/JustAChip8PythonEmulator", "max_stars_repo_head_hexsha": "ff9c2d67aeaf4f87ff3b5fd6f0231702587455a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core/cpu/config/memory_config.py", "max_issues_repo_name": "rafael-junio/JustAChip8PythonEmulator", "max_issues_repo_head_hexsha": "ff9c2d67aeaf4f87ff3b5fd6f0231702587455a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/cpu/config/memory_config.py", "max_forks_repo_name": "rafael-junio/JustAChip8PythonEmulator", "max_forks_repo_head_hexsha": "ff9c2d67aeaf4f87ff3b5fd6f0231702587455a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1923076923, "max_line_length": 37, "alphanum_fraction": 0.558629776, "include": true, "reason": "import numpy", "num_tokens": 459}
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import csv
from multiprocessing import Pool
from threading import Thread
from numpy import array, polyval, zeros, hstack, sum
from numpy.ma import masked_array
from pychron.data_processing.regression.ols import OLS
from pychron.data_processing.regression.regressor import Regressor
from traits.api import HasTraits, Instance, Button
from traitsui.api import View, Item
from pychron.core.stats import calculate_mswd
from pychron.graph.graph import Graph
from pychron.graph.stacked_graph import StackedGraph
def mcalculate_mswd(d):
return calculate_mswd(*d)
def regress(d, degree=2):
# coeffs = polyfit(x, y, degree)
# o = OLS(x, y, fitdegree=degree)
o = OLS(*d, fitdegree=degree)
return [o.get_coefficients()[2], o.get_coefficient_standard_errors()[2]]
class CountAnalyzer(HasTraits):
graph = Instance(Graph)
regressor = Instance(Regressor)
test = Button
def _regressor_default(self):
r = Regressor()
return r
def _graph_default(self):
g = StackedGraph()
g.new_plot(show_legend=True, padding=[30, 20, 10, 40], ytitle='Ar40', xtitle='# points')
g.new_plot(padding=[30, 20, 10, 10], ytitle='Ar40_err')
g.new_plot(padding=[30, 20, 10, 40], ytitle='Ar36')
g.new_plot(padding=[30, 20, 10, 10], ytitle='Ar36_err')
g.new_plot(padding=[30, 20, 10, 10], ytitle='Ar40/Ar36')
g.new_plot(padding=[30, 20, 10, 10], ytitle='%Diff')
# g.new_plot(padding=[20, 20, 10, 10], ytitle='Ar40_err')
return g
def traits_view(self):
v = View(Item('test', show_label=False),
Item('graph', show_label=False, style='custom'), resizable=True,
height=0.92,
width=0.55
)
return v
def calculate_intercept(self, xs, ys):
args = self.regressor.parabolic(xs, ys)
return args['coefficients'][2], args['coeff_errors'][2]
def read_signal_data(self, reader, xs, ys):
_name, n = reader.next()
n = int(n)
for _ in range(n):
x, y = map(float, reader.next())
xs.append(x)
ys.append(y)
def load_data2(self):
oxs_40 = []
oys_40 = []
oxs_39 = []
oys_39 = []
oxs_38 = []
oys_38 = []
oxs_37 = []
oys_37 = []
oxs_36 = []
oys_36 = []
p = '/Users/ross/Desktop/counting exp/peak-time1200s'
with open(p, 'U') as f:
reader = csv.reader(f, delimiter='\t')
# first line is runid
_rid = reader.next()[0]
# second line is signal name and num points
self.read_signal_data(reader, oxs_40, oys_40)
self.read_signal_data(reader, oxs_39, oys_39)
self.read_signal_data(reader, oxs_38, oys_38)
self.read_signal_data(reader, oxs_37, oys_37)
self.read_signal_data(reader, oxs_36, oys_36)
return oxs_40, oys_40, oxs_39, oys_39, oxs_38, oys_38, oxs_37, oys_37, oxs_36, oys_36
def load_data(self, rid, **kw):
p = '/Users/ross/Desktop/counting exp/peak-time1500s-40-{}'.format(rid)
xs1, ys1, omits1 = self._load_isotope_peak_time_data(p, **kw)
p = '/Users/ross/Desktop/counting exp/peak-time1500s-36-{}'.format(rid)
xs2, ys2, omits2 = self._load_isotope_peak_time_data(p, **kw)
self.omits = omits1
return xs1, ys1, xs2, ys2
def _load_isotope_peak_time_data(self, p, do_omit=False):
xs = []
ys = []
omits = []
with open(p, 'U') as f:
reader = csv.reader(f, delimiter='\t')
for _ in range(7):
reader.next()
while 1:
l = reader.next()
if len(l) == 0:
break
if do_omit:
omit = False if l[1] == 'OK' else True
else:
omit = False
if not omit:
xs.append(float(l[2]))
ys.append(float(l[4]))
omits.append(l[1])
return xs, ys, omits
def _detect_outliers_by_cluster(self, xs, ys, outs, degree=2):
xs = array(xs)
ys = array(ys)
m = ys.mean()
sd = ys.std()
for i, yi in enumerate(ys):
print m, yi, sd, abs(yi - m) > (sd * 2)
outs[i] = 1 if abs(yi - m) > (sd * 2) else 0
return outs
def _detect_outliers(self, xs, ys, outs, degree=2):
xs = array(xs)
ys = array(ys)
mxs = masked_array(xs, mask=outs)
# print 's', sum(mxs), outs
mys = masked_array(ys, mask=outs)
o = OLS(mxs, mys, fitdegree=degree)
coeffs = o.get_coefficients()
n = len(xs) - sum(outs)
# coeff_errs = o.get_coefficient_standard_errors()
# ymean = ys.mean()
yeval = polyval(coeffs, xs)
# calculate detection_tol. use error of fit
devs = abs(ys - yeval)
ssr = sum(devs ** 2)
detection_tol = 2.5 * (ssr / ((n) - (degree))) ** 0.5
        for i, xi, yi, di, mi in zip(xrange(len(xs)), xs, ys, devs, outs):
            if di > detection_tol:
                outs[i] = 1
            omit = 'OK' if di <= detection_tol and not mi else 'User omitted'
            # print xi, yi, di, detection_tol, omit, mi
return outs
def analyze_count_times(self, rids=None, do_omits=None, colors=None):
if rids is None:
rids = ['']
if do_omits is None:
do_omits = [True] * len(rids)
if colors is None:
colors = [None] * len(rids)
# load the file
# args = self.load_data2()
for rid, do_omit, color in zip(rids, do_omits, colors):
args = self.load_data(rid, do_omit=do_omit)
oxs_40, oys_40 = args[0], args[1]
oxs_36, oys_36 = args[-2], args[-1]
nsteps = 4
pool = Pool(processes=10)
xs = range(50, len(oxs_40), nsteps)
result40 = pool.map(regress, [(oxs_40[:i], oys_40[:i]) for i in xs])
result36 = pool.map(regress, [(oxs_36[:i], oys_36[:i]) for i in xs])
kw = dict(color=color) if color else dict()
io40, io40_e = array(result40).transpose()
self.graph.new_series(xs, io40, plotid=0, **kw)
self.graph.new_series(xs, io40_e, plotid=1, **kw)
io36, io36_e = array(result36).transpose()
self.graph.new_series(xs, io36, plotid=2, **kw)
self.graph.new_series(xs, io36_e, plotid=3, **kw)
r = io40 / io36
self.graph.new_series(xs, r, plotid=4, **kw)
ro = r[-1]
ys = (r - ro) / ro * 100
# mswd calculation
# err = ((io40_e / io40) ** 2 + (io36_e / io36) ** 2) ** 0.5 * r
# # for ri, ei in zip(r, err):
# # print ri, ei
# resultmswd = pool.map(mcalculate_mswd, [(r[:i], err[:i]) for i in range(len(r))])
self.graph.new_series(xs, ys, plotid=5, **kw)
self.graph.redraw()
def _test_fired(self):
t = Thread(target=self.analyze_count_times, kwargs=dict(rids=['769', '769'],
# colors=['black', 'green'],
do_omits=[False, True]
))
t.start()
# time.sleep(2)
# t = Thread(target=self.analyze_count_times, kwargs=dict(rid='769', color='green', do_omit=True))
# t.start()
#
# t = Thread(target=self.analyze_count_times, kwargs=dict(rid='770', series=1))
# t.start()
####
# t = Thread(target=self.analyze_count_times, kwargs=dict(rid='771', series=2))
# t.start()
if __name__ == '__main__':
d = CountAnalyzer()
# d.configure_traits()
x, y, _, _ = d.load_data('769')
# outlier_mask = zeros(len(x))
step = 10
m = zeros(step)
    end = len(x)
    end = 20  # NOTE: capped for quick testing; use len(x) above for the full run
for i in range(10, end, step):
# for i in range(10, 20, step):
# for i in [10, 10]:
# oom = outlier_mask[:i]
d._detect_outliers_by_cluster(x[:i], y[:i], m)
# print m
m = hstack((m, zeros(step)))
# print
i = 1
for xi, mi, oo in zip(x[:50], m[:50], d.omits[:50]):
if mi:
match = oo == 'User omitted'
else:
match = oo == 'OK'
print i, xi, 'OK' if not mi else 'Omit', oo, match
i += 1
print sum(m), sum([1 if o == 'User omitted' else 0 for o in d.omits])
#======== EOF ================================
|
{"hexsha": "09cd794e3693a0f428c10079ab8eabcd5e9bc901", "size": 9485, "ext": "py", "lang": "Python", "max_stars_repo_path": "sandbox/count_time.py", "max_stars_repo_name": "ASUPychron/pychron", "max_stars_repo_head_hexsha": "dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2016-03-07T02:38:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T18:23:43.000Z", "max_issues_repo_path": "sandbox/count_time.py", "max_issues_repo_name": "ASUPychron/pychron", "max_issues_repo_head_hexsha": "dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1626, "max_issues_repo_issues_event_min_datetime": "2015-01-07T04:52:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T19:15:59.000Z", "max_forks_repo_path": "sandbox/count_time.py", "max_forks_repo_name": "UIllinoisHALPychron/pychron", "max_forks_repo_head_hexsha": "f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2015-05-23T00:10:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T16:51:57.000Z", "avg_line_length": 33.5159010601, "max_line_length": 105, "alphanum_fraction": 0.5317870322, "include": true, "reason": "from numpy", "num_tokens": 2636}
|
# -*- coding: utf-8 -*-
from typing import NamedTuple, Optional, Tuple
import numpy as np
from signalworks import dsp
from signalworks.processors.processing import DefaultProgressTracker, Processor
from signalworks.tracking import TimeValue, Wave
class SpectralDiscontinuityEstimator(Processor):
name = "Spectral Discontinuity Estimator"
acquire = NamedTuple("acquire", [("wave", Wave)])
def __init__(self):
super().__init__()
self.parameters = {
"frame_size": 0.005, # seconds, determines freq res.
"NFFT": 256,
"normalized": 1,
"delta_order": 1,
}
def process(
self, progressTracker: Optional[DefaultProgressTracker] = None
) -> Tuple[TimeValue]:
if progressTracker is not None:
self.progressTracker = progressTracker
wav = self.data.wave
assert isinstance(wav, Wave)
self.progressTracker.update(10)
ftr, time, frequency = dsp.spectrogram(
wav,
self.parameters["frame_size"],
self.parameters["frame_size"], # frame_rate = frame_size
NFFT=self.parameters["NFFT"],
normalized=self.parameters["normalized"],
)
if self.parameters["normalized"]:
ftr = ftr - np.mean(ftr, axis=1).reshape(-1, 1)
time = (time[:-1] + time[1:]) // 2
assert self.parameters["delta_order"] > 0
dynamic_win = np.arange(
-self.parameters["delta_order"], self.parameters["delta_order"] + 1
)
win_width = self.parameters["delta_order"]
win_length = 2 * win_width + 1
den = 0
for s in range(1, win_width + 1):
den += s ** 2
den *= 2
dynamic_win = dynamic_win / den
N, D = ftr.shape
temp_array = np.zeros((N + 2 * win_width, D))
delta_array = np.zeros((N, D))
self.progressTracker.update(90)
temp_array[win_width : N + win_width] = ftr
for w in range(win_width):
temp_array[w, :] = ftr[0, :]
temp_array[N + win_width + w, :] = ftr[-1, :]
for i in range(N):
for w in range(win_length):
delta_array[i, :] += temp_array[i + w, :] * dynamic_win[w]
value = np.mean(np.diff(delta_array, axis=0) ** 2, axis=1) ** 0.5
dis = TimeValue(
time,
value,
wav.fs,
wav.duration,
path=wav.path.with_name(wav.path.stem + "-discont").with_suffix(
TimeValue.default_suffix
),
)
dis.min = 0
dis.max = value.max()
dis.unit = "dB"
dis.label = "spectral discontinuity"
self.progressTracker.update(100)
return (dis,)
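# A standalone sanity check of the delta-coefficient window built in process()
# above; for delta_order=1 the weights reduce to the classic central difference
# [-0.5, 0.0, 0.5]. Runs only when the module is executed directly.
if __name__ == "__main__":
    order = 1
    win = np.arange(-order, order + 1)
    den = 2 * sum(s ** 2 for s in range(1, order + 1))
    print(win / den)  # -> [-0.5  0.   0.5]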
|
{"hexsha": "24b496e3ffab545ac874bbfddde9d25eabd4a675", "size": 2799, "ext": "py", "lang": "Python", "max_stars_repo_path": "signalworks/processors/spectral_discontinutiy_estimator.py", "max_stars_repo_name": "lxkain/tracking", "max_stars_repo_head_hexsha": "00ed9a0b31c4880687a42df3bf9651e68e0c4360", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-04-09T17:28:34.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-05T10:05:11.000Z", "max_issues_repo_path": "signalworks/processors/spectral_discontinutiy_estimator.py", "max_issues_repo_name": "lxkain/tracking", "max_issues_repo_head_hexsha": "00ed9a0b31c4880687a42df3bf9651e68e0c4360", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-04-19T23:03:38.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-22T17:59:07.000Z", "max_forks_repo_path": "signalworks/processors/spectral_discontinutiy_estimator.py", "max_forks_repo_name": "lxkain/tracking", "max_forks_repo_head_hexsha": "00ed9a0b31c4880687a42df3bf9651e68e0c4360", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-05-01T16:02:32.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-25T18:05:39.000Z", "avg_line_length": 33.3214285714, "max_line_length": 79, "alphanum_fraction": 0.559842801, "include": true, "reason": "import numpy", "num_tokens": 686}
|
import numpy as np
import pandas as pd
from datetime import datetime
from types import FunctionType
from pandapower.timeseries import OutputWriter
from pandahub.mongo_io_methods import MongoIOMethods
try:
    import pplog
    logger = pplog.getLogger(__name__)
except ImportError:
    import logging
    logger = logging.getLogger(__name__)
class OutputWriterMongoDB(OutputWriter):
"""
Output Writer which writes to a mongoDB
"""
def __init__(self, net, io_methods: MongoIOMethods, netname: str, db_name: str, start_date: datetime, time_steps=None,
write_time=None, log_variables=None, write_caching=10, freq="15min", collection_name='timeseries_data',
**kwargs):
super().__init__(net, time_steps=time_steps, write_time=write_time, log_variables=log_variables)
self.io_methods = io_methods
self.args = kwargs
self.NET_NAME = netname
self.db_name = db_name
self.write_caching = write_caching
self.current_pos = 0
self.ids = dict()
self.collection_name = collection_name
self.output = dict()
self.freq = freq
self.start_date = start_date
# def dump_to_file(self, net, append=False, recycle_options=None):
# pass
# def _save_single_xls_sheet(self, append):
# ToDo: implement save to a single sheet
# raise NotImplementedError("Sorry not implemented yet")
def _init_np_array(self, partial_func):
(table, variable, net, index, eval_function, eval_name) = partial_func.args
hash_name = self._get_np_name(partial_func.args)
n_columns = len(index)
if eval_function is not None:
n_columns = 1
if isinstance(eval_function, FunctionType):
if "n_columns" in eval_function.__code__.co_varnames:
n_columns = eval_function.__defaults__[0]
# self.np_results[hash_name] = np.zeros((len(self.time_steps), n_columns))
self.np_results[hash_name] = np.zeros((self.write_caching, n_columns))
def _log(self, table, variable, net, index, eval_function=None, eval_name=None):
try:
# ToDo: Create a mask for the numpy array in the beginning and use this one for getting the values. Faster
if net[table].index.equals(pd.Index(index)):
# if index equals all values -> get numpy array directly
result = net[table][variable].values
else:
# get by loc (slow)
result = net[table].loc[index, variable].values
if eval_function is not None:
result = eval_function(result)
# save results to numpy array
# time_step_idx = self.time_step_lookup[self.time_step]
hash_name = self._get_np_name((table, variable, net, index, eval_function, eval_name))
# self.np_results[hash_name][time_step_idx, :] = result
self.np_results[hash_name][self.current_pos, :] = result
except Exception as e:
logger.error("Error at index %s for %s[%s]: %s" % (index, table, variable, e))
def _np_to_pd(self):
# convert numpy arrays (faster so save results) into pd Dataframes (user friendly)
# intended use: At the end of time series simulation write results to pandas
res_df = dict()
for partial_func in self.output_list:
(table, variable, net, index, eval_func, eval_name) = partial_func.args
# res_name = self._get_hash(table, variable)
res_name = self._get_output_name(table, variable)
np_name = self._get_np_name(partial_func.args)
columns = index
if eval_name is not None and eval_func is not None:
if isinstance(eval_func, FunctionType):
if "n_columns" not in eval_func.__code__.co_varnames:
columns = [eval_name]
else:
columns = [eval_name]
# res_df = pd.DataFrame(self.np_results[np_name], index=self.time_steps, columns=columns)
res_df[res_name] = pd.DataFrame(self.np_results[np_name], columns=columns)
return res_df
def save_results(self, net, time_step, pf_converged, ctrl_converged, recycle_options=None):
# remember the last time step
self.time_step = time_step
if not pf_converged:
super().save_nans_to_parameters()
self.output["Parameters"].loc[time_step, "powerflow_failed"] = True
elif not ctrl_converged:
self.output["Parameters"].loc[time_step, "controller_unstable"] = True
else:
super().save_to_parameters()
res = self._np_to_pd()
write_to_db = False
self.current_pos += 1
if self.current_pos >= self.write_caching:
write_to_db = True
# last time_step
if self.time_step == self.time_steps[-1]:
write_to_db = True
if write_to_db:
for res_name, res_df in res.items():
end = self.start_date + (self.current_pos - 1) * pd.Timedelta(self.freq)
if self.current_pos < self.write_caching:
res_df.drop(range(self.current_pos, self.write_caching), axis=0, inplace=True)
res_df.index = pd.date_range(start=self.start_date,
end=end,
freq=self.freq)
if res_name in self.ids:
for ii in res_df.index:
row = res_df.loc[ii]
self.io_methods.bulk_update_timeseries_in_db(
new_ts_content=pd.DataFrame(row).transpose(),
document_ids=self.ids[res_name],
db_name=self.db_name,
collection_name=self.collection_name,
# **self.args
)
else:
et = res_name.split('.')[0]
dt = res_name.split('.')[1]
self.ids[res_name] = self.io_methods.bulk_write_timeseries_to_db(
timeseries=res_df, netname=self.NET_NAME,
element_type=et, data_type=dt,
collection_name=self.collection_name,
db_name=self.db_name,
return_ids=True,
last_timestamp=self.start_date + pd.Timedelta(self.freq) * len(self.time_steps),
num_timestamps=len(self.time_steps),
**self.args
)
self.start_date = self.start_date + self.current_pos * pd.Timedelta(self.freq)
self.current_pos = 0
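# A minimal wiring sketch (all concrete names below are hypothetical; `net` is a
# pandapower network and `io` an already-configured MongoIOMethods instance):
#
#   ow = OutputWriterMongoDB(net, io, netname='mv_oberrhein', db_name='ts_db',
#                            start_date=datetime(2020, 1, 1),
#                            log_variables=[('res_bus', 'vm_pu')])
#
# The writer then flushes results to MongoDB every `write_caching` time steps.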
|
{"hexsha": "53dd48af95296204f98a9e4ffd68253437994ba0", "size": 6835, "ext": "py", "lang": "Python", "max_stars_repo_path": "pandahub/lib/timeseries/output_writer_mongodb.py", "max_stars_repo_name": "e2nIEE/pandahub", "max_stars_repo_head_hexsha": "4d4abb29f49d32d035120ebea99fb96ba3d44bfc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2022-03-29T08:19:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T06:51:24.000Z", "max_issues_repo_path": "pandahub/lib/timeseries/output_writer_mongodb.py", "max_issues_repo_name": "e2nIEE/pandahub", "max_issues_repo_head_hexsha": "4d4abb29f49d32d035120ebea99fb96ba3d44bfc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pandahub/lib/timeseries/output_writer_mongodb.py", "max_forks_repo_name": "e2nIEE/pandahub", "max_forks_repo_head_hexsha": "4d4abb29f49d32d035120ebea99fb96ba3d44bfc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.5350318471, "max_line_length": 122, "alphanum_fraction": 0.5909290417, "include": true, "reason": "import numpy", "num_tokens": 1431}
|
import pytest
import numpy as np
import scipy.sparse as sp
import warnings
from sklearn import clone
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils._testing import (
assert_array_almost_equal,
assert_array_equal,
assert_allclose_dense_sparse,
)
X = [[-2, 1.5, -4, -1], [-1, 2.5, -3, -0.5], [0, 3.5, -2, 0.5], [1, 4.5, -1, 2]]
@pytest.mark.parametrize(
"strategy, expected",
[
("uniform", [[0, 0, 0, 0], [1, 1, 1, 0], [2, 2, 2, 1], [2, 2, 2, 2]]),
("kmeans", [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]),
("quantile", [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]]),
],
)
def test_fit_transform(strategy, expected):
est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy=strategy)
est.fit(X)
assert_array_equal(expected, est.transform(X))
def test_valid_n_bins():
KBinsDiscretizer(n_bins=2).fit_transform(X)
KBinsDiscretizer(n_bins=np.array([2])[0]).fit_transform(X)
assert KBinsDiscretizer(n_bins=2).fit(X).n_bins_.dtype == np.dtype(int)
def test_invalid_n_bins():
est = KBinsDiscretizer(n_bins=1)
err_msg = (
"KBinsDiscretizer received an invalid number of bins. Received 1, expected at"
" least 2."
)
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
est = KBinsDiscretizer(n_bins=1.1)
err_msg = (
"KBinsDiscretizer received an invalid n_bins type. Received float, expected"
" int."
)
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
def test_invalid_n_bins_array():
# Bad shape
n_bins = np.full((2, 4), 2.0)
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)."
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Incorrect number of features
n_bins = [1, 2, 2]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)."
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Bad bin values
n_bins = [1, 2, 2, 1]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = (
"KBinsDiscretizer received an invalid number of bins "
"at indices 0, 3. Number of bins must be at least 2, "
"and must be an int."
)
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Float bin values
n_bins = [2.1, 2, 2.1, 2]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = (
"KBinsDiscretizer received an invalid number of bins "
"at indices 0, 2. Number of bins must be at least 2, "
"and must be an int."
)
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
@pytest.mark.parametrize(
"strategy, expected",
[
("uniform", [[0, 0, 0, 0], [0, 1, 1, 0], [1, 2, 2, 1], [1, 2, 2, 2]]),
("kmeans", [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 2, 2, 2]]),
("quantile", [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]]),
],
)
def test_fit_transform_n_bins_array(strategy, expected):
est = KBinsDiscretizer(
n_bins=[2, 3, 3, 3], encode="ordinal", strategy=strategy
).fit(X)
assert_array_equal(expected, est.transform(X))
# test the shape of bin_edges_
n_features = np.array(X).shape[1]
assert est.bin_edges_.shape == (n_features,)
for bin_edges, n_bins in zip(est.bin_edges_, est.n_bins_):
assert bin_edges.shape == (n_bins + 1,)
@pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"])
def test_same_min_max(strategy):
warnings.simplefilter("always")
X = np.array([[1, -2], [1, -1], [1, 0], [1, 1]])
est = KBinsDiscretizer(strategy=strategy, n_bins=3, encode="ordinal")
warning_message = "Feature 0 is constant and will be replaced with 0."
with pytest.warns(UserWarning, match=warning_message):
est.fit(X)
assert est.n_bins_[0] == 1
# replace the feature with zeros
Xt = est.transform(X)
assert_array_equal(Xt[:, 0], np.zeros(X.shape[0]))
def test_transform_1d_behavior():
X = np.arange(4)
est = KBinsDiscretizer(n_bins=2)
with pytest.raises(ValueError):
est.fit(X)
est = KBinsDiscretizer(n_bins=2)
est.fit(X.reshape(-1, 1))
with pytest.raises(ValueError):
est.transform(X)
@pytest.mark.parametrize("i", range(1, 9))
def test_numeric_stability(i):
X_init = np.array([2.0, 4.0, 6.0, 8.0, 10.0]).reshape(-1, 1)
Xt_expected = np.array([0, 0, 1, 1, 1]).reshape(-1, 1)
# Test up to discretizing nano units
X = X_init / 10**i
Xt = KBinsDiscretizer(n_bins=2, encode="ordinal").fit_transform(X)
assert_array_equal(Xt_expected, Xt)
def test_invalid_encode_option():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="invalid-encode")
err_msg = (
r"Valid options for 'encode' are "
r"\('onehot', 'onehot-dense', 'ordinal'\). "
r"Got encode='invalid-encode' instead."
)
with pytest.raises(ValueError, match=err_msg):
est.fit(X)
def test_encode_options():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="ordinal").fit(X)
Xt_1 = est.transform(X)
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="onehot-dense").fit(X)
Xt_2 = est.transform(X)
assert not sp.issparse(Xt_2)
assert_array_equal(
OneHotEncoder(
categories=[np.arange(i) for i in [2, 3, 3, 3]], sparse=False
).fit_transform(Xt_1),
Xt_2,
)
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="onehot").fit(X)
Xt_3 = est.transform(X)
assert sp.issparse(Xt_3)
assert_array_equal(
OneHotEncoder(categories=[np.arange(i) for i in [2, 3, 3, 3]], sparse=True)
.fit_transform(Xt_1)
.toarray(),
Xt_3.toarray(),
)
def test_invalid_strategy_option():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], strategy="invalid-strategy")
err_msg = (
r"Valid options for 'strategy' are "
r"\('uniform', 'quantile', 'kmeans'\). "
r"Got strategy='invalid-strategy' instead."
)
with pytest.raises(ValueError, match=err_msg):
est.fit(X)
@pytest.mark.parametrize(
"strategy, expected_2bins, expected_3bins, expected_5bins",
[
("uniform", [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 2, 2], [0, 0, 1, 1, 4, 4]),
("kmeans", [0, 0, 0, 0, 1, 1], [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 3, 4]),
("quantile", [0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2], [0, 1, 2, 3, 4, 4]),
],
)
def test_nonuniform_strategies(
strategy, expected_2bins, expected_3bins, expected_5bins
):
X = np.array([0, 0.5, 2, 3, 9, 10]).reshape(-1, 1)
# with 2 bins
est = KBinsDiscretizer(n_bins=2, strategy=strategy, encode="ordinal")
Xt = est.fit_transform(X)
assert_array_equal(expected_2bins, Xt.ravel())
# with 3 bins
est = KBinsDiscretizer(n_bins=3, strategy=strategy, encode="ordinal")
Xt = est.fit_transform(X)
assert_array_equal(expected_3bins, Xt.ravel())
# with 5 bins
est = KBinsDiscretizer(n_bins=5, strategy=strategy, encode="ordinal")
Xt = est.fit_transform(X)
assert_array_equal(expected_5bins, Xt.ravel())
@pytest.mark.parametrize(
"strategy, expected_inv",
[
(
"uniform",
[
[-1.5, 2.0, -3.5, -0.5],
[-0.5, 3.0, -2.5, -0.5],
[0.5, 4.0, -1.5, 0.5],
[0.5, 4.0, -1.5, 1.5],
],
),
(
"kmeans",
[
[-1.375, 2.125, -3.375, -0.5625],
[-1.375, 2.125, -3.375, -0.5625],
[-0.125, 3.375, -2.125, 0.5625],
[0.75, 4.25, -1.25, 1.625],
],
),
(
"quantile",
[
[-1.5, 2.0, -3.5, -0.75],
[-0.5, 3.0, -2.5, 0.0],
[0.5, 4.0, -1.5, 1.25],
[0.5, 4.0, -1.5, 1.25],
],
),
],
)
@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"])
def test_inverse_transform(strategy, encode, expected_inv):
kbd = KBinsDiscretizer(n_bins=3, strategy=strategy, encode=encode)
Xt = kbd.fit_transform(X)
Xinv = kbd.inverse_transform(Xt)
assert_array_almost_equal(expected_inv, Xinv)
@pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"])
def test_transform_outside_fit_range(strategy):
X = np.array([0, 1, 2, 3])[:, None]
kbd = KBinsDiscretizer(n_bins=4, strategy=strategy, encode="ordinal")
kbd.fit(X)
X2 = np.array([-2, 5])[:, None]
X2t = kbd.transform(X2)
assert_array_equal(X2t.max(axis=0) + 1, kbd.n_bins_)
assert_array_equal(X2t.min(axis=0), [0])
def test_overwrite():
X = np.array([0, 1, 2, 3])[:, None]
X_before = X.copy()
est = KBinsDiscretizer(n_bins=3, encode="ordinal")
Xt = est.fit_transform(X)
assert_array_equal(X, X_before)
Xt_before = Xt.copy()
Xinv = est.inverse_transform(Xt)
assert_array_equal(Xt, Xt_before)
assert_array_equal(Xinv, np.array([[0.5], [1.5], [2.5], [2.5]]))
@pytest.mark.parametrize(
"strategy, expected_bin_edges", [("quantile", [0, 1, 3]), ("kmeans", [0, 1.5, 3])]
)
def test_redundant_bins(strategy, expected_bin_edges):
X = [[0], [0], [0], [0], [3], [3]]
kbd = KBinsDiscretizer(n_bins=3, strategy=strategy)
warning_message = "Consider decreasing the number of bins."
with pytest.warns(UserWarning, match=warning_message):
kbd.fit(X)
assert_array_almost_equal(kbd.bin_edges_[0], expected_bin_edges)
def test_percentile_numeric_stability():
X = np.array([0.05, 0.05, 0.95]).reshape(-1, 1)
bin_edges = np.array([0.05, 0.23, 0.41, 0.59, 0.77, 0.95])
Xt = np.array([0, 0, 4]).reshape(-1, 1)
kbd = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile")
warning_message = "Consider decreasing the number of bins."
with pytest.warns(UserWarning, match=warning_message):
kbd.fit(X)
assert_array_almost_equal(kbd.bin_edges_[0], bin_edges)
assert_array_almost_equal(kbd.transform(X), Xt)
@pytest.mark.parametrize("in_dtype", [np.float16, np.float32, np.float64])
@pytest.mark.parametrize("out_dtype", [None, np.float16, np.float32, np.float64])
@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"])
def test_consistent_dtype(in_dtype, out_dtype, encode):
X_input = np.array(X, dtype=in_dtype)
kbd = KBinsDiscretizer(n_bins=3, encode=encode, dtype=out_dtype)
    # an error is raised if a wrong dtype is defined for the model
if out_dtype not in [None, np.float32, np.float64]:
with pytest.raises(ValueError, match="Valid options for 'dtype' are"):
kbd.fit(X_input)
else:
kbd.fit(X_input)
# test output dtype
if out_dtype is not None:
expected_dtype = out_dtype
elif out_dtype is None and X_input.dtype == np.float16:
        # unsupported numeric input dtypes are cast to np.float64
expected_dtype = np.float64
else:
expected_dtype = X_input.dtype
Xt = kbd.transform(X_input)
assert Xt.dtype == expected_dtype
@pytest.mark.parametrize("input_dtype", [np.float16, np.float32, np.float64])
@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"])
def test_32_equal_64(input_dtype, encode):
# TODO this check is redundant with common checks and can be removed
# once #16290 is merged
X_input = np.array(X, dtype=input_dtype)
# 32 bit output
kbd_32 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float32)
kbd_32.fit(X_input)
Xt_32 = kbd_32.transform(X_input)
# 64 bit output
kbd_64 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float64)
kbd_64.fit(X_input)
Xt_64 = kbd_64.transform(X_input)
assert_allclose_dense_sparse(Xt_32, Xt_64)
# FIXME: remove the `filterwarnings` in 1.3
@pytest.mark.filterwarnings("ignore:In version 1.3 onwards, subsample=2e5")
@pytest.mark.parametrize("subsample", [None, "warn"])
def test_kbinsdiscretizer_subsample_default(subsample):
# Since the size of X is small (< 2e5), subsampling will not take place.
X = np.array([-2, 1.5, -4, -1]).reshape(-1, 1)
kbd_default = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile")
kbd_default.fit(X)
kbd_with_subsampling = clone(kbd_default)
kbd_with_subsampling.set_params(subsample=subsample)
kbd_with_subsampling.fit(X)
for bin_kbd_default, bin_kbd_with_subsampling in zip(
kbd_default.bin_edges_[0], kbd_with_subsampling.bin_edges_[0]
):
np.testing.assert_allclose(bin_kbd_default, bin_kbd_with_subsampling)
assert kbd_default.bin_edges_.shape == kbd_with_subsampling.bin_edges_.shape
def test_kbinsdiscretizer_subsample_invalid_strategy():
X = np.array([-2, 1.5, -4, -1]).reshape(-1, 1)
kbd = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="uniform", subsample=3)
err_msg = '`subsample` must be used with `strategy="quantile"`.'
with pytest.raises(ValueError, match=err_msg):
kbd.fit(X)
def test_kbinsdiscretizer_subsample_invalid_type():
X = np.array([-2, 1.5, -4, -1]).reshape(-1, 1)
kbd = KBinsDiscretizer(
n_bins=10, encode="ordinal", strategy="quantile", subsample="full"
)
msg = "subsample must be an instance of int, not str."
with pytest.raises(TypeError, match=msg):
kbd.fit(X)
# TODO: Remove in 1.3
def test_kbinsdiscretizer_subsample_warn():
X = np.random.rand(200001, 1).reshape(-1, 1)
kbd = KBinsDiscretizer(n_bins=100, encode="ordinal", strategy="quantile")
msg = "In version 1.3 onwards, subsample=2e5 will be used by default."
with pytest.warns(FutureWarning, match=msg):
kbd.fit(X)
@pytest.mark.parametrize("subsample", [0, int(2e5)])
def test_kbinsdiscretizer_subsample_values(subsample):
X = np.random.rand(220000, 1).reshape(-1, 1)
kbd_default = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile")
kbd_with_subsampling = clone(kbd_default)
kbd_with_subsampling.set_params(subsample=subsample)
if subsample == 0:
with pytest.raises(ValueError, match="subsample == 0, must be >= 1."):
kbd_with_subsampling.fit(X)
else:
# TODO: Remove in 1.3
msg = "In version 1.3 onwards, subsample=2e5 will be used by default."
with pytest.warns(FutureWarning, match=msg):
kbd_default.fit(X)
kbd_with_subsampling.fit(X)
assert not np.all(
kbd_default.bin_edges_[0] == kbd_with_subsampling.bin_edges_[0]
)
assert kbd_default.bin_edges_.shape == kbd_with_subsampling.bin_edges_.shape
@pytest.mark.parametrize(
"encode, expected_names",
[
(
"onehot",
[
f"feat{col_id}_{float(bin_id)}"
for col_id in range(3)
for bin_id in range(4)
],
),
(
"onehot-dense",
[
f"feat{col_id}_{float(bin_id)}"
for col_id in range(3)
for bin_id in range(4)
],
),
("ordinal", [f"feat{col_id}" for col_id in range(3)]),
],
)
def test_kbinsdiscrtizer_get_feature_names_out(encode, expected_names):
"""Check get_feature_names_out for different settings.
Non-regression test for #22731
"""
X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]]
kbd = KBinsDiscretizer(n_bins=4, encode=encode).fit(X)
Xt = kbd.transform(X)
input_features = [f"feat{i}" for i in range(3)]
output_names = kbd.get_feature_names_out(input_features)
assert Xt.shape[1] == output_names.shape[0]
assert_array_equal(output_names, expected_names)
|
{"hexsha": "e1317acb978084675cb32d3ae204320b7235f285", "size": 15963, "ext": "py", "lang": "Python", "max_stars_repo_path": "sklearn/preprocessing/tests/test_discretization.py", "max_stars_repo_name": "huzq/scikit-learn", "max_stars_repo_head_hexsha": "f862129f36786acbae3d9f2d161bbb72d77b87ec", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-16T17:33:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T11:50:21.000Z", "max_issues_repo_path": "sklearn/preprocessing/tests/test_discretization.py", "max_issues_repo_name": "huzq/scikit-learn", "max_issues_repo_head_hexsha": "f862129f36786acbae3d9f2d161bbb72d77b87ec", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2022-03-12T22:36:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T06:47:36.000Z", "max_forks_repo_path": "sklearn/preprocessing/tests/test_discretization.py", "max_forks_repo_name": "huzq/scikit-learn", "max_forks_repo_head_hexsha": "f862129f36786acbae3d9f2d161bbb72d77b87ec", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-16T05:40:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-16T05:40:12.000Z", "avg_line_length": 33.7484143763, "max_line_length": 88, "alphanum_fraction": 0.6213117835, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4965}
|
import numpy as np
from PyMieScatt import RayleighMieQ
from scipy.special import jv, yv
def MieQ(m, wavelength, diameter, nMedium=1, asDict=False, asCrossSection=False):
# http://pymiescatt.readthedocs.io/en/latest/forward.html#MieQ
wavelength /= nMedium
m /= nMedium
x = np.pi*diameter/wavelength
if x==0:
return 0, 0, 0, 1.5, 0, 0, 0
elif x<=0.05:
return RayleighMieQ(m, wavelength, diameter, asDict)
elif x>0.05:
nmax = np.round(2+x+4*(x**(1/3)))
n = np.arange(1,nmax+1)
n1 = 2*n+1
n2 = n*(n+2)/(n+1)
n3 = n1/(n*(n+1))
x2 = x**2
an,bn = Mie_ab(m,x)
qext = (2/x2)*np.sum(n1*(an.real+bn.real))
qsca = (2/x2)*np.sum(n1*(an.real**2+an.imag**2+bn.real**2+bn.imag**2))
qabs = qext-qsca
g1 = [an.real[1:int(nmax)],
an.imag[1:int(nmax)],
bn.real[1:int(nmax)],
bn.imag[1:int(nmax)]]
g1 = [np.append(x, 0.0) for x in g1]
g = (4/(qsca*x2))*np.sum((n2*(an.real*g1[0]+an.imag*g1[1]+bn.real*g1[2]+bn.imag*g1[3]))+(n3*(an.real*bn.real+an.imag*bn.imag)))
qpr = qext-qsca*g
qback = (1/x2)*(np.abs(np.sum(n1*((-1)**n)*(an-bn)))**2)
qratio = qback/qsca
if asCrossSection:
css = np.pi*(diameter/2)**2
cext = css*qext
csca = css*qsca
cabs = css*qabs
cpr = css*qpr
cback = css*qback
cratio = css*qratio
if asDict:
return dict(Cext=cext,Csca=csca,Cabs=cabs,g=g,Cpr=cpr,Cback=cback,Cratio=cratio)
else:
return cext, csca, cabs, g, cpr, cback, cratio
else:
if asDict:
return dict(Qext=qext,Qsca=qsca,Qabs=qabs,g=g,Qpr=qpr,Qback=qback,Qratio=qratio)
else:
return qext, qsca, qabs, g, qpr, qback, qratio
def Mie_ab(m,x):
# http://pymiescatt.readthedocs.io/en/latest/forward.html#Mie_ab
mx = m*x
nmax = np.round(2+x+4*(x**(1/3)))
nmx = np.round(max(nmax,np.abs(mx))+16)
n = np.arange(1,nmax+1)
nu = n + 0.5
sx = np.sqrt(0.5*np.pi*x)
px = sx*jv(nu,x)
p1x = np.append(np.sin(x), px[0:int(nmax)-1])
chx = -sx*yv(nu,x)
ch1x = np.append(np.cos(x), chx[0:int(nmax)-1])
gsx = px-(0+1j)*chx
gs1x = p1x-(0+1j)*ch1x
# B&H Equation 4.89
Dn = np.zeros(int(nmx),dtype=complex)
for i in range(int(nmx)-1,1,-1):
Dn[i-1] = (i/mx)-(1/(Dn[i]+i/mx))
D = Dn[1:int(nmax)+1] # Dn(mx), drop terms beyond nMax
da = D/m+n/x
db = m*D+n/x
an = (da*px-p1x)/(da*gsx-gs1x)
bn = (db*px-p1x)/(db*gsx-gs1x)
return an, bn
# quick prototype sanity checks: efficiencies in vacuum (q1) versus the same
# particle embedded in a medium of refractive index 1.3 (q2)
m = 1.5+0.5j
nMedium = 1.3
q1 = MieQ(m, 530, 500, asDict=True)
q2 = MieQ(m, 530, 500, nMedium=nMedium, asDict=True)
m /= nMedium  # NOTE: trailing prototype statement; mutates m after the checks
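# A hedged comparison of the two results (q1/q2 are the dicts computed above;
# the exact numbers depend on the installed PyMieScatt/scipy versions):
print(q1['Qext'], q2['Qext'])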
|
{"hexsha": "da2901b897b2943ec59d19ed3b1d7de99ed797f4", "size": 2574, "ext": "py", "lang": "Python", "max_stars_repo_path": "PyMieScatt/devmode/functionPrototyping.py", "max_stars_repo_name": "hmaarrfk/PyMieScatt", "max_stars_repo_head_hexsha": "81d152af85dad20963cdee2dffd9dfe9a8fc54a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PyMieScatt/devmode/functionPrototyping.py", "max_issues_repo_name": "hmaarrfk/PyMieScatt", "max_issues_repo_head_hexsha": "81d152af85dad20963cdee2dffd9dfe9a8fc54a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PyMieScatt/devmode/functionPrototyping.py", "max_forks_repo_name": "hmaarrfk/PyMieScatt", "max_forks_repo_head_hexsha": "81d152af85dad20963cdee2dffd9dfe9a8fc54a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3829787234, "max_line_length": 131, "alphanum_fraction": 0.5792540793, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1092}
|
Demo - Poisson equation 2D
=======================
Solve Poisson's equation in 2D with homogeneous Dirichlet bcs in one direction and periodicity in the other.
$$
\begin{align}
\nabla^2 u(x, y) &= f(x, y), \quad \forall \, (x, y) \in [-1, 1] \times [0, 2\pi]\\
u(\pm 1, y) &= 0 \\
u(x, 2\pi) &= u(x, 0)
\end{align}
$$
where $u(x, y)$ is the solution and $f(x, y)$ is some right hand side function.
Use either Chebyshev basis $P=\{T_k(x)\}_{k=0}^{N_0-1}$ or Legendre $P=\{L_k(x)\}_{k=0}^{N_0-1}$ and define Shen's composite Dirichlet basis as
$$
V^{N_0}(x) = \{P_k(x) - P_{k+2}(x)\, | \, k=0, 1, \ldots, N_0-3\}.
$$
For the periodic direction use Fourier exponentials
$$
V^{N_1}(y) = \{\exp(i l y)\, | \, l=-N_1/2, -N_1/2+1, \ldots, N_1/2-1\}.
$$
And then define tensor product space as an outer product of these spaces
$$
V^N(x, y) = V^{N_0}(x) \times V^{N_1}(y).
$$
We get the test function
$$
\phi_{kl}(x, y) = (P_k(x) - P_{k+2}(x))\exp(i l y),
$$
and define for simplicity
$$
\begin{align}
v(x, y) &= \phi_{kl}(x, y), \\
u(x, y) &= \sum_{k=0}^{N_0-3}\sum_{l=-N_1/2}^{N_1/2-1} \hat{u}_{kl} \phi_{kl}(x, y),
\end{align}
$$
where $u(x, y)$ is the trial function.
The weighted inner product is defined almost exactly like in 1D; however, we now have to take into account that the solution is complex valued. The inner product is now
$$
(u, v)_w = \int_{-1}^{1}\int_{0}^{2\pi} u v^* w dxdy,
$$
where $v^*$ is the complex conjugate of $v$. Furthermore, we use the constant weight $w(x, y)=1/(2\pi)$ for Legendre/Fourier and get
Find $u \in V^N$ such that
$$ (\nabla u, \nabla v)_w = -(f, v)_w, \quad \forall \, v \in V^N.$$
For Chebyshev the weight is $1/\sqrt{1-x^2}/(2\pi)$ and we do not perform integration by parts:
Find $u \in V^N$ such that
$$ (\nabla^2 u, v)_w = (f, v)_w, \quad \forall \, v \in V^N.$$
## Implementation using shenfun
```python
from shenfun import *
import matplotlib.pyplot as plt
N = (16, 12)
BX = FunctionSpace(N[0], 'L', bc=(0, 0))
BY = FunctionSpace(N[1], 'F')
V = TensorProductSpace(comm, (BX, BY))
```
```python
v = TestFunction(V)
u = TrialFunction(V)
A = inner(grad(u), grad(v))
```
```python
print(A)
```
`TPMatrix` is a tensor product matrix. It is the outer product of two smaller matrices. Consider the inner product:
$$
\begin{align}
(\nabla u, \nabla v) &= \frac{1}{2\pi}\int_{-1}^{1}\int_{0}^{2\pi} \left(\frac{\partial u}{\partial x}, \frac{\partial u}{\partial y}\right) \cdot \left(\frac{\partial v^*}{\partial x}, \frac{\partial v^*}{\partial y}\right) {dxdy} \\
(\nabla u, \nabla v) &= \frac{1}{2\pi} \int_{-1}^1 \int_{0}^{2\pi} \left( \frac{\partial u}{\partial x}\frac{\partial v^*}{\partial x} + \frac{\partial u}{\partial y}\frac{\partial v^*}{\partial y} \right) {dxdy} \\
(\nabla u, \nabla v) &= \frac{1}{2\pi}\int_{-1}^1 \int_{0}^{2\pi} \frac{\partial u}{\partial x}\frac{\partial v^*}{\partial x} {dxdy} + \frac{1}{2\pi}\int_{-1}^1 \int_{0}^{2\pi} \frac{\partial u}{\partial y}\frac{\partial v^*}{\partial y} {dxdy}
\end{align}
$$
which is also a sum of two terms. These two terms are the two `TPMatrix` objects returned by `inner` above.
Now each one of these two terms can be written as the outer product of two smaller matrices. Consider the first:
$$
\begin{align}
\frac{1}{2\pi}\int_{-1}^1 \int_{0}^{2\pi} \frac{\partial u}{\partial x}\frac{\partial v^*}{\partial x} {dxdy} &= \frac{1}{2\pi}\int_{-1}^1 \int_{0}^{2\pi} \frac{\partial \sum_{m}\sum_{n} \hat{u}_{mn} \phi_{mn}}{\partial x}\frac{\partial \phi_{kl}^*}{\partial x }{dxdy} \\
&= \sum_{m}\sum_{n} \hat{u}_{mn} \frac{1}{2\pi} \int_{-1}^1 \int_{0}^{2\pi} \frac{\partial (P_m(x)-P_{m+2}(x))\exp(iny)}{\partial x}\frac{\partial (P_k(x)-P_{k+2}(x))\exp(-ily)}{\partial x} {dxdy} \\
&= \sum_{m}\sum_{n} \hat{u}_{mn} \frac{1}{2\pi} \int_{-1}^1 \int_{0}^{2\pi} \frac{\partial (P_m(x)-P_{m+2}(x))}{\partial x}\frac{\partial (P_k(x)-P_{k+2}(x))}{\partial x} \exp(iny) \exp(-ily) {dxdy} \\
&= \sum_{m}\sum_{n} \hat{u}_{mn} \underbrace{\int_{-1}^1 \frac{\partial (P_m(x)-P_{m+2}(x))}{\partial x}\frac{\partial (P_k(x)-P_{k+2}(x))}{\partial x} {dx}}_{a_{km}} \underbrace{\frac{1}{2\pi}\int_{0}^{2\pi} \exp(iny) \exp(-ily) {dy}}_{\delta_{ln}} \\
&= a_{km} \delta_{ln} \hat{u}_{mn} \\
&= a_{km} \hat{u}_{ml}
\end{align}
$$
```python
print(A[0].mats)
```
The first item of the `A[0].mats` list is the $a_{km}$ matrix and the second is the identity matrix.
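To make this concrete, the two factor matrices can be pulled out and inspected directly (this only indexes the `mats` list described above):
```python
a_km = A[0].mats[0]       # the $a_{km}$ matrix along the x-direction
delta_ln = A[0].mats[1]   # the (scaled) identity along the Fourier direction
print(a_km)
print(delta_ln)
```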
Now create a manufactured solution to test the implementation.
```python
import sympy as sp
x, y = sp.symbols('x,y')
ue = (sp.cos(4*x) + sp.sin(2*y))*(1 - x**2)
fe = ue.diff(x, 2) + ue.diff(y, 2)
fl = sp.lambdify((x, y), fe, 'numpy')
fj = Array(V, buffer=fl(*V.mesh()))
```
Assemble the right-hand side
```python
f_tilde = Function(V)
f_tilde = inner(v, -fj, output_array=f_tilde)
```
Solve the system of equations by fetching an efficient Helmholtz solver
```python
u_hat = Function(V)
solver = legendre.la.Helmholtz(*A)
u_hat = solver(u_hat, f_tilde)
```
```python
X = V.local_mesh(True)
plt.contourf(X[0], X[1], u_hat.backward(), 100)
plt.colorbar()
```
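As a quick sanity check, compare the computed solution with the manufactured solution on the quadrature mesh (a small sketch; it uses only names defined in the cells above):
```python
import numpy as np
ul = sp.lambdify((x, y), ue, 'numpy')
uj = Array(V, buffer=ul(*V.mesh()))
print('max pointwise error =', np.max(np.abs(u_hat.backward() - uj)))
```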
|
{"hexsha": "6cf8f2b229576db350872e786140e99682d68187", "size": 8375, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "binder/Poisson2D.ipynb", "max_stars_repo_name": "jaisw7/shenfun", "max_stars_repo_head_hexsha": "7482beb5b35580bc45f72704b69343cc6fc1d773", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 138, "max_stars_repo_stars_event_min_datetime": "2017-06-17T13:30:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T02:33:47.000Z", "max_issues_repo_path": "binder/Poisson2D.ipynb", "max_issues_repo_name": "jaisw7/shenfun", "max_issues_repo_head_hexsha": "7482beb5b35580bc45f72704b69343cc6fc1d773", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 73, "max_issues_repo_issues_event_min_datetime": "2017-05-16T06:53:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T10:40:44.000Z", "max_forks_repo_path": "binder/Poisson2D.ipynb", "max_forks_repo_name": "jaisw7/shenfun", "max_forks_repo_head_hexsha": "7482beb5b35580bc45f72704b69343cc6fc1d773", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 38, "max_forks_repo_forks_event_min_datetime": "2018-01-31T14:37:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T15:07:27.000Z", "avg_line_length": 31.965648855, "max_line_length": 309, "alphanum_fraction": 0.4995820896, "converted": true, "num_tokens": 2021}
|
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Double,
Integer,
NaturalLanguage,
)
from blocktorch.pipelines.components import Imputer
@pytest.fixture
def imputer_test_data():
return pd.DataFrame(
{
"categorical col": pd.Series(
["zero", "one", "two", "zero", "two"] * 4, dtype="category"
),
"int col": [0, 1, 2, 0, 3] * 4,
"object col": ["b", "b", "a", "c", "d"] * 4,
"float col": [0.0, 1.0, 0.0, -2.0, 5.0] * 4,
"bool col": [True, False, False, True, True] * 4,
"categorical with nan": pd.Series(
[np.nan, "1", "0", "0", "3"] * 4, dtype="category"
),
"int with nan": [np.nan, 1, 0, 0, 1] * 4,
"float with nan": [0.0, 1.0, np.nan, -1.0, 0.0] * 4,
"object with nan": ["b", "b", np.nan, "c", np.nan] * 4,
"bool col with nan": pd.Series(
[True, np.nan, False, np.nan, True] * 4, dtype="category"
),
"all nan": [np.nan, np.nan, np.nan, np.nan, np.nan] * 4,
"all nan cat": pd.Series(
[np.nan, np.nan, np.nan, np.nan, np.nan] * 4, dtype="category"
),
}
)
def test_invalid_strategy_parameters():
with pytest.raises(ValueError, match="Valid impute strategies are"):
Imputer(numeric_impute_strategy="not a valid strategy")
with pytest.raises(ValueError, match="Valid categorical impute strategies are"):
Imputer(categorical_impute_strategy="mean")
def test_imputer_default_parameters():
imputer = Imputer()
expected_parameters = {
"categorical_impute_strategy": "most_frequent",
"numeric_impute_strategy": "mean",
"categorical_fill_value": None,
"numeric_fill_value": None,
}
assert imputer.parameters == expected_parameters
@pytest.mark.parametrize("categorical_impute_strategy", ["most_frequent", "constant"])
@pytest.mark.parametrize(
"numeric_impute_strategy", ["mean", "median", "most_frequent", "constant"]
)
def test_imputer_init(categorical_impute_strategy, numeric_impute_strategy):
imputer = Imputer(
categorical_impute_strategy=categorical_impute_strategy,
numeric_impute_strategy=numeric_impute_strategy,
categorical_fill_value="str_fill_value",
numeric_fill_value=-1,
)
expected_parameters = {
"categorical_impute_strategy": categorical_impute_strategy,
"numeric_impute_strategy": numeric_impute_strategy,
"categorical_fill_value": "str_fill_value",
"numeric_fill_value": -1,
}
expected_hyperparameters = {
"categorical_impute_strategy": ["most_frequent"],
"numeric_impute_strategy": ["mean", "median", "most_frequent"],
}
assert imputer.name == "Imputer"
assert imputer.parameters == expected_parameters
assert imputer.hyperparameter_ranges == expected_hyperparameters
def test_numeric_only_input(imputer_test_data):
X = imputer_test_data[
["int col", "float col", "int with nan", "float with nan", "all nan"]
]
y = pd.Series([0, 0, 1, 0, 1] * 4)
imputer = Imputer(numeric_impute_strategy="median")
imputer.fit(X, y)
transformed = imputer.transform(X, y)
expected = pd.DataFrame(
{
"int col": [0, 1, 2, 0, 3] * 4,
"float col": [0.0, 1.0, 0.0, -2.0, 5.0] * 4,
"int with nan": [0.5, 1.0, 0.0, 0.0, 1.0] * 4,
"float with nan": [0.0, 1.0, 0, -1.0, 0.0] * 4,
}
)
assert_frame_equal(transformed, expected, check_dtype=False)
imputer = Imputer()
transformed = imputer.fit_transform(X, y)
assert_frame_equal(transformed, expected, check_dtype=False)
def test_categorical_only_input(imputer_test_data):
X = imputer_test_data[
[
"categorical col",
"object col",
"bool col",
"categorical with nan",
"object with nan",
"bool col with nan",
"all nan cat",
]
]
y = pd.Series([0, 0, 1, 0, 1] * 4)
expected = pd.DataFrame(
{
"categorical col": pd.Series(
["zero", "one", "two", "zero", "two"] * 4, dtype="category"
),
"object col": pd.Series(["b", "b", "a", "c", "d"] * 4, dtype="category"),
"bool col": [True, False, False, True, True] * 4,
"categorical with nan": pd.Series(
["0", "1", "0", "0", "3"] * 4, dtype="category"
),
"object with nan": pd.Series(
["b", "b", "b", "c", "b"] * 4, dtype="category"
),
"bool col with nan": pd.Series(
[True, True, False, True, True] * 4, dtype="category"
),
}
)
imputer = Imputer()
transformed = imputer.fit_transform(X, y)
assert_frame_equal(transformed, expected, check_dtype=False)
def test_categorical_and_numeric_input(imputer_test_data):
X = imputer_test_data
    y = pd.Series([0, 0, 1, 0, 1] * 4)
imputer = Imputer()
imputer.fit(X, y)
transformed = imputer.transform(X, y)
expected = pd.DataFrame(
{
"categorical col": pd.Series(
["zero", "one", "two", "zero", "two"] * 4, dtype="category"
),
"int col": [0, 1, 2, 0, 3] * 4,
"object col": pd.Series(["b", "b", "a", "c", "d"] * 4, dtype="category"),
"float col": [0.0, 1.0, 0.0, -2.0, 5.0] * 4,
"bool col": [True, False, False, True, True] * 4,
"categorical with nan": pd.Series(
["0", "1", "0", "0", "3"] * 4, dtype="category"
),
"int with nan": [0.5, 1.0, 0.0, 0.0, 1.0] * 4,
"float with nan": [0.0, 1.0, 0, -1.0, 0.0] * 4,
"object with nan": pd.Series(
["b", "b", "b", "c", "b"] * 4, dtype="category"
),
"bool col with nan": pd.Series(
[True, True, False, True, True] * 4, dtype="category"
),
}
)
assert_frame_equal(transformed, expected, check_dtype=False)
imputer = Imputer()
transformed = imputer.fit_transform(X, y)
assert_frame_equal(transformed, expected, check_dtype=False)
def test_drop_all_columns(imputer_test_data):
X = imputer_test_data[["all nan cat", "all nan"]]
y = pd.Series([0, 0, 1, 0, 1] * 4)
X.ww.init()
imputer = Imputer()
imputer.fit(X, y)
transformed = imputer.transform(X, y)
expected = X.drop(["all nan cat", "all nan"], axis=1)
assert_frame_equal(transformed, expected, check_dtype=False)
imputer = Imputer()
transformed = imputer.fit_transform(X, y)
assert_frame_equal(transformed, expected, check_dtype=False)
def test_typed_imputer_numpy_input():
X = np.array([[1, 2, 2, 0], [np.nan, 0, 0, 0], [1, np.nan, np.nan, np.nan]])
y = pd.Series([0, 0, 1])
imputer = Imputer()
imputer.fit(X, y)
transformed = imputer.transform(X, y)
expected = pd.DataFrame(np.array([[1, 2, 2, 0], [1, 0, 0, 0], [1, 1, 1, 0]]))
assert_frame_equal(transformed, expected, check_dtype=False)
imputer = Imputer()
transformed = imputer.fit_transform(X, y)
assert_frame_equal(transformed, expected, check_dtype=False)
def test_imputer_datetime_input():
X = pd.DataFrame(
{
"dates": ["20190902", "20200519", "20190607", np.nan],
"more dates": ["20190902", "20201010", "20190921", np.nan],
}
)
X["dates"] = pd.to_datetime(X["dates"], format="%Y%m%d")
X["more dates"] = pd.to_datetime(X["more dates"], format="%Y%m%d")
y = pd.Series()
imputer = Imputer()
imputer.fit(X, y)
transformed = imputer.transform(X, y)
assert_frame_equal(transformed, X, check_dtype=False)
imputer = Imputer()
transformed = imputer.fit_transform(X, y)
assert_frame_equal(transformed, X, check_dtype=False)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_imputer_empty_data(data_type, make_data_type):
X = pd.DataFrame()
y = pd.Series()
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
expected = pd.DataFrame(index=pd.Index([]), columns=pd.Index([]))
imputer = Imputer()
imputer.fit(X, y)
transformed = imputer.transform(X, y)
assert_frame_equal(transformed, expected, check_dtype=False)
imputer = Imputer()
transformed = imputer.fit_transform(X, y)
assert_frame_equal(transformed, expected, check_dtype=False)
def test_imputer_does_not_reset_index():
X = pd.DataFrame(
{
"input_val": np.arange(10),
"target": np.arange(10),
"input_cat": ["a"] * 7 + ["b"] * 3,
}
)
X.loc[5, "input_val"] = np.nan
X.loc[5, "input_cat"] = np.nan
assert X.index.tolist() == list(range(10))
X.ww.init(logical_types={"input_cat": "categorical"})
X.drop(0, inplace=True)
y = X.ww.pop("target")
imputer = Imputer()
imputer.fit(X, y=y)
transformed = imputer.transform(X)
pd.testing.assert_frame_equal(
transformed,
pd.DataFrame(
{
"input_val": [1.0, 2, 3, 4, 5, 6, 7, 8, 9],
"input_cat": pd.Categorical(["a"] * 6 + ["b"] * 3),
},
index=list(range(1, 10)),
),
)
def test_imputer_fill_value(imputer_test_data):
X = imputer_test_data[
[
"int with nan",
"categorical with nan",
"float with nan",
"object with nan",
"bool col with nan",
]
]
y = pd.Series([0, 0, 1, 0, 1] * 4)
imputer = Imputer(
categorical_impute_strategy="constant",
numeric_impute_strategy="constant",
categorical_fill_value="fill",
numeric_fill_value=-1,
)
imputer.fit(X, y)
transformed = imputer.transform(X, y)
expected = pd.DataFrame(
{
"int with nan": [-1, 1, 0, 0, 1] * 4,
"categorical with nan": pd.Series(
["fill", "1", "0", "0", "3"] * 4, dtype="category"
),
"float with nan": [0.0, 1.0, -1, -1.0, 0.0] * 4,
"object with nan": pd.Series(
["b", "b", "fill", "c", "fill"] * 4, dtype="category"
),
"bool col with nan": pd.Series(
[True, "fill", False, "fill", True] * 4, dtype="category"
),
}
)
assert_frame_equal(expected, transformed, check_dtype=False)
imputer = Imputer(
categorical_impute_strategy="constant",
numeric_impute_strategy="constant",
categorical_fill_value="fill",
numeric_fill_value=-1,
)
transformed = imputer.fit_transform(X, y)
assert_frame_equal(expected, transformed, check_dtype=False)
def test_imputer_no_nans(imputer_test_data):
X = imputer_test_data[["categorical col", "object col", "bool col"]]
y = pd.Series([0, 0, 1, 0, 1] * 4)
imputer = Imputer(
categorical_impute_strategy="constant",
numeric_impute_strategy="constant",
categorical_fill_value="fill",
numeric_fill_value=-1,
)
imputer.fit(X, y)
transformed = imputer.transform(X, y)
expected = pd.DataFrame(
{
"categorical col": pd.Series(
["zero", "one", "two", "zero", "two"] * 4, dtype="category"
),
"object col": pd.Series(["b", "b", "a", "c", "d"] * 4, dtype="category"),
"bool col": [True, False, False, True, True] * 4,
}
)
assert_frame_equal(transformed, expected, check_dtype=False)
imputer = Imputer(
categorical_impute_strategy="constant",
numeric_impute_strategy="constant",
categorical_fill_value="fill",
numeric_fill_value=-1,
)
transformed = imputer.fit_transform(X, y)
assert_frame_equal(transformed, expected, check_dtype=False)
def test_imputer_with_none():
X = pd.DataFrame(
{
"int with None": [1, 0, 5, None] * 4,
"float with None": [0.1, 0.0, 0.5, None] * 4,
"category with None": pd.Series(
["b", "a", "a", None] * 4, dtype="category"
),
"boolean with None": pd.Series([True, None, False, True] * 4),
"object with None": ["b", "a", "a", None] * 4,
"all None": [None, None, None, None] * 4,
}
)
y = pd.Series([0, 0, 1, 0, 1] * 4)
imputer = Imputer()
imputer.fit(X, y)
transformed = imputer.transform(X, y)
expected = pd.DataFrame(
{
"int with None": [1, 0, 5, 2] * 4,
"float with None": [0.1, 0.0, 0.5, 0.2] * 4,
"category with None": pd.Series(["b", "a", "a", "a"] * 4, dtype="category"),
"boolean with None": pd.Series(
[True, True, False, True] * 4, dtype="category"
),
"object with None": pd.Series(["b", "a", "a", "a"] * 4, dtype="category"),
}
)
assert_frame_equal(expected, transformed, check_dtype=False)
imputer = Imputer()
transformed = imputer.fit_transform(X, y)
assert_frame_equal(expected, transformed, check_dtype=False)
@pytest.mark.parametrize("data_type", ["pd", "ww"])
def test_imputer_all_bool_return_original(data_type, make_data_type):
X = make_data_type(
data_type, pd.DataFrame([True, True, False, True, True], dtype=bool)
)
X_expected_arr = pd.DataFrame([True, True, False, True, True], dtype=bool)
y = make_data_type(data_type, pd.Series([1, 0, 0, 1, 0]))
imputer = Imputer()
imputer.fit(X, y)
X_t = imputer.transform(X)
assert_frame_equal(X_expected_arr, X_t)
@pytest.mark.parametrize("data_type", ["pd", "ww"])
def test_imputer_bool_dtype_object(data_type, make_data_type):
X = pd.DataFrame([True, np.nan, False, np.nan, True] * 4)
y = pd.Series([1, 0, 0, 1, 0] * 4)
X_expected_arr = pd.DataFrame([True, True, False, True, True] * 4, dtype="category")
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
imputer = Imputer()
imputer.fit(X, y)
X_t = imputer.transform(X)
assert_frame_equal(X_expected_arr, X_t)
@pytest.mark.parametrize("data_type", ["pd", "ww"])
def test_imputer_multitype_with_one_bool(data_type, make_data_type):
X_multi = pd.DataFrame(
{
"bool with nan": pd.Series([True, np.nan, False, np.nan, False] * 4),
"bool no nan": pd.Series(
[False, False, False, False, True] * 4, dtype=bool
),
}
)
y = pd.Series([1, 0, 0, 1, 0] * 4)
X_multi_expected_arr = pd.DataFrame(
{
"bool with nan": pd.Series(
[True, False, False, False, False] * 4, dtype="category"
),
"bool no nan": pd.Series(
[False, False, False, False, True] * 4, dtype=bool
),
}
)
X_multi = make_data_type(data_type, X_multi)
y = make_data_type(data_type, y)
imputer = Imputer()
imputer.fit(X_multi, y)
X_multi_t = imputer.transform(X_multi)
assert_frame_equal(X_multi_expected_arr, X_multi_t)
def test_imputer_int_preserved():
X = pd.DataFrame(pd.Series([1, 2, 11, np.nan]))
imputer = Imputer(numeric_impute_strategy="mean")
transformed = imputer.fit_transform(X)
pd.testing.assert_frame_equal(
transformed, pd.DataFrame(pd.Series([1, 2, 11, 14 / 3]))
)
assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {0: Double}
X = pd.DataFrame(pd.Series([1, 2, 3, np.nan]))
imputer = Imputer(numeric_impute_strategy="mean")
transformed = imputer.fit_transform(X)
pd.testing.assert_frame_equal(
transformed, pd.DataFrame(pd.Series([1, 2, 3, 2])), check_dtype=False
)
assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {0: Double}
X = pd.DataFrame(pd.Series([1, 2, 3, 4], dtype="int"))
imputer = Imputer(numeric_impute_strategy="mean")
transformed = imputer.fit_transform(X)
pd.testing.assert_frame_equal(
transformed, pd.DataFrame(pd.Series([1, 2, 3, 4])), check_dtype=False
)
assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {0: Integer}
def test_imputer_bool_preserved():
X = pd.DataFrame(pd.Series([True, False, True, np.nan] * 4))
imputer = Imputer(categorical_impute_strategy="most_frequent")
transformed = imputer.fit_transform(X)
pd.testing.assert_frame_equal(
transformed,
pd.DataFrame(pd.Series([True, False, True, True] * 4, dtype="category")),
)
assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {
0: Categorical
}
X = pd.DataFrame(pd.Series([True, False, True, False] * 4))
imputer = Imputer(categorical_impute_strategy="most_frequent")
transformed = imputer.fit_transform(X)
pd.testing.assert_frame_equal(
transformed,
pd.DataFrame(pd.Series([True, False, True, False] * 4)),
check_dtype=False,
)
assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {0: Boolean}
def test_imputer_does_not_erase_ww_info():
df_train = pd.DataFrame({"a": [1, 2, 3, 2], "b": ["a", "b", "b", "c"]})
df_holdout = pd.DataFrame({"a": [2], "b": [None]})
df_train.ww.init(logical_types={"a": "Double", "b": "Categorical"})
df_holdout.ww.init(logical_types={"a": "Double", "b": "Categorical"})
imputer = Imputer()
imputer.fit(df_train, None)
# Would error out if ww got erased because `b` would be inferred as Unknown, then Double.
imputer.transform(df_holdout, None)
with patch("blocktorch.pipelines.components.SimpleImputer.transform") as mock_transform:
mock_transform.side_effect = [df_holdout[["a"]], df_train[["b"]].iloc[0]]
imputer.transform(df_holdout, None)
        assert mock_transform.call_args[0][0].ww.schema == df_holdout.ww[["b"]].ww.schema
@pytest.mark.parametrize(
"X_df",
[
pd.DataFrame(pd.Series([1, 2, 3], dtype="Int64")),
pd.DataFrame(pd.Series([1.0, 2.0, 4.0], dtype="float")),
pd.DataFrame(pd.Series(["a", "b", "a"], dtype="category")),
pd.DataFrame(pd.Series([True, False, True], dtype=bool)),
pd.DataFrame(
pd.Series(
["this will be a natural language column because length", "yay", "hay"],
dtype="string",
)
),
],
)
@pytest.mark.parametrize("has_nan", [True, False])
@pytest.mark.parametrize("numeric_impute_strategy", ["mean", "median", "most_frequent"])
def test_imputer_woodwork_custom_overrides_returned_by_components(
X_df, has_nan, numeric_impute_strategy
):
y = pd.Series([1, 2, 1])
override_types = [Integer, Double, Categorical, NaturalLanguage, Boolean]
for logical_type in override_types:
        # Boolean columns containing NaNs used to fail to initialize.
        # They no longer fail, but they should, so skip this combination.
if has_nan and logical_type == Boolean:
continue
try:
X = X_df.copy()
if has_nan:
X.iloc[len(X_df) - 1, 0] = np.nan
X.ww.init(logical_types={0: logical_type})
except ww.exceptions.TypeConversionError:
continue
imputer = Imputer(numeric_impute_strategy=numeric_impute_strategy)
imputer.fit(X, y)
transformed = imputer.transform(X, y)
assert isinstance(transformed, pd.DataFrame)
if numeric_impute_strategy == "most_frequent":
assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {
0: logical_type
}
elif logical_type in [Categorical, NaturalLanguage] or not has_nan:
assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {
0: logical_type
}
else:
assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {
0: Double
}
|
{"hexsha": "d2edebf376b20b73300f85252055ba5ba2c24c68", "size": 20251, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml_source/src/blocktorch/blocktorch/tests/component_tests/test_imputer.py", "max_stars_repo_name": "blocktorch/blocktorch", "max_stars_repo_head_hexsha": "044aa269813ab22c5fd27f84272e5fb540fc522b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-23T12:23:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T12:23:02.000Z", "max_issues_repo_path": "ml_source/src/blocktorch/blocktorch/tests/component_tests/test_imputer.py", "max_issues_repo_name": "blocktorch/blocktorch", "max_issues_repo_head_hexsha": "044aa269813ab22c5fd27f84272e5fb540fc522b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ml_source/src/blocktorch/blocktorch/tests/component_tests/test_imputer.py", "max_forks_repo_name": "blocktorch/blocktorch", "max_forks_repo_head_hexsha": "044aa269813ab22c5fd27f84272e5fb540fc522b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2804878049, "max_line_length": 93, "alphanum_fraction": 0.5812058664, "include": true, "reason": "import numpy", "num_tokens": 5623}
|
#
# plot model parameters on a simplex
#
import sys, os
from argparse import ArgumentParser
import codecs
import numpy as np
from scipy.special import logsumexp  # logsumexp moved out of scipy.misc in modern SciPy
from scipy.stats import gaussian_kde
import matplotlib.pyplot as plt
from matplotlib.tri import UniformTriRefiner, Triangulation
sys.path.insert(1, os.path.join(sys.path[0], os.path.pardir))
from json_utils import load_json_file, load_json_stream
# import matplotlib as mpl
# mpl.rcParams['font.family'] = 'Nimbus Roman No9 L'
import matplotlib.font_manager as font_manager
path = '/usr/share/fonts/truetype/msttcorefonts/Times_New_Roman.ttf'
fontprop = font_manager.FontProperties(fname=path)
corners = np.array([[0, 0], [1, 0], [0.5, 0.75**0.5]])
# Mid-points of triangle sides opposite of each corner
midpoints = [(corners[(i + 1) % 3] + corners[(i + 2) % 3]) / 2.0 \
for i in range(3)]
def init_simplex(plt, subdiv=8, fsize=20):
triangle = Triangulation(corners[:, 0], corners[:, 1])
refiner = UniformTriRefiner(triangle)
trimesh = refiner.refine_triangulation(subdiv=subdiv)
plt.triplot(triangle) # plot triangle
plt.axis('off') # no normal axis
plt.axis('equal')
plt.annotate('L', (0, 0), xytext=(-0.08, -0.03), size=fsize, fontproperties=fontprop)
plt.annotate('S', (1, 0), xytext=(1.02, -0.03), size=fsize, fontproperties=fontprop)
plt.annotate('R', (0.5, 0.75**0.5), xytext=(0.47, 0.75**0.5 + 0.02), size=fsize, fontproperties=fontprop)
# xy1 = abc2xy(np.array([1, 0, 0]))
# plt.scatter(xy1[0], xy1[1], c='green', marker='o', s=30)
return trimesh
def fix_order(abc):
# NOTE: data order: R, L, S and plot order L, S, R
return np.array([abc[1], abc[2], abc[0]])
def abc2xy(abc):
# Init to triangle centroid
x = 1.0 / 2
y = 1.0 / (2 * np.sqrt(3))
    # Vector 1 - bisect out of lower left vertex
    x = x - (1.0 / np.sqrt(3)) * abc[0] * np.cos(np.pi / 6)
y = y - (1.0 / np.sqrt(3)) * abc[0] * np.sin(np.pi / 6)
# Vector 2 - bisect out of lower right vertex
x = x + (1.0 / np.sqrt(3)) * abc[1] * np.cos(np.pi / 6)
y = y - (1.0 / np.sqrt(3)) * abc[1] * np.sin(np.pi / 6)
# Vector 3 - bisect out of top vertex
y = y + (1.0 / np.sqrt(3) * abc[2])
return np.array((x, y))
def xy2bc(xy, tol=1.e-3):
'''Converts 2D Cartesian coordinates to barycentric.'''
s = [(corners[i] - midpoints[i]).dot(xy - midpoints[i]) / 0.75 \
for i in range(3)]
return np.clip(s, tol, 1.0 - tol)
def main():
parser = ArgumentParser()
parser.add_argument("--mtype", metavar="MODEL_TYPE", default="mono")
parser.add_argument("--type", metavar="POINT_TYPE", default="theta")
parser.add_argument("--output", metavar="IMG", default=None)
parser.add_argument("dumps", metavar="LANG", default=None)
args = parser.parse_args()
    fsize = 36
    subdiv = 8
    burnin = 100
    plt.figure(figsize=(8, 6), dpi=120)
    trimesh = init_simplex(plt, fsize=fsize, subdiv=subdiv)
points = []
stream = load_json_stream(open(args.dumps))
    for _ in range(burnin):  # skip burn-in samples
        next(stream)
for dump in stream:
N = len(dump['mixlist']) # number of langs
if args.mtype == 'mono':
for lang in dump['mixlist']:
adist = lang["adist"]
_sum = adist["K"] * adist["alpha"] + sum(adist["voc"])
probs = []
                for k in range(adist["K"]):
probs.append((adist["voc"][k] + adist["alpha"]) / _sum)
probs2 = fix_order(probs)
points.append(probs2)
xy = abc2xy(probs2)
plt.scatter(xy[0], xy[1], c='green', marker='s', s=60)
elif args.mtype == 'fact':
J = len(dump['mus']) # number of features
if args.type == "feature":
for j, mu in enumerate(dump['mus']):
_sum = logsumexp(mu)
probs = np.exp(mu - _sum)
probs2 = fix_order(probs)
points.append(probs2)
xy = abc2xy(probs2)
plt.scatter(xy[0], xy[1], c='blue', marker='o', s=60)
else:
for creole in dump['mixlist']:
etas = np.array(creole['etas'])
if args.type == "theta":
                        for j in range(J):
                            fcts = np.zeros(3)
                            for k in range(3):
fcts[k] = dump['mus'][j][k] + etas[k]
_sum = logsumexp(fcts)
probs = np.exp(fcts - _sum)
probs2 = fix_order(probs)
points.append(probs2)
xy = abc2xy(probs2)
plt.scatter(xy[0], xy[1], c='red', marker='.', s=60)
elif args.type == "lang":
_sum = logsumexp(etas)
probs = np.exp(etas - _sum)
probs2 = fix_order(probs)
points.append(probs2)
xy = abc2xy(probs2)
plt.scatter(xy[0], xy[1], c='green', marker='s', s=60)
if args.output:
plt.savefig(args.output, format="pdf", transparent=False, bbox_inches="tight")
# plt.savefig(args.output, format="png", transparent=False, dpi=160)
plt.show()
if __name__ == "__main__":
main()
|
{"hexsha": "79a968e1016a9d367cc677187c1c1ed6f26e9d40", "size": 5422, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/format_apics/simplex.py", "max_stars_repo_name": "murawaki/creole-mixture", "max_stars_repo_head_hexsha": "dfe585f2c8d698b24c022ec0933ce30925410cfe", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/format_apics/simplex.py", "max_issues_repo_name": "murawaki/creole-mixture", "max_issues_repo_head_hexsha": "dfe585f2c8d698b24c022ec0933ce30925410cfe", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/format_apics/simplex.py", "max_forks_repo_name": "murawaki/creole-mixture", "max_forks_repo_head_hexsha": "dfe585f2c8d698b24c022ec0933ce30925410cfe", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9160839161, "max_line_length": 109, "alphanum_fraction": 0.5387310955, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1543}
|
module L1L2
using DataFrames
using LinearAlgebra
using ..DataMod, ..ManualModelMod
export l1l2
# Soft thresholding
function S(x::Float64, λ::Float64)::Float64
if x >= λ
return x - λ
elseif x <= -λ
return x + λ
else
return 0
end
end
S(xs::Vector{Float64}, λ::Float64)::Vector{Float64} = [S(x, λ) for x in xs]
S(xs::Matrix{Float64}, λ::Float64)::Vector{Float64} = S(vec(xs), λ)
# Calculate next weight vector
function nextβ(β::Vector{Float64}, τplus2ϵλ::Float64, λγ::Float64,
τIminusXᵀX::Matrix{Float64}, XᵀY::Vector{Float64})::Vector{Float64}
return S(τIminusXᵀX * β + XᵀY, λγ) / τplus2ϵλ
end
# Calculate max iteration
function lmax(β₁::Vector{Float64}, β₀::Vector{Float64}, κ::Float64,
κ₀::Float64, λ::Float64, ϵ::Float64, ξ::Float64)::Int
# numnum = norm(β₁ - β₀) * (κ + κ₀ + 4 * ϵ * λ)
# numdenom = (2 * κ₀ + 4 * ϵ * λ) * ξ
# denomnum = κ + κ₀ + 4 * ϵ * λ
# denomdenom = κ - κ₀ # THIS GOES WRONG IF κ == κ₀
# num = log(numnum / numdenom)
# denom = log(denomnum / denomdenom)
# return round(Int, num / denom + 1, RoundDown)
return 10000
end
function weight_loop(data::Data, λ::Float64, γ::Float64, ϵ::Float64, ξ::Float64)
X = hcat(ones(Float64, size(data.xmat, 1)), data.xmat)
Ys = Vector{Float64}[data.df[!, y] for y in data.ys]
XᵀX = X'X
τ = norm(XᵀX)
τplus2ϵλ = τ + 2 * ϵ * λ
λγ = λ * γ
τIminusXᵀX = τ * I - XᵀX
XᵀYs = [X'Y for Y in Ys]
β₀ = zeros(length(data.xs) + 1)
βs = [nextβ(β₀, τplus2ϵλ, λγ, τIminusXᵀX, XᵀY) for XᵀY in XᵀYs]
κ, κ₀ = τ, τ # should change
max_iter = maximum([lmax(β, β₀, κ, κ₀, λ, ϵ, ξ) for β in βs])::Int
@inbounds for i in 1:max_iter
βs = Vector{Float64}[nextβ(βs[y], τplus2ϵλ, λγ, τIminusXᵀX, XᵀYs[y]) for y in 1:length(data.ys)]
end
return βs
end
function l1l2(data::Data; λ::Float64 = 0.5, γ::Float64 = 1.0, ϵ::Float64 = 1.0,
ξ::Float64 = 0.001)::Vector{ManualModel}
# λ, γ, ϵ, ξ = (0.5, 1.0, 1.0, 0.001)
weights = weight_loop(data, λ, γ, ϵ, ξ)
return ManualModel.(weights, true, data.ys, data)
end
end
using .L1L2
export l1l2
|
{"hexsha": "271c95439060ad32242a8b026f0f85ce922678e2", "size": 2395, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "incl/fs-l1l2reg.jl", "max_stars_repo_name": "KasperNooteboom/thesis-rvfl-fs", "max_stars_repo_head_hexsha": "31f8ee8ff58da5a8c1f505ef045c35ebbfe91255", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "incl/fs-l1l2reg.jl", "max_issues_repo_name": "KasperNooteboom/thesis-rvfl-fs", "max_issues_repo_head_hexsha": "31f8ee8ff58da5a8c1f505ef045c35ebbfe91255", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "incl/fs-l1l2reg.jl", "max_forks_repo_name": "KasperNooteboom/thesis-rvfl-fs", "max_forks_repo_head_hexsha": "31f8ee8ff58da5a8c1f505ef045c35ebbfe91255", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7323943662, "max_line_length": 108, "alphanum_fraction": 0.5423799582, "num_tokens": 943}
|
\documentclass[12pt]{article}
\title{Modern Parser Combinators in Python}
\date{\today}
\usepackage[sc,osf]{mathpazo}
\usepackage[T1]{fontenc}
\usepackage{microtype}
\usepackage{hyperref}
\usepackage{listings}
% \lstset{language=Python}
\lstset{escapechar=\!}
\usepackage[backend=bibtex8,style=authoryear]{biblatex}
\bibliography{combinators}
\begin{document}
\maketitle
\section{Overview}
\label{sec:overview}
The big idea behind this library is to combine techniques from the
parsing literature, some new, some old and neglected, to create a
library that can compete with hand-written recursive-descent parsers.
The key advantages that hand-written recursive-descent parsers have
over traditional LALR parser generators are simplicity, handling a
broader range of grammars, better error reporting, and easier
introduction of custom code. A well-designed library can have most of
these, too, and some advantages over a recursive-descent parser, at
the cost of an implementation that isn't as simple.
\begin{itemize}
\item Parser combinators make parsers easier to learn and write by
embedding a parsing library into a general-purpose programming
language, making it easy to add custom code to the parser and not
requiring that anyone learn a DSL like EBNF or formal grammars.
\item GLL parsing can parse any CFG, including ambiguous and
left-recursive grammars, in $O(n^3)$ time, and unambiguous grammars
in linear time, all while remaining top-down depth-first like
recursive descent and thus easier to understand and debug.
\item Attribute grammars can allow the insertion of arbitrary code
into the parsing process, principally allowing the parsing of
context-sensitive grammars that a general CFG algorithm can't handle
alone, and with the data-dependent grammar restrictions, parse them
in $O(n^3)$ time. They also provide an easy mechanism for adding
semantic actions.
\item Non-correcting error handling can locate and provide reasonable
messages for possible errors in an input string without
providing any spurious messages.
\item Metaprogramming techniques like macros or staging can make
parser combinators almost as efficient as recursive descent.
\item Attribute grammars can be inverted to allow writing both a
parser and an unparser at the same time.
\end{itemize}
\section{Design Decisions}
\label{sec:design_decisions}
\subsection{Grammars}
\label{sec:grammar}
My goal for these parser combinators is that they should be able to
handle common languages and formats \emph{without} having to write
custom code except for extremely weird cases. LL and LR grammars are
too limited: many natural grammars for common problems are not in
LL($k$) or LR($k$) for any $k$, and LR parsers, which can handle more
grammars for any given value of $k$, are difficult to debug.
Meanwhile, there are now several algorithms capable of parsing any CFG
that can also parse all LL and LR grammars in linear time, giving the
same asymptotic performance as LL and LR parsers while handling a much
broader range of grammars. CFGs also have better closure properties
than LL or LR grammars. They're closed under language composition,
that is, they're closed under union, concatenation, and Kleene star.
Parsing problems with one language embedded in another are becoming
increasingly common, and the ability to write parsers for two
different languages and then combine them is useful. Also, the suffix
language of a CFL is another CFL \parencite[p. 401]{grune_jacobs} and
so is its reverse language, which are useful for producing better
error messages (\ref{sec:errors}).
Unfortunately, CFGs are also not powerful enough to handle common
parsing problems. Real-world programming languages and data formats
are context sensitive: C has the \texttt{typedef-name: identifier}
problem, Python has context-sensitive indentation, real-world HTML has
context-sensitive features like where \texttt{<input>} tags can
appear, and \TeX's DVI files have references to byte offsets in the
file. Thus, a good parsing library needs to be able to handle context
sensitivity. Note that one example often presented as context
sensitive, a field prefixed with a byte giving the field length like a
Pascal string, is actually regular: because a byte can only hold a
finite set of values, it's possible to write a regular expression for
the whole construct like,
\begin{lstlisting}
0 | 1. | 2.. | !\ldots! | 255.{255}
\end{lstlisting}
where \texttt{.} represents any character and a number in curly braces
represents repetition as usual.
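As a sanity check, such an expression can be generated mechanically; here is a minimal Python sketch, assuming a single-byte length prefix and treating the input as a string of symbols:
\begin{lstlisting}
import re

# One alternative per possible length byte: the literal byte chr(n)
# followed by exactly n arbitrary symbols.
pattern = "|".join(re.escape(chr(n)) + ".{%d}" % n for n in range(256))
pascal_string = re.compile(pattern, re.DOTALL)
\end{lstlisting}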
This technique generalizes to any field prefixed with a finite-size
integer and beyond. In any language over a finite alphabet, any
length-type-value array (also type-length-value;
\url{https://en.wikipedia.org/wiki/Type-length-value}) can produce
only a finite language because the set of possible lengths and
possible types are limited. When the production rule for a
length-type-value array is inserted into a grammar producing an
infinite language, the length-type-value array effectively acts as a
terminal because it can only create a finite set of productions, and thus
it can't introduce context-sensitivity unless it's explicitly
introduced elsewhere. If all the integers are single symbols for the
lengths, \texttt{a}, \texttt{b}, \texttt{c}, \ldots are types, and
\texttt{A}, \texttt{B}, \texttt{C}, \ldots are the production rules
corresponding to each type, these can be expressed using the
production rule:
\begin{lstlisting}
S !$\to$! 0a | 0b | 0c | !\ldots! | 1aA | 1bB | 1cC | !\ldots! |
2aAA | 2bBB | 2cCC | !\ldots!
\end{lstlisting}
This kind of use of large but finite productions can handle even some
weird cases like lengths of code that are then treated like their own
languages. For instance, if nonterminal \texttt{A} has length 1
always and \texttt{B} length 2,
\begin{lstlisting}
S !$\to$! 0 | 1A | 2AA | 2B | 3AAA | 3AB | 3BA | !\ldots!
\end{lstlisting}
However, \emph{efficiently} handling these finite production rules is
another can of worms, because they can be exponential in the number of
symbols in the language and thus any DFA for them would be
impractical.
I chose as my context-sensitive grammar the data-dependent grammars of
\textcite{yakker1}, which are an extension of CFGs related to
L-attribute grammars. DDGs have several attractive properties.
First, they're easier to understand and write than the primary
alternatives, PEGs and Boolean grammars, because they use familiar
concepts like variable binding and string recognition based on
semantic values, for instance treating bytes as integers rather than
abstract symbols. While it's often easy enough to write a PEG or a
Boolean grammar for simple languages like $a^nb^nc^n$, something like
a field prefixed with a length in decimal as ASCII digits requires
multiple rules like the above for length-type-value arrays.
Meanwhile, a DDG simply calls a Turing-computable function to
transform the ASCII into an integer. This touches on DDGs' second
major advantage, which is the ability to include arbitrary
Turing-computable code in a disciplined fashion.
\begin{quote}
This lack of enthousiasm in incorporating attribute grammars into
formal language and automata theory may be due partly to the
complexity of the model and partly to the (obvious) fact that
attribute grammars of a very simple type, having only one
synthesized attribute and no inherited attributes, can already
realize all possible translations [18] and can e.g. easily simulate
type 0 Chomsky grammars (cf. [19]). \parencite{1V_AGs}
\end{quote}
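To make the data-dependent step concrete (a length read from the input directing the rest of the parse), here is a minimal Python sketch; all names are hypothetical, and parsers are modelled as functions from an input and a position to a value and a new position, or \texttt{None} on failure:
\begin{lstlisting}
def digits(inp, pos):
    # Recognize one or more ASCII digits.
    end = pos
    while end < len(inp) and inp[end].isdigit():
        end += 1
    if end == pos:
        return None
    return (inp[pos:end], end)

def take(n):
    # Recognize exactly n further symbols.
    def parser(inp, pos):
        if pos + n > len(inp):
            return None
        return (inp[pos:pos + n], pos + n)
    return parser

def bind(parser, make_parser):
    # Feed the semantic value of one parser into the next one.
    def bound(inp, pos):
        result = parser(inp, pos)
        if result is None:
            return None
        value, new_pos = result
        return make_parser(value)(inp, new_pos)
    return bound

# The data-dependent step: the parsed length directs the grammar,
# so length_prefixed('3abc', 0) yields ('abc', 4).
length_prefixed = bind(digits, lambda s: take(int(s)))
\end{lstlisting}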
The power of DDGs makes analyzing them harder but means that they can
parse anything and can incorporate existing specialized parser code
when necessary. Third, they're based on CFG parsing algorithms, which
makes them easier to adapt for one of the existing efficient CFG
algorithms, with the original authors implementing them in particular
for combinators, for Earley, and for GLR \parencite{yakker2}. Fourth,
their languages can be parsed in $O(n^3)$ time in the worst case, or
only $O(n)$ for deterministic languages and $O(n^2)$ for
locally-ambiguous languages. This is a consequence of the single-pass
nature of DDGs: similar to L-attribute grammars, information flows up
and from left to right in the parse tree with no backtracking allowed.
Fifth, the parsing algorithm for DDGs is proved correct assuming the
underlying CFG parsing algorithm is correct. Sixth, DDGs inherit from
CFGs important closure properties, also being closed under union,
concatenation, and Kleene star. A similar construction to the one for
CFGs shows that the suffix language of a DDG can be described by
another DDG, with one wrinkle: the parser always has to take all
options, treating them as alternatives, when encountering a constraint
based on a variable that has yet to be defined.
TODO: work out the reverse language for a DDG and figure out
something to say about it here. Also work out a formal notion of
undefined for attributes: if a constraint tries to access an
undefined attribute, it falls back to parsing all alternatives.  If an
attribute function receives an undefined attribute as an input, it
always returns undefined.
The two major alternatives I considered were PEGs and Boolean
grammars. They both have the disadvantage that to parse arbitrary
Turing-computable properties requires further extending them. The
major advantage of PEGs is that the corresponding parsing algorithm,
the packrat parser, always runs in $O(n)$ time. However, modern CFG
algorithms can parse most CFGs in linear time, and the ones that they
can't are highly ambiguous and only found in a subset of real parsing
problems like parsing DNA. My experiments indicate that it's possible
to parse many CFGs in $O(n)$ time without any memoization, and
experiments in \textcite{packrat_is_it_worth_it} back this up, showing
that no memoization is often close to optimal. With the high
preexisting memory cost of the parse tree, the memory costs of the
packrat parser make running out of memory even more likely. Aside
from the memory issues, PEGs have three other disadvantages. Making a
PEG parser handle left recursion makes the implementation much more
complicated, particularly when dealing with indirect left recursion,
and costs the linear-time guarantee. PEGs can't be composed in the
same way that CFGs can, since PEGs ``are closed under union, but only
under a non-standard definition of what it means for a string to be in
a language (the PEG need only succeed on a prefix of the
string)'' \parencite[p. 4]{yakker1}. This and other problems are due
to the non-commutativity of alternation in PEGs. Similarly, the way
PEGs avoid ambiguity makes it hard to explicitly deal with it where it
naturally arises, for instance in unparsing the AST for a language
that ignores white space or when trying to do error handling using the
suffix language.
Boolean grammars are a promising approach because they share
properties like closure under union, concatenation, Kleene star,
suffix, and reversal. However, there hasn't been enough work yet done
on developing practical parsers for them. Many of the algorithms
known to work for CFGs have not been extended to Boolean grammars. In
particular, neither of the algorithms most friendly for parser
combinators, GLL nor Earley, have been, if they even can be extended.
There's also nothing on other aspects of building a practical parser
like error handling.
\subsection{Algorithms}
\label{sec:algorithms}
TODO: Reach a new decision here.
On closer analysis, there are two major questions about algorithm
performance. First, while the worst-case performance for all general
CFG algorithms is $O(n^3)$, the set of grammars which they can handle
in linear time differs. One notable advantage of Earley's algorithm
is that with Leo's modifications it can parse all LR-regular languages
in linear time \parencites{leo, marpa}, and the set of LR-regular
languages contains the deterministic languages \parencite{lr-regular}.
Leo accomplishes this by memoizing certain cases of right recursion
that would otherwise take $O(n^2)$. It's not clear to me what
grammars GLL can handle in linear time. Scott and Johnstone claim
GLL, ``runs in linear time on LL grammars and which also allows
grammar rule factorisation, with consequential speed
up'' \parencite{gll1}, which could mean that GLL runs in linear time
on all LL($k$) for all $k$, but that set is a proper subset of the
deterministic grammars. However, Spiewak says, ``Thus, the class of
grammars which can be handled by GLL in O(n) time is precisely
equivalent to LL(1)'' \parencite{spiewak}, which is a much smaller set
than all LL($k$) grammars. Both of these sets are much smaller than
the LR-regular grammars. ANTLR3 and ANTLR4 run in linear time on a
set the authors call LL(*) \parencites{antlr3, antlr4}, which I think
is equivalent to LL-regular, more or less by directly using DFAs to
predict which alternative to pick. It's possible the same approach
might work for GLL to allow it to run in linear time on LL-regular
grammars, though LL-regular is a proper subset of the LR-regular
grammars \parencite{ll-regular}.
The second question involves constant factors. There are conflicting
reports in the literature about how different parsing algorithms
compare to each other. The most extensive comparison of modern
parsing algorithms is in \textcite{antlr4}, where the authors
benchmark their ALL(*) algorithm against GLR, GLL, packrat/PEG, and
hand-written recursive-descent implementations as well as some others.
On their test case, Java code, all of the limited parsers were much
faster than the general parsers, by orders of magnitude in some cases,
with Rascal's GLL parser performing particularly poorly. They don't
directly compare their algorithm against an Earley parser but state
that \textcite{tomita1985efficient} ``shows GLR to be 5x-10x faster
than Earley.'' It's unclear where this penalty comes from, and it
should be noted the comparison may no longer be valid because Tomita
compared the algorithms before Leo published his modifications that
make Earley run in linear time on LR-regular grammars. This still
seems to be the general impression about Earley versus GLR, but I
haven't seen any evidence backing it up outside Tomita's work. They
suggest that based on disabling the caching of lookahead DFAs, ``This
performance is in line with the high cost of GLL and GLR parsers that
also do not reduce parser speculation by memoizing parsing
decisions,'' which seems to suggest that part of the problem the
general parsers were experiencing was O($n^2$) performance because of local
ambiguities, not constant factors, while in their testing, ALL(*) ran
in linear time on all of the grammars they tried. [TODO: Is this a
consequence of the general parsers only handling a too-limited set of
grammars in linear time? I don't know what grammars GLR can parse in
linear time or if any of these Java grammars fall into that set.]
However, part of the problem could easily be constant factors, with
even a linear GSS being slower than a standard stack, for instance.
They note, ``Of the GLR tools, Elkhound has the best performance
primarily because it relies on a linear LR(1) stack instead of a GSS
whenever possible. Further, we allowed Elkhound to disambiguate
during the parse like ALL(*).''  As ANTLR generates recursive-descent
parsers, this suggests that a parser combinator approach that uses
metaprogramming could be competitive.
TODO: rewrite this.
The backend parsing algorithm will be GLL modified to include the
DDG/L-AG features. While Valiant's algorithm, which is based on the
CYK algorithm and Boolean matrix multiplication, has the best known
worst-case run time, CYK has worse average-case run time than other
CFG parsing algorithms, and linear-time average-case performance is
more important than sub-cubic-time worst-case performance. The main
reason I favor GLL is that the alternative algorithms with linear-time
average-case performance, GLR and Earley, are both harder to
understand, while GLL is almost like a recursive-descent parser.
GLL's similarities to recursive descent make it much easier to
implement as parser combinators and easier to combine with DDGs/L-AGs
because of the top-down left-right information flow in both GLL and
DDGs/L-AGs.
Earley is the clear runner-up here, since there exist implementations
of it using parser combinators and efficient theoretical developments
of the algorithm that include all the key features I want like SPPF
creation. The original DDG paper \textcite{yakker1} implements DDGs
on top of Earley's algorithm.
As far as I know, there are no parser combinators for bottom-up
parsers like GLR, and bottom-up depth-first algorithms are harder to
understand than both top-down depth-first ones like GLL and
breadth-first algorithms like Earley. The original versions of both
GLR and Earley have theoretical problems with producing parse trees,
and for GLR, fixing those problems has created a plethora of
alternatives. There being no canonical version of GLR is a good
secondary reason to avoid it.
\subsection{Error Recovery}
\label{sec:errors}
For error handling I'm using Richter's non-correcting error recovery.
When parsing binary data, one major problem with finding errors is
that the point where the parser fails is often not the point where the
input or parser went wrong. Non-correcting error recovery brackets a
range where the error may have occurred, making it easier to find
errors. It's also more suited to the kinds of errors in binary data
in general, which are usually not like typos or other small isolated
errors but large blocks of broken data, and to Python's interactive
debugging. It uses the suffix grammar and the reverse grammar of the
original input grammar. While deterministic grammars are not closed
under suffix (I don't know about reverse), CFGs are, and because I'm
using GLL, I can use the same algorithm for both normal parsing and
error-handling.
\subsection{Architecture}
\label{sec:architecture}
The reason to write my own library in Python rather than using
Libmarpa CFFI bindings is to support Python variants. CFFI only works
to allow calls from the interpreter level to C functions, not RPython
to C. Meanwhile, Jython and IronPython don't, as far as I know, have
anything to allow calling into C at all. Writing it in Python with
metaprogramming to convert it to RPython will make it
platform-independent. Also worth noting is that after seeing the
performance benchmarks for Scala parser combinators with macros and
staging, I'm skeptical that it's necessary to write a parsing library
in a low-level language to get good performance, and that the kind of
code that's fast doesn't change much from interpreter to interpreter:
it's all simple, imperative-style code with obvious optimizations.
\section{Implementation Order}
\label{sec:implementation_order}
\begin{enumerate}
\item GLL combinators.
\item Non-correcting error recovery, for debugging.
\item Tests.
\item Performance benchmarks.
\item Macros, staging, or other metaprogramming for performance.
\item Data-dependent grammar functionality, in some as yet TBD order.
\item Unparsing.
\end{enumerate}
\section{Major Implementation Choices}
\label{sec:implementation_choices}
\subsection{GSS Representation}
\label{sec:gss_representation}
\textcite{afroozeh_izmaylova} improved the performance of GLL with a
different representation of the GSS that places information on the
edges as well as the nodes to avoid creation of duplicate nodes and
moves the sets $\mathcal{U}$ and $\mathcal{P}$ from global hash tables
to local hash tables on the nodes. This results in some clear
performance gains, and since there's no real cost---their changes only
move information around---and Python doesn't have a native linked-list
data type that would work for the GSS layout described in
\textcite{gll2}, there's no reason not to use their changes. In their
representation, GSS nodes are labeled with a nonterminal (a class
instance) and an index into the input and have links to their outgoing
edges and two local hash tables associated with $\mathcal{U}$ and
$\mathcal{P}$, the former containing descriptors as a grammar slot
plus an index and the latter containing an index. GSS edges are
labeled with a grammar slot, i.e. a nonterminal/class instance and an
integer representing an index into a nonterminal. Each edge has a
link to a GSS node and an SPPF node.
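A direct transcription of that layout into Python might look like the
following sketch (the field names are mine, not from the paper):
\begin{lstlisting}
class GSSNode(object):
    def __init__(self, nonterminal, index):
        self.nonterminal = nonterminal  # class instance for the rule
        self.index = index              # position in the input
        self.edges = []                 # outgoing GSSEdge objects
        self.U = set()                  # local descriptors (slot, index)
        self.P = set()                  # local pop indices

class GSSEdge(object):
    def __init__(self, slot, target, sppf_node):
        self.slot = slot                # (nonterminal, rule position)
        self.target = target            # destination GSSNode
        self.sppf_node = sppf_node      # SPPF node carried on the edge
\end{lstlisting}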
\subsection{Return an iterator over parse trees or a shared packed
parse forest.}
\label{sec:iterator_sppf}
Spiewak chose the former option for his GLL combinators in Scala, but
it has a couple of problems. First, it involves freezing the parser
state, which means that starting a parse requires creating a new
parser state. This has some performance implications, particularly
with respect to memory, and makes cleaning up afterwards more
difficult, probably demanding some kind of context manager to close
running coroutines. Second, it means that changing the implementation
of the parse forest almost certainly involves changing the
implementation of the parser itself because the parser state is
entangled with its output. Third, iterating over subtrees as well as
alternatives trees will require nested iterators, i.e. an iterator
that returns another iterator, which gets confusing and requires some
doubly-suspended state. Grune and Jacobs bring up another problem in
what they call the producer-consumer model: anyone wanting to use the
parse trees will have to analyze multiple trees to figure out how they
differ to figure out what to do with them. (Note that it's easy to
build an infinite iterator so that method should be able to handle
infinitely-ambiguous grammars. Avoiding analyzing tree-by-tree will
require users to understand the SPPF and a good API for analyzing it
without generating trees.) They point out that coroutines can reduce
some of these problems by putting the parser and the consumer of the
parse tree on an equal footing. Moura and Ierusalimschy (2009,
"Revisiting coroutines") show that asymmetric coroutines are as
powerful as symmetric ones, so it ought to be possible to implement
this in Python, but I suspect the implementation would involve
creating a trampoline that passes data between the producer and
consumer coroutines, and I'm not sure if the added complexity is
worthwhile, especially given it would involve a performance hit.
Thus, I'm running with the SPPF. Initially, I'm going to implement
Tomita's worst-case unbounded-polynomial version rather than Scott and
Johnstone's binarized worst-case cubic version because binarization
will make the resulting parse forest harder to understand and work
with, and I doubt anything this library will ever process will
approach the worst case because that should only happen with
pathologically-ambiguous grammars. However, I should make sure to
encapsulate the SPPF so if I need to binarize the implementation later
I don't need to change anything else.
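Concretely, the Tomita-style layout reduces to symbol nodes that carry
one list of children per derivation, roughly as in this sketch (the
names are mine):
\begin{lstlisting}
class SPPFNode(object):
    def __init__(self, label, start, end):
        self.label = label      # symbol this node derives
        self.start = start      # left extent in the input
        self.end = end          # right extent in the input
        self.families = []      # one tuple of children per derivation

    def add_family(self, children):
        # Sharing: an identical derivation is stored only once,
        # and a node with several families is an ambiguity.
        if children not in self.families:
            self.families.append(children)
\end{lstlisting}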
\subsection{Tree and SPPF API}
\label{sec:tree_sppf_api}
With a traditional recursive-descent parser, the definitions of the
nonterminals in the grammar and their corresponding functions provide
natural nodes for the parse tree. In a parser built from combinators,
however, because the combinators don't intrinsically have nodes, it's
not clear how to build the parse tree. As far as I can tell,
traditional parser combinators always use the sequence combinator to
build the tree out of nested lists/tuples, either by having it build a
flat list/tuple out of two or more alternatives like Construct's
Sequence (Struct, which makes an ordered map, is a variation on this)
or nested binary lists/tuples, like Spiewak's combinators.  Other
non-monadic combinator implementations follow the same general
approach: Hughes in \emph{Generalizing Monads to Arrows} observes,
\begin{quote}
For example, it is fairly clear that a library for parsing should
include a combinator to invoke two parsers in sequence, but there
are many possible ways in which such a combinator might handle the
two parsers' results. In some early parsing libraries the two
results were paired together, in others the sequencing combinator
took an extra parameter, a function to combine the results.
\end{quote}
The clearest explanation I've found of the relationship between this
traditional sequence combinator and monadic combinators is in Vegard
\O ye's article (2012, \url{https://github.com/epsil/gll}): they
define an operation bind (this is the monadic bind) that takes a
parser and a function, applies the parser to the input, applies the
function to a success to get another parser, and then applies the
resulting parser to the success to get the final output. They then
define the sequence combinator in terms of a double bind, passing the
list constructor as the function to bind; for a sequence combinator
with more than two elements, they use fold/reduce with list-append to
get a flat list rather than nested lists from one combinator. The
Hutton and Meijer paper (\emph{Monadic Parser Combinators}) also shows
how to build a sequence combinator using bind and a concatenation
function for lists, in this case specifically to avoid nested tuples.
While the standard approach to defining monads, settled on by Haskell
and seemingly imitated by everyone else, is to define a monad in terms
of return (sometimes called result) and bind, one can also define
monads in terms of three functions, return, join, and map (sometimes
fmap), and then define bind in terms of join and map.
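Under the same assumptions as the sketch above, that alternative
formulation looks like this (\texttt{fmap} to avoid shadowing the
built-in \texttt{map}):
\begin{lstlisting}
def fmap(function, parser):
    def mapped(stream, index):
        for value, new_index in parser(stream, index):
            yield function(value), new_index
    return mapped

def join(parser_of_parsers):
    def joined(stream, index):
        for inner, new_index in parser_of_parsers(stream, index):
            for result in inner(stream, new_index):
                yield result
    return joined

def bind(parser, function):
    return join(fmap(function, parser))
\end{lstlisting}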
Writing monadic (or arrow-style) parsers in Python would be stupid, so
I need a sequence combinator and will follow this universal (as far as
I know) practice and define the nodes in the tree using it.  I can let
it take a constructor with which to build the parse tree (supplying a
sensible default), or expect that standard usage involves
transforming the parse tree with semantic actions.  While monadic bind
combines the choice of parse tree with the sequence combinator, my
understanding is that separating join and map separates the two, so
having a semantic action combinator following a sequence combinator
can simulate any choice of bind for monadic combinators.
Irrespective of the internal representation, I also need to figure out
the interface the SPPF presents to the users. The SPPF itself is hard
to understand so a direct implementation would make for a bad API. My
best idea for handling this is to build a tree-centric API where users
interact with the SPPF as if it was a collection of trees. One
obvious problem after creating the SPPF is how to allow user code to
examine individual trees. Luckily, this problem has an obvious
solution: instead of returning a copy of an individual tree, I can
return an object that contains references to the correct nodes in the
SPPF. However, implementing anything more than simple views on the
SPPF is \emph{hard}. I still haven't come up with a better idea for
the tree API itself than Construct's: because trees are a recursive
data structure, allowing recursive references using Python's []
addressing provides a natural API.
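A minimal sketch of what such a view might look like, assuming the
SPPF is a mapping from node labels to lists of child labels; all the
names here are hypothetical:
\begin{lstlisting}
class TreeView:
    # A view of one tree: references into the SPPF, not copies.
    def __init__(self, sppf, label):
        self._sppf = sppf
        self._label = label
    def __getitem__(self, index):
        # Recursive [] addressing, as in Construct: each index
        # selects a child and returns another view into the SPPF.
        return TreeView(self._sppf, self._sppf[self._label][index])
\end{lstlisting}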
That said, for users to deal with ambiguity in any efficient manner
will probably require users to both understand the SPPF and have
direct access to it; I have no idea how to implement such an API or to
integrate it with the hopefully-simpler tree-centric API.
How semantic actions interact with the SPPF presents some thorny
issues.  Semantic actions are essential: aside from being needed to
emulate the power of monadic combinators, they're required for
directing parsing with previously-parsed input (essential in many
common parser tasks like ignoring whitespace, and one of the better
reasons for using a top-down parser in the first place), and they may
matter for performing disambiguations while the parser is running.  Because
I can't figure out how to transform the SPPF as if it was a collection
of trees, though, without brute force and maybe not even then, I still
haven't settled on how to set semantic actions up. I have some
incompletely-realized partial ideas:
\begin{itemize}
\item Higher-order functions on trees: filter, map, reduce (see the
  sketch after this list).
\item A higher-order function that converts a function acting on trees
to a function acting on SPPFs. I don't know how to write it, though.
\item The visitor pattern and other kinds of traversals with functions
acting on the tree/SPPF.
\end{itemize}
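As a sketch of the first idea, here's map over a single tree, assuming
the dict-of-lists representation discussed in the next subsection and
taking the leaf test as a parameter:
\begin{lstlisting}
def tree_map(function, sppf, label, is_leaf):
    # Build a new plain tree; the shared SPPF itself is untouched.
    node = sppf[label]
    if is_leaf(node):
        return function(node)
    return [tree_map(function, sppf, child, is_leaf)
            for child in node]
\end{lstlisting}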
\subsection{Tree and SPPF representations}
\label{sec:tree_sppf_representations}
Traditional parser combinators don't provide any natural node labeling
because of the absence of labeled nonterminals. A tree representation
based on high-level sequences and ordered mappings, in Python
lists/tuples and OrderedDicts, doesn't need these node labels, and
it's possible to implement combinators that return trees or an
iterator over trees without them. However, for a shared packed
forest, labels are required for implementing subtree sharing because
without them, combinators in different branches of the parse traversal
have no way to know when they've generated the same subtree.
Moreover, GLL needs labels to merge different branches of the parse in
the GSS, so while it's possible to omit node labels altogether in a
traditional recursive-descent parser, any GLL parser needs labels.
There's a natural labeling scheme for parse trees based on
partitioning the input that Scott and Johnstone use but never
explicitly explain. Each terminal or uninterrupted sequence of
terminals has a starting and ending offset so the corresponding leaf
node can be labeled with (terminals, starting\_offset,
ending\_offset). The leaf nodes read in offset order must cover the
entire input, and in fact reading them in order in the absence of
semantic actions will return the original input. The rest of the tree
nodes represent partitions further subdivided into subpartitions
labeled with nonterminals, with the root node corresponding to
(start\_symbol, 0, length(input) - 1) (zero-indexed). Because parser
combinators don't have nonterminal labels, Spiewak uses parser
identity instead. Like Spiewak, I intend to use parser combinator
class instances as node labels in GLL. Note that generator objects
can't be used for this, since a new generator object is created every
time a generator function is called, and neither can the method or
code objects, because they're shared by all instances of the same
parser combinator class.
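Concretely, a combinator can use itself plus the two offsets as a node
label; a sketch in the style of the earlier ones:
\begin{lstlisting}
class Terminal:
    def __init__(self, token):
        self.token = token
    def parse(self, stream, index):
        end = index + len(self.token)
        if stream[index:end] == self.token:
            # The instance itself is part of the label, so two
            # different Terminal('a') objects make distinct nodes.
            yield (self, index, end), end
\end{lstlisting}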
Both trees and forests are fundamentally directed graphs, normally
acyclic but in the case of infinitely-ambiguous grammars cyclic. In
Python, one possible graph representation is a dict of lists, where
the lists contain the labels of other nodes. By using the labels
discussed above as the labels for the nodes, it's possible to
represent a parse tree as a digraph where the values of leaf nodes are
Python objects representing terminals and the values of non-leaf nodes
are lists of either pairs of integers, the direct labels for other
nodes, or single integers representing the subpartitions for that
node. Eliding the terminal and nonterminal symbols for clarity, for
instance:
\begin{lstlisting}
(1, 8) : [(1, 2), (2, 5), (5, 8)]  # explicit offset pairs
(1, 8) : [2, 5]                    # partition points only
\end{lstlisting}
The superfluous integers correspond to the "repeated dimensions" that
Johnstone and Scott describe in \emph{Modelling GLL Parser
Implementations} (2010). The latter representation obviously takes
less memory but node operations will be slower because correct
offset-pairs will have to be generated from the partitions when
they're needed.
The SPPF can have almost an identical representation to the parse
trees themselves because it's also fundamentally a digraph. There are
only two differences: nodes can have more than one parent,
corresponding to subtree sharing, and there has to be a way to
distinguish packed nodes from normal nodes because they have different
meanings. In the binarized SPPF, Johnstone and Scott define normal
nodes using two offsets, called "left extent" and "right extent", and
packed nodes using a single integer, called "pivot." In the
partition representation any node with only two children can be
represented with a single integer, the division between the subtrees.
A packed node is the result of combining at least two nodes, so a
minimal binarized packed node looks like:
Original nodes:
\begin{lstlisting}
(0, 4): [(0, 1), (1, 4)]
(0, 4): [(0, 3), (3, 4)]
\end{lstlisting}
Packed node:
\begin{lstlisting}
(0, 4): [((0, 3), (3, 4)), ((0, 1), (1, 4))]
\end{lstlisting}
Binarized nodes:
\begin{lstlisting}
(0, 4): [Packed(1), Packed(3)]
Packed(1): [(0, 1), (1, 4)]
Packed(3): [(0, 3), (3, 4)]
\end{lstlisting}
However, I don't understand the relationship between Scott and
Johnstone's partition model and parses that don't consume all the
input. The natural way to implement nondeterminism leads to returning
all incomplete parses as well as the complete parses. Both Spiewak
and Hutton and Meijer mention this natural ambiguity.
\begin{quote}
It is also worth noting that we do not allow \texttt{Success}(es)
which have failed to consume the entire input stream. We will
actually receive a \texttt{Success} every time a \texttt{Parser}
reduces successfully. While this is technically correct (reflecting
the ambiguity between greedy and non-greedy matching), it is almost
never the desired semantics. (Spiewak)
\end{quote}
Scott and Johnstone's constructions of parsers from recognizers seem
to rely on greedy matching, because their partitioning scheme only
works with matches of the full input. They also restrict packing to
nodes that share the same partition, ``Nodes can be packed only if
their yields correspond to the same portion of the input string''
(Scott, \emph{SPPF-Style Parsing from Earley Recognisers}). This
doesn't seem to work in cases with partial/non-greedy matching because
every nontrivial node will have partial matches that don't cover the
same partition.
A further observation that may or may not be related is that their
node labeling scheme seems to include unneeded information. A given
parser started at the same position should always produce the same
output, so all that's needed for a unique node label is (parser label,
starting index into the input). In fact, in one of their early papers
(\emph{BRNGLR: a cubic Tomita-style GLR parsing algorithm}), they use
this labeling scheme, rather than the later three-element labels. I
don't understand why, and if or how this might be connected with
greedy matching. A final note on the greedy-matching issue is
that the obvious implementation of nondeterminism applied naively
leads to the wrong recognizer, because a partial match will report
success even if the whole string is not matched.
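With that naive implementation, the fix is an explicit filter at the
top level that keeps only the results consuming the entire input; in
the conventions of the earlier sketches:
\begin{lstlisting}
def recognize(parser, stream):
    return any(index == len(stream)
               for value, index in parser(stream, 0))
\end{lstlisting}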
In the aforementioned paper, they use a table-based representation
for all the data structures, including the SPPF. Following the data
structure refinement process outlined there, the declarations for the
SPPF are:
\begin{verbatim}
SPPFSymbolNode (SPPFLabel s, Int leftExtent, Int rightExtent)
SPPFPackNode (SPPFLabel s, Int pivot)
SPPF (SPPFSymbolNode symbolNode, SPPFPackNode packNode, SPPFSymbolNode left, SPPFSymbolNode right)
SPPF sppf
\end{verbatim}
Substituting:
\begin{verbatim}
sppf ((*symbolNode) SPPFLabel, Int, Int)
     ((*packNode) SPPFLabel, Int)
     ((*left) SPPFLabel, Int, Int)
     ((*right) SPPFLabel, Int, Int)
)
\end{verbatim}
The left extent for the left node is the same as the parent's, the
left node's right extent is the same as the left extent of the right
node,
and the right extent for the right node is the same as the parent's.
The SPPFLabel for the pack node and the parent node are also the same.
\begin{verbatim}
sppf ((*symbolNode) SPPFLabel, Int, Int)
     ((*packNode) Int)
     ((*left) SPPFLabel, Int)
     ((*right) SPPFLabel)
)
\end{verbatim}
This still seems like too much: it's a seven-dimensional table with
four dimensions indexed by integers, which as they note in the paper
is too large.
The clearly-fastest implementation with the best encapsulation is to
use Cython to write a specialized SPPF object, with the exact methods
needed for building it, with an appropriate internal representation,
possibly based on the ideas from the \emph{Modeling GLL
Implementations} refinement discussed above. All other approaches
will require methods implemented in Python (which will be slow) to do
the necessary transformations. To achieve anywhere near acceptable
performance, I have to use native Python data structures or C/C++
extensions. Any encapsulation has a performance overhead because I
won't be able to use comprehensions to speed up object creation, but
beyond that full encapsulation from the parsers will cost more speed
because they will have to call methods in Python. On the whole, I'm
convinced enough of the advantages of encapsulation, particularly the
possible need to rewrite the whole thing in Cython, that the first
version will involve partial encapsulation with methods written in
Python for the user API and hard-coding the parsers. On that point,
there's very little reason, as far as I can tell, to expose the
internal node labels to the user.
There's one other possible representation of the SPPF, about which I
know little, called parse-forest grammars, introduced on pp. 91-93 of
Grune and Jacobs. They have production rules labeled almost
identically to Johnstone and Scott's SPPF, a nonterminal, a starting
position, and a length (which is isomorphic to using the ending
position). I personally don't see how any of the supposed advantages
Grune and Jacobs list for them are good for writing practical parsers
and the API they'd provide would be very unintuitive for people used
to conventional parsers, but they're another possible internal
representation.
\subsection{Immutability vs. mutability}
\label{sec:immutable_mutable}
There are two different approaches to the data flow through the
parser: the functional-immutable style, where functions (or in this
case, coroutines) create objects and return them, and the mutable-state
style, where a mutable object is passed into a function or coroutine,
which then mutates it and returns nothing.  The choice of which style
to use can be made on a per-object basis, but because in most
languages including Python functions can only return one object, and
in the case of Python coroutines can only yield and accept through
send() one object, functions that need to return more than one object
have to aggregate all the objects they need to return into one object.
Handling the aggregation and disaggregation for parsers is an example
of the monadic pattern, as even recognizer combinators need to return
a Boolean representing whether a given input is in the language
generated by a grammar and also a stream object representing the
truncation of its input. Real parsers always need to return the same
Boolean, a stream, and a parse tree. Other patterns are possible:
Construct uses a Python stream, which is a mutable object, and thus
doesn't need to return a stream. An empty parse tree or some kind of
null result (None in Python) can represent failure, but this is a bad
idea since it provides no information about where the failure occurred
or what happened.
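A sketch of the aggregate result objects, carrying position
information so a failure says where it happened; the names are mine:
\begin{lstlisting}
import collections

Success = collections.namedtuple('Success', 'tree index')

class Failure(collections.namedtuple('Failure', 'message index')):
    def __bool__(self):
        # Results can then be tested directly for success.
        return False
\end{lstlisting}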
In traditional object-oriented recursive-descent parsers, the whole
parser would be one class and mutable objects could be handled as
shared state. As it is, since my combinators are classes, I have to
pass mutable objects instead. This isn't the worst thing in the world
since passing objects will avoid self look-ups, helping performance,
and is explicit rather than implicit. Several of the implementations
I've looked at do similar things. Spiewak's GLL combinators use a
functional-immutable style for the tree, returning a list in the
simplified implementation and a stream/iterator in the real
implementation, but an OO class for the trampoline. Scott and
Johnstone's example GLL implementation uses an OO class for the parser
with a mutable shared SPPF. Jim and Mandelbaum's transducers for
data-dependent grammars also use a mutable SPPF.
For GLL, the combinators could potentially return success/failure, the
GSS, the SPPF, the stream, and the pointer. Mutable objects only need
to be passed in when starting a new coroutine instance and don't need
to be yielded or passed via send() because every coroutine will
already have access to them. The advantage of mutability is speed,
and its main problem is that it always has more potential for creating
hard-to-isolate bugs. Unfortunately, Python is simply not designed
for the functional-immutable style and obviously has no optimizations
in the compiler/interpreter for handling immutable object creation for
non-built-ins. Thus, the functional-immutable style with an
encapsulated Python object will be many times over too slow.
(\url{http://www.valuedlessons.com/2008/03/why-are-my-monads-so-slow.html}
gives some numbers suggesting how much too slow.)
For the SPPF, the only way to implement sharing in a functional
fashion is to pass a memoized cache and discard it after building the
SPPF. Otherwise, combinators in different branches of the parse won't
be able to share subtrees. However, a memoized cache itself is most
of the way to an SPPF, so there's no real reason to use the former in
place of the latter. The SPPF is so large (I'm estimating a factor of
>100 times over the input, and that's optimistic) that recreating it
in every parser is wildly impractical, so it has to be mutable.
However, there's a further problem with building the SPPF as a mutable
object passed among combinators that Jim and Mandelbaum describe in
\emph{Delayed Semantic Actions in Yakker}: nodes that get added to the
SPPF during branches of the parse that eventually dead-end will
continue to exist in the final SPPF. The solution they use in OCaml
is to use a weak hash table. The best option in Python is similarly
to use a weak dictionary. This creates several knock-on effects.
First, strong references to the nodes in the SPPF have to be stored in
the SPPF \emph{somewhere} or else the whole structure will get
garbage-collected, and the only reasonable place to put them is in the
nodes themselves, thus a weak dictionary of objects that hold
references to other nodes. Second, most Python built-in types and
subclasses thereof are not weak-referencable in CPython. Subclasses
of lists and dicts can be weak-referenced, while lists and dicts
themselves can't. (None of this behavior is defined for all
implementations, though PyPy seems to follow CPython here.) Changing
from subclasses of tuple to list has memory and speed penalties, but
the alternative to using Python's weakref module is doing manual
object destruction myself, and that's just awful. Third, to keep
nodes alive while building the SPPF I need strong references outside
it, which means the combinators need to return strong references.
Fourth, terminals have to be enclosed in some kind of
weak-referencable proxy object.
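Putting those pieces together, a sketch (with \texttt{Node}
subclassing \texttt{list} purely so instances can be weak-referenced):
\begin{lstlisting}
import weakref

class Node(list):
    # A list subclass: unlike list itself, its instances can be
    # weakly referenced in CPython.
    pass

class SPPF:
    def __init__(self):
        # Nodes that no live parse branch references any more are
        # garbage-collected out of the forest automatically.
        self._nodes = weakref.WeakValueDictionary()
    def node(self, label, children=()):
        try:
            return self._nodes[label]
        except KeyError:
            node = Node(children)  # strong refs to children live here
            self._nodes[label] = node
            return node            # the caller must hold this reference
\end{lstlisting}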
That said, while the SPPF has to be mutable, the nodes making it up can
be immutable. There are advantages and disadvantages to both
approaches.
\begin{itemize}
\item Mutable nodes make it easier to transform trees and reduce the
memory overhead of transformation operations since they avoid
copying. How common is it to need to transform a parse tree,
though? For writing a compiler/interpreter or a converter for a
binary data format, for instance, the parse tree is used to build
another kind of object, not mutated in-place.
\item Immutable nodes prevent a broad class of potentially
hard-to-find bugs related to mutating a tree in one node in an SPPF
and causing changes that propagate to other trees incorrectly. This
is a particular problem with semantic actions where users shouldn't
need to understand the particulars of the parsers or the SPPF.
\item Immutable objects could provide memory advantages, but Python's
implementation makes this difficult. There's no built-in immutable
mapping type, and tuples can't be weak-referenced. To get the
memory savings, I'd either have to give up speed by using
composition and methods implemented in Python to enclose tuples in a
weak-referencable object or use Cython to implement my own types
in C.
\item Immutable containers are hashable, which is essential for any
kind of elegant way of handling which packed nodes to traverse in a
tree view of an SPPF and useful for implementing packed nodes as
frozensets, gaining the automatic ability to avoid duplicates when
packing.
\end{itemize}
Spiewak passes the trampoline that contains the GSS as a mutable
object for ``convenience and efficiency,'' and I'm leaning towards
following his lead. For parsing, I'm passing the stream as an
immutable object. That leaves only success/failure, the pointer, and
a node reference as immutable objects I need to create in each combinator.
\begin{itemize}
\item GSS: mutable, passed
\item SPPF: mutable, passed
\item Stream: immutable, passed
\item Success/Failure: immutable, returned
\item Stream pointer: immutable, returned
\item Node: immutable, stored in SPPF and returned
\end{itemize}
There is one other possible way to circumvent the bad performance of
the functional-immutable style, which is to use compilation/code
generation to eliminate the intermediate data structures. This may
turn out to be the best option, and in the end is almost certain to be
the fastest, especially if I go all the way to a two-stage compiler
that compiles Python to Cython and Cython to C. At the moment, I
don't understand it well enough to enumerate its advantages and
disadvantages.
\subsection{Outer-Loop Trampoline}
\label{sec:trampoline}
In Python, the trampoline has to run as the outermost loop and call
the coroutines because Python coroutines are asymmetric, they can only
return control to their callers with yield. If I instead try to pass
the trampoline through the combinators, there's no way for them to
pass control to the trampoline. Conveniently, GLL is organized with a
dispatch function such that every parser returns control to it after
finishing execution, which is my trampoline.
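Stripped of all GSS and SPPF bookkeeping, the shape of that loop looks
something like this sketch (real GLL descriptors are four-tuples
carrying a GSS node and an SPPF node as well):
\begin{lstlisting}
import collections

def run(start_slot, stream):
    pending = collections.deque([(start_slot, 0)])
    done = set()
    while pending:
        descriptor = pending.popleft()
        if descriptor in done:  # never process a descriptor twice
            continue
        done.add(descriptor)
        slot, index = descriptor
        # Each parser runs until it needs to descend or return, then
        # hands control back here with the descriptors to explore.
        pending.extend(slot(stream, index))
\end{lstlisting}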
\subsection{Yield from}
\label{sec:yield_from}
The main problem with "yield from" is that it's only available on
Python 3. Also, experimentation suggests that as of 3.4, using "yield
from" still hits the maximum recursion depth which means that "yield
from" still adds a stack frame for each call, and Python has a small
stack limit. On the other hand, without "yield from", Python's
coroutines are not stackful (see Moura 2009, "Revisiting Coroutines"),
which means they may not be as powerful as one-shot continuations and
might not be powerful enough for GLL. That said, I know that "yield
from" is internally implemented with a trampoline, and the existence
of Eby's "fiendishly clever" trampoline implementation for Python 2
suggests it ought to be possible to use a trampoline to convert Python
2's stackless coroutines into stackful coroutines by manually
implementing a stack. I also might end up needing a trampoline
anyways to handle the graph-structured stack, in which case the
advantage offered by "yield from" may be diminished.
My tentative analysis is that "yield from" simply isn't useful for
GLL. GLL's dispatch stack contains information about the GSS and SPPF
as well as which parser needs to resume control, so replacing the
stack with "yield from" isn't going to get me very far since I'll
still need another stack to hold the GSS and SPPF information, even if
it's possible to handle the dispatching without that added
information. I suspect that it's either impossible to use "yield
from" with GLL or that the added complexity from trying to integrate
the two would completely negate any performance or simplicity gains.
This, combined with the lack of "yield from" in 2.7, means I'm not
going to try to use it.
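For reference, a sketch of the kind of trampoline in question: nested
generators run on a manual stack, which is roughly what "yield from"
does internally.  By convention, yielding a generator ``calls'' it and
yielding a plain value ``returns'' it to the caller; none of this is
Eby's actual code.
\begin{lstlisting}
def trampoline(root):
    stack, value = [root], None
    while stack:
        try:
            yielded = stack[-1].send(value)
        except StopIteration:
            stack.pop()
            value = None
            continue
        if hasattr(yielded, 'send'):
            stack.append(yielded)  # descend: "call" the coroutine
            value = None
        else:
            stack.pop()            # unwind: "return" to the caller
            value = yielded
    return value
\end{lstlisting}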
\subsection{Indices versus Slicing}
\label{sec:indices_slicing}
With Python 2.7+'s memoryviews, it's possible to slice the stream
without copying, embedding slices in the parse forests passed between
combinators. The main disadvantage of this approach is that it means
the combinators can't handle text, particularly Unicode, since there's
no equivalent for Python strings. There are also three other factors
to consider: all of the Python library functions (string methods,
struct, re, and so on) take an optional argument that's a starting
index anyways, there's no equivalent stringview object (I'd have to
write one), and indices provide natural labels for the GSS and SPPF
(and memoryviews are only hashable in 3.3+, so they can't take over
that role in 2.7). Primarily because of the last two considerations,
I'm going with indices.
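The index-passing style stays terse precisely because the library
functions cooperate; a sketch in the conventions used earlier:
\begin{lstlisting}
def literal(token):
    def parse(stream, index):
        # startswith takes a start index, so nothing is sliced or
        # copied; this works for both str and bytes.
        if stream.startswith(token, index):
            yield token, index + len(token)
    return parse
\end{lstlisting}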
To my surprise, when I profiled indices versus string slicing
directly, string slicing was slower than indices on both CPython and
PyPy 3, though as expected the indexed version consumed much, much
less memory on CPython. The extra time seemed to be spent in the
hash-heavy functions, so this might have something to do with
CPython's optimizations for hash tables with string-only keys. I
still don't understand why the cost of additional allocations didn't
overwhelm that, but this is why we profile. However, the memory
consumption issue and aforementioned considerations mean I'm sticking
with indices.
\subsection{Module for handling bits}
\label{sec:bits_module}
The main module implemented in C for bit arrays/bit vectors is
\href{https://github.com/ilanschnell/bitarray}{bitarray}
(\href{https://pypi.python.org/pypi/bitarray/}{PyPi}). I don't know
how hard it will be to make this work with PyPy, but I'm going to give
it a shot. The main two pure-Python implementations are
\href{https://engineering.purdue.edu/kak/dist/BitVector-3.3.2.html}{BitVector}
(\href{https://pypi.python.org/pypi/BitVector/3.3.2}{PyPi}) and
\href{https://code.google.com/p/python-bitstring/}{bitstring}
(\href{https://pypi.python.org/pypi/bitstring/3.1.3}{PyPi}). For the
prototype, I'm going with bitstring but will return to bitarray once I
look at performance optimizations.
\subsubsection{bitarray}
\label{sec:bitarray}
This ought to be the fastest because it's implemented in C, but by the
same token, it will probably pose the biggest compatibility problems,
particularly with PyPy. The API is clean and mimics the standard
sequence types, but has the major disadvantage of having no offset
option in any of the constructors, which would require copying without
using memoryview, and some imperfections in the handling of
conversions (it has a tostring() method, but I don't know if calling
str() works, for instance). There's no immutable bitarray type, which
would mean that I'd have to make my own. Bitarray is packaged for
Ubuntu.
\subsubsection{bitstring}
\label{sec:bitstring}
Implemented in pure Python, this ought to be compatible with
everything.  There's also a Cython version, though from looking at it,
it doesn't seem to have many optimizations over the pure-Python version.
The API includes mutable and immutable types and supports reading
from a binary object at an arbitrary offset, though obviously it doesn't
support the buffer protocol, and works the way it should with respect
to functions like str(). On the whole, it has clearly the best API.
\subsubsection{BitVector}
\label{sec:bitvector}
Also implemented in pure Python, it ought to have similar
compatibility to bitstring. The API has the same problem as
bitarray's, no reading binary data at an offset, with the additional
disadvantage of not supporting the buffer protocol. Moreover, the API
as a whole doesn't resemble that of the standard sequence types. The
only advantage of this module is that it has built-in methods for
certain kinds of advanced operations on bit arrays, but that's not
that important.
\subsection{Operator Overloading Dispatch for the Combinators}
\label{sec:operator_overloading_dispatch_combinators}
Operator overloading requires type dispatch: even the simplest
possible operator overload has to distinguish between types it accepts
and those it doesn't. Setting up operator overloading for the
combinators requires more complicated type dispatch. I wanted to try
to do this with \texttt{functools.singledispatch()} but the
implementation details of Python make this a bad solution. When
applying the decorator to a function defined in a class body, the
function will always receive as its first argument the instance
calling it. The author of singledispatch suggests
(\href{http://lukasz.langa.pl/8/single-dispatch-generic-functions/}{What
single-dispatch generic functions mean for you}) using the decorator
on a function after it's been bound as a method to circumvent this:
\begin{lstlisting}
class C:
def __init__(self):
self.method = functools.singledispatch(self.method)
self.method.register(Type, self.implementation)
\end{lstlisting}
A function wrapping a method only receives the arguments that
\emph{aren't} the instance, so it's possible to dispatch usefully on
its first argument.
\begin{lstlisting}
>>> def f(func):
... def g(*args):
... print(args)
... func()
... return g
...
>>> class C:
... def __init__(self):
... self.method = f(self.method)
... def method(self):
... pass
...
>>> c = C()
>>> c.method(1, 2, 3)
(1, 2, 3)
\end{lstlisting}
This direct approach doesn't work for operator overloading, however,
because operators are special methods and special methods are looked
up on the class, not the instance: ``For custom classes, implicit
invocations of special methods are only guaranteed to work correctly
if defined on an object's type, not in the object's instance
dictionary''
(\url{https://docs.python.org/3/reference/datamodel.html#special-method-lookup}).
The way to circumvent this is to have the special method delegate to a
normal method and then wrap that method with
\texttt{functools.singledispatch()}:
\begin{lstlisting}
class AbstractBase:
def __init__(self) -> None:
self._op = functools.singledispatch(self._op)
self._rop = functools.singledispatch(self._rop)
def __op__(self, other: 'AbstractBase') -> 'AbstractBase':
return self._op(other)
def _op(self, other: 'AbstractBase') -> 'AbstractBase':
return NotImplemented
def __rop__(self, other: 'AbstractBase') -> 'AbstractBase':
return self._rop(other)
def _rop(self, other: 'AbstractBase') -> 'AbstractBase':
return NotImplemented
\end{lstlisting}
To avoid code duplication for noncommutative operations, however,
there has to be another call to a function/static method that supports
having its arguments reversed. The following are two classes that
together implement a kind of set of sets monoid with the dispatching
necessary for the operations. The key observation is that almost all
the work is being done in the staticmethods
\texttt{\_op\_collection\_collection()} and \texttt{\_op\_atom\_atom()}.
The \texttt{\_op\_atom()} and \texttt{\_rop\_atom()} methods on
\texttt{Collection} need to set up calls to \texttt{Collection}
(written as \texttt{type(self)(other)} to avoid hard-coding) and
\texttt{\_op\_collection\_collection()} correctly, and one can argue if
there's content here. The vast majority of this code is boilerplate,
though.
\begin{lstlisting}
class Collection(AbstractBase):
def __init__(self, collection: 'Any', *args: 'Atoms') -> None:
super().__init__()
        self._op.register(Collection, self._op_collection)
        self._op.register(Atom, self._op_atom)
        self._rop.register(Atom, self._rop_atom)
        self._rop.register(Collection, self._rop_collection)
self._collection = collection(*args)
def _op_collection(self, other: 'Collection') -> 'Collection':
return self._op_collection_collection(self, other)
def _rop_collection(self, other: 'Collection') -> 'Collection':
return self._op_collection_collection(other, self)
def _op_atom(self, other: 'Atom') -> 'Collection':
return self._op_collection_collection(self, type(self)(other))
def _rop_atom(self, other: 'Atom') -> 'Collection':
return self._op_collection_collection(type(self)(other), self)
    @staticmethod
    def _op_collection_collection(left: 'Collection', right:
                                  'Collection') -> 'Collection':
        # A static method has no self; operate on the two arguments.
        return left._collection.op(right._collection)
\end{lstlisting}
\begin{lstlisting}
class Atom(AbstractBase):
def __init__(self) -> None:
super().__init__()
self._op.register(Atom, self._op_atom)
self._rop.register(Atom, self._rop_atom)
def _op_atom(self, other: 'Atom') -> 'Collection':
return self._op_atom_atom(self, other)
def _rop_atom(self, other: 'Atom') -> 'Collection':
return self._op_atom_atom(other, self)
@staticmethod
def _op_atom_atom(left: 'Atom', right: 'Atom') -> '(Atom, Collection)':
if oppable(left, right):
return left.op(right) # -> Atom
else:
return Collection(left, right)
\end{lstlisting}
The reason the boilerplate is necessary is that
\texttt{functools.singledispatch()} must dispatch to another method:
the wrapped method implicitly receives the instance as its first
argument, leaving only a single remaining argument to dispatch on.
However, the functions that
do the work are generic and need to be functions, not methods, so the
method that is dispatched to needs to explicitly call them with
\texttt{self, other} or \texttt{other, self}. This is a total of four
function calls, say \texttt{\_\_op\_\_()}, \texttt{\_op()},
\texttt{\_op\_atom()}, and \texttt{\_op\_atom\_atom()}.
Metaprogramming with dynamic class generation can remove the need to
write a lot of the boilerplate and some of the code duplication.
Scary metaprogramming with AST rewriting can, for instance, be used to
turn a method into its reversed form by reversing the variables, which
can eliminate more code duplication and some of the performance
penalty that comes from turning one function call into four function
calls. However, most of this is a self-created problem relating to
the limitations of \texttt{functools.singledispatch()} for methods,
special methods in particular, and using metaprogramming to solve it is
stupid. Moreover, even with metaprogramming, there's a nontrivial
performance hit. Worse, it will make the implementation complex and
hard to understand and maintain, and I don't know if it's even
possible to keep the metaprogramming completely closed off from the
rest of the API. Another, much less significant problem with
\texttt{functools.singledispatch()} is that it doesn't integrate with
function annotations like it should (probably for backwards
compatibility), which means refactoring to come if I use it.
There's nothing wrong with the theory of emulating double dispatch
with two levels of single dispatch, it just doesn't work with
\texttt{functools.singledispatch()'s} implementation. There's a long
list of multiple-dispatch implementations for Python (including
\url{https://pypi.python.org/pypi/multimethods},
\url{https://pypi.python.org/pypi/generic},
\url{https://pypi.python.org/pypi/multipledispatch/},
\url{https://github.com/morepath/reg}, an @overload decorator in
\href{http://mypy-lang.org/}{Mypy} that was removed during the
discussions about type-hinting for Python 3.5, and others that are
older) that could handle the necessary dispatch, but they're all
varying degrees of unsafe (most of them use frame inspection) and
overcomplicated. The latter is a problem because multiple dispatch is
overkill for this problem, since the type of \texttt{self} for any
given special method is known at compile time. Moreover, all the
implementations differ in both syntax and important details, so
there's not anything close to a standard. Guido van Rossum claims to
be looking at multiple dispatch again for the future, but who knows if
that's going to come to pass. The lack of standardization and
possible upcoming changes to singledispatch and the typing system in
general and the possibility of real multiple dispatch in the standard
library make any solution not very future-proof.
The central problem with dispatch for special methods is that at class
creation, a special method doesn't know what instances will be
created, but it has to be capable of dispatching to instance methods
to use object-oriented method dispatch. Thus, the special method can
only dispatch to an instance method when it's called, when it has
access to the instance via its first argument. By having the special
method itself do the dispatching, rather than a wrapper function, and
dispatching on the type of the \emph{second} argument, I can avoid
these problems and use a form of chained single-dispatch to do the
necessary double dispatch. The best solution would probably be to use
the same dispatching for subtypes that
\texttt{functools.singledispatch()} uses. However, the code for
\texttt{functools.singledispatch()} isn't intended to be used outside
its module, with two functions created inside the
\texttt{functools.singledispatch()} function itself. Thus, I instead
implemented my own dispatching decorator in the simplest, dumbest
possible way with no dispatch to supertypes at all.
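That decorator might look something like the following sketch (the
idea, not my exact code): exact-type lookup on the second argument,
exposed through the descriptor protocol so that it works for special
methods looked up on the class.
\begin{lstlisting}
class dispatcher:
    # Exact-type dispatch on a method's second argument, with no
    # resolution to supertypes at all.
    def __init__(self, default):
        self.default = default
        self.registry = {}
    def register(self, cls):
        def decorator(function):
            self.registry[cls] = function
            return function
        return decorator
    def __get__(self, instance, owner):
        if instance is None:
            return self  # class access exposes register()
        def method(other):
            implementation = self.registry.get(type(other),
                                               self.default)
            return implementation(instance, other)
        return method
\end{lstlisting}
Used as \texttt{@dispatcher} on a default implementation of, say,
\texttt{\_\_add\_\_}, with implementations registered afterwards via
\texttt{C.\_\_add\_\_.register(SomeType)}.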
\section{Notes}
\label{sec:notes}
\subsection{Type Checking}
\label{sec:type_checking}
Ideally combinators should fail as soon as someone attempts to combine
combinators that process data of different types (Unicode/text and
binary data, at the moment) or parse data of an inappropriate type for
a given combinator, rather than failing deep into parsing.
Unfortunately, two things make this difficult: mutually recursive
functions create cycles in the digraph through which types propagate,
and lazy evaluation of combinators makes it hard to analyze types at
class instantiation time. A correct solution to this problem is
almost certainly going to involve using one of the existing
type-checking/type-inference algorithms, and it might make sense to
outsource it to something like mypy. Arguably, it isn't in the spirit
of Python's dynamic type system since it involves a form of static
analysis, though the addition of gradual typing to 3.5 undermines this
argument.
\subsection{Yakker SPPF}
\label{sec:yakker_sppf}
The major differences between my current implementation and the Yakker
implementation are:
\begin{itemize}
\item The non-packed nodes in their tree are binarized.
\item Nodes embed both packing and the normal branching in the same
data structure. Each node contains a list of pairs of children.
\item Every node contains, in addition to its children, an arbitrary
semantic value and a label.
\item Both have a weak hash table containing links to all the partial
trees in the forest. However, their implementation seems to use the
hash table like a set, with a function that checks directly if a
partial tree is in the table and returns it if so.
\end{itemize}
\subsection{Parse-forest grammars and adjacency-list digraphs}
\label{sec:parse-forest_grammars_adjacency-list_digraphs}
My original representation of trees/forests as digraphs using
adjacency lists containing node labels (rather than node references)
is very close (I haven't done the detailed analysis to determine how
close) to a parse-forest grammar. I don't know if there are uses for
this interpretation, but it's something to keep in mind especially
when considering code generation and error handling.
\subsection{Arrow combinators}
\label{sec:arrow_combinators}
Hughes notes that any arrow that supports \emph{apply} is, in effect,
a monad. He makes the further observation that \emph{apply} and the
choice operator can't be defined for the kind of parser that Swierstra
and Duponcheel discuss and states, ``Luckily, this does not matter: it
is rare that we \emph{want} to write a parser which decides on the
grammar to accept on the basis of previously parsed values.'' Of
course, since this is exactly what I'm trying to do, using arrows here
would be counterproductive. What this means exactly for integrating
any of Swierstra and Duponcheel's ideas into my combinators I'm not
sure: is there some kind of model that can incorporate both the
necessary dependence on past parse results and the separation of
static and dynamic elements in arrows? I don't know.
\subsection{Tunnel / Monadic Bind?}
\label{sec:tunnel_bind}
Is there a connection? I actually think I can implement Tunnel with
Act/>>.
\printbibliography
\end{document}
|
{"hexsha": "679d925a051b24876697556aabac4fc3ae53caa3", "size": 64923, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "combinators.tex", "max_stars_repo_name": "ceridwen/combinators", "max_stars_repo_head_hexsha": "a821b69a4382914792699bf10a84087cf251c78c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-06-09T17:13:51.000Z", "max_stars_repo_stars_event_max_datetime": "2015-11-19T21:04:09.000Z", "max_issues_repo_path": "combinators.tex", "max_issues_repo_name": "ceridwen/combinators", "max_issues_repo_head_hexsha": "a821b69a4382914792699bf10a84087cf251c78c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-11-19T21:59:14.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-14T13:47:32.000Z", "max_forks_repo_path": "combinators.tex", "max_forks_repo_name": "ceridwen/combinators", "max_forks_repo_head_hexsha": "a821b69a4382914792699bf10a84087cf251c78c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.3279069767, "max_line_length": 98, "alphanum_fraction": 0.7903208416, "num_tokens": 15365}
|
export jumble_iter
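# Iterate over the subwords of `text`'s letter bag; `word_to_bag` and
# `SubwordIter` are assumed to be defined elsewhere in this package.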
function jumble_iter(text::String)
return SubwordIter(word_to_bag(text))
end
|
{"hexsha": "5a25c7396bebb04cf2d9986550abc830c77fc4db", "size": 104, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/jumble.jl", "max_stars_repo_name": "dpmerrell/Scrabble.jl", "max_stars_repo_head_hexsha": "61a3333e0983873b3e41e6c7e068850d37e4c4f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/jumble.jl", "max_issues_repo_name": "dpmerrell/Scrabble.jl", "max_issues_repo_head_hexsha": "61a3333e0983873b3e41e6c7e068850d37e4c4f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/jumble.jl", "max_forks_repo_name": "dpmerrell/Scrabble.jl", "max_forks_repo_head_hexsha": "61a3333e0983873b3e41e6c7e068850d37e4c4f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 11.5555555556, "max_line_length": 41, "alphanum_fraction": 0.7788461538, "num_tokens": 26}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 15:49:43 2020
@author: Rapha
"""
import glm
import math
import numpy as np
import OpenGL.GL as gl
from cg.shader_programs.ShaderProgram import ShaderProgram
class MultiLightPhongShadingShaderProgram():
POINT_LIGHT = 3
DIRECTIONAL_LIGHT = 4
SPOTLIGHT = 5
PHONG_SPECULAR = 6
BLINN_SPECULAR = 7
def __init__(self):
VERTEX_SHADER = """
#version 330
in vec4 position;
in vec4 color;
in vec3 normal;
out vec4 frag_position;
out vec4 frag_color;
out vec3 frag_normal;
uniform bool use_uniform_color;
uniform vec4 uniform_color;
uniform mat4 mvpMatrix;
uniform mat4 modelViewMatrix;
void main()
{
gl_Position = mvpMatrix * position;
frag_position = modelViewMatrix * position;
frag_normal = transpose(inverse(mat3(modelViewMatrix))) * normal;
if(use_uniform_color)
frag_color = uniform_color;
else
frag_color = color;
}
"""
FRAGMENT_SHADER = """
#version 330
in vec4 frag_position;
in vec4 frag_color;
in vec3 frag_normal;
uniform bool isLightEnabled[5];
uniform int lightModel[5];
uniform int specularModel[5];
uniform bool hasAttenuation[5];
struct LightInfo
{
vec4 position; // eye Coordinates
vec3 direction; // normalized
float cutoff; // already calculated
float exp;
vec3 La;
vec3 Li;
};
uniform LightInfo light[5];
struct AttenuationInfo
{
float Kc;
float Kl;
float Kq;
};
uniform AttenuationInfo att[5];
struct MaterialInfo
{
vec3 Ka;
vec3 Kd;
vec3 Ks;
float shininess;
};
uniform MaterialInfo material;
out vec4 output_color;
vec3 phongModel(int light_index, vec3 normal, vec3 lightDirection, vec3 viewDirection)
{
vec3 r = reflect(-lightDirection, normal);
vec3 specular = material.Ks * light[light_index].Li * pow(max(dot(r, viewDirection), 0.0), material.shininess);
return specular;
}
vec3 blinnModel(int light_index, vec3 normal, vec3 lightDirection, vec3 viewDirection)
{
vec3 halfVector = normalize(lightDirection + viewDirection);
vec3 specular = material.Ks * light[light_index].Li * pow(max(dot(halfVector, normal), 0.0), material.shininess);
return specular;
}
float attenuationFunc(int light_index, float dist)
{
return 1.0 / (att[light_index].Kc + att[light_index].Kl * dist + att[light_index].Kq * dist * dist);
}
vec3 computeSpecular(int light_index, float lightDotNormal, vec3 normal, vec3 lightDirection, vec3 viewDirection)
{
vec3 specular = vec3(0.0);
if(lightDotNormal > 0.0)
{
if(specularModel[light_index] == 6)
specular = phongModel(light_index, normal, lightDirection, viewDirection);
else
specular = blinnModel(light_index, normal, lightDirection, viewDirection);
}
return specular;
}
vec4 pointLight(int light_index, vec3 normal)
{
vec3 ambient = material.Ka * light[light_index].La;
vec3 lightDirection = normalize(vec3(light[light_index].position - frag_position));
float lightDotNormal = max(0.0, dot(normal, lightDirection));
vec3 diffuse = material.Kd * light[light_index].Li * lightDotNormal;
vec3 specular = computeSpecular(light_index, lightDotNormal, normal, lightDirection, normalize(-frag_position.xyz));
if(hasAttenuation[light_index])
{
float attenuation = attenuationFunc(light_index, length(vec3(light[light_index].position - frag_position)));
vec3 rgb = min(frag_color.rgb * (ambient + attenuation * diffuse) + attenuation * specular, vec3(1.0));
return vec4(rgb, frag_color.a);
} else {
vec3 rgb = min(frag_color.rgb * (ambient + diffuse) + specular, vec3(1.0));
return vec4(rgb, frag_color.a);
}
}
vec4 directionalLight(int light_index, vec3 normal)
{
vec3 ambient = material.Ka * light[light_index].La;
vec3 lightDirection = -light[light_index].direction;
float lightDotNormal = max(0.0, dot(normal, lightDirection));
vec3 diffuse = material.Kd * light[light_index].Li * lightDotNormal;
vec3 specular = computeSpecular(light_index, lightDotNormal, normal, lightDirection, normalize(-frag_position.xyz));
vec3 rgb = min(frag_color.rgb * (ambient + diffuse) + specular, vec3(1.0));
return vec4(rgb, frag_color.a);
}
vec4 spotlight(int light_index, vec3 normal)
{
vec3 ambient = material.Ka * light[light_index].La;
vec3 lightDirection = normalize(vec3(light[light_index].position - frag_position));
float angle = dot(-lightDirection, light[light_index].direction);
float cutoff = clamp(light[light_index].cutoff, 0.0, 1.0);
if(angle > cutoff)
{
float lightDotNormal = max(0.0, dot(normal, lightDirection));
vec3 diffuse = material.Kd * light[light_index].Li * lightDotNormal;
vec3 specular = computeSpecular(light_index, lightDotNormal, normal, lightDirection, normalize(-frag_position.xyz));
float falloff = pow(angle, light[light_index].exp);
if(hasAttenuation[light_index])
{
float attenuation = attenuationFunc(light_index, length(vec3(light[light_index].position - frag_position))) * falloff;
vec3 rgb = min(frag_color.rgb * (ambient + attenuation * diffuse) + attenuation * specular, vec3(1.0));
return vec4(rgb, frag_color.a);
} else {
vec3 rgb = min(frag_color.rgb * (ambient + falloff * diffuse) + falloff * specular, vec3(1.0));
return vec4(rgb, frag_color.a);
}
} else
return vec4(frag_color.rgb * ambient, frag_color.a);
}
void main()
{
vec3 normal = normalize(frag_normal);
output_color = vec4(0.0, 0.0, 0.0, 0.0);
for(int light_index = 0; light_index < 5; light_index++)
{
if(isLightEnabled[light_index])
{
if(lightModel[light_index] == 3)
output_color += pointLight(light_index, normal);
else if(lightModel[light_index] == 4)
output_color += directionalLight(light_index, normal);
else
output_color += spotlight(light_index, normal);
}
}
output_color = min(vec4(output_color.rgb, frag_color.a), vec4(1.0));
}
"""
self.__maxNumberOfLights = 5
use_uniform_color = 0
uniform_color = np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32)
mvp_matrix = glm.mat4()
model_view_matrix = glm.mat4()
is_light_enabled = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
is_light_enabled[0] = 1
light_model = MultiLightPhongShadingShaderProgram.POINT_LIGHT;
specular_mode = MultiLightPhongShadingShaderProgram.PHONG_SPECULAR;
has_attenuation = 0;
light_position = glm.vec4(5.0, 5.0, 0.0, 1.0)
light_direction = glm.vec3(0.0, -5.0, -5.0)
light_direction = light_direction / np.linalg.norm(light_direction)
light_cutoff = 10;
light_exp = 5;
light_la = np.array([0.3, 0.3, 0.3], dtype=np.float32)
light_li = np.array([1.0, 1.0, 1.0], dtype=np.float32)
material_ka = np.array([0.8, 0.8, 0.8], dtype=np.float32)
material_kd = np.array([0.8, 0.8, 0.8], dtype=np.float32)
material_ks = np.array([0.7, 0.7, 0.7], dtype=np.float32)
material_shininess = 40.0
att_kc = 0.1;
att_kl = 0.1;
att_kq = 0.05;
self.__isLightEnabledLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__lightModelLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__specularModelLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__hasAttenuationLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__lightPositionLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__lightDirectionLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__cutoffLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__lightExpLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__lightLaLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__lightLiLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__attKcLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__attKlLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__attKqLoc = np.zeros(self.__maxNumberOfLights, dtype=np.int32)
self.__shaderProgram = ShaderProgram(VERTEX_SHADER, FRAGMENT_SHADER)
self.__shaderProgram.bind()
self.__useUniformColorLoc = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "use_uniform_color");
self.__uniformColorLoc = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "uniform_color");
gl.glUniform1i(self.__useUniformColorLoc, use_uniform_color)
gl.glUniform4fv(self.__uniformColorLoc, 1, uniform_color);
self.__mvpMatrixLoc = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "mvpMatrix");
self.__modelViewMatrixLoc = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "modelViewMatrix");
gl.glUniformMatrix4fv(self.__mvpMatrixLoc, 1, gl.GL_FALSE, glm.value_ptr(mvp_matrix))
gl.glUniformMatrix4fv(self.__modelViewMatrixLoc, 1, gl.GL_FALSE, glm.value_ptr(model_view_matrix))
for i in range(self.__maxNumberOfLights):
self.__isLightEnabledLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "isLightEnabled[" + str(i) + "]");
self.__lightModelLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "lightModel[" + str(i) + "]");
self.__specularModelLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "specularModel[" + str(i) + "]");
self.__hasAttenuationLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "hasAttenuation[" + str(i) + "]");
gl.glUniform1i(self.__isLightEnabledLoc[i], is_light_enabled[i]);
gl.glUniform1i(self.__lightModelLoc[i], light_model);
gl.glUniform1i(self.__specularModelLoc[i], specular_mode);
gl.glUniform1i(self.__hasAttenuationLoc[i], has_attenuation);
self.__lightPositionLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "light[" + str(i) + "].position");
self.__lightDirectionLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "light[" + str(i) + "].direction");
self.__cutoffLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "light[" + str(i) + "].cutoff");
self.__lightExpLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "light[" + str(i) + "].exp");
self.__lightLaLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "light[" + str(i) + "].La");
self.__lightLiLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "light[" + str(i) + "].Li");
gl.glUniform4fv(self.__lightPositionLoc[i], 1, glm.value_ptr(light_position));
gl.glUniform3fv(self.__lightDirectionLoc[i], 1, glm.value_ptr(light_direction));
gl.glUniform1f(self.__cutoffLoc[i], math.cos(math.radians(light_cutoff)));
gl.glUniform1f(self.__lightExpLoc[i], light_exp);
gl.glUniform3fv(self.__lightLaLoc[i], 1, light_la);
gl.glUniform3fv(self.__lightLiLoc[i], 1, light_li);
self.__attKcLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "att[" + str(i) + "].Kc");
self.__attKlLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "att[" + str(i) + "].Kl");
self.__attKqLoc[i] = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "att[" + str(i) + "].Kq");
gl.glUniform1f(self.__attKcLoc[i], att_kc);
gl.glUniform1f(self.__attKlLoc[i], att_kl);
gl.glUniform1f(self.__attKqLoc[i], att_kq);
self.__materialKaLoc = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "material.Ka");
self.__materialKdLoc = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "material.Kd");
self.__materialKsLoc = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "material.Ks");
self.__materialShininessLoc = gl.glGetUniformLocation(self.__shaderProgram.getProgramID(), "material.shininess");
gl.glUniform3fv(self.__materialKaLoc, 1, material_ka);
gl.glUniform3fv(self.__materialKdLoc, 1, material_kd);
gl.glUniform3fv(self.__materialKsLoc, 1, material_ks);
gl.glUniform1f(self.__materialShininessLoc, material_shininess);
self.__shaderProgram.release()
def useUniformMaterialColor(self, state):
if(state):
gl.glUniform1i(self.__useUniformColorLoc, 1)
else:
gl.glUniform1i(self.__useUniformColorLoc, 0)
def setUniformMaterialColor(self, color):
gl.glUniform4fv(self.__uniformColorLoc, 1, color);
def setUniformMVPMatrix(self, mvp_matrix):
gl.glUniformMatrix4fv(self.__mvpMatrixLoc, 1, gl.GL_FALSE, glm.value_ptr(mvp_matrix))
def setUniformModelViewMatrix(self, mv_matrix):
gl.glUniformMatrix4fv(self.__modelViewMatrixLoc, 1, gl.GL_FALSE, glm.value_ptr(mv_matrix))
def enableLight(self, light_index):
gl.glUniform1i(self.__isLightEnabledLoc[light_index], 1);
def disableLight(self, light_index):
gl.glUniform1i(self.__isLightEnabledLoc[light_index], 0);
def setUniformLightMode(self, light_index, light_mode):
if(light_mode >= 3 and light_mode <= 5):
gl.glUniform1i(self.__lightModelLoc[light_index], light_mode)
else:
print("MultiLightPhongShadingShaderProgram::setUniformLightMode --> modo inválida. Mudando para modo Point light!")
gl.glUniform1i(self.__lightModelLoc[light_index], MultiLightPhongShadingShaderProgram.POINT_LIGHT)
def setUniformSpecularMode(self, light_index, specular_mode):
if(specular_mode == 6 or specular_mode == 7):
gl.glUniform1i(self.__specularModelLoc[light_index], specular_mode)
else:
print("MultiLightPhongShadingShaderProgram::setUniformSpecularMode --> modo inválida. Mudando para modo Phong specular!")
gl.glUniform1i(self.__specularModelLoc[light_index], MultiLightPhongShadingShaderProgram.PHONG_SPECULAR)
def useUniformLightAttenuation(self, light_index, state):
if(state):
gl.glUniform1i(self.__hasAttenuationLoc[light_index], 1)
else:
gl.glUniform1i(self.__hasAttenuationLoc[light_index], 0)
def setUniformLightPosition(self, light_index, position):
gl.glUniform4fv(self.__lightPositionLoc[light_index], 1, glm.value_ptr(position));
def setUniformLightDirection(self, light_index, direction):
norm_direction = direction / np.linalg.norm(direction)
gl.glUniform3fv(self.__lightDirectionLoc[light_index], 1, glm.value_ptr(norm_direction));
def setUniformSpotlightCutoff(self, light_index, cutoff_angle):
        gl.glUniform1f(self.__cutoffLoc[light_index], math.cos(math.radians(cutoff_angle)))
def setUniformSpotlightExpAtt(self, light_index, exp_att):
gl.glUniform1f(self.__lightExpLoc[light_index], exp_att);
def setUniformLightAmbient(self, light_index, ambiente_color):
gl.glUniform3fv(self.__lightLaLoc[light_index], 1, ambiente_color);
def setUniformLightIntensity(self, light_index, light_color):
gl.glUniform3fv(self.__lightLiLoc[light_index], 1, light_color);
def setUniformLightConstantAttenuation(self, light_index, const_att):
gl.glUniform1f(self.__attKcLoc[light_index], const_att);
def setUniformLightLinearAttenuation(self, light_index, linear_att):
gl.glUniform1f(self.__attKlLoc[light_index], linear_att);
def setUniformLightQuadraticAttenuation(self, light_index, quadratic_att):
gl.glUniform1f(self.__attKqLoc[light_index], quadratic_att);
def setUniformMaterialAmbient(self, material_ambient):
gl.glUniform3fv(self.__materialKaLoc, 1, material_ambient);
def setUniformMaterialDiffuse(self, material_diffuse):
gl.glUniform3fv(self.__materialKdLoc, 1, material_diffuse);
def setUniformMaterialSpecular(self, material_specular):
gl.glUniform3fv(self.__materialKsLoc, 1, material_specular);
def setUniformMaterialShininess(self, material_shininess):
gl.glUniform1f(self.__materialShininessLoc, material_shininess);
def bind(self):
self.__shaderProgram.bind()
def release(self):
self.__shaderProgram.release()
def getVertexPositionLoc(self):
return gl.glGetAttribLocation(self.__shaderProgram.getProgramID(), "position")
def getVertexColorLoc(self):
return gl.glGetAttribLocation(self.__shaderProgram.getProgramID(), "color")
def getVertexNormalLoc(self):
return gl.glGetAttribLocation(self.__shaderProgram.getProgramID(), "normal")
|
{"hexsha": "137747cb7c152a9e7d9baa130c3c8f2fa132a314", "size": 19110, "ext": "py", "lang": "Python", "max_stars_repo_path": "1S2020/cg/shader_programs/MultiLightPhongShadingShaderProgram.py", "max_stars_repo_name": "andre91998/EA979", "max_stars_repo_head_hexsha": "f3b82588ffaf20848a54b3a21b0332c1e72c54e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "1S2020/cg/shader_programs/MultiLightPhongShadingShaderProgram.py", "max_issues_repo_name": "andre91998/EA979", "max_issues_repo_head_hexsha": "f3b82588ffaf20848a54b3a21b0332c1e72c54e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "1S2020/cg/shader_programs/MultiLightPhongShadingShaderProgram.py", "max_forks_repo_name": "andre91998/EA979", "max_forks_repo_head_hexsha": "f3b82588ffaf20848a54b3a21b0332c1e72c54e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-03-10T17:25:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T09:21:37.000Z", "avg_line_length": 42.8475336323, "max_line_length": 138, "alphanum_fraction": 0.6240188383, "include": true, "reason": "import numpy", "num_tokens": 4572}
|
import numpy as np
import h5py
class CustomClass:
"""
    An artificial custom class used to demonstrate
    serialization and deserialization with hdf5.
"""
def __init__(self, name: str, number: int, data: np.ndarray, nested_dict: dict):
self.name = name
self.number = number
self.data = data
self.nested_dict = nested_dict
def __repr__(self):
return self.__str__()
def __str__(self):
return f"Name: {self.name}, \nNumber: {self.number}, \nData: {self.data}, \nNested dict: {self.nested_dict}"
def __eq__(self, other: "CustomClass"):
return (
self.name == other.name
and self.number == other.number
and (self.data == other.data).all()
and self.nested_dict == other.nested_dict
)
def __ne__(self, other: "CustomClass"):
return not self.__eq__(other)
def save_to_hdf5(self, file_path: str) -> None:
"""
Save the current instance to an hdf5 file.
"""
print(f"Saving {self.name} to {file_path}")
with h5py.File(file_path, "w") as f:
f.create_dataset("name", 1, dtype=h5py.special_dtype(vlen=str))
f["name"][:] = self.name
f.create_dataset("number", 1, dtype=int)
f["number"][:] = self.number
f.create_dataset("data", data=self.data)
# for nested dict use group
f.create_group("nested_dict")
for key, value in self.nested_dict.items():
if isinstance(value, dict):
f.create_group(f"nested_dict/{key}")
for key2, value2 in value.items():
f.create_dataset(
f"nested_dict/{key}/{key2}",
1,
dtype=h5py.special_dtype(vlen=str),
)
f[f"nested_dict/{key}/{key2}"][:] = value2
else:
f.create_dataset(
f"nested_dict/{key}", 1, dtype=h5py.special_dtype(vlen=str)
)
f[f"nested_dict/{key}"][:] = value
@classmethod
def load_from_hdf5(cls, file_path: str) -> "CustomClass":
"""
Load an instance from an hdf5 file.
"""
with h5py.File(file_path, "r") as f:
name = f["name"].asstr()[0]
number = int(f["number"][0])
data = np.array(f["data"])
nested_dict = {}
for key, value in f["nested_dict"].items():
if isinstance(value, h5py.Group):
nested_dict[int(key)] = {}
for key2, value2 in value.items():
nested_dict[int(key)][key2] = value2.asstr()[0]
else:
nested_dict[int(key)] = value.asstr()[0]
print(f"Loading {name} from {file_path}")
return cls(name, number, data, nested_dict)
def main():
"""
Main function.
"""
custom_obj = CustomClass(
"test",
55,
np.array([1, 2, 3]),
{
1: {"name": "John", "age": "27", "sex": "Male"},
2: {"name": "Marie", "age": "22", "sex": "Female"},
},
)
file_path = "temp.h5"
custom_obj.save_to_hdf5(file_path)
new_obj = CustomClass.load_from_hdf5(file_path)
print(f"Objects are identical: {custom_obj == new_obj}")
if __name__ == "__main__":
main()
|
{"hexsha": "2211564d680d699c960cfdf7fedf504aa77e43c1", "size": 3508, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/custom_class.py", "max_stars_repo_name": "djeada/Hdf5", "max_stars_repo_head_hexsha": "6264d2d8063341ebed4ecec5fd766303fd018186", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/custom_class.py", "max_issues_repo_name": "djeada/Hdf5", "max_issues_repo_head_hexsha": "6264d2d8063341ebed4ecec5fd766303fd018186", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/custom_class.py", "max_forks_repo_name": "djeada/Hdf5", "max_forks_repo_head_hexsha": "6264d2d8063341ebed4ecec5fd766303fd018186", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4814814815, "max_line_length": 116, "alphanum_fraction": 0.5096921323, "include": true, "reason": "import numpy", "num_tokens": 840}
|
From iris.proofmode Require Import tactics.
From iris.algebra Require Import auth.
From Perennial.goose_lang Require Import proofmode notation.
From Perennial.program_logic Require Import recovery_weakestpre recovery_adequacy.
From Perennial.goose_lang Require Export recovery_lifting.
From Perennial.goose_lang Require Import typing adequacy lang crash_borrow.
Set Default Proof Using "Type".
Theorem goose_recv_adequacy `{ffi_sem: ffi_semantics} `{!ffi_interp ffi} {Hffi_adequacy:ffi_interp_adequacy} Σ `{hPre: !gooseGpreS Σ} s e r σ g φ φr φinv n :
ffi_initgP g.(global_world) → ffi_initP σ.(world) g.(global_world) →
(∀ `(Hheap : !heapGS Σ), ∃ Φinv,
⊢ ffi_global_start goose_ffiGlobalGS g.(global_world) -∗
ffi_local_start goose_ffiLocalGS σ.(world) -∗
trace_frag σ.(trace) -∗
oracle_frag σ.(oracle) -∗
pre_borrowN n ={⊤}=∗
□ (∀ σ nt, state_interp σ nt -∗ |NC={⊤, ∅}=> ⌜ φinv σ ⌝) ∗
□ (∀ hL : gooseLocalGS Σ,
let hG := HeapGS _ _ hL in
Φinv hG -∗ □ ∀ σ nt, state_interp σ nt -∗ |NC={⊤, ∅}=> ⌜ φinv σ ⌝) ∗
wpr s ⊤ e r (λ v, ⌜φ v⌝) (Φinv) (λ _ v, ⌜φr v⌝)) →
recv_adequate (CS := goose_crash_lang) s e r σ g (λ v _ _, φ v) (λ v _ _, φr v) (λ σ _, φinv σ).
Proof.
intros Hinit Hinitg Hwp.
eapply (wp_recv_adequacy_inv Σ _ _ (n * 4 + crash_borrow_ginv_number)).
iIntros (???).
iMod (na_heap_name_init tls σ.(heap)) as (name_na_heap) "Hh".
iMod (ffi_global_init _ _ g.(global_world)) as (ffi_namesg) "(Hgw&Hgstart)"; first by eauto.
iMod (ffi_local_init _ _ σ.(world)) as (ffi_names) "(Hw&Hstart)"; first by eauto.
iMod (trace_name_init σ.(trace) σ.(oracle)) as (name_trace) "(Htr&Htrfrag&Hor&Hofrag)".
iMod (credit_name_init (n*4 + crash_borrow_ginv_number)) as (name_credit) "(Hcred_auth&Hcred&Htok)".
iDestruct (cred_frag_split with "Hcred") as "(Hpre&Hcred)".
iMod (proph_map_init κs g.(used_proph_id)) as (proph_names) "Hproph".
iAssert (|={⊤}=> crash_borrow_ginv)%I with "[Hcred]" as ">#Hinv".
{ rewrite /crash_borrow_ginv. iApply (inv_alloc _). iNext. eauto. }
(* TODO(RJ): reformulate init lemmas to better match what we need here. *)
set (hG := GooseGlobalGS _ _ proph_names (creditGS_update_pre _ _ name_credit) ffi_namesg).
set (hL := GooseLocalGS Σ Hc ffi_names (na_heapGS_update_pre _ name_na_heap) (traceGS_update_pre Σ _ name_trace)).
destruct (Hwp (HeapGS _ hG hL)) as [Φinv Hwp']. clear Hwp.
iExists state_interp, global_state_interp, fork_post.
iExists _, _.
iExists ((λ Hinv hGen, ∃ hL:gooseLocalGS Σ, ⌜hGen = goose_generationGS (L:=hL)⌝ ∗ Φinv (HeapGS _ _ hL)))%I.
iDestruct (@cred_frag_to_pre_borrowN _ _ _ _ _ hG n with "Hpre") as "Hpre".
iMod (Hwp' with "[$] [$] [$] [$] [$]") as "(#H1&#H2&Hwp)".
iModIntro.
iSplitR.
{ iModIntro. iIntros (??) "Hσ".
iApply ("H1" with "Hσ").
}
iSplitR.
{
iModIntro. iIntros (HG') "(%hL' & -> & H)".
iApply "H2". done.
}
iFrame. iFrame "Hinv". iSplitR; first done.
rewrite /wpr.
iApply (recovery_weakestpre.wpr_strong_mono with "Hwp").
iSplit; first by eauto.
iSplit; first by eauto.
iIntros (? v) "(% & % & $)". done.
Qed.
Section failstop.
Context `{ffi_sem: ffi_semantics} `{!ffi_interp ffi} {Hffi_adequacy:ffi_interp_adequacy}.
(* We can model fail-stop execution by having the restart thread be a trivial
   program that simply halts. Thus, the machine "restarts" after a crash but
   does not do anything. *)
Definition adequate_failstop (e: expr) (σ: state) (g: global_state)
(φpost : val → state → global_state → Prop) :=
recv_adequate (CS := goose_crash_lang) NotStuck e (of_val #()) σ g φpost (λ _ _ _, True) (λ _ _, True).
(* Like above, but, for failstop execution one only needs to prove a wp, not a
wpr. Due to that, no φinv is supported (since after the crash σ changed so
[φinv σ] no longer has any reason to hold). *)
Theorem goose_recv_adequacy_failstop
Σ `{hPre: !gooseGpreS Σ} (e: expr) (σ: state) (g: global_state) φpost :
ffi_initgP g.(global_world) → ffi_initP σ.(world) g.(global_world) →
(∀ `(Hheap : !heapGS Σ),
⊢ ffi_global_start goose_ffiGlobalGS g.(global_world) -∗
ffi_local_start goose_ffiLocalGS σ.(world) ={⊤}=∗
WP e @ ⊤ {{ v, ⌜φpost v⌝ }}) →
adequate_failstop e σ g (λ v _ _, φpost v).
Proof.
intros Hinitg Hinit Hwp. eapply goose_recv_adequacy with (n:=0%nat); [done..|].
intros hHeap. exists (λ _, True)%I.
iIntros "Hstartg Hstart _ _ _".
iMod (Hwp with "Hstartg Hstart") as "Hwp". iModIntro.
iSplitR.
{ iIntros "!> * _". iApply ncfupd_mask_intro; auto. }
iSplitR.
{ do 2 iIntros "!> * _". iApply ncfupd_mask_intro; auto. }
iApply (idempotence_wpr _ _ _ _ _ _ _ (λ _, True%I) with "[Hwp] []").
{ iApply wp_wpc. eauto. }
{ iModIntro. iIntros (????) "_".
iModIntro.
rewrite /crash_modality.post_crash.
iIntros (???) "H". iModIntro; iFrame. iIntros "H". iSplit; first auto.
iApply wpc_value; eauto.
}
Qed.
End failstop.
|
{"author": "mit-pdos", "repo": "perennial", "sha": "76dafee3cd47e1c5e5a6d5436f87738a06f13ee0", "save_path": "github-repos/coq/mit-pdos-perennial", "path": "github-repos/coq/mit-pdos-perennial/perennial-76dafee3cd47e1c5e5a6d5436f87738a06f13ee0/src/goose_lang/recovery_adequacy.v"}
|
"""
DuckDB data chunk
"""
mutable struct DataChunk
handle::duckdb_data_chunk
function DataChunk(handle::duckdb_data_chunk, destroy::Bool)
result = new(handle)
if destroy
finalizer(_destroy_data_chunk, result)
end
return result
end
end
function get_column_count(chunk::DataChunk)
return duckdb_data_chunk_get_column_count(chunk.handle)
end
function get_size(chunk::DataChunk)
return duckdb_data_chunk_get_size(chunk.handle)
end
function set_size(chunk::DataChunk, size::Int64)
return duckdb_data_chunk_set_size(chunk.handle, size)
end
function get_vector(chunk::DataChunk, col_idx::Int64)::Vec
if col_idx < 1 || col_idx > get_column_count(chunk)
throw(
InvalidInputException(
string(
"get_array column index ",
col_idx,
" out of range, expected value between 1 and ",
get_column_count(chunk)
)
)
)
end
return Vec(duckdb_data_chunk_get_vector(chunk.handle, col_idx))
end
function get_array(chunk::DataChunk, col_idx::Int64, ::Type{T})::Vector{T} where {T}
return get_array(get_vector(chunk, col_idx), T)
end
function get_validity(chunk::DataChunk, col_idx::Int64)::ValidityMask
return get_validity(get_vector(chunk, col_idx))
end
function all_valid(chunk::DataChunk, col_idx::Int64)
return all_valid(get_vector(chunk, col_idx))
end
# this is only required when we own the data chunk
function _destroy_data_chunk(chunk::DataChunk)
if chunk.handle != C_NULL
duckdb_destroy_data_chunk(chunk.handle)
end
return chunk.handle = C_NULL
end
|
{"hexsha": "7ed25de4534948c736c69d6bc2895eb1b5f4e388", "size": 1704, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "tools/juliapkg/src/data_chunk.jl", "max_stars_repo_name": "lokax/duckdb", "max_stars_repo_head_hexsha": "c2581dfebccaebae9468c924c2c722fcf0306944", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-13T06:00:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-13T06:00:18.000Z", "max_issues_repo_path": "tools/juliapkg/src/data_chunk.jl", "max_issues_repo_name": "lokax/duckdb", "max_issues_repo_head_hexsha": "c2581dfebccaebae9468c924c2c722fcf0306944", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 32, "max_issues_repo_issues_event_min_datetime": "2021-09-24T23:50:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T09:37:26.000Z", "max_forks_repo_path": "tools/juliapkg/src/data_chunk.jl", "max_forks_repo_name": "lokax/duckdb", "max_forks_repo_head_hexsha": "c2581dfebccaebae9468c924c2c722fcf0306944", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0476190476, "max_line_length": 84, "alphanum_fraction": 0.6789906103, "num_tokens": 392}
|
[STATEMENT]
lemma locally_compact_homeomorphism_projection_closed:
assumes "locally compact S"
obtains T and f :: "'a \<Rightarrow> 'a :: euclidean_space \<times> 'b :: euclidean_space"
where "closed T" "homeomorphism S T f fst"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof (cases "closed S")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; closed S\<rbrakk> \<Longrightarrow> thesis
2. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
closed S
goal (2 subgoals):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; closed S\<rbrakk> \<Longrightarrow> thesis
2. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. thesis
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. closed ?T
2. homeomorphism S ?T ?f fst
[PROOF STEP]
show "homeomorphism S (S \<times> {0}) (\<lambda>x. (x, 0)) fst"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. homeomorphism S (S \<times> {0::'c}) (\<lambda>x. (x, 0::'c)) fst
[PROOF STEP]
by (auto simp: homeomorphism_def continuous_intros)
[PROOF STATE]
proof (state)
this:
homeomorphism S (S \<times> {0::?'c1}) (\<lambda>x. (x, 0::?'c1)) fst
goal (1 subgoal):
1. closed (S \<times> {0::'b})
[PROOF STEP]
qed (use True closed_Times in auto)
[PROOF STATE]
proof (state)
this:
thesis
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> closed S
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
obtain U where "open U" and US: "U \<inter> closure S = S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>U. \<lbrakk>open U; U \<inter> closure S = S\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis locally_compact_open_Int_closure [OF assms])
[PROOF STATE]
proof (state)
this:
open U
U \<inter> closure S = S
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
with False
[PROOF STATE]
proof (chain)
picking this:
\<not> closed S
open U
U \<inter> closure S = S
[PROOF STEP]
have Ucomp: "-U \<noteq> {}"
[PROOF STATE]
proof (prove)
using this:
\<not> closed S
open U
U \<inter> closure S = S
goal (1 subgoal):
1. - U \<noteq> {}
[PROOF STEP]
using closure_eq
[PROOF STATE]
proof (prove)
using this:
\<not> closed S
open U
U \<inter> closure S = S
(closure ?S = ?S) = closed ?S
goal (1 subgoal):
1. - U \<noteq> {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
- U \<noteq> {}
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
have [simp]: "closure (- U) = -U"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closure (- U) = - U
[PROOF STEP]
by (simp add: \<open>open U\<close> closed_Compl)
[PROOF STATE]
proof (state)
this:
closure (- U) = - U
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
define f :: "'a \<Rightarrow> 'a \<times> 'b" where "f \<equiv> \<lambda>x. (x, One /\<^sub>R setdist {x} (- U))"
[PROOF STATE]
proof (state)
this:
f \<equiv> \<lambda>x. (x, One /\<^sub>R setdist {x} (- U))
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
have "continuous_on U (\<lambda>x. (x, One /\<^sub>R setdist {x} (- U)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_on U (\<lambda>x. (x, One /\<^sub>R setdist {x} (- U)))
[PROOF STEP]
proof (intro continuous_intros continuous_on_setdist)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<forall>x\<in>U. setdist {x} (- U) \<noteq> 0
[PROOF STEP]
show "\<forall>x\<in>U. setdist {x} (- U) \<noteq> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x\<in>U. setdist {x} (- U) \<noteq> 0
[PROOF STEP]
by (simp add: Ucomp setdist_eq_0_sing_1)
[PROOF STATE]
proof (state)
this:
\<forall>x\<in>U. setdist {x} (- U) \<noteq> 0
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
continuous_on U (\<lambda>x. (x, One /\<^sub>R setdist {x} (- U)))
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
continuous_on U (\<lambda>x. (x, One /\<^sub>R setdist {x} (- U)))
[PROOF STEP]
have homU: "homeomorphism U (f`U) f fst"
[PROOF STATE]
proof (prove)
using this:
continuous_on U (\<lambda>x. (x, One /\<^sub>R setdist {x} (- U)))
goal (1 subgoal):
1. homeomorphism U (f ` U) f fst
[PROOF STEP]
by (auto simp: f_def homeomorphism_def image_iff continuous_intros)
[PROOF STATE]
proof (state)
this:
homeomorphism U (f ` U) f fst
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
have cloS: "closedin (top_of_set U) S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closedin (top_of_set U) S
[PROOF STEP]
by (metis US closed_closure closedin_closed_Int)
[PROOF STATE]
proof (state)
this:
closedin (top_of_set U) S
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
have cont: "isCont ((\<lambda>x. setdist {x} (- U)) o fst) z" for z :: "'a \<times> 'b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. isCont ((\<lambda>x. setdist {x} (- U)) \<circ> fst) z
[PROOF STEP]
by (rule continuous_at_compose continuous_intros continuous_at_setdist)+
[PROOF STATE]
proof (state)
this:
isCont ((\<lambda>x. setdist {x} (- U)) \<circ> fst) ?z1
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
have setdist1D: "setdist {a} (- U) *\<^sub>R b = One \<Longrightarrow> setdist {a} (- U) \<noteq> 0" for a::'a and b::'b
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. setdist {a} (- U) *\<^sub>R b = One \<Longrightarrow> setdist {a} (- U) \<noteq> 0
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
setdist {?a1} (- U) *\<^sub>R ?b1 = One \<Longrightarrow> setdist {?a1} (- U) \<noteq> 0
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
have *: "r *\<^sub>R b = One \<Longrightarrow> b = (1 / r) *\<^sub>R One" for r and b::'b
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r *\<^sub>R b = One \<Longrightarrow> b = (1 / r) *\<^sub>R One
[PROOF STEP]
by (metis One_non_0 nonzero_divide_eq_eq real_vector.scale_eq_0_iff real_vector.scale_scale scaleR_one)
[PROOF STATE]
proof (state)
this:
?r1 *\<^sub>R ?b1 = One \<Longrightarrow> ?b1 = (1 / ?r1) *\<^sub>R One
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
have "\<And>a b::'b. setdist {a} (- U) *\<^sub>R b = One \<Longrightarrow> (a,b) \<in> (\<lambda>x. (x, (1 / setdist {x} (- U)) *\<^sub>R One)) ` U"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a b. setdist {a} (- U) *\<^sub>R b = One \<Longrightarrow> (a, b) \<in> (\<lambda>x. (x, (1 / setdist {x} (- U)) *\<^sub>R One)) ` U
[PROOF STEP]
by (metis (mono_tags, lifting) "*" ComplI image_eqI setdist1D setdist_sing_in_set)
[PROOF STATE]
proof (state)
this:
setdist {?a1} (- U) *\<^sub>R ?b1 = One \<Longrightarrow> (?a1, ?b1) \<in> (\<lambda>x. (x, (1 / setdist {x} (- U)) *\<^sub>R One)) ` U
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
setdist {?a1} (- U) *\<^sub>R ?b1 = One \<Longrightarrow> (?a1, ?b1) \<in> (\<lambda>x. (x, (1 / setdist {x} (- U)) *\<^sub>R One)) ` U
[PROOF STEP]
have "f ` U = (\<lambda>z. (setdist {fst z} (- U) *\<^sub>R snd z)) -` {One}"
[PROOF STATE]
proof (prove)
using this:
setdist {?a1} (- U) *\<^sub>R ?b1 = One \<Longrightarrow> (?a1, ?b1) \<in> (\<lambda>x. (x, (1 / setdist {x} (- U)) *\<^sub>R One)) ` U
goal (1 subgoal):
1. f ` U = (\<lambda>z. setdist {fst z} (- U) *\<^sub>R snd z) -` {One}
[PROOF STEP]
by (auto simp: f_def setdist_eq_0_sing_1 field_simps Ucomp)
[PROOF STATE]
proof (state)
this:
f ` U = (\<lambda>z. setdist {fst z} (- U) *\<^sub>R snd z) -` {One}
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
f ` U = (\<lambda>z. setdist {fst z} (- U) *\<^sub>R snd z) -` {One}
[PROOF STEP]
have clfU: "closed (f ` U)"
[PROOF STATE]
proof (prove)
using this:
f ` U = (\<lambda>z. setdist {fst z} (- U) *\<^sub>R snd z) -` {One}
goal (1 subgoal):
1. closed (f ` U)
[PROOF STEP]
by (force intro: continuous_intros cont [unfolded o_def] continuous_closed_vimage)
[PROOF STATE]
proof (state)
this:
closed (f ` U)
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
have "closed (f ` S)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed (f ` S)
[PROOF STEP]
by (metis closedin_closed_trans [OF _ clfU] homeomorphism_imp_closed_map [OF homU cloS])
[PROOF STATE]
proof (state)
this:
closed (f ` S)
goal (1 subgoal):
1. \<lbrakk>\<And>T f. \<lbrakk>closed T; homeomorphism S T f fst\<rbrakk> \<Longrightarrow> thesis; \<not> closed S\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
closed (f ` S)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
closed (f ` S)
goal (1 subgoal):
1. thesis
[PROOF STEP]
by (metis US homU homeomorphism_of_subsets inf_sup_ord(1) that)
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4763, "file": null, "length": 48}
|
\documentclass{beamer}
\usetheme{Antibes}
\useinnertheme{rectangles}
\useoutertheme{infolines}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
% patch the look of +, = in arev
\usefonttheme{serif}
\usepackage{arev}
\usepackage{amsmath}
\usepackage{amssymb}
\setbeamertemplate{footline}{%
\begin{beamercolorbox}[ht=3.0ex,dp=1ex]{title in head/foot}
\hfill\footnotesize\insertpagenumber\enspace\enspace\end{beamercolorbox}}
\newcommand{\ee}{\mathrm e}
\newcommand{\ui}{\mathrm i}
\newcommand{\real}{\operatorname{Re}}
\newcommand{\imag}{\operatorname{Im}}
\newcommand{\uv}[1]{\underline{#1}}
\newcommand{\bv}[1]{\mathbf{#1}}
\newcommand{\N}{\mathbb N}
\newcommand{\Z}{\mathbb Z}
\newcommand{\Q}{\mathbb Q}
\newcommand{\R}{\mathbb R}
\newcommand{\C}{\mathbb C}
\newcommand{\id}{\operatorname{id}}
\newcommand{\sgn}{\operatorname{sgn}}
\newcommand{\Abb}{\operatorname{Abb}}
\newcommand{\unit}[1]{\mathrm{#1}}
\newcommand{\chem}[1]{\mathrm{#1}}
\newcommand{\strong}[1]{\textsf{\textbf{#1}}}
% beamer only predefines the English theorem environments (definition, example),
% so the German variants used below must be declared:
\newtheorem{Definition}{Definition}
\newtheorem{Beispiel}{Beispiel}
\title{Titel}
\date{}
\begin{document}
\maketitle
\begin{frame}[t]
\frametitle{Inhaltsverzeichnis}
\tableofcontents
\end{frame}
\section{Abschnitt 1}
\subsection{Unterabschnitt 1.1}
\begin{frame}
\frametitle{Folientitel}
\framesubtitle{Untertitel}
Text.
\begin{Definition}[Ableitung]
\[f'(x) := \lim_{h\to 0}\frac{f(x+h)-f(x)}{h}\]
\end{Definition}
\begin{Beispiel}
Text.
\end{Beispiel}
\end{frame}
\subsection{Unterabschnitt 1.2}
\begin{frame}
\begin{itemize}
\item Minze
\item Hagebutte
\end{itemize}
\begin{enumerate}
\item Minze
\item Hagebutte
\end{enumerate}
\end{frame}
\section{Abschnitt 2}
\begin{frame}
\begin{block}{Blocktitel}
Blocktext.
\end{block}
\end{frame}
\end{document}
|
{"hexsha": "06cf8ec4745aeb75c3511679901ecfdf475c33a3", "size": 1708, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "templates/de/TeX/Beamer/Vorlage.tex", "max_stars_repo_name": "JohnBSmith/JohnBSmith.github.io", "max_stars_repo_head_hexsha": "5bb0fac7ec4d653be6bd71b4c7ab344c9615f1eb", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-15T05:45:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-15T05:45:25.000Z", "max_issues_repo_path": "templates/de/TeX/Beamer/Vorlage.tex", "max_issues_repo_name": "JohnBSmith/JohnBSmith.github.io", "max_issues_repo_head_hexsha": "5bb0fac7ec4d653be6bd71b4c7ab344c9615f1eb", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "templates/de/TeX/Beamer/Vorlage.tex", "max_forks_repo_name": "JohnBSmith/JohnBSmith.github.io", "max_forks_repo_head_hexsha": "5bb0fac7ec4d653be6bd71b4c7ab344c9615f1eb", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.4285714286, "max_line_length": 73, "alphanum_fraction": 0.7218969555, "num_tokens": 632}
|
from numpy.lib.shape_base import expand_dims
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch.nn.modules.activation import ReLU
def get_angles(pos, i, d_model):
    angle_rates = 1 / np.power(10000, 2*(i//2) / float(d_model))  # np.float was removed in NumPy 1.24; use the builtin float
return pos * angle_rates
def positional_encoding(position, d_model): # d_model is the length of each positional encoding, i.e. the embedding dimension
angle_rads = get_angles(np.arange(position)[:, np.newaxis], # [50, 1]
np.arange(d_model)[np.newaxis, :], # [1, d_model=512]
d_model)
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2]) # 2i
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2]) # 2i+1
pos_encoding = angle_rads[np.newaxis, ...] # [50,512]=>[1,50,512]
return torch.tensor(pos_encoding, dtype=torch.float32)
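# Quick shape check (illustrative): positional_encoding(50, 512) returns a
# tensor of shape [1, 50, 512], ready to be added to a batch of embeddings.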
def create_padding_mask(seq, pad):
seq = torch.eq(seq, torch.tensor(pad)).float()
return seq[:, np.newaxis, np.newaxis, :]
def create_look_ahead_mask(size):
mask = torch.triu(torch.ones(size, size), diagonal=1)
return mask
def scaled_dot_product_attention(q, k, v, mask=None):
"""计算注意力权重。
q, k, v 必须具有匹配的前置维度。
k, v 必须有匹配的倒数第二个维度,例如:seq_len_k = seq_len_v。
虽然 mask 根据其类型(填充或前瞻)有不同的形状,
但是 mask 必须能进行广播转换以便求和。
参数:
q: 请求的形状 == (..., seq_len_q, depth)
k: 主键的形状 == (..., seq_len_k, depth)
v: 数值的形状 == (..., seq_len_v, depth_v)
mask: Float 张量,其形状能转换成
(..., seq_len_q, seq_len_k)。默认为None。
返回值:
输出,注意力权重
"""
matmul_qk = torch.matmul(q, k.transpose(-2, -1))
depth_k = torch.tensor(k.shape[-1], dtype=torch.float32)
scaled_attion_logits = matmul_qk / torch.sqrt(depth_k)
if mask is not None:
scaled_attion_logits += (mask * -1e9)
attention_weights = torch.nn.functional.softmax(scaled_attion_logits, dim=-1)
output = torch.matmul(attention_weights, v)
return output, attention_weights
def point_wise_feed_forward_network(d_model, d_feedforward):
feed_forward_net = torch.nn.Sequential(
torch.nn.Linear(d_model, d_feedforward),
torch.nn.ReLU(),
torch.nn.Linear(d_feedforward, d_model)
)
return feed_forward_net
class MultiheadAttention(torch.nn.Module):
def __init__(self, d_model, num_heads):
super(MultiheadAttention, self).__init__()
self.d_model = d_model
self.num_heads = num_heads
assert d_model % self.num_heads == 0, "d_model must be divisible by num_heads!"
self.depth = d_model // self.num_heads
self.wq = torch.nn.Linear(d_model, d_model)
self.wk = torch.nn.Linear(d_model, d_model)
self.wv = torch.nn.Linear(d_model, d_model)
self.wfinal = torch.nn.Linear(d_model, d_model)
def split_heads(self, x, batch_size):
x = x.view(batch_size, -1, self.num_heads, self.depth)
return x.transpose(1,2)
def forward(self, q, k, v, mask):
batch_size = q.shape[0]
q = self.wq(q)
k = self.wk(k)
v = self.wv(v)
q = self.split_heads(q, batch_size)
k = self.split_heads(k, batch_size)
v = self.split_heads(v, batch_size)
scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v, mask)
scaled_attention = scaled_attention.transpose(1, 2)
concat_attention = scaled_attention.reshape(batch_size, -1, self.d_model)
output = self.wfinal(concat_attention)
return output, attention_weights
# end class MultiheadAttention
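# Minimal self-attention usage sketch (the shapes are illustrative assumptions):
#   mha = MultiheadAttention(d_model=512, num_heads=8)
#   x = torch.rand(2, 10, 512)            # (batch, seq_len, d_model)
#   out, attn = mha(x, x, x, mask=None)   # out: (2, 10, 512), attn: (2, 8, 10, 10)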
|
{"hexsha": "2ecf86369b7de02ca08c0b4cf10deffdea15c894", "size": 3541, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/utils.py", "max_stars_repo_name": "pgr2015/transformer_pytorch", "max_stars_repo_head_hexsha": "192e5a0feddf3cd8106f6cba72113d01a873ac1c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model/utils.py", "max_issues_repo_name": "pgr2015/transformer_pytorch", "max_issues_repo_head_hexsha": "192e5a0feddf3cd8106f6cba72113d01a873ac1c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/utils.py", "max_forks_repo_name": "pgr2015/transformer_pytorch", "max_forks_repo_head_hexsha": "192e5a0feddf3cd8106f6cba72113d01a873ac1c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.787037037, "max_line_length": 98, "alphanum_fraction": 0.6478395933, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1031}
|
# Commented out IPython magic to ensure Python compatibility.
import os
import tarfile
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, random_split # TensorDataset
import torchvision
from torchvision.datasets import ImageFolder # MNIST, CIFAR10 etc
from torchvision.datasets.utils import download_url
from torchvision.utils import make_grid
import torchvision.transforms as tt # ToTensor, Compose, Normalize, RandomCrop, RandomResizedCrop, RandomHorizontalFlip, RandomRotate, ColorJitter
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
def device():
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
device = device()
def todevice(data_model, device):
if isinstance(data_model, (list, tuple)):
return [todevice(i, device) for i in data_model]
return data_model.to(device, non_blocking=True)
class DataDeviceLoader():
def __init__(self, dataloader, device):
self.dataloader = dataloader
self.device = device
def __iter__(self):
for batch in self.dataloader:
yield todevice(batch, self.device)
def __len__(self):
return len(self.dataloader)
url = 'https://s3.amazonaws.com/fast-ai-imageclas/cifar10.tgz'
download_url(url, '.', 'cifar10.tgz')
with tarfile.open('./cifar10.tgz', 'r:gz') as tar:
tar.extractall(path='./data')
RGB_mean_std = ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) # you should calculate this for each image set, but here it is just given
train_normalized = tt.Compose([tt.RandomCrop(32, padding=4, padding_mode='reflect'),
                               # tt.RandomResizedCrop(256, scale=(0.5, 0.9), ratio=(1, 1)), # tt.CenterCrop(32), tt.Resize(32)
                               tt.RandomHorizontalFlip(),
                               # tt.RandomRotation(degrees=10),
                               # tt.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
                               tt.ToTensor(),
                               tt.Normalize(*RGB_mean_std, inplace=True)]) # inplace=True overwrites the input tensor instead of allocating a new one
valid_normalized = tt.Compose([tt.ToTensor(), tt.Normalize(*RGB_mean_std)])
dir = './data/cifar10'
traindataset = ImageFolder(dir + '/train', train_normalized) # instead transform=ToTensor() we used train_normalized
testset = ImageFolder(dir + '/test', valid_normalized) #!!! we can decide to use test set as valid set, so that we have more data for training
# torch.manual_seed(42)
# fraction = 1/20
# valid = int(len(traindataset) * fraction)
# train = len(traindataset) - valid
# trainset, validset = random_split(traindataset, [train, valid])
batchsize = 128
trainloader = DataLoader(traindataset, batch_size=batchsize, shuffle=True, num_workers=2, pin_memory=True) # traindataset directly instead of trainset
# validloader = DataLoader(validset, batch_size=batchsize*2, num_workers=2, pin_memory=True)
testloader = DataLoader(testset, batch_size=batchsize*2, num_workers=2, pin_memory=True)
trainload = DataDeviceLoader(trainloader, device)
# validload = DataDeviceLoader(validloader, device)
testload = DataDeviceLoader(testloader, device)
print(f"in './data/cifar10' folder there are two folders {os.listdir(dir)}")
print(f"in 'train' folder there are following folders {os.listdir(dir + '/train')}")
print(f"in folder 'dog' there are {len(os.listdir(dir + '/train/dog'))} images")
image10, _ = traindataset[10]
print(f"shape of each image is {image10.shape}")
def unnormalize(images, means, standarddeviations):
    means = torch.tensor(means).reshape(1, 3, 1, 1) # starting shape was one row, 3 columns (1, 3); extra dimensions are added so it broadcasts over image batches
stds = torch.tensor(standarddeviations).reshape(1, 3, 1, 1)
return stds * images + means # this is reversed what tt.Normalize does: (IMAGES - means) / stds = images, so images*stds+means=IMAGES
plt.figure(figsize=(4, 4))
plt.imshow(image10.permute((1, 2, 0))) # permute takes a tuple of dims here; with make_grid below the dims are passed as separate ints
plt.axis('off')
plt.figure(figsize=(4, 4))
plt.imshow(unnormalize(image10, *RGB_mean_std).reshape(3, 32, 32).permute((1, 2, 0)).clamp(0,1))
plt.axis('off')
for images, lables in trainloader:
plt.figure(figsize=(16, 8))
images = unnormalize(images, *RGB_mean_std)
plt.imshow(make_grid(images, nrow=16).permute(1, 2, 0).clamp(0, 1)) # clamp(0, 1) brings pixels to range between 0 and 1 if some are not
plt.axis('off')
break
class LossPart(nn.Module):
def trainloss(self, batch):
images, lables = batch
out = self(images)
loss = F.cross_entropy(out, lables)
return loss
def validloss(self, batch):
images, lables = batch
out = self(images)
loss = F.cross_entropy(out, lables)
_, bestpredictions = torch.max(out, dim=1)
accuracy = torch.tensor(torch.sum(bestpredictions==lables).item() / len(bestpredictions))
return {'accuracy': accuracy, 'loss': loss.detach()}
def epochend(self, epochoutputs):
epochlosses = [batch['loss'] for batch in epochoutputs]
epochaverageloss = torch.stack(epochlosses).mean()
epochaccuracies = [batch['accuracy'] for batch in epochoutputs]
epochaverageaccuracy = torch.stack(epochaccuracies).mean()
return {'epochloss' : epochaverageloss.item(), 'epochaccuracy' : epochaverageaccuracy.item()}
def convblock(input_channels, output_channels, pool=False):
layers = [nn.Conv2d(input_channels, output_channels, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True)]
if pool:
layers.append(nn.MaxPool2d(2))
return nn.Sequential(*layers)
class ForwardPart(LossPart): # resnet9 architecture
def __init__(self, input_channels, output_classes):
super().__init__() # if you want to inherit something specific super(something, self).__init__()
self.conv1 = convblock(input_channels, 64)
self.conv2 = convblock(64, 128, pool=True) # here image size became 16X16
self.res1 = nn.Sequential(convblock(128, 128), convblock(128, 128))
self.conv3 = convblock(128, 256)
self.conv4 = convblock(256, 512, pool=True) # here image size became 8X8
self.res2 = nn.Sequential(convblock(512, 512), convblock(512, 512))
self.classify = nn.Sequential(nn.MaxPool2d(4), # here image size became 2X2
nn.Flatten(),
nn.Dropout(0.2),
nn.Linear(512*2*2, output_classes))
def forward(self, xbatch):
out = self.conv1(xbatch)
out = self.conv2(out)
out = self.res1(out) + out # residual is addition of 'out' from previous layers!!!
out = self.conv3(out)
out = self.conv4(out)
out = self.res2(out) + out
out = self.classify(out)
return out
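# Shape trace for a 3x32x32 CIFAR-10 input (illustrative):
#   conv1 -> 64x32x32, conv2 -> 128x16x16 (pool), res1 -> 128x16x16,
#   conv3 -> 256x16x16, conv4 -> 512x8x8 (pool),  res2 -> 512x8x8,
#   MaxPool2d(4) -> 512x2x2, Flatten -> 2048, Linear -> output_classes logits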
model = todevice(ForwardPart(3, 10), device)
model
@torch.no_grad()
def evaluate(model, valid_or_testload):
model.eval()
epochoutputs = [model.validloss(batch) for batch in valid_or_testload]
epochresult = model.epochend(epochoutputs)
return epochresult
def fit(model, trainload, testload, max_lr, epochs, weight_decay=0, clip_grad=None, optim=torch.optim.SGD):
torch.cuda.empty_cache()
history = []
optimizer = optim(model.parameters(), max_lr, weight_decay=weight_decay)
scedule = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs, steps_per_epoch=len(trainload))
for epoch in range(epochs):
model.train()
learning_rates = []
training_losses = []
for batch in trainload:
loss = model.trainloss(batch)
training_losses.append(loss)
loss.backward()
if clip_grad:
                nn.utils.clip_grad_value_(model.parameters(), clip_grad) # clips each gradient value to [-clip_grad, clip_grad] before the optimizer step
optimizer.step() # here will be used weight_decay=weight_decay
optimizer.zero_grad()
learning_rates.append([i['lr'] for i in optimizer.param_groups])
scedule.step()
epochresult = evaluate(model, testload)
epochresult['epoch_trainloss'] = torch.stack(training_losses).mean().item() # creates new key-value pair in dictionary
epochresult['lr'] = learning_rates
history.append(epochresult)
return history
# Commented out IPython magic to ensure Python compatibility.
max_lr = 0.01
epochs = 8
weight_decay = 1e-4
clip_grad = 0.1
optim = torch.optim.Adam
training = []
# %time
training += fit(model, trainload, testload, max_lr, epochs, weight_decay, clip_grad, optim)
accuracies = [acc['epochaccuracy'] for acc in training]
validlosses = [loss['epochloss'] for loss in training]
trainlosses = [loss.get('epoch_trainloss') for loss in training]
learningrates = np.concatenate([lr.get('lr', []) for lr in training])
plt.plot(accuracies, '-mo')
plt.plot(validlosses, '-bo')
plt.plot(trainlosses, '-co')
# plt.plot(learningrates, '-yo') #plot separately as scale is completely different and nothing will be seen in same graph, xlabel=batch
plt.legend(['accuracy', 'validloss', 'trainloss', 'lrs'])
plt.xlabel('epoch')
plt.ylabel('value')
plt.title('Learning Performance');
# We skip a separate test phase: the test set was already used for validation, so no data is left for testing
def predict(model, image):
uimage = todevice(image.unsqueeze(0), device)
mimage = model(uimage)
_, bestprediction = torch.max(mimage, dim=1)
return bestprediction[0].item()
image, label = traindataset[7000]
predicted = predict(model, image)
img = unnormalize(image, *RGB_mean_std).reshape(3, 32, 32)
print(f"lable is {traindataset.classes[label]}, predicted was {traindataset.classes[predicted]}")
plt.imshow(img.permute((1, 2, 0)).clamp(0, 1))
plt.axis('off');
torch.save(model.state_dict(), 'cifar10_resnet9.pth')
new_model = ForwardPart(3, 10)
new_model.load_state_dict(torch.load('cifar10_resnet9.pth'))
|
{"hexsha": "2b686b56900b147e63b5e33a9ad64001285c2958", "size": 10352, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pytorch/resnet_onecyclepolicy.py", "max_stars_repo_name": "VladimerKhasia/ML", "max_stars_repo_head_hexsha": "7b7a6075458a8e9ac275a803f0fd89fb606294ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Pytorch/resnet_onecyclepolicy.py", "max_issues_repo_name": "VladimerKhasia/ML", "max_issues_repo_head_hexsha": "7b7a6075458a8e9ac275a803f0fd89fb606294ae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pytorch/resnet_onecyclepolicy.py", "max_forks_repo_name": "VladimerKhasia/ML", "max_forks_repo_head_hexsha": "7b7a6075458a8e9ac275a803f0fd89fb606294ae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4262295082, "max_line_length": 158, "alphanum_fraction": 0.6709814529, "include": true, "reason": "import numpy", "num_tokens": 2645}
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv('lifespan40All.csv')
heatmapData = pd.pivot_table(df, values='commentators', index=['all'], columns='year')
plt.figure(figsize = (20, 4))
plot = sns.heatmap(heatmapData, cmap='BuPu', xticklabels=100, yticklabels=False)
plt.xlabel("Year in AH")
plt.ylabel("")
plt.title("Commentaries on the Six Canonical Hadith Collections")
fig = plot.get_figure()
fig.savefig('lifeSpan40allCommentators.svg')
|
{"hexsha": "9386f14b733b0aaaac688522c288d1e25a42843c", "size": 503, "ext": "py", "lang": "Python", "max_stars_repo_path": "allCommentariesCreateHeatmap.py", "max_stars_repo_name": "lwcvl/Plotting-All-Hadith-Commentaries", "max_stars_repo_head_hexsha": "71d22f5815d86943e7e5ce72f84363e4fc3610eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-10T08:38:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-10T08:38:20.000Z", "max_issues_repo_path": "allCommentariesCreateHeatmap.py", "max_issues_repo_name": "lwcvl/Plotting-All-Hadith-Commentaries", "max_issues_repo_head_hexsha": "71d22f5815d86943e7e5ce72f84363e4fc3610eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "allCommentariesCreateHeatmap.py", "max_forks_repo_name": "lwcvl/Plotting-All-Hadith-Commentaries", "max_forks_repo_head_hexsha": "71d22f5815d86943e7e5ce72f84363e4fc3610eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-10T08:38:38.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-10T08:38:38.000Z", "avg_line_length": 26.4736842105, "max_line_length": 86, "alphanum_fraction": 0.7554671968, "include": true, "reason": "import scipy", "num_tokens": 128}
|
import numpy as np
import pandas as pd
def read_and_merge_data(*, base_path="../../data/raw/"):
df = pd.read_excel(base_path + 'RVMS_Current_Property_and_BIZ_Owner_List - vCurrent (1).xlsx',
sheet_name='Biz & Prop Owner MAIN list')
naics = pd.read_excel(base_path + '2-6 digit_2017_Codes.xlsx')
# Merge business and NAICS data
df['NAICS Code'] = df['NAICS Code'].astype(object)
df = df.merge(naics, left_on='NAICS Code', right_on='2017 NAICS US Code', how='inner')
# Clean up data
df = df[pd.notnull(df['NAICS Code'])]
df.columns = [c.replace(' ', '_') for c in df.columns]
df['NAICS_2_digit'] = df['NAICS_Code'].astype(str).str[:2]
return df
def make_fake_data(df, *, prob=0.25):
n_rows = df.shape[0]
cols = ['R2B_email_sponsorship_promotion', 'R2B_provide_resources', 'R2B_liason', 'B2R_event_participation',
'B2R_sponsorship_donation', 'B2R_share_business_information', 'B2R_volunteer', 'B2R_use_RVMS_resources']
for col in cols:
df[col] = np.random.binomial(n=1, p=prob, size=n_rows)
return df
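# Illustrative usage (relies on the default paths hard-coded above):
#   df = read_and_merge_data()
#   df = make_fake_data(df, prob=0.25)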
|
{"hexsha": "fd63f9a341a00bfbd2c3fdf80df94de200b2d967", "size": 1110, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data/make_dataset.py", "max_stars_repo_name": "tlittrell/BusinessEngagementMatrix", "max_stars_repo_head_hexsha": "1367493cc01d28da52b0959d51f760c4205109f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data/make_dataset.py", "max_issues_repo_name": "tlittrell/BusinessEngagementMatrix", "max_issues_repo_head_hexsha": "1367493cc01d28da52b0959d51f760c4205109f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-03-24T16:36:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:39:08.000Z", "max_forks_repo_path": "src/data/make_dataset.py", "max_forks_repo_name": "tlittrell/BusinessEngagementMatrix", "max_forks_repo_head_hexsha": "1367493cc01d28da52b0959d51f760c4205109f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6363636364, "max_line_length": 116, "alphanum_fraction": 0.6621621622, "include": true, "reason": "import numpy", "num_tokens": 333}
|
function S=tria(A)
%%TRIA Square root matrix triangularization. Given a rectangular square
% root matrix, obtain a lower-triangular square root matrix that is
% square.
%
%INPUTS: A A numRowXnumCol matrix that is generally not square.
%
%OUTPUTS: S A lower-triangular matrix such that S*S'=A*A'. If
% numCol>=numRow, then S is a square numRowXnumRow matrix.
% Otherwise, S is a numRowXnumCol matrix.
%
%This is the tria function needed for various steps in the cubature Kalman
%filter and the square root Kalman filter. It is described in [1]. It has
%been slightly modified from the paper so that the diagonal elements remain
%positive.
%
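%EXAMPLE (illustrative):
% A=randn(3,5);
% S=tria(A);
% norm(S*S'-A*A') %This should be on the order of machine epsilon.
%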
%REFERENCES:
%[1] D. F. Crouse, "Basic tracking using nonlinear 3D monostatic and
% bistatic measurements," IEEE Aerospace and Electronic Systems
% Magazine, vol. 29, no. 8, Part II, pp. 4-53, Aug. 2014.
%
%July 2012 David F. Crouse, Naval Research Laboratory, Washington D.C.
%(UNCLASSIFIED) DISTRIBUTION STATEMENT A. Approved for public release.
[~,R]=qr(A',0);
S=R';
%Make the diagonal elements all positive.
sel=diag(S)<0;
S(:,sel)=-S(:,sel);
end
%LICENSE:
%
%The source code is in the public domain and not licensed or under
%copyright. The information and software may be used freely by the public.
%As required by 17 U.S.C. 403, third parties producing copyrighted works
%consisting predominantly of the material produced by U.S. government
%agencies must provide notice with such work(s) identifying the U.S.
%Government material incorporated and stating that such material is not
%subject to copyright protection.
%
%Derived works shall not identify themselves in a manner that implies an
%endorsement by or an affiliation with the Naval Research Laboratory.
%
%RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE
%SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY THE NAVAL
%RESEARCH LABORATORY FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE ACTIONS
%OF RECIPIENT IN THE USE OF THE SOFTWARE.
|
{"author": "USNavalResearchLaboratory", "repo": "TrackerComponentLibrary", "sha": "9f6e329de5be06a371757c4b853200beb6def2d0", "save_path": "github-repos/MATLAB/USNavalResearchLaboratory-TrackerComponentLibrary", "path": "github-repos/MATLAB/USNavalResearchLaboratory-TrackerComponentLibrary/TrackerComponentLibrary-9f6e329de5be06a371757c4b853200beb6def2d0/Mathematical_Functions/Basic_Matrix_Operations/tria.m"}
|
# Problem 2 - Project Euler
# http://projecteuler.net/index.php?section=problems&id=2
function fibevensum(a, b, sum, xmax)
if a >= xmax
sum
elseif a % 2 == 0
fibevensum(b, a + b, sum + a, xmax)
else
fibevensum(b, a + b, sum, xmax)
end
end
println(fibevensum(1,2,0, 4000000))
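# Prints 4613732, the sum of the even Fibonacci numbers below four million.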
|
{"hexsha": "85966f09735f532af7b92b02f6e75e75ff05907d", "size": 316, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Julia/problem002.jl", "max_stars_repo_name": "emergent/ProjectEuler", "max_stars_repo_head_hexsha": "ec1c92cc47fde80efddeb0346d9b0fa511df1f00", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Julia/problem002.jl", "max_issues_repo_name": "emergent/ProjectEuler", "max_issues_repo_head_hexsha": "ec1c92cc47fde80efddeb0346d9b0fa511df1f00", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Julia/problem002.jl", "max_forks_repo_name": "emergent/ProjectEuler", "max_forks_repo_head_hexsha": "ec1c92cc47fde80efddeb0346d9b0fa511df1f00", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.5714285714, "max_line_length": 57, "alphanum_fraction": 0.5981012658, "num_tokens": 115}
|
# Simple 1D GP classification example
import time
import numpy as np
import matplotlib.pyplot as plt
import GPpref
import plot_tools as ptt
from active_learners import ActiveLearner, UCBLatent, PeakComparitor, LikelihoodImprovement, ABSThresh, UCBAbsRel
import test_data
import pickle
class Learner(object):
def __init__(self, model_type, obs_arguments):
self.model_type = model_type
self.obs_arguments = obs_arguments
def build_model(self, training_data):
self.model = self.model_type(**training_data)
def wrms(y_true, y_est, weight=True):
if weight:
w = y_true
else:
w = 1.0
return np.sqrt(np.mean(((y_true - y_est)*w)**2))
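# ObsObject is referenced below but never defined in this file; a minimal
# stand-in (its field layout is an assumption) so the script can run:
class ObsObject(object):
    def __init__(self, *obs_tuple):
        self.obs_tuple = obs_tuple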
plt.rc('font',**{'family':'serif','sans-serif':['Computer Modern Roman']})
plt.rc('text', usetex=True)
# log_hyp = np.log([0.1,0.5,0.1,10.0]) # length_scale, sigma_f, sigma_probit, v_beta
# log_hyp = np.log([0.07, 0.75, 0.25, 1.0, 28.1])
log_hyp = np.log([0.05, 1.5, 0.09, 2.0, 50.0])
np.random.seed(10)
n_rel_train = 1
n_abs_train = 0
rel_sigma = 0.02
delta_f = 1e-5
beta_sigma = 0.8
beta_v = 100.0
n_xtest = 101
n_best_points = 15
n_mcsamples = 1000
n_ysamples = 101
n_trials = 100
n_rel_samples = 5
n_queries = 20
# Define polynomial function to be modelled
random_wave = test_data.VariableWave([0.6, 1.0], [5.0, 10.0], [0.0, 1.0], [10.0, 20.0])
nowstr = time.strftime("%Y_%m_%d-%H_%M")
data_dir = 'data/' + nowstr + '/'
ptt.ensure_dir(data_dir)
print "Data will be saved to: {0}".format(data_dir)
# True function
x_plot = np.linspace(0.0,1.0,n_xtest,dtype='float')
x_test = np.atleast_2d(x_plot).T
# Construct active learner object
learners = [Learner(ActiveLearner, {'p_rel': 0.5, 'n_rel_samples': n_rel_samples}), # 'Random (rel and abs)',
Learner(ActiveLearner, {'p_rel': 1.0, 'n_rel_samples': n_rel_samples}), # 'Random (rel)',
Learner(ActiveLearner, {'p_rel': 0.0, 'n_rel_samples': n_rel_samples}), # 'Random (abs)',
Learner(UCBLatent, {'gamma': 2.0, 'n_test': 100}), # 'UCBLatent'
Learner(UCBAbsRel, { 'n_test': 100, 'p_rel': 0.5, 'n_rel_samples': n_rel_samples, 'gamma': 2.0, 'tau':5.0}), # 'UCBCombined',
# Learner(ABSThresh, {'n_test': 100, 'p_thresh': 0.7}), # 'ABSThresh'
# Learner(PeakComparitor, {'gamma': 2.0, 'n_test': 50, 'n_rel_samples': n_rel_samples}), # 'PeakComparitor'
# Learner(LikelihoodImprovement, {'req_improvement': 0.60, 'n_test': 50, 'gamma': 2.0, 'n_rel_samples': n_rel_samples, 'p_thresh': 0.7}) # 'LikelihoodImprovement'
]
names = ['Random (rel and abs)', 'Random (rel)', 'Random (abs)', 'UCBLatent (abs)', 'UCBCombined (rel and abs)']
n_learners = len(learners)
obs_array = [{'name': name, 'obs': []} for name in names]
wrms_results = np.zeros((n_learners, n_queries+1, n_trials))
true_pos_results = np.zeros((n_learners, n_queries+1, n_trials), dtype='int')
selected_error = np.zeros((n_learners, n_queries+1, n_trials))
for trial_number in range(n_trials):
print 'Trial {0}'.format(trial_number)
random_wave.randomize(print_vals=True)
rel_obs_fun = GPpref.RelObservationSampler(random_wave.out, GPpref.PrefProbit(sigma=rel_sigma))
abs_obs_fun = GPpref.AbsObservationSampler(random_wave.out, GPpref.AbsBoundProbit(sigma=beta_sigma, v=beta_v))
f_true = abs_obs_fun.f(x_test)
y_abs_true = abs_obs_fun.mean_link(x_test)
best_points = np.argpartition(y_abs_true.flatten(), -n_best_points)[-n_best_points:]
best_points_set = set(best_points)
abs_y_samples = np.atleast_2d(np.linspace(0.01, 0.99, n_ysamples)).T
p_abs_y_true = abs_obs_fun.observation_likelihood_array(x_test, abs_y_samples)
p_rel_y_true = rel_obs_fun.observation_likelihood_array(x_test)
# Initial data
x_rel, uvi_rel, uv_rel, y_rel, fuv_rel = rel_obs_fun.generate_n_observations(n_rel_train, n_xdim=1)
x_abs, y_abs, mu_abs = abs_obs_fun.generate_n_observations(n_abs_train, n_xdim=1)
training_data = {'x_rel': x_rel, 'uvi_rel': uvi_rel, 'x_abs': x_abs, 'y_rel': y_rel, 'y_abs': y_abs,
'delta_f': delta_f, 'rel_likelihood': GPpref.PrefProbit(),
'abs_likelihood': GPpref.AbsBoundProbit()}
# Get initial solution
for nl, learner in enumerate(learners):
learner.build_model(training_data)
learner.model.set_hyperparameters(log_hyp)
f = learner.model.solve_laplace()
fhat, vhat = learner.model.predict_latent(x_test)
y_abs_est = learner.model.abs_posterior_mean(x_test, fhat, vhat)
wrms_results[nl, 0, trial_number] = wrms(y_abs_true, y_abs_est)
for obs_num in range(n_queries):
learners[4].obs_arguments['p_rel'] = max(0.0, (20-obs_num)/20.0)
for nl, learner in enumerate(learners):
next_x = learner.model.select_observation(**learner.obs_arguments)
if next_x.shape[0] == 1:
next_y, next_f = abs_obs_fun.generate_observations(next_x)
learner.model.add_observations(next_x, next_y)
# print 'Abs: x:{0}, y:{1}'.format(next_x[0], next_y[0])
else:
next_y, next_uvi, next_fx = rel_obs_fun.cheat_multi_sampler(next_x)
next_fuv = next_fx[next_uvi][:,:,0]
fuv_rel = np.concatenate((fuv_rel, next_fuv), 0)
learner.model.add_observations(next_x, next_y, next_uvi)
# print 'Rel: x:{0}, best_index:{1}'.format(next_x.flatten(), next_uvi[0, 1])
f = learner.model.solve_laplace()
fhat, vhat = learner.model.predict_latent(x_test)
y_abs_est = learner.model.abs_posterior_mean(x_test, fhat, vhat)
best_points_est = set(np.argpartition(y_abs_est.flatten(), -n_best_points)[-n_best_points:])
true_pos_results[nl, obs_num+1, trial_number] = len(best_points_set.intersection(best_points_est))
wrms_results[nl, obs_num+1, trial_number] = wrms(y_abs_true, y_abs_est)
selected_error[nl, obs_num + 1, trial_number] = wrms(y_abs_true[best_points], y_abs_est[best_points], weight=False)
print true_pos_results[:, obs_num+1, trial_number]
print wrms_results[:, obs_num+1, trial_number]
for nl, learner in enumerate(learners):
obs_tuple = learner.model.get_observations()
obs_array[nl]['obs'].append(ObsObject(*obs_tuple))
with open(data_dir+'wrms.pkl', 'wb') as fh:
pickle.dump(wrms_results, fh)
with open(data_dir+'true_pos.pkl', 'wb') as fh:
pickle.dump(true_pos_results, fh)
with open(data_dir+'selected_error.pkl', 'wb') as fh:
pickle.dump(selected_error, fh)
with open(data_dir+'obs.pkl', 'wb') as fh:
pickle.dump(obs_array, fh)
f0, ax0 = plt.subplots()
hl = ax0.plot(np.arange(n_queries+1), np.mean(wrms_results, axis=2).T)
f0.legend(hl, names)
f1, ax1 = plt.subplots()
hl1 = ax1.plot(np.arange(n_queries+1), np.mean(true_pos_results, axis=2).T)
f1.legend(hl1, names)
f2, ax2 = plt.subplots()
hl2 = ax2.plot(np.arange(n_queries+1), np.mean(selected_error, axis=2).T)
f2.legend(hl2, names)
plt.show()
|
{"hexsha": "51566ce1e85f63e81c5759927d8d6d425be44987", "size": 7118, "ext": "py", "lang": "Python", "max_stars_repo_path": "active_statruns.py", "max_stars_repo_name": "nrjl/GPN", "max_stars_repo_head_hexsha": "c7bd98d69e075ef05bcb2a443c02a71a916a71f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "active_statruns.py", "max_issues_repo_name": "nrjl/GPN", "max_issues_repo_head_hexsha": "c7bd98d69e075ef05bcb2a443c02a71a916a71f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "active_statruns.py", "max_forks_repo_name": "nrjl/GPN", "max_forks_repo_head_hexsha": "c7bd98d69e075ef05bcb2a443c02a71a916a71f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.369047619, "max_line_length": 176, "alphanum_fraction": 0.676594549, "include": true, "reason": "import numpy", "num_tokens": 2166}
|
#include <boost/algorithm/string.hpp>
#include <ros/time.h>
#include <tf/tf.h>
#include <tf_conversions/tf_eigen.h>
#include <geometry_msgs/TwistStamped.h>
#include <geometry_msgs/Pose.h>
#include <pluginlib/class_list_macros.h>
#include <cnr_logger/cnr_logger_macros.h>
#include <cnr_cartesian_velocity_controller/cnr_cartesian_velocity_controller.h>
PLUGINLIB_EXPORT_CLASS(cnr::control::CartesianVelocityController, controller_interface::ControllerBase)
namespace cnr
{
namespace control
{
/**
* @brief CartesianVelocityController::CartesianVelocityController
*/
inline CartesianVelocityController::CartesianVelocityController()
{
}
/**
* @brief CartesianVelocityController::doInit
* @return
*/
inline bool CartesianVelocityController::doInit()
{
//INIT PUB/SUB
std::string setpoint_topic_name;
setpoint_topic_name = this->getControllerNamespace() + "/target_cart_teleop";
  this->setKinUpdatePeriod(this->m_sampling_period); // align the forward-kinematics update period
                                                     // with the control period, so chainCommand() stays synchronized and up to date
  if(!this->getControllerNh().getParam("target_twist_topic",setpoint_topic_name))
    CNR_WARN(this->logger(),"target_twist_topic not set. Using the default value.");
this->template add_subscriber<geometry_msgs::TwistStamped>(
setpoint_topic_name,5,boost::bind(&CartesianVelocityController::twistSetPointCallback,this,_1), false);
this->setPriority(this->QD_PRIORITY);
  this->setCommandVelocity(0.0*this->getVelocity()); // not strictly needed, already set in enterStarting()
this->setCommandPosition(this->getPosition());
if (!this->getControllerNh().getParam("max_cartesian_linear_speed",max_cart_lin_vel_))
{
CNR_INFO(this->logger(),this->getControllerNamespace()<<"/max_cartesian_linear_speed not defined, using 0.25 m/s");
max_cart_lin_vel_=0.25;
}
if (!this->getControllerNh().getParam("max_cartesian_linear_acceleration",max_cart_lin_acc_))
{
CNR_INFO(this->logger(),this->getControllerNamespace()<<"/max_cartesian_linear_acceleration not defined, using 0.75 m/s^2");
max_cart_lin_acc_=0.75;
}
if (!this->getControllerNh().getParam("max_cartesian_angular_speed",max_cart_ang_vel_))
{
CNR_INFO(this->logger(),this->getControllerNamespace()<<"/max_cartesian_angular_speed not defined, using 0.5 rad/s");
max_cart_ang_vel_=0.5;
}
if (!this->getControllerNh().getParam("max_cartesian_angular_acceleration",max_cart_ang_acc_))
{
CNR_INFO(this->logger(),this->getControllerNamespace()<<"/max_cartesian_angular_acceleration not defined, using 1.5 rad/s^2");
max_cart_ang_acc_=1.5;
}
CNR_RETURN_TRUE(this->logger());
}
/**
* @brief CartesianVelocityController::doStarting
* @param time
*/
inline bool CartesianVelocityController::doStarting(const ros::Time& /*time*/)
{
CNR_TRACE_START(this->logger(),"Starting Controller");
  this->setCommandVelocity(0.0*this->getVelocity()); // not strictly needed, already set in enterStarting()
this->setCommandPosition(this->getPosition());
last_twist_of_in_b_ = Eigen::Vector6d::Zero();
twist_of_t_in_b_ = Eigen::Vector6d::Zero();
CNR_RETURN_TRUE(this->logger());
}
/**
* @brief CartesianVelocityController::stopping
* @param time
*/
inline bool CartesianVelocityController::doStopping(const ros::Time& /*time*/)
{
CNR_TRACE_START(this->logger(),"Stopping Controller");
CNR_RETURN_TRUE(this->logger());
}
/**
* @brief CartesianVelocityController::doUpdate
* @param time
* @param period
* @return
*/
inline bool CartesianVelocityController::doUpdate(const ros::Time& /*time*/, const ros::Duration& period)
{
CNR_TRACE_START_THROTTLE_DEFAULT(this->logger());
std::stringstream report;
m_mtx.lock();
Eigen::Vector6d twist_of_t_in_b = twist_of_t_in_b_;
m_mtx.unlock();
if (twist_of_t_in_b.block(0,0,3,1).norm() > max_cart_lin_vel_)
twist_of_t_in_b *= max_cart_lin_vel_/twist_of_t_in_b.norm();
if (twist_of_t_in_b.block(3,0,3,1).norm()>max_cart_ang_vel_)
twist_of_t_in_b*=max_cart_ang_vel_/twist_of_t_in_b.norm();
Eigen::Vector6d Dtwist_of_t_in_b;
if (period.toSec()>0.0)
{
Dtwist_of_t_in_b = (twist_of_t_in_b-last_twist_of_in_b_)/period.toSec();
double scaling=1.0;
if (Dtwist_of_t_in_b.block(0,0,3,1).norm()>max_cart_lin_acc_)
scaling=max_cart_lin_acc_/Dtwist_of_t_in_b.norm();
if (Dtwist_of_t_in_b.block(3,0,3,1).norm()>max_cart_ang_acc_)
scaling=std::min(scaling,max_cart_ang_acc_/Dtwist_of_t_in_b.norm());
Dtwist_of_t_in_b*=scaling;
twist_of_t_in_b=last_twist_of_in_b_+Dtwist_of_t_in_b*period.toSec();
}
else
{
twist_of_t_in_b = Eigen::Vector6d::Zero( );
last_twist_of_in_b_ = Eigen::Vector6d::Zero( );
}
rosdyn::VectorXd old_vel_sp = this->getCommandVelocity();
rosdyn::VectorXd pos_sp = this->getCommandPosition();
Eigen::Matrix6Xd J_of_t_in_b;
J_of_t_in_b=this->chainCommand().toolJacobian(); // CHECK IF CORRECT
Eigen::FullPivLU<Eigen::MatrixXd> pinv_J(J_of_t_in_b);
pinv_J.setThreshold ( 1e-2 );
Eigen::JacobiSVD<Eigen::MatrixXd> svd(J_of_t_in_b, Eigen::ComputeThinU | Eigen::ComputeThinV);
auto sv = svd.singularValues();
CNR_WARN_COND_THROTTLE(this->logger(),
(sv(sv.rows()-1)==0) || (sv(0)/sv(sv.rows()-1) > 1e2), 2, "SINGULARITY POINT" );
if(pinv_J.rank()<6)
{
CNR_WARN_THROTTLE(this->logger(),2,"rank: "<<pinv_J.rank()<<"\nJacobian\n"<<J_of_t_in_b);
}
rosdyn::VectorXd vel_sp = svd.solve(twist_of_t_in_b);
if(rosdyn::saturateSpeed(this->chainNonConst(),vel_sp,old_vel_sp,
this->getCommandPosition(),period.toSec(), 1.0, true, &report)) // CHECK!
{
CNR_DEBUG_THROTTLE(this->logger(), 2.0, "\n" << report.str() );
}
Eigen::Vector6d twist_of_t_in_b_command=J_of_t_in_b*vel_sp;
Eigen::Vector6d versor=twist_of_t_in_b.normalized();
Eigen::Vector6d parallel_twist=twist_of_t_in_b_command.dot(versor)*versor;
Eigen::Vector6d perpendicular_twist=twist_of_t_in_b_command -parallel_twist;
if (perpendicular_twist.norm()>1e-6)
{
vel_sp*=1e-6/perpendicular_twist.norm();
CNR_WARN_THROTTLE(this->logger(),1,"saturating velocity, direction error (perpendicular norm = " << perpendicular_twist.norm() << ") due to singularity and joint limits");
CNR_DEBUG_THROTTLE(this->logger(),1,
"twist_of_t_in_b = " << twist_of_t_in_b.transpose() << std::endl <<
"twist_of_t_in_b_command = " << twist_of_t_in_b_command.transpose() << std::endl <<
"parallel_twist velocity = " << parallel_twist.transpose() << std::endl <<
"perpedicular velocity = " << perpendicular_twist.transpose()
);
}
last_twist_of_in_b_=J_of_t_in_b*vel_sp;
if(rosdyn::saturateSpeed(this->chainNonConst(),vel_sp,old_vel_sp,
this->getCommandPosition(),period.toSec(), 1.0, true, &report)) // CHECK!
{
CNR_DEBUG_THROTTLE(this->logger(), 2.0, "\n" << report.str() );
}
pos_sp = this->getCommandPosition() + vel_sp * period.toSec();
if(rosdyn::saturatePosition(this->chainNonConst(),pos_sp, &report))
{
CNR_DEBUG_THROTTLE(this->logger(), 2.0, "\n" << report.str() );
}
last_twist_of_in_b_=J_of_t_in_b*vel_sp;
this->setCommandPosition( pos_sp );
this->setCommandVelocity( vel_sp );
CNR_RETURN_TRUE_THROTTLE_DEFAULT(this->logger());
}
/**
* @brief CartesianVelocityController::twistSetPointCallback
* @param msg
*/
inline void CartesianVelocityController::twistSetPointCallback(const geometry_msgs::TwistStampedConstPtr &msg)
{
Eigen::Vector6d twist_of_t_in_b = Eigen::Vector6d::Zero( );
std::string base_link = this->chain().getLinksName().front();
try
{
CNR_DEBUG_THROTTLE(this->logger(), 2, "[ " << this->getControllerNamespace() << " ] >>>>>>>>>> TWIST TARGET TARGET RECEIVED!");
Eigen::Vector6d twist = Eigen::Vector6d::Zero( );
twist (0) = msg->twist.linear.x;
twist (1) = msg->twist.linear.y;
twist (2) = msg->twist.linear.z;
twist (3) = msg->twist.angular.x;
twist (4) = msg->twist.angular.y;
twist (5) = msg->twist.angular.z;
if(std::isnan(twist.norm()))
{
CNR_WARN_THROTTLE(this->logger(), 2, "[ " << this->getControllerNamespace()
<<" ] SAFETY CHECK - Received a Twist with nan values... superimposed to zero!" );
twist = Eigen::Vector6d::Zero();
}
CNR_DEBUG_THROTTLE( this->logger(), 2, "[ " << this->getControllerNamespace()
<<" ] Reference Twist {" << msg->header.frame_id << "} : " << twist.transpose() );
std::string frame_id = boost::to_lower_copy( msg->header.frame_id);
Eigen::Affine3d Tbt = this->chainCommand().toolPose();
if ( frame_id == "tool" )
{
twist_of_t_in_b = rosdyn::spatialRotation( twist, Tbt.rotation());
}
else if ( frame_id == "base" )
{
twist_of_t_in_b = twist;
}
else
{
tf::StampedTransform TF_T_bf;
CNR_DEBUG_THROTTLE(this->logger(), 2, "[ "
<< this->getControllerNamespace() << " ] listening to transform between "<<base_link<<" and "
<<msg->header.frame_id);
listener_.waitForTransform ( base_link, msg->header.frame_id, ros::Time(0), ros::Duration ( 10.0 ) );
listener_.lookupTransform ( base_link, msg->header.frame_id, ros::Time(0), TF_T_bf);
Eigen::Affine3d T_bf;
tf::transformTFToEigen(TF_T_bf, T_bf);
twist_of_t_in_b = rosdyn::spatialRotation( twist, T_bf.rotation());
}
CNR_DEBUG_THROTTLE( this->logger(), 2, "[ " << this->getControllerNh().getNamespace()
<< " ] Reference Twist {base} : " << twist_of_t_in_b_.transpose() );
}
catch(tf::TransformException& e)
{
CNR_WARN(this->logger(), "[ " << this->getControllerNamespace() << " ] Listening to transform between "<<base_link
<<" and "<<msg->header.frame_id <<" failed" );
twist_of_t_in_b = Eigen::Vector6d::Zero();
}
catch(std::exception& e)
{
CNR_WARN(this->logger(), "[ " << this->getControllerNamespace() << " ]Exception "<< e.what() );
twist_of_t_in_b = Eigen::Vector6d::Zero();
}
catch(...)
{
CNR_WARN(this->logger(), "[ " << this->getControllerNamespace() << " ] unhandled excpetion..");
twist_of_t_in_b = Eigen::Vector6d::Zero();
}
CNR_DEBUG_THROTTLE(this->logger(), 2, "[ " << this->getControllerNamespace() << " ] <<<<<<<<< TWIST TARGET TARGET RECEIVED!" );
std::lock_guard<std::mutex> lock(m_mtx);
twist_of_t_in_b_=twist_of_t_in_b;
return;
}
}
}
|
{"hexsha": "073085ae122f47684e7799b4101434ccd69f7cc0", "size": 10668, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cnr_cartesian_velocity_controller/src/cnr_cartesian_velocity_controller/cnr_cartesian_velocity_controller.cpp", "max_stars_repo_name": "CNR-STIIMA-IRAS/cnr_ros_controllers", "max_stars_repo_head_hexsha": "c4bbfa4c2968da49b7b1f17ee91cae23af62e793", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-04-22T19:53:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-22T19:53:59.000Z", "max_issues_repo_path": "cnr_cartesian_velocity_controller/src/cnr_cartesian_velocity_controller/cnr_cartesian_velocity_controller.cpp", "max_issues_repo_name": "CNR-STIIMA-IRAS/cnr_ros_controllers", "max_issues_repo_head_hexsha": "c4bbfa4c2968da49b7b1f17ee91cae23af62e793", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2022-02-04T16:46:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T16:46:33.000Z", "max_forks_repo_path": "cnr_cartesian_velocity_controller/src/cnr_cartesian_velocity_controller/cnr_cartesian_velocity_controller.cpp", "max_forks_repo_name": "CNR-STIIMA-IRAS/cnr_ros_controllers", "max_forks_repo_head_hexsha": "c4bbfa4c2968da49b7b1f17ee91cae23af62e793", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0405405405, "max_line_length": 176, "alphanum_fraction": 0.6797900262, "num_tokens": 2988}
|
# -*- coding: utf-8 -*-
"""
computes the spectral decrease from the magnitude spectrum
Args:
X: spectrogram (dimension FFTLength X Observations)
f_s: sample rate of audio data
Returns:
    vsc: spectral decrease
"""
import numpy as np
def FeatureSpectralDecrease(X,f_s):
    # compute the index (weight) vector 1/k, with bin 0 mapped to weight 1
    kinv = np.arange(0, X.shape[0])
    kinv[0] = 1
    kinv = 1 / kinv
    norm = X.sum(axis=0, keepdims=True)
    ind = np.argwhere(norm == 0)
    if ind.size:
        norm[norm == 0] = 1 + X[0, ind[:, 1]]  # guard all-zero frames against division by zero
    norm = norm - X[0, :]
    # compute slope
    vsc = np.dot(kinv, X - X[0, :]) / norm
return (vsc)
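
# Illustrative usage sketch (values are hypothetical, not part of the original
# module): spectral decrease of a tiny 4-bin, 2-frame magnitude spectrogram.
if __name__ == "__main__":
    X_demo = np.array([[1.0, 2.0],
                       [0.8, 1.0],
                       [0.6, 0.5],
                       [0.4, 0.25]])
    # one value per frame; more negative means faster decay over the bins
    print(FeatureSpectralDecrease(X_demo, f_s=44100))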
|
{"hexsha": "21752373668147f6f3afc92577cc1ed8172ca426", "size": 695, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyACA/FeatureSpectralDecrease.py", "max_stars_repo_name": "RichardYang40148/pyACA-1", "max_stars_repo_head_hexsha": "870d100ed232cca5a890570426116f70cd0736c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyACA/FeatureSpectralDecrease.py", "max_issues_repo_name": "RichardYang40148/pyACA-1", "max_issues_repo_head_hexsha": "870d100ed232cca5a890570426116f70cd0736c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyACA/FeatureSpectralDecrease.py", "max_forks_repo_name": "RichardYang40148/pyACA-1", "max_forks_repo_head_hexsha": "870d100ed232cca5a890570426116f70cd0736c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.71875, "max_line_length": 93, "alphanum_fraction": 0.5942446043, "include": true, "reason": "import numpy", "num_tokens": 216}
|
# ----------------------------------------
# create fastapi app
# ----------------------------------------
from fastapi import FastAPI, File ,UploadFile
app = FastAPI()
# ----------------------------------------
# setup templates folder
# ----------------------------------------
from fastapi.templating import Jinja2Templates
templates = Jinja2Templates(directory="templates")
# ----------------------------------------
# setup static files folder
# ----------------------------------------
from fastapi.staticfiles import StaticFiles
app.mount("/static", StaticFiles(directory="static"), name="static")
# can use images as it is eg. <img src='static-img.jpg'>
# ----------------------------------------
# define structure for requests (Pydantic & more)
# ----------------------------------------
from fastapi import Request # for get
from pydantic import BaseModel # for post
class b64Request(BaseModel):
b64: str
# ----------------------------------------
# custom
# ----------------------------------------
import os, time, cv2, shutil, base64
import numpy as np
from ocr import ocr
from PIL import Image
from glob import glob
import matplotlib.pyplot as plt
from detect.ctpn_utils import resize
from io import BytesIO
height = 720
def single_pic_proc(rgbimg):
""" input is numpy arr """
result, image_framed = ocr(rgbimg)
return result, image_framed
def get_rgb_from_spooled_tempfile(spooled_tempfile):
byte_img = spooled_tempfile.read() # type `byte`
bgrimg = cv2.imdecode(np.frombuffer(byte_img, np.uint8), 1) # 1 - BGR
return cv2.cvtColor(bgrimg, cv2.COLOR_BGR2RGB)
def plot_on_img(img, res):
for _, v in res.items():
# (0,1) + ---------------+ (2,3)
# | text |
# (4,5) + ---------------+ (6,7)
# 8 - acc score
# img = resize(img, height=height)
# cv2.line(img, (int(v[0][0]), int(v[0][1])), (int(v[0][2]), int(v[0][3])), (0, 0, 255), 2)
# cv2.line(img, (int(v[0][0]), int(v[0][1])), (int(v[0][4]), int(v[0][5])), (0, 0, 255), 2)
# cv2.line(img, (int(v[0][6]), int(v[0][7])), (int(v[0][2]), int(v[0][3])), (0, 0, 255), 2)
# cv2.line(img, (int(v[0][4]), int(v[0][5])), (int(v[0][6]), int(v[0][7])), (0, 0, 255), 2)
cv2.putText(img, v[1],#+f"({v[0][8]:.2f})",
(int(v[0][0]), int(v[0][1])),
cv2.FONT_HERSHEY_SIMPLEX,
0.7, # font size
(255,0,0),
1,
cv2.LINE_AA
)
#cv2.imwrite('xxyy.png', img)
bg = Image.fromarray(np.uint8(img)).convert('RGB')
outputBuffer = BytesIO()
bg.save(outputBuffer, format='JPEG')
bgBase64Data = outputBuffer.getvalue()
return 'data:image/jpeg;base64,' + base64.b64encode(bgBase64Data).decode()
#return base64.b64encode(img.tobytes())
# ==============================================================================================
# Application Interface
# ==============================================================================================
@app.get("/")
def api_home(request: Request):
"""
home page to display all real time values
"""
context = {
"request": request
}
return templates.TemplateResponse("home.html", context)
@app.post("/display/")
def display(request: b64Request):
"""
    renders the OCR output page for a base64-encoded image
    """
    context = {
        "request": 'success',
        'b64': request.b64
}
return templates.TemplateResponse("out.html", context)
@app.post("/uploadfile/")
def create_upload_file(file: UploadFile = File(...)):
    if file.filename.endswith(('jpg', 'jpeg', 'png')):
img = get_rgb_from_spooled_tempfile(file.file)
res, imframed = single_pic_proc(img)
b64_byte_buffer = plot_on_img(imframed, res)
context = {
"request": 'success',
"buffer": b64_byte_buffer
}
return context
else:
print('image format exception')
return {"status": 'image format exception'}
|
{"hexsha": "e3c42f09b3e36856f3a206e30d2a43a1b1c960d5", "size": 4060, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "rahulct-commits/ocr.pytorch", "max_stars_repo_head_hexsha": "edc766312a3953f225bbb329f5efa75c3b253210", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "rahulct-commits/ocr.pytorch", "max_issues_repo_head_hexsha": "edc766312a3953f225bbb329f5efa75c3b253210", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "rahulct-commits/ocr.pytorch", "max_forks_repo_head_hexsha": "edc766312a3953f225bbb329f5efa75c3b253210", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5263157895, "max_line_length": 104, "alphanum_fraction": 0.5233990148, "include": true, "reason": "import numpy", "num_tokens": 1033}
|
import sys
import os
import socket
HOME = os.environ['HOME']
sys.path.insert(1, HOME + '/github/StreamingSVM')
import numpy as np
from operations import Print
import time
from comms import Communication
from distributed import DistributedDataLoader
from api import Constant
from api import ExperimentObjectPSGDItems
# per core data distribution testing
def stats(exp_name='', acc=0, time=0, world_size=1, beta1=0.93, beta2=0.99, batch_size=10, epochs=10, repetition=10):
    Print.Print.result1("Repetition " + str(repetition) + ", DataSet : " +
                        exp_name + " Parallel SGD SVM Accuracy : " + str(acc) + "%" + ", " + str(time) + ", Epochs : " + str(epochs) + ", " + str(beta1) + ", " + str(beta2))
fp = open("logs/psgd/adam/" + socket.gethostname() + "_" + exp_name + "_batch_size_" + str(batch_size) + "_cores_" + str(
world_size) + "_psgd_adam_pcd_results.txt", "a")
# fp.write("alpha : " + str(self.alpha) + ", epochs : " + str(self.epochs) + ", accuracy : " + str(self.acc) + "%" + ", time : " + str(self.training_time) + " s\n")
fp.write(
str(epochs) + ", " + str(batch_size) + ", " + str(beta1) + ", " + str(beta2) + ", " + str(acc) + ", " + str(
time) + "\n")
fp.close()
comms = Communication.Communication()
rank = comms.comm.Get_rank()
world_size = comms.comm.Get_size()
T = 100
M = world_size
expItem = ExperimentObjectPSGDItems.ExperimentObjectsPSGDItems()
experiments = expItem.getlist()
for experiment in experiments:
DATA_SET =experiment.dataset
DATA_SOURCE = experiment.data_soruce
FEATURES = experiment.features
SAMPLES = experiment.samples
SPLIT = experiment.split
TRAINING_FILE = experiment.training_file
TESTING_FILE = experiment.testing_file
TRAINING_SAMPLES = experiment.training_samples
TESTING_SAMPLES = experiment.testing_samples
    REPETITIONS = 1
dis = DistributedDataLoader.DistributedDataLoader(source_file=DATA_SOURCE,
n_features=FEATURES,
n_samples=SAMPLES, world_size=world_size,
rank=rank,split=SPLIT, testing_file=TESTING_FILE,
train_samples=TRAINING_SAMPLES, test_samples=TESTING_SAMPLES)
x_all, y_all = dis.load_training_data_chunks()
X_test, y_test = dis.load_testing_data()
m1 = len(x_all[0])
epsilon = 0.00000001
w = np.zeros(m1, 'f')
w_ar = np.zeros(m1, 'f')
gradient = np.zeros(m1, 'f')
gradient_r = np.zeros(m1, 'f')
w_list = []
isComplete = False
v = np.zeros(w.shape, 'f')
r = np.zeros(w.shape, 'f')
beta1_range = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.90, 0.93, 0.95, 0.99, 0.999]
beta2_range = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.90, 0.93, 0.95, 0.99, 0.999]
#beta1_range = [0.90]
#beta2_range = [0.93]
X = x_all
y = y_all
for beta1 in beta1_range:
for beta2 in beta2_range:
epochs = np.arange(1, T)
            for rep in np.arange(0, REPETITIONS):
exp_time = 0
exp_time -= time.time()
for epoch in epochs:
m = len(X)
C = int(m / M)
m_real = C * M
                    sample_range = np.arange(0, m_real - 1, M)
                    #print(len(sample_range), M)
                    #if (epoch % 10):
                        #print("Rank " + str(rank) + ", Epoch " + str(epoch))
                    for i in sample_range:
Xi = X[i + rank]
yi = y[i + rank]
condition = yi * np.dot(Xi, w)
alpha = 1.0 / (1.0 + float(epoch))
                        coefficient = 1.0 / (1.0 + float(epoch))
if (condition < 1):
gradient = alpha * (-(Xi * yi) + (coefficient * w))
else:
gradient = alpha * (coefficient * w)
v = beta1 * v + (1 - beta1) * gradient
v_hat = v / (1 - beta1 ** epoch)
r = beta2 * r + (1 - beta2) * (np.multiply(gradient, gradient))
r_hat = r / (1 - beta2 ** epoch)
w = w - alpha * np.multiply((v_hat), 1.0 / (np.sqrt(r_hat) + epsilon))
comms.allreduce(input=w, output=w_ar, op=comms.mpi.SUM, dtype=comms.mpi.FLOAT)
w = w_ar / M
comms.bcast(input=w, dtype=comms.mpi.FLOAT, root=0)
if (epoch == T - 1):
isComplete = True
exp_time += time.time()
if (rank == 0 and isComplete):
labels = []
for x in X_test:
label = np.sign(np.dot(w.T, x))
labels.append(label)
y_pred = np.array(labels)
# print(labels)
# print(y_testing)
correct = (y_pred == y_test).sum()
total = len(y_pred)
acc = float(correct) / float(total) * 100.0
print("Acc : ", acc)
                    stats(exp_name=DATA_SET, acc=acc, time=exp_time, world_size=world_size, beta1=beta1, beta2=beta2, batch_size=-1, epochs=T, repetition=rep)
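
# Illustrative sketch (standalone helper, hypothetical values): one
# bias-corrected Adam step on a toy gradient, mirroring the update rule used
# in the epoch loop above; call it manually to inspect a single step.
def _adam_demo():
    beta1, beta2, eps, alpha, t = 0.9, 0.99, 1e-8, 0.1, 1
    g = np.array([0.5, -1.0, 2.0])      # toy gradient
    v = (1 - beta1) * g                 # first moment (initialized at zero)
    r = (1 - beta2) * g * g             # second moment (initialized at zero)
    v_hat = v / (1 - beta1 ** t)        # bias corrections
    r_hat = r / (1 - beta2 ** t)
    step = -alpha * v_hat / (np.sqrt(r_hat) + eps)
    print(step)                         # roughly -alpha * sign(g) on step one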
|
{"hexsha": "b9cab939e820fba1a7e407fa320b63cbd91b7ca5", "size": 5470, "ext": "py", "lang": "Python", "max_stars_repo_path": "psgd/PSGDAdamMulti.py", "max_stars_repo_name": "vibhatha/PSGDSVMPY", "max_stars_repo_head_hexsha": "69ed88f5db8d9a250ee944f44b88e54351f8696f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "psgd/PSGDAdamMulti.py", "max_issues_repo_name": "vibhatha/PSGDSVMPY", "max_issues_repo_head_hexsha": "69ed88f5db8d9a250ee944f44b88e54351f8696f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "psgd/PSGDAdamMulti.py", "max_forks_repo_name": "vibhatha/PSGDSVMPY", "max_forks_repo_head_hexsha": "69ed88f5db8d9a250ee944f44b88e54351f8696f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.76, "max_line_length": 168, "alphanum_fraction": 0.5095063985, "include": true, "reason": "import numpy", "num_tokens": 1432}
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import modi
import time
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
def make_coordinates(image, line_parameters):
slope, intercept = line_parameters
y1 = image.shape[0]
y2 = int(y1*(2/5))
x1 = int((y1 - intercept)/slope)
x2 = int((y2 - intercept)/slope)
return np.array([x1, y1, x2, y2])
def average_slope_intercept(image, lines):
left_fit = []
right_fit = []
for line in lines:
x1, y1, x2, y2 = line.reshape(4)
parameters = np.polyfit((x1, x2), (y1, y2), 1)
slope = parameters[0]
intercept = parameters[1]
if slope < -0.5:
left_fit.append((slope, intercept))
elif 0.5 < slope:
right_fit.append((slope, intercept))
    if (len(left_fit) != 0):
        left_fit_average = np.average(left_fit, axis=0)
    else:
        left_fit_average = (1, 10)
    if (len(right_fit) != 0):
        right_fit_average = np.average(right_fit, axis=0)
    else:
        right_fit_average = (1, 10)
left_line = make_coordinates(image, left_fit_average)
rigth_line = make_coordinates(image, right_fit_average)
return np.array([left_line, rigth_line])
def canny(image):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
canny = cv2.Canny(blur, 50, 150)
return canny
def display_lines(image, lines):
line_image = np.zeros_like(image)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
return line_image
def t_display_lines(image, lines):
line_image = np.zeros_like(image)
if lines is not None:
for line in lines:
x1, y1, x2, y2 = line.reshape(4)
cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
return line_image
def region_of_interest(image):
height = image.shape[0]
polygons = np.array([[(100, height), (600, height), (500, 300), (120,300)]])
mask = np.zeros_like(image)
cv2.fillPoly(mask, polygons, 255)
masked_image = cv2.bitwise_and(image, mask)
return masked_image
def find_vanishing(image, lines):
x11, y11, x12, y12 = lines[0]
x21, y21, x22, y22 = lines[1]
m1 = (y12 - y11) / (x12 - x11)
m2 = (y22 - y21) / (x22 - x21)
cx = int((x11 * m1 - y11 - x21 * m2 + y21) / (m1 - m2))
center = int((x11+x21)/2)
cv2.line(image, (cx, 0), (cx, image.shape[0]), (0, 0, 255), 10)
cv2.putText(image, str(cx), (cx+10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
cv2.line(image, (center, 0), (center, image.shape[0]), (0, 255, 0), 10)
cv2.putText(image, str(center), (center+10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
return image, cx, center
def find_num(image, canny):
    contours, _ = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]  # works on OpenCV 3 and 4
MIN_AREA = 50
MAX_AREA = 5000
MIN_RATIO, MAX_RATIO = 0.5, 1.0
MIN_HEIGHT = 10
dt = 50
number = 10
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
area = w * h
ratio = w / h
if MIN_AREA < area < MAX_AREA \
and MIN_RATIO < ratio < MAX_RATIO \
and MIN_HEIGHT < h:
center_x = int((2 * x + w) / 2)
center_y = int((2 * y + h) / 2)
if ((center_x-dt) > 200) and (center_x < 600) and ((center_y-dt) > 400) and (center_y < 1000):
img = image[center_y-dt:center_y+dt, center_x-dt:center_x+dt]
number = process(img)
cv2.rectangle(image, pt1=(center_x - 50, center_y - 50), pt2=(center_x + 50, center_y + 50), color=(0, 255, 0), thickness=2)
cv2.putText(image, "Number", (x+w, y), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
else:
cv2.rectangle(image, pt1=(x, y), pt2=(x+w, y+h), color=(255, 0, 0), thickness=2)
cv2.putText(image, "No", (x+w, y), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 1)
cv2.imshow('res', image)
return number
def find_way(vanishing, center):
diff = vanishing - center
print(diff)
if diff < -70:
left()
elif diff > 70:
right()
else:
forward()
# Initialize MazeRunner, gets MODI class
# Add needed modules
def init_MR(bundle):
print('modules list\n', bundle.modules)
motor = bundle.motors[0]
return len(bundle.modules), motor
# Checks module connection status by comparing module numbers.
def is_connected(curr_num):
if curr_num != module_num:
print('\n--------interrupt!!!---------')
print('Some modules disconnected!!')
return False
else:
return True
# MODI goes forward, gets delay, speed args
def forward(delay=3, speed=100):
motor.speed(0, 0)
time.sleep(0.001)
# if button.clicked() == True:
print('-----forward!!-----')
for _ in range(delay):
# mazeprint(ir.distance())
time.sleep(0.001)
motor.speed(speed, -speed)
time.sleep(0.001)
motor.speed(0, 0)
# MODI turns left, gets delay arg.
def left(delay=1):
motor.speed(0, 0)
time.sleep(0.001)
print('-----left!!-----')
for _ in range(delay):
time.sleep(0.001)
motor.speed(-100, -100)
time.sleep(0.001)
motor.speed(0, 0)
# MODI turns right, gets delay arg.
def right(delay=1):
motor.speed(0, 0)
time.sleep(0.001)
print('-----right!!-----')
for _ in range(delay):
time.sleep(0.001)
motor.speed(100, 100)
time.sleep(0.001)
motor.speed(0, 0)
msg_cnt = 100
def mazeprint(msg, arg=None):
global msg_cnt
db = firestore.client()
doc_ref = db.collection(u'Maze').document(str(msg_cnt))
if arg:
print(msg, arg)
doc_ref.set({
u'Text': str(msg) + " " + str(arg)
})
else:
print(msg)
doc_ref.set({
u'Text': msg
})
msg_cnt = msg_cnt + 1
def delete_collection(coll_ref, batch_size):
docs = coll_ref.limit(batch_size).get()
deleted = 0
for doc in docs:
print(u'Deleting doc {} => {}'.format(doc.id, doc.to_dict()))
doc.reference.delete()
deleted = deleted + 1
if deleted >= batch_size:
return delete_collection(coll_ref, batch_size)
if __name__=="__main__":
# Initialize
cred = credentials.Certificate("./AccountKey.json")
firebase_admin.initialize_app(cred)
delete_collection(firestore.client().collection(u'Maze'), 200)
bundle = modi.MODI()
time.sleep(1)
module_num, motor = init_MR(bundle)
time.sleep(1)
print('MODI Connected!')
# Main
cap = cv2.VideoCapture(-1)
while(cap.isOpened()):
time.sleep(0.01)
_, frame = cap.read()
canny_image = canny(frame)
cropped_image = region_of_interest(canny_image)
lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength=40, maxLineGap=3)
        if lines is None or len(lines) < 2:
continue
averaged_lines = average_slope_intercept(frame, lines)
find_vanishing(frame, averaged_lines)
line_image = t_display_lines(frame, averaged_lines)
vanishing_line, vanishing, center = find_vanishing(line_image, averaged_lines)
combo_image = cv2.addWeighted(frame, 0.8, vanishing_line, 1, 1)
find_way(vanishing, center)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
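
# Illustrative check (standalone, hypothetical coordinates): the x-coordinate
# computed in find_vanishing() is the intersection of the two averaged lane
# lines in two-point form, x = (x11*m1 - y11 - x21*m2 + y21) / (m1 - m2).
#
# x11, y11, x12, y12 = 0, 480, 200, 0      # left lane line
# x21, y21, x22, y22 = 640, 480, 440, 0    # right lane line
# m1 = (y12 - y11) / (x12 - x11)           # -2.4
# m2 = (y22 - y21) / (x22 - x21)           #  2.4
# cx = (x11 * m1 - y11 - x21 * m2 + y21) / (m1 - m2)
# print(cx)                                # 320.0, midway between the lines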
|
{"hexsha": "8ea2258dcd55ac6e9632304c06e8245a982d66cc", "size": 6985, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/EyeCar.py", "max_stars_repo_name": "TheStarkor/Eye-Car", "max_stars_repo_head_hexsha": "e0962cd36effa24cc90935b4364dadf47e1ef2d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/EyeCar.py", "max_issues_repo_name": "TheStarkor/Eye-Car", "max_issues_repo_head_hexsha": "e0962cd36effa24cc90935b4364dadf47e1ef2d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-12-20T15:18:12.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-20T15:27:27.000Z", "max_forks_repo_path": "src/EyeCar.py", "max_forks_repo_name": "TheStarkor/Eye-Car", "max_forks_repo_head_hexsha": "e0962cd36effa24cc90935b4364dadf47e1ef2d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2259414226, "max_line_length": 130, "alphanum_fraction": 0.646241947, "include": true, "reason": "import numpy", "num_tokens": 2261}
|
# encoding='utf-8'
import cv2
import os
import numpy as np
import random
'''
Pick a few car backgrounds in different colors, cut them into small patches,
and paste generated license plates onto the patches so the plates look more realistic.
'''
def show(img, title='untitled'):
    """
    display an image during local testing
    :param img:
    :param title:
    :return:
    """
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
font = FontProperties(fname='/Users/yanmeima/workspace/ocr/crnn/data/data_generator/fonts/simhei.ttf')
    plt.title(title, fontsize='large', fontweight='bold', fontproperties=font)
plt.imshow(img)
plt.show()
def get_patches(img):
dim_w = 460
dim_h = 160
h, w = img.shape[:2]
    # if the image is smaller than one patch (dim_w x dim_h), pad it
if h < dim_h:
        # copyMakeBorder(img, top, bottom, left, right, borderType, color)
        # pad the image border with white
img = cv2.copyMakeBorder(img, 0, dim_h - h, 0, 0, cv2.BORDER_CONSTANT, value=(255,255,255))
if w < dim_w:
img = cv2.copyMakeBorder(img, 0, 0, 0, dim_w - w, cv2.BORDER_CONSTANT, value=(255,255,255))
h, w = img.shape[:2]
    # how many whole patches fit in each dimension
hNum, wNum = int(h / dim_h), int(w / dim_w)
hPatchStart = 0 # if hNum < 4 else 1
wPatchStart = 0 # if wNum < 4 else 1
hPatchEnd = hNum # if hNum < 4 else hNum - 1
wPatchEnd = wNum # if wNum < 4 else wNum - 1
backupIdx = []
    # crop the image into dim_w x dim_h patches
candidate_patches = []
for hIdx in range(hPatchStart, hPatchEnd):
for wIdx in range(wPatchStart, wPatchEnd):
hStart = hIdx * dim_h
wStart = wIdx * dim_w
backupIdx.append((hStart, wStart))
grayCrop = img[hStart:(hStart + dim_h), wStart:(wStart + dim_w)]
candidate_patches.append(grayCrop)
#patch_idxes = np.arange(0, len(candidate_patches))
#print("patch_idxes:",patch_idxes)
return candidate_patches
def main(dir):
patches = []
for file in os.listdir(dir):
path = dir + file
img = cv2.imread(path)
candidate_patches = get_patches(img)
patches = patches + candidate_patches
return patches
if __name__ == "__main__":
bj_dir = "data/bj/"
patches_path = "multi_val/patches/"
patches = main(bj_dir)
i = 0
for p in patches:
i += 1
path = os.path.join(patches_path + str(i) + ".jpg")
cv2.imwrite(path, p)
# # test
# if __name__ == "__main__":
# img_path = "data/bj/blue.jpg"
# path, name = os.path.split(img_path)
# file, ext = os.path.splitext(name)
# img = cv2.imread(img_path)
# candidate_patches = get_patches(img)
#
# i = 0
# for p in candidate_patches:
# i += 1
# cv2.imwrite(os.path.join("data/patches/" + file + "_" + str(i) + ".jpg"), p)
|
{"hexsha": "9985890285ae53fdd5a704f806b4b2302c565169", "size": 2659, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/cut.py", "max_stars_repo_name": "mymsimple/plate_generator", "max_stars_repo_head_hexsha": "cbea92aff070a8691a0394263e8f4b20b3f2c839", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-09-18T02:11:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-06T14:18:36.000Z", "max_issues_repo_path": "utils/cut.py", "max_issues_repo_name": "mymsimple/plate_generator", "max_issues_repo_head_hexsha": "cbea92aff070a8691a0394263e8f4b20b3f2c839", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-31T00:59:03.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-31T05:42:06.000Z", "max_forks_repo_path": "utils/cut.py", "max_forks_repo_name": "mymsimple/plate_generator", "max_forks_repo_head_hexsha": "cbea92aff070a8691a0394263e8f4b20b3f2c839", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-07-15T09:02:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-10T10:50:10.000Z", "avg_line_length": 25.8155339806, "max_line_length": 106, "alphanum_fraction": 0.6081233546, "include": true, "reason": "import numpy", "num_tokens": 868}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provides the Station class.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2013
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from future.utils import python_2_unicode_compatible
import copy
import fnmatch
import warnings
import numpy as np
from obspy import UTCDateTime
from obspy.core.util.obspy_types import ObsPyException, ZeroSamplingRate
from .util import (BaseNode, Equipment, Operator, Distance, Latitude,
Longitude, _unified_content_strings, _textwrap, Site)
@python_2_unicode_compatible
class Station(BaseNode):
"""
From the StationXML definition:
This type represents a Station epoch. It is common to only have a
single station epoch with the station's creation and termination dates
as the epoch start and end dates.
"""
def __init__(self, code, latitude, longitude, elevation, channels=None,
site=None, vault=None, geology=None, equipments=None,
operators=None, creation_date=None, termination_date=None,
total_number_of_channels=None,
selected_number_of_channels=None, description=None,
comments=None, start_date=None, end_date=None,
restricted_status=None, alternate_code=None,
historical_code=None, data_availability=None):
"""
:type channels: list of :class:`~obspy.core.inventory.channel.Channel`
:param channels: All channels belonging to this station.
:type site: :class:`~obspy.core.inventory.util.Site`
:param site: The lexical description of the site
:type latitude: :class:`~obspy.core.inventory.util.Latitude`
:param latitude: The latitude of the station
:type longitude: :class:`~obspy.core.inventory.util.Longitude`
:param longitude: The longitude of the station
:param elevation: The elevation of the station in meter.
:param site: These fields describe the location of the station using
geopolitical entities (country, city, etc.).
:param vault: Type of vault, e.g. WWSSN, tunnel, transportable array,
etc
:param geology: Type of rock and/or geologic formation.
:param equipments: Equipment used by all channels at a station.
:type operators: list of :class:`~obspy.core.inventory.util.Operator`
        :param operators: An operating agency and associated contact persons.
            If there are multiple operators, each one should be encapsulated
            within an Operator tag. Since the Contact element is a generic
            type that represents any contact person, it also has its own
            optional Agency element.
:type creation_date: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param creation_date: Date and time (UTC) when the station was first
installed
:type termination_date: :class:`~obspy.core.utcdatetime.UTCDateTime`,
optional
:param termination_date: Date and time (UTC) when the station was
terminated or will be terminated. A blank value should be assumed
to mean that the station is still active.
:type total_number_of_channels: int
:param total_number_of_channels: Total number of channels recorded at
this station.
:type selected_number_of_channels: int
:param selected_number_of_channels: Number of channels recorded at this
station and selected by the query that produced this document.
:type external_references: list of
:class:`~obspy.core.inventory.util.ExternalReference`
:param external_references: URI of any type of external report, such as
IRIS data reports or dataless SEED volumes.
:type description: str
:param description: A description of the resource
:type comments: list of :class:`~obspy.core.inventory.util.Comment`
:param comments: An arbitrary number of comments to the resource
:type start_date: :class:`~obspy.core.utcdatetime.UTCDateTime`,
optional
:param start_date: The start date of the resource
:type end_date: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param end_date: The end date of the resource
:type restricted_status: str
:param restricted_status: The restriction status
:type alternate_code: str
:param alternate_code: A code used for display or association,
alternate to the SEED-compliant code.
:type historical_code: str
:param historical_code: A previously used code if different from the
current code.
:type data_availability: :class:`~obspy.station.util.DataAvailability`
:param data_availability: Information about time series availability
for the station.
"""
self.latitude = latitude
self.longitude = longitude
self.elevation = elevation
self.channels = channels or []
self.site = site if site is not None else Site()
self.vault = vault
self.geology = geology
self.equipments = equipments or []
self.operators = operators or []
self.creation_date = creation_date
self.termination_date = termination_date
self.total_number_of_channels = total_number_of_channels
self.selected_number_of_channels = selected_number_of_channels
self.external_references = []
super(Station, self).__init__(
code=code, description=description, comments=comments,
start_date=start_date, end_date=end_date,
restricted_status=restricted_status, alternate_code=alternate_code,
historical_code=historical_code,
data_availability=data_availability)
@property
def total_number_of_channels(self):
return self._total_number_of_channels
@total_number_of_channels.setter
def total_number_of_channels(self, value):
if value is not None and value < 0:
msg = "total_number_of_channels cannot be negative."
raise ValueError(msg)
self._total_number_of_channels = value
@property
def selected_number_of_channels(self):
return self._selected_number_of_channels
@selected_number_of_channels.setter
def selected_number_of_channels(self, value):
if value is not None and value < 0:
msg = "selected_number_of_channels cannot be negative."
raise ValueError(msg)
self._selected_number_of_channels = value
def __str__(self):
contents = self.get_contents()
ret = ("Station {station_name}\n"
"\tStation Code: {station_code}\n"
"\tChannel Count: {selected}/{total} (Selected/Total)\n"
"\t{start_date} - {end_date}\n"
"\tAccess: {restricted} {alternate_code}{historical_code}\n"
"\tLatitude: {lat:.2f}, Longitude: {lng:.2f}, "
"Elevation: {elevation:.1f} m\n")
ret = ret.format(
station_name=contents["stations"][0],
station_code=self.code,
selected=self.selected_number_of_channels,
total=self.total_number_of_channels,
start_date=str(self.start_date),
end_date=str(self.end_date) if self.end_date else "",
restricted=self.restricted_status,
lat=self.latitude, lng=self.longitude, elevation=self.elevation,
alternate_code="Alternate Code: %s " % self.alternate_code if
self.alternate_code else "",
historical_code="historical Code: %s " % self.historical_code if
self.historical_code else "")
ret += "\tAvailable Channels:\n"
ret += "\n".join(_textwrap(
", ".join(_unified_content_strings(contents["channels"])),
initial_indent="\t\t", subsequent_indent="\t\t",
expand_tabs=False))
return ret
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def __getitem__(self, index):
return self.channels[index]
def __len__(self):
return len(self.channels)
def get_contents(self):
"""
Returns a dictionary containing the contents of the object.
.. rubric:: Example
>>> from obspy import read_inventory
>>> example_filename = "/path/to/IRIS_single_channel_with_response.xml"
>>> inventory = read_inventory(example_filename)
>>> station = inventory.networks[0].stations[0]
>>> station.get_contents() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
{...}
>>> for (k, v) in sorted(station.get_contents().items()):
... print(k, v[0])
channels ANMO.10.BHZ
stations ANMO (Albuquerque, New Mexico, USA)
"""
site_name = None
if self.site and self.site.name:
site_name = self.site.name
desc = "%s%s" % (self.code, " (%s)" % (site_name if site_name else ""))
content_dict = {"stations": [desc], "channels": []}
for channel in self.channels:
content_dict["channels"].append(
"%s.%s.%s" % (self.code, channel.location_code, channel.code))
return content_dict
@property
def operators(self):
return self._operators
@operators.setter
def operators(self, value):
if not hasattr(value, "__iter__"):
msg = "Operators needs to be an iterable, e.g. a list."
raise ValueError(msg)
if any([not isinstance(x, Operator) for x in value]):
msg = "Operators can only contain Operator objects."
raise ValueError(msg)
self._operators = value
@property
def equipments(self):
return self._equipments
@equipments.setter
def equipments(self, value):
if not hasattr(value, "__iter__"):
msg = "equipments needs to be an iterable, e.g. a list."
raise ValueError(msg)
if any([not isinstance(x, Equipment) for x in value]):
msg = "equipments can only contain Equipment objects."
raise ValueError(msg)
self._equipments = value
# if value is None or isinstance(value, Equipment):
# self._equipment = value
# elif isinstance(value, dict):
# self._equipment = Equipment(**value)
# else:
# msg = ("equipment needs to be be of type
# obspy.core.inventory.Equipment "
# "or contain a dictionary with values suitable for "
# "initialization.")
# raise ValueError(msg)
@property
def creation_date(self):
return self._creation_date
@creation_date.setter
def creation_date(self, value):
if value is None:
self._creation_date = None
return
if not isinstance(value, UTCDateTime):
value = UTCDateTime(value)
self._creation_date = value
@property
def termination_date(self):
return self._termination_date
@termination_date.setter
def termination_date(self, value):
if value is not None and not isinstance(value, UTCDateTime):
value = UTCDateTime(value)
self._termination_date = value
@property
def external_references(self):
return self._external_references
@external_references.setter
def external_references(self, value):
if not hasattr(value, "__iter__"):
msg = "external_references needs to be iterable, e.g. a list."
raise ValueError(msg)
self._external_references = value
@property
def longitude(self):
return self._longitude
@longitude.setter
def longitude(self, value):
if isinstance(value, Longitude):
self._longitude = value
else:
self._longitude = Longitude(value)
@property
def latitude(self):
return self._latitude
@latitude.setter
def latitude(self, value):
if isinstance(value, Latitude):
self._latitude = value
else:
self._latitude = Latitude(value)
@property
def elevation(self):
return self._elevation
@elevation.setter
def elevation(self, value):
if isinstance(value, Distance):
self._elevation = value
else:
self._elevation = Distance(value)
def select(self, location=None, channel=None, time=None, starttime=None,
endtime=None, sampling_rate=None):
r"""
Returns the :class:`Station` object with only the
:class:`~obspy.core.inventory.channel.Channel`\ s that match the given
criteria (e.g. all channels with ``channel="EHZ"``).
.. warning::
The returned object is based on a shallow copy of the original
object. That means that modifying any mutable child elements will
also modify the original object
(see https://docs.python.org/2/library/copy.html).
Use :meth:`copy()` afterwards to make a new copy of the data in
memory.
.. rubric:: Example
>>> from obspy import read_inventory, UTCDateTime
>>> sta = read_inventory()[0][0]
>>> t = UTCDateTime(2008, 7, 1, 12)
>>> sta = sta.select(channel="[LB]HZ", time=t)
>>> print(sta) # doctest: +NORMALIZE_WHITESPACE
Station FUR (Fuerstenfeldbruck, Bavaria, GR-Net)
Station Code: FUR
Channel Count: None/None (Selected/Total)
2006-12-16T00:00:00.000000Z -
Access: None
Latitude: 48.16, Longitude: 11.28, Elevation: 565.0 m
Available Channels:
FUR..BHZ, FUR..LHZ
The `location` and `channel` selection criteria may also contain UNIX
style wildcards (e.g. ``*``, ``?``, ...; see
:func:`~fnmatch.fnmatch`).
:type location: str
:param location: Potentially wildcarded location code. If not given,
all location codes will be accepted.
:type channel: str
:param channel: Potentially wildcarded channel code. If not given,
all channel codes will be accepted.
:type time: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param time: Only include channels active at given point in time.
:type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param starttime: Only include channels active at or after given point
in time (i.e. channels ending before given time will not be shown).
:type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param endtime: Only include channels active before or at given point
in time (i.e. channels starting after given time will not be
shown).
        :type sampling_rate: float
        :param sampling_rate: Only include channels whose sample rate matches
            the given value within a small numerical tolerance.
        """
channels = []
for cha in self.channels:
# skip if any given criterion is not matched
if location is not None:
if not fnmatch.fnmatch(cha.location_code.upper(),
location.upper()):
continue
if channel is not None:
if not fnmatch.fnmatch(cha.code.upper(),
channel.upper()):
continue
if sampling_rate is not None:
if cha.sample_rate is None:
msg = ("Omitting channel that has no sampling rate "
"specified.")
warnings.warn(msg)
continue
if not np.allclose(float(sampling_rate), cha.sample_rate,
rtol=1E-5, atol=1E-8):
continue
if any([t is not None for t in (time, starttime, endtime)]):
if not cha.is_active(time=time, starttime=starttime,
endtime=endtime):
continue
channels.append(cha)
sta = copy.copy(self)
sta.channels = channels
return sta
def plot(self, min_freq, output="VEL", location="*", channel="*",
time=None, starttime=None, endtime=None, axes=None,
unwrap_phase=False, plot_degrees=False, show=True, outfile=None):
"""
Show bode plot of instrument response of all (or a subset of) the
station's channels.
:type min_freq: float
:param min_freq: Lowest frequency to plot.
:type output: str
:param output: Output units. One of:
``"DISP"``
displacement, output unit is meters
``"VEL"``
velocity, output unit is meters/second
``"ACC"``
acceleration, output unit is meters/second**2
:type location: str
:param location: Only plot matching channels. Accepts UNIX style
patterns and wildcards (e.g. ``"BH*"``, ``"BH?"``, ``"*Z"``,
``"[LB]HZ"``; see :func:`~fnmatch.fnmatch`)
:type channel: str
:param channel: Only plot matching channels. Accepts UNIX style
patterns and wildcards (e.g. ``"BH*"``, ``"BH?"``, ``"*Z"``,
``"[LB]HZ"``; see :func:`~fnmatch.fnmatch`)
        :type time: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param time: Only show channels active at given point in time.
:type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param starttime: Only show channels active at or after given point in
time (i.e. channels ending before given time will not be shown).
:type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param endtime: Only show channels active before or at given point in
time (i.e. channels starting after given time will not be shown).
:type axes: list of 2 :class:`matplotlib.axes.Axes`
:param axes: List/tuple of two axes instances to plot the
amplitude/phase spectrum into. If not specified, a new figure is
opened.
:type unwrap_phase: bool
:param unwrap_phase: Set optional phase unwrapping using NumPy.
:type plot_degrees: bool
:param plot_degrees: if ``True`` plot bode in degrees
:type show: bool
:param show: Whether to show the figure after plotting or not. Can be
used to do further customization of the plot before showing it.
:type outfile: str
:param outfile: Output file path to directly save the resulting image
(e.g. ``"/tmp/image.png"``). Overrides the ``show`` option, image
will not be displayed interactively. The given path/file name is
also used to automatically determine the output format. Supported
file formats depend on your matplotlib backend. Most backends
support png, pdf, ps, eps and svg. Defaults to ``None``.
.. rubric:: Basic Usage
>>> from obspy import read_inventory
>>> sta = read_inventory()[0][0]
>>> sta.plot(0.001, output="VEL", channel="*Z") # doctest: +SKIP
.. plot::
from obspy import read_inventory
sta = read_inventory()[0][0]
sta.plot(0.001, output="VEL", channel="*Z")
"""
import matplotlib.pyplot as plt
if axes:
ax1, ax2 = axes
fig = ax1.figure
else:
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212, sharex=ax1)
matching = self.select(location=location, channel=channel, time=time,
starttime=starttime, endtime=endtime)
for cha in matching.channels:
try:
cha.plot(min_freq=min_freq, output=output, axes=(ax1, ax2),
label=".".join((self.code, cha.location_code,
cha.code)),
unwrap_phase=unwrap_phase, plot_degrees=plot_degrees,
show=False, outfile=None)
except ZeroSamplingRate:
msg = ("Skipping plot of channel with zero "
"sampling rate:\n%s")
warnings.warn(msg % str(cha), UserWarning)
except ObsPyException as e:
msg = "Skipping plot of channel (%s):\n%s"
warnings.warn(msg % (str(e), str(cha)), UserWarning)
# final adjustments to plot if we created the figure in here
if not axes:
from obspy.core.inventory.response import _adjust_bode_plot_figure
_adjust_bode_plot_figure(fig, plot_degrees=plot_degrees,
show=False)
if outfile:
fig.savefig(outfile)
else:
if show:
plt.show()
return fig
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
|
{"hexsha": "4dc96053390501c70e60f45fb152e67eeaaf8830", "size": 21281, "ext": "py", "lang": "Python", "max_stars_repo_path": "IRIS_data_download/IRIS_download_support/obspy/core/inventory/station.py", "max_stars_repo_name": "earthinversion/Fnet_IRIS_data_automated_download", "max_stars_repo_head_hexsha": "09a6e0c992662feac95744935e038d1c68539fa1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-03-05T01:03:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-17T05:04:07.000Z", "max_issues_repo_path": "IRIS_data_download/IRIS_download_support/obspy/core/inventory/station.py", "max_issues_repo_name": "earthinversion/Fnet_IRIS_data_automated_download", "max_issues_repo_head_hexsha": "09a6e0c992662feac95744935e038d1c68539fa1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-03-31T19:25:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-13T20:32:46.000Z", "max_forks_repo_path": "IRIS_data_download/IRIS_download_support/obspy/core/inventory/station.py", "max_forks_repo_name": "earthinversion/Fnet_IRIS_data_automated_download", "max_forks_repo_head_hexsha": "09a6e0c992662feac95744935e038d1c68539fa1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-09-08T19:33:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-05T09:47:50.000Z", "avg_line_length": 41.4027237354, "max_line_length": 79, "alphanum_fraction": 0.6104036464, "include": true, "reason": "import numpy", "num_tokens": 4546}
|
[STATEMENT]
lemma test_star [simp]: "`p\<^sup>\<star> = 1`"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<iota> p\<^sup>\<star> = (1::'b)
[PROOF STEP]
by (metis star_subid test_iso test_top top_greatest)
|
{"llama_tokens": 94, "file": "KAT_and_DRA_TwoSorted_KAT2", "length": 1}
|
import numpy as np
from .sh import SH
class TestSH:
"""Test sequential halving policy"""
def test_simple_run(self):
arm_num = 5
budget = 20
learner = SH(arm_num=arm_num, budget=budget)
learner.reset()
while True:
actions = learner.actions()
if actions is None:
break
learner.update([(np.zeros(pulls), None) for (arm_id, pulls) in actions])
assert learner.best_arm() in set(range(arm_num))
|
{"hexsha": "6b315f791d4f55c17972fbf49acb034ebd49f28d", "size": 448, "ext": "py", "lang": "Python", "max_stars_repo_path": "banditpylib/learners/ordinary_fbbai_learner/sh_test.py", "max_stars_repo_name": "XiGYmax/banditpylib", "max_stars_repo_head_hexsha": "07698a1c6b17720a8199dea76580546fe3dfb9be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "banditpylib/learners/ordinary_fbbai_learner/sh_test.py", "max_issues_repo_name": "XiGYmax/banditpylib", "max_issues_repo_head_hexsha": "07698a1c6b17720a8199dea76580546fe3dfb9be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "banditpylib/learners/ordinary_fbbai_learner/sh_test.py", "max_forks_repo_name": "XiGYmax/banditpylib", "max_forks_repo_head_hexsha": "07698a1c6b17720a8199dea76580546fe3dfb9be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.3333333333, "max_line_length": 78, "alphanum_fraction": 0.6517857143, "include": true, "reason": "import numpy", "num_tokens": 115}
|
__author__ = 'diegopinheiro'
__email__ = 'diegompin@gmail.com'
__github__ = 'https://github.com/diegompin'
from src.training_strategies.search_strategy import GridSearchStrategy, RandomizedSearchStrategy
from sklearn.ensemble import RandomForestClassifier
import numpy as np
from scipy.stats import randint as sp_randint
class RandomForestGridStrategy(GridSearchStrategy):
def __init__(self, pipeline, cross_validation, n_jobs=None):
super().__init__(pipeline, cross_validation, n_jobs)
def get_classifier(self):
return RandomForestClassifier()
# TODO Define the parameter space
def get_params(self):
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
params = {
'classifier__n_estimators': n_estimators,
'classifier__max_features': max_features,
'classifier__max_depth': max_depth,
'classifier__min_samples_split': min_samples_split,
'classifier__min_samples_leaf': min_samples_leaf,
'classifier__bootstrap': bootstrap
}
return params
class RandomForestRandomizedStrategy(RandomizedSearchStrategy):
def __init__(self, pipeline, cross_validation, n_jobs):
super().__init__(pipeline, cross_validation, n_jobs)
# TODO Define the parameter space
def get_params(self):
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
params = {
'classifier__n_estimators': n_estimators,
'classifier__max_features': max_features,
'classifier__max_depth': max_depth,
'classifier__min_samples_split': min_samples_split,
'classifier__min_samples_leaf': min_samples_leaf,
'classifier__bootstrap': bootstrap
}
return params
def get_classifier(self):
return RandomForestClassifier()
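# Usage sketch (assumptions: the 'classifier__*' keys above target an sklearn
# Pipeline step named 'classifier', and `cross_validation` accepts a plain
# fold count) -- the same grid can drive sklearn's own search objects:
#
# from sklearn.pipeline import Pipeline
# from sklearn.model_selection import RandomizedSearchCV
# pipe = Pipeline([('classifier', RandomForestClassifier())])
# params = RandomForestRandomizedStrategy(pipe, 5, 1).get_params()
# search = RandomizedSearchCV(pipe, params, n_iter=20, cv=5)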
|
{"hexsha": "18d4f40736100515b4fa34ac8fc3abc04d6a5060", "size": 3108, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/algorithm_strategies/random_forest_strategy.py", "max_stars_repo_name": "rionbr/smm4h", "max_stars_repo_head_hexsha": "6009ed7800884ab37b7080c8c825c30f501b6942", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-04-10T21:21:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-23T14:06:07.000Z", "max_issues_repo_path": "src/algorithm_strategies/random_forest_strategy.py", "max_issues_repo_name": "rionbr/smm4h", "max_issues_repo_head_hexsha": "6009ed7800884ab37b7080c8c825c30f501b6942", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-02-21T14:34:32.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-21T14:40:30.000Z", "max_forks_repo_path": "src/algorithm_strategies/random_forest_strategy.py", "max_forks_repo_name": "rionbr/smm4h", "max_forks_repo_head_hexsha": "6009ed7800884ab37b7080c8c825c30f501b6942", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-15T11:50:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-06T15:36:54.000Z", "avg_line_length": 37.9024390244, "max_line_length": 96, "alphanum_fraction": 0.6698841699, "include": true, "reason": "import numpy,from scipy", "num_tokens": 689}
|
import re
import requests
import io
import sys
import json
import urllib
from bs4 import BeautifulSoup
import sqlite3
import time
import fitz
from PIL import ImageDraw,ImageFont
from PIL import Image
import random
import numpy as np
#import cv2
#sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
class cardDict(dict):
def __missing__(self,key):
#print("调用了 User的__missing__方法")
return False
def __getitem__(self, item):
# print("调用User 类的 __getitem__方法")
return super(cardDict, self).__getitem__(item)
def get(self, k, d=None):
# print("调用User 类的 get 方法")
return super(cardDict, self).get(k, d)
def appendCard(self,k):
if self[k]:
self[k] = self[k] +1
else:
self[k] = 1
class ydkParser(object):
"""docstring for ydkParser"""
def __init__(self,path):
super().__init__()
self.path = path
self.mainEffectDeck = cardDict()
self.mainSpellDeck = cardDict()
self.mainTrapDeck = cardDict()
self.extraDeck = cardDict()
self.sideDeck = cardDict()
def parser(self):
conn = sqlite3.connect("cards.db")
c = conn.cursor()
with open(self.path) as f:
lines = f.readlines()
mainBegin = False
exBegin = False
sideBegin = False
for line in lines:
if line.find("#created") != -1:
continue
if line.find("#main") != -1:
mainBegin = True
continue
if line.find("#extra") != -1:
mainBegin = False
exBegin = True
continue
if line.find("side") != -1:
mainBegin = False
exBegin = False
sideBegin = True
continue
sql = "select * from datas where id = {cardid}".format(cardid=line.replace("\n", ""))
#print(sql)
l = c.execute(sql)
for card in l:
cid = card[0]
cname = card[1]
jname = card[2]
ctype = card[3]
if mainBegin:
if ctype == 0 or ctype == 1 or ctype == 2:
self.mainEffectDeck.appendCard(jname)
continue
if ctype == 7:
self.mainSpellDeck.appendCard(jname)
continue
if ctype == 8:
self.mainTrapDeck.appendCard(jname)
continue
if exBegin:
self.extraDeck.appendCard(jname)
continue
if sideBegin:
self.sideDeck.appendCard(jname)
continue
return None
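# Expected .ydk layout (sketch of the YGOPro deck format assumed above):
# #created by <player>
# #main
# <one numeric card id per line>
# #extra
# <one numeric card id per line>
# !side
# <one numeric card id per line>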
def outputCardFile(self,filepath):
try:
f = open(filepath,"w+",encoding="utf-8")
f.write("怪兽\n")
for jname in self.mainEffectDeck.keys():
f.write(jname)
f.write(" ")
f.write(str(self.mainEffectDeck[jname]))
f.write("\n")
f.write("魔法\n")
for jname in self.mainSpellDeck.keys():
f.write(jname)
f.write(" ")
f.write(str(self.mainSpellDeck[jname]))
f.write("\n")
f.write("陷阱\n")
for jname in self.mainTrapDeck.keys():
f.write(jname)
f.write(" ")
f.write(str(self.mainTrapDeck[jname]))
f.write("\n")
f.write("额外\n")
for jname in self.extraDeck.keys():
f.write(jname)
f.write(" ")
f.write(str(self.extraDeck[jname]))
f.write("\n")
f.write("Side\n")
for jname in self.sideDeck.keys():
f.write(jname)
f.write(" ")
f.write(str(self.sideDeck[jname]))
f.write("\n")
except Exception as e:
print(e)
def reportResult(self):
print("怪兽")  # Monsters
for jname in self.mainEffectDeck.keys():
print(jname,self.mainEffectDeck[jname])
print("魔法")  # Spells
for jname in self.mainSpellDeck.keys():
print(jname,self.mainSpellDeck[jname])
print("陷阱")  # Traps
for jname in self.mainTrapDeck.keys():
print(jname,self.mainTrapDeck[jname])
print("额外")  # Extra Deck
for jname in self.extraDeck.keys():
print(jname,self.extraDeck[jname])
print("Side")
for jname in self.sideDeck.keys():
print(jname,self.sideDeck[jname])
def drawCardName(self,path,outpath):
self.im = Image.open(path)  # open the base image to draw on
self.im = self.im.convert("RGB")
draw = ImageDraw.Draw(self.im)
basex= 1000
basey= 2000
font = ImageFont.truetype('simhei.ttf',48)
t = 0
for c in self.mainEffectDeck.keys():
draw.text((basex,basey+t*150),c.replace("・","·"),font=font,fill= (0,0,0),direction=None )
draw.text((780,basey+t*150),str(self.mainEffectDeck[c]),font=font,fill= (0,0,0),direction=None )
t = t+1
t= 0
for c in self.mainSpellDeck.keys():
draw.text((2650,basey+t*150),c.replace("・","·"),font=font,fill= (0,0,0),direction=None )
draw.text((2450,basey+t*150),str(self.mainSpellDeck[c]),font=font,fill= (0,0,0),direction=None )
t = t+1
t= 0
for c in self.mainTrapDeck.keys():
draw.text((4350,basey+t*150),c.replace("・","·"),font=font,fill= (0,0,0),direction=None )
draw.text((4050,basey+t*150),str(self.mainTrapDeck[c]),font=font,fill= (0,0,0),direction=None )
t = t+1
t=0
for c in self.extraDeck.keys():
draw.text((basex,5400+t*150),c.replace("・","·"),font=font,fill= (0,0,0),direction=None )
draw.text((780,5400+t*150),str(self.extraDeck[c]),font=font,fill= (0,0,0),direction=None )
t = t+1
t=0
for c in self.sideDeck.keys():
draw.text((2650,5400+t*150),c.replace("・","·"),font=font,fill= (0,0,0),direction=None )
draw.text((2450,5400+t*150),str(self.sideDeck[c]),font=font,fill= (0,0,0),direction=None )
t = t+1
del draw
self.im.save(outpath)
if __name__ == '__main__':
impath = "D:\YGOPro_Setup_2020-12-01\YGOPro\\1.png"
outpath ="D:\YGOPro_Setup_2020-12-01\YGOPro\\out.png"
pdfpath = "D:\YGOPro_Setup_2020-12-01\YGOPro\\list.pdf"
p = ydkParser("D:\YGOPro_Setup_2020-12-01\YGOPro\\deck\\test.ydk")
#p.pdf_images(pdfpath,"D:\YGOPro_Setup_2020-12-01\YGOPro\\",5,5,0)
p.parser()
p.reportResult()
p.drawCardName(impath, outpath)
print("Pass")
#doc = fitz.open(pdfpath)
#page = doc.load_page(0)
#p = fitz.Point(100,100)
#rc = page.insert_text(p,"中文",fontname = "helv",fontsize =12,rotate=0)
#doc.save("D:\YGOPro_Setup_2020-12-01\YGOPro\\out.pdf")
#print(p.mainEffectDeck)
#print(p.mainSpellDeck)
#print(p.mainTrapDeck)
|
{"hexsha": "0ec347958b97ae810cd1bafde26c964702ad33dc", "size": 5996, "ext": "py", "lang": "Python", "max_stars_repo_path": "ydk2list.py", "max_stars_repo_name": "i82Security/ydkparser", "max_stars_repo_head_hexsha": "f5a6c833d074347bc783eeac8a1ca861e2c0a665", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-28T14:09:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-28T14:09:25.000Z", "max_issues_repo_path": "ydk2list.py", "max_issues_repo_name": "i82Security/ydkparser", "max_issues_repo_head_hexsha": "f5a6c833d074347bc783eeac8a1ca861e2c0a665", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ydk2list.py", "max_forks_repo_name": "i82Security/ydkparser", "max_forks_repo_head_hexsha": "f5a6c833d074347bc783eeac8a1ca861e2c0a665", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2828282828, "max_line_length": 100, "alphanum_fraction": 0.6289192795, "include": true, "reason": "import numpy", "num_tokens": 1888}
|
"""
Normals Interface Class
Meteorological data provided by Meteostat (https://dev.meteostat.net)
under the terms of the Creative Commons Attribution-NonCommercial
4.0 International Public License.
The code is licensed under the MIT license.
"""
from copy import copy
from typing import Union
from datetime import datetime
import numpy as np
import pandas as pd
from meteostat.core.cache import get_file_path, file_in_cache
from meteostat.core.loader import processing_handler, load_handler
from meteostat.core.warn import warn
from meteostat.utilities.aggregations import weighted_average
from meteostat.interface.base import Base
from meteostat.interface.point import Point
class Normals(Base):
"""
Retrieve climate normals for one or multiple weather stations or
a single geographical point
"""
# The cache subdirectory
cache_subdir: str = 'normals'
# The list of weather Stations
_stations: pd.Index = None
# The first year of the period
_start: int = None
# The last year of the period
_end: int = None
# The data frame
_data: pd.DataFrame = pd.DataFrame()
# Columns
_columns: list = [
'start',
'end',
'month',
'tmin',
'tmax',
'prcp',
'wspd',
'pres',
'tsun'
]
# Index of first meteorological column
_first_met_col = 3
# Data types
_types: dict = {
'tmin': 'float64',
'tmax': 'float64',
'prcp': 'float64',
'wspd': 'float64',
'pres': 'float64',
'tsun': 'float64'
}
def _load(
self,
station: str
) -> pd.DataFrame:
"""
Load file from Meteostat
"""
# File name
file = f'normals/{station}.csv.gz'
# Get local file path
path = get_file_path(self.cache_dir, self.cache_subdir, file)
# Check if file in cache
if self.max_age > 0 and file_in_cache(path, self.max_age):
# Read cached data
df = pd.read_pickle(path)
else:
# Get data from Meteostat
df = load_handler(
self.endpoint,
file,
self._columns,
self._types,
None)
if df.index.size > 0:
# Add weather station ID
df['station'] = station
# Set index
df = df.set_index(['station', 'start', 'end', 'month'])
# Save as Pickle
if self.max_age > 0:
df.to_pickle(path)
# Filter time period and append to DataFrame
if df.index.size > 0 and self._end:
# Get time index
end = df.index.get_level_values('end')
# Filter & return
return df.loc[end == self._end]
return df
def _get_data(self) -> pd.DataFrame:
"""
Get all required data
"""
if len(self._stations) > 0:
# List of datasets
datasets = []
for station in self._stations:
datasets.append((
str(station),
))
# Data Processing
return processing_handler(
datasets, self._load, self.processes, self.threads)
# Empty DataFrame
return pd.DataFrame(columns=[*self._types])
def _resolve_point(
self,
method: str,
stations: pd.DataFrame,
alt: int,
adapt_temp: bool
) -> None:
"""
Project weather station data onto a single point
"""
if self._stations.size == 0 or self._data.size == 0:
return None
def adjust_temp(data: pd.DataFrame):
"""
Adjust temperature-like data based on altitude
"""
# adjust only rows with valid values (`!= np.NaN` is always True)
data.loc[data['tmin'].notna(), 'tmin'] = data['tmin'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
data.loc[data['tmax'].notna(), 'tmax'] = data['tmax'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
return data
if method == 'nearest':
if adapt_temp:
# Join elevation of involved weather stations
data = self._data.join(
stations['elevation'], on='station')
# Adapt temperature-like data based on altitude
data = adjust_temp(data)
# Drop elevation & round
data = data.drop('elevation', axis=1).round(1)
else:
data = self._data
self._data = data.groupby(level=[
'start',
'end',
'month'
]).agg('first')
else:
data = self._data.join(
stations[['score', 'elevation']], on='station')
# Adapt temperature-like data based on altitude
if adapt_temp:
data = adjust_temp(data)
# Aggregate mean data
data = data.groupby(level=[
'start',
'end',
'month'
]).apply(weighted_average)
# Remove obsolete index column
try:
data = data.reset_index(level=3, drop=True)
except IndexError:
pass
# Drop score and elevation
self._data = data.drop(['score', 'elevation'], axis=1).round(1)
# Set placeholder station ID
self._data['station'] = 'XXXXX'
self._data = self._data.set_index('station', append=True)
self._data = self._data.reorder_levels(
['station', 'start', 'end', 'month'])
self._stations = pd.Index(['XXXXX'])
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str],
start: int = None,
end: int = None
) -> None:
# Set list of weather stations
if isinstance(loc, pd.DataFrame):
self._stations = loc.index
elif isinstance(loc, Point):
if start and end:
stations = loc.get_stations(
'monthly', datetime(
start, 1, 1), datetime(
end, 12, 31))
else:
stations = loc.get_stations()
self._stations = stations.index
else:
if not isinstance(loc, list):
loc = [loc]
self._stations = pd.Index(loc)
# Check period
if (start and end) and (end - start != 29 or end % 10 != 0 or
end >= datetime.now().year):
raise ValueError('Invalid reference period')
# Set period
self._start = start
self._end = end
# Get data for all weather stations
self._data = self._get_data()
# Interpolate data
if isinstance(loc, Point):
self._resolve_point(loc.method, stations, loc.alt, loc.adapt_temp)
# Clear cache
if self.max_age > 0 and self.autoclean:
self.clear_cache()
def normalize(self):
"""
Normalize the DataFrame
"""
# Create temporal instance
temp = copy(self)
if self.count() == 0:
warn('Pointless normalization of empty DataFrame')
# Go through list of weather stations
for station in temp._stations:
# The list of periods
periods: pd.Index = pd.Index([])
# Get periods
if self.count() > 0:
periods = temp._data[temp._data.index.get_level_values(
'station') == station].index.unique('end')
elif periods.size == 0 and self._end:
periods = pd.Index([self._end])
# Go through all periods
for period in periods:
# Create DataFrame
df = pd.DataFrame(
columns=temp._columns[temp._first_met_col:])
# Populate index columns
df['month'] = range(1, 13)
df['station'] = station
df['start'] = period - 29
df['end'] = period
# Set index
df.set_index(
['station', 'start', 'end', 'month'], inplace=True)
# Merge data
temp._data = pd.concat([temp._data, df], axis=0).groupby(
[
'station',
'start',
'end',
'month'
], as_index=True).first() if temp._data.index.size > 0 else df
# None -> NaN
temp._data = temp._data.fillna(np.NaN)
# Return class instance
return temp
def fetch(self) -> pd.DataFrame:
"""
Fetch DataFrame
"""
# Copy DataFrame
temp = copy(self._data)
# Add avg. temperature column
temp.insert(0, 'tavg', temp[['tmin', 'tmax']].dropna(how='any').mean(
axis=1).round(1))
# Remove station index if it's a single station
if len(self._stations) == 1 and 'station' in temp.index.names:
temp = temp.reset_index(level='station', drop=True)
# Remove start & end year if period is set
if self._start and self._end and self.count() > 0:
temp = temp.reset_index(level='start', drop=True)
temp = temp.reset_index(level='end', drop=True)
# Return data frame
return temp
# Import methods
from meteostat.series.convert import convert
from meteostat.series.count import count
from meteostat.core.cache import clear_cache
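# Usage sketch (assumes network access and a valid Meteostat station id;
# per the check above, the period must be a 30-year span ending on a
# decade below the current year, e.g. 1961-1990):
#
# from meteostat import Normals
# data = Normals('10637', 1961, 1990).normalize().fetch()
# print(data[['tavg', 'prcp']])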
|
{"hexsha": "d7d21dc1db0aa3091bc0d23d5e85e05f03bc7906", "size": 9765, "ext": "py", "lang": "Python", "max_stars_repo_path": "meteostat/interface/normals.py", "max_stars_repo_name": "meteoDaniel/meteostat-python", "max_stars_repo_head_hexsha": "69ea4206e402f42bc47e3e909923fe5744d92814", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 133, "max_stars_repo_stars_event_min_datetime": "2020-08-05T15:53:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T17:04:48.000Z", "max_issues_repo_path": "meteostat/interface/normals.py", "max_issues_repo_name": "meteoDaniel/meteostat-python", "max_issues_repo_head_hexsha": "69ea4206e402f42bc47e3e909923fe5744d92814", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 49, "max_issues_repo_issues_event_min_datetime": "2020-10-01T16:16:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T13:58:59.000Z", "max_forks_repo_path": "meteostat/interface/normals.py", "max_forks_repo_name": "meteoDaniel/meteostat-python", "max_forks_repo_head_hexsha": "69ea4206e402f42bc47e3e909923fe5744d92814", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2020-11-12T23:49:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T13:17:43.000Z", "avg_line_length": 27.5847457627, "max_line_length": 82, "alphanum_fraction": 0.5143881208, "include": true, "reason": "import numpy", "num_tokens": 2154}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''eclipses.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Oct 2017
This contains a double gaussian model for first order modeling of eclipsing
binaries.
'''
import numpy as np
from numpy import nan as npnan, sum as npsum, abs as npabs, \
roll as nproll, isfinite as npisfinite, std as npstd, \
sign as npsign, sqrt as npsqrt, median as npmedian, \
array as nparray, percentile as nppercentile, \
polyfit as nppolyfit, var as npvar, max as npmax, min as npmin, \
log10 as nplog10, arange as nparange, pi as MPI, floor as npfloor, \
argsort as npargsort, cos as npcos, sin as npsin, tan as nptan, \
where as npwhere, linspace as nplinspace, \
zeros_like as npzeros_like, full_like as npfull_like, all as npall, \
correlate as npcorrelate, nonzero as npnonzero, diag as npdiag
##################################
## MODEL AND RESIDUAL FUNCTIONS ##
##################################
def _gaussian(x, amp, loc, std):
'''
This is a simple gaussian.
'''
return amp * np.exp(-((x - loc)*(x - loc))/(2.0*std*std))
def _double_inverted_gaussian(x,
amp1, loc1, std1,
amp2, loc2, std2):
'''
This is a double inverted gaussian.
'''
gaussian1 = -_gaussian(x,amp1,loc1,std1)
gaussian2 = -_gaussian(x,amp2,loc2,std2)
return gaussian1 + gaussian2
def invgauss_eclipses_func(ebparams, times, mags, errs):
'''This returns a double eclipse shaped function.
Suitable for first order modeling of eclipsing binaries.
ebparams = [period (time),
epoch (time),
pdepth (mags),
pduration (phase),
psdepthratio,
secondaryphase]
period is the period in days
epoch is the time of minimum in JD
pdepth is the depth of the primary eclipse
- for magnitudes -> pdepth should be < 0
- for fluxes -> pdepth should be > 0
pduration is the length of the primary eclipse in phase
psdepthratio is the ratio in the eclipse depths:
depth_secondary/depth_primary. This is generally the same as the ratio of
the Teffs of the two stars.
secondaryphase is the phase at which the minimum of the secondary eclipse is
located. This effectively parameterizes eccentricity.
All of these will then have fitted values after the fit is done.
'''
(period, epoch, pdepth, pduration, depthratio, secondaryphase) = ebparams
# generate the phases
iphase = (times - epoch)/period
iphase = iphase - npfloor(iphase)
phasesortind = npargsort(iphase)
phase = iphase[phasesortind]
ptimes = times[phasesortind]
pmags = mags[phasesortind]
perrs = errs[phasesortind]
zerolevel = npmedian(pmags)
modelmags = npfull_like(phase, zerolevel)
primaryecl_amp = -pdepth
secondaryecl_amp = -pdepth * depthratio
primaryecl_std = pduration/5.0 # we use 5-sigma as full-width -> duration
secondaryecl_std = pduration/5.0 # secondary eclipse has the same duration
halfduration = pduration/2.0
# phase indices
primary_eclipse_ingress = (
(phase >= (1.0 - halfduration)) & (phase <= 1.0)
)
primary_eclipse_egress = (
(phase >= 0.0) & (phase <= halfduration)
)
secondary_eclipse_phase = (
(phase >= (secondaryphase - halfduration)) &
(phase <= (secondaryphase + halfduration))
)
# put in the eclipses
modelmags[primary_eclipse_ingress] = (
zerolevel + _gaussian(phase[primary_eclipse_ingress],
primaryecl_amp,
1.0,
primaryecl_std)
)
modelmags[primary_eclipse_egress] = (
zerolevel + _gaussian(phase[primary_eclipse_egress],
primaryecl_amp,
0.0,
primaryecl_std)
)
modelmags[secondary_eclipse_phase] = (
zerolevel + _gaussian(phase[secondary_eclipse_phase],
secondaryecl_amp,
secondaryphase,
secondaryecl_std)
)
return modelmags, phase, ptimes, pmags, perrs
def invgauss_eclipses_residual(ebparams, times, mags, errs):
'''
This returns the residual between the modelmags and the actual mags.
'''
modelmags, phase, ptimes, pmags, perrs = (
invgauss_eclipses_func(ebparams, times, mags, errs)
)
# this is now a weighted residual taking into account the measurement err
return (pmags - modelmags)/perrs
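# Fitting sketch (assumptions: times/mags/errs are 1-D float arrays and
# ebparams0 = [period, epoch, pdepth, pduration, psdepthratio, secondaryphase]
# is a rough initial guess):
#
# from scipy.optimize import least_squares
# ebparams0 = [3.21, 2455000.0, -0.2, 0.1, 0.8, 0.5]
# fit = least_squares(invgauss_eclipses_residual, ebparams0,
#                     args=(times, mags, errs))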
|
{"hexsha": "3bb14f11ddc5dc2c9aa20e6d92e897c59cac503e", "size": 4689, "ext": "py", "lang": "Python", "max_stars_repo_path": "astrobase/lcmodels/eclipses.py", "max_stars_repo_name": "adrn/astrobase", "max_stars_repo_head_hexsha": "7af71167deec58dffc8f668c0b34cb75ed44ae6a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "astrobase/lcmodels/eclipses.py", "max_issues_repo_name": "adrn/astrobase", "max_issues_repo_head_hexsha": "7af71167deec58dffc8f668c0b34cb75ed44ae6a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "astrobase/lcmodels/eclipses.py", "max_forks_repo_name": "adrn/astrobase", "max_forks_repo_head_hexsha": "7af71167deec58dffc8f668c0b34cb75ed44ae6a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4905660377, "max_line_length": 80, "alphanum_fraction": 0.6195350821, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1227}
|
using Documenter, GibbsTypePriors
makedocs(
modules = [GibbsTypePriors],
format = Documenter.HTML(; prettyurls = get(ENV, "CI", nothing) == "true"),
authors = "konkam",
sitename = "GibbsTypePriors.jl",
pages = Any["index.md"]
# strict = true,
# clean = true,
# checkdocs = :exports,
)
deploydocs(
repo = "github.com/konkam/GibbsTypePriors.jl.git",
push_preview = true
)
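# Typical invocation (sketch), from the repository root with the docs
# environment instantiated:
# julia --project=docs docs/make.jl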
|
{"hexsha": "e7086edfbbd598aff8e11b3cbec32b725b669162", "size": 412, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "konkam/GibbsTypePriors", "max_stars_repo_head_hexsha": "f923ed8a365261c34f4749b75005764279e63c94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-27T16:49:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-27T16:49:28.000Z", "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "konkam/GibbsTypePriors", "max_issues_repo_head_hexsha": "f923ed8a365261c34f4749b75005764279e63c94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "konkam/GibbsTypePriors", "max_forks_repo_head_hexsha": "f923ed8a365261c34f4749b75005764279e63c94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-13T16:45:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-13T16:45:47.000Z", "avg_line_length": 22.8888888889, "max_line_length": 79, "alphanum_fraction": 0.6359223301, "num_tokens": 128}
|
import numpy as np
from typing import Union
__all__ = ['sum', 'mean', 'var', 'std', 'mean_std', 'quantile', 'median', 'ratio']
def sum(obs: np.ndarray) -> float:
return obs.sum(axis=0)
def mean(obs: np.ndarray) -> float:
return np.divide(obs.sum(axis=0), obs.shape[0])
def demeaned(obs: np.ndarray) -> np.ndarray:
return obs - mean(obs)
def demeaned_sumsquares(obs: np.ndarray) -> float:
return (demeaned(obs) ** 2).sum(axis=0)
def var(obs: np.ndarray) -> float:
return demeaned_sumsquares(obs) / (obs.shape[0] - 1)
def std(obs: np.ndarray) -> float:
return np.sqrt(var(obs))
def mean_std(obs: np.ndarray) -> float:
return std(obs) / np.sqrt(obs.shape[0])
def quantile(obs: np.ndarray, q: float) -> Union[np.ndarray, float]:
return np.quantile(obs, q, axis=0)
def median(obs: np.ndarray) -> float:
return quantile(obs, 0.5)
def ratio(obs: np.ndarray) -> float:
return sum(obs['num']) / sum(obs['den'])
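# Usage sketch:
#
# obs = np.array([1.0, 2.0, 3.0, 4.0])
# mean(obs), var(obs), quantile(obs, 0.9)
#
# `ratio` expects a structured array with 'num' and 'den' fields:
#
# pairs = np.array([(1.0, 2.0), (3.0, 4.0)],
#                  dtype=[('num', 'f8'), ('den', 'f8')])
# ratio(pairs)  # (1 + 3) / (2 + 4) = 0.666...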
|
{"hexsha": "1c712baef6b541001c8f3b4230fc65c8bbfcd885", "size": 991, "ext": "py", "lang": "Python", "max_stars_repo_path": "abito/lib/stats/plain.py", "max_stars_repo_name": "avito-tech/abito", "max_stars_repo_head_hexsha": "9071eecd9526ee5c268cfacd7ac9a49b6ee185e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-05-30T09:41:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T17:44:16.000Z", "max_issues_repo_path": "abito/lib/stats/plain.py", "max_issues_repo_name": "lnkov/abito", "max_issues_repo_head_hexsha": "9071eecd9526ee5c268cfacd7ac9a49b6ee185e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "abito/lib/stats/plain.py", "max_forks_repo_name": "lnkov/abito", "max_forks_repo_head_hexsha": "9071eecd9526ee5c268cfacd7ac9a49b6ee185e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-09T06:10:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-09T06:10:03.000Z", "avg_line_length": 21.5434782609, "max_line_length": 82, "alphanum_fraction": 0.6417759839, "include": true, "reason": "import numpy", "num_tokens": 289}
|
import unittest
import numpy as np
from numpy.testing import assert_almost_equal as almost_equal
from thimbles.spectrographs import SamplingModel
import thimbles as tmb
class TestSamplingMatrixModel(unittest.TestCase):
min_wv = 100
max_wv = 200
npts_spec = 30
npts_model = 100
def setUp(self):
pass
def test_sampling_matrix(self):
spec_wvs = tmb.coordinatization.as_coordinatization(np.linspace(self.min_wv, self.max_wv, self.npts_spec))
spec_wvs_p = tmb.modeling.Parameter(spec_wvs)
model_wvs = tmb.coordinatization.as_coordinatization(np.linspace(self.min_wv, self.max_wv, self.npts_model))
model_wvs_p = tmb.modeling.Parameter(model_wvs)
spec = tmb.Spectrum(spec_wvs, np.ones(self.npts_spec), np.ones(self.npts_spec))
inp_mod_flux = tmb.modeling.Parameter(np.ones(self.npts_model))
output_p = tmb.modeling.Parameter()
samp_mat_mod = SamplingModel(
output_p=output_p,
input_wvs_p=model_wvs_p,
output_wvs_p=spec_wvs_p,
input_lsf_p=tmb.modeling.FloatParameter(1.0),
output_lsf_p=tmb.modeling.FloatParameter(1.0),
)
samp_mat = output_p.value
res = samp_mat*np.ones(self.npts_model)
np.testing.assert_almost_equal(res, np.ones(self.npts_spec))
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "cf35d068811d4a4714436659f3ba09c6b7f847ef", "size": 1403, "ext": "py", "lang": "Python", "max_stars_repo_path": "thimbles/tests/test_spectrograph.py", "max_stars_repo_name": "quidditymaster/thimbles", "max_stars_repo_head_hexsha": "b122654a012f0eb4f043d1ee757f884707c97615", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "thimbles/tests/test_spectrograph.py", "max_issues_repo_name": "quidditymaster/thimbles", "max_issues_repo_head_hexsha": "b122654a012f0eb4f043d1ee757f884707c97615", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thimbles/tests/test_spectrograph.py", "max_forks_repo_name": "quidditymaster/thimbles", "max_forks_repo_head_hexsha": "b122654a012f0eb4f043d1ee757f884707c97615", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9210526316, "max_line_length": 116, "alphanum_fraction": 0.6913756237, "include": true, "reason": "import numpy,from numpy", "num_tokens": 360}
|
-- Intuitionistic propositional calculus.
-- Hilbert-style formalisation of syntax.
-- Nested terms.
module IPC.Syntax.Hilbert where
open import IPC.Syntax.Common public
-- Derivations.
infix 3 _⊢_
data _⊢_ (Γ : Cx Ty) : Ty → Set where
var : ∀ {A} → A ∈ Γ → Γ ⊢ A
app : ∀ {A B} → Γ ⊢ A ▻ B → Γ ⊢ A → Γ ⊢ B
ci : ∀ {A} → Γ ⊢ A ▻ A
ck : ∀ {A B} → Γ ⊢ A ▻ B ▻ A
cs : ∀ {A B C} → Γ ⊢ (A ▻ B ▻ C) ▻ (A ▻ B) ▻ A ▻ C
cpair : ∀ {A B} → Γ ⊢ A ▻ B ▻ A ∧ B
cfst : ∀ {A B} → Γ ⊢ A ∧ B ▻ A
csnd : ∀ {A B} → Γ ⊢ A ∧ B ▻ B
unit : Γ ⊢ ⊤
cboom : ∀ {C} → Γ ⊢ ⊥ ▻ C
cinl : ∀ {A B} → Γ ⊢ A ▻ A ∨ B
cinr : ∀ {A B} → Γ ⊢ B ▻ A ∨ B
ccase : ∀ {A B C} → Γ ⊢ A ∨ B ▻ (A ▻ C) ▻ (B ▻ C) ▻ C
infix 3 _⊢⋆_
_⊢⋆_ : Cx Ty → Cx Ty → Set
Γ ⊢⋆ ∅ = 𝟙
Γ ⊢⋆ Ξ , A = Γ ⊢⋆ Ξ × Γ ⊢ A
-- Monotonicity with respect to context inclusion.
mono⊢ : ∀ {A Γ Γ′} → Γ ⊆ Γ′ → Γ ⊢ A → Γ′ ⊢ A
mono⊢ η (var i) = var (mono∈ η i)
mono⊢ η (app t u) = app (mono⊢ η t) (mono⊢ η u)
mono⊢ η ci = ci
mono⊢ η ck = ck
mono⊢ η cs = cs
mono⊢ η cpair = cpair
mono⊢ η cfst = cfst
mono⊢ η csnd = csnd
mono⊢ η unit = unit
mono⊢ η cboom = cboom
mono⊢ η cinl = cinl
mono⊢ η cinr = cinr
mono⊢ η ccase = ccase
mono⊢⋆ : ∀ {Γ″ Γ Γ′} → Γ ⊆ Γ′ → Γ ⊢⋆ Γ″ → Γ′ ⊢⋆ Γ″
mono⊢⋆ {∅} η ∙ = ∙
mono⊢⋆ {Γ″ , A} η (ts , t) = mono⊢⋆ η ts , mono⊢ η t
-- Shorthand for variables.
v₀ : ∀ {A Γ} → Γ , A ⊢ A
v₀ = var i₀
v₁ : ∀ {A B Γ} → Γ , A , B ⊢ A
v₁ = var i₁
v₂ : ∀ {A B C Γ} → Γ , A , B , C ⊢ A
v₂ = var i₂
-- Reflexivity.
refl⊢⋆ : ∀ {Γ} → Γ ⊢⋆ Γ
refl⊢⋆ {∅} = ∙
refl⊢⋆ {Γ , A} = mono⊢⋆ weak⊆ refl⊢⋆ , v₀
-- Deduction theorem.
lam : ∀ {A B Γ} → Γ , A ⊢ B → Γ ⊢ A ▻ B
lam (var top) = ci
lam (var (pop i)) = app ck (var i)
lam (app t u) = app (app cs (lam t)) (lam u)
lam ci = app ck ci
lam ck = app ck ck
lam cs = app ck cs
lam cpair = app ck cpair
lam cfst = app ck cfst
lam csnd = app ck csnd
lam unit = app ck unit
lam cboom = app ck cboom
lam cinl = app ck cinl
lam cinr = app ck cinr
lam ccase = app ck ccase
lam⋆ : ∀ {Ξ Γ A} → Γ ⧺ Ξ ⊢ A → Γ ⊢ Ξ ▻⋯▻ A
lam⋆ {∅} = I
lam⋆ {Ξ , B} = lam⋆ {Ξ} ∘ lam
lam⋆₀ : ∀ {Γ A} → Γ ⊢ A → ∅ ⊢ Γ ▻⋯▻ A
lam⋆₀ {∅} = I
lam⋆₀ {Γ , B} = lam⋆₀ ∘ lam
-- Detachment theorem.
det : ∀ {A B Γ} → Γ ⊢ A ▻ B → Γ , A ⊢ B
det t = app (mono⊢ weak⊆ t) v₀
det⋆ : ∀ {Ξ Γ A} → Γ ⊢ Ξ ▻⋯▻ A → Γ ⧺ Ξ ⊢ A
det⋆ {∅} = I
det⋆ {Ξ , B} = det ∘ det⋆ {Ξ}
det⋆₀ : ∀ {Γ A} → ∅ ⊢ Γ ▻⋯▻ A → Γ ⊢ A
det⋆₀ {∅} = I
det⋆₀ {Γ , B} = det ∘ det⋆₀
-- Cut and multicut.
cut : ∀ {A B Γ} → Γ ⊢ A → Γ , A ⊢ B → Γ ⊢ B
cut t u = app (lam u) t
multicut : ∀ {Ξ A Γ} → Γ ⊢⋆ Ξ → Ξ ⊢ A → Γ ⊢ A
multicut {∅} ∙ u = mono⊢ bot⊆ u
multicut {Ξ , B} (ts , t) u = app (multicut ts (lam u)) t
-- Transitivity.
trans⊢⋆ : ∀ {Γ″ Γ′ Γ} → Γ ⊢⋆ Γ′ → Γ′ ⊢⋆ Γ″ → Γ ⊢⋆ Γ″
trans⊢⋆ {∅} ts ∙ = ∙
trans⊢⋆ {Γ″ , A} ts (us , u) = trans⊢⋆ ts us , multicut ts u
-- Contraction.
ccont : ∀ {A B Γ} → Γ ⊢ (A ▻ A ▻ B) ▻ A ▻ B
ccont = lam (lam (app (app v₁ v₀) v₀))
cont : ∀ {A B Γ} → Γ , A , A ⊢ B → Γ , A ⊢ B
cont t = det (app ccont (lam (lam t)))
-- Exchange, or Schönfinkel’s C combinator.
cexch : ∀ {A B C Γ} → Γ ⊢ (A ▻ B ▻ C) ▻ B ▻ A ▻ C
cexch = lam (lam (lam (app (app v₂ v₀) v₁)))
exch : ∀ {A B C Γ} → Γ , A , B ⊢ C → Γ , B , A ⊢ C
exch t = det (det (app cexch (lam (lam t))))
-- Composition, or Schönfinkel’s B combinator.
ccomp : ∀ {A B C Γ} → Γ ⊢ (B ▻ C) ▻ (A ▻ B) ▻ A ▻ C
ccomp = lam (lam (lam (app v₂ (app v₁ v₀))))
comp : ∀ {A B C Γ} → Γ , B ⊢ C → Γ , A ⊢ B → Γ , A ⊢ C
comp t u = det (app (app ccomp (lam t)) (lam u))
-- Useful theorems in functional form.
pair : ∀ {A B Γ} → Γ ⊢ A → Γ ⊢ B → Γ ⊢ A ∧ B
pair t u = app (app cpair t) u
fst : ∀ {A B Γ} → Γ ⊢ A ∧ B → Γ ⊢ A
fst t = app cfst t
snd : ∀ {A B Γ} → Γ ⊢ A ∧ B → Γ ⊢ B
snd t = app csnd t
boom : ∀ {C Γ} → Γ ⊢ ⊥ → Γ ⊢ C
boom t = app cboom t
inl : ∀ {A B Γ} → Γ ⊢ A → Γ ⊢ A ∨ B
inl t = app cinl t
inr : ∀ {A B Γ} → Γ ⊢ B → Γ ⊢ A ∨ B
inr t = app cinr t
case : ∀ {A B C Γ} → Γ ⊢ A ∨ B → Γ , A ⊢ C → Γ , B ⊢ C → Γ ⊢ C
case t u v = app (app (app ccase t) (lam u)) (lam v)
-- Closure under context concatenation.
concat : ∀ {A B Γ} Γ′ → Γ , A ⊢ B → Γ′ ⊢ A → Γ ⧺ Γ′ ⊢ B
concat Γ′ t u = app (mono⊢ (weak⊆⧺₁ Γ′) (lam t)) (mono⊢ weak⊆⧺₂ u)
-- Convertibility.
data _⋙_ {Γ : Cx Ty} : ∀ {A} → Γ ⊢ A → Γ ⊢ A → Set where
refl⋙ : ∀ {A} → {t : Γ ⊢ A}
→ t ⋙ t
trans⋙ : ∀ {A} → {t t′ t″ : Γ ⊢ A}
→ t ⋙ t′ → t′ ⋙ t″
→ t ⋙ t″
sym⋙ : ∀ {A} → {t t′ : Γ ⊢ A}
→ t ⋙ t′
→ t′ ⋙ t
congapp⋙ : ∀ {A B} → {t t′ : Γ ⊢ A ▻ B} → {u u′ : Γ ⊢ A}
→ t ⋙ t′ → u ⋙ u′
→ app t u ⋙ app t′ u′
congi⋙ : ∀ {A} → {t t′ : Γ ⊢ A}
→ t ⋙ t′
→ app ci t ⋙ app ci t′
congk⋙ : ∀ {A B} → {t t′ : Γ ⊢ A} → {u u′ : Γ ⊢ B}
→ t ⋙ t′ → u ⋙ u′
→ app (app ck t) u ⋙ app (app ck t′) u′
congs⋙ : ∀ {A B C} → {t t′ : Γ ⊢ A ▻ B ▻ C} → {u u′ : Γ ⊢ A ▻ B} → {v v′ : Γ ⊢ A}
→ t ⋙ t′ → u ⋙ u′ → v ⋙ v′
→ app (app (app cs t) u) v ⋙ app (app (app cs t′) u′) v′
congpair⋙ : ∀ {A B} → {t t′ : Γ ⊢ A} → {u u′ : Γ ⊢ B}
→ t ⋙ t′ → u ⋙ u′
→ app (app cpair t) u ⋙ app (app cpair t′) u′
congfst⋙ : ∀ {A B} → {t t′ : Γ ⊢ A ∧ B}
→ t ⋙ t′
→ app cfst t ⋙ app cfst t′
congsnd⋙ : ∀ {A B} → {t t′ : Γ ⊢ A ∧ B}
→ t ⋙ t′
→ app csnd t ⋙ app csnd t′
congboom⋙ : ∀ {C} → {t t′ : Γ ⊢ ⊥}
→ t ⋙ t′
→ app (cboom {C = C}) t ⋙ app cboom t′
conginl⋙ : ∀ {A B} → {t t′ : Γ ⊢ A}
→ t ⋙ t′
→ app (cinl {A = A} {B}) t ⋙ app cinl t′
conginr⋙ : ∀ {A B} → {t t′ : Γ ⊢ B}
→ t ⋙ t′
→ app (cinr {A = A} {B}) t ⋙ app cinr t′
congcase⋙ : ∀ {A B C} → {t t′ : Γ ⊢ A ∨ B} → {u u′ : Γ ⊢ A ▻ C} → {v v′ : Γ ⊢ B ▻ C}
→ t ⋙ t′ → u ⋙ u′ → v ⋙ v′
→ app (app (app ccase t) u) v ⋙ app (app (app ccase t′) u′) v′
-- TODO: Verify this.
beta▻ₖ⋙ : ∀ {A B} → {t : Γ ⊢ A} → {u : Γ ⊢ B}
→ app (app ck t) u ⋙ t
-- TODO: Verify this.
beta▻ₛ⋙ : ∀ {A B C} → {t : Γ ⊢ A ▻ B ▻ C} → {u : Γ ⊢ A ▻ B} → {v : Γ ⊢ A}
→ app (app (app cs t) u) v ⋙ app (app t v) (app u v)
-- TODO: What about eta for ▻?
beta∧₁⋙ : ∀ {A B} → {t : Γ ⊢ A} → {u : Γ ⊢ B}
→ app cfst (app (app cpair t) u) ⋙ t
beta∧₂⋙ : ∀ {A B} → {t : Γ ⊢ A} → {u : Γ ⊢ B}
→ app csnd (app (app cpair t) u) ⋙ u
eta∧⋙ : ∀ {A B} → {t : Γ ⊢ A ∧ B}
→ t ⋙ app (app cpair (app cfst t)) (app csnd t)
eta⊤⋙ : ∀ {t : Γ ⊢ ⊤} → t ⋙ unit
-- TODO: Verify this.
beta∨₁⋙ : ∀ {A B C} → {t : Γ ⊢ A} → {u : Γ ⊢ A ▻ C} → {v : Γ ⊢ B ▻ C}
→ app (app (app ccase (app cinl t)) u) v ⋙ app u t
-- TODO: Verify this.
beta∨₂⋙ : ∀ {A B C} → {t : Γ ⊢ B} → {u : Γ ⊢ A ▻ C} → {v : Γ ⊢ B ▻ C}
→ app (app (app ccase (app cinr t)) u) v ⋙ app v t
-- TODO: What about eta and commuting conversions for ∨? What about ⊥?
|
{"hexsha": "e4b9e5d13db8e76eb1c1b1361d7464cb459b085a", "size": 7564, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "IPC/Syntax/Hilbert.agda", "max_stars_repo_name": "mietek/hilbert-gentzen", "max_stars_repo_head_hexsha": "fcd187db70f0a39b894fe44fad0107f61849405c", "max_stars_repo_licenses": ["X11"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2016-07-03T18:51:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-01T10:29:18.000Z", "max_issues_repo_path": "IPC/Syntax/Hilbert.agda", "max_issues_repo_name": "mietek/hilbert-gentzen", "max_issues_repo_head_hexsha": "fcd187db70f0a39b894fe44fad0107f61849405c", "max_issues_repo_licenses": ["X11"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-06-10T09:11:22.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-10T09:11:22.000Z", "max_forks_repo_path": "IPC/Syntax/Hilbert.agda", "max_forks_repo_name": "mietek/hilbert-gentzen", "max_forks_repo_head_hexsha": "fcd187db70f0a39b894fe44fad0107f61849405c", "max_forks_repo_licenses": ["X11"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2086330935, "max_line_length": 87, "alphanum_fraction": 0.3778424114, "num_tokens": 3757}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
from PyDSTool import PyDSTool_ValueError, PyDSTool_TypeError
from PyDSTool.Generator import Vode_ODEsystem
@pytest.fixture()
def ode():
return Vode_ODEsystem({
'name': 'ode',
'vars': ['x'],
'pars': {'p': 0.5},
'varspecs': {'x': 'x+p'},
'pdomain': {'p': [0,1]}
})
def test_setting_invalid_key(ode):
with pytest.raises(KeyError):
ode.set(invalid_key='')
def test_setting_globalt0(ode):
ode.set(globalt0=11.0)
assert_almost_equal(11.0, ode.globalt0)
assert ode._extInputsChanged
def test_setting_checklevel(ode):
ode.set(checklevel=10)
assert ode.checklevel == 10
def test_setting_abseps(ode):
ode.set(abseps=0.001)
assert_almost_equal(1e-3, ode._abseps)
def test_setting_ics(ode):
ode.set(ics={'x': -1.0})
assert_almost_equal(-1.0, ode.initialconditions['x'])
def test_setting_ics_raises_exception_for_illegal_varname(ode):
with pytest.raises(ValueError):
ode.set(ics={'y': 0.0})
def test_setting_tdata(ode):
ode.set(tdata=[0, 10])
assert_array_almost_equal([0, 10], ode.tdata)
def test_setting_tdomain(ode):
ode.set(tdomain=[0, 20])
assert_array_almost_equal([0, 20], ode.tdomain)
def test_setting_tdata_respects_domain(ode):
ode.set(tdomain=[0, 20])
ode.set(tdata=[-10, 30])
assert_array_almost_equal([0, 20], ode.tdata)
def test_setting_xdomain(ode):
ode.set(xdomain={'x': [0, 20]})
assert_array_almost_equal([0, 20], ode.variables['x'].depdomain.get())
def test_setting_xdomain_using_single_value(ode):
ode.set(xdomain={'x': 0})
assert_array_almost_equal([0, 0], ode.variables['x'].depdomain.get())
def test_setting_xdomain_raises_exception_for_illegal_varname(ode):
with pytest.raises(ValueError):
ode.set(xdomain={'y': []})
def test_setting_xdomain_raises_exception_for_nondictionary_value(ode):
with pytest.raises(AttributeError):
ode.set(xdomain=('x', []))
def test_setting_xdomain_raises_exception_for_wrongly_sorted_values(ode):
with pytest.raises(PyDSTool_ValueError):
ode.set(xdomain={'x': [20, 0]})
def test_setting_xdomain_raises_exception_for_nonsequence_value(ode):
with pytest.raises(PyDSTool_TypeError):
ode.set(xdomain={'x': {}})
def test_setting_pdomain(ode):
ode.set(pdomain={'p': [0, 20]})
assert_array_almost_equal([0, 20], ode.parameterDomains['p'].get())
def test_setting_pdomain_using_single_value(ode):
ode.set(pdomain={'p': 0})
assert_array_almost_equal([0, 0], ode.parameterDomains['p'].get())
def test_setting_pdomain_raises_exception_for_illegal_parname(ode):
with pytest.raises(ValueError):
ode.set(pdomain={'q': []})
def test_setting_pdomain_raises_exception_for_nondictionary_value(ode):
with pytest.raises(AttributeError):
ode.set(pdomain=('p', []))
def test_setting_pdomain_raises_exception_for_wrongly_sorted_values(ode):
with pytest.raises(PyDSTool_ValueError):
ode.set(pdomain={'p': [20, 0]})
def test_setting_pdomain_raises_exception_for_nonsequence_value(ode):
with pytest.raises(PyDSTool_TypeError):
ode.set(pdomain={'p': {}})
|
{"hexsha": "6c2889b1036c01d3f258e3f69e5e65d679cd7ce7", "size": 3369, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/generator/test_odesystem_set_through_vode.py", "max_stars_repo_name": "yuanz271/PyDSTool", "max_stars_repo_head_hexsha": "886c143cdd192aea204285f3a1cb4968c763c646", "max_stars_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-04T15:01:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-25T16:08:43.000Z", "max_issues_repo_path": "tests/generator/test_odesystem_set_through_vode.py", "max_issues_repo_name": "yuanz271/PyDSTool", "max_issues_repo_head_hexsha": "886c143cdd192aea204285f3a1cb4968c763c646", "max_issues_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/generator/test_odesystem_set_through_vode.py", "max_forks_repo_name": "yuanz271/PyDSTool", "max_forks_repo_head_hexsha": "886c143cdd192aea204285f3a1cb4968c763c646", "max_forks_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-25T14:43:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-25T14:43:36.000Z", "avg_line_length": 25.9153846154, "max_line_length": 74, "alphanum_fraction": 0.7082220243, "include": true, "reason": "from numpy", "num_tokens": 904}
|
import numpy as np
from sympy.utilities.iterables import multiset_permutations
import networkx as nx
import itertools
from context import *
from utils.graph_utils import rand_permute_adj_matrix, is_isomorphic_from_adj
def generate_automorphism_dict(num_nodes, edges_range, directed=False, dtype=np.float64):
"""
Generate a dictionary with graphs belonging to the same isomorphism class.
:param num_nodes: int
:param edges_range: a generator with the integer number of edges to consider
:param directed: bool
:param dtype: type of the adjacency matrix array
:return: dictionary with isomorphism-class ids (consecutive integers) as keys and lists of
adjacency matrices as values
"""
class_dict = dict()
ids = 0
for num_edges in edges_range:
# Create a temporary class dictionary to not redundantly compare graphs
# to those with different number of edges
temp_class_dict = dict()
if directed:
num_possible_edges = num_nodes ** 2 - num_nodes # Number of possible edges
else:
num_possible_edges = int((num_nodes ** 2 - num_nodes) / 2) # Number of possible edges
for edge_vals in multiset_permutations([1] * num_edges + [0] * (num_possible_edges - num_edges)):
# Graph construction implementation differs depending on whether directed or undirected
if directed:
# Add the self-loop connection elements which are set to 0
for i in range(0, num_nodes ** 2, num_nodes + 1):
edge_vals.insert(i, 0)
graph = np.array(edge_vals, dtype=dtype).reshape([num_nodes, num_nodes])
else:
graph = np.zeros([num_nodes, num_nodes], dtype=dtype)
count = 0
for i in range(0, num_nodes):
# Fill in the upper triangular part of the adjacency matrix
graph[i, i + 1:] = edge_vals[count:count + num_nodes - i - 1]
count += num_nodes - i - 1
# copy the upper triangular part into the lower triangular entries
graph += graph.T
# Check whether belongs to any automorphism groups in class_dict
automorphism_in_dict = False
for class_id in temp_class_dict.keys():
class_representative = temp_class_dict[class_id][0]
if is_isomorphic_from_adj(class_representative, graph):
automorphism_in_dict = True
temp_class_dict[class_id].append(graph)
break
# If automorphism class not already in dictionary, add it
if not automorphism_in_dict:
temp_class_dict[ids] = [graph]
ids += 1
# Merge the temp class dictionary into existing one
class_dict = {**class_dict, **temp_class_dict}
return class_dict
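# Example (sketch): group all undirected 3-node graphs with 0-3 edges into
# isomorphism classes and count the members of each class:
#
# classes = generate_automorphism_dict(3, range(0, 4))
# for class_id, graphs in classes.items():
#     print(class_id, len(graphs))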
def generate_automorphism_dataset_eval_data(num_nodes, edges_range, directed=False, dtype=np.float64):
"""
Generate a dataset for embedding evaluation and visualisation. Returns two lists: list of unique names and
a list of graphs.
"""
d = generate_automorphism_dict(num_nodes, edges_range, directed=directed, dtype=dtype)
names = []
graphs = []
for class_id, graph_list in d.items():
i = 0
for graph in graph_list:
name = f"class{class_id}_graph{i}"
names.append(name)
graphs.append(graph)
i += 1
return names, graphs
def generate_batch(batch_size, num_nodes, directed=False):
# todo: rewrite and encapsulate pieces
assert batch_size % 4 == 0
batch_input_1 = np.ndarray(shape=(batch_size, num_nodes, num_nodes), dtype=np.float32)
batch_input_2 = np.ndarray(shape=(batch_size, num_nodes, num_nodes), dtype=np.float32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.float32)
# Generate two random graphs
adj_mat_1 = np.random.randint(0, 2, size=[num_nodes] * 2)
adj_mat_2 = np.random.randint(0, 2, size=[num_nodes] * 2)
# Ensure the diagonal is zero
adj_mat_1 -= adj_mat_1 * np.eye(num_nodes, dtype=adj_mat_1.dtype)
adj_mat_2 -= adj_mat_2 * np.eye(num_nodes, dtype=adj_mat_2.dtype)
if not directed:
adj_mat_1[np.tril_indices(num_nodes)] = adj_mat_1.T[np.tril_indices(num_nodes)]
adj_mat_2[np.tril_indices(num_nodes)] = adj_mat_2.T[np.tril_indices(num_nodes)]
# Check whether the two graphs are isomorphic
if is_isomorphic_from_adj(adj_mat_1, adj_mat_2):
# If isomorphic, use the following sophisticated method to make them not isomorphic
idx_to_change = [np.random.randint(0, num_nodes), np.random.randint(0, num_nodes - 1)]
idx_to_change[1] += 1 if idx_to_change[1] == idx_to_change[0] else 0 # Ensures index is off diagonal
idx_to_change = tuple(idx_to_change)
adj_mat_2[idx_to_change] = not adj_mat_2[idx_to_change]
# If undirected, symmetry has to be maintained
if not directed:
idx_complement = (idx_to_change[1], idx_to_change[0])
adj_mat_2[idx_complement] = not adj_mat_2[idx_complement]
adj_mats = (adj_mat_1, adj_mat_2)
# Generate some random permutations of the input graphs
i = 0
# For each batch make an equal number of example with [g1, g1], [g1, g2], [g2, g1], [g2, g2]
for j, k in itertools.product(range(2), repeat=2):
for _ in range(batch_size // 4):
batch_input_1[i, :, :] = rand_permute_adj_matrix(adj_mats[j])
batch_input_2[i, :, :] = rand_permute_adj_matrix(adj_mats[k])
# Set label to 1 if graphs isomorphic, to 0 otherwise
labels[i] = 1 if j == k else 0
i += 1
return batch_input_1, batch_input_2, labels
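# Example (sketch): a batch of 8 pairs of randomly permuted 5-node graphs,
# where labels[i] is 1 iff both graphs in the pair are isomorphic:
#
# g1, g2, labels = generate_batch(8, 5)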
def generate_example(num_nodes, directed=False, only_negative=True):
label = np.ndarray(shape=(1,), dtype=np.float32)
# Generate two random graphs
adj_mat_1 = np.random.randint(0, 2, size=[num_nodes] * 2)
adj_mat_2 = np.random.randint(0, 2, size=[num_nodes] * 2)
# Ensure the diagonal is zero
adj_mat_1 -= adj_mat_1 * np.eye(num_nodes, dtype=adj_mat_1.dtype)
adj_mat_2 -= adj_mat_2 * np.eye(num_nodes, dtype=adj_mat_2.dtype)
if not directed:
adj_mat_1[np.tril_indices(num_nodes)] = adj_mat_1.T[np.tril_indices(num_nodes)]
adj_mat_2[np.tril_indices(num_nodes)] = adj_mat_2.T[np.tril_indices(num_nodes)]
# Check whether the two graphs are isomorphic
isomorphic = is_isomorphic_from_adj(adj_mat_1, adj_mat_2)
if isomorphic and only_negative:
# If isomorphic, use the following sophisticated method to make them not isomorphic
idx_to_change = [np.random.randint(0, num_nodes), np.random.randint(0, num_nodes - 1)]
idx_to_change[1] += 1 if idx_to_change[1] == idx_to_change[0] else 0 # Ensures index is off diagonal
idx_to_change = tuple(idx_to_change)
adj_mat_2[idx_to_change] = not adj_mat_2[idx_to_change]
# If undirected, symmetry has to be maintained
if not directed:
idx_complement = (idx_to_change[1], idx_to_change[0])
adj_mat_2[idx_complement] = not adj_mat_2[idx_complement]
isomorphic = False
label[0] = int(isomorphic)
return adj_mat_1.astype(np.float32), adj_mat_2.astype(np.float32), label
|
{"hexsha": "d3ec240672b4d405539ef927af3f051cdee5abdd", "size": 7364, "ext": "py", "lang": "Python", "max_stars_repo_path": "embedding/iso_nn_data_util.py", "max_stars_repo_name": "BrunoKM/rhoana_graph_tools", "max_stars_repo_head_hexsha": "7150f4bc6337ecf51dd9123cf03561a57d655160", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-08-17T00:12:30.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-17T00:12:30.000Z", "max_issues_repo_path": "embedding/iso_nn_data_util.py", "max_issues_repo_name": "BrunoKM/rhoana_graph_tools", "max_issues_repo_head_hexsha": "7150f4bc6337ecf51dd9123cf03561a57d655160", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "embedding/iso_nn_data_util.py", "max_forks_repo_name": "BrunoKM/rhoana_graph_tools", "max_forks_repo_head_hexsha": "7150f4bc6337ecf51dd9123cf03561a57d655160", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-19T07:08:54.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-19T07:08:54.000Z", "avg_line_length": 46.025, "max_line_length": 110, "alphanum_fraction": 0.6630907116, "include": true, "reason": "import numpy,from sympy,import networkx", "num_tokens": 1847}
|
#!/usr/bin/env python3
##
#
# This is a quick script to plot the trajectories resulting from different
# methods on the same plot. We can also do this with comparison.py, but this
# way allows us to compare directly with MILP, which we don't implement here.
#
##
import numpy as np
import matplotlib.pyplot as plt
from example_scenarios import EitherOr
# Bayesian and Differential Evolution (ours)
bayes_de = np.array([[ 0. , 0. , 0.40589072, 1.07705817,
1.25753808, 1.59868652, 2.3810179 , 3.7391193 ,
5.58559533, 7.51505297, 8.88022136, 10.57330406,
12.33943439, 14.22233127, 15.53800547, 16.39248662,
17.70622925, 19.65664343, 22.07932351, 23.87928503,
25.46532034, 26.83402606, 27.81499878, 28.70118186,
29.18346343, 29.73046001],
[ 0. , 0. , 0.46280145, 1.51908195,
2.94267607, 4.74869325, 6.00299569, 7.41522627,
8.1761501 , 8.53863688, 8.58243583, 9.19767658,
9.58281266, 9.63684499, 9.24042164, 8.38195576,
7.23940624, 5.97687117, 4.09515693, 1.87119897,
-0.42221082, -2.61888229, -4.70272269, -7.02074232,
-9.07477992, -11.57816735]])
bayes_only = np.array([[ 0. , 0. , 0.9 , 2.7 , 4.17876169,
6.05964957, 7.8616493 , 10.56364903, 14.04838544, 16.63312185,
19.56277241, 22.4394923 , 25.72151104, 28.94476359, 33.00676685,
36.78232368, 40.44508108, 43.62579294, 46.7455863 , 49.78947878,
51.93337125, 53.9746811 , 55.11599095, 56.86829866, 58.23722201,
60.50614535],
[ 0. , 0. , 0.9 , 2.15018693, 3.52511296,
5.55260607, 8.48009918, 10.80055611, 13.1397553 , 16.37895449,
19.40945241, 22.78858484, 26.01064856, 28.77929896, 30.83728748,
33.79527601, 36.13239412, 38.82021177, 41.97267112, 44.22513047,
47.23110768, 50.95395504, 54.47768564, 57.91981577, 60.87626646,
63.71243011]])
DE_trajectory = np.array([[ 0. , 0. , -0.28030078, -0.27876673, -0.5738335 ,
-1.12219717, -0.86663345, -0.94453174, -0.74063096, -0.55484271,
0.34165481, 1.33208709, 2.52353666, 3.63865814, 4.23842935,
4.68520265, 4.75485459, 5.20456254, 5.51719713, 5.80676006,
5.44802709, 5.66453646, 6.02449398, 6.24696097, 7.09638449,
7.73855101],
[ 0. , 0. , 0.62252809, 1.36556185, 2.89870913,
3.82920259, 4.43967407, 4.62210637, 4.09256274, 4.09906661,
4.47504123, 4.81214299, 5.56976453, 6.93938765, 8.01662249,
8.70636162, 9.50616682, 10.28208942, 10.38454851, 11.07785506,
10.98220712, 10.32404833, 9.81854771, 9.0117156 , 8.16070681,
6.98165327]])
milp = np.array([[ 0.00000000e+00, 0.00000000e+00, 1.07417181e-02,
4.06969249e-02, 9.83373907e-02, 1.92134886e-01,
3.30561182e-01, 5.22088048e-01, 7.75187256e-01,
1.09833058e+00, 1.49998978e+00, 1.98863663e+00,
2.54826151e+00, 3.16285479e+00, 3.81640684e+00,
4.49290804e+00, 5.17634875e+00, 5.85071936e+00,
6.50001023e+00, 7.10821174e+00, 7.49998978e+00,
7.50001023e+00, 7.49998978e+00, 7.49999386e+00,
7.50001022e+00],
[ 0.00000000e+00, 0.00000000e+00, -1.43905493e-02,
-3.43459566e-02, -5.10405304e-02, -5.56485796e-02,
-3.93444126e-02, 6.69766177e-03, 9.13033349e-02,
2.23298298e-01, 4.11508243e-01, 6.64758861e-01,
9.91875843e-01, 1.40168488e+00, 1.90301166e+00,
2.50468189e+00, 3.21552124e+00, 4.04435541e+00,
5.00001010e+00, 6.09131099e+00, 7.27304268e+00,
8.49998977e+00, 9.72693686e+00, 1.09538840e+01,
1.21808310e+01]])
# Set up the scenario for plotting
x0 = np.asarray([0,0,0,0])[:,np.newaxis]
sys = EitherOr(x0)
# Default cycle of colors so we can match other plots
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
plt.figure()
# Bayesian and Differential Evolution (ours)
time = 20.6
rho = 0.248
sys.plot_scenario(plt.gca())
plt.plot(bayes_de[0,:],bayes_de[1,:],marker="x",
color=colors[0],
label="Compute Time: %ss Robustness: %s" % (time, rho)
)
plt.xlim([-2,10])
plt.ylim([-1,12])
plt.legend()
plt.figure()
# Bayesian Only
time = 136.1
rho = -0.05
sys.plot_scenario(plt.gca())
plt.plot(bayes_only[0,:],bayes_only[1,:],marker="o",
color=colors[1],
label="Compute Time: %ss Robustness: %s" % (time, rho)
)
plt.xlim([-2,10])
plt.ylim([-1,12])
plt.legend()
plt.figure()
# Differential Evolution Only
time = 8.80
rho = 0.096
sys.plot_scenario(plt.gca())
plt.plot(DE_trajectory[0,:],DE_trajectory[1,:],marker="^",
color=colors[2],
label="Compute Time: %ss Robustness: %s" % (time, rho)
)
plt.xlim([-2,10])
plt.ylim([-1,12])
plt.legend()
plt.figure()
# MILP
time = 74.839
rho = 0.5
sys.plot_scenario(plt.gca())
plt.plot(milp[0,:],milp[1,:],marker="s",
color=colors[3],
label="Compute Time: %ss Robustness: %s" % (time, rho)
)
plt.xlim([-2,10])
plt.ylim([-1,12])
plt.legend()
print(colors)
plt.show()
|
{"hexsha": "02a8e864a6227411b8fe521b29b56a846f7e553e", "size": 5668, "ext": "py", "lang": "Python", "max_stars_repo_path": "stl_optimization_results_plot.py", "max_stars_repo_name": "vincekurtz/STL_optimization", "max_stars_repo_head_hexsha": "05993a497b4fa68e43f0db5f86b7312ae5e7afc2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-12-25T03:55:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-17T23:19:52.000Z", "max_issues_repo_path": "stl_optimization_results_plot.py", "max_issues_repo_name": "vincekurtz/STL_optimization", "max_issues_repo_head_hexsha": "05993a497b4fa68e43f0db5f86b7312ae5e7afc2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stl_optimization_results_plot.py", "max_forks_repo_name": "vincekurtz/STL_optimization", "max_forks_repo_head_hexsha": "05993a497b4fa68e43f0db5f86b7312ae5e7afc2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0724637681, "max_line_length": 102, "alphanum_fraction": 0.5739237826, "include": true, "reason": "import numpy", "num_tokens": 2320}
|
#include <demo/color.h>
#include <demo/memcpy.h>
#include <boost/simd.hpp>
#include <boost/simd/function/load.hpp>
#include <boost/simd/function/store.hpp>
#include <intrin.h>
#include <omp.h>
#include <cstdint>
#include <cstring>
#include <memory>
#include <fstream>
bench::time_point bench::start_;
int main(int, char**)
{
std::ofstream stream;
omp_set_num_threads(8);
stream.open("memory.csv");
run_memory_benchmark(stream);
stream.close();
stream.open("color.csv");
run_color_benchmark(stream);
stream.close();
return 0;
}
void* memcpy_parallel(void* destination, const void* source, size_t num)
{
auto dst = reinterpret_cast<int8_t*>(destination);
auto src = reinterpret_cast<const int8_t*>(source);
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(num); ++i) { // MSVC OpenMP requires a signed loop index
dst[i] = src[i];
}
return destination;
}
void* memcpy_vector(void* destination, const void* source, size_t num)
{
const char* src = (const char*)source;
char* dst = (char*)destination;
if (!((uintptr_t)src & 15) && !((uintptr_t)dst & 15)) {
__m128 values[4];
for (size_t i = num / 64; i--;) {
_mm_prefetch(src, _MM_HINT_NTA);
values[0] = *(__m128*)(src + 0);
values[1] = *(__m128*)(src + 16);
values[2] = *(__m128*)(src + 32);
values[3] = *(__m128*)(src + 48);
_mm_stream_ps((float*)(dst + 0), values[0]);
_mm_stream_ps((float*)(dst + 16), values[1]);
_mm_stream_ps((float*)(dst + 32), values[2]);
_mm_stream_ps((float*)(dst + 48), values[3]);
src += 64;
dst += 64;
}
num &= 63;
}
while (num--) {
*dst++ = *src++;
}
return destination;
}
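// Usage sketch (assumption): the SSE fast path only triggers when both
// buffers are 16-byte aligned, e.g. allocated via _mm_malloc:
// void* dst = _mm_malloc(n, 16);
// memcpy_vector(dst, src, n);
// _mm_free(dst);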
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define CLIP(x, min, max) (MIN(MAX((x), (min)), (max)))
float* yuv_to_rgba(const float* yuv, int size)
{
const int rgba_size = size / 3 * 4;
auto rgba = new float[rgba_size];
for (int i = 0, j = 0; i < size && j < rgba_size; i += 3, j += 4) {
const auto y = yuv[i];
const auto u = yuv[i + 1];
const auto v = yuv[i + 2];
rgba[j] = CLIP(y + 1.140f * v, .0f, 1.f);
rgba[j + 1] = CLIP(y - 0.395f * u - 0.581f * v, .0f, 1.f);
rgba[j + 2] = CLIP(y + 2.032f * u, .0f, 1.f);
rgba[j + 3] = 1.f;
}
return rgba;
}
float* yuv_to_rgba_parallel(const float* yuv, int size)
{
const int pixel_size = size / 3;
const int rgba_size = size / 3 * 4;
auto rgba = new float[rgba_size];
#pragma omp parallel for
for (int p = 0; p < pixel_size; ++p) {
const auto i = p * 3;
const auto j = p * 4;
const auto y = yuv[i];
const auto u = yuv[i + 1];
const auto v = yuv[i + 2];
rgba[j] = CLIP(y + 1.140f * v, .0f, 1.f);
rgba[j + 1] = CLIP(y - 0.395f * u - 0.581f * v, .0f, 1.f);
rgba[j + 2] = CLIP(y + 2.032f * u, .0f, 1.f);
rgba[j + 3] = 1.f;
}
return rgba;
}
constexpr void yuv_to_rgba_vector_result(int j, float* rgba, float* r_store, float* g_store, float* b_store)
{
for (int n = 0; n < 4; ++n) {
rgba[j + n * 4] = CLIP(r_store[n], .0f, 1.f);
rgba[j + n * 4 + 1] = CLIP(g_store[n], .0f, 1.f);
rgba[j + n * 4 + 2] = CLIP(b_store[n], .0f, 1.f);
rgba[j + n * 4 + 3] = 1.f;
}
}
float* yuv_to_rgba_vector(const float* yuv, int size)
{
static_assert(boost::simd::pack<float>::static_size == 4, "boost pack is not of size 4");
const int pixel_size = size / 3;
const int rgba_size = size / 3 * 4;
auto rgba = new float[rgba_size];
std::array<float, 4> r_store;
std::array<float, 4> g_store;
std::array<float, 4> b_store;
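    // NOTE: this loop converts 4 pixels (12 input floats) per iteration and
    // assumes `size` is a multiple of 12; trailing pixels are not handled.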
for (int i = 0, j = 0; i < size && j < rgba_size; i += 3 * 4, j += 4 * 4) {
boost::simd::pack<float, 4> y{yuv[i], yuv[i + 3], yuv[i + 6], yuv[i + 9]};
boost::simd::pack<float, 4> u{yuv[i + 1], yuv[i + 4], yuv[i + 7], yuv[i + 10]};
boost::simd::pack<float, 4> v{yuv[i + 2], yuv[i + 5], yuv[i + 8], yuv[i + 11]};
auto r = y + 1.140f * v;
auto g = y - 0.395f * u - 0.581f * v;
auto b = y + 2.032f * u;
boost::simd::store(r, r_store.data());
boost::simd::store(g, g_store.data());
boost::simd::store(b, b_store.data());
yuv_to_rgba_vector_result(j, rgba, r_store.data(), g_store.data(), b_store.data());
}
return rgba;
}
|
{"hexsha": "a7024cad11acf2cb8b5e684a8bd6d183c5e8384e", "size": 4511, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "source/main.cpp", "max_stars_repo_name": "chronos38/low_latency_demo", "max_stars_repo_head_hexsha": "de0d0d3dcebff23ba77c06c6c368b9d1c3d2c648", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2017-11-18T18:23:16.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-15T12:41:24.000Z", "max_issues_repo_path": "source/main.cpp", "max_issues_repo_name": "chronos38/low_latency_demo", "max_issues_repo_head_hexsha": "de0d0d3dcebff23ba77c06c6c368b9d1c3d2c648", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/main.cpp", "max_forks_repo_name": "chronos38/low_latency_demo", "max_forks_repo_head_hexsha": "de0d0d3dcebff23ba77c06c6c368b9d1c3d2c648", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-04-11T20:18:39.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-11T20:18:39.000Z", "avg_line_length": 29.1032258065, "max_line_length": 108, "alphanum_fraction": 0.530259366, "num_tokens": 1544}
|
"""
Reinforcement learning via policy gradients
"""
import random, math, pickle, time
import interface, move, utils
import numpy as np
from agent import Agent
from rl import RLAlgorithm
from collections import defaultdict
from utils import progressBar
from copy import deepcopy
from sklearn.neural_network import MLPRegressor
from constants import NO_MOVE
class PolicyGradientAlgorithm(RLAlgorithm):
def __init__(self, actions, discount, featureExtractor, exploration = True, weights = None):
self.actions = actions
self.n_actions = len(self.actions(interface.State([], [])))
self.discount = discount
self.featureExtractor = featureExtractor
self.exploration = exploration
self.rl_type = "policy_gradients"
self.verbose = False
self.numIters = 0
self.standardize_rewards = True
self.step_size = 0.01
self.buffer_size = 50 # number of rollouts before updating the weights
self._x_buffer = []
self._y_buffer = []
self._p_buffer = []
self._r_buffer = []
if weights is not None:
self.weights = weights
else:
self.weights = np.random.rand(self.n_actions, self.featureExtractor.nFeatures()) / np.sqrt(self.featureExtractor.nFeatures())
# self.weights = np.zeros((self.n_actions, self.featureExtractor.nFeatures()))
def __str__(self):
return "PolicyGradients"
def exportModel(self):
return self.weights
def stopExploration(self):
self.exploration = False
def evalActions(self, state):
""" Get the model's confidence to take each action from `state` """
scores = np.dot(self.weights, self.featureExtractor.arrayExtractor(state, NO_MOVE))
probas = utils.softmax(scores)
return probas
def getActionDetailed(self, state):
"""
Same as getAction but also return the relative action index.
"""
self.numIters += 1
        if len(self.actions(state)) == 0:
            return None, None  # keep the (action, index) contract so getAction can unpack
# evaluate model confidence
probas = self.evalActions(state)
# choose which (relative) action to take
if self.exploration:
action_idx = np.random.choice(range(self.n_actions), p = probas)
# training on-going, save action taken and proba
self._x_buffer.append(self.featureExtractor.arrayExtractor(state, NO_MOVE))
self._y_buffer.append(action_idx)
self._p_buffer.append(probas)
else:
action_idx = np.argmax(probas)
rel_action = self.actions(state)[action_idx]
abs_action = move.Move(self.featureExtractor.toAbsolutePos(state, rel_action.direction()), norm = rel_action.norm())
if self.verbose:
# print("")
# print(state)
print(self.featureExtractor.dictExtractor(state, NO_MOVE))
# print(self.featureExtractor.arrayExtractor(state, NO_MOVE))
print(probas)
print(rel_action)
# print(abs_action)
# rotate relative action to absolute
return abs_action, action_idx
def getAction(self, state):
"""
The strategy implemented by this algorithm.
If `exploration` is ON, sample action with respect to probas from evalActions.
"""
a, _ = self.getActionDetailed(state)
return a
def getStepSize(self):
"""
Get the step size to update the weights.
"""
return self.step_size
# return 1.0 / math.sqrt(self.numIters)
def discountedRewards(self, rewards):
"""
Compute total discounted rewards at each step given the sequence of step rewards.
"""
n_steps = len(rewards)
dr = list(np.zeros(n_steps))
s = 0
        for t in range(1, n_steps + 1):
s = self.discount * s + rewards[n_steps - t]
dr[n_steps - t] = s
return dr
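    # Example (illustrative): with discount=0.5, step rewards [1, 0, 2]
    # give discounted returns [1.5, 1.0, 2.0].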
def addRolloutFeedback(self, rewards, rollout_idx):
self._r_buffer += self.discountedRewards(rewards)
if ((rollout_idx + 1) % self.buffer_size) == 0:
            # Softmax policy-gradient weight update, following
            # https://cs231n.github.io/neural-networks-case-study/
            # dL_i / df_k = p_k - 1_(y_i == k)
            # df_k / dX = X.T dot
for i, a_idx in enumerate(self._y_buffer):
self._p_buffer[i][a_idx] -= 1
# gradient with respect to scores
dlogp = np.asarray(self._p_buffer) # dlogp has shape (n_choices , n_actions)
# weight by rewards
if self.standardize_rewards:
reward_weights = (np.asarray(self._r_buffer) - np.mean(self._r_buffer)) / np.std(self._r_buffer) # standardize rewards (for stability)
else:
reward_weights = np.asarray(self._r_buffer)
weighted_dlogp = (reward_weights * dlogp.T).T # weighted_dlogp has shape (n_choices , n_actions)
X = np.asarray(self._x_buffer) # X has shape (n_choices, n_features)
dW = np.dot(X.T, weighted_dlogp) # dW has shape (n_features, n_actions)
            self.weights -= (self.getStepSize() / math.log(rollout_idx + 2)) * dW.T  # damped step size
            # self.weights -= self.getStepSize() * dW.T
            self._x_buffer, self._y_buffer, self._r_buffer, self._p_buffer = [], [], [], []  # reset buffers
|
{"hexsha": "138c85a1fe2c03c16ea92e8b36db6f2bcadfa468", "size": 5700, "ext": "py", "lang": "Python", "max_stars_repo_path": "policy_gradients.py", "max_stars_repo_name": "sds-dubois/snake.ai", "max_stars_repo_head_hexsha": "b5ee2a91b210055397e7942c7e24a51a5e583834", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2017-02-19T20:05:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T14:42:06.000Z", "max_issues_repo_path": "policy_gradients.py", "max_issues_repo_name": "sds-dubois/snake.ai", "max_issues_repo_head_hexsha": "b5ee2a91b210055397e7942c7e24a51a5e583834", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-12-05T12:53:55.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-03T00:15:14.000Z", "max_forks_repo_path": "policy_gradients.py", "max_forks_repo_name": "sds-dubois/snake.ai", "max_forks_repo_head_hexsha": "b5ee2a91b210055397e7942c7e24a51a5e583834", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2017-03-02T20:43:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-16T15:53:53.000Z", "avg_line_length": 35.625, "max_line_length": 150, "alphanum_fraction": 0.6122807018, "include": true, "reason": "import numpy", "num_tokens": 1274}
|
import numpy as np
from chaco.api import AbstractPlotData, ArrayPlotData, Plot, ArrayDataSource
from traits.api import Dict, Instance, Str
from pandas import DataFrame
class PandasPlotData(AbstractPlotData):
''' Chaco requires a PlotData interface to manage plot/data mapping; however, pandas
already is its own nice data handler, so this class mostly provides a wrapper that tries
to be a hidden middle man. As such, I try to make this PandasPlotData feel like a pandas
dataframe. It would be nice to have chaco communicate with a dataframe directly, but for now
    it must do so through this liaison class.'''
# The dataframe, column and row labels
df = Instance(DataFrame)
    # Dict to store extra dataframes or series that aren't necessarily in the dataframe
extras=Dict()
    # Dictionary mapping the string form of each data label to the original label;
    # labels are not restricted to strings. This allows users to call get_data() with
    # floats while chaco plots access the same data with strings.
# Dict mapping data series name to a mask array holding the selections
selections = Dict()
def __init__(self, dataframe, columnlabel='columns', indexlabel='index'):
"""PandasPlotData exposes a PlotData interface from a DataFrame.
It is chosen that this object MUST be initialized with a dataframe.
        The initializer in ArrayPlotData will assign the names "seriesN" to
        any unlabeled arrays passed into the program through the *data argument.
Because dataframes are inherently labeled, this behavior is unnecessary
for basic use.
indexlabel and columnlabel keywords let the user access the data stored in the rows/column
labels in the dataframe when calling plot. It is important that no columns or rows
in the data share this name.
Data is stored column-wise or row-wise depending on the choice of axis (0=column)."""
super(AbstractPlotData, self).__init__() #AbstractPlotData has no __init__, but if that ever changes...
### make traits or just leave as attributes? ###
if len(dataframe.shape) > 2:
            raise NotImplementedError('Multidimensional dataframes of order 3 or higher \
                are not supported in PandasPlotData')
self.columnlabel=columnlabel
self.indexlabel=indexlabel
event=self._add_dataframe(dataframe)
self.data_changed = event
### SHOULD JUST SUBCLASS ARRAY PLOT DATA
def list_data(self, axis=0, as_strings=False):
""" Returns a list of the names of the selections managed by this instance. For convienence,
axis can be 0,1 or the index label column label.
"""
if axis==0 or axis==self.columnlabel:
if as_strings:
datalist=self._colmasks.keys()
else:
datalist=self._colmasks.values()
elif axis==1 or axis==self.indexlabel:
if as_strings:
datalist=self._rowmasks.keys()
else:
datalist=self._rowmasks.values()
return datalist
    def get_data(self, name):
        """ Attempts to return the data for `name`, which can come from the dataframe, from the
        "extras" dict, or from the column/index designators. Tries them systematically in the
        order: dataframe, extras, string-masked dataframe, string-masked extras, labels. """
        try:
            return self.df[name].values
        except KeyError:
            pass
        try:
            return self.extras[name]
        except KeyError:
            pass
        try:
            return self.df[self._colmasks[name]].values  # if name is the str() form of a label
        except KeyError:
            pass
        try:
            return self.extras[self._colmasks[name]].values
        except KeyError:
            pass
        if name == self.columnlabel:
            return self._colmasks.values()
        elif name == self.indexlabel:
            return self._rowmasks.values()
        raise KeyError(name)
def get_row_data(self, name):
''' Returns row data. This is never used by plots, only a convienence method for users.'''
return self.df.xs(name)
def set_data(self, name, new_data):
""" Sets the specified array as the value for either the specified
name or a generated name.
Implements AbstractPlotData.
        Note: this only sets column data on the underlying pandas DataFrame.
Parameters
----------
name : string
The name of the array whose value is to be set.
new_data : array
The array to set as the value of *name*.
generate_name : Boolean
I've eliminated this functionality for this datatype
Returns
-------
The name under which the array was set.
"""
if not self.writable:
return None
if isinstance(new_data, list) or isinstance(new_data, tuple):
new_data = np.array(new_data) #Convert to array data
        event = {}
        ### If the entry is in the dataframe (by original or string label), change it in place
        try:
            self.df[name] = new_data
            event['changed'] = [str(name)]
        except KeyError:
            self.df[self._colmasks[name]] = new_data
            event['changed'] = [str(name)]  # Enforce strings, since DF names can be floats
        self.data_changed = event
return name
def update_dataframe(self, dataframe):
        ''' Update the dataframe in place, assuming every column of `dataframe` is already present '''
for column in dataframe.columns.values:
self.set_data(column, dataframe[column])
def set_dataframe(self, dataframe):
''' Completely reset plotdata with a new object. This removes ALL
old data, does not cross check for overlapping keys.'''
### Remove old columns of dataframe ###
removed={'removed':self._colmasks.keys()}
### Add new entries
added=self._add_dataframe(dataframe)
        event = dict(list(added.items()) + list(removed.items()))
self.data_changed = event
def _add_dataframe(self, dataframe):
        ''' Convenience function called either by __init__ or by set_dataframe. Needed because
        we can't evaluate self.df as a Bool() to distinguish initialization from
        overwriting the dataframe.'''
self.df=dataframe
self._colmasks=dict( ((str(val), val) for val in self.df.columns.values))
self._rowmasks=dict( ((str(val), val) for val in self.df.index.values))
return {'added':self._colmasks.keys()}
######## These are used by chaco to inform the plot that a certain region of data is selected
def get_selection(self, name):
""" Returns the selection for the given column name """
return self.selections.get(name, None)
def set_selection(self, name, selection):
# Store the selection in a separate dict mapping name to its
# selection array
self.selections[name] = selection
self.data_changed = True
if __name__ == '__main__':
    df = DataFrame(np.random.randn(10, 10))
    data = PandasPlotData(df)
    print(data.df[0])
    df2 = DataFrame(np.random.randn(10, 10))
    data.set_dataframe(df2)
    print(data.df[0])
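    # get_data accepts either the original label or its string form, e.g.
    # data.get_data(0) and data.get_data('0') return the same column.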
|
{"hexsha": "980849a71f1504bb5e3af04a0a12d8a75831d25b", "size": 7500, "ext": "py", "lang": "Python", "max_stars_repo_path": "skspec/chaco_interface/pandasplotdata.py", "max_stars_repo_name": "hugadams/scikit-spectra", "max_stars_repo_head_hexsha": "c451be6d54080fbcc2a3bc5daf8846b83b7343ee", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 83, "max_stars_repo_stars_event_min_datetime": "2015-01-15T18:57:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T11:43:55.000Z", "max_issues_repo_path": "skspec/chaco_interface/pandasplotdata.py", "max_issues_repo_name": "hugadams/scikit-spectra", "max_issues_repo_head_hexsha": "c451be6d54080fbcc2a3bc5daf8846b83b7343ee", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2015-02-02T22:46:51.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-29T17:23:32.000Z", "max_forks_repo_path": "skspec/chaco_interface/pandasplotdata.py", "max_forks_repo_name": "hugadams/scikit-spectra", "max_forks_repo_head_hexsha": "c451be6d54080fbcc2a3bc5daf8846b83b7343ee", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 43, "max_forks_repo_forks_event_min_datetime": "2015-01-02T20:47:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-18T16:14:40.000Z", "avg_line_length": 39.6825396825, "max_line_length": 119, "alphanum_fraction": 0.6236, "include": true, "reason": "import numpy", "num_tokens": 1567}
|
include("test_data.jl")
function test_cmp(io::Union{Nothing,IO} = nothing)
map(TestData.test_cmp_data) do x
ic = IntcodeMachine(x)
run_intcode!(ic)
res = fetch(ic)
isnothing(res) && return @error "Intcode failed CMP '$x'"
@info something(res)
!isnothing(io) && flush(io)
end
end
function test_jmp(io::Union{Nothing,IO} = nothing)
map(TestData.test_jmp_data) do x
ic = IntcodeMachine(x)
run_intcode!(ic)
res = fetch(ic)
        isnothing(res) && return @error "Intcode failed JMP '$x'"
@info something(res)
!isnothing(io) && flush(io)
end
end
function test_larger(io::Union{Nothing,IO} = nothing)
map(TestData.test_larger_data) do x
ic = IntcodeMachine(x)
run_intcode!(ic)
res = fetch(ic)
        isnothing(res) && return @error "Intcode failed LARGER '$x'"
@info something(res)
!isnothing(io) && flush(io)
end
end
|
{"hexsha": "3d6879fc3a4e4d12a290c82c76917ad89fce5e31", "size": 966, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "events/2019/day-05/Intcode/test/test_cmp_jmp.jl", "max_stars_repo_name": "myrddin89/advent-of-code", "max_stars_repo_head_hexsha": "1401484be662794841c0ac5b863c0fda28e2fe06", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "events/2019/day-05/Intcode/test/test_cmp_jmp.jl", "max_issues_repo_name": "myrddin89/advent-of-code", "max_issues_repo_head_hexsha": "1401484be662794841c0ac5b863c0fda28e2fe06", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "events/2019/day-05/Intcode/test/test_cmp_jmp.jl", "max_forks_repo_name": "myrddin89/advent-of-code", "max_forks_repo_head_hexsha": "1401484be662794841c0ac5b863c0fda28e2fe06", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6, "max_line_length": 65, "alphanum_fraction": 0.602484472, "num_tokens": 263}
|
# Load necessary packages
library(tidyverse)
# Use readr to read the raw .tab file from GitHub
# Skip the lengthy metadata.
bfd <- read_tsv("https://raw.githubusercontent.com/phjacobs/foram_sdm/master/Data/Raw/BFD.tab", skip=1326)
# Remove '[m]', '[#]' and spaces
names(bfd) <- gsub(x = names(bfd), pattern = " \\[m\\]", replacement = "")
names(bfd) <- gsub(x = names(bfd), pattern = " \\[#\\]", replacement = "")
names(bfd) <- gsub(x = names(bfd), pattern = "\\.", replacement = "")
names(bfd) <- gsub(x = names(bfd), pattern = "\\(", replacement = "")
names(bfd) <- gsub(x = names(bfd), pattern = "\\)", replacement = "")
names(bfd) <- gsub(x = names(bfd), pattern = " ", replacement = "_")
names(bfd)
write_csv(bfd, "bfd_clean_01.csv")
### tidy the data
# First we use gather to breakout the taxa & count data
tidy.bfd <- bfd %>%
  # Use select to drop the irrelevant & summary columns
select(1:3,7:10,12:14,16:42,44:51) %>%
# Use gather to breakout the taxa & count data
gather(Taxa, Count, -Event, -Latitude, -Longitude)
head(tidy.bfd, 3)
write_csv(tidy.bfd, "bfd_tidy_01.csv")
##
# Next step: calculate relative abundance, i.e. the count of each taxon divided by
# the total count within its sample (Event)
##
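# A minimal sketch (assuming counts are normalized within each Event):
# tidy.bfd %>% group_by(Event) %>% mutate(Rel_Abund = Count / sum(Count, na.rm = TRUE))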
|
{"hexsha": "7f92aa6f7b3e7ffa26b79957284dc177d6755393", "size": 1189, "ext": "r", "lang": "R", "max_stars_repo_path": "bfd_raw_to_tidy.r", "max_stars_repo_name": "phjacobs/tidyflow", "max_stars_repo_head_hexsha": "f2461c83c5ed69ea0d160447018d0f03494db177", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bfd_raw_to_tidy.r", "max_issues_repo_name": "phjacobs/tidyflow", "max_issues_repo_head_hexsha": "f2461c83c5ed69ea0d160447018d0f03494db177", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bfd_raw_to_tidy.r", "max_forks_repo_name": "phjacobs/tidyflow", "max_forks_repo_head_hexsha": "f2461c83c5ed69ea0d160447018d0f03494db177", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0277777778, "max_line_length": 106, "alphanum_fraction": 0.6543313709, "num_tokens": 356}
|
__author__ = 'lucabasa'
__version__ = '1.0.1'
__status__ = 'development'
import numpy as np
import pandas as pd
def clean_cols(data, col_list):
df = data.copy()
for col in col_list:
try:
del df[col]
except KeyError:
pass
return df
def flag_missing(data, col_list):
df = data.copy()
for col in col_list:
df['mis_' + col.lower()] = 0
df.loc[df[col].isna(), 'mis_' + col.lower()] = 1
return df
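# Example (illustrative): flag_missing(df, ['Cabin']) adds a binary 'mis_cabin'
# column that is 1 wherever Cabin is NaN and 0 otherwise.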
def gen_clas(data):
df = data.copy()
df.loc[(df.Sex == 1) & (df.Pclass == 1), 'se_cl'] = 'male_1'
df.loc[(df.Sex == 1) & (df.Pclass == 2), 'se_cl'] = 'male_23' # to help with the misclassification of men
df.loc[(df.Sex == 1) & (df.Pclass == 3), 'se_cl'] = 'male_23'
df.loc[(df.Sex == 0) & (df.Pclass == 1), 'se_cl'] = 'female_1'
df.loc[(df.Sex == 0) & (df.Pclass == 2), 'se_cl'] = 'female_2'
df.loc[(df.Sex == 0) & (df.Pclass == 3), 'se_cl'] = 'female_3'
return df
def gen_cab(data):
    df = data.copy()
    # mis_cabin == 1 flags a missing Cabin (see flag_missing), so 0 means a cabin is recorded
    df.loc[((df.Sex == 1) & (df.mis_cabin == 0)), 'se_ca'] = 'male_cab'
    df.loc[((df.Sex == 1) & (df.mis_cabin == 1)), 'se_ca'] = 'male_nocab'
    df.loc[((df.Sex == 0) & (df.mis_cabin == 0)), 'se_ca'] = 'female_cab'
    df.loc[((df.Sex == 0) & (df.mis_cabin == 1)), 'se_ca'] = 'female_nocab'
    return df
def baby(data):
df = data.copy()
df['is_baby'] = 0
df.loc[df.Age < 10, 'is_baby'] = 1
return df
|
{"hexsha": "af4c435bab7fba30fe4776381d507fd320b3cdce", "size": 1455, "ext": "py", "lang": "Python", "max_stars_repo_path": "titanic/processing.py", "max_stars_repo_name": "lucabasa/kaggle_competitions", "max_stars_repo_head_hexsha": "15296375dc303218093aa576533fb809a4540bb8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-31T19:33:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-31T19:33:30.000Z", "max_issues_repo_path": "titanic/processing.py", "max_issues_repo_name": "lucabasa/kaggle_competitions", "max_issues_repo_head_hexsha": "15296375dc303218093aa576533fb809a4540bb8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-08-23T21:00:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-23T21:07:45.000Z", "max_forks_repo_path": "titanic/processing.py", "max_forks_repo_name": "lucabasa/kaggle_competitions", "max_forks_repo_head_hexsha": "15296375dc303218093aa576533fb809a4540bb8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8524590164, "max_line_length": 109, "alphanum_fraction": 0.5360824742, "include": true, "reason": "import numpy", "num_tokens": 533}
|
using Colors, Gadfly, RDatasets
set_default_plot_size(5inch,4inch)
iris = dataset("datasets","iris")
p = plot(iris, x=:SepalLength, y=:PetalLength, color=:Species, Geom.point,
layer(Stat.smooth(method=:lm, levels=[0.90, 0.99]), Geom.line, Geom.ribbon),
Theme(alphas=[0.6], key_position=:inside)
)
|
{"hexsha": "8cf0bd32852ec3b58fc3968350efc629436079df", "size": 309, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/testscripts/stat_smooth.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Gadfly.jl-c91e804a-d5a3-530f-b6f0-dfbca275c004", "max_stars_repo_head_hexsha": "d180d5760c758863f24e27e2bc42d7c669fc75ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 996, "max_stars_repo_stars_event_min_datetime": "2016-10-13T18:33:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T04:40:31.000Z", "max_issues_repo_path": "test/testscripts/stat_smooth.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Gadfly.jl-c91e804a-d5a3-530f-b6f0-dfbca275c004", "max_issues_repo_head_hexsha": "d180d5760c758863f24e27e2bc42d7c669fc75ed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 730, "max_issues_repo_issues_event_min_datetime": "2016-10-11T03:23:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:20:39.000Z", "max_forks_repo_path": "test/testscripts/stat_smooth.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Gadfly.jl-c91e804a-d5a3-530f-b6f0-dfbca275c004", "max_forks_repo_head_hexsha": "d180d5760c758863f24e27e2bc42d7c669fc75ed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 189, "max_forks_repo_forks_event_min_datetime": "2016-10-19T22:33:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T00:59:54.000Z", "avg_line_length": 30.9, "max_line_length": 82, "alphanum_fraction": 0.7055016181, "num_tokens": 99}
|
"""
Block and braille rendering of Julia arrays, for terminal graphics.
"""
module UnicodeGraphics
export blockize, brailize, blockize!, brailize!
"""
    blockize(a, cutoff=0)
Convert an array to a block unicode string, filling values above the cutoff point.
"""
blockize(a, cutoff=0) = blockize!(initblock(size(a)), a, cutoff)
# x and y are inverted: repl rows are columns.
initblock((y, x)) = initblock(y, x)
initblock(y, x) = Array{Char,2}(undef, x + 1, (y - 1) ÷ 2 + 1)
"""
blockize!(out, a, cutoff=0)
Convert an array to a block unicode string, filling the `out` array.
"""
blockize!(out, a, cutoff=0) = join(block_array!(out, a, cutoff))
function block_array!(out, a, cutoff)
yrange, xrange = axes(a)
for y in first(yrange):2:last(yrange)
for x in xrange
top = checkval(a, y, x, yrange, xrange, cutoff)
bottom = checkval(a, y + 1, x, yrange, xrange, cutoff)
if top
ch = bottom ? '█' : '▀'
else
ch = bottom ? '▄' : ' '
end
out[x-first(xrange) + 1, (y-first(yrange)) ÷ 2 + 1] = Char(ch)
end
# Return after every column
out[end, (y-first(yrange)) ÷ 2 + 1] = Char('\n')
end
# The last character is null
out[end, end] = 0x00
out
end
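# Unicode braille dot offsets: braile_hex[row + 1][col + 1] is the bit added to the
# base codepoint 0x2800 for the dot at (row, col) within a 4x2 braille cell.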
const braile_hex = ((0x01, 0x08), (0x02, 0x10), (0x04, 0x20), (0x40, 0x80))
"""
brailize(a, cutoff=0)
Convert an array to a braille unicode string, filling values above the cutoff point.
"""
brailize(a, cutoff=0) = brailize!(initbraile(size(a)), a, cutoff)
# x and y are inverted: repl rows are columns.
initbraile((y, x)) = initbraile(y, x)
initbraile(y, x) = Array{Char,2}(undef, (x - 1) ÷ 2 + 2, (y - 1) ÷ 4 + 1)
"""
brailize!(out, a, cutoff=0)
Convert an array to a braille unicode string, filling the `out` array.
"""
brailize!(out, a, cutoff=0) = join(braile_array!(out, a, cutoff))
function braile_array!(out, a, cutoff)
yrange, xrange = axes(a)
for y in first(yrange):4:last(yrange)
for x in first(xrange):2:last(xrange)
ch = 0x2800
for j = 0:3, i = 0:1
if checkval(a, y+j, x+i, yrange, xrange, cutoff)
ch += braile_hex[j % 4 + 1][i % 2 + 1]
end
end
out[(x - first(xrange)) ÷ 2 + 1, (y-first(yrange)) ÷ 4 + 1] = ch
end
# Return after every column
out[end, (y-first(yrange)) ÷ 4 + 1] = Char('\n')
end
# The last character is null
out[end, end] = 0x00
out
end
checkval(a, y, x, yrange, xrange, cutoff) = begin
if x <= last(xrange) && y <= last(yrange)
a[y, x] > cutoff
else
false
end
end
end # module
|
{"hexsha": "9add9cc2e1b07678b7fb271ce92c325a0cec4d7d", "size": 2767, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/UnicodeGraphics.jl", "max_stars_repo_name": "rafaqz/UnicodeGraphics.jl", "max_stars_repo_head_hexsha": "b1adbf1b0c13e50c0c8fba5e02db87a9f8b9f8ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2018-09-26T05:03:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-04T19:19:10.000Z", "max_issues_repo_path": "src/UnicodeGraphics.jl", "max_issues_repo_name": "rafaqz/UnicodeGraphics.jl", "max_issues_repo_head_hexsha": "b1adbf1b0c13e50c0c8fba5e02db87a9f8b9f8ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-10-03T09:39:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-05T17:18:29.000Z", "max_forks_repo_path": "src/UnicodeGraphics.jl", "max_forks_repo_name": "rafaqz/UnicodeGraphics.jl", "max_forks_repo_head_hexsha": "b1adbf1b0c13e50c0c8fba5e02db87a9f8b9f8ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-10-03T03:59:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:11:49.000Z", "avg_line_length": 28.5257731959, "max_line_length": 83, "alphanum_fraction": 0.5724611493, "num_tokens": 910}
|
import mxnet as mx
from mxnet.gluon import nn
from mxnet import nd
import numpy as np
from mxnet.base import numeric_types
from mxnet import symbol
class Reconstruction2D(nn.HybridBlock):
def __init__(self, in_channels = 1, block_grad = False, **kwargs):
super().__init__(**kwargs)
self.in_channels = in_channels
self.block_grad = block_grad
def hybrid_forward(self, F, x, flow):
if self.block_grad:
flow = F.BlockGrad(flow)
grid = F.GridGenerator(data = flow.flip(axis = 1), transform_type = "warp")
return F.BilinearSampler(x, grid)
class Reconstruction2DSmooth(nn.HybridBlock):
def __init__(self, in_channels = 1, block_grad = False, **kwargs):
super().__init__(**kwargs)
self.in_channels = in_channels
self.block_grad = block_grad
def hybrid_forward(self, F, x, flow):
if self.block_grad:
flow = F.BlockGrad(flow)
grid = F.GridGenerator(data = flow.flip(axis = 1), transform_type = "warp").clip(-1, 1)
return F.BilinearSampler(x, grid)
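# Minimal usage sketch (shapes are assumptions, not enforced by these blocks):
#   x    : (N, C, H, W) feature map to warp
#   flow : (N, 2, H, W) backward flow, in pixels
#   warp = Reconstruction2DSmooth()
#   out = warp(nd.zeros((1, 3, 8, 8)), nd.zeros((1, 2, 8, 8)))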
class DeformableConv2D(nn.HybridBlock):
""" Deformable Convolution 2D
Parameters
----------
channels : int
The dimensionality of the output space
i.e. the number of output channels in the convolution.
kernel_size : int or tuple/list of n ints
Specifies the dimensions of the convolution window.
strides: int or tuple/list of n ints,
Specifies the strides of the convolution.
padding : int or tuple/list of n ints,
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points
dilation: int or tuple/list of n ints,
Specifies the dilation rate to use for dilated convolution.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two convolution
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str,
Dimension ordering of data and weight. Can be 'NCW', 'NWC', 'NCHW',
'NHWC', 'NCDHW', 'NDHWC', etc. 'N', 'C', 'H', 'W', 'D' stands for
batch, channel, height, width and depth dimensions respectively.
Convolution is performed over 'D', 'H', and 'W' dimensions.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer: str or `Initializer`
Initializer for the bias vector.
"""
def __init__(self, channels, kernel_size, strides=1, padding=0, dilation=1,
groups=1, layout='NCHW', num_deformable_group=1, in_channels=0, activation=None, use_bias=True,
weight_initializer=None, bias_initializer='zeros',
prefix=None, params=None):
super().__init__(prefix=prefix, params=params)
with self.name_scope():
self._channels = channels
self._in_channels = in_channels
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*2
if isinstance(strides, numeric_types):
strides = (strides,)*len(kernel_size)
if isinstance(padding, numeric_types):
padding = (padding,)*len(kernel_size)
if isinstance(dilation, numeric_types):
dilation = (dilation,)*len(kernel_size)
self._kwargs = {
'kernel': kernel_size, 'stride': strides, 'dilate': dilation,
'pad': padding, 'num_filter': channels, 'num_group': groups,
'no_bias': not use_bias, 'layout': layout,
'num_deformable_group' : num_deformable_group}
wshapes = [
(),
(channels, in_channels) + kernel_size,
(channels,)
]
self.weight = self.params.get('weight', shape=wshapes[1],
init=weight_initializer,
allow_deferred_init=True)
if use_bias:
self.bias = self.params.get('bias', shape=wshapes[2],
init=bias_initializer,
allow_deferred_init=True)
else:
self.bias = None
if activation is not None:
self.act = nn.Activation(activation, prefix=activation+'_')
else:
self.act = None
def hybrid_forward(self, F, x, offset, weight, bias=None):
if bias is None:
act = F.contrib.DeformableConvolution(x, offset, weight, name='fwd', **self._kwargs)
else:
act = F.contrib.DeformableConvolution(x, offset, weight, bias, name='fwd', **self._kwargs)
if self.act is not None:
act = self.act(act)
return act
def _alias(self):
return 'deformable_conv'
def __repr__(self):
s = '{name}({mapping}, kernel_size={kernel}, stride={stride}'
len_kernel_size = len(self._kwargs['kernel'])
if self._kwargs['pad'] != (0,) * len_kernel_size:
s += ', padding={pad}'
if self._kwargs['dilate'] != (1,) * len_kernel_size:
s += ', dilation={dilate}'
if self._kwargs['num_group'] != 1:
s += ', groups={num_group}'
if self.bias is None:
s += ', bias=False'
s += ')'
shape = self.weight.shape
return s.format(name=self.__class__.__name__,
mapping='{0} -> {1}'.format(shape[1] if shape[1] else None, shape[0]),
**self._kwargs)
|
{"hexsha": "04a5590b9a4f51f5964e332ace8e16c9016fec95", "size": 5381, "ext": "py", "lang": "Python", "max_stars_repo_path": "deep_flow/MaskFlownet/network/layer.py", "max_stars_repo_name": "yamaru12345/DF-VO2", "max_stars_repo_head_hexsha": "ed7359deeb38c36099cf8198f88e9a74dfa2403a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 339, "max_stars_repo_stars_event_min_datetime": "2020-04-05T15:09:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T09:28:19.000Z", "max_issues_repo_path": "deep_flow/MaskFlownet/network/layer.py", "max_issues_repo_name": "yamaru12345/DF-VO2", "max_issues_repo_head_hexsha": "ed7359deeb38c36099cf8198f88e9a74dfa2403a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2020-04-08T06:46:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-25T12:00:40.000Z", "max_forks_repo_path": "deep_flow/MaskFlownet/network/layer.py", "max_forks_repo_name": "yamaru12345/DF-VO2", "max_forks_repo_head_hexsha": "ed7359deeb38c36099cf8198f88e9a74dfa2403a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 77, "max_forks_repo_forks_event_min_datetime": "2020-04-04T16:42:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-19T11:33:54.000Z", "avg_line_length": 37.1103448276, "max_line_length": 100, "alphanum_fraction": 0.7045158892, "include": true, "reason": "import numpy", "num_tokens": 1460}
|
###########################################################################################################################
# SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN #
###########################################################################################################################
import os
from datetime import datetime
import numpy as np
import pandas as pd
from transform.extract.download_SINAN import download_SINANXXaa, download_table_dbf, download_table_cnv
"""
Cleaning/preprocessing module for SINAN data.
"""
# Convert "value" to the given "type" of object; if the conversion is not possible, return "default"
def tryconvert(value, default, type):
try:
return type(value)
except (ValueError, TypeError):
return default
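# Example (illustrative): tryconvert('3', None, int) -> 3; tryconvert('', None, int) -> None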
# Main SINAN data class
class DataSinanMain:
    # Constructor
def __init__(self, base, state, year):
self.base = base
self.state = state
self.year = year
    # Read a main SINAN data file as a pandas DataFrame and format its
    # columns and values
    def get_SINANXXaamm_treated(self):
        # Read the "dbc" file (or "parquet", if already downloaded) as a pandas DataFrame
dataframe = download_SINANXXaa(self.base, self.state, self.year)
        print(f'The number of rows in file {self.base}{self.state}{self.year} is {dataframe.shape[0]}.')
###################################################################################################################
# SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG #
###################################################################################################################
if self.base == 'DENG':
            # Columns required in the pandas DataFrame that will feed the dengbr table of the database
lista_columns = np.array(['NU_NOTIFIC', 'TP_NOT', 'ID_AGRAVO', 'ID_MUNICIP', 'RES_CHIKS1', 'RES_CHIKS2',
'RESUL_PRNT', 'RESUL_SORO', 'RESUL_NS1', 'SOROTIPO', 'HOSPITALIZ', 'CLASSI_FIN',
'EVOLUCAO', 'GRAV_PULSO', 'GRAV_CONV', 'GRAV_ENCH', 'GRAV_INSUF', 'GRAV_TAQUI',
'GRAV_EXTRE', 'GRAV_HIPOT', 'GRAV_HEMAT', 'GRAV_MELEN', 'GRAV_METRO', 'GRAV_SANG',
'GRAV_AST', 'GRAV_MIOC', 'GRAV_CONSC', 'GRAV_ORGAO'])
            # Create an empty pandas DataFrame with the columns specified above
            df = pd.DataFrame(columns=lista_columns)
            # Copy the data from "dataframe" into the same-named columns of "df", automatically
            # filling with float NaN the columns of "df" that are not present in "dataframe"
for col in df.columns.values:
for coluna in dataframe.columns.values:
if coluna == col:
df[col] = dataframe[coluna].tolist()
break
            # Put into "dif_set" the array of column names of "df" that are not
            # present in "dataframe"
            dif_set = np.setdiff1d(df.columns.values, dataframe.columns.values)
            # Replace float NaN with the empty string in the columns of "df" not present in "dataframe"
for col in dif_set:
df[col].replace(np.nan, '', inplace=True)
            # Drop the last numeric digit of the identified columns, which is the check digit
            # of the municipality code
            # For some municipalities the check-digit computation was found to be invalid
            # This check digit was present in the DOXXxxxx files up to 2005 (to be confirmed!)
if len(df.loc[0, 'ID_MUNICIP']) == 7:
df['ID_MUNICIP'].replace(regex='.$',value='', inplace=True)
            # Update/correct the labels of the specified columns
df['ID_MUNICIP'] = df['ID_MUNICIP'].apply(lambda x: x if len(x) == 6 else '')
df['ID_MUNICIP'].replace(['000000', '150475', '207540', '207695', '241005',
'282580', '279982', '282586', '292586', '315205',
'321213', '355038', '405028', '421265', '422000',
'431454', '500627', '596382', '613167', '613592',
'990010', '990014', '999999'], '', inplace=True)
df['ID_MUNICIP'].replace([str(i) for i in range(334501, 334531)], '330455', inplace=True)
df['ID_MUNICIP'].replace([str(i) for i in range(358001, 358059)], '355030', inplace=True)
df['ID_MUNICIP'].replace(['530000', '530100', '530200', '530300', '530400',
'530500', '530600', '530700', '530800', '530900',
'531000', '531200', '531300', '531400', '531500',
'531600', '531700', '531800'] + \
[str(i) for i in range(539900, 540000)], '530010', inplace=True)
df['CLASSI_FIN'].replace(['0', '6', '7', '9'], '', inplace=True)
df['EVOLUCAO'].replace(['0', '9'], '', inplace=True)
            # Replace the empty string with "NA" in the foreign key columns
for col in ['ID_MUNICIP', 'CLASSI_FIN']:
df[col].replace('', 'NA', inplace=True)
            # Replace the empty string with None in the specified attribute columns
for col in ['TP_NOT', 'ID_AGRAVO', 'RES_CHIKS1', 'RES_CHIKS2', 'RESUL_PRNT',
'RESUL_SORO', 'RESUL_NS1', 'SOROTIPO', 'HOSPITALIZ', 'EVOLUCAO']:
df[col].replace('', None, inplace=True)
            # Convert the binary-valued (0 or 1) attribute columns from object to int or None
for col in np.array(['GRAV_PULSO', 'GRAV_CONV', 'GRAV_ENCH', 'GRAV_INSUF',
'GRAV_TAQUI', 'GRAV_EXTRE', 'GRAV_HIPOT', 'GRAV_HEMAT',
'GRAV_MELEN', 'GRAV_METRO', 'GRAV_SANG', 'GRAV_AST',
'GRAV_MIOC', 'GRAV_CONSC', 'GRAV_ORGAO']):
df[col] = df[col].apply(lambda x: tryconvert(x, None, int))
            # Rename columns
df.rename(index=str, columns={'ID_MUNICIP': 'MUNICIP_ID', 'CLASSI_FIN': 'CLASSIFIN_ID'}, inplace=True)
            print(f'Processed file DENG{self.state}{self.year} (final shape: {df.shape[0]} x {df.shape[1]}).')
return df
# Auxiliary SINAN data class
class DataSinanAuxiliary:
    # Constructor
def __init__(self, path):
self.path = path
###########################################################################################################################
# SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN SINAN #
###########################################################################################################################
###################################################################################################################
# SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG SINAN_DENG #
###################################################################################################################
    # Format the columns and values of the TABUF table (file TABUF.dbf)
    def get_TABUF_treated(self):
        # Convert the TABUF table to a pandas DataFrame
        file_name = 'TABUF'
        df = download_table_dbf(file_name)
        # Rename the specified columns
        df.rename(index=str, columns={'CODIGO': 'ID', 'DESCRICAO': 'ESTADO'}, inplace=True)
        # Reorder the columns
        df = df[['ID', 'ESTADO', 'SIGLA_UF']]
        # Insert the primary key "NA" into this table to represent missing values
        df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE', '?']
return df
    # Format the columns and values of the RSAUDE table (from IBGE)
    def get_RSAUDE_treated(self):
        # Convert the RSAUDE table ("xlsx" format) to a pandas DataFrame
        df = pd.read_excel(self.path + 'RSAUDE' + '.xlsx')
        # Rename the SIGNIFICACAO column
        df.rename(index=str, columns={'SIGNIFICACAO': 'REGIAO'}, inplace=True)
        # Convert the specified column to string
        df['ID'] = df['ID'].astype('str')
        # Insert the primary key "NA" into this table to represent missing values
        df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
    # Format the columns and values of the CADMUN table (file CADMUN.dbf)
def get_CADMUN_treated(self):
        # Convert the CADMUN table to a pandas DataFrame
file_name = 'CADMUN'
df1 = download_table_dbf(file_name)
        # Rename the specified columns
df1.rename(index=str, columns={'MUNCOD': 'ID', 'UFCOD': 'UFCOD_ID'}, inplace=True)
        # Drop the entire row where column "ID" has the given value, since it does not represent any municipality
df1 = df1.drop(df1[df1['ID']=='000000'].index)
        # Remove unwanted columns from the pandas DataFrame
df1 = df1.drop(['MUNSINON', 'MUNSINONDV', 'MESOCOD', 'MICROCOD', 'MSAUDCOD',
'RSAUDCOD', 'CSAUDCOD', 'RMETRCOD', 'AGLCOD'], axis=1)
        # Replace the empty string with "?" in the specified columns
for col in ['SITUACAO', 'MUNSINP', 'MUNSIAFI', 'MUNNOME', 'MUNNOMEX', 'OBSERV',
'AMAZONIA', 'FRONTEIRA', 'CAPITAL', 'ANOINST', 'ANOEXT', 'SUCESSOR']:
df1[col].replace('', '?', inplace=True)
        # Replace the empty string with "NA" in the specified columns
df1['UFCOD_ID'].replace('', 'NA', inplace=True)
        # Replace the empty string with float "NaN" in the specified columns
for col in ['LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA']:
df1[col].replace('', np.nan, inplace=True)
        # Convert the specified columns from object to float
df1[['LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA']] = \
df1[['LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA']].astype('float')
        # Upper-case all strings in the specified columns
df1['MUNNOME'] = df1['MUNNOME'].apply(lambda x: x.upper())
df1['MUNNOMEX'] = df1['MUNNOMEX'].apply(lambda x: x.upper())
        # Insert a row for the municipality of Nazária/PI, not originally present in the file
df1.loc[df1.shape[0]] = ['220672', '2206720', 'ATIVO', '?', '?', 'NAZÁRIA', 'NAZARIA', '?',
'N', 'N', 'N', '22', '?', '?', '?', np.nan, np.nan, np.nan, 363.589]
        # Sort the rows of "df1" in ascending order of the ID column
df1.sort_values(by=['ID'], inplace=True)
        # Reset the index after the sorting above and the row removals/insertions
df1.reset_index(drop=True, inplace=True)
        # Convert the rl_municip_regsaud table to a pandas DataFrame
file_name = 'rl_municip_regsaud'
df2 = download_table_dbf(file_name)
        # Rename the specified columns
df2.rename(index=str, columns={'CO_MUNICIP': 'ID', 'CO_REGSAUD': 'RSAUDE_ID'}, inplace=True)
        # Left-merge "df1" and "df2" on column ID, keeping "df1" as the base
df = pd.merge(df1, df2, how='left', left_on='ID', right_on='ID')
        # Convert float NaN to the string "NA"
df['RSAUDE_ID'].replace(np.nan, 'NA', inplace=True)
        # Reorder the columns, putting the most relevant first
df = df[['ID', 'MUNNOME', 'MUNNOMEX', 'MUNCODDV', 'OBSERV', 'SITUACAO', 'MUNSINP',
'MUNSIAFI', 'UFCOD_ID', 'AMAZONIA', 'FRONTEIRA', 'CAPITAL', 'RSAUDE_ID',
'LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA', 'ANOINST', 'ANOEXT', 'SUCESSOR']]
        # Insert the primary key "NA" into this table to represent missing values
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE', '?', '?', '?', '?', '?', '?', 'NA', '?',
'?', '?', 'NA', np.nan, np.nan, np.nan, np.nan, '?', '?', '?']
return df
    # Format the columns and values of the Classdeng TCC (file Classdeng.cnv)
    def get_Classdeng_treated(self):
        # Convert the Classdeng TCC to a pandas DataFrame
        file_name = 'Classdeng'
        df = download_table_cnv(file_name)
        # Rename the SIGNIFICACAO column
        df.rename(index=str, columns={'SIGNIFICACAO': 'CLASSIFICACAO'}, inplace=True)
        # Convert the specified column to string
        df['ID'] = df['ID'].astype('str')
        # Insert a few rows into the pandas DataFrame
df.loc[df.shape[0]] = ['10', 'DENGUE']
df.loc[df.shape[0]] = ['11', 'DENGUE COM SINAIS DE ALARME']
df.loc[df.shape[0]] = ['12', 'DENGUE GRAVE']
df.loc[df.shape[0]] = ['13', 'CHIKUNGUNYA']
        # Insert the primary key "NA" into this table to represent missing values
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
if __name__ == '__main__':
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
|
{"hexsha": "ae18a7418ef7510bbc1a48a8a2b4071ee7bf50d0", "size": 13850, "ext": "py", "lang": "Python", "max_stars_repo_path": "pegasus/dados/transform/prepare_SINAN.py", "max_stars_repo_name": "SecexSaudeTCU/PegaSUS", "max_stars_repo_head_hexsha": "0e24c00595e8a7376680dfb2e5aa42e1e9eb7770", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pegasus/dados/transform/prepare_SINAN.py", "max_issues_repo_name": "SecexSaudeTCU/PegaSUS", "max_issues_repo_head_hexsha": "0e24c00595e8a7376680dfb2e5aa42e1e9eb7770", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pegasus/dados/transform/prepare_SINAN.py", "max_forks_repo_name": "SecexSaudeTCU/PegaSUS", "max_forks_repo_head_hexsha": "0e24c00595e8a7376680dfb2e5aa42e1e9eb7770", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-19T19:31:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-19T19:31:42.000Z", "avg_line_length": 56.3008130081, "max_line_length": 127, "alphanum_fraction": 0.5614440433, "include": true, "reason": "import numpy", "num_tokens": 3656}
|
import numpy as np
def generate(model,
bpe,
texts,
length=100,
top_k=1,
temperature=1.0):
"""Generate text after the given contexts.
:param model: The trained model.
:param bpe: Byte pair encoding object.
:param texts: A list of texts.
:param length: The length of following texts to be generated.
:param top_k: Choose the next token from top K.
    :param temperature: Randomness in Boltzmann distribution.
:return: A list of generated texts.
"""
batch_size = len(texts)
encodes = [bpe.encode(text) for text in texts]
text_lens = [len(encode) for encode in encodes]
max_len = max(text_lens)
input_data = [encode + [0] * (max_len - len(encode)) for encode in encodes]
for shift in range(length):
output_data = model.predict(np.array(input_data))
for index in range(batch_size):
probs = [(prob, i) for i, prob in enumerate(output_data[index, text_lens[index] + shift - 1])]
probs.sort(reverse=True)
probs = probs[:top_k]
indices, probs = list(map(lambda x: x[1], probs)), list(map(lambda x: x[0], probs))
probs = np.array(probs) / temperature
probs = probs - np.max(probs)
probs = np.exp(probs)
probs = probs / np.sum(probs)
next_token = np.random.choice(indices, p=probs)
input_data[index].append(0)
input_data[index][text_lens[index] + shift] = next_token
outputs = [bpe.decode(input_data[index][:text_lens[index] + length]) for index in range(batch_size)]
return outputs
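# Minimal usage sketch (`model` and `bpe` are assumed to come from a loaded GPT-2
# checkpoint and its byte pair encoder; values below are illustrative):
#   outputs = generate(model, bpe, ['Once upon a time'], length=50, top_k=10, temperature=0.8)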
|
{"hexsha": "7e9e50cd5a57cd7a2842811692d83548b98f3f86", "size": 1653, "ext": "py", "lang": "Python", "max_stars_repo_path": "keras_gpt_2/gen.py", "max_stars_repo_name": "Pimax1/keras-gpt-2", "max_stars_repo_head_hexsha": "0a4adaad651a5a51e8a9c647c50cc01c3e51055c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 131, "max_stars_repo_stars_event_min_datetime": "2019-02-19T09:02:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T12:59:37.000Z", "max_issues_repo_path": "keras_gpt_2/gen.py", "max_issues_repo_name": "Pimax1/keras-gpt-2", "max_issues_repo_head_hexsha": "0a4adaad651a5a51e8a9c647c50cc01c3e51055c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2019-03-08T10:34:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-09T05:01:31.000Z", "max_forks_repo_path": "keras_gpt_2/gen.py", "max_forks_repo_name": "Pimax1/keras-gpt-2", "max_forks_repo_head_hexsha": "0a4adaad651a5a51e8a9c647c50cc01c3e51055c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2019-02-19T10:29:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-20T19:07:19.000Z", "avg_line_length": 40.3170731707, "max_line_length": 106, "alphanum_fraction": 0.611615245, "include": true, "reason": "import numpy", "num_tokens": 399}
|
# (c) Copyright IBM Corporation 2020.
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0
import numpy as np
import gc
from lrtc_lib.active_learning.strategies import ActiveLearningStrategies
from lrtc_lib.active_learning.core.strategy.perceptron_ensemble import PerceptronEnsemble, \
train_perceptron_ensemble_model
class PerceptronDropout(PerceptronEnsemble):
def __init__(self, n_units=10, max_to_consider=10 ** 6):
super().__init__(n_units, max_to_consider)
def get_strategy(self):
return ActiveLearningStrategies.DROPOUT_PERCEPTRON
def get_per_model_predictions(self, pos, neg, infer):
model = train_perceptron_ensemble_model(pos, neg, n_units=1)[0]
per_model_predictions = [self.dropout_predict(infer, model) for _ in range(self.n_units)]
del model
gc.collect()
return per_model_predictions
def dropout_predict(self, infer, model):
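        # Run the forward pass with the Keras learning phase enabled so dropout
        # stays active at inference time (Monte Carlo dropout).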
import tensorflow.python.keras.backend as K # see https://github.com/tensorflow/tensorflow/issues/34201
f = K.function([model.layers[0].input, K.symbolic_learning_phase()],
[model.layers[-1].output])
return f([infer, True])[0]
if __name__ == '__main__':
pos = np.random.rand(100, 50)
neg = np.random.rand(100, 50)
infer = np.random.rand(150, 50)
ped = PerceptronDropout(n_units=10)
print(ped.get_scores_from_embeddings(pos, neg, infer))
|
{"hexsha": "28055a6ffbd5947aa71b1f35f8621c3d294f3e19", "size": 1467, "ext": "py", "lang": "Python", "max_stars_repo_path": "lrtc_lib/active_learning/core/strategy/perceptron_dropout.py", "max_stars_repo_name": "MovestaDev/low-resource-text-classification-framework", "max_stars_repo_head_hexsha": "4380755a65b35265e84ecbf4b87e872d79e8f079", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 57, "max_stars_repo_stars_event_min_datetime": "2020-11-18T15:13:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T22:33:26.000Z", "max_issues_repo_path": "lrtc_lib/active_learning/core/strategy/perceptron_dropout.py", "max_issues_repo_name": "MovestaDev/low-resource-text-classification-framework", "max_issues_repo_head_hexsha": "4380755a65b35265e84ecbf4b87e872d79e8f079", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-02-23T22:11:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-13T00:13:48.000Z", "max_forks_repo_path": "lrtc_lib/active_learning/core/strategy/perceptron_dropout.py", "max_forks_repo_name": "MovestaDev/low-resource-text-classification-framework", "max_forks_repo_head_hexsha": "4380755a65b35265e84ecbf4b87e872d79e8f079", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2021-02-10T08:55:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:37:54.000Z", "avg_line_length": 34.1162790698, "max_line_length": 112, "alphanum_fraction": 0.712338105, "include": true, "reason": "import numpy", "num_tokens": 377}
|
import pandas as pd
import numpy as np
def ReadJenkins():
Headers=["Num","VComp","Object","Longitude","Latitude","Vmag", "SpType","SpType_ref","E(B-V)","Distance","Z","Lower_log(NHI)","log(NHI)","Flag_log(NHI)","Upper_log(NHI)","ref_log(NHI)","Lower_log(NH2)","log(NH2)","Upper_log(NH2)","ref_log(NH2)"]
    rows_to_skip = np.arange(74)  # integer row indices for the header block; linspace would give floats
colspecs=[(0,6),(10,13),(15,31),(32,38),(39,46),(46,51),(52,68),(69,75),(76,81),(82,87),(88,93),(94,99),(102,107),(108,109),(110,115),(118,124),(126,131),(132,137),(138,143),(144,150)]
file_name=str("Jenkins2009_data.txt")
jen_data=pd.read_fwf(file_name,skiprows=rows_to_skip,header=None,colspecs=colspecs,names=Headers,engine='python')
    return jen_data
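# Example usage (assumes Jenkins2009_data.txt sits next to this script):
#   jen_data = ReadJenkins()
#   print(jen_data[['Object', 'log(NHI)']].head())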
|
{"hexsha": "516b12558239f6eec564c39772f11529b1833166", "size": 729, "ext": "py", "lang": "Python", "max_stars_repo_path": "edibles/data/sightline_data/ReadJenkins_data.py", "max_stars_repo_name": "jancami/edibles", "max_stars_repo_head_hexsha": "51263b24c5e8aef786692011289b906a810ad2f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-04-15T10:44:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-21T15:58:19.000Z", "max_issues_repo_path": "edibles/data/sightline_data/ReadJenkins_data.py", "max_issues_repo_name": "jancami/edibles", "max_issues_repo_head_hexsha": "51263b24c5e8aef786692011289b906a810ad2f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 100, "max_issues_repo_issues_event_min_datetime": "2020-05-08T13:20:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-11T20:04:52.000Z", "max_forks_repo_path": "edibles/data/sightline_data/ReadJenkins_data.py", "max_forks_repo_name": "jancami/edibles", "max_forks_repo_head_hexsha": "51263b24c5e8aef786692011289b906a810ad2f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-05-27T00:39:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-23T14:07:16.000Z", "avg_line_length": 48.6, "max_line_length": 249, "alphanum_fraction": 0.6543209877, "include": true, "reason": "import numpy", "num_tokens": 248}
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def muscle(path):
"""Effect of Calcium Chloride on Muscle Contraction in Rat Hearts
The purpose of this experiment was to assess the influence of calcium in
solution on the contraction of heart muscle in rats. The left auricle of
21 rat hearts was isolated and on several occasions a constant-length
strip of tissue was electrically stimulated and dipped into various
concentrations of calcium chloride solution, after which the shortening
of the strip was accurately measured as the response.
This data frame contains the following columns:
`Strip`
which heart muscle strip was used?
`Conc`
concentration of calcium chloride solution, in multiples of 2.2 mM.
`Length`
the change in length (shortening) of the strip, (allegedly) in mm.
Linder, A., Chakravarti, I. M. and Vuagnat, P. (1964) Fitting asymptotic
regression curves with different asymptotes. In *Contributions to
Statistics. Presented to Professor P. C. Mahalanobis on the occasion of
his 70th birthday*, ed. C. R. Rao, pp. 221–228. Oxford: Pergamon Press.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `muscle.csv`.
Returns:
Tuple of np.ndarray `x_train` with 60 rows and 3 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'muscle.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/MASS/muscle.csv'
maybe_download_and_extract(path, url,
save_file_name='muscle.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
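# Hedged usage sketch (not part of the original module; the data directory is
# illustrative and the csv is downloaded there on first use):
# x_train, metadata = muscle('~/data')
# print(x_train.shape)        # (60, 3)
# print(metadata['columns'])  # Strip, Conc, Length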
|
{"hexsha": "307b2b46257e5524d4dbac068d2b616712aa793a", "size": 2168, "ext": "py", "lang": "Python", "max_stars_repo_path": "observations/r/muscle.py", "max_stars_repo_name": "hajime9652/observations", "max_stars_repo_head_hexsha": "2c8b1ac31025938cb17762e540f2f592e302d5de", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 199, "max_stars_repo_stars_event_min_datetime": "2017-07-24T01:34:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T00:50:55.000Z", "max_issues_repo_path": "observations/r/muscle.py", "max_issues_repo_name": "hajime9652/observations", "max_issues_repo_head_hexsha": "2c8b1ac31025938cb17762e540f2f592e302d5de", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2017-09-05T19:27:20.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-07T09:47:26.000Z", "max_forks_repo_path": "observations/r/muscle.py", "max_forks_repo_name": "hajime9652/observations", "max_forks_repo_head_hexsha": "2c8b1ac31025938cb17762e540f2f592e302d5de", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 45, "max_forks_repo_forks_event_min_datetime": "2017-07-26T00:10:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T20:44:59.000Z", "avg_line_length": 32.8484848485, "max_line_length": 74, "alphanum_fraction": 0.7144833948, "include": true, "reason": "import numpy", "num_tokens": 514}
|
[STATEMENT]
lemma simplicial_simplex_simplex_cone:
assumes f: "simplicial_simplex p S f"
and T: "\<And>x u. \<lbrakk>0 \<le> u; u \<le> 1; x \<in> S\<rbrakk> \<Longrightarrow> (\<lambda>i. (1 - u) * v i + u * x i) \<in> T"
shows "simplicial_simplex (Suc p) T (simplex_cone p v f)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. simplicial_simplex (Suc p) T (simplex_cone p v f)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. simplicial_simplex (Suc p) T (simplex_cone p v f)
[PROOF STEP]
obtain l where l: "\<And>x. x \<in> standard_simplex p \<Longrightarrow> oriented_simplex p l x \<in> S"
and feq: "f = oriented_simplex p l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>l. \<lbrakk>\<And>x. x \<in> standard_simplex p \<Longrightarrow> oriented_simplex p l x \<in> S; f = oriented_simplex p l\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using f
[PROOF STATE]
proof (prove)
using this:
simplicial_simplex p S f
goal (1 subgoal):
1. (\<And>l. \<lbrakk>\<And>x. x \<in> standard_simplex p \<Longrightarrow> oriented_simplex p l x \<in> S; f = oriented_simplex p l\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto simp: simplicial_simplex)
[PROOF STATE]
proof (state)
this:
?x \<in> standard_simplex p \<Longrightarrow> oriented_simplex p l ?x \<in> S
f = oriented_simplex p l
goal (1 subgoal):
1. simplicial_simplex (Suc p) T (simplex_cone p v f)
[PROOF STEP]
have "oriented_simplex p l x \<in> S" if "x \<in> standard_simplex p" for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. oriented_simplex p l x \<in> S
[PROOF STEP]
using f that
[PROOF STATE]
proof (prove)
using this:
simplicial_simplex p S f
x \<in> standard_simplex p
goal (1 subgoal):
1. oriented_simplex p l x \<in> S
[PROOF STEP]
by (auto simp: simplicial_simplex feq)
[PROOF STATE]
proof (state)
this:
?x \<in> standard_simplex p \<Longrightarrow> oriented_simplex p l ?x \<in> S
goal (1 subgoal):
1. simplicial_simplex (Suc p) T (simplex_cone p v f)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?x \<in> standard_simplex p \<Longrightarrow> oriented_simplex p l ?x \<in> S
[PROOF STEP]
have S: "\<And>x. \<lbrakk>\<And>i. 0 \<le> x i \<and> x i \<le> 1; \<And>i. i>p \<Longrightarrow> x i = 0; sum x {..p} = 1\<rbrakk>
\<Longrightarrow> (\<lambda>i. \<Sum>j\<le>p. l j i * x j) \<in> S"
[PROOF STATE]
proof (prove)
using this:
?x \<in> standard_simplex p \<Longrightarrow> oriented_simplex p l ?x \<in> S
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>i. 0 \<le> x i \<and> x i \<le> 1; \<And>i. p < i \<Longrightarrow> x i = 0; sum x {..p} = 1\<rbrakk> \<Longrightarrow> (\<lambda>i. \<Sum>j\<le>p. l j i * x j) \<in> S
[PROOF STEP]
by (simp add: oriented_simplex_def standard_simplex_def)
[PROOF STATE]
proof (state)
this:
\<lbrakk>\<And>i. 0 \<le> ?x i \<and> ?x i \<le> 1; \<And>i. p < i \<Longrightarrow> ?x i = 0; sum ?x {..p} = 1\<rbrakk> \<Longrightarrow> (\<lambda>i. \<Sum>j\<le>p. l j i * ?x j) \<in> S
goal (1 subgoal):
1. simplicial_simplex (Suc p) T (simplex_cone p v f)
[PROOF STEP]
have "oriented_simplex (Suc p) (\<lambda>i. if i = 0 then v else l (i -1)) x \<in> T"
if "x \<in> standard_simplex (Suc p)" for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. oriented_simplex (Suc p) (\<lambda>i. if i = 0 then v else l (i - 1)) x \<in> T
[PROOF STEP]
proof (simp add: that oriented_simplex_def sum.atMost_Suc_shift del: sum.atMost_Suc)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<lambda>i. v i * x 0 + (\<Sum>ia\<le>p. l ia i * x (Suc ia))) \<in> T
[PROOF STEP]
have x01: "\<And>i. 0 \<le> x i \<and> x i \<le> 1" and x0: "\<And>i. i > Suc p \<Longrightarrow> x i = 0" and x1: "sum x {..Suc p} = 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>i. 0 \<le> x i \<and> x i \<le> 1) &&& (\<And>i. Suc p < i \<Longrightarrow> x i = 0) &&& sum x {..Suc p} = 1
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
x \<in> standard_simplex (Suc p)
goal (1 subgoal):
1. (\<And>i. 0 \<le> x i \<and> x i \<le> 1) &&& (\<And>i. Suc p < i \<Longrightarrow> x i = 0) &&& sum x {..Suc p} = 1
[PROOF STEP]
by (auto simp: oriented_simplex_def standard_simplex_def)
[PROOF STATE]
proof (state)
this:
0 \<le> x ?i \<and> x ?i \<le> 1
Suc p < ?i \<Longrightarrow> x ?i = 0
sum x {..Suc p} = 1
goal (1 subgoal):
1. (\<lambda>i. v i * x 0 + (\<Sum>ia\<le>p. l ia i * x (Suc ia))) \<in> T
[PROOF STEP]
obtain a where "a \<in> S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>a. a \<in> S \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using f
[PROOF STATE]
proof (prove)
using this:
simplicial_simplex p S f
goal (1 subgoal):
1. (\<And>a. a \<in> S \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
a \<in> S
goal (1 subgoal):
1. (\<lambda>i. v i * x 0 + (\<Sum>ia\<le>p. l ia i * x (Suc ia))) \<in> T
[PROOF STEP]
show "(\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
proof (cases "x 0 = 1")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. x 0 = 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
2. x 0 \<noteq> 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
x 0 = 1
goal (2 subgoals):
1. x 0 = 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
2. x 0 \<noteq> 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x 0 = 1
[PROOF STEP]
have "sum x {Suc 0..Suc p} = 0"
[PROOF STATE]
proof (prove)
using this:
x 0 = 1
goal (1 subgoal):
1. sum x {Suc 0..Suc p} = 0
[PROOF STEP]
using x1
[PROOF STATE]
proof (prove)
using this:
x 0 = 1
sum x {..Suc p} = 1
goal (1 subgoal):
1. sum x {Suc 0..Suc p} = 0
[PROOF STEP]
by (simp add: atMost_atLeast0 sum.atLeast_Suc_atMost)
[PROOF STATE]
proof (state)
this:
sum x {Suc 0..Suc p} = 0
goal (2 subgoals):
1. x 0 = 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
2. x 0 \<noteq> 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
sum x {Suc 0..Suc p} = 0
[PROOF STEP]
have [simp]: "x (Suc j) = 0" if "j\<le>p" for j
[PROOF STATE]
proof (prove)
using this:
sum x {Suc 0..Suc p} = 0
goal (1 subgoal):
1. x (Suc j) = 0
[PROOF STEP]
unfolding sum.atLeast_Suc_atMost_Suc_shift
[PROOF STATE]
proof (prove)
using this:
sum (x \<circ> Suc) {0..p} = 0
goal (1 subgoal):
1. x (Suc j) = 0
[PROOF STEP]
using x01 that
[PROOF STATE]
proof (prove)
using this:
sum (x \<circ> Suc) {0..p} = 0
0 \<le> x ?i \<and> x ?i \<le> 1
j \<le> p
goal (1 subgoal):
1. x (Suc j) = 0
[PROOF STEP]
by (simp add: sum_nonneg_eq_0_iff)
[PROOF STATE]
proof (state)
this:
?j \<le> p \<Longrightarrow> x (Suc ?j) = 0
goal (2 subgoals):
1. x 0 = 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
2. x 0 \<noteq> 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?j \<le> p \<Longrightarrow> x (Suc ?j) = 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
?j \<le> p \<Longrightarrow> x (Suc ?j) = 0
goal (1 subgoal):
1. (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
using T [of 0 a] \<open>a \<in> S\<close>
[PROOF STATE]
proof (prove)
using this:
?j \<le> p \<Longrightarrow> x (Suc ?j) = 0
\<lbrakk>0 \<le> 0; 0 \<le> 1; a \<in> S\<rbrakk> \<Longrightarrow> (\<lambda>i. (1 - 0) * v i + 0 * a i) \<in> T
a \<in> S
goal (1 subgoal):
1. (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
by (auto simp: True)
[PROOF STATE]
proof (state)
this:
(\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
goal (1 subgoal):
1. x 0 \<noteq> 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x 0 \<noteq> 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
x 0 \<noteq> 1
goal (1 subgoal):
1. x 0 \<noteq> 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x 0 \<noteq> 1
[PROOF STEP]
have "(\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) = (\<lambda>i. (1 - (1 - x 0)) * v i + (1 - x 0) * (inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j))))"
[PROOF STATE]
proof (prove)
using this:
x 0 \<noteq> 1
goal (1 subgoal):
1. (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) = (\<lambda>i. (1 - (1 - x 0)) * v i + (1 - x 0) * (inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j))))
[PROOF STEP]
by (force simp: field_simps)
[PROOF STATE]
proof (state)
this:
(\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) = (\<lambda>i. (1 - (1 - x 0)) * v i + (1 - x 0) * (inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j))))
goal (1 subgoal):
1. x 0 \<noteq> 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) = (\<lambda>i. (1 - (1 - x 0)) * v i + (1 - x 0) * (inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j))))
goal (1 subgoal):
1. x 0 \<noteq> 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
have "\<dots> \<in> T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>i. (1 - (1 - x 0)) * v i + (1 - x 0) * (inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j)))) \<in> T
[PROOF STEP]
proof (rule T)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. 0 \<le> 1 - x 0
2. 1 - x 0 \<le> 1
3. (\<lambda>i. inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> S
[PROOF STEP]
have "x 0 < 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x 0 < 1
[PROOF STEP]
by (simp add: False less_le x01)
[PROOF STATE]
proof (state)
this:
x 0 < 1
goal (3 subgoals):
1. 0 \<le> 1 - x 0
2. 1 - x 0 \<le> 1
3. (\<lambda>i. inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> S
[PROOF STEP]
have xle: "x (Suc i) \<le> (1 - x 0)" for i
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x (Suc i) \<le> 1 - x 0
[PROOF STEP]
proof (cases "i \<le> p")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. i \<le> p \<Longrightarrow> x (Suc i) \<le> 1 - x 0
2. \<not> i \<le> p \<Longrightarrow> x (Suc i) \<le> 1 - x 0
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
i \<le> p
goal (2 subgoals):
1. i \<le> p \<Longrightarrow> x (Suc i) \<le> 1 - x 0
2. \<not> i \<le> p \<Longrightarrow> x (Suc i) \<le> 1 - x 0
[PROOF STEP]
have "sum x {0, Suc i} \<le> sum x {..Suc p}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sum x {0, Suc i} \<le> sum x {..Suc p}
[PROOF STEP]
by (rule sum_mono2) (auto simp: True x01)
[PROOF STATE]
proof (state)
this:
sum x {0, Suc i} \<le> sum x {..Suc p}
goal (2 subgoals):
1. i \<le> p \<Longrightarrow> x (Suc i) \<le> 1 - x 0
2. \<not> i \<le> p \<Longrightarrow> x (Suc i) \<le> 1 - x 0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
sum x {0, Suc i} \<le> sum x {..Suc p}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
sum x {0, Suc i} \<le> sum x {..Suc p}
goal (1 subgoal):
1. x (Suc i) \<le> 1 - x 0
[PROOF STEP]
using x1 x01
[PROOF STATE]
proof (prove)
using this:
sum x {0, Suc i} \<le> sum x {..Suc p}
sum x {..Suc p} = 1
0 \<le> x ?i \<and> x ?i \<le> 1
goal (1 subgoal):
1. x (Suc i) \<le> 1 - x 0
[PROOF STEP]
by (simp add: algebra_simps not_less)
[PROOF STATE]
proof (state)
this:
x (Suc i) \<le> 1 - x 0
goal (1 subgoal):
1. \<not> i \<le> p \<Longrightarrow> x (Suc i) \<le> 1 - x 0
[PROOF STEP]
qed (simp add: x0 x01)
[PROOF STATE]
proof (state)
this:
x (Suc ?i) \<le> 1 - x 0
goal (3 subgoals):
1. 0 \<le> 1 - x 0
2. 1 - x 0 \<le> 1
3. (\<lambda>i. inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> S
[PROOF STEP]
have "(\<lambda>i. (\<Sum>j\<le>p. l j i * (x (Suc j) * inverse (1 - x 0)))) \<in> S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>i. \<Sum>j\<le>p. l j i * (x (Suc j) * inverse (1 - x 0))) \<in> S
[PROOF STEP]
proof (rule S)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>i. 0 \<le> x (Suc i) * inverse (1 - x 0) \<and> x (Suc i) * inverse (1 - x 0) \<le> 1
2. \<And>i. p < i \<Longrightarrow> x (Suc i) * inverse (1 - x 0) = 0
3. (\<Sum>j\<le>p. x (Suc j) * inverse (1 - x 0)) = 1
[PROOF STEP]
have "x 0 + (\<Sum>j\<le>p. x (Suc j)) = sum x {..Suc p}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x 0 + (\<Sum>j\<le>p. x (Suc j)) = sum x {..Suc p}
[PROOF STEP]
by (metis sum.atMost_Suc_shift)
[PROOF STATE]
proof (state)
this:
x 0 + (\<Sum>j\<le>p. x (Suc j)) = sum x {..Suc p}
goal (3 subgoals):
1. \<And>i. 0 \<le> x (Suc i) * inverse (1 - x 0) \<and> x (Suc i) * inverse (1 - x 0) \<le> 1
2. \<And>i. p < i \<Longrightarrow> x (Suc i) * inverse (1 - x 0) = 0
3. (\<Sum>j\<le>p. x (Suc j) * inverse (1 - x 0)) = 1
[PROOF STEP]
with x1
[PROOF STATE]
proof (chain)
picking this:
sum x {..Suc p} = 1
x 0 + (\<Sum>j\<le>p. x (Suc j)) = sum x {..Suc p}
[PROOF STEP]
have "(\<Sum>j\<le>p. x (Suc j)) = 1 - x 0"
[PROOF STATE]
proof (prove)
using this:
sum x {..Suc p} = 1
x 0 + (\<Sum>j\<le>p. x (Suc j)) = sum x {..Suc p}
goal (1 subgoal):
1. (\<Sum>j\<le>p. x (Suc j)) = 1 - x 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(\<Sum>j\<le>p. x (Suc j)) = 1 - x 0
goal (3 subgoals):
1. \<And>i. 0 \<le> x (Suc i) * inverse (1 - x 0) \<and> x (Suc i) * inverse (1 - x 0) \<le> 1
2. \<And>i. p < i \<Longrightarrow> x (Suc i) * inverse (1 - x 0) = 0
3. (\<Sum>j\<le>p. x (Suc j) * inverse (1 - x 0)) = 1
[PROOF STEP]
with False
[PROOF STATE]
proof (chain)
picking this:
x 0 \<noteq> 1
(\<Sum>j\<le>p. x (Suc j)) = 1 - x 0
[PROOF STEP]
show "(\<Sum>j\<le>p. x (Suc j) * inverse (1 - x 0)) = 1"
[PROOF STATE]
proof (prove)
using this:
x 0 \<noteq> 1
(\<Sum>j\<le>p. x (Suc j)) = 1 - x 0
goal (1 subgoal):
1. (\<Sum>j\<le>p. x (Suc j) * inverse (1 - x 0)) = 1
[PROOF STEP]
by (metis add_diff_cancel_left' diff_diff_eq2 diff_zero right_inverse sum_distrib_right)
[PROOF STATE]
proof (state)
this:
(\<Sum>j\<le>p. x (Suc j) * inverse (1 - x 0)) = 1
goal (2 subgoals):
1. \<And>i. 0 \<le> x (Suc i) * inverse (1 - x 0) \<and> x (Suc i) * inverse (1 - x 0) \<le> 1
2. \<And>i. p < i \<Longrightarrow> x (Suc i) * inverse (1 - x 0) = 0
[PROOF STEP]
qed (use x01 x0 xle \<open>x 0 < 1\<close> in \<open>auto simp: field_split_simps\<close>)
[PROOF STATE]
proof (state)
this:
(\<lambda>i. \<Sum>j\<le>p. l j i * (x (Suc j) * inverse (1 - x 0))) \<in> S
goal (3 subgoals):
1. 0 \<le> 1 - x 0
2. 1 - x 0 \<le> 1
3. (\<lambda>i. inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> S
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>i. \<Sum>j\<le>p. l j i * (x (Suc j) * inverse (1 - x 0))) \<in> S
[PROOF STEP]
show "(\<lambda>i. inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> S"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>i. \<Sum>j\<le>p. l j i * (x (Suc j) * inverse (1 - x 0))) \<in> S
goal (1 subgoal):
1. (\<lambda>i. inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> S
[PROOF STEP]
by (simp add: field_simps sum_divide_distrib)
[PROOF STATE]
proof (state)
this:
(\<lambda>i. inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> S
goal (2 subgoals):
1. 0 \<le> 1 - x 0
2. 1 - x 0 \<le> 1
[PROOF STEP]
qed (use x01 in auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>i. (1 - (1 - x 0)) * v i + (1 - x 0) * (inverse (1 - x 0) * (\<Sum>j\<le>p. l j i * x (Suc j)))) \<in> T
goal (1 subgoal):
1. x 0 \<noteq> 1 \<Longrightarrow> (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
goal (1 subgoal):
1. (\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
(\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<lambda>i. v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))) \<in> T
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
?x \<in> standard_simplex (Suc p) \<Longrightarrow> oriented_simplex (Suc p) (\<lambda>i. if i = 0 then v else l (i - 1)) ?x \<in> T
goal (1 subgoal):
1. simplicial_simplex (Suc p) T (simplex_cone p v f)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?x \<in> standard_simplex (Suc p) \<Longrightarrow> oriented_simplex (Suc p) (\<lambda>i. if i = 0 then v else l (i - 1)) ?x \<in> T
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
?x \<in> standard_simplex (Suc p) \<Longrightarrow> oriented_simplex (Suc p) (\<lambda>i. if i = 0 then v else l (i - 1)) ?x \<in> T
goal (1 subgoal):
1. simplicial_simplex (Suc p) T (simplex_cone p v f)
[PROOF STEP]
by (auto simp: simplicial_simplex feq simplex_cone)
[PROOF STATE]
proof (state)
this:
simplicial_simplex (Suc p) T (simplex_cone p v f)
goal:
No subgoals!
[PROOF STEP]
qed
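(* Informal gloss, not part of the original theory: in the case x 0 \<noteq> 1
   the key step is the convex decomposition, with u = 1 - x 0,
     v i * x 0 + (\<Sum>j\<le>p. l j i * x (Suc j))
       = (1 - u) * v i + u * (\<Sum>j\<le>p. l j i * (x (Suc j) * inverse u)),
   whose rescaled coefficients x (Suc j) * inverse u again sum to 1, so the
   inner sum lies in S and the whole point lies in T by the cone assumption. *)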
|
{"llama_tokens": 8653, "file": null, "length": 78}
|
import argparse
import chainer
from chainer import cuda
import fcn
import numpy as np
import tqdm
from models.fcn8 import FCN8s
def evaluate():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--file', type=str, help='model file path')
args = parser.parse_args()
    model_path = args.file
    print("evaluating:", model_path)
    dataset = fcn.datasets.VOC2011ClassSeg('seg11valid')
    n_class = len(dataset.class_names)
    model = FCN8s()
    chainer.serializers.load_npz(model_path, model)
gpu = 0
if gpu >= 0:
cuda.get_device(gpu).use()
model.to_gpu()
lbl_preds, lbl_trues = [], []
for i in tqdm.trange(len(dataset)):
datum, lbl_true = fcn.datasets.transform_lsvrc2012_vgg16(
dataset.get_example(i))
x_data = np.expand_dims(datum, axis=0)
if gpu >= 0:
x_data = cuda.to_gpu(x_data)
with chainer.no_backprop_mode():
x = chainer.Variable(x_data)
with chainer.using_config('train', False):
model(x)
lbl_pred = chainer.functions.argmax(model.score, axis=1)[0]
lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
lbl_preds.append(lbl_pred)
lbl_trues.append(lbl_true)
acc, acc_cls, mean_iu, fwavacc = fcn.utils.label_accuracy_score(lbl_trues, lbl_preds, n_class)
print('Accuracy: %.4f' % (100 * acc))
print('AccClass: %.4f' % (100 * acc_cls))
print('Mean IoU: %.4f' % (100 * mean_iu))
print('Fwav Acc: %.4f' % (100 * fwavacc))
if __name__ == '__main__':
evaluate()
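# Example invocation (the model path is illustrative):
#   python evaluate.py --file logs/fcn8s_voc.npz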
|
{"hexsha": "e7e9bb1af802ad63828fd7602013209de9a50100", "size": 1447, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluate.py", "max_stars_repo_name": "juliocamposmachado/gain2", "max_stars_repo_head_hexsha": "cd1cb0ac021078ed42fe3c1456040c00622e79d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 242, "max_stars_repo_stars_event_min_datetime": "2018-07-05T02:55:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T15:14:53.000Z", "max_issues_repo_path": "evaluate.py", "max_issues_repo_name": "lomoda0715/Guided-Attention-Inference-Network", "max_issues_repo_head_hexsha": "cd1cb0ac021078ed42fe3c1456040c00622e79d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2018-09-27T08:11:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-15T02:47:58.000Z", "max_forks_repo_path": "evaluate.py", "max_forks_repo_name": "lomoda0715/Guided-Attention-Inference-Network", "max_forks_repo_head_hexsha": "cd1cb0ac021078ed42fe3c1456040c00622e79d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 52, "max_forks_repo_forks_event_min_datetime": "2018-07-06T15:33:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-15T03:22:27.000Z", "avg_line_length": 26.7962962963, "max_line_length": 95, "alphanum_fraction": 0.7152729786, "include": true, "reason": "import numpy", "num_tokens": 420}
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import warnings
import os
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
dataPath = "temp/"
if not os.path.exists(dataPath):
os.makedirs(dataPath)
mnist = input_data.read_data_sets(dataPath, one_hot=True)
print(mnist.train.images.shape)
print(mnist.train.labels.shape)
print(mnist.test.images.shape)
print(mnist.test.labels.shape)
image_0 = mnist.train.images[0]
image_0 = image_0.reshape(28, 28)
label_0 = mnist.train.labels[0]
print(label_0)
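# With the default split used by input_data (5000 images held out for
# validation), the shapes printed above are (55000, 784)/(55000, 10) for
# train and (10000, 784)/(10000, 10) for test; label_0 is a one-hot vector.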
plt.imshow(image_0, cmap='Greys_r')
plt.show()
|
{"hexsha": "3b53bee0d511279513b937d275c284243aacab56", "size": 700, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter03/MNIST/Explore_MNIST.py", "max_stars_repo_name": "tongni1975/Deep-Learning-with-TensorFlow-Second-Edition", "max_stars_repo_head_hexsha": "6964bf3bf11c1b38113a0459b4d1a9dac416ed39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 54, "max_stars_repo_stars_event_min_datetime": "2018-04-01T21:25:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-04T02:45:45.000Z", "max_issues_repo_path": "Chapter03/MNIST/Explore_MNIST.py", "max_issues_repo_name": "tongni1975/Deep-Learning-with-TensorFlow-Second-Edition", "max_issues_repo_head_hexsha": "6964bf3bf11c1b38113a0459b4d1a9dac416ed39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-04-14T03:15:36.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-03T12:05:03.000Z", "max_forks_repo_path": "Chapter03/MNIST/Explore_MNIST.py", "max_forks_repo_name": "tongni1975/Deep-Learning-with-TensorFlow-Second-Edition", "max_forks_repo_head_hexsha": "6964bf3bf11c1b38113a0459b4d1a9dac416ed39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 51, "max_forks_repo_forks_event_min_datetime": "2018-04-10T12:25:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T07:36:17.000Z", "avg_line_length": 22.5806451613, "max_line_length": 59, "alphanum_fraction": 0.7414285714, "include": true, "reason": "import numpy", "num_tokens": 167}
|
[STATEMENT]
lemma OclAny_allInstances_at_post_oclIsTypeOf\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y2:
"\<exists>\<tau>. (\<tau> \<Turnstile> not (OclAny .allInstances()->forAll\<^sub>S\<^sub>e\<^sub>t(X|X .oclIsTypeOf(OclAny))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>\<tau>. \<tau> \<Turnstile> not (UML_Set.OclForall OclAny .allInstances() OclIsTypeOf\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y)
[PROOF STEP]
unfolding OclAllInstances_at_post_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>\<tau>. \<tau> \<Turnstile> not (UML_Set.OclForall (OclAllInstances_generic snd OclAny) OclIsTypeOf\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y)
[PROOF STEP]
by(rule OclAny_allInstances_generic_oclIsTypeOf\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y2, simp)
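(* Informal gloss, not part of the original theory: OclAllInstances_at_post is
   the generic allInstances operator instantiated with the post-state
   projection snd, so the generic counterexample lemma discharges the goal. *)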
|
{"llama_tokens": 360, "file": "Featherweight_OCL_examples_Employee_Model_Analysis_Analysis_UML", "length": 2}
|
import os,sys,io,shutil,csv
from decimal import Decimal
import numpy as np
def unit_vector(vector):
""" Returns the unit vector of the vector."""
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
"""Finds angle between two vectors"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def x_rotation(vector,theta):
"""Rotates 3-D vector around x-axis"""
R = np.array([[1,0,0],[0,np.cos(theta),-np.sin(theta)],[0, np.sin(theta), np.cos(theta)]])
return np.dot(R,vector)
def y_rotation(vector,theta):
"""Rotates 3-D vector around y-axis"""
R = np.array([[np.cos(theta),0,np.sin(theta)],[0,1,0],[-np.sin(theta), 0, np.cos(theta)]])
return np.dot(R,vector)
def z_rotation(vector,theta):
"""Rotates 3-D vector around z-axis"""
R = np.array([[np.cos(theta), -np.sin(theta),0],[np.sin(theta), np.cos(theta),0],[0,0,1]])
return np.dot(R,vector)
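# Quick sanity check for the rotation helpers (illustrative only): rotating
# the x unit vector by +90 degrees about the z-axis gives the y unit vector,
# up to floating-point error.
# >>> np.allclose(z_rotation(np.array([1.0, 0.0, 0.0]), np.pi/2), [0.0, 1.0, 0.0])
# True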
def create_directories(workdir,counter,action,angle_id,label):
"""creates the action directories required for our rotations"""
counter+=1
#making csv file in action directory for Right
path_original = workdir + '/actions'+angle_id+'/' +str(label[0:4])+"_"+str(counter)+"_"+"action"+"_"+action+".csv"
path_by_45 =workdir + '/actions'+angle_id+"by-45"+'/' +str(label[0:4])+"_"+str(counter)+"_"+"action"+"_"+action+".csv"
pathby_by_90 = workdir +'/actions'+angle_id+"by-90"+'/' +str(label[0:4])+"_"+str(counter)+"_"+"action"+"_"+action+".csv"
pathby45 = workdir +'/actions'+angle_id+"by45"+'/' +str(label[0:4])+"_"+str(counter)+"_"+"action"+"_"+action+".csv"
pathby90 =workdir +'/actions'+angle_id+"by90"+'/' +str(label[0:4])+"_"+str(counter)+"_"+"action"+"_"+action+".csv"
    #make sure every csv file exists; append mode creates a missing file
    #without truncating an existing one
    for path in {path_original, path_by_45, pathby_by_90, pathby45, pathby90}:
        open(path, "a").close()
action_file_original = open(path_original,"a")
action_file_original_by_45 = open(path_by_45,"a")
action_file_original_by_90 = open(pathby_by_90,"a")
action_file_original_by45 = open(pathby45,"a")
action_file_original_by90 = open(pathby90,"a")
return counter,action_file_original,action_file_original_by_45,action_file_original_by_90,action_file_original_by45,action_file_original_by90
def rotation_by_angle(array_for_rotations, theta):
    """rotates every 3-D joint of a frame by theta about the y-axis and
    returns the frame as a comma-separated string"""
    w_string = ""
    for coordinates in range(3, 153, 3):
        joint_array = array_for_rotations[0, coordinates-3:coordinates]
        # the second skeleton (columns 75-149) is only rotated when it is
        # actually present, i.e. contains no zero placeholders; otherwise the
        # values are written out unchanged
        if coordinates <= 75 or 0.0 not in array_for_rotations[0, 76:150]:
            joint_array = y_rotation(joint_array, theta)
        w_string += str(joint_array[0]) + ',' + str(joint_array[1]) + ',' + str(joint_array[2]) + ','
    return w_string
def rotate(datadir,labeldir,workdir):
"""main function performing the rotation"""
pathS = datadir
pathL = labeldir
if workdir in os.getcwd():
workdir = "."
#skeleton directory
dirSkeleton = os.listdir( pathS )
#label directory
dirLabel = os.listdir( pathL )
for angle_id in {"Left","Middle","Right"}:
        for suffix in ("", "by-45", "by-90", "by45", "by90"):
            directory = workdir + '/actions' + angle_id + suffix
            if not os.path.exists(directory):
                os.mkdir(directory)
#right middle and left action counters
count_actionsR=0
count_actionsM=0
count_actionsL=0
#initialization of action file variables
action_file_original=""
action_file_original_by_45=""
action_file_original_by_90=""
action_file_original_by45=""
action_file_original_by90=""
for data,label in zip(dirSkeleton,dirLabel):
        #determine the camera angle of each action from the label filename
if "L" in label:
angle_id ="Left"
if "M" in label:
angle_id ="Middle"
if "R" in label:
angle_id ="Right"
#single label path
path_single_label = pathL + '/' + label
#label file
single_label_file = open(path_single_label)
#line read
line = single_label_file.readline()
while line!="":
#line values
values = line.split(',')
if len(values)<2:
break
#get values
action = values[0]
starting_action_frame = values[1]
ending_action_frame = values[2]
if "Right" in angle_id:
count_actionsR,action_file_original,action_file_original_by_45,action_file_original_by_90,action_file_original_by45,action_file_original_by90 = create_directories(workdir,count_actionsR,action,angle_id,label)
if "Middle" in angle_id:
count_actionsM,action_file_original,action_file_original_by_45,action_file_original_by_90,action_file_original_by45,action_file_original_by90 = create_directories(workdir,count_actionsM,action,angle_id,label)
if "Left" in angle_id:
count_actionsL,action_file_original,action_file_original_by_45,action_file_original_by_90,action_file_original_by45,action_file_original_by90 = create_directories(workdir,count_actionsL,action,angle_id,label)
#data
pathdat = pathS + '/' + label
single_data_file = open(pathdat,"r")
#write data
for num_of_frame,dataLine in enumerate(single_data_file):
#getting data according to frame instructions
if num_of_frame >= int(starting_action_frame)-1 and num_of_frame<=int(ending_action_frame)-1:
values = dataLine.split(' ')
#writes data to the original file
w_string_original=""
#writes data to the -45 degrees rotated file
w_string_by_45=""
#writes data to the -90 degrees rotated file
w_string_by_90=""
#writes data to the 45 degrees rotated file
w_string_by45=""
#writes data to the 90 degrees rotated file
w_string_by90=""
array_for_rotations = np.zeros([1,150])
for y in range(0,len(values)):
array_for_rotations[0,y] = Decimal(values[y])
#ORIGINAL FILE
for coordinates in range(0,150):
w_string_original += str(array_for_rotations[0,coordinates]) + ','
#ROTATION BY -45 DEGREES ABOUT THE Y AXIS
w_string_by_45=rotation_by_angle(array_for_rotations,-np.pi/4)
#ROTATION BY -90 DEGREES ABOUT THE Y AXIS
w_string_by_90=rotation_by_angle(array_for_rotations,-np.pi/2)
#ROTATION BY 45 DEGREES ABOUT THE Y AXIS
w_string_by45=rotation_by_angle(array_for_rotations, np.pi/4)
#ROTATION BY 90 DEGREES ABOUT THE Y AXIS
w_string_by90=rotation_by_angle(array_for_rotations, np.pi/2)
#write string in the action file
if(w_string_original!=""):
action_file_original.write(w_string_original)
action_file_original.write("\n")
if(w_string_by_45!=""):
action_file_original_by_45.write(w_string_by_45)
action_file_original_by_45.write("\n")
if(w_string_by_90!=""):
action_file_original_by_90.write(w_string_by_90)
action_file_original_by_90.write("\n")
if(w_string_by45!=""):
action_file_original_by45.write(w_string_by45)
action_file_original_by45.write("\n")
if(w_string_by90!=""):
action_file_original_by90.write(w_string_by90)
action_file_original_by90.write("\n")
elif num_of_frame>int(ending_action_frame):
break
#read new line
line = single_label_file.readline()
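# Example invocation (directory names are illustrative, not from the original
# pipeline):
# rotate(datadir="skeleton_files", labeldir="label_files", workdir="output")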
|
{"hexsha": "f0b4d77833aa6a3bf969fef9e426561d392961fb", "size": 8479, "ext": "py", "lang": "Python", "max_stars_repo_path": "Rotate.py", "max_stars_repo_name": "AntonyPapadakis/HumanActionRecognitionCnns", "max_stars_repo_head_hexsha": "55b73afeadccfedc84892e5e6b56644e7bd58b58", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Rotate.py", "max_issues_repo_name": "AntonyPapadakis/HumanActionRecognitionCnns", "max_issues_repo_head_hexsha": "55b73afeadccfedc84892e5e6b56644e7bd58b58", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Rotate.py", "max_forks_repo_name": "AntonyPapadakis/HumanActionRecognitionCnns", "max_forks_repo_head_hexsha": "55b73afeadccfedc84892e5e6b56644e7bd58b58", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4769874477, "max_line_length": 216, "alphanum_fraction": 0.6564453355, "include": true, "reason": "import numpy", "num_tokens": 2190}
|
import matplotlib as mpl
import make_colormap as mc
import matplotlib
import matplotlib.cm as cm
from matplotlib import gridspec
import sys
sys.path.insert(1, '../sglv_timeseries')
import sglv_timeseries.glv.Timeseries
from matplotlib.colors import Normalize
from make_colormap import *
import pandas as pd
import noise_analysis
from scipy import signal, stats
from timeseries_plotting import PlotTimeseries
from noise_analysis import noise_color
from neutrality_analysis import KullbackLeibler_neutrality
from neutral_covariance_test import neutral_covariance_test
from smooth_spline import *
# colormap noise
c = mpl.colors.ColorConverter().to_rgb
noise_cmap = make_colormap(
[c('k'), c('brown'), 0.33, c('brown'), c('pink'), 0.66, c('pink'), c('lightgrey')]) # with grey
noise_lim = [-3, 0]
noise_cmap_ww = make_colormap(
[c('k'), c('brown'), 0.33, c('brown'), c('pink'), 0.66, c('pink'), c('white')]) # with white
# code from https://stackoverflow.com/questions/30465080/associating-colors-from-a-continuous-colormap-to-specific-values-in-matplotlib
class PiecewiseNormalize(Normalize):
def __init__(self, xvalues, cvalues):
self.xvalues = xvalues
self.cvalues = cvalues
Normalize.__init__(self)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
if self.xvalues is not None:
x, y = self.xvalues, self.cvalues
return np.ma.masked_array(np.interp(value, x, y))
else:
return Normalize.__call__(self, value, clip)
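# Example (values illustrative): pin log10(0.05) to the colormap midpoint so
# that significant and non-significant p-values land on opposite colour
# halves, mirroring the neutral covariance test plot further below.
# norm = PiecewiseNormalize([-5, np.log10(0.05), 0], [0, 0.5, 1])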
def lighten_color(color, amount=0.5):
"""
Lightens the given color by multiplying (1-luminosity) by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Examples:
>> lighten_color('g', 0.3)
>> lighten_color('#F034A3', 0.6)
>> lighten_color((.3,.55,.1), 0.5)
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def example_noise_fit(ax, ts, label=None, verbose=False, spline=False, linear_all=False):
frq, f = signal.periodogram(ts)
frq = frq.astype(float)
if np.any(np.imag(f) != 0):
        raise ValueError('One of the densities is complex, check what went wrong.')
else:
r = np.array([ff.real for ff in f]) # TODO strange error np.real(f) and f.real do not behave as expected
f = r.astype(float)
mask = np.isfinite(f) & np.isfinite(frq)
frq = frq[mask]
f = f[mask]
mask = (f > 0) & (frq > 0)
frq = frq[mask]
f = f[mask]
frq = np.log10(frq)
f = np.log10(np.abs(f))
# plot points
l = ax.plot(frq, f, '.', label=label, markersize=2, alpha=0.25)
if len(frq) > 5:
if spline:
# spline interpolation
p_spline = get_natural_cubic_spline_model(frq, f, minval=min(frq), maxval=max(frq), n_knots=4)
y = p_spline.predict(frq)
deriv = (y[1:] - y[:-1]) / (frq[1:] - frq[:-1])
# plot spline interpolation
ax.plot(frq, y, color=mc.change_color(l[0].get_color(), 1),
linestyle='dotted') # , label = 'spline fit: %.2f' % min(deriv))
if linear_all:
x = np.linspace(min(frq), max(frq), 200)
slope_all, intercept, r_value, p_value, std_err = stats.linregress(frq, f)
if verbose:
print("The slope with all points included is %.3f +- %.3f" % (slope_all, std_err))
# plot linear interpolation
ax.plot(x, slope_all * x + intercept, color=mc.change_color(l[0].get_color(), 0.8), linestyle='dashed')
# only consider frequencies which correspond to periods that are smaller than (length_timeseries/10)
# otherwise effects from windowing
f = f[frq >= min(frq) + 1]
frq = frq[frq >= min(frq) + 1]
x = np.linspace(min(frq), max(frq), 200)
slope, intercept, r_value, p_value, std_err = stats.linregress(frq, f)
if verbose:
print("The slope is %.3f +- %.3f" % (slope, std_err))
# plot linear interpolation
ax.plot(x, slope * x + intercept, color=mc.change_color(l[0].get_color(), 1.2),
label='%.2f' % slope if not spline else "%.2f | %.2f | %.2f" % (slope, slope_all, min(deriv)))
# spline interpolation without low frequencies
# p_spline = get_natural_cubic_spline_model(frq, f, minval=min(frq), maxval=max(frq), n_knots=3.5)
if spline:
y = p_spline.predict(frq)
# plot new spline interpolation
# plt.plot(frq, y, color=change_color(l[0].get_color(), 1.3), label='spline 2')
else:
slope = np.nan
ax.set_xlabel('log$_{10}$(frequency)')
ax.set_ylabel('log$_{10}$(power spectral density)')
return slope
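# Minimal usage sketch for example_noise_fit (illustrative, not part of the
# original module): white Gaussian noise has a flat power spectral density,
# so the fitted slope should come out close to 0.
# fig, ax = plt.subplots()
# slope = example_noise_fit(ax, np.random.randn(1024), label='white noise',
#                           verbose=True)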
class PlotCharacteristics():
def __init__(self, ts, species=None):
self.ts = ts
self.mean = ts.mean()
self.mean.drop('time', inplace=True)
self.vmin = 0.1 * np.nanmin(self.mean.values[self.mean.values != np.inf])
self.vmax = 10 * np.nanmax(self.mean.values[self.mean.values != np.inf])
self.Nspecies = len(self.ts.columns) - 1
self.noise_color = None
        if species is None:
self.selection = self.select_species()
else:
self.selection = species
def select_species(self):
sorted_species = self.mean.sort_values().index.tolist()[::-1]
return sorted_species[::max(1, int(self.Nspecies / 4))]
def plot_timeseries(self, ax, species=None, raw=False):
PlotTimeseries(self.ts, ax, species, raw)
def plot_power_spectral_density(self, ax, species=None, mean_slope=False, raw=False):
if len(self.ts) < 2:
return
        if species is not None:
self.selection = species
for s in self.selection:
example_noise_fit(ax, self.ts[s])
if mean_slope:
            if self.noise_color is None:
self.noise_color = noise_analysis.noise_color(self.ts)
ax.legend([], [], title='mean slope = %.2f + %.2f' % (
np.mean(self.noise_color['slope_linear']), np.std(self.noise_color['slope_linear'])))
if raw:
ax.set_ylabel('')
ax.set_xlabel('')
def plot_noise_color(self, ax, raw=False):
if len(self.ts) < 2:
return
        if self.noise_color is None:
self.noise_color = noise_color(self.ts)
ax.scatter(self.mean, self.noise_color['slope_linear'])
ax.errorbar(self.mean, self.noise_color['slope_linear'], self.noise_color['std_slope_linear'], linestyle='')
xx = np.linspace(2, -3, 500).reshape([500, 1])
ax.imshow(xx, cmap=noise_cmap_ww, vmin=noise_lim[0], vmax=noise_lim[1], extent=(self.vmin, self.vmax, -3, 2),
aspect='auto', alpha=0.75)
if not raw:
ax.set_ylabel('Slope power spectral density')
def plot_absolute_step(self, ax, raw=False):
if len(self.ts) < 2:
return
dx = (self.ts.values[1:, 1:].astype(float) - self.ts.values[:-1, 1:].astype(float)) # / x.values[:-1, 1:];
dx[~np.isfinite(dx)] = np.nan
if np.any(~np.isnan(dx)):
mean_dx = np.nanmean(abs(dx), axis=0)
else:
return
x = np.log10(self.mean[~np.isnan(mean_dx)])
y = np.log10(mean_dx[~np.isnan(mean_dx)])
if len(x) > 0:
p_lin = np.polyfit(x, y, deg=1, cov=False)
else:
p_lin = np.nan, np.nan
xx = [np.nanmin(self.mean.values), np.nanmax(self.mean.values)]
ax.plot(xx, 10 ** (p_lin[1] + p_lin[0] * np.log10(xx)), c='k', linewidth=0.5)
ax.text(0.95, 0.05, r'y $\propto$ x$^{%.2f}$' % p_lin[0], transform=ax.transAxes, va='bottom', ha='right')
ax.scatter(self.mean, mean_dx)
if not raw:
ax.set_ylabel(r'$\langle \vert x(t+\delta t) - x(t)\vert \rangle$')
def plot_width_distribution_ratios(self, ax, raw=False):
if len(self.ts) < 2:
return
def fit_ratio(x):
x = [x1 / x2 for x1, x2 in zip(x[:-1], x[1:]) if x1 != 0 and x2 != 0 and ~np.isnan(x1) and ~np.isnan(x2)]
if len(x) > 5:
a, b, c = stats.lognorm.fit(x, floc=0) # Gives the paramters of the fit
stat, pval = stats.kstest(x, 'lognorm', args=((a, b, c)))
return a, b, c, stat, pval
else:
return (np.nan, np.nan, np.nan, np.nan, np.nan)
dx_ratio = pd.DataFrame(index=self.ts.columns, columns=['s', 'loc', 'scale', 'ks-stat', 'ks-pval'])
dx_ratio.drop('time', inplace=True)
for idx in dx_ratio.index:
dx_ratio.loc[idx] = fit_ratio(self.ts[idx].values) # b = 0, c = 1
ax.scatter(self.mean, dx_ratio['s'], c=dx_ratio['ks-pval'], vmin=0, vmax=1, cmap='coolwarm')
def plot_rank_abundance(self, ax, selected_times=None, raw=False):
        if selected_times is None:
selected_times = self.ts['time'][::max(1, int(len(self.ts['time']) / 3))]
for t in selected_times:
abundance_profile = self.ts[self.ts['time'] == t].values.flatten()[1:]
ax.plot(range(1, len(abundance_profile) + 1), np.sort(abundance_profile)[::-1], label='Day %d' % int(t))
if not raw:
ax.set_ylabel('Abundance')
def plot_neutrality_measures(self, ax_KL, ax_NCT, raw=False):
if len(self.ts) < 2:
return
KL = KullbackLeibler_neutrality(self.ts)
norm_ts = self.ts.values[:, 1:].copy().astype(float)
norm_ts /= norm_ts.sum(axis=1, keepdims=True)
NCT = neutral_covariance_test(norm_ts, ntests=500, method='Kolmogorov', seed=56)
ax_KL.matshow([[np.log10(KL)]], cmap='Blues_r', vmin=-1, vmax=3, aspect='auto', )
ax_KL.set_xticks([])
ax_KL.set_yticks([0])
ax_KL.set_yticklabels([r'$D_{KL}$'], fontsize=10)
ax_KL.text(0, 0, '{:0.2E}'.format(KL), ha='center', va='center', color='w' if KL < 10 ** (0.5) else 'k')
norm = PiecewiseNormalize([self.vmin, np.log10(0.05), self.vmax], [0, 0.5, 1])
ax_NCT.matshow([[np.log10(NCT)]], norm=norm, cmap='seismic_r', aspect='auto', vmin=-5, vmax=0)
ax_NCT.set_xticks([])
ax_NCT.set_yticks([0])
ax_NCT.set_yticklabels([r'$p_{NCT}$'], fontsize=10)
ax_NCT.text(0, 0, '{:0.2E}'.format(NCT), ha='center', va='center',
color='w' if NCT < 10 ** (-3) or NCT > 10 ** (-0.7) else 'k')
class PlotTimeseriesComparison():
def __init__(self, files, titles=[], composition=['ts', 'psd', 'nc', 'dx', 'disdx', 'ra', 'nn'], vertical=True,
fig=None):
if isinstance(files, str):
self.files = np.array([pd.read_csv(files, na_values='NAN', index_col=0)])
elif isinstance(files, pd.DataFrame):
files = np.array([files])
elif isinstance(files, list):
if all(isinstance(file, str) for file in files):
self.files = [pd.read_csv(file, na_values='NAN') for file in files]
elif all(isinstance(file, pd.DataFrame) for file in files):
self.files = files
else:
types = [type(file) for file in files]
raise ValueError(
"All files should be of type str or pd.DataFrame, these files are of type: %s" % str(types))
else:
raise ValueError("All files should be of type str or pd.DataFrame, this file is of type %s" % type(files))
for i, file in enumerate(self.files):
mask = file[[col for col in file.columns if col.startswith('species_')]].dropna(how='all',
axis='index').index
self.files[i] = file.loc[mask]
# define figure
        if fig is None:
            if vertical:
self.fig = plt.figure(figsize=(3 * len(files), 2.5 * len(composition))) # , tight_layout=True)
else:
self.fig = plt.figure(figsize=(3 * len(composition), 2.5 * len(files))) # , tight_layout=True)
elif isinstance(fig, matplotlib.axes.Axes) and len(composition) == 1 and len(files) == 1:
self.fig = None
elif fig != 0 and composition == ['nn'] and len(fig) == 2:
self.fig = None
else:
self.fig = fig
# define titles
if len(files) != len(titles):
self.titles = ['' for _ in range(len(files))]
else:
self.titles = titles
self.composition = composition
if vertical:
self.orientation = 'vertical'
else:
self.orientation = 'horizontal'
# define grid
self.set_grid_subfigures()
self.axes = {'ts': [], 'psd': [], 'nc': [], 'dx': [], 'disdx': [], 'ra': [], 'KL': [], 'NCT': []}
# define all axes
if isinstance(fig, matplotlib.axes.Axes) and len(composition) == 1 and len(files) == 1:
self.axes[composition[0]] = [fig]
elif fig != 0 and composition == ['nn'] and len(fig) == 2:
self.axes['KL'] = [fig[0]]
self.axes['NCT'] = [fig[1]]
else:
self.define_axes()
# draw all
for i, file, title in zip(range(len(files)), self.files, self.titles):
self.draw_time_series(i, file, title)
# set x- and y-labels
self.set_labels()
# set scales and grid
self.set_scales_axes()
# remove ticklabels of shared axes
if self.orientation == 'vertical' and len(self.files) > 0:
for c in composition:
if c != 'nn':
for ax in self.axes[c][1:]:
ax.tick_params(axis="both", left=True, labelleft=False)
# limit visible yrange of timeseries (do not show values that go to values close to zero/infinity)
if 'ts' in composition:
ylim1, ylim2 = self.axes['ts'][0].get_ylim()
ylim1 = max(1e-5, ylim1)
ylim2 = min(1e6, ylim2)
self.axes['ts'][0].set_ylim([ylim1, ylim2])
def set_grid_subfigures(self):
if self.orientation == 'vertical':
self.gs = gridspec.GridSpec(len(self.composition), len(self.files), top=0.9, bottom=0.2, wspace=0.1,
hspace=0.5, left=0.1, right=0.9)
else:
self.gs = gridspec.GridSpec(len(self.files), len(self.composition), top=0.9, bottom=0.2, wspace=0.5,
width_ratios=[2 if ci == 'nn' else 3 for ci in self.composition], left=0.1,
right=0.9)
def set_labels(self):
for c, xlabel, ylabel in zip(['ts', 'psd', 'nc', 'dx', 'disdx', 'ra'],
['Time', 'log$_{10}$(frequency)', 'Mean abundance', 'Mean abundance',
'Mean abundance', 'Rank'], ['Abundance', 'log$_{10}$(power spectral density)',
'Slope power \n spectral density',
r'$\langle \vert x(t+\delta t) - x(t) \vert \rangle$',
'Width distribution ratios \n of successive time points',
'Abundance']):
if c in self.composition:
self.axes[c][0].set_ylabel(ylabel)
self.axes[c][-1].set_xlabel(xlabel, x=1, ha='right')
def define_axes(self):
for i in range(len(self.files)):
for c in self.composition:
if self.orientation == 'vertical':
row = self.composition.index(c)
col = i
else:
col = self.composition.index(c)
row = i
if c == 'nn':
sub_gs = self.gs[row, col].subgridspec(4, 1, height_ratios=[2, 1, 1, 2])
self.axes['KL'] += [self.fig.add_subplot(sub_gs[1])]
self.axes['NCT'] += [self.fig.add_subplot(sub_gs[2])]
else:
self.axes[c] += [self.fig.add_subplot(self.gs[row, col], sharey=self.axes[c][0] if i > 0 else None)]
def set_scales_axes(self):
for c, xscale, yscale, grid in zip(['ts', 'psd', 'nc', 'dx', 'disdx', 'ra'],
['linear', 'linear', 'log', 'log', 'log', 'log'],
['log', 'linear', 'linear', 'log', 'log', 'log'],
[True, True, True, True, True, True]):
if c in self.composition:
for ax in self.axes[c]:
ax.set_yscale(yscale)
ax.set_xscale(xscale)
ax.grid(grid)
def draw_time_series(self, i, file, title):
if isinstance(file, str):
ts = pd.read_csv(file, na_values='NAN')
elif isinstance(file, pd.DataFrame):
ts = file.copy()
# set title
if self.composition[0] != 'nn':
self.axes[self.composition[0]][i].set_title(title)
else:
self.axes['KL'][i].set_title(title)
plotter = PlotCharacteristics(ts)
for c, func in zip(['ts', 'psd', 'nc', 'dx', 'disdx', 'ra'],
[plotter.plot_timeseries, plotter.plot_power_spectral_density, plotter.plot_noise_color,
plotter.plot_absolute_step, plotter.plot_width_distribution_ratios,
plotter.plot_rank_abundance]):
if c in self.composition:
func(self.axes[c][i], raw=True)
if 'nn' in self.composition:
plotter.plot_neutrality_measures(self.axes['KL'][i], self.axes['NCT'][i], raw=True)
def figure(self):
return self.fig
class PlotNoiseColorComparison():
def __init__(self, files, labels, selfints=1, legend_title=None, ax=0, masi=True, interaction_colors=False):
if ax == 0:
self.fig = plt.figure(figsize=(4, 3.5), tight_layout=True)
self.ax = self.fig.add_subplot(111)
else:
self.ax = ax
self.ax.set_xscale('log')
if masi == True:
self.xaxis = 'masi'
else:
self.xaxis = 'ma'
self.interaction_colors = interaction_colors
if isinstance(selfints, float) or isinstance(selfints, int):
self.selfints = [selfints] * len(files)
elif len(selfints) < len(files):
raise IndexError("The length of the self-interactions must be equal to the length of the files.")
else:
self.selfints = selfints
self.legend_title = legend_title
for file, label, selfint in zip(files, labels, self.selfints):
self.plot_file(file, label, selfint)
self.label_axes()
# legend entries in opposite order:
self.invert_legend_entries()
self.plot_background_colors()
def plot_file(self, file, label, selfint):
if isinstance(file, str):
df = pd.read_csv(file, index_col=0, na_values='NAN')
elif isinstance(file, pd.DataFrame):
df = file.copy()
df.dropna(how='all', axis='index', inplace=True)
if 'steady state' in df.columns: # files created without interactions
ss = df['steady state']
df = df[[col for col in df if col.endswith('slope')]]
if self.xaxis == 'masi':
x = ss * selfint
elif self.xaxis == 'ma':
x = ss
self.ax.errorbar(x, np.mean(df.T), np.std(df.T), linestyle='', marker='.', label=label)
else: # files created with interactions have different structure
means = df.loc['means']
stds = df.loc['stds']
if "KL" in df.index:
KL = df.loc['KL']
mean_color = df.loc['mean_color']
std_color = df.loc['std_color']
if self.interaction_colors:
c = self.interaction_mapper().to_rgba(float(label))
self.ax.errorbar(means, mean_color, std_color, label=label, linestyle='', marker='.', c=c)
else:
self.ax.errorbar(means, mean_color, std_color, label=label, linestyle='', marker='.')
def interaction_mapper(self):
norm = matplotlib.colors.Normalize(vmin=0, vmax=0.21, clip=True)
return cm.ScalarMappable(norm=norm, cmap='summer')
def label_axes(self):
if self.xaxis == 'masi':
self.ax.set_xlabel(r'Mean abundance $\times$ self-interaction', ha='right', x=1)
else:
self.ax.set_xlabel(r'Mean abundance', ha='right', x=1)
self.ax.set_ylabel('Slope power spectral density')
def invert_legend_entries(self):
handles, labels = self.ax.get_legend_handles_labels()
self.ax.legend(handles[::-1], labels[::-1], title=self.legend_title, loc=2)
def change_number_columns_legend(self, ncol):
handles, labels = self.ax.get_legend_handles_labels()
self.ax.legend(handles, labels, title=self.legend_title, loc=2, ncol=ncol)
# TODO make dependent on ranges
def plot_background_colors(self):
x = np.linspace(0.9, -3, 500).reshape([500, 1])
if self.ax.get_xscale() == 'log':
self.background = self.ax.imshow(x, cmap=noise_cmap_ww, vmin=noise_lim[0], vmax=noise_lim[1],
extent=(1e-3, 200, -3, 0.9), aspect='auto', alpha=0.75)
else:
self.background = self.ax.imshow(x, cmap=noise_cmap_ww, vmin=noise_lim[0], vmax=noise_lim[1],
extent=(-5, 105, -3, 0.9), aspect='auto', alpha=0.75)
def figure(self):
return self.fig
def set_limits(self, limits):
left, right, bottom, top = limits
left_orig, right_orig = self.ax.get_xlim()
bottom_orig, top_orig = self.ax.get_ylim()
if left < left_orig or right > right_orig or top > top_orig or bottom < bottom_orig:
self.background.remove()
x = np.linspace(0.9, -3, 500).reshape([500, 1])
if self.ax.get_xscale() == 'log':
self.background = self.ax.imshow(x, cmap=noise_cmap_ww, vmin=noise_lim[0], vmax=noise_lim[1],
extent=(left, right, bottom, top), aspect='auto', alpha=0.75)
else:
self.background = self.ax.imshow(x, cmap=noise_cmap_ww, vmin=noise_lim[0], vmax=noise_lim[1],
extent=(left, right, bottom, top), aspect='auto', alpha=0.75)
self.ax.set_xlim([left, right])
self.ax.set_ylim([bottom, top])
def main():
print('test plotting')
ts = sglv_timeseries.glv.Timeseries.main().timeseries
ts2 = sglv_timeseries.glv.Timeseries.main().timeseries
fig = PlotTimeseriesComparison([ts, ts2])
plt.show()
if __name__ == "__main__":
main()
|
{"hexsha": "002bff78293efc3fddfb9fc1260ed2cdb5736a9d", "size": 23482, "ext": "py", "lang": "Python", "max_stars_repo_path": "noise_properties_plotting.py", "max_stars_repo_name": "lanadescheemaeker/logistic_models", "max_stars_repo_head_hexsha": "9e10e6e631c91adc8e85e8a4130caf9eca835d85", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-16T12:33:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-16T12:33:44.000Z", "max_issues_repo_path": "noise_properties_plotting.py", "max_issues_repo_name": "BCGardner/logistic_models", "max_issues_repo_head_hexsha": "9e10e6e631c91adc8e85e8a4130caf9eca835d85", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "noise_properties_plotting.py", "max_forks_repo_name": "BCGardner/logistic_models", "max_forks_repo_head_hexsha": "9e10e6e631c91adc8e85e8a4130caf9eca835d85", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-03-11T12:05:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-16T12:34:36.000Z", "avg_line_length": 38.5582922824, "max_line_length": 135, "alphanum_fraction": 0.5559151691, "include": true, "reason": "from scipy", "num_tokens": 6036}
|
[STATEMENT]
lemma kruskal_exchange_acyclic_inv_2:
assumes "acyclic w"
and "injective w"
and "d \<le> w"
and "bijective (d\<^sup>T * top)"
and "bijective (e * top)"
and "d \<le> top * e\<^sup>T * w\<^sup>T\<^sup>\<star>"
and "w * e\<^sup>T * top = bot"
shows "acyclic ((w \<sqinter> -d) \<squnion> e)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
let ?v = "w \<sqinter> -d"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
let ?w = "?v \<squnion> e"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "d\<^sup>T * top \<le> w\<^sup>\<star> * e * top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. d\<^sup>T * top \<le> w\<^sup>\<star> * e * top
[PROOF STEP]
by (metis assms(6) comp_associative comp_inf.star.circ_decompose_9 comp_inf.star_star_absorb comp_isotone conv_dist_comp conv_involutive conv_order conv_star_commute conv_top inf.cobounded1 vector_top_closed)
[PROOF STATE]
proof (state)
this:
d\<^sup>T * top \<le> w\<^sup>\<star> * e * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
hence 1: "e * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top"
[PROOF STATE]
proof (prove)
using this:
d\<^sup>T * top \<le> w\<^sup>\<star> * e * top
goal (1 subgoal):
1. e * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
[PROOF STEP]
by (metis assms(4,5) bijective_reverse comp_associative conv_star_commute)
[PROOF STATE]
proof (state)
this:
e * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have 2: "?v * d\<^sup>T * top = bot"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (w \<sqinter> - d) * d\<^sup>T * top = bot
[PROOF STEP]
by (simp add: assms(2,3) kruskal_exchange_acyclic_inv_3)
[PROOF STATE]
proof (state)
this:
(w \<sqinter> - d) * d\<^sup>T * top = bot
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "?v * w\<^sup>T\<^sup>+ * d\<^sup>T * top \<le> w * w\<^sup>T\<^sup>+ * d\<^sup>T * top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (w \<sqinter> - d) * w\<^sup>T\<^sup>+ * d\<^sup>T * top \<le> w * w\<^sup>T\<^sup>+ * d\<^sup>T * top
[PROOF STEP]
by (simp add: mult_left_isotone)
[PROOF STATE]
proof (state)
this:
(w \<sqinter> - d) * w\<^sup>T\<^sup>+ * d\<^sup>T * top \<le> w * w\<^sup>T\<^sup>+ * d\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(w \<sqinter> - d) * w\<^sup>T\<^sup>+ * d\<^sup>T * top \<le> w * w\<^sup>T\<^sup>+ * d\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w * w\<^sup>T\<^sup>+ * d\<^sup>T * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
[PROOF STEP]
by (metis assms(2) mult_left_isotone mult_1_left mult_assoc)
[PROOF STATE]
proof (state)
this:
w * w\<^sup>T\<^sup>+ * d\<^sup>T * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(w \<sqinter> - d) * w\<^sup>T\<^sup>+ * d\<^sup>T * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
[PROOF STEP]
have "?v * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top"
[PROOF STATE]
proof (prove)
using this:
(w \<sqinter> - d) * w\<^sup>T\<^sup>+ * d\<^sup>T * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
goal (1 subgoal):
1. (w \<sqinter> - d) * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
[PROOF STEP]
using 2
[PROOF STATE]
proof (prove)
using this:
(w \<sqinter> - d) * w\<^sup>T\<^sup>+ * d\<^sup>T * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
(w \<sqinter> - d) * d\<^sup>T * top = bot
goal (1 subgoal):
1. (w \<sqinter> - d) * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
[PROOF STEP]
by (metis bot_least comp_associative mult_right_dist_sup star.circ_back_loop_fixpoint star.circ_plus_same sup_least)
[PROOF STATE]
proof (state)
this:
(w \<sqinter> - d) * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
hence 3: "?v\<^sup>\<star> * e * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top"
[PROOF STATE]
proof (prove)
using this:
(w \<sqinter> - d) * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
goal (1 subgoal):
1. (w \<sqinter> - d)\<^sup>\<star> * e * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
[PROOF STEP]
using 1
[PROOF STATE]
proof (prove)
using this:
(w \<sqinter> - d) * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
e * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
goal (1 subgoal):
1. (w \<sqinter> - d)\<^sup>\<star> * e * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
[PROOF STEP]
by (simp add: comp_associative star_left_induct sup_least)
[PROOF STATE]
proof (state)
this:
(w \<sqinter> - d)\<^sup>\<star> * e * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "d * e\<^sup>T \<le> bot"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. d * e\<^sup>T \<le> bot
[PROOF STEP]
by (metis assms(3,7) conv_bot conv_dist_comp conv_involutive conv_top order.trans inf.absorb2 inf.cobounded2 inf_commute le_bot p_antitone_iff p_top schroeder_4_p top_left_mult_increasing)
[PROOF STATE]
proof (state)
this:
d * e\<^sup>T \<le> bot
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
hence 4: "e\<^sup>T * top \<le> -(d\<^sup>T * top)"
[PROOF STATE]
proof (prove)
using this:
d * e\<^sup>T \<le> bot
goal (1 subgoal):
1. e\<^sup>T * top \<le> - (d\<^sup>T * top)
[PROOF STEP]
by (metis (no_types) comp_associative inf.cobounded2 le_bot p_antitone_iff schroeder_3_p semiring.mult_zero_left)
[PROOF STATE]
proof (state)
this:
e\<^sup>T * top \<le> - (d\<^sup>T * top)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "?v\<^sup>T * -(d\<^sup>T * top) \<le> -(d\<^sup>T * top)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (w \<sqinter> - d)\<^sup>T * - (d\<^sup>T * top) \<le> - (d\<^sup>T * top)
[PROOF STEP]
using schroeder_3_p mult_assoc 2
[PROOF STATE]
proof (prove)
using this:
(?x * ?y \<le> - ?z) = (?x\<^sup>T * ?z \<le> - ?y)
?a * ?b * ?c = ?a * (?b * ?c)
(w \<sqinter> - d) * d\<^sup>T * top = bot
goal (1 subgoal):
1. (w \<sqinter> - d)\<^sup>T * - (d\<^sup>T * top) \<le> - (d\<^sup>T * top)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(w \<sqinter> - d)\<^sup>T * - (d\<^sup>T * top) \<le> - (d\<^sup>T * top)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
hence "?v\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> -(d\<^sup>T * top)"
[PROOF STATE]
proof (prove)
using this:
(w \<sqinter> - d)\<^sup>T * - (d\<^sup>T * top) \<le> - (d\<^sup>T * top)
goal (1 subgoal):
1. (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> - (d\<^sup>T * top)
[PROOF STEP]
using 4
[PROOF STATE]
proof (prove)
using this:
(w \<sqinter> - d)\<^sup>T * - (d\<^sup>T * top) \<le> - (d\<^sup>T * top)
e\<^sup>T * top \<le> - (d\<^sup>T * top)
goal (1 subgoal):
1. (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> - (d\<^sup>T * top)
[PROOF STEP]
by (simp add: comp_associative star_left_induct sup_least)
[PROOF STATE]
proof (state)
this:
(w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> - (d\<^sup>T * top)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
hence 5: "d\<^sup>T * top \<le> -(?v\<^sup>T\<^sup>\<star> * e\<^sup>T * top)"
[PROOF STATE]
proof (prove)
using this:
(w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> - (d\<^sup>T * top)
goal (1 subgoal):
1. d\<^sup>T * top \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
[PROOF STEP]
by (simp add: p_antitone_iff)
[PROOF STATE]
proof (state)
this:
d\<^sup>T * top \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "w * ?v\<^sup>T\<^sup>\<star> * e\<^sup>T * top = w * e\<^sup>T * top \<squnion> w * ?v\<^sup>T\<^sup>+ * e\<^sup>T * top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top = w * e\<^sup>T * top \<squnion> w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top
[PROOF STEP]
by (metis star_left_unfold_equal mult_right_dist_sup mult_left_dist_sup mult_1_right mult_assoc)
[PROOF STATE]
proof (state)
this:
w * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top = w * e\<^sup>T * top \<squnion> w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
w * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top = w * e\<^sup>T * top \<squnion> w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... = w * ?v\<^sup>T\<^sup>+ * e\<^sup>T * top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w * e\<^sup>T * top \<squnion> w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top = w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top
[PROOF STEP]
using assms(7)
[PROOF STATE]
proof (prove)
using this:
w * e\<^sup>T * top = bot
goal (1 subgoal):
1. w * e\<^sup>T * top \<squnion> w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top = w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
w * e\<^sup>T * top \<squnion> w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top = w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
w * e\<^sup>T * top \<squnion> w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top = w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... \<le> w * w\<^sup>T * ?v\<^sup>T\<^sup>\<star> * e\<^sup>T * top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top \<le> w * w\<^sup>T * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top
[PROOF STEP]
by (simp add: comp_associative conv_isotone mult_left_isotone mult_right_isotone)
[PROOF STATE]
proof (state)
this:
w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top \<le> w * w\<^sup>T * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
w * (w \<sqinter> - d)\<^sup>T\<^sup>+ * e\<^sup>T * top \<le> w * w\<^sup>T * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... \<le> ?v\<^sup>T\<^sup>\<star> * e\<^sup>T * top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w * w\<^sup>T * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top
[PROOF STEP]
by (metis assms(2) mult_1_left mult_left_isotone)
[PROOF STATE]
proof (state)
this:
w * w\<^sup>T * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
w * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top
[PROOF STEP]
have "w * ?v\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> --(?v\<^sup>T\<^sup>\<star> * e\<^sup>T * top)"
[PROOF STATE]
proof (prove)
using this:
w * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top
goal (1 subgoal):
1. w * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> - - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
[PROOF STEP]
by (simp add: p_antitone p_antitone_iff)
[PROOF STATE]
proof (state)
this:
w * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> - - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
hence "w\<^sup>T * -(?v\<^sup>T\<^sup>\<star> * e\<^sup>T * top) \<le> -(?v\<^sup>T\<^sup>\<star> * e\<^sup>T * top)"
[PROOF STATE]
proof (prove)
using this:
w * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> - - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
goal (1 subgoal):
1. w\<^sup>T * - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top) \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
[PROOF STEP]
using comp_associative schroeder_3_p
[PROOF STATE]
proof (prove)
using this:
w * (w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top \<le> - - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
?x * ?y * ?z = ?x * (?y * ?z)
(?x * ?y \<le> - ?z) = (?x\<^sup>T * ?z \<le> - ?y)
goal (1 subgoal):
1. w\<^sup>T * - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top) \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
w\<^sup>T * - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top) \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
hence 6: "w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> -(?v\<^sup>T\<^sup>\<star> * e\<^sup>T * top)"
[PROOF STATE]
proof (prove)
using this:
w\<^sup>T * - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top) \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
goal (1 subgoal):
1. w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
[PROOF STEP]
using 5
[PROOF STATE]
proof (prove)
using this:
w\<^sup>T * - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top) \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
d\<^sup>T * top \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
goal (1 subgoal):
1. w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
[PROOF STEP]
by (simp add: comp_associative star_left_induct sup_least)
[PROOF STATE]
proof (state)
this:
w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "e * ?v\<^sup>\<star> * e \<le> e * ?v\<^sup>\<star> * e * top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. e * (w \<sqinter> - d)\<^sup>\<star> * e \<le> e * (w \<sqinter> - d)\<^sup>\<star> * e * top
[PROOF STEP]
by (simp add: top_right_mult_increasing)
[PROOF STATE]
proof (state)
this:
e * (w \<sqinter> - d)\<^sup>\<star> * e \<le> e * (w \<sqinter> - d)\<^sup>\<star> * e * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
e * (w \<sqinter> - d)\<^sup>\<star> * e \<le> e * (w \<sqinter> - d)\<^sup>\<star> * e * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... \<le> e * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. e * (w \<sqinter> - d)\<^sup>\<star> * e * top \<le> e * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
[PROOF STEP]
using 3
[PROOF STATE]
proof (prove)
using this:
(w \<sqinter> - d)\<^sup>\<star> * e * top \<le> w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
goal (1 subgoal):
1. e * (w \<sqinter> - d)\<^sup>\<star> * e * top \<le> e * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
[PROOF STEP]
by (simp add: comp_associative mult_right_isotone)
[PROOF STATE]
proof (state)
this:
e * (w \<sqinter> - d)\<^sup>\<star> * e * top \<le> e * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
e * (w \<sqinter> - d)\<^sup>\<star> * e * top \<le> e * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... \<le> e * -(?v\<^sup>T\<^sup>\<star> * e\<^sup>T * top)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. e * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> e * - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
[PROOF STEP]
using 6
[PROOF STATE]
proof (prove)
using this:
w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
goal (1 subgoal):
1. e * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> e * - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
[PROOF STEP]
by (simp add: comp_associative mult_right_isotone)
[PROOF STATE]
proof (state)
this:
e * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> e * - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
e * w\<^sup>T\<^sup>\<star> * d\<^sup>T * top \<le> e * - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... \<le> bot"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. e * - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top) \<le> bot
[PROOF STEP]
by (metis conv_complement_sub_leq conv_dist_comp conv_involutive conv_star_commute le_bot mult_right_sub_dist_sup_right p_bot regular_closed_bot star.circ_back_loop_fixpoint)
[PROOF STATE]
proof (state)
this:
e * - ((w \<sqinter> - d)\<^sup>T\<^sup>\<star> * e\<^sup>T * top) \<le> bot
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
e * (w \<sqinter> - d)\<^sup>\<star> * e \<le> bot
[PROOF STEP]
have 7: "e * ?v\<^sup>\<star> * e = bot"
[PROOF STATE]
proof (prove)
using this:
e * (w \<sqinter> - d)\<^sup>\<star> * e \<le> bot
goal (1 subgoal):
1. e * (w \<sqinter> - d)\<^sup>\<star> * e = bot
[PROOF STEP]
by (simp add: order.antisym)
[PROOF STATE]
proof (state)
this:
e * (w \<sqinter> - d)\<^sup>\<star> * e = bot
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
hence "?v\<^sup>\<star> * e \<le> -1"
[PROOF STATE]
proof (prove)
using this:
e * (w \<sqinter> - d)\<^sup>\<star> * e = bot
goal (1 subgoal):
1. irreflexive ((w \<sqinter> - d)\<^sup>\<star> * e)
[PROOF STEP]
by (metis bot_least comp_associative comp_commute_below_diversity ex231d order_lesseq_imp semiring.mult_zero_left star.circ_left_top)
[PROOF STATE]
proof (state)
this:
irreflexive ((w \<sqinter> - d)\<^sup>\<star> * e)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
hence 8: "?v\<^sup>\<star> * e * ?v\<^sup>\<star> \<le> -1"
[PROOF STATE]
proof (prove)
using this:
irreflexive ((w \<sqinter> - d)\<^sup>\<star> * e)
goal (1 subgoal):
1. irreflexive ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star>)
[PROOF STEP]
by (metis comp_associative comp_commute_below_diversity star.circ_transitive_equal)
[PROOF STATE]
proof (state)
this:
irreflexive ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star>)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "1 \<sqinter> ?w\<^sup>+ = 1 \<sqinter> ?w * ?v\<^sup>\<star> * (e * ?v\<^sup>\<star>)\<^sup>\<star>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e)\<^sup>+ = (1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star>)\<^sup>\<star>
[PROOF STEP]
by (simp add: star_sup_1 mult_assoc)
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e)\<^sup>+ = (1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star>)\<^sup>\<star>
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e)\<^sup>+ = (1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star>)\<^sup>\<star>
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... = 1 \<sqinter> ?w * ?v\<^sup>\<star> * (e * ?v\<^sup>\<star> \<squnion> 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star>)\<^sup>\<star> = (1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a))
[PROOF STEP]
using 7
[PROOF STATE]
proof (prove)
using this:
e * (w \<sqinter> - d)\<^sup>\<star> * e = bot
goal (1 subgoal):
1. (1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star>)\<^sup>\<star> = (1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a))
[PROOF STEP]
by (metis star.circ_mult_1 star_absorb sup_monoid.add_commute mult_assoc)
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star>)\<^sup>\<star> = (1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a))
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star>)\<^sup>\<star> = (1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a))
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... = 1 \<sqinter> (?v\<^sup>+ * e * ?v\<^sup>\<star> \<squnion> ?v\<^sup>+ \<squnion> e * ?v\<^sup>\<star> * e * ?v\<^sup>\<star> \<squnion> e * ?v\<^sup>\<star>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a)) = (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>)
[PROOF STEP]
by (simp add: comp_associative mult_left_dist_sup mult_right_dist_sup sup_assoc sup_commute sup_left_commute)
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a)) = (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e) * (w \<sqinter> - d)\<^sup>\<star> * (e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a)) = (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... = 1 \<sqinter> (?v\<^sup>+ * e * ?v\<^sup>\<star> \<squnion> ?v\<^sup>+ \<squnion> e * ?v\<^sup>\<star>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>) = (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>)
[PROOF STEP]
using 7
[PROOF STATE]
proof (prove)
using this:
e * (w \<sqinter> - d)\<^sup>\<star> * e = bot
goal (1 subgoal):
1. (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>) = (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>) = (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>) = (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... = 1 \<sqinter> (?v\<^sup>\<star> * e * ?v\<^sup>\<star> \<squnion> ?v\<^sup>+)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>) = (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+)
[PROOF STEP]
by (metis (mono_tags, opaque_lifting) comp_associative star.circ_loop_fixpoint sup_assoc sup_commute)
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>) = (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>+ * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+ \<squnion> e * (w \<sqinter> - d)\<^sup>\<star>) = (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... \<le> 1 \<sqinter> (?v\<^sup>\<star> * e * ?v\<^sup>\<star> \<squnion> w\<^sup>+)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+) \<le> (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> w\<^sup>+)
[PROOF STEP]
using comp_inf.mult_right_isotone comp_isotone semiring.add_right_mono star_isotone sup_commute
[PROOF STATE]
proof (prove)
using this:
?x \<le> ?y \<Longrightarrow> ?z \<sqinter> ?x \<le> ?z \<sqinter> ?y
\<lbrakk>?x \<le> ?y; ?w \<le> ?z\<rbrakk> \<Longrightarrow> ?x * ?w \<le> ?y * ?z
?a \<le> ?b \<Longrightarrow> ?a \<squnion> ?c \<le> ?b \<squnion> ?c
?x \<le> ?y \<Longrightarrow> ?x\<^sup>\<star> \<le> ?y\<^sup>\<star>
?x \<squnion> ?y = ?y \<squnion> ?x
goal (1 subgoal):
1. (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+) \<le> (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> w\<^sup>+)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+) \<le> (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> w\<^sup>+)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (w \<sqinter> - d)\<^sup>+) \<le> (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> w\<^sup>+)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... = (1 \<sqinter> ?v\<^sup>\<star> * e * ?v\<^sup>\<star>) \<squnion> (1 \<sqinter> w\<^sup>+)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> w\<^sup>+) = (1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a) \<sqinter> w\<^sup>+
[PROOF STEP]
by (simp add: inf_sup_distrib1)
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> w\<^sup>+) = (1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a) \<sqinter> w\<^sup>+
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> w\<^sup>+) = (1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a) \<sqinter> w\<^sup>+
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... = 1 \<sqinter> ?v\<^sup>\<star> * e * ?v\<^sup>\<star>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a) \<sqinter> w\<^sup>+ = (1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star>
[PROOF STEP]
by (metis assms(1) inf_commute pseudo_complement sup_bot_right)
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a) \<sqinter> w\<^sup>+ = (1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star>
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> \<squnion> (1::'a) \<sqinter> w\<^sup>+ = (1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star>
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
have "... = bot"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> = bot
[PROOF STEP]
using 8 p_antitone_iff pseudo_complement
[PROOF STATE]
proof (prove)
using this:
irreflexive ((w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star>)
(?x \<le> - ?y) = (?y \<le> - ?x)
(?x \<sqinter> ?y = bot) = (?x \<le> - ?y)
goal (1 subgoal):
1. (1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> = bot
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(1::'a) \<sqinter> (w \<sqinter> - d)\<^sup>\<star> * e * (w \<sqinter> - d)\<^sup>\<star> = bot
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e)\<^sup>+ \<le> bot
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e)\<^sup>+ \<le> bot
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
using le_bot p_antitone_iff pseudo_complement
[PROOF STATE]
proof (prove)
using this:
(1::'a) \<sqinter> (w \<sqinter> - d \<squnion> e)\<^sup>+ \<le> bot
?a \<le> bot \<Longrightarrow> ?a = bot
(?x \<le> - ?y) = (?y \<le> - ?x)
(?x \<sqinter> ?y = bot) = (?x \<le> - ?y)
goal (1 subgoal):
1. pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
pd_kleene_allegory_class.acyclic (w \<sqinter> - d \<squnion> e)
goal:
No subgoals!
[PROOF STEP]
qed
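
The calculational core of this proof can be summarized in ordinary notation (a readability sketch, not part of the verified script; $v$ abbreviates $w \sqcap -d$ as above):

\[
1 \sqcap (v \sqcup e)^{+}
  \;=\; 1 \sqcap (v^{*} e\, v^{*} \sqcup v^{+})
  \;\le\; (1 \sqcap v^{*} e\, v^{*}) \sqcup (1 \sqcap w^{+})
  \;=\; 1 \sqcap v^{*} e\, v^{*}
  \;=\; \bot
\]

The inequality uses $v \le w$, the next step uses the acyclicity of $w$ (so $1 \sqcap w^{+} = \bot$), and the final step uses fact 8, the irreflexivity $v^{*} e\, v^{*} \le -1$, which in turn rests on fact 7, $e\, v^{*} e = \bot$.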
|
{"llama_tokens": 16571, "file": "Stone_Kleene_Relation_Algebras_Kleene_Relation_Algebras", "length": 109}
|
[STATEMENT]
lemma Contra: "insert (Neg A) H \<turnstile> A \<Longrightarrow> H \<turnstile> A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. insert (Neg A) H \<turnstile> A \<Longrightarrow> H \<turnstile> A
[PROOF STEP]
by (metis Peirce Imp_I)
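
A sketch of the underlying derivation (assuming, as is usual in this syntax, that $\mathit{Neg}\,A$ abbreviates the implication $A \to \bot$): from $H, \mathit{Neg}\,A \vdash A$, rule \textit{Imp\_I} yields $H \vdash (A \to \bot) \to A$, and the corresponding instance of Peirce's law, $((A \to \bot) \to A) \to A$, then gives $H \vdash A$.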
|
{"llama_tokens": 101, "file": "Goedel_HFSet_Semanticless_SyntaxN", "length": 1}
|
[STATEMENT]
lemma f''_imp_f':
fixes f :: "real \<Rightarrow> real"
assumes "convex C"
and f': "\<And>x. x \<in> C \<Longrightarrow> DERIV f x :> (f' x)"
and f'': "\<And>x. x \<in> C \<Longrightarrow> DERIV f' x :> (f'' x)"
and pos: "\<And>x. x \<in> C \<Longrightarrow> f'' x \<ge> 0"
and x: "x \<in> C"
and y: "y \<in> C"
shows "f' x * (y - x) \<le> f y - f x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f' x * (y - x) \<le> f y - f x
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
convex C
?x \<in> C \<Longrightarrow> (f has_real_derivative f' ?x) (at ?x)
?x \<in> C \<Longrightarrow> (f' has_real_derivative f'' ?x) (at ?x)
?x \<in> C \<Longrightarrow> 0 \<le> f'' ?x
x \<in> C
y \<in> C
goal (1 subgoal):
1. f' x * (y - x) \<le> f y - f x
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>convex C; \<And>x. x \<in> C \<Longrightarrow> (f has_real_derivative f' x) (at x); \<And>x. x \<in> C \<Longrightarrow> (f' has_real_derivative f'' x) (at x); \<And>x. x \<in> C \<Longrightarrow> 0 \<le> f'' x; x \<in> C; y \<in> C\<rbrakk> \<Longrightarrow> f' x * (y - x) \<le> f y - f x
[PROOF STEP]
have less_imp: "f y - f x \<ge> f' x * (y - x)" "f' y * (x - y) \<le> f x - f y"
if *: "x \<in> C" "y \<in> C" "y > x" for x y :: real
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f' x * (y - x) \<le> f y - f x &&& f' y * (x - y) \<le> f x - f y
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
from *
[PROOF STATE]
proof (chain)
picking this:
x \<in> C
y \<in> C
x < y
[PROOF STEP]
have ge: "y - x > 0" "y - x \<ge> 0"
[PROOF STATE]
proof (prove)
using this:
x \<in> C
y \<in> C
x < y
goal (1 subgoal):
1. 0 < y - x &&& 0 \<le> y - x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < y - x
0 \<le> y - x
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
from *
[PROOF STATE]
proof (chain)
picking this:
x \<in> C
y \<in> C
x < y
[PROOF STEP]
have le: "x - y < 0" "x - y \<le> 0"
[PROOF STATE]
proof (prove)
using this:
x \<in> C
y \<in> C
x < y
goal (1 subgoal):
1. x - y < 0 &&& x - y \<le> 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x - y < 0
x - y \<le> 0
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x - y < 0
x - y \<le> 0
[PROOF STEP]
obtain z1 where z1: "z1 > x" "z1 < y" "f y - f x = (y - x) * f' z1"
[PROOF STATE]
proof (prove)
using this:
x - y < 0
x - y \<le> 0
goal (1 subgoal):
1. (\<And>z1. \<lbrakk>x < z1; z1 < y; f y - f x = (y - x) * f' z1\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using subsetD[OF atMostAtLeast_subset_convex[OF \<open>convex C\<close> \<open>x \<in> C\<close> \<open>y \<in> C\<close> \<open>x < y\<close>],
THEN f', THEN MVT2[OF \<open>x < y\<close>, rule_format, unfolded atLeastAtMost_iff[symmetric]]]
[PROOF STATE]
proof (prove)
using this:
x - y < 0
x - y \<le> 0
(\<And>x. \<lbrakk>x \<le> x; x \<le> y\<rbrakk> \<Longrightarrow> x \<in> {x..y}) \<Longrightarrow> \<exists>z>x. z < y \<and> f y - f x = (y - x) * f' z
goal (1 subgoal):
1. (\<And>z1. \<lbrakk>x < z1; z1 < y; f y - f x = (y - x) * f' z1\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x < z1
z1 < y
f y - f x = (y - x) * f' z1
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x < z1
z1 < y
f y - f x = (y - x) * f' z1
[PROOF STEP]
have "z1 \<in> C"
[PROOF STATE]
proof (prove)
using this:
x < z1
z1 < y
f y - f x = (y - x) * f' z1
goal (1 subgoal):
1. z1 \<in> C
[PROOF STEP]
using atMostAtLeast_subset_convex \<open>convex C\<close> \<open>x \<in> C\<close> \<open>y \<in> C\<close> \<open>x < y\<close>
[PROOF STATE]
proof (prove)
using this:
x < z1
z1 < y
f y - f x = (y - x) * f' z1
\<lbrakk>convex ?C; ?x \<in> ?C; ?y \<in> ?C; ?x < ?y\<rbrakk> \<Longrightarrow> {?x..?y} \<subseteq> ?C
convex C
x \<in> C
y \<in> C
x < y
goal (1 subgoal):
1. z1 \<in> C
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
z1 \<in> C
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
from z1
[PROOF STATE]
proof (chain)
picking this:
x < z1
z1 < y
f y - f x = (y - x) * f' z1
[PROOF STEP]
have z1': "f x - f y = (x - y) * f' z1"
[PROOF STATE]
proof (prove)
using this:
x < z1
z1 < y
f y - f x = (y - x) * f' z1
goal (1 subgoal):
1. f x - f y = (x - y) * f' z1
[PROOF STEP]
by (simp add: field_simps)
[PROOF STATE]
proof (state)
this:
f x - f y = (x - y) * f' z1
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
obtain z2 where z2: "z2 > x" "z2 < z1" "f' z1 - f' x = (z1 - x) * f'' z2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>z2. \<lbrakk>x < z2; z2 < z1; f' z1 - f' x = (z1 - x) * f'' z2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using subsetD[OF atMostAtLeast_subset_convex[OF \<open>convex C\<close> \<open>x \<in> C\<close> \<open>z1 \<in> C\<close> \<open>x < z1\<close>],
THEN f'', THEN MVT2[OF \<open>x < z1\<close>, rule_format, unfolded atLeastAtMost_iff[symmetric]]] z1
[PROOF STATE]
proof (prove)
using this:
(\<And>x. \<lbrakk>x \<le> x; x \<le> z1\<rbrakk> \<Longrightarrow> x \<in> {x..z1}) \<Longrightarrow> \<exists>z>x. z < z1 \<and> f' z1 - f' x = (z1 - x) * f'' z
x < z1
z1 < y
f y - f x = (y - x) * f' z1
goal (1 subgoal):
1. (\<And>z2. \<lbrakk>x < z2; z2 < z1; f' z1 - f' x = (z1 - x) * f'' z2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x < z2
z2 < z1
f' z1 - f' x = (z1 - x) * f'' z2
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
obtain z3 where z3: "z3 > z1" "z3 < y" "f' y - f' z1 = (y - z1) * f'' z3"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>z3. \<lbrakk>z1 < z3; z3 < y; f' y - f' z1 = (y - z1) * f'' z3\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using subsetD[OF atMostAtLeast_subset_convex[OF \<open>convex C\<close> \<open>z1 \<in> C\<close> \<open>y \<in> C\<close> \<open>z1 < y\<close>],
THEN f'', THEN MVT2[OF \<open>z1 < y\<close>, rule_format, unfolded atLeastAtMost_iff[symmetric]]] z1
[PROOF STATE]
proof (prove)
using this:
(\<And>x. \<lbrakk>z1 \<le> x; x \<le> y\<rbrakk> \<Longrightarrow> x \<in> {z1..y}) \<Longrightarrow> \<exists>z>z1. z < y \<and> f' y - f' z1 = (y - z1) * f'' z
x < z1
z1 < y
f y - f x = (y - x) * f' z1
goal (1 subgoal):
1. (\<And>z3. \<lbrakk>z1 < z3; z3 < y; f' y - f' z1 = (y - z1) * f'' z3\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
z1 < z3
z3 < y
f' y - f' z1 = (y - z1) * f'' z3
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
have "f' y - (f x - f y) / (x - y) = f' y - f' z1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f' y - (f x - f y) / (x - y) = f' y - f' z1
[PROOF STEP]
using * z1'
[PROOF STATE]
proof (prove)
using this:
x \<in> C
y \<in> C
x < y
f x - f y = (x - y) * f' z1
goal (1 subgoal):
1. f' y - (f x - f y) / (x - y) = f' y - f' z1
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
f' y - (f x - f y) / (x - y) = f' y - f' z1
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
f' y - (f x - f y) / (x - y) = f' y - f' z1
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
have "\<dots> = (y - z1) * f'' z3"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f' y - f' z1 = (y - z1) * f'' z3
[PROOF STEP]
using z3
[PROOF STATE]
proof (prove)
using this:
z1 < z3
z3 < y
f' y - f' z1 = (y - z1) * f'' z3
goal (1 subgoal):
1. f' y - f' z1 = (y - z1) * f'' z3
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
f' y - f' z1 = (y - z1) * f'' z3
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
f' y - (f x - f y) / (x - y) = (y - z1) * f'' z3
[PROOF STEP]
have cool': "f' y - (f x - f y) / (x - y) = (y - z1) * f'' z3"
[PROOF STATE]
proof (prove)
using this:
f' y - (f x - f y) / (x - y) = (y - z1) * f'' z3
goal (1 subgoal):
1. f' y - (f x - f y) / (x - y) = (y - z1) * f'' z3
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
f' y - (f x - f y) / (x - y) = (y - z1) * f'' z3
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
have A': "y - z1 \<ge> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> y - z1
[PROOF STEP]
using z1
[PROOF STATE]
proof (prove)
using this:
x < z1
z1 < y
f y - f x = (y - x) * f' z1
goal (1 subgoal):
1. 0 \<le> y - z1
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> y - z1
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
have "z3 \<in> C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. z3 \<in> C
[PROOF STEP]
using z3 * atMostAtLeast_subset_convex \<open>convex C\<close> \<open>x \<in> C\<close> \<open>z1 \<in> C\<close> \<open>x < z1\<close>
[PROOF STATE]
proof (prove)
using this:
z1 < z3
z3 < y
f' y - f' z1 = (y - z1) * f'' z3
x \<in> C
y \<in> C
x < y
\<lbrakk>convex ?C; ?x \<in> ?C; ?y \<in> ?C; ?x < ?y\<rbrakk> \<Longrightarrow> {?x..?y} \<subseteq> ?C
convex C
x \<in> C
z1 \<in> C
x < z1
goal (1 subgoal):
1. z3 \<in> C
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
z3 \<in> C
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
z3 \<in> C
[PROOF STEP]
have B': "f'' z3 \<ge> 0"
[PROOF STATE]
proof (prove)
using this:
z3 \<in> C
goal (1 subgoal):
1. 0 \<le> f'' z3
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
z3 \<in> C
convex C
?x \<in> C \<Longrightarrow> (f has_real_derivative f' ?x) (at ?x)
?x \<in> C \<Longrightarrow> (f' has_real_derivative f'' ?x) (at ?x)
?x \<in> C \<Longrightarrow> 0 \<le> f'' ?x
x \<in> C
y \<in> C
goal (1 subgoal):
1. 0 \<le> f'' z3
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> f'' z3
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
from A' B'
[PROOF STATE]
proof (chain)
picking this:
0 \<le> y - z1
0 \<le> f'' z3
[PROOF STEP]
have "(y - z1) * f'' z3 \<ge> 0"
[PROOF STATE]
proof (prove)
using this:
0 \<le> y - z1
0 \<le> f'' z3
goal (1 subgoal):
1. 0 \<le> (y - z1) * f'' z3
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> (y - z1) * f'' z3
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
from cool' this
[PROOF STATE]
proof (chain)
picking this:
f' y - (f x - f y) / (x - y) = (y - z1) * f'' z3
0 \<le> (y - z1) * f'' z3
[PROOF STEP]
have "f' y - (f x - f y) / (x - y) \<ge> 0"
[PROOF STATE]
proof (prove)
using this:
f' y - (f x - f y) / (x - y) = (y - z1) * f'' z3
0 \<le> (y - z1) * f'' z3
goal (1 subgoal):
1. 0 \<le> f' y - (f x - f y) / (x - y)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> f' y - (f x - f y) / (x - y)
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
from mult_right_mono_neg[OF this le(2)]
[PROOF STATE]
proof (chain)
picking this:
(f' y - (f x - f y) / (x - y)) * (x - y) \<le> 0 * (x - y)
[PROOF STEP]
have "f' y * (x - y) - (f x - f y) / (x - y) * (x - y) \<le> 0 * (x - y)"
[PROOF STATE]
proof (prove)
using this:
(f' y - (f x - f y) / (x - y)) * (x - y) \<le> 0 * (x - y)
goal (1 subgoal):
1. f' y * (x - y) - (f x - f y) / (x - y) * (x - y) \<le> 0 * (x - y)
[PROOF STEP]
by (simp add: algebra_simps)
[PROOF STATE]
proof (state)
this:
f' y * (x - y) - (f x - f y) / (x - y) * (x - y) \<le> 0 * (x - y)
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
f' y * (x - y) - (f x - f y) / (x - y) * (x - y) \<le> 0 * (x - y)
[PROOF STEP]
have "f' y * (x - y) - (f x - f y) \<le> 0"
[PROOF STATE]
proof (prove)
using this:
f' y * (x - y) - (f x - f y) / (x - y) * (x - y) \<le> 0 * (x - y)
goal (1 subgoal):
1. f' y * (x - y) - (f x - f y) \<le> 0
[PROOF STEP]
using le
[PROOF STATE]
proof (prove)
using this:
f' y * (x - y) - (f x - f y) / (x - y) * (x - y) \<le> 0 * (x - y)
x - y < 0
x - y \<le> 0
goal (1 subgoal):
1. f' y * (x - y) - (f x - f y) \<le> 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
f' y * (x - y) - (f x - f y) \<le> 0
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
f' y * (x - y) - (f x - f y) \<le> 0
[PROOF STEP]
have res: "f' y * (x - y) \<le> f x - f y"
[PROOF STATE]
proof (prove)
using this:
f' y * (x - y) - (f x - f y) \<le> 0
goal (1 subgoal):
1. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
f' y * (x - y) \<le> f x - f y
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
have "(f y - f x) / (y - x) - f' x = f' z1 - f' x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f y - f x) / (y - x) - f' x = f' z1 - f' x
[PROOF STEP]
using * z1
[PROOF STATE]
proof (prove)
using this:
x \<in> C
y \<in> C
x < y
x < z1
z1 < y
f y - f x = (y - x) * f' z1
goal (1 subgoal):
1. (f y - f x) / (y - x) - f' x = f' z1 - f' x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(f y - f x) / (y - x) - f' x = f' z1 - f' x
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(f y - f x) / (y - x) - f' x = f' z1 - f' x
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
have "\<dots> = (z1 - x) * f'' z2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f' z1 - f' x = (z1 - x) * f'' z2
[PROOF STEP]
using z2
[PROOF STATE]
proof (prove)
using this:
x < z2
z2 < z1
f' z1 - f' x = (z1 - x) * f'' z2
goal (1 subgoal):
1. f' z1 - f' x = (z1 - x) * f'' z2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
f' z1 - f' x = (z1 - x) * f'' z2
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(f y - f x) / (y - x) - f' x = (z1 - x) * f'' z2
[PROOF STEP]
have cool: "(f y - f x) / (y - x) - f' x = (z1 - x) * f'' z2"
[PROOF STATE]
proof (prove)
using this:
(f y - f x) / (y - x) - f' x = (z1 - x) * f'' z2
goal (1 subgoal):
1. (f y - f x) / (y - x) - f' x = (z1 - x) * f'' z2
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(f y - f x) / (y - x) - f' x = (z1 - x) * f'' z2
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
have A: "z1 - x \<ge> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> z1 - x
[PROOF STEP]
using z1
[PROOF STATE]
proof (prove)
using this:
x < z1
z1 < y
f y - f x = (y - x) * f' z1
goal (1 subgoal):
1. 0 \<le> z1 - x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> z1 - x
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
have "z2 \<in> C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. z2 \<in> C
[PROOF STEP]
using z2 z1 * atMostAtLeast_subset_convex \<open>convex C\<close> \<open>z1 \<in> C\<close> \<open>y \<in> C\<close> \<open>z1 < y\<close>
[PROOF STATE]
proof (prove)
using this:
x < z2
z2 < z1
f' z1 - f' x = (z1 - x) * f'' z2
x < z1
z1 < y
f y - f x = (y - x) * f' z1
x \<in> C
y \<in> C
x < y
\<lbrakk>convex ?C; ?x \<in> ?C; ?y \<in> ?C; ?x < ?y\<rbrakk> \<Longrightarrow> {?x..?y} \<subseteq> ?C
convex C
z1 \<in> C
y \<in> C
z1 < y
goal (1 subgoal):
1. z2 \<in> C
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
z2 \<in> C
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
z2 \<in> C
[PROOF STEP]
have B: "f'' z2 \<ge> 0"
[PROOF STATE]
proof (prove)
using this:
z2 \<in> C
goal (1 subgoal):
1. 0 \<le> f'' z2
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
z2 \<in> C
convex C
?x \<in> C \<Longrightarrow> (f has_real_derivative f' ?x) (at ?x)
?x \<in> C \<Longrightarrow> (f' has_real_derivative f'' ?x) (at ?x)
?x \<in> C \<Longrightarrow> 0 \<le> f'' ?x
x \<in> C
y \<in> C
goal (1 subgoal):
1. 0 \<le> f'' z2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> f'' z2
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
from A B
[PROOF STATE]
proof (chain)
picking this:
0 \<le> z1 - x
0 \<le> f'' z2
[PROOF STEP]
have "(z1 - x) * f'' z2 \<ge> 0"
[PROOF STATE]
proof (prove)
using this:
0 \<le> z1 - x
0 \<le> f'' z2
goal (1 subgoal):
1. 0 \<le> (z1 - x) * f'' z2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> (z1 - x) * f'' z2
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
with cool
[PROOF STATE]
proof (chain)
picking this:
(f y - f x) / (y - x) - f' x = (z1 - x) * f'' z2
0 \<le> (z1 - x) * f'' z2
[PROOF STEP]
have "(f y - f x) / (y - x) - f' x \<ge> 0"
[PROOF STATE]
proof (prove)
using this:
(f y - f x) / (y - x) - f' x = (z1 - x) * f'' z2
0 \<le> (z1 - x) * f'' z2
goal (1 subgoal):
1. 0 \<le> (f y - f x) / (y - x) - f' x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> (f y - f x) / (y - x) - f' x
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
from mult_right_mono[OF this ge(2)]
[PROOF STATE]
proof (chain)
picking this:
0 * (y - x) \<le> ((f y - f x) / (y - x) - f' x) * (y - x)
[PROOF STEP]
have "(f y - f x) / (y - x) * (y - x) - f' x * (y - x) \<ge> 0 * (y - x)"
[PROOF STATE]
proof (prove)
using this:
0 * (y - x) \<le> ((f y - f x) / (y - x) - f' x) * (y - x)
goal (1 subgoal):
1. 0 * (y - x) \<le> (f y - f x) / (y - x) * (y - x) - f' x * (y - x)
[PROOF STEP]
by (simp add: algebra_simps)
[PROOF STATE]
proof (state)
this:
0 * (y - x) \<le> (f y - f x) / (y - x) * (y - x) - f' x * (y - x)
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 * (y - x) \<le> (f y - f x) / (y - x) * (y - x) - f' x * (y - x)
[PROOF STEP]
have "f y - f x - f' x * (y - x) \<ge> 0"
[PROOF STATE]
proof (prove)
using this:
0 * (y - x) \<le> (f y - f x) / (y - x) * (y - x) - f' x * (y - x)
goal (1 subgoal):
1. 0 \<le> f y - f x - f' x * (y - x)
[PROOF STEP]
using ge
[PROOF STATE]
proof (prove)
using this:
0 * (y - x) \<le> (f y - f x) / (y - x) * (y - x) - f' x * (y - x)
0 < y - x
0 \<le> y - x
goal (1 subgoal):
1. 0 \<le> f y - f x - f' x * (y - x)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> f y - f x - f' x * (y - x)
goal (2 subgoals):
1. f' x * (y - x) \<le> f y - f x
2. f' y * (x - y) \<le> f x - f y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 \<le> f y - f x - f' x * (y - x)
[PROOF STEP]
show "f y - f x \<ge> f' x * (y - x)" "f' y * (x - y) \<le> f x - f y"
[PROOF STATE]
proof (prove)
using this:
0 \<le> f y - f x - f' x * (y - x)
goal (1 subgoal):
1. f' x * (y - x) \<le> f y - f x &&& f' y * (x - y) \<le> f x - f y
[PROOF STEP]
using res
[PROOF STATE]
proof (prove)
using this:
0 \<le> f y - f x - f' x * (y - x)
f' y * (x - y) \<le> f x - f y
goal (1 subgoal):
1. f' x * (y - x) \<le> f y - f x &&& f' y * (x - y) \<le> f x - f y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
f' x * (y - x) \<le> f y - f x
f' y * (x - y) \<le> f x - f y
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<lbrakk>?x \<in> C; ?y \<in> C; ?x < ?y\<rbrakk> \<Longrightarrow> f' ?x * (?y - ?x) \<le> f ?y - f ?x
\<lbrakk>?x \<in> C; ?y \<in> C; ?x < ?y\<rbrakk> \<Longrightarrow> f' ?y * (?x - ?y) \<le> f ?x - f ?y
goal (1 subgoal):
1. \<lbrakk>convex C; \<And>x. x \<in> C \<Longrightarrow> (f has_real_derivative f' x) (at x); \<And>x. x \<in> C \<Longrightarrow> (f' has_real_derivative f'' x) (at x); \<And>x. x \<in> C \<Longrightarrow> 0 \<le> f'' x; x \<in> C; y \<in> C\<rbrakk> \<Longrightarrow> f' x * (y - x) \<le> f y - f x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f' x * (y - x) \<le> f y - f x
[PROOF STEP]
proof (cases "x = y")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. x = y \<Longrightarrow> f' x * (y - x) \<le> f y - f x
2. x \<noteq> y \<Longrightarrow> f' x * (y - x) \<le> f y - f x
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
x = y
goal (2 subgoals):
1. x = y \<Longrightarrow> f' x * (y - x) \<le> f y - f x
2. x \<noteq> y \<Longrightarrow> f' x * (y - x) \<le> f y - f x
[PROOF STEP]
with x y
[PROOF STATE]
proof (chain)
picking this:
x \<in> C
y \<in> C
x = y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
x \<in> C
y \<in> C
x = y
goal (1 subgoal):
1. f' x * (y - x) \<le> f y - f x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
f' x * (y - x) \<le> f y - f x
goal (1 subgoal):
1. x \<noteq> y \<Longrightarrow> f' x * (y - x) \<le> f y - f x
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<noteq> y \<Longrightarrow> f' x * (y - x) \<le> f y - f x
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
x \<noteq> y
goal (1 subgoal):
1. x \<noteq> y \<Longrightarrow> f' x * (y - x) \<le> f y - f x
[PROOF STEP]
with less_imp x y
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?x \<in> C; ?y \<in> C; ?x < ?y\<rbrakk> \<Longrightarrow> f' ?x * (?y - ?x) \<le> f ?y - f ?x
\<lbrakk>?x \<in> C; ?y \<in> C; ?x < ?y\<rbrakk> \<Longrightarrow> f' ?y * (?x - ?y) \<le> f ?x - f ?y
x \<in> C
y \<in> C
x \<noteq> y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?x \<in> C; ?y \<in> C; ?x < ?y\<rbrakk> \<Longrightarrow> f' ?x * (?y - ?x) \<le> f ?y - f ?x
\<lbrakk>?x \<in> C; ?y \<in> C; ?x < ?y\<rbrakk> \<Longrightarrow> f' ?y * (?x - ?y) \<le> f ?x - f ?y
x \<in> C
y \<in> C
x \<noteq> y
goal (1 subgoal):
1. f' x * (y - x) \<le> f y - f x
[PROOF STEP]
by (auto simp: neq_iff)
[PROOF STATE]
proof (state)
this:
f' x * (y - x) \<le> f y - f x
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
f' x * (y - x) \<le> f y - f x
goal:
No subgoals!
[PROOF STEP]
qed
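
In ordinary notation, the argument for $x < y$ runs as follows (a readability sketch, not part of the verified script): the mean value theorem gives $z_1 \in (x, y)$ with
\[
f(y) - f(x) = (y - x)\, f'(z_1),
\]
and applying it again to $f'$ on $(x, z_1)$ gives $z_2 \in (x, z_1)$ with
\[
f'(z_1) - f'(x) = (z_1 - x)\, f''(z_2) \ge 0,
\]
so $f'(z_1) \ge f'(x)$ and hence $f(y) - f(x) \ge (y - x)\, f'(x)$; the case $x > y$ is symmetric, and $x = y$ is trivial.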
|
{"llama_tokens": 11830, "file": null, "length": 114}
|
%!TEX root = ../paper.tex
\section{Conclusion and Future Work}
\label{sec:conclusion}
Nowadays, everyone can write a blog post fairly easily.
Thus, the number of blog posts and authors increases quickly, making the task of identifying an exact author hard to accomplish.
In this paper, we presented an approach for dividing a set of blog posts into different groups, each group representing an individual writing style and therefore similar authors.
In this way, we reduce the set of potential candidates by an order of magnitude or more and can aid the user in getting much closer to the correct author.
We tested and evaluated multiple features for identifying different writing styles.
To this end, we used established features, but also added some new ones, such as an \textit{emoticon} feature.
We use a \textit{k-means} algorithm to cluster the blog posts based on the selected features.
Our evaluation on a small test data set shows that our \textit{k-means} algorithm has an F-measure of $61.64\%$.
Labelling the clusters by the most significant writing style characteristics helps the user to distinguish between them.
To assign new blog posts to a cluster, i.e., a writing style, we examined different classification methods.
\textit{K-nearest neighbor} performed slightly worse ($97.77\%$) than a \textit{support vector machine} ($98.89\%$).
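The pipeline can be illustrated by the following minimal sketch, assuming scikit-learn; the feature matrix \texttt{X} (one row of writing-style features per blog post), the number of clusters \texttt{k}, and the new posts \texttt{X\_new} are hypothetical placeholders, not part of our implementation.
\begin{verbatim}
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# X: posts-by-features matrix of stylometric features
# (e.g., function word ratios, emoticon counts); assumed given.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Step 1: group the posts into k writing-style clusters.
kmeans = KMeans(n_clusters=k, random_state=0).fit(X_scaled)
labels = kmeans.labels_

# Step 2: train a classifier on the cluster labels so that new
# posts can be assigned to a writing style (the SVM variant).
clf = SVC().fit(X_scaled, labels)
new_clusters = clf.predict(scaler.transform(X_new))
\end{verbatim}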
Our approach of finding blog posts similar to a selected one, i.e., the blog posts in the same cluster, can be used to recommend posts similar to one a user likes.
Since we can search across topics, a new type of recommendation engine could be created to recommend blogs based on writing style alone.
Our system could be employed on blogging websites extending traditional topic-based recommender systems to also consider writing style.
This might improve recommendation quality or even find good matches for blogs previously considered to be not interesting to a user, thus increasing traffic and advertising revenue.
Our program's results and usability could be improved in various ways: adding more languages to our program's repertoire would greatly increase its scope.
This would merely require adjusting the calculation of some of the features by adding an abbreviation list and a function word list as well as new tests on the performance and feasibility of each feature.
Utilizing a larger and more diverse data set with a proper gold standard would allow for an improved evaluation of our program's ability to find exact matches for unknown authors.
This would also allow for a more in-depth discussion of which additional features to use and how to weigh them.
Additional work could also be put into our clustering step by adding a phase prior to the \textit{k-means} algorithm that attempts to find the optimal number of clusters, e.g., via the elbow method or silhouette scores.
Alternatively, evaluating the feasibility of other clustering algorithms is also an option.
Furthermore, our approach could be extended with additional features that help to identify, for example, the gender, occupation, or age of authors.
Women might, for instance, use more adjectives than men, in which case blog posts could be divided into gender groups.
A group would then not only represent a writing style but would also tell the user what kind of author wrote its blog posts.
Another idea is to use our approach to find exact authors.
One possibility is to first identify the writing style group of a blog post and then search for the exact author within that group.
In that way the number of possible matching authors is reduced significantly.
Another possibility is to set the number of groups equal to the number of authors.
In the best case, each group would contain only blog posts of a single author.
Instead of characterizing the writing style, the label of each group would then be the author's name.
|
{"hexsha": "b6863ba234c0437ba8fa01d21095caae535e096b", "size": 3906, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "paper/sections/06_conclusion.tex", "max_stars_repo_name": "tabergma/similar_author_identification", "max_stars_repo_head_hexsha": "15ca2bd44f1ff19bf62317f7b146f501a2e60699", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "paper/sections/06_conclusion.tex", "max_issues_repo_name": "tabergma/similar_author_identification", "max_issues_repo_head_hexsha": "15ca2bd44f1ff19bf62317f7b146f501a2e60699", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paper/sections/06_conclusion.tex", "max_forks_repo_name": "tabergma/similar_author_identification", "max_forks_repo_head_hexsha": "15ca2bd44f1ff19bf62317f7b146f501a2e60699", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 78.12, "max_line_length": 199, "alphanum_fraction": 0.805171531, "num_tokens": 766}
|
# Objective: Find the size of the components of the neutral network of a goal.
# Methodology: Do multiple neutral random walks starting at a random circuit that maps to the goal.
# neutral_walk() accumulate all neighbors of all neutral circuits encountered on the random neutral walk.
# Then run_neutral_walk() combines the circuit lists returned by multiple random walks
# if they have a circuit in common.
# Thus, run_neutral_walk() returns circuit lists that have not been shown to be in the same connected
# component.
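# A minimal usage sketch (hypothetical values; the Parameters constructor
# arguments and the MyInt goal encoding are assumptions about the surrounding
# CGP code base, not documented API):
#   p = Parameters( 2, 1, 8, 5 )    # numinputs, numoutputs, numinteriors, numlevelsback
#   g = [0x0006]                    # a single-output goal
#   df = run_neutral_walk( [g], p, 10, 500, 100000, 20, csvfile="walks.csv" )
# Each row of the returned dataframe reports how many putative components
# remain after combining the walks for one goal.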
using Statistics, Printf
# steps is the number of random walk steps.
# maxsteps is the number of steps in mut_evolve().
# maxtries is the number of attempts to find the next chromosome in the random_neutral_walk.
# Assumes a single output goal
# Returns circuit_code_list of neutral circuits discovered on the neutral walk.
# Includes both walk circuits and neutral neighbors of walk circuits.
# Note that sometimes the function fails when the neutral walk cannot be extended.
function neutral_walk( g::Goal, p::Parameters, steps::Int64, maxsteps::Int64, maxtries::Int64 )
@assert p.numoutputs == 1
funcs = default_funcs( p.numinputs )
circuit_ints = Int64[]
n_repeats = 10 # maximum number of tries to find c that maps to g
complexity_sum = 0.0
complexity_count = 0
res = mut_evolve_repeat(n_repeats, p, [g], funcs, maxsteps )
c = res[1]
#println("complexity(c): ",complexity5(c))
complexity_sum += complexity5(c)
complexity_count += 1
#@assert sort(output_values(c)) == sort(g)
@assert output_values(c) == g # Assumes p.numoutputs==1
push!( circuit_ints, circuit_int(c) )
for i = 1:steps
(new_c,active) = mutate_chromosome!( deepcopy(c), funcs )
outputs = output_values(new_c)
attempts = 1
while attempts < maxtries && outputs != g # try to find a c to extend walk
(new_c,active) = mutate_chromosome!( deepcopy(c), funcs )
outputs = output_values(new_c)
#println("attempts: ",attempts," outputs: ",outputs," goal: ",g)
attempts += 1
end
    if outputs != g  # no neutral neighbor found by random sampling (also accepts a success on the final attempt)
# Alternatively, try all mutations of c in our attempt to extend walk
all_chromes = map( x->x[2], mutate_all( deepcopy(c), funcs, output_chromosomes=true ))
filter!( x->output_values(x)==g, all_chromes ) # only chromosomes that map to g
#println(" attempts == maxtries len(all_chromes): ",length(all_chromes))
if length(all_chromes) == 0
println("unable to extend random_neutral_walk in function neutral_walk() at step: ",i)
break
else
@assert output_values(all_chromes[1]) == g
c = rand(all_chromes)
end
else
c = new_c # neutral walk extended
end
complexity_sum += complexity5(c)
complexity_count += 1
@assert output_values(c) == g
# mutate_all() returns a list of pairs where the second element of the pair is the chromosome
all_chromes = map( x->x[2], mutate_all( c, funcs, output_chromosomes=true ))
    filter!( x->output_values(x)==g, all_chromes )  # keep only neutral neighbors (was a bug: tested c instead of x)
for ch in all_chromes
push!( circuit_ints, circuit_int(ch) )
end
circuit_ints = unique(circuit_ints)
end # for loop
(Set(circuit_ints), complexity_sum/complexity_count )
end
# Does multiple random walks and combines the returned circuit code lists if they have circuits in common.
function run_neutral_walk( g::Goal, p::Parameters, n_walks::Int64, steps::Int64, maxsteps::Int64, maxtries::Int64, int_list_file::IO )
walk_list = Int64[]
circuit_int_list = Set{Int64}[]
walk_results = pmap(x->neutral_walk( g, p, steps, maxsteps, maxtries), collect(1:n_walks))
println("after pmap()")
#walk_results = map(x->neutral_walk( g, p, steps, maxsteps, maxtries), collect(1:n_walks))
#println("walk_results[1]: ",walk_results[1])
complexity_avg = 0.0
for w = 1:n_walks
#cclist = Set(neutral_walk( g, p, steps, maxsteps, maxtries))
cclist = walk_results[w][1]
complexity_avg += walk_results[w][2]
#println("length(cclist): ",length(cclist))
to_combine = Int64[] # indices of circuit_int list to combine
for i = 1:length(circuit_int_list)
if length(intersect(cclist,circuit_int_list[i])) > 0
push!(to_combine,i)
end
end
if length(to_combine) > 0
println("w: ",w," to_combine: ",to_combine)
combined_list = cclist
for i = length(to_combine):-1:1
#println("combining circuit_int_list[",to_combine[i],"]")
combined_list = union(combined_list,circuit_int_list[to_combine[i]])
if i > 1
deleteat!( circuit_int_list, to_combine[i] )
#println("removing index: ",to_combine[i]," from circuit_int_list")
#println("walk_list before: ",walk_list)
#walk_list = setdiff(walk_list,to_combine[i])
deleteat!( walk_list, to_combine[i] )
#println("removing index: ",to_combine[i]," from walk list")
#println("walk_list after: ",walk_list)
end
end
circuit_int_list[to_combine[1]] = combined_list
else
push!(walk_list,w)
push!(circuit_int_list,cclist)
#println("walk list: ",walk_list)
#println(" lengths circuit_int_list: ", [length(circuit_int_list[k]) for k = 1:length(circuit_int_list)])
end
#println("w: ",w," walk_list after add: ",walk_list)
println("w: ",w," len: ",length(circuit_int_list)," lengths circuit_int_list: ", [length(circuit_int_list[k]) for k = 1:length(circuit_int_list)])
if w == n_walks
println(int_list_file,"goal: ",g," len: ",length(circuit_int_list)," lengths circuit_int_list: ", [length(circuit_int_list[k]) for k = 1:length(circuit_int_list)])
end
#print("w: ",w," length(circuit_int_list) after add: ",length(circuit_int_list))
#println(" lengths circuit_int_list: ",[length(ccl) for ccl in circuit_int_list])
for i = 1:length(circuit_int_list)
for j = 1:(i-1)
if !isempty(intersect(circuit_int_list[i],circuit_int_list[j]))
println("XXXXX i: ",i," j: ",j," len intersect: ",length(intersect(circuit_int_list[i],circuit_int_list[j])))
end
end
end
end # for w = 1:n_walks
#walk_list
(g,length(circuit_int_list),[length(ccl) for ccl in circuit_int_list],complexity_avg/n_walks)
end
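# Worked illustration of the combining step above (hypothetical circuit-int sets):
# if three walks return {1,2}, {5,6} and {2,5}, the third intersects both of the
# others, so all three are merged into {1,2,5,6} -- evidence that the walks stayed
# on a single connected component. A walk returning {7,8} would instead remain a
# separate entry in circuit_int_list.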
# Do multiple runs of run_neutral_walk() defined above for each goal in goallist gl.
# To do repeated runs on a goal, include it multiple times in the gl.
function run_neutral_walk( gl::GoalList, p::Parameters, n_walks::Int64, steps::Int64, maxsteps::Int64, maxtries::Int64;
csvfile::String="" )
df = DataFrame()
df.goal = Vector{MyInt}[]
df.numinputs = Int64[]
df.numoutputs = Int64[]
df.numints = Int64[]
df.numlevsback = Int64[]
df.n_walks = Int64[]
df.steps = Int64[]
df.maxsteps = Int64[]
df.maxtries = Int64[]
df.n_combined = Float64[]
df.complexity = Float64[]
  if length(csvfile) > 0
    println("file: ","$(csvfile[1:(end-4)])_ints.txt")
    int_list_file = open("$(csvfile[1:(end-4)])_ints.txt","w")
  else
    int_list_file = stdout  # no csvfile given: report the per-goal summaries to stdout
  end
result = map(g->run_neutral_walk( g, p, n_walks, steps, maxsteps, maxtries, int_list_file ), gl )
#println("after map()")
for r in result
push!(df,(r[1], p.numinputs, p.numoutputs, p.numinteriors, p.numlevelsback, n_walks, steps, maxsteps, maxtries, r[2], r[4] ))
end
hostname = chomp(open("/etc/hostname") do f read(f,String) end)
println("# date and time: ",Dates.now())
println("# host: ",hostname," with ",nprocs()-1," processes: " )
println("# funcs: ", Main.CGP.default_funcs(p.numinputs))
print_parameters(p,comment=true)
println("# n_walks: ",n_walks)
println("# steps: ",steps)
println("# maxsteps: ",maxsteps)
println("# maxtries: ",maxtries)
println("# ngoals: ",length(gl))
if length(csvfile) > 0
close(int_list_file)
open( csvfile, "w" ) do f
println(f,"# date and time: ",Dates.now())
println(f,"# host: ",hostname," with ",nprocs()-1," processes: " )
println(f,"# funcs: ", Main.CGP.default_funcs(p.numinputs))
print_parameters(f,p,comment=true)
println(f,"# steps: ",steps)
println(f,"# maxsteps: ",maxsteps)
println(f,"# maxtries: ",maxtries)
println(f,"# ngoals: ",length(gl))
CSV.write(f, df, append=true, writeheader=true )
end
end
return df
end
# Distribution of complexities on a neutral walk.
function neutral_walk_complexity( g::Goal, p::Parameters, steps::Int64, maxsteps::Int64, maxtries::Int64 )
@assert p.numoutputs == 1
funcs = default_funcs( p.numinputs )
walk_complexities = Float64[]
walk_count = 0
neighbor_complexities = Float64[]
  neighbor_count = 0
n_repeats = 10 # maximum number of tries to find c that maps to g
res = mut_evolve_repeat(n_repeats, p, [g], funcs, maxsteps )
c = res[1]
complexity = complexity5(c)
#println("w complexity(c): ",complexity)
push!(walk_complexities,complexity)
#@assert sort(output_values(c)) == sort(g)
@assert output_values(c) == g # Assumes p.numoutputs==1
for i = 1:steps
(new_c,active) = mutate_chromosome!( deepcopy(c), funcs )
outputs = output_values(new_c)
attempts = 1
while attempts < maxtries && outputs != g # try to find a c to extend walk
(new_c,active) = mutate_chromosome!( deepcopy(c), funcs )
outputs = output_values(new_c)
#println("attempts: ",attempts," outputs: ",outputs," goal: ",g)
attempts += 1
end
    if outputs != g  # no neutral neighbor found by random sampling (also accepts a success on the final attempt)
# Alternatively, try all mutations of c in our attempt to extend walk
all_chromes = map( x->x[2], mutate_all( deepcopy(c), funcs, output_chromosomes=true ))
filter!( x->output_values(x)==g, all_chromes ) # only chromosomes that map to g
#println(" attempts == maxtries len(all_chromes): ",length(all_chromes))
if length(all_chromes) == 0
println("unable to extend random_neutral_walk in function neutral_walk() at step: ",i)
continue
else
@assert output_values(all_chromes[1]) == g
c = rand(all_chromes)
end
else
c = new_c # neutral walk extended
end
@assert output_values(c) == g
complexity = complexity5(c)
#println("w complexity(c): ",complexity)
push!(walk_complexities,complexity)
walk_count += 1
# mutate_all() returns a list of pairs where the second element of the pair is the chromosome
all_chromes = map( x->x[2], mutate_all( c, funcs, output_chromosomes=true ))
    filter!( x->output_values(x)==g, all_chromes )  # keep only neutral neighbors (was a bug: tested c instead of x)
for ch in all_chromes
complexity = complexity5(ch)
#println("n complexity(c): ",complexity)
push!(neighbor_complexities,complexity)
neighbor_count += 1
end
end
#println("steps: ",steps," length(walk_c): ",length(walk_complexities)," length(nbr_c): ",length(neighbor_complexities))
(walk_complexities,neighbor_complexities)
end
# Distribution of complexities on a neutral walk.
function run_neutral_walk_complexity( goallist::GoalList, p::Parameters, n_walks::Int64, steps::Int64,
maxsteps::Int64, maxtries::Int64; csvfile::String="" )
if length(csvfile) > 0
f = open(csvfile,"w")
print_parameters(f,p)
println(f,"n_walks: ",n_walks)
println(f,"steps: ",steps)
println(f,"maxtries: ",maxtries)
end
for g in goallist
w_cmplx = fill(Float64[],n_walks)
n_cmplx = fill(Float64[],n_walks)
walk_complexities = Float64[]
neighbor_complexities = Float64[]
for w = 1:n_walks
(w_cmplx[w],n_cmplx[w]) = neutral_walk_complexity( g, p, steps, maxsteps, maxtries )
#println("w: ",w," length(w_cmplx): ",length(w_cmplx[w])," length(n_cmplx): ",length(n_cmplx[w]))
end
walk_complexities = vcat( w_cmplx... )
neighbor_complexities = vcat( n_cmplx... )
println("length(walk_complexities): ",length(walk_complexities)," length(neighbor_complexities): ",length(neighbor_complexities))
# Statistics
w_mean = mean(walk_complexities)
w_max = maximum(walk_complexities)
w_min = minimum(walk_complexities)
w_std = std(walk_complexities)
w_q = quantile(walk_complexities,[0.9,0.95,0.99])
n_mean = mean(neighbor_complexities)
n_max = maximum(neighbor_complexities)
n_min = minimum(neighbor_complexities)
n_std = std(neighbor_complexities)
n_q = quantile(neighbor_complexities,[0.9,0.95,0.99])
@printf("goal: [0x%04x]",g[1])
@printf(" w_count:%8d",length(walk_complexities))
@printf(" w_mean:%6.3f",w_mean)
@printf(" w_max:%6.3f",w_max)
@printf(" w_min:%6.3f",w_min)
@printf(" w_std:%6.3f",w_std)
@printf(" w_q90:%6.3f",w_q[1])
@printf(" w_q95:%6.3f",w_q[2])
@printf(" w_q99:%6.3f\n",w_q[3])
@printf("goal: [0x%04x]",g[1])
@printf(" n_count:%8d",length(neighbor_complexities))
@printf(" n_mean:%6.3f",n_mean)
@printf(" n_max:%6.3f",n_max)
@printf(" n_min:%6.3f",n_min)
@printf(" n_std:%6.3f",n_std)
@printf(" n_q90:%6.3f",n_q[1])
@printf(" n_q95:%6.3f",n_q[2])
@printf(" n_q99:%6.3f\n",n_q[3])
if length(csvfile) > 0
@printf(f,"goal: [0x%04x]",g[1])
@printf(f," w_count:%8d",length(walk_complexities))
@printf(f," w_mean:%6.3f",w_mean)
@printf(f," w_max:%6.3f",w_max)
@printf(f," w_min:%6.3f",w_min)
@printf(f," w_std:%6.3f",w_std)
@printf(f," w_q90:%6.3f",w_q[1])
@printf(f," w_q95:%6.3f",w_q[2])
@printf(f," w_q99:%6.3f\n",w_q[3])
@printf(f,"goal: [0x%04x]",g[1])
@printf(f," n_count:%8d",length(neighbor_complexities))
@printf(f," n_mean:%6.3f",n_mean)
@printf(f," n_max:%6.3f",n_max)
@printf(f," n_min:%6.3f",n_min)
@printf(f," n_std:%6.3f",n_std)
@printf(f," n_q90:%6.3f",n_q[1])
@printf(f," n_q95:%6.3f",n_q[2])
@printf(f," n_q99:%6.3f\n",n_q[3])
end
end
  if length(csvfile) > 0
    close(f)
  end
#(walk_complexities,neighbor_complexities)
end
function neutral_walk_connectivity( p::Parameters, g::Goal, max_steps::Int64, max_evolve_steps::Int64; c2_walk_length::Int64=200 )
funcs = default_funcs( p.numinputs )
res1 = mut_evolve( random_chromosome( p, funcs ), [g], funcs, max_evolve_steps )
if res1[2] < max_evolve_steps
c1 = res1[1]
else
error("failed to find circuit that maps to goal: ",g)
end
#print_build_chromosome( c1 )
res2 = mut_evolve( random_chromosome( p, funcs ), [g], funcs, max_evolve_steps )
if res2[2] < max_evolve_steps
c2 = res2[1]
else
error("failed to find circuit that maps to goal: ",g)
end
#print_build_chromosome( c2 )
neutral_walk_connectivity( c1, c2, c2_walk_length, max_steps )
end
function neutral_walk_connectivity( c1::Chromosome, c2::Chromosome, c2_walk_length::Int64, max_steps::Int64 )
max_attempts = 10
@assert c1.params.numoutputs == 1
  funcs = default_funcs( c1.params.numinputs )  # was a bug: p is undefined in this scope
c2_walk_list = random_neutral_walk( c2, c2_walk_length )
g = output_values(c1)
out2 = output_values(c2)
@assert g==out2
c_code = circuit_code(c1)
c_dist = circuit_distance_to_list( c1, c2_walk_list )
println("original c_dist: ",c_dist)
if c_dist == 0
return 0
end
c = deepcopy(c1)
@assert output_values(c) == g # Assumes p.numoutputs==1
for step = 1:max_steps
c_code = circuit_code(c)
(new_c,active) = mutate_chromosome!( deepcopy(c), funcs )
outputs = output_values(new_c)
attempts = 1
# try to find a c to extend walk
while attempts < max_attempts && (outputs != g || circuit_distance_to_list(new_c,c2_walk_list) > c_dist || circuit_code(new_c)==c_code )
(new_c,active) = mutate_chromosome!( deepcopy(c), funcs )
outputs = output_values(new_c)
#println("step: ",step," attempts: ",attempts," outputs: ",outputs," new_c_dist: ",circuit_distance(new_c,c2))
attempts += 1
end
if attempts == max_attempts
# Alternatively, try all mutations of c in our attempt to extend walk
c_code = circuit_code(c)
all_chromes = map( x->x[2], mutate_all( deepcopy(c), funcs, output_chromosomes=true ))
# filter to only chromosomes that map to g and do not increase circuit distance and change the circuit code
filter!( (x->output_values(x)==g && circuit_distance_to_list(x,c2_walk_list) <= c_dist && circuit_code(x)!=c_code), all_chromes )
println(" attempts == maxtries len(all_chromes): ",length(all_chromes))
if length(all_chromes) == 0
println("failed to extend random_neutral_walk in function neutral_walk() at step: ",step)
return -1
else
@assert output_values(all_chromes[1]) == g
c = rand(all_chromes)
c_dist = circuit_distance_to_list(c,c2_walk_list)
println("mx step: ",step," neutral walk extended new_cdist: ",c_dist," new_circuit_code: ",circuit_code(c))
@assert circuit_code(c) != c_code
end
else
c = new_c # neutral walk extended
c_dist = circuit_distance_to_list(c,c2_walk_list)
println("at step: ",step," neutral walk extended new_cdist: ",c_dist," new_circuit_code: ",circuit_code(c))
@assert circuit_code(c) != c_code
end
@assert output_values(c) == g
new_c_dist = circuit_distance_to_list( c, c2_walk_list )
@assert new_c_dist <= c_dist
c_dist = new_c_dist < c_dist ? new_c_dist : c_dist
if c_dist == 0.0
println("found path c1 to c2 after ",step," steps")
return step
end
end
println("failed to find path from c1 to c2")
return -1
end
function random_neutral_walk( c::Chromosome, walk_length::Int64, max_attempts::Int64=100 )
  funcs = default_funcs( c.params.numinputs )
  chrome_list = Chromosome[c]
  g = output_values(c)
  c_code = circuit_code(c)
  new_c = deepcopy(c)  # placeholder so that new_c and outputs are in scope below
  outputs = output_values(new_c)  # equals g, but circuit_code(new_c) == c_code forces the first mutation
  len = 0
  while len < walk_length
    attempts = 0
    while attempts < max_attempts && (outputs != g || circuit_code(new_c) == c_code )
      (new_c,active) = mutate_chromosome!( deepcopy(c), funcs )
      outputs = output_values(new_c)
      #println("attempts: ",attempts," outputs: ",outputs )
      attempts += 1
    end
    if outputs == g && circuit_code(new_c) != c_code  # a neutral step was found
c = new_c
c_code = circuit_code(c)
len += 1
push!( chrome_list, new_c )
else
break
end
end
return map( c->circuit_code(c), chrome_list )
end
function circuit_distance_to_list( c::Chromosome, c_code_list::Vector{Vector{Int64}} )
circuit_distance_to_list( circuit_code(c), c_code_list )
end
function circuit_distance_to_list( c_code::Vector{Int64}, c_code_list::Vector{Vector{Int64}} )
distance = length(c_code)
for cc in c_code_list
@assert length(c_code) == length(cc)
diff_count = 0
for i = 1:length(c_code)
diff_count += c_code[i] == cc[i] ? 0 : 1
end
distance = diff_count < distance ? diff_count : distance
end
distance/length(c_code)
end
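# Worked example (hypothetical codes): for c_code = [1,2,3,4] and
# c_code_list = [[1,2,3,9],[9,9,9,9]], the elementwise mismatch counts are 1 and 4,
# so the minimum is 1 and the returned normalized distance is 1/4 = 0.25.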
#=
function neutral_walk_connectivity( c1::Chromosome, c2::Chromosome, max_steps::Int64 )
@assert c1.params == c2.params
funcs = default_funcs( c1.params.numinputs )
out1 = output_values(c1)
out2 = output_values(c2)
@assert out1==out2
code1 = circuit_code(c1)
code2 = circuit_code(c2)
c_dist = circuit_distance( c1, c2 )
println("original c_dist: ",c_dist)
if c_dist == 0
return 0
end
new_c1 = mutate_chromosome!( deepcopy(c1), funcs )[1]
new_c2 = mutate_chromosome!( deepcopy(c2), funcs )[1]
dist1 = circuit_distance( new_c1, c2 )
dist2 = circuit_distance( new_c2, c1 )
println("orig dist1: ",dist1," orig dist2: ",dist2)
step = 1
while step < max_steps && c1 != c2
while step < max_steps && dist1 > c_dist && dist2 > c_dist
new_c1 = mutate_chromosome!( c1, funcs )[1]
new_c2 = mutate_chromosome!( c2, funcs )[1]
dist1 = circuit_distance( new_c1, c2 )
dist2 = circuit_distance( new_c2, c1 )
step += 1
end
println("after inner while loop. step: ",step)
if dist1 < c_dist
c1 = new_c1
c_dist = circuit_distance( c1, c2 )
println("c1 = new_c1 c_dist: ",c_dist)
elseif dist2 < c_dist
c2 = new_c2
c_dist = circuit_distance( c1, c2 )
println("c2 = new_c2 c_dist: ",c_dist)
end
if c_dist == 0
println("found path from c1 to c2 in ",step," steps")
return step
end
if dist1 == c_dist
c1 = new_c1
println("c1 = new_c1")
elseif dist2 == c_dist && circuit_distance( new_c2, c1 ) <= c_dist
c2 = new_c2
println("c2 = new_c2")
end
new_c1 = mutate_chromosome!( c1, funcs )[1]
new_c2 = mutate_chromosome!( c2, funcs )[1]
dist1 = circuit_distance( new_c1, c2 )
dist2 = circuit_distance( new_c2, c1 )
println("new dist1: ",dist1," new dist2: ",dist2)
step += 1
end
if c1 == c2
println("found path from c1 to c2 in ",step," steps")
else
println("failed to find path from c1 to c2 in ",step," steps")
end
return step
end
=#
|
{"hexsha": "67c46a27c797e050d595abca0deafeeb3f83c3f1", "size": 20961, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/neutral_walk_connectivity.jl", "max_stars_repo_name": "ahalwright/CGP.jl", "max_stars_repo_head_hexsha": "73952fcb08d2e6ee39e4df142e14ea34e4c09226", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/neutral_walk_connectivity.jl", "max_issues_repo_name": "ahalwright/CGP.jl", "max_issues_repo_head_hexsha": "73952fcb08d2e6ee39e4df142e14ea34e4c09226", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/neutral_walk_connectivity.jl", "max_forks_repo_name": "ahalwright/CGP.jl", "max_forks_repo_head_hexsha": "73952fcb08d2e6ee39e4df142e14ea34e4c09226", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.7009708738, "max_line_length": 171, "alphanum_fraction": 0.6621821478, "num_tokens": 6290}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 6 11:36:46 2019
@author: CatOnTour
"""
import mpmath as mp
import numpy as np
import matplotlib.pyplot as plt
from sympy import *  # Symbol, simplify, poly, expand, lambdify, Matrix, ...
from sympy.abc import s  # Laplace variable
from scipy import signal
from cardioLPN import *
def sympy_to_lti(xpr, s=Symbol('s')):
""" Convert Sympy transfer function polynomial to Scipy LTI """
num, den = simplify(xpr).as_numer_denom() # expressions
p_num_den = poly(num, s), poly(den, s) # polynomials
c_num_den = [expand(p).all_coeffs() for p in p_num_den] # coefficients
l_num, l_den = [lambdify((), c)() for c in c_num_den] # convert to floats
return signal.lti(l_num, l_den)
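# Hypothetical usage sketch for sympy_to_lti (H_s is an assumed example transfer
# function, not part of the circuit model below):
#   H_s = 1 / (s**2 + 2*s + 1)
#   lti_sys = sympy_to_lti(H_s)            # scipy LTI with num=[1], den=[1, 2, 1]
#   t_step, y_step = signal.step(lti_sys)  # e.g. compute its step response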
#R, L, C = symbols('R L C', positive=True)
U_2, I_2 = symbols('U_2 I_2', positive=True)
U, I = symbols('U I', positive=True)
#t = symbols('t', positive=True, real = True)
#s = symbols('s', positive=True)
L = 10
C = 2
R = 1
A_CLR = A_C(C) * A_L(L) * A_R(R)
A_CRL = A_C(C) * A_R(R) * A_L(L)
A_LCR = A_L(L) * A_C(C) * A_R(R)
A_LRC = A_L(L) * A_R(R) * A_C(C)
A_RLC = A_R(R) * A_L(L) * A_C(C)
A_RCL = A_R(R) * A_C(C) * A_L(L)
Z_CLR = simplify(trans_A2Z(A_CLR))
Y_CLR = simplify(trans_A2Y(A_CLR))
H_CLR = simplify(trans_A2H(A_CLR))
P_CLR = simplify(trans_A2P(A_CLR))
B_CLR = simplify(trans_A2B(A_CLR))
# Laplace-domain expressions for the port signals U_2 and I_2
x_21 = 1/(s*(1+s*5))
#U_2 = 1/(s**2 + 0.5*s + 2)
x_22 = 1/(s*(1+s*15))
# stack them into the port vector x_2 = [U_2, I_2]^T
x_2 = Matrix([x_21, x_22])
''' Outputs '''
x_A = A_CLR * x_2
x_Z = Z_CLR * x_2
x_Y = Y_CLR * x_2
x_H = H_CLR * x_2
x_P = P_CLR * x_2
x_B = B_CLR * x_2
#
x_A_func = lambdify(s, x_A[0])
x_Z_func = lambdify(s, x_Z[0])
x_Y_func = lambdify(s, x_Y[0])
x_H_func = lambdify(s, x_H[0])
x_P_func = lambdify(s, x_P[0])
x_B_func = lambdify(s, x_B[0])
#t = np.linspace(0.01,20,20)
t = np.linspace(0.01,20,50)
X_A = []
X_Z = []
X_Y = []
X_H = []
X_P = []
X_B = []
for i in t:
X_A.append(mp.invertlaplace(x_A_func, i, method = 'dehoog', dps = 10, degree = 18))
X_Z.append(mp.invertlaplace(x_Z_func, i, method = 'dehoog', dps = 10, degree = 18))
X_Y.append(mp.invertlaplace(x_Y_func, i, method = 'dehoog', dps = 10, degree = 18))
X_H.append(mp.invertlaplace(x_H_func, i, method = 'dehoog', dps = 10, degree = 18))
X_P.append(mp.invertlaplace(x_P_func, i, method = 'dehoog', dps = 10, degree = 18))
X_B.append(mp.invertlaplace(x_B_func, i, method = 'dehoog', dps = 10, degree = 18))
x_21_func = lambdify(s, x_21)
x_21_time = []
for i in t:
x_21_time.append(mp.invertlaplace(x_21_func, i, method = 'dehoog', dps = 10, degree = 18))
plt.plot(t, x_21_time, "k", label = "U2")
plt.legend()
plt.savefig("constellation_x_21.png")
plt.show()
plt.plot(t, X_A, "k*", label = "A")
plt.plot(t, X_Z, "c", label = "Z")
plt.plot(t, X_Y, "g", label = "Y")
plt.plot(t, X_H, "y", label = "H")
plt.plot(t, X_P, "b", label = "P")
plt.plot(t, X_B, "r*", label = "B")
plt.legend()
plt.savefig("constellation_X.png")
plt.show()
|
{"hexsha": "3bf561d3c68d810ed10a72a02dfdf7cef6121e7d", "size": 3036, "ext": "py", "lang": "Python", "max_stars_repo_path": "solving_ODE_configurations.py", "max_stars_repo_name": "xi2pi/cardioLPN", "max_stars_repo_head_hexsha": "34759fea55f73312ccb8fb645ce2d04a0e2dddea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "solving_ODE_configurations.py", "max_issues_repo_name": "xi2pi/cardioLPN", "max_issues_repo_head_hexsha": "34759fea55f73312ccb8fb645ce2d04a0e2dddea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "solving_ODE_configurations.py", "max_forks_repo_name": "xi2pi/cardioLPN", "max_forks_repo_head_hexsha": "34759fea55f73312ccb8fb645ce2d04a0e2dddea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6829268293, "max_line_length": 94, "alphanum_fraction": 0.639657444, "include": true, "reason": "import numpy,from scipy,from sympy,import mpmath", "num_tokens": 1157}
|
/-
Copyright (c) 2021 Yaël Dillies, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta
-/
import combinatorics.simplicial_complex.extreme
open_locale classical affine big_operators
open set
--TODO: Generalise to LCTVS
variables {E : Type*} [normed_group E] [normed_space ℝ E] {x y : E} {A B : set E}
namespace affine
def intrinsic_interior (A : set E) :
set E :=
{x ∈ A | ∃ (ι : Type*) (s : finset ι) (w : ι → ℝ) (z : ι → E) (hs : A ⊆ affine_span ℝ (z '' s))
(hw₀ : ∀ i ∈ s, 0 < w i) (hw₁ : ∑ i in s, w i = 1) (hz : ∀ i ∈ s, z i ∈ A),
s.center_mass w z = x}
def intrinsic_frontier (A : set E) :
set E :=
{x ∈ A | ∀ (ι : Type*) (s : finset ι) (w : ι → ℝ) (z : ι → E) (hs : A ⊆ affine_span ℝ (z '' s))
(hw₀ : ∀ i ∈ s, 0 ≤ w i) (hw₁ : ∑ i in s, w i = 1) (hz : ∀ i ∈ s, z i ∈ A)
(hx : s.center_mass w z = x), ∃ i : ι, w i = 0}
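-- Informal reading (not part of the formal development): a point of `A` lies in
-- `intrinsic_interior A` iff it is the center of mass, with strictly positive
-- weights summing to one, of points of `A` whose affine span contains `A`;
-- `intrinsic_frontier A` collects the points of `A` for which every such
-- representation with nonnegative weights forces some weight to vanish.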
lemma intrinsic_interior_subset (A : set E) :
intrinsic_interior A ⊆ A :=
λ x hx, hx.1
lemma intrinsic_frontier_subset (A : set E) :
intrinsic_frontier A ⊆ A :=
λ x hx, hx.1
lemma convex.open_segment_subset_intrinsic_interior_of_mem_left (hA : convex A)
(x ∈ intrinsic_interior A) (y ∈ A) :
open_segment x y ⊆ intrinsic_interior A :=
begin
rintro z hz,
split,
{
sorry -- hA
},
dsimp,
--obtain ⟨x₁, x₂, hx₁, hx₂, x, ⟨hxA, ι, t, hw₀, hw₁, hyA, hy⟩, hx⟩ := sorry,
sorry
end
lemma eq_intrinsic_interior_union_intrinsic_frontier :
A = intrinsic_interior A ∪ intrinsic_frontier A := sorry
lemma intrinsic_frontier.is_extreme :
is_extreme A (intrinsic_frontier A) :=
begin
use intrinsic_frontier_subset _,
rintro x₁ x₂ hx₁ hx₂ x hxA hx,
sorry
end
/-def intrinsic_interior (A : set E) :
set E :=
{x ∈ A | ∀ y ∈ A, ∃ z ∈ A, x ∈ open_segment y z}
def intrinsic_frontier (A : set E) :
set E :=
{x ∈ A | ∃ y ∈ A, ∀ z ∈ A, x ∉ open_segment y z}
lemma intrinsic_interior_subset (A : set E) :
intrinsic_interior A ⊆ A :=
λ x hx, hx.1
lemma intrinsic_frontier_subset (A : set E) :
intrinsic_frontier A ⊆ A :=
λ x hx, hx.1
lemma intrinsic_frontier.is_extreme :
is_extreme A (intrinsic_frontier A) :=
begin
use intrinsic_frontier_subset _,
rintro x₁ x₂ hx₁ hx₂ x ⟨hxA, y, hyA, hy⟩ hx,
split,
{
use [hx₁, y, hyA],
rintro z hz,
}
end-/
/-
def intrinsic_frontier (A : set E) :
set E :=
coe '' (frontier {x : affine_span ℝ A | ↑x ∈ A})
def intrinsic_interior (A : set E) :
set E :=
coe '' (interior {x : affine_span ℝ A | ↑x ∈ A})
def intrinsic_closure (A : set E) :
set E :=
coe '' (closure {x : affine_span ℝ A | ↑x ∈ A})
lemma intrinsic_frontier_empty :
intrinsic_frontier (∅ : set E) = ∅ :=
begin
apply subset_empty_iff.1,
rintro x ⟨x', hx', hxx'⟩,
simp at hx',
exact hx',
end
lemma intrinsic_interior_empty :
intrinsic_frontier (∅ : set E) = ∅ :=
begin
apply subset_empty_iff.1,
rintro x ⟨x', hx', hxx'⟩,
simp at hx',
exact hx',
end
lemma nonempty_intrinsic_interior (hA : A.nonempty) :
(intrinsic_interior A).nonempty :=
begin
end
lemma coe_closure_subset_closure_aux (B : set E) :
coe '' closure {x : affine_span ℝ A | ↑x ∈ B} ⊆ closure B :=
begin
rintro _ ⟨x, hx, rfl⟩,
rw mem_closure_iff_seq_limit at ⊢ hx,
obtain ⟨f, hfB, hflim⟩ := hx,
exact ⟨λ y, f y, hfB, by rwa ←embedding.tendsto_nhds_iff embedding_subtype_coe⟩,
end
lemma closure_eq_intrinsic_closure :
closure A = coe '' (closure {x : affine_span ℝ A | ↑x ∈ A}) :=
begin
refine subset.antisymm _ (coe_closure_subset_closure_aux A),
rintro x hxA,
rw mem_closure_iff_seq_limit at hxA,
obtain ⟨f, hfA, hflim⟩ := hxA,
simp_rw [mem_image, closure_induced],
split,
sorry,
sorry,
end
lemma closure_eq_intrinsic_interior_union_intrinsic_frontier :
closure A = intrinsic_interior A ∪ intrinsic_frontier A :=
begin
ext x,
rw [closure_eq_intrinsic_closure, closure_eq_interior_union_frontier],
split,
{ rintro ⟨x', (hx' | hx'), rfl⟩,
{ left,
exact ⟨x', hx', rfl⟩ },
right,
exact ⟨x', hx', rfl⟩ },
rintro (⟨x', hx', rfl⟩ | ⟨x', hx', rfl⟩),
exacts [⟨x', by {left, exact hx'}, rfl⟩, ⟨x', by {right, exact hx'}, rfl⟩],
end
lemma intrinsic_interior_subset_closure :
intrinsic_interior A ⊆ closure A :=
begin
rw closure_eq_intrinsic_interior_union_intrinsic_frontier,
exact subset_union_left _ _,
end
lemma intrinsic_frontier_subset_closure :
intrinsic_frontier A ⊆ closure A :=
begin
rw closure_eq_intrinsic_interior_union_intrinsic_frontier,
exact subset_union_right _ _,
end
lemma disjoint_intrinsic_interior_intrinsic_frontier :
disjoint (intrinsic_interior A) (intrinsic_frontier A) :=
begin
rintro x ⟨⟨x₁, hx₁, rfl⟩, x₂, hx₂, hx₁x₂⟩,
rw subtype.ext hx₁x₂ at hx₂,
exact hx₂.2 hx₁,
end
lemma intrinsic_frontier_eq_closure_diff_intrinsic_interior :
intrinsic_frontier A = closure A \ intrinsic_interior A :=
by rw [closure_eq_intrinsic_interior_union_intrinsic_frontier,
set.union_diff_cancel_left disjoint_intrinsic_interior_intrinsic_frontier]
lemma intrinsic_interior_eq_closure_diff_intrinsic_frontier :
intrinsic_interior A = closure A \ intrinsic_frontier A :=
by rw [intrinsic_frontier_eq_closure_diff_intrinsic_interior, diff_diff_right, diff_self,
empty_union, inter_eq_self_of_subset_right intrinsic_interior_subset_closure]
lemma intrinsic_frontier_subset_frontier :
intrinsic_frontier A ⊆ frontier A :=
begin
rintro x hx,
unfold intrinsic_frontier at hx,
rw frontier_eq_closure_inter_closure at ⊢ hx,
obtain ⟨x', hx', rfl⟩ := hx,
exact ⟨coe_closure_subset_closure_aux _ ⟨x', hx'.1, rfl⟩, coe_closure_subset_closure_aux Aᶜ
⟨x', hx'.2, rfl⟩⟩,
end
lemma interior_subset_intrinsic_interior :
interior A ⊆ intrinsic_interior A :=
begin
rw [interior_eq_closure_diff_frontier, intrinsic_interior_eq_closure_diff_intrinsic_frontier],
exact diff_subset_diff_right intrinsic_frontier_subset_frontier,
end
--rewrite the condition to something about dimension?
lemma intrinsic_frontier_eq_frontier (hA : affine_span ℝ A = ⊤) :
intrinsic_frontier A = frontier A :=
begin
apply subset.antisymm intrinsic_frontier_subset_frontier,
rintro x hx,
have hxA : x ∈ affine_span ℝ A,
{
rw hA,
sorry,
},
refine ⟨⟨x, hxA⟩, _, rfl⟩,
sorry
end
lemma intrinsic_frontier_convex_hull_eq (hA : affine_independent ℝ (λ p, p : A → E)) :
intrinsic_frontier (convex_hull A) = ⋃ B ⊂ A, convex_hull B :=
begin
sorry --damn hard
end-/
end affine
|
{"author": "mmasdeu", "repo": "brouwerfixedpoint", "sha": "548270f79ecf12d7e20a256806ccb9fcf57b87e2", "save_path": "github-repos/lean/mmasdeu-brouwerfixedpoint", "path": "github-repos/lean/mmasdeu-brouwerfixedpoint/brouwerfixedpoint-548270f79ecf12d7e20a256806ccb9fcf57b87e2/src/combinatorics/simplicial_complex/intrinsic.lean"}
|
"""Module representing the Stroop Test protocol."""
# from typing import Dict, Tuple, Union, Optional, Sequence
from typing import Any, Dict, Optional
# import pandas as pd
# import numpy as np
# import matplotlib.pyplot as plt
# import matplotlib.ticker as mticks
# import seaborn as sns
#
# import biopsykit.colors as colors
# import biopsykit.protocols.plotting as plot
from biopsykit.protocols import BaseProtocol
class Stroop(BaseProtocol):
"""Class representing the Stroop Test and data collected while conducting the Stroop test.
# TODO add further documentation
"""
    def __init__(self, name: Optional[str] = None, structure: Optional[Dict[str, Any]] = None, **kwargs):
if name is None:
name = "Stroop"
if structure is None:
structure = {
"Part1": None,
"Stroop": {
"Stroop1": 180,
"Stroop2": 180,
"Stroop3": 180,
},
"Part2": None,
}
test_times = kwargs.pop("test_times", [0, 10])
hr_mean_plot_params = {"xlabel": "Stroop Phases"}
hr_mean_plot_params.update(kwargs.pop("hr_mean_plot_params", {}))
saliva_plot_params = {"test_title": "Stroop", "xlabel": "Time relative to Stroop start [min]"}
saliva_plot_params.update(kwargs.pop("saliva_plot_params", {}))
kwargs.update({"hr_mean_plot_params": hr_mean_plot_params, "saliva_plot_params": saliva_plot_params})
super().__init__(name=name, structure=structure, test_times=test_times, **kwargs)
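        # Hypothetical usage sketch (the keyword values below are assumptions,
        # not a documented API; BaseProtocol consumes the plot parameters):
        #   stroop = Stroop()  # default: three 180 s Stroop phases
        #   stroop = Stroop(name="Stroop_Pilot", test_times=[0, 10])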
#
# self.hr_ensemble_plot_params = {
# "colormap": colors.fau_palette_blue("ensemble_3"),
# "line_styles": ["-", "--", "-."],
# "ensemble_alpha": 0.4,
# "background_color": ["#e0e0e0", "#9e9e9e", "#757575"],
# "background_alpha": [0.5, 0.5, 0.5],
# "fontsize": 14,
# "xaxis_label": r"Time [s] ",
# "xaxis_minor_ticks": mticks.MultipleLocator(60),
# "yaxis_label": r"$\Delta$Mean HR [bpm]",
# "legend_loc": "upper right",
# "legend_bbox_to_anchor": (0.25, 0.90),
# "phase_text": "Stroop Phase {}",
# "end_phase_text": "End Phase {}",
# "end_phase_line_color": "#e0e0e0",
# "end_phase_line_style": "dashed",
# "end_phase_line_width": 2.0,
# }
# self.stroop_plot_params = {
# "colormap": colors.fau_palette_blue("ensemble_3"),
# "line_styles": ["-", "--", "-."],
# "background_color": ["#e0e0e0", "#9e9e9e", "#757575"],
# "background_alpha": [0.5, 0.5, 0.5],
# "fontsize": 14,
# "xaxis_label": r"Stroop phases",
# "xaxis_minor_ticks": mticks.MultipleLocator(60),
# "yaxis_label": r"$\Delta$Mean HR [bpm]",
# "legend_loc": "upper right",
# "legend_bbox_to_anchor": (1.00, 0.90),
# "phase_text": "Stroop Phase {}",
# }
#
# self.hr_mean_plot_params = {
# "colormap": colors.fau_palette_blue("line_2"),
# "line_styles": ["-", "--"],
# "markers": ["o", "P"],
# "background_color": ["#e0e0e0", "#bdbdbd", "#9e9e9e"],
# "background_alpha": [0.5, 0.5, 0.5],
# "x_offsets": [0, 0.05],
# "fontsize": 14,
# "xaxis_label": "Stroop Subphases",
# "yaxis_label": r"$\Delta$HR [%]",
# "mist_phase_text": "MIST Phase {}",
# }
# def hr_ensemble_plot(
# self,
# data: Dict[str, pd.DataFrame],
# plot_params: Optional[Dict] = None,
# ylims: Optional[Sequence[float]] = None,
# ax: Optional[plt.Axes] = None,
# is_group_dict: Optional[bool] = False,
# **kwargs,
# ) -> Union[Tuple[plt.Figure, plt.Axes], None]:
# """
# Plots the course of heart rate during each Stroop subphase continuously as ensemble plot
# (mean ± standard error).
# Simply pass a 'Stroop dict' dictionary with one pandas heart rate dataframe per Stroop subphase
# (see ``Stroop.concat_stroop_dicts`` for further explanation), i.e. heart rate data with one column
# per subject.
#
# Parameters
# ----------
# data : dict
# dict with heart rate data to plot
# plot_params : dict, optional
# dict with adjustable parameters specific for this plot or ``None`` to keep default parameter values.
# For an overview of parameters and their default values, see `mist.hr_ensemble_params`
# ylims : list, optional
# y axis limits or ``None`` to infer y axis limits from data. Default: ``None``
# ax : plt.Axes, optional
# Axes to plot on, otherwise create a new one. Default: ``None``
#
# Returns
# -------
# tuple or none
# Tuple of Figure and Axes or None if Axes object was passed
# """
#
# import matplotlib.patches as mpatch
#
# fig: Union[plt.Figure, None] = None
# if ax is None:
# if "figsize" in kwargs:
# figsize = kwargs["figsize"]
# else:
# figsize = plt.rcParams["figure.figsize"]
# fig, ax = plt.subplots(figsize=figsize)
#
# if plot_params:
# self.hr_ensemble_plot_params.update(plot_params)
#
# # sns.despine()
# sns.set_palette(self.hr_ensemble_plot_params["colormap"])
# line_styles = self.hr_ensemble_plot_params["line_styles"]
# fontsize = self.hr_ensemble_plot_params["fontsize"]
# xaxis_label = self.hr_ensemble_plot_params["xaxis_label"]
# yaxis_label = self.hr_ensemble_plot_params["yaxis_label"]
# xaxis_minor_ticks = self.hr_ensemble_plot_params["xaxis_minor_ticks"]
# ensemble_alpha = self.hr_ensemble_plot_params["ensemble_alpha"]
# bg_color = self.hr_ensemble_plot_params["background_color"]
# bg_alpha = self.hr_ensemble_plot_params["background_alpha"]
# phase_text = self.hr_ensemble_plot_params["phase_text"]
# end_phase_text = self.hr_ensemble_plot_params["end_phase_text"]
# end_phase_color = self.hr_ensemble_plot_params["end_phase_line_color"]
# end_phase_line_style = self.hr_ensemble_plot_params["end_phase_line_style"]
# end_phase_line_width = self.hr_ensemble_plot_params["end_phase_line_width"]
# legend_loc = self.hr_ensemble_plot_params["legend_loc"]
# legend_bbox_to_anchor = self.hr_ensemble_plot_params["legend_bbox_to_anchor"]
#
# subphases = np.array(self.subphases)
# # mist_dur = [len(v) for v in data.values()]
# start_end = [
# (0, self.subphase_durations[0]),
# (
# self.subphase_durations[0],
# self.subphase_durations[0] + self.subphase_durations[1],
# ),
# ]
#
# if is_group_dict:
# for j, condition in enumerate(data):
# mist_dur = [len(v) for v in data[condition].values()]
# for i, key in enumerate(data[condition]):
# pal = sns.color_palette()[j]
#
# hr_mist = data[condition][key]
# x = hr_mist.index
# hr_mean = hr_mist.mean(axis=1)
# hr_stderr = hr_mist.std(axis=1) / np.sqrt(hr_mist.shape[1])
# ax.plot(
# x,
# hr_mean,
# zorder=2,
# label=phase_text.format(i + 1) + " - " + condition,
# linestyle=line_styles[i],
# color=pal,
# )
# ax.fill_between(
# x,
# hr_mean - hr_stderr,
# hr_mean + hr_stderr,
# zorder=1,
# alpha=ensemble_alpha,
# )
# ax.vlines(
# x=mist_dur[i] - 0.5,
# ymin=0,
# ymax=1,
# transform=ax.get_xaxis_transform(),
# ls=end_phase_line_style,
# lw=end_phase_line_width,
# colors=end_phase_color,
# zorder=3,
# )
# ax.annotate(
# text=end_phase_text.format(i + 1),
# xy=(mist_dur[i], 0.85 - 0.05 * i),
# xytext=(-5, 0),
# xycoords=ax.get_xaxis_transform(),
# textcoords="offset points",
# ha="right",
# fontsize=fontsize - 4,
# bbox=dict(facecolor="#e0e0e0", alpha=0.7, boxstyle="round"),
# zorder=3,
# )
# ax.legend(loc=legend_loc, bbox_to_anchor=(0.20, 0.3), prop={"size": fontsize})
# else:
# mist_dur = [len(v) for v in data.values()]
# for i, key in enumerate(data):
# hr_mist = data[key]
# x = hr_mist.index
# hr_mean = hr_mist.mean(axis=1)
# hr_stderr = hr_mist.std(axis=1) / np.sqrt(hr_mist.shape[1])
# ax.plot(
# x,
# hr_mean,
# zorder=2,
# label=phase_text.format(i + 1),
# linestyle=line_styles[i],
# )
# ax.fill_between(
# x,
# hr_mean - hr_stderr,
# hr_mean + hr_stderr,
# zorder=1,
# alpha=ensemble_alpha,
# )
# ax.vlines(
# x=mist_dur[i] - 0.5,
# ymin=0,
# ymax=1,
# transform=ax.get_xaxis_transform(),
# ls=end_phase_line_style,
# lw=end_phase_line_width,
# colors=end_phase_color,
# zorder=3,
# )
# ax.annotate(
# text=end_phase_text.format(i + 1),
# xy=(mist_dur[i], 0.85 - 0.05 * i),
# xytext=(-5, 0),
# xycoords=ax.get_xaxis_transform(),
# textcoords="offset points",
# ha="right",
# fontsize=fontsize - 4,
# bbox=dict(facecolor="#e0e0e0", alpha=0.7, boxstyle="round"),
# zorder=3,
# )
# ax.legend(
# loc=legend_loc,
# bbox_to_anchor=legend_bbox_to_anchor,
# prop={"size": fontsize},
# )
#
# for (start, end), subphase in zip(start_end, subphases):
# ax.text(
# x=start + 0.5 * (end - start),
# y=0.95,
# transform=ax.get_xaxis_transform(),
# s=subphase,
# ha="center",
# va="center",
# fontsize=fontsize,
# )
# p = mpatch.Rectangle(
# xy=(0, 0.9),
# width=1,
# height=0.1,
# transform=ax.transAxes,
# color="white",
# alpha=0.4,
# zorder=3,
# lw=0,
# )
# ax.add_patch(p)
#
# for (start, end), color, alpha in zip(start_end, bg_color, bg_alpha):
# ax.axvspan(start, end, color=color, alpha=alpha, zorder=0, lw=0)
#
# ax.set_xlabel(xaxis_label, fontsize=fontsize)
# ax.set_xticks([start for (start, end) in start_end])
# ax.xaxis.set_minor_locator(xaxis_minor_ticks)
# ax.tick_params(axis="x", which="both", bottom=True)
#
# ax.set_ylabel(yaxis_label, fontsize=fontsize)
# ax.tick_params(axis="y", which="major", left=True)
#
# if ylims:
# ax.margins(x=0)
# ax.set_ylim(ylims)
# else:
# ax.margins(0, 0.1)
#
# if fig:
# fig.tight_layout()
# return fig, ax
#
# def hr_mean_subphases(
# self,
# data: Union[
# Dict[str, Dict[str, pd.DataFrame]],
# Dict[str, Dict[str, Dict[str, pd.DataFrame]]],
# ],
# is_group_dict: Optional[bool] = False,
# ) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
# """
# Computes the heart rate mean and standard error per Stroop phase over all subjects.
# See ``bp.protocols.utils.hr_course`` for further information.
#
# Parameters
# ----------
# data : dict
# nested dictionary containing heart rate data.
# is_group_dict : boolean, optional
# ``True`` if `data` is a group dict, i.e. contains dictionaries for multiple groups, ``False`` otherwise.
# Default: ``False``
#
# Returns
# -------
# dict or pd.DataFrame
# 'mse dataframe' or dict of 'mse dataframes', one dataframe per group, if `group_dict` is ``True``.
# """
#
# return super().mean_se_subphases(data, subphases=self.subphases, is_group_dict=is_group_dict)
#
# def stroop_dict_to_dataframe(
# self,
# dict_stroop=Dict[str, Dict],
# columns: Optional[Sequence[str]] = None,
# is_group_dict: Optional[bool] = False,
# ) -> pd.DataFrame:
# """
# Converts the dictionary into one dataframe with a MultiIndex (subject, phase). The structure needs to
# be the same as derived from load_stroop_inquisit_data.
#
# The dictionary can also be a group dictionary. In this case, the MultiIndex is expanded with 'group'.
#
# Parameters
# ----------
# dict_stroop : dict
# dictionary which should be converted into a dataframe. The structure should be as followed:
# {'subject': {'subphase' : data,...},..} or as group_dict
# {'group':{'subject': {'subphase' : data,...},..},..}
# columns : str
# column names which should be used.
# Default: ``None`` -> all existing columns are used.
# is_group_dict : bool
# ``True`` if `data` is a group dict, i.e. contains dictionaries for multiple groups, ``False`` otherwise.
# Default: ``False``
# Returns
# -------
# dataframe :
# dataframe with the stroop test data ordered by (group), subject and subphase.
# """
# df_stroop = pd.DataFrame()
#
# if is_group_dict:
# for group, dict_data in dict_stroop.items():
# for subject, data in dict_data.items():
# for subphase, df in data.items():
# df_stroop = pd.concat([df_stroop, df.set_index([[group], [subject], [subphase]])])
# df_stroop.index.names = ["group", "subject", "subphase"]
# else:
# for subject, data in dict_stroop.items():
# for subphase, df in data.items():
# df_stroop = pd.concat([df_stroop, df.set_index([[subject], [subphase]])])
# df_stroop.index.names = ["subject", "subphase"]
#
# if columns:
# df_stroop = df_stroop[columns]
#
# return df_stroop
#
# def stroop_mean_se(self, data=pd.DataFrame, is_group_dict: Optional[bool] = False) -> pd.DataFrame:
# """
# Computes the mean and standard error of the stroop test data per Stroop subphase over all subjects.
#
# Parameters
# ----------
# data : pd.Dataframe
# dataframe with data from the stroop test of which mean and standard error should be computed.
# It has to be one dataframe which is in the kind of format as returned by `stroop_dict_to_dataframe`
# is_group_dict : bool
# ``True`` if `data` is a group dict, i.e. contains dictionaries for multiple groups, ``False`` otherwise.
# Default: ``False``
#
# Returns
# -------
# dataframe:
# dataframe with mean and standard deviation values.
# """
# if is_group_dict:
# index = [("group", "subphase")]
# else:
# index = ["subphase"]
#
# mean = data.mean(level=index).add_suffix("_mean")
# std = data.std(level=index).add_suffix("_std")
# df_mean_se = mean.join(std)
#
# # scale correct answers to percent
# if ("correct_mean" and "correct_std") in df_mean_se.columns:
# df_mean_se[["correct_mean", "correct_std"]] = df_mean_se[["correct_mean", "correct_std"]] * 100
#
# return df_mean_se
#
# def stroop_plot(
# self,
# data=pd.DataFrame,
# variable: Optional[str] = "meanRT",
# is_group_dict: Optional[bool] = False,
# group_col: Optional[str] = "condition",
# ylims: Optional[Sequence[float]] = None,
# ax: Optional[plt.Axes] = None,
# **kwargs,
# ) -> Union[Tuple[plt.Figure, plt.Axes], None]:
# """
# Plots the mean response time or correct answers during the different Stroop task
# (mean ± standard error per phase).
#
# In case of only one group a pandas dataframe can be passed.
#
# In case of multiple groups either a dictionary of pandas dataframes can be passed, where each dataframe
# belongs to one group, or one dataframe with a column indicating group membership (parameter ``group_col``).
#
# Regardless of the kind of input the dataframes need to be in the format of a 'mean dataframe', as returned
# by ``stroop_mean`` (see ``Stroop.stroop_mean`` for further information).
#
#
# Parameters
# ----------
# data : dataframe or dict
# Mean response/Correct answers data to plot. It has to be one dataframe which is in the kind of format as
# returned by `stroop_mean_se`
# variable : str
# Determines if the mean response times (``meanRT``) or correct answers (``propcorrect``) of the stroop
# test should be plotted.
# Default: ``meanRT``
# is_group_dict : bool, optional:
# List of group names. If ``None`` is passed, the groups and their order are inferred from the
# dictionary keys or from the unique values in `group_col`. If list is supplied the groups are
# plotted in that order.
# Default: ``None``
# group_col : str, optional
# Name of group column in the dataframe in case of multiple groups and one dataframe
# ylims : Tuple(int,int)
# Integer to scale the y axes.
# Default: ``None``
# ax : plt.Axes, optional
# Axes to plot on, otherwise create a new one. Default: ``None``
# kwargs: dict, optional
# optional parameters to be passed to the plot, such as:
# * figsize: tuple specifying figure dimensions
# * ylims: list to manually specify y-axis limits, float to specify y-axis margin (see ``Axes.margin()``
# for further information), None to automatically infer y-axis limits
# """
#
# fig: Union[plt.Figure, None] = None
# if ax is None:
# if "figsize" in kwargs:
# figsize = kwargs["figsize"]
# else:
# figsize = plt.rcParams["figure.figsize"]
# fig, ax = plt.subplots(figsize=figsize)
#
# sns.set_palette(self.stroop_plot_params["colormap"])
# line_styles = self.stroop_plot_params["line_styles"]
# fontsize = self.stroop_plot_params["fontsize"]
# xaxis_label = self.stroop_plot_params["xaxis_label"]
# xaxis_minor_ticks = self.stroop_plot_params["xaxis_minor_ticks"]
# bg_color = self.stroop_plot_params["background_color"]
# bg_alpha = self.stroop_plot_params["background_alpha"]
# x_labels = self.phases
#
# x = np.arange(len(x_labels))
# start_end = [(i - 0.5, i + 0.5) for i in x]
# if is_group_dict:
# conditions = list(set(data.index.get_level_values(group_col)))
# line1 = ax.errorbar(
# x,
# data.xs(conditions[0], level=group_col)[variable + "_mean"],
# yerr=data.xs(conditions[0], level=group_col)[variable + "_std"],
# color=sns.color_palette()[0],
# label=conditions[0],
# lw=2,
# errorevery=1,
# ls=line_styles[0],
# marker="D",
# capsize=3,
# )
# line2 = ax.errorbar(
# x,
# data.xs(conditions[1], level=group_col)[variable + "_mean"],
# yerr=data.xs(conditions[1], level=group_col)[variable + "_std"],
# color=sns.color_palette()[1],
# label=conditions[1],
# lw=2,
# errorevery=1,
# ls=line_styles[1],
# marker="D",
# capsize=3,
# )
# plt.legend(handles=[line1, line2], loc="upper right", prop={"size": fontsize})
# else:
# ax.errorbar(
# x,
# data[variable + "_mean"],
# yerr=data[variable + "_std"],
# color=sns.color_palette()[0],
# lw=2,
# errorevery=1,
# ls=line_styles[0],
# marker="D",
# capsize=3,
# )
#
# for (start, end), color, alpha in zip(start_end, bg_color, bg_alpha):
# ax.axvspan(start, end, color=color, alpha=alpha, zorder=0, lw=0)
#
# ax.set_xticklabels(x_labels, fontsize=fontsize)
# ax.set_xlabel(xaxis_label, fontsize=fontsize)
# ax.set_xticks([start + 0.5 for (start, end) in start_end])
# ax.xaxis.set_minor_locator(xaxis_minor_ticks)
# ax.tick_params(axis="x", which="both", bottom=True)
#
# if variable == "correct":
# ax.set_ylim(0, 105)
# ax.set_ylabel(r"$\Delta$Correct answers [%]", fontsize=fontsize)
# elif variable == "latency":
# ax.set_ylabel(r"$\Delta$Response time [ms]", fontsize=fontsize)
#
# ax.tick_params(axis="y", which="major", left=True, labelsize=fontsize)
#
# if ylims:
# ax.margins(x=0)
# ax.set_ylim(ylims)
# else:
# ax.margins(0, 0.1)
#
# if fig:
# fig.tight_layout()
# return fig, ax
#
# def concat_phase_dict(
# self, dict_hr_subject: Dict[str, Dict[str, pd.DataFrame]], **kwargs
# ) -> Dict[str, pd.DataFrame]:
# """
# Rearranges the 'HR subject dict' (see `util s.load_hr_excel_all_subjects`) into 'Stroop subphase dict'.
# See ``bp.protocols.utils.concat_phase_dict`` for further information.
#
# Parameters
# ----------
# dict_hr_subject : dict
# 'HR subject dict', i.e. a nested dict with heart rate data per Stroop subphase and subject
# **kwargs
#
# Returns
# -------
# dict
# 'Stroop dict', i.e. a dict with heart rate data of all subjects per Stroop subphase
#
# """
# if "phases" in kwargs:
# return super().concat_phase_dict(dict_hr_subject, kwargs["phases"])
# else:
# return super().concat_phase_dict(dict_hr_subject, self.phases)
#
# def split_groups_stroop(
# self,
# dict_stroop=Dict[str, Dict[str, pd.DataFrame]],
# condition_dict=Dict[str, Sequence[str]],
# ) -> Dict[str, Dict[str, pd.DataFrame]]:
# """
# Splits 'Stroop dict' into group dict, i.e. one 'Stroop dict' per group.
#
# Parameters
# ----------
# phase_dict : dict
# 'Dict stroop' to be split in groups. This is the outcome of 'stroop.load_stroop_test_data()'
# condition_dict : dict
# dictionary of group membership. Keys are the different groups, values are lists of subject IDs that
# belong to the respective group
#
# Returns
# -------
# dict
# group dict with one 'Stroop dict' per group
#
# """
# return {condition: {ID: dict_stroop[ID] for ID in IDs} for condition, IDs in condition_dict.items()}
#
# def split_groups(
# cls,
# phase_dict: Dict[str, pd.DataFrame],
# condition_list: Dict[str, Sequence[str]],
# ) -> Dict[str, Dict[str, pd.DataFrame]]:
# """
# Splits 'Stroop Phase dict' into group dict, i.e. one 'Stroop Phase dict' per group.
#
# Parameters
# ----------
# phase_dict : dict
# 'Stroop Phase dict' to be split in groups. See ``bp.protocols.utils.concat_phase_dict``
# for further information
# condition_list : dict
# dictionary of group membership. Keys are the different groups, values are lists of subject IDs that
# belong to the respective group
#
# Returns
# -------
# dict
# nested group dict with one 'Stroop Phase dict' per group
# """
#
# return super().split_groups(phase_dict, condition_list)
#
# def hr_mean_plot(
# self,
# data: Union[pd.DataFrame, Dict[str, pd.DataFrame]],
# groups: Optional[Sequence[str]] = None,
# group_col: Optional[str] = None,
# plot_params: Optional[Dict] = None,
# ax: Optional[plt.Axes] = None,
# **kwargs,
# ) -> Union[None, Tuple[plt.Figure, plt.Axes]]:
# """
# Plots the course of heart rate during the complete Stroop test (mean ± standard error per phase).
#
# In case of only one group a pandas dataframe can be passed.
#
# In case of multiple groups either a dictionary of pandas dataframes can be passed, where each dataframe
# belongs to one group, or one dataframe with a column indicating group membership (parameter ``group_col``).
#
# Regardless of the kind of input the dataframes need to be in the format of a 'mse dataframe', as returned
# by ``stroop.hr_course_mist`` (see ``MIST.hr_course_mist`` for further information).
#
#
# Parameters
# ----------
# data : dataframe or dict
# Heart rate data to plot. Can either be one dataframe (in case of only one group or in case of
# multiple groups, together with `group_col`) or a dictionary of dataframes,
# where one dataframe belongs to one group
# groups : list, optional:
# List of group names. If ``None`` is passed, the groups and their order are inferred from the
# dictionary keys or from the unique values in `group_col`. If list is supplied the groups are
# plotted in that order.
# Default: ``None``
# group_col : str, optional
# Name of group column in the dataframe in case of multiple groups and one dataframe
# plot_params : dict, optional
# dict with adjustable parameters specific for this plot or ``None`` to keep default parameter values.
# For an overview of parameters and their default values, see `mist.hr_course_params`
# ax : plt.Axes, optional
# Axes to plot on, otherwise create a new one. Default: ``None``
# kwargs: dict, optional
# optional parameters to be passed to the plot, such as:
# * figsize: tuple specifying figure dimensions
# * ylims: list to manually specify y-axis limits, float to specify y-axis margin (see ``Axes.margin()``
# for further information), None to automatically infer y-axis limits
#
#
# Returns
# -------
# tuple or none
# Tuple of Figure and Axes or None if Axes object was passed
# """
#
# if plot_params:
# self.hr_mean_plot_params.update(plot_params)
# return plot.hr_mean_plot(
# data=data, groups=groups, group_col=group_col, plot_params=self.hr_mean_plot_params, ax=ax, **kwargs
# )
#
# def hr_mean_se(
# self,
# data: Union[
# Dict[str, Dict[str, pd.DataFrame]],
# Dict[str, Dict[str, Dict[str, pd.DataFrame]]],
# ],
# is_group_dict: Optional[bool] = False,
# ) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
# """
# Computes the heart rate mean and standard error per phase over all subjects.
# See ``bp.protocols.utils.hr_course`` for further information.
#
# Parameters
# ----------
# data : dict
# nested dictionary containing heart rate data.
# is_group_dict : boolean, optional
# ``True`` if `data` is a group dict, i.e. contains dictionaries for multiple groups, ``False`` otherwise.
# Default: ``False``
#
# Returns
# -------
# dict or pd.DataFrame
#             'mse dataframe' or dict of 'mse dataframes', one dataframe per group, if ``is_group_dict`` is ``True``.
# """
#
# return super().mean_se_subphases(data, is_group_dict=is_group_dict)
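#
# Example usage (hypothetical sketch; assumes a ``Stroop`` protocol instance
# and a nested heart-rate dict in the format described above):
#
# stroop = Stroop()
# mse_data = stroop.hr_mean_se(hr_dict, is_group_dict=True)
# fig, ax = stroop.hr_mean_plot(mse_data)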
|
{"hexsha": "5d54c5703b28cffca127f09fc79a529355fbf76b", "size": 30083, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/biopsykit/protocols/stroop.py", "max_stars_repo_name": "mad-lab-fau/BioPsyK", "max_stars_repo_head_hexsha": "8ed7a2949e9c03c7d67b9ac6d17948ae218d94c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/biopsykit/protocols/stroop.py", "max_issues_repo_name": "mad-lab-fau/BioPsyK", "max_issues_repo_head_hexsha": "8ed7a2949e9c03c7d67b9ac6d17948ae218d94c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/biopsykit/protocols/stroop.py", "max_forks_repo_name": "mad-lab-fau/BioPsyK", "max_forks_repo_head_hexsha": "8ed7a2949e9c03c7d67b9ac6d17948ae218d94c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2514044944, "max_line_length": 120, "alphanum_fraction": 0.5294684706, "include": true, "reason": "import numpy", "num_tokens": 7658}
|
// Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NQS_RBM_MULTIVAL_HPP
#define NQS_RBM_MULTIVAL_HPP
#include <map>
#include <vector>
#include <Eigen/Dense>
#include "Machine/abstract_machine.hpp"
#include "Machine/rbm_spin.hpp"
namespace nqs {
// Restricted Boltzmann Machine wave function
// for a generic (finite) local Hilbert space
class RbmMultival : public AbstractMachine {
// number of visible units
int nv_;
// number of hidden units
int nh_;
// number of parameters
int npar_;
// local size of Hilbert space
int ls_;
// weights
MatrixType W_;
// visible units bias
VectorType a_;
// hidden units bias
VectorType b_;
VectorType thetas_;
VectorType lnthetas_;
VectorType thetasnew_;
VectorType lnthetasnew_;
bool usea_;
bool useb_;
Eigen::VectorXd localconfs_;
Eigen::MatrixXd mask_;
Eigen::VectorXd vtilde_;
std::map<double, int> confindex_;
public:
explicit RbmMultival(std::shared_ptr<const AbstractHilbert> hilbert,
int nhidden = 0, int alpha = 0, bool usea = true,
bool useb = true);
int Npar() const override;
int Nvisible() const override;
/*constexpr*/ int Nhidden() const noexcept { return nh_; }
void InitRandomPars(int seed, double sigma) override;
void InitLookup(VisibleConstType v, LookupType <) override;
void UpdateLookup(VisibleConstType v, const std::vector<int> &tochange,
const std::vector<double> &newconf,
LookupType <) override;
VectorType DerLog(VisibleConstType v) override;
VectorType DerLog(VisibleConstType v, const LookupType <) override;
VectorType GetParameters() override;
void SetParameters(VectorConstRefType pars) override;
// Value of the logarithm of the wave-function
Complex LogVal(VisibleConstType v) override;
// Value of the logarithm of the wave-function
// using pre-computed look-up tables for efficiency
Complex LogVal(VisibleConstType v, const LookupType <) override;
// Difference between logarithms of values, when one or more visible variables
// are being changed
VectorType LogValDiff(
VisibleConstType v, const std::vector<std::vector<int>> &tochange,
const std::vector<std::vector<double>> &newconf) override;
// Difference between logarithms of values, when one or more visible variables
// are being changed Version using pre-computed look-up tables for efficiency
// on a small number of local changes
Complex LogValDiff(VisibleConstType v, const std::vector<int> &tochange,
const std::vector<double> &newconf,
const LookupType <) override;
void Save(const std::string &filename) const override;
void Load(const std::string &filename) override;
virtual bool IsHolomorphic() const noexcept override;
private:
inline void Init();
// Computes the values of the theta pseudo-angles
inline void ComputeTheta(VisibleConstType v, VectorType &theta) {
ComputeVtilde(v, vtilde_);
theta = (W_.transpose() * vtilde_ + b_);
}
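// Computes the one-hot ("tilde") encoding of the visible configuration:
// each visible variable, taking one of ls_ local values, is expanded
// into ls_ binary units, exactly one of which is active.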
inline void ComputeVtilde(VisibleConstType v, Eigen::VectorXd &vtilde) {
auto t = (localconfs_.array() == (mask_ * v).array());
vtilde = t.template cast<double>();
}
};
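// Example construction (sketch; `hi` is a hypothetical
// std::shared_ptr<const AbstractHilbert> describing the local space):
//   RbmMultival machine(hi, /*nhidden=*/20);
//   machine.InitRandomPars(/*seed=*/42, /*sigma=*/0.01);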
} // namespace nqs
#endif
|
{"hexsha": "41e98f5b3270c4cb842b90606be168d3dca26e8d", "size": 3867, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "Sources/Machine/rbm_multival.hpp", "max_stars_repo_name": "stubbi/netket", "max_stars_repo_head_hexsha": "7391466077a4694e8f12c649730a81bf634f695e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-11-28T10:26:04.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-28T10:26:04.000Z", "max_issues_repo_path": "Sources/Machine/rbm_multival.hpp", "max_issues_repo_name": "stubbi/nqs", "max_issues_repo_head_hexsha": "7391466077a4694e8f12c649730a81bf634f695e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sources/Machine/rbm_multival.hpp", "max_forks_repo_name": "stubbi/nqs", "max_forks_repo_head_hexsha": "7391466077a4694e8f12c649730a81bf634f695e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2109375, "max_line_length": 80, "alphanum_fraction": 0.7129557797, "num_tokens": 946}
|
import numpy as np
import pandas as pd
import yfinance as yf
# Using yfinance
'''
tickerSymbol = 'GOOG'
tickerData = yf.Ticker(tickerSymbol)
tickerDf = tickerData.history(period='1d', start='2010-1-1', end='2021-4-25')
tickerDf.plot(y='Open')
'''
# call the Yahoo Finance API to get data for the last week at a 90-minute interval
data = yf.download(tickers='BTC-USD', period = '1wk', interval = '90m')
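# `data` is a pandas DataFrame indexed by timestamp with columns
# Open, High, Low, Close, Adj Close, and Volume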
# PLOTTING
DEFAULT_PLOTLY_COLORS=['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
#from plotly.subplots import make_subplots
import plotly.graph_objects as go
fig = go.Figure()
#Candlestick
fig.add_trace(go.Candlestick(x=data.index,
open=data.Open,
high=data.High,
low=data.Low,
close=data.Close, name = 'market data'))
# Add titles
fig.update_layout(
#title='Bitcoin live share price evolution',
    yaxis_title='Bitcoin Price (USD)')
# X-Axes
'''fig.update_xaxes(
rangeslider_visible=True,
rangeselector=dict(
buttons=list([
dict(count=15, label="15m", step="minute", stepmode="backward"),
dict(count=45, label="45m", step="minute", stepmode="backward"),
dict(count=1, label="HTD", step="hour", stepmode="todate"),
dict(count=6, label="6h", step="hour", stepmode="backward"),
dict(step="all")
])
)
)'''
# Title
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=1.48,xanchor='center',yanchor='top',
font=dict(family='Arial',size=24,color='grey'),showarrow=False,
text="Crypto: Future of Currency!"))
# Subtitle
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=1.31,xanchor='center',yanchor='top',
font=dict(family='Arial',size=14,color='grey'),showarrow=False,
                        text="At least my friend Raj would say that!"))
# Footer
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=-0.51,xanchor='center',yanchor='top',
font=dict(family='Arial', size=12, color='grey'),showarrow=False,
text='#30DayChartChallenge - 2021/04/28 | uncertainties | future'))
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=-0.59,xanchor='center',yanchor='top',
font=dict(family='Arial', size=12, color='grey'),showarrow=False,
                        text='Data: last 1 week at a 90-minute interval via the Yahoo Finance API'))
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=-0.67,xanchor='center',yanchor='top',
font=dict(family='Arial', size=12, color='grey'),showarrow=False,
text='twitter.com/vivekparasharr | github.com/vivekparasharr | vivekparasharr.medium.com'))
fig.update_xaxes(color='grey', tickfont=dict(size=10))
fig.update_yaxes(color='grey', tickfont=dict(size=10))
fig.update_layout(template="plotly_dark")
fig.show()
|
{"hexsha": "a2034dd3abfa3bb58067bbf14f92e325d7dcadc3", "size": 3006, "ext": "py", "lang": "Python", "max_stars_repo_path": "30DayChartChallenge/20210428-uncertainties-future.py", "max_stars_repo_name": "vivekparasharr/Challenges-and-Competitions", "max_stars_repo_head_hexsha": "c99d67838a0bb14762d5f4be4993dbcce6fe0c5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-01-11T20:12:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-15T04:53:45.000Z", "max_issues_repo_path": "30DayChartChallenge/20210428-uncertainties-future.py", "max_issues_repo_name": "vivekparasharr/Challenges-and-Competitions", "max_issues_repo_head_hexsha": "c99d67838a0bb14762d5f4be4993dbcce6fe0c5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "30DayChartChallenge/20210428-uncertainties-future.py", "max_forks_repo_name": "vivekparasharr/Challenges-and-Competitions", "max_forks_repo_head_hexsha": "c99d67838a0bb14762d5f4be4993dbcce6fe0c5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-30T19:15:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-30T19:15:46.000Z", "avg_line_length": 38.0506329114, "max_line_length": 106, "alphanum_fraction": 0.6510312708, "include": true, "reason": "import numpy", "num_tokens": 888}
|
(* Title: HOL/Auth/n_mutualExSimp_lemma_inv__4_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_mutualExSimp Protocol Case Study*}
theory n_mutualExSimp_lemma_inv__4_on_rules imports n_mutualExSimp_lemma_on_inv__4
begin
section{*All lemmas on causal relation between inv__4*}
lemma lemma_inv__4_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__4 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i. i\<le>N\<and>r=n_Crit i)\<or>
(\<exists> i. i\<le>N\<and>r=n_Exit i)\<or>
(\<exists> i. i\<le>N\<and>r=n_Idle i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_Crit i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_CritVsinv__4) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_Exit i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_ExitVsinv__4) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_Idle i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_IdleVsinv__4) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
{"author": "lyj238Gmail", "repo": "newParaVerifier", "sha": "5c2d49bf8e6c46c60efa53c98b0ba5c577d59618", "save_path": "github-repos/isabelle/lyj238Gmail-newParaVerifier", "path": "github-repos/isabelle/lyj238Gmail-newParaVerifier/newParaVerifier-5c2d49bf8e6c46c60efa53c98b0ba5c577d59618/examples/n_mutualExSimp/n_mutualExSimp_lemma_inv__4_on_rules.thy"}
|
from __future__ import print_function
import os
import pdb
import torch
import utils
import numpy as np
def has_checkpoint(checkpoint_path, rb_path):
"""check if a checkpoint exists"""
if not (os.path.exists(checkpoint_path) and os.path.exists(rb_path)):
return False
if 'model.pyth' not in os.listdir(checkpoint_path):
return False
if len(os.listdir(rb_path)) == 0:
return False
return True
def save_model(checkpoint_path, policy, total_timesteps, episode_num, num_samples, replay_buffer, env_names, args):
# change to default graph before saving
policy.change_morphology([-1])
# Record the state
checkpoint = {
'actor_state': policy.actor.state_dict(),
'critic_state': policy.critic.state_dict(),
'actor_target_state': policy.actor_target.state_dict(),
'critic_target_state': policy.critic_target.state_dict(),
'actor_optimizer_state': policy.actor_optimizer.state_dict(),
'critic_optimizer_state': policy.critic_optimizer.state_dict(),
'total_timesteps': total_timesteps,
'episode_num': episode_num,
'num_samples': num_samples,
'args': args,
'rb_max': {name: replay_buffer[name].max_size for name in replay_buffer},
'rb_ptr': {name: replay_buffer[name].ptr for name in replay_buffer},
'rb_slicing_size': {name: replay_buffer[name].slicing_size for name in replay_buffer}
}
fpath = os.path.join(checkpoint_path, 'model.pyth')
# (over)write the checkpoint
torch.save(checkpoint, fpath)
policy.change_morphology(policy.graph)
return fpath
def save_model_lifelong(checkpoint_path, policy, total_timesteps, episode_num, num_samples, replay_buffer, env_name, args):
# change to default graph before saving
policy.change_morphology([-1])
# Record the state
checkpoint = {
'actor_state': policy.actor.state_dict(),
'critic_state': policy.critic.state_dict(),
'actor_target_state': policy.actor_target.state_dict(),
'critic_target_state': policy.critic_target.state_dict(),
'actor_optimizer_state': policy.actor_optimizer.state_dict(),
'critic_optimizer_state': policy.critic_optimizer.state_dict(),
'total_timesteps': total_timesteps,
'episode_num': episode_num,
'num_samples': num_samples,
'args': args,
'rb_max': replay_buffer.max_size,
'rb_ptr': replay_buffer.ptr,
'rb_slicing_size': replay_buffer.slicing_size
}
fpath = os.path.join(checkpoint_path, '{}_model.pyth'.format(env_name))
# (over)write the checkpoint
torch.save(checkpoint, fpath)
policy.change_morphology(policy.graph)
return fpath
def save_replay_buffer(rb_path, replay_buffer):
# save replay buffer
for name in replay_buffer:
np.save(os.path.join(rb_path, '{}.npy'.format(name)), np.array(replay_buffer[name].storage), allow_pickle=False)
return rb_path
def save_replay_buffer_lifelong(rb_path, replay_buffer, env_name):
# save replay buffer
np.save(os.path.join(rb_path, '{}.npy'.format(env_name)), np.array(replay_buffer.storage), allow_pickle=False)
return rb_path
def load_checkpoint(checkpoint_path, rb_path, policy, args):
fpath = os.path.join(checkpoint_path, 'model.pyth')
checkpoint = torch.load(fpath, map_location='cpu')
# change to default graph before loading
policy.change_morphology([-1])
# load and return checkpoint
policy.actor.load_state_dict(checkpoint['actor_state'])
policy.critic.load_state_dict(checkpoint['critic_state'])
policy.actor_target.load_state_dict(checkpoint['actor_target_state'])
policy.critic_target.load_state_dict(checkpoint['critic_target_state'])
policy.actor_optimizer.load_state_dict(checkpoint['actor_optimizer_state'])
policy.critic_optimizer.load_state_dict(checkpoint['critic_optimizer_state'])
# load replay buffer
all_rb_files = [f[:-4] for f in os.listdir(rb_path) if '.npy' in f]
all_rb_files.sort()
replay_buffer_new = dict()
for name in all_rb_files:
if len(all_rb_files) > args.rb_max // 1e6:
replay_buffer_new[name] = utils.ReplayBuffer(max_size=args.rb_max // len(all_rb_files))
else:
replay_buffer_new[name] = utils.ReplayBuffer()
replay_buffer_new[name].max_size = int(checkpoint['rb_max'][name])
replay_buffer_new[name].ptr = int(checkpoint['rb_ptr'][name])
replay_buffer_new[name].slicing_size = checkpoint['rb_slicing_size'][name]
replay_buffer_new[name].storage = list(np.load(os.path.join(rb_path, '{}.npy'.format(name))))
return checkpoint['total_timesteps'], \
checkpoint['episode_num'], \
replay_buffer_new, \
checkpoint['num_samples'], \
fpath
def load_model_only(exp_path, policy):
model_path = os.path.join(exp_path, 'model.pyth')
if not os.path.exists(model_path):
raise FileNotFoundError('no model file found')
print('*** using model {} ***'.format(model_path))
checkpoint = torch.load(model_path, map_location='cpu')
# change to default graph before loading
policy.change_morphology([-1])
# load and return checkpoint
policy.actor.load_state_dict(checkpoint['actor_state'])
policy.critic.load_state_dict(checkpoint['critic_state'])
policy.actor_target.load_state_dict(checkpoint['actor_target_state'])
policy.critic_target.load_state_dict(checkpoint['critic_target_state'])
policy.actor_optimizer.load_state_dict(checkpoint['actor_optimizer_state'])
policy.critic_optimizer.load_state_dict(checkpoint['critic_optimizer_state'])
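# Example resume flow (sketch; `ckpt_dir`, `rb_dir`, `policy`, and `args` are
# hypothetical names for the caller's objects):
#
# if has_checkpoint(ckpt_dir, rb_dir):
#     total_timesteps, episode_num, replay_buffer, num_samples, _ = \
#         load_checkpoint(ckpt_dir, rb_dir, policy, args)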
|
{"hexsha": "c53ca8819c69771b8c18df340d4c92dff489b159", "size": 5686, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/checkpoint.py", "max_stars_repo_name": "yangfanthu/modular-rl", "max_stars_repo_head_hexsha": "25c599bab641a7e732dbaf116cd240fa2358f113", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/checkpoint.py", "max_issues_repo_name": "yangfanthu/modular-rl", "max_issues_repo_head_hexsha": "25c599bab641a7e732dbaf116cd240fa2358f113", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/checkpoint.py", "max_forks_repo_name": "yangfanthu/modular-rl", "max_forks_repo_head_hexsha": "25c599bab641a7e732dbaf116cd240fa2358f113", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.7716535433, "max_line_length": 123, "alphanum_fraction": 0.7126275062, "include": true, "reason": "import numpy", "num_tokens": 1296}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 2 20:32:21 2017
@author: linkw
"""
import pandas as pd
import numpy as np
# read into df; handle missing values and combine the two text columns
data=pd.read_csv("D:\\Datasets\\kickstarter\\train.csv")
data.iloc[:,2].replace(np.NAN, '---', inplace=True)
data.iloc[:,4].replace(np.NAN, '---', inplace=True)
data.iloc[:,2] = data.iloc[:,2] + " " + data.iloc[:,4]
#data=data.dropna()
#data=data.dropna(subset=['desc'])
#clean txt data. Get rid of dummy's and non words
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
stopwords=stopwords.words('english')
stemmer=SnowballStemmer('english')
import re
r=re.compile(r'[\W]', re.U)
data.iloc[:,2]=data.iloc[:,2].apply(lambda x : ' '.join(stemmer.stem(w) for w in re.sub('[\\s]+',' ',r.sub(' ',x.lower())).split() if w not in stopwords))
data=data.assign(txt_len=data.iloc[:,2].apply(lambda x : len(x.split())).values)
# drop trivial columns; also compute time differences between the timestamp fields
txt_data=data.iloc[:,2].values[...,None]
X=data.iloc[:,[3,5,6,8,10,11,14]].values
Y=data.iloc[:,13].values
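# epoch-second differences converted to days (86400 seconds per day)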
launch_to_deadline=np.subtract(X[:,3],X[:,5])[...,None]
launch_to_deadline=np.divide(launch_to_deadline,86400)
creation_to_deadline=np.subtract(X[:,3],X[:,4])[...,None]
creation_to_deadline=np.divide(creation_to_deadline,86400)
creation_to_launch=np.subtract(X[:,5],X[:,4])[...,None]
creation_to_launch=np.divide(creation_to_launch,86400)
X=np.concatenate((X,launch_to_deadline,creation_to_deadline,creation_to_launch),1)
X=np.delete(X, 3,1)
X=np.delete(X, 3,1)
X=np.delete(X, 3,1)
#encode values
from sklearn.preprocessing import LabelEncoder
labelencoder_1 = LabelEncoder()
X[:,1]=labelencoder_1.fit_transform(X[:,1])
labelencoder_2 = LabelEncoder()
labelencoder_2.fit(X[:,2])
#take care of unknown labels during prediction phase
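# an 'unknown' class is inserted into the sorted class list so that countries
# unseen during training can still be encoded at prediction time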
import bisect
countries = labelencoder_2.classes_.tolist()
bisect.insort_left(countries, 'unknown')
labelencoder_2.classes_ = countries
X[:,2]=labelencoder_2.transform(X[:,2])
# one-hot encode the categorical features
from sklearn.preprocessing import OneHotEncoder
onehotencoder = OneHotEncoder(categorical_features=[1,2],n_values=[2,12])
X=onehotencoder.fit_transform(X).toarray()
X=np.concatenate((X,txt_data),1)
# split early so the test data stays hidden from the feature transformers
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test=train_test_split(X, Y, train_size=0.9,random_state=141289)
del X,Y,txt_data,creation_to_deadline,creation_to_launch,launch_to_deadline,countries
# 'backers' is not available in the challenge's test data, so it is dropped here
txt=X_train[:,19]
txt2=X_test[:,19]
X_train=np.delete(X_train,19,1)
X_test=np.delete(X_test,19,1)
#get ready to clusterize text
from sklearn.feature_extraction.text import CountVectorizer
cvt = CountVectorizer()
text_features = cvt.fit_transform(txt)
text_features2 = cvt.transform(txt2)
from sklearn.feature_extraction.text import TfidfTransformer
tf = TfidfTransformer(use_idf=False)
text_features= tf.fit_transform(text_features)
text_features2= tf.transform(text_features2)
# dimensionality reduction via truncated SVD (latent semantic analysis)
from sklearn.decomposition import TruncatedSVD
tsvd= TruncatedSVD(n_components= 450,n_iter=12, random_state=141289)
text_features=tsvd.fit_transform(text_features)
#print(tsvd.explained_variance_ratio_.sum())
text_features2=tsvd.transform(text_features2)
#normalize for use in K means
from sklearn.preprocessing import Normalizer
nrm= Normalizer()
nrm.fit(text_features)
text_features=nrm.transform(text_features)
text_features2=nrm.transform(text_features2)
#clusterize text for use as a feature
from sklearn.cluster import KMeans
km = KMeans(n_clusters=18, n_jobs=8, algorithm='full')
clusters=km.fit_predict(text_features)[...,None]
cluster_test=km.predict(text_features2)[...,None]
# one-hot encode the clusters for use with XGB
onehotencoder2 = OneHotEncoder()
clusters=onehotencoder2.fit_transform(clusters).toarray()
cluster_test=onehotencoder2.transform(cluster_test).toarray()
X_train=np.concatenate((X_train,clusters,text_features),1)
X_test=np.concatenate((X_test,cluster_test,text_features2),1)
del clusters,cluster_test,text_features,text_features2,txt,txt2
#from sklearn.ensemble import RandomForestClassifier
#rf = RandomForestClassifier(n_estimators = 85, criterion = 'entropy', random_state = 141289,n_jobs=8)
#rf.fit(X_train, Y_train)
#Y_pred = rf.predict(X_test)
from xgboost import XGBClassifier
xgb = XGBClassifier(random_state=141289,n_jobs=8,max_depth=5,n_estimators=245,subsample=0.9,colsample_bytree=0.9)
xgb.fit(X_train, Y_train)
Y_pred = xgb.predict(X_test)
Y_pred=[round(k) for k in Y_pred]
#check accuracy of model
from sklearn.metrics import accuracy_score
print(accuracy_score(Y_test, Y_pred))
#cleanup
del X_test,X_train,Y_test,Y_train,Y_pred,data
###########################################################################################
#predict the test dataset for submission.
test_data=pd.read_csv("D:\\Datasets\\kickstarter\\test.csv")
test_data.iloc[:,2].replace(np.NAN, '---', inplace=True)
test_data.iloc[:,4].replace(np.NAN, '---', inplace=True)
test_data.iloc[:,2] = test_data.iloc[:,2] + " " + test_data.iloc[:,4]
test_data.iloc[:,6] = test_data.iloc[:,6].map(lambda x: 'unknown' if x not in labelencoder_2.classes_ else x)
test_data.iloc[:,2]=test_data.iloc[:,2].apply(lambda x : ' '.join(stemmer.stem(w) for w in re.sub('[\\s]+',' ',r.sub(' ',x.lower())).split() if w not in stopwords))
test_data=test_data.assign(txt_len=test_data.iloc[:,2].apply(lambda x : len(x.split())).values)
txt_data=test_data.iloc[:,2].values[...,None]
pred_x=test_data.iloc[:,[3,5,6,8,10,11,12]].values
launch_to_deadline=np.subtract(pred_x[:,3],pred_x[:,5])[...,None]
launch_to_deadline=np.divide(launch_to_deadline,86400)
creation_to_deadline=np.subtract(pred_x[:,3],pred_x[:,4])[...,None]
creation_to_deadline=np.divide(creation_to_deadline,86400)
creation_to_launch=np.subtract(pred_x[:,5],pred_x[:,4])[...,None]
creation_to_launch=np.divide(creation_to_launch,86400)
pred_x=np.concatenate((pred_x,launch_to_deadline,creation_to_deadline,creation_to_launch),1)
pred_x=np.delete(pred_x, 3,1)
pred_x=np.delete(pred_x, 3,1)
pred_x=np.delete(pred_x, 3,1)
pred_x[:,1]=labelencoder_1.transform(pred_x[:,1])
pred_x[:,2]=labelencoder_2.transform(pred_x[:,2])
pred_x=onehotencoder.transform(pred_x).toarray()
pred_x=np.concatenate((pred_x,txt_data),1)
txt2=pred_x[:,19]
pred_x=np.delete(pred_x,19,1)
text_features2 = cvt.transform(txt2)
text_features2= tf.transform(text_features2)
text_features2=tsvd.transform(text_features2)
text_features2=nrm.transform(text_features2)
cluster_test=km.predict(text_features2)[...,None]
# one-hot encode the clusters for use with XGB
cluster_test=onehotencoder2.transform(cluster_test).toarray()
pred_x=np.concatenate((pred_x,cluster_test,text_features2),1)
#predictions = rf.predict(pred_x)
predictions = xgb.predict(pred_x)
predictions=[round(k) for k in predictions]
#save csv for upload
test_data=test_data.assign(final_status=predictions)
sub=test_data.iloc[:,[0,13]]
#sub.to_csv("D:\\Datasets\\kickstarter\\subrf.csv",index=False)
sub.to_csv("D:\\Datasets\\kickstarter\\subxgb.csv",index=False)
#cleanup
del txt_data,creation_to_deadline,creation_to_launch,launch_to_deadline,txt2, text_features2,cluster_test,pred_x,predictions,test_data,sub
|
{"hexsha": "c5168e800459d023f9eb5b4d8ce16b3f3c29271b", "size": 7269, "ext": "py", "lang": "Python", "max_stars_repo_path": "kickstarter_xgb.py", "max_stars_repo_name": "linkwithkk/kickstarter", "max_stars_repo_head_hexsha": "bd3b60aaedc3f88ececc484f00dde1414f011310", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-19T21:48:18.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-19T21:48:18.000Z", "max_issues_repo_path": "kickstarter_xgb.py", "max_issues_repo_name": "linkwithkk/kickstarter", "max_issues_repo_head_hexsha": "bd3b60aaedc3f88ececc484f00dde1414f011310", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kickstarter_xgb.py", "max_forks_repo_name": "linkwithkk/kickstarter", "max_forks_repo_head_hexsha": "bd3b60aaedc3f88ececc484f00dde1414f011310", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1602209945, "max_line_length": 164, "alphanum_fraction": 0.7664052827, "include": true, "reason": "import numpy", "num_tokens": 1969}
|
\documentclass[margin,line]{res}
\usepackage{fancyhdr}
\usepackage{wasysym}
\usepackage{textcomp}
%\usepackage{hyperref}
\usepackage{url}
\usepackage{marvosym}
%\usepackage[misc]{ifsym}
%
%\usepackage[margin=1in]{geometry}
\addtolength{\textwidth}{0.20cm}
\addtolength{\evensidemargin}{0.1cm}
\addtolength{\oddsidemargin}{0.1cm}
\addtolength{\textheight}{-0.4cm}
\addtolength{\topmargin}{0.1cm}
%
%
\fancyhf{} % clear all header and footer fields
\fancyfoot[R]{\small \thepage~of 6}
\fancyfoot[L]{\hspace{-3.2cm}\footnotesize Last updated: January 2018}
%\fancyfoot[L]{}
\renewcommand{\headrulewidth}{0pt} % no line
\renewcommand{\footrulewidth}{0pt}
%\fancyhfoffset{\sectionwidth}
\pagestyle{fancy}
%\oddsidemargin -.75in
%\evensidemargin -.5in
%\textwidth=5.5in
\itemsep=0in
\parsep=0in
\newenvironment{list1}{
\begin{list}{\ding{113}}{%
\setlength{\itemsep}{0in}
\setlength{\parsep}{0in} \setlength{\parskip}{0in}
\setlength{\topsep}{0in} \setlength{\partopsep}{0in}
\setlength{\leftmargin}{0.15in}}}{\end{list}}
\newenvironment{list2}{
\begin{list}{$\bullet$}{%
\setlength{\itemsep}{0in}
\setlength{\parsep}{0in} \setlength{\parskip}{0in}
\setlength{\topsep}{0in} \setlength{\partopsep}{0in}
\setlength{\leftmargin}{0.10in}}}{\end{list}}
\newenvironment{list3}{
\begin{list}{$\circ$}{%
\setlength{\itemsep}{0in}
\setlength{\parsep}{0in} \setlength{\parskip}{0in}
\setlength{\topsep}{0in} \setlength{\partopsep}{0in}
\setlength{\leftmargin}{0.28in}}}{\end{list}}
\pagenumbering{arabic}
\begin{document}
\begin{center}
{\Large \hspace{-4cm} \bf Yu Zhang}
\end{center}
\vspace{-.25cm}
\begin{tabular}{@{}p{3.9in}p{3.9in}}
Department of Electrical Engineering & \Telefon\, \texttt{831-459-2921}\\
University of California, Santa Cruz (UCSC) & \Letter\, \texttt{zhangy@ucsc.edu} \\
Baskin Engineering 243, UCSC, CA 95064 & \Mundus\, \url{people.ucsc.edu/~yzhan419/}
\end{tabular}
\vspace{.1in}
\begin{resume}
%
\vspace{0.2cm}
\section{\sc{\bf{POSITIONS}}}
Assistant Professor \hfill 07/2017 -- present\\
Department of Electrical Engineering \\
University of California, Santa Cruz
Postdoctoral Employee \hfill 05/2017 -- 06/2017\\
Energy Analysis and Environmental Impacts Division \\
Lawrence Berkeley National Laboratory
Postdoctoral Scholar \hfill 01/2016 -- 04/2017\\
Industrial Engineering \& Operations Research Department\\
University of California, Berkeley
Postdoctoral Associate \hfill 08/2015 -- 01/2016 \\
Department of Electrical and Computer Engineering \\
University of Minnesota -- Twin Cities
\vspace{.2cm}
\section{\sc{\bf{EDUCATION}}}
University of Minnesota -- Twin Cities (UMN) \hfill 08/2010 -- 07/2015 \\
Ph.D. in Electrical Engineering
Shanghai Jiao Tong University (SJTU) \hfill 09/2007 -- 03/2010\\
M.S. in Electrical Engineering
Wuhan University of Technology (WUT) \hfill 09/2002 -- 07/2006\\
B.E. in Electrical Engineering
\vspace{.2cm}
\section{\sc{\bf{RESEARCH INTERESTS}}}
\begin{list2}
\item Smart grids: Energy management, data analytics, and grid monitoring
\item Optimization: Distributed algorithms, stochastic and online optimization
\item Big data: High dimensional statistical inference and deep learning
\item Cyber-physical IoT systems: Optimal resource allocation
\end{list2}
%Emphasis on applying modern optimization theory, signal processing, and machine learning techniques
%to fundamental problems in cyber-physical systems including smart power grids and communication networks.
%Current research focuses on stochastic energy management with high-penetration renewables and energy data analytics.
%Additional thrusts include optimal resource allocation for wireless communications and geo-distributed data centers.
\vspace{.2cm}
\section{\sc{\bf{PUBLICATIONS}}} \newcounter{saveenum}
{\bf Journal Papers}
\vspace{.2cm}
\begin{enumerate}\setcounter{enumi}{\value{saveenum}}
\item[J12.] \textbf{Y. Zhang}, R. Madani, and J. Lavaei,
``Conic Relaxations for Power System State Estimation with Line Measurements,"
\emph{IEEE Trans. on Control of Network Systems}, Mar. 2017 (accepted).
\item[J11.] S. Hu, \textbf{Y. Zhang}, X. Wang, and G. Giannakis,
``Weighted Sum-Rate Maximization for MIMO Downlink Systems Powered by Renewables,''
\emph{IEEE Trans. on Wireless Communications}, vol. 15, no. 8, pp. 5615--5625, Aug. 2016.
\item[J10.] X. Wang, \textbf{Y. Zhang}, G. Giannakis, and S. Hu,
``Robust Smart-Grid Powered Cooperative Multipoint Systems,"
\emph{IEEE Trans. on Wireless Communications}, vol. 14, no. 11, pp. 1348--1359, May 2016.
\item[J9.] \textbf{Y. Zhang} and G. Giannakis,
``Distributed Stochastic Market Clearing with High-Penetration Wind Power,"
\emph{IEEE Trans. on Power Systems}, vol. 31, no. 2, pp. 895--906, Mar. 2016.
\item[J8.] T. Chen, \textbf{Y. Zhang}, X. Wang, and G. Giannakis,
``Robust Workload and Energy Management for Sustainable Data Centers,''
\emph{IEEE Journal on Selected Areas in Communications}, vol. 34, no. 3, pp. 651--664, Mar. 2016.
\item[J7.] X. Wang, \textbf{Y. Zhang}, T. Chen, and G. Giannakis,
``Dynamic Energy Management for Smart-Grid Powered Coordinated Multipoint Systems,"
\emph{IEEE Journal on Selected Areas in Communications}, vol. 34, no. 5, pp. 6188--6199, Nov. 2015.
\item[J6.] V. Kekatos, \textbf{Y. Zhang}, and G. Giannakis,
``Electricity Market Forecasting via Low-Rank Multi-Kernel Learning,"
\emph{IEEE Journal of Selected Topics in Signal Processing}, vol. 8, no. 6, pp. 1182--1193, Dec. 2014.
\item[J5.] \textbf{Y. Zhang}, N. Gatsis, and G. Giannakis,
``Robust Energy Management for Microgrids With High-Penetration Renewables,"
\emph{IEEE Trans. on Sustainable Energy}, vol. 4, no. 4, pp. 944--953, Oct. 2013.
\item[J4.] \textbf{Y. Zhang}, E. Dall'Anese, and G. Giannakis,
``Distributed Optimal Beamformers for Cognitive Radios Robust to Channel Uncertainties,"
\emph{IEEE Trans. on Signal Processing}, vol. 60, no. 12, pp. 6495--6508, Dec. 2012.
\item[J3.] \textbf{Y. Zhang}, H.-W. Luo, and X.-L. Zhou,
``A Relay Scheduling Algorithm in Dual-Hop Wireless Networks,"
\emph{Journal of Shanghai Jiao Tong University}, vol. 45, no. 3, pp. 331--335, Mar. 2011.
\item[J2.] \textbf{Y. Zhang}, H.-W. Luo, and W. Chen,
``Efficient Relay Beamforming Design with SIC Detection for Dual-Hop MIMO Relay Networks,"
\emph{IEEE Trans. on Vehicular Technology}, vol. 59, no. 8, pp. 4192--4197, Oct. 2010.
\item[J1.] \textbf{Y. Zhang}, H.-W. Luo, C. Wang, and F. She,
``A Utility Function Based Low Complexity User Scheduling Algorithm for Multi-user MIMO Systems,"
\emph{Journal of Shanghai Jiao Tong University}, vol. 43, no. 7, pp. 1103--1107, Jul. 2009.
\end{enumerate}
{\bf Conference Papers}
\vspace{.2cm}
\begin{enumerate}
\item[C18.] \textbf{Y. Zhang}, R. Madani, and J. Lavaei,
``Power System State Estimation with Line Measurements,"
\emph{Proc. of 55th IEEE Conf. on Decision and Control (CDC)}, Las Vegas, NV, Dec. 2016.
\item[C17.] T. Chen, \textbf{Y. Zhang}, X. Wang, and G. Giannakis,
``Robust Geographical Load Balancing for Sustainable Data Centers,"
\emph{Proc. of Intl. Conf. on Acoustics, Speech, and Signal Process. (ICASSP)}, Shanghai, China, Mar. 2016.
\item[C16.] X. Wang, T. Chen, \textbf{Y. Zhang}, and G. Giannakis,
``Optimal Dynamic Power Management for Green Coordinated Multipoint Systems,"
\emph{Proc. of IEEE Global Commun. Conf. (Globecom)}, San Diego, CA, Dec. 2015.
\item[C15.] S. Chepuri, \textbf{Y. Zhang}, G. Leus, and G. Giannakis,
``Big Data Sketching with Model Mismatch,''
\emph{Proc. of Asilomar Conf. on Signals, Systems, and Computers (Asilomar)}, Pacific Grove, CA, Nov. 2015.
\item[C14.] S. Hu, X. Wang, \textbf{Y. Zhang}, G. Giannakis,
``Optimal Resource Allocation for Smart-Grid Powered MIMO Broadcast Channels,"
\emph{Proc. of 7th Intl. Conf. on Wireless Commun. and Signal Process. (WCSP)}, Nanjing, China, Oct. 2015.
\item[C13.] \textbf{Y. Zhang}, X. Wang, G. Giannakis, and S. Hu,
``Distributed Robust Resource Allocation for Renewable Powered Wireless Cellular Networks,"
\emph{Proc. of 3rd Intl. BlackSea Conf. on Commun. and Netw. (BlackSeaCom)}, Constanta, Romania, May 2015.
\item[C12.] \textbf{Y. Zhang}, S.-J. Kim, and G. Giannakis,
``Short-Term Wind Power Forecasting using Nonnegative Sparse Coding,"
\emph{Proc. of 49th Conf. on Info. Sci. and Syst. (CISS)}, Baltimore, MD, Mar. 2015.
\item[C11.] \textbf{Y. Zhang} and G. Giannakis,
``Distributed Market Clearing with Wind Generation and Large-Scale Dispatchable Loads,"
\emph{Proc. of 53rd IEEE Conf. on Decision and Control (CDC)}, Los Angeles, CA, Dec. 2014.
\item[C10.] G. Martinez, \textbf{Y. Zhang}, and G. Giannakis,
``An Efficient Primal-Dual Approach to Chance-Constrained Economic Dispatch,"
\emph{Proc. of North American Power Symp. (NAPS)}, Pullman, WA, Sep. 2014.
\item[C9.] V. Kekatos, \textbf{Y. Zhang}, and G. Giannakis,
``Kernel Selection for Power Market Inference via Block Successive Upper Bound Minimization,"
\emph{Proc. of Intl. Conf. on Acoustics, Speech, and Signal Process. (ICASSP)},
Florence, Italy, May 2014.
\item[C8.] \textbf{Y. Zhang} and G. Giannakis,
``Efficient Decentralized Economic Dispatch for Microgrids with Wind Power Integration,"
\emph{Proc. of 6th Annual IEEE Green Tech. (GreenTech)}, Corpus Christi, TX, Apr. 2014.
\item[C7.] \textbf{Y. Zhang}, N. Gatsis, and G. Giannakis,
``Disaggregated Bundle Methods for Distributed Market Clearing in Power Networks,"
\emph{Proc. of 1st Global Conf. on Signal and Info. Processing (GlobalSIP)}, Austin, TX, Dec. 2013 (invited).
\item[C6.] V. Kekatos, \textbf{Y. Zhang}, and G. Giannakis,
``Low-Rank Kernel Learning for Electricity Market Inference,"
\emph{Proc. of Asilomar Conf. on Signals, Systems, and Computers (Asilomar)}, Pacific Grove, CA, Nov. 2013.
\item[C5.] \textbf{Y. Zhang} and G. Giannakis,
``Robust Optimal Power Flow with Wind Integration using Conditional Value-at-Risk,"
\emph{Proc. of 4th Intl. Conf. on Smart Grid Commun. (SGComm)}, Vancouver, Canada, Oct. 2013.
\item[C4.] \textbf{Y. Zhang}, N. Gatsis, V. Kekatos, and G. Giannakis,
``Risk-aware Management of Distributed Energy Resources,"
\emph{Proc. of 18th Intl. Conf. on Digital Signal Process. (DSP)}, Santorini Island, Greece, Jul. 2013 (invited).
\item[C3.] \textbf{Y. Zhang}, N. Gatsis, and G. Giannakis,
``Risk-Constrained Energy Management with Multiple Wind Farms,"
\emph{Proc. of 4th IEEE-PES on Innovative Smart Grid Tech. (ISGT)}, Washington, D.C., Feb. 2013.
\item[C2.] \textbf{Y. Zhang}, N. Gatsis, and G. Giannakis,
``Robust Distributed Energy Management for Microgrids with Renewables,"
\emph{Proc. of 3rd Intl. Conf. on Smart Grid Commun. (SGComm)}, Tainan, Taiwan, Nov. 2012.
\item[C1.] \textbf{Y. Zhang}, E. Dall'Anese, and G. Giannakis,
``Distributed Robust Beamforming for MIMO Cognitive Networks,"
\emph{Proc. of Intl. Conf. on Acoustics, Speech, and Signal Process. (ICASSP)}, Kyoto, Japan, Mar. 2012.
\end{enumerate}
%{\bf Book chapters}
%
%\vspace{.5cm}
%
%
%\begin{enumerate}
%
%\item[B1.] S.-J. Kim, E. Dall'Anese, J. A. Bazerque, K. Rajawat, and G. Giannakis,
%``Advances in Spectrum Sensing and Cross-Layer Design in Cognitive Radio Networks,''
%\emph{Eurasip, E-Reference Signal Processing}, Nov. 2012.
%
%\end{enumerate}
%
%
%{\bf Research monographs}
%
%\vspace{.3cm}
%
%\begin{enumerate}
%
%\item[M1.] G. Giannakis, S.-J. Kim, and E. Dall'Anese, ``RF Cartography for Cognitive Radios: Spatio-Temporal Sensing and Resource Allocation,'' \emph{Foundations and Trends in Communications and Information Theory} (EiC S. Verd\'u),
% 2013, submitted.
%
%\end{enumerate}
%
%\vspace{.2cm}
{\bf Technical Reports}
\vspace{.2cm}
\begin{enumerate}
\item[R2.] \textbf{Y. Zhang}, R. Madani, and J. Lavaei,
``Conic Relaxations for Power System State Estimation with Line Measurements,"
Apr. 2017, [Online]. Available: \texttt{arxiv.org/abs/1704.00133}
\item[R1.] \textbf{Y. Zhang}, N. Gatsis, and G. Giannakis,
``Robust Energy Management for Microgrids With High-Penetration Renewables,"
Jul. 2012, [Online]. Available: \texttt{arxiv.org/abs/1207.4831}
\end{enumerate}
\vspace{.2cm}
{\bf Thesis}
\vspace{.2cm}
\begin{enumerate}
\item[T1.] \textbf{Y. Zhang}, ``Resource Management for Sustainable Power Grids and Wireless Networks: Distributed and Robust Designs,'' Ph.D. Thesis, ECE Department, University of Minnesota, Jul. 2015.\\
Committee: Prodromos Daoutidis, Sairaj Dhople, Georgios Giannakis, and Mostafa Kaveh (chair).
\end{enumerate}
\vspace{.2cm}
{\bf Patents}
\vspace{.2cm}
\begin{enumerate}
\item[P5.] C. Xu, Y. Wu, \textbf{Y. Zhang}, H.-W. Luo, and H. Yu,
``Eight-Antenna Channel Estimation Method for OFDM Demodulating End,'' CHN invention patent,
pub. No.: CN 101667981 B (grant), 2012-10-31.
\item[P4.] \textbf{Y. Zhang}, H.-W. Luo, L. Chen, C. Xu, and W. Guan,
``A Low Complexity User Selection Method in Multiuser MIMO Broadcasting Channels,'' CHN invention patent,
pub. No.: CN 101499837 B (grant), 2012-09-05.
\item[P3.] L. Chen, H.-W. Luo, F. She, \textbf{Y. Zhang}, and J. Zhang,
``Method and Device of Space Division Multiple Address System Based on Codebook
of Optimal Quantization Error,'' CHN invention patent.
pub. No.: CN 101286756 B (grant), 2012-02-29.
\item[P2.] C. Xu, Y. Wu, \textbf{Y. Zhang}, H.-W. Luo, and H. Yu,
``Multi-User Multi-Antenna Two-Stage Limited Feedback Method,'' CHN invention patent.
pub. No.: CN 101695008 A, (app.), 2010-04-14.
\item[P1.] C. Xu, H.-W. Luo, L. Chen, \textbf{Y. Zhang}, and W. Guan,
``Method for Rapidly Matching Codebook of MIMO System Subscriber Terminal,'' CHN invention patent.
pub. No.: CN 101465684 A, (app.), 2009-06-24.
\end{enumerate}
\vspace{.3cm}
\section{\sc{\bf{HONORS \& AWARDS}}}
\begin{list2}
\item IEEE Signal Processing Society (SPS) Travel Grant, 2014
\item SIAM Student Travel Award, 2014
\item PhD Student Travel Fellowship, Dept of ECE, UMN, 2014
\item TCIPG Summer School Scholarship, 2013
\item ECE Departmental Fellowship, UMN, 2010
\item Shanghai Outstanding Graduate, 2010
\item Merit Student of SJTU, 2009
\item Huawei Scholarship, 2009
\item Infineon Technologies Scholarship, 2009
\item First-Class/Second-Class Academic Excellence Scholarship, SJTU, 2008/2007
\item The Valedictorian at the WUT Commencement 2006
\item Merit Student of Hubei Province, 2006
\item Outstanding Graduate of WUT, 2005
\item Pacemaker to Merit Student of WUT, Highest Honor (1\textperthousand), 2005
\item Outstanding Merit Student of WUT, First-Class Scholarship, 2004
\item Merit Student of WUT, Second-Class Scholarship, 2003
\end{list2}
\vspace{.3cm}
\section{\sc{\bf{FUNDING EXPERIENCE}}}
\begin{list2}
\item PI for NSF-CMMI Collaborative Research: ``EAGER: Holistic Blockchain Based Platform for
Distributed and Intelligent Energy Communities,'' Feb. 2018. The proposal summary is under review (co-PI: Paras Mandal).
%\item PI for the ARPR-E OPEN 2018 (DE-FOA-0001858): ``Smart Planning, Monitoring, and Operation for
%Networked Microgrids via Deep Learning,'' Feb. 2018. Concept paper is submitted(co-PIs: Patrick Mantey and Paras Mandal).
\item Co-PI for the 2018 CITRIS Seed Funding ``Data Analytics to Enable Sustainability for Power Systems,'' Jan. 2018. This proposal is under review (PI: Somayeh Sojoudi, co-PI: Javad Lavaei).
\item Input for the NSF-CCSS proposal ``Smart-Grid Powered Green Communications
in Heterogeneous Networks,'' Jun. 2015.
This proposal was funded under grant number ECCS-1508993 (PI: Xin Wang, co-PI: Georgios Giannakis).
\item Input for the NSF-CCF proposal ``From Communication to Power Networks:
Adaptive Energy Management for Power Systems with Renewables,'' Sep. 2014.
This proposal was funded under grant number CCF-1423316 (PI: Georgios Giannakis).
\item Input for the NSF-CyberSEES proposal ``Tenable Power Distribution Networks,'' Sep. 2014.
This proposal was funded under grant number CCF-1442686 (PI: Georgios Giannakis, Co-PI: Sairaj Dhople).
\item Input for the NSF-RIPS proposal ``Distributed Power and Fuels in Rural Grids,''
Mar. 2014 (PI: Georgios Giannakis).
\item Input for the NSF-EPAS proposal ``Robust Energy Control for Microgrids with Renewables,''
Oct. 2012 (PI: Georgios Giannakis).
\item Input for the NSF-CPS proposal ``Inference and Management for the Power Grid: Distributed and Robust Designs,''
Mar. 2012 (PI: Georgios Giannakis).
\end{list2}
\vspace{.3cm}
%\section{\sc{\bf{RESEARCH EXPERIENCE}}}
%
%\begin{list2}
%
%\item Postdoctoral Employee, LBNL, mentors: Anand Gopal \& Timothy Lipman
%\begin{list3}
%\item Proposed a novel fleet management for electric vehicle networks.
%\end{list3}
%
%\item Postdoctoral Scholar, UC Berkeley, advisor: Javad Lavaei
%\begin{list3}
%\item Proposed a novel convexification framework for solving the power system state estimation problem,
%and established its theoretical performance bound.
%\item Investigated the SDP relaxation and randomization algorithms for the optimal sensor placement problem.
%\end{list3}
%
%
%\item Postdoctoral Assoc. \& Research Asst., UMN, advisor: Georgios Giannakis
%\begin{list3}
%\item Proposed robust and stochastic resource allocation frameworks for smart-grid powered
%complex networks of wireless communications and data centers.
%\item Developed robust regression for sketching big data with model mismatch.
%\item Proposed novel models and algorithms for distributed robust and stochastic energy management with
%renewable energy sources.
%\item Proposed fast algorithms based on the bundle method for distributed market clearing with high-penetration wind power and large-scale demand response.
%\item Devised efficient forecasting approaches for electricity prices via multi-kernel learning, and for wind generation using online dictionary learning.
%\item Proposed optimal distributed beamforming algorithms for MIMO cognitive radios with channel uncertainties.
%\end{list3}
%
%
%
%\item Research Intern, USCRC-ABB, mentors: Mirrasoul Mousavi \& James Stoupis
%\begin{list3}
%\item Proposed efficient occupancy sensing approaches via wireless sensor selection and localization for building automation systems.
%\end{list3}
%
%
%\item Research Student, UMN, mentor: Tom Luo
%\begin{list3}
%\item Researched on optimal transceiver design for wireless communication networks.
%\item Studied complexity analysis and efficient algorithms for coordinated beamforming.
%\end{list3}
%
%
%
%\item Research Intern, Intel Asia-Pacific R\&D Ltd., mentor: Rongzhen Yang
%\begin{list3}
%\item Researched on RBIR for the PHY abstraction between link level and system level.
%\item Developed a link-level simulation platform for the IEEE 802.16m standard.
%\end{list3}
%
%
%\item Research Assistant, SJTU, advisor: Hanwen Luo
%\begin{list3}
%\item Proposed novel relay beamforming via successive interference cancellation.
%\item Developed greedy relay selection and utility based low complexity user scheduling algorithms for MIMO systems.
%\end{list3}
%
%\end{list2}
%
%\vspace{.3cm}
\section{\sc{\bf{TEACHING EXPERIENCE}}}
\begin{list2}
\item EE293/183 Learning, Optim., and Control for Electric Power Syst., Winter 2018, UCSC
\item EE290 EE Graduate Seminar, Academic year 2017-2018, UCSC
\item Lecturer, IEOR 290 Control and Optim. for Power Syst., Spring 2017, 2016, UCB
\item TA, EE8581 Detection and Estimation Theory, Spring 2015, UMN
\item TA, EE3005 Fundamental of Electrical Engr., Fall 2010 \& Spring 2011, UMN
\item TA, ES320 Fundamental Circuits for Commun., Fall 2008, SJTU
\end{list2}
\vspace{.3cm}
\section{\sc{\bf{TALKS}}}
\vspace{.2cm}
\begin{list2}
\item CROSS Symposium, UC Santa Cruz, CA, 2017.
\item Nicholas school of the Environment, Duke University, NC, 2017.
\item ECE Department, Missouri University of Science and Technology, MO, 2017.
\item EE Department, UC Santa Cruz, CA, 2017.
\item Department of Engineering Technology, University of Houston, TX, 2017.
\item ECE Department, New York University, NY, 2017.
\item eCAL Seminar, UC Berkeley, CA, 2016.
\item ECE Department, University of Louisville, KY, 2016.
\item ECE Department, Southern Illinois University, IL, 2015.
\item Foundations of Resilient CybEr-physical Systems (FORCES), UC Berkeley, CA, 2015.
\item WindLogics Inc., Saint Paul, MN, 2013.
\item Honeywell, Minneapolis, MN, 2012.
\end{list2}
\vspace{.3cm}
\section{\sc{\bf{SERVICE}}}
\begin{list2}
\item Academic reviews:
\begin{list3}
\item \emph{IEEE Transactions on Power Systems}
\item \emph{IEEE Transactions on Smart Grid}
\item \emph{IEEE Transactions on Sustainable Energy}
\item \emph{IEEE Transactions on Automatic Control}
\item \emph{IEEE Transactions on Control Systems Technology}
\item \emph{IEEE Journal on Selected Areas in Communications}
\item \emph{IEEE Transactions on Signal Processing}
\item \emph{IEEE Transactions on Communications}
\item \emph{IEEE Transactions on Wireless Communications}
\item \emph{IEEE Transactions on Industrial Electronics}
\item \emph{IEEE Transactions on Vehicular Technology}
\item \emph{IEEE Transactions on Systems, Man, and Cybernetics: Systems}
\item \emph{IEEE Signal Processing Letters}
\item \emph{IEEE Wireless Communications Letters}
\item \emph{IEEE Power Engineering Letters}
\item \emph{IET Generation, Transmission \& Distribution}
\item \emph{International Journal of Electrical Power \& Energy Systems}
\item \emph{International Transactions on Electrical Energy Systems}
%\item \emph{MDPI - Energies}
\item \emph{International Journal of Sustainable Transportation}
\item \emph{Transportmetrica A: Transport Science}
\item \emph{ACM Trans. on Modeling and Performance Evaluation of Computing Systems}
\item \emph{Annals of Operations Research}
\item \emph{Wiley Complexity}
\item \emph{IEEE Conference on Decision and Control}
\item \emph{IEEE American Control Conference}
\item \emph{IEEE Intl. Conference on Smart Grid Communications}
\item \emph{IEEE Intl. Conference on Acoustics, Speech and Signal Processing}
\item \emph{IEEE Global Communications Conference}
\item \emph{IEEE Intl. Workshop on Computational Advances in Multi-Sensor Adaptive Processing}
\item \emph{IEEE Intl. Conference on Computing, Networking and Communications}
\end{list3}
\vspace{4mm}
\item Session Chair or Technical Program Committee:
\begin{list3}
\item \emph{IEEE Global Conference on Signal and Information Processing} (2017)
\item \emph{IEEE International Conference on Smart Grid Communications} (2017, 2016)
\item \emph{IEEE Conference on Decision and Control} (2014)
\item \emph{INFORMS Annual Meeting} (2017, 2016, 2015)
\end{list3}
\end{list2}
%
\vspace{0.4cm}
\section{\sc{\bf{MEMBERSHIP}}}
\begin{list2}
\item Institute of Electrical and Electronics Engineers (IEEE)
\item Institute for Operations Research and the Management Sciences (INFORMS)
\item The New York Academy of Sciences (NYAS)
\item Advisory Board Member of Swiss Innovation Valley (SIV)
\end{list2}
\vspace{0.4cm}
%\section{\sc{\bf{REFERENCES}}}
%%\begin{list2}
%%\item \textbf{Prof. Antonio Conejo}, \texttt{conejonavarro.1@osu.edu}, 614-292-6736, Ohio State Univ.
%%\item \textbf{Prof. Georgios Giannakis}, \texttt{georgios@umn.edu}, 612-626-7781, Univ. of Minnesota
%%%\item \textbf{Prof. Vassilis Kekatos}, \texttt{kekatos@vt.edu}, 540-231-1672, Virginia Tech.
%%\item\textbf{Prof. Javad Lavaei}, \texttt{lavaei@berkeley.edu}, 510-642-2497, UC Berkeley
%%\item \textbf{Prof. Shmuel Oren}, \texttt{oren@ieor.berkeley.edu}, 510-642-1836, UC Berkeley
%%\end{list2}
%
%
%
%
%\begin{tabular}{@{}p{2.75in}p{2.75in}}
%\textbf{Prof. Georgios Giannakis} \\
%McKnight Presidential Endowed Chair \\
%ECE Dept., University of Minnesota \\
%Rm 495, 117 Pleasant St. SE \\
%Minneapolis, MN 55455, USA \\
%{\it Phone:} (612) 626-7781 \\
%{\it Email:} georgios@umn.edu \\
%
%\\ \\
%
%
%\textbf{Prof. Javad Lavaei} \\
%IEOR Dept., UC Berkeley \\
%4121 Etcheverry Hall \\
%Berkeley, CA 94720, USA \\
%{\it Phone:} (510) 642-2497 \\
%{\it Email:} lavaei@berkeley.edu \\
%
%
%\\ \\
%
%
%\textbf{Prof. Shmuel Oren} \\
%Earl J. Isaac Professor \\
%IEOR Dept., UC Berkeley \\
%4135 Etcheverry Hall \\
%Berkeley, CA 94720, USA \\
%{\it Phone:} (510) 642-1836 \\
%{\it Email:} oren@ieor.berkeley.edu \\
%
%
%\\ \\
%
%
%\textbf{Prof. Antonio Conejo} \\
%ISE \& ECE Dept., Ohio State University \\
%286 Baker Systems, 1971 Neil Ave. \\
%Columbus, OH 43210, USA \\
%{\it Phone:} (614) 292-6736 \\
%{\it Email:} conejonavarro.1@osu.edu \\
%
%
%\end{tabular}
\end{resume}
\end{document}
%Prof. Sairaj Dhople \\
%Dept. of Electrical and Computer Engr.\\
%University of Minnesota \\
%Rm 5-115, 200 Union St. SE \\
%Minneapolis, MN 55455, USA \\
%{\it Phone:} (612) 624-8837 \\
%{\it Email:} sdhople@umn.edu \\
%\vspace{1.0cm}
%
%Prof. Nikolaos Gatsis \\
%Dept. of Electrical and Computer Engr. \\
%University of Texas at San Antonio \\
%AET 2.356, One UTSA Circle, BSE 1.500 \\
%San Antonio, TX 78249, USA \\
%{\it Phone:} (210) 258-5519 \\
%{\it Email:} nikolaos.gatsis@utsa.edu
%
%
%\vspace{.3cm}
%
%
%\section{\sc Personal}
%\begin{list2}
%%\item Date of birth: April 9th, 1985.
%%\item Marital status: single.
%\item Gender: male.
%\item Citizenship: Chinese.
%\item Languages: fluent in Chinese and English.
%\item Interests: tennis, swimming, climbing, and photography.
%\end{list2}
|
{"hexsha": "b6a2df32e7dcfd3dfb04cde10f97d9863c263a46", "size": 25785, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "public/cv/mycv_latest/OLD/CV_YuZhang.tex", "max_stars_repo_name": "joshtai/yzhangweb", "max_stars_repo_head_hexsha": "d113f20927c17f11f681d6a8eb67e57b9ecd5984", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "public/cv/mycv_latest/OLD/CV_YuZhang.tex", "max_issues_repo_name": "joshtai/yzhangweb", "max_issues_repo_head_hexsha": "d113f20927c17f11f681d6a8eb67e57b9ecd5984", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-02-25T21:54:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-26T04:56:50.000Z", "max_forks_repo_path": "public/cv/mycv_latest/OLD/CV_YuZhang.tex", "max_forks_repo_name": "joshtai/yzhangweb", "max_forks_repo_head_hexsha": "d113f20927c17f11f681d6a8eb67e57b9ecd5984", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5226628895, "max_line_length": 234, "alphanum_fraction": 0.7220864844, "num_tokens": 7849}
|
from copy import deepcopy
import imageio
import numpy as np
import os
from PIL import Image
# Local Modules
from simulation import SphereBody, Simulation, State, SystemState
ANIM_OUT_DIR = "animation_out"
ANIM_VID_FILENAME = ANIM_OUT_DIR + "/animation.mp4"
MAX_QUALITY = 95
V0 = np.array([35, 0, 0], dtype=float)
class Transform:
def __init__(self, translate=None, rotate=None, scale=None):
self.translate = translate
self.rotate = rotate
self.scale = scale
class KeyFrame:
def __init__(self, obj, frame_number, transform=None):
self.obj = obj
self.frame_number = frame_number
if transform is None:
self.transform = Transform()
else:
self.transform = transform
def add_translate(self, translate):
self.transform.translate = translate
def add_rotate(self, rotate):
self.transform.rotate = rotate
def add_scale(self, scale):
self.transform.scale = scale
def initialize_simulation(sphere):
t = 0
initial_state = SystemState(t)
rigid_bodies = [SphereBody(sphere)]
v = V0
vx, vy, vz = v[0], v[1], v[2]
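    # angular velocity for rolling without slipping (|w| = |v| / radius),
    # with component signs chosen to match the direction of travel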
wz = -1 * ((vx + vy) / sphere.radius)
wx = (vz + vy) / sphere.radius
wy = 0
w = np.array([wx, wy, wz])
body_initial_state = State(sphere.position, sphere.rotation, v, w)
initial_state.add(body_initial_state)
return initial_state, rigid_bodies
def create_keyframes_from_states(system_states, rigid_bodies):
"""
    Returns a list in which each element is a list of keyframes that share
    the same frame number.
"""
keyframes = []
for i in range(len(system_states)):
system_state = system_states[i]
current_keyframes = []
for j in range(len(system_state.bodies_state)):
state = system_state.bodies_state[j]
transform = Transform(state.pos, state.rot, state.scale)
obj = rigid_bodies[j].obj
keyframe = KeyFrame(obj, i, transform)
current_keyframes.append(keyframe)
keyframes.append(current_keyframes)
return keyframes
class Animation:
"""
    Holds the parameters and functions needed to create an animation:
    run a simulation for a scene, build a series of keyframes from the
    simulation data, turn the keyframes into a series of scenes (one new
    scene per frame), and render each scene into an image.
"""
def __init__(self, duration, screen_size, fps, scene, render):
self.duration = duration
self.screen_size = screen_size
self.fps = fps
self.scene = scene
# This is the render function to use
self.render = render
def create_scene_from_keyframes(self, keyframes):
"""
Create a scene from the given keyframes that belong to the same frame
number.
"""
new_scene = deepcopy(self.scene)
for keyframe in keyframes:
new_obj = new_scene.objects[keyframe.obj.ID]
new_obj.position = keyframe.transform.translate
new_obj.rotation = keyframe.transform.rotate
            # scale could be applied here as well via keyframe.transform.scale
return new_scene
def create(self, sphere, camera):
print("Creating animation...")
time_step = 1.0 / self.fps
initial_state, rigid_bodies = initialize_simulation(sphere)
simulation = Simulation(
initial_state, rigid_bodies, self.duration, time_step
)
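        # constant deceleration bringing the sphere from V0 to rest at the
        # end of the animation: v(t) = V0 + f*t, so v(duration) = 0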
f = -1 * (V0 / self.duration)
print("Running simulation...")
system_states = simulation.run(f)
keyframes = create_keyframes_from_states(
system_states, rigid_bodies
)
        # Write video (make sure the output directory exists first)
        if not os.path.exists(ANIM_OUT_DIR):
            os.mkdir(ANIM_OUT_DIR)
        writer = imageio.get_writer(ANIM_VID_FILENAME, fps=self.fps)
        for i, current_keyframes in enumerate(keyframes):
            # Create a new scene from this frame's keyframes and render it
new_scene = self.create_scene_from_keyframes(current_keyframes)
w, h = self.screen_size
print("Rendering frame={}/{}...".format(i, len(keyframes) - 1))
img_arr = self.render(new_scene, camera, h, w)
# Append rendered image into video
writer.append_data(img_arr)
# Write rendered image into image file
img = Image.fromarray(img_arr)
output_img_filename = "{}/{}.jpg".format(ANIM_OUT_DIR, i)
img.save(output_img_filename, quality=MAX_QUALITY)
print("Rendered image saved in {}".format(output_img_filename))
writer.close()
print("Animation video rendered in {}".format(ANIM_VID_FILENAME))
|
{"hexsha": "3775a2a7b11d471b1a8274a63efae84d3ecaa56f", "size": 4810, "ext": "py", "lang": "Python", "max_stars_repo_path": "animation.py", "max_stars_repo_name": "thinhnguyenuit/sombra", "max_stars_repo_head_hexsha": "5176d264508dd5cce780dc63f1dd948d66b189e8", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-06-20T00:58:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T21:36:12.000Z", "max_issues_repo_path": "animation.py", "max_issues_repo_name": "thinhnguyenuit/sombra", "max_issues_repo_head_hexsha": "5176d264508dd5cce780dc63f1dd948d66b189e8", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "animation.py", "max_forks_repo_name": "thinhnguyenuit/sombra", "max_forks_repo_head_hexsha": "5176d264508dd5cce780dc63f1dd948d66b189e8", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-06-09T02:17:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-27T21:39:11.000Z", "avg_line_length": 34.6043165468, "max_line_length": 78, "alphanum_fraction": 0.641995842, "include": true, "reason": "import numpy", "num_tokens": 1089}
|
import numpy as np
import math
import time
from sklearn.cluster import MiniBatchKMeans, KMeans
class Top_Down(object):
    """
    Two-level (top-down) k-means clustering: the data is first split into
    ceil(sqrt(n_classes)) top-level clusters, and each of those is split
    again into the same number of sub-clusters, giving roughly n_classes
    clusters overall.
    """
def __init__(self,n_classes):
self.subcls = math.ceil(math.sqrt(n_classes))
self.top_K = KMeans(n_clusters=self.subcls,n_init=10,max_iter=300,n_jobs=-1,verbose=0,init='random')
self.down_Ks = []
for i in range(self.subcls):
self.down_Ks.append(KMeans(n_clusters=self.subcls,n_init=10,max_iter=100,n_jobs=-1,verbose=1,init='k-means++'))
        print('%d top classes and %d sub-classes for each top class' % (self.subcls, self.subcls))
def fit_predict(self,X):
        # output labels, aligned with the input sample order
self.labels = np.zeros((X.shape[0],))
# Top K-means
top_cls = self.top_K.fit_predict(X)
        # count the samples in each top-level cluster, then sort the sample
        # indices by cluster label so each cluster occupies a contiguous slice
n_cls = []
for i in range(self.subcls):
n_cls.append(np.count_nonzero(top_cls == i))
cls_idx = np.argsort(top_cls)
start_idx = 0
end_idx = 0
offset = 0
        # run a second k-means inside each top-level cluster; the offset keeps
        # sub-cluster labels globally unique
for i in range(self.subcls):
end_idx += n_cls[i]
X_idx = cls_idx[start_idx:end_idx]
subcls_label = self.down_Ks[i].fit_predict(X[X_idx])
self.labels[X_idx] = subcls_label + offset
start_idx = end_idx
offset += self.subcls
return self.labels
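# A minimal smoke test for Top_Down (an illustrative sketch, not in the
# original script; the sizes below are arbitrary and small so it finishes
# quickly, unlike the large demo under __main__ at the bottom of this file).
def _smoke_test_top_down(n_samples=2000, dim=32, n_classes=16):
    X = np.random.rand(n_samples, dim)
    labels = Top_Down(n_classes).fit_predict(X)
    assert labels.shape == (n_samples,)
    # n_classes=16 gives 4 top-level clusters, each split into 4 sub-clusters
    assert len(np.unique(labels)) <= math.ceil(math.sqrt(n_classes)) ** 2
    return labels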
class Seed_KMeans(object):
"""
Seeded k-means
Selecting some samples as seed to do k-means,
other samples are predicted using distance to existing k-means center
"""
def __init__(self, n_classes, n_seeds):
self.k_means = KMeans(n_clusters=n_classes, n_init=10, max_iter=300,
n_jobs=-1, verbose=1, init='random')
#self.k_means = MiniBatchKMeans(n_clusters=n_classes, max_iter=500, n_init=15,
# init_size=n_classes, verbose=1, max_no_improvement=250)
self.n_seeds = n_seeds
def fit_predict(self, X):
self.labels_ = np.zeros(X.shape[0])
seed = np.random.permutation(X.shape[0])[:self.n_seeds]
exclude_seed = np.ones(X.shape[0])
exclude_seed[seed] = 0
exclude_seed = np.where(exclude_seed)[0]
self.labels_[seed] = self.k_means.fit_predict(X[seed, :])
self.labels_[exclude_seed] = self.k_means.predict(X[exclude_seed, :])
return self.labels_
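# Illustrative sanity check for Seed_KMeans (a sketch, not in the original
# script): seed samples are labeled by fit_predict, the rest by predict, so
# every sample ends up with one of n_classes labels.
def _smoke_test_seed_kmeans(n_samples=2000, dim=16, n_classes=8, n_seeds=500):
    clustering = Seed_KMeans(n_classes=n_classes, n_seeds=n_seeds)
    labels = clustering.fit_predict(np.random.rand(n_samples, dim))
    assert labels.shape == (n_samples,)
    assert len(np.unique(labels)) <= n_classes
    return labels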
if __name__ == '__main__':
X = np.random.rand(120000,512)
# clustering = Seed_KMeans(n_classes=10000, n_seeds=30000)
clustering = Top_Down(n_classes=10000)
t0 = time.time()
labels = clustering.fit_predict(X)
print('%.2f min'%((time.time()-t0)/60))
    # print(labels)
|
{"hexsha": "487a130ac2be452fbc4e1c7460ddeb38dd35a76e", "size": 2711, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/AIC2018_iamai/ReID/clustering.py", "max_stars_repo_name": "gordonjun2/CenterTrack", "max_stars_repo_head_hexsha": "358f94c36ef03b8ae7d15d8a48fbf70fff937e79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-13T14:06:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-10T08:41:28.000Z", "max_issues_repo_path": "src/AIC2018_iamai/ReID/clustering.py", "max_issues_repo_name": "gordonjun2/CenterTrack", "max_issues_repo_head_hexsha": "358f94c36ef03b8ae7d15d8a48fbf70fff937e79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/AIC2018_iamai/ReID/clustering.py", "max_forks_repo_name": "gordonjun2/CenterTrack", "max_forks_repo_head_hexsha": "358f94c36ef03b8ae7d15d8a48fbf70fff937e79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7285714286, "max_line_length": 123, "alphanum_fraction": 0.6267060125, "include": true, "reason": "import numpy", "num_tokens": 712}
|
import argparse
import os
import pyproj
import numpy as np
from glob import glob
from tqdm import tqdm
from scipy.spatial.transform import Rotation as R
def config_parser():
parser = argparse.ArgumentParser(
description='Semantic label sampling script.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--label_path', type=str, default=None, required=True,
help='Directory where the point cloud npy files are in.')
parser.add_argument('--threshold', type=float, default=50.0,
help='Minimum value threshold for non-empty pixels percentage.')
opt = parser.parse_args()
return opt
def get_rotation_ned_in_ecef(lon, lat):
"""
@param: lon, lat Longitude and latitude in degree
@return: 3x3 rotation matrix of heading-pith-roll NED in ECEF coordinate system
Reference: https://apps.dtic.mil/dtic/tr/fulltext/u2/a484864.pdf, Section 4.3, 4.1
Reference: https://www.fossen.biz/wiley/ed2/Ch2.pdf, p29
"""
# describe NED in ECEF
lon = lon * np.pi / 180.0
lat = lat * np.pi / 180.0
# manual computation
R_N0 = np.array([[np.cos(lon), -np.sin(lon), 0],
[np.sin(lon), np.cos(lon), 0],
[0, 0, 1]])
R__E1 = np.array([[np.cos(-lat - np.pi / 2), 0, np.sin(-lat - np.pi / 2)],
[0, 1, 0],
[-np.sin(-lat - np.pi / 2), 0, np.cos(-lat - np.pi / 2)]])
NED = np.matmul(R_N0, R__E1)
    assert abs(np.linalg.det(NED) - 1.0) < 1e-6, \
        'NED in ECEF rotation mat. does not have unit determinant, it is: {:.2f}'.format(np.linalg.det(NED))
return NED
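# Small sanity check for the rotation above (an illustrative sketch, not part
# of the original script): a proper rotation matrix is orthonormal, and at
# lon = lat = 0 the NED "down" axis (third column) must point along -X in
# ECEF, i.e. toward the Earth's center.
def _check_ned_rotation():
    ned = get_rotation_ned_in_ecef(lon=0.0, lat=0.0)
    assert np.allclose(ned @ ned.T, np.eye(3))
    assert np.allclose(ned[:, 2], [-1.0, 0.0, 0.0])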
def ecef_to_geographic(x, y, z):
# Careful: here we need to use lat,lon
lat, lon, alt = pyproj.Transformer.from_crs("epsg:4978", "epsg:4979").transform(x, y, z)
return [lon, lat, alt]
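# Quick check (a sketch; 6378137.0 m is the WGS84 semi-major axis, not a value
# taken from the original script): a point on the equator at longitude 0 maps
# to lon = lat = 0 with an altitude of ~0 m.
def _check_ecef_to_geographic():
    lon, lat, alt = ecef_to_geographic(6378137.0, 0.0, 0.0)
    assert abs(lon) < 1e-9 and abs(lat) < 1e-9 and abs(alt) < 1e-6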
def get_pose_mat(cesium_pose):
"""
Get 4x4 homogeneous matrix from Cesium-defined pose
@input: cesium_pose 6d ndarray, [lat, lon, h, yaw, pitch, roll]
lat, lon, h are in ECEF coordinate system
yaw, pitch, roll are in degress
@output: 4x4 homogeneous extrinsic camera matrix
"""
x, y, z, yaw, pitch, roll = cesium_pose # no need to do local conversion when in ECEF
lon, lat, alt = ecef_to_geographic(x, y, z)
rot_ned_in_ecef = get_rotation_ned_in_ecef(lon, lat)
rot_pose_in_ned = R.from_euler('ZYX', [yaw, pitch, roll], degrees=True).as_matrix()
r = np.matmul(rot_ned_in_ecef, rot_pose_in_ned)
# transform coordinate system from NED to standard camera sys.
r = r[0:3, [1, 2, 0]]
r = np.concatenate((r, np.array([[x, y, z]]).transpose()), axis=1)
r = np.concatenate((r, np.array([[0, 0, 0, 1]])), axis=0)
return r
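# Illustrative structural check for get_pose_mat (a sketch; the test pose is
# arbitrary): the result must be a rigid-body transform, i.e. an orthonormal
# 3x3 rotation block on top of a [0, 0, 0, 1] bottom row.
def _check_pose_mat():
    test_pose = np.array([6378137.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    mat = get_pose_mat(test_pose)
    assert mat.shape == (4, 4)
    rot = mat[:3, :3]
    assert np.allclose(rot @ rot.T, np.eye(3), atol=1e-6)
    assert np.allclose(mat[3], [0.0, 0.0, 0.0, 1.0])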
def get_cam_mat(width, height, focal_length):
"""
Get intrinsic camera matrix
"""
cam_mat = np.eye(3, dtype=float)
cam_mat[0, 0] = focal_length
cam_mat[1, 1] = focal_length
cam_mat[0, 2] = width / 2
cam_mat[1, 2] = height / 2
return cam_mat
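# Worked example for the intrinsics (a sketch, mirroring the values used in
# main() below): with a 720x480 image and focal length 480, a point one unit
# in front of the camera on the optical axis projects to the image center.
def _check_cam_mat():
    cam_mat = get_cam_mat(width=720, height=480, focal_length=480)
    pixel = cam_mat @ np.array([0.0, 0.0, 1.0])
    pixel = pixel[:2] / pixel[2]
    assert np.allclose(pixel, [360.0, 240.0])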
def main():
args = config_parser()
print(args)
file_ls, pose_ls = [], []
for root, dirs, files in os.walk(args.label_path):
files.sort()
flag_pose = any([file_name.endswith('_poses.npy') for file_name in files])
if flag_pose:
files = [os.path.abspath(os.path.join(root, file_name)) for file_name in files
if file_name.endswith('_pc.npy')]
poses = np.load(glob(os.path.join(root, '*_poses.npy'))[0])
# ensure index consistency
for idx, file_name in enumerate(files):
assert '{:05d}'.format(idx) in os.path.basename(file_name)
file_ls.extend(files)
pose_ls.append(poses)
pose_ls = np.concatenate(pose_ls) # [X, 6]
assert len(file_ls) == len(pose_ls)
print("{:d} npy files to scan...".format(len(file_ls)))
dubious_data_ls = []
valid_rate_ls = []
xyz_min = np.array([float('inf'), float('inf'), float('inf')])
xyz_max = np.array([-float('inf'), -float('inf'), -float('inf')])
reproj_error_ls = []
cam_mat = get_cam_mat(720, 480, 480)
    # generate the grid of target pixel positions (one (x, y) per pixel) that
    # reprojected world points are compared against; np.meshgrid would build
    # the same grid in a single call
pixel_grid = np.zeros((2, 480, 720))
for x in range(0, pixel_grid.shape[2]):
for y in range(0, pixel_grid.shape[1]):
pixel_grid[0, y, x] = x
pixel_grid[1, y, x] = y
pixel_grid = pixel_grid.reshape(2, -1) # [2, H*W]
for i, (npy_file, cam_pose) in tqdm(enumerate(zip(file_ls, pose_ls))):
cam_to_world = get_pose_mat(cam_pose)
        # express the pose relative to the camera position; the point cloud is
        # demeaned by the same origin below, which keeps the huge ECEF
        # magnitudes out of the reprojection arithmetic
        this_origin = cam_to_world[:3, -1].copy()
        cam_to_world[:3, -1] -= this_origin
this_pc = np.load(npy_file) # [H, W, 3]
mask_nodata = this_pc[:, :, 0] == -1 # [H, W]
mask_has_data = np.logical_not(mask_nodata)
# check reprojection error
reproj_pc = this_pc - this_origin[None, None, :] # demean, [H, W, 3]
reproj_pc = reproj_pc.reshape(-1, 3) # [H*W, 3]
world_to_cam = np.linalg.inv(cam_to_world) # [4, 4]
world_coords = reproj_pc.transpose(1, 0) # [3, H*W]
ones = np.ones([1, world_coords.shape[1]]) # [1, H*W]
world_coords = np.concatenate([world_coords, ones], axis=0) # [4, H*W]
cam_coords = np.matmul(world_to_cam[:3, :], world_coords) # [3, H*W]
pixel_coords = np.matmul(cam_mat, cam_coords) # [3, H*W]
pixel_coords = pixel_coords[0:2] / pixel_coords[2] # [2, H*W]
reproj_error = np.linalg.norm(pixel_coords - pixel_grid, axis=0) # [H*W]
        reproj_error = reproj_error.reshape(480, 720)[mask_has_data]  # [n_valid]
reproj_error_ls.append(np.mean(reproj_error))
# check non-empty pixel rate
valid_rate = np.sum(this_pc[:, :, 0] != -1) / (this_pc.shape[0] * this_pc.shape[1])
valid_rate *= 100.0
this_pc = this_pc[this_pc[:, :, 0] != -1] # [X, 3]
xyz_min = np.minimum(xyz_min, np.min(this_pc.reshape(-1, 3), axis=0))
xyz_max = np.maximum(xyz_max, np.max(this_pc.reshape(-1, 3), axis=0))
valid_rate_ls.append(valid_rate)
if valid_rate < args.threshold:
print("Point cloud {:s} valid rate {:.1f}% lower than threshold {:.1f}%.".format(
npy_file, valid_rate, args.threshold))
dubious_data_ls.append(npy_file + '\n')
reproj_error_ls = np.array(reproj_error_ls)
print("Valid rate statistics over {:d} images, mean: {:.2f}%, std: {:.2f}%, median: {:.2f}%".format(
len(valid_rate_ls), np.mean(valid_rate_ls), np.std(valid_rate_ls), np.median(valid_rate_ls)))
print('Min and Max boundary values: min: {}, max: {}'.format(xyz_min, xyz_max))
print("Reprojection error statistics: mean: {:.2f} px, std: {:.2f} px, median: {:.2f} px, max: {:.2f} px".
format(np.mean(reproj_error_ls), np.std(reproj_error_ls),
np.median(reproj_error_ls), np.max(reproj_error_ls)))
out_path = os.path.join(args.label_path, 'npy_statistics.npz')
np.savez(out_path,
valid_rate=np.array(valid_rate_ls),
reproj_error=np.array(reproj_error_ls),
file_name=file_ls)
print("Overall statistics is saved to {:s}".format(out_path))
if len(dubious_data_ls):
out_path = os.path.join(args.label_path, 'dubious_data.txt')
print("{:d} possibly wrong data points are recorded at {:s}".format(len(dubious_data_ls), out_path))
with open(out_path, 'w') as f:
f.writelines(dubious_data_ls)
else:
print("All point clouds' valid rates are higher than the threshold. Good data!")
if __name__ == '__main__':
main()
|
{"hexsha": "0fe2dbcdf94649c3d18a19e0675bd786612a2938", "size": 7721, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/tools/scan_npy_pointcloud.py", "max_stars_repo_name": "Shanci-Li/TOPO-DataGen", "max_stars_repo_head_hexsha": "bc2be65bbcca4cb415e2f7d19cb3c3d620279ddc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-01-23T01:43:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T10:06:08.000Z", "max_issues_repo_path": "scripts/tools/scan_npy_pointcloud.py", "max_issues_repo_name": "Shanci-Li/TOPO-DataGen", "max_issues_repo_head_hexsha": "bc2be65bbcca4cb415e2f7d19cb3c3d620279ddc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/tools/scan_npy_pointcloud.py", "max_forks_repo_name": "Shanci-Li/TOPO-DataGen", "max_forks_repo_head_hexsha": "bc2be65bbcca4cb415e2f7d19cb3c3d620279ddc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-08T16:43:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T16:43:19.000Z", "avg_line_length": 40.2135416667, "max_line_length": 110, "alphanum_fraction": 0.6131330139, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2242}
|