hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
61994048e974aac7ccedc29d996420c49db567d8 | 879 | py | Python | examples/renderimage.py | acgessler/pysfml2-cython | dfb81e399af2eb389a1953bbe78c2e73778e3440 | [
"Zlib",
"BSD-2-Clause"
] | 1 | 2019-08-16T16:33:12.000Z | 2019-08-16T16:33:12.000Z | examples/renderimage.py | acgessler/pysfml2-cython | dfb81e399af2eb389a1953bbe78c2e73778e3440 | [
"Zlib",
"BSD-2-Clause"
] | null | null | null | examples/renderimage.py | acgessler/pysfml2-cython | dfb81e399af2eb389a1953bbe78c2e73778e3440 | [
"Zlib",
"BSD-2-Clause"
] | null | null | null | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
import sf
import sys
def main():
window = sf.RenderWindow(sf.VideoMode(640, 480), 'RenderImage example')
window.framerate_limit = 60
running = True
rect0 = sf.Shape.rectangle(5, 5, 90, 50, sf.Color.GREEN, 2, sf.Color.BLUE)
rect1 = sf.Shape.rectangle(20.0, 30.0, 50.0, 50.0, sf.Color.CYAN)
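    # Draw both shapes into an off-screen render texture with a transparent background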
ri = sf.RenderTexture(110, 110)
ri.clear(sf.Color(0, 0, 0, 0))
ri.draw(rect0)
ri.draw(rect1)
ri.display()
s = sf.Sprite(ri.texture)
s.origin = (55, 55)
s.position = (320, 240)
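    # Main loop: rotate the sprite slightly each frame and redraw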
while running:
for event in window.iter_events():
if event.type == sf.Event.CLOSED:
running = False
window.clear(sf.Color.WHITE)
s.rotate(5)
window.draw(s)
window.display()
window.close()
if __name__ == '__main__':
main()
| 23.131579 | 78 | 0.583618 | 127 | 879 | 3.96063 | 0.527559 | 0.069583 | 0.063618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 0.262799 | 879 | 37 | 79 | 23.756757 | 0.692901 | 0.050057 | 0 | 0 | 0 | 0 | 0.032413 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
619fe24fa47471571e21443afa6062ff41ae9a71 | 453 | py | Python | src/settings.py | tokku5552/google-photo-backup | 6508f98d979faea9de617af91e7f660b2b5de4d1 | [
"MIT"
] | 1 | 2022-03-06T22:55:02.000Z | 2022-03-06T22:55:02.000Z | src/settings.py | tokku5552/google-photo-backup | 6508f98d979faea9de617af91e7f660b2b5de4d1 | [
"MIT"
] | 3 | 2021-09-03T15:28:31.000Z | 2021-09-09T14:22:52.000Z | src/settings.py | tokku5552/google-photo-backup | 6508f98d979faea9de617af91e7f660b2b5de4d1 | [
"MIT"
] | null | null | null | # -*- coding: utf_8 -*-
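# Google Photos Library API settings used by the backup script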
SCOPES = ['https://www.googleapis.com/auth/photoslibrary']
API_SERVICE_NAME = 'photoslibrary'
API_VERSION = 'v1'
CLIENT_SECRET_FILE = 'client_secret.json'
CREDENTIAL_FILE = 'credential.json'
AQUIRED_MEDIA_LIST = 'aquired_list.json'
TMP_DIR = 'tmp'
DESTINATION_DIR = '/gpbk'
QUERY_FILTER = True
PAST_YEARS = 0
PAST_MONTHS = 1
PAST_DAYS = 0
LOGGING_LEVEL = 20 # DEBUG=10,INFO=20
LOG_FILENAME = '/var/log/google_photos_backup.log'
| 28.3125 | 58 | 0.754967 | 66 | 453 | 4.863636 | 0.727273 | 0.099688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027295 | 0.110375 | 453 | 15 | 59 | 30.2 | 0.769231 | 0.083885 | 0 | 0 | 0 | 0 | 0.366505 | 0.080097 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61a1e5af27a2cafee845ea0db041e5f1132696f9 | 10,968 | py | Python | marketplace/views/views_internalpackage.py | MOOCworkbench/MOOCworkbench | c478dd4f185c50e0a48319e2b30d418533c32a34 | [
"MIT"
] | null | null | null | marketplace/views/views_internalpackage.py | MOOCworkbench/MOOCworkbench | c478dd4f185c50e0a48319e2b30d418533c32a34 | [
"MIT"
] | 1 | 2017-07-09T17:38:21.000Z | 2017-07-09T17:38:22.000Z | marketplace/views/views_internalpackage.py | MOOCworkbench/MOOCworkbench | c478dd4f185c50e0a48319e2b30d418533c32a34 | [
"MIT"
] | null | null | null | import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import CreateView, DetailView, UpdateView, View
from django.views.generic.list import ListView
from markdown2 import Markdown
from experiments_manager.helper import verify_and_get_experiment
from experiments_manager.mixins import ActiveExperimentsList
from experiments_manager.models import ChosenExperimentSteps
from git_manager.helpers.github_helper import GitHubHelper, get_github_helper
from git_manager.mixins.repo_file_list import get_files_for_repository
from helpers.helper_mixins import ExperimentPackageTypeMixin
from marketplace.forms import InternalPackageForm
from marketplace.helpers.helper import (create_tag_for_package_version,
update_setup_py_with_new_version)
from marketplace.mixins import IsInternalPackageMixin, ObjectTypeIdMixin
from marketplace.models import InternalPackage, PackageResource, PackageVersion
from marketplace.tasks import (task_create_package_from_experiment,
task_publish_update_package,
task_remove_package,
task_create_package)
from requirements_manager.helper import add_internalpackage_to_experiment
from user_manager.models import get_workbench_user
logger = logging.getLogger(__name__)
class InternalPackageBaseView(ObjectTypeIdMixin, IsInternalPackageMixin):
class Meta:
abstract = True
class InternalPackageCreateView(ExperimentPackageTypeMixin, CreateView):
"""View for InternalPackage Create, this view is used for creating an empty package from the Packages index"""
model = InternalPackage
form_class = InternalPackageForm
template_name = 'marketplace/package_create/package_form.html'
success_url = '/packages/new/status'
def get_context_data(self, **kwargs):
context = super(InternalPackageCreateView, self).get_context_data(**kwargs)
logger.info('%s started on package creation for own code', self.request.user)
return context
def form_valid(self, form):
form.instance.owner = get_workbench_user(self.request.user)
form.instance.template_id = 1
response = super(InternalPackageCreateView, self).form_valid(form)
task_create_package.delay(form.instance.pk)
return response
class InternalPackageCreateFromExperimentView(ExperimentPackageTypeMixin, CreateView):
"""View of InternalPackage Create from Experiment. After completing an experiment step, if users wish, they can create
a package and are redirected to this view."""
model = InternalPackage
form_class = InternalPackageForm
template_name = 'marketplace/package_create/package_form.html'
success_url = '/packages/new/status'
def get_context_data(self, **kwargs):
context = super(InternalPackageCreateFromExperimentView, self).get_context_data(**kwargs)
context['experiment_id'] = self.kwargs['experiment_id']
context['step_id'] = self.kwargs['step_id']
logger.info('%s started on package creation for %s', self.request.user, self.kwargs['experiment_id'])
return context
def form_valid(self, form):
step_folder = self.get_step().location
experiment = self.get_experiment()
form.instance.owner = experiment.owner
form.instance.template_id = 1
response = super(InternalPackageCreateFromExperimentView, self).form_valid(form)
task_create_package_from_experiment.delay(form.instance.pk, experiment.pk, step_folder)
return response
def get_experiment(self):
experiment_id = self.kwargs['experiment_id']
experiment = verify_and_get_experiment(self.request, experiment_id)
return experiment
def get_step(self):
step_id = self.kwargs['step_id']
step = ChosenExperimentSteps.objects.get(pk=step_id)
return step
class InternalPackageListView(ListView):
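    """Lists the internal packages owned by the requesting user."""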
model = InternalPackage
def get_queryset(self):
qs = super(InternalPackageListView, self).get_queryset()
return qs.filter(owner__user_id=self.request.user.id)
class InternalPackageDashboard(ExperimentPackageTypeMixin, View):
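    """Renders the dashboard page for a package; only accessible to its owner."""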
def get(self, request, pk):
package = get_object_or_404(InternalPackage, pk=pk)
assert package.owner.user == self.request.user
context = {'docs': package.docs,
'package': package,
'object_id': package.pk,
'object_type': package.get_object_type(),
'edit_form': InternalPackageForm(instance=package),
'dashboard_active': True,
'is_internal': True}
return render(request, 'marketplace/package_detail/internalpackage_dashboard.html', context)
class InternalPackageUpdateView(UpdateView):
"""Updates the information associated with an InternalPackage."""
model = InternalPackage
form_class = InternalPackageForm
def get_success_url(self):
return reverse('internalpackage_dashboard', kwargs={'pk': self.kwargs['pk']})
def form_valid(self, form):
assert form.instance.owner.user == self.request.user
messages.add_message(self.request, messages.SUCCESS, 'Package successfully updated')
return super(InternalPackageUpdateView, self).form_valid(form)
class InternalPackageVersionCreateView(CreateView):
"""View for creating a new version of an InternalPackage.
Starts task to publish this new package and publish it. This is only needed for Python experiments."""
model = PackageVersion
fields = ['version_nr', 'changelog', 'pre_release']
template_name = 'marketplace/package_detail/packageversion_form.html'
def get_context_data(self, **kwargs):
context = super(InternalPackageVersionCreateView, self).get_context_data(**kwargs)
context['package'] = InternalPackage.objects.get(id=self.kwargs['package_id'])
return context
def form_valid(self, form):
package = InternalPackage.objects.get(id=self.kwargs['package_id'])
form.instance.package = package
assert form.instance.package.owner.user == self.request.user
form.instance.added_by = get_workbench_user(self.request.user)
response = super(InternalPackageVersionCreateView, self).form_valid(form)
create_tag_for_package_version(form.instance.id)
if 'Python' in package.language.language:
update_setup_py_with_new_version(form.instance.id)
task_publish_update_package.delay(package.pk)
return response
def get_success_url(self):
return reverse('internalpackage_dashboard', kwargs={'pk': self.kwargs['package_id']})
@login_required
def internalpackage_publish(request, pk):
"""Publish a package, starts task doing the actual publish work"""
package = InternalPackage.objects.get(id=pk)
assert package.owner.user == request.user
task_publish_update_package.delay(package.pk)
logger.info('%s published the package %s', request.user, package)
return redirect(to=package.get_absolute_url())
@login_required
def internalpackage_publish_checklist(request, pk):
"""View for displaying the InternalPackage checklist before publishing a package."""
package = InternalPackage.objects.get(id=pk)
assert package.owner.user == request.user
dependencies_defined = package.requirements.count() != 0
getting_started_guide = PackageResource.objects.filter(package=package.id, title='Getting started')
getting_started = False
if getting_started_guide:
getting_started_guide = getting_started_guide[0]
getting_started = len(getting_started_guide.resource) != 0
return render(request, 'marketplace/package_publish.html', {'object': package,
'dependencies_defined': dependencies_defined,
'getting_started': getting_started})
@login_required
def internalpackage_remove(request, pk):
"""View for removing an internal package.
This is an action that can only be performed by the owner of the package."""
package = InternalPackage.objects.get(id=pk)
assert package.owner.user == request.user
task_remove_package.delay(package.pk)
logger.info("%s removed the package %s", request.user, package)
messages.add_message(request, messages.INFO, "Removing package...")
return redirect(to=package.success_url_dict()['dashboard'])
class InternalPackageDetailView(InternalPackageBaseView, ActiveExperimentsList, DetailView):
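    """Detail page for a package, with the repo file list, recent versions and rendered README."""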
model = InternalPackage
template_name = 'marketplace/package_detail/package_detail.html'
def get_context_data(self, **kwargs):
self.object_type = ExperimentPackageTypeMixin.PACKAGE_TYPE
context = super(InternalPackageDetailView, self).get_context_data(**kwargs)
github_helper = get_github_helper(context['package'].owner, context['package'])
package_id = self.kwargs['pk']
context['version_history'] = PackageVersion.objects.filter(package=package_id).order_by('-created')[:5]
context['index_active'] = True
context['git_list'] = get_files_for_repository(github_helper, self.object)
if InternalPackage.objects.filter(pk=self.object.pk):
context['readme'] = self.readme_file_of_package()
return context
def readme_file_of_package(self):
internalpackage = InternalPackage.objects.get(id=self.kwargs['pk'])
github_helper = GitHubHelper(internalpackage.owner, internalpackage.git_repo.name)
readme = github_helper.view_file('README.md')
md = Markdown()
content_file = md.convert(readme)
return content_file
@login_required
def internalpackage_install(request, pk):
"""View for installing a package in own project.
Adds this package to the requirements file of the chosen experiment and starts
task to update this file in GitHub"""
internal_package = InternalPackage.objects.get(pk=pk)
assert 'experiment_id' in request.POST
experiment_id = request.POST['experiment_id']
experiment = verify_and_get_experiment(request, experiment_id)
result = add_internalpackage_to_experiment(internal_package, experiment)
if result:
logger.info('%s installed the package %s in experiment %s', request.user, internal_package, experiment)
messages.add_message(request, messages.SUCCESS, 'Added package to your experiment')
return JsonResponse({'added': True})
else:
messages.add_message(request, messages.ERROR, 'Could not add package to your experiment')
return JsonResponse({'added': False})
| 45.510373 | 122 | 0.728209 | 1,245 | 10,968 | 6.217671 | 0.180723 | 0.019377 | 0.014468 | 0.024803 | 0.346467 | 0.25901 | 0.193903 | 0.128924 | 0.105284 | 0.09159 | 0 | 0.001456 | 0.185904 | 10,968 | 240 | 123 | 45.7 | 0.865494 | 0.081236 | 0 | 0.236264 | 0 | 0 | 0.105342 | 0.032351 | 0 | 0 | 0 | 0 | 0.038462 | 1 | 0.104396 | false | 0 | 0.120879 | 0.010989 | 0.472527 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61a8845e1ccc4a6f9d7930913c7112d6c98c2b1b | 1,049 | py | Python | solutions/day13/p2/main.py | tosmun/AdventOfCode | 62f4f3a8cc3761ee5d5eaf682ae9c2c985cd80b5 | [
"Apache-2.0"
] | 1 | 2017-07-15T19:01:03.000Z | 2017-07-15T19:01:03.000Z | solutions/day13/p2/main.py | tosmun/Python-AdventOfCode | 62f4f3a8cc3761ee5d5eaf682ae9c2c985cd80b5 | [
"Apache-2.0"
] | null | null | null | solutions/day13/p2/main.py | tosmun/Python-AdventOfCode | 62f4f3a8cc3761ee5d5eaf682ae9c2c985cd80b5 | [
"Apache-2.0"
] | null | null | null | from itertools import permutations
if __name__ == "__main__":
map = { }
with open('../input.txt', 'r') as fp:
while True:
line=fp.readline()
if line is None or line == '':
break
parts = line.split(" ")
person = parts[0]
value = int(parts[3])
if parts[2] == 'lose':
value = value * -1
neighbour = parts[10].split('.')[0]
if person not in map:
map[person] = { }
person_map = map[person]
person_map[neighbour] = value
#Include myself
my_map = { }
for person in map:
map[person]['me'] = 0
my_map[person] = 0
map['me'] = my_map
best = (-1, None)
#For each possible combination
for arrangement in permutations(map.keys(), len(map.keys())):
#Calculate total
total = 0
for i, person in enumerate(arrangement):
#Have to account for boundaries of the list
left = (i-1 if i > 0 else -1)
right = (i+1 if i < len(arrangement)-1 else 0)
total += map[person][arrangement[left]]
total += map[person][arrangement[right]]
if total > best[0]:
best = (total, arrangement)
print(best)
| 26.225 | 62 | 0.622498 | 155 | 1,049 | 4.129032 | 0.412903 | 0.084375 | 0.05625 | 0.04375 | 0.060938 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022086 | 0.22307 | 1,049 | 39 | 63 | 26.897436 | 0.76319 | 0.095329 | 0 | 0 | 0 | 0 | 0.032804 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.029412 | 0 | 0.029412 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61aa4cd0aa9e08c093a70a1f326924e6e96fd86c | 13,031 | py | Python | review/models.py | osaimola/django-review | e4f10838a88b84749bda5bd8febaf7d85447fcdf | [
"MIT"
] | null | null | null | review/models.py | osaimola/django-review | e4f10838a88b84749bda5bd8febaf7d85447fcdf | [
"MIT"
] | null | null | null | review/models.py | osaimola/django-review | e4f10838a88b84749bda5bd8febaf7d85447fcdf | [
"MIT"
] | null | null | null | """Just an empty models file to let the testrunner recognize this as app."""
from django.conf import settings
from django.contrib.contenttypes import fields
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext, ugettext_lazy as _
from hvad.models import TranslatableModel, TranslatedFields
DEFAULT_CHOICES = (
('5', '5'),
('4', '4'),
('3', '3'),
('2', '2'),
('1', '1'),
)
@python_2_unicode_compatible
class Review(models.Model):
"""
Represents a user review, which includes free text and images.
:reviewed_item: Object, which is reviewed.
:user (optional): User, which posted the rating.
:content (optional): Running text.
:images (optional): Review-related images.
:language (optional): Language shortcut to filter reviews.
:creation_date: The date and time, this review was created.
:average_rating: Should always be calculated and updated when the object is
saved. This is for improving performance and reducing db queries when
calculating ratings for reviewed items. Currently it gets updated at the
end of the save method of the ``ReviewForm``. This means that when you
manually save a Review via the Django admin, this field will not be
updated.
:extra_item: Optional object, which should be attached to the review.
"""
# GFK 'reviewed_item'
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
reviewed_item = fields.GenericForeignKey('content_type', 'object_id')
user = models.ForeignKey(
getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
verbose_name=_('User'),
blank=True, null=True,
on_delete=models.CASCADE
)
content = models.TextField(
max_length=1024,
verbose_name=_('Content'),
blank=True,
)
images = fields.GenericRelation(
'user_media.UserMediaImage',
)
language = models.CharField(
max_length=5,
verbose_name=_('Language'),
blank=True,
)
creation_date = models.DateTimeField(
auto_now_add=True,
verbose_name=_('Creation date'),
)
average_rating = models.FloatField(
verbose_name=_('Average rating'),
default=0,
)
# GFK 'extra_item'
extra_content_type = models.ForeignKey(
ContentType,
related_name='reviews_attached',
null=True, blank=True,
on_delete=models.CASCADE
)
extra_object_id = models.PositiveIntegerField(null=True, blank=True)
extra_item = fields.GenericForeignKey(
'extra_content_type', 'extra_object_id')
class Meta:
ordering = ['-creation_date']
def __str__(self):
return '{0} - {1}'.format(self.reviewed_item, self.get_user())
# TODO: Add magic to get ReviewExtraInfo content objects here
def get_user(self):
"""Returns the user who wrote this review or ``Anonymous``."""
if self.user:
return self.user.email
return ugettext('Anonymous')
def get_averages(self, max_value=None):
"""
Centralized average calculation. Returns category averages and total
average.
:param max_value: By default the app is set to a rating from 1 to 5.
So if nothing is changed, we can just calculate the average of all
rating values and be good. We then have an average that is between 1
and 5 as well.
BUT if we have custom choices, we could end up having one category
with a range of 1 to 10 and one category with 1 to 5. The result then
must be abstracted to fit into the given range set by max_value.
This can also be used to calculate percentages by setting max_value
to 100.
"""
max_rating_value = 0
category_maximums = {}
category_averages = {}
categories = RatingCategory.objects.filter(counts_for_average=True,
rating__review=self)
# find the highest rating possible across all categories
for category in categories:
category_max = category.get_rating_max_from_choices()
category_maximums.update({category: category_max})
if max_value is not None:
max_rating_value = max_value
else:
if category_max > max_rating_value:
max_rating_value = category_max
# calculate the average of every distinct category, normalized to the
# recently found max
for category in categories:
category_average = None
ratings = Rating.objects.filter(
review=self,
category=category, value__isnull=False).exclude(value='')
category_max = category_maximums[category]
for rating in ratings:
if category_average is None:
category_average = float(rating.value)
else:
category_average += float(rating.value)
if category_average is not None:
category_average *= float(max_rating_value) / float(
category_max)
category_averages[category] = (
category_average / ratings.count())
# calculate the total average of all categories
total_average = 0
for category, category_average in category_averages.items():
total_average += category_average
if not len(category_averages):
return (False, False)
total_average /= len(category_averages)
return total_average, category_averages
def get_average_rating(self, max_value=None):
"""
Returns the average rating for all categories of this review.
A shortcut for get_averages. Look there for more details.
"""
total_average, category_averages = self.get_averages(
max_value=max_value)
return total_average
def get_category_averages(self, max_value=None):
"""
Returns the average ratings for every category of this review.
A shortcut for get_averages. Look there for more details.
"""
total_average, category_averages = self.get_averages(
max_value=max_value)
return category_averages
def is_editable(self):
"""
Returns True, if the time period to update this review hasn't ended
yet.
If the period setting has not been set, it always return True. This
is the general case. If the user has used this setting to define an
update period it returns False, if this period has expired.
"""
if getattr(settings, 'REVIEW_UPDATE_PERIOD', False):
period_end = self.creation_date + timezone.timedelta(
seconds=getattr(settings, 'REVIEW_UPDATE_PERIOD') * 60)
if timezone.now() > period_end:
return False
return True
@python_2_unicode_compatible
class ReviewExtraInfo(models.Model):
"""
Model to add any extra information to a review.
This can be useful if you need to save more information about a reviewer
than just the User instance. Let's say you are building a site for theme
park reviews and you want to allow the user to select the weather
conditions for the day of his visit (which will surely influence his
review). This model would allow you to tie any model of your app to a
review.
:type: Callable type of the extra info. This should be unique per review.
We will soon add a hack to the Review model which allows you to get the
content_object of this instance from a review instance (i.e. by calling
``my_review.weather_conditions.name``). So for this example you would
set the type to ``weather_conditions``.
:review: Related review.
:content_object: The related object that stores this extra information.
"""
type = models.CharField(
max_length=256,
verbose_name=_('Type'),
)
review = models.ForeignKey(
'review.Review',
verbose_name=_('Review'),
on_delete=models.CASCADE
)
# GFK 'content_object'
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = fields.GenericForeignKey('content_type', 'object_id')
class Meta:
ordering = ['type']
def __str__(self):
return '{0} - {1}'.format(self.review, self.type)
@python_2_unicode_compatible
class RatingCategory(TranslatableModel):
"""
Represents a rating category.
If your reviews are just text based, you don't have to use this.
This can be useful if you want to allow users to rate one or more
    categories, like ``Food``, ``Room service``, ``Cleanliness`` and so on.
:identifier: Optional identifier.
:name: Name of the category. Also used as label for the category form.
:question: If you want to render a more explicit question in addition to
the name, use this field. It is added to the form fields as help text.
:counts_for_average: If True, the ratings of this category will be used to
calculate the average rating. Default is True.
"""
identifier = models.SlugField(
max_length=32,
verbose_name=_('Identifier'),
blank=True,
)
counts_for_average = models.BooleanField(
verbose_name=_('Counts for average rating'),
default=True,
)
translations = TranslatedFields(
name=models.CharField(max_length=256),
question=models.CharField(max_length=512, blank=True, null=True),
)
def __str__(self):
return self.lazy_translation_getter('name', 'Untranslated')
@property
def required(self):
"""Returns False, if the choices include a None value."""
if not hasattr(self, '_required'):
# get_choices sets _required
self.get_choices()
return self._required
def get_choices(self):
"""Returns the tuple of choices for this category."""
choices = ()
self._required = True
for choice in self.choices.all():
if choice.value is None or choice.value == '':
self._required = False
choices += (choice.value, choice.label),
if not choices:
return DEFAULT_CHOICES
return choices
def get_rating_max_from_choices(self):
"""Returns the maximun value a rating can have in this catgory."""
return int(list(self.get_choices())[0][0])
@python_2_unicode_compatible
class RatingCategoryChoice(TranslatableModel):
"""
Defines an optional choice for a `RatingCategory`.
If `RatingChoice` exists, the choices will not be loaded from the settings.
:label: The label that is displayed for this choice.
:ratingcategory: The `RatingCategory` this choice belongs to.
:value: The value that this choice has. If a `RatingChoice` with value=None
is created and chosen by the user, this category is not taken into
account when the average is calculated.
"""
ratingcategory = models.ForeignKey(
RatingCategory,
verbose_name=_('Rating category'),
related_name='choices',
on_delete=models.CASCADE
)
value = models.CharField(
verbose_name=_('Value'),
max_length=20,
blank=True, null=True,
)
translations = TranslatedFields(
label=models.CharField(
verbose_name=_('Label'),
max_length=128,
),
)
def __str__(self):
return self.lazy_translation_getter('label',
self.ratingcategory.identifier)
class Meta:
ordering = ('-value', )
@python_2_unicode_compatible
class Rating(models.Model):
"""
Represents a rating for one rating category.
:rating: Rating value.
:review: The review the rating belongs to.
:category: The rating category the rating belongs to.
"""
rating_choices = DEFAULT_CHOICES
value = models.CharField(
max_length=20,
verbose_name=_('Value'),
choices=getattr(settings, 'REVIEW_RATING_CHOICES', rating_choices),
blank=True, null=True,
)
review = models.ForeignKey(
'review.Review',
verbose_name=_('Review'),
related_name='ratings',
on_delete=models.CASCADE
)
category = models.ForeignKey(
'review.RatingCategory',
verbose_name=_('Category'),
on_delete=models.CASCADE
)
class Meta:
ordering = ['category', 'review']
def __str__(self):
return '{0}/{1} - {2}'.format(self.category, self.review, self.value)
| 33.412821 | 79 | 0.647149 | 1,586 | 13,031 | 5.163304 | 0.210593 | 0.020149 | 0.013677 | 0.020515 | 0.180364 | 0.107461 | 0.090121 | 0.082061 | 0.052754 | 0.052754 | 0 | 0.006861 | 0.272964 | 13,031 | 389 | 80 | 33.498715 | 0.857505 | 0.351393 | 0 | 0.240741 | 0 | 0 | 0.064878 | 0.00844 | 0 | 0 | 0 | 0.002571 | 0 | 1 | 0.060185 | false | 0 | 0.037037 | 0.023148 | 0.342593 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61aabee9e72f77565b8fbe0b0256f2a2d385c406 | 2,170 | py | Python | python/oneflow/test/modules/test_consistent_stateful_kernel_with_cache.py | L-Net-1992/oneflow | 4dc08d65caea36fdd137841ac95551218897e730 | [
"Apache-2.0"
] | 1 | 2022-03-14T11:17:56.000Z | 2022-03-14T11:17:56.000Z | python/oneflow/test/modules/test_consistent_stateful_kernel_with_cache.py | L-Net-1992/oneflow | 4dc08d65caea36fdd137841ac95551218897e730 | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_consistent_stateful_kernel_with_cache.py | L-Net-1992/oneflow | 4dc08d65caea36fdd137841ac95551218897e730 | [
"Apache-2.0"
] | 1 | 2021-12-15T02:14:49.000Z | 2021-12-15T02:14:49.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_global_stateful_kernel_with_inpersistent_state(test_case, placement, sbp):
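    # build a global tensor, slice it under two different sbp layouts,
    # and check that both slices match the expected values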
x = (
flow.arange(64)
.reshape(8, 8)
.to_global(flow.env.all_device_placement("cpu"), flow.sbp.broadcast)
)
x = x.to_global(placement, sbp)
y = flow._C.logical_slice(x, [0, 0], [3, 1], [1, 1])
y_np = np.array([[0], [8], [16]])
test_case.assertTrue(
np.array_equal(
y.to_global(flow.env.all_device_placement("cpu"), flow.sbp.broadcast)
.to_local()
.numpy(),
y_np,
)
)
x = x.to_global(sbp=flow.sbp.split(1))
y = flow._C.logical_slice(x, [0, 0], [3, 1], [1, 1])
test_case.assertTrue(
np.array_equal(
y.to_global(flow.env.all_device_placement("cpu"), flow.sbp.broadcast)
.to_local()
.numpy(),
y_np,
)
)
class TestStatefulKernelWithInpersistentState(flow.unittest.TestCase):
@globaltest
def test_global_stateful_kernel_with_inpersistent_state(test_case):
for placement in all_placement():
            # logical_slice only supports 1d sbp
if len(placement.ranks.shape) != 1:
continue
for sbp in all_sbp(placement, max_dim=2):
_test_global_stateful_kernel_with_inpersistent_state(
test_case, placement, sbp
)
if __name__ == "__main__":
unittest.main()
| 31.449275 | 84 | 0.654839 | 296 | 2,170 | 4.591216 | 0.412162 | 0.04415 | 0.039735 | 0.05298 | 0.358352 | 0.358352 | 0.358352 | 0.358352 | 0.358352 | 0.358352 | 0 | 0.019572 | 0.246544 | 2,170 | 68 | 85 | 31.911765 | 0.811621 | 0.283871 | 0 | 0.318182 | 0 | 0 | 0.010996 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.045455 | false | 0 | 0.113636 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61ab1ecec0d561e9b48dbeb6b94f8d2303697a63 | 1,437 | py | Python | schools3/ml/experiments/multi_dataset_experiment.py | dssg/mlpolicylab_fall20_schools3_public | f8eff4c56e9bada1eb81ddaca03686d7ef53c2c4 | [
"MIT"
] | null | null | null | schools3/ml/experiments/multi_dataset_experiment.py | dssg/mlpolicylab_fall20_schools3_public | f8eff4c56e9bada1eb81ddaca03686d7ef53c2c4 | [
"MIT"
] | null | null | null | schools3/ml/experiments/multi_dataset_experiment.py | dssg/mlpolicylab_fall20_schools3_public | f8eff4c56e9bada1eb81ddaca03686d7ef53c2c4 | [
"MIT"
] | null | null | null | import pandas as pd
from tqdm import tqdm
from schools3.data.datasets.datasets_generator import DatasetsGenerator
from schools3.ml.experiments.models_experiment import ModelsExperiment
from schools3.config import main_config
# an experiment that trains models and reports metrics for multiple grades
class MultiDatasetExperiment(ModelsExperiment):
def __init__(
self, name='ignore', features_list=main_config.features,
labels=main_config.labels, models=main_config.models,
metrics=main_config.metrics, use_cache=main_config.use_cache
):
super(MultiDatasetExperiment, self).__init__(
name, features_list, labels, models, metrics, use_cache=use_cache
)
def perform(
self, grades=main_config.multi_grades, include_all_train_hist=True,
*args, **kwargs
):
df = pd.DataFrame()
t_grades = tqdm(grades)
for grade in t_grades:
t_grades.set_description(f'grade {grade}:')
generator = DatasetsGenerator(grade)
cohorts = \
tqdm(generator.get_all_train_test_pairs(include_all_train_hist))
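            # evaluate every (train, test) cohort pair for this grade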
for train_cohort, test_cohort in cohorts:
cohorts.set_description(train_cohort.get_identifier())
metrics_df = self.get_train_test_metrics(train_cohort, test_cohort)
df = pd.concat([df, metrics_df], ignore_index=True)
return df
| 38.837838 | 83 | 0.695894 | 171 | 1,437 | 5.549708 | 0.385965 | 0.073762 | 0.031612 | 0.040042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002712 | 0.230341 | 1,437 | 36 | 84 | 39.916667 | 0.855335 | 0.050104 | 0 | 0.066667 | 0 | 0 | 0.014684 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.166667 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61ab642b229d604ba34d9d88a40117fa3578d4e2 | 276 | py | Python | wrappers/bcftools/view/wrapper.py | delvinso/crg2 | 366f9dd6f89db2243765688bd0c2d9a2b3d170f4 | [
"Apache-2.0"
] | null | null | null | wrappers/bcftools/view/wrapper.py | delvinso/crg2 | 366f9dd6f89db2243765688bd0c2d9a2b3d170f4 | [
"Apache-2.0"
] | null | null | null | wrappers/bcftools/view/wrapper.py | delvinso/crg2 | 366f9dd6f89db2243765688bd0c2d9a2b3d170f4 | [
"Apache-2.0"
] | null | null | null | __author__ = "Johannes Köster"
__copyright__ = "Copyright 2016, Johannes Köster"
__email__ = "koester@jimmy.harvard.edu"
__license__ = "MIT"
from snakemake.shell import shell
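# Run "bcftools view" with the given params on the first input file and write the first output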
shell(
"bcftools view {snakemake.params} {snakemake.input[0]} " "-o {snakemake.output[0]}"
)
| 21.230769 | 87 | 0.731884 | 32 | 276 | 5.8125 | 0.71875 | 0.150538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025105 | 0.134058 | 276 | 12 | 88 | 23 | 0.753138 | 0 | 0 | 0 | 0 | 0 | 0.550725 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61ac0e6db30abca8f91a026b4103c099bb566e16 | 4,728 | py | Python | tensorflow/python/ops/ragged/ragged_range_op_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/ragged/ragged_range_op_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/ragged/ragged_range_op_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_range op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedRangeOpTest(test_util.TensorFlowTestCase):
def testDocStringExamples(self):
"""Examples from ragged_range.__doc__."""
rt1 = ragged_math_ops.range([3, 5, 2])
self.assertAllEqual(rt1, [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]])
rt2 = ragged_math_ops.range([0, 5, 8], [3, 3, 12])
self.assertAllEqual(rt2, [[0, 1, 2], [], [8, 9, 10, 11]])
rt3 = ragged_math_ops.range([0, 5, 8], [3, 3, 12], 2)
self.assertAllEqual(rt3, [[0, 2], [], [8, 10]])
def testBasicRanges(self):
# Specify limits only.
self.assertAllEqual(
ragged_math_ops.range([0, 3, 5]),
[list(range(0)), list(range(3)),
list(range(5))])
# Specify starts and limits.
self.assertAllEqual(
ragged_math_ops.range([0, 3, 5], [2, 3, 10]),
[list(range(0, 2)),
list(range(3, 3)),
list(range(5, 10))])
# Specify starts, limits, and deltas.
self.assertAllEqual(
ragged_math_ops.range([0, 3, 5], [4, 4, 15], [2, 3, 4]),
[list(range(0, 4, 2)),
list(range(3, 4, 3)),
list(range(5, 15, 4))])
def testFloatRanges(self):
expected = [[0.0, 0.4, 0.8, 1.2, 1.6, 2.0, 2.4, 2.8, 3.2, 3.6], [3.0],
[5.0, 7.2, 9.4, 11.6, 13.8]]
actual = ragged_math_ops.range([0.0, 3.0, 5.0], [3.9, 4.0, 15.0],
[0.4, 1.5, 2.2])
self.assertAllClose(actual, expected)
def testNegativeDeltas(self):
self.assertAllEqual(
ragged_math_ops.range([0, 3, 5], limits=0, deltas=-1),
[list(range(0, 0, -1)),
list(range(3, 0, -1)),
list(range(5, 0, -1))])
self.assertAllEqual(
ragged_math_ops.range([0, -3, 5], limits=0, deltas=[-1, 1, -2]),
[list(range(0, 0, -1)),
list(range(-3, 0, 1)),
list(range(5, 0, -2))])
def testBroadcast(self):
# Specify starts and limits, broadcast deltas.
self.assertAllEqual(
ragged_math_ops.range([0, 3, 5], [4, 4, 15], 3),
[list(range(0, 4, 3)),
list(range(3, 4, 3)),
list(range(5, 15, 3))])
# Broadcast all arguments.
self.assertAllEqual(
ragged_math_ops.range(0, 5, 1), [list(range(0, 5, 1))])
def testEmptyRanges(self):
rt1 = ragged_math_ops.range([0, 5, 3], [0, 3, 5])
rt2 = ragged_math_ops.range([0, 5, 5], [0, 3, 5], -1)
self.assertAllEqual(rt1, [[], [], [3, 4]])
self.assertAllEqual(rt2, [[], [5, 4], []])
def testShapeFnErrors(self):
self.assertRaises((ValueError, errors.InvalidArgumentError),
ragged_math_ops.range, [[0]], 5)
self.assertRaises((ValueError, errors.InvalidArgumentError),
ragged_math_ops.range, 0, [[5]])
self.assertRaises((ValueError, errors.InvalidArgumentError),
ragged_math_ops.range, 0, 5, [[0]])
self.assertRaises((ValueError, errors.InvalidArgumentError),
ragged_math_ops.range, [0], [1, 2])
def testKernelErrors(self):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'Requires delta != 0'):
self.evaluate(ragged_math_ops.range(0, 0, 0))
def testShape(self):
self.assertAllEqual(
ragged_math_ops.range(0, 0, 1).shape.as_list(), [1, None])
self.assertAllEqual(
ragged_math_ops.range([1, 2, 3]).shape.as_list(), [3, None])
self.assertAllEqual(
ragged_math_ops.range([1, 2, 3], [4, 5, 6]).shape.as_list(), [3, None])
if __name__ == '__main__':
googletest.main()
| 37.52381 | 81 | 0.586717 | 648 | 4,728 | 4.151235 | 0.226852 | 0.055762 | 0.10632 | 0.14052 | 0.414498 | 0.369145 | 0.346097 | 0.322677 | 0.305948 | 0.259108 | 0 | 0.070749 | 0.243655 | 4,728 | 125 | 82 | 37.824 | 0.681488 | 0.185702 | 0 | 0.216867 | 0 | 0 | 0.007305 | 0 | 0 | 0 | 0 | 0 | 0.253012 | 1 | 0.108434 | false | 0 | 0.084337 | 0 | 0.204819 | 0.012048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61ac853aed3414f5df7058083f22b577f7f6c8d2 | 256 | py | Python | run/dev.py | LuxQuad/ozet-core-api | bf0cd9e4b58bf9b7e805843df4dfe7320afa7e4b | [
"MIT"
] | null | null | null | run/dev.py | LuxQuad/ozet-core-api | bf0cd9e4b58bf9b7e805843df4dfe7320afa7e4b | [
"MIT"
] | 5 | 2021-08-10T03:38:31.000Z | 2021-08-11T12:39:34.000Z | run/dev.py | LuxQuad/ozet-core-api | bf0cd9e4b58bf9b7e805843df4dfe7320afa7e4b | [
"MIT"
] | null | null | null | import uvicorn
HOST = "127.0.0.1"
PORT = 8000
ENV = ".misc/env/dev.env"
SERVICE = "app.main:service"
LOG_LEVEL = "trace"
if __name__ == "__main__":
uvicorn.run(SERVICE, host=HOST, port=PORT, log_level=LOG_LEVEL, workers=4, env_file=ENV, reload=True)
| 23.272727 | 105 | 0.703125 | 42 | 256 | 4 | 0.595238 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04955 | 0.132813 | 256 | 10 | 106 | 25.6 | 0.707207 | 0 | 0 | 0 | 0 | 0 | 0.214844 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61aea3b9d53c7af9cfa86fadc4caee11c448c970 | 4,307 | py | Python | vgg16_convadd.py | kkkumar2/Vegetable-recognition-with-VGG-16-19-SCRATCH | a1f16bcca0608ae87cbf9b0973ac16d9d2274ae3 | [
"Apache-2.0"
] | null | null | null | vgg16_convadd.py | kkkumar2/Vegetable-recognition-with-VGG-16-19-SCRATCH | a1f16bcca0608ae87cbf9b0973ac16d9d2274ae3 | [
"Apache-2.0"
] | null | null | null | vgg16_convadd.py | kkkumar2/Vegetable-recognition-with-VGG-16-19-SCRATCH | a1f16bcca0608ae87cbf9b0973ac16d9d2274ae3 | [
"Apache-2.0"
] | null | null | null | from tensorflow.keras.layers import Input, Lambda, Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras import optimizers
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
from datetime import datetime
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from tensorflow.keras.callbacks import ReduceLROnPlateau
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
#Give dataset path
train_path = r'D:\Data science\ineuron\Assignments\Vegetable edited\train'
test_path = r'D:\Data science\ineuron\Assignments\Vegetable edited\test'
validation_path = r'D:\Data science\ineuron\Assignments\Vegetable edited\validation'
vgg = VGG16(input_shape=(224,224,3), weights='imagenet', include_top=False)
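# Freeze the first 16 layers so only the last few convolutional layers stay trainable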
for layer in vgg.layers[:16]:
layer.trainable = False
for layer in vgg.layers:
print(layer,layer.trainable)
print("Total layers in VGG 16 are : ",len(vgg.layers))
# useful for getting number of classes
folders = glob(train_path + '\*')
print(len(folders))
x = Flatten()(vgg.output)
prediction = Dense(len(folders), activation='softmax')(x)
model = Model(inputs=vgg.input, outputs=prediction)
model.summary()
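# SGD with momentum and weight decay for fine-tuning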
sgd = optimizers.SGD(learning_rate=0.01, decay=1e-6, momentum=0.9)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
# Data Augmentation
train_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# Data Augmentation
test_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# Data Augmentation
valid_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# Make sure you provide the same target size as initialized for the image size
train_set = train_datagen.flow_from_directory(train_path,
target_size = (224, 224),
batch_size = 32,
class_mode = 'categorical')
test_set = train_datagen.flow_from_directory(test_path,
target_size = (224, 224),
batch_size = 32,
class_mode = 'categorical')
valid_set = train_datagen.flow_from_directory(validation_path,
target_size = (224, 224),
batch_size = 32,
class_mode = 'categorical')
#lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=5,
min_lr=0.5e-6)
#num_epochs = 1000
#num_batch_size = 32
checkpoint = ModelCheckpoint(filepath='vgg16_tl_convadd.h5',
verbose=1, save_best_only=True)
callbacks = [checkpoint, lr_reducer]
#callbacks = [checkpoint]
start = datetime.now()
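# Train with the augmented generators; the checkpoint callback keeps only the best weights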
model.fit_generator(
train_set,
validation_data=test_set,
epochs=5,
steps_per_epoch=5,
validation_steps=32,
callbacks=callbacks ,verbose=1)
duration = datetime.now() - start
print("Training completed in time: ", duration)
| 36.193277 | 137 | 0.639424 | 484 | 4,307 | 5.508264 | 0.347107 | 0.027007 | 0.031508 | 0.027007 | 0.447862 | 0.3991 | 0.313578 | 0.313578 | 0.313578 | 0.257314 | 0 | 0.032776 | 0.277455 | 4,307 | 118 | 138 | 36.5 | 0.823907 | 0.068029 | 0 | 0.362637 | 0 | 0 | 0.09346 | 0.034758 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.164835 | 0 | 0.164835 | 0.043956 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61af0d4043273f66c7507d94d876e3a5dbecbd8a | 4,888 | py | Python | scripts/make_phenicx_anechoic_index.py | magdalenafuentes/soundata | 4d2dde8e9ef61483bc202bf94d6a0ccc1601c52b | [
"BSD-3-Clause"
] | null | null | null | scripts/make_phenicx_anechoic_index.py | magdalenafuentes/soundata | 4d2dde8e9ef61483bc202bf94d6a0ccc1601c52b | [
"BSD-3-Clause"
] | null | null | null | scripts/make_phenicx_anechoic_index.py | magdalenafuentes/soundata | 4d2dde8e9ef61483bc202bf94d6a0ccc1601c52b | [
"BSD-3-Clause"
] | 1 | 2021-05-03T19:34:46.000Z | 2021-05-03T19:34:46.000Z | import argparse
import glob
import hashlib
import json
import os
import string
DATASET_INDEX_PATH = '../mirdata/datasets/indexes/phenicx_anechoic_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def make_dataset_index(data_path):
pieces = ['beethoven', 'bruckner', 'mahler', 'mozart']
families = {
'doublebass': 'strings',
'cello': 'strings',
'clarinet': 'woodwinds',
'viola': 'strings',
'violin': 'strings',
'oboe': 'woodwinds',
'flute': 'woodwinds',
'trumpet': 'brass',
'bassoon': 'woodwinds',
'horn': 'brass',
}
totalinstruments = [20, 39, 30, 10]
ninstruments = [10, 10, 10, 8]
index = {'version':1}
index['tracks'] = {}
index['multitracks'] = {}
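    # build one multitrack entry per piece, with one track per instrument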
for ip, piece in enumerate(pieces):
index['multitracks'][piece] = {}
audio_files = sorted(
glob.glob(os.path.join(data_path, 'audio', piece, '*.wav'))
)
instruments = [
os.path.basename(audio_path).split('.')[0].rstrip(string.digits)
for audio_path in audio_files
]
set_instruments = list(set(instruments))
assert (
len(instruments) == totalinstruments[ip]
), 'audio files for some instruments are missing'
assert (
len(set_instruments) == ninstruments[ip]
), 'some instruments are missing from the dataset'
index['multitracks'][piece]['tracks'] = []
for instrument in set_instruments:
assert (
instrument in families.keys()
), "instrument {} is not in the list of dataset instruments".format(
instrument
)
index['tracks'][piece+'-'+instrument] = {}
index['multitracks'][piece]['tracks'].append(piece+'-'+instrument)
#### add audios
instrument_audio_files = sorted(
glob.glob(os.path.join(data_path, 'audio', piece, instrument + '*.wav'))
)
assert (
len(instrument_audio_files) > 0
), 'no audio has been found for {}'.format(instrument)
for i, audio_file in enumerate(instrument_audio_files):
audio_checksum = md5(
os.path.join(
data_path, 'audio', piece, os.path.basename(audio_file)
)
)
source = os.path.basename(audio_file).replace('.wav', '')
index['tracks'][piece+'-'+instrument]['audio_'+source] = (
'audio/{}/{}'.format(piece, os.path.basename(audio_file)),
audio_checksum,
)
#### add scores
assert os.path.exists(
os.path.join(
data_path, 'annotations', piece, '{}.txt'.format(instrument)
)
            ), 'cannot find score file {}'.format(os.path.join(
                data_path, 'annotations', piece, '{}.txt'.format(instrument)
            ))
assert os.path.exists(
os.path.join(
data_path, 'annotations', piece, '{}_o.txt'.format(instrument)
)
            ), 'cannot find score file {}'.format(os.path.join(
                data_path, 'annotations', piece, '{}_o.txt'.format(instrument)
            ))
score_checksum = md5(
os.path.join(
data_path, 'annotations', piece, '{}.txt'.format(instrument)
)
)
score_original_checksum = md5(
os.path.join(
data_path, 'annotations', piece, '{}_o.txt'.format(instrument)
)
)
index['tracks'][piece+'-'+instrument]['notes'] = (
'annotations/{}/{}.txt'.format(piece, instrument),
score_checksum,
)
index['tracks'][piece+'-'+instrument]['notes_original'] = (
'annotations/{}/{}_o.txt'.format(piece, instrument),
score_original_checksum,
)
with open(DATASET_INDEX_PATH, 'w') as fhandle:
json.dump(index, fhandle, indent=2)
def main(args):
make_dataset_index(args.data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make Phenicx-anechoic index file.')
PARSER.add_argument(
'data_path', type=str, help='Path to Phenicx-anechoic data folder.'
)
main(PARSER.parse_args())
| 31.133758 | 88 | 0.528232 | 478 | 4,888 | 5.253138 | 0.292887 | 0.031063 | 0.043011 | 0.057348 | 0.321784 | 0.275189 | 0.227798 | 0.212266 | 0.212266 | 0.195938 | 0 | 0.010452 | 0.334493 | 4,888 | 156 | 89 | 31.333333 | 0.761451 | 0.033552 | 0 | 0.163793 | 0 | 0 | 0.181643 | 0.021181 | 0 | 0 | 0 | 0 | 0.051724 | 1 | 0.025862 | false | 0 | 0.051724 | 0 | 0.086207 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61af771f46003607f0d27ebb336cdcdad337f2c1 | 4,341 | py | Python | Pzzzzz/plugins/code_runner.py | Pzzzzz5142/animal-forest-QQ-group-bot | a9141a212a7746ac95d28459ec9cec5b6c188b35 | [
"MIT"
] | 5 | 2020-05-28T06:29:33.000Z | 2020-09-30T12:14:46.000Z | Pzzzzz/plugins/code_runner.py | Pzzzzz5142/xjbx-QQ-group-bot | a9141a212a7746ac95d28459ec9cec5b6c188b35 | [
"MIT"
] | null | null | null | Pzzzzz/plugins/code_runner.py | Pzzzzz5142/xjbx-QQ-group-bot | a9141a212a7746ac95d28459ec9cec5b6c188b35 | [
"MIT"
] | null | null | null | from nonebot import on_command, CommandSession, get_bot
from nonebot.command import call_command
from nonebot.message import escape as message_escape
import aiohttp
from nonebot.argparse import ArgumentParser
__plugin_name__ = "Run Code"
RUN_API_URL_FORMAT = "https://glot.io/run/{}?version=latest"
SUPPORTED_LANGUAGES = {
"assembly": {"ext": "asm"},
"bash": {"ext": "sh"},
"c": {"ext": "c"},
"clojure": {"ext": "clj"},
"coffeescript": {"ext": "coffe"},
"cpp": {"ext": "cpp"},
"csharp": {"ext": "cs"},
"erlang": {"ext": "erl"},
"fsharp": {"ext": "fs"},
"go": {"ext": "go"},
"groovy": {"ext": "groovy"},
"haskell": {"ext": "hs"},
"java": {"ext": "java", "name": "Main"},
"javascript": {"ext": "js"},
"julia": {"ext": "jl"},
"kotlin": {"ext": "kt"},
"lua": {"ext": "lua"},
"perl": {"ext": "pl"},
"php": {"ext": "php"},
"python": {"ext": "py"},
"ruby": {"ext": "rb"},
"rust": {"ext": "rs"},
"scala": {"ext": "scala"},
"swift": {"ext": "swift"},
"typescript": {"ext": "ts"},
}
headers = {
"Authorization": "Token {}".format(get_bot().config.RUNCODEAPI),
"Content-type": "application/json",
}
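# glot.io "run" API: POST the source files, then read stdout/stderr/error from the response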
@on_command("run", aliases=["运行代码", "运行"], only_to_me=False)
async def run(session: CommandSession):
supported_languages = ", ".join(sorted(SUPPORTED_LANGUAGES.keys()))
language = session.get(
"language", prompt="你想运行的代码是什么语言?\n" f"目前支持 {supported_languages}"
)
code = session.get("code", prompt="你想运行的代码是?")
await session.send("正在运行,请稍等……")
async with aiohttp.ClientSession(headers=headers) as sess:
async with sess.post(
RUN_API_URL_FORMAT.format(language),
json={
"files": [
{
"name": (
SUPPORTED_LANGUAGES[language].get("name", "main")
+ f'.{SUPPORTED_LANGUAGES[language]["ext"]}'
),
"content": code,
}
],
"stdin": "",
"command": "",
},
) as resp:
if resp.status != 200:
session.finish("运行失败,服务可能暂时不可用,请稍后再试")
payload = await resp.json()
if not isinstance(payload, dict):
session.finish("运行失败,服务可能暂时不可用,请稍后再试")
sent = False
for k in ["stdout", "stderr", "error"]:
            v = payload.get(k)
            if v is None:  # key absent from the glot.io response
                continue
            lines = v.splitlines()
lines, remained_lines = lines[:10], lines[10:]
out = "\n".join(lines)
out, remained_out = out[: 60 * 10], out[60 * 10 :]
if remained_lines or remained_out:
out += f"\n(输出过多,已忽略剩余内容)"
out = message_escape(out)
if out:
await session.send(f"{k}:\n\n{out}")
sent = True
if not sent:
session.finish("运行成功,没有任何输出")
@on_command("cal", only_to_me=False)
async def cal(session: CommandSession):
args = session.current_arg_text.strip()
if args == "":
session.finish("没有输入内容哦!")
await call_command(
session.bot,
session.event,
"run",
current_arg="""from math import *
print({})""".format(
args
),
)
@run.args_parser
async def _(session: CommandSession):
stripped_arg = session.current_arg_text.strip()
if session.is_first_run:
if stripped_arg == "":
return
parser = ArgumentParser(session=session)
parser.add_argument("-l", "--language", default="python", help="指定编程语言")
parser.add_argument("source", help="运行源代码", nargs="*")
argv = parser.parse_args(stripped_arg.split(" "))
language = argv.language
if language not in SUPPORTED_LANGUAGES:
session.finish("暂时不支持运行你输入的编程语言")
session.state["language"] = language
source = " ".join(argv.source)
if source == "":
return
session.state["code"] = " ".join(argv.source)
return
    if not stripped_arg:
        session.pause("请输入有效内容")
if session.current_key == "language":
if stripped_arg not in SUPPORTED_LANGUAGES:
session.finish("暂时不支持运行你输入的编程语言")
session.state[session.current_key] = stripped_arg
| 30.356643 | 80 | 0.539968 | 461 | 4,341 | 4.97397 | 0.381779 | 0.0628 | 0.00785 | 0.013083 | 0.143916 | 0.097689 | 0.05495 | 0.05495 | 0.05495 | 0 | 0 | 0.004847 | 0.287031 | 4,341 | 142 | 81 | 30.570423 | 0.734087 | 0 | 0 | 0.096774 | 0 | 0 | 0.17853 | 0.013822 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.048387 | 0 | 0.080645 | 0.008065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61b14520dd49127de7f32813b04d71f1ebbf9aa7 | 2,905 | py | Python | jobs.py | gomes-lab/HCLMP | 6770579404f6fa76948f688ae3c626ad621284ec | [
"MIT"
] | 4 | 2021-06-04T12:34:19.000Z | 2022-01-08T07:12:41.000Z | jobs.py | sk2299/HCLMP | d8b3e4dbf39af8d324c4f57e5d56f5846f2b136e | [
"CC-BY-4.0",
"MIT"
] | 1 | 2022-01-07T05:16:26.000Z | 2022-01-07T05:55:17.000Z | jobs.py | sk2299/HCLMP | d8b3e4dbf39af8d324c4f57e5d56f5846f2b136e | [
"CC-BY-4.0",
"MIT"
] | 1 | 2022-02-21T19:12:14.000Z | 2022-02-21T19:12:14.000Z | import os
from os import listdir
from os.path import isfile, join
'''
Author: Shufeng KONG, Cornell University, USA
Contact: sk2299@cornell.edu
This is an example script for running jobs. Set single_job to True if you only have one setting or dataset to run.
In our experiments we have 69 systems to run, so single_job is False by default. Trained models are
saved in the "models" folder by default.
For testing, set train to 0. Test results are saved in the "results" folder by default. We
provide trained models for all 69 systems, so the script can be run as-is to reproduce the results.
transfer_type selects whether GAN transfer learning is used; 'None' disables transfer learning.
'''
model = 'run_HCLMP.py'
data_path = 'data/uvis_dataset_no_redundancy/uvis_dict.chkpt'
single_job = True
train = 0 # 0 for testing, 1 for training
transfer_type = 'gen_feat' # choices ['gen_feat', 'None']
#transfer_type = 'None'
epochs = 40
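# For example, with train=1 the single-job branch below assembles a command of
# the form (paths abbreviated):
#   CUDA_VISIBLE_DEVICES=0 python run_HCLMP.py --train --epochs 40 \
#       --transfer-type gen_feat --data-path data/uvis_dataset_no_redundancy/uvis_dict.chkpt \
#       --train-path <train_path> --val-path <val_path>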
# Run on the random split setting
if single_job:
train_path = 'data/uvis_dataset_no_redundancy/idx/rd_idx_jh/train/rd_idx_jh.npy'
test_path = 'data/uvis_dataset_no_redundancy/idx/rd_idx_jh/test/rd_idx_jh.npy'
val_path = 'data/uvis_dataset_no_redundancy/idx/rd_idx_jh/val/rd_idx_jh.npy'
if train==1:
command = "CUDA_VISIBLE_DEVICES=0 python %s --train --epochs %d --transfer-type %s --data-path %s --train-path %s --val-path %s"\
%(model, epochs, transfer_type, data_path, train_path, val_path)
else:
command = "CUDA_VISIBLE_DEVICES=0 python %s --evaluate --epochs %d --transfer-type %s --data-path %s --test-path %s"\
%(model, epochs, transfer_type, data_path, test_path)
print()
print(command)
print()
os.system(command)
# Run on 69 ternary systems
else:
train_dir = 'data/uvis_dataset_no_redundancy/idx/train/'
val_dir = 'data/uvis_dataset_no_redundancy/idx/val_from_train/'
test_dir = 'data/uvis_dataset_no_redundancy/idx/test/'
system_files = sorted([f.split('.')[0] for f in listdir(train_dir) if isfile(join(train_dir, f))])
    for system in system_files:
        train_path = train_dir + system + '.npy'
        val_path = val_dir + system + '.npy'
        test_path = test_dir + system + '.npy'
if train==1:
command = "CUDA_VISIBLE_DEVICES=0 python %s --train --epochs %d --transfer-type %s --data-path %s --train-path %s --val-path %s"\
%(model, epochs, transfer_type, data_path, train_path, val_path)
else:
command = "CUDA_VISIBLE_DEVICES=0 python %s --transfer-type %s --evaluate --epochs %d --data-path %s --test-path %s" \
% (model, transfer_type, epochs, data_path, test_path)
print()
print(command)
print()
os.system(command)
    print('Finished running all systems!!!')
| 39.256757 | 141 | 0.679862 | 448 | 2,905 | 4.21875 | 0.263393 | 0.069841 | 0.055556 | 0.062963 | 0.45873 | 0.424868 | 0.408466 | 0.341799 | 0.31164 | 0.31164 | 0 | 0.010035 | 0.211015 | 2,905 | 73 | 142 | 39.794521 | 0.814572 | 0.047504 | 0 | 0.395349 | 0 | 0.093023 | 0.420673 | 0.221635 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.069767 | 0 | 0.069767 | 0.162791 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61b34cc74049ca79c83591c3b423846191259210 | 3,460 | py | Python | tests/test_readout.py | msohaibalam/forest-benchmarking | 40f5fd5235803204b34fa8ba1ced4ef2e0f3098d | [
"Apache-2.0"
] | null | null | null | tests/test_readout.py | msohaibalam/forest-benchmarking | 40f5fd5235803204b34fa8ba1ced4ef2e0f3098d | [
"Apache-2.0"
] | null | null | null | tests/test_readout.py | msohaibalam/forest-benchmarking | 40f5fd5235803204b34fa8ba1ced4ef2e0f3098d | [
"Apache-2.0"
] | null | null | null | import re
import numpy as np
from pyquil import Program
from pyquil.device import gates_in_isa
from pyquil.gates import I, RX, CNOT, MEASURE
from pyquil.noise import decoherence_noise_with_asymmetric_ro
from forest_benchmarking.readout import get_flipped_program, estimate_confusion_matrix, \
estimate_joint_confusion_in_set, marginalize_confusion_matrix, estimate_joint_reset_confusion
def test_get_flipped_program():
program = Program()
ro = program.declare('ro', memory_type='BIT', memory_size=2)
program += Program([
I(0),
RX(2.3, 1),
CNOT(0, 1),
MEASURE(0, ro[0]),
MEASURE(1, ro[1]),
])
flipped_program = get_flipped_program(program)
lines = flipped_program.out().splitlines()
matched = 0
for l1, l2 in zip(lines, lines[1:]):
ma = re.match(r'MEASURE (\d) ro\[(\d)\]', l2)
if ma is not None:
matched += 1
assert int(ma.group(1)) == int(ma.group(2))
assert l1 == 'RX(pi) {}'.format(int(ma.group(1)))
assert matched == 2
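# get_flipped_program is expected to prepend an RX(pi) on each measured qubit
# immediately before its MEASURE instruction, which is exactly what the regex
# pass above verifies for both qubits.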
def test_readout_confusion_matrix_consistency(qvm):
noise_model = decoherence_noise_with_asymmetric_ro(gates=gates_in_isa(qvm.device.get_isa()))
qvm.qam.noise_model = noise_model
qvm.qam.random_seed = 1
num_shots = 500
qubits = (0, 1, 2)
qubit = (0,)
# parameterized confusion matrices
cm_3q_param = estimate_joint_confusion_in_set(qvm, qubits, num_shots=num_shots,
joint_group_size=len(qubits))[qubits]
cm_1q_param = estimate_joint_confusion_in_set(qvm, qubit, num_shots=num_shots,
joint_group_size=1)[qubit]
# non-parameterized confusion matrices
cm_3q = estimate_joint_confusion_in_set(qvm, qubits, num_shots=num_shots,
joint_group_size=len(qubits),
use_param_program=False)[qubits]
cm_1q = estimate_joint_confusion_in_set(qvm, qubit, num_shots=num_shots,
joint_group_size=1,
use_param_program=False,
use_active_reset=True)[qubit]
# single qubit cm
single_q = estimate_confusion_matrix(qvm, qubit[0], num_shots)
# marginals from 3q above
marginal_1q_param = marginalize_confusion_matrix(cm_3q_param, qubits, qubit)
marginal_1q = marginalize_confusion_matrix(cm_3q, qubits, qubit)
atol = .03
np.testing.assert_allclose(cm_3q_param, cm_3q, atol=atol)
np.testing.assert_allclose(cm_1q_param, single_q, atol=atol)
np.testing.assert_allclose(cm_1q, single_q, atol=atol)
np.testing.assert_allclose(cm_1q_param, marginal_1q_param, atol=atol)
np.testing.assert_allclose(cm_1q, marginal_1q, atol=atol)
np.testing.assert_allclose(marginal_1q_param, single_q, atol=atol)
def test_reset_confusion_consistency(qvm):
noise_model = decoherence_noise_with_asymmetric_ro(gates=gates_in_isa(qvm.device.get_isa()))
qvm.qam.noise_model = noise_model
qvm.qam.random_seed = 1
num_trials = 10
qubits = (0, 1)
passive_reset = estimate_joint_reset_confusion(qvm, qubits, num_trials, len(qubits),
use_active_reset=False)[qubits]
atol = .1
np.testing.assert_allclose(passive_reset[:, 0], np.ones(4).T, atol=atol)
| 39.318182 | 97 | 0.650867 | 462 | 3,460 | 4.549784 | 0.222944 | 0.038059 | 0.049952 | 0.076594 | 0.468601 | 0.367745 | 0.342531 | 0.337774 | 0.321123 | 0.299715 | 0 | 0.023653 | 0.254624 | 3,460 | 87 | 98 | 39.770115 | 0.791392 | 0.031503 | 0 | 0.090909 | 0 | 0 | 0.011058 | 0 | 0 | 0 | 0 | 0 | 0.151515 | 1 | 0.045455 | false | 0.030303 | 0.106061 | 0 | 0.151515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61b823d8585b20f092c787b7b2c50b78ac5048c4 | 1,166 | py | Python | freshmaker/parsers/errata/signing_change.py | hluk/freshmaker | 224875b104b5be9fa6688af31363a387eeb1b05f | [
"MIT"
] | 5 | 2020-06-17T11:29:16.000Z | 2022-03-24T07:20:16.000Z | freshmaker/parsers/errata/signing_change.py | ronnyhlim/freshmaker | b7635dcfe631759e917c85e6ef6654024a3fb91c | [
"MIT"
] | 96 | 2020-06-29T15:01:23.000Z | 2022-03-30T08:07:06.000Z | freshmaker/parsers/errata/signing_change.py | ronnyhlim/freshmaker | b7635dcfe631759e917c85e6ef6654024a3fb91c | [
"MIT"
] | 20 | 2020-06-16T01:30:08.000Z | 2022-02-19T15:34:55.000Z | # SPDX-License-Identifier: MIT
from freshmaker.parsers import BaseParser
from freshmaker.events import FlatpakModuleAdvisoryReadyEvent
from freshmaker.errata import Errata, ErrataAdvisory
class ErrataAdvisorySigningChangedParser(BaseParser):
"""
    Parses errata.activity.signing messages (sent when a build attached to an
    advisory is signed).
    Creates a FlatpakModuleAdvisoryReadyEvent if a new flatpak advisory can be
    created for the module security advisory.
"""
name = "ErrataAdvisorySigningChangedParser"
topic_suffixes = ["errata.activity.signing"]
def can_parse(self, topic, msg):
return any(topic.endswith(s) for s in self.topic_suffixes)
def parse(self, topic, msg):
msg_id = msg.get("msg_id")
inner_msg = msg.get("msg")
if "module" not in inner_msg["content_types"] or inner_msg["errata_status"] != "QE":
return
errata_id = int(inner_msg.get("errata_id"))
errata = Errata()
advisory = ErrataAdvisory.from_advisory_id(errata, errata_id)
if advisory.is_flatpak_module_advisory_ready():
return FlatpakModuleAdvisoryReadyEvent(msg_id, advisory)
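# For reference, a message this parser accepts looks roughly like (shape
# inferred from the fields accessed above; real payloads come from Errata Tool,
# and the values here are illustrative):
#   {"msg_id": "...",
#    "msg": {"content_types": ["module"], "errata_status": "QE",
#            "errata_id": "12345"}}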
| 32.388889 | 92 | 0.715266 | 134 | 1,166 | 6.067164 | 0.440299 | 0.03936 | 0.051661 | 0.04182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.199828 | 1,166 | 35 | 93 | 33.314286 | 0.871383 | 0.191252 | 0 | 0 | 0 | 0 | 0.119126 | 0.062295 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.166667 | 0.055556 | 0.611111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61b8dfc3b67dbc326456271271e5948b8211186a | 1,940 | py | Python | music163/api_test.py | WMLHUST/scrapy_wangyiyun_music | bd6b76d852dc6ead7f88a14843886c233c5a52c7 | [
"Apache-2.0"
] | 3 | 2019-07-01T14:20:26.000Z | 2019-12-16T01:50:23.000Z | music163/api_test.py | WMLHUST/scrapy_wangyiyun_music | bd6b76d852dc6ead7f88a14843886c233c5a52c7 | [
"Apache-2.0"
] | null | null | null | music163/api_test.py | WMLHUST/scrapy_wangyiyun_music | bd6b76d852dc6ead7f88a14843886c233c5a52c7 | [
"Apache-2.0"
] | 5 | 2018-10-15T12:47:55.000Z | 2019-09-13T13:33:53.000Z | # coding: utf-8
import requests
def get_song_comments(music_id, offset=0, total='false', limit=100):
    action = (
        'http://music.163.com/api/v1/resource/comments/R_SO_4_{}/'
        '?rid=R_SO_4_{}&offset={}&total={}&limit={}'
    ).format(music_id, music_id, offset, total, limit)
# proxy = {"http": "http://dev-proxy.oa.com:8080"}
# proxy = {"http": "http://194.182.74.160:3128"}
# proxy = {"http": "http://127.0.0.1:1080"}
headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
'User-Agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
"Referer": "Referer:http://music.163.com/",
"Accept-Encoding": "zh-CN,zh;q=0.8,en;q=0.6",
"Content-Type": "application/x-www-form-urlencoded",
}
# rep = requests.get(action, proxies=proxy, headers=headers)
rep = requests.get(action, headers=headers)
print("status code:", rep.status_code)
return rep
def get_hot_comments(rep):
comments_list = []
comments = rep.json()['hotComments']
for comment in comments:
tmp_dict = {}
tmp_dict['nickname'] = comment['user']['nickname']
tmp_dict['star_cnt'] = comment['likedCount']
tmp_dict['content'] = comment['content']
if len(comment['beReplied']) > 0:
tmp_dict['quote'] = comment['beReplied'][0]['content']
# log.msg(tmp_dict, _level=log.INFO)
comments_list.append(tmp_dict)
return comments_list
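# Each element of 'hotComments' used above carries user.nickname, likedCount,
# content and, when the comment is a reply, beReplied[0].content.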
if __name__ == "__main__":
rep = get_song_comments(520521342)
print(rep.text)
# print(get_hot_comments(rep))
# tmp_proxy = ProxyHandler.random_get()
# proxy = {"http": tmp_proxy}
# rep = requests.get("http://httpbin.org/ip", timeout=(2, 8))
# print(rep.text)
| 38.039216 | 146 | 0.610309 | 262 | 1,940 | 4.362595 | 0.469466 | 0.04287 | 0.034121 | 0.026247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059701 | 0.20567 | 1,940 | 50 | 147 | 38.8 | 0.682025 | 0.214433 | 0 | 0 | 0 | 0.103448 | 0.31746 | 0.093254 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.034483 | 0 | 0.172414 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61b9588fe2949fa1516521e6875b226d81b8b6f7 | 2,356 | py | Python | src/data_modules/image_dataset_data_module.py | gmum/lcw-generator | fde1128505194bd04f04bbddcbe7fcec453b0052 | [
"MIT"
] | 4 | 2020-09-17T22:16:48.000Z | 2022-02-21T19:07:48.000Z | src/data_modules/image_dataset_data_module.py | gmum/lcw-generator | fde1128505194bd04f04bbddcbe7fcec453b0052 | [
"MIT"
] | null | null | null | src/data_modules/image_dataset_data_module.py | gmum/lcw-generator | fde1128505194bd04f04bbddcbe7fcec453b0052 | [
"MIT"
] | null | null | null | from typing import Union
from torch.utils.data.dataset import Dataset
from data_modules.dataset_factory import DatasetFactory
from torch.utils.data import DataLoader
import pytorch_lightning as pl
class ImageDatasetDataModule(pl.LightningDataModule):
    def __init__(self, dataset_factory: DatasetFactory, train_batch_size: int, validation_batch_size: int, workers: int):
super().__init__()
self.__dataset_factory = dataset_factory
self.train_batch_size = train_batch_size
self.validation_batch_size = validation_batch_size
self.workers = workers
self.__validation_dataset = None
self.__train_dataset = None
self.__geneval_dataset = None
def dataset_name(self) -> str:
return self.__dataset_factory.get_dataset_name()
def setup(self, stage=None):
if self.__validation_dataset is None:
self.__validation_dataset = self.__dataset_factory.get_dataset(False)
print(f'Size of validation dataset: {len(self.__validation_dataset)}')
if self.__train_dataset is None:
self.__train_dataset = self.__dataset_factory.get_dataset(True)
print(f'Size of train dataset: {len(self.__train_dataset)}')
if self.__geneval_dataset is None:
self.__geneval_dataset = self.__dataset_factory.get_eval_dataset()
print(f'Size of geneval dataset: {len(self.__geneval_dataset)}')
def train_dataset_elements_count(self) -> int:
assert self.__train_dataset is not None
return len(self.__train_dataset)
def train_dataloader(self, drop_last=True, shuffle=True) -> DataLoader:
assert self.__train_dataset is not None
return DataLoader(self.__train_dataset, batch_size=self.train_batch_size, shuffle=shuffle,
num_workers=self.workers, drop_last=drop_last, pin_memory=False)
def val_dataloader(self) -> DataLoader:
assert self.__validation_dataset is not None
return DataLoader(self.__validation_dataset, batch_size=self.validation_batch_size,
num_workers=4, drop_last=True, pin_memory=False)
def generative_eval_dataset(self) -> Dataset:
assert self.__geneval_dataset is not None
return self.__geneval_dataset
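# A minimal usage sketch (the factory class name and batch sizes are
# illustrative; any DatasetFactory exposing get_dataset/get_eval_dataset works):
#
#   factory = MnistDatasetFactory()          # hypothetical implementation
#   dm = ImageDatasetDataModule(factory, train_batch_size=64,
#                               validation_batch_size=256, workers=4)
#   dm.setup()
#   train_loader = dm.train_dataloader()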
| 43.62963 | 129 | 0.702886 | 287 | 2,356 | 5.324042 | 0.205575 | 0.058901 | 0.08377 | 0.054974 | 0.219895 | 0.168848 | 0.081152 | 0.048429 | 0 | 0 | 0 | 0.000548 | 0.225382 | 2,356 | 53 | 130 | 44.45283 | 0.836712 | 0 | 0 | 0.04878 | 0 | 0 | 0.071211 | 0.038211 | 0 | 0 | 0 | 0 | 0.097561 | 1 | 0.170732 | false | 0 | 0.121951 | 0.02439 | 0.439024 | 0.073171 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61b978854383493ee32031f63ea4d378d345d545 | 3,429 | py | Python | src/methods/model_investigate.py | clownjiahui/kdd2018_air_pollution_prediction | c76c3ee87132a923cf499d9be17d49b2c9b6eac1 | [
"MIT"
] | 19 | 2019-03-31T09:06:49.000Z | 2022-03-29T12:25:29.000Z | src/methods/model_investigate.py | clownjiahui/kdd2018_air_pollution_prediction | c76c3ee87132a923cf499d9be17d49b2c9b6eac1 | [
"MIT"
] | null | null | null | src/methods/model_investigate.py | clownjiahui/kdd2018_air_pollution_prediction | c76c3ee87132a923cf499d9be17d49b2c9b6eac1 | [
"MIT"
] | 11 | 2019-04-02T07:59:45.000Z | 2022-03-18T08:32:28.000Z | import settings
import const
import pandas as pd
import numpy as np
import matplotlib
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True}) # to prevent labels going out of plot!
matplotlib.use('TkAgg')
import seaborn as sns
import matplotlib.pyplot as plt
from src.preprocess import reform
from src import util
config = settings.config[const.DEFAULT]
feature_dir = config[const.FEATURE_DIR]
suffix = '_12_3_7_24_8_6_12_1_7_24_hybrid_tests.csv'
paths = {
'BJ': {
# 'PM2.5': feature_dir + const.BJ_PM25 + suffix,
# 'PM10': feature_dir + const.BJ_PM10 + suffix,
# 'O3': feature_dir + const.BJ_O3 + suffix,
},
'LD': {
'PM2.5': feature_dir + const.LD_PM25 + suffix,
# 'PM10': feature_dir + const.LD_PM10 + suffix,
}
}
smape_columns = ['city', const.ID, const.LONG, const.LAT, 'pollutant', 'SMAPE', 'count']
smapes = pd.DataFrame(columns=smape_columns)
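# util.SMAPE is assumed to implement the standard symmetric MAPE over the
# 48-hour forecast horizon:  SMAPE = mean(2*|F - A| / (|A| + |F|))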
for city in paths:
station_path = config[const.BJ_STATIONS] if city == 'BJ' else config[const.LD_STATIONS]
stations = pd.read_csv(station_path, sep=";", low_memory=False)
stations_dict = stations.to_dict(orient='index')
for pollutant, path in paths[city].items():
ts = pd.read_csv(path, sep=";", low_memory=False)
station_data = reform.group_by_station(ts=ts, stations=stations)
local_smapes = pd.DataFrame(data=[], columns=smape_columns)
for _, station in stations_dict.items():
data = station_data[station[const.ID]] if station[const.PREDICT] == 1 else pd.DataFrame()
if len(data.index) == 0:
continue # no prediction for this station
            actual = data[[pollutant + '__' + str(i) for i in range(1, 49)]].values
            forecast = data[['f' + str(i) for i in range(0, 48)]].values
station['SMAPE'] = util.SMAPE(actual=actual, forecast=forecast)
smape = pd.DataFrame(
data=[[city, station[const.ID], station[const.LONG], station[const.LAT],
pollutant, station['SMAPE'], actual.size]],
columns=smape_columns)
local_smapes = local_smapes.append(other=smape, ignore_index=True)
smapes = smapes.append(other=smape, ignore_index=True)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 3))
# Plot SMAPE values sorted
local_smapes.sort_values(by='SMAPE', inplace=True)
g = sns.stripplot(x=const.ID, y='SMAPE', data=local_smapes, ax=axes[0])
g.set_xticklabels(labels=g.get_xticklabels(), rotation=90) # rotate station names for readability
# Plot SMAPE values on map
local_smapes.plot.scatter(x=const.LONG, y=const.LAT, s=util.normalize(local_smapes['SMAPE'], multiplier=150),
title=city + '_' + pollutant, fontsize=13, ax=axes[1])
# Plot station names on positions
for _, station in stations_dict.items():
if 'SMAPE' in station:
label = ('%d ' % (100 * station['SMAPE'])) + station[const.ID][0:2] # 64 be
axes[1].annotate(label, xy=(station[const.LONG], station[const.LAT]),
xytext=(5, 0), textcoords='offset points', )
plt.draw()
# Calculate total error
total_smape = np.sum(smapes['SMAPE'] * smapes['count']) / np.sum(smapes['count'])
print('Total SMAPE:', total_smape)
plt.show() | 45.72 | 117 | 0.633712 | 458 | 3,429 | 4.60262 | 0.349345 | 0.045541 | 0.035579 | 0.024194 | 0.171727 | 0.133776 | 0.035104 | 0 | 0 | 0 | 0 | 0.024288 | 0.231554 | 3,429 | 75 | 118 | 45.72 | 0.775712 | 0.115194 | 0 | 0.033333 | 0 | 0 | 0.061549 | 0.013567 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61b9871edd93122c9a1e0cadd169ef1f8e7cc7a9 | 6,002 | py | Python | src/obj.py | LemnX4/PointDipole | 79223498f6adce9d4f33939bd0173e9f7cd9132a | [
"MIT"
] | null | null | null | src/obj.py | LemnX4/PointDipole | 79223498f6adce9d4f33939bd0173e9f7cd9132a | [
"MIT"
] | null | null | null | src/obj.py | LemnX4/PointDipole | 79223498f6adce9d4f33939bd0173e9f7cd9132a | [
"MIT"
] | null | null | null | # vim: set et sw=4 ts=4 nu fdm=indent:
# coding: utf8
import numpy as np
import random
from demag import demagnetization
from llg import Bth
class Object:
def __init__(self, position, magnetization, a=1.0, b=1.0, h=1.0, angle=0, M0=1720e3):
if len(magnetization) == 2 and len(position) == 2:
self.dim = "2D"
elif len(magnetization) == 3 and len(position) == 3:
self.dim = "3D"
elif len(magnetization) != len(position):
print("\nError : island dimension undefined.\n")
self.dim == "undefined"
self._frozen = False
self._atomic = False
self._island = False
self.pos = position
self._a = a*1e-9
self._b = b*1e-9
self._h = h*1e-9
self.mag = magnetization
self.thermic_field = []
self.time_evolved = False
self.time = []
self.mag_history = []
self.angle = angle
self.M0 = M0
self.Ku = 0.0
self.har = a/b
self.var = a/h
self.coupled_with = []
@property
def frozen(self):
return self._frozen
@frozen.setter
def frozen(self, value):
self._frozen = value
@property
def atomic(self):
return self._atomic
@atomic.setter
def atomic(self, value):
self._atomic = value
@property
def island(self):
return self._island
@island.setter
def island(self, value):
self._island = value
@property
def a(self):
return self._a
@a.setter
def a(self, value):
self._a = value*1e-9
self.update_values()
@property
def b(self):
return self._b
@b.setter
def b(self, value):
self._b = value*1e-9
self.update_values()
@property
def h(self):
return self._h
@h.setter
def h(self, value):
self._h = value*1e-9
self.update_values()
@property
def M0(self):
return self._M0
@M0.setter
def M0(self, value):
self._M0 = value
self.update_values()
@property
def mag(self):
return self._mag
@mag.setter
def mag(self, value):
if np.linalg.norm(value) == 0:
if self.dim == "2D":
print("Error : null vector magnetization. Set to [1, 0] by default.")
self._mag = [1, 0]
elif self.dim == "3D":
print("Error : null vector magnetization. Set to [1, 0, 0] by default.")
self._mag = [1, 0, 0]
else:
self._mag = value / np.linalg.norm(value)
def randomize_magnetization(self, angle=180):
theta = 2*(0.5-random.random())*angle*np.pi/180
if self.dim == "2D":
new_angle = np.arctan2(self.mag[1], self.mag[0]) + theta
self.mag = [np.cos(new_angle), np.sin(new_angle)]
elif self.dim == "3D":
self.mag = [random.random()-0.5, random.random()-0.5, random.random()-0.5]
def update_values(self):
if self.atomic:
self.factors = [1/3.0, 1/3.0, 1/3.0]
self.ku = 0
return
self.V = np.pi * (self.a/2.0)*(self.b/2.0)*self.h
self.factors = demagnetization(self.a, self.b, self.h)
self.M = self.V*self.M0
if self.dim == "2D":
self.E0 = (2*np.pi*1e-7) * self.M0**2 * self.V * self.factors[0]
self.ku = self.M0**2 *(2*np.pi*1e-7)*(1 - 2*self.factors[0] - self.factors[2])
def update_caracteristics(self):
        if self.atomic:
            c = "\n##########\tCaracteristics of the atom:\t##########\n\n"
        elif self.island:
            c = "\n##########\tCaracteristics of the nano-island:\t##########\n\n"
        else:
            # fallback so 'c' is always bound before the appends below
            c = "\n##########\tCaracteristics of the object:\t##########\n\n"
c += "Position (nm) : {}\n".format(self.pos)
if self.atomic:
c += "Magnetic moment : {} µB\n".format(self.m)
else:
c += "Magnetic moment : {} µB\n".format(self.M/9.74e-24)
f = ""
if self.frozen:
f = " (frozen)"
c += "Magnetization direction{} : {}\n".format(f, self.mag)
if not self.atomic:
c += "Angle : {}°\n".format(self.angle)
c += "Large diamater (a) : {} nm\n".format(self.a/1e-9)
c += "Small diamater (b) : {} nm\n".format(self.b/1e-9)
c += "Height (h) : {} nm\n".format(self.h/1e-9)
c += "Horizontal aspect ratio : {} \n".format(self.har)
c += "Vertical aspect ratio : {} \n".format(self.var)
else:
c += "Radius : {} nm\n".format(self.radius)
c += "Volume : {} m³\n".format(self.V)
if not self.atomic:
if self.dim == "2D":
c += "Uniaxial horizontal constant Ku : {} kJ/m³\n".format(self.Ku/1e3)
c += "Self dipolar energy : {} J\n".format(self.E0)
elif self.dim == "3D":
c += "Demagnetization factors : {}\n".format(self.factors)
elif self.Ku != 0:
c += "Uniaxial constant Ku : {} kJ/m³\n".format(self.Ku/1e3)
c += "Uniaxial axis : {}\n".format(self.u_axis)
if len(self.coupled_with) !=0 :
c += "\nCoupled with islands : {}\n".format(self.coupled_with)
else:
c += "\nNot coupled with other islands.\n"
c += "\n###################################################################\n"
self.caracteristics = c
def initialize_thermic_field(self, gamma, alpha, T, N):
self.thermic_field = []
for i in range(N+1):
self.thermic_field.append(Bth(gamma, alpha, self.M, T))
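# A minimal usage sketch (values illustrative; demagnetization() and Bth()
# come from this project's demag/llg modules imported above):
#   isl = Object(position=[0.0, 0.0], magnetization=[1.0, 0.0], a=40, b=20, h=5)
#   isl.island = True
#   isl.randomize_magnetization(angle=30)
#   isl.update_caracteristics()
#   print(isl.caracteristics)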
| 26.794643 | 90 | 0.488004 | 770 | 6,002 | 3.738961 | 0.192208 | 0.043765 | 0.064953 | 0.022577 | 0.202154 | 0.16603 | 0.162904 | 0.150052 | 0.091004 | 0.063216 | 0 | 0.034581 | 0.354382 | 6,002 | 223 | 91 | 26.914798 | 0.708129 | 0.008164 | 0 | 0.237179 | 0 | 0 | 0.150422 | 0.02968 | 0 | 0 | 0 | 0 | 0 | 1 | 0.141026 | false | 0 | 0.025641 | 0.051282 | 0.230769 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61ba96c4c02b0b326e2005e4e9d043a4842e135b | 828 | py | Python | rubbish.bin/legacy/examples/normal_textures.py | Jack12xl/taichi_three | 4785aeefd9e0bccd33cc9b564046dc566b03c714 | [
"MIT"
] | null | null | null | rubbish.bin/legacy/examples/normal_textures.py | Jack12xl/taichi_three | 4785aeefd9e0bccd33cc9b564046dc566b03c714 | [
"MIT"
] | null | null | null | rubbish.bin/legacy/examples/normal_textures.py | Jack12xl/taichi_three | 4785aeefd9e0bccd33cc9b564046dc566b03c714 | [
"MIT"
] | null | null | null | import taichi as ti
import taichi_three as t3
import numpy as np
ti.init(ti.cpu)
scene = t3.Scene()
obj = t3.readobj('assets/cube.obj', scale=0.6)
model = t3.Model(t3.Mesh.from_obj(obj))
model.material = t3.Material(t3.CookTorrance(
color=t3.Texture(ti.imread('assets/cloth.jpg')),
normal=t3.NormalMap(texture=t3.Texture(ti.imread('assets/normal.png'))),
))
scene.add_model(model)
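# The NormalMap perturbs per-pixel shading normals on top of the color
# texture; both images are assumed to exist under assets/.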
camera = t3.Camera()
camera.ctl = t3.CameraCtl(pos=[0, 1, 1.8])
scene.add_camera(camera)
light = t3.Light([0.4, -0.8, -1.7])
scene.add_light(light)
gui = ti.GUI('Normal map', camera.res)
while gui.running:
gui.get_event(None)
gui.running = not gui.is_pressed(ti.GUI.ESCAPE)
camera.from_mouse(gui)
scene.render()
gui.set_image(camera.img)
#gui.set_image(camera.fb['normal'].to_numpy() * 0.5 + 0.5)
gui.show()
| 27.6 | 76 | 0.693237 | 143 | 828 | 3.937063 | 0.433566 | 0.042629 | 0.039076 | 0.060391 | 0.081705 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040111 | 0.126812 | 828 | 29 | 77 | 28.551724 | 0.738589 | 0.068841 | 0 | 0 | 0 | 0 | 0.075325 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12 | 0 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61c0a34abeefd2557216aff867ba7f8de9c65cae | 799 | py | Python | forms-flow-api/src/api/utils/util.py | McCoySmith/forms-flow-ai | 5555c1b2a9a5496f1ab98e5339d66537e25974c2 | [
"Apache-2.0"
] | null | null | null | forms-flow-api/src/api/utils/util.py | McCoySmith/forms-flow-ai | 5555c1b2a9a5496f1ab98e5339d66537e25974c2 | [
"Apache-2.0"
] | 11 | 2021-06-02T04:42:50.000Z | 2022-02-14T07:24:15.000Z | forms-flow-api/src/api/utils/util.py | McCoySmith/forms-flow-ai | 5555c1b2a9a5496f1ab98e5339d66537e25974c2 | [
"Apache-2.0"
] | null | null | null | """Common utils.
* CORS pre-flight decorator. A simple decorator to add the options method to a Request Class.
"""
from .constants import ALLOW_ALL_ORIGINS
def cors_preflight(methods: str = "GET"):
"""Render an option method on the class."""
def wrapper(f): # pylint: disable=invalid-name
def options(self, *args, **kwargs): # pylint: disable=unused-argument
return (
{"Allow": "GET"},
200,
{
"Access-Control-Allow-Origin": ALLOW_ALL_ORIGINS,
"Access-Control-Allow-Methods": methods,
"Access-Control-Allow-Headers": "Authorization, Content-Type",
},
)
setattr(f, "options", options)
return f
return wrapper
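# A minimal usage sketch (hypothetical flask-restx resource; only the
# decorator itself comes from this module):
#
#   @cors_preflight("GET,POST,OPTIONS")
#   class FormResource(Resource):
#       def get(self):
#           return {"status": "OK"}, 200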
| 29.592593 | 93 | 0.560701 | 84 | 799 | 5.27381 | 0.607143 | 0.088036 | 0.121896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005576 | 0.326658 | 799 | 26 | 94 | 30.730769 | 0.817844 | 0.260325 | 0 | 0 | 0 | 0 | 0.221453 | 0.143599 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.0625 | 0.0625 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61c6722666daed5da1e5465a987ae0f5c8c2f1c8 | 11,323 | py | Python | wk3_time_space_est.py | pc0179/RomeTaxiData | dff19a538482810e1b84486cdc4299fb820f5051 | [
"MIT"
] | null | null | null | wk3_time_space_est.py | pc0179/RomeTaxiData | dff19a538482810e1b84486cdc4299fb820f5051 | [
"MIT"
] | null | null | null | wk3_time_space_est.py | pc0179/RomeTaxiData | dff19a538482810e1b84486cdc4299fb820f5051 | [
"MIT"
] | null | null | null | """
# week3 madness...
# filter, process (map-match) and output data to new postgres database
# aim by thurs, to be able to answer question, where are (best-guess/estimate) all the taxis at time T.
# then, work out, how far each one is from one another, likely a) search with in BBox, then b) do some fast osrm routing (get that line of sight distance?)
# to aid map-matching, best to use traces of one taxi at at time (allows for timestamp to be used... better approx. should result.)
# get a list of all taxi_IDs... within dataset
#execution_str = "SELECT DISTINCT taxi_id FROM rometaxidata"
#currently all designed to run on C207...
"""
import psycopg2
import pandas.io.sql as pdsql
import pandas as pd
from sqlalchemy import create_engine
#import matplotlib.pyplot as plt
#from mpl_toolkits.basemap import Basemap
import numpy as np
import osrm
#1. querying database
# connection string for working on c207: connect_str = "dbname='rometaxitraces' user='postgres' host='localhost' password='postgres'"
# connection string for Klara:
# taxi_ids = pd.read_csv('/home/user/RomeTaxiData/all_rome_taxi_ids.csv', header=None, sep="\n")
# list_taxi_ids = list(taxi_ids[0]) #I was bored and this seemed easier than figuring out exactly how pandas.iterrows() bullshit works
#osrm.RequestConfig.host = "http://localhost:5000"
# quick reminder of available columns in database:
# cols = ['taxi_id','ts_dt','sim_t','sim_day_num','weekday_num','Lat1','Long1','x','y','unix_ts']
#interesting queries...
#"SELECT * FROM rometaxidata WHERE taxi_id =225"
#execution_str = "SELECT sim_t, Lat1, Long1 FROM rometaxidata WHERE taxi_id = 225"
#execution_str = "SELECT DISTINCT taxi_id FROM rometaxidata"
#execution_str = "SELECT sim_t, x, y FROM rometaxidata WHERE taxi_id = %s"
#"SELECT * FROM rometaxidata WHERE sim_day_num=1"
#execution_str = "SELECT * FROM rometaxidata WHERE (x BETWEEN -1000 AND 1000) AND (y BETWEEN -1000 AND 1000)"
#execution_str = "SELECT * FROM rometaxidata WHERE sim_day_num = 10 AND (x BETWEEN -1000 AND 1000) AND (y BETWEEN -1000 AND 1000)"
#execution_str = "SELECT * FROM rometaxidata WHERE weekday_num = 0 AND taxi_id = 225"
#execution_str = ("SELECT DISTINCT taxi_id FROM rometaxidata WHERE sim_day_num = %s" % (str(sim_day_num)))
#taxi_ids = pdsql.read_sql_query(execution_str,connection)
"""
bunch of taxi ids on day 3
129
195
106
120
285
8
264
305
318
179
209
276
"""
#taxi_id = 129 #129 #taxi_ids['taxi_id'][0]
#execution_str = ("SELECT unix_ts,lat1,long1 FROM rometaxidata WHERE (taxi_id = %s AND sim_day_num = %s)" % (str(taxi_id),str(sim_day_num)))
connect_str = "dbname='c207rometaxitraces' user='postgres' host='localhost' password='postgres'"
#sim_day_num = 4
connection = psycopg2.connect(connect_str)
#connection = psycopg2.connect(connect_str)
#1. get all taxi trace data for one day.
# List unique values in a DataFrame column
# h/t @makmanalp for the updated syntax!
# Grab DataFrame rows where column has certain values
#valuelist = ['value1', 'value2', 'value3']
#df = df[df.column.isin(valuelist)]
#0-13 completed, start again at 14
for k in range(15,27):
sim_day_num = k
execution_str = ("SELECT taxi_id,unix_ts,lat1,long1 FROM rometaxidata WHERE sim_day_num =%s" % (str(sim_day_num)))
taxidf = pdsql.read_sql_query(execution_str,connection)
taxi_IDs = list(taxidf['taxi_id'].unique())
# for each taxi_id that was working on that day number
for j in range(0,len(taxi_IDs)):
trace_data2match = taxidf[taxidf['taxi_id']==taxi_IDs[j]]
trace_data2match = trace_data2match.drop_duplicates()
if len(trace_data2match)>1:
trace_data2match = trace_data2match.sort_values('unix_ts') #VERY IMPORTANT for osrm. big deal!
#search_radius = np.zeros_like(np.array(taxidf['unix_ts']))+10
#time_stamps = taxidf['unix_ts']
#going back to shitty python wrapper:
#m = 0#50 #between 50-60 there is an error... the timestamps are not monotonically increasing.. need to sort this, jokes.
#n = 1260 #60 #900 #len(gps_subset) #1260
#mpmatched_points = osrm.match(gps_positions[m:n], overview="simplified", timestamps=taxidf['unix_ts'][m:n], radius=None)
#bear in mind... i might need to flip lats/longs order... hmmm....
gps_subset = trace_data2match[['long1','lat1']]
gps_positions = [tuple(x) for x in gps_subset.values]
mpmatched_points = osrm.match(gps_positions, overview="simplified", timestamps=trace_data2match['unix_ts'], radius=None)
nobody_index = []
matched_longitude = []
matched_latitude = []
matched_unix_ts = []
matched_cols = ['taxi_id','day_num','unix_ts','mlatitude','mlongitude']
# loop each outputed point from the mapmatched trace (osrm), add correct timestampts,etc... build pandas dataframe
for i in range(0,len(mpmatched_points['tracepoints'])):
if mpmatched_points['tracepoints'][i] is None:
nobody_index.append(i)
else:
                matched_unix_ts.append(trace_data2match['unix_ts'].iloc[i])  # i-th point of this taxi's sorted trace
                # OSRM tracepoint locations are [longitude, latitude]
                matched_longitude.append(mpmatched_points['tracepoints'][i]['location'][0])
                matched_latitude.append(mpmatched_points['tracepoints'][i]['location'][1])
        matched_taxi_id = np.ones_like(matched_unix_ts)*taxi_IDs[j]
        matched_day_num = np.ones_like(matched_taxi_id)*sim_day_num
        matched_df = pd.DataFrame(np.column_stack([matched_taxi_id, matched_day_num, matched_unix_ts, matched_latitude, matched_longitude]), columns=matched_cols)
matched_df.taxi_id = matched_df.taxi_id.astype(int)
matched_df.day_num = matched_df.day_num.astype(int)
matched_df.unix_ts = matched_df.unix_ts.astype(int)
if j>0:
entire_day_matched_traces = pd.concat([entire_day_matched_traces, matched_df], axis=0, join='outer', join_axes=None, ignore_index=True,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True)
else:
entire_day_matched_traces = matched_df
else:
fail = j
#at the entire sim day level. K loop
file_name = ("/home/pdawg/Downloads/matched_traces/day_%s.csv" % (str(sim_day_num)))
entire_day_matched_traces.to_csv(file_name,sep=',',index=False)
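# The stated aim above is answering "where is every taxi at time T"; a minimal
# sketch against the matched output (taxis_at_time and its 30 s window are
# illustrative, not part of the original pipeline):
def taxis_at_time(matched_df, t, window=30):
    near = matched_df[(matched_df['unix_ts'] - t).abs() <= window].copy()
    if near.empty:
        return near
    near['dt'] = (near['unix_ts'] - t).abs()
    # keep, per taxi, the matched fix closest in time to t
    best = near.loc[near.groupby('taxi_id')['dt'].idxmin()]
    return best[['taxi_id', 'mlatitude', 'mlongitude', 'unix_ts']]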
#con.execute('TRUNCATE matchedta ;')
#df.to_sql('my_table', con, if_exists='append')
#connect_str2 = "dbname='matchedtaxitraces' user='postgres' host='localhost' password='postgres'"
#connection2 = psycopg2.connect(connect_str2)
#execution_str2 = ("TRUNCATE matchedtaxidata;") #really stupid code.
#matched_df.to_sql(execution_str2,connection2)
# code insert to postgres table, but first, set up table....
#could numpy row stack, and write to database at the end of the 'sim_day_num'... would reduce some shiz...
# similarily, I should import a days worth of traces, divide into unique taxi_ids, then iterate!
''' my attempt...
0. will need to sort out matched database table etc....
this might mean care sigfigs etc...
1. load maybe 1GB a time from psql...
2. chunk it up,
per chunk
- psql query (yeah it will be slower, deal with it.... lets get this pig up and running,)
- map match: 1000 points? <-- look at above not regards 'gaps=false?'? maybe need to edit fucking pyosrm shit.
- convert results to
- pandas dataframe, with ['unix_ts','latitude','longitude'] <-- ORDER IS IMPORTANT BE CAREFUL.
- save 'overview=full' json file (although writing this out for everything coul be slooow)
or could I save this to yet another database... nah, easy, just save to disk in another directory
should have IterationNum,sim_day_num_taxi_id
- save pandas dataframe to new matched_db
search_radius = [20,20,20,20,20]
url0 = ['http://localhost:5000/match/v1/driving/']
overview = 'full'
steps='false'
geometry='polyline'
gps_points2match = gps_positions[500:1000]
timestamps = taxidf['unix_ts'][500:1000]
#url1 = [url0,';'.join([','.join([str(coord[0]),str(coord[1])]) for coord in gps_points2match])]
#url2 = [join([url0,';'.join([','.join([str(coord[0]),str(coord[1])]) for coord in gps_points2match])])]
#GPS coords...
url0.append(';'.join([','.join([str(coord[0]),str(coord[1])]) for coord in gps_points2match]))
#radiuses
url0.append(';'.join([','.join([str(radii)]) for radii in search_radius]))
url0.append(';'.join([','.join([str(ts)]) for ts in timestamps]))
#timestamps//
url1 = ''.join([url0[0],url0[1]])
url1 = ''.join([url1,'?overview={}&steps={}&geometries={}'.format(overview,str(steps).lower(), geometry)])
#url1 = '&radiuses='.join([url1,url0[2]]) # bold.
url1 = '×tamps='.join([url1,url0[3]])
url2txt_file = open("url2test.txt","w")
url2txt_file.write(url1)
url2txt_file.close()
#url2txt = np.array(url1
#url_filename = '/home/user/RomeTaxiData/url2test.txt'
#np.savetxt(url_filename,url2txt,fmt=str)
#url = [host, '/match/', url_config.version, '/', url_config.profile, '/',';'.join([','.join([str(coord[0]), str(coord[1])]) for coord in points]),"?overview={}&steps={}&geometries={}".format(overview,str(steps).lower(), geometry)]
'''
#mpmatched_points = osrm.match(gps_positions[0:5], overview="full", timestamps=taxidf['unix_ts'][0:5], radius =[10])
#---- from osrm-python wrapper ----
"""
next steps:
1. edit/start from scratch making new python-osrm map-match function, need to get that url query just right
http://localhost:5000/match/v1/driving/{gps points long1,lat1;... longN,latN}&radiuses={r1;r2;r3...rN}×tamps{ts1;ts2;ts3...tsN}
url = 'http://localhost:5000/match/v1/driving/'
gps_points2match = gps_positions[500:505]
for i in gps_points2match:
url = [url.join(str(coord[0]),str(coord[1]) for coord in gps_points2match]
#-------original.... shit.
points = gps_positions
# host = check_host(url_config.host)
url = [host, '/match/', url_config.version, '/', url_config.profile, '/',';'.join(
[','.join([str(coord[0]), str(coord[1])]) for coord in points]),
"?overview={}&steps={}&geometries={}"
.format(overview, str(steps).lower(), geometry)]
if radius:
url.append(";".join([str(rad) for rad in radius]))
if timestamps:
url.append(";".join([str(timestamp) for timestamp in timestamps]))
r = urlopen("".join(url))
r_json = json.loads(r.read().decode('utf-8'))
for taxi_id in list_taxi_ids:
execution_str = ("SELECT lat1,long1,unix_ts FROM rometaxidata WHERE taxi_id = %s" % (str(taxi_id))
MOAR NOTES
these two seem to work reasonably well, however give slightly different results which may/may not be worrying...
curl "http://router.project-osrm.org/match/v1/driving/12.457089,41.895786;12.457089,41.895786;12.487011,41.893273;12.498969,41.902191;12.501389,41.901612?radiuses=20;20;20;20;20×tamps=1391371881;1391371882;1391372422;1391372747;1391372791"
curl "http://localhost:5000/match/v1/driving/12.457089,41.895786;12.457089,41.895786;12.487011,41.893273;12.498969,41.902191;12.501389,41.901612?radiuses=20;20;20;20;20×tamps=1391371881;1391371882;1391372422;1391372747;1391372791"
--
"""
| 35.495298 | 244 | 0.689658 | 1,650 | 11,323 | 4.586667 | 0.289697 | 0.020613 | 0.017838 | 0.006342 | 0.312236 | 0.282109 | 0.213531 | 0.179572 | 0.173229 | 0.156448 | 0 | 0.061961 | 0.17045 | 11,323 | 318 | 245 | 35.606918 | 0.743745 | 0.360505 | 0 | 0.061224 | 0 | 0 | 0.117188 | 0.032552 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.020408 | 0.122449 | 0 | 0.122449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61c780171c158e14c78061e8324344923f5412cc | 668 | py | Python | lgy/algorithm/9251 LCS.py | Einere/boostcamp_study | 63a52253c0ee01354a81dcac6349cc84d738b9ca | [
"MIT"
] | 2 | 2019-06-25T14:18:34.000Z | 2019-11-21T01:19:35.000Z | lgy/algorithm/9251 LCS.py | Einere/boostcamp_study | 63a52253c0ee01354a81dcac6349cc84d738b9ca | [
"MIT"
] | null | null | null | lgy/algorithm/9251 LCS.py | Einere/boostcamp_study | 63a52253c0ee01354a81dcac6349cc84d738b9ca | [
"MIT"
] | 1 | 2019-06-26T05:09:39.000Z | 2019-06-26T05:09:39.000Z | import sys
sys.path.append('.')
from lgy.algorithm.StdIOTestContainer import StdIOTestContainer as T
def main():
v1 = input()
v2 = input()
    dp = [[0 for i in range(len(v2) + 1)] for j in range(len(v1) + 1)]
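    # Classic LCS DP: dp[i][j] holds the LCS length of v1[:i] and v2[:j];
    # extend by one on a character match, otherwise keep the best of dropping
    # one character from either string (the max() branch below).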
for i in range(1, len(v1) + 1):
for j in range(1, len(v2) + 1):
if v1[i-1:i] == v2[j-1:j]:
dp[i][j] = dp[i-1][j-1] + 1
else:
dp[i][j] = max(dp[i][j-1], dp[i-1][j])
print(dp[len(v1)][len(v2)])
print("ab"[0])
user_input = '''
ACAYKP
CAPCAK
'''
expected = '''
4
'''
T.runningTest(user_input.strip(), expected.lstrip(), main) | 19.085714 | 70 | 0.516467 | 113 | 668 | 3.035398 | 0.345133 | 0.043732 | 0.034985 | 0.058309 | 0.139942 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059917 | 0.275449 | 668 | 35 | 71 | 19.085714 | 0.64876 | 0 | 0 | 0.08 | 0 | 0 | 0.03139 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.08 | 0 | 0.12 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61c87be5497f3881250ad3985df824448fa55d00 | 427 | py | Python | md2html/weixin/pre_processor.py | g0man/md2html | c7f021298556e60984497464c1f523ac2443e868 | [
"MIT"
] | 1 | 2018-08-03T01:25:38.000Z | 2018-08-03T01:25:38.000Z | md2html/weixin/pre_processor.py | g0man/md2html | c7f021298556e60984497464c1f523ac2443e868 | [
"MIT"
] | null | null | null | md2html/weixin/pre_processor.py | g0man/md2html | c7f021298556e60984497464c1f523ac2443e868 | [
"MIT"
] | null | null | null | from markdown.preprocessors import Preprocessor
class CalcReadingTimePreprocessor(Preprocessor):
def __init__(self, cfg, *args, **kwargs):
self.cfg = cfg
super(CalcReadingTimePreprocessor, self).__init__(*args, **kwargs)
def run(self, root):
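        # 'root' is the list of source lines a markdown Preprocessor receives,
        # so len(root) is a line count, used here as a rough length proxy.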
word_count = len(root)
print("word count: %d" % word_count)
self.cfg['READING_MINUTES'] = int(word_count/700)
return root | 30.5 | 74 | 0.662763 | 47 | 427 | 5.765957 | 0.553191 | 0.132841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009063 | 0.224824 | 427 | 14 | 75 | 30.5 | 0.809668 | 0 | 0 | 0 | 0 | 0 | 0.067757 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.1 | 0 | 0.5 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61c9624c9f9d3fcfa45f472861b7d2d95733b378 | 19,842 | py | Python | data/scripts/model.py | Honorates/covid19_scenarios | c0d6d1a34d3650e9812744932b9b661ddac57643 | [
"MIT"
] | 3 | 2020-05-23T03:22:08.000Z | 2020-05-23T03:22:16.000Z | data/scripts/model.py | Honorates/covid19_scenarios | c0d6d1a34d3650e9812744932b9b661ddac57643 | [
"MIT"
] | null | null | null | data/scripts/model.py | Honorates/covid19_scenarios | c0d6d1a34d3650e9812744932b9b661ddac57643 | [
"MIT"
] | 1 | 2020-05-25T13:50:23.000Z | 2020-05-25T13:50:23.000Z | import csv
import importlib
import sys
sys.path.append('..')
import os
import json
import argparse
import copy
from enum import IntEnum
from datetime import datetime
import numpy as np
import scipy.integrate as solve
import scipy.optimize as opt
import matplotlib.pylab as plt
from scripts.tsv import parse as parse_tsv
from scripts.R0_estimator import get_Re_guess
from paths import BASE_PATH
# ------------------------------------------------------------------------
# Globals
PATH_UN_AGES = os.path.join(BASE_PATH, "../src/assets/data/ageDistribution.json")
PATH_UN_CODES = os.path.join(BASE_PATH,"country_codes.csv")
PATH_POP_DATA = os.path.join(BASE_PATH,"populationData.tsv")
JAN1_2019 = datetime.strptime("2019-01-01", "%Y-%m-%d").toordinal()
JUN1_2019 = datetime.strptime("2019-06-01", "%Y-%m-%d").toordinal()
JAN1_2020 = datetime.strptime("2020-01-01", "%Y-%m-%d").toordinal()
CASES = importlib.import_module("scripts.tsv")
CASE_DATA = CASES.parse()
def load_distribution(path):
dist = {}
with open(path, 'r') as fd:
db = json.load(fd)
for data in db["all"]:
key = data["name"]
ageDis = sorted(data["data"], key=lambda x: x["ageGroup"])
dist[key] = np.array([float(elt["population"]) for elt in ageDis])
dist[key] = dist[key]/np.sum(dist[key])
return dist
def load_country_codes(path):
db = {}
with open(path, 'r') as fd:
rdr = csv.reader(fd)
next(rdr)
for entry in rdr:
db[entry[0]] = entry[2]
return db
def load_population_data(path):
db = {}
with open(path, 'r') as fd:
rdr = csv.reader(fd, delimiter='\t')
next(rdr)
for entry in rdr:
db[entry[0]] = {'size':int(entry[1]), 'ageDistribution':entry[2]}
return db
AGES = load_distribution(PATH_UN_AGES)
POPDATA = load_population_data(PATH_POP_DATA)
CODES = load_country_codes(PATH_UN_CODES)
# ------------------------------------------------------------------------
# Indexing enums
compartments = ['S', 'E1', 'E2', 'E3', 'I', 'H', 'C', 'D', 'R', 'T', 'NUM']
Sub = IntEnum('Sub', compartments, start=0)
groups = ['_0', '_1', '_2', '_3', '_4', '_5', '_6', '_7', '_8', 'NUM']
Age = IntEnum('Age', groups, start=0)
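# Example: a population array of shape (Sub.NUM, Age.NUM) is indexed as
# pop[Sub.I, Age._2], i.e. currently infectious individuals in the third
# age bucket (the nine groups are assumed to be decadal age bands).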
# ------------------------------------------------------------------------
# Organizational classes
class Data(object):
def __str__(self):
return str({k : str(v) for k, v in self.__dict__.items()})
class Rates(Data):
def __init__(self, latency, logR0, infection, hospital, critical, imports, efficacy):
self.latency = latency
self.logR0 = logR0
self.infectivity = np.exp(self.logR0) * infection
self.infection = infection
self.hospital = hospital
self.critical = critical
self.imports = imports
self.efficacy = efficacy
# NOTE: Pulled from default severe table on neherlab.org/covid19
# Keep in sync!
# TODO: Allow custom values?
class Fracs(Data):
confirmed = np.array([5, 5, 10, 15, 20, 25, 30, 40, 50]) / 100
severe = np.array([1, 3, 3, 3, 6, 10, 25, 35, 50]) / 100
severe *= confirmed
critical = np.array([5, 10, 10, 15, 20, 25, 35, 45, 55]) / 100
fatality = np.array([30, 30, 30, 30, 30, 40, 40, 50, 50]) / 100
recovery = 1 - severe
discharge = 1 - critical
stabilize = 1 - fatality
def __init__(self, reported=1/30):
self.reported = reported
class TimeRange(Data):
def __init__(self, day0, start, end, delta=1):
self.day0 = day0
self.start = start
self.end = end
self.delta = delta
class Params(Data):
def __init__(self, ages=None, size=None, date=None, times=None, rates=None, fracs=None):
self.ages = ages
self.rates = rates
self.fracs = fracs
self.size = size
self.time = times
self.date = date
# Make infection function
beta = self.rates.infectivity
self.rates.infectivity = lambda t,date,eff : beta if t<date else beta*(1-eff)
# ------------------------------------------------------------------------
# Default parameters
DefaultRates = Rates(latency=1/3.0, logR0=1.0, infection=1/3.0, hospital=1/3.0, critical=1/14, imports=.1, efficacy=0.5)
RateFields = [ f for f in dir(DefaultRates) \
if not callable(getattr(DefaultRates, f)) \
and not f.startswith("__") ]
RateFields.remove('infectivity')
# ------------------------------------------------------------------------
# Functions
# ------------------------------------------
# Modeling
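# Compartment flow implemented by evolve() below:
#   S -> E1 -> E2 -> E3 -> I -> (R | H),  H -> (R | C),  C -> (H | D)
# The three E stages make the latency Erlang-distributed, and T accumulates
# the reported fraction of the E3 -> I flux.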
def make_evolve(params):
# Equations for coupled ODEs
def evolve(t, pop):
pop2d = np.reshape(pop, (Sub.NUM, Age.NUM))
fracI = pop2d[Sub.I, :].sum() / params.size
dpop = np.zeros_like(pop2d)
flux_S = params.rates.infectivity(t, params.date, params.rates.efficacy)*fracI*pop2d[Sub.S] + (params.rates.imports / Sub.NUM)
flux_E1 = params.rates.latency*pop2d[Sub.E1]*3
flux_E2 = params.rates.latency*pop2d[Sub.E2]*3
flux_E3 = params.rates.latency*pop2d[Sub.E3]*3
flux_I_R = params.rates.infection*params.fracs.recovery*pop2d[Sub.I]
flux_I_H = params.rates.infection*params.fracs.severe*pop2d[Sub.I]
flux_H_R = params.rates.hospital*params.fracs.discharge*pop2d[Sub.H]
flux_H_C = params.rates.hospital*params.fracs.critical*pop2d[Sub.H]
flux_C_H = params.rates.critical*params.fracs.stabilize*pop2d[Sub.C]
flux_C_D = params.rates.critical*params.fracs.fatality*pop2d[Sub.C]
# Add fluxes to states
dpop[Sub.S] = -flux_S
dpop[Sub.E1] = +flux_S - flux_E1
dpop[Sub.E2] = +flux_E1 - flux_E2
dpop[Sub.E3] = +flux_E2 - flux_E3
dpop[Sub.I] = +flux_E3 - flux_I_R - flux_I_H
dpop[Sub.H] = +flux_I_H + flux_C_H - flux_H_R - flux_H_C
dpop[Sub.C] = +flux_H_C - flux_C_D - flux_C_H
dpop[Sub.R] = +flux_H_R + flux_I_R
dpop[Sub.D] = +flux_C_D
dpop[Sub.T] = +flux_E3*params.fracs.reported
return np.reshape(dpop, Sub.NUM*Age.NUM)
return evolve
def init_pop(ages, size, cases):
pop = np.zeros((Sub.NUM, Age.NUM))
ages = np.array(ages) / np.sum(ages)
pop[Sub.S, :] = size * ages
pop[Sub.S, :] -= cases*ages
pop[Sub.I, :] += cases*ages*0.3
pop[Sub.E1, :] += cases*ages*0.7/3
pop[Sub.E2, :] += cases*ages*0.7/3
pop[Sub.E3, :] += cases*ages*0.7/3
return pop
def solve_ode(params, init_pop):
t_beg = params.time[0]
num_tp = len(params.time)
evolve = make_evolve(params)
solver = solve.ode(evolve) # TODO: Add Jacobian
solver.set_initial_value(init_pop.flatten(), t_beg)
solution = np.zeros((num_tp, init_pop.shape[0], init_pop.shape[1]))
solution[0, :, :] = init_pop
i = 1
while solver.successful() and i<num_tp:
solution[i, :, :] = np.reshape(solver.integrate(params.time[i]), (Sub.NUM, Age.NUM))
i += 1
return solution
def trace_ages(solution):
return solution.sum(axis=-1)
# ------------------------------------------
# Parameter estimation
def is_cumulative(vec):
    return False not in (vec[~vec.mask][:-1] <= vec[~vec.mask][1:])
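# Negative log-likelihood of count n under a Poisson rate lam, dropping
# n-dependent constants; eps keeps the log finite when counts are zero:
#   -log L(n | lam) ~ (lam - n) - n*log(lam/n)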
def poissonNegLogLH(n,lam, eps=0.1):
L = np.abs(lam)
N = np.abs(n)
return (L-N) - N*np.log((L+eps)/(N+eps))
def assess_model(params, data, cases):
sol = solve_ode(params, init_pop(params.ages, params.size, cases))
model = trace_ages(sol)
eps = 1e-2
diff_cases = data[Sub.T][3:] - data[Sub.T][:-3]
diff_cases_model = model[3:, Sub.T] - model[:-3, Sub.T]
case_cost = np.ma.sum(poissonNegLogLH(diff_cases, diff_cases_model, eps))
diff_deaths = data[Sub.D][3:] - data[Sub.D][:-3]
diff_deaths_model = model[3:, Sub.D] - model[:-3, Sub.D]
death_cost = np.ma.sum(poissonNegLogLH(diff_deaths, diff_deaths_model, eps))
hospital_cost = 0
ICU_cost = 0
if data[Sub.H] is not None:
hospital_cost = np.ma.sum(poissonNegLogLH(data[Sub.H], model[:,Sub.H], eps))
if data[Sub.C] is not None:
ICU_cost = np.ma.sum(poissonNegLogLH(data[Sub.C], model[:,Sub.C], eps))
return case_cost + 10*death_cost # + hospital_cost + ICU_cost
# Any parameters given in guess are fit. The remaining are fixed and set by DefaultRates
def fit_params(key, time_points, data, guess, fixed_params=None, bounds=None):
if fixed_params is None:
fixed_params = {}
if key not in POPDATA:
return (Params(ages=None, size=None, date=None, times=None, rates=DefaultRates, fracs=Fracs()),
10, (False, "Not within population database"))
params_to_fit = {key : i for i, key in enumerate(guess.keys())}
def pack(x, as_list=False):
data = [x[key] for key in params_to_fit.keys()]
if not as_list:
return np.array(data)
return data
def unpack(x):
vals = {}
for f in RateFields:
if f in guess:
vals[f] = x[params_to_fit[f]]
elif f in fixed_params:
vals[f] = fixed_params[f]
else:
vals[f] = getattr(DefaultRates, f)
return Rates(**vals), Fracs(x[params_to_fit['reported']]) if 'reported' in params_to_fit else Fracs()
def fit(x):
# TODO(nnoll): Need a better default here!
if POPDATA[key]["ageDistribution"] in AGES:
ages = AGES[POPDATA[key]["ageDistribution"]]
else:
ages = AGES["Switzerland"]
rates, fracs = unpack(x)
        param = Params(ages=ages, size=POPDATA[key]["size"],
                       date=fixed_params.get('containment_start', None), times=time_points, rates=rates, fracs=fracs)
return assess_model(param, data, np.exp(x[params_to_fit['logInitial']]))
if bounds is None:
fit_param = opt.minimize(fit, pack(guess), method='Nelder-Mead')
else:
fit_param = opt.minimize(fit, pack(guess), method='L-BFGS-B', bounds=bounds)
err = (fit_param.success, fit_param.message)
print(key, fit_param.x)
if POPDATA[key]["ageDistribution"] in AGES:
ages = AGES[POPDATA[key]["ageDistribution"]]
else:
ages = AGES["Switzerland"]
rates, fracs = unpack(fit_param.x)
    return (Params(ages=ages, size=POPDATA[key]["size"],
                   date=fixed_params.get('containment_start', None), times=time_points, rates=rates, fracs=fracs),
np.exp(fit_param.x[params_to_fit['logInitial']]), err)
# ------------------------------------------
# Data loading
def load_data(key, ts):
if key in POPDATA:
popsize = POPDATA[key]["size"]
else:
popsize = 1e6
case_min = 20
data = [[] if (i == Sub.D or i == Sub.T or i == Sub.H or i == Sub.C) else None for i in range(Sub.NUM)]
days = []
for tp in ts: #replace all zeros by np.nan
data[Sub.T].append(tp['cases'] or np.nan)
data[Sub.H].append(tp['hospitalized'] or np.nan)
data[Sub.D].append(tp['deaths'] or np.nan)
data[Sub.C].append(tp['icu'] or np.nan)
data = [ np.ma.array(d) if d is not None else d for d in data]
good_idx = np.array(np.logical_or(case_min <= data[Sub.T], case_min <= data[Sub.D]))
for ii in [Sub.D, Sub.T, Sub.H, Sub.C]:
data[ii] = data[ii][good_idx]
data[ii].mask = np.isnan(data[ii])
if False not in data[ii].mask:
data[ii] = None
days = np.array([datetime.strptime(d['time'].split('T')[0], "%Y-%m-%d").toordinal() for d in ts])
return days[good_idx], data
def get_fit_data(days, data_original, end_discard=3):
"""
    Select the relevant part of the data for the fitting procedure. Early data points with fewer
    than 20 cases are removed, as are the last 3 days (due to reporting latency).
"""
data = copy.deepcopy(data_original)
case_min = 20
day0 = days[case_min <= data[Sub.T]][0]
# Filter points
good_idx = np.bitwise_and(days >= day0, days < days[-1] - end_discard)
for idx in [Sub.D, Sub.T, Sub.H, Sub.C]:
if data[idx] is None:
data[idx] = np.ma.array([np.nan])
data[idx].mask = np.isnan(data[idx])
else:
data[idx] = np.ma.array(np.concatenate([[np.nan], data[idx][good_idx]]))
data[idx].mask = np.isnan(data[idx])
for ii in [Sub.T, Sub.D, Sub.H, Sub.C]: # remove data if whole array is masked
if False not in data[ii].mask:
data[ii] = None
# start the model 3 weeks prior.
time = np.concatenate(([day0-14], days[good_idx]))
return time, data
def fit_population_iterative(key, time_points, data, guess=None, second_fit=False, FRA=False):
"""
Iterative fitting procedure. First, R_effective is estimated from the data and fitted using a stair
    function to deduce R0, the containment start date and the efficacy of the containment. Second, these
parameters are used to optimize the reported fraction and the initial number of infected people using the
fit_params function.
"""
if data is None or data[Sub.D] is None or len(data[Sub.D]) <= 14:
return None
res = get_Re_guess(time_points, data, only_deaths=FRA)
fit = res['fit']
if fit is None or fit[0]<1 or fit[0]>6 or fit[1]>fit[0] or fit[1]<0:
return None
fixed_params = {}
fixed_params['logR0'] = np.log(fit[0])
fixed_params['efficacy'] = 1-fit[1]/fit[0]
fixed_params['containment_start'] = fit[2]
if guess is None:
guess = { "reported" : 0.1,
"logInitial" : 1,
}
bounds=None
for ii in [Sub.T, Sub.D]:
if not is_cumulative(data[ii]):
print("Cases / deaths count is not cumulative.", data[ii])
t1 = datetime.now().timestamp()
param, init_cases, err = fit_params(key, time_points, data, guess, fixed_params, bounds=bounds)
t2 = datetime.now().timestamp()
print(round(t2 - t1,2), fixed_params)
if second_fit:
guess = { "reported" : param.fracs.reported,
"logInitial" : np.log(init_cases),
"logR0": param.rates.logR0,
"efficacy": param.rates.efficacy
}
param, init_cases, err = fit_params(key, time_points, data, guess,
{'containment_start':fixed_params['containment_start']}, bounds=None)
t3 = datetime.now().timestamp()
print(round(t3 - t2, 2), fixed_params)
tMin = datetime.strftime(datetime.fromordinal(time_points[0]), '%Y-%m-%d')
res = {'params': param, 'initialCases': init_cases, 'tMin': tMin, 'data': data, 'error':err}
if param.date is not None:
res['containment_start'] = datetime.fromordinal(int(param.date)).strftime('%Y-%m-%d')
return res
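
# Rough call order (editorial sketch; mirrors the __main__ block below):
#
#     time, data = load_data(key, CASE_DATA[key])
#     model_tps, fit_data = get_fit_data(time, data)
#     res = fit_population_iterative(key, model_tps, fit_data)
#
# `res` is None when the death series is too short or when the R_effective
# step fit is implausible (R0 outside [1, 6] or an inconsistent efficacy).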

def fit_population(key, time_points, data, containment_start=None, guess=None):
    if data is None or data[Sub.D] is None or len(data[Sub.D]) <= 5:
        return None

    if guess is None:
        guess = {"logR0": 1.0,
                 "reported": 0.2,
                 "logInitial": 1,
                 "efficacy": 0.8
                 }
    # bounds = ((0.4,2),(0.01,0.8),(1,None),(0,1))
    bounds = None

    for ii in [Sub.T, Sub.D]:
        if not is_cumulative(data[ii]):
            print("Cases / deaths count is not cumulative.", data[ii])

    param, init_cases, err = fit_params(key, time_points, data, guess,
                                        {'containment_start': containment_start}, bounds=bounds)

    tMin = datetime.strftime(datetime.fromordinal(time_points[0]), '%Y-%m-%d')
    res = {'params': param, 'initialCases': init_cases, 'tMin': tMin, 'data': data, 'error': err}
    if param.date is not None:
        res['containment_start'] = datetime.fromordinal(param.date).strftime('%Y-%m-%d')
    return res

# ------------------------------------------------------------------------
# Testing entry

def fit_error(data, model):
    err = [[] if (i == Sub.D or i == Sub.T or i == Sub.H or i == Sub.C) else None for i in range(Sub.NUM)]
    eps = 1e-2
    for idx in [Sub.T, Sub.D, Sub.H, Sub.C]:
        if data[idx] is not None:
            err[idx] = poissonNegLogLH(data[idx], model[:, idx], eps)
    return err

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="",
                                     usage="fit data")
    parser.add_argument('--key', type=str, help="key for region, e.g. 'USA-California'")
    args = parser.parse_args()

    # NOTE: For debugging purposes only
    # rates = DefaultRates
    # fracs = Fracs()
    # times = TimeRange(0, 100)
    # param = Params(AGES[COUNTRY], POPDATA[make_key(COUNTRY, REGION)], times, rates, fracs)
    # model = trace_ages(solve_ode(param, init_pop(param.ages, param.size, 1)))

    key = args.key or "USA-New York"
    # key = "CHE-Basel-Stadt"
    # key = "DEU-Berlin"

    # Raw data and time points
    time, data = load_data(key, CASE_DATA[key])
    model_tps, fit_data = get_fit_data(time, data)

    # Fitting over the pre-confinement days
    res = fit_population_iterative(key, model_tps, fit_data, FRA=False)
    model = trace_ages(solve_ode(res['params'], init_pop(res['params'].ages, res['params'].size, res['initialCases'])))
    err = fit_error(fit_data, model)

    time -= res['params'].time[0]
    tp = res['params'].time - res['params'].time[0]

    # plt.figure()
    # plt.title(f"{key}")
    # plt.plot(time, data[Sub.T], 'o', color='#a9a9a9', label="cases")
    # plt.plot(tp, model[:,Sub.T], color="#a9a9a9", label="predicted cases")
    #
    # plt.plot(time, data[Sub.D], 'o', color="#cab2d6", label="deaths")
    # plt.plot(tp, model[:,Sub.D], color="#cab2d6", label="predicted deaths")
    #
    # plt.plot(time, data[Sub.H], 'o', color="#fb9a98", label="Hospitalized")
    # plt.plot(tp, model[:,Sub.H], color="#fb9a98", label="Predicted hospitalized")
    #
    # plt.plot(time, data[Sub.C], 'o', color="#e31a1c", label="ICU")
    # plt.plot(tp, model[:,Sub.C], color="#e31a1c", label="Predicted ICU")
    #
    # plt.plot(tp, model[:,Sub.I], color="#fdbe6e", label="infected")
    # plt.plot(tp, model[:,Sub.R], color="#36a130", label="recovered")
    #
    # plt.xlabel("Time [days]")
    # plt.ylabel("Number of people")
    # plt.legend(loc="best")
    # plt.tight_layout()
    # # plt.yscale('log')
    # # plt.ylim([-100,1000])
    # plt.savefig("Basel-Stadt", format="png")
    # plt.show()
    plt.figure()
    plt.title(f"{key}")
    plt.plot(time, data[Sub.T], 'o', color='#a9a9a9', label="cases")
    plt.plot(tp, model[:, Sub.T], color="#a9a9a9", label="predicted cases")
    plt.plot(tp, err[Sub.T], '--', color="#a9a9a9", label="cases error")

    plt.plot(time, data[Sub.D], 'o', color="#cab2d6", label="deaths")
    plt.plot(tp, model[:, Sub.D], color="#cab2d6")
    plt.plot(tp, err[Sub.D], '--', color="#cab2d6")

    if data[Sub.H] is not None:
        plt.plot(time, data[Sub.H], 'o', color="#fb9a98", label="Hospitalized")
        plt.plot(tp, model[:, Sub.H], color="#fb9a98")
        plt.plot(tp, err[Sub.H], '--', color="#fb9a98")

    if data[Sub.C] is not None:
        plt.plot(time, data[Sub.C], 'o', color="#e31a1c", label="ICU")
        plt.plot(tp, model[:, Sub.C], color="#e31a1c")
        plt.plot(tp, err[Sub.C], '--', color="#e31a1c")

    plt.plot(tp, model[:, Sub.I], color="#fdbe6e", label="infected")
    plt.plot(tp, model[:, Sub.R], color="#36a130", label="recovered")

    plt.xlabel("Time [days]")
    plt.ylabel("Number of people")
    plt.legend(loc="best")
    plt.tight_layout()
    plt.yscale("log")
    # plt.savefig(f"{key}-Poisson_max_likelihood", format="png")
    plt.show()
| 35.945652 | 136 | 0.590969 | 2,849 | 19,842 | 4.010179 | 0.151632 | 0.017768 | 0.012604 | 0.014705 | 0.358775 | 0.289628 | 0.276674 | 0.255755 | 0.2407 | 0.218206 | 0 | 0.025957 | 0.227245 | 19,842 | 551 | 137 | 36.010889 | 0.719168 | 0.154218 | 0 | 0.175824 | 0 | 0 | 0.074276 | 0.002342 | 0 | 0 | 0 | 0.001815 | 0 | 1 | 0.068681 | false | 0 | 0.057692 | 0.008242 | 0.230769 | 0.013736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61c99cc2811581dd1cdf3ced3f55a324a3a575c9 | 3,396 | py | Python | mxfold2/fold/fold.py | n-mikamo/mxfold2 | 8c195c77f824bdd5899d3d01d6a096de95cd0e9b | [
"MIT"
] | 46 | 2020-09-17T04:50:22.000Z | 2022-03-22T08:14:15.000Z | mxfold2/fold/fold.py | n-mikamo/mxfold2 | 8c195c77f824bdd5899d3d01d6a096de95cd0e9b | [
"MIT"
] | 7 | 2021-02-09T10:09:03.000Z | 2022-01-14T21:19:02.000Z | mxfold2/fold/fold.py | n-mikamo/mxfold2 | 8c195c77f824bdd5899d3d01d6a096de95cd0e9b | [
"MIT"
] | 20 | 2020-10-15T09:03:59.000Z | 2022-03-09T07:16:20.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F


class AbstractFold(nn.Module):
    def __init__(self, predict, partfunc):
        super(AbstractFold, self).__init__()
        self.predict = predict
        self.partfunc = partfunc

    def clear_count(self, param):
        param_count = {}
        for n, p in param.items():
            if n.startswith("score_"):
                param_count["count_" + n[6:]] = torch.zeros_like(p)
        param.update(param_count)
        return param

    def calculate_differentiable_score(self, v, param, count):
        s = 0
        for n, p in param.items():
            if n.startswith("score_"):
                s += torch.sum(p * count["count_" + n[6:]].to(p.device))
        s += v - s.item()
        return s
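
    # Note (editorial): the `s += v - s.item()` line above is a straight-through
    # trick: the returned tensor's value equals the score v computed by the
    # non-differentiable predictor, while its gradient flows through the dot
    # product of the "score_*" parameters and their "count_*" feature counts.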
    def forward(self, seq, return_param=False, param=None, return_partfunc=False,
                max_internal_length=30, max_helix_length=30, constraint=None, reference=None,
                loss_pos_paired=0.0, loss_neg_paired=0.0, loss_pos_unpaired=0.0, loss_neg_unpaired=0.0):
        param = self.make_param(seq) if param is None else param  # reuse param or not
        ss = []
        preds = []
        pairs = []
        pfs = []
        bpps = []
        for i in range(len(seq)):
            param_on_cpu = {k: v.to("cpu") for k, v in param[i].items()}
            param_on_cpu = self.clear_count(param_on_cpu)
            with torch.no_grad():
                v, pred, pair = self.predict(seq[i], param_on_cpu,
                                             max_internal_length=max_internal_length if max_internal_length is not None else len(seq[i]),
                                             max_helix_length=max_helix_length,
                                             constraint=constraint[i].tolist() if constraint is not None else None,
                                             reference=reference[i].tolist() if reference is not None else None,
                                             loss_pos_paired=loss_pos_paired, loss_neg_paired=loss_neg_paired,
                                             loss_pos_unpaired=loss_pos_unpaired, loss_neg_unpaired=loss_neg_unpaired)
                if return_partfunc:
                    pf, bpp = self.partfunc(seq[i], param_on_cpu,
                                            max_internal_length=max_internal_length if max_internal_length is not None else len(seq[i]),
                                            max_helix_length=max_helix_length,
                                            constraint=constraint[i].tolist() if constraint is not None else None,
                                            reference=reference[i].tolist() if reference is not None else None,
                                            loss_pos_paired=loss_pos_paired, loss_neg_paired=loss_neg_paired,
                                            loss_pos_unpaired=loss_pos_unpaired, loss_neg_unpaired=loss_neg_unpaired)
                    pfs.append(pf)
                    bpps.append(bpp)
            if torch.is_grad_enabled():
                v = self.calculate_differentiable_score(v, param[i], param_on_cpu)
            ss.append(v)
            preds.append(pred)
            pairs.append(pair)

        device = next(iter(param[0].values())).device
        ss = torch.stack(ss) if torch.is_grad_enabled() else torch.tensor(ss, device=device)
        if return_param:
            return ss, preds, pairs, param
        elif return_partfunc:
            return ss, preds, pairs, pfs, bpps
        else:
            return ss, preds, pairs | 46.520548 | 124 | 0.575677 | 433 | 3,396 | 4.263279 | 0.205543 | 0.03792 | 0.064464 | 0.042254 | 0.444204 | 0.40195 | 0.40195 | 0.40195 | 0.40195 | 0.40195 | 0 | 0.007048 | 0.331567 | 3,396 | 73 | 125 | 46.520548 | 0.806167 | 0.0053 | 0 | 0.246154 | 0 | 0 | 0.007995 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061538 | false | 0 | 0.046154 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4edee1ef5f03774068d2d400a1fd45f888b489f3 | 3,651 | py | Python | test/connectivity/acts/tests/google/ble/api/GattApiTest.py | Keneral/atools | 055e76621340c7dced125e9de56e2645b5e1cdfb | [
"Unlicense"
] | null | null | null | test/connectivity/acts/tests/google/ble/api/GattApiTest.py | Keneral/atools | 055e76621340c7dced125e9de56e2645b5e1cdfb | [
"Unlicense"
] | null | null | null | test/connectivity/acts/tests/google/ble/api/GattApiTest.py | Keneral/atools | 055e76621340c7dced125e9de56e2645b5e1cdfb | [
"Unlicense"
] | 1 | 2018-02-24T19:13:01.000Z | 2018-02-24T19:13:01.000Z | #/usr/bin/env python3.4
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Test script to exercise Gatt Apis.
"""
from acts.controllers.android import SL4AAPIError
from acts.test_utils.bt.BluetoothBaseTest import BluetoothBaseTest
from acts.test_utils.bt.bt_test_utils import log_energy_info
from acts.test_utils.bt.bt_test_utils import setup_multiple_devices_for_bt_test
class GattApiTest(BluetoothBaseTest):
    def __init__(self, controllers):
        BluetoothBaseTest.__init__(self, controllers)
        self.ad = self.android_devices[0]

    def setup_class(self):
        return setup_multiple_devices_for_bt_test(self.android_devices)

    def setup_test(self):
        self.log.debug(log_energy_info(self.android_devices, "Start"))
        for a in self.android_devices:
            a.ed.clear_all_events()
        return True

    def teardown_test(self):
        self.log.debug(log_energy_info(self.android_devices, "End"))
        return True

    @BluetoothBaseTest.bt_test_wrap
    def test_open_gatt_server(self):
        """Test a gatt server.

        Test opening a gatt server.

        Steps:
        1. Create a gatt server callback.
        2. Open the gatt server.

        Expected Result:
        Api to open gatt server should not fail.

        Returns:
          Pass if True
          Fail if False

        TAGS: LE, GATT
        Priority: 1
        """
        gatt_server_cb = self.ad.droid.gattServerCreateGattServerCallback()
        self.ad.droid.gattServerOpenGattServer(gatt_server_cb)
        return True

    @BluetoothBaseTest.bt_test_wrap
    def test_open_gatt_server_on_same_callback(self):
        """Test repetitive opening of a gatt server.

        Test opening a gatt server on the same callback twice in a row.

        Steps:
        1. Create a gatt server callback.
        2. Open the gatt server.
        3. Open the gatt server on the same callback as step 2.

        Expected Result:
        Api to open gatt server should not fail.

        Returns:
          Pass if True
          Fail if False

        TAGS: LE, GATT
        Priority: 2
        """
        gatt_server_cb = self.ad.droid.gattServerCreateGattServerCallback()
        self.ad.droid.gattServerOpenGattServer(gatt_server_cb)
        self.ad.droid.gattServerOpenGattServer(gatt_server_cb)
        return True

    @BluetoothBaseTest.bt_test_wrap
    def test_open_gatt_server_on_invalid_callback(self):
        """Test gatt server with an invalid callback.

        Test opening a gatt server with an invalid callback.

        Steps:
        1. Open a gatt server with the gatt callback set to -1.

        Expected Result:
        Api should fail to open a gatt server.

        Returns:
          Pass if True
          Fail if False

        TAGS: LE, GATT
        Priority: 2
        """
        invalid_callback_index = -1
        try:
            self.ad.droid.gattServerOpenGattServer(invalid_callback_index)
        except SL4AAPIError as e:
            self.log.info("Failed successfully with exception: {}.".format(e))
            return True
        return False
| 30.173554 | 79 | 0.67461 | 484 | 3,651 | 4.931818 | 0.299587 | 0.096355 | 0.041475 | 0.058651 | 0.480101 | 0.462924 | 0.420193 | 0.420193 | 0.392543 | 0.36238 | 0 | 0.009252 | 0.259929 | 3,651 | 120 | 80 | 30.425 | 0.874167 | 0.414407 | 0 | 0.342105 | 0 | 0 | 0.025967 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.184211 | false | 0 | 0.105263 | 0.026316 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ee166755be43836f7c47c4a1c08fb0f9fe27932 | 663 | py | Python | url/forms.py | mad-skull/URL-Shortener | 09fdf179a2ae0f0f5bc9309e53c0aebf352a9b02 | [
"MIT"
] | null | null | null | url/forms.py | mad-skull/URL-Shortener | 09fdf179a2ae0f0f5bc9309e53c0aebf352a9b02 | [
"MIT"
] | null | null | null | url/forms.py | mad-skull/URL-Shortener | 09fdf179a2ae0f0f5bc9309e53c0aebf352a9b02 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm # , RecaptchaField
from wtforms import validators, StringField
from wtforms.validators import Length
# from wtforms.fields.html5 import URLField


class UrlForm(FlaskForm):
    old = StringField('Title', [
        validators.InputRequired(),
        validators.Length(
            min=4, max=2027,
            message="If URLs were that short, would you even be here?")
    ])
    # recaptcha = RecaptchaField()

    def save_url(self, url):
        self.populate_obj(url)
        if "http" not in url.old:
            url.old = "https://" + url.old
        if "." not in url.old:
            url.old = url.old + ".com/"
        return url
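
# Usage sketch (editorial, illustrative): for a model object whose `old`
# attribute is "example", save_url(url) rewrites it to "https://example.com/".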
| 30.136364 | 90 | 0.624434 | 82 | 663 | 5.012195 | 0.560976 | 0.087591 | 0.065693 | 0.087591 | 0.068127 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01232 | 0.26546 | 663 | 21 | 91 | 31.571429 | 0.831622 | 0.131222 | 0 | 0 | 0 | 0 | 0.127622 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ee1ab036fce9ec6e8e4a17a9395246651c2444f | 645 | py | Python | Leetcode/0051-0100/0053-maximum-subarray.py | harshbhandari7/Data-Structures-and-Algorithms | 0ce0a5bc64d112ff38ae0de51d19ce3751b70eca | [
"MIT"
] | null | null | null | Leetcode/0051-0100/0053-maximum-subarray.py | harshbhandari7/Data-Structures-and-Algorithms | 0ce0a5bc64d112ff38ae0de51d19ce3751b70eca | [
"MIT"
] | null | null | null | Leetcode/0051-0100/0053-maximum-subarray.py | harshbhandari7/Data-Structures-and-Algorithms | 0ce0a5bc64d112ff38ae0de51d19ce3751b70eca | [
"MIT"
] | 1 | 2019-10-06T15:46:14.000Z | 2019-10-06T15:46:14.000Z | '''
Author : MiKueen
Level : Easy
Problem Statement : Maximum Subarray
Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
Example:
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
'''
class Solution:
    def maxSubArray(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        max_sum = curr = nums[0]
        for i in range(1, len(nums)):
            curr = max(nums[i], curr + nums[i])
            max_sum = max(max_sum, curr)
        return max_sum
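
# Editorial note: this is Kadane's algorithm; `curr` holds the best sum of a
# subarray ending at index i. Illustration:
#     Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4])  # -> 6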
| 25.8 | 136 | 0.575194 | 94 | 645 | 3.904255 | 0.585106 | 0.065395 | 0.070845 | 0.087193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037694 | 0.300775 | 645 | 25 | 137 | 25.8 | 0.776053 | 0.525581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ee4c9bda6034b23325fab78be4485408477098a | 1,727 | py | Python | faker_extras/biology.py | nyimbi/faker_extras | 37967d9101af217f7671fc2ad071b49258711c29 | [
"MIT"
] | null | null | null | faker_extras/biology.py | nyimbi/faker_extras | 37967d9101af217f7671fc2ad071b49258711c29 | [
"MIT"
] | null | null | null | faker_extras/biology.py | nyimbi/faker_extras | 37967d9101af217f7671fc2ad071b49258711c29 | [
"MIT"
] | 1 | 2019-05-23T16:02:45.000Z | 2019-05-23T16:02:45.000Z | """Faker data providers for biological data."""
from random import choice

from faker.providers import BaseProvider

from . import utils


class GeneticProvider(BaseProvider):
    """Genomic data provider.

    Acid data source:
    http://www.cryst.bbk.ac.uk/education/AminoAcid/the_twenty.html
    """

    acids = {
        'alanine': 'ala',
        'arginine': 'arg',
        'asparagine': 'asn',
        'aspartic acid': 'asp',
        'cysteine': 'cys',
        'glutamine': 'gln',
        'glutamic acid': 'glu',
        'glycine': 'gly',
        'histidine': 'his',
        'isoleucine': 'ile',
        'leucine': 'leu',
        'lysine': 'lys',
        'methionine': 'met',
        'phenylalanine': 'phe',
        'proline': 'pro',
        'serine': 'ser',
        'threonine': 'thr',
        'tryptophan': 'trp',
        'tyrosine': 'tyr',
        'valine': 'val',
    }
    def amino_acid_group(self):
        """Return an amino acid group."""
        return choice([
            'Aliphatic',
            'Aromatic',
            'Acidic',
            'Basic',
            'Hydroxylic',
            'Sulphur-containing',
            'Amidic',
        ])

    def amino_acid(self, symbol=True):
        """Return a random amino acid symbol, or the full acid name if symbol=False."""
        # random.choice needs an indexable sequence, so materialize the dict
        # views as lists (dict.keys()/values() are not subscriptable in Python 3).
        if symbol:
            return choice(list(self.acids.values()))
        return choice(list(self.acids.keys()))
    def rna(self):
        """Return some RNA sequence.

        >>> rna()
        >>> AAACUAGCUG
        """
        return utils._choice_str(['U', 'C', 'G', 'A'], 10)

    def dna(self):
        """Return some DNA sequence.

        >>> dna()
        >>> CTATAGAGCT
        """
        return utils._choice_str(['T', 'C', 'G', 'A'], 10)
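
# Usage sketch (editorial; assumes the provider is registered with a Faker
# instance in the usual way):
#
#     from faker import Faker
#     fake = Faker()
#     fake.add_provider(GeneticProvider)
#     fake.dna()               # e.g. 'CTATAGAGCT'
#     fake.amino_acid_group()  # e.g. 'Aromatic'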
| 23.337838 | 66 | 0.491025 | 164 | 1,727 | 5.121951 | 0.621951 | 0.032143 | 0.028571 | 0.047619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003527 | 0.34337 | 1,727 | 73 | 67 | 23.657534 | 0.737213 | 0.182976 | 0 | 0 | 0 | 0 | 0.232346 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088889 | false | 0 | 0.066667 | 0 | 0.311111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4eea6491efcd84bda94636814b33bd0e8caf7e12 | 351 | py | Python | check_file.py | aideyisu/english_study | 87f188655f9858dff32ea3f0dc5e86bc79c267ad | [
"MIT"
] | 1 | 2021-08-14T13:42:15.000Z | 2021-08-14T13:42:15.000Z | check_file.py | aideyisu/english_study | 87f188655f9858dff32ea3f0dc5e86bc79c267ad | [
"MIT"
] | null | null | null | check_file.py | aideyisu/english_study | 87f188655f9858dff32ea3f0dc5e86bc79c267ad | [
"MIT"
] | null | null | null | '''
Check that the files in the system are complete.
'''
from pathlib import Path
import os

basepath = os.path.dirname(__file__)  # directory containing this file
file_list = []
for file in file_list:
    my_file = Path(f'{basepath}/analysis_result/asd')
    if not my_file.exists():
        # create the id path if it does not exist
        os.makedirs(my_file)  # creates any missing intermediate directories as well
        print(f'Path {my_file} does not exist, creating it') | 18.473684 | 53 | 0.652422 | 46 | 351 | 4.73913 | 0.608696 | 0.110092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.225071 | 351 | 19 | 54 | 18.473684 | 0.801471 | 0.162393 | 0 | 0 | 0 | 0 | 0.179211 | 0.107527 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4eeb090fcce31a4d12b4e9146d28f91e0bc82c1a | 7,983 | py | Python | neutron/tests/unit/plugins/wrs/test_extension_host.py | ericho/stx-neutron | d4a8ad548c4afed73269575c48526a704dd09a9c | [
"Apache-2.0"
] | 4 | 2018-08-05T00:43:03.000Z | 2021-10-13T00:45:45.000Z | neutron/tests/unit/plugins/wrs/test_extension_host.py | ericho/stx-neutron | d4a8ad548c4afed73269575c48526a704dd09a9c | [
"Apache-2.0"
] | 8 | 2018-06-14T14:50:16.000Z | 2018-11-13T16:30:42.000Z | neutron/tests/unit/plugins/wrs/test_extension_host.py | ericho/stx-neutron | d4a8ad548c4afed73269575c48526a704dd09a9c | [
"Apache-2.0"
] | 7 | 2018-06-12T18:57:04.000Z | 2019-05-09T15:42:30.000Z | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
import contextlib
import copy
import six
import webob.exc
from neutron_lib.utils import helpers as lib_helpers
from oslo_log import log as logging
from neutron.common import constants
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.wrs import test_wrs_plugin
LOG = logging.getLogger(__name__)
HOST1 = {'name': 'compute-0',
         'id': '065aa1d1-84ed-4d59-a777-16b0ea8a5640',
         'availability': constants.HOST_UP}

HOST2 = {'name': 'compute-1',
         'id': '31df579d-d9ea-4623-a5a6-1bb0ccad22ef',
         'availability': constants.HOST_DOWN}


class HostTestCaseMixin(object):

    def _update_host(self, id, body):
        data = {'host': body}
        request = self.new_update_request('hosts', data, id)
        response = request.get_response(self.ext_api)
        return self.deserialize(self.fmt, response)

    def _bind_interface(self, id, body):
        data = {'interface': body}
        request = self.new_action_request('hosts', data, id,
                                          'bind_interface')
        return request.get_response(self.ext_api)

    def _unbind_interface(self, id, body):
        data = {'interface': body}
        request = self.new_action_request('hosts', data, id,
                                          'unbind_interface')
        return request.get_response(self.ext_api)

    def _create_host(self, host):
        data = {'host': {'name': host['name'],
                         'tenant_id': self._tenant_id}}
        for arg in ('id', 'availability'):
            data['host'][arg] = host[arg]
        request = self.new_create_request('hosts', data)
        return request.get_response(self.ext_api)

    def _make_host(self, host):
        response = self._create_host(host)
        if response.status_int >= 400:
            raise webob.exc.HTTPClientError(code=response.status_int)
        return self.deserialize(self.fmt, response)

    def _make_interface(self, id, interface):
        response = self._bind_interface(id, interface)
        if response.status_int >= 400:
            raise webob.exc.HTTPClientError(code=response.status_int)
        return self.deserialize(self.fmt, response)

    def _delete_interface(self, id, interface):
        response = self._unbind_interface(id, interface)
        if response.status_int >= 400:
            raise webob.exc.HTTPClientError(code=response.status_int)
        return self.deserialize(self.fmt, response)

    @contextlib.contextmanager
    def host(self, host, no_delete=False):
        host = self._make_host(host)
        try:
            yield host
        finally:
            if not no_delete:
                self._delete('hosts', host['host']['id'])
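
    # Usage note (editorial): the tests below use this context manager as
    #     with self.host(HOST1) as host: ...
    # so a host is created through the extension API and cleaned up on exit
    # unless no_delete=True.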

    def _create_test_interfaces(self, interfaces):
        self._interfaces = copy.deepcopy(interfaces)
        for name, host in six.iteritems(self._hosts):
            interface = self._interfaces.get(host['name'])
            if not interface:
                continue
            # Add to "sysinv" first
            self._host_driver.add_interface(host['name'], interface)
            # Then, add to the plugin
            self._make_interface(host['id'], interface)

    def _delete_test_interfaces(self):
        for name, host in six.iteritems(self._hosts):
            interface = self._interfaces.get(host['name'])
            if not interface:
                continue
            self._delete_interface(host['id'], interface)

    def _create_test_hosts(self, hosts):
        for host in hosts:
            data = self._make_host(host)
            self._hosts[host['name']] = data['host']
            self._host_driver.add_host(data['host'])

    def _delete_test_hosts(self):
        for name, host in six.iteritems(self._hosts):
            self._delete('hosts', host['id'])
        self._hosts = []

    def _get_pnet(self, name):
        return self._pnets.get(name, None)

    def _create_test_providernets(self, pnets, pnet_ranges):
        for pnet in pnets:
            data = self._make_pnet(pnet)
            self._pnets[pnet['name']] = data['providernet']
            # create segmentation ranges for each provider network
            pnet_ranges.setdefault(pnet['name'], [])
            for pnet_range in pnet_ranges[pnet['name']]:
                data = self._make_pnet_range(data['providernet'], pnet_range)
                self._pnet_ranges[pnet_range['name']] = data

    def _delete_test_providernets(self):
        for name, pnet in six.iteritems(self._pnets):
            self._delete('wrs-provider/providernets', pnet['id'])
        self._pnets = []

    def _register_avs_agent(self, host=None, mappings=None):
        agent = helpers._get_l2_agent_dict(
            host, constants.AGENT_TYPE_WRS_VSWITCH,
            'neutron-avs-agent')
        agent['configurations']['mappings'] = mappings
        return helpers._register_agent(agent, self._plugin)

    def _create_l2_agents(self):
        for name, host in six.iteritems(self._hosts):
            iface = self._interfaces[name]
            mappings = ['%s:%s' % (p, iface['uuid'])
                        for p in iface['providernets'].split(',')]
            mappings_dict = lib_helpers.parse_mappings(mappings,
                                                       unique_values=False)
            self._register_avs_agent(
                host=name, mappings=mappings_dict)

    def _update_host_states(self):
        for name, host in six.iteritems(self._hosts):
            updates = {'availability': constants.HOST_UP}
            data = self._update_host(host['id'], updates)
            self._hosts[name] = data['host']

    def _prepare_test_dependencies(self, hosts, providernets,
                                   providernet_ranges, interfaces):
        self._create_test_hosts(hosts)
        self._create_test_providernets(providernets, providernet_ranges)
        self._create_test_interfaces(interfaces)
        self._create_l2_agents()
        self._update_host_states()

    def _cleanup_test_dependencies(self):
        self._delete_test_interfaces()
        self._delete_test_hosts()
        self._delete_test_providernets()


class HostTestCase(HostTestCaseMixin,
                   test_wrs_plugin.WrsMl2PluginV2TestCase):

    def setUp(self, plugin=None, ext_mgr=None):
        self.host1 = HOST1
        self.host2 = HOST2
        super(HostTestCase, self).setUp()

    def tearDown(self):
        super(HostTestCase, self).tearDown()

    def test_create_host(self):
        with self.host(self.host1) as host:
            self.assertEqual(host['host']['name'], self.host1['name'])
            self.assertIsNotNone(host['host']['id'])

    def test_update_host(self):
        with self.host(self.host1) as host:
            self.assertEqual(host['host']['availability'],
                             constants.HOST_UP)
            data = {'host': {'availability': constants.HOST_DOWN}}
            request = self.new_update_request('hosts', data,
                                              host['host']['id'])
            response = request.get_response(self.ext_api)
            self.assertEqual(response.status_int, 200)
            body = self.deserialize(self.fmt, response)
            self.assertEqual(body['host']['availability'],
                             constants.HOST_DOWN)
| 38.014286 | 78 | 0.621571 | 925 | 7,983 | 5.144865 | 0.220541 | 0.021853 | 0.025005 | 0.022694 | 0.321286 | 0.276739 | 0.26161 | 0.238285 | 0.214541 | 0.170414 | 0 | 0.01392 | 0.271076 | 7,983 | 209 | 79 | 38.196172 | 0.803918 | 0.09182 | 0 | 0.207792 | 0 | 0 | 0.06971 | 0.013416 | 0 | 0 | 0 | 0 | 0.032468 | 1 | 0.155844 | false | 0 | 0.058442 | 0.006494 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4eeb19e644b70e38ac9382dd07fad932a634f7b9 | 1,792 | py | Python | project4/task1_ground_truth.py | yushanweng/github_projects | e2263a04a37291b767014239c2c23fa25a0811bd | [
"MIT"
] | null | null | null | project4/task1_ground_truth.py | yushanweng/github_projects | e2263a04a37291b767014239c2c23fa25a0811bd | [
"MIT"
] | 6 | 2020-05-18T05:02:09.000Z | 2022-02-27T05:41:49.000Z | project4/task1_ground_truth.py | yushanweng/projects | e2263a04a37291b767014239c2c23fa25a0811bd | [
"MIT"
] | null | null | null | from pyspark import SparkContext
import os
import re
import json
import sys
import time
import logging
import collections
from itertools import combinations
from itertools import product

s_logger = logging.getLogger('py4j.java_gateway')
s_logger.setLevel(logging.ERROR)

sc = SparkContext('local[*]', 'task1')
sc.setLogLevel('ERROR')

input_file_path = '../../PycharmProjects/553hw3/train_review.json'
textRDD = sc.textFile(input_file_path, 20)  # 20 = number of partitions
output_file_path = '../../PycharmProjects/553hw3/task1truth.txt'
# output_file_path = sys.argv[4]

start = time.time()


def Jac_func(x):  # x = (bus1, bus2)
    user_id_v1 = dict_pre_min[x[0]]
    user_id_v2 = dict_pre_min[x[1]]
    inter_set = set(user_id_v1).intersection(set(user_id_v2))
    union_set = set(user_id_v1).union(set(user_id_v2))
    sim = len(inter_set) / len(union_set)
    return (x, sim)
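
# Editorial note: Jac_func computes the Jaccard similarity of two businesses'
# user sets, |A ∩ B| / |A ∪ B|. Illustration with made-up ids: {u1, u2, u3}
# vs {u2, u3, u4} share 2 of 4 distinct users, so sim = 0.5.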

# ground truth, without minhash (no bands)
pre_min = textRDD.map(lambda x: json.loads(x)) \
    .map(lambda x: (x['business_id'], [x['user_id']])) \
    .reduceByKey(lambda a, b: a + b)
unique_bs_id = pre_min.map(lambda x: x[0]).collect()
pair_list = [unique_bs_id]
# print(pre_min)

dict_pre_min = {}
pre = pre_min.collect()
for i in pre:
    dict_pre_min.update({i[0]: i[1]})

unique_bs_id_comb = sc.parallelize(pair_list) \
    .map(lambda x: list(combinations(sorted(x), 2))) \
    .flatMap(lambda x: x) \
    .map(Jac_func) \
    .filter(lambda x: x[1] >= 0.05) \
    .sortBy(lambda x: x[1]) \
    .collect()

f = open(output_file_path, 'w')
for i in unique_bs_id_comb:
    dict_result = {}
    dict_result.update({"b1": i[0][0]})
    dict_result.update({"b2": i[0][1]})
    dict_result.update({"sim": i[1]})
    json_string = json.dumps(dict_result)
    f.write(str(json_string))
    f.write('\n')

end = time.time()
case_time = end - start
print('Duration:', case_time)
print('Duration:',case_time) | 28.444444 | 122 | 0.707589 | 299 | 1,792 | 4.016722 | 0.371237 | 0.039967 | 0.033306 | 0.048293 | 0.023314 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026854 | 0.127232 | 1,792 | 63 | 123 | 28.444444 | 0.741049 | 0.06529 | 0 | 0 | 0 | 0 | 0.096465 | 0.053325 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.196078 | 0 | 0.235294 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4eec905aa21dad34da72aba28caa72b9f1341da1 | 3,917 | py | Python | preprocess_kb.py | ruinunca/NeuralDialog-ZSDG | c20359541036ea876a126d1c7c172b820476dcb2 | [
"Apache-2.0"
] | null | null | null | preprocess_kb.py | ruinunca/NeuralDialog-ZSDG | c20359541036ea876a126d1c7c172b820476dcb2 | [
"Apache-2.0"
] | null | null | null | preprocess_kb.py | ruinunca/NeuralDialog-ZSDG | c20359541036ea876a126d1c7c172b820476dcb2 | [
"Apache-2.0"
] | 1 | 2020-09-24T15:09:34.000Z | 2020-09-24T15:09:34.000Z | import os
from argparse import ArgumentParser
import json
import copy
import shutil


def process_kb(in_kb):
    all_values = set({})
    items = in_kb.get('items', {})
    if items is None:
        items = {}
    for item in items:
        for key, value in item.items():
            all_values.add(value)
    return sorted(all_values, key=len)


def flatten_entities(in_entities_map):
    result = []
    for key, values_list in in_entities_map.items():
        for value in values_list:
            if isinstance(value, dict):
                result += map(lambda x: str(x).lower(), value.values())
            else:
                result.append(str(value).lower())
    return sorted(result, key=len, reverse=True)


def extract_entities(in_utterance, in_kb_entries):
    result = set([])
    for kb_entry in in_kb_entries:
        if kb_entry in in_utterance:
            in_utterance = in_utterance.replace(kb_entry, '__entity__')
            result.add(kb_entry)
    return result
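
# Editorial note: flatten_entities sorts longest-first on purpose; because
# extract_entities replaces each match with '__entity__' as it goes, a longer
# surface form (e.g. a hypothetical "dish parking") is consumed before a
# shorter substring (e.g. "parking") can double-match.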

def extract_entities_from_dialog(in_dialog, in_entities):
    result = set([])
    for turn in in_dialog['dialogue']:
        result.update(extract_entities(turn['data']['utterance'], in_entities))
    return result


def kb_entry_contains_all_entities(in_kb_entry, in_entities):
    found = set([])
    kb_entry_str = json.dumps(in_kb_entry)
    for entity in in_entities:
        if entity in kb_entry_str:
            found.add(entity)
    return len(found) == len(in_entities)


def delexicalize_dialog(in_dialog, in_entities_list):
    result = copy.deepcopy(in_dialog)
    result['scenario']['kb'] = json.loads(json.dumps(result['scenario']['kb']).lower())
    for turn in result['dialogue']:
        turn['data']['utterance'] = turn['data']['utterance'].lower()
    dialog_entities = extract_entities_from_dialog(in_dialog, in_entities_list)
    if result['scenario']['kb']['items']:
        for entry in result['scenario']['kb']['items']:
            if kb_entry_contains_all_entities(entry, dialog_entities):
                result['scenario']['kb']['items'] = [entry]
                print('New kb: {}'.format(json.dumps(entry)))
                break
    return result


def process_dataset(in_dataset_folder):
    datasets = {}
    for dataset_name in ['train', 'dev', 'test']:
        filename = 'kvret_{}_public.json'.format(dataset_name)
        with open(os.path.join(in_dataset_folder, filename)) as dataset_in:
            datasets[filename] = json.load(dataset_in)
    with open(os.path.join(in_dataset_folder, 'kvret_entities.json')) as entities_in:
        entities = json.load(entities_in)
    entities_flat = flatten_entities(entities)
    for dataset_name, dataset in datasets.items():
        for idx, dialog in enumerate(dataset):
            dataset[idx] = delexicalize_dialog(dialog, entities_flat)
    return datasets


def save_dataset(in_src_folder, in_tgt_folder, in_datasets):
    if not os.path.exists(in_tgt_folder):
        os.makedirs(in_tgt_folder)
    for filename in os.listdir(in_src_folder):
        if filename not in in_datasets:
            if os.path.isdir(os.path.join(in_src_folder, filename)):
                shutil.copytree(os.path.join(in_src_folder, filename),
                                os.path.join(in_tgt_folder, filename))
            else:
                shutil.copy(os.path.join(in_src_folder, filename), in_tgt_folder)
        else:
            with open(os.path.join(in_tgt_folder, filename), 'w') as json_out:
                json.dump(in_datasets[filename], json_out)


def configure_argument_parser():
    parser = ArgumentParser()
    parser.add_argument('dataset_folder')
    parser.add_argument('output_folder')
    return parser


if __name__ == '__main__':
    parser = configure_argument_parser()
    args = parser.parse_args()

    datasets = process_dataset(args.dataset_folder)
    save_dataset(args.dataset_folder, args.output_folder, datasets)
| 34.359649 | 87 | 0.658667 | 514 | 3,917 | 4.745136 | 0.200389 | 0.0451 | 0.0287 | 0.03444 | 0.159492 | 0.138171 | 0.121771 | 0.062321 | 0 | 0 | 0 | 0 | 0.225428 | 3,917 | 113 | 88 | 34.663717 | 0.803889 | 0 | 0 | 0.087912 | 0 | 0 | 0.059244 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098901 | false | 0 | 0.054945 | 0 | 0.241758 | 0.010989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4eeebae26c14fcd330b4a36375b13a6878c29835 | 1,038 | py | Python | Prettified Stopwatch.py | adrian88szymanski/Automate_the_Boring_Stuff_with_Python_by_Sweigart | a5fa40a3e27c4f0c79d6406926456c3d1a54c0c1 | [
"MIT"
] | 1 | 2020-08-15T11:36:24.000Z | 2020-08-15T11:36:24.000Z | Prettified Stopwatch.py | adrian88szymanski/Automate_the_Boring_Stuff_with_Python_by_Sweigart | a5fa40a3e27c4f0c79d6406926456c3d1a54c0c1 | [
"MIT"
] | 2 | 2022-01-13T03:18:08.000Z | 2022-03-12T00:48:23.000Z | Prettified Stopwatch.py | adrian88szymanski/Sweigart_tasks | a5fa40a3e27c4f0c79d6406926456c3d1a54c0c1 | [
"MIT"
] | null | null | null | #! python3
"""A stopwatch program with a prettier output and pyperclip functionality."""
import time
import pyperclip
# Display the programs instructions.
print('Press ENTER to begin. Afterwards, press ENTER to "click" the stopwatch.'
'Press Ctrl-c to quit.')
input()
print('Started.')
start_time = time.time()
last_time = start_time
lap_num = 1
# Start tracking the lap times.
try:
while True:
input()
lap_time = round(time.time() - last_time, 2)
total_time = round(time.time() - start_time, 2)
lap = 'lap # {} {} ({})'.format((str(lap_num)+ ':').ljust(3),
str(total_time).rjust(5),
str(lap_time).rjust(6))
print(lap, end='')
lap_num += 1
last_time = time.time() # Reset the last lap time.
pyperclip.copy(lap) # Copy latest lap to clipboard.
except KeyboardInterrupt:
# Handle the Ctrl-C exception to keep its error message from displaying.
print('\nDone.') | 28.833333 | 79 | 0.595376 | 132 | 1,038 | 4.583333 | 0.484848 | 0.079339 | 0.039669 | 0.052893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010753 | 0.283237 | 1,038 | 36 | 80 | 28.833333 | 0.802419 | 0.262042 | 0 | 0.086957 | 0 | 0 | 0.164021 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.086957 | 0.173913 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4eefc422a0f1ecd6f321d7742b73d4e9a3dbaace | 2,768 | py | Python | src/tspf/main.py | Javernaver/ProyectoTitulo | de8406b13bf62c3f96409ce95675c95a9e00c7f1 | [
"Apache-2.0"
] | 2 | 2022-01-28T02:15:55.000Z | 2022-01-28T02:16:00.000Z | src/tspf/main.py | Javernaver/TSP-Framework | de8406b13bf62c3f96409ce95675c95a9e00c7f1 | [
"Apache-2.0"
] | null | null | null | src/tspf/main.py | Javernaver/TSP-Framework | de8406b13bf62c3f96409ce95675c95a9e00c7f1 | [
"Apache-2.0"
] | null | null | null | """Main module that uses all the other classes to run the framework."""
from .Algorithms import GeneticAlgorithm, SimulatedAnnealing, LocalSearch, IteratedLocalSearch, timer
from . import sys, os, AlgorithmsOptions, MHType, Tsp, Tour, bcolors


def main(argv=sys.argv) -> None:
    """
    Main function that runs the metaheuristic-algorithms framework to solve the
    Traveling Salesman Problem (TSP)
    """
    # Enable the VT100 escape sequences on Windows 10 so that ANSI works and
    # text colors can be changed in cmd and PowerShell
    os.system('')
    # bcolors.disable(bcolors)

    start = timer()  # execution start time

    # read and initialize the options
    options = AlgorithmsOptions(argv=argv)
    # read and parse the TSP problem from the configured instance
    problem = Tsp(filename=options.instance)

    # Run the Simulated Annealing metaheuristic
    if options.metaheuristic == MHType.SA:
        # Initial solution
        first_solution = Tour(type_initial_sol=options.initial_solution, problem=problem)
        # Create the solver
        solver = SimulatedAnnealing(options=options, problem=problem)
        # Run the search
        solver.search(first_solution)
    # Run the Genetic Algorithm metaheuristic
    elif options.metaheuristic == MHType.GA:
        # Create the solver
        solver = GeneticAlgorithm(options=options, problem=problem)
        # Run the search
        solver.search()
    elif options.metaheuristic == MHType.LS:
        # Initial solution
        first_solution = Tour(type_initial_sol=options.initial_solution, problem=problem)
        # Create the solver
        solver = LocalSearch(options=options, problem=problem)
        # Run the search
        solver.search(first_solution)
    elif options.metaheuristic == MHType.ILS:
        # Initial solution
        first_solution = Tour(type_initial_sol=options.initial_solution, problem=problem)
        # Create the solver
        solver = IteratedLocalSearch(options=options, problem=problem)
        # Run the search
        solver.search(first_solution)
    else:
        # Default: fall back to the Genetic Algorithm
        solver = GeneticAlgorithm(options=options, problem=problem)
        # Run the search
        solver.search()

    # Save the solution and trajectory to file
    solver.printSolFile(options.solution)
    solver.printTraFile(options.trajectory)
    # Print the solution to the console
    solver.print_best_solution()

    end = timer()  # execution end time
    print(f"{bcolors.BOLD}Total execution time: {bcolors.ENDC}{bcolors.OKBLUE} {end-start:.3f} seconds{bcolors.ENDC}")

    if options.visualize:
        solver.visualize(options.replit) | 38.444444 | 128 | 0.700506 | 304 | 2,768 | 6.322368 | 0.404605 | 0.058273 | 0.044225 | 0.072841 | 0.360042 | 0.360042 | 0.360042 | 0.360042 | 0.360042 | 0.360042 | 0 | 0.002788 | 0.222543 | 2,768 | 72 | 129 | 38.444444 | 0.890335 | 0.321532 | 0 | 0.3125 | 0 | 0.03125 | 0.059946 | 0.028338 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.0625 | 0 | 0.09375 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ef1fb454c62d9675e2048e32596b30d112e5f10 | 4,838 | py | Python | python_tools/test/test_memory_engine.py | ultimatezen/felix | 5a7ad298ca4dcd5f1def05c60ae3c84519ec54c4 | [
"MIT"
] | null | null | null | python_tools/test/test_memory_engine.py | ultimatezen/felix | 5a7ad298ca4dcd5f1def05c60ae3c84519ec54c4 | [
"MIT"
] | null | null | null | python_tools/test/test_memory_engine.py | ultimatezen/felix | 5a7ad298ca4dcd5f1def05c60ae3c84519ec54c4 | [
"MIT"
] | null | null | null | """
Tests for MemoryEngine::RemoteMemory
"""
import unittest
import cPickle
from cStringIO import StringIO
import mock
from FelixMemoryServes import MemoryEngine

class WebMocker(unittest.TestCase):

    def set_request_val(self, val):
        self.req_val = cPickle.dumps(val)

    def get_request(self, x):
        return StringIO(self.req_val)

    def setUp(self):
        # mock COM stuff
        self.wrap = MemoryEngine.util.wrap
        self.unwrap = MemoryEngine.util.unwrap
        self.coll = MemoryEngine.util.NewCollection
        MemoryEngine.util.wrap = lambda x: x
        MemoryEngine.util.unwrap = lambda x: x
        MemoryEngine.util.NewCollection = lambda x: x
        # mock urllib stuff
        self.urlencode = MemoryEngine.urllib.urlencode
        self.Request = MemoryEngine.urllib2.Request
        self.urlopen = MemoryEngine.urllib2.urlopen
        MemoryEngine.urllib2.Request = lambda x, y: (x, y)
        self.req_val = cPickle.dumps(None)
        MemoryEngine.urllib2.urlopen = self.get_request
        MemoryEngine.urllib.urlencode = lambda x: x
        self.engine = MemoryEngine.FelixRemoteMemory()

    def tearDown(self):
        MemoryEngine.util.unwrap = self.unwrap
        MemoryEngine.util.wrap = self.wrap
        MemoryEngine.util.NewCollection = self.coll
        MemoryEngine.urllib2.Request = self.Request
        MemoryEngine.urllib2.urlopen = self.urlopen
        MemoryEngine.urllib.urlencode = self.urlencode
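
# Note (editorial): WebMocker monkey-patches module-level dependencies in setUp
# and restores them in tearDown, so RecordById and friends read a cPickle
# payload staged via set_request_val instead of hitting a real Felix memory
# server over HTTP.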

class TestRecordById(WebMocker):

    def test_no_url(self):
        self.engine.commands = {}
        result = self.engine.RecordById(3)
        assert result is None, result

    def test_None_result(self):
        self.engine.commands = dict(rec_by_id="foo")
        result = self.engine.RecordById(3)
        assert result is None, result

    def test_a_b(self):
        self.engine.commands = dict(rec_by_id="foo")
        self.set_request_val(dict(source="a", trans="b"))
        result = self.engine.RecordById(3)
        assert result.Source() == "a", result.Source()
        assert result.Trans() == "b", result.Trans()


class TestRecToRaw(WebMocker):

    def test_a_b(self):
        record = mock.Mock()
        record.data = dict(source="raw a",
                           trans="raw b",
                           context="context",
                           created_by="Ryan",
                           modified_by="Sam")
        record.commands = {}
        data = self.engine.rec_to_raw(record)
        assert data == record.data, data

    def test_a_b_record2dict(self):
        class Foo: pass
        record = Foo()
        record.data = dict(source="raw a",
                           trans="raw b",
                           context="context")
        record.Source = u"record2d source"
        record.Trans = u"record2d trans"
        record.Context = u"record2d context"
        record.Reliability = 5
        record.Validated = 4
        record.RefCount = 3
        record.Created = 2
        record.Modified = 1
        record.CreatedBy = u"Created by"
        record.ModifiedBy = u"Modified by"
        record.Id = 3
        record.commands = {}
        data = self.engine.rec_to_raw(record)
        assert data["source"] == "record2d source", data
        assert data["trans"] == "record2d trans", data
        assert data["context"] == "record2d context", data


class TestPrepareHits(WebMocker):

    def test_empty(self):
        hits = self.engine.prepare_hits([])
        assert hits == [], hits

    def test_two(self):
        rec1 = dict(source="sa", trans="ta")
        rec2 = dict(source="sb", trans="tb")
        h1, h2 = self.engine.prepare_hits([rec1, rec2])
        assert h1.Source() == "sa", h1.Source()
        assert h2.Trans() == "tb", h2.Trans()


class TestSetBase(unittest.TestCase):

    def setUp(self):
        self.engine = MemoryEngine.FelixRemoteMemory()

    def test_connection(self):
        connection = "http://ginstrom.com:8000/api/mems/1"
        expected = "http://ginstrom.com:8000"
        base = self.engine.set_base(connection)
        assert base == expected, base


class TestGetLoginUrl(unittest.TestCase):

    def setUp(self):
        self.engine = MemoryEngine.FelixRemoteMemory()

    def test_commands(self):
        expected = "http://ginstrom.com:8000/api/login"
        self.engine.commands = dict(login=expected)
        login = self.engine.get_login_url()
        assert login == expected, login

    def test_no_command(self):
        self.engine.base = "http://felix-cat.com:8000"
        expected = "http://felix-cat.com:8000/api/login/"
        login = self.engine.get_login_url()
        assert login == expected, login
| 31.620915 | 60 | 0.596114 | 539 | 4,838 | 5.270872 | 0.207792 | 0.063358 | 0.029567 | 0.041183 | 0.330517 | 0.236888 | 0.236888 | 0.223161 | 0.223161 | 0.197818 | 0 | 0.015529 | 0.294543 | 4,838 | 152 | 61 | 31.828947 | 0.816877 | 0.014469 | 0 | 0.243243 | 0 | 0 | 0.075103 | 0 | 0 | 0 | 0 | 0 | 0.126126 | 1 | 0.144144 | false | 0.009009 | 0.045045 | 0.009009 | 0.261261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ef25d0f65d81e8e0d5a9796fa97079edac4afe1 | 6,033 | py | Python | commands/print9.py | egigoka/commands | 3431ccdb9b9e8b13957b6cfc10feb51c46188b48 | [
"MIT"
] | 1 | 2018-05-23T03:34:05.000Z | 2018-05-23T03:34:05.000Z | commands/print9.py | egigoka/commands | 3431ccdb9b9e8b13957b6cfc10feb51c46188b48 | [
"MIT"
] | null | null | null | commands/print9.py | egigoka/commands | 3431ccdb9b9e8b13957b6cfc10feb51c46188b48 | [
"MIT"
] | null | null | null | #! python3
# -*- coding: utf-8 -*-
"""Internal module with functions for printing to the console."""

from typing import Union

__version__ = "0.13.0"


class __Print:
    """Class with functions for printing to the console."""

    def __init__(self, *args, **kwargs):
        from threading import Lock
        self.s_print_lock = Lock()
        self.colorama_inited = False
        self._color_output_enabled = None

    def __call__(self, *args, **kwargs) -> None:
        self.multithread_safe(*args, **kwargs)

    def multithread_safe(self, *args, **kwargs) -> None:
        """Thread-safe print function."""
        with self.s_print_lock:
            print(*args, **kwargs)

    def debug(self, *strings: Union[str, bytes], raw: bool = False) -> None:
        """More notable print, used only for debugging
        <br>`param strings` prints separately
        <br>`param raw` print representation of strings
        <br>`return`
        """
        from .console9 import Console
        line = "-" * Console.width()
        self.multithread_safe("<<<Debug sheet:>>>")
        for str_ in strings:
            self.multithread_safe(line, end="")
            if raw:
                self.multithread_safe(repr(str_))
            else:
                self.multithread_safe(str_)
        self.multithread_safe(line)
        self.multithread_safe("<<<End of debug sheet>>>")

    def rewrite(self, *strings: str, sep: str = " ", fit: bool = True) -> None:
        """Print a rewritable string. Note that the next write must overwrite the
        previous characters, since only the cursor position is reset.
        <br>`param strings` work as builtin print()
        <br>`param sep` sep as builtin print(sep)
        <br>`param fit` try to fit output in one line
        """
        from .os9 import OS
        from .console9 import Console
        line = " " * Console.width()
        if OS.windows:  # Windows adds a symbol to the end of the string :(
            line = line[:-1]
        self.multithread_safe(line, end="\r")
        if fit:
            strings = Console.fit(*strings, sep=sep)
        self.multithread_safe(*strings, sep=sep, end="\r")
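
    # Note (editorial): rewrite() erases the previous output by printing a
    # console-width blank line ending in '\r', then prints the new text also
    # ending in '\r', leaving the cursor at column 0 for the next rewrite.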

    def prettify(self, object_: Union[list, dict, tuple], indent: int = 4, quiet: bool = False) -> str:
        """Pretty-print lists, dicts and tuples.
        <br>`param object_` object to print
        <br>`param indent` indent of each new nested level
        <br>`param quiet` suppress print to console
        <br>`return` from pprint.pformat
        """
        import pprint
        pretty_printer = pprint.PrettyPrinter(indent=indent)
        pretty_string = pretty_printer.pformat(object=object_)
        if not quiet:
            self.multithread_safe(pretty_string)
        return pretty_string

    @property
    def color_output_enabled(self):
        if self._color_output_enabled is None:
            from .os9 import OS
            self._color_output_enabled = "NO_COLOR" not in OS.env.keys()
        return self._color_output_enabled

    def colored(self, *strings: Union[str, int, list, dict], attributes: list = None, end: str = "\n",
                sep: str = " ", flush: bool = False, verbose: bool = True) -> str:
        """Wrapper for termcolor.cprint with some added smartness.
        <br>Usage: Print.colored("text1", "text2", "red") or Print.colored("text", "text2", "red", "on_white"),
        <br>or even Print.colored("text", "text2", "on_white", "red").
        You can pick colors from termcolor.COLORS and highlights from termcolor.HIGHLIGHTS.
        When the environment variable NO_COLOR is present (regardless of its value), no ANSI color is added.
        <br>`param strings` work as builtin print(*strings)
        <br>`param attributes` goes to the termcolor.cprint(attrs) argument
        <br>`param end` same as builtin print(end)
        <br>`param sep` same as builtin print(sep)
        <br>`param flush` same as builtin print(flush)
        """
        import termcolor
        from contextlib import suppress
        termcolor.COLORS["gray"] = termcolor.COLORS["black"] = 30
        termcolor.HIGHLIGHTS["on_gray"] = termcolor.HIGHLIGHTS["on_black"] = 40
        from .os9 import OS
        if OS.windows and not self.colorama_inited and self.color_output_enabled:
            import colorama
            colorama.init()
            self.colorama_inited = True

        # check for colors in input
        highlight = None
        color = None
        color_args = 0
        try:
            if str(strings[-1]) in termcolor.HIGHLIGHTS:
                highlight = strings[-1]
                color_args += 1
                if str(strings[-2]) in termcolor.COLORS:
                    color = strings[-2]
                    color_args += 1
            elif str(strings[-1]) in termcolor.COLORS:
                color = strings[-1]
                color_args += 1
                if str(strings[-2]) in termcolor.HIGHLIGHTS:
                    highlight = strings[-2]
                    color_args += 1
        except (KeyError, IndexError):  # a single argument has no strings[-2]
            pass

        # create a single string to pass into termcolor
        string = ""
        if color_args:
            strings = strings[:-color_args]
        if len(strings) > 1:
            for substring in strings[:-1]:  # append every string to the main string with the separator
                string += str(substring) + sep
            string += str(strings[-1])  # the last one without a separator
        else:  # if there is only one object
            string = strings[0]

        if self.color_output_enabled:
            colored_string = termcolor.colored(string, color=color, on_color=highlight, attrs=attributes)
        else:
            colored_string = string
        if verbose:
            self.multithread_safe(colored_string, end=end, flush=flush)

        with suppress(KeyError):  # for working with multithreading
            termcolor.COLORS.pop("gray")
            termcolor.COLORS.pop("black")
            termcolor.HIGHLIGHTS.pop("on_gray")
            termcolor.HIGHLIGHTS.pop("on_black")
        return colored_string
Print = __Print()
| 39.690789 | 114 | 0.593403 | 714 | 6,033 | 4.896359 | 0.268908 | 0.02603 | 0.059783 | 0.037757 | 0.165332 | 0.096682 | 0.065789 | 0.024027 | 0.024027 | 0.024027 | 0 | 0.009003 | 0.300348 | 6,033 | 151 | 115 | 39.953642 | 0.819237 | 0.260401 | 0 | 0.121212 | 0 | 0 | 0.027299 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080808 | false | 0.010101 | 0.111111 | 0 | 0.232323 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ef3dc5e360dade818b286abce16461dc436ebac | 3,514 | py | Python | src/function_file.py | cognitedata/function-action-oidc | 6ba70da59aa1e94fff003def1082f48fc55bd6a2 | [
"Apache-2.0"
] | 1 | 2021-09-06T20:57:27.000Z | 2021-09-06T20:57:27.000Z | src/function_file.py | cognitedata/function-action-oidc | 6ba70da59aa1e94fff003def1082f48fc55bd6a2 | [
"Apache-2.0"
] | 8 | 2021-09-06T12:16:39.000Z | 2022-02-16T11:48:54.000Z | src/function_file.py | cognitedata/function-action-oidc | 6ba70da59aa1e94fff003def1082f48fc55bd6a2 | [
"Apache-2.0"
] | null | null | null | import io
import logging
import os
from pathlib import Path
from zipfile import ZipFile

from cognite.client.data_classes import DataSet, FileMetadata
from cognite.client.exceptions import CogniteAPIError
from cognite.experimental import CogniteClient
from retry import retry  # type: ignore

from configs import FunctionConfig
from exceptions import FunctionDeployError
from utils import retrieve_dataset, temporary_chdir

logger = logging.getLogger(__name__)


def _write_files_to_zip_buffer(zf: ZipFile, directory: Path):
    for dirpath, _, files in os.walk(directory):
        zf.write(dirpath)
        for f in files:
            zf.write(Path(dirpath) / f)


@retry(exceptions=FunctionDeployError, tries=12, delay=2, jitter=2, max_delay=15, logger=None)
def await_file_upload_status(client: CogniteClient, file_id: int):
    if not client.files.retrieve(file_id).uploaded:
        logger.info(f"- File (ID: {file_id}) not yet uploaded...")
        raise FunctionDeployError
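
# Note (editorial): raising FunctionDeployError is what drives the polling; the
# @retry decorator above re-invokes the check up to 12 times (2s base delay
# plus jitter, capped at 15s) until CDF reports the file as uploaded.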

def upload_zipped_code_to_files(
    client: CogniteClient,
    file_bytes: bytes,
    xid: str,
    ds: DataSet,
) -> FileMetadata:
    file_meta = client.files.upload_bytes(
        file_bytes,
        name=xid,
        external_id=xid,
        data_set_id=ds.id,
        overwrite=True,
    )
    await_file_upload_status(client, file_meta.id)
    return file_meta


def zip_and_upload_folder(client: CogniteClient, fn_config: FunctionConfig, xid: str) -> int:
    logger.info(f"Uploading code from '{fn_config.function_folder}' to Files using external ID: '{xid}'")
    buf = io.BytesIO()  # TempDir, who needs that?! :rocket:
    with ZipFile(buf, mode="a") as zf:
        with temporary_chdir(fn_config.function_folder):
            _write_files_to_zip_buffer(zf, directory=Path())
        if (common_folder := fn_config.common_folder) is not None:
            with temporary_chdir(common_folder.parent):  # Note .parent
                logger.info(f"- Added common directory: '{common_folder}' to the file/function")
                _write_files_to_zip_buffer(zf, directory=common_folder)

    if (ds_id := fn_config.data_set_id) is not None:
        ds = retrieve_dataset(client, ds_id)
        logger.info(
            f"- Using dataset '{ds.external_id}' (ID: {ds_id}) to govern the file "
            f"(has write protection: {ds.write_protected})."
        )
    else:
        ds = DataSet(id=None)
        logger.info("- No dataset will be used to govern the function zip-file!")

    file_meta = upload_zipped_code_to_files(client, buf.getvalue(), xid, ds)
    if (file_id := file_meta.id) is not None:
        logger.info(f"- File uploaded successfully ({xid})!")
        return file_id
    raise FunctionDeployError(f"Failed to upload file ({xid}) to CDF Files")


def delete_function_file(client: CogniteClient, xid: str):
    if (file_meta := client.files.retrieve(external_id=xid)) is None:
        logger.info(f"Unable to delete file! External ID: '{xid}' NOT found!")
        return
    logger.info(f"Deleting existing file '{xid}' (ID: {file_meta.id})")
    try:
        client.files.delete(external_id=xid)
        logger.info(f"- Delete of file '{xid}' successful!")
    except CogniteAPIError as err:
        reason = f"{type(err).__name__}: {err}"  # 'CogniteAPIError' does not implement dunder repr...
        logger.error(
            "Unable to delete file! Trying to ignore and continue as this action will overwrite "
            f"the file later. Error message from the API: \n{reason}"
        )
| 37.382979 | 105 | 0.684121 | 476 | 3,514 | 4.863445 | 0.287815 | 0.038877 | 0.038013 | 0.019438 | 0.085961 | 0.062635 | 0.027646 | 0 | 0 | 0 | 0 | 0.002171 | 0.213432 | 3,514 | 93 | 106 | 37.784946 | 0.835384 | 0.031873 | 0 | 0 | 0 | 0 | 0.2199 | 0.021195 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065789 | false | 0 | 0.157895 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ef491194dad1406c21f05ef14e5bf3fd5cd294c | 474 | py | Python | learning_python/2.built-in_data_types/1.built-in_data_types.py | thekilian/Python-pratica | 875661addd5b8eb4364bc638832c7ab55dcefce4 | [
"MIT"
] | null | null | null | learning_python/2.built-in_data_types/1.built-in_data_types.py | thekilian/Python-pratica | 875661addd5b8eb4364bc638832c7ab55dcefce4 | [
"MIT"
] | null | null | null | learning_python/2.built-in_data_types/1.built-in_data_types.py | thekilian/Python-pratica | 875661addd5b8eb4364bc638832c7ab55dcefce4 | [
"MIT"
] | null | null | null | '''
BUILT-IN DATA TYPES

Mutable - the value can change
Immutable - the value cannot change
'''

# Immutable object
age = 99
print(id(age))
age = 100
print(id(age))

'''
We didn't change 99 to 100. We actually just pointed age to a different
location: the new int object whose value is 100. We print the IDs by calling
the built-in id function.
'''

# Mutable object
'''
fab = Person(age=99)
fab.age
# 99
id(fab)
# some numbers here
fab.age = 100
id(fab)
# same 'some numbers here'
''' | 14.8125 | 120 | 0.694093 | 82 | 474 | 4.012195 | 0.5 | 0.045593 | 0.042553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 0.198312 | 474 | 32 | 121 | 14.8125 | 0.813158 | 0.253165 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ef5599e07f8786bc1af735d52d729d2cbe1c992 | 3,147 | py | Python | deep_dream.py | ewalldo/Deep-Dream---Keras | bd4b116c547b008a38cb9e15d7845b1d4f1d120b | [
"MIT"
] | null | null | null | deep_dream.py | ewalldo/Deep-Dream---Keras | bd4b116c547b008a38cb9e15d7845b1d4f1d120b | [
"MIT"
] | null | null | null | deep_dream.py | ewalldo/Deep-Dream---Keras | bd4b116c547b008a38cb9e15d7845b1d4f1d120b | [
"MIT"
] | null | null | null | from keras.applications import inception_v3
from keras import backend as K
import scipy
import imageio
from keras.preprocessing import image
import numpy as np
K.set_learning_phase(0)
model = inception_v3.InceptionV3(weights='imagenet', include_top=False)
layer_contributions = {'mixed2': 0.2, 'mixed3': 3., 'mixed4': 2., 'mixed5': 1.5,}
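# Descriptive note: the loss assembled below is a weighted sum of each layer's
# mean squared activations (borders trimmed with [2:-2] to avoid edge
# artifacts); larger coefficients let that layer's features dominate the dream.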
layer_dict = dict([(layer.name, layer) for layer in model.layers])
loss = K.variable(0.)
for layer_name in layer_contributions:
coeff = layer_contributions[layer_name]
activation = layer_dict[layer_name].output
scaling = K.prod(K.cast(K.shape(activation), 'float32'))
loss = loss + coeff * K.sum(K.square(activation[:, 2: -2, 2: -2, :])) / scaling
dream = model.input
grads = K.gradients(loss, dream)[0]
grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)
outputs = [loss, grads]
fetch_loss_and_grads = K.function([dream], outputs)
def eval_loss_and_grads(x):
outs = fetch_loss_and_grads([x])
loss_value = outs[0]
grad_values = outs[1]
return loss_value, grad_values
def gradient_ascent(x, iterations, step, max_loss=None):
for i in range(iterations):
loss_value, grad_values = eval_loss_and_grads(x)
if max_loss is not None and loss_value > max_loss:
break
print('Loss at', i, ":", loss_value)
x += step * grad_values
return x
def resize_img(img, size):
img = np.copy(img)
factors = (1, float(size[0]) / img.shape[1], float(size[1]) / img.shape[2], 1)
return scipy.ndimage.zoom(img, factors, order=1)
def save_img(img, fname):
pil_image = deprocess_image(np.copy(img))
# scipy.misc.imsave(fname, pil_image)
imageio.imwrite(fname, pil_image)
def preprocess_image(image_path):
img = image.load_img(image_path)
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = inception_v3.preprocess_input(img)
return img
def deprocess_image(x):
if K.image_data_format() == 'channels_first':
x = x.reshape((3, x.shape[2], x.shape[3]))
x = x.transpose((1, 2, 0))
else:
x = x.reshape((x.shape[1], x.shape[2], 3))
x /= 2.
x += 0.5
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x
step = 0.01
num_octave = 3
octave_scale = 1.4
iterations = 20
max_loss = 10.
base_image_path = 'cats_and_dogs_small/train/cats/cat.0.jpg'
img = preprocess_image(base_image_path)
original_shape = img.shape[1:3]
successive_shapes = [original_shape]
for i in range(1, num_octave):
shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
    successive_shapes.append(shape)
successive_shapes = successive_shapes[::-1]
original_img = np.copy(img)
shrunk_original_img = resize_img(img, successive_shapes[0])
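# Descriptive note on the octave loop below: the image is processed from the
# smallest scale up, and after each gradient-ascent pass the detail lost when
# the original was shrunk is re-injected, so upscaling blur does not accumulate.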
for shape in successive_shapes:
print("Processing image shape", shape)
img = resize_img(img, shape)
img = gradient_ascent(img, iterations=iterations, step=step, max_loss=max_loss)
upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
same_size_original = resize_img(original_img, shape)
lost_detail = same_size_original - upscaled_shrunk_original_img
img += lost_detail
shrunk_original_img = resize_img(original_img, shape)
save_img(img, fname='dream_at_scale'+str(shape)+'.png')
save_img(img, fname='final_dream.png')
| 28.351351 | 81 | 0.733079 | 516 | 3,147 | 4.251938 | 0.296512 | 0.021878 | 0.038742 | 0.017776 | 0.069736 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025912 | 0.12933 | 3,147 | 110 | 82 | 28.609091 | 0.774818 | 0.011122 | 0 | 0.02381 | 0 | 0 | 0.051768 | 0.012862 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.202381 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ef59607a8d06514cadf280683c16c602c23e215 | 978 | py | Python | game.py | greymistcube/racing_game_ai | 7e5e6ec781eb3c98729d370cbcc0ab6ed053962f | [
"MIT"
] | null | null | null | game.py | greymistcube/racing_game_ai | 7e5e6ec781eb3c98729d370cbcc0ab6ed053962f | [
"MIT"
] | null | null | null | game.py | greymistcube/racing_game_ai | 7e5e6ec781eb3c98729d370cbcc0ab6ed053962f | [
"MIT"
] | null | null | null | import pygame
import argparser
import lib
import ai.neatinterface.neatcore
pygame.init()
if __name__ == "__main__":
args = argparser.get_args()
# pygame initialization
pygame.init()
    # initialize properly and link them as common resources
# for other modules
# I admit this looks pretty hideous but python has no good way of
# handling singletons
lib.common.settings = settings = lib.Settings(args)
lib.common.display = display = lib.Display()
lib.common.clock = clock = lib.Clock()
lib.common.events = events = lib.Events()
# setting game mode
if args.ai == "neat":
lib.common.core = core = ai.neatinterface.neatcore.NeatCore()
else:
lib.common.core = core = lib.Core()
core.new_game()
# main loop
while True:
clock.tick()
core.update()
if core.game_over():
core.new_game()
continue
display.draw(core.get_surface())
| 22.744186 | 70 | 0.639059 | 122 | 978 | 5.016393 | 0.5 | 0.088235 | 0.075163 | 0.055556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.260736 | 978 | 42 | 71 | 23.285714 | 0.846473 | 0.220859 | 0 | 0.166667 | 0 | 0 | 0.015915 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ef6267fcd36a4e2159433da6c76f793b111f5da | 39,946 | py | Python | build/lib/biotas/radio_project.py | knebiolo/biotas | 2ea06297fc2851bc54ce89f20f8f7aaa98dd8fc1 | [
"MIT"
] | 1 | 2021-12-30T14:25:39.000Z | 2021-12-30T14:25:39.000Z | build/lib/biotas/radio_project.py | knebiolo/biotas | 2ea06297fc2851bc54ce89f20f8f7aaa98dd8fc1 | [
"MIT"
] | 17 | 2020-11-27T18:05:45.000Z | 2022-01-27T02:46:46.000Z | build/lib/biotas/radio_project.py | knebiolo/biotas | 2ea06297fc2851bc54ce89f20f8f7aaa98dd8fc1 | [
"MIT"
] | 1 | 2020-11-17T21:07:38.000Z | 2020-11-17T21:07:38.000Z | # -*- coding: utf-8 -*-
'''
Module contains all of the functions to create a radio telemetry project.'''
# import modules required for function dependencies
import numpy as np
import pandas as pd
import os
import sqlite3
import datetime
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.dates as mdates
from mpl_toolkits.mplot3d import Axes3D
import statsmodels.api as sm
import statsmodels.formula.api as smf
import networkx as nx
from matplotlib import rcParams
from scipy import interpolate
font = {'family': 'serif','size': 6}
rcParams['font.size'] = 6
rcParams['font.family'] = 'serif'
def noiseRatio (duration,data,study_tags):
    ''' function calculates the ratio of miscoded (pure noise) detections to matching frequency/code
detections within the duration specified.
In other words, what is the ratio of miscoded to correctly coded detections within the duration specified
duration = moving window length in minutes
data = current data file
study_tags = list or list like object of study tags
'''
# identify miscodes
data['miscode'] = np.isin(data.FreqCode.values, study_tags, invert = True)
# bin everything into nearest 5 min time bin and count miscodes and total number of detections
duration_s = str(int(duration * 60)) + 's'
miscode = data.groupby(pd.Grouper(key = 'timeStamp', freq = duration_s)).miscode.sum().to_frame()
total = data.groupby(pd.Grouper(key = 'timeStamp', freq = duration_s)).FreqCode.count().to_frame()
# rename
total.rename(columns = {'FreqCode':'total'}, inplace = True)
# merge dataframes, calculate noise ratio
noise = total.merge(miscode, left_on = 'timeStamp', right_on ='timeStamp')
noise.reset_index(inplace = True)
noise.fillna(value = 0, inplace = True)
noise['noiseRatio'] = noise.miscode / noise.total
noise.dropna(inplace = True)
noise['Epoch'] = (noise['timeStamp'] - datetime.datetime(1970,1,1)).dt.total_seconds()
# create function for noise ratio at time
if len(noise) >= 2:
noise_ratio_fun = interpolate.interp1d(noise.Epoch.values,noise.noiseRatio.values,kind = 'linear',bounds_error = False, fill_value ='extrapolate')
# interpolate noise ratio as a function of time for every row in data
data['noiseRatio'] = noise_ratio_fun(data.Epoch.values)
data.drop(columns = ['miscode'], inplace = True)
return data
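# A minimal usage sketch (hypothetical values; assumes a dataframe with the
# 'timeStamp', 'FreqCode' and 'Epoch' columns required above):
#
#   raw = pd.read_sql('SELECT * FROM tblRaw', con=conn, parse_dates=['timeStamp'])
#   raw = noiseRatio(5.0, raw, study_tags=['164.480 25'])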
def createTrainDB(project_dir, dbName):
''' function creates empty project database, user can edit project parameters using
    DB Browser for SQLite, found at: http://sqlitebrowser.org/'''
# first step creates a project directory if it doesn't already exist
if not os.path.exists(project_dir):
os.makedirs(project_dir)
data_dir = os.path.join(project_dir,'Data') # raw data goes here
if not os.path.exists(data_dir):
os.makedirs(data_dir)
training_dir = os.path.join(data_dir,'Training_Files')
if not os.path.exists(training_dir):
os.makedirs(training_dir)
output_dir = os.path.join(project_dir, 'Output') # intermediate data products, final data products and images
if not os.path.exists(output_dir):
os.makedirs(output_dir)
scratch_dir = os.path.join(output_dir,'Scratch')
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
figures_dir = os.path.join(output_dir, 'Figures')
if not os.path.exists(figures_dir):
os.makedirs(figures_dir)
# program_dir = os.path.join(project_dir, 'Program') # this is where we will create a local clone of the Git repository
# if not os.path.exists(program_dir):
# os.makedirs(program_dir)
dbDir = os.path.join(data_dir,dbName)
# connect to and create the project geodatabase
conn = sqlite3.connect(dbDir, timeout=30.0)
c = conn.cursor()
# mandatory project tables
c.execute('''DROP TABLE IF EXISTS tblMasterReceiver''') # receiver ID, receiver type
c.execute('''DROP TABLE IF EXISTS tblMasterTag''') # tag ID, frequency, freqcode
c.execute('''DROP TABLE IF EXISTS tblReceiverParameters''') # field crews fuck up, we need these parameters to correctly quantify detection history
c.execute('''DROP TABLE IF EXISTS tblAlgParams''')
c.execute('''DROP TABLE IF EXISTS tblNodes''')
c.execute('''CREATE TABLE tblMasterReceiver(recID TEXT, Name TEXT, RecType TEXT, Node TEXT)''')
c.execute('''CREATE TABLE tblReceiverParameters(recID TEXT, RecType TEXT, ScanTime REAL, Channels INTEGER, fileName TEXT)''')
c.execute('''CREATE TABLE tblMasterTag(FreqCode TEXT, PIT_ID TEXT, PulseRate REAL, MortRate REAL, CapLoc TEXT, RelLoc TEXT, TagType TEXT, Length INTEGER, Sex TEXT, RelDate TIMESTAMP, Study TEXT, Species TEXT)''')
c.execute('''CREATE TABLE tblAlgParams(det INTEGER, duration INTEGER)''')
c.execute('''CREATE TABLE tblNodes(Node TEXT, Reach TEXT, RecType TEXT, X INTEGER, Y INTEGER)''')
''' note these top three tables are mandatory, depending upon how many receivers
we train and/or use for a study we may not need all of these tables, or we may
need more. This must be addressed in future iterations, can we keep adding empty
tables at the onset of the project???'''
c.execute('''DROP TABLE IF EXISTS tblRaw''')
c.execute('''DROP TABLE IF EXISTS tblTrain''')
c.execute('''CREATE TABLE tblTrain(Channels INTEGER, Detection INTEGER, FreqCode TEXT, Power REAL, lag INTEGER, lagDiff REAL, FishCount INTEGER, conRecLength INTEGER, miss_to_hit REAL, consDet INTEGER, detHist TEXT, hitRatio REAL, noiseRatio REAL, seriesHit INTEGER, timeStamp TIMESTAMP, Epoch INTEGER, Seconds INTEGER, fileName TEXT, recID TEXT, recType TEXT, ScanTime REAL)''') # create full radio table - table includes all records, final version will be designed for specific receiver types
c.execute('''CREATE TABLE tblRaw(timeStamp TIMESTAMP, Epoch INTEGER, FreqCode TEXT, Power REAL,noiseRatio, fileName TEXT, recID TEXT, ScanTime REAL, Channels REAL, RecType TEXT)''')
#c.execute('''CREATE INDEX idx_fileNameRaw ON tblRaw (fileName)''')
c.execute('''CREATE INDEX idx_RecID_Raw ON tblRaw (recID)''')
c.execute('''CREATE INDEX idx_FreqCode On tblRaw (FreqCode)''')
#c.execute('''CREATE INDEX idx_fileNameTrain ON tblTrain (fileName)''')
c.execute('''CREATE INDEX idx_RecType ON tblTrain (recType)''')
conn.commit()
c.close()
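# Typical project bootstrap (hypothetical directory and database names):
#
#   createTrainDB(r'C:\telemetry\my_study', 'my_study.db')
#   setAlgorithmParameters(det=5, duration=1,
#                          dbName=r'C:\telemetry\my_study\Data\my_study.db')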
def setAlgorithmParameters(det,duration,dbName):
'''Function sets parameters for predictor variables used in the naive bayes
classifier
det = number of detections to look forward and backward in times for detection
history strings
duration = moving window around each detection, used to calculate the noise
ratio and number of fish present (fish count)
'''
conn = sqlite3.connect(dbName, timeout=30.0)
c = conn.cursor()
params = [(det,duration)]
conn.executemany('INSERT INTO tblAlgParams VALUES (?,?)',params)
conn.commit()
conn.commit()
c.close()
def studyDataImport(dataFrame,dbName,tblName):
    '''function imports formatted data into project database. The code in its current
    form does not check for inconsistencies with data structures. If your
    shit isn't right, this isn't going to work for you. Make sure your table data
    structures match exactly, that column names and datatypes match. I'm not your
    mother, clean up your shit.
    dataFrame = pandas dataframe imported from your structured file.
    dbName = full directory path to project database
    tblName = the name of the table you will import to. If you are brave, import to
    tblRaw, but really this is meant for tblMasterTag and tblMasterReceiver'''
conn = sqlite3.connect(dbName)
c = conn.cursor()
dataFrame.to_sql(tblName,con = conn,index = False, if_exists = 'append')
conn.commit()
c.close()
def orionImport(fileName,rxfile,dbName,recName,switch = False, scanTime = None, channels = None, ant_to_rec_dict = None):
'''Function imports raw Sigma Eight orion data.
Text parser uses simple column fixed column widths.
'''
conn = sqlite3.connect(dbName, timeout=30.0)
c = conn.cursor()
study_tags = pd.read_sql('SELECT FreqCode, TagType FROM tblMasterTag',con = conn)
study_tags = study_tags[study_tags.TagType == 'Study'].FreqCode.values
recType = 'orion'
if ant_to_rec_dict != None:
scanTime = 1
channels = 1
# what orion firmware is it? the header row is the key
    o_file = open(fileName, encoding='utf-8')
header = o_file.readline()[:-1] # read first line in file
columns = str.split(header)
o_file.close()
if 'Type' in columns:
# with our data row, extract information using pandas fwf import procedure
telemDat = pd.read_fwf(fileName,colspecs = [(0,12),(13,23),(24,30),(31,35),(36,45),(46,54),(55,60),(61,65)],
names = ['Date','Time','Site','Ant','Freq','Type','Code','Power'],
skiprows = 1,
dtype = {'Date':str,'Time':str,'Site':np.int32,'Ant':str,'Freq':str,'Type':str,'Code':str,'Power':np.float64})
telemDat = telemDat[telemDat.Type != 'STATUS']
telemDat.drop(['Type'], axis = 1, inplace = True)
else:
# with our data row, extract information using pandas fwf import procedure
telemDat = pd.read_fwf(fileName,colspecs = [(0,11),(11,20),(20,26),(26,30),(30,37),(37,42),(42,48)],
names = ['Date','Time','Site','Ant','Freq','Code','Power'],
skiprows = 1,
dtype = {'Date':str,'Time':str,'Site':str,'Ant':str,'Freq':str,'Code':str,'Power':str})
if len(telemDat) > 0:
telemDat['fileName'] = np.repeat(rxfile,len(telemDat)) #Note I'm going back here to the actual file name without the path. Is that OK? I prefer it, but it's a potential source of confusion
telemDat['FreqCode'] = telemDat['Freq'].astype(str) + ' ' + telemDat['Code'].astype(str)
telemDat['timeStamp'] = pd.to_datetime(telemDat['Date'] + ' ' + telemDat['Time'],errors = 'coerce')# create timestamp field from date and time and apply to index
telemDat['ScanTime'] = np.repeat(scanTime,len(telemDat))
telemDat['Channels'] = np.repeat(channels,len(telemDat))
telemDat['RecType'] = np.repeat('orion',len(telemDat))
telemDat = telemDat[telemDat.timeStamp.notnull()]
if len(telemDat) == 0:
print ("Invalid timestamps in raw data, cannot import")
else:
telemDat['Epoch'] = (telemDat['timeStamp'] - datetime.datetime(1970,1,1)).dt.total_seconds()
telemDat.drop (['Date','Time','Freq','Code','Site'],axis = 1, inplace = True)
telemDat = noiseRatio(5.0,telemDat,study_tags)
if ant_to_rec_dict == None:
telemDat.drop(['Ant'], axis = 1, inplace = True)
telemDat['recID'] = np.repeat(recName,len(telemDat))
tuples = zip(telemDat.FreqCode.values,telemDat.recID.values,telemDat.Epoch.values)
index = pd.MultiIndex.from_tuples(tuples, names=['FreqCode', 'recID','Epoch'])
telemDat.set_index(index,inplace = True,drop = False)
telemDat.to_sql('tblRaw',con = conn,index = False, if_exists = 'append')
# recParamLine = [(recName,recType,scanTime,channels,fileName)]
# conn.executemany('INSERT INTO tblReceiverParameters VALUES (?,?,?,?,?)',recParamLine)
conn.commit()
c.close()
else:
for i in ant_to_rec_dict:
site = ant_to_rec_dict[i]
telemDat_sub = telemDat[telemDat.Ant == str(i)]
telemDat_sub['recID'] = np.repeat(site,len(telemDat_sub))
tuples = zip(telemDat_sub.FreqCode.values,telemDat_sub.recID.values,telemDat_sub.Epoch.values)
index = pd.MultiIndex.from_tuples(tuples, names=['FreqCode', 'recID','Epoch'])
telemDat_sub.set_index(index,inplace = True,drop = False)
telemDat_sub.drop(['Ant'], axis = 1, inplace = True)
telemDat_sub.to_sql('tblRaw',con = conn,index = False, if_exists = 'append')
# recParamLine = [(site,recType,scanTime,channels,fileName)]
# conn.executemany('INSERT INTO tblReceiverParameters VALUES (?,?,?,?,?)',recParamLine)
conn.commit()
c.close()
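# Example call (hypothetical file and receiver names; note scanTime and
# channels are forced to 1 when an antenna-to-receiver mapping is supplied):
#
#   orionImport('Data/Training_Files/site01.txt', 'site01.txt',
#               'my_study.db', 'R01', scanTime=1.0, channels=1)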
def lotek_import(fileName,rxfile,dbName,recName,ant_to_rec_dict = None):
''' function imports raw lotek data, reads header data to find receiver parameters
and automatically locates raw telemetry data. Import procedure works with
standardized project database. Database must be created before function can be run'''
'''to do: in future iterations create a check for project database, if project
data base does not exist, throw an exception
inputs:
    fileName = name of raw telemetry data file with full directory and extension
dbName = name of project database with full directory and extension
recName = official receiver name'''
    # declare the workspace - in practice we will identify all files in the directory and iterate over them as part of the function, all this crap passed as parameters
recType = 'lotek'
headerDat = {} # create empty dictionary to hold Lotek header data indexed by line number - to be imported to Pandas dataframe
lineCounter = [] # create empty array to hold line indices
lineList = [] # generate a list of header lines - contains all data we need to write to project set up database
o_file = open(fileName, encoding='utf-8')
counter = 0 # start line counter
line = o_file.readline()[:-1] # read first line in file
lineCounter.append(counter) # append the current line counter to the counter array
lineList.append(line) # append the current line of header data to the line list
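    # Export layout differs by firmware: files whose first line reads
    # 'SRX800 / 800D Information:' carry a header block ending at
    # '** Data Segment **'; other firmware exports, including the 'lotek400'
    # style marked by 'Code_log data:', are handled in the else branch below.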
if line == "SRX800 / 800D Information:":
# find where data begins and header data ends
with o_file as f:
for line in f:
if "** Data Segment **" in line:
counter = counter + 1
                    dataRow = counter + 5 # if this current line signifies the start of the data stream, the data starts five rows down from this
break # break the loop, we have reached our stop point
else:
counter = counter + 1 # if we are still reading header data increase the line counter by 1
lineCounter.append(counter) # append the line counter to the count array
lineList.append(line) # append line of data to the data array
headerDat['idx'] = lineCounter # add count array to dictionary with field name 'idx' as key
headerDat['line'] = lineList # add data line array to dictionary with field name 'line' as key
headerDF = pd.DataFrame.from_dict(headerDat) # create pandas dataframe of header data indexed by row number
headerDF.set_index('idx',inplace = True)
# find scan time
for row in headerDF.iterrows(): # for every header data row
            if 'Scan Time' in row[1][0]: # if the line contains 'Scan Time', we have found the scan time in the document
scanTimeStr = row[1][0][-7:-1] # get the number value from the row
scanTimeSplit = scanTimeStr.split(':') # split the string
scanTime = float(scanTimeSplit[1]) # convert the scan time string to float
break # stop that loop, we done
del row
# find number of channels and create channel dictionary
scanChan = [] # create empty array of channel ID's
channelDict = {} # create empty channel ID: frequency dictionary
counter = 0 # create counter
rows = headerDF.iterrows() # create row iterator
for row in rows: # for every row
if 'Active scan_table:' in row[1][0]: # if the first 18 characters say what that says
idx0 = counter + 2 # channel dictionary data starts two rows from here
while next(rows)[1][0] != '\n': # while the next row isn't empty
counter = counter + 1 # increase the counter, when the row is empty we have reached the end of channels, break loop
idx1 = counter + 1 # get index of last data row
break # break that loop, we done
else:
counter = counter + 1 # if it isn't a data row, increase the counter by 1
del row, rows
channelDat = headerDF.iloc[idx0:idx1] # extract channel dictionary data using rows identified earlier
for row in channelDat.iterrows():
dat = row[1][0]
channel = int(dat[0:4])
frequency = dat[10:17]
channelDict[channel] = frequency
scanChan.append(channel) # extract that channel ID from the data row and append to array
channels = len(scanChan)
conn = sqlite3.connect(dbName, timeout=30.0)
c = conn.cursor()
study_tags = pd.read_sql('SELECT FreqCode, TagType FROM tblMasterTag',con = conn)
study_tags = study_tags[study_tags.TagType == 'Study'].FreqCode.values
# with our data row, extract information using pandas fwf import procedure
#Depending on firmware the data structure will change. This is for xxx firmware. See below for additional firmware configs
# telemDat = pd.read_fwf(fileName,colspecs = [(0,8),(8,18),(18,28),(28,36),(36,51),(51,59)],names = ['Date','Time','ChannelID','TagID','Antenna','Power'],skiprows = dataRow)
# telemDat = telemDat.iloc[:-2] # remove last two rows, Lotek adds garbage at the end
#Master Firmware: Version 9.12.5
telemDat = pd.read_fwf(fileName,colspecs = [(0,8),(8,23),(23,33),(33,41),(41,56),(56,64)],names = ['Date','Time','ChannelID','TagID','Antenna','Power'],skiprows = dataRow)
telemDat = telemDat.iloc[:-2] # remove last two
telemDat['Antenna'] = telemDat['Antenna'].astype(str) #TCS Added this to get dict to line up with data
telemDat['fileName'] = np.repeat(rxfile,len(telemDat)) # Adding the filename into the dataset...drop the path (note this may cause confusion because above we use filename with path. Decide what to do and fix)
def id_to_freq(row,channelDict):
if row[2] in channelDict:
return channelDict[row[2]]
else:
return '888'
if len(telemDat) > 0:
if ant_to_rec_dict == None:
telemDat['Frequency'] = telemDat.apply(id_to_freq, axis = 1, args = (channelDict,))
telemDat = telemDat[telemDat.Frequency != '888']
telemDat = telemDat[telemDat.TagID != 999]
telemDat['FreqCode'] = telemDat['Frequency'].astype(str) + ' ' + telemDat['TagID'].astype(int).astype(str)
telemDat['timeStamp'] = pd.to_datetime(telemDat['Date'] + ' ' + telemDat['Time'])# create timestamp field from date and time and apply to index
telemDat['Epoch'] = (telemDat['timeStamp'] - datetime.datetime(1970,1,1)).dt.total_seconds()
telemDat = noiseRatio(5.0,telemDat,study_tags)
telemDat.drop (['Date','Time','Frequency','TagID','ChannelID','Antenna'],axis = 1, inplace = True)
telemDat['ScanTime'] = np.repeat(scanTime,len(telemDat))
telemDat['Channels'] = np.repeat(channels,len(telemDat))
telemDat['RecType'] = np.repeat(recType,len(telemDat))
telemDat['recID'] = np.repeat(recName,len(telemDat))
telemDat.to_sql('tblRaw',con = conn,index = False, if_exists = 'append')
else:
for ant in ant_to_rec_dict:
site = ant_to_rec_dict[ant]
telemDat_sub = telemDat[telemDat.Antenna == str(ant)]
telemDat_sub['Frequency'] = telemDat_sub.apply(id_to_freq, axis = 1, args = (channelDict,))
telemDat_sub = telemDat_sub[telemDat_sub.Frequency != '888']
telemDat_sub = telemDat_sub[telemDat_sub.TagID != 999]
telemDat_sub['FreqCode'] = telemDat_sub['Frequency'].astype(str) + ' ' + telemDat_sub['TagID'].astype(int).astype(str)
telemDat_sub['timeStamp'] = pd.to_datetime(telemDat_sub['Date'] + ' ' + telemDat_sub['Time'])# create timestamp field from date and time and apply to index
telemDat_sub['Epoch'] = (telemDat_sub['timeStamp'] - datetime.datetime(1970,1,1)).dt.total_seconds()
telemDat_sub = noiseRatio(5.0,telemDat_sub,study_tags)
telemDat_sub.drop (['Date','Time','Frequency','TagID','ChannelID','Antenna'],axis = 1, inplace = True)
telemDat_sub['ScanTime'] = np.repeat(scanTime,len(telemDat_sub))
telemDat_sub['Channels'] = np.repeat(channels,len(telemDat_sub))
telemDat_sub['RecType'] = np.repeat(recType,len(telemDat_sub))
telemDat_sub['recID'] = np.repeat(site,len(telemDat_sub))
telemDat_sub.to_sql('tblRaw',con = conn,index = False, if_exists = 'append')
else:
lotek400 = False
# find where data begins and header data ends
with o_file as f:
for line in f:
if "********************************* Data Segment *********************************" in line:
counter = counter + 1
                    dataRow = counter + 5 # if this current line signifies the start of the data stream, the data starts five rows down from this
break # break the loop, we have reached our stop point
elif line[0:14] == "Code_log data:":
counter = counter + 1
dataRow = counter + 3
lotek400 = True
break
else:
counter = counter + 1 # if we are still reading header data increase the line counter by 1
lineCounter.append(counter) # append the line counter to the count array
lineList.append(line) # append line of data to the data array
headerDat['idx'] = lineCounter # add count array to dictionary with field name 'idx' as key
headerDat['line'] = lineList # add data line array to dictionary with field name 'line' as key
headerDF = pd.DataFrame.from_dict(headerDat) # create pandas dataframe of header data indexed by row number
headerDF.set_index('idx',inplace = True)
# find scan time
for row in headerDF.iterrows(): # for every header data row
            if 'scan time' in row[1][0] or 'Scan time' in row[1][0]: # if the line contains 'Scan Time' (any case), we have found the scan time in the document
scanTimeStr = row[1][0][-7:-1] # get the number value from the row
scanTimeSplit = scanTimeStr.split(':') # split the string
scanTime = float(scanTimeSplit[1]) # convert the scan time string to float
break # stop that loop, we done
del row
# find number of channels and create channel dictionary
scanChan = [] # create empty array of channel ID's
channelDict = {} # create empty channel ID: frequency dictionary
counter = 0 # create counter
rows = headerDF.iterrows() # create row iterator
for row in rows: # for every row
if 'Active scan_table:' in row[1][0]: # if the first 18 characters say what that says
idx0 = counter + 2 # channel dictionary data starts two rows from here
while next(rows)[1][0] != '\n': # while the next row isn't empty
counter = counter + 1 # increase the counter, when the row is empty we have reached the end of channels, break loop
idx1 = counter + 1 # get index of last data row
break # break that loop, we done
else:
counter = counter + 1 # if it isn't a data row, increase the counter by 1
del row, rows
channelDat = headerDF.iloc[idx0:idx1] # extract channel dictionary data using rows identified earlier
for row in channelDat.iterrows():
dat = row[1][0]
channel = int(dat[0:4])
frequency = dat[10:17]
channelDict[channel] = frequency
scanChan.append(channel) # extract that channel ID from the data row and append to array
channels = len(scanChan)
conn = sqlite3.connect(dbName, timeout=30.0)
c = conn.cursor()
study_tags = pd.read_sql('SELECT FreqCode FROM tblMasterTag WHERE TagType == "Study" OR TagType == "Beacon"',con = conn).FreqCode.values
def id_to_freq(row,channelDict):
channel = row['ChannelID']
            if int(channel) in channelDict:
                return channelDict[int(channel)]
else:
return '888'
# with our data row, extract information using pandas fwf import procedure
if lotek400 == False:
telemDat = pd.read_fwf(os.path.join(fileName),colspecs = [(0,5),(5,14),(14,23),(23,31),(31,46),(46,54)],names = ['DayNumber','Time','ChannelID','TagID','Antenna','Power'],skiprows = dataRow)
telemDat = telemDat.iloc[:-2] # remove last two rows, Lotek adds garbage at the end
telemDat.dropna(inplace = True)
if len(telemDat) > 0:
if ant_to_rec_dict == None:
telemDat['Frequency'] = telemDat.apply(id_to_freq, axis = 1, args = (channelDict,))
telemDat = telemDat[telemDat.Frequency != '888']
telemDat = telemDat[telemDat.TagID != 999]
telemDat['FreqCode'] = telemDat['Frequency'].astype(str) + ' ' + telemDat['TagID'].astype(int).astype(str)
telemDat['day0'] = np.repeat(pd.to_datetime("1900-01-01"),len(telemDat))
telemDat['Date'] = telemDat['day0'] + pd.to_timedelta(telemDat['DayNumber'].astype(int), unit='d')
telemDat['Date'] = telemDat.Date.astype('str')
telemDat['timeStamp'] = pd.to_datetime(telemDat['Date'] + ' ' + telemDat['Time'])# create timestamp field from date and time and apply to index
telemDat.drop(['day0','DayNumber'],axis = 1, inplace = True)
telemDat['Epoch'] = (telemDat['timeStamp'] - datetime.datetime(1970,1,1)).dt.total_seconds()
telemDat.drop (['Date','Time','Frequency','TagID','ChannelID','Antenna'],axis = 1, inplace = True)
                    telemDat['fileName'] = np.repeat(rxfile,len(telemDat)) #Made change here as above--taking just the file name and writing it to the dataset. Note naming issue.
                    telemDat['recID'] = np.repeat(recName,len(telemDat))
                    telemDat = noiseRatio(5.0,telemDat,study_tags)
telemDat['ScanTime'] = np.repeat(scanTime,len(telemDat))
telemDat['Channels'] = np.repeat(channels,len(telemDat))
telemDat['RecType'] = np.repeat(recType,len(telemDat))
tuples = zip(telemDat.FreqCode.values,telemDat.recID.values,telemDat.Epoch.values)
index = pd.MultiIndex.from_tuples(tuples, names=['FreqCode', 'recID','Epoch'])
telemDat.set_index(index,inplace = True,drop = False)
telemDat.to_sql('tblRaw',con = conn,index = False, if_exists = 'append')
                else:
                    # process each antenna listed in the antenna-to-receiver
                    # map in its own loop pass, as in the branches above
                    for ant in ant_to_rec_dict:
                        site = ant_to_rec_dict[ant]
                        telemDat_sub = telemDat[telemDat.Antenna == str(ant)]
                        telemDat_sub['Frequency'] = telemDat_sub.apply(id_to_freq, axis = 1, args = (channelDict,))
                        telemDat_sub = telemDat_sub[telemDat_sub.Frequency != '888']
                        telemDat_sub = telemDat_sub[telemDat_sub.TagID != 999]
                        telemDat_sub['FreqCode'] = telemDat_sub['Frequency'].astype(str) + ' ' + telemDat_sub['TagID'].astype(int).astype(str)
                        telemDat_sub['day0'] = np.repeat(pd.to_datetime("1900-01-01"),len(telemDat_sub))
                        telemDat_sub['Date'] = telemDat_sub['day0'] + pd.to_timedelta(telemDat_sub['DayNumber'].astype(int), unit='d')
                        telemDat_sub['Date'] = telemDat_sub.Date.astype('str')
                        telemDat_sub['timeStamp'] = pd.to_datetime(telemDat_sub['Date'] + ' ' + telemDat_sub['Time'])# create timestamp field from date and time and apply to index
                        telemDat_sub.drop(['day0','DayNumber'],axis = 1, inplace = True)
                        telemDat_sub['Epoch'] = (telemDat_sub['timeStamp'] - datetime.datetime(1970,1,1)).dt.total_seconds()
                        telemDat_sub.drop (['Date','Time','Frequency','TagID','ChannelID','Antenna'],axis = 1, inplace = True)
                        telemDat_sub['fileName'] = np.repeat(rxfile,len(telemDat_sub)) #Made change here as above--taking just the file name and writing it to the dataset. Note naming issue.
                        telemDat_sub['recID'] = np.repeat(site,len(telemDat_sub))
                        telemDat_sub = noiseRatio(5.0,telemDat_sub,study_tags)
                        telemDat_sub['ScanTime'] = np.repeat(scanTime,len(telemDat_sub))
                        telemDat_sub['Channels'] = np.repeat(channels,len(telemDat_sub))
                        telemDat_sub['RecType'] = np.repeat(recType,len(telemDat_sub))
                        tuples = zip(telemDat_sub.FreqCode.values,telemDat_sub.recID.values,telemDat_sub.Epoch.values)
                        index = pd.MultiIndex.from_tuples(tuples, names=['FreqCode', 'recID','Epoch'])
                        telemDat_sub.set_index(index,inplace = True,drop = False)
                        telemDat_sub.to_sql('tblRaw',con = conn,index = False, if_exists = 'append')
else:
telemDat = pd.read_fwf(os.path.join(fileName),colspecs = [(0,6),(6,14),(14,22),(22,27),(27,35),(35,41),(41,48),(48,56),(56,67),(67,80)],names = ['DayNumber_Start','StartTime','ChannelID','TagID','Antenna','Power','Data','Events','DayNumber_End','EndTime'],skiprows = dataRow)
telemDat.dropna(inplace = True)
# if len(telemDat) > 0:
# telemDat['Frequency'] = telemDat.apply(id_to_freq, axis = 1, args = (channelDict,))
# telemDat = telemDat[telemDat.Frequency != '888']
# telemDat = telemDat[telemDat.TagID != 999]
# telemDat['FreqCode'] = telemDat['Frequency'].astype(str) + ' ' + telemDat['TagID'].astype(int).astype(str)
# telemDat['day0'] = np.repeat(pd.to_datetime("1900-01-01"),len(telemDat))
# telemDat['Date_Start'] = telemDat['day0'] + pd.to_timedelta(telemDat['DayNumber_Start'].astype(int), unit='d')
# telemDat['Date_Start'] = telemDat.Date_Start.astype('str')
# telemDat['Date_End'] = telemDat['day0'] + pd.to_timedelta(telemDat['DayNumber_End'].astype(int), unit='d')
# telemDat['Date_End'] = telemDat.Date_End.astype('str')
# telemDat['timeStamp'] = pd.to_datetime(telemDat['Date_Start'] + ' ' + telemDat['StartTime'])# create timestamp field from date and time and apply to index
# telemDat['time_end'] = pd.to_datetime(telemDat['Date_End'] + ' ' + telemDat['EndTime'])# create timestamp field from date and time and apply to index
# telemDat.drop(['day0','DayNumber_Start','DayNumber_End'],axis = 1, inplace = True)
# telemDat['duration'] = (telemDat.time_end - telemDat.timeStamp).astype('timedelta64[s]')
# telemDat['events_per_duration'] = telemDat.Events / telemDat.duration
# telemDat['Epoch'] = (telemDat['timeStamp'] - datetime.datetime(1970,1,1)).dt.total_seconds()
# telemDat.drop (['Date_Start','Date_End','time_end','Frequency','TagID','ChannelID','Antenna'],axis = 1, inplace = True)
# telemDat['fileName'] = np.repeat(fileName,len(telemDat))
# telemDat['recID'] = np.repeat(recName,len(telemDat))
# tuples = zip(telemDat.FreqCode.values,telemDat.recID.values,telemDat.Epoch.values)
# index = pd.MultiIndex.from_tuples(tuples, names=['FreqCode', 'recID','Epoch'])
# telemDat.set_index(index,inplace = True,drop = False)
# telemDat.to_sql('tblRaw_Lotek400',con = conn,index = False, if_exists = 'append')
if len(telemDat) > 0:
telemDat['Frequency'] = telemDat.apply(id_to_freq, axis = 1, args = (channelDict,))
telemDat = telemDat[telemDat.Frequency != '888']
telemDat = telemDat[telemDat.TagID != 999]
telemDat['FreqCode'] = telemDat['Frequency'].astype(str) + ' ' + telemDat['TagID'].astype(int).astype(str)
telemDat['day0'] = np.repeat(pd.to_datetime("1900-01-01"),len(telemDat))
telemDat['Date_Start'] = telemDat['day0'] + pd.to_timedelta(telemDat['DayNumber_Start'].astype(int), unit='d')
telemDat['Date_Start'] = telemDat.Date_Start.astype('str')
telemDat['Date_End'] = telemDat['day0'] + pd.to_timedelta(telemDat['DayNumber_End'].astype(int), unit='d')
telemDat['Date_End'] = telemDat.Date_End.astype('str')
telemDat['timeStamp'] = pd.to_datetime(telemDat['Date_Start'] + ' ' + telemDat['StartTime'])# create timestamp field from date and time and apply to index
telemDat['time_end'] = pd.to_datetime(telemDat['Date_End'] + ' ' + telemDat['EndTime'])# create timestamp field from date and time and apply to index
telemDat.drop(['day0','DayNumber_Start','DayNumber_End'],axis = 1, inplace = True)
telemDat['duration'] = (telemDat.time_end - telemDat.timeStamp).astype('timedelta64[s]')
telemDat['events_per_duration'] = telemDat.Events / telemDat.duration
telemDat['Epoch'] = (telemDat['timeStamp'] - datetime.datetime(1970,1,1)).dt.total_seconds()
telemDat.drop (['Date_Start','Date_End','time_end','Frequency','TagID','ChannelID','Antenna'],axis = 1, inplace = True)
telemDat['fileName'] = np.repeat(rxfile,len(telemDat)) #This is the 4th time I'm assigning file to fileName in the saved data table.
telemDat['recID'] = np.repeat(recName,len(telemDat))
telemDat['ScanTime'] = np.repeat(scanTime,len(telemDat))
telemDat['Channels'] = np.repeat(channels,len(telemDat))
telemDat['RecType'] = np.repeat(recType,len(telemDat))
telemDat.drop(['StartTime','Data','Events','EndTime','duration','events_per_duration'], axis = 1, inplace = True)
tuples = zip(telemDat.FreqCode.values,telemDat.recID.values,telemDat.Epoch.values)
index = pd.MultiIndex.from_tuples(tuples, names=['FreqCode', 'recID','Epoch'])
telemDat.set_index(index,inplace = True,drop = False)
telemDat.to_sql('tblRaw',con = conn,index = False, if_exists = 'append')
# add receiver parameters to database
# recParamLine = [(recName,recType,scanTime,channels,fileName)]
# conn.executemany('INSERT INTO tblReceiverParameters VALUES (?,?,?,?,?)',recParamLine)
conn.commit()
c.close()
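# telemDataImport below is the public entry point: it walks a directory of raw
# receiver files and dispatches each one to the matching import routine above.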
def telemDataImport(site,recType,file_directory,projectDB,switch = False, scanTime = None, channels = None, ant_to_rec_dict = None):
tFiles = os.listdir(file_directory)
for f in tFiles:
f_dir = os.path.join(file_directory,f)
rxfile=f
if recType == 'lotek':
lotek_import(f_dir,rxfile,projectDB,site,ant_to_rec_dict)
elif recType == 'orion':
orionImport(f_dir,rxfile,projectDB,site,switch, scanTime, channels, ant_to_rec_dict)
else:
print ("There currently is not an import routine created for this receiver type. Please try again")
print ("File %s imported"%(f))
print ("Raw Telemetry Data Import Completed") | 69.957968 | 498 | 0.575953 | 4,575 | 39,946 | 4.954754 | 0.137486 | 0.038821 | 0.014999 | 0.015529 | 0.665431 | 0.63186 | 0.602303 | 0.584172 | 0.576231 | 0.556423 | 0 | 0.017805 | 0.313874 | 39,946 | 571 | 499 | 69.957968 | 0.809253 | 0.277099 | 0 | 0.587065 | 0 | 0.007463 | 0.142193 | 0.005652 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022388 | false | 0 | 0.059701 | 0 | 0.094527 | 0.00995 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ef9b9608e8bbaa101ff10ed781037b8288c8ad9 | 3,284 | py | Python | cpp/python/libgdf_cffi/tests/test_prefixsum.py | tgravescs/cudf | b8e72d713c801afd6b7d3f4b11711ef90b9f1f51 | [
"Apache-2.0"
] | 1 | 2021-02-23T21:19:08.000Z | 2021-02-23T21:19:08.000Z | cpp/python/libgdf_cffi/tests/test_prefixsum.py | tgravescs/cudf | b8e72d713c801afd6b7d3f4b11711ef90b9f1f51 | [
"Apache-2.0"
] | null | null | null | cpp/python/libgdf_cffi/tests/test_prefixsum.py | tgravescs/cudf | b8e72d713c801afd6b7d3f4b11711ef90b9f1f51 | [
"Apache-2.0"
] | null | null | null | from __future__ import division, print_function
import pytest
from itertools import product
import numpy as np
from libgdf_cffi import ffi, libgdf
from librmm_cffi import librmm as rmm
from libgdf_cffi.tests.utils import (new_column, unwrap_devary,
get_dtype, gen_rand, buffer_as_bits, count_nulls)
params_dtype = [
np.int8,
np.int16,
np.int32,
np.int64,
np.float32,
np.float64,
]
params_sizes = [1, 2, 13, 64, 100, 1000]
def _gen_params():
for t, n in product(params_dtype, params_sizes):
        if t in (np.int8, np.int16) and n > 20:
# to keep data in range
continue
yield t, n
@pytest.mark.parametrize('dtype,nelem', list(_gen_params()))
def test_prefixsum(dtype, nelem):
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, nelem, low=-2, high=2)
else:
data = gen_rand(dtype, nelem)
d_data = rmm.to_device(data)
d_result = rmm.device_array(d_data.size, dtype=d_data.dtype)
col_data = new_column()
gdf_dtype = get_dtype(dtype)
libgdf.gdf_column_view(col_data, unwrap_devary(d_data), ffi.NULL, nelem,
gdf_dtype)
col_result = new_column()
libgdf.gdf_column_view(col_result, unwrap_devary(d_result), ffi.NULL,
nelem, gdf_dtype)
inclusive = True
libgdf.gdf_prefixsum(col_data, col_result, inclusive)
expect = np.cumsum(d_data.copy_to_host())
got = d_result.copy_to_host()
if not inclusive:
expect = expect[:-1]
assert got[0] == 0
got = got[1:]
decimal = 4 if dtype == np.float32 else 6
np.testing.assert_array_almost_equal(expect, got, decimal=decimal)
@pytest.mark.parametrize('dtype,nelem', list(_gen_params()))
def test_prefixsum_masked(dtype, nelem):
if dtype == np.int8:
data = gen_rand(dtype, nelem, low=-2, high=2)
else:
data = gen_rand(dtype, nelem)
mask = gen_rand(np.int8, (nelem + 8 - 1) // 8)
dummy_mask = gen_rand(np.int8, (nelem + 8 - 1) // 8)
d_data = rmm.to_device(data)
d_mask = rmm.to_device(mask)
d_result = rmm.device_array(d_data.size, dtype=d_data.dtype)
d_result_mask = rmm.to_device(dummy_mask)
gdf_dtype = get_dtype(dtype)
extra_dtype_info = ffi.new('gdf_dtype_extra_info*')
extra_dtype_info.time_unit = libgdf.TIME_UNIT_NONE
col_data = new_column()
libgdf.gdf_column_view_augmented(col_data, unwrap_devary(d_data),
unwrap_devary(d_mask), nelem, gdf_dtype,
count_nulls(d_mask, nelem),
extra_dtype_info[0])
col_result = new_column()
libgdf.gdf_column_view(col_result, unwrap_devary(d_result),
unwrap_devary(d_result_mask), nelem, gdf_dtype)
inclusive = True
libgdf.gdf_prefixsum(col_data, col_result, inclusive)
boolmask = buffer_as_bits(mask)[:nelem]
expect = np.cumsum(data[boolmask])
got = d_result.copy_to_host()[boolmask]
if not inclusive:
expect = expect[:-1]
assert got[0] == 0
got = got[1:]
decimal = 4 if dtype == np.float32 else 6
np.testing.assert_array_almost_equal(expect, got, decimal=decimal)
| 29.061947 | 77 | 0.640682 | 472 | 3,284 | 4.177966 | 0.220339 | 0.022819 | 0.039554 | 0.032454 | 0.627789 | 0.556288 | 0.474138 | 0.453347 | 0.453347 | 0.427992 | 0 | 0.02449 | 0.253959 | 3,284 | 112 | 78 | 29.321429 | 0.780408 | 0.013094 | 0 | 0.444444 | 0 | 0 | 0.013296 | 0.006494 | 0 | 0 | 0 | 0 | 0.049383 | 1 | 0.037037 | false | 0 | 0.08642 | 0 | 0.123457 | 0.012346 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4efabc6bb17436720e7713819341d82858e58c26 | 1,889 | py | Python | tests/contrib_django/test_types.py | jhgg/graphene | 67904e8329de3d69fec8c82ba8c3b4fe598afa8e | [
"MIT"
] | 1 | 2021-04-28T21:35:01.000Z | 2021-04-28T21:35:01.000Z | tests/contrib_django/test_types.py | jhgg/graphene | 67904e8329de3d69fec8c82ba8c3b4fe598afa8e | [
"MIT"
] | null | null | null | tests/contrib_django/test_types.py | jhgg/graphene | 67904e8329de3d69fec8c82ba8c3b4fe598afa8e | [
"MIT"
] | null | null | null | from py.test import raises
from collections import namedtuple
from pytest import raises
from graphene.core.fields import (
Field,
StringField,
)
from graphql.core.type import (
GraphQLObjectType,
GraphQLInterfaceType
)
from graphene import Schema
from graphene.contrib.django.types import (
DjangoNode,
DjangoInterface
)
from .models import Reporter, Article
from tests.utils import assert_equal_lists
class Character(DjangoInterface):
'''Character description'''
class Meta:
model = Reporter
class Human(DjangoNode):
'''Human description'''
def get_node(self, id):
pass
class Meta:
model = Article
schema = Schema()
def test_django_interface():
assert DjangoNode._meta.interface is True
def test_pseudo_interface():
object_type = Character.internal_type(schema)
assert Character._meta.interface is True
assert isinstance(object_type, GraphQLInterfaceType)
assert Character._meta.model == Reporter
assert_equal_lists(
object_type.get_fields().keys(),
['articles', 'firstName', 'lastName', 'email', 'pets', 'id']
)
def test_interface_resolve_type():
resolve_type = Character.resolve_type(schema, Human(object()))
assert isinstance(resolve_type, GraphQLObjectType)
def test_object_type():
object_type = Human.internal_type(schema)
fields_map = Human._meta.fields_map
assert Human._meta.interface is False
assert isinstance(object_type, GraphQLObjectType)
assert object_type.get_fields() == {
'headline': fields_map['headline'].internal_field(schema),
'id': fields_map['id'].internal_field(schema),
'reporter': fields_map['reporter'].internal_field(schema),
'pubDate': fields_map['pub_date'].internal_field(schema),
}
assert object_type.get_interfaces() == [DjangoNode.internal_type(schema)]
| 24.855263 | 77 | 0.71784 | 215 | 1,889 | 6.093023 | 0.297674 | 0.061069 | 0.058015 | 0.029008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.181578 | 1,889 | 75 | 78 | 25.186667 | 0.847348 | 0.020646 | 0 | 0.037736 | 0 | 0 | 0.047308 | 0 | 0 | 0 | 0 | 0 | 0.207547 | 1 | 0.09434 | false | 0.018868 | 0.169811 | 0 | 0.339623 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4efd82d7035e7bb99b1ee82af5fcb6a562111dd9 | 10,225 | py | Python | landia/runner.py | pistarlab/simpleland | e1d5f65ef6ffaf9e32536d46aa3a2526d3b57801 | [
"MIT"
] | 4 | 2021-08-19T21:41:34.000Z | 2022-02-03T00:44:43.000Z | landia/runner.py | pistarlab/simpleland | e1d5f65ef6ffaf9e32536d46aa3a2526d3b57801 | [
"MIT"
] | null | null | null | landia/runner.py | pistarlab/simpleland | e1d5f65ef6ffaf9e32536d46aa3a2526d3b57801 | [
"MIT"
] | null | null | null |
import argparse
import json
import logging
import threading
from pyinstrument import Profiler
from landia.client import GameClient
from landia.config import GameDef, PlayerDefinition, ServerConfig
from landia.content import Content
from landia.common import StateDecoder, StateEncoder
from landia.registry import load_game_content, load_game_def
from landia.renderer import Renderer
from landia.utils import gen_id
import traceback
from landia import gamectx
from landia.server import GameUDPServer, UDPHandler
import signal
import sys
LOG_LEVELS = {
'critical': logging.CRITICAL,
'error': logging.ERROR,
'warn': logging.WARNING,
'warning': logging.WARNING,
'info': logging.INFO,
'debug': logging.DEBUG
}
def get_game_def(
game_id,
enable_server,
remote_client,
port,
tick_rate=None,
    step_mode=False,
config_filename="base_config.json",
content_overrides={}
) -> GameDef:
game_def = load_game_def(game_id, config_filename, content_overrides)
game_def.server_config.enabled = enable_server
game_def.server_config.hostname = '0.0.0.0'
game_def.server_config.port = port
game_def.game_config.step_mode = step_mode
    game_def.game_config.config_filename = config_filename
# Game
game_def.game_config.tick_rate = tick_rate
game_def.game_config.client_only_mode = not enable_server and remote_client
return game_def
def get_player_def(
enable_client,
client_id,
remote_client,
hostname,
port,
player_type,
player_name=None,
resolution=None,
fps=None,
render_shapes=None,
is_human=True,
draw_grid = False,
tile_size=16,
debug_render_bodies=False,
view_type=0,
sound_enabled = True,
show_console = True,
enable_resize=False,
include_state_observation = False,
render_to_screen=True,
disable_hud = False) -> PlayerDefinition:
player_def = PlayerDefinition()
player_def.client_config.player_type = player_type
player_def.client_config.client_id = client_id
player_def.client_config.player_name=player_name
player_def.client_config.enabled = enable_client
player_def.client_config.server_hostname = hostname
player_def.client_config.server_port = port
player_def.client_config.frames_per_second = fps
player_def.client_config.is_remote = remote_client
player_def.client_config.is_human = is_human
player_def.client_config.include_state_observation = include_state_observation
player_def.renderer_config.resolution = resolution
player_def.renderer_config.render_shapes = render_shapes
player_def.renderer_config.draw_grid = draw_grid
player_def.renderer_config.tile_size = tile_size
player_def.renderer_config.debug_render_bodies = debug_render_bodies
player_def.renderer_config.view_type = view_type
    player_def.renderer_config.sound_enabled = sound_enabled
    player_def.renderer_config.show_console = show_console
player_def.renderer_config.enable_resize = enable_resize
player_def.renderer_config.render_to_screen = render_to_screen
player_def.renderer_config.disable_hud = disable_hud
if player_type == "admin":
        player_def.renderer_config.view_port_scale = (0.7, 0.7)
player_def.renderer_config.border_h_offset = 0.02
player_def.renderer_config.info_filter = set(['label'])
# else:
# player_def.renderer_config.view_port_scale = (0.7,0.7)
# player_def.renderer_config.border_h_offset = 0
# player_def.renderer_config.info_filter = set(['label'])
return player_def
def get_arguments(override_args=None):
parser = argparse.ArgumentParser()
# Server
parser.add_argument("--enable_server", action="store_true", help="Accepts remote clients")
# Client
parser.add_argument("--enable_client", action="store_true", help="Run Client")
parser.add_argument("--remote_client", action="store_true", help="client uses server")
parser.add_argument("--resolution", default="800x600", help="resolution eg, [f,640x480]")
parser.add_argument("--hostname", default="localhost", help="hostname or ip, default is localhost")
parser.add_argument("--client_id", default=gen_id(), help="user id, default is random")
parser.add_argument("--render_shapes", action='store_true', help="render actual shapes")
parser.add_argument("--fps", default=60, type=int, help="fps")
parser.add_argument("--player_type", default="default", type=str, help="Player type ")
parser.add_argument("--view_type", default=0, type=int, help="NOT USED at moment: View type (0=perspective, 1=world)")
parser.add_argument("--tile_size", default=16, type=int, help="not = no grid")
parser.add_argument("--debug_render_bodies", action="store_true", help=" render")
parser.add_argument("--disable_sound", action="store_true", help="disable_sound")
parser.add_argument("--draw_grid", action="store_true", help="draw_grid")
parser.add_argument("--show_console", action="store_true", help="Show on screen info")
parser.add_argument("--disable_hud", action="store_true", help="Disable all screen printing")
parser.add_argument("--enable_resize", action="store_true", help="Enable Screen Resize")
parser.add_argument("--player_name",help="player name")
parser.add_argument("--config_filename",default="base_config.json")
# used for both client and server
parser.add_argument("--port", default=10001, help="the port the server is running on")
# Game Options
parser.add_argument("--enable_profiler", action="store_true", help="Enable Performance profiler")
parser.add_argument("--tick_rate", default=60, type=int, help="tick_rate")
parser.add_argument("--game_id", default="survival", help="id of game")
parser.add_argument("--content_overrides", default="{}", type=str,help="Content overrides in JSON format Eg: --content_overrides='{\"maps\":{\"main\":{\"static_layers\":[\"map_layer_test.txt\"]}}}'")
parser.add_argument("--log_level",default="info",help=", ".join(list(LOG_LEVELS.keys())),type=str)
parser.add_argument("--step_mode", action="store_true", help="Step mode (requires input for game time to proceed)")
return parser.parse_args(override_args)
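# Example invocations (hypothetical):
#
#   python -m landia.runner --enable_server --game_id survival --port 10001
#   python -m landia.runner --enable_client --remote_client --hostname myhost --port 10001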
def main(override_args=None):
args = get_arguments(override_args)
run(args)
def run(args):
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.getLogger().setLevel(LOG_LEVELS.get(args.log_level))
if not args.enable_server and not args.enable_client and not args.remote_client:
args.enable_client = True
if args.enable_server and args.enable_client and args.remote_client:
print("Error: Server and Remote Client cannot be started from the same process. Please run seperately.")
exit(1)
profiler = None
if args.enable_profiler:
print("Profiling Enabled..")
profiler = Profiler()
profiler.start()
game_def = get_game_def(
game_id=args.game_id,
enable_server=args.enable_server,
remote_client=args.remote_client,
port=args.port,
tick_rate=args.tick_rate,
step_mode = args.step_mode,
config_filename=args.config_filename,
content_overrides = json.loads(args.content_overrides),
)
# Get resolution
if args.enable_client and args.resolution == 'f':
import pygame
pygame.init()
infoObject = pygame.display.Info()
resolution = (infoObject.current_w, infoObject.current_h)
else:
res_string = args.resolution.split("x")
resolution = (int(res_string[0]), int(res_string[1]))
# player_meta_st = f"{{{args.player_meta}}}"
# print(player_meta_st)
# player_meta = json.loads( player_meta_st)
player_def = get_player_def(
enable_client=args.enable_client,
client_id=str(args.client_id),
remote_client=args.remote_client,
hostname=args.hostname,
port=args.port,
render_shapes=args.render_shapes,
resolution=resolution,
fps=args.fps,
draw_grid = args.draw_grid,
player_type=args.player_type,
tile_size=args.tile_size,
debug_render_bodies = args.debug_render_bodies,
view_type = args.view_type,
sound_enabled= not args.disable_sound,
show_console= args.show_console,
enable_resize = args.enable_resize,
disable_hud = args.disable_hud,
player_name=args.player_name
)
content: Content = load_game_content(game_def)
gamectx.initialize(
game_def,
content=content)
if player_def.client_config.enabled:
renderer = Renderer(
config = player_def.renderer_config,
asset_bundle=content.get_asset_bundle())
client = GameClient(
renderer=renderer,
config=player_def.client_config)
gamectx.add_local_client(client)
server = None
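    # graceful_exit runs on Ctrl+C (SIGINT) and at normal teardown: it closes
    # the UDP server if one was started and prints the profiler report when
    # profiling is enabled.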
def graceful_exit(signum=None, frame=None):
print("Shutting down")
if game_def.server_config.enabled:
# server.shutdown()
server.server_close()
if args.enable_profiler:
profiler.stop()
print(profiler.output_text(unicode=True, color=True))
exit()
signal.signal(signal.SIGINT, graceful_exit)
try:
if game_def.server_config.enabled:
server = GameUDPServer(
conn=(game_def.server_config.hostname, game_def.server_config.port),
config=game_def.server_config)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
print("Server started at {} port {}".format(game_def.server_config.hostname, game_def.server_config.port))
gamectx.run()
except (Exception,KeyboardInterrupt) as e:
print(traceback.format_exc())
print(e)
finally:
graceful_exit()
if __name__ == "__main__":
main()
| 36.3879 | 203 | 0.696235 | 1,300 | 10,225 | 5.183846 | 0.177692 | 0.046743 | 0.065588 | 0.061433 | 0.190681 | 0.073601 | 0.05995 | 0.049859 | 0.037691 | 0.037691 | 0 | 0.005861 | 0.199022 | 10,225 | 280 | 204 | 36.517857 | 0.816972 | 0.037066 | 0 | 0.055814 | 0 | 0 | 0.131448 | 0.004477 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027907 | false | 0 | 0.083721 | 0 | 0.125581 | 0.037209 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4eff13c2701b38fb56a685cebe6a17525d5923cc | 3,734 | py | Python | ships.py | prime-ffxiv/primebot | 7f30815d06f69bc0f61aeff6dd3a0a19f4002657 | [
"MIT"
] | null | null | null | ships.py | prime-ffxiv/primebot | 7f30815d06f69bc0f61aeff6dd3a0a19f4002657 | [
"MIT"
] | 10 | 2021-07-13T03:39:52.000Z | 2021-07-14T05:47:20.000Z | ships.py | prime-ffxiv/primebot | 7f30815d06f69bc0f61aeff6dd3a0a19f4002657 | [
"MIT"
] | 1 | 2021-07-13T03:24:15.000Z | 2021-07-13T03:24:15.000Z | import io
import datetime
class Vehicle:
def __init__(self, name, rank, max_rank=50):
self.name = name
self.rank = rank
self.max_rank = max_rank
self.voyage = None
def add_voyage(self, voyage):
self.voyage = voyage
def delete_voyage(self):
self.voyage = None
def rename(self, name):
self.name = name
def update_rank(self, rank):
self.rank = rank
def __str__(self):
if self.voyage is None:
return "Ship: {} -- Rank: {}/{} -- Docked".format(self.name, self.rank, self.max_rank)
else:
# datetime formatting courtesy of
# https://stackoverflow.com/a/538687
time_left = self.voyage.end_time - datetime.datetime.now()
            time_left = str(time_left).split('.')[0]
return "Ship: {} -- Rank: {}/{} -- {} -- Voyage complete in {}".format(\
self.name, self.rank, self.max_rank, self.voyage.purpose, time_left)
class Voyage:
def __init__(self, start_time=None, end_time=None, time_delta=None, purpose=None):
if (start_time is not None) and (end_time is not None) and \
(time_delta is not None):
# check that the times given agree with elapsed time
if end_time - start_time != time_delta:
raise ValueError("Start/end times do not agree with voyage length")
self.start_time = start_time
self.end_time = end_time
self.time_delta = time_delta
elif (start_time is not None) and (end_time is not None):
self.start_time = start_time
self.end_time = end_time
self.time_delta = self.end_time - self.start_time
elif (start_time is not None) and (time_delta is not None):
self.start_time = start_time
self.time_delta = time_delta
self.end_time = self.start_time + self.time_delta
elif (time_delta is not None) and (end_time is not None):
self.time_delta = time_delta
self.end_time = end_time
self.start_time = self.end_time - self.time_delta
elif end_time is not None:
self.time_delta = None
self.start_time = None
self.end_time = end_time
else:
raise ValueError("Not enough information provided to determine voyage start/end time")
if purpose is not None:
self.purpose = purpose
else:
self.purpose = "n/a"
def __str__(self):
return "Start time: {}, End time: {}, Voyage_length: {}, Purpose: {}".format(self.start_time, \
self.end_time, self.time_delta, self.purpose)
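# Usage sketch (hypothetical times): any two of start/end/length determine the
# third.
#
#   v = Voyage(start_time=datetime.datetime(2021, 7, 1, 12, 0),
#              time_delta=datetime.timedelta(hours=18), purpose="Sea of Clouds")
#   assert v.end_time == datetime.datetime(2021, 7, 2, 6, 0)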
class VehicleList:
    def __init__(self, airships=None, submersibles=None):
        # None defaults avoid sharing one mutable list across every instance
        self.airships = airships if airships is not None else []
        self.submersibles = submersibles if submersibles is not None else []
def update_airships(self, airships):
self.airships = airships
def update_submersibles(self, submersibles):
self.submersibles = submersibles
def clear(self):
self.airships = []
self.submersibles = []
def __str__(self):
out_buf = io.StringIO()
out_buf.write(u"```")
out_buf.write(u"Airships:\n")
for airship in self.airships:
out_buf.write(str(airship))
out_buf.write(u"\n")
out_buf.write(u"\n")
out_buf.write(u"Submersibles:\n")
for submersible in self.submersibles:
out_buf.write(str(submersible))
out_buf.write(u"\n")
out_buf.write(u"\n")
out_buf.write(u"```")
out_buf.seek(0)
out_str = str(out_buf.read()).rstrip()
return out_str
def print_list(self):
print(self)
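
# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The ship name, rank, and voyage details below are made up.
if __name__ == "__main__":
    ship = Vehicle("Odyssey", rank=12)
    voyage = Voyage(start_time=datetime.datetime.now(),
                    time_delta=datetime.timedelta(hours=18),
                    purpose="Exploration")
    ship.add_voyage(voyage)  # end_time is derived from start_time + time_delta
    fleet = VehicleList(airships=[ship])
    fleet.print_list()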
| 39.723404 | 103 | 0.594537 | 486 | 3,734 | 4.349794 | 0.17284 | 0.069536 | 0.046831 | 0.045412 | 0.349101 | 0.328288 | 0.305582 | 0.299432 | 0.209555 | 0.162252 | 0 | 0.003823 | 0.299411 | 3,734 | 93 | 104 | 40.150538 | 0.804281 | 0.031602 | 0 | 0.363636 | 0 | 0 | 0.084164 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.159091 | false | 0 | 0.022727 | 0.011364 | 0.261364 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4eff140b6ab6cbf7d2f94869396489708d4d2a0a | 10,735 | py | Python | preql/core/pql_types.py | otherJL0/Preql | 958a8dfd3a040f9c40fa394a8bfc3295f32a3019 | [
"MIT"
] | null | null | null | preql/core/pql_types.py | otherJL0/Preql | 958a8dfd3a040f9c40fa394a8bfc3295f32a3019 | [
"MIT"
] | null | null | null | preql/core/pql_types.py | otherJL0/Preql | 958a8dfd3a040f9c40fa394a8bfc3295f32a3019 | [
"MIT"
] | null | null | null | from collections import defaultdict, deque
from contextlib import suppress
from dataclasses import field
from datetime import datetime
from decimal import Decimal
from typing import Union
import arrow
import runtype
from runtype.typesystem import TypeSystem
from preql.utils import dataclass
from .base import Object
global_methods = {}
class Id:
    def __init__(self, *parts):
        assert all(isinstance(p, str) for p in parts), parts
        self.parts = parts

    def __repr__(self):
        return 'Id(%s)' % '.'.join(self.parts)

    def __str__(self):
        # Prevents accidents!
        raise Exception("Operation not allowed!")

    def __hash__(self):
        return hash(tuple(self.parts))

    def __eq__(self, other):
        if not isinstance(other, Id):
            return NotImplemented
        return self.parts == other.parts

    @property
    def repr_name(self):
        return self.parts[-1]

    @property
    def name(self):
        return self.parts[-1]

    def lower(self):
        return Id(*[p.lower() for p in self.parts])


def _repr_type_elem(t, depth):
    return _repr_type(t, depth - 1) if isinstance(t, Type) else repr(t)


def _repr_type(t, depth=2):
    if t.elems:
        if depth > 0:
            if isinstance(t.elems, dict):
                elems = '[%s]' % ', '.join(
                    f'{k}: {_repr_type_elem(v, depth)}' for k, v in t.elems.items()
                )
            else:
                elems = '[%s]' % ', '.join(_repr_type_elem(e, depth) for e in t.elems)
        else:
            elems = '[...]'
    else:
        elems = ''
    return f'{t._typename_with_q}{elems}'
ITEM_NAME = 'item'
@dataclass
class Type(Object):
    typename: str
    supertypes: frozenset
    elems: Union[tuple, dict] = field(hash=False, default_factory=dict)
    options: dict = field(hash=False, compare=False, default_factory=dict)
    proto_attrs: dict = field(
        hash=False, compare=False, default_factory=lambda: dict(global_methods)
    )
    _nullable: bool = field(default_factory=bool)

    @property
    def _typename_with_q(self):
        n = '?' if self._nullable else ''
        return f'{self.typename}{n}'

    @property
    def elem(self):
        if isinstance(self.elems, dict):
            (elem,) = self.elems.values()
        else:
            (elem,) = self.elems
        return elem

    def as_nullable(self):
        # assert not self.maybe_null()
        return self.replace(_nullable=True)

    def maybe_null(self):
        return self._nullable or self is T.nulltype

    def supertype_chain(self):
        res = {t2 for t1 in self.supertypes for t2 in t1.supertype_chain()}
        assert self not in res
        return res | {self}

    def __eq__(self, other, memo=None):
        "Repetitive nested equalities are assumed to be true"
        if not isinstance(other, Type):
            return False
        if memo is None:
            memo = set()
        a, b = id(self), id(other)
        if (a, b) in memo or (b, a) in memo:
            return True
        memo.add((a, b))
        l1 = self.elems if isinstance(self.elems, dict) else dict(enumerate(self.elems))
        l2 = (
            other.elems
            if isinstance(other.elems, dict)
            else dict(enumerate(other.elems))
        )
        if len(l1) != len(l2):
            return False
        res = self.typename == other.typename and all(
            k1 == k2 and v1.__eq__(v2, memo)
            for (k1, v1), (k2, v2) in zip(l1.items(), l2.items())
        )
        return res

    @property
    def elem_types(self):
        if isinstance(self.elems, dict):
            return self.elems.values()
        return self.elems

    def issubtype(self, t):
        assert isinstance(t, Type), t
        if t.typename == 'union':  # XXX a little hacky. Change to issupertype?
            return any(self.issubtype(t2) for t2 in t.elem_types)
        if self is T.nulltype:
            if t.maybe_null():
                return True
        # TODO zip should be aware of lengths
        if t.typename in (s.typename for s in self.supertype_chain()):
            return all(
                e1.issubtype(e2) for e1, e2 in zip(self.elem_types, t.elem_types)
            )
        return False

    def __le__(self, other):
        return self.issubtype(other)

    def __getitem__(self, elems):
        # TODO assert elems = (any,)
        assert not isinstance(elems, tuple), (self, elems)
        elems = {ITEM_NAME: elems}
        return self.replace(elems=elems)

    def __call__(self, elems=None, **options):
        return self.replace(
            elems=elems or self.elems,
            proto_attrs=dict(self.proto_attrs),
            options={**self.options, **options},
        )

    def __repr__(self):
        # TODO Move to dp_inst?
        return _repr_type(self)

    def get_attr(self, attr):
        if self is T.unknown:
            return self
        if isinstance(self.elems, dict):
            with suppress(KeyError):
                return self.elems[attr]
        with suppress(KeyError):
            return self.proto_attrs[attr]
        return super().get_attr(attr)

    def all_attrs(self):
        # return {'elems': self.elems}
        if isinstance(self.elems, dict):
            return self.elems
        return {}

    def repr(self):
        return repr(self)

    def __or__(self, other):
        return T.union[self, other]


class TupleType(Type):
    def __getitem__(self, elems):
        assert not self.elems
        return self.replace(elems=tuple(elems))

    def __or__(self, other):
        return self.replace(elems=self.elems + (other,))


class SumType(TupleType):
    def issubtype(self, other):
        return all(t.issubtype(other) for t in self.elem_types)


class ProductType(TupleType):
    def issubtype(self, other):
        return all(a.issubtype(b) for a, b in zip(self.elem_types, other.elem_types))


class PhantomType(Type):
    def issubtype(self, other):
        return super().issubtype(other) or self.elem.issubtype(other)


class TypeDict(dict):
    def _register(self, name, supertypes=(), elems=(), type_class=Type):
        t = type_class(name, frozenset(supertypes), elems)
        assert name not in self
        T[name] = t
        dict.__setattr__(self, name, t)

    def __setattr__(self, name, args):
        if isinstance(args, tuple):
            self._register(name, *args)
        else:
            self._register(name, args)
T = TypeDict()
T.any = ()
T.unknown = [T.any]
# T.union = [T.any]
T._register('union', type_class=SumType)
T.type = [T.any]
Type.type = T.type
T.object = [T.any]
T.nulltype = [T.object]
T.primitive = [T.object]
T.text = [T.primitive]
T._rich = [T.text]
T.string = [T.text]
T.number = [T.primitive]
T.int = [T.number]
T.float = [T.number]
T.bool = [T.primitive] # number?
T.decimal = [T.number]
# TODO datetime vs timestamp !
T.timestamp = [T.primitive] # struct?
T.datetime = [T.primitive] # struct?
T.date = [T.primitive] # struct?
T.time = [T.primitive] # struct?
T.container = [T.object]
T.struct = [T.container]
T.row = [T.struct]
# T.collection = [T.container], {}
# T.table = [T.container], {}
T._register('table', [T.container], {})
T.list = [T.table], {ITEM_NAME: T.any}
T.set = [T.table], {ITEM_NAME: T.any}
T.t_id = [T.primitive], (T.table,)
T.t_relation = [T.primitive], (T.any,) # t_id?
# XXX sequence instead of container?
T._register('aggregated', [T.container], (T.any,), type_class=PhantomType)
T._register('projected', [T.container], (T.any,), type_class=PhantomType)
T._register('aggregate_result', [T.object], (T.any,), type_class=PhantomType)
T.json = [T.container], (T.any,)
T.json_array = [T.json]
T._register('function', [T.object], type_class=TupleType)
T.property = [T.object]
T.module = [T.object]
T.signal = [T.object]
# -----------
T.Exception = [T.signal]
T.IOError = [T.Exception]
T.CodeError = [T.Exception]
T.EvalError = [T.Exception]
# CodeError - Failures due to inherently unexecutable code
T.SyntaxError = [T.CodeError]
T.NotImplementedError = [T.CodeError]
# IOError - All errors resulting directly from attempts at I/O communication
T.FileError = [T.IOError]
T.DbError = [T.IOError]
T.DbQueryError = [T.DbError]
T.DbConnectionError = [T.DbError]
# EvalError - Errors that arise only when evaluating the code (either at run-time or compile-time)
T.TypeError = [T.EvalError]
T.ValueError = [T.EvalError]
T.NameError = [T.EvalError]
T.JoinError = [T.EvalError]
T.CompileError = [T.EvalError]
T.AttributeError = [T.NameError]
T.AssertError = [T.ValueError]
T.IndexError = [T.ValueError]
T.CastError = [T.TypeError]
T.ImportError = [T.Exception]
def _get_subtypes():
    d = defaultdict(list)
    for t in T.values():
        for st in t.supertypes:
            d[st].append(t)
    return dict(d)
subtypes = _get_subtypes()
# -------------
_python_type_to_sql_type = {
    bool: T.bool,
    int: T.int,
    float: T.float,
    str: T.string,
    datetime: T.timestamp,
    Decimal: T.decimal,
    arrow.Arrow: T.timestamp,  # datetime?
}


def from_python(t):
    # TODO throw proper exception if this fails
    return _python_type_to_sql_type[t]
def common_type(t1, t2):
    "Returns a type which is the closest ancestor of both t1 and t2"
    v1 = {t1}
    v2 = {t2}
    o1 = deque([t1])
    o2 = deque([t2])
    while o1 or o2:
        # Guard each pop: one frontier can empty before the other (e.g. a
        # shallow type vs. a deep one), which would otherwise raise an
        # IndexError on popleft() before the common ancestor is found.
        if o1:
            x1 = o1.popleft()
            v1.add(x1)
            if x1 in v2:
                return x1
            o1 += [t for t in x1.supertypes if t not in v1]
        if o2:
            x2 = o2.popleft()
            v2.add(x2)
            if x2 in v1:
                return x2
            o2 += [t for t in x2.supertypes if t not in v2]
    assert False
def union_types(types):
    # TODO flatten unions, remove duplications and subtypes
    ts = set(types)
    if len(ts) > 1:
        elem_type = T.union(elems=tuple(ts))
    else:
        (elem_type,) = ts
    return elem_type
class ProtoTS(TypeSystem):
    def issubclass(self, t1, t2):
        if t2 is object:
            return True
        is_t2 = isinstance(t2, Type)
        if isinstance(t1, Type):
            return is_t2 and t1 <= t2
        elif is_t2:
            return False
        # Regular Python types
        return runtype.issubclass(t1, t2)

    default_type = object


class TS_Preql(ProtoTS):
    def get_type(self, obj):
        try:
            return obj.type
        except AttributeError:
            return type(obj)


class TS_Preql_subclass(ProtoTS):
    def get_type(self, obj):
        # Preql objects
        if isinstance(obj, Type):
            return obj
        # Regular Python
        return type(obj)
dp_type = runtype.Dispatch(TS_Preql_subclass())
dp_inst = runtype.Dispatch(TS_Preql())
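
# --- Illustrative sketch (added; not part of the original module). Exercises
# the type registry built above; outputs assume the registration order shown.
if __name__ == "__main__":
    print(common_type(T.int, T.float))  # expected: number (closest shared ancestor)
    print(T.int <= T.number)            # True: int is a subtype of number
    print(from_python(bool))            # maps Python's bool to T.bool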
| 24.453303 | 98 | 0.604844 | 1,446 | 10,735 | 4.36722 | 0.183956 | 0.029929 | 0.006334 | 0.016627 | 0.152336 | 0.094378 | 0.0654 | 0.040222 | 0.013618 | 0 | 0 | 0.009701 | 0.270238 | 10,735 | 438 | 99 | 24.509132 | 0.7964 | 0.085794 | 0 | 0.135762 | 0 | 0 | 0.030202 | 0.002727 | 0 | 0 | 0 | 0.002283 | 0.02649 | 1 | 0.13245 | false | 0 | 0.039735 | 0.059603 | 0.407285 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4eff47851c2ce7c0c90b9adfdf5d2fd11cd451cc | 947 | py | Python | setup.py | MichaelPHartmann/iexfinance-py | 9d91002a45747a78d47d3ff364d9ebf0f11a6fba | [
"Apache-2.0"
] | null | null | null | setup.py | MichaelPHartmann/iexfinance-py | 9d91002a45747a78d47d3ff364d9ebf0f11a6fba | [
"Apache-2.0"
] | null | null | null | setup.py | MichaelPHartmann/iexfinance-py | 9d91002a45747a78d47d3ff364d9ebf0f11a6fba | [
"Apache-2.0"
] | null | null | null | """
Version naming has been simplified in 2.0 going forward.
Production releases will be MAJOR.MINOR format.
Increments to major are reserved for significant updates.
Increments to minor are available for all new versions
Test releases are MAJOR.MINOR.PATCH format.
"""
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name = "FinMesh",
version = "2.2",
author = "Michael Hartmann",
author_email = "michaelpeterhartmann94@gmail.com.com",
description = "A Python wrapper to bring together various financial APIs.",
long_description = long_description,
long_description_content_type = "text/markdown",
keywords = "Finance, API, DCF, IEX, EDGAR, FRED, interest rates",
url = "https://finmesh.readthedocs.io/",
packages=setuptools.find_packages(),
classifiers = [
"Programming Language :: Python :: 3"
],
python_requires = ">3.6",
)
| 31.566667 | 79 | 0.706441 | 117 | 947 | 5.641026 | 0.726496 | 0.090909 | 0.057576 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011658 | 0.184794 | 947 | 29 | 80 | 32.655172 | 0.843264 | 0.275607 | 0 | 0 | 0 | 0 | 0.389381 | 0.053097 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f601696eb01af41cfb6bfff7cab1149a7ddd141f | 1,433 | py | Python | universum/analyzers/pylint.py | o-andrieiev/Universum | 71c9494f59dbac58a378d29eb31f8724964c8067 | [
"BSD-2-Clause"
] | 21 | 2019-01-07T03:59:54.000Z | 2021-12-13T10:51:54.000Z | universum/analyzers/pylint.py | o-andrieiev/Universum | 71c9494f59dbac58a378d29eb31f8724964c8067 | [
"BSD-2-Clause"
] | 407 | 2019-01-29T11:50:29.000Z | 2022-03-24T15:09:20.000Z | universum/analyzers/pylint.py | o-andrieiev/Universum | 71c9494f59dbac58a378d29eb31f8724964c8067 | [
"BSD-2-Clause"
] | 14 | 2019-01-08T07:37:13.000Z | 2022-02-03T17:00:19.000Z | import argparse
import json
from typing import List
from . import utils
def pylint_argument_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(description="Pylint analyzer")
    parser.add_argument("--rcfile", dest="rcfile", type=str, help="Specify a configuration file.")
    utils.add_python_version_argument(parser)
    return parser


@utils.sys_exit
@utils.analyzer(pylint_argument_parser())
def main(settings: argparse.Namespace) -> List[utils.ReportData]:
    cmd = [f"python{settings.version}", '-m', 'pylint', '-f', 'json']
    if settings.rcfile:
        cmd.append(f'--rcfile={settings.rcfile}')
    cmd.extend(settings.file_list)
    output, _ = utils.run_for_output(cmd)
    return pylint_output_parser(output)
def pylint_output_parser(output: str) -> List[utils.ReportData]:
    result: List[utils.ReportData] = []
    for data in json.loads(output):
        # pylint has its own escape rules for json output of "message" values.
        # it uses cgi.escape lib and escapes symbols <>&
        # (the entity strings below were collapsed into no-op replaces by
        # HTML-unescaping during extraction; restored here)
        result.append(utils.ReportData(
            symbol=data["symbol"],
            message=data["message"].replace("&lt;", "<").replace("&gt;", ">").replace("&amp;", "&"),
            path=data["path"],
            line=int(data["line"])
        ))
    return result
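
# --- Illustrative sketch (added; not part of the original analyzer): what the
# parser produces from a hand-written payload in pylint's `-f json` shape.
def _demo_parse() -> List[utils.ReportData]:
    sample = ('[{"symbol": "unused-import", "message": "Unused import os", '
              '"path": "example.py", "line": 1}]')
    return pylint_output_parser(sample)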
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter # see https://github.com/PyCQA/pylint/issues/259
| 33.325581 | 102 | 0.667132 | 176 | 1,433 | 5.289773 | 0.460227 | 0.064447 | 0.061224 | 0.051557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002564 | 0.183531 | 1,433 | 42 | 103 | 34.119048 | 0.793162 | 0.141661 | 0 | 0 | 0 | 0 | 0.136327 | 0.040816 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.133333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f60211b49b4a0be6744888f8409578cc9d61668a | 5,485 | py | Python | fn_jira/fn_jira/components/jira_common.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2020-08-25T03:43:07.000Z | 2020-08-25T03:43:07.000Z | fn_jira/fn_jira/components/jira_common.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2019-07-08T16:57:48.000Z | 2019-07-08T16:57:48.000Z | fn_jira/fn_jira/components/jira_common.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2019. All Rights Reserved.
"""
These are methods for accessing Jira. The Jira REST API is used for general access.
Requirements: JIRA URL and basic authentication user/password
"""
import json
import fn_jira.lib.constants as constants
from resilient_lib import RequestsCommon
"""
This module implements the calls needed for jira api access. API operations supported:
) create an issue
) create a comment
) transition an issue
"""
# URL fragments needed along with the base jira URL
ISSUE_URL = 'rest/api/2/issue'
TRANSITION_PARAM = 'transitions'
COMMENT_PARAM = 'comment'
class JiraCommon:
    def __init__(self, opts, function_opts):
        self.req_common = RequestsCommon(opts=opts, function_opts=function_opts)

    def create_issue(self, log, appDict):
        """Function: create a jira issue.
        :return: the raw JSON returned from the api call
        """
        issue_url = '/'.join((appDict['url'], ISSUE_URL))
        payload = self._mkCreatePayload(appDict)
        resp = self.req_common.execute_call_v2('post', issue_url, auth=(appDict['user'], appDict['password']),
                                               data=payload, verify=appDict['verifyFlag'], headers=constants.HTTP_HEADERS)
        log and log.debug(resp)
        return self.get_json_result(resp)

    def transition_issue(self, log, appDict):
        """Function: transition a jira issue.
        :return: the raw JSON returned from the api call
        """
        url = '/'.join((appDict['url'], TRANSITION_PARAM))
        payload = self._mkTransitionPayload(appDict)
        # self.find_transitions(log, appDict)  # uncomment to see transitions for this enterprise
        log and log.debug(payload)
        resp = self.req_common.execute_call_v2('post', url, auth=(appDict['user'], appDict['password']),
                                               data=payload, verify=appDict['verifyFlag'], headers=constants.HTTP_HEADERS)
        log and log.debug(resp)
        return self.get_json_result(resp)

    def find_transitions(self, log, appDict):
        """
        determine the ticket transitions for a given issue
        :param log:
        :param appDict:
        :return: JSON result listing the available transitions
        """
        url = '/'.join((appDict['url'], TRANSITION_PARAM))
        resp = self.req_common.execute_call_v2('get', url, auth=(appDict['user'], appDict['password']),
                                               verify=appDict['verifyFlag'], headers=constants.HTTP_HEADERS)
        log and log.debug(resp)
        return self.get_json_result(resp)

    def create_comment(self, log, appDict):
        """Function: create a jira comment in a Jira issue. No JSON is returned on success
        :return: dictionary for a comment
        """
        url = '/'.join((appDict['url'], COMMENT_PARAM))
        payload = self._mkCommentPayload(appDict)
        resp = self.req_common.execute_call_v2('post', url, auth=(appDict['user'], appDict['password']),
                                               data=payload, verify=appDict['verifyFlag'], headers=constants.HTTP_HEADERS)
        log and log.debug(resp)
        # successfully added comments return an empty dictionary: { }
        return self.get_json_result(resp)

    def get_json_result(self, resp):
        """
        get the response in json format, if possible
        :param resp:
        :return: None if errors or not json
        """
        try:
            result = resp.json() if resp and resp.content else None
        except Exception:
            result = None
        return result

    def _mkCreatePayload(self, appDict):
        '''
        Build the payload for creating a Jira issue
        :param appDict: dict of settings for the issue
        :return: json payload for jira update
        '''
        payload = {
            "fields": {
                "project": {
                    "key": appDict.get('project')
                },
                "issuetype": {
                    "name": appDict.get('issuetype')
                }
            }
        }
        for key in appDict['fields']:
            payload['fields'][key] = appDict['fields'][key]
        return json.dumps(payload)

    def _mkCommentPayload(self, appDict):
        '''
        Build the payload for adding a Jira comment
        :param appDict: dict of settings for the comment
        :return: json payload for jira update
        '''
        payload = {"body": appDict['comment']}
        return json.dumps(payload)

    def _mkTransitionPayload(self, appDict):
        '''
        Build the payload needed to transition a Jira issue
        :param appDict: dict of settings for the transition
        :return: json payload for jira call
        '''
        payload = {
            "transition": {
                "id": appDict['transitionId']
            }
        }
        if appDict.get('comment'):
            payload['update'] = {
                "comment": [
                    {"add": {"body": appDict['comment']}}
                ]
            }
        if appDict.get('resolution'):
            payload['fields'] = {
                "resolution": {
                    "name": appDict['resolution']
                }
            }
        return json.dumps(payload)
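
# --- Illustrative sketch (added; not part of the connector). Shows the shape
# of the appDict the payload builders expect; the host, credentials, and ids
# below are made up. Building a payload needs no network access.
if __name__ == "__main__":
    app_dict = {
        'url': 'https://jira.example.com/rest/api/2/issue/DEMO-1',
        'user': 'bot', 'password': 'secret', 'verifyFlag': True,
        'transitionId': '31', 'comment': 'closing via automation',
        'resolution': 'Done',
    }
    # Bypass __init__ so no Resilient configuration is needed for the demo:
    common = JiraCommon.__new__(JiraCommon)
    print(common._mkTransitionPayload(app_dict))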
| 30.642458 | 122 | 0.561531 | 577 | 5,485 | 5.240901 | 0.256499 | 0.011574 | 0.021495 | 0.023148 | 0.421296 | 0.385582 | 0.334325 | 0.294312 | 0.294312 | 0.278439 | 0 | 0.003818 | 0.331449 | 5,485 | 178 | 123 | 30.814607 | 0.820834 | 0.233728 | 0 | 0.232558 | 0 | 0 | 0.086413 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104651 | false | 0.046512 | 0.034884 | 0 | 0.244186 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6073840233bba54bc871867d9a6348f670aaee9 | 22,428 | py | Python | staticwebsync/__init__.py | staticwebsync/staticwebsync | 84c5f39cdd82192e3ed24541f9208be915ffc6df | [
"MIT"
] | null | null | null | staticwebsync/__init__.py | staticwebsync/staticwebsync | 84c5f39cdd82192e3ed24541f9208be915ffc6df | [
"MIT"
] | 14 | 2015-12-20T16:10:44.000Z | 2016-03-08T18:21:10.000Z | staticwebsync/__init__.py | staticwebsync/staticwebsync | 84c5f39cdd82192e3ed24541f9208be915ffc6df | [
"MIT"
] | 1 | 2017-02-19T01:05:40.000Z | 2017-02-19T01:05:40.000Z | __all__ = ('log', 'progress_callback_factory', 'progress_callback_divisions', 'BadUserError', 'setup')
import binascii
import hashlib
import mimetypes
import mmap
import os
import posixpath
import re
import time
import boto3
import botocore
import termcolor
log = lambda msg: None
progress_callback_factory = lambda: None
progress_callback_divisions = 10 # this is no longer used, but is retained so as not to break the module API
class BadUserError(Exception):
    def __init__(self, message):
        self.message = message


def setup(args):
    def split_all(s, splitter):
        out = []
        while len(s) != 0:
            s, tail = splitter(s)
            out.insert(0, tail)
        return out

    def md5_hex_digest_string(filename):
        digestor = hashlib.md5()
        with open(filename, 'rb') as opened_file:
            fd = opened_file.fileno()
            if os.fstat(fd).st_size > 0:  # can't mmap empty files
                with mmap.mmap(fd, 0, access=mmap.ACCESS_READ) as mm:
                    digestor.update(mm)
        return digestor.hexdigest()

    def log_check(msg):
        """Use this when reporting that we are about to check something."""
        log(msg)

    def log_noop(msg):
        """Use this when reporting that we checked something and it was fine as-is so it didn't need to be changed."""
        log(termcolor.colored(msg, 'cyan', attrs=['bold']))

    def log_op(msg):
        """Use this when reporting that we changed something (uploaded a file, changed a setting etc.)"""
        log(termcolor.colored(msg, 'green', attrs=['bold']))

    def log_warn(msg):
        """Use this when warning the user about something."""
        log(termcolor.colored(msg, 'red', attrs=['bold']))

    prefix = 'http://'
    if args.host_name.startswith(prefix):
        args.host_name = args.host_name[len(prefix):]
    suffix = '/'
    if args.host_name.endswith(suffix):
        args.host_name = args.host_name[:-len(suffix)]
    standard_bucket_name = args.host_name

    is_index_key = re.compile('(?P<path>^|.*?/)%s$' % re.escape(args.index))

    session = boto3.session.Session(
        aws_access_key_id=args.access_key_id,
        aws_secret_access_key=args.secret_access_key)
    s3 = session.resource('s3')
    bucket = None
    region = None
    all_buckets = None
    try:
        log_check('looking for existing S3 bucket')
        all_buckets = list(s3.buckets.all())
    except botocore.exceptions.ClientError as e:
        if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:
            raise BadUserError('Access denied: %s' % e.response['Error']['Message'])
        else:
            raise e
    except botocore.exceptions.NoCredentialsError:
        raise BadUserError('No AWS credentials found. Please set up your ~/.aws/credentials file or specify them on the command line.')

    use_cloudfront = not args.no_cloudfront
    MARKER_KEY_NAME = '.staticwebsync'

    def install_marker_key(bucket):
        s3.Object(bucket.name, MARKER_KEY_NAME).put(Body=b'', ACL='private')

    def object_or_none(bucket, key):
        try:
            o = s3.Object(bucket.name, key)
            o.load()
            return o
        except botocore.exceptions.ClientError as e:
            if e.response['ResponseMetadata']['HTTPStatusCode'] == 404:
                return None
            else:
                raise e

    for b in all_buckets:
        if b.name == standard_bucket_name or b.name.startswith(standard_bucket_name + '-'):
            log_noop('found existing bucket %s' % b.name)
            # The bucket location must be set in boto so that it can use the
            # path addressing style:
            # http://boto3.readthedocs.org/en/latest/guide/s3.html?highlight=botocore.client.Config#changing-the-addressing-style
            # That's required because otherwise requests on buckets with dots
            # in their names fail HTTPS validation:
            # https://github.com/boto/boto/issues/2836
            region = s3.meta.client.get_bucket_location(Bucket=b.name)['LocationConstraint']
            # That API returns None when the region is us-east-1:
            # http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
            if region is None: region = 'us-east-1'
            s3 = session.resource('s3', region_name=region)
            bucket = s3.Bucket(b.name)
            if not object_or_none(b, MARKER_KEY_NAME):
                if not args.take_over_existing_bucket:
                    raise BadUserError("The S3 bucket %s already exists, but was not created by staticwebsync. If you wish to use it anyway and are happy for any existing files in it to be deleted if they don't have a corresponding local file then use the --take-over-existing-bucket option." % bucket.name)
                install_marker_key(bucket)
            break
    else:
        bucket_name = standard_bucket_name
        first_fail = True
        while True:
            try:
                log_op('creating bucket %s' % bucket_name)
                configuration = None
                region = args.bucket_location
                if not region or region == 'US': region = 'us-east-1'
                if region != 'us-east-1':
                    configuration = { 'LocationConstraint': region }
                s3 = session.resource('s3', region_name=region)
                if configuration:
                    bucket = s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
                else:
                    bucket = s3.create_bucket(Bucket=bucket_name)
                install_marker_key(bucket)
                break
            except botocore.exceptions.ClientError as e:
                if e.response['Error']['Code'] == 'BucketAlreadyExists':
                    log_warn('bucket %s was already used by another user' % bucket_name)
                    if first_fail:
                        log_warn('We can use an alternative bucket name, but this will only work with CloudFront and not with standard S3 web site hosting (because it requires the bucket name to match the host name).')
                        first_fail = False
                    if not use_cloudfront:
                        raise BadUserError("Using CloudFront is disabled, so we can't continue.")
                    bucket_name = standard_bucket_name + '-' + binascii.b2a_hex(os.urandom(8)).decode('ascii')
                    continue
                else:
                    raise e

    log_op('configuring bucket ACL policy')
    bucket.Acl().put(ACL='private')

    log_op('configuring bucket for website access')
    website_configuration = { 'IndexDocument': { 'Suffix': args.index } }
    if args.error_page is not None:
        website_configuration['ErrorDocument'] = { 'Key': args.error_page }
    bucket.Website().put(WebsiteConfiguration=website_configuration)

    # http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html
    website_endpoint = '%s.s3-website-%s.amazonaws.com' % (bucket.name, region)

    def set_caller_reference(options):
        options['CallerReference'] = binascii.b2a_hex(os.urandom(8)).decode('ascii')

    if use_cloudfront:
        cf = session.client('cloudfront')
        all_distribution_summaries = []
        try:
            log_check('looking for existing CloudFront distribution')
            distribution_lists = list(cf.get_paginator('list_distributions').paginate())
            for distribution_list in distribution_lists:
                all_distribution_summaries.extend(distribution_list['DistributionList'].get('Items', []))
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == 'OptInRequired':
                raise BadUserError('Your AWS account is not signed up for CloudFront, please sign up at http://aws.amazon.com/cloudfront/')
            else:
                raise e

        def set_required_config(config):
            any_changed = False

            def get_or_set_default(d, k, default):
                nonlocal any_changed
                value = d.get(k)
                if value is None:
                    any_changed = True
                    d[k] = default
                    return default
                return value

            def set_if_not_equal(d, k, value):
                nonlocal any_changed
                old_value = d.get(k)
                if old_value != value:
                    any_changed = True
                    d[k] = value

            aliases = get_or_set_default(config, 'Aliases', {})
            aliases_items = get_or_set_default(aliases, 'Items', [])
            if args.host_name not in aliases_items:
                any_changed = True
                aliases_items.append(args.host_name)
            aliases['Quantity'] = len(aliases_items)
            origins = get_or_set_default(config, 'Origins', {})
            origins_items = get_or_set_default(origins, 'Items', [])
            if len(origins_items) == 0:
                any_changed = True
                origin = {}
                origins_items[:] = [origin]
            elif len(origins_items) == 1:
                origin = origins_items[0]
            else:
                raise BadUserError("The existing distribution has multiple origins, and we can't configure distributions with more than one. Please delete all but the default origin or delete the distribution.")
            set_if_not_equal(origins, 'Quantity', len(origins_items))
            set_if_not_equal(origin, 'DomainName', website_endpoint)
            set_if_not_equal(origin, 'Id', 'S3 Website')
            custom_origin_config = get_or_set_default(origin, 'CustomOriginConfig', {})
            set_if_not_equal(custom_origin_config, 'OriginProtocolPolicy', 'http-only')
            set_if_not_equal(custom_origin_config, 'HTTPPort', 80)
            set_if_not_equal(custom_origin_config, 'HTTPSPort', 443)
            default_cache_behavior = get_or_set_default(config, 'DefaultCacheBehavior', {})
            set_if_not_equal(default_cache_behavior, 'Compress', True)
            set_if_not_equal(default_cache_behavior, 'TargetOriginId', origin['Id'])
            forwarded_values = get_or_set_default(default_cache_behavior, 'ForwardedValues', {})
            set_if_not_equal(forwarded_values, 'QueryString', False)
            cookies = get_or_set_default(forwarded_values, 'Cookies', {})
            if cookies.get('Forward') != 'none':
                any_changed = True
                cookies.clear()
                cookies['Forward'] = 'none'
            set_if_not_equal(config, 'Enabled', True)
            return any_changed

        created_new_distribution = False
        for distribution_summary in all_distribution_summaries:
            origins = distribution_summary['Origins'].get('Items', [])
            if len(origins) == 1:
                origin = origins[0]
                if origin['DomainName'] == website_endpoint:
                    distribution_id = distribution_summary['Id']
                    distribution_domain_name = distribution_summary['DomainName']
                    log_noop('found distribution: %s' % distribution_id)
                    break
            if args.host_name in distribution_summary['Aliases'].get('Items', []):
                # TODO Remove the alias if a force option is given.
                raise BadUserError("Existing distribution %s has this hostname set as an alternate domain name (CNAME), but it isn't associated with the correct origin bucket. Please remove the alternate domain name from the distribution or delete the distribution." % distribution_summary['Id'])
        else:
            log_op('creating CloudFront distribution')
            creation_config = {}
            set_required_config(creation_config)
            # Set defaults for options that are required to create a distribution:
            creation_config.setdefault('Comment', '')
            default_cache_behavior = creation_config.setdefault('DefaultCacheBehavior', {})
            trusted_signers = default_cache_behavior.setdefault('TrustedSigners', {})
            trusted_signers.setdefault('Enabled', False)
            trusted_signers.setdefault('Quantity', 0)
            default_cache_behavior.setdefault('ViewerProtocolPolicy', 'allow-all')
            default_cache_behavior.setdefault('MinTTL', 0)
            set_caller_reference(creation_config)
            distribution_creation_response = cf.create_distribution(DistributionConfig=creation_config)
            distribution_id = distribution_creation_response['Distribution']['Id']
            distribution_domain_name = distribution_creation_response['Distribution']['DomainName']
            log_op('created distribution %s' % distribution_id)
            created_new_distribution = True

        if not created_new_distribution:
            log_check('checking distribution configuration')
            get_distribution_config_response = cf.get_distribution_config(Id=distribution_id)
            update_config = get_distribution_config_response['DistributionConfig']
            if set_required_config(update_config):
                log_op('configuring distribution')
                cf.update_distribution(
                    Id=distribution_id,
                    IfMatch=get_distribution_config_response['ETag'],
                    DistributionConfig=update_config)
            else:
                log_noop('distribution configuration already fine')

    # TODO Set up custom MIME types.
    mimetypes.init()
    # On my Windows system these get set to silly other values by some registry
    # key, which is, for the avoidance of doubt, super lame.
    mimetypes.types_map['.png'] = 'image/png'
    mimetypes.types_map['.jpg'] = 'image/jpeg'
    mimetypes.types_map['.js'] = 'application/javascript'

    # TODO Serialize these in case of failure, and resume when restarting:
    invalidations = []

    dir = os.path.normpath(args.folder)
    if not os.path.exists(dir):
        raise BadUserError('Folder %s does not exist.' % args.folder)
    if not os.path.isdir(dir):
        raise BadUserError('%s is a file not a folder.' % args.folder)
    os.chdir(dir)

    for (dirpath, dirnames, filenames) in os.walk('.'):
        if not args.allow_dot_files:
            blacklisted = False
            for p in split_all(dirpath, os.path.split):
                if p.startswith('.') and p != '.':
                    log_noop('skipping folder %s' % os.path.normpath(dirpath))
                    blacklisted = True
                    break
            if blacklisted:
                continue

        for filename in filenames:
            if not args.allow_dot_files and filename.startswith('.'):
                log_noop('skipping file %s' % filename)
                continue

            inf = os.path.normpath(os.path.join(dirpath, filename))
            d = os.path.normpath(dirpath)
            if d == '.':
                d = ''

            type = mimetypes.guess_type(filename, strict=False)
            upload_extra_args = {}
            if type[0] is not None:
                # the lack of hyphens in the keys is correct, because these are method arguments rather than HTTP headers:
                upload_extra_args['ContentType'] = type[0]
            if type[1] is not None:
                upload_extra_args['ContentEncoding'] = type[1]

            def upload(f):
                # We could re-use this when uploading the same file twice, but
                # the code would be a bit messy.
                md5 = None

                parts = list(split_all(d, os.path.split))
                parts.append(f)
                outf = posixpath.join(*parts)
                if outf == '':
                    outf = args.index
                log_check('processing "%s" -> "%s"' % (inf, outf))

                obj = s3.Object(bucket.name, outf)
                try:
                    obj.load()
                    existed = True
                    log_noop('%s exists in bucket' % outf)
                    md5 = md5_hex_digest_string(inf)

                    if obj.e_tag == '"%s"' % md5 and \
                            obj.content_type == upload_extra_args.get('ContentType', obj.content_type) and \
                            obj.content_encoding == upload_extra_args.get('ContentEncoding'):
                        # TODO Check for other headers?
                        log_noop('%s matches local file' % outf)
                        if not args.repair:
                            return

                        acl = obj.Acl()
                        user_grant_okay = False
                        public_grant_okay = False
                        for grant in acl.grants:
                            grantee = grant['Grantee']
                            if grantee.get('ID') == acl.owner['ID']:
                                user_grant_okay = grant['Permission'] == 'FULL_CONTROL'
                                if not user_grant_okay:
                                    break
                            elif grantee['Type'] == 'Group':
                                public_grant_okay = \
                                    grantee['URI'] == 'http://acs.amazonaws.com/groups/global/AllUsers' and \
                                    grant['Permission'] == 'READ'
                                if not public_grant_okay:
                                    break
                            else:
                                break
                        else:
                            if user_grant_okay and public_grant_okay:
                                log_noop('%s ACL is fine' % outf)
                                return
                        log_op('%s ACL is wrong' % outf)
                except botocore.exceptions.ClientError as ce:
                    if ce.response['Error']['Code'] != '404':
                        raise ce
                    existed = False

                log_op('uploading %s' % outf)
                upload_extra_args['ACL'] = 'public-read'

                # Convert our callbacks to be compatible with the boto3 upload callback API:
                class CallbackWrapper:
                    def __init__(self, old_callback_factory, file_size):
                        self.old_callback = old_callback_factory()
                        self.file_size = file_size
                        self.total_transferred = 0

                    def __call__(self, newly_transferred_bytes_count):
                        self.total_transferred += newly_transferred_bytes_count
                        self.old_callback(self.total_transferred, self.file_size)

                obj.upload_file(inf, ExtraArgs=upload_extra_args,
                                Callback=CallbackWrapper(progress_callback_factory, os.path.getsize(inf)))

                if existed:
                    key_name = obj.key
                    invalidations.append(key_name)
                    # Index pages are likely to be cached in CloudFront without the trailing filename instead (or as well).
                    m = is_index_key.match(key_name)
                    if m:
                        invalidations.append(m.group('path'))

            upload(filename)

    log_check('checking for deleted files')
    for obj in list(bucket.objects.all()):
        name = obj.key
        if name == MARKER_KEY_NAME:
            continue
        if name.endswith('/'):
            name = posixpath.join(name, args.index)
        parts = split_all(name, posixpath.split)
        blacklisted = False
        if not args.allow_dot_files:
            for p in parts:
                if p.startswith('.'):
                    blacklisted = True
                    break
        if not blacklisted and os.path.isfile(os.path.join(*parts)):
            log_noop('%s has corresponding local file' % obj.key)
            continue
        log_op('deleting %s' % obj.key)
        obj.delete()
        invalidations.append(obj.key)

    def log_sync_complete(dns_entry_name, dns_entry_target):
        log_op('sync complete')
        log_check('a DNS entry needs to be set for\n%s\npointing to\n%s' % (dns_entry_name, dns_entry_target))

    if not use_cloudfront:
        log_sync_complete(args.host_name, website_endpoint)
        return

    def cf_complete():
        log_sync_complete(args.host_name, distribution_domain_name)
        if args.dont_wait_for_cloudfront_propagation:
            log_noop('CloudFront may take up to 15 minutes to reflect any changes')
            return
        while True:
            log_check('checking if CloudFront propagation is complete')
            get_distribution_response = cf.get_distribution(Id=distribution_id)['Distribution']
            if get_distribution_response['Status'] != 'InProgress' and \
                    get_distribution_response['InProgressInvalidationBatches'] == 0:
                log_op('CloudFront propagation is complete')
                return
            interval = 15
            log_check('propagation still in progress; checking again in %d seconds' % interval)
            time.sleep(interval)

    if len(invalidations) == 0:
        cf_complete()
        return

    log_op('invalidating cached copies of changed or deleted files')

    def invalidate_all(paths):
        batch = {
            'Paths': {
                'Quantity': len(paths),
                'Items': paths,
            },
        }
        while True:
            try:
                set_caller_reference(batch)
                cf.create_invalidation(DistributionId=distribution_id, InvalidationBatch=batch)
                break
            except botocore.exceptions.ClientError as ce:
                if ce.response['Error']['Code'] != 'TooManyInvalidationsInProgress':
                    raise ce
                interval = 60
                log_check('too many invalidations in progress; trying again in %d seconds' % interval)
                time.sleep(interval)
        paths.clear()

    paths = []

    def invalidate(path):
        paths.append(path)
        if len(paths) == 3000:
            invalidate_all(paths)

    for i in invalidations:
        invalidate('/' + i)
        if i == args.index:
            invalidate('/')
    if len(paths) > 0:
        invalidate_all(paths)

    cf_complete()
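
# --- Illustrative sketch (added; not part of the module). setup() expects an
# argparse-style namespace; the attribute names below are the ones read by the
# function above, the values are made up. Running it would contact AWS, so it
# is wrapped in a helper rather than executed at import time.
def _example_invocation():
    import argparse
    args = argparse.Namespace(
        host_name='www.example.com', index='index.html', error_page=None,
        folder='./site', bucket_location='us-east-1', no_cloudfront=False,
        allow_dot_files=False, repair=False, take_over_existing_bucket=False,
        dont_wait_for_cloudfront_propagation=True,
        access_key_id=None, secret_access_key=None)
    setup(args)  # requires valid AWS credentials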
| 40.852459 | 307 | 0.581951 | 2,487 | 22,428 | 5.063932 | 0.211902 | 0.010322 | 0.011434 | 0.011355 | 0.141337 | 0.110767 | 0.088772 | 0.043195 | 0.031285 | 0.031285 | 0 | 0.005905 | 0.327938 | 22,428 | 548 | 308 | 40.927007 | 0.829629 | 0.075575 | 0 | 0.186441 | 0 | 0.014528 | 0.165644 | 0.009184 | 0 | 0 | 0 | 0.001825 | 0 | 1 | 0.050847 | false | 0 | 0.026634 | 0 | 0.113801 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f608cd237ad400b7d70ddc6b3272e50852e00f2c | 14,393 | py | Python | src/dsa/chapter4_exercises.py | AlexMGitHub/DS-A_Python | a4770c95ef2f76917fb1d8bc8c11433828a735a3 | [
"MIT"
] | null | null | null | src/dsa/chapter4_exercises.py | AlexMGitHub/DS-A_Python | a4770c95ef2f76917fb1d8bc8c11433828a735a3 | [
"MIT"
] | null | null | null | src/dsa/chapter4_exercises.py | AlexMGitHub/DS-A_Python | a4770c95ef2f76917fb1d8bc8c11433828a735a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Solutions to chapter 4 exercises.
###############################################################################
# chapter4_exercises.py
#
# Revision: 1.00
# Date: 6/27/2021
# Author: Alex
#
# Purpose: Solutions to chapter 4 exercises from "Data Structures and
# Algorithms in Python" by Goodrich et. al.
#
###############################################################################
"""
# %% Imports
# Standard system imports
from pathlib import Path
# Related third party imports
# Local application/library specific imports
# %% Reinforcement Exercises
def harmonic_number(n):
    """Solution to exercise R-4.6.

    Describe a recursive function for computing the nth Harmonic number.
    """
    if n == 1:
        return 1/n  # Base case
    return 1/n + harmonic_number(n-1)


def str_to_int(string):
    """Solution to exercise R-4.7.

    Describe a recursive function for converting a string of digits into the
    integer it represents. For example, "13531" represents the integer 13,531.
    """
    n = len(string)
    zero_unicode = ord('0')

    def recurse(idx):
        if idx == n:
            return 0  # Base case
        int_val = ord(string[idx]) - zero_unicode
        return int_val * 10 ** (n - 1 - idx) + recurse(idx + 1)
    return recurse(0)


# %% Creativity Exercises
def find_min_max(data):
    """Solution to exercise C-4.9.

    Write a short recursive Python function that finds the minimum and
    maximum values in a sequence without using any loops.
    """
    n = len(data)
    min_val = data[0]
    max_val = data[0]

    def recurse_minmax(idx):
        nonlocal min_val, max_val
        if idx == n:
            return min_val, max_val  # Base case
        if data[idx] > max_val:
            max_val = data[idx]
        elif data[idx] < min_val:
            min_val = data[idx]
        return recurse_minmax(idx + 1)
    return recurse_minmax(1)


def recursive_log(num):
    """Solution to exercise C-4.10.

    Describe a recursive algorithm to compute the integer part of the base-two
    logarithm of n using only addition and integer division.
    """
    def recurse(num, count):
        if num == 1:
            return count  # Base case
        return recurse(num // 2, count + 1)
    return recurse(num, 0)


def recursive_unique(sequence):
    """Solution to exercise C-4.11.

    Describe an efficient recursive function for solving the element
    uniqueness problem, which runs in time that is at most O(n^2) in the
    worst case without using sorting.
    --------------------------------------------------------------------------
    Solution:
    --------------------------------------------------------------------------
    The nonrecursive part of each call uses O(1) time, so the overall running
    time will be proportional to the total number of recursive invocations.
    However, unlike the "bad" recursion example in the text, my function does
    not make more than one recursive call per invocation. It is a linear
    recursion algorithm.

    The algorithm works by decrementing the stop index until it reaches the
    start index. Once that happens, the stop index is reset to the end of the
    sequence and the start index is incremented by 1.

    The first recursion call in the conditional statements only executes if
    the stop index hasn't reached the start index yet. The second recursion
    call only occurs if the start index hasn't reached the end of the sequence
    yet. Both calls are placed in an "elif" statement that makes them mutually
    exclusive. In other words, a maximum of one recursive call per invocation.

    Based on the above description, it's clear that the algorithm is worst
    case O(n^2). It's equivalent to a nested loop with n outer iterations and
    (n-1), (n-2) ... 1 inner iterations. This is well-known to be O(n^2).

    I used timeit to verify that the execution time of my algorithm grows
    approximately as n^2.
    """
    n = len(sequence)

    def unique(start, stop):
        if sequence[start] == sequence[stop]:
            return False  # Base case if not unique
        if stop > (start+1):
            return unique(start, stop-1)
        if start < (n-2):
            return unique(start+1, n-1)
        return True  # Base case if unique
    return unique(0, n-1)


def integer_product(num1, num2):
    """Solution to exercise C-4.12.

    Give a recursive algorithm to compute the product of two positive integers,
    m and n, using only addition and subtraction.
    """
    def recurse(num1, idx):
        if idx == 0:
            return 0  # Base case
        return num1 + recurse(num1, idx-1)
    return recurse(num1, num2)


def towers_of_hanoi(n):
    """Solution to exercise C-4.14.

    In the Towers of Hanoi puzzle, we are given a platform with three pegs, a,
    b, and c, sticking out of it. On peg a is a stack of n disks, each larger
    than the next, so that the smallest is on the top and the largest is on the
    bottom. The puzzle is to move all the disks from peg a to peg c, moving
    one disk at a time, so that we never place a larger disk on top of a
    smaller one.
    See Figure 4.15 for an example of the case n = 4. Describe a recursive
    algorithm for solving the Towers of Hanoi puzzle for arbitrary n.
    """
    a = list(range(n, 0, -1))
    b = []
    c = []

    def recurse(n, source, destination, temp):
        if n > 0:  # Base case, bottom of stack of disks
            # Move n-1 disks from source to temporary storage
            recurse(n-1, source, temp, destination)
            # Move the nth (bottom) disk from source to destination
            destination.append(source.pop())
            # Move the n-1 disks from temporary storage to destination
            recurse(n-1, temp, destination, source)
    recurse(n, a, c, b)
    return c


def all_subsets(aset):
    """Solution to exercise C-4.15.

    Write a recursive function that will output all the subsets of a set of n
    elements (without repeating any subsets).
    --------------------------------------------------------------------------
    Solution:
    --------------------------------------------------------------------------
    I've made the following assumptions:
    1. The input is a list of unique numbers
    2. The set itself is considered a subset (not a proper subset)
    3. The empty set is considered a subset
    """
    def recurse(alist):
        if not alist:
            return [[]]  # Base case, return empty set
        prev_lists = recurse(alist[1:])
        return prev_lists + [[alist[0]] + y for y in prev_lists]
    return recurse(aset)


def reverse_string(string):
    """Solution to exercise C-4.16.

    Write a short recursive Python function that takes a character string s and
    outputs its reverse. For example, the reverse of "pots&pans" would be
    "snap&stop".
    """
    n = len(string)

    def recurse(idx):
        if idx == 0:
            return string[0]  # Base case, decremented to beginning of string
        return string[idx] + recurse(idx-1)
    return recurse(n-1)


def is_palindrome(string):
    """Solution to exercise C-4.17.

    Write a short recursive Python function that determines if a string s is a
    palindrome, that is, it is equal to its reverse. For example, "racecar"
    and "gohangasalamiimalasagnahog" are palindromes.
    """
    n = len(string)

    def recurse(idx):
        if idx == n:
            return True  # Base case, end of string and all letters matched
        if string[idx] == string[n-1-idx]:
            return recurse(idx+1)
        return False
    return recurse(0)


def more_vowels(astring):
    """Solution to exercise C-4.18.

    Use recursion to write a Python function for determining if a string s has
    more vowels than consonants.
    """
    string = astring.lower()
    vowels = 'aeiou'
    n = len(string)
    vowel_count = 0

    def recurse(idx):
        nonlocal vowel_count
        if idx == n:
            return vowel_count > (n-vowel_count)  # Base case, end of string
        if string[idx] in vowels:
            vowel_count += 1
        return recurse(idx+1)
    return recurse(0)


def evens_first(nums):
    """Solution to exercise C-4.19.

    Write a short recursive Python function that rearranges a sequence of
    integer values so that all the even values appear before all the odd
    values.
    """
    n = len(nums)

    def recurse(start, stop):
        if start == stop:
            return nums  # Base case, finished sorting list
        if nums[start] % 2 == 0:
            return recurse(start+1, stop)
        nums[stop], nums[start] = nums[start], nums[stop]
        return recurse(start, stop-1)
    return recurse(0, n-1)


def rearrange_unsorted(nums, k):
    """Solution to exercise C-4.20.

    Given an unsorted sequence, S, of integers and an integer k, describe a
    recursive algorithm for rearranging the elements in S so that all elements
    less than or equal to k come before any elements larger than k. What is
    the running time of your algorithm on a sequence of n values?
    --------------------------------------------------------------------------
    Solution:
    --------------------------------------------------------------------------
    The algorithm terminates when the start index equals the stop index. That
    requires n recursive calls. Each recursive call will worst case swap two
    values in the list. Replacing a value in a list is O(1) according to the
    text (table 5.4), and so this algorithm is O(n).
    """
    n = len(nums)

    def recurse(start, stop):
        if start == stop:
            return nums  # Base case, finished sorting list
        if nums[start] <= k:
            return recurse(start+1, stop)
        nums[stop], nums[start] = nums[start], nums[stop]
        return recurse(start, stop-1)
    return recurse(0, n-1)


def sum_to_k(nums, k):
    """Solution to exercise C-4.21.

    Suppose you are given an n-element sequence, S, containing distinct
    integers that are listed in increasing order. Given a number k, describe a
    recursive algorithm to find two integers in S that sum to k, if such a pair
    exists. What is the running time of your algorithm?
    --------------------------------------------------------------------------
    Solution:
    --------------------------------------------------------------------------
    All of the non-recursive operations are O(1). The running time is thus
    proportional to the number of recursive calls. Worst case, the algorithm
    will try every pairwise combination in the sequence. This is O(n^2), as
    there are n elements, each of which will be compared with n - k other
    elements: the familiar n*(n+1)/2 formula.
    """
    n = len(nums)

    def recurse(start, stop):
        if nums[start] + nums[stop] == k:
            return nums[start], nums[stop]  # Base case: pair found
        if stop > (start+1):
            return recurse(start, stop-1)
        if start < (n-2):
            return recurse(start+1, n-1)
        return None  # Base case: no pair found
    return recurse(0, n-1)


# %% Project Exercises
def summation_puzzle(words):
    """Solution to exercise P-4.24.

    Write a program for solving summation puzzles by enumerating and testing
    all possible configurations. Using your program, solve the three puzzles
    given in Section 4.4.3.
    """
    assert len(words) == 3, 'Summation puzzle must be three word phrase'
    digit_list = list(range(10))
    chars = list(''.join(words))
    unique_chars = list(set(chars))
    n = len(unique_chars)
    char_dict = {}

    def solution_found(S):
        nonlocal char_dict
        char_dict = {unique_chars[idx]: S[idx] for idx in range(len(S))}
        word1 = [str(char_dict[x]) for x in words[0]]
        val1 = int(''.join(word1))
        word2 = [str(char_dict[x]) for x in words[1]]
        val2 = int(''.join(word2))
        word3 = [str(char_dict[x]) for x in words[2]]
        val3 = int(''.join(word3))
        return (val1 + val2) == val3

    def recurse(k, S, U):
        for idx, e in enumerate(U):
            S.append(U.pop(idx))
            if k == 1:
                if solution_found(S):
                    return (S, char_dict)  # Base case: Solution found
            else:
                result = recurse(k-1, S.copy(), U.copy())
                if result is not None:
                    return result
            S.pop()
            U.insert(0, e)
        return None  # Base case: No solution found
    return recurse(n, [], digit_list)


def os_walk(path_str):
    """Solution to exercise P-4.27.

    Python's os module provides a function with signature walk(path) that
    is a generator yielding the tuple (dirpath, dirnames, filenames) for each
    subdirectory of the directory identified by string path, such that string
    dirpath is the full path to the subdirectory, dirnames is a list of the
    names of the subdirectories within dirpath, and filenames is a list of the
    names of non-directory entries of dirpath.
    --------------------------------------------------------------------------
    Solution:
    --------------------------------------------------------------------------
    I used pytest's tmp_path fixture to create a temporary directory with a
    function-level scope. I then wrote a recursive function to create a simple
    directory with multiple levels of subdirectories and files.

    I compare the results of the os module's walk function to my own. Note
    that the order of the tuples reported by the two methods may be arbitrary,
    and so I sorted both results before comparing them. I also compared the
    lists of files and directories as sets so that a differing order does not
    cause the test to fail.
    """
    results = []
    path = Path(path_str)

    def walk(path):
        files = []
        dirs = []
        contents = Path.iterdir(path)
        for obj in contents:
            if Path.is_dir(obj):
                dirs.append(obj.name)
                walk(obj)
            else:
                files.append(obj.name)
        results.append((str(path), dirs, files))
    walk(path)
    return results
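
# --- Illustrative smoke test (added; not part of the exercise solutions):
# exercises a few of the recursive functions above with verifiable outputs.
if __name__ == "__main__":
    assert str_to_int("13531") == 13531
    assert recursive_log(16) == 4
    assert is_palindrome("racecar")
    assert towers_of_hanoi(4) == [4, 3, 2, 1]  # all disks moved to peg c
    print(find_min_max([3, 1, 4, 1, 5, 9, 2, 6]))  # (1, 9)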
| 33.472093 | 79 | 0.595359 | 2,010 | 14,393 | 4.230846 | 0.236318 | 0.033631 | 0.033866 | 0.026811 | 0.234243 | 0.130527 | 0.107714 | 0.075729 | 0.043509 | 0.043509 | 0 | 0.018008 | 0.266935 | 14,393 | 429 | 80 | 33.550117 | 0.787982 | 0.568332 | 0 | 0.301136 | 0 | 0 | 0.00857 | 0 | 0 | 0 | 0 | 0 | 0.005682 | 1 | 0.181818 | false | 0 | 0.005682 | 0 | 0.482955 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f60bbe8d92cacc0baec1190db4c4305ce847b01a | 4,220 | py | Python | spanish_inflections.py | mathigatti/spanish_inflections | bb5d9677edec8862aaaf4a4f7c6ad5bc4c151c50 | [
"MIT"
] | null | null | null | spanish_inflections.py | mathigatti/spanish_inflections | bb5d9677edec8862aaaf4a4f7c6ad5bc4c151c50 | [
"MIT"
] | null | null | null | spanish_inflections.py | mathigatti/spanish_inflections | bb5d9677edec8862aaaf4a4f7c6ad5bc4c151c50 | [
"MIT"
] | null | null | null |
rules_adjetive = []
with open("MM.adj.txt", 'r') as f:
    for line in f.readlines():
        rule = line.split()
        rules_adjetive.append({"word": rule[0], "lemma": rule[1], "code": rule[2]})

rules_noun = []
with open("MM.nom.txt", 'r') as f:
    for line in f.readlines():
        rule = line.split()
        rules_noun.append({"word": rule[0], "lemma": rule[1], "code": rule[2]})

rules_tanc = []
with open("MM.tanc.txt", 'r') as f:
    for line in f.readlines():
        rule = line.split()
        rules_tanc.append({"word": rule[0], "lemma": rule[1], "code": rule[2]})

rules_verb = []
with open("MM.verb.txt", 'r') as f:
    for line in f.readlines():
        rule = line.split()
        rules_verb.append({"word": rule[0], "lemma": rule[1], "code": rule[2]})

rules = {"DET": rules_tanc, "ADJ": rules_adjetive, "VERB": rules_verb, "NOUN": rules_noun}


def search_rule(rules, word):
    for rule in rules:
        if word == rule["word"]:
            return rule
    return None


def search_word(rules, lemma, code):
    for rule_i in rules:
        if lemma == rule_i["lemma"] and code == rule_i["code"]:
            return rule_i["word"]
    return ""
def search_verb(verb):
    rule_i = search_rule(rules_verb, verb)
    if rule_i is None:
        return {"original": verb}
    else:
        # Fixed: the original read from an undefined `rule` and keyed the
        # result on an undefined `noun` instead of `verb`.
        lemma = rule_i["lemma"]
        result_i = {"original": verb}
        for rule in rules_verb:
            if rule["lemma"] == lemma:
                result_i[rule["code"]] = rule["word"]
        return result_i
def search_noun(noun):
    rule = search_rule(rules_noun, noun)
    if rule is None:
        return {"original": noun}
    else:
        lemma = rule["lemma"]
        code = rule["code"]
        result_i = {"original": noun}
        for sub_code in ["S", "P"]:
            code = code[:3] + sub_code + code[4:]
            result_i[code[2:4]] = search_word(rules_noun, lemma, code)
        return result_i


def search_adjetive(adjetive):
    rule = search_rule(rules_adjetive, adjetive)
    if rule is None:
        return {"original": adjetive, "FS": "", "FP": "", "MS": "", "MP": ""}
    else:
        lemma = rule["lemma"]
        code = rule["code"]
        result_i = {"original": adjetive}
        for sub_code in ["FS", "FP", "MS", "MP", "CS", "CP"]:
            code = code[:3] + sub_code + code[5:]
            result_i[sub_code] = search_word(rules_adjetive, lemma, code)
        if result_i["CS"] != "":
            for sub_code in ["FS", "MS"]:
                result_i[sub_code] = result_i["CS"]
            for sub_code in ["FP", "MP"]:
                result_i[sub_code] = result_i["CP"]
        if result_i["FS"] == "":
            for sub_code in ["FS", "MS"]:
                result_i[sub_code] = result_i["MS"]
            for sub_code in ["FP", "MP"]:
                result_i[sub_code] = result_i["MP"]
        if result_i["MS"] == "":
            for sub_code in ["FS", "MS"]:
                result_i[sub_code] = result_i["FS"]
            for sub_code in ["FP", "MP"]:
                result_i[sub_code] = result_i["FP"]
        del result_i["CS"]
        del result_i["CP"]
        return result_i


def basic_noun_data(word):
    code = search_rule(rules_noun, word)["code"]
    gender = code[2]
    number = code[3]
    return {"gender": gender, "number": number}
def fix_verb(rules, word, gender, number):
    try:
        rule = search_rule(rules, word)
        lemma = rule["lemma"]
        code = rule["code"]
        for a, b in [("F", gender), ("M", gender), ("S", number), ("P", number)]:
            code = code[:-2] + code[-2:].replace(a, b)
        result = search_word(rules, lemma, code)
        if result != "":
            return result
        else:
            return word
    except Exception:
        # Fall back to the unmodified word when the rule lookup fails
        # (search_rule returns None for unknown words).
        return word


def fix_word(rules, word, gender, number):
    try:
        rule = search_rule(rules, word)
        lemma = rule["lemma"]
        code = rule["code"]
        for a, b in [("F", gender), ("M", gender), ("S", number), ("P", number)]:
            code = code.replace(a, b)
        result = search_word(rules, lemma, code)
        if result != "":
            return result
        else:
            return word
    except Exception:
        # Fall back to the unmodified word when the rule lookup fails.
        return word
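
# --- Illustrative sketch (added; not part of the original script). Assumes
# the MM.*.txt lexicon files are present next to the script; the example words
# and the expected outputs noted below depend on that lexicon's codes.
if __name__ == "__main__":
    print(search_adjetive("rojo"))                    # inflected forms by gender/number
    print(fix_word(rules["ADJ"], "rojo", "F", "P"))   # hypothetically "rojas"
    print(basic_noun_data("casa"))                    # hypothetically gender "F", number "S"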
| 29.929078 | 90 | 0.529384 | 569 | 4,220 | 3.773286 | 0.110721 | 0.084769 | 0.046111 | 0.044714 | 0.614811 | 0.576619 | 0.533768 | 0.518398 | 0.518398 | 0.518398 | 0 | 0.007465 | 0.301659 | 4,220 | 140 | 91 | 30.142857 | 0.721072 | 0 | 0 | 0.461538 | 0 | 0 | 0.077507 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068376 | false | 0 | 0 | 0 | 0.213675 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f60d57417f085366ece3370fedcd7871a28b809a | 1,746 | py | Python | setup.py | T-FitAndFat/toucan-connectors | 7d03454e4d06f5cb2e9c2c778d99dd655efd14a7 | [
"BSD-3-Clause"
] | null | null | null | setup.py | T-FitAndFat/toucan-connectors | 7d03454e4d06f5cb2e9c2c778d99dd655efd14a7 | [
"BSD-3-Clause"
] | null | null | null | setup.py | T-FitAndFat/toucan-connectors | 7d03454e4d06f5cb2e9c2c778d99dd655efd14a7 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
auth_deps = ['oauthlib', 'requests_oauthlib']
extras_require = {
'adobe': ['adobe_analytics'],
'azure_mssql': ['pyodbc'],
'dataiku': ['dataiku-api-client'],
'elasticsearch': ['elasticsearch'],
'facebook': ['facebook-sdk'],
'google_analytics': ['google-api-python-client', 'oauth2client'],
'google_big_query': ['pandas_gbq'],
'google_cloud_mysql': ['PyMySQL>=0.8.0'],
'google_my_business': ['google-api-python-client>=1.7.5'],
'google_spreadsheet': ['gspread>=3', 'oauth2client'],
'hive': ['pyhive[hive]'],
'http_api': auth_deps,
'mongo': ['pymongo>=3.6.1'],
'mssql': ['pymssql>=2.1.3'],
'mysql': ['PyMySQL>=0.8.0'],
'odata': auth_deps + ['tctc_odata'],
'oracle_sql': ['cx_Oracle>=6.2.1'],
'postgres': ['psycopg2>=2.7.4'],
'sap_hana': ['pyhdb>=0.3.4'],
'snowflake': ['snowflake-connector-python'],
'toucan_toco': ['toucan_client']
}
extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
install_requires = [
'aiohttp',
'jq',
'jinja2',
'pydantic==0.31.1',
'requests',
'tenacity',
'toucan_data_sdk'
]
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.6'
]
setup(name='toucan_connectors',
version='0.23.4',
description='Toucan Toco Connectors',
author='Toucan Toco',
author_email='dev@toucantoco.com',
url='https://github.com/ToucanToco/toucan-connectors',
license='BSD',
classifiers=classifiers,
packages=find_packages(),
install_requires=install_requires,
extras_require=extras_require,
include_package_data=True)
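# Illustrative install commands for the extras declared above:
#   pip install toucan_connectors[mongo]          # one connector's dependencies
#   pip install "toucan_connectors[postgres,mysql]"
#   pip install toucan_connectors[all]            # every optional dependency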
| 30.631579 | 69 | 0.627148 | 198 | 1,746 | 5.338384 | 0.535354 | 0.061495 | 0.028382 | 0.039735 | 0.028382 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027046 | 0.174112 | 1,746 | 56 | 70 | 31.178571 | 0.705964 | 0 | 0 | 0 | 0 | 0 | 0.4874 | 0.046392 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.019231 | 0 | 0.019231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f60dc5242dc79fcf961a2da0cfd7f24d3f799fd1 | 1,125 | py | Python | pytest_unittest_filter.py | un-def/pytest-unittest-filter | 7919aff50c6b86c60d2c808c18f5e973db33f338 | [
"MIT"
] | 4 | 2018-10-26T13:17:05.000Z | 2019-03-22T06:51:50.000Z | pytest_unittest_filter.py | un-def/pytest-unittest-filter | 7919aff50c6b86c60d2c808c18f5e973db33f338 | [
"MIT"
] | null | null | null | pytest_unittest_filter.py | un-def/pytest-unittest-filter | 7919aff50c6b86c60d2c808c18f5e973db33f338 | [
"MIT"
] | null | null | null | import pytest
from _pytest.unittest import UnitTestCase
__version__ = '0.2.1'
INI_OPTION_CLASSES = 'python_unittest_classes'
INI_OPTION_UNDERSCORE = 'python_unittest_exclude_underscore'
def pytest_addoption(parser):
parser.addini(
INI_OPTION_CLASSES,
type='args',
default=None,
help='prefixes or glob names for unittest.TestCase subclass discovery',
)
parser.addini(
INI_OPTION_UNDERSCORE,
type='bool',
default=False,
        help='exclude unittest.TestCase subclasses whose names start with an underscore',
)
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_pycollect_makeitem(collector, name):
outcome = yield
result = outcome.get_result()
if result is None or not isinstance(result, UnitTestCase):
return
if collector.config.getini(INI_OPTION_UNDERSCORE) and name.startswith('_'):
outcome.force_result(None)
return
if not collector.config.getini(INI_OPTION_CLASSES):
return
if not collector._matches_prefix_or_glob_option(INI_OPTION_CLASSES, name):
outcome.force_result(None)
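# Illustrative configuration: in pytest.ini (or the [pytest]/[tool:pytest]
# section of tox.ini / setup.cfg), e.g.
#
#   [pytest]
#   python_unittest_classes = Test *Suite
#   python_unittest_exclude_underscore = true
#
# Only unittest.TestCase subclasses whose names match one of the prefixes or
# globs are collected, and classes whose names start with "_" are skipped.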
| 28.125 | 79 | 0.714667 | 134 | 1,125 | 5.731343 | 0.432836 | 0.082031 | 0.083333 | 0.054688 | 0.231771 | 0.153646 | 0.153646 | 0.153646 | 0.153646 | 0.153646 | 0 | 0.003352 | 0.204444 | 1,125 | 39 | 80 | 28.846154 | 0.854749 | 0 | 0 | 0.290323 | 0 | 0 | 0.175111 | 0.050667 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.064516 | 0 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f60f64227a4b588c259406487b84cb9a51b06bfb | 5,058 | py | Python | tests/unit/schema/wrappers/test_field.py | nsky80/gapic-generator-python | 6dd7498438e87329c69a27ac57bb1693b02471d3 | [
"Apache-2.0"
] | 1 | 2019-08-15T05:41:02.000Z | 2019-08-15T05:41:02.000Z | tests/unit/schema/wrappers/test_field.py | nsky80/gapic-generator-python | 6dd7498438e87329c69a27ac57bb1693b02471d3 | [
"Apache-2.0"
] | null | null | null | tests/unit/schema/wrappers/test_field.py | nsky80/gapic-generator-python | 6dd7498438e87329c69a27ac57bb1693b02471d3 | [
"Apache-2.0"
] | 1 | 2022-01-23T12:29:11.000Z | 2022-01-23T12:29:11.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from google.api import field_behavior_pb2
from google.protobuf import descriptor_pb2
from gapic.schema import wrappers
def test_field_properties():
Type = descriptor_pb2.FieldDescriptorProto.Type
field = make_field(name='my_field', number=1, type=Type.Value('TYPE_BOOL'))
assert field.name == 'my_field'
assert field.number == 1
assert field.type.python_type == bool
def test_field_is_primitive():
Type = descriptor_pb2.FieldDescriptorProto.Type
primitive_field = make_field(type=Type.Value('TYPE_INT32'))
assert primitive_field.is_primitive
def test_field_proto_type():
Type = descriptor_pb2.FieldDescriptorProto.Type
primitive_field = make_field(type=Type.Value('TYPE_INT32'))
assert primitive_field.proto_type == 'INT32'
def test_field_not_primitive():
Type = descriptor_pb2.FieldDescriptorProto.Type
message = wrappers.MessageType(
fields={},
nested_messages={},
nested_enums={},
message_pb=descriptor_pb2.DescriptorProto(),
)
non_primitive_field = make_field(
type=Type.Value('TYPE_MESSAGE'),
type_name='bogus.Message',
message=message,
)
assert not non_primitive_field.is_primitive
def test_ident():
Type = descriptor_pb2.FieldDescriptorProto.Type
field = make_field(type=Type.Value('TYPE_BOOL'))
assert str(field.ident) == 'bool'
def test_ident_repeated():
Type = descriptor_pb2.FieldDescriptorProto.Type
REP = descriptor_pb2.FieldDescriptorProto.Label.Value('LABEL_REPEATED')
field = make_field(type=Type.Value('TYPE_BOOL'), label=REP)
assert str(field.ident) == 'Sequence[bool]'
def test_repeated():
REP = descriptor_pb2.FieldDescriptorProto.Label.Value('LABEL_REPEATED')
field = make_field(label=REP)
assert field.repeated
def test_not_repeated():
OPT = descriptor_pb2.FieldDescriptorProto.Label.Value('LABEL_OPTIONAL')
field = make_field(label=OPT)
assert not field.repeated
def test_required():
field = make_field()
field.options.Extensions[field_behavior_pb2.field_behavior].append(
field_behavior_pb2.FieldBehavior.Value('REQUIRED')
)
assert field.required
def test_not_required():
field = make_field()
assert not field.required
def test_ident_sphinx():
Type = descriptor_pb2.FieldDescriptorProto.Type
field = make_field(type=Type.Value('TYPE_BOOL'))
assert field.ident.sphinx == 'bool'
def test_ident_sphinx_repeated():
Type = descriptor_pb2.FieldDescriptorProto.Type
REP = descriptor_pb2.FieldDescriptorProto.Label.Value('LABEL_REPEATED')
field = make_field(type=Type.Value('TYPE_BOOL'), label=REP)
assert field.ident.sphinx == 'Sequence[bool]'
def test_type_primitives():
T = descriptor_pb2.FieldDescriptorProto.Type
assert make_field(type=T.Value('TYPE_FLOAT')).type.python_type == float
assert make_field(type=T.Value('TYPE_INT64')).type.python_type == int
assert make_field(type=T.Value('TYPE_BOOL')).type.python_type == bool
assert make_field(type=T.Value('TYPE_STRING')).type.python_type == str
assert make_field(type=T.Value('TYPE_BYTES')).type.python_type == bytes
def test_type_message():
T = descriptor_pb2.FieldDescriptorProto.Type
message = wrappers.MessageType(
fields={},
nested_messages={},
nested_enums={},
message_pb=descriptor_pb2.DescriptorProto(),
)
field = make_field(
type=T.Value('TYPE_MESSAGE'),
type_name='bogus.Message',
message=message,
)
assert field.type == message
def test_type_enum():
T = descriptor_pb2.FieldDescriptorProto.Type
enum = wrappers.EnumType(
values={},
enum_pb=descriptor_pb2.EnumDescriptorProto(),
)
field = make_field(
type=T.Value('TYPE_ENUM'),
type_name='bogus.Enumerable',
enum=enum,
)
assert field.type == enum
def test_type_invalid():
T = descriptor_pb2.FieldDescriptorProto.Type
with pytest.raises(TypeError):
make_field(type=T.Value('TYPE_GROUP')).type
def make_field(*, message=None, enum=None, **kwargs) -> wrappers.Field:
kwargs.setdefault('name', 'my_field')
kwargs.setdefault('number', 1)
kwargs.setdefault('type',
descriptor_pb2.FieldDescriptorProto.Type.Value('TYPE_BOOL'),
)
field_pb = descriptor_pb2.FieldDescriptorProto(**kwargs)
return wrappers.Field(field_pb=field_pb, message=message, enum=enum)
| 31.030675 | 79 | 0.720245 | 639 | 5,058 | 5.486698 | 0.211268 | 0.081574 | 0.169424 | 0.137193 | 0.49943 | 0.454934 | 0.414147 | 0.348831 | 0.324301 | 0.324301 | 0 | 0.010491 | 0.170819 | 5,058 | 162 | 80 | 31.222222 | 0.825465 | 0.108343 | 0 | 0.348214 | 0 | 0 | 0.078292 | 0 | 0 | 0 | 0 | 0 | 0.1875 | 1 | 0.151786 | false | 0 | 0.035714 | 0 | 0.196429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f612776e5655cc35f2ff3a2714bb44593975be1d | 4,241 | py | Python | src/expressivity.py | elijahc/vae | 5cd80518f876d4ca9e97de2ece7c266e3df09cb7 | [
"MIT"
] | null | null | null | src/expressivity.py | elijahc/vae | 5cd80518f876d4ca9e97de2ece7c266e3df09cb7 | [
"MIT"
] | null | null | null | src/expressivity.py | elijahc/vae | 5cd80518f876d4ca9e97de2ece7c266e3df09cb7 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd  # needed by salience() below
# from ray.dataframe import pd
# def pairwise_correlations( g_t)
def euclidean_metric( g_t,delta):
n_d = g_t.shape[0]
v = np.gradient( g_t, delta, axis=0 )
vv = np.empty( n_d, dtype=np.float32 )
for t in np.arange( n_d ):
vv[t] = np.dot( v[t], v[t].T )
return vv
def curvature( g_t, delta ):
n_d = g_t.shape[0]
v = np.gradient( g_t, delta, axis=0 )
vv = np.empty( n_d, dtype=np.float32 )
vhat = np.empty_like( v )
for t in np.arange( n_d ):
vv[t] = np.dot( v[t], v[t].T )
vhat[t] = v[t] / np.sqrt( vv[t] )
k = np.empty(n_d, dtype=np.float32)
a = np.gradient(v, delta, axis=0)
for i in np.arange( n_d ):
aa = np.dot( a[i], a[i].T )
va = np.dot( v[i], a[i].T )
k[i] = ( vv[i]**-(3/2))*np.sqrt(( vv[i]*aa)-va**2)
return k
def grassmanian_metric(g_t,delta):
k = curvature(g_t,delta)
g_E = euclidean_metric(g_t,delta)
return (k**2)*g_E
def curvature_length(g_t,delta,N=None):
# Number of examples, e.g. number of theta's
n_d = g_t.shape[0]
g_dt = np.ediff1d(delta)
g_E = euclidean_metric(g_t,delta)[:-1]
dL_E= np.sqrt(g_E)*g_dt
L_E = dL_E.sum()
if N is not None:
L_E = L_E/np.sqrt(N)
return L_E
def grassmanian_length( g_t, delta ):
n_d = g_t.shape[0]
v = np.gradient( g_t, delta, axis=0 )
vv = np.empty( n_d, dtype=np.float32 )
vhat = np.empty_like( v )
for t in np.arange( n_d ):
vv[t] = np.dot( v[t], v[t].T )
vhat[t] = v[t] / np.sqrt( vv[t] )
a_hat = np.gradient( vhat, delta, axis=0)
gauss_metric = np.array([np.dot(a_hat[i],a_hat[i].T) for i in np.arange( n_d )])
if isinstance(delta,float):
dG = np.sqrt(gauss_metric)[:-1]*delta
elif isinstance(delta,np.ndarray):
dG = np.sqrt(gauss_metric)[:-1]*np.ediff1d(delta)
return dG.sum()
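# Sanity sketch (hypothetical data): on a unit circle sampled over one period,
# the curvature should be ~1 everywhere and the Grassmannian length ~2*pi,
# independently of the circle's radius:
#
#     theta = np.linspace(0, 2 * np.pi, 1000)
#     circle = np.stack([np.cos(theta), np.sin(theta)], axis=1)
#     curvature(circle, theta[1] - theta[0])           # ~1 everywhere
#     grassmanian_length(circle, theta[1] - theta[0])  # ~2*pi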
class Expressivity():
def __init__(self,model,trajectory,delta,index=None):
# evaluate expressivity on a specific layer if index is provided
self.trajectory = trajectory
self.delta = delta
self.n_d = trajectory.shape[0]
self.model = model
if index is not None:
activation_functors = gen_activation_functors(model)
func = activation_functors[index]
self.g_t = np.squeeze(func([self.trajectory])[0])
else:
self.g_t = self.model.predict(self.trajectory,batch_size=32)
self.v = np.gradient(self.g_t,self.delta,axis=0)
self.vv = np.empty(self.n_d,dtype=np.float32)
self.vhat = np.empty_like(self.v)
for t in np.arange(self.n_d):
self.vv[t] = np.dot(self.v[t],self.v[t].T)
self.vhat[t] = self.v[t]/np.sqrt(self.vv[t])
def curvature(self):
        return curvature(self.g_t, self.delta)
def curve_length(self):
self.dL = np.sqrt(self.vv)*self.delta
return self.dL.sum()
def grassmanian_length(self):
a_hat = np.gradient(self.vhat,self.delta,axis=0)
gauss_metric = np.array([np.dot(a_hat[i],a_hat[i].T) for i in np.arange(self.n_d)])
self.dG = np.sqrt(gauss_metric)*self.delta
return self.dG.sum()
def salience(model,x_test,masks,x_iso=None):
funcs = gen_activation_functors(model)
outs = []
input_G = []
for i,mask in enumerate(masks):
x_traj = x_test[mask]
if x_iso is None:
# Calc embedding
print('Calculating Isomap embeddings...')
x_traj,x_iso = gen_sorted_isomap(x_traj,n_neighbors=20,n_components=1,n_jobs=1)
x_G = grassmanian_length(x_traj,delta=x_iso)
input_G.append(x_G)
g_t = [np.squeeze(f([x_traj])[0]) for f in funcs]
y_G = [grassmanian_length(g,1.0/len(g)) for g in g_t]
for l_idx,Y in enumerate(y_G):
rec = {
'Grassmanian Length':Y,
'x_G':x_G,
'Layer': l_idx+1,
'Digit':i+1,
'G_delta':Y-x_G
}
outs.append(rec)
return pd.DataFrame.from_records(outs)
def manifold_overlap():
pass | 30.510791 | 91 | 0.569441 | 713 | 4,241 | 3.221599 | 0.175316 | 0.019155 | 0.033522 | 0.019591 | 0.311711 | 0.281236 | 0.257292 | 0.226382 | 0.201132 | 0.201132 | 0 | 0.014177 | 0.284838 | 4,241 | 139 | 92 | 30.510791 | 0.743159 | 0.042679 | 0 | 0.192308 | 0 | 0 | 0.017263 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105769 | false | 0.009615 | 0.009615 | 0.009615 | 0.211538 | 0.009615 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f61517f627009ff7f376b1194a4e917a81ddfd11 | 5,243 | py | Python | userbot/modules/misc.py | FS-Project/FeRuBoT | 54cc12243ccbeb289ed37d691698fbd42fd8f740 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 3 | 2021-01-24T20:35:11.000Z | 2021-03-10T18:16:26.000Z | userbot/modules/misc.py | FS-Project/FeRuBoT | 54cc12243ccbeb289ed37d691698fbd42fd8f740 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/misc.py | FS-Project/FeRuBoT | 54cc12243ccbeb289ed37d691698fbd42fd8f740 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2021-03-10T18:16:25.000Z | 2021-03-10T18:16:25.000Z | # INFO : ini merupakan copy source code dari repo one4ubot, dan sudah mendapatkan izin dari pemilik.
# INFO : This is a copy of the source code from the One4ubot repo, and has the permission of the owner.
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# You can find misc modules, which dont fit in anything xD
""" Userbot module for other small commands. """
import io
import sys
from os import execl
from random import randint
from time import sleep
from userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP, bot
from userbot.events import register
from userbot.utils import time_formatter
@register(outgoing=True, pattern="^.random")
async def randomise(items):
""" For .random command, get a random item from the list of items. """
itemo = (items.text[8:]).split()
if len(itemo) < 2:
await items.edit(
"`2 item atau lebih diperlukan! Periksa .help random untuk info lebih lanjut.`"
)
return
index = randint(1, len(itemo) - 1)
await items.edit(
"**Query: **\n`" + items.text[8:] + "`\n**Output: **\n`" + itemo[index] + "`"
)
@register(outgoing=True, pattern="^.sleep ([0-9]+)$")
async def sleepybot(time):
""" For .sleep command, let the userbot snooze for a few second. """
counter = int(time.pattern_match.group(1))
await time.edit("`Saya merajuk dan tertidur...`")
if BOTLOG:
str_counter = time_formatter(counter)
await time.client.send_message(
BOTLOG_CHATID,
f"Anda sudah membuat bot untuk tidur💤 {str_counter}.",
)
sleep(counter)
await time.edit("`Oke, saya sudah bangun sekarang.`")
@register(outgoing=True, pattern="^.shutdown$")
async def killbot(shut):
"""For .shutdown command, shut the bot down."""
await shut.edit("`Selamat tinggal *Suara shutdown Windows XP*....`")
if BOTLOG:
await shut.client.send_message(BOTLOG_CHATID, "#SHUTDOWN \n" "Bot sudah meninggal, hidupkan lagi ")
await bot.disconnect()
@register(outgoing=True, pattern="^.restart$")
async def killdabot(reboot):
await reboot.edit("`*saya akan kembali sebentar lagi*`")
if BOTLOG:
await reboot.client.send_message(BOTLOG_CHATID, "#RESTART \n" "Bot di nyalakan ulang")
await bot.disconnect()
# Spin a new instance of bot
execl(sys.executable, sys.executable, *sys.argv)
# Shut the existing one down
exit()
# Copyright (c) Gegham Zakaryan | 2019
@register(outgoing=True, pattern="^.repeat (.*)")
async def repeat(rep):
cnt, txt = rep.pattern_match.group(1).split(" ", 1)
replyCount = int(cnt)
toBeRepeated = txt
replyText = toBeRepeated + "\n"
for i in range(0, replyCount - 1):
replyText += toBeRepeated + "\n"
await rep.edit(replyText)
@register(outgoing=True, pattern="^.repo$")
async def repo_is_here(wannasee):
""" For .repo command, just returns the repo URL. """
await wannasee.edit(
"[🔗Sentuh ini](https://github.com/FS-Project/FeRuBoT) untuk membuka repo FeRuBoT."
)
@register(outgoing=True, pattern="^.raw$")
async def raw(rawtext):
the_real_message = None
reply_to_id = None
if rawtext.reply_to_msg_id:
previous_message = await rawtext.get_reply_message()
the_real_message = previous_message.stringify()
reply_to_id = rawtext.reply_to_msg_id
else:
the_real_message = rawtext.stringify()
reply_to_id = rawtext.message.id
with io.BytesIO(str.encode(the_real_message)) as out_file:
out_file.name = "raw_message_data.txt"
await rawtext.edit("`Periksa log userbot untuk data pesan yang didekodekan!!`")
await rawtext.client.send_file(
BOTLOG_CHATID,
out_file,
force_document=True,
allow_cache=False,
reply_to=reply_to_id,
caption="`Berikut data pesan yang diterjemahkan!!`",
)
CMD_HELP.update(
{
"random": ".random <item1> <item2> ... <itemN>\
\nPenggunaan: Dapatkan item acak dari daftar item."
}
)
CMD_HELP.update(
{
"sleep": ".sleep <seconds>\
\nPenggunaan: Userbot juga lelah. Biarkan punyamu tertidur selama beberapa detik💤."
}
)
CMD_HELP.update(
{
"shutdown": ".shutdown\
\nPenggunaan: Terkadang Anda perlu mematikan bot Anda. Terkadang Anda hanya berharap\
mendengar suara shutdown Windows XP ... tetapi Anda tidak melakukannya."
}
)
CMD_HELP.update(
{
"repo": ".repo\
\nPenggunaan: Jika Anda penasaran dengan apa yang membuat userbot bekerja, inilah yang Anda butuhkan."
}
)
CMD_HELP.update(
{
"readme": ".readme\
\nPenggunaan: Berikan tautan untuk menyiapkan bot pengguna dan modulnya."
}
)
CMD_HELP.update(
{
"repeat": ".repeat <no.> <text>\
\nPenggunaan: Mengulangi teks beberapa kali. Jangan bingung ini dengan spam."
}
)
CMD_HELP.update(
{
"restart": ".restart\
\nPenggunaan: Mulai ulang bot!!"
}
)
CMD_HELP.update(
{
"raw": ".raw\
\nPenggunaan: Dapatkan data rinci yang diformat seperti JSON tentang pesan yang dibalas."
}
)
| 29.789773 | 107 | 0.662026 | 675 | 5,243 | 5.057778 | 0.402963 | 0.018453 | 0.030463 | 0.05536 | 0.049209 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006352 | 0.21934 | 5,243 | 175 | 108 | 29.96 | 0.827022 | 0.111005 | 0 | 0.129771 | 0 | 0.007634 | 0.160463 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.061069 | 0 | 0.068702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f617abbd37a250768e243f67f08bb7f3f81db933 | 1,675 | py | Python | util_scripts/test_zoom.py | ShuaiW/kaggle-heart | 022997f27add953c74af2b371c67d9d86cbdccc3 | [
"MIT"
] | 182 | 2016-03-15T01:51:29.000Z | 2021-04-21T09:49:05.000Z | util_scripts/test_zoom.py | weidezhang/kaggle-heart | 022997f27add953c74af2b371c67d9d86cbdccc3 | [
"MIT"
] | 1 | 2018-06-22T16:46:12.000Z | 2018-06-22T21:08:09.000Z | util_scripts/test_zoom.py | weidezhang/kaggle-heart | 022997f27add953c74af2b371c67d9d86cbdccc3 | [
"MIT"
] | 61 | 2016-03-15T00:58:28.000Z | 2020-03-06T22:00:41.000Z | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy.special import erf, erfinv
import cPickle as pickle
import glob
import os
import scipy
import scipy.ndimage.interpolation
#print glob.glob(os.path.expanduser("~/storage/metadata/kaggle-heart/predictions/j7_jeroen_ch.pkl"))
#predictions = pickle.load(open(glob.glob(os.path.expanduser("~/storage/metadata/kaggle-heart/predictions/j7_jeroen_ch.pkl"))[0]))["predictions"]
#scipy.ndimage.interpolation.zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, prefilter=True)
p = np.array(range(0,600), dtype='float32')
predictions = (erf( (p - 300)/50 )+1)/2
def zoom(array, zoom_factor):
result = np.ones(array.shape)
zoom = [1.0]*array.ndim
zoom[-1] = zoom_factor
zr = scipy.ndimage.interpolation.zoom(array,
zoom,
order=3,
mode='nearest',
prefilter=True)
result[...,:min(zr.shape[-1],array.shape[-1])] = zr[...,:min(zr.shape[-1],array.shape[-1])]
return result
fig = plt.figure()
mngr = plt.get_current_fig_manager()
# to put it into the upper left corner for example:
mngr.window.setGeometry(50, 100, 600, 300)
im1 = fig.gca().plot(p, predictions)
def init():
pp = predictions
im1[0].set_ydata(pp)
def animate(i):
z = float(i)/50
pp = zoom(predictions,z)
fig.suptitle("zoom %f"%z)
im1[0].set_ydata(pp)
return im1
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=100, interval=50)
#anim.save('my_animation.mp4')
plt.show() | 32.211538 | 145 | 0.640597 | 233 | 1,675 | 4.549356 | 0.454936 | 0.022642 | 0.070755 | 0.026415 | 0.207547 | 0.181132 | 0.181132 | 0.139623 | 0.139623 | 0.139623 | 0 | 0.039484 | 0.213731 | 1,675 | 52 | 146 | 32.211538 | 0.765376 | 0.25791 | 0 | 0.054054 | 0 | 0 | 0.016949 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.243243 | 0 | 0.378378 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f618ea5cffb6353292606a5b9b153b42064483ff | 3,523 | py | Python | train_liner.py | ALEXKIRNAS/Toxic-Comment-Classification-Challenge | 1f86d113f38c6c9eaca03ec33c6467709bf2d652 | [
"MIT"
] | 2 | 2018-04-07T19:52:09.000Z | 2018-04-24T11:37:58.000Z | train_liner.py | ALEXKIRNAS/Toxic-Comment-Classification-Challenge | 1f86d113f38c6c9eaca03ec33c6467709bf2d652 | [
"MIT"
] | null | null | null | train_liner.py | ALEXKIRNAS/Toxic-Comment-Classification-Challenge | 1f86d113f38c6c9eaca03ec33c6467709bf2d652 | [
"MIT"
] | null | null | null | import concurrent.futures
import click
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold
from models import NbSvmClassifier
from utils.constants import RANDOM_SEED, CLASS_NAMES
from utils.data_loader import tf_idf_vectors
cv_params = [
{'C': 0.7},
{'C': 0.25},
{'C': 0.27},
{'C': 0.25},
{'C': 0.25},
{'C': 0.25},
]
train_word_features, test_word_features, train, test = None, None, None, None
def training(train_indices, val_indices, class_name, params):
classifier = NbSvmClassifier(**params)
csr = train_word_features.tocsr()
X_train = csr[train_indices]
y_train = np.array(train[class_name])[train_indices]
X_test = csr[val_indices]
y_test = np.array(train[class_name])[val_indices]
classifier.fit(X_train, y_train)
train_proba = classifier.predict_proba(X_train)[:, 1]
val_proba = classifier.predict_proba(X_test)[:, 1]
sub_proba = classifier.predict_proba(test_word_features)[:, 1]
train_score = roc_auc_score(y_train, train_proba)
val_score = roc_auc_score(y_test, val_proba)
return train_score, val_score, val_proba, sub_proba, val_indices
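# Illustrative invocation (paths shown are the click defaults below):
#   python train_liner.py --train_df_path ./input/train.csv \
#       --test_df_path ./input/test.csv --stamp lr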
@click.command()
@click.option('--train_df_path', default='./input/train.csv')
@click.option('--test_df_path', default='./input/test.csv')
@click.option('--stamp', default='lr')
@click.option('--preprocess', default=False)
def main(train_df_path, test_df_path, stamp, preprocess):
global train_word_features, test_word_features, train, test
train = pd.read_csv(train_df_path).fillna(' ')
test = pd.read_csv(test_df_path).fillna(' ')
print('Create features')
train_word_features, test_word_features = tf_idf_vectors(train, test, preprocess)
print('Start training')
submission = pd.DataFrame.from_dict({'id': test['id']})
train_submission = pd.DataFrame.from_dict({'id': train['id']})
scores = []
for i, class_name in enumerate(CLASS_NAMES):
print('Class: %s' % class_name)
sub_probas = np.zeros(shape=(len(test),))
train_probas = np.zeros(shape=(len(train),))
kf = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED)
train_scores, val_scores = [], []
with concurrent.futures.ProcessPoolExecutor(max_workers=5) as executor:
futures = (executor.submit(training,
train_indices,
val_indices,
class_name,
cv_params[i])
for train_indices, val_indices in kf.split(train))
for future in concurrent.futures.as_completed(futures):
train_score, val_score, val_proba, sub_proba, val_indices = future.result()
train_scores.append(train_score)
val_scores.append(val_score)
train_probas[val_indices] += val_proba
sub_probas += sub_proba / 5.
scores.append(np.mean(val_scores))
print('\tTrain ROC-AUC: %s' % np.mean(train_scores))
print('\tVal ROC-AUC: %s' % np.mean(val_scores))
submission[class_name] = sub_probas
train_submission[class_name] = train_probas
submission.to_csv('%s.csv' % stamp, index=False)
train_submission.to_csv('%s.csv' % stamp, index=False)
print('Total: %s' % np.mean(scores))
if __name__ == '__main__':
main() | 33.552381 | 91 | 0.640931 | 460 | 3,523 | 4.619565 | 0.254348 | 0.045176 | 0.007529 | 0.007059 | 0.296941 | 0.2 | 0.149647 | 0.112941 | 0.041412 | 0.041412 | 0 | 0.008547 | 0.236162 | 3,523 | 105 | 92 | 33.552381 | 0.781122 | 0 | 0 | 0.052632 | 0 | 0 | 0.057321 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.118421 | 0 | 0.157895 | 0.078947 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f61be4718911e196ba9094bf4af6f045bc2cf0e9 | 4,527 | py | Python | tools/python/bluebottle_flow_reader.py | GediZhou/bluebottle-3.0 | 645e6cbe257ad4f65456652a50d49e5f3059564f | [
"Apache-2.0"
] | 11 | 2018-02-20T15:58:07.000Z | 2021-12-27T09:02:30.000Z | tools/python/bluebottle_flow_reader.py | groundcherry/bluebottle-2.0 | 7adc8782ad269f06ab0edb0111500907757bea50 | [
"Apache-2.0"
] | 3 | 2018-12-11T13:44:45.000Z | 2021-03-10T15:13:38.000Z | tools/python/bluebottle_flow_reader.py | groundcherry/bluebottle-2.0 | 7adc8782ad269f06ab0edb0111500907757bea50 | [
"Apache-2.0"
] | 6 | 2018-09-21T11:36:22.000Z | 2021-03-13T09:15:35.000Z | ################################################################################
################################## BLUEBOTTLE ##################################
################################################################################
#
# Copyright 2012 - 2018 Adam Sierakowski and Daniel Willen,
# The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Please contact the Johns Hopkins University to use Bluebottle for
# commercial and/or for-profit applications.
################################################################################
import sys, os, glob
import h5py as h5
import numpy
# Initialize the reader by passing the directory containing the CGNS files. This
# returns a list containing the rounded time values available for reading.
def init(sys):
# Parse command line args
if len(sys.argv) == 2:
basedir = sys.argv[1]
else:
print("Usage: requires ./path/to/some/output as command-line argument")
sys.exit()
if not basedir.endswith("/"):
basedir = basedir + "/"
global base
base = basedir
t_read = list()
files = glob.glob(base + "/flow-*.cgns")
if(len(files) == 0):
print("cannot find any flow-*.cgns files in", base)
sys.exit()
else:
for i in files:
start = i.find("flow-") # XXX breaks on dirs with "flow-" in name
t_read.append(i[start+5:-5])
return (sorted(t_read, key=float), basedir)
# Open a particular CGNS file using a time value in the list returned by init().
def open(time):
global f
infile = base + "/flow-" + time + ".cgns"
try:
f = h5.File(infile, 'r')
return f
except OSError:
f = None
print("file", infile, "does not exist")
return f
def close():
f.close()
# Read the time.
def read_time():
t1 = f["/Base/Zone0/Etc/Time/ data"][0]
try:
t2 = g["/Base/Zone0/Etc/Time/ data"][0]
except NameError:
return t1
else:
return (t1,t2)
# Read flow parameters
def read_flow_params():
rho_f = numpy.array(f["/Base/Zone0/Etc/Density/ data"])
nu = numpy.array(f["/Base/Zone0/Etc/KinematicViscosity/ data"])
return (rho_f, nu)
# Read grid extents
def read_flow_extents(basedir):
infile = basedir + "/grid.cgns"
try:
gr = h5.File(infile, 'r')
except OSError:
gr = None
print("file", infile, "does not exist")
sys.exit()
Nxyz = numpy.array(gr["/Base/Zone0/ data"])
Nx = Nxyz[1, 2]
Ny = Nxyz[1, 1]
Nz = Nxyz[1, 0]
# These are output as x[k,j,i]
x = numpy.array(gr["/Base/Zone0/GridCoordinates/CoordinateX/ data"])
y = numpy.array(gr["/Base/Zone0/GridCoordinates/CoordinateY/ data"])
z = numpy.array(gr["/Base/Zone0/GridCoordinates/CoordinateZ/ data"])
xs = numpy.min(x)
xe = numpy.max(x)
xl = xe - xs
ys = numpy.min(y)
ye = numpy.max(y)
yl = ye - ys
zs = numpy.min(z)
ze = numpy.max(z)
zl = ze - zs
return (Nx, Ny, Nz, xs, xe, xl, ys, ye, yl, zs, ze, zl)
# Read the flow positions.
def read_flow_position():
# Open grid file
#global gr
infile = base + "/grid.cgns"
try:
gr = h5.File(infile, 'r')
except OSError:
gr = None
print("file", infile, "does not exist")
sys.exit()
# These are output as x[k,j,i]
x = numpy.array(gr["/Base/Zone0/GridCoordinates/CoordinateX/ data"])
y = numpy.array(gr["/Base/Zone0/GridCoordinates/CoordinateY/ data"])
z = numpy.array(gr["/Base/Zone0/GridCoordinates/CoordinateZ/ data"])
x = x[0,0,:]
y = y[0,:,0]
z = z[:,0,0]
return (x,y,z)
# Read the particle velocities.
def read_flow_velocity():
u1 = numpy.array(f["/Base/Zone0/Solution/VelocityX/ data"])
v1 = numpy.array(f["/Base/Zone0/Solution/VelocityY/ data"])
w1 = numpy.array(f["/Base/Zone0/Solution/VelocityZ/ data"])
try:
u2 = numpy.array(g["/Base/Zone0/Solution/VelocityX/ data"])
v2 = numpy.array(g["/Base/Zone0/Solution/VelocityY/ data"])
w2 = numpy.array(g["/Base/Zone0/Solution/VelocityZ/ data"])
except NameError:
return (u1,v1,w1)
else:
return ((u1,v1,w1),(u2,v2,w2))
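# Hedged usage sketch (the output path is hypothetical):
#   import sys
#   import bluebottle_flow_reader as bb
#   times, basedir = bb.init(sys)        # reads ./path/to/output from argv
#   (Nx, Ny, Nz, xs, xe, xl, ys, ye, yl, zs, ze, zl) = bb.read_flow_extents(basedir)
#   bb.open(times[0])                    # select the first flow-*.cgns file
#   u, v, w = bb.read_flow_velocity()
#   bb.close()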
| 28.118012 | 80 | 0.608571 | 646 | 4,527 | 4.243034 | 0.321981 | 0.055819 | 0.030646 | 0.040861 | 0.326888 | 0.29077 | 0.197373 | 0.186063 | 0.186063 | 0.186063 | 0 | 0.019891 | 0.189309 | 4,527 | 160 | 81 | 28.29375 | 0.726975 | 0.276342 | 0 | 0.333333 | 0 | 0 | 0.282453 | 0.186712 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080808 | false | 0 | 0.030303 | 0 | 0.212121 | 0.050505 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f61d162134f520eaed2779c8b47fde1648352f14 | 954 | py | Python | exerc_25.py | marcelocmedeiros/EstruturaDeRepeticao | 23e917377e41083901a4ffbf3e0fda31e3a78982 | [
"MIT"
] | null | null | null | exerc_25.py | marcelocmedeiros/EstruturaDeRepeticao | 23e917377e41083901a4ffbf3e0fda31e3a78982 | [
"MIT"
] | null | null | null | exerc_25.py | marcelocmedeiros/EstruturaDeRepeticao | 23e917377e41083901a4ffbf3e0fda31e3a78982 | [
"MIT"
] | null | null | null | # Marcelo Campos de Medeiros
# ADS UNIFIP
# Estrutura de Repetição
# 25/03/2020
'''
25 - Write a program that asks n people for their age; at the end, the
program must check whether the class's average age is between 0 and 25,
between 26 and 60, or greater than 60, and then say whether the class is
young, adult or elderly, according to the computed average.
'''
print('=' * 40)
print('{:=^40}'.format(" 'MÉDIA DE IDADE DA TURMA' "))
print('=' * 40, '\n')
# read input
n = int(input('Quantas pessoas tem na turma: '))
soma = 0
# loop over the n people
for c in range(1, n + 1):
idade = int(input(f'Informe a idade da {c}° pessoa: '))
soma += idade
media = soma / n
# classify the average age
if 0 <= media <= 25:
    print(f'A média das idades é {media:.2f}, portanto essa turma é jovem!')
elif media <= 60:
    print(f'A média das idades é {media:.2f}, portanto essa turma é adulta!')
else:
    print(f'A média das idades é {media:.2f}, portanto essa turma é idosa!')
| 27.257143 | 78 | 0.659329 | 172 | 954 | 3.662791 | 0.453488 | 0.047619 | 0.033333 | 0.057143 | 0.301587 | 0.211111 | 0.211111 | 0.211111 | 0.168254 | 0.168254 | 0 | 0.050599 | 0.212788 | 954 | 34 | 79 | 28.058824 | 0.786951 | 0.360587 | 0 | 0 | 0 | 0 | 0.477234 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f61dc2bab26e07654cfc84f3ad968a59e5a75412 | 4,089 | py | Python | appengine/main.py | luiscielak/bl.ocks.org | c971a001fa9c1c65a64b641937b88d078376e77d | [
"BSD-3-Clause"
] | 6 | 2016-09-05T17:22:01.000Z | 2021-11-16T13:44:07.000Z | appengine/main.py | luiscielak/bl.ocks.org | c971a001fa9c1c65a64b641937b88d078376e77d | [
"BSD-3-Clause"
] | null | null | null | appengine/main.py | luiscielak/bl.ocks.org | c971a001fa9c1c65a64b641937b88d078376e77d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import wsgiref.handlers
import yaml
import re
from cgi import escape
from urllib import quote
from markdown import markdown
from google.appengine.ext import webapp
from google.appengine.api.urlfetch import fetch
from google.appengine.dist import use_library
use_library('django', '1.1')
from django.utils.encoding import smart_unicode
class GistRedirectHandler(webapp.RequestHandler):
def get(self, id):
self.redirect('/%s' % id)
class GistViewHandler(webapp.RequestHandler):
def get(self, id):
raw = fetch('http://gist.github.com/api/v1/yaml/%s' % id)
meta = yaml.load(raw.content)['gists'][0]
owner = meta[':owner'] or ""
description = meta[':description'] or ""
files = meta[':files'] or []
time = meta[':created_at']
title = "%s - %s" % (id, escape(description)) if description else id
self.response.out.write("""
<!DOCTYPE html>
<html>
<head>
<title>bl.ocks.org - %s</title>
<style type="text/css">
@import url("/style.css");
</style>
</head>
<body>
<div class="body">
<a href="/" class="about right">What’s all this then?</a>
<h1>block <a href="http://gist.github.com/%s">#%s</a></h1>
<h2>
<span class="description">%s</span>
by <a href="http://github.com/%s" class="owner">%s</a>
</h2>
<iframe marginwidth="0" marginheight="0" scrolling="no" src=\"/d/%s/\"></iframe>
<div class="readme">
""" % (title, id, id, escape(description), quote(owner), escape(owner), id))
# display the README
for f in files:
if re.match("^readme\.(md|mkd|markdown)$", f, re.I):
html = markdown(smart_unicode(fetch('http://gist.github.com/raw/%s/%s' % (id, quote(f))).content))
elif re.match("^readme(\.txt)?$", f, re.I):
html = "<pre>%s</pre>" % escape(fetch('http://gist.github.com/raw/%s/%s' % (id, quote(f))).content)
else:
html = None
if html:
self.response.out.write(html)
# display the creation time
if time:
self.response.out.write("<p class=\"time\">Created at %s.</p>" % time)
self.response.out.write("</div>")
# display the other files as source
for f in files:
if not re.match("^readme(\.[a-z]+)?$", f, re.I):
self.response.out.write('<script src="http://gist.github.com/%s.js?file=%s"></script>' % (id, f))
self.response.out.write("""
<a href="/" class="about">about bl.ocks.org</a>
</div>
</body>
</html>
""")
class GistDataHandler(webapp.RequestHandler):
def get(self, id, file):
if not file:
file = 'index.html'
raw = fetch('http://gist.github.com/raw/%s/%s' % (id, quote(file)))
if re.search("\.css$", file):
self.response.headers["Content-Type"] = "text/css"
elif re.search("\.js$", file):
self.response.headers["Content-Type"] = "text/javascript"
elif re.search("\.json$", file):
self.response.headers["Access-Control-Allow-Origin"] = "*"
self.response.headers["Content-Type"] = "application/json"
elif re.search("\.txt$", file):
self.response.headers["Content-Type"] = "text/plain"
self.response.out.write(raw.content)
def main():
application = webapp.WSGIApplication([
('/([0-9]+)', GistViewHandler),
('/([0-9]+)/', GistRedirectHandler),
('/d/([0-9]+)/(.*)', GistDataHandler)
], debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
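# Route summary (from the WSGI mapping above): "/1234" renders the gist page,
# "/1234/" redirects to "/1234", and "/d/1234/<file>" serves the raw file with
# a Content-Type inferred from its extension (the gist id is illustrative).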
| 32.452381 | 107 | 0.63414 | 569 | 4,089 | 4.530756 | 0.340949 | 0.055857 | 0.040729 | 0.054306 | 0.192397 | 0.138092 | 0.089992 | 0.045772 | 0.045772 | 0.045772 | 0 | 0.007162 | 0.180484 | 4,089 | 125 | 108 | 32.712 | 0.762161 | 0.158474 | 0 | 0.066667 | 0 | 0.044444 | 0.361777 | 0.037697 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.133333 | 0 | 0.211111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f61f02c078332b8fc380bf074e7157b43f51dde5 | 831 | py | Python | junk.py | blue-army/azuretools-docker | 255a17b0adf25ccaf9797079fd59c98b3fc48095 | [
"MIT"
] | null | null | null | junk.py | blue-army/azuretools-docker | 255a17b0adf25ccaf9797079fd59c98b3fc48095 | [
"MIT"
] | 1 | 2021-06-01T22:07:59.000Z | 2021-06-01T22:07:59.000Z | junk.py | blue-army/azuretools-docker | 255a17b0adf25ccaf9797079fd59c98b3fc48095 | [
"MIT"
] | null | null | null | import os, uuid, sys
from azure.storage.blob import BlockBlobService, PublicAccess
def run_sample():
try:
        # Create the BlockBlobService that is used to call the Blob service for the storage account
block_blob_service = BlockBlobService(account_name='planck', account_key='dXskxcS8enXEWXbk2K4dAfh5ktJkF/LHx9er5I2UdW44jKqT/AYWBqI7M2IzkoDUCvmbzHRWdV3nCXUmUn1WPQ==')
        # Name of the existing container whose blobs will be listed
        container_name = 'nutanix-pipeline'
# List the blobs in the container
print("\nList blobs in the container")
generator = block_blob_service.list_blobs(container_name)
for blob in generator:
print("\t Blob name: " + blob.name)
except Exception as e:
print(e)
# Main method.
if __name__ == '__main__':
run_sample() | 36.130435 | 172 | 0.700361 | 94 | 831 | 5.989362 | 0.553191 | 0.058615 | 0.056838 | 0.067496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020155 | 0.223827 | 831 | 23 | 173 | 36.130435 | 0.852713 | 0.216607 | 0 | 0 | 0 | 0 | 0.248841 | 0.136012 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.214286 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f621c4152cb5134441f2e29cffa3dab4475d0a8b | 2,078 | py | Python | examples/compare_methods_kld_air_quality.py | hsivan/automon | 222b17651533bdb2abce7de36a80156ab7b9cc21 | [
"BSD-3-Clause"
] | 1 | 2022-02-25T17:50:32.000Z | 2022-02-25T17:50:32.000Z | examples/compare_methods_kld_air_quality.py | hsivan/automon | 222b17651533bdb2abce7de36a80156ab7b9cc21 | [
"BSD-3-Clause"
] | null | null | null | examples/compare_methods_kld_air_quality.py | hsivan/automon | 222b17651533bdb2abce7de36a80156ab7b9cc21 | [
"BSD-3-Clause"
] | 1 | 2022-03-12T08:12:37.000Z | 2022-03-12T08:12:37.000Z | from automon import AutomonNode, AutomonCoordinator, RlvNode, RlvCoordinator
from test_utils.functions_to_monitor import func_kld
from test_utils.tune_neighborhood_size import tune_neighborhood_size
from test_utils.data_generator import DataGeneratorKldAirQuality
from test_utils.test_utils import start_test, end_test, run_test, write_config_to_file, read_config_file
from test_utils.stats_analysis_utils import plot_monitoring_stats
import logging
from test_utils.object_factory import get_objects
if __name__ == "__main__":
try:
test_folder = start_test("compare_methods_kld_air_quality")
'''conf = get_config(num_nodes=12, num_iterations=30000, sliding_window_size=200, d=20, error_bound=0.1,
slack_type=SlackType.Drift.value, sync_type=SyncType.LazyLRU.value, domain=(0, 1),
neighborhood_size=1.0, num_iterations_for_tuning=300)'''
data_folder = '../datasets/air_quality/'
conf = read_config_file(data_folder)
write_config_to_file(test_folder, conf)
data_generator = DataGeneratorKldAirQuality(num_iterations=conf["num_iterations"], num_nodes=conf["num_nodes"],
d=conf["d"], test_folder=test_folder, num_iterations_for_tuning=conf["num_iterations_for_tuning"], sliding_window_size=conf["sliding_window_size"])
logging.info("\n###################### Start KLD RLV test ######################")
data_generator.reset()
coordinator, nodes = get_objects(RlvNode, RlvCoordinator, conf, func_kld)
run_test(data_generator, coordinator, nodes, test_folder)
logging.info("\n###################### Start KLD AutoMon test ######################")
data_generator.reset()
coordinator, nodes = get_objects(AutomonNode, AutomonCoordinator, conf, func_kld)
tune_neighborhood_size(coordinator, nodes, conf, data_generator)
run_test(data_generator, coordinator, nodes, test_folder)
plot_monitoring_stats(test_folder)
finally:
end_test()
| 53.282051 | 199 | 0.701155 | 250 | 2,078 | 5.428 | 0.316 | 0.046426 | 0.05748 | 0.048637 | 0.168018 | 0.138541 | 0.138541 | 0.138541 | 0 | 0 | 0 | 0.01231 | 0.179018 | 2,078 | 38 | 200 | 54.684211 | 0.783118 | 0 | 0 | 0.142857 | 0 | 0 | 0.149832 | 0.096521 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f622c63183c5ab98481217bec0358ab5279f4cbd | 643 | py | Python | beecrowd-1007.py | jessicabessaoliveira/Python | c4732f5e9528a40721b7c16364e6310e7ed8d490 | [
"MIT"
] | null | null | null | beecrowd-1007.py | jessicabessaoliveira/Python | c4732f5e9528a40721b7c16364e6310e7ed8d490 | [
"MIT"
] | null | null | null | beecrowd-1007.py | jessicabessaoliveira/Python | c4732f5e9528a40721b7c16364e6310e7ed8d490 | [
"MIT"
] | null | null | null | # https://www.beecrowd.com.br/judge/pt/problems/view/1007
'''
Read four integer values A, B, C and D. Then compute and print the difference
between the product of A and B and the product of C and D, following the
formula: DIFERENCA = (A * B - C * D).
Input
The input file contains 4 integer values.
Output
Print the message DIFERENCA in all uppercase letters, as in the example below,
with one blank space before and after the equals sign.
'''
a = int(input())
b = int(input())
c = int(input())
d = int(input())
diferenca = a*b - c*d
print('DIFERENCA = {}'.format(diferenca))
'''
Using an f-string (Python 3.6+):
print(f'DIFERENCA = {diferenca}')
'''
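# Worked example: for inputs 5, 6, 7 and 8 the program prints
# "DIFERENCA = -26", since 5*6 - 7*8 = 30 - 56 = -26.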
| 27.956522 | 170 | 0.7014 | 109 | 643 | 4.137615 | 0.577982 | 0.070953 | 0.019956 | 0.053215 | 0.05765 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013109 | 0.169518 | 643 | 22 | 171 | 29.227273 | 0.831461 | 0.66874 | 0 | 0 | 0 | 0 | 0.10219 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f62329758cd5399f4ed0ac810b952ed937df5568 | 15,450 | py | Python | earthquake/generator.py | viktorsapozhok/earthquake-prediction | aca49d2c7e25deb385f98ef030c904ebae96135e | [
"MIT"
] | 8 | 2019-06-20T13:31:42.000Z | 2020-05-01T21:53:20.000Z | earthquake/generator.py | viktorsapozhok/earthquake-prediction | aca49d2c7e25deb385f98ef030c904ebae96135e | [
"MIT"
] | null | null | null | earthquake/generator.py | viktorsapozhok/earthquake-prediction | aca49d2c7e25deb385f98ef030c904ebae96135e | [
"MIT"
] | 1 | 2019-06-05T19:15:14.000Z | 2019-06-05T19:15:14.000Z | import argparse
from itertools import product
import warnings
from joblib import Parallel, delayed
import librosa
import numpy as np
import pandas as pd
from scipy import signal, stats
from sklearn.linear_model import LinearRegression
from tqdm import tqdm
from tsfresh.feature_extraction import feature_calculators
from earthquake import config
warnings.filterwarnings("ignore")
class FeatureGenerator(object):
"""Feature engineering.
"""
def __init__(
self,
path_to_store,
is_train=True,
n_rows=1e6,
n_jobs=1,
segment_size=150000
):
"""Decomposition of initial signal into the set of features.
Args:
path_to_store:
Path to .hdf store with original signal data.
is_train:
True, if creating the training set.
n_rows:
Amount of rows in training store.
n_jobs:
Amount of parallel jobs.
segment_size:
Amount of observations in each segment
"""
self.path_to_store = path_to_store
self.n_rows = n_rows
self.n_jobs = n_jobs
self.segment_size = segment_size
self.is_train = is_train
if self.is_train:
self.total = int(self.n_rows / self.segment_size)
self.store = None
self.keys = None
else:
self.store = pd.HDFStore(self.path_to_store, mode='r')
self.keys = self.store.keys()
self.total = len(self.keys)
def __del__(self):
if self.store is not None:
self.store.close()
def segments(self):
"""Returns generator object to iterate over segments.
"""
if self.is_train:
for i in range(self.total):
start = i * self.segment_size
stop = (i + 1) * self.segment_size
# read one segment of data from .hdf store
data = pd.read_hdf(self.path_to_store, start=start, stop=stop)
x = data['acoustic_data'].values
y = data['time_to_failure'].values[-1]
seg_id = 'train_' + str(i)
del data
yield seg_id, x, y
else:
for key in self.keys:
seg_id = key[1:]
x = self.store[key]['acoustic_data'].values
yield seg_id, x, -999
def get_features(self, x, y, seg_id):
x = pd.Series(x)
# fast fourier transform
zc = np.fft.fft(x)
# real part
realFFT = pd.Series(np.real(zc))
# imaginary part
imagFFT = pd.Series(np.imag(zc))
main_dict = self.features(x, y, seg_id)
r_dict = self.features(realFFT, y, seg_id)
i_dict = self.features(imagFFT, y, seg_id)
for k, v in r_dict.items():
if k not in ['target', 'seg_id']:
main_dict[f'fftr_{k}'] = v
for k, v in i_dict.items():
if k not in ['target', 'seg_id']:
main_dict[f'ffti_{k}'] = v
return main_dict
def features(self, x, y, seg_id):
feature_dict = dict()
feature_dict['target'] = y
feature_dict['seg_id'] = seg_id
# lists with parameters to iterate over them
percentiles = [
1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 99]
hann_windows = [
50, 150, 1500, 15000]
spans = [
300, 3000, 30000, 50000]
windows = [
10, 50, 100, 500, 1000, 10000]
borders = list(range(-4000, 4001, 1000))
peaks = [
10, 20, 50, 100]
coefs = [
1, 5, 10, 50, 100]
autocorr_lags = [
5, 10, 50, 100, 500, 1000, 5000, 10000]
# basic stats
feature_dict['mean'] = x.mean()
feature_dict['std'] = x.std()
feature_dict['max'] = x.max()
feature_dict['min'] = x.min()
# basic stats on absolute values
feature_dict['mean_change_abs'] = np.mean(np.diff(x))
feature_dict['abs_max'] = np.abs(x).max()
feature_dict['abs_mean'] = np.abs(x).mean()
feature_dict['abs_std'] = np.abs(x).std()
# geometric and harmonic means
feature_dict['hmean'] = stats.hmean(np.abs(x[np.nonzero(x)[0]]))
feature_dict['gmean'] = stats.gmean(np.abs(x[np.nonzero(x)[0]]))
# k-statistic and moments
for i in range(1, 5):
feature_dict[f'kstat_{i}'] = stats.kstat(x, i)
feature_dict[f'moment_{i}'] = stats.moment(x, i)
for i in [1, 2]:
feature_dict[f'kstatvar_{i}'] = stats.kstatvar(x, i)
# aggregations on various slices of data
for agg_type, slice_length, direction in product(
['std', 'min', 'max', 'mean'],
[1000, 10000, 50000],
['first', 'last']):
if direction == 'first':
feature_dict[f'{agg_type}_{direction}_{slice_length}'] = \
x[:slice_length].agg(agg_type)
elif direction == 'last':
feature_dict[f'{agg_type}_{direction}_{slice_length}'] = \
x[-slice_length:].agg(agg_type)
feature_dict['max_to_min'] = x.max() / np.abs(x.min())
feature_dict['max_to_min_diff'] = x.max() - np.abs(x.min())
feature_dict['count_big'] = len(x[np.abs(x) > 500])
feature_dict['sum'] = x.sum()
feature_dict['mean_change_rate'] = self.calc_change_rate(x)
# calc_change_rate on slices of data
for slice_length, direction in product(
[1000, 10000, 50000], ['first', 'last']):
if direction == 'first':
feature_dict[f'mean_change_rate_{direction}_{slice_length}'] = \
self.calc_change_rate(x[:slice_length])
elif direction == 'last':
feature_dict[f'mean_change_rate_{direction}_{slice_length}'] = \
self.calc_change_rate(x[-slice_length:])
# percentiles on original and absolute values
for p in percentiles:
feature_dict[f'percentile_{p}'] = np.percentile(x, p)
feature_dict[f'abs_percentile_{p}'] = np.percentile(np.abs(x), p)
feature_dict['trend'] = self.add_trend_feature(x)
feature_dict['abs_trend'] = self.add_trend_feature(x, abs_values=True)
feature_dict['mad'] = x.mad()
feature_dict['kurt'] = x.kurtosis()
feature_dict['skew'] = x.skew()
feature_dict['med'] = x.median()
feature_dict['Hilbert_mean'] = np.abs(signal.hilbert(x)).mean()
for hw in hann_windows:
feature_dict[f'Hann_window_mean_{hw}'] = \
(signal.convolve(x, signal.hann(hw), mode='same') / sum(signal.hann(hw))).mean()
feature_dict['classic_sta_lta1_mean'] = \
self.classic_sta_lta(x, 500, 10000).mean()
feature_dict['classic_sta_lta2_mean'] = \
self.classic_sta_lta(x, 5000, 100000).mean()
feature_dict['classic_sta_lta3_mean'] = \
self.classic_sta_lta(x, 3333, 6666).mean()
feature_dict['classic_sta_lta4_mean'] = \
self.classic_sta_lta(x, 10000, 25000).mean()
feature_dict['classic_sta_lta5_mean'] = \
self.classic_sta_lta(x, 50, 1000).mean()
feature_dict['classic_sta_lta6_mean'] = \
self.classic_sta_lta(x, 100, 5000).mean()
feature_dict['classic_sta_lta7_mean'] = \
self.classic_sta_lta(x, 333, 666).mean()
feature_dict['classic_sta_lta8_mean'] = \
self.classic_sta_lta(x, 4000, 10000).mean()
# exponential rolling statistics
ewma = pd.Series.ewm
for s in spans:
feature_dict[f'exp_Moving_average_{s}_mean'] = \
(ewma(x, span=s).mean(skipna=True)).mean(skipna=True)
feature_dict[f'exp_Moving_average_{s}_std'] = \
(ewma(x, span=s).mean(skipna=True)).std(skipna=True)
feature_dict[f'exp_Moving_std_{s}_mean'] = \
(ewma(x, span=s).std(skipna=True)).mean(skipna=True)
feature_dict[f'exp_Moving_std_{s}_std'] = \
(ewma(x, span=s).std(skipna=True)).std(skipna=True)
feature_dict['iqr'] = np.subtract(*np.percentile(x, [75, 25]))
feature_dict['iqr1'] = np.subtract(*np.percentile(x, [95, 5]))
feature_dict['ave10'] = stats.trim_mean(x, 0.1)
for slice_length, threshold in product(
[50000, 100000, 150000], [5, 10, 20, 50, 100]):
feature_dict[f'count_big_{slice_length}_threshold_{threshold}'] = \
(np.abs(x[-slice_length:]) > threshold).sum()
feature_dict[f'count_big_{slice_length}_less_threshold_{threshold}'] = \
(np.abs(x[-slice_length:]) < threshold).sum()
feature_dict['range_minf_m4000'] = \
feature_calculators.range_count(x, -np.inf, -4000)
feature_dict['range_p4000_pinf'] = \
feature_calculators.range_count(x, 4000, np.inf)
for i, j in zip(borders, borders[1:]):
feature_dict[f'range_{i}_{j}'] = feature_calculators.range_count(x, i, j)
for autocorr_lag in autocorr_lags:
feature_dict[f'autocorrelation_{autocorr_lag}'] = \
feature_calculators.autocorrelation(x, autocorr_lag)
feature_dict[f'c3_{autocorr_lag}'] = \
feature_calculators.c3(x, autocorr_lag)
for p in percentiles:
feature_dict[f'binned_entropy_{p}'] = \
feature_calculators.binned_entropy(x, p)
feature_dict['num_crossing_0'] = \
feature_calculators.number_crossing_m(x, 0)
for peak in peaks:
feature_dict[f'num_peaks_{peak}'] = feature_calculators.number_peaks(x, peak)
for c in coefs:
feature_dict[f'spkt_welch_density_{c}'] = \
list(feature_calculators.spkt_welch_density(x, [{'coeff': c}]))[0][1]
feature_dict[f'time_rev_asym_stat_{c}'] = \
feature_calculators.time_reversal_asymmetry_statistic(x, c)
for w in windows:
x_roll_std = x.rolling(w).std().dropna().values
x_roll_mean = x.rolling(w).mean().dropna().values
feature_dict[f'ave_roll_std_{w}'] = x_roll_std.mean()
feature_dict[f'std_roll_std_{w}'] = x_roll_std.std()
feature_dict[f'max_roll_std_{w}'] = x_roll_std.max()
feature_dict[f'min_roll_std_{w}'] = x_roll_std.min()
for p in percentiles:
feature_dict[f'percentile_roll_std_{p}_window_{w}'] = \
np.percentile(x_roll_std, p)
feature_dict[f'av_change_abs_roll_std_{w}'] = \
np.mean(np.diff(x_roll_std))
feature_dict[f'av_change_rate_roll_std_{w}'] = \
np.mean(np.nonzero((np.diff(x_roll_std) / x_roll_std[:-1]))[0])
feature_dict[f'abs_max_roll_std_{w}'] = \
np.abs(x_roll_std).max()
feature_dict[f'ave_roll_mean_{w}'] = x_roll_mean.mean()
feature_dict[f'std_roll_mean_{w}'] = x_roll_mean.std()
feature_dict[f'max_roll_mean_{w}'] = x_roll_mean.max()
feature_dict[f'min_roll_mean_{w}'] = x_roll_mean.min()
for p in percentiles:
feature_dict[f'percentile_roll_mean_{p}_window_{w}'] = \
np.percentile(x_roll_mean, p)
feature_dict[f'av_change_abs_roll_mean_{w}'] = \
np.mean(np.diff(x_roll_mean))
feature_dict[f'av_change_rate_roll_mean_{w}'] = \
np.mean(np.nonzero((np.diff(x_roll_mean) / x_roll_mean[:-1]))[0])
feature_dict[f'abs_max_roll_mean_{w}'] = \
np.abs(x_roll_mean).max()
# Mel-frequency cepstral coefficients (MFCCs)
x = x.values.astype('float32')
mfcc = librosa.feature.mfcc(y=x)
for i in range(len(mfcc)):
feature_dict[f'mfcc_{i}_avg'] = np.mean(np.abs(mfcc[i]))
# spectral features
feature_dict['spectral_centroid'] = \
np.mean(np.abs(librosa.feature.spectral_centroid(y=x)[0]))
feature_dict['zero_crossing_rate'] = \
np.mean(np.abs(librosa.feature.zero_crossing_rate(y=x)[0]))
feature_dict['spectral_flatness'] = \
np.mean(np.abs(librosa.feature.spectral_flatness(y=x)[0]))
feature_dict['spectral_contrast'] = \
np.mean(np.abs(librosa.feature.spectral_contrast(S=np.abs(librosa.stft(x)))[0]))
feature_dict['spectral_bandwidth'] = \
np.mean(np.abs(librosa.feature.spectral_bandwidth(y=x)[0]))
return feature_dict
def generate(self):
feature_list = []
res = Parallel(n_jobs=self.n_jobs, backend='threading')(
delayed(self.get_features)(x, y, s)
for s, x, y in tqdm(self.segments(),
total=self.total,
ncols=100,
desc='generating features',
ascii=True))
for r in res:
feature_list.append(r)
return pd.DataFrame(feature_list)
@staticmethod
def add_trend_feature(arr, abs_values=False):
idx = np.array(range(len(arr)))
if abs_values:
arr = np.abs(arr)
lr = LinearRegression()
lr.fit(idx.reshape(-1, 1), arr)
return lr.coef_[0]
@staticmethod
def classic_sta_lta(x, length_sta, length_lta):
sta = np.cumsum(x ** 2)
# Convert to float
sta = np.require(sta, dtype=np.float)
# Copy for LTA
lta = sta.copy()
# Compute the STA and the LTA
sta[length_sta:] = sta[length_sta:] - sta[:-length_sta]
sta /= length_sta
lta[length_lta:] = lta[length_lta:] - lta[:-length_lta]
lta /= length_lta
# Pad zeros
sta[:length_lta - 1] = 0
# Avoid division by zero by setting zero values to tiny float
dtiny = np.finfo(0.0).tiny
idx = lta < dtiny
lta[idx] = dtiny
return sta / lta
@staticmethod
def calc_change_rate(x):
change = (np.diff(x) / x[:-1]).values
change = change[np.nonzero(change)[0]]
change = change[~np.isnan(change)]
change = change[change != -np.inf]
change = change[change != np.inf]
return np.mean(change)
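# Hedged usage sketch (store path and sizes are illustrative):
#   fg = FeatureGenerator('./data/train.hdf', is_train=True,
#                         n_rows=int(6e8), n_jobs=4, segment_size=150000)
#   df = fg.generate()  # one row of engineered features per 150k-sample segment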
def main(args):
if args['train']:
fg = FeatureGenerator(
config.path_to_train_store,
is_train=True, n_rows=config.n_rows_all,
n_jobs=config.n_jobs, segment_size=config.segment_size)
else:
fg = FeatureGenerator(
config.path_to_test_store,
is_train=False, n_jobs=config.n_jobs)
    data = fg.generate()
    # write train and test features to separate files
    # (assumes config defines path_to_train alongside path_to_test)
    out_path = config.path_to_train if args['train'] else config.path_to_test
    data.to_csv(out_path, index=False, float_format='%.5f')
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(
description='features generator',
formatter_class=argparse.RawTextHelpFormatter)
arg_parser.add_argument(
'--train', action='store_true',
help='make train set')
arg_parser.add_argument(
'--test', action='store_true',
help='make test set')
main(vars(arg_parser.parse_args()))
| 36.438679 | 96 | 0.569968 | 2,021 | 15,450 | 4.095992 | 0.180604 | 0.11295 | 0.057985 | 0.015221 | 0.376419 | 0.282194 | 0.202464 | 0.133366 | 0.110292 | 0.087461 | 0 | 0.031812 | 0.302136 | 15,450 | 423 | 97 | 36.524823 | 0.735949 | 0.065566 | 0 | 0.084967 | 0 | 0 | 0.11877 | 0.057043 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03268 | false | 0 | 0.039216 | 0 | 0.094771 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6262ab64348d90863c6a1288397d28b818c68b1 | 6,611 | py | Python | main.py | haoyfan/SemiTime | 9604900bd5d67513d128d33d395bf78e5186b467 | [
"MIT"
] | 14 | 2020-10-28T11:29:43.000Z | 2022-03-31T02:20:27.000Z | main.py | haoyfan/SemiTime | 9604900bd5d67513d128d33d395bf78e5186b467 | [
"MIT"
] | 1 | 2022-03-31T08:16:35.000Z | 2022-03-31T08:16:35.000Z | main.py | haoyfan/SemiTime | 9604900bd5d67513d128d33d395bf78e5186b467 | [
"MIT"
] | 1 | 2021-06-21T16:30:43.000Z | 2021-06-21T16:30:43.000Z | # -*- coding: utf-8 -*-
import datetime
from optim.pretrain import *
import argparse
import torch
from optim.train import supervised_train
def parse_option():
    parser = argparse.ArgumentParser('argument for training')
    parser.add_argument('--save_freq', type=int, default=200,
                        help='save frequency')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='batch_size')
    parser.add_argument('--K', type=int, default=4, help='Number of augmentation for each sample')  # Bigger is better.
    parser.add_argument('--alpha', type=float, default=0.5, help='Past-future split point')
    parser.add_argument('--feature_size', type=int, default=64,
                        help='feature_size')
    parser.add_argument('--num_workers', type=int, default=16,
                        help='num of workers to use')
    parser.add_argument('--epochs', type=int, default=1000,
                        help='number of training epochs')
    parser.add_argument('--patience', type=int, default=200,
                        help='training patience')
    parser.add_argument('--aug_type', type=str, default='none', help='Augmentation type')
    parser.add_argument('--class_type', type=str, default='3C', help='Classification type')
    parser.add_argument('--gpu', type=str, default='0', help='gpu id')
    # optimization
    parser.add_argument('--learning_rate', type=float, default=0.01,
                        help='learning rate')
    # model dataset
    parser.add_argument('--dataset_name', type=str, default='MFPT',
                        choices=['CricketX',
                                 'UWaveGestureLibraryAll',
                                 'InsectWingbeatSound',
                                 'MFPT', 'XJTU',
                                 'EpilepticSeizure',
                                 ],
                        help='dataset')
    parser.add_argument('--nb_class', type=int, default=3,
                        help='class number')
    # ucr_path = '../datasets/UCRArchive_2018'
    parser.add_argument('--ucr_path', type=str, default='./datasets',
                        help='Data root for dataset.')
    parser.add_argument('--ckpt_dir', type=str, default='./ckpt/',
                        help='Data path for checkpoint.')
    # method
    parser.add_argument('--backbone', type=str, default='SimConv4')
    parser.add_argument('--model_name', type=str, default='SemiTime',
                        choices=['SupCE', 'SemiTime'], help='choose method')
    parser.add_argument('--label_ratio', type=float, default=0.1,
                        help='label ratio')
    opt = parser.parse_args()
    return opt


if __name__ == "__main__":
    import os
    import numpy as np

    opt = parse_option()
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
    exp = 'exp-cls'

    Seeds = [0, 1, 2, 3, 4]
    Runs = range(0, 10, 1)
    aug1 = ['magnitude_warp']
    aug2 = ['time_warp']

    if opt.model_name == 'SemiTime':
        model_paras = 'label{}_{}'.format(opt.label_ratio, opt.alpha)
    else:
        model_paras = 'label{}'.format(opt.label_ratio)

    if aug1 == aug2:
        opt.aug_type = [aug1]
    elif type(aug1) is list:
        opt.aug_type = aug1 + aug2
    else:
        opt.aug_type = [aug1, aug2]

    log_dir = './results/{}/{}/{}/{}'.format(
        exp, opt.dataset_name, opt.model_name, model_paras)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    file2print_detail_train = open("{}/train_detail.log".format(log_dir), 'a+')
    print(datetime.datetime.now(), file=file2print_detail_train)
    print("Dataset\tTrain\tTest\tDimension\tClass\tSeed\tAcc_label\tAcc_unlabel\tEpoch_max", file=file2print_detail_train)
    file2print_detail_train.flush()

    file2print = open("{}/test.log".format(log_dir), 'a+')
    print(datetime.datetime.now(), file=file2print)
    print("Dataset\tAcc_mean\tAcc_std\tEpoch_max",
          file=file2print)
    file2print.flush()

    file2print_detail = open("{}/test_detail.log".format(log_dir), 'a+')
    print(datetime.datetime.now(), file=file2print_detail)
    print("Dataset\tTrain\tTest\tDimension\tClass\tSeed\tAcc_max\tEpoch_max",
          file=file2print_detail)
    file2print_detail.flush()

    ACCs = {}
    MAX_EPOCHs_seed = {}
    ACCs_seed = {}
    for seed in Seeds:
        np.random.seed(seed)
        torch.manual_seed(seed)
        opt.ckpt_dir = './ckpt/{}/{}/{}/{}/{}/{}'.format(
            exp, opt.model_name, opt.dataset_name, '_'.join(opt.aug_type),
            model_paras, str(seed))
        if not os.path.exists(opt.ckpt_dir):
            os.makedirs(opt.ckpt_dir)
        print('[INFO] Running at:', opt.dataset_name)

        x_train, y_train, x_val, y_val, x_test, y_test, opt.nb_class, _ \
            = load_ucr2018(opt.ucr_path, opt.dataset_name)

        ACCs_run = {}
        MAX_EPOCHs_run = {}
        for run in Runs:
            ################
            ## Train #######
            ################
            if opt.model_name == 'SupCE':
                acc_test, epoch_max = supervised_train(
                    x_train, y_train, x_val, y_val, x_test, y_test, opt)
                acc_unlabel = 0
            elif 'SemiTime' in opt.model_name:
                acc_test, acc_unlabel, epoch_max = train_SemiTime(
                    x_train, y_train, x_val, y_val, x_test, y_test, opt)

            print("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
                opt.dataset_name, x_train.shape[0], x_test.shape[0], x_train.shape[1], opt.nb_class,
                seed, round(acc_test, 2), round(acc_unlabel, 2), epoch_max),
                file=file2print_detail_train)
            file2print_detail_train.flush()

            ACCs_run[run] = acc_test
            MAX_EPOCHs_run[run] = epoch_max

        ACCs_seed[seed] = round(np.mean(list(ACCs_run.values())), 2)
        MAX_EPOCHs_seed[seed] = np.max(list(MAX_EPOCHs_run.values()))

        print("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
            opt.dataset_name, x_train.shape[0], x_test.shape[0], x_train.shape[1], opt.nb_class,
            seed, ACCs_seed[seed], MAX_EPOCHs_seed[seed]),
            file=file2print_detail)
        file2print_detail.flush()

    ACCs_seed_mean = round(np.mean(list(ACCs_seed.values())), 2)
    ACCs_seed_std = round(np.std(list(ACCs_seed.values())), 2)
    MAX_EPOCHs_seed_max = np.max(list(MAX_EPOCHs_seed.values()))

    print("{}\t{}\t{}\t{}".format(
        opt.dataset_name, ACCs_seed_mean, ACCs_seed_std, MAX_EPOCHs_seed_max),
        file=file2print)
    file2print.flush()
| 36.932961 | 122 | 0.588262 | 821 | 6,611 | 4.501827 | 0.218027 | 0.046266 | 0.087392 | 0.00974 | 0.322511 | 0.225379 | 0.225379 | 0.176407 | 0.147457 | 0.118236 | 0 | 0.017795 | 0.260475 | 6,611 | 178 | 123 | 37.140449 | 0.738188 | 0.018152 | 0 | 0.123077 | 0 | 0 | 0.175109 | 0.048322 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007692 | false | 0 | 0.053846 | 0 | 0.069231 | 0.184615 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6306e5dbd5f299747a7bcacf37b900f61ec3359 | 2,731 | py | Python | boundingbox/distances.py | nickhalmagyi/boundingbox | 76aae366171b64ed938aa22912e0c9684fecd351 | [
"MIT"
] | 1 | 2019-05-10T13:07:40.000Z | 2019-05-10T13:07:40.000Z | boundingbox/distances.py | nickhalmagyi/BoundingBox | 76aae366171b64ed938aa22912e0c9684fecd351 | [
"MIT"
] | null | null | null | boundingbox/distances.py | nickhalmagyi/BoundingBox | 76aae366171b64ed938aa22912e0c9684fecd351 | [
"MIT"
] | null | null | null | from boundingbox.boundingbox import BoundingBox
from haversine import haversine
import numpy as np
import time
from importlib import reload
import boundingbox.validations; reload(boundingbox.validations)
from boundingbox.validations.numbers import validate_strictly_positive_integer, validate_positive_number
def get_points_within_distance(source, targets, length):
    """
    It is possible for a point to be within the bbox but further than length from source.
    Here we remove such points.
    :param source: lat-lon tuple
    :param targets: iterable of the form [(lat, lon), dist]
    :param length: positive number
    :return: list of targets whose distance to source is less than length.
    """
    validate_positive_number(length)
    boundingbox = BoundingBox(source, length)
    targets_in_bbox = boundingbox.get_points_within_bboxs(targets, boundingbox.bbox)
    targets_within_distance = targets_in_bbox[np.transpose(targets_in_bbox)[1] <= length]
    return targets_within_distance


def closest_points_are_within_length(targets_distance, N, length):
    """
    :param targets_distance: iterable of the form [(lat, lon), dist]
    :param N: strictly positive integer
    :param length: positive number
    :return: boolean, whether the distance from source to the N-th point in targets_distance is leq to length
    """
    return targets_distance[:N][-1][1] <= length


def get_closest_points(source_degrees, targets, N, length=None):
    validate_strictly_positive_integer(N)
    if N > len(targets):
        N = len(targets)
    boundingbox = BoundingBox(source_degrees, length)
    targets_filtered = boundingbox.filter_targets_in_bboxs(targets, boundingbox.bbox)
    targets_distance = boundingbox.compute_distances_from_source(source_degrees, targets_filtered)
    while (len(targets_distance) < N) or not closest_points_are_within_length(targets_distance, N, boundingbox.length):
        print('Rescaling box, consider using a larger initial length')
        # rescale
        if len(targets_distance) < N:
            boundingbox.length *= 1.25
        else:
            # set length to be the distance from source to the N-th point.
            Nth_point_distance = targets_distance[:N][-1][1]
            if Nth_point_distance <= boundingbox.length:
                boundingbox.length *= 1.25
            else:
                boundingbox.length = Nth_point_distance
        boundingbox.bbox = boundingbox.make_bounding_box(boundingbox.source_radians, boundingbox.length)
        targets_filtered = boundingbox.filter_targets_in_bboxs(targets, boundingbox.bbox)
        targets_distance = boundingbox.compute_distances_from_source(source_degrees, targets_filtered)
    return targets_distance[:N]
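
# Usage sketch (illustrative only; the coordinates are made up and the target
# format is assumed to be lat-lon tuples, mirroring the docstrings above):
#
#     paris = (48.8566, 2.3522)
#     targets = [(48.85, 2.35), (48.9, 2.4), (51.5, -0.13)]
#     closest = get_closest_points(paris, targets, N=2, length=10)
#     # -> the two nearest targets with their distances, as [(lat, lon), dist]
#
# If fewer than N targets fall inside the initial box, or the N-th one is not
# provably inside it, the while-loop above grows the box (by 25%, or straight
# to the N-th distance) and re-filters until the result is trustworthy.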
| 42.671875 | 119 | 0.737459 | 346 | 2,731 | 5.601156 | 0.263006 | 0.069659 | 0.057792 | 0.041796 | 0.379773 | 0.272446 | 0.272446 | 0.272446 | 0.194014 | 0.158927 | 0 | 0.004971 | 0.189674 | 2,731 | 63 | 120 | 43.349206 | 0.870764 | 0.220066 | 0 | 0.222222 | 0 | 0 | 0.025666 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.194444 | 0 | 0.361111 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f630cd0ac8ff9fcf66251d9af12794677c609746 | 709 | py | Python | apis/betterself/v1/constants.py | jeffshek/betterself | 51468253fc31373eb96e0e82189b9413f3d76ff5 | [
"MIT"
] | 98 | 2017-07-29T14:26:36.000Z | 2022-02-28T04:10:15.000Z | apis/betterself/v1/constants.py | jeffshek/betterself | 51468253fc31373eb96e0e82189b9413f3d76ff5 | [
"MIT"
] | 1,483 | 2017-05-30T00:05:56.000Z | 2022-03-31T12:37:06.000Z | apis/betterself/v1/constants.py | lawrendran/betterself | 51468253fc31373eb96e0e82189b9413f3d76ff5 | [
"MIT"
] | 13 | 2017-11-08T00:02:35.000Z | 2022-02-28T04:10:32.000Z | from events.models import SupplementLog
from supplements.models import IngredientComposition, Supplement, Ingredient, Measurement
from vendors.models import Vendor
VALID_REST_RESOURCES = [
    SupplementLog,
    Supplement,
    IngredientComposition,
    Ingredient,
    Measurement,
    Vendor
]

# A lot of the frontend (React) depends on a uniqueKey to render rows, so do something here
# that makes rendering all the rows a little bit easier. In most circumstances, for any
# resource that is directly backed by a model, the uuid is fine, but not all resources are
# Django models, so uniqueKey comes in handy.
UNIQUE_KEY_CONSTANT = 'uniqueKey'

DAILY_FREQUENCY = 'daily'
MONTHLY_FREQUENCY = 'monthly'
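
# Sketch of how UNIQUE_KEY_CONSTANT is meant to be used (hypothetical
# serializer code, not part of this module): a resource that is not backed by
# a Django model has no uuid, so a synthetic key is attached for React rows.
#
#     row = {'supplement': 'Caffeine', 'quantity': 100}
#     row[UNIQUE_KEY_CONSTANT] = '{}-{}'.format(row['supplement'], row['quantity'])
#     # -> {'supplement': 'Caffeine', 'quantity': 100, 'uniqueKey': 'Caffeine-100'}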
| 33.761905 | 119 | 0.782793 | 93 | 709 | 5.903226 | 0.677419 | 0.065574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169252 | 709 | 20 | 120 | 35.45 | 0.932088 | 0.440056 | 0 | 0 | 0 | 0 | 0.053435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f630fb65819047b6f622503fb05a79444a7a296e | 4,538 | py | Python | Data/DataExploitation.py | Someone-42/BonapioSQL | 1d059ed1796cfc33f1a580c6320b5e569db8cb5d | [
"MIT"
] | null | null | null | Data/DataExploitation.py | Someone-42/BonapioSQL | 1d059ed1796cfc33f1a580c6320b5e569db8cb5d | [
"MIT"
] | 1 | 2021-12-19T16:49:46.000Z | 2021-12-19T16:49:58.000Z | Data/DataExploitation.py | Someone-42/BonapioSQL | 1d059ed1796cfc33f1a580c6320b5e569db8cb5d | [
"MIT"
] | null | null | null | import os
_NamePath = "Data/noms2008nat_text.txt"
_SurnameGirlsPath = "Data/prenoms_donnees_filles.csv"
_SurnameBoysPath = "Data/prenoms_donnees_garcons.csv"
_FreqPathPrenom = "Data/freq_prenoms.csv"
_FreqPathNom = "Data/freq_noms.csv"
def _get_first_name_freq(file_name: str, d: dict) -> int:
    """Modifies the dictionary with new name frequencies - Returns the total added frequency"""
    total = 0
    # Opening the file
    with open(file_name) as fp:
        print("Reading first names :", file_name)
        fp.readlines(5 << 5)  # Removes the first useless lines
        fp.readline()
        lines = fp.readlines()
        for line in lines:
            print(line)
            _region_, first_name, nombre = line.rstrip().split(";")
            # first_name = first_name.casefold()  # see the comment below
            if first_name not in d:
                d[first_name] = int(nombre.replace(' ', ''))
            else:
                d[first_name] += int(nombre.replace(' ', ''))
            total += int(nombre.replace(' ', ''))
    return total


def _write_name_freqs_to_file(file_name: str, total: int, name_freqs: dict) -> None:
    with open(file_name, 'w') as f:
        print("Writing to names frequency file")
        f.write(f"{total}\n")
        for name, freq in name_freqs.items():
            f.write(f"{name};{freq}\n")


def _read_name_freqs_file(file_name: str) -> tuple:
    """Returns a tuple, with the total frequencies added, and the list of names and their respective frequencies"""
    name_freqs = []
    total = -1
    with open(file_name) as f:
        total = int(f.readline())
        for line in f.readlines():
            nom, freq = line.split(';')
            freq = int(freq)
            name_freqs.append((nom, freq))
    return (total, name_freqs)


def _create_prenoms_freq_file() -> None:
    """Creates a file containing the frequency of each prenom in the database for 2020"""
    total = 0
    dict_prenoms_et_nombre = {}
    print("Creating prenoms frequency file...")
    total += _get_first_name_freq(_SurnameGirlsPath, dict_prenoms_et_nombre)
    total += _get_first_name_freq(_SurnameBoysPath, dict_prenoms_et_nombre)
    _write_name_freqs_to_file(_FreqPathPrenom, total, dict_prenoms_et_nombre)


def _create_noms_freq_file(period: list = [(1991, 2000)]) -> None:
    """Creates a file containing the frequency of each name in the database for a given period"""
    assert all(p[0] < p[1] for p in period), "The first year of each period must be lower than the second"
    total = 0
    dict_noms_et_nombre = {}
    print("Creating noms frequency file...")
    with open(_NamePath) as f:
        print("Reading")
        liste_entete_avec_les_dates = f.readline().split("\t")
        period_indices = []
        for p in period:
            s = '_' + str(p[0]) + '_' + str(p[1])
            ind = liste_entete_avec_les_dates.index(s)
            assert ind > 0, "The period is not in the database"  # bigger than 0 because the first index is the names column
            period_indices.append(ind)
        lines = f.readlines()
        for line in lines:
            name_and_dates_freq = line.split('\t')
            freq = sum(int(name_and_dates_freq[i]) for i in period_indices)  # Gathers the sum of frequencies over every period selected
            name = name_and_dates_freq[0]
            dict_noms_et_nombre[name] = freq
            total += freq
    _write_name_freqs_to_file(_FreqPathNom, total, dict_noms_et_nombre)


def get_prenoms(limit: int = None) -> tuple:
    """Returns a tuple, with the total frequency, and a list of first names with their frequency"""
    freq_compiled_firstnames_exists = os.path.exists(_FreqPathPrenom)
    freq_boys_exists = os.path.exists(_SurnameBoysPath)
    freq_girls_exists = os.path.exists(_SurnameGirlsPath)
    if not freq_compiled_firstnames_exists:
        if freq_boys_exists and freq_girls_exists:
            _create_prenoms_freq_file()
        else:
            raise FileNotFoundError("A file for first names (boys or girls) is missing, cannot compile name frequencies")
    return _read_name_freqs_file(_FreqPathPrenom)


def get_noms(limit: int = None, period: list = [(1991, 2000)]) -> tuple:
    """Returns the total frequency and a list of names with their respective frequencies"""
    if not os.path.exists(_FreqPathNom):
        if os.path.exists(_NamePath):
            _create_noms_freq_file(period)
        else:
            raise FileNotFoundError("The file containing names does not exist")
    return _read_name_freqs_file(_FreqPathNom)
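
# Illustrative note on the frequency-file format written and read above
# (inferred from _write_name_freqs_to_file/_read_name_freqs_file; the numbers
# below are made up): the first line holds the grand total, then one
# "name;frequency" pair per line:
#
#     812345
#     MARTIN;12000
#     BERNARD;9500
#
# so a typical call looks like:
#
#     total, name_freqs = get_noms(period=[(1991, 2000)])
#     most_common = max(name_freqs, key=lambda nf: nf[1])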
| 41.254545 | 132 | 0.659982 | 613 | 4,538 | 4.628059 | 0.239804 | 0.034896 | 0.021149 | 0.026789 | 0.196687 | 0.070497 | 0.052168 | 0.031019 | 0.031019 | 0 | 0 | 0.010718 | 0.239312 | 4,538 | 109 | 133 | 41.633028 | 0.811124 | 0.161084 | 0 | 0.095238 | 0 | 0 | 0.132626 | 0.028912 | 0 | 0 | 0 | 0 | 0.02381 | 1 | 0.083333 | false | 0 | 0.011905 | 0 | 0.142857 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6323866975c2773aa564a86869863a47d476760 | 1,126 | py | Python | backend/moves/signals.py | mnieber/lindyscience | 468160aa6da42f45d8c37a2141a077a48410f81d | [
"MIT"
] | null | null | null | backend/moves/signals.py | mnieber/lindyscience | 468160aa6da42f45d8c37a2141a077a48410f81d | [
"MIT"
] | 21 | 2020-02-11T23:50:05.000Z | 2022-02-27T17:44:29.000Z | backend/moves/signals.py | mnieber/lindyscience | 468160aa6da42f45d8c37a2141a077a48410f81d | [
"MIT"
] | null | null | null | from django.dispatch import receiver
from django_rtk.signals import account_activated
from moves import models
from profiles.models import Profile, ProfileToMoveList
@receiver(account_activated)
def create_profile_on_activation(sender, user, request, **kwargs):
    trash = models.MoveList(
        role="trash",
        name="Trash",
        slug="trash",
        is_private=True,
        description="",
        owner=user,
    )
    trash.save()

    drafts = models.MoveList(
        role="drafts",
        name="Drafts",
        slug="drafts",
        is_private=True,
        description="",
        owner=user,
    )
    drafts.save()

    moves = models.MoveList(
        role="",
        name="Moves",
        slug="moves",
        is_private=False,
        description="",
        owner=user,
    )
    moves.save()

    profile = Profile(
        owner=user, recent_move_url="lists/%s/%s" % (user.username, moves.name)
    )
    profile.save()

    for (idx, move_list) in enumerate([trash, drafts, moves]):
        p2m = ProfileToMoveList(profile=profile, move_list=move_list, order=idx + 1)
        p2m.save()
| 23.458333 | 84 | 0.60302 | 122 | 1,126 | 5.45082 | 0.401639 | 0.054135 | 0.081203 | 0.07218 | 0.099248 | 0.099248 | 0 | 0 | 0 | 0 | 0 | 0.003667 | 0.273535 | 1,126 | 47 | 85 | 23.957447 | 0.809291 | 0 | 0 | 0.2 | 0 | 0 | 0.047957 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.1 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6347a3ec7983daa3f16a31ca1dbd8d5e0049877 | 5,131 | py | Python | Assets/Python/Victories.py | dguenms/Dawn-of-Civilization | 1c4f510af97a869637cddb4c0859759158cea5ce | [
"MIT"
] | 93 | 2015-11-20T04:13:36.000Z | 2022-03-24T00:03:08.000Z | Assets/Python/Victories.py | dguenms/Dawn-of-Civilization | 1c4f510af97a869637cddb4c0859759158cea5ce | [
"MIT"
] | 206 | 2015-11-09T00:27:15.000Z | 2021-12-04T19:05:18.000Z | Assets/Python/Victories.py | dguenms/Dawn-of-Civilization | 1c4f510af97a869637cddb4c0859759158cea5ce | [
"MIT"
] | 117 | 2015-11-08T02:43:46.000Z | 2022-02-12T06:29:00.000Z | from Core import *
from StoredData import data
from Events import handler


### SCENARIO SETUP ###

lLostIn1700AD = [iChina, iIndia, iTamils, iKorea, iVikings, iTurks, iSpain, iHolyRome, iPoland, iPortugal, iMughals, iOttomans, iThailand]
lWonIn1700AD = [(iIran, 0), (iJapan, 0), (iFrance, 0), (iCongo, 0), (iNetherlands, 1)]


### DELAYED IMPORT ###

dHistoricalGoals = None
dReligiousGoals = None
dPaganGoals = None


@handler("fontsLoaded")
def onFontsLoaded():
    from HistoricalVictory import dGoals as dDefinedHistoricalGoals
    global dHistoricalGoals
    dHistoricalGoals = dDefinedHistoricalGoals

    from ReligiousVictory import dGoals as dDefinedReligiousGoals
    global dReligiousGoals
    dReligiousGoals = dDefinedReligiousGoals

    from ReligiousVictory import dAdditionalPaganGoal
    global dPaganGoals
    dPaganGoals = dAdditionalPaganGoal


def getHistoricalGoals(iPlayer):
    return list(dHistoricalGoals[civ(iPlayer)])


def getReligiousGoals(iPlayer):
    iStateReligion = player(iPlayer).getStateReligion()
    if iStateReligion >= 0:
        return list(dReligiousGoals[iStateReligion])
    elif player(iPlayer).isStateReligion():
        return concat(dReligiousGoals[iPaganVictory], dPaganGoals[infos.civ(civ(iPlayer)).getPaganReligion()])
    else:
        return dReligiousGoals[iSecularVictory]


### GOAL CHECKS ###

class HistoricalVictoryCallback(object):
    def stateChange(self, goal):
        if goal.succeeded():
            goal.announceSuccess()
            iCount = count(goal.succeeded() for goal in data.players[goal.iPlayer].historicalGoals)
            if iCount == 2:
                self.goldenAge(goal.iPlayer)
            elif iCount == 3:
                self.victory(goal.iPlayer)
        elif goal.failed():
            goal.announceFailure()

    def goldenAge(self, iPlayer):
        data.players[iPlayer].bLaunchHistoricalGoldenAge = True

    def victory(self, iPlayer):
        if game.getWinner() == -1:
            game.setWinner(iPlayer, VictoryTypes.VICTORY_HISTORICAL)


class ReligiousVictoryCallback(object):
    def check(self, goal):
        if goal:
            iCount = count(goal for goal in data.players[goal.iPlayer].religiousGoals)
            if iCount == 3:
                self.victory(goal.iPlayer)

    def victory(self, iPlayer):
        if game.getWinner() == -1:
            game.setWinner(iPlayer, VictoryTypes.VICTORY_RELIGIOUS)


historicalVictoryCallback = HistoricalVictoryCallback()
religiousVictoryCallback = ReligiousVictoryCallback()


### SETUP ###

def createHistoricalGoals(iPlayer):
    goals = [goal.activate(iPlayer, historicalVictoryCallback) for goal in getHistoricalGoals(iPlayer)]
    goals = setupScenario(iPlayer, goals)
    return goals


def createReligiousGoals(iPlayer):
    return [goal.passivate(iPlayer, religiousVictoryCallback) for goal in getReligiousGoals(iPlayer)]


def disable(iPlayer=None):
    if iPlayer is None:
        iPlayer = active()
    for goal in data.players[iPlayer].historicalGoals + data.players[iPlayer].religiousGoals:
        goal.deactivate()
    data.players[iPlayer].historicalGoals = []
    data.players[iPlayer].religiousGoals = []


def switchReligiousGoals(iPlayer):
    for goal in data.players[iPlayer].religiousGoals:
        goal.deactivate()
    data.players[iPlayer].religiousGoals = createReligiousGoals(iPlayer)


def setupScenario(iPlayer, goals):
    iCiv = civ(iPlayer)
    if scenario() == i1700AD:
        if iCiv in lLostIn1700AD:
            for goal in goals:
                goal.fail()
        for iGoal in [iGoal for iGoalCiv, iGoal in lWonIn1700AD if iGoalCiv == iCiv]:
            goals[iGoal].succeed()
        # setup English tech goal
        if iCiv == iEngland:
            goals[2].accumulate(4, iRenaissance)
        # setup Congo slave trade goal
        if iCiv == iCongo:
            goals[1].accumulate(500)
    return goals


### GOLDEN AGE ###

def goldenAge(iPlayer):
    iGoldenAgeTurns = player(iPlayer).getGoldenAgeLength()
    player(iPlayer).changeGoldenAgeTurns(iGoldenAgeTurns)

    message(iPlayer, "TXT_KEY_UHV_INTERMEDIATE", color=iPurple)

    if player(iPlayer).isHuman():
        for iOtherPlayer in players.major().alive().without(iPlayer):
            player(iOtherPlayer).AI_changeAttitudeExtra(iPlayer, -2)


@handler("GameStart")
def setup():
    iPlayer = active()
    data.players[iPlayer].historicalGoals = createHistoricalGoals(iPlayer)
    data.players[iPlayer].religiousGoals = createReligiousGoals(iPlayer)


@handler("switch")
def onSwitch(iPrevious, iCurrent):
    for goal in data.players[iPrevious].historicalGoals + data.players[iPrevious].religiousGoals:
        goal.deactivate()

    data.players[iPrevious].historicalGoals = []
    data.players[iPrevious].religiousGoals = []

    data.players[iCurrent].historicalGoals = createHistoricalGoals(iCurrent)
    data.players[iCurrent].religiousGoals = createReligiousGoals(iCurrent)


@handler("civicChanged")
def onCivicChanged(iPlayer, iOldCivic, iNewCivic):
    if iPlayer == active() and infos.civic(iOldCivic).isStateReligion() != infos.civic(iNewCivic).isStateReligion():
        switchReligiousGoals(iPlayer)


@handler("playerChangeStateReligion")
def onStateReligionChanged(iPlayer):
    if iPlayer == active():
        switchReligiousGoals(iPlayer)


@handler("EndPlayerTurn")
def checkHistoricalGoldenAge(iGameTurn, iPlayer):
    if data.players[iPlayer].bLaunchHistoricalGoldenAge:
        data.players[iPlayer].bLaunchHistoricalGoldenAge = False
goldenAge(iPlayer) | 27.586022 | 138 | 0.764958 | 519 | 5,131 | 7.55106 | 0.308285 | 0.05333 | 0.050523 | 0.016586 | 0.193672 | 0.187803 | 0.142894 | 0.127073 | 0.071447 | 0.038785 | 0 | 0.008473 | 0.125901 | 5,131 | 186 | 139 | 27.586022 | 0.865329 | 0.022413 | 0 | 0.142857 | 0 | 0 | 0.020068 | 0.009833 | 0 | 0 | 0 | 0 | 0 | 1 | 0.159664 | false | 0.008403 | 0.05042 | 0.016807 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f634d3bb4f093f386bc4f87d452b8f879ce45cb6 | 8,015 | py | Python | src/cpePaser/day_extract.py | MAE-M/PotentiallyInactiveCpeAnalysisTool | 58f897fb45437ff72a6db4d490f364061d779c50 | [
"Apache-2.0"
] | null | null | null | src/cpePaser/day_extract.py | MAE-M/PotentiallyInactiveCpeAnalysisTool | 58f897fb45437ff72a6db4d490f364061d779c50 | [
"Apache-2.0"
] | null | null | null | src/cpePaser/day_extract.py | MAE-M/PotentiallyInactiveCpeAnalysisTool | 58f897fb45437ff72a6db4d490f364061d779c50 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Huawei Technologies Co., Ltd.
# foss@huawei.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from typing import Dict, List

import numpy as np
import pandas as pd
from collections import Counter

from src.compress import compress
# Logger
from src.logger_setting.my_logger import get_logger
from src.setting import setting

LOGGER = get_logger()


def groupby_calc(df):
    df['esn'] = df['esn'].astype('str')
    df = df.groupby(['esn'])
    return df


def calc_total(series):
    series = series.values
    count = 0
    for d in range(len(series)):
        if d < len(series) - 1:
            if pd.isna(series[d]) or pd.isna(series[d + 1]):
                continue
            if float(series[d]) <= float(series[d + 1]):
                count += float(series[d + 1]) - float(series[d])
            else:
                count += float(series[d + 1])
    return count


def is_active(series):
    series = calc_total(series)
    if float(series) / setting.mb > 10:
        return 1
    else:
        return 0


def get_max(series):
    if series:
        return np.max(series)
    else:
        return setting.INVALID_VALUE


def get_min(series):
    if series:
        return np.min(series)
    else:
        return setting.INVALID_VALUE


def get_avg(values, counts):
    count = sum(counts) if type(counts) == list else counts
    if count == 0:
        return setting.INVALID_VALUE
    else:
        return sum(values) / count if type(values) == list else values / count


def get_avg_max_min(df, avg_name, max_name, min_name, counts):
    avg_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[avg_name].values))
    sum_value = get_sum(avg_list)
    cnt = get_sum(list(df[counts].values))
    avg = sum_value / cnt if cnt != 0 else setting.INVALID_VALUE
    max_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[max_name].values))
    max_value = get_max(max_list)
    min_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[min_name].values))
    min_value = get_min(min_list)
    return {avg_name: avg,
            max_name: max_value,
            min_name: min_value}


def get_sum(series):
    if series:
        return np.sum(series)
    else:
        return setting.INVALID_VALUE


def get_std(series):
    if series:
        return np.std(series)
    else:
        return setting.INVALID_VALUE


def get_all_day():
    all_day_file = compress.get_all_csv_file(os.path.join(setting.data_path, 'extractData'))
    day_list = []
    for file in all_day_file:
        day_list.append(os.path.split(file)[1].split("\\")[-1].split('_')[0])
    return list(set(day_list))


def merge_day_data(day_dict: Dict[str, List[str]]):
    for day in day_dict.keys():
        file_list: List[str] = day_dict.get(day)
        df = pd.concat(pd.read_csv(file, error_bad_lines=False, index_col=False) for file in file_list)
        df.columns = setting.parameter_json["extract_data_columns"]
        df = df.sort_values('collectTime', ascending=True)
        # Turn -9999 into NaN. Values that were originally empty already became NaN
        # when they were read in, so all empty values and -9999 end up as NaN.
        df = df.replace(setting.INVALID_VALUE, np.nan)
        grouped = groupby_calc(df).agg(
            MaxRSRP=pd.NamedAgg(column='RSRP', aggfunc=max),
            MinRSRP=pd.NamedAgg(column='RSRP', aggfunc=min),
            AvgRSRP=pd.NamedAgg(column='RSRP', aggfunc=sum),
            CntRSRP=pd.NamedAgg(column='RSRP', aggfunc="count"),
            MaxCQI=pd.NamedAgg(column='CQI', aggfunc=max),
            MinCQI=pd.NamedAgg(column='CQI', aggfunc=min),
            AvgCQI=pd.NamedAgg(column='CQI', aggfunc=sum),
            CntCQI=pd.NamedAgg(column='CQI', aggfunc="count"),
            MaxRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=max),
            MinRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=min),
            AvgRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=sum),
            CntRSRQ=pd.NamedAgg(column='RSRQ', aggfunc="count"),
            MaxRSSI=pd.NamedAgg(column='RSSI', aggfunc=max),
            MinRSSI=pd.NamedAgg(column='RSSI', aggfunc=min),
            AvgRSSI=pd.NamedAgg(column='RSSI', aggfunc=sum),
            CntRSSI=pd.NamedAgg(column='RSSI', aggfunc="count"),
            MaxSINR=pd.NamedAgg(column='SINR', aggfunc=max),
            MinSINR=pd.NamedAgg(column='SINR', aggfunc=min),
            AvgSINR=pd.NamedAgg(column='SINR', aggfunc=sum),
            CntSINR=pd.NamedAgg(column='SINR', aggfunc="count"),
            TotalDownload=pd.NamedAgg(column='TotalDownload', aggfunc=calc_total),
            TotalUpload=pd.NamedAgg(column='TotalUpload', aggfunc=calc_total),
            TotalConnectTime=pd.NamedAgg(column='TotalConnectTime', aggfunc=calc_total),
            ModelName=pd.NamedAgg(column='ModelName', aggfunc=lambda x: x.iloc[-1]),
            IMSI=pd.NamedAgg(column='IMSI', aggfunc=lambda x: x.iloc[-1]),
            IMEI=pd.NamedAgg(column='IMEI', aggfunc=lambda x: x.iloc[-1]),
            MSISDN=pd.NamedAgg(column='MSISDN', aggfunc=lambda x: x.iloc[-1]),
            isActive=pd.NamedAgg(column='TotalDownload', aggfunc=is_active),
            AvgDlThroughput=pd.NamedAgg(column='MaxDLThroughput', aggfunc=sum),
            CntDlThroughput=pd.NamedAgg(column='MaxDLThroughput', aggfunc="count"),
            AvgUlThroughput=pd.NamedAgg(column='MaxULThroughput', aggfunc=sum),
            CntUlThroughput=pd.NamedAgg(column='MaxULThroughput', aggfunc="count"),
            WiFiUserQty=pd.NamedAgg(column='WiFiUserQty', aggfunc=sum),
            CntWiFiUserQty=pd.NamedAgg(column='WiFiUserQty', aggfunc="count"),
            HostNumberOfEntries=pd.NamedAgg(column='HostNumberOfEntries', aggfunc=sum),
            CntHostNumberOfEntries=pd.NamedAgg(column='HostNumberOfEntries', aggfunc="count"),
            ECGI=pd.NamedAgg(column='ECGI', aggfunc=get_main_cell),)
        grouped[['TotalDownload', 'TotalUpload', 'TotalConnectTime', 'ModelName', 'IMSI',
                 'IMEI', 'MSISDN']] = grouped.sort_values('esn')[
            ['TotalDownload', 'TotalUpload', 'TotalConnectTime', 'ModelName', 'IMSI',
             'IMEI', 'MSISDN']].fillna(0)
        grouped = grouped.reset_index()
        grouped['date'] = day
        # Except for the 'TotalDownload', 'TotalUpload', 'TotalConnectTime', 'ModelName',
        # 'IMSI', 'IMEI' and 'MSISDN' columns, NaN in all other columns is converted back
        # to setting.INVALID_VALUE, i.e. -9999.
        grouped = grouped.replace(np.nan, setting.INVALID_VALUE)
        grouped.to_csv(os.path.join(setting.data_path, 'day', day + r".csv"), index=False)


# return a dictionary with:
#   key: date
#   value: list of filenames of this date
def get_day_df_dict() -> Dict[str, List[str]]:
    all_day_file = compress.get_all_csv_file(os.path.join(setting.data_path, 'extractData'))
    day_dict = dict()
    for file in all_day_file:
        date = os.path.split(file)[1].split("\\")[-1].split('_')[0]
        if date not in day_dict:
            day_dict[date] = list()
        day_dict[date].append(file)
    return day_dict
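
# For illustration (hypothetical file names): with extracted files named like
# "20200101_xx.csv", get_day_df_dict() returns something of the shape
#     {'20200101': ['.../20200101_00.csv', '.../20200101_01.csv'],
#      '20200102': ['.../20200102_00.csv']}
# i.e. one bucket of CSV paths per calendar day, which merge_day_data() then
# aggregates into per-ESN rows for that day.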

def get_main_cell(series):
    count_map = Counter(list(filter(lambda x: x != setting.INVALID_STRING, series)))
    count = 0
    main_cell = "-"
    for cell, nums in count_map.items():
        if nums > count:
            count = nums
            main_cell = cell
    return main_cell


def day_extract():
    compress.empty_folder(os.path.join(setting.data_path, 'day'))
    day_dict = get_day_df_dict()
    merge_day_data(day_dict)


if __name__ == '__main__':
    print(time.localtime(time.time()))
    day_extract()
    print(time.localtime(time.time()))
| 37.629108 | 106 | 0.648534 | 1,049 | 8,015 | 4.824595 | 0.224023 | 0.073108 | 0.116973 | 0.024699 | 0.405453 | 0.17131 | 0.147994 | 0.11065 | 0.065995 | 0.05493 | 0 | 0.007021 | 0.218091 | 8,015 | 212 | 107 | 37.806604 | 0.800543 | 0.108796 | 0 | 0.154839 | 0 | 0 | 0.076404 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090323 | false | 0 | 0.058065 | 0 | 0.264516 | 0.012903 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f635b3eca92319c1097c2f0cdb0375a22892f53e | 1,138 | py | Python | module/seeding/seeder/threshold_seed.py | ObliviousJamie/opic-prototype | a925ce9faa38b9a6c8976d4c63b47349a53fd07e | [
"BSD-3-Clause"
] | null | null | null | module/seeding/seeder/threshold_seed.py | ObliviousJamie/opic-prototype | a925ce9faa38b9a6c8976d4c63b47349a53fd07e | [
"BSD-3-Clause"
] | null | null | null | module/seeding/seeder/threshold_seed.py | ObliviousJamie/opic-prototype | a925ce9faa38b9a6c8976d4c63b47349a53fd07e | [
"BSD-3-Clause"
] | null | null | null | import peakutils
from module.seeding.seeder.seeder import Seeder
class ThresholdSeeder(Seeder):
    def __init__(self, threshold, return_type='string', s_filter=None, peak_filter=None):
        super(ThresholdSeeder, self).__init__(return_type)
        self.threshold = threshold
        self.s_filter = s_filter
        self.peak_filter = peak_filter

    def pick_peaks(self, x_axis, y_axis, G):
        seeds = []
        indexes = peakutils.indexes(y_axis, thres=self.threshold / max(y_axis))
        for seed in x_axis[indexes]:
            seed = self.seed_switch[self.return_type](seed)
            if seed not in seeds:
                for v in G[seed]:
                    if v in seeds:
                        break
                seeds.append(seed)
        return seeds

    def _gen_name(self, name):
        self.name = f'{name}'
        if self.peak_filter is not None:
            self.name = f'{self.name}_{self.peak_filter.name}'
        else:
            self.name = f'{self.name}_gaussian_peak{self.threshold}'
        if self.s_filter is not None:
            self.name = f'{self.name}_{self.s_filter.name}'
| 29.947368 | 89 | 0.598418 | 150 | 1,138 | 4.313333 | 0.3 | 0.098918 | 0.055641 | 0.060278 | 0.137558 | 0.111283 | 0.111283 | 0.111283 | 0.111283 | 0.111283 | 0 | 0 | 0.301406 | 1,138 | 37 | 90 | 30.756757 | 0.813836 | 0 | 0 | 0 | 0 | 0 | 0.105448 | 0.094903 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.074074 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f635f47508977479e51aeb2a2d9a8692db618aa8 | 1,620 | py | Python | ball.py | iliescua/Pong | 88c62cc9db44b76b98130546582aa601859e0041 | [
"MIT"
] | null | null | null | ball.py | iliescua/Pong | 88c62cc9db44b76b98130546582aa601859e0041 | [
"MIT"
] | null | null | null | ball.py | iliescua/Pong | 88c62cc9db44b76b98130546582aa601859e0041 | [
"MIT"
] | null | null | null | from turtle import Turtle
TOP_BORDER = 280
SIDE_BORDER = 380
P_SIZE = 50
P_POSITION = 320
class Ball:
    def __init__(self):
        self.ball = Turtle("circle")
        self.ball.pu()
        self.ball.color("white")
        self.x_move = 10
        self.y_move = 10
        self.r_score = 0
        self.l_score = 0
        self.ball_speed = 0.1

    def move(self):
        x_dir = self.ball.xcor() + self.x_move
        y_dir = self.ball.ycor() + self.y_move
        self.ball.goto(x_dir, y_dir)

    def bounce(self, r_pad, l_pad):
        if self.ball.ycor() > TOP_BORDER or self.ball.ycor() < -TOP_BORDER:
            self.y_move *= -1
        check_right = self.ball.distance(r_pad) < P_SIZE and self.ball.xcor() > P_POSITION
        check_left = self.ball.distance(l_pad) < P_SIZE and self.ball.xcor() < -P_POSITION
        if check_right or check_left:
            self.ball_speed *= 0.9
            self.x_move *= -1

    def reset_ball_right(self):
        self.ball.goto(0, 0)
        self.ball_speed = 0.1
        if self.x_move < 0:
            self.x_move *= -1

    def reset_ball_left(self):
        self.ball.goto(0, 0)
        self.ball_speed = 0.1
        if self.x_move > 0:
            self.x_move *= -1

    def update_score(self):
        if self.ball.xcor() > SIDE_BORDER:
            self.l_score += 1
            self.reset_ball_right()
        if self.ball.xcor() < -SIDE_BORDER:
            self.r_score += 1
            self.reset_ball_left()

    def get_r_score(self):
        return self.r_score

    def get_l_score(self):
        return self.l_score
| 23.142857 | 90 | 0.559877 | 245 | 1,620 | 3.453061 | 0.204082 | 0.189125 | 0.074468 | 0.066194 | 0.429078 | 0.334515 | 0.315603 | 0.212766 | 0.212766 | 0.137116 | 0 | 0.033976 | 0.327778 | 1,620 | 69 | 91 | 23.478261 | 0.742883 | 0 | 0 | 0.166667 | 0 | 0 | 0.00679 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.020833 | 0.041667 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f63677a352c96443fb84314f00dd42cf61e97b33 | 2,404 | py | Python | nicos_sinq/devices/epics/motor.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_sinq/devices/epics/motor.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_sinq/devices/epics/motor.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Michele Brambilla <mnichele.brambilla@psi.ch>
#
# *****************************************************************************
from nicos import session
from nicos.core import MAIN, Param, status
from nicos_ess.devices.epics import EpicsMotor
from nicos_ess.devices.epics.extensions import HasDisablePv
class MotorCanDisable(HasDisablePv, EpicsMotor):
    parameters = {
        'auto_enable': Param('Automatically enable the motor when the setup is'
                             ' loaded', type=bool, default=False,
                             settable=False),
    }

    def doInit(self, mode):
        EpicsMotor.doInit(self, mode)
        if session.sessiontype == MAIN and self.auto_enable:
            self.enable()

    def doShutdown(self):
        if session.sessiontype == MAIN:
            self.disable()
        EpicsMotor.doShutdown(self)

    def doStatus(self, maxage=0):
        stat, message = self._get_status_message()
        self._motor_status = stat, message
        if stat == status.ERROR:
            return stat, message or 'Unknown problem in record'
        elif stat == status.WARN:
            return stat, message
        if not self.isEnabled:
            return status.WARN, 'Motor is disabled'
        return EpicsMotor.doStatus(self, maxage)

    def doIsAllowed(self, target):
        if not self.isEnabled:
            return False, 'Motor disabled'
        return EpicsMotor.doIsAllowed(self, target)
| 36.424242 | 79 | 0.635191 | 291 | 2,404 | 5.216495 | 0.505155 | 0.013175 | 0.025692 | 0.037549 | 0.11726 | 0.036891 | 0 | 0 | 0 | 0 | 0 | 0.013499 | 0.229617 | 2,404 | 65 | 80 | 36.984615 | 0.806156 | 0.431364 | 0 | 0.0625 | 0 | 0 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f637709339851920c290c85068ff77de0d14dc64 | 2,941 | py | Python | tests/test_matrix.py | cmc333333/mapbox-sdk-py | c38d177fc4f235b08a9e8dcda711e09a6edf0e20 | [
"MIT"
] | null | null | null | tests/test_matrix.py | cmc333333/mapbox-sdk-py | c38d177fc4f235b08a9e8dcda711e09a6edf0e20 | [
"MIT"
] | null | null | null | tests/test_matrix.py | cmc333333/mapbox-sdk-py | c38d177fc4f235b08a9e8dcda711e09a6edf0e20 | [
"MIT"
] | null | null | null | import pytest
import responses
from mapbox import DirectionsMatrix
from mapbox.errors import MapboxDeprecationWarning
points = [{
    "type": "Feature",
    "properties": {},
    "geometry": {
        "type": "Point",
        "coordinates": [
            -87, 36]}}, {
    "type": "Feature",
    "properties": {},
    "geometry": {
        "type": "Point",
        "coordinates": [
            -86, 36]}}, {
    "type": "Feature",
    "properties": {},
    "geometry": {
        "type": "Point",
        "coordinates": [
            -88, 37]}}]


def test_class_attrs():
    """Get expected class attr values"""
    serv = DirectionsMatrix()
    assert serv.api_name == 'directions-matrix'
    assert serv.api_version == 'v1'


def test_profile_invalid():
    """'jetpack' is not a valid profile."""
    with pytest.raises(ValueError):
        DirectionsMatrix(access_token='pk.test')._validate_profile('jetpack')


@pytest.mark.parametrize('profile', ['mapbox/driving', 'mapbox/cycling', 'mapbox/walking'])
def test_profile_valid(profile):
    """Profiles are valid"""
    assert profile == DirectionsMatrix(
        access_token='pk.test')._validate_profile(profile)


@pytest.mark.parametrize('profile', ['mapbox.driving', 'mapbox.cycling', 'mapbox.walking'])
def test_deprecated_profile(profile):
    """Profiles are deprecated"""
    service = DirectionsMatrix()
    with pytest.warns(MapboxDeprecationWarning):
        assert profile.replace('.', '/') == service._validate_profile(profile)


def test_null_query():
    service = DirectionsMatrix()
    assert service._make_query(None, None) == {}


def test_query():
    service = DirectionsMatrix()
    params = service._make_query([0, 3], [1, 2])
    assert params['sources'] == '0;3'
    assert params['destinations'] == '1;2'


@responses.activate
@pytest.mark.parametrize('waypoints', [points, [p['geometry'] for p in points], [p['geometry']['coordinates'] for p in points]])
def test_matrix(waypoints):
    responses.add(
        responses.GET,
        'https://api.mapbox.com/directions-matrix/v1/mapbox/driving/-87,36;-86,36;-88,37?access_token=pk.test',
        match_querystring=True,
        body='{"durations":[[0,4977,5951],[4963,0,9349],[5881,9317,0]]}',
        status=200,
        content_type='application/json')
    # We need a second response because of the difference in rounding between
    # Python 2 (leaves a '.0') and 3 (no unnecessary '.0').
    responses.add(
        responses.GET,
        'https://api.mapbox.com/directions-matrix/v1/mapbox/driving/-87.0,36.0;-86.0,36.0;-88.0,37.0?access_token=pk.test',
        match_querystring=True,
        body='{"durations":[[0,4977,5951],[4963,0,9349],[5881,9317,0]]}',
        status=200,
        content_type='application/json')
    res = DirectionsMatrix(access_token='pk.test').matrix(waypoints)
    matrix = res.json()['durations']
    # 3x3 list
    assert len(matrix) == 3
assert len(matrix[0]) == 3 | 31.287234 | 128 | 0.633798 | 340 | 2,941 | 5.385294 | 0.338235 | 0.026761 | 0.0355 | 0.046423 | 0.43692 | 0.418897 | 0.418897 | 0.339705 | 0.283998 | 0.283998 | 0 | 0.051586 | 0.195852 | 2,941 | 94 | 129 | 31.287234 | 0.722622 | 0.082625 | 0 | 0.441176 | 0 | 0.058824 | 0.269806 | 0.042601 | 0 | 0 | 0 | 0 | 0.132353 | 1 | 0.102941 | false | 0 | 0.058824 | 0 | 0.161765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f63ae4ae1659c3703f55d9ef00455b73d7a5f9b7 | 2,801 | py | Python | scripts/no_bool_in_generic.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | [
"BSD-3-Clause"
] | 28,899 | 2016-10-13T03:32:12.000Z | 2022-03-31T21:39:05.000Z | scripts/no_bool_in_generic.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | [
"BSD-3-Clause"
] | 31,004 | 2016-10-12T23:22:27.000Z | 2022-03-31T23:17:38.000Z | scripts/no_bool_in_generic.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | [
"BSD-3-Clause"
] | 15,149 | 2016-10-13T03:21:31.000Z | 2022-03-31T18:46:47.000Z | """
Check that pandas/core/generic.py doesn't use bool as a type annotation.
There is already the method `bool`, so the alias `bool_t` should be used instead.
This is meant to be run as a pre-commit hook - to run it manually, you can do:
pre-commit run no-bool-in-core-generic --all-files
The function `visit` is adapted from a function by the same name in pyupgrade:
https://github.com/asottile/pyupgrade/blob/5495a248f2165941c5d3b82ac3226ba7ad1fa59d/pyupgrade/_data.py#L70-L113
"""
from __future__ import annotations
import argparse
import ast
import collections
from typing import Sequence
def visit(tree: ast.Module) -> dict[int, list[int]]:
    "Step through tree, recording when nodes are in annotations."
    in_annotation = False
    nodes: list[tuple[bool, ast.AST]] = [(in_annotation, tree)]
    to_replace = collections.defaultdict(list)
    while nodes:
        in_annotation, node = nodes.pop()
        if isinstance(node, ast.Name) and in_annotation and node.id == "bool":
            to_replace[node.lineno].append(node.col_offset)
        for name in reversed(node._fields):
            value = getattr(node, name)
            if name in {"annotation", "returns"}:
                next_in_annotation = True
            else:
                next_in_annotation = in_annotation
            if isinstance(value, ast.AST):
                nodes.append((next_in_annotation, value))
            elif isinstance(value, list):
                for value in reversed(value):
                    if isinstance(value, ast.AST):
                        nodes.append((next_in_annotation, value))
    return to_replace


def replace_bool_with_bool_t(to_replace, content: str) -> str:
    new_lines = []
    for n, line in enumerate(content.splitlines(), start=1):
        if n in to_replace:
            for col_offset in reversed(to_replace[n]):
                line = line[:col_offset] + "bool_t" + line[col_offset + 4 :]
        new_lines.append(line)
    return "\n".join(new_lines)


def check_for_bool_in_generic(content: str) -> tuple[bool, str]:
    tree = ast.parse(content)
    to_replace = visit(tree)

    if not to_replace:
        mutated = False
        return mutated, content

    mutated = True
    return mutated, replace_bool_with_bool_t(to_replace, content)
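
# A minimal illustration of the transformation (hypothetical snippet, not
# actual pandas source):
#
#     src = "def f(self, copy: bool = True) -> bool: ..."
#     mutated, out = check_for_bool_in_generic(src)
#     # mutated is True and out is
#     # "def f(self, copy: bool_t = True) -> bool_t: ..."
#
# Only `bool` names in annotation/return positions are rewritten; the default
# value True and any call to the bool() builtin are left untouched.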

def main(argv: Sequence[str] | None = None) -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("paths", nargs="*")
    args = parser.parse_args(argv)

    for path in args.paths:
        with open(path, encoding="utf-8") as fd:
            content = fd.read()
        mutated, new_content = check_for_bool_in_generic(content)
        if mutated:
            with open(path, "w", encoding="utf-8") as fd:
                fd.write(new_content)


if __name__ == "__main__":
    main()
| 31.829545 | 111 | 0.646912 | 377 | 2,801 | 4.633952 | 0.3687 | 0.068689 | 0.036634 | 0.022896 | 0.154551 | 0.136234 | 0.104179 | 0.104179 | 0.062965 | 0.062965 | 0 | 0.016667 | 0.250268 | 2,801 | 87 | 112 | 32.195402 | 0.815238 | 0.193859 | 0 | 0.071429 | 0 | 0 | 0.048918 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.089286 | 0 | 0.232143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f63b5c57bfdbb91b31f33dd30b40f9e108f599e0 | 4,786 | py | Python | tests/unit/configuration_subsystem/test_sample_configurations.py | richm/ansible-navigator | c9cd9a4b2eeb424145d4953aca79c4cc8ee8afd4 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/unit/configuration_subsystem/test_sample_configurations.py | richm/ansible-navigator | c9cd9a4b2eeb424145d4953aca79c4cc8ee8afd4 | [
"Apache-2.0",
"MIT"
] | 1 | 2022-02-04T02:38:15.000Z | 2022-02-04T02:38:15.000Z | tests/unit/configuration_subsystem/test_sample_configurations.py | richm/ansible-navigator | c9cd9a4b2eeb424145d4953aca79c4cc8ee8afd4 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-11-17T09:45:18.000Z | 2021-11-17T09:45:18.000Z | """ Some tests using alternate test configurations
to prove code paths not covered by the ansible-navigator
configuration
"""
import pytest

from ansible_navigator.configuration_subsystem.configurator import Configurator
from ansible_navigator.configuration_subsystem.navigator_post_processor import (
    NavigatorPostProcessor,
)

from ansible_navigator.configuration_subsystem.definitions import ApplicationConfiguration
from ansible_navigator.configuration_subsystem.definitions import CliParameters
from ansible_navigator.configuration_subsystem.definitions import Entry
from ansible_navigator.configuration_subsystem.definitions import EntryValue
from ansible_navigator.configuration_subsystem.definitions import SubCommand

from ansible_navigator.configuration_subsystem.parser import Parser

# pylint: disable=protected-access


def test_cmdline_source_not_set():
    """Ensure a cmdline entry post-processes cleanly when its source is not set"""
    test_config = ApplicationConfiguration(
        application_name="test_config1",
        post_processor=NavigatorPostProcessor(),
        subcommands=[
            SubCommand(name="subcommand1", description="subcommand1"),
        ],
        entries=[
            Entry(
                name="cmdline",
                short_description="cmdline",
                value=EntryValue(),
            ),
        ],
    )
    configurator = Configurator(params=[], application_configuration=test_config)
    configurator._post_process()
    assert "Completed post processing for cmdline" in configurator._messages[0][1]
    assert configurator._exit_messages == []


def test_no_subcommand():
    """Ensure a Config without a subparser entry fails"""
    test_config = ApplicationConfiguration(
        application_name="test_config1",
        post_processor=None,
        subcommands=[
            SubCommand(name="subcommand1", description="subcommand1"),
        ],
        entries=[],
    )
    with pytest.raises(ValueError, match="No entry with subparser value defined"):
        Configurator(params=[], application_configuration=test_config).configure()


def test_many_subcommand():
    """Ensure a Config with multiple subparser entries fails"""
    test_config = ApplicationConfiguration(
        application_name="test_config1",
        post_processor=None,
        subcommands=[
            SubCommand(name="subcommand1", description="subcommand1"),
        ],
        entries=[
            Entry(
                name="sb1",
                short_description="Subcommands",
                subcommand_value=True,
                value=EntryValue(default="welcome"),
            ),
            Entry(
                name="sb2",
                short_description="Subcommands",
                subcommand_value=True,
                value=EntryValue(default="welcome"),
            ),
        ],
    )
    with pytest.raises(ValueError, match="Multiple entries with subparser value defined"):
        Configurator(params=[], application_configuration=test_config).configure()


def test_invalid_choice_not_set():
    """Ensure an error is raised when the current source is not set"""
    test_config = ApplicationConfiguration(
        application_name="test_config1",
        post_processor=None,
        subcommands=[
            SubCommand(name="subcommand1", description="subcommand1"),
        ],
        entries=[
            Entry(
                name="sb1",
                short_description="Subcommands",
                subcommand_value=True,
                value=EntryValue(default="welcome"),
            ),
            Entry(
                name="e1",
                short_description="ex1",
                value=EntryValue(),
            ),
        ],
    )
    with pytest.raises(ValueError, match="Current source not set for e1"):
        test_config.entry("e1").invalid_choice  # pylint: disable=expression-not-assigned


def test_custom_nargs_for_positional():
    """Ensure custom nargs for a positional parameter are carried forward"""
    test_config = ApplicationConfiguration(
        application_name="test_config1",
        post_processor=None,
        subcommands=[
            SubCommand(name="subcommand1", description="subcommand1"),
        ],
        entries=[
            Entry(
                name="sb1",
                short_description="Subcommands",
                subcommand_value=True,
                value=EntryValue(default="welcome"),
            ),
            Entry(
                name="e1",
                cli_parameters=CliParameters(positional=True, nargs=3),
                short_description="ex1",
                value=EntryValue(),
                subcommands=["subcommand1"],
            ),
        ],
    )
    parser = Parser(test_config)
| 34.431655 | 90 | 0.635395 | 425 | 4,786 | 6.962353 | 0.249412 | 0.033795 | 0.088205 | 0.089219 | 0.689084 | 0.592092 | 0.574518 | 0.474823 | 0.474823 | 0.449814 | 0 | 0.009464 | 0.271417 | 4,786 | 138 | 91 | 34.681159 | 0.839117 | 0.08901 | 0 | 0.663717 | 0 | 0 | 0.104046 | 0 | 0 | 0 | 0 | 0 | 0.026549 | 1 | 0.044248 | false | 0 | 0.079646 | 0 | 0.123894 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f63e8c61c7288fdc96e3a4c021f87342a2409094 | 563 | py | Python | tests/shared.py | award7/dicomsort | b83a8d9468a6599cfc36f497dcdb38ea62c2c783 | [
"MIT"
] | 15 | 2015-02-26T17:27:48.000Z | 2019-10-22T12:28:24.000Z | tests/shared.py | BigHeartDB/dicomsort | e4e73241cc08e2fe34ea5a3fae606c9ce5afd3ff | [
"MIT"
] | 61 | 2020-02-07T21:56:23.000Z | 2022-03-31T22:12:08.000Z | tests/shared.py | suever/dicomsort | d2a09887ebe7e3f2bcdc07eb1375d995ba365205 | [
"MIT"
] | 7 | 2015-09-07T04:47:29.000Z | 2019-03-18T09:29:48.000Z | import wx
class WxTestCase:
    def setup(self):
        self.app = wx.App()
        self.frame = wx.Frame(None)
        self.frame.Show()

    def teardown(self):
        def _cleanup():
            for win in wx.GetTopLevelWindows():
                if win:
                    if isinstance(win, wx.Dialog) and win.IsModal():
                        win.EndModal(0)
                    else:
                        win.Close(force=True)
            wx.WakeUpIdle()

        timer = wx.PyTimer(_cleanup)
        timer.Start(100)
| 23.458333 | 68 | 0.479574 | 59 | 563 | 4.542373 | 0.576271 | 0.052239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012158 | 0.415631 | 563 | 23 | 69 | 24.478261 | 0.802432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.055556 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6403a7748cde59ed1c5a1bdb63ee72fbf187a9c | 637 | py | Python | tests/utilities/read_properties.py | BSE21-13/FrontEndSite | 3592b7c0f62bdc3868d93221b27e9d365e9120b8 | [
"MIT"
] | null | null | null | tests/utilities/read_properties.py | BSE21-13/FrontEndSite | 3592b7c0f62bdc3868d93221b27e9d365e9120b8 | [
"MIT"
] | 5 | 2021-12-28T12:48:56.000Z | 2022-01-24T12:07:17.000Z | tests/utilities/read_properties.py | BSE21-13/FrontEndSite | 3592b7c0f62bdc3868d93221b27e9d365e9120b8 | [
"MIT"
] | null | null | null | import configparser
config = configparser.RawConfigParser()
config.read("../tests/configurations/config.ini")
class ReadConfig:
"""This class consists of methods that retrieve
information from the configuration file"""
@staticmethod
def get_application_url():
"""This method retrieves the URL from config file"""
url = config.get('Common required information', 'base_url')
return url
@staticmethod
def get_cadise_page_title():
cadise_page_title = config.get(
'Common required information',
'cadise_page_title'
)
return cadise_page_title
| 27.695652 | 67 | 0.676609 | 69 | 637 | 6.072464 | 0.507246 | 0.095465 | 0.143198 | 0.109785 | 0.162291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.238619 | 637 | 22 | 68 | 28.954545 | 0.863918 | 0.205651 | 0 | 0.133333 | 0 | 0 | 0.230143 | 0.069246 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6406e119d2329e146c0e9ca048882d418cdc911 | 20,678 | py | Python | s2protocol/decoders.py | karlgluck/heroes-of-the-storm-replay-parser | 5dd407e3ce2bd06d1acd279dd85c2a2a924c3c62 | [
"MIT"
] | 31 | 2015-01-19T09:42:02.000Z | 2021-01-02T12:42:07.000Z | s2protocol/decoders.py | karlgluck/heroes-of-the-storm-replay-parser | 5dd407e3ce2bd06d1acd279dd85c2a2a924c3c62 | [
"MIT"
] | null | null | null | s2protocol/decoders.py | karlgluck/heroes-of-the-storm-replay-parser | 5dd407e3ce2bd06d1acd279dd85c2a2a924c3c62 | [
"MIT"
] | 9 | 2015-04-02T04:24:54.000Z | 2017-09-08T11:17:19.000Z | # Copyright (c) 2013 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import struct
import base64
# A note on decoding blobs:
#
# The library Blizzard wrote has no distinction between strings and binary blobs
# at the protocol level. Because the results are going to be formatted as JSON
# and printed in utf-8 text, they must be properly encoded.
#
# I extended their decoders to support distinguishing these binary blobs. Every
# blob will now return a dictionary instead of a string. This dictionary will
# contain one or two members:
# * 'utf8' - A utf-8 decoded Unicode string representing the blob,
# created using Python's 'replace' option. This is what you expect
# a string to be.
# * 'base64' - A base-64 encoded version of the bytes from the blob. This member
# is present if and only if the blob cannot be decoded correctly
# into the utf8 member.
#
# This distinction is necessary because one cannot simply interpret all blobs
# as base64, since strings would be unrecognizable, or as utf-8 Unicode, since
# not all byte sequences form valid Unicode strings. This makes it easy to use
# the most common case of just getting strings from the replay, but keeps it
# possible to obtain the original binary blob by first checking for the base64
# member, then decoding either it or utf8.
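#
# For example (illustrative values only, not taken from a real replay):
#   decoding b'hello'    -> {'utf8': u'hello'}
#   decoding b'\xff\xfe' -> {'utf8': u'\ufffd\ufffd', 'base64': '//4='}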
class TruncatedError(Exception):
pass
class CorruptedError(Exception):
pass
class BitPackedBuffer:
def __init__(self, contents, endian='big'):
self._data = contents or []
self._used = 0
self._next = None
self._nextbits = 0
self._bigendian = (endian == 'big')
def __str__(self):
return 'buffer(%02x/%d,[%d]=%s)' % (
self._nextbits and self._next or 0, self._nextbits,
self._used, '%02x' % (ord(self._data[self._used]),) if (self._used < len(self._data)) else '--')
def copy(self, other):
self._data = other._data
self._used = other._used
self._next = other._next
self._nextbits = other._nextbits
self._bigendian = other._bigendian
def peek_bytes_as_hex_string(self, bytes=0):
if bytes == 0:
bytes = len(self._data)
bpb = BitPackedBuffer([], self._bigendian)
bpb.copy(self)
return ''.join('{:02x}'.format(ord(x)) for x in bpb.read_unaligned_bytes(bytes))
def peek_bytes_as_bin_string(self, bytes=0):
if bytes == 0:
bytes = len(self._data)
bpb = BitPackedBuffer([], self._bigendian)
bpb.copy(self)
return ''.join('{:08b}'.format(ord(x)) for x in bpb.read_unaligned_bytes(bytes))
def peek_bits_as_bin_string(self, bits=0):
if bits == 0:
bits = len(self._data) * 8
bpb = BitPackedBuffer([], self._bigendian)
bpb.copy(self)
return ('{:0%ib}'%bits).format(bpb.read_bits(bits))
def done(self):
return self._nextbits == 0 and self._used >= len(self._data)
def used_bits(self):
return self._used * 8 - self._nextbits
def byte_align(self):
self._nextbits = 0
def read_aligned_bytes(self, bytes):
self.byte_align()
data = self._data[self._used:self._used + bytes]
self._used += bytes
if len(data) != bytes:
raise TruncatedError(self)
return data
def state(self):
        # _next may still be None before the first read; coerce to 0 for display
        return '{next=%i,nextbits=%i,used=%i}' % (self._next or 0, self._nextbits, self._used)
def read_bits(self, bits):
result = 0
resultbits = 0
while resultbits != bits:
if self._nextbits == 0:
if self.done():
raise TruncatedError(self)
self._next = ord(self._data[self._used])
self._used += 1
self._nextbits = 8
copybits = min(bits - resultbits, self._nextbits)
copy = (self._next & ((1 << copybits) - 1))
if self._bigendian:
result |= copy << (bits - resultbits - copybits)
else:
result |= copy << resultbits
self._next >>= copybits
self._nextbits -= copybits
resultbits += copybits
return result
def read_unaligned_bytes(self, bytes):
return ''.join([chr(self.read_bits(8)) for i in xrange(bytes)])
class BitPackedDecoder:
def __init__(self, contents, typeinfos):
self._buffer = BitPackedBuffer(contents)
self._typeinfos = typeinfos
def __str__(self):
return self._buffer.__str__()
def instance(self, typeid):
if typeid >= len(self._typeinfos):
raise CorruptedError(self)
typeinfo = self._typeinfos[typeid]
return getattr(self, typeinfo[0])(*typeinfo[1])
def byte_align(self):
self._buffer.byte_align()
def done(self):
return self._buffer.done()
def used_bits(self):
return self._buffer.used_bits()
def _array(self, bounds, typeid):
length = self._int(bounds)
return [self.instance(typeid) for i in xrange(length)]
def _bitarray(self, bounds):
length = self._int(bounds)
return (length, self._buffer.read_bits(length))
def _blob(self, bounds):
length = self._int(bounds)
result = self._buffer.read_aligned_bytes(length)
try:
result = {'utf8': result.decode('utf-8', 'strict')}
except UnicodeDecodeError:
result = {
'utf8': result.decode('utf-8', 'replace'),
'base64': base64.b64encode(result)
}
return result
def _bool(self):
return self._int((0, 1)) != 0
def _choice(self, bounds, fields):
tag = self._int(bounds)
if tag not in fields:
raise CorruptedError(self)
field = fields[tag]
return {field[0]: self.instance(field[1])}
def _fourcc(self):
return self._buffer.read_unaligned_bytes(4)
def _int(self, bounds):
return bounds[0] + self._buffer.read_bits(bounds[1])
def _null(self):
return None
def _optional(self, typeid):
exists = self._bool()
return self.instance(typeid) if exists else None
def _real32(self):
return struct.unpack('>f', self._buffer.read_unaligned_bytes(4))
def _real64(self):
return struct.unpack('>d', self._buffer.read_unaligned_bytes(8))
def _struct(self, fields):
result = {}
for field in fields:
if field[0] == '__parent':
parent = self.instance(field[1])
if isinstance(parent, dict):
result.update(parent)
elif len(fields) == 1:
result = parent
else:
result[field[0]] = parent
else:
result[field[0]] = self.instance(field[1])
return result
class BitPackedDecoderDebug:
def __init__(self, contents, typeinfos):
self._buffer = BitPackedBuffer(contents)
self._typeinfos = typeinfos
self._markers = []
self._json = {}
def __str__(self):
return self._buffer.__str__()
def peek_bytes_as_hex_string(self, bytes):
return self._buffer.peek_bytes_as_hex_string(bytes)
def peek_bytes_as_bin_string(self, bytes=0):
return self._buffer.peek_bytes_as_bin_string(bytes)
def space_binary_string_by_markers(self, bin_string, first_bit_index):
retval = ''
x = 0
while x < len(bin_string):
for m in self._markers:
if m['at'] == (first_bit_index + x):
retval = retval + '{' + m['type'] + '}'
retval = retval + bin_string[x]
x += 1
for m in self._markers:
if m['at'] == (first_bit_index + x):
retval = retval + '{' + m['type'] + '}'
return retval
def get_json_and_reset(self):
retval = self._json
self._json = {}
return retval
def instance(self, typeid):
        self._markers.append({'at':self.used_bits(),'type':'instance(%i)'%typeid})
        old_json = self._json
        self._json = {'bit_start': self.used_bits(), 'typeid': typeid}
        if typeid >= len(self._typeinfos):
            # 'hex_string' was referenced here but never defined; report the error without it
            return {"ERROR":"Asked to instance typeid '%i' but there are only '%i' type IDs" % (typeid, len(self._typeinfos))}
typeinfo = self._typeinfos[typeid]
retval = getattr(self, typeinfo[0])(*typeinfo[1])
self._json['bit_end'] = self.used_bits()
old_json['instance%i'%self.used_bits()] = self._json
self._json = old_json
self._markers.append({'at':self.used_bits(),'type':'end-instance(%i)'%typeid})
return retval
def byte_align(self):
self._buffer.byte_align()
def done(self):
return self._buffer.done()
def used_bits(self):
return self._buffer.used_bits()
def _array(self, bounds, typeid):
self._markers.append({'at':self.used_bits(),'type':'array(%s,%s)'%(str(bounds),str(typeid))})
old_json = self._json
self._json = {'bit_start': self.used_bits(), 'bounds': bounds, 'typeid': typeid}
length = self._int(bounds)
self._json['length'] = length
retval = [self.instance(typeid) for i in xrange(length)]
old_json['array%i' % self.used_bits()] = self._json
self._json = old_json
return retval
def _bitarray(self, bounds):
self._markers.append({'at':self.used_bits(),'type':'bitarray(%s)'%str(bounds)})
old_json = self._json
self._json = {'bit_start': self.used_bits(), 'bounds': bounds}
length = self._int(bounds)
self._json['bits'] = self._buffer.peek_bits_as_bin_string(length)
retval = (length, self._buffer.read_bits(length))
old_json['bitarray%i'%self.used_bits()] = self._json
self._json = old_json
return retval
def _blob(self, bounds):
self._markers.append({'at':self.used_bits(),'type':'blob(%s)'%str(bounds)})
old_json = self._json
self._json = {'bit_start': self.used_bits(), 'bounds': bounds}
length = self._int(bounds)
self._json['length'] = length
retval = self._buffer.read_aligned_bytes(length)
self._json['bytes'] = ''.join('{:02x}'.format(ord(x)) for x in retval)
old_json['blob%i'%self.used_bits()] = self._json
self._json = old_json
try:
retval = {'utf8': retval.decode('utf-8', 'strict')}
except UnicodeDecodeError:
retval = {
'utf8': retval.decode('utf-8', 'replace'),
'base64': base64.b64encode(retval)
}
return retval
def _bool(self):
old_json = self._json
self._json = {'bit_start': self.used_bits()}
self._markers.append({'at':self.used_bits(),'type':'bool'})
retval = self._int((0, 1)) != 0
self._json['value'] = retval
old_json['bool%i'%self.used_bits()] = self._json
self._json = old_json
return retval
def _choice(self, bounds, fields):
self._markers.append({'at':self.used_bits(),'type':'choice(%s,%s)'%(str(bounds),str(fields))})
old_json = self._json
self._json = {'bit_start': self.used_bits(), 'bounds': bounds, 'fields':fields}
tag = self._int(bounds)
if tag not in fields:
return {"ERROR":"Choice '%s' does not exist in available fields '%s'" % (str(tag), str(fields))}
field = fields[tag]
retval = {field[0]: self.instance(field[1])}
self._json['value'] = retval
old_json['choice%i'%self.used_bits()] = self._json
self._json = old_json
return retval
def _fourcc(self):
old_json = self._json
self._json = {'bit_start': self.used_bits()}
        self._markers.append({'at':self.used_bits(),'type':'fourcc'})
retval = self._buffer.read_unaligned_bytes(4)
old_json['fourcc%i'%self.used_bits()] = self._json
self._json = old_json
return retval
def _int(self, bounds):
old_json = self._json
self._json = {'bit_start': self.used_bits(), 'bounds': bounds, 'bits':self._buffer.peek_bits_as_bin_string(bounds[1])}
bitpos = self.used_bits()
retval = bounds[0] + self._buffer.read_bits(bounds[1])
self._markers.append({'at':bitpos,'type':'int(%s)=%i @ %s'%(str(bounds), retval, self._buffer.state())})
self._json['value'] = retval
old_json['int%i'%self.used_bits()] = self._json
self._json = old_json
return retval
def _null(self):
self._markers.append({'at':self.used_bits(),'type':'null'})
return None
def _optional(self, typeid):
old_json = self._json
self._json = {'bit_start': self.used_bits(), 'typeid':typeid}
self._markers.append({'at':self.used_bits(),'type':'optional(%s)'%str(typeid)})
exists = self._bool()
retval = self.instance(typeid) if exists else None
old_json['optional%i'%self.used_bits()] = self._json
self._json = old_json
return retval
def _real32(self):
old_json = self._json
self._json = {'bit_start': self.used_bits()}
self._markers.append({'at':self.used_bits(),'type':'real32'})
retval = struct.unpack('>f', self._buffer.read_unaligned_bytes(4))
self._json['value'] = retval
old_json['real32%i'%self.used_bits()] = self._json
self._json = old_json
return retval
def _real64(self):
old_json = self._json
self._json = {'bit_start': self.used_bits()}
self._markers.append({'at':self.used_bits(),'type':'real64'})
retval = struct.unpack('>d', self._buffer.read_unaligned_bytes(8))
self._json['value'] = retval
old_json['real64%i'%self.used_bits()] = self._json
self._json = old_json
return retval
def _struct(self, fields):
old_json = self._json
self._json = {'bit_start': self.used_bits(), 'fields': fields}
self._markers.append({'at':self.used_bits(),'type':'struct(%s)'%str(fields)})
result = {}
for field in fields:
if field[0] == '__parent':
parent = self.instance(field[1])
if isinstance(parent, dict):
result.update(parent)
elif len(fields) == 1:
result = parent
else:
result[field[0]] = parent
else:
result[field[0]] = self.instance(field[1])
old_json['struct%i'%self.used_bits()] = self._json
self._json = old_json
return result
class VersionedDecoder:
def __init__(self, contents, typeinfos):
self._buffer = BitPackedBuffer(contents)
self._typeinfos = typeinfos
def __str__(self):
return self._buffer.__str__()
def instance(self, typeid):
if typeid >= len(self._typeinfos):
raise CorruptedError(self)
typeinfo = self._typeinfos[typeid]
return getattr(self, typeinfo[0])(*typeinfo[1])
def byte_align(self):
self._buffer.byte_align()
def done(self):
return self._buffer.done()
def used_bits(self):
return self._buffer.used_bits()
def _expect_skip(self, expected):
if self._buffer.read_bits(8) != expected:
raise CorruptedError(self)
def _vint(self):
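        # Decoding sketch (labels are mine, inferred from the code below): the
        # first byte stores the sign in bit 0 and six value bits; bit 7 (0x80)
        # set on a byte means another byte follows.
        # Worked example: 0x05 = 0b00000101 -> negative=1, (5 >> 1) & 0x3f = 2 -> returns -2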
b = self._buffer.read_bits(8)
negative = b & 1
result = (b >> 1) & 0x3f
bits = 6
while (b & 0x80) != 0:
b = self._buffer.read_bits(8)
result |= (b & 0x7f) << bits
bits += 7
return -result if negative else result
def _array(self, bounds, typeid):
self._expect_skip(0)
length = self._vint()
return [self.instance(typeid) for i in xrange(length)]
def _bitarray(self, bounds):
self._expect_skip(1)
length = self._vint()
return (length, self._buffer.read_aligned_bytes((length + 7) / 8))
def _blob(self, bounds):
self._expect_skip(2)
length = self._vint()
result = self._buffer.read_aligned_bytes(length)
try:
result = {'utf8': result.decode('utf-8', 'strict')}
except UnicodeDecodeError:
result = {
'utf8': result.decode('utf-8', 'replace'),
'base64': base64.b64encode(result)
}
return result
def _bool(self):
self._expect_skip(6)
return self._buffer.read_bits(8) != 0
def _choice(self, bounds, fields):
self._expect_skip(3)
tag = self._vint()
if tag not in fields:
self._skip_instance()
return {}
field = fields[tag]
return {field[0]: self.instance(field[1])}
def _fourcc(self):
self._expect_skip(7)
return self._buffer.read_aligned_bytes(4)
def _int(self, bounds):
self._expect_skip(9)
return self._vint()
def _null(self):
return None
def _optional(self, typeid):
self._expect_skip(4)
exists = self._buffer.read_bits(8) != 0
return self.instance(typeid) if exists else None
def _real32(self):
self._expect_skip(7)
return struct.unpack('>f', self._buffer.read_aligned_bytes(4))
def _real64(self):
self._expect_skip(8)
return struct.unpack('>d', self._buffer.read_aligned_bytes(8))
def _struct(self, fields):
self._expect_skip(5)
result = {}
length = self._vint()
for i in xrange(length):
tag = self._vint()
field = next((f for f in fields if f[2] == tag), None)
if field:
if field[0] == '__parent':
parent = self.instance(field[1])
if isinstance(parent, dict):
result.update(parent)
elif len(fields) == 1:
result = parent
else:
result[field[0]] = parent
else:
result[field[0]] = self.instance(field[1])
else:
self._skip_instance()
return result
def _skip_instance(self):
skip = self._buffer.read_bits(8)
if skip == 0: # array
length = self._vint()
for i in xrange(length):
self._skip_instance()
elif skip == 1: # bitblob
length = self._vint()
self._buffer.read_aligned_bytes((length + 7) / 8)
elif skip == 2: # blob
length = self._vint()
self._buffer.read_aligned_bytes(length)
elif skip == 3: # choice
tag = self._vint()
self._skip_instance()
elif skip == 4: # optional
exists = self._buffer.read_bits(8) != 0
if exists:
self._skip_instance()
elif skip == 5: # struct
length = self._vint()
for i in xrange(length):
tag = self._vint()
self._skip_instance()
elif skip == 6: # u8
self._buffer.read_aligned_bytes(1)
elif skip == 7: # u32
self._buffer.read_aligned_bytes(4)
elif skip == 8: # u64
self._buffer.read_aligned_bytes(8)
elif skip == 9: # vint
self._vint()
| 35.106961 | 146 | 0.587678 | 2,566 | 20,678 | 4.518316 | 0.127046 | 0.042091 | 0.040366 | 0.034501 | 0.609539 | 0.57133 | 0.492841 | 0.468432 | 0.424357 | 0.380887 | 0 | 0.014355 | 0.289148 | 20,678 | 588 | 147 | 35.166667 | 0.774406 | 0.112922 | 0 | 0.608791 | 0 | 0 | 0.050036 | 0.002844 | 0 | 0 | 0.000656 | 0 | 0 | 1 | 0.162637 | false | 0.004396 | 0.004396 | 0.050549 | 0.325275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f646c5cb4299bf70a2742fabb4cedc310b92d3e0 | 6,019 | py | Python | rl_rvo_nav/policy_test/post_train.py | hanruihua/rl_rvo_nav | 9ce17853336ee3231860e0a0f8df9269515adfb4 | [
"MIT"
] | 8 | 2022-03-18T06:41:07.000Z | 2022-03-31T03:49:55.000Z | rl_rvo_nav/policy_test/post_train.py | hanruihua/rl_rvo_nav | 9ce17853336ee3231860e0a0f8df9269515adfb4 | [
"MIT"
] | null | null | null | rl_rvo_nav/policy_test/post_train.py | hanruihua/rl_rvo_nav | 9ce17853336ee3231860e0a0f8df9269515adfb4 | [
"MIT"
] | 2 | 2022-03-29T01:43:54.000Z | 2022-03-29T07:47:02.000Z | import torch
import numpy as np
from pathlib import Path
import platform
from rl_rvo_nav.policy.policy_rnn_ac import rnn_ac
from math import pi, sin, cos, sqrt
import time
class post_train:
def __init__(self, env, num_episodes=100, max_ep_len=150, acceler_vel = 1.0, reset_mode=3, render=True, save=False, neighbor_region=4, neighbor_num=5, args=None, **kwargs):
self.env = env
self.num_episodes=num_episodes
self.max_ep_len = max_ep_len
self.acceler_vel = acceler_vel
self.reset_mode = reset_mode
self.render=render
self.save=save
self.robot_number = self.env.ir_gym.robot_number
self.step_time = self.env.ir_gym.step_time
self.inf_print = kwargs.get('inf_print', True)
self.std_factor = kwargs.get('std_factor', 0.001)
# self.show_traj = kwargs.get('show_traj', False)
self.show_traj = False
self.traj_type = ''
self.figure_format = kwargs.get('figure_format', 'png')
self.nr = neighbor_region
self.nm = neighbor_num
self.args = args
def policy_test(self, policy_type='drl', policy_path=None, policy_name='policy', result_path=None, result_name='/result.txt', figure_save_path=None, ani_save_path=None, policy_dict=False, once=False):
if policy_type == 'drl':
model_action = self.load_policy(policy_path, self.std_factor, policy_dict=policy_dict)
o, r, d, ep_ret, ep_len, n = self.env.reset(mode=self.reset_mode), 0, False, 0, 0, 0
ep_ret_list, speed_list, mean_speed_list, ep_len_list, sn = [], [], [], [], 0
print('Policy Test Start !')
figure_id = 0
while n < self.num_episodes:
# if n == 1:
# self.show_traj = True
action_time_list = []
if self.render or self.save:
self.env.render(save=self.save, path=figure_save_path, i = figure_id, show_traj=self.show_traj, traj_type=self.traj_type)
if policy_type == 'drl':
abs_action_list =[]
for i in range(self.robot_number):
start_time = time.time()
a_inc = np.round(model_action(o[i]), 2)
end_time = time.time()
temp = end_time - start_time
action_time_list.append(temp)
cur_vel = self.env.ir_gym.robot_list[i].vel_omni
abs_action = self.acceler_vel * a_inc + np.squeeze(cur_vel)
abs_action_list.append(abs_action)
o, r, d, info = self.env.step_ir(abs_action_list, vel_type = 'omni')
robot_speed_list = [np.linalg.norm(robot.vel_omni) for robot in self.env.ir_gym.robot_list]
avg_speed = np.average(robot_speed_list)
speed_list.append(avg_speed)
ep_ret += r[0]
ep_len += 1
figure_id += 1
if np.max(d) or (ep_len == self.max_ep_len) or np.min(info):
speed = np.mean(speed_list)
figure_id = 0
if np.min(info):
ep_len_list.append(ep_len)
if self.inf_print: print('Successful, Episode %d \t EpRet %.3f \t EpLen %d \t EpSpeed %.3f'%(n, ep_ret, ep_len, speed))
else:
if self.inf_print: print('Fail, Episode %d \t EpRet %.3f \t EpLen %d \t EpSpeed %.3f'%(n, ep_ret, ep_len, speed))
ep_ret_list.append(ep_ret)
mean_speed_list.append(speed)
speed_list = []
o, r, d, ep_ret, ep_len = self.env.reset(mode=self.reset_mode), 0, False, 0, 0
n += 1
if np.min(info):
sn+=1
# if n == 2:
if once:
self.env.ir_gym.world_plot.save_gif_figure(figure_save_path, 0, format='eps')
break
if self.save:
self.env.ir_gym.save_ani(figure_save_path, ani_save_path, ani_name=policy_name)
break
mean_len = 0 if len(ep_len_list) == 0 else np.round(np.mean(ep_len_list), 2)
std_len = 0 if len(ep_len_list) == 0 else np.round(np.std(ep_len_list), 2)
average_speed = np.round(np.mean(mean_speed_list),2)
std_speed = np.round(np.std(mean_speed_list), 2)
f = open( result_path + result_name, 'a')
print( 'policy_name: '+ policy_name, ' successful rate: {:.2%}'.format(sn/self.num_episodes), "average EpLen:", mean_len, "std length", std_len, 'average speed:', average_speed, 'std speed', std_speed, file = f)
f.close()
print( 'policy_name: '+ policy_name, ' successful rate: {:.2%}'.format(sn/self.num_episodes), "average EpLen:", mean_len, 'std length', std_len, 'average speed:', average_speed, 'std speed', std_speed)
def load_policy(self, filename, std_factor=1, policy_dict=False):
        if policy_dict:
model = rnn_ac(self.env.observation_space, self.env.action_space, self.args.state_dim, self.args.rnn_input_dim, self.args.rnn_hidden_dim, self.args.hidden_sizes_ac, self.args.hidden_sizes_v, self.args.activation, self.args.output_activation, self.args.output_activation_v, self.args.use_gpu, self.args.rnn_mode)
check_point = torch.load(filename)
model.load_state_dict(check_point['model_state'], strict=True)
model.eval()
else:
model = torch.load(filename)
model.eval()
# model.train()
def get_action(x):
with torch.no_grad():
x = torch.as_tensor(x, dtype=torch.float32)
action = model.act(x, std_factor)
return action
return get_action
def dis(self, p1, p2):
return sqrt( (p2.py - p1.py)**2 + (p2.px - p1.px)**2 ) | 41.510345 | 323 | 0.579 | 849 | 6,019 | 3.839812 | 0.199058 | 0.026074 | 0.016564 | 0.022086 | 0.211963 | 0.177301 | 0.164417 | 0.156442 | 0.156442 | 0.156442 | 0 | 0.014166 | 0.308025 | 6,019 | 145 | 324 | 41.510345 | 0.768547 | 0.018275 | 0 | 0.118812 | 0 | 0 | 0.066226 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049505 | false | 0 | 0.069307 | 0.009901 | 0.158416 | 0.059406 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6483d57aeed490b4f308d885d8bb0758430e6ca | 4,866 | py | Python | scripts/NIRISS_AMI_tutorial.py | vandalt/ImPlaneIA | 72b22e487ef45a8a665e4a6a88a91e99e382fdd0 | [
"BSD-3-Clause"
] | 6 | 2020-03-03T16:15:40.000Z | 2022-03-23T16:15:09.000Z | scripts/NIRISS_AMI_tutorial.py | vandalt/ImPlaneIA | 72b22e487ef45a8a665e4a6a88a91e99e382fdd0 | [
"BSD-3-Clause"
] | 5 | 2020-02-03T17:46:59.000Z | 2022-03-07T20:00:59.000Z | scripts/NIRISS_AMI_tutorial.py | vandalt/ImPlaneIA | 72b22e487ef45a8a665e4a6a88a91e99e382fdd0 | [
"BSD-3-Clause"
] | 1 | 2021-07-06T23:02:01.000Z | 2021-07-06T23:02:01.000Z | #!/usr/bin/env python
# coding: utf-8
# # A short Tutorial to process sample NIRISS AMI simulations
#
# * fit fringes for a simulated target and calibrator sequence (no WFE evolution between them)
# * calibrate target closure phases with the calibrator
# * fit for a binary
import glob
import os, sys, time
from astropy.io import fits
import numpy as np
from nrm_analysis import nrm_core, InstrumentData
from nrm_analysis.misctools import utils
print(InstrumentData.__file__)
import matplotlib.pyplot as plt
#get_ipython().run_line_magic('matplotlib', 'inline')
debug = True
home = os.path.expanduser('~')
np.set_printoptions(precision=4)
if debug:
print("Current working directory is ", os.getcwd())
print("InstrumentData is file: ", InstrumentData.__file__)
filt="F430M"
oversample = 3
# ### Where the data lives:
# Small disk with noise; the file names differ because of a central-pixel kluge, but the data are correct.
# Copied these from the ami_sim output in ~/scene_noise/...
datadir = home+"/Downloads/asoulain_arch2019.12.07/Simulated_data/"
cr = "c_dsk_100mas__F430M_81_flat_x11__00_mir"
tr = "t_dsk_100mas__F430M_81_flat_x11__00_mir"
# Directories where ascii output files of fringe fitting will go:
tsavedir = datadir+"tgt_ov%d"%oversample
csavedir = datadir+"cal_ov%d"%oversample
test_tar = datadir + tr + ".fits"
test_cal = datadir + cr + ".fits"
if debug:
print("tsavedir:", tsavedir, "\ntest_tar:", test_tar)
print("csavedir:", csavedir, "\ntest_cal:", test_cal)
# ### First we specify the instrument & filter # (defaults: Spectral type set to A0V)
# SET BANDPASS - or use NIRISS' default bandpass for the filter
default = None # 'bandpass' defaults to None - it's here for clarity
bp3= np.array([(0.1, 4.2e-6),(0.8, 4.3e-6),(0.1,4.4e-6)]) # for speedy development
bpmono = np.array([(1.0, 4.3e-6),]) # for speedy development
niriss = InstrumentData.NIRISS(filt, bandpass=bpmono)
# ### Next: Extract fringe observables using image plane fringe-fitting
# * Need to pass the InstrumentData object and some keywords.
# * Observables are (over)written to a new savedir/input_datafile_root (e.g., cr or tr here).
# * Initialize FringeFitter with save_txt_only=True to switch off diagnostic FITS file writing.
# * Files are written out to these directories.
ff_t = nrm_core.FringeFitter(niriss, datadir=datadir, savedir=tsavedir,
oversample=oversample, interactive=False)
ff_c = nrm_core.FringeFitter(niriss, datadir=datadir, savedir=csavedir,
oversample=oversample, interactive=False)
# Keep interactive=False unless you know what you are doing.
# This can take a little while -- there is a parallelization option: set threads=n_threads.
# The output of this step is long -- you may want to run it as a script instead of in a notebook;
# the output is left off in this example.
ff_t.fit_fringes(test_tar)
ff_c.fit_fringes(test_cal)
utils.compare_pistons(ff_t.nrm.phi*2*np.pi, ff_t.nrm.fringepistons, str="ff_t")
utils.compare_pistons(ff_c.nrm.phi*2*np.pi, ff_t.nrm.fringepistons, str="ff_c")
# Text files contain the observables you are trying to
# measure, but some diagnostic FITS files are also written: centered_nn
# are the cropped/centered data, modelsolution_nn are the best-fit models to the
# data, and residual_nn is the data - model_solution
print("oversample {:d} used in modelling the data".format(oversample))
print("observables text files in rootdir", home+"/Downloads/asoulain_arch2019.12.07/Simulated_data/")
print("tgt observables in subdir", tsavedir)
print("cal observables in subdir", csavedir)
showfig = False
if showfig:
target_outputdir = tsavedir + "/" + tr
data = fits.getdata(target_outputdir + "/centered_0.fits")
fmodel = fits.getdata(target_outputdir + "/modelsolution_01.fits")
res = fits.getdata(target_outputdir + "/residual_01.fits")
plt.figure(figsize=(12,4))
plt.subplot(131)
plt.title("Input data")
im = plt.imshow(pow(data/data.max(), 0.5))
plt.axis("off")
plt.colorbar(fraction=0.046, pad=0.04)
plt.subplot(132)
plt.title("best model")
im = plt.imshow(pow(fmodel/data.max(), 0.5))
plt.axis("off")
plt.colorbar(fraction=0.046, pad=0.04)
plt.subplot(133)
plt.title("residual")
im = plt.imshow(res/data.max())
plt.axis("off")
plt.colorbar(fraction=0.046, pad=0.04)
plt.show()
# If you don't want to clog up your hard drive with FITS files you can initialize
# FringeFitter with the keyword "save_txt_only=True" -- but you may want to save
# out everything the first time you reduce the data, to check it. Above we can
# see a pretty good fit; the magnification of the model is a bit off. This shows
# up as a radial pattern in the residual. Finely fitting the exact magnification
# and rotation should be done before fringe fitting.
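# A minimal sketch of the lean mode mentioned above (reusing the objects defined
# earlier in this script; save_txt_only is the keyword named in the note above):
# ff_lean = nrm_core.FringeFitter(niriss, datadir=datadir, savedir=tsavedir,
#                                 oversample=oversample, interactive=False,
#                                 save_txt_only=True)
# ff_lean.fit_fringes(test_tar)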
| 39.560976 | 101 | 0.723387 | 741 | 4,866 | 4.635628 | 0.403509 | 0.00524 | 0.006987 | 0.022707 | 0.136245 | 0.136245 | 0.136245 | 0.109461 | 0.066376 | 0.066376 | 0 | 0.02773 | 0.169955 | 4,866 | 122 | 102 | 39.885246 | 0.822728 | 0.427661 | 0 | 0.153846 | 0 | 0 | 0.193277 | 0.073073 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.015385 | 0.107692 | 0 | 0.107692 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f64bb2f111d4579927862540613c0debd02f8fa0 | 3,817 | py | Python | vilya/views/api/git.py | mubashshirjamal/code | d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382 | [
"BSD-3-Clause"
] | 1,582 | 2015-01-05T02:41:44.000Z | 2022-03-30T20:03:22.000Z | vilya/views/api/git.py | mubashshirjamal/code | d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382 | [
"BSD-3-Clause"
] | 66 | 2015-01-23T07:58:04.000Z | 2021-11-12T02:23:27.000Z | vilya/views/api/git.py | mubashshirjamal/code | d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382 | [
"BSD-3-Clause"
] | 347 | 2015-01-05T07:47:07.000Z | 2021-09-20T21:22:32.000Z | # coding: UTF-8
from __future__ import absolute_import
import json
import PyRSS2Gen as RSS2
from vilya.config import DOMAIN
from vilya.libs.reltime import compute_relative_time
from vilya.views.api.utils import jsonize
class GitUI(object):
_q_exports = ['branches', 'allfiles', 'lastlog', 'lineblame']
def __init__(self, project):
self.project = project
@jsonize
def branches(self, request):
return self.project.repo.branches
@jsonize
def allfiles(self, request):
branch = request.get_form_var('branch', 'HEAD')
repo = self.project.repo
# FIXME: path sort order in ellen
tree = repo.get_tree(branch, recursive=True)
return [f['path'] for f in tree]
@jsonize
def lastlog(self, request):
path = request.get_form_var('path')
repo = self.project.repo
commit = repo.get_last_commit('HEAD', path=path)
data = {"author": '',
"age": '',
"parents": [],
"date": '',
"commit": '',
"message": '',
"email": ''}
if commit:
data = commit.as_dict()
data['commit'] = data['id']
data['age'] = compute_relative_time(commit.author_timestamp)
return data
@jsonize
def lineblame(self, request):
rev = request.get_form_var('rev', 'HEAD')
path = request.get_form_var('path')
lineno = request.get_form_var('lineno', 1)
dumb = {
'author': '',
'time': '',
'summary': '',
'sha': '',
}
if not path:
return dumb
blame = self.project.repo.blame_file(rev, path, lineno=int(lineno))
for hunk in blame.hunks:
for line in hunk.lines:
if line.no == int(lineno):
dumb['author'] = line.commit.author.name
dumb['time'] = line.commit.author_time
dumb['summary'] = line.commit.message_header
dumb['sha'] = line.commit.sha
return dumb
class CommitsUI(object):
_q_exports = []
def __init__(self, request, project):
self.project = project
def __call__(self, request):
return self._index(request)
def _q_index(self, request):
return self._index(request)
def _gen_rss(self, data):
proj_name = self.project.name
items = []
for d in data:
items.append(RSS2.RSSItem(
title=d.get('message', ''),
link="%s/%s/commit/%s" % (
DOMAIN, proj_name, d.get('id', '')),
author=d.get('email', ''),
pubDate=d.get('date', ''),
))
rss = RSS2.RSS2(
title="%s RSS Feed" % proj_name,
link="%s/api/%s/commits" % (DOMAIN, proj_name),
description="%s RSS Feed" % proj_name,
items=items,
)
return rss.to_xml('utf-8')
def _index(self, request):
begin = request.get_form_var('begin') or 'HEAD~5'
end = request.get_form_var('end') or 'HEAD'
format = request.get_form_var('format') or 'json'
repo = self.project.repo
data = []
if repo:
commits = repo.get_commits(end,
from_ref=begin)
data = [commit.as_dict(with_files=True) for commit in commits]
if format == 'rss':
return self._gen_rss(data)
else:
return json.dumps(data)
def _q_lookup(self, request, sha):
data = {}
repo = self.project.repo
commit = repo.get_commit(sha)
if commit:
data = commit.as_dict(with_files=True)
return json.dumps(data)
| 30.055118 | 75 | 0.530259 | 433 | 3,817 | 4.505774 | 0.265589 | 0.056381 | 0.057406 | 0.069708 | 0.157868 | 0.141466 | 0.099436 | 0 | 0 | 0 | 0 | 0.00359 | 0.343201 | 3,817 | 126 | 76 | 30.293651 | 0.774631 | 0.011789 | 0 | 0.190476 | 0 | 0 | 0.071106 | 0 | 0 | 0 | 0 | 0.007937 | 0 | 1 | 0.104762 | false | 0 | 0.057143 | 0.028571 | 0.304762 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f64c8b2cad206f15a32b0791a1effdab5558a028 | 1,912 | py | Python | src/graph.py | kb2ma/nethead-ui | 950aa85806aa31e1d7857af8f933e75588c1fdc5 | [
"Apache-2.0"
] | null | null | null | src/graph.py | kb2ma/nethead-ui | 950aa85806aa31e1d7857af8f933e75588c1fdc5 | [
"Apache-2.0"
] | null | null | null | src/graph.py | kb2ma/nethead-ui | 950aa85806aa31e1d7857af8f933e75588c1fdc5 | [
"Apache-2.0"
] | null | null | null | """Displays graph of time series data from Graphite.
Refreshes graph once a minute.
"""
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from app import app
from app import applog
from datetime import datetime as dt
import json
import numpy as np
import pandas as pd
import plotly.express as px
import requests
def _collect_data(device_sn):
"""Collects data from Graphite into a dataframe.
"""
url = 'http://localhost:8089/render'
params = {
'target': 'device.{}.object.3303.0'.format(device_sn),
'from': 'now-1d',
'format': 'json'
}
data = json.loads(requests.get(url, params=params).text)
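    # Graphite's JSON render response is a list of series objects, each holding a
    # 'target' name and 'datapoints' as [value, timestamp] pairs. Illustrative
    # shape only (the device serial is made up):
    # [{"target": "device.SN123.object.3303.0",
    #   "datapoints": [[21.5, 1600000000.0], [21.7, 1600000060.0]]}]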
series = np.array(data[0]['datapoints'])
df = pd.DataFrame(series,
index=series[:, 1],
columns=['temp', 'time'])
# time as hours:minutes
    df['time'] = df['time'].apply(lambda x: dt.fromtimestamp(x).strftime('%H:%M'))
return df
def tab_layout(device_sn, device_desc):
if device_sn and device_desc == 'Status: found':
df = _collect_data(device_sn)
fig = px.line(df, x="time", y="temp")
return html.Div(id='graph-div', children=[
dcc.Graph(id='graph-ref', figure=fig, style={'height': '500px'}),
dcc.Interval(
id='graph-refresh',
interval=60*1000, # in milliseconds
n_intervals=0)
])
else:
return html.Div(id='graph-div', children=[ 'No valid device' ])
@app.callback([Output('graph-ref', 'figure'),
Output('device-desc2', 'children')],
[Input('graph-refresh', 'n_intervals')],
[State('device-sn', 'value')])
def updateGraph(n, device_sn):
df = _collect_data(device_sn)
return px.line(df, x="time", y="temp"), 'data as of {}'.format(df.iloc[-1,1])
| 29.415385 | 88 | 0.605649 | 250 | 1,912 | 4.544 | 0.436 | 0.056338 | 0.044894 | 0.050176 | 0.123239 | 0.086268 | 0.086268 | 0 | 0 | 0 | 0 | 0.017349 | 0.246339 | 1,912 | 64 | 89 | 29.875 | 0.770992 | 0.046548 | 0 | 0.044444 | 0 | 0 | 0.167245 | 0.01331 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.244444 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f64c8c0ad69d6d3faab6d5fe3c733e51363ca006 | 1,985 | py | Python | xprize_robojudge.py | anibalsolon/covid-xprize | cafc2c65c7e4f4184c16a1793da85371b6bc3218 | [
"Apache-2.0"
] | null | null | null | xprize_robojudge.py | anibalsolon/covid-xprize | cafc2c65c7e4f4184c16a1793da85371b6bc3218 | [
"Apache-2.0"
] | null | null | null | xprize_robojudge.py | anibalsolon/covid-xprize | cafc2c65c7e4f4184c16a1793da85371b6bc3218 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import numpy as np
import pandas as pd
LATEST_DATA_URL = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv'
LOCAL_DATA_URL = "tests/fixtures/OxCGRT_latest.csv"
NPI_COLUMNS = ['C1_School closing',
'C2_Workplace closing',
'C3_Cancel public events',
'C4_Restrictions on gatherings',
'C5_Close public transport',
'C6_Stay at home requirements',
'C7_Restrictions on internal movement',
'C8_International travel controls',
'H1_Public information campaigns',
'H2_Testing policy',
'H3_Contact tracing']
class XPrizeRobojudge(object):
def load_dataset(self, url: str = LATEST_DATA_URL) -> pd.DataFrame:
latest_df = pd.read_csv(url,
parse_dates=['Date'],
encoding="ISO-8859-1",
error_bad_lines=False)
# Handle regions
latest_df["RegionName"].fillna('', inplace=True)
# Replace CountryName by CountryName / RegionName
# np.where usage: if A then B else C
latest_df["CountryName"] = np.where(latest_df["RegionName"] == '',
latest_df["CountryName"],
latest_df["CountryName"] + ' / ' + latest_df["RegionName"])
return latest_df
def get_npis(self,
start_date: np.datetime64,
end_date: np.datetime64,
url: str = LATEST_DATA_URL) -> pd.DataFrame:
latest_df = self.load_dataset(url)
npis_df = latest_df[["CountryName", "Date"] + NPI_COLUMNS]
actual_npis_df = npis_df[(npis_df.Date >= start_date) & (npis_df.Date <= end_date)]
return actual_npis_df
| 43.152174 | 123 | 0.582872 | 219 | 1,985 | 5.045662 | 0.561644 | 0.072398 | 0.068778 | 0.028959 | 0.110407 | 0.068778 | 0.068778 | 0.068778 | 0.068778 | 0 | 0 | 0.019288 | 0.320907 | 1,985 | 45 | 124 | 44.111111 | 0.800445 | 0.110327 | 0 | 0 | 0 | 0.029412 | 0.280522 | 0.018171 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.205882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f64c94177f64a2f9e4145e71f70d7ea87d65280f | 3,282 | py | Python | ooobuild/lo/frame/frame_action_event.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/frame/frame_action_event.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/frame/frame_action_event.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is an auto-generated file produced by Cheetah
# Namespace: com.sun.star.frame
# Libre Office Version: 7.3
from ooo.oenv.env_const import UNO_NONE
from ..lang.event_object import EventObject as EventObject_a3d70b03
from ..uno.x_interface import XInterface as XInterface_8f010a43
import typing
from .frame_action import FrameAction as FrameAction_aef40b5c
from .x_frame import XFrame as XFrame_7a570956
class FrameActionEvent(EventObject_a3d70b03):
"""
Struct Class
this event struct is broadcast for actions which can happen to components within frames
See Also:
`API FrameActionEvent <https://api.libreoffice.org/docs/idl/ref/structcom_1_1sun_1_1star_1_1frame_1_1FrameActionEvent.html>`_
"""
__ooo_ns__: str = 'com.sun.star.frame'
__ooo_full_ns__: str = 'com.sun.star.frame.FrameActionEvent'
__ooo_type_name__: str = 'struct'
typeName: str = 'com.sun.star.frame.FrameActionEvent'
"""Literal Constant ``com.sun.star.frame.FrameActionEvent``"""
def __init__(self, Source: typing.Optional[XInterface_8f010a43] = None, Frame: typing.Optional[XFrame_7a570956] = None, Action: typing.Optional[FrameAction_aef40b5c] = FrameAction_aef40b5c.COMPONENT_ATTACHED) -> None:
"""
Constructor
Arguments:
Source (XInterface, optional): Source value.
Frame (XFrame, optional): Frame value.
Action (FrameAction, optional): Action value.
"""
if isinstance(Source, FrameActionEvent):
oth: FrameActionEvent = Source
self.Source = oth.Source
self.Frame = oth.Frame
self.Action = oth.Action
return
kargs = {
"Source": Source,
"Frame": Frame,
"Action": Action,
}
self._init(**kargs)
def _init(self, **kwargs) -> None:
self._frame = kwargs["Frame"]
self._action = kwargs["Action"]
inst_keys = ('Frame', 'Action')
kargs = kwargs.copy()
for key in inst_keys:
del kargs[key]
super()._init(**kargs)
@property
def Frame(self) -> XFrame_7a570956:
"""
contains the frame in which the event occurred
"""
return self._frame
@Frame.setter
def Frame(self, value: XFrame_7a570956) -> None:
self._frame = value
@property
def Action(self) -> FrameAction_aef40b5c:
"""
specifies the concrete event
"""
return self._action
@Action.setter
def Action(self, value: FrameAction_aef40b5c) -> None:
self._action = value
__all__ = ['FrameActionEvent']
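# Example (a sketch; `component` and `frame` stand in for real UNO objects and
# are hypothetical names, not part of this module):
# event = FrameActionEvent(Source=component, Frame=frame,
#                          Action=FrameAction_aef40b5c.COMPONENT_ATTACHED)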
| 32.176471 | 221 | 0.666057 | 395 | 3,282 | 5.36962 | 0.41519 | 0.028289 | 0.023574 | 0.035361 | 0.057049 | 0.042433 | 0 | 0 | 0 | 0 | 0 | 0.033708 | 0.240707 | 3,282 | 101 | 222 | 32.49505 | 0.817416 | 0.360146 | 0 | 0.044444 | 0 | 0 | 0.078836 | 0.037037 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f64d006c79ff144996e84e5aa827cbb38d274052 | 4,446 | py | Python | actions/lib/bestfit.py | StackStorm-Exchange/manageiq | e94b3bb0f3d68fee4d749cee1b5d1d24012ffe81 | [
"Apache-2.0"
] | 1 | 2019-08-26T02:31:19.000Z | 2019-08-26T02:31:19.000Z | actions/lib/bestfit.py | StackStorm-Exchange/manageiq | e94b3bb0f3d68fee4d749cee1b5d1d24012ffe81 | [
"Apache-2.0"
] | 5 | 2018-09-27T16:51:18.000Z | 2020-09-25T18:06:05.000Z | actions/lib/bestfit.py | StackStorm-Exchange/manageiq | e94b3bb0f3d68fee4d749cee1b5d1d24012ffe81 | [
"Apache-2.0"
] | 2 | 2018-09-25T23:21:03.000Z | 2021-01-28T17:45:20.000Z | import base_action
import re
import json
class BestFit(base_action.BaseAction):
def __init__(self, config):
"""Creates a new BaseAction given a StackStorm config object (kwargs works too)
:param config: StackStorm configuration object for the pack
:returns: a new BaseAction
"""
super(BestFit, self).__init__(config)
def _load_disks(self, client, disks):
"""If disks json is present this gets the first disk information
and returns the proper information.
"""
datastoreName = None
datastoreID = None
first_disk = disks['all_disks'][0]
datastore_name = first_disk['datastore']
if datastore_name != "automatic":
datastoreName, datastoreID = self._find_storage(client, datastore_name)
return (datastoreName, datastoreID)
def _check_hosts(self, client, hosts, kwargs_dict):
cluster = self._get_arg("clusterName", kwargs_dict)
if not cluster:
raise ValueError("Cluster Name can not be empty.")
leastVMs = None
hostID = None
hostName = None
datastoreName = None
datastoreID = None
disks = self._get_arg("disk_json", kwargs_dict)
if disks is not None:
datastoreName, datastoreID = self._load_disks(client, disks)
for host in hosts:
# Need to verify that the host is on and connected (not in maintenance mode)
# power_state can be 'on' 'maintenance' 'off'
if (host.v_owning_cluster == cluster and host.power_state == "on"):
if (leastVMs is None or host.v_total_vms < leastVMs):
hostID = str(host.id)
hostName = str(host.name)
leastVMs = host.v_total_vms
if (datastoreName is None and datastoreID is None):
datastoreName, datastoreID = self._check_storages(client,
host.host_storages,
kwargs_dict)
# only success if all of these are not None
# fail otherwise
success = cluster is not None and hostID is not None and datastoreID is not None
result = {'clusterName': cluster,
'hostName': hostName,
'hostID': hostID,
'datastoreName': datastoreName,
'datastoreID': datastoreID}
return (success, result)
def _check_storages(self, client, storages, kwargs_dict):
mostSpace = 0
dName = None
dId = None
for datastore in storages:
ds = client.collections.data_stores(datastore["storage_id"])
if self._filter_datastores(ds.name, kwargs_dict):
if ds.free_space > mostSpace:
dName = ds.name
dId = str(ds.id)
mostSpace = ds.free_space
return (dName, dId)
def _find_storage(self, client, datastore_name):
all_datastores = client.collections.data_stores.all
dName = None
dId = None
for datastore in all_datastores:
if datastore.name == datastore_name:
dName = datastore.name
dId = str(datastore.id)
break
return (dName, dId)
    def _filter_datastores(self, datastore, kwargs_dict):
        """Filter out the datastores by name
        Include if the datastore name does NOT match any of the regex expressions
        """
        datastoreFilters = self._get_arg("datastoreFilterRegEx", kwargs_dict)
        if not type(datastoreFilters) is dict:
            datastoreFilters = json.loads(datastoreFilters)
        datastoreFilterRegEx = datastoreFilters["filters"]
for regex in datastoreFilterRegEx:
if re.search(regex.strip(), datastore):
return False
return True
def bestfit(self, client, kwargs_dict):
attributes = self._attributes_str(["v_owning_cluster", "v_total_vms", "host_storages"])
allHosts = client.collections.hosts.query_string( # pylint: disable=no-member
expand="resources", attributes=attributes)
if not allHosts:
raise ValueError("No Hosts were returned from ManageIQ")
return self._check_hosts(client, allHosts, kwargs_dict)
| 39 | 95 | 0.593342 | 474 | 4,446 | 5.398734 | 0.295359 | 0.039078 | 0.018757 | 0.02501 | 0.023447 | 0.023447 | 0.023447 | 0 | 0 | 0 | 0 | 0.000675 | 0.333108 | 4,446 | 113 | 96 | 39.345133 | 0.862395 | 0.104363 | 0 | 0.123457 | 0 | 0 | 0.065876 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08642 | false | 0 | 0.037037 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f64d4cb44725ca01b18c26fa31d360e83ae96dc0 | 622 | py | Python | tests/IOExpander.py | Footleg/rocky-rover-board | 83d2f7a75fe362304fbc54bf71b6cd89718098f6 | [
"MIT"
] | 1 | 2022-01-15T17:51:08.000Z | 2022-01-15T17:51:08.000Z | tests/IOExpander.py | Footleg/rocky-rover-board | 83d2f7a75fe362304fbc54bf71b6cd89718098f6 | [
"MIT"
] | null | null | null | tests/IOExpander.py | Footleg/rocky-rover-board | 83d2f7a75fe362304fbc54bf71b6cd89718098f6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
""" Test case for the Footleg Robotics Sentinel robot controller board
Pulses all IO expander pins configured as outputs
"""
from time import sleep
import digitalio
from sentinelboard import SentinelHardware
sh = SentinelHardware()
# Set all pins as outputs, setting them LOW
for pin in range(16):
p = sh.mcp23017.get_pin(pin)
p.direction = digitalio.Direction.OUTPUT
p.value = False
# Pulse each pin twice in turn
for pin in range(16):
p = sh.mcp23017.get_pin(pin)
for i in range(2):
p.value = True
sleep(0.15)
p.value = False
sleep(0.3) | 23.923077 | 70 | 0.686495 | 95 | 622 | 4.473684 | 0.578947 | 0.049412 | 0.037647 | 0.061176 | 0.164706 | 0.164706 | 0.164706 | 0.164706 | 0.164706 | 0.164706 | 0 | 0.043841 | 0.229904 | 622 | 26 | 71 | 23.923077 | 0.843424 | 0.336013 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f64ef28d2932094a9e57197502304041dd4681ae | 882 | py | Python | airflow/upload.py | jordanparker6/coronva-virus | bd9ea4e8f3b9ebd193b5425bf4268bf6c0baf275 | [
"Apache-2.0"
] | 2 | 2020-05-09T06:45:38.000Z | 2020-06-01T21:50:27.000Z | airflow/upload.py | jordanparker6/coronva-virus | bd9ea4e8f3b9ebd193b5425bf4268bf6c0baf275 | [
"Apache-2.0"
] | 4 | 2021-03-10T11:46:52.000Z | 2022-02-27T01:33:20.000Z | airflow/upload.py | jordanparker6/COVID-19 | bd9ea4e8f3b9ebd193b5425bf4268bf6c0baf275 | [
"Apache-2.0"
] | null | null | null | import boto3, os
from datetime import datetime as dt
from botocore.client import Config
def main():
ACCESS_KEY = os.environ['DIGITAL_OCEAN_ACCESS_KEY']
SECRET = os.environ['DIGITAL_OCEAN_SECRET_KEY']
date = dt.today().strftime('%Y.%m.%d')
files = ['data.csv', 'agg_data.csv', 'confirmed_cases.csv']
# Initialize a session using DigitalOcean Spaces.
session = boto3.session.Session()
client = session.client('s3',
region_name='nyc3',
endpoint_url='https://nyc3.digitaloceanspaces.com',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET)
# Upload Files
for file in files:
print('Uploading: ', file)
fn = f"{date}/{file}"
client.upload_file(fn, 'covid-19', file)
if __name__ == "__main__":
main() | 33.923077 | 79 | 0.600907 | 105 | 882 | 4.790476 | 0.552381 | 0.089463 | 0.063618 | 0.083499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011006 | 0.278912 | 882 | 26 | 80 | 33.923077 | 0.779874 | 0.068027 | 0 | 0 | 0 | 0 | 0.214634 | 0.058537 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.2 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f64f2f22b9f3bcf769fdc52943788c73b7058eaa | 3,100 | py | Python | src/eduid_common/session/testing.py | SUNET/eduid-common | d666aec7e47e6b0ccb575d621bb6e9f40bcea4e4 | [
"BSD-3-Clause"
] | 1 | 2016-04-14T13:45:10.000Z | 2016-04-14T13:45:10.000Z | src/eduid_common/session/testing.py | SUNET/eduid-common | d666aec7e47e6b0ccb575d621bb6e9f40bcea4e4 | [
"BSD-3-Clause"
] | 16 | 2017-03-10T11:47:59.000Z | 2020-03-19T13:51:01.000Z | src/eduid_common/session/testing.py | SUNET/eduid-common | d666aec7e47e6b0ccb575d621bb6e9f40bcea4e4 | [
"BSD-3-Clause"
] | 3 | 2016-11-21T11:39:49.000Z | 2019-09-18T12:32:02.000Z | #
# Copyright (c) 2016 NORDUnet A/S
# Copyright (c) 2018 SUNET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
from typing import Sequence
import redis
from eduid_userdb.testing import EduidTemporaryInstance
logger = logging.getLogger(__name__)
class RedisTemporaryInstance(EduidTemporaryInstance):
"""Singleton to manage a temporary Redis instance
Use this for testing purpose only. The instance is automatically destroyed
at the end of the program.
"""
@property
def command(self) -> Sequence[str]:
return [
'docker',
'run',
'--rm',
'-p',
'{!s}:6379'.format(self.port),
'-v',
'{!s}:/data'.format(self.tmpdir),
'-e',
'extra_args=--daemonize no --bind 0.0.0.0',
'docker.sunet.se/eduid/redis:latest',
]
def setup_conn(self) -> bool:
try:
host, port, db = self.get_params()
_conn = redis.Redis(host, port, db)
_conn.set('dummy', 'dummy')
self._conn = _conn
except redis.exceptions.ConnectionError:
return False
return True
@property
def conn(self) -> redis.Redis:
if self._conn is None:
raise RuntimeError('Missing temporary Redis instance')
return self._conn
def get_params(self):
"""
Convenience function to get Redis connection parameters for the temporary database.
:return: Host, port and database
"""
return 'localhost', self.port, 0
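# Example (a sketch; assumes EduidTemporaryInstance provides a get_instance()
# classmethod, which is an assumption about the base class, not shown here):
# instance = RedisTemporaryInstance.get_instance()
# host, port, db = instance.get_params()
# instance.conn.set('key', 'value')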
| 34.831461 | 91 | 0.671613 | 390 | 3,100 | 5.3 | 0.510256 | 0.009676 | 0.016449 | 0.022254 | 0.089018 | 0.065796 | 0.065796 | 0.065796 | 0.065796 | 0.065796 | 0 | 0.008651 | 0.254194 | 3,100 | 88 | 92 | 35.227273 | 0.885381 | 0.587742 | 0 | 0.055556 | 0 | 0 | 0.136288 | 0.046823 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0.027778 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f65146c393969c774797935b3458611949f43a82 | 285 | py | Python | lectures/19-functional-ideas/examples/gematria2.py | mattmiller899/biosys-analytics | ab24a4c7206ed9a865e896daa57cee3c4e62df1f | [
"MIT"
] | 14 | 2019-07-14T08:29:04.000Z | 2022-03-07T06:33:26.000Z | lectures/19-functional-ideas/examples/gematria2.py | mattmiller899/biosys-analytics | ab24a4c7206ed9a865e896daa57cee3c4e62df1f | [
"MIT"
] | null | null | null | lectures/19-functional-ideas/examples/gematria2.py | mattmiller899/biosys-analytics | ab24a4c7206ed9a865e896daa57cee3c4e62df1f | [
"MIT"
] | 33 | 2019-01-05T17:03:47.000Z | 2019-11-11T20:48:24.000Z | #!/usr/bin/env python3
import os
import sys
args = sys.argv[1:]
if len(args) != 1:
print('Usage: {} WORD'.format(os.path.basename(sys.argv[0])))
sys.exit(1)
word = args[0]
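# Sum the Unicode code points of the letters -- e.g. (illustrative):
# "ab" -> ord('a') + ord('b') = 97 + 98 = 195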
number = 0
for letter in word:
number += ord(letter)
print('"{}" = "{}"'.format(word, number))
| 15 | 65 | 0.603509 | 45 | 285 | 3.822222 | 0.555556 | 0.081395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030043 | 0.182456 | 285 | 18 | 66 | 15.833333 | 0.708155 | 0.073684 | 0 | 0 | 0 | 0 | 0.095057 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f653d42bc6c243aed7bff8e1f420a4f32e076ef4 | 1,113 | py | Python | setup.py | andreped/livermask | 4bfc3dd858fc8ed6ee87468458cae7a4736e1551 | [
"MIT"
] | 38 | 2020-02-11T08:34:33.000Z | 2022-03-24T10:54:48.000Z | setup.py | andreped/livermask | 4bfc3dd858fc8ed6ee87468458cae7a4736e1551 | [
"MIT"
] | 9 | 2020-02-06T22:49:21.000Z | 2022-01-31T12:27:52.000Z | setup.py | andreped/livermask | 4bfc3dd858fc8ed6ee87468458cae7a4736e1551 | [
"MIT"
] | 6 | 2020-02-29T16:01:11.000Z | 2021-11-04T10:27:36.000Z | import setuptools
with open("README.md", "r") as f:
long_description = f.read()
with open('requirements.txt', 'r', encoding='utf-16') as ff:
required = ff.read().splitlines()
setuptools.setup(
name='livermask',
version='1.2.0',
author="André Pedersen",
author_email="andrped94@gmail.com",
license='MIT',
description="A package for automatic segmentation of liver from CT data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/andreped/livermask",
packages=setuptools.find_packages(),
entry_points={
'console_scripts': [
'livermask = livermask.livermask:main'
]
},
install_requires=required,
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 30.916667 | 78 | 0.619946 | 121 | 1,113 | 5.603306 | 0.644628 | 0.088496 | 0.147493 | 0.153392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020047 | 0.238095 | 1,113 | 35 | 79 | 31.8 | 0.779481 | 0 | 0 | 0 | 0 | 0 | 0.419587 | 0.021563 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.03125 | 0 | 0.03125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f654fd291c8b54b41d59a2fb5e8750235276c4ea | 1,039 | py | Python | pycontrol/libcpu/discovery.py | Velko/8-bit-CPU | 30cab1bd157da01149898607a5c1a15961b56294 | [
"MIT"
] | 7 | 2021-02-22T19:29:35.000Z | 2022-03-27T23:17:04.000Z | pycontrol/libcpu/discovery.py | Velko/8-bit-CPU | 30cab1bd157da01149898607a5c1a15961b56294 | [
"MIT"
] | 8 | 2021-01-05T19:08:24.000Z | 2021-08-16T20:50:13.000Z | pycontrol/libcpu/discovery.py | Velko/8-bit-CPU | 30cab1bd157da01149898607a5c1a15961b56294 | [
"MIT"
] | null | null | null | from typing import Iterator, Tuple
from .pin import PinBase, Pin, MuxPin, Mux
from . import DeviceSetup
def all_pins() -> Iterator[Tuple[str, PinBase]]:
dupe_filter = set()
for v_name, var in vars(DeviceSetup).items():
if not hasattr(var, "__dict__"): continue
for a_name, attr in vars(var).items():
if (not a_name.startswith("_")) and (isinstance(attr, Pin) or isinstance(attr, MuxPin)) and attr not in dupe_filter:
dupe_filter.add(attr)
yield "{}.{}".format(var.name, a_name), attr
def simple_pins() -> Iterator[Tuple[str, Pin]]:
for name, pin in all_pins():
if isinstance(pin, Pin):
yield name, pin
def all_muxes() -> Iterator[Tuple[str, Mux]]:
for v_name, var in vars(DeviceSetup).items():
if isinstance(var, Mux):
yield v_name, var
def mux_pins(mux: Mux) -> Iterator[Tuple[str, MuxPin]]:
for name, pin in all_pins():
if isinstance(pin, MuxPin) and pin.mux == mux:
yield name, pin | 37.107143 | 128 | 0.623677 | 148 | 1,039 | 4.243243 | 0.277027 | 0.103503 | 0.101911 | 0.063694 | 0.219745 | 0.219745 | 0.219745 | 0.219745 | 0.219745 | 0 | 0 | 0 | 0.246391 | 1,039 | 28 | 129 | 37.107143 | 0.802043 | 0 | 0 | 0.25 | 0 | 0 | 0.013462 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
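# Example (a sketch): enumerate every simple pin by its "<device>.<attr>" name.
# for name, pin in simple_pins():
#     print(name, pin)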
f65533e82f38d22c049fe300eaa8d9c21a02aaae | 749 | py | Python | tests/apis/test_yara.py | ninoseki/uzen | 93726f22f43902e17b22dd36142dac05171d0d84 | [
"MIT"
] | 76 | 2020-02-27T06:36:27.000Z | 2022-03-10T20:18:03.000Z | tests/apis/test_yara.py | ninoseki/uzen | 93726f22f43902e17b22dd36142dac05171d0d84 | [
"MIT"
] | 33 | 2020-03-13T02:04:14.000Z | 2022-03-04T02:06:11.000Z | tests/apis/test_yara.py | ninoseki/uzen | 93726f22f43902e17b22dd36142dac05171d0d84 | [
"MIT"
] | 6 | 2020-03-17T16:42:25.000Z | 2021-04-27T06:35:46.000Z | import asyncio
import pytest
from fastapi.testclient import TestClient
@pytest.mark.usefixtures("snapshots_setup")
def test_yara_scan(client: TestClient, event_loop: asyncio.AbstractEventLoop):
# it matches with all snapshots
payload = {"source": 'rule foo: bar {strings: $a = "foo" condition: $a}'}
response = client.post("/api/yara/scan", json=payload)
assert response.status_code == 200
snapshot = response.json()
assert snapshot.get("id")
assert snapshot.get("type") == "yara"
def test_yara_scan_with_invalid_input(
client: TestClient, event_loop: asyncio.AbstractEventLoop
):
payload = {"source": "boo"}
response = client.post("/api/yara/scan", json=payload)
assert response.status_code == 422
| 29.96 | 78 | 0.715621 | 93 | 749 | 5.634409 | 0.494624 | 0.061069 | 0.041985 | 0.057252 | 0.431298 | 0.431298 | 0.244275 | 0.244275 | 0.244275 | 0.244275 | 0 | 0.009509 | 0.157543 | 749 | 24 | 79 | 31.208333 | 0.820919 | 0.038718 | 0 | 0.117647 | 0 | 0 | 0.162953 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f656ca916c5e4b551ec247056d8e8cd69797a168 | 576 | py | Python | src/text/text_clean/rm_biaodian.py | TongtongSong/tools | 598417a018ab01b8dcda7fdbce118ce261246ea3 | [
"Apache-2.0"
] | null | null | null | src/text/text_clean/rm_biaodian.py | TongtongSong/tools | 598417a018ab01b8dcda7fdbce118ce261246ea3 | [
"Apache-2.0"
] | null | null | null | src/text/text_clean/rm_biaodian.py | TongtongSong/tools | 598417a018ab01b8dcda7fdbce118ce261246ea3 | [
"Apache-2.0"
] | null | null | null | #!/bin/env python
import re
import sys
if len(sys.argv) != 1:
    print("%s < in > out" % __file__, file=sys.stderr)
    sys.exit(1)
for line in sys.stdin:
    # regex matching Chinese characters
    zh_patt = u'[\u4e00-\u9fa5]'
    # regex matching English letters
    en_patt = u'[A-Za-z]'
src_word_list = []
for word in line.strip().split(' '):
if re.findall(zh_patt, word):
src_word_list.append(word)
elif re.findall(en_patt, word):
src_word_list.append(word)
if src_word_list:
string = ' '.join(src_word_list)
print (string)
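# Example (illustrative): an input line "你好 , world !" prints "你好 world";
# tokens containing neither Chinese characters nor English letters are dropped.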
| 20.571429 | 49 | 0.581597 | 86 | 576 | 3.686047 | 0.476744 | 0.11041 | 0.173502 | 0.094637 | 0.182965 | 0.182965 | 0.182965 | 0 | 0 | 0 | 0 | 0.016827 | 0.277778 | 576 | 27 | 50 | 21.333333 | 0.745192 | 0.059028 | 0 | 0.315789 | 0 | 0 | 0.070501 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.210526 | 0 | 0.210526 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |