blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
88dc715905f20934ff480a22be2bf54e2f00e709 | 4c13c70f61ef7c5afac989509bbf7b7b1149a43e | /compounds/migrations/0004_auto_20161103_0655.py | 553d4bfb7c753500143335ec01b6eb9b8c6ef0fd | [] | no_license | shanshanglaohu/yatcm | 82c69cc01cabbc1ddeb7fa4fe6f7f6026ed9649d | 845a4b4a3fc285004042ede4081af88ad597c37a | refs/heads/master | 2020-03-15T19:29:47.460663 | 2018-05-06T05:43:24 | 2018-05-06T05:43:24 | 132,308,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-03 06:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.10, 2016-11-03).

    Creates the ``Prescription`` model and adds a one-to-one ``cid`` link
    from ``Compound`` to ``CID``. Purely structural; no data migration.
    """

    # Must run after migration 0003 of the ``compounds`` app.
    dependencies = [
        ('compounds', '0003_auto_20161102_0210'),
    ]
    operations = [
        # New table for prescriptions; a prescription aggregates many herbs
        # (M2M through an implicit join table).
        migrations.CreateModel(
            name='Prescription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('english_name', models.CharField(blank=True, max_length=1024)),
                ('chinese_name', models.CharField(blank=True, max_length=1024)),
                ('phonetic_name', models.CharField(blank=True, max_length=1024)),
                ('describe', models.TextField(blank=True)),
                ('prescription_text', models.TextField(blank=True)),
                ('herbs', models.ManyToManyField(to='compounds.Herb')),
            ],
        ),
        # Optional 1:1 link from Compound to its CID record; deleting the
        # CID cascades to the compound's reference (CASCADE).
        migrations.AddField(
            model_name='compound',
            name='cid',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='compounds.CID'),
        ),
    ]
| [
"thatzhigang@outlook.com"
] | thatzhigang@outlook.com |
6510069f188477b43941632c6de0a16cdf521346 | e947249756abf52e1ff1ee767cf35aa080d877ff | /streamlit/components/base_component.py | 24fb4f6faac189d6515db464585a8e4c202249c8 | [] | no_license | Letsgetloud/Mushroom_Py-cture_Recognition | 41f736e7803649bb51dee41ff8675ad187176771 | 567c6e170f90443be6bdd2e8bf73daa944272237 | refs/heads/main | 2023-08-14T21:26:29.993541 | 2021-09-27T12:37:14 | 2021-09-27T12:37:14 | 405,022,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Import
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Import Standard libraries
from abc import ABC, abstractmethod
import streamlit as st
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Classes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CBaseComponent(ABC):
    """Abstract base class for Streamlit UI components.

    A concrete component receives a dict of options at construction time and
    must implement :meth:`render` (draw the component) and
    :meth:`getOptDflt` (supply a default for options missing from the dict).
    """

    def __init__(self
        , compOpts
    ):
        # compOpts: mapping of option name -> value for this component.
        self.opts = compOpts

    @abstractmethod
    def getOptDflt(self, optName):
        """Return the default value for *optName*. Must be overridden."""
        raise NotImplementedError('This method is not implemented in the Base Class: it must be implemented in the child class.')

    @abstractmethod
    def render(self):
        """Draw the component in the Streamlit page. Must be overridden."""
        pass

    def getOpt(self, optName):
        """Return the configured value for *optName*, falling back to the
        subclass-provided default when the option was not supplied."""
        # Idiomatic membership test directly on the dict (no .keys() view),
        # and a plain subscript since the key is known to be present.
        if optName in self.opts:
            return self.opts[optName]
        return self.getOptDflt(optName)

    def showTitle(self, title):
        """Display *title* as a subheader when it is non-empty."""
        if title:
            st.subheader(title)
| [
"noreply@github.com"
] | Letsgetloud.noreply@github.com |
de3613892f9749b6a6c5741111088946725b1406 | bb4b1afda1c477f9d7da2aae931d70b7f269ef8e | /k8s/io/apimachinery/pkg/util/intstr/generated_pb2.py | fb7a4fcafec06e423b25293341d4a0f9c0a7e1df | [
"Apache-2.0"
] | permissive | isabella232/flyteproto | 9dcf44cd216627b2516cdfd2b5343985f0dcf952 | 7c3548b070c441d228b599aeadc87b5408ccbb74 | refs/heads/master | 2022-03-20T00:25:50.388264 | 2019-11-08T01:33:56 | 2019-11-08T01:33:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,072 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: k8s.io/apimachinery/pkg/util/intstr/generated.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='k8s.io/apimachinery/pkg/util/intstr/generated.proto',
package='k8s.io.apimachinery.pkg.util.intstr',
syntax='proto2',
serialized_options=_b('Z\006intstr'),
serialized_pb=_b('\n3k8s.io/apimachinery/pkg/util/intstr/generated.proto\x12#k8s.io.apimachinery.pkg.util.intstr\";\n\x0bIntOrString\x12\x0c\n\x04type\x18\x01 \x01(\x03\x12\x0e\n\x06intVal\x18\x02 \x01(\x05\x12\x0e\n\x06strVal\x18\x03 \x01(\tB\x08Z\x06intstr')
)
_INTORSTRING = _descriptor.Descriptor(
name='IntOrString',
full_name='k8s.io.apimachinery.pkg.util.intstr.IntOrString',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='k8s.io.apimachinery.pkg.util.intstr.IntOrString.type', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='intVal', full_name='k8s.io.apimachinery.pkg.util.intstr.IntOrString.intVal', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='strVal', full_name='k8s.io.apimachinery.pkg.util.intstr.IntOrString.strVal', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=151,
)
DESCRIPTOR.message_types_by_name['IntOrString'] = _INTORSTRING
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IntOrString = _reflection.GeneratedProtocolMessageType('IntOrString', (_message.Message,), dict(
DESCRIPTOR = _INTORSTRING,
__module__ = 'k8s.io.apimachinery.pkg.util.intstr.generated_pb2'
# @@protoc_insertion_point(class_scope:k8s.io.apimachinery.pkg.util.intstr.IntOrString)
))
_sym_db.RegisterMessage(IntOrString)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"39540587+lyft-buildnotify-12@users.noreply.github.com"
] | 39540587+lyft-buildnotify-12@users.noreply.github.com |
8e920f817c8398f706ae2bf6dd2e2f04e80ba32c | ceea067a498a16c92ebba0c0f97c50d059388152 | /users/views.py | 6b03c0796b8c8ab6bccdbbdc3b7715d34a80b007 | [] | no_license | anvandev/ToDoApp | 8b2b5ca45e7279ebf0d612e1196638da994eab93 | 72618711a2106716fe7920b1204f193f835117f2 | refs/heads/master | 2023-03-25T21:25:55.531930 | 2021-03-24T15:39:57 | 2021-03-24T15:39:57 | 344,505,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm
def register(request):
    """Handle user sign-up.

    On a valid POST, create the account, flash a success message and send
    the user to the login page. Otherwise (GET, or invalid POST) render the
    registration template with the appropriate form.
    """
    if request.method != "POST":
        # Plain page view: show an unbound form.
        form = UserRegisterForm()
        return render(request, 'users/register.html', {'form': form})

    form = UserRegisterForm(request.POST)
    if form.is_valid():
        form.save()
        username = form.cleaned_data.get('username')
        messages.success(request, f'{username}, your account has been created! You are now able to log in.')
        return redirect('login')

    # Invalid submission: re-render with the bound form so errors display.
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Render the signed-in user's profile page.

    ``@login_required`` redirects anonymous visitors to the login URL first.
    """
    return render(request, 'users/profile.html')
| [
"ans.vanyan@gmail.com"
] | ans.vanyan@gmail.com |
e01432f1942decb1ea410c9a89bbdb3153df6103 | 921ea79505102e96dee5f514bca0134420dac515 | /answers/views.py | 7d479ff1fa0122a2c33778afeac5184b62674adf | [] | no_license | fahadmak/stackoverflow_clone | 22737ed7903aaf7cd0eb7a386e68e4670ede2f94 | e6b138437b4cdbbd2cee106a70eefb74e3567dd3 | refs/heads/develop | 2022-11-24T22:32:12.942225 | 2019-07-26T13:04:23 | 2019-07-26T13:04:23 | 195,412,232 | 0 | 0 | null | 2022-11-22T04:08:03 | 2019-07-05T13:16:32 | JavaScript | UTF-8 | Python | false | false | 4,462 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.views.generic import TemplateView
from django.urls import reverse
from django.shortcuts import get_object_or_404
from django.core.mail import send_mail
from stackoverflow import settings
from votes.models import QuestionVote
from .models import Question, Answer, AnswerComment
from questions.models import QuestionComment
from answers.forms import CreateAnswerForm, CreateAnswerCommentForm
from questions.forms import CreateQuestionCommentForm
class AnswerView(LoginRequiredMixin, TemplateView):
    """Question detail page.

    Shows one question with its answers, comments, votes and tag-related
    questions, and handles the three POSTable forms on the page: a new
    answer, a comment on the question, and a comment on an answer.
    Login is required for both GET and POST (LoginRequiredMixin).
    """
    template_name = 'questions/question_detail.html'

    def get_context_data(self, **kwargs):
        # Builds the template context for the question identified by
        # kwargs['question_id'] (404 when it does not exist).
        context = super().get_context_data(**kwargs)
        question = get_object_or_404(Question, pk=kwargs['question_id'])
        # Collect questions that share at least one tag with this one;
        # the question itself is removed and set() deduplicates questions
        # that share several tags.
        tags = question.tags.filter(question=question)
        related_questions = []
        for tag in tags:
            questions = Question.objects.filter(tags=tag)
            related_questions.extend(questions)
        if question in related_questions:
            related_questions.remove(question)
        context['related_questions'] = set(related_questions)
        context['question_votes'] = QuestionVote.objects.filter(question=question)
        context['question'] = question
        context['question_comments'] = QuestionComment.objects.filter(question=question).order_by('-created_at')
        # NOTE(review): this loads every AnswerComment in the database, not
        # just comments on this question's answers — confirm the template
        # filters them, otherwise this over-fetches.
        context['answer_comments'] = AnswerComment.objects.all().order_by('-created_at')
        context['answer_list'] = Answer.objects.filter(question=question).order_by('-created_at')
        # Bind all three forms to POST data when present; on GET,
        # `self.request.POST or None` yields unbound forms.
        context['answer_form'] = CreateAnswerForm(self.request.POST or None)
        context['comment_form'] = CreateQuestionCommentForm(self.request.POST or None)
        context['answer_comment_form'] = CreateAnswerCommentForm(self.request.POST or None)
        return context

    def post(self, request, *args, **kwargs):
        # Dispatches the POSTed data to whichever form validates first,
        # saves the object, emails the question author a notification and
        # redirects back to the answer list.
        context = self.get_context_data(**kwargs)
        answer_form = context['answer_form']
        comment_form = context['comment_form']
        answer_comment_form = context['answer_comment_form']
        question_id = kwargs['question_id']
        # NOTE(review): hard-coded development host in notification links;
        # should be derived from the request or the Sites framework.
        domain = f'http://127.0.0.1:8000/{question_id}/answers/'
        if answer_form.is_valid():
            new_answer = answer_form.save(commit=False)
            # Author and parent question are not form fields; set them here.
            new_answer.author = self.request.user
            new_answer.question = context['question']
            new_answer.save()
            send_mail('Your question has been answered',
                      f"Your question \'{context['question'].title}\' has received an answer. Click to see {domain}",
                      settings.EMAIL_HOST_USER,
                      [context['question'].author.email])
            return HttpResponseRedirect(reverse('answers:answer_list', kwargs={'question_id': kwargs['question_id']}))
        if comment_form.is_valid():
            new_comment = comment_form.save(commit=False)
            new_comment.author = self.request.user
            new_comment.question = Question.objects.get(id=kwargs['question_id'])
            new_comment.save()
            send_mail('Your question has been commented on',
                      f"Your question \'{context['question'].title}\' has received a comment. Click to see {domain}",
                      settings.EMAIL_HOST_USER,
                      [context['question'].author.email])
            return HttpResponseRedirect(reverse('answers:answer_list', kwargs={'question_id': kwargs['question_id']}))
        if answer_comment_form.is_valid():
            new_comment = answer_comment_form.save(commit=False)
            new_comment.author = self.request.user
            # NOTE(review): relies on 'answer_id' being captured by the URL
            # route; if this branch is reached via a route without it, this
            # raises KeyError. Verify the urlconf.
            new_comment.answer = Answer.objects.get(id=kwargs['answer_id'])
            new_comment.save()
            send_mail('Your answer has been commented on',
                      f"Your answer \'{new_comment.answer.answer_title}\' has received a comment. Click to see {domain}",
                      settings.EMAIL_HOST_USER,
                      [context['question'].author.email])
            return HttpResponseRedirect(reverse('answers:answer_list', kwargs={'question_id': kwargs['question_id']}))
        # No form validated: re-render with the bound answer form so errors show.
        return self.render_to_response({'aform': answer_form})
| [
"fahad.makabugo@andela.com"
] | fahad.makabugo@andela.com |
2a44968cfabfaf205e914f478c6be8b45a5371d1 | 3dc38a2fdce91eca7e51fa66d6d421856cc617ea | /FirstBadVersion.py | 9fb47c09e3dcc9c18942b7bc5acbbf6bbbb98ba2 | [] | no_license | CafeYuzuHuang/coding | 1406291925f8a70867122bc169a7786ba151bfbc | b690f64691173a55d95041b534f265cb77b65af0 | refs/heads/main | 2023-06-03T21:35:17.643617 | 2021-06-27T09:41:32 | 2021-06-27T09:41:32 | 346,586,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | # The isBadVersion API is already defined for you.
# @param version, an integer
# @return an integer
# def isBadVersion(version):
class Solution:
    def firstBadVersion(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Binary search for the first version where isBadVersion() reports
        # True. O(log n) time, O(1) space. If no version in [1, n] is bad,
        # this returns n + 1 (same as the linear scan would).
        low, high = 1, n
        while low <= high:
            mid = low + (high - low) // 2
            if isBadVersion(mid):
                # mid is bad: the first bad version is mid or earlier.
                high = mid - 1
            else:
                # mid is good: the first bad version must come after mid.
                low = mid + 1
        return low
| [
"noreply@github.com"
] | CafeYuzuHuang.noreply@github.com |
ef9d1444e7bf505cadfb72178b4eefbf0bc70510 | 1fad3905860f078d076cac8ebd09fe1f619b77bd | /week03/code/pmap/ping.py | c749fb470c4fbc49ec69165cc9f81c004ce8f23f | [] | no_license | Farinosa/Python001-class01 | 7f832dfef1df7cf5a11b0909c7fb787789b94a99 | 0496e470246046cfa1d70aaeafbf3074a430d143 | refs/heads/master | 2022-12-07T06:04:16.303644 | 2020-09-05T17:25:36 | 2020-09-05T17:25:36 | 273,857,131 | 0 | 0 | null | 2020-06-21T07:36:06 | 2020-06-21T07:36:05 | null | UTF-8 | Python | false | false | 448 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import subprocess
from common import IS_WIN, INFO_FMT, logger
def ping(host):
logger.info(INFO_FMT.format(f"ping: {host}"))
opt = "-c 5" if not IS_WIN else ""
cmd = f"ping {opt} {host}"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
res = [i.strip() for i in p.stdout.read().decode('gbk').split('\n') if i and i.strip()]
return res
| [
"575813104@qq.com"
] | 575813104@qq.com |
211711f9dd6e36e7fbd73ada11de99cca49587ba | c97a51e8ab8ccc7336dc1b2b6c83d36dc24122c7 | /setup.py | 73ad07f3a7615f6e38b4006b636aa29aff1e2997 | [
"MIT"
] | permissive | mardom/FoFCatalogMatching | e0f9d0a7cd2aadb99663aed42f4f94e16da1141f | 14252227793d0e57741ecdbf82b949c5727b2625 | refs/heads/master | 2023-08-08T17:52:18.102491 | 2021-01-01T23:42:25 | 2021-01-01T23:42:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,617 | py | #!/usr/bin/env python
"""
Using friends-of-friends method to iteratively match multiple sky catalogs
without the need of specifying the main catalog.
Project website: https://github.com/yymao/FoFCatalogMatching
The MIT License (MIT)
Copyright (c) 2018 Yao-Yuan Mao (yymao)
http://opensource.org/licenses/MIT
"""
import os
from setuptools import setup
_name = 'FoFCatalogMatching'
_version = ''
with open(os.path.join(os.path.dirname(__file__), '{}.py'.format(_name))) as _f:
for _l in _f:
if _l.startswith('__version__ = '):
_version = _l.partition('=')[2].strip().strip('\'').strip('"')
break
if not _version:
raise ValueError('__version__ not define!')
setup(
name=_name,
version=_version,
description='Using friends-of-friends method to iteratively match multiple sky catalogs without the need of specifying the main catalog.',
url='https://github.com/yymao/{}'.format(_name),
download_url='https://github.com/yymao/{}/archive/v{}.zip'.format(_name, _version),
author='Yao-Yuan Mao',
author_email='yymao.astro@gmail.com',
maintainer='Yao-Yuan Mao',
maintainer_email='yymao.astro@gmail.com',
license='MIT',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
keywords='fof catalog matching merging',
py_modules=['FoFCatalogMatching'],
install_requires=['numpy>1.3.0', 'astropy>1.0.0', 'fast3tree>=0.3.1'],
)
| [
"yymao.astro@gmail.com"
] | yymao.astro@gmail.com |
69d187a80900ba4e7157396649b0052029f97973 | de517d77cfd4c28d818ee663dd90fc6e278543e0 | /playbooks/roles/createresources/molecule/fiaas02-debian10/tests/test_vhosts.py | bc2b5d381033764ac1e368bead736053a4d184d6 | [
"MIT"
] | permissive | srgvg/fiaas | 8dc95c49150730bdf302c74a292d71a7f157d462 | 4707852373bf404625a5affbcaef9528e2835f19 | refs/heads/master | 2023-04-10T14:18:26.381542 | 2021-04-15T17:57:12 | 2021-04-15T17:57:12 | 68,356,000 | 0 | 0 | MIT | 2021-04-15T20:11:09 | 2016-09-16T05:46:05 | Jinja | UTF-8 | Python | false | false | 1,783 | py | import os
import testinfra.utils.ansible_runner
# Restrict this test module to the hosts Molecule created for the scenario,
# read from the inventory file Molecule exports via the environment.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_apache_vhost_config(host):
    """Each vhost config in sites-available exists and wires the expected
    server name, document root, fcgi proxy target and (optional) aliases."""
    vhosts = [
        ('www.svhost1.com', 'svhost1', '/var/www/sites/svhost1/web', []),
        ('www.svhost2.com', 'svhost2', '/var/www/sites/svhost2/web', []),
        ('www.svhost3.com', 'svhost3', '/var/www/sites/svhost3/web/public',
         ['alias1.svhost3.com', 'alias2.svhost3.com']),
    ]
    for server_name, site, docroot, aliases in vhosts:
        config = host.file('/etc/apache2/sites-available/' + server_name + '.conf')
        assert config.exists
        assert config.contains('ServerName ' + server_name)
        assert config.contains('DocumentRoot ' + docroot)
        assert config.contains('fcgi://' + site + '/')
        for alias in aliases:
            assert config.contains(alias)
def test_apache_vhost_enabled(host):
    """Symlinks in sites-enabled point at the matching sites-available
    configs (svhost1 is enabled as the 001-default site)."""
    expected = {
        '001-default.conf': 'www.svhost1.com.conf',
        'www.svhost2.com.conf': 'www.svhost2.com.conf',
        'www.svhost3.com.conf': 'www.svhost3.com.conf',
    }
    for enabled_name, available_name in expected.items():
        link = host.file('/etc/apache2/sites-enabled/' + enabled_name)
        assert link.exists
        assert link.is_symlink
        assert link.linked_to == '/etc/apache2/sites-available/' + available_name
| [
"noreply@github.com"
] | srgvg.noreply@github.com |
1af043b82f61d55acd46f190698fc67143c3b30f | 7e6dae745e3443bb37cc53d7ed5cfef516cb6fa9 | /landvision/landvision/wsgi.py | 0a42baea126a77952f6480f05bc79d418d5b1d3e | [] | no_license | gblack686/django | 943b4ae530b16b36066c58fa4159380b56823d02 | 282728ea5fe547dcca8e0028478a5742d24bb560 | refs/heads/master | 2023-08-06T05:28:05.578907 | 2020-05-06T04:04:46 | 2020-05-06T04:04:46 | 256,689,001 | 0 | 0 | null | 2023-07-23T12:22:04 | 2020-04-18T06:53:36 | Jupyter Notebook | UTF-8 | Python | false | false | 397 | py | """
WSGI config for landvision project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already names
# a settings module (setdefault keeps any existing value).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "landvision.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"gblack686@gmail.com"
] | gblack686@gmail.com |
8e204756e205394482650c812c5b994b021ff48c | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007.2/programming/libs/geoip/actions.py | 8ea47d4a7aaa461b4099e2a3b64026df8fc2b019 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006,2007 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "GeoIP-%s" % get.srcVERSION()
def setup():
    # Build shared libraries only; static archives are not packaged.
    # (The backslash continues the string literal, so the flags are passed
    # as one argument string to configure.)
    autotools.configure("--enable-shared \
                         --disable-static")
def build():
    # Run the default make target.
    autotools.make()
def install():
    # Stage the built files into the package image directory (DESTDIR).
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    # Ship the top-level documentation files with the package.
    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING", "NEWS", "README", "TODO")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
8bd38defaee4893c193703d70ceef315991da224 | cae6a75bb123626a6c0a8d08e6990d378d3bc579 | /labUpd/mapperTwo.py | 23cdc5ee71d0249336b36f302cf199518bc122a5 | [] | no_license | Dimagiopatriot/bigdata | 831f8dfbff418ff95366b06b082e09baf20bae77 | c7503ab15d53af91f13e4dac53b6a5c8e9624b1a | refs/heads/master | 2020-04-15T06:48:26.281885 | 2019-01-07T18:30:29 | 2019-01-07T18:30:29 | 164,473,792 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | #!/usr/bin/env python
import sys
def read_input(file):
    """Yield each line of *file* as a list of whitespace-separated words."""
    for line in file:
        # split the line into words
        yield line.split()

def main(separator='\t'):
    """Hadoop-streaming-style mapper: read lines from STDIN and write one
    "<word><separator>1" record per word to file2.txt.

    Bug fix: the output file is now opened once, before the loop over the
    input lines. The original re-opened it with mode "w+" for every input
    line, truncating the file each time so only the last line's words
    survived. The ``with`` block also guarantees the file is closed even
    when an error occurs mid-stream.
    """
    data = read_input(sys.stdin)
    with open("file2.txt", "w") as f:
        for words in data:
            for word in words:
                # tab-delimited; the trivial word count is 1
                f.write(word + separator + "1" + "\n")

if __name__ == "__main__":
    main()
"troll3758@gmail.com"
] | troll3758@gmail.com |
f8bee13d71ac5fd00983b47a2cfe24ddd993c7cd | 5f001bf25543eb4bd19f8ad8d1726da3764aaa9e | /accounts/forms.py | 7835f8af495d3ece68529b99533c8e58ad25cdd7 | [] | no_license | Tomasgei/Black_box | e5bd6f0b1b1c687709b9f28cd843704865c9fbaa | 65d15412142a86787d31ed6096e88a7560e74d49 | refs/heads/main | 2023-09-02T15:48:06.949487 | 2021-11-14T19:04:39 | 2021-11-14T19:04:39 | 425,913,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
class SingInForm(AuthenticationForm):
    """Login form styled for Bootstrap (``form-control`` widgets).

    NOTE(review): the class name looks like a typo for ``SignInForm``;
    renaming it would break existing imports, so it is left unchanged.
    """

    # No-op override: it only delegates to AuthenticationForm.__init__ and
    # could be removed without changing behavior.
    def __init__(self, *args, **kwargs):
        super(SingInForm, self).__init__(*args, **kwargs)

    username = forms.CharField(max_length=200, required=True,widget=forms.TextInput(attrs={
        "class": "form-control",
        "type" : "text",
        "placeholder": "Enter Username"
    }))
    # NOTE(review): rendered as a password box only via the ``type`` attr on
    # a TextInput; ``forms.PasswordInput`` would be the idiomatic widget —
    # confirm the rendered HTML before switching.
    password = forms.CharField(max_length=200, required=True, widget=forms.TextInput(attrs={
        "class": "form-control",
        "type" : "password",
        "placeholder": "Enter Password"
    }))
class SignUpForm(UserCreationForm):
    """Registration form: Django's UserCreationForm plus a required email
    field, with Bootstrap-styled (``form-control``) widgets.
    """

    # Bug fix: the label previously read "Your Email" (copy-pasted from the
    # email field below) even though this is the username input.
    username = forms.CharField(max_length=200, required=True,widget=forms.TextInput(attrs={
        "class": "form-control",
        "type" : "text",
        "placeholder": "Type Username"
    }), label="Your Username")
    email = forms.CharField(max_length=200, required=True,widget=forms.EmailInput(attrs={
        "class": "form-control",
        "type" : "email",
        "placeholder": "example@company.com"
    }), label="Your Email")
    # NOTE(review): both password fields use TextInput with a "password"
    # type attr; forms.PasswordInput is the idiomatic widget.
    password1 = forms.CharField(max_length=200, required=True, widget=forms.TextInput(attrs={
        "class": "form-control",
        "type" : "password",
        "placeholder": "Password"
    }), label="Your Password")
    password2 = forms.CharField(max_length=200, required=True, widget=forms.TextInput(attrs={
        "class": "form-control",
        "type" : "password",
        "placeholder": "Confirm Password"
    }), label="Confirm Password")

    class Meta:
        model = User
        fields = ("username","email","password1","password2")
| [
"50343160+Tomasgei@users.noreply.github.com"
] | 50343160+Tomasgei@users.noreply.github.com |
d82801c140644d7d2e3d8e2a5bd2883da694c2e2 | b385059cc536050746e45bd762040270c13ca67e | /01_sinuses_plot/3_sinuses_plot.py | 57cdd580b3e0209e0fa58c9684e74270fea17596 | [
"MIT"
] | permissive | savver/Python-Examples | 8c020f7aea08fb15e51566858de20a514358bd49 | f52987cbac92ca5af555dd119b1d11773365a6e5 | refs/heads/master | 2021-02-15T05:26:17.725079 | 2020-05-12T18:23:42 | 2020-05-12T18:23:42 | 244,859,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | import numpy as np
import math as math
import matplotlib.pyplot as plt
import matplotlib as mpl
ampl = 100
delta = 0.0
n = 6
#-------------------
pi = math.pi
xrange = np.arange(0, 6, 0.001)
sn1 = []
sn2 = []
sn3 = []
for x in xrange:
sn1.append(ampl * math.sin((x + delta)*6 + 0))
sn2.append(ampl * math.sin((x + delta)*6 + 2*pi/3))
sn3.append(ampl * math.sin((x + delta)*6 + 4*pi/3))
#---------------------
print ('matplotlib ver: ', mpl.__version__)
dpi = 80
fig = plt.figure(dpi = dpi, figsize = (512 / dpi, 384 / dpi) )
mpl.rcParams.update({'font.size': 10})
min_val = min(sn1)
max_val = max(sn1)
plt.axis([0, max(xrange), min_val-20, max_val+20])
plt.title('3 sinuses with phase offset')
plt.xlabel('x, * 0.001 rad')
plt.ylabel('sn(x)')
ax = plt.axes()
ax.xaxis.grid(True)
ax.yaxis.grid(True)
#locator = mpl.ticker.MultipleLocator (base=1.0)
#ax.xaxis.set_major_locator (locator)
ax.minorticks_on()
ax.grid(which='major',
color = 'k',
linewidth = 1)
ax.grid(which='minor',
color = 'k',
linestyle = ':')
plt.plot(xrange, sn1, color = 'blue', linestyle = '-',
label = 'sn1', marker=".")
plt.plot(xrange, sn2, color = 'red', linestyle = '-',
label = 'sn2', marker=".")
plt.plot(xrange, sn3, color = 'green', linestyle = '-',
label = 'sn3', marker=".")
plt.legend(loc = 'upper right')
plt.show()
input() | [
"savver.engineer@gmail.com"
] | savver.engineer@gmail.com |
564d007f30314f626af2a6f9ebbfe6aa75131c69 | 1c4a19c0d1953280f79193f30ad8c4759e3aff58 | /ansys/dpf/core/operators/math/conjugate.py | ddeae28fea91fca0f6c68e3c561790131c01251f | [
"MIT"
] | permissive | hoangxuyenle/DPF-Core | d02c843b678560f12715ea90dc8c9764b3bffc99 | a404dd290c7b3ee75463b2487cafb8bf48468691 | refs/heads/master | 2023-06-15T15:27:02.597938 | 2021-06-22T15:19:04 | 2021-06-22T15:19:04 | 381,611,135 | 0 | 0 | MIT | 2021-06-30T07:18:30 | 2021-06-30T07:18:30 | null | UTF-8 | Python | false | false | 4,859 | py | """
conjugate
=========
"""
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs, _modify_output_spec_with_one_type
from ansys.dpf.core.operators.specification import PinSpecification, Specification
"""Operators from Ans.Dpf.Native plugin, from "math" category
"""
# Auto-generated DPF operator wrapper; regenerate rather than hand-edit.
class conjugate(Operator):
    """Computes element-wise conjugate of field containers containing complex fields.
      available inputs:
        - fields_container (FieldsContainer)
      available outputs:
        - fields_container (FieldsContainer)
      Examples
      --------
      >>> from ansys.dpf import core as dpf
      >>> # Instantiate operator
      >>> op = dpf.operators.math.conjugate()
      >>> # Make input connections
      >>> my_fields_container = dpf.FieldsContainer()
      >>> op.inputs.fields_container.connect(my_fields_container)
      >>> # Instantiate operator and connect inputs in one line
      >>> op = dpf.operators.math.conjugate(fields_container=my_fields_container)
      >>> # Get output data
      >>> result_fields_container = op.outputs.fields_container()"""
    def __init__(self, fields_container=None, config=None, server=None):
        # Registers the operator under its server-side name and builds the
        # typed input/output pin wrappers.
        super().__init__(name="conjugate", config = config, server = server)
        self._inputs = InputsConjugate(self)
        self._outputs = OutputsConjugate(self)
        # NOTE(review): `!= None` — `is not None` is preferred, but the
        # comparison is left unchanged in this generated code.
        if fields_container !=None:
            self.inputs.fields_container.connect(fields_container)

    @staticmethod
    def _spec():
        # Pin specification consumed by the DPF framework: one input pin (0)
        # and one output pin (0), both FieldsContainer.
        spec = Specification(description="""Computes element-wise conjugate of field containers containing complex fields.""",
                             map_input_pin_spec={
                                 0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""""")},
                             map_output_pin_spec={
                                 0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""""")})
        return spec

    @staticmethod
    def default_config():
        return Operator.default_config(name = "conjugate")

    @property
    def inputs(self):
        """Enables to connect inputs to the operator

        Returns
        --------
        inputs : InputsConjugate
        """
        return super().inputs

    @property
    def outputs(self):
        """Enables to get outputs of the operator by evaluating it

        Returns
        --------
        outputs : OutputsConjugate
        """
        return super().outputs
#internal name: conjugate
#scripting name: conjugate
# Auto-generated DPF input-pin wrapper; regenerate rather than hand-edit.
class InputsConjugate(_Inputs):
    """Intermediate class used to connect user inputs to conjugate operator

      Examples
      --------
      >>> from ansys.dpf import core as dpf
      >>> op = dpf.operators.math.conjugate()
      >>> my_fields_container = dpf.FieldsContainer()
      >>> op.inputs.fields_container.connect(my_fields_container)
    """
    def __init__(self, op: Operator):
        # Wrap input pin 0 of the operator as a typed Input object.
        super().__init__(conjugate._spec().inputs, op)
        self._fields_container = Input(conjugate._spec().input_pin(0), 0, op, -1)
        self._inputs.append(self._fields_container)

    @property
    def fields_container(self):
        """Allows to connect fields_container input to the operator

        Parameters
        ----------
        my_fields_container : FieldsContainer,

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.conjugate()
        >>> op.inputs.fields_container.connect(my_fields_container)
        >>> #or
        >>> op.inputs.fields_container(my_fields_container)
        """
        return self._fields_container
# Auto-generated DPF output-pin wrapper; regenerate rather than hand-edit.
class OutputsConjugate(_Outputs):
    """Intermediate class used to get outputs from conjugate operator

      Examples
      --------
      >>> from ansys.dpf import core as dpf
      >>> op = dpf.operators.math.conjugate()
      >>> # Connect inputs : op.inputs. ...
      >>> result_fields_container = op.outputs.fields_container()
    """
    def __init__(self, op: Operator):
        # Wrap output pin 0 of the operator as a typed Output object.
        super().__init__(conjugate._spec().outputs, op)
        self._fields_container = Output(conjugate._spec().output_pin(0), 0, op)
        self._outputs.append(self._fields_container)

    @property
    def fields_container(self):
        """Allows to get fields_container output of the operator

        Returns
        ----------
        my_fields_container : FieldsContainer,

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.conjugate()
        >>> # Connect inputs : op.inputs. ...
        >>> result_fields_container = op.outputs.fields_container()
        """
        return self._fields_container
| [
"lea.paradis@ansys.com"
] | lea.paradis@ansys.com |
76223c165e5e9ac07147392a1c676096c926a704 | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/python/debug/cli/ui_factory.pyi | b43ca70100629b7c956effd95df1bc66726070c7 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | pyi | # Stubs for tensorflow.python.debug.cli.ui_factory (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any as Any, Optional as Optional
SUPPORTED_UI_TYPES: Any
def get_ui(ui_type: Any, on_ui_exit: Optional[Any] = ..., available_ui_types: Optional[Any] = ..., config: Optional[Any] = ...): ...
| [
"matangover@gmail.com"
] | matangover@gmail.com |
5b430c441e126abe88cb99af4ffdd4557c199ad5 | 9b7f191e08244b27bcf439f7363d1f12740e05eb | /Python/py-26/ex7-8.py | b9219fb8d637b51059fc5b2525ce0d79ed751850 | [] | no_license | rufi91/sw800-Coursework | ea01c673282c808db53af1cad8d198fb34c8d1e6 | 3900d932c6b3fa2459f38b8e5ccfdbaa6dd180bb | refs/heads/master | 2020-04-05T17:16:54.223717 | 2019-02-11T06:29:53 | 2019-02-11T06:29:53 | 157,052,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | """
7.From the given csv files(items.csv and sales.csv in common/Python_Exercises folder) create two dataframe objects- idf1 and sdf1
8.Using idf1 and sdf1 print item name, region and sales quantity and store the same in a new dataframe.
9.Create two dataframes by applying pivot on the above dataframe with region/item name as index/column.
"""
import pandas as pd
idf1=pd.read_csv('/home/ai21/Desktop/common/Python_Exercises/items.csv')
sdf1=pd.read_csv('/home/ai21/Desktop/common/Python_Exercises/sales.csv')
print idf1.columns, sdf1.columns
mdf1=idf1.merge(sdf1, left_on='id', right_on='tid')[['name','region','sale_qty']]
print mdf1
df1=mdf1.pivot(index='region',columns='name')
print df1
df2=mdf1.pivot(columns='region',index='name')
print "\n\n",df2
| [
"noreply@github.com"
] | rufi91.noreply@github.com |
a3102fcc5d0e0bfb6ee0b1bf3111b652fc63dcb7 | 07fa9a51d737d0a1fbe217b1a6a956abbef4ef87 | /pytorchvideo/layers/accelerator/mobile_cpu/conv_helper.py | 9d9d7c228c92e93dca3ca889ed62c9c8350ab955 | [
"Apache-2.0"
] | permissive | xchani/pytorchvideo | 2b6decf3a1076b9256745f0ae81d86e2f43e14a7 | 16f2abf2f8aa174915316007622bbb260215dee8 | refs/heads/main | 2023-08-27T16:36:23.346066 | 2021-11-11T11:40:56 | 2021-11-11T11:40:56 | 414,095,913 | 0 | 0 | Apache-2.0 | 2021-10-06T06:32:33 | 2021-10-06T06:32:32 | null | UTF-8 | Python | false | false | 21,774 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This file contains helper classes for building conv3d efficient blocks.
The helper classes are intended to be instantiated inside efficient block,
not to be used by user to build network.
"""
from copy import deepcopy
from typing import Tuple
import torch
import torch.nn as nn
class _Reshape(nn.Module):
"""
Helper class to implement data reshape as a module.
Args:
reshape_size (tuple): size of data after reshape.
"""
def __init__(
self,
reshape_size: Tuple,
):
super().__init__()
self.reshape_size = reshape_size
def forward(self, x):
return torch.reshape(x, self.reshape_size)
class _SkipConnectMul(nn.Module):
"""
Helper class to implement skip multiplication.
Args:
layer (nn.Module): layer for skip multiplication. With input x, _SkipConnectMul
implements layer(x)*x.
"""
def __init__(
self,
layer: nn.Module,
):
super().__init__()
self.layer = layer
self.mul_func = nn.quantized.FloatFunctional()
def forward(self, x):
return self.mul_func.mul(x, self.layer(x))
class _Conv3dTemporalKernel3Decomposed(nn.Module):
"""
Helper class for decomposing conv3d with temporal kernel of 3 into equivalent conv2ds.
In conv3d with temporal kernel 3 and input I, for output temporal index of t (O[:,:,t,:,:]),
the conv can be expressed as:
O[:,:,t,:,:] = conv3d(I[:,:,t:t+3,:,:])
= conv2d_0(I[:,:,t,:,:]) + conv2d_1(I[:,:,t+1,:,:]) + conv2d_2(I[:,:,t+2,:,:])
If bias is considered:
O[:,:,t,:,:] = conv3d_w_bias(I[:,:,t:t+3,:,:])
= conv2d_0_wo_bias(I[:,:,t,:,:])
+ conv2d_1_w_bias(I[:,:,t+1,:,:]) + conv2d_2_wo_bias(I[:,:,t+2,:,:])
The input Conv3d also needs zero padding of size 1 in temporal dimension.
"""
def __init__(
self,
conv3d_in: nn.Conv3d,
input_THW_tuple: Tuple,
):
"""
Args:
conv3d_in (nn.Module): input nn.Conv3d module to be converted
into equivalent conv2d.
input_THW_tuple (tuple): input THW size for conv3d_in during forward.
"""
super().__init__()
assert conv3d_in.padding[0] == 1, (
"_Conv3dTemporalKernel3Eq only support temporal padding of 1, "
f"but got {conv3d_in.padding[0]}"
)
assert conv3d_in.padding_mode == "zeros", (
"_Conv3dTemporalKernel3Eq only support zero padding, "
f"but got {conv3d_in.padding_mode}"
)
self._input_THW_tuple = input_THW_tuple
padding_2d = conv3d_in.padding[1:]
in_channels = conv3d_in.in_channels
out_channels = conv3d_in.out_channels
kernel_size = conv3d_in.kernel_size[1:]
groups = conv3d_in.groups
stride_2d = conv3d_in.stride[1:]
# Create 3 conv2d to emulate conv3d.
if (
self._input_THW_tuple[0] > 1
): # Those two conv2d are needed only when temporal input > 1.
self._conv2d_3_3_0 = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=padding_2d,
stride=stride_2d,
groups=groups,
bias=False,
)
self._conv2d_3_3_2 = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=padding_2d,
stride=stride_2d,
groups=groups,
bias=False,
)
self._conv2d_3_3_1 = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=padding_2d,
stride=stride_2d,
groups=groups,
bias=(conv3d_in.bias is not None),
)
state_dict = conv3d_in.state_dict()
state_dict_1 = deepcopy(state_dict)
state_dict_1["weight"] = state_dict["weight"][:, :, 1]
self._conv2d_3_3_1.load_state_dict(state_dict_1)
if self._input_THW_tuple[0] > 1:
state_dict_0 = deepcopy(state_dict)
state_dict_0["weight"] = state_dict["weight"][:, :, 0]
if conv3d_in.bias is not None:
"""
Don't need bias for other conv2d instances to avoid duplicated addition of bias.
"""
state_dict_0.pop("bias")
self._conv2d_3_3_0.load_state_dict(state_dict_0)
state_dict_2 = deepcopy(state_dict)
state_dict_2["weight"] = state_dict["weight"][:, :, 2]
if conv3d_in.bias is not None:
state_dict_2.pop("bias")
self._conv2d_3_3_2.load_state_dict(state_dict_2)
self._add_funcs = nn.ModuleList(
[
nn.quantized.FloatFunctional()
for _ in range(2 * (self._input_THW_tuple[0] - 1))
]
)
self._cat_func = nn.quantized.FloatFunctional()
def forward(self, x):
"""
Use three conv2d to emulate conv3d.
This forward assumes zero padding of size 1 in temporal dimension.
"""
if self._input_THW_tuple[0] > 1:
out_tensor_list = []
"""
First output plane in temporal dimension,
conv2d_3_3_0 is skipped due to zero padding.
"""
cur_tensor = (
self._add_funcs[0]
.add(self._conv2d_3_3_1(x[:, :, 0]), self._conv2d_3_3_2(x[:, :, 1]))
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
for idx in range(2, self._input_THW_tuple[0]):
cur_tensor = (
self._add_funcs[2 * idx - 3]
.add(
self._add_funcs[2 * idx - 2].add(
self._conv2d_3_3_0(x[:, :, idx - 2]),
self._conv2d_3_3_1(x[:, :, idx - 1]),
),
self._conv2d_3_3_2(x[:, :, idx]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
"""
Last output plane in temporal domain, conv2d_3_3_2 is skipped due to zero padding.
"""
cur_tensor = (
self._add_funcs[-1]
.add(self._conv2d_3_3_0(x[:, :, -2]), self._conv2d_3_3_1(x[:, :, -1]))
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
return self._cat_func.cat(out_tensor_list, 2)
else: # Degenerated to simple conv2d
return self._conv2d_3_3_1(x[:, :, 0]).unsqueeze(2)
class _Conv3dTemporalKernel5Decomposed(nn.Module):
"""
Helper class for decomposing conv3d with kernel size of (5, k, k) into equivalent conv2ds.
In such conv3d and input I, for output temporal index of t (O[:,:,t,:,:]), the conv
can be expressed as:
O[:,:,t,:,:] = conv3d(I[:,:,t:t+5,:,:])
= conv2d_0(I[:,:,t,:,:]) + conv2d_1(I[:,:,t+1,:,:]) + conv2d_2(I[:,:,t+2,:,:])
+ conv2d_3(I[:,:,t+3,:,:]) + conv2d_4(I[:,:,t+4,:,:])
If bias is considered:
O[:,:,t,:,:] = conv3d_w_bias(I[:,:,t:t+3,:,:])
= conv2d_0_wo_bias(I[:,:,t,:,:])
+ conv2d_1_wo_bias(I[:,:,t+1,:,:]) + conv2d_2_w_bias(I[:,:,t+2,:,:])
+ conv2d_3_wo_bias(I[:,:,t+1,:,:]) + conv2d_4_wo_bias(I[:,:,t+2,:,:])
The input Conv3d also needs zero padding of size 2 in temporal dimension at begin and end.
"""
def __init__(
self,
conv3d_in: nn.Conv3d,
thw_shape: Tuple[int, int, int],
):
"""
Args:
conv3d_in (nn.Module): input nn.Conv3d module to be converted
into equivalent conv2d.
thw_shape (tuple): input THW size for conv3d_in during forward.
"""
super().__init__()
assert conv3d_in.padding[0] == 2, (
"_Conv3dTemporalKernel5Eq only support temporal padding of 2, "
f"but got {conv3d_in.padding[0]}"
)
assert conv3d_in.padding_mode == "zeros", (
"_Conv3dTemporalKernel5Eq only support zero padding, "
f"but got {conv3d_in.padding_mode}"
)
self._thw_shape = thw_shape
padding_2d = conv3d_in.padding[1:]
in_channels = conv3d_in.in_channels
out_channels = conv3d_in.out_channels
kernel_size = conv3d_in.kernel_size[1:]
groups = conv3d_in.groups
stride_2d = conv3d_in.stride[1:]
# Create 3 conv2d to emulate conv3d.
t, h, w = self._thw_shape
args_dict = {
"in_channels": in_channels,
"out_channels": out_channels,
"kernel_size": kernel_size,
"padding": padding_2d,
"stride": stride_2d,
"groups": groups,
}
for iter_idx in range(5):
if iter_idx != 2:
if t > 1: # Those four conv2d are needed only when temporal input > 1.
self.add_module(
f"_conv2d_{iter_idx}", nn.Conv2d(**args_dict, bias=False)
)
else: # _conv2d_2 is needed for all circumstances.
self.add_module(
f"_conv2d_{iter_idx}",
nn.Conv2d(**args_dict, bias=(conv3d_in.bias is not None)),
)
# State dict for _conv2d_2
original_state_dict = conv3d_in.state_dict()
state_dict_to_load = deepcopy(original_state_dict)
state_dict_to_load["weight"] = original_state_dict["weight"][:, :, 2]
self._conv2d_2.load_state_dict(state_dict_to_load)
if t > 1:
if conv3d_in.bias is not None:
# Don't need bias for other conv2d instances to avoid duplicated
# addition of bias.
state_dict_to_load.pop("bias")
# State dict for _conv2d_0, _conv2d_1, _conv2d_3, _conv2d_4
state_dict_to_load["weight"] = original_state_dict["weight"][:, :, 0]
self._conv2d_0.load_state_dict(state_dict_to_load)
state_dict_to_load["weight"] = original_state_dict["weight"][:, :, 1]
self._conv2d_1.load_state_dict(state_dict_to_load)
state_dict_to_load["weight"] = original_state_dict["weight"][:, :, 3]
self._conv2d_3.load_state_dict(state_dict_to_load)
state_dict_to_load["weight"] = original_state_dict["weight"][:, :, 4]
self._conv2d_4.load_state_dict(state_dict_to_load)
# Elementwise add are needed in forward function, use nn.quantized.FloatFunctional()
# for better quantization support. One convolution needs at most 4 elementwise adds
# without zero padding; for boundary planes fewer elementwise adds are needed.
# See forward() for more details.
self._add_funcs = nn.ModuleList(
[nn.quantized.FloatFunctional() for _ in range(4 * t - 6)]
)
self._cat_func = nn.quantized.FloatFunctional()
def forward(self, x):
"""
Use three conv2d to emulate conv3d.
Args:
x (torch.Tensor): 5D tensor of (B, C, T, H, W)
"""
t, h, w = self._thw_shape
out_tensor_list = []
if (
t == 1
): # Degenerated to simple conv2d, but make sure output still has T dimension
return self._conv2d_2(x[:, :, 0]).unsqueeze(2)
elif t == 2:
# out_tensor_list[0]: conv2d_1_1_0, conv2d_1_1_1 and conv2d_1_1_4 are
# applied to zero padding.
cur_tensor = (
self._add_funcs[0]
.add(self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1]))
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
# out_tensor_list[1]: conv2d_1_1_0, conv2d_1_1_3 and conv2d_1_1_4 are
# applied to zero padding.
cur_tensor = (
self._add_funcs[1]
.add(self._conv2d_1(x[:, :, 0]), self._conv2d_2(x[:, :, 1]))
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
elif t == 3:
# out_tensor_list[0]: conv2d_1_1_0, conv2d_1_1_1 are applied to zero padding.
cur_tensor = (
self._add_funcs[0]
.add(
self._add_funcs[1].add(
self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1])
),
self._conv2d_4(x[:, :, 2]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
# out_tensor_list[1]: conv2d_1_1_0, conv2d_1_1_4 are applied to zero padding.
cur_tensor = (
self._add_funcs[2]
.add(
self._add_funcs[3].add(
self._conv2d_1(x[:, :, 0]), self._conv2d_2(x[:, :, 1])
),
self._conv2d_3(x[:, :, 2]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
# out_tensor_list[2]: conv2d_1_1_3, conv2d_1_1_4 are applied to zero padding.
cur_tensor = (
self._add_funcs[4]
.add(
self._add_funcs[5].add(
self._conv2d_0(x[:, :, 0]), self._conv2d_1(x[:, :, 1])
),
self._conv2d_2(x[:, :, 2]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
elif t == 4:
# out_tensor_list[0]: conv2d_1_1_0, conv2d_1_1_1 are applied to zero padding.
cur_tensor = (
self._add_funcs[0]
.add(
self._add_funcs[1].add(
self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1])
),
self._conv2d_4(x[:, :, 2]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
# out_tensor_list[1]: conv2d_1_1_0 is applied to zero padding.
cur_tensor = (
self._add_funcs[2]
.add(
self._add_funcs[3].add(
self._add_funcs[4].add(
self._conv2d_1(x[:, :, 0]),
self._conv2d_2(x[:, :, 1]),
),
self._conv2d_3(x[:, :, 2]),
),
self._conv2d_4(x[:, :, 3]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
# out_tensor_list[2]: conv2d_1_1_4 is applied to zero padding.
cur_tensor = (
self._add_funcs[5]
.add(
self._add_funcs[6].add(
self._add_funcs[7].add(
self._conv2d_0(x[:, :, 0]),
self._conv2d_1(x[:, :, 1]),
),
self._conv2d_2(x[:, :, 2]),
),
self._conv2d_3(x[:, :, 3]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
# out_tensor_list[3]: conv2d_1_1_3, conv2d_1_1_4 are applied to zero padding.
cur_tensor = (
self._add_funcs[8]
.add(
self._add_funcs[9].add(
self._conv2d_0(x[:, :, 1]), self._conv2d_1(x[:, :, 2])
),
self._conv2d_2(x[:, :, 3]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
else: # t >= 5
# out_tensor_list[0]: conv2d_1_1_0, conv2d_1_1_1 are applied to zero padding.
add_func_idx_base = 0
cur_tensor = (
self._add_funcs[add_func_idx_base]
.add(
self._add_funcs[add_func_idx_base + 1].add(
self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1])
),
self._conv2d_4(x[:, :, 2]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
add_func_idx_base += 2
# out_tensor_list[1]: conv2d_1_1_0 is applied to zero padding.
cur_tensor = (
self._add_funcs[add_func_idx_base]
.add(
self._add_funcs[add_func_idx_base + 1].add(
self._add_funcs[add_func_idx_base + 2].add(
self._conv2d_1(x[:, :, 0]),
self._conv2d_2(x[:, :, 1]),
),
self._conv2d_3(x[:, :, 2]),
),
self._conv2d_4(x[:, :, 3]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
add_func_idx_base += 3
# out_tensor_list[2:-2]: zero padding has no effect.
for idx in range(4, t):
cur_tensor = (
self._add_funcs[add_func_idx_base]
.add(
self._add_funcs[add_func_idx_base + 1].add(
self._add_funcs[add_func_idx_base + 2].add(
self._add_funcs[add_func_idx_base + 3].add(
self._conv2d_0(x[:, :, idx - 4]),
self._conv2d_1(x[:, :, idx - 3]),
),
self._conv2d_2(x[:, :, idx - 2]),
),
self._conv2d_3(x[:, :, idx - 1]),
),
self._conv2d_4(x[:, :, idx]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
add_func_idx_base += 4
# out_tensor_list[-2]: conv2d_1_1_4 is applied to zero padding.
cur_tensor = (
self._add_funcs[add_func_idx_base]
.add(
self._add_funcs[add_func_idx_base + 1].add(
self._add_funcs[add_func_idx_base + 2].add(
self._conv2d_0(x[:, :, -4]),
self._conv2d_1(x[:, :, -3]),
),
self._conv2d_2(x[:, :, -2]),
),
self._conv2d_3(x[:, :, -1]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
add_func_idx_base += 3
# out_tensor_list[-1]: conv2d_1_1_3, conv2d_1_1_4 are applied to zero padding.
cur_tensor = (
self._add_funcs[add_func_idx_base]
.add(
self._add_funcs[add_func_idx_base + 1].add(
self._conv2d_0(x[:, :, -3]),
self._conv2d_1(x[:, :, -2]),
),
self._conv2d_2(x[:, :, -1]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
return self._cat_func.cat(out_tensor_list, 2)
class _Conv3dTemporalKernel1Decomposed(nn.Module):
"""
Helper class for decomposing conv3d with temporal kernel of 1 into conv2d on
multiple temporal planes.
In conv3d with temporal kernel 1 and input I, for output temporal index of t (O[:,:,t,:,:]),
the conv can be expressed as:
O[:,:,t,:,:] = conv3d(I[:,:,t,:,:])
= conv2d(I[:,:,t,:,:])
The full output can be obtained by concat O[:,:,t,:,:] for t in 0...T,
where T is the length of I in temporal dimension.
"""
def __init__(
self,
conv3d_eq: nn.Conv3d,
input_THW_tuple: Tuple,
):
"""
Args:
conv3d_eq (nn.Module): input nn.Conv3d module to be converted
into equivalent conv2d.
input_THW_tuple (tuple): input THW size for conv3d_eq during forward.
"""
super().__init__()
# create equivalent conv2d module
in_channels = conv3d_eq.in_channels
out_channels = conv3d_eq.out_channels
bias_flag = conv3d_eq.bias is not None
self.conv2d_eq = nn.Conv2d(
in_channels,
out_channels,
kernel_size=(conv3d_eq.kernel_size[1], conv3d_eq.kernel_size[2]),
stride=(conv3d_eq.stride[1], conv3d_eq.stride[2]),
groups=conv3d_eq.groups,
bias=bias_flag,
padding=(conv3d_eq.padding[1], conv3d_eq.padding[2]),
dilation=(conv3d_eq.dilation[1], conv3d_eq.dilation[2]),
)
state_dict = conv3d_eq.state_dict()
state_dict["weight"] = state_dict["weight"].squeeze(2)
self.conv2d_eq.load_state_dict(state_dict)
self.input_THW_tuple = input_THW_tuple
def forward(self, x):
out_tensor_list = []
for idx in range(self.input_THW_tuple[0]):
cur_tensor = self.conv2d_eq(x[:, :, idx]).unsqueeze(2)
out_tensor_list.append(cur_tensor)
return torch.cat(out_tensor_list, 2)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
b66cfc4ef4f311863cf16e4ae898ab4f639f567c | a1fd9852fca3fd99f7ee988df6bbd0535e000a5e | /message_board/wsgi.py | f8aff5cecd03e897b85dd8d783a9f63fa7daef04 | [
"MIT"
] | permissive | GouthamDoddi/Message_board | 13704aaf38ffb617980b00a33e2fa1ed8881f7fe | 8e4e3248c7f072582d525349201f9f8279a267f8 | refs/heads/master | 2023-08-01T05:55:47.504453 | 2020-06-09T03:50:39 | 2020-06-09T03:50:39 | 266,520,105 | 0 | 0 | MIT | 2021-09-22T19:05:38 | 2020-05-24T10:44:44 | Python | UTF-8 | Python | false | false | 403 | py | """
WSGI config for message_board project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'message_board.settings')
application = get_wsgi_application()
| [
"gouthambolt@gmail.com"
] | gouthambolt@gmail.com |
349d1a858c9f94d638e8aa9726e058979f72624d | c7dd00eab83ae444aabbedef73dab5a371050458 | /fungsi rekursi 4.py | 15875b1ff85f11cd14b7c8d5965a9aa52dac9e6c | [] | no_license | YudhaDwiAnggara/python-x3-yudhadwianggara | 971d2d7b314d622dd2233e81c4d98758e6ee92e4 | bdb7452a4919bf2551f1b2982926a67af1ec55f3 | refs/heads/master | 2023-04-25T06:47:28.721310 | 2021-05-20T06:42:49 | 2021-05-20T06:42:49 | 337,626,173 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #menampilkan deret fibonancy
def deret_fibo(n):
if n <= 1:
return n
else:
return(deret_fibo(n-1) + deret_fibo(n-2))
jumlah_deret = int(input('jumlah deret : '))
if jumlah_deret <= 0:
print('masukkan bilangan bulat positif')
else:
print('deret fibonanci : ')
for i in range(jumlah_deret):
print(deret_fibo(i))
#output
0 1 1 2 3 5 8 13 21 34 dst | [
"noreply@github.com"
] | YudhaDwiAnggara.noreply@github.com |
0e68abbcb7a235aef8d867b839794b3da6f3f67a | 2f052b1d01f2c2af6f74f79f0d0fb73e8908e1da | /Python/Exercicios/37.py | f1dfdc541d16f571f25b96572a128b2b69063027 | [] | no_license | joeynosharefood/estudos | 3e43f1b7385ec7aaaa0a818523d19a893875157e | 2f7d4a6ac2ae042ad24e6aa2a921d7bbbac244ae | refs/heads/master | 2020-05-30T01:41:05.928082 | 2019-06-07T23:49:48 | 2019-06-07T23:49:48 | 189,482,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | x = float(input('Qual o salário do solicitante ?'))
y = float(input('Qual o valor do imovel ?'))
z = int(input('Em quantos anos pretende pagar ?'))
z1 = z*12
x1 = x*0.3
y1 = y/z1
if x1 >= y1:
print('O valor a ser pago é de R${}'.format(y1))
else:
print('O financiamento foi negado') | [
"spntn.mateus@gmail.com"
] | spntn.mateus@gmail.com |
bc659d018f63b12b61660850475ce71c0eff0045 | eb329c647084be349a2d7dd4d549142b7b4886af | /cloneproject/groups/urls.py | 87b768d497445b6518f169d363c9fa0860753418 | [] | no_license | Subhajit688/django_web_devlopment | 8d8540ff7da20b702d76efb2537bf79b5cc26da7 | 57617836806efaa65e15c2c74bd9918fb831808b | refs/heads/master | 2021-02-03T23:26:27.619199 | 2020-03-15T14:51:13 | 2020-03-15T14:51:13 | 243,572,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | from django.urls import path
from . import views
app_name = 'groups'
urlpatterns = [
path('',views.ListGroup.as_view(),name='all'),
path('new/',views.CreateGroup.as_view(),name='create'),
path('posts/in/<slug>/',views.SingleGroup.as_view(),name='single'),
path('join/<slug>/',views.JoinGroup.as_view(),name='join'),
path('leave/<slug>/',views.LeaveGroup.as_view(),name='leave'),
]
| [
"subhajit688@gmail.com"
] | subhajit688@gmail.com |
15495e598f34f5493c0426a5ef037d4dd19bf5e0 | 8a3ec7dc24b2cf5b4ad13d5aaf6eb7b1da58ac49 | /斐波那契数列.py | 210a9b41786e4bfacd5b1dd8c1ab7f6f84d6b5e1 | [] | no_license | zlnanytime/untitled | 2cb4a3bd31dbe1919f4dfd8c8323e3779edfa6fb | 31ef93cc7d82dfef570050273a99594daa6b2db9 | refs/heads/master | 2021-05-10T18:07:28.647592 | 2018-01-28T00:47:20 | 2018-01-28T00:47:20 | 118,622,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | f0 = 0
f1 = 1
f_input = int(input())
f_l = [0,1]
for i in range(0,f_input-1):
f_n = f_l[i] + f_l[i+1]
f_l.append(f_n)
print(f_l)
print(f_l[f_input]) | [
"zlanytime@gmail.com"
] | zlanytime@gmail.com |
3f45a0ead1cf4666a92a4e6dc6450c3ac923cd4a | e5efada3529d94875455c4230c8dabe27fb72a89 | /apps/api/migrations/0015_auto_20230210_1801.py | aa364ba5d0abda9b62dcdf65fd85023772bbf6fc | [] | no_license | alexmon1989/uma | d8c321fb0ec9b1a9039b1c83aeaaff774f657416 | 5dea579d634eeb1c8103c21157299b33ca5590f0 | refs/heads/master | 2023-08-03T04:31:13.598577 | 2023-07-22T18:17:13 | 2023-07-22T18:17:13 | 154,835,498 | 0 | 0 | null | 2023-03-02T11:20:54 | 2018-10-26T13:02:12 | Nunjucks | UTF-8 | Python | false | false | 411 | py | # Generated by Django 3.2.12 on 2023-02-10 18:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0014_auto_20211213_1900'),
]
operations = [
migrations.AddField(
model_name='opendata',
name='files_path',
field=models.CharField(blank=True, max_length=500, null=True),
),
]
| [
"alex.mon1989@gmail.com"
] | alex.mon1989@gmail.com |
e98edf9eca8ffe9aadce676b8b51923a8df97b7c | 2b9ce2be1c031099cda8456c5b899ce283994ed4 | /pagina web/New project/nuevo.py | 9ee1b4d23e97af2de07ad0b8432872cf15365e14 | [] | no_license | angel993/nuevo-repository | 3d3f47fec9bc398e92a1f2cad46084336598fbde | 908f17aca1a11957e50aaebc62b32ee8eee7e2b9 | refs/heads/master | 2022-11-05T03:45:08.436321 | 2020-06-19T15:54:48 | 2020-06-19T15:54:48 | 264,762,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | saludo = "Hola mundo"
print(saludo) | [
"angelsilvestrerojasblanca@gmail.com"
] | angelsilvestrerojasblanca@gmail.com |
a220aea2b5c78023a22076d9c19a6dd6523da5d2 | 30d1902232eb9ddb84fdf5404a3a1dfd6232406a | /wxpython/project/panels/WorkExperience.py | 17bf63ac737356239469250d54b41bd0999928ea | [] | no_license | sxnys/mypython | c3a768b054077ed97ff1e2fac31cb93f0765deb3 | de48cd883ad2de3320cb0c6b46b451ebb2311ac7 | refs/heads/master | 2022-11-07T15:11:48.936412 | 2019-04-14T12:04:30 | 2019-04-14T12:04:30 | 119,686,106 | 0 | 1 | null | 2022-10-31T05:13:00 | 2018-01-31T12:46:06 | Python | UTF-8 | Python | false | false | 791 | py | # -*- coding: utf-8
__author__ = 'Sxn'
__date__ = '2017/5/22 19:09'
from . import StudyExperience
from extra import JsonIO
class TabPanel(StudyExperience.TabPanel):
def __init__(self, parent):
StudyExperience.TabPanel.__init__(self, parent, tabName=u'工作经历', instructText=u'含学术兼职情况', numLimit=10, editInfo=[u'起止年月', u'工作单位', u'职务/职称'], colSize=[250, 250, 250], childOrder=2)
def addToJsonDict(self):
JsonIO.working_exp = []
for i in xrange(self.gridRow):
tmp = {}
tmp['start_end_date'] = self.infoGrid.GetCellValue(i, 0)
tmp['working_dep'] = self.infoGrid.GetCellValue(i, 1)
tmp['job'] = self.infoGrid.GetCellValue(i, 2)
JsonIO.working_exp.append(tmp) | [
"1119112647@qq.com"
] | 1119112647@qq.com |
d9aac2fb61e031ed926a808fc89218ce845b6e56 | 4ecd36e614dd5b60aff4602979494ff04ce09e1b | /drf_messaging/migrations/0005_auto_20180606_1107.py | 18d7101b0ed22629a1d0d555c2210d9637746d71 | [] | no_license | globax89/drf-messaging | 0368b2f352c7c88c985b36833618e490c5bff776 | 1433a7367eba56e3ebb5a406ef496e0d5de6f865 | refs/heads/master | 2021-10-10T03:44:17.228185 | 2018-10-25T18:03:12 | 2018-10-25T18:03:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | # Generated by Django 2.0.2 on 2018-06-06 11:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('drf_messaging', '0004_reportedmessages'),
]
operations = [
migrations.AlterField(
model_name='messages',
name='message',
field=models.TextField(blank=True, null=True),
),
]
| [
"rluts@yandex.ua"
] | rluts@yandex.ua |
25fddce52c5fff1316342189155ae8e8d443c6c5 | 47fecd7d951e40eb9e6bd7b88fe0c1fa9004d621 | /exercicio_undo_rendo.py | 13b447e8aad120d770c4908a10f532b13185b1de | [] | no_license | eliasantoniorodrigues1/python_intermediario | 0ccc7f374db9ce390c8b95e4a6fdc4564c019e87 | e5f1ab88d376dce8b3e2c73f5fccfb7bf2c98108 | refs/heads/master | 2023-06-11T11:06:59.843941 | 2021-07-04T17:07:45 | 2021-07-04T17:07:45 | 382,906,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | """
Faça uma lista de tarefas com as seguintes opções:
adicionar tarefa
listar tarefas
opção de desfazer ( a cada vez que chamarmos, desfaz a última ação)
opção de refazer ( a cada vez que chamarmos, refaz a última ação)
['tarefa1', 'tarefa2', 'tarefa3']
['tarefa1', 'tarefa2'] <- Desfazer
input <- Nova tarefa
"""
def do_add(todo, todo_list):
todo_list.append(todo)
def show_op(todo_list):
print(todo_list)
def do_undo(todo_list, redo_list):
if not todo_list:
print('Nada a desfazer.')
return
last_todo = todo_list.pop()
redo_list.append(last_todo)
def do_redo(todo_list, redo_list):
if not redo_list:
print('Nada a refazer.')
return
last_redo = redo_list.pop()
todo_list.append(last_redo)
if __name__ == '__main__':
todo_list = []
redo_list = []
while True:
todo = input('Digite uma tarefa ou [undo, redo, ls]: ')
if todo == 'ls':
show_op(todo_list)
continue
elif todo == 'undo':
do_undo(todo_list, redo_list)
continue
elif todo == 'redo':
do_redo(todo_list, redo_list)
continue
do_add(todo, todo_list)
print(todo_list)
| [
"49626719+eliasantoniorodrigues1@users.noreply.github.com"
] | 49626719+eliasantoniorodrigues1@users.noreply.github.com |
1cc832c7c5ac3aebe64225732d0802195bdcdc6b | bb0ec30d25cedfaa0d6ad2a8a9fa383a63ddaa01 | /models.py | dac06ea078186fff1e8f3460981971a836851d05 | [] | no_license | AlfonsoSargiotto/a-soft-test | b42f7c3464b656aa6f1d3b52d209ba71a02bc933 | e5877cb0de901ab870262518fa1025b2b854f336 | refs/heads/master | 2023-04-30T03:23:38.771205 | 2021-05-21T23:35:38 | 2021-05-21T23:35:38 | 368,647,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | class Seat:
id = None
type = None
price = None
cabin_class = None
availability = None
def __init__( self, id, type, price, cabin_class, availability ):
self.id = id
self.type = type
self.price = price
self.cabin_class = cabin_class
self.availability = availability
def __repr__(self):
return f"Seat('{self.id}', '{self.type}', '{self.price}', '{self.cabin_class}', '{self.availability}')" | [
"a.sargiotto@itecriocuarto.org.ar"
] | a.sargiotto@itecriocuarto.org.ar |
c193a178302a06c5e4a95745af908a602494132c | 68131e0c358221a9fe8dfa3ee98dc317b2a68f0e | /snippets/asyncserver.py | af808e705bcd15ae4554fa815581e49a52987eaa | [] | no_license | saishg/httpproxy | a5de3f9e38bc5bbbb9363515eb7d70e492de50a7 | 93edb4dc6f10350f5a27b43fb2ec26b870b42df4 | refs/heads/master | 2021-07-09T03:23:14.241646 | 2017-10-10T16:36:53 | 2017-10-10T16:36:53 | 106,321,922 | 0 | 0 | null | 2017-10-10T16:36:54 | 2017-10-09T18:43:18 | Python | UTF-8 | Python | false | false | 748 | py | import socket
import asyncio
SERVER_PORT = 8080
BUF_SIZE = 10240
def process_connection(clientsocket, address):
print(str(clientsocket.recv(BUF_SIZE)))
clientsocket.close()
async def start_server(future):
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind((socket.gethostname(), SERVER_PORT))
serversocket.listen(5)
while True:
(clientsocket, address) = serversocket.accept()
process_connection(clientsocket, address)
future.set_result('Future is done!')
if __name__ == '__main__':
loop = asyncio.get_event_loop()
future = asyncio.Future()
asyncio.ensure_future(start_server(future))
try:
loop.run_forever()
finally:
loop.close()
| [
"sgersapp@cisco.com"
] | sgersapp@cisco.com |
f9c4b00b6aec82bf24749b5e201259aee12b72cd | be7001a470a6ac26fa338044715c9b9a507edd35 | /ressources/migrations/0001_initial.py | d0d8113d4e538ecfb332e8b7e490150ef154dad8 | [] | no_license | araoux/django-agreg | 60882bb36684e67bf1cbf2df22dacc0780b01117 | ec4d8046db4eca40e13d0b8986962244ef1943fb | refs/heads/master | 2023-07-24T10:18:05.654819 | 2023-07-16T16:30:13 | 2023-07-16T16:30:13 | 174,581,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,347 | py | # Generated by Django 2.1.7 on 2019-02-26 23:42
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Categorie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nom', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Discipline',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nom', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='MotCle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nom', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Oral',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('numero', models.PositiveSmallIntegerField()),
('typeOral', models.CharField(choices=[('LC', 'Leçon de chimie'), ('LP', 'Leçon de physique'), ('M', 'Montage')], max_length=2)),
('discipline', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ressources.Discipline')),
],
),
migrations.CreateModel(
name='Ressource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(null=True)),
('date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date de parution')),
('auteur', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='SousCategorie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nom', models.CharField(max_length=20)),
('cat_parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ressources.Categorie')),
],
),
migrations.CreateModel(
name='TypeRessource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nom', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='RessourceFichier',
fields=[
('ressource_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='ressources.Ressource')),
('contenu', models.FileField(upload_to='')),
],
bases=('ressources.ressource',),
),
migrations.CreateModel(
name='RessourceImage',
fields=[
('ressource_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='ressources.Ressource')),
('contenu', models.ImageField(upload_to='')),
],
bases=('ressources.ressource',),
),
migrations.CreateModel(
name='RessourceLien',
fields=[
('ressource_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='ressources.Ressource')),
('contenu', models.URLField()),
],
bases=('ressources.ressource',),
),
migrations.AddField(
model_name='ressource',
name='categorie',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ressources.Categorie'),
),
migrations.AddField(
model_name='ressource',
name='discipline',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ressources.Discipline'),
),
migrations.AddField(
model_name='ressource',
name='mots_cles',
field=models.ManyToManyField(to='ressources.MotCle'),
),
migrations.AddField(
model_name='ressource',
name='oral',
field=models.ManyToManyField(to='ressources.Oral'),
),
migrations.AddField(
model_name='ressource',
name='sous_cat',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ressources.SousCategorie'),
),
migrations.AddField(
model_name='ressource',
name='type_ressource',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ressources.TypeRessource'),
),
]
| [
"arnaud.raoux@normalesup.org"
] | arnaud.raoux@normalesup.org |
2f99f01f10557354ca3accc62e9a49f215a62cc7 | 1e61633a835a7080511fa2e07d86d0c321b2f6f6 | /contract/migrations/0002_rename_contrac_contractform.py | 9c0f26cb218a59268e93350a285822d25b2f7a83 | [] | no_license | phuocquoc/django | 23a1d8b6add637c39f6f6b81b4eaefaf5de8811b | eca98073c48df66024c7116e3d69d2c7dd0c0199 | refs/heads/main | 2023-08-03T12:02:35.749081 | 2021-09-23T10:13:23 | 2021-09-23T10:13:23 | 409,541,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # Generated by Django 3.2.7 on 2021-09-21 10:55
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames the model 'contrac' (typo)
    # to 'contractForm' on top of the app's initial migration. Do not edit
    # the operation list by hand; Django's migration state depends on it.
    dependencies = [
        ('contract', '0001_initial'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='contrac',
            new_name='contractForm',
        ),
    ]
| [
"phuocquocnguyen@gmail.com"
] | phuocquocnguyen@gmail.com |
5e65254844f16b658ad6828501d1c3536c170e7f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/230/28042/submittedfiles/swamee.py | 88094a850a0912518cfb951ac45f0e9faca901c7 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI


def swamee_jain(f, L, Q, delta_h, v, g=9.81, eps=0.000002):
    """Size a pipe with the Swamee-Jain relations.

    Parameters: friction factor f, pipe length L (m), flow Q (m^3/s),
    head loss delta_h (m), kinematic viscosity v (m^2/s); g is gravity
    and eps the absolute pipe roughness (m).

    Returns (D, Rey, k): diameter, Reynolds number and the Swamee-Jain
    friction-factor estimate.
    """
    D = ((8 * f * L * (Q ** 2)) / ((math.pi ** 2) * g * delta_h)) ** 0.2
    Rey = (4 * Q) / (math.pi * D * v)
    # BUG FIX vs the original script: the relative-roughness term is
    # eps / (3.7 * D) (the original's (E/3.7*D) computed E*D/3.7 due to
    # precedence), and the 5.74 / Re**0.9 term belongs INSIDE log10().
    k = 0.25 / (math.log10(eps / (3.7 * D) + 5.74 / (Rey ** 0.9))) ** 2
    return D, Rey, k


if __name__ == '__main__':
    f = float(input('Digite o valor de f: '))
    L = float(input('Digite o valor de L: '))
    Q = float(input('Digite o valor de Q: '))
    DeltaH = float(input('Digite o valor de DeltaH: '))
    v = float(input('Digite o valor de v: '))
    D, Rey, k = swamee_jain(f, L, Q, DeltaH, v)
    print('%.4f' % D)
    print('%.4f' % Rey)
    print('%.4f' % k)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
6ee1971fb3a4a6690ad7a3d8b188086dba1bcd33 | 98900a28086fb9438b853c6c00a34723a2b1efd2 | /notebook.py | 98fc386ee9fb7543cd3ad0a944dc1d665a92a168 | [] | no_license | bydmitry/simu | 866bcf1df7394a4d102069392bfada2416be0be9 | 8829076f6249422fbd6fe4c5dce31992d9a489b8 | refs/heads/master | 2021-05-06T13:08:34.903730 | 2018-01-19T08:29:58 | 2018-01-19T08:29:58 | 113,178,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,339 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon May 22 14:06:37 2017
@author: bychkov
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import plotly
#from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.offline as py
py.init_notebook_mode()
import plotly.graph_objs as go
#----------------------------------------------------------------------
# Settings:
#----------------------------------------------------------------------
# Module-wide simulation constants read by the risk functions below.
# Number of samples:
NS = 1000
# Total number of covariates:
NC = 8
# Number of factors (relevant features):
NF = 2
#----------------------------------------------------------------------
# Simulate:
#----------------------------------------------------------------------
def linear_risk(X, num_covariates=None, num_factors=None):
    """
    Computes a linear scalar risk over covariates in X.

    The first `num_factors` covariates get coefficients 1..num_factors
    and the rest get 0. Both counts default to the module-level NC / NF
    settings, so existing linear_risk(X) callers are unchanged.
    """
    nc = NC if num_covariates is None else num_covariates
    nf = NF if num_factors is None else num_factors
    # Vector of coefficients:
    betas = np.zeros((nc,))
    betas[0:nf] = range(1, nf + 1)
    # Linear combination of coefficients and covariates.
    return np.dot(X, betas)
def gaussian_risk(X, c=0.0, rad=0.5, max_hr=2.0, num_factors=None):
    """
    Computes a Gaussian (radial) risk over the first `num_factors`
    covariates of X.

    The risk peaks at max_hr when those covariates equal the center c
    and decays with radius rad. num_factors defaults to the module-level
    NF setting so existing callers keep their behavior.
    """
    nf = NF if num_factors is None else num_factors
    z = np.square(X - c)
    z = np.sum(z[:, 0:nf], axis=-1)
    return max_hr * (np.exp(-(z) / (2 * rad ** 2)))
def generate_data():
    # NOTE(review): scratch / incomplete function. `risk` is read below
    # before it is ever assigned, so calling this raises UnboundLocalError;
    # it looks like a call to linear_risk()/gaussian_risk() on `data` was
    # removed. The py2-style print statements at the bottom also tie this
    # file to Python 2. TODO: confirm intent before reusing.
    pass
    # Baseline data:
    data = np.random.uniform(low = -1, high = 1, size = (NS,NC))
    #
    # Center the risk:
    risk = risk - np.mean(risk)
    # Generate time of death:
    # From exponential:
    death_time = np.zeros((NS,1))
    T = np.zeros((NS,1))
    lmb = 0.5
    for i in range(NS):
        death_time[i] = np.random.exponential(1 / (lmb*np.exp(risk[i])) )
        T[i] = -( (np.log(np.random.uniform(low = 0, high = 1)))/(lmb * np.exp(risk[i])) )
    # Visual sanity check: both sampling schemes should give the same
    # exponential failure-time distribution.
    plt.hist(T,55)
    plt.hist(death_time,55)
    print np.mean(T)
    print np.mean(death_time)
#----------------------------------------------------------------------
# SeepSurv:
#----------------------------------------------------------------------
# Experiment with the third-party DeepSurv package (Theano/Lasagne based):
# simulate linear-hazard survival data, fit a tiny (3, 3) network, then
# inspect the test-set concordance and diagnostic plots.
import lasagne
import deepsurv as DeepSurv
simulator = DeepSurv.datasets.SimulatedData(hr_ratio=2)
train_set = simulator.generate_data(N = 3000, method='linear')
valid_set = simulator.generate_data(N = 1000, method='linear')
test_set = simulator.generate_data(N = 1000, method='linear')
model = DeepSurv.DeepSurv(n_in = 10,
                          learning_rate = 0.1,
                          hidden_layers_sizes = list((3,3)))
log = model.train(train_set, valid_set, n_epochs=30)
# Concordance index on the held-out test set (return value is discarded).
model.get_concordance_index(**test_set)
DeepSurv.plot_log(log)
model.plot_risk_surface(test_set['x'])
#==============================================================================
# Done.
#==============================================================================
# Three sine curves sharing one x-axis but using three separate y-axes
# (twinx), plus an attempted matplotlib -> plotly conversion.
file_name = 'Loss.png'
t = np.arange(0.0, 2.0, 0.01)
y1 = np.sin(2*np.pi*t)
y2 = np.sin(4*np.pi*t)
y3 = np.sin(10*np.pi*t)
# Make a plot:
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax3 = ax1.twinx()
lns1 = ax1.plot(t, y1, label='y1', color='#348ABD')
lns2 = ax2.plot(t, y2, label='y2', color='#8EBA42')
lns3 = ax3.plot(t, y3, label='y3', color='#BA4252')
ax1.set_ylabel('y1'); ax2.set_ylabel('y2'); ax3.set_ylabel('y3');
# Combine the line handles from all three axes into a single legend.
lns = lns1+lns2+lns3
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc='upper center', ncol=3)
# NOTE(review): `tools` is only imported further down in this file and
# `self` does not exist at module level, so the next lines raise
# NameError as written -- this is exploratory scratch code.
plotly_fig = tools.mpl_to_plotly(fig)
py.plot(plotly_fig)
plt.savefig(os.path.join(self.builder.wrk_dir, file_name))
plt.close()
#==============================================================================
# Done.
#==============================================================================
import plotly as py
import plotly.graph_objs as go
from plotly import tools
import numpy as np
# plotly demo: a 1x2 subplot grid where the right-hand cell carries two
# overlaid y-axes (y2 on the left side with range 0-3, y3 on the right
# with range 0-30).
left_trace = go.Scatter(x = np.random.randn(1000), y = np.random.randn(1000), yaxis = "y1", mode = "markers")
right_traces = []
right_traces.append(go.Scatter(x = np.random.randn(1000), y = np.random.randn(1000), yaxis = "y2", mode = "markers"))
right_traces.append(go.Scatter(x = np.random.randn(1000) * 10, y = np.random.randn(1000) * 10, yaxis = "y3", mode = "markers"))
fig = tools.make_subplots(rows = 1, cols = 2)
fig.append_trace(left_trace, 1, 1)
for trace in right_traces:
    yaxis = trace["yaxis"] # Store the yaxis
    # append_trace overwrites the trace's axis assignment, so re-apply it.
    fig.append_trace(trace, 1, 2)
    fig["data"][-1].update(yaxis = yaxis) # Update the appended trace with the yaxis
fig["layout"]["yaxis1"].update(range = [0, 3], anchor = "x1", side = "left")
fig["layout"]["yaxis2"].update(range = [0, 3], anchor = "x2", side = "left")
fig["layout"]["yaxis3"].update(range = [0, 30], anchor = "x2", side = "right", overlaying = "y2")
# Render to an HTML <div> string (not written to disk here).
div = py.offline.plot(fig, include_plotlyjs=True, output_type='div')
# Bare expressions: only useful for display in an interactive session.
fig['data']
fig['layout']
#==============================================================================
# Done.
#==============================================================================
#
# def sanity_check(self, data_dict, nbinsx = 35):
# # General settings:
# xlim = float( data_dict['observ_period'] * 1.15 )
# opct = 0.7
# h_norm = ''
#
# fig = tools.make_subplots(
# rows=1, cols=1,
# subplot_titles=('Failure-time distribution'))
#
# # Data layers:
# fig.append_trace(go.Histogram(
# x = data_dict['t'][data_dict['e'] == 1],
# name = 'Uncenored Observations',
# histnorm = h_norm,
# autobinx = False,
# xbins = dict(
# start = 0.0,
# end = xlim,
# size = xlim/nbinsx ),
# opacity = opct
# ),1,1)
# fig.append_trace(go.Histogram(
# x = data_dict['t'][data_dict['e'] == 0],
# name = 'Censored Observations',
# autobinx = False,
# histnorm = h_norm,
# xbins = dict(
# start = 0.0,
# end = xlim,
# size = xlim/nbinsx ),
# opacity = opct
# ),1,1)
# fig.append_trace(go.Histogram(
# x = data_dict['f'],
# name = 'Failure Times',
# autobinx = False,
# histnorm = h_norm,
# xbins = dict(
# start = 0.0,
# end = xlim,
# size = xlim/nbinsx ),
# opacity = opct
# ),1,1)
#
# # Layout settings:
## layout = go.Layout(
## title = 'Failure-time distribution',
## xaxis = dict(title='Follow-up duration')
## )
#
#
#
# #fig.append_trace(trace_list, 1, 1)
# #fig.append_trace(trace_list, 1, 2)
#
# fig['layout'].update(
# title = 'Simulated Data Statistics',
# legend = dict(orientation="h"),
# height = 400, width=785)
#
# #fig = go.Figure( data=trace_list, layout=layout )
# py.iplot(fig, filename='failure_time_distributions')
# Build a plotly figure with three loss curves on separate y-axes.
# NOTE(review): scratch code lifted from a class method -- `self` is
# undefined at module level, so this only runs when pasted back into a
# context providing self.history / self.builder.
fname = os.path.join(self.builder.wrk_dir, 'loss.html')
# Colors:
cl1 = '#348ABD'
cl2 = '#8EBA42'
cl3 = '#BA4252'
pmode = 'markers'
XS = self.history['epoch']
# BUG FIX: the original train_loss call was missing the comma after
# yaxis='y1', which made this statement a SyntaxError.
train_loss = go.Scatter(name='Train loss',
                        x=XS, y=self.history['train_loss'], yaxis='y1',
                        line=dict(color=cl1), mode=pmode)
valid_loss = go.Scatter(name='Valid loss', yaxis='y2',
                        x=XS, y=self.history['valid_loss'],
                        line=dict(color=cl2), mode=pmode)
test_loss = go.Scatter(name='Test loss', yaxis='y3',
                       x=XS, y=self.history['test_loss'],
                       line=dict(color=cl3), mode=pmode)
# NOTE(review): never added to the figure, and it references axis 'y4',
# which the layout below does not define.
fake_trace = go.Scatter(name='Test loss', yaxis='y4',
                        x=XS, y=self.history['test_loss'],
                        line=dict(color=cl3), mode=pmode)
loss_traces = [train_loss, valid_loss, test_loss]
fig = tools.make_subplots(rows=2, cols=1,
                          shared_xaxes=True, shared_yaxes=False)
layout = go.Layout(
    title='Training dynamics',
    yaxis=dict(title='Train loss',
               titlefont=dict(color=cl1),
               tickfont=dict(color=cl1)),
    yaxis2=dict(title='Valid loss',
                titlefont=dict(color=cl2),
                tickfont=dict(color=cl2),
                overlaying='y', side='right', position=0.85, anchor='free'),
    yaxis3=dict(title='Test loss',
                titlefont=dict(color=cl3),
                tickfont=dict(color=cl3),
                overlaying='y', side='right', position=0.95, anchor='free')
)
#fig = go.Figure(data=data, layout=layout)
fig.append_trace(train_loss, 2, 1)
fig.append_trace(valid_loss, 2, 1)
fig.append_trace(test_loss, 1, 1)
fig['layout'].update(layout)
py.plot(fig, filename=fname, auto_open=False)
| [
"never_mind.spb@mail.ru"
] | never_mind.spb@mail.ru |
091268e4190eaf882973e5648b40bb523fa2eba2 | 6e6652245e1496649435fb72b2f1344fb840ce99 | /guessing game.py | 2496ba4b3827af590b0a02bf26bdb641e97e94da | [] | no_license | tresa2002/Guessing_Game | e0ea1e98ede0be13e42c771a9831f2f3b1b3f7c5 | af95af074b7d9800c95662beeb3eac4d30e5911f | refs/heads/master | 2022-12-15T08:31:06.318034 | 2020-09-19T15:10:30 | 2020-09-19T15:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | #Guessing Game..........
#Generates random number between 1 to 10 and gives the player three tries.
import random
# Pick the secret number once per run.
number = random.randint(1, 10)
print("Welcome to the game!!!")
player_name = input("Hello,What's your name?").strip()
print("Hello {}!".format(player_name), "I am guessing a number between 1 and 10")
print(" Try to guess.... ")
# Restrict the player to 3 guesses; `tries` counts attempts from 1.
# (The original incremented the loop variable with i += 1 each pass,
# which the for-loop silently overwrites -- counting from 1 directly is
# clearer and produces the same reported try counts.)
MAX_TRIES = 3
for tries in range(1, MAX_TRIES + 1):
    # NOTE(review): a non-numeric entry still raises ValueError, exactly
    # as in the original.
    num_guess = int(input())
    if num_guess < number:
        print("Your guess is too low..........")
    elif num_guess > number:
        print("Your guess is too high..........")
    else:
        # Correct guess: stop prompting.
        break
if num_guess == number:
    print("Correct Guess,You guessed in {} number of tries.....".format(tries))
else:
    print("You lose the game")
    print("Wrong guess,The correct number was {}".format(number))
"noreply@github.com"
] | tresa2002.noreply@github.com |
d93049fd78b099b0e02c13959a40ae80864347f1 | 0d44e486e4a05a3c11d4ed2365e957dfc063869e | /road_following2/.ipynb_checkpoints/timednn-checkpoint.py | b74c169a1f5cb6ba781e345077054ec8b161d84d | [] | no_license | reinisnud/jetson | 795e15c9e27d272fd6aca1a1270bdb6ce41cb745 | 3bb321025613ae43b747adb28a20b0cd533dacbb | refs/heads/master | 2023-01-19T13:10:17.463090 | 2020-11-26T07:30:38 | 2020-11-26T07:30:38 | 267,051,033 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,902 | py | import torchvision
import torch
import torch.nn as nn
class Net(nn.Module):
    """Small VGG-style CNN regressing two steering values from an image.

    Four conv -> ReLU -> maxpool stages double the channel count from 3
    up to 512, followed by one extra pooling stage, so the spatial size
    shrinks 32x; with 96x96 inputs the flattened feature map is
    512 * 3 * 3 = 4608, matching the linear head below.

    NOTE(review): keep the Sequential slot layout exactly as-is -- the
    checkpoint loaded via load_state_dict() elsewhere in this file keys
    its weights by slot index ('features.0', 'features.3', ...).
    """

    def __init__(self):
        super(Net, self).__init__()
        # Feature extractor: four conv/ReLU/pool stages plus one trailing
        # pool (a fifth conv was removed but its pooling layer was kept).
        stages = []
        for c_in, c_out in ((3, 64), (64, 128), (128, 256), (256, 512)):
            stages.append(nn.Conv2d(in_channels=c_in,
                                    out_channels=c_out,
                                    kernel_size=3,
                                    stride=1,
                                    padding=1))
            stages.append(nn.ReLU(inplace=True))
            stages.append(nn.MaxPool2d(2, 2))
        stages.append(nn.MaxPool2d(2, 2))
        self.features = nn.Sequential(*stages)
        # Single linear head mapping the 4608 flattened features to (x, y).
        self.classifier = nn.Sequential(
            nn.Linear(4608, 2))

    def forward(self, x):
        # Extract features, collapse the spatial dimensions, classify.
        feature_maps = self.features(x)
        flattened = feature_maps.view(-1, 4608)
        return self.classifier(flattened)
# model = torchvision.models.resnet18(pretrained=False)
# model.fc = torch.nn.Linear(512, 2)
# Load the trained steering checkpoint and move the model to the GPU in
# eval / half-precision mode; the print shows the module tree at start-up.
model = Net()
model.load_state_dict(torch.load('best_steering_model_xycustomCNN.pth'))
device = torch.device('cuda')
model = model.to(device)
print(model.eval().half())
import torchvision.transforms as transforms
import torch.nn.functional as F
import cv2
import PIL.Image
import numpy as np
# Per-channel normalization statistics used by preprocess(); kept on the
# GPU in half precision to match the model.
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda().half()
def get_x(path):
    """Gets the x value from the image filename.

    Characters 3-5 of the name encode x in [0, 100]; rescale to [-1, 1].
    """
    raw = int(path[3:6])
    return (raw - 50.0) / 50.0
def get_y(path):
    """Gets the y value from the image filename.

    Characters 7-9 of the name encode y in [0, 100]; rescale to [-1, 1].
    """
    raw = int(path[7:10])
    return (raw - 50.0) / 50.0
def preprocess(image):
    """Open an image file and return a normalized half-precision NCHW
    tensor on the module-level `device`, using the `mean`/`std` defined
    above."""
    pil_image = PIL.Image.open(image)
    tensor = transforms.functional.to_tensor(pil_image).to(device).half()
    tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
    # Add the leading batch dimension expected by the model.
    return tensor[None, ...]
def runNetwork(image):
    """Run one inference on `image` and store the steering angle in the
    module-level global `angle`; prints the two raw outputs."""
    global angle
    prediction = model(preprocess(image)).detach().float().cpu().numpy().flatten()
    steer_x = prediction[0]
    steer_y = (0.5 - prediction[1]) / 2.0
    angle = np.arctan2(steer_x, steer_y)
    print(steer_x, " ", prediction[1])
import time
import os
# Warm-up inference so model/CUDA initialization time is not billed to
# the per-image timing below.
runNetwork(os.path.join('images', 'xy_000_059_d9cd0272-993b-11ea-8fac-16f63a1aa8c9.jpg'))
total_time = 0
count = 0
for filename in os.listdir('images'):
    name = os.path.join('images', filename)
    if os.path.isfile(name):
        start_time = time.time()
        runNetwork(name)
        # Ground-truth labels decoded from the file name, for comparison
        # with the values runNetwork() prints.
        print(get_x(filename), get_y(filename))
        count += 1
        total_time += time.time() - start_time
    else:
        print("No file found")
# Average per-image inference time.
# BUG FIX: guard against an empty 'images' directory, which previously
# raised ZeroDivisionError on the final division.
if count:
    print(total_time / count)
else:
    print("No file found")
"reinisnudiens@gmail.com"
] | reinisnudiens@gmail.com |
2d1ff66d90a2adb3e0779f18b5a50d2212b45545 | 13f5984be7be77852e4de29ab98d5494a7fc6767 | /LeetCode/binary_serach_tree.py | ac894fb9c1a5994df4054cf4407beae85859a72b | [] | no_license | YuanXianguo/Python-Interview-Master | 4252514763fc3f563d9b94e751aa873de1719f91 | 2f73786e8c51dbd248341559de171e18f67f9bf2 | refs/heads/master | 2020-11-26T18:14:50.190812 | 2019-12-20T02:18:03 | 2019-12-20T02:18:03 | 229,169,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,513 | py | from tree import Tree
class Node(object):
    """A single tree node: a stored value plus left/right child links."""

    def __init__(self, val=None):
        self.val = val
        # Children start out unset; the tree wires them up on insert.
        self.left = self.right = None
class BinarySearchTree(Tree):
    """Binary search tree built on the project's Tree base class.

    Smaller values go into the left subtree, larger values into the
    right; duplicate values are silently ignored by insert(). Failed
    lookups/deletions return the original status strings so the external
    contract is unchanged.
    """

    def __init__(self, node=None):
        super().__init__(node)

    def insert(self, val):
        """Insert val and return the (possibly new) root of this subtree."""
        node = Node(val)
        if not self.root:
            self.root = node
        else:
            if val < self.root.val:
                self.root.left = BinarySearchTree(
                    self.root.left).insert(val)
            elif val > self.root.val:
                self.root.right = BinarySearchTree(
                    self.root.right).insert(val)
        return self.root

    def find(self, val):
        """Recursive lookup: the matching node, or '查找失败' if absent."""
        if not self.root:
            return "查找失败"
        if val < self.root.val:
            return BinarySearchTree(self.root.left).find(val)
        elif val > self.root.val:
            return BinarySearchTree(self.root.right).find(val)
        else:  # found it
            return self.root

    def find2(self, val):
        """Iterative lookup: the matching node, or '查找失败' if absent."""
        root = self.root
        while root:
            if val < root.val:
                root = root.left
            elif val > root.val:
                root = root.right
            else:
                return root
        return "查找失败"

    def find_min(self):
        """Recursively find the minimum node (the leftmost node)."""
        if not self.root:
            return "查找失败"
        if not self.root.left:
            return self.root  # no left child -> this is the minimum
        else:
            return BinarySearchTree(self.root.left).find_min()

    def find_max(self):
        """Iteratively find the maximum node (the rightmost node)."""
        root = self.root
        if not root:
            return "查找失败"
        while root.right:
            root = root.right
        return root

    def delete(self, val):
        """Delete val from this subtree; every recursive call returns the
        new root of the subtree it operated on."""
        if not self.root:
            return "删除失败"
        elif val < self.root.val:
            self.root.left = BinarySearchTree(
                self.root.left).delete(val)
        elif val > self.root.val:
            self.root.right = BinarySearchTree(
                self.root.right).delete(val)
        else:  # this is the node to remove
            if self.root.left and self.root.right:
                # Two children: overwrite this value with the minimum of
                # the right subtree, then delete that minimum from the
                # right subtree.
                right_min = BinarySearchTree(self.root.right).find_min()
                self.root.val = right_min.val
                # BUG FIX: the original passed the Node object (right_min)
                # into delete(), where it was compared against plain
                # values -- a TypeError on Python 3; pass its value.
                self.root.right = BinarySearchTree(
                    self.root.right).delete(right_min.val)
            else:  # zero or one child: splice the child (or None) in
                if not self.root.left:
                    self.root = self.root.right
                elif not self.root.right:
                    self.root = self.root.left
        return self.root
if __name__ == '__main__':
    # Smoke test: inserting 0..9 in ascending order yields a degenerate
    # right-spine tree.
    bt = BinarySearchTree()
    for i in range(10):
        bt.insert(i)
    print(bt.find_min().val)   # 0
    print(bt.find_max().val)   # 9
    print(bt.find(10))         # absent value -> failure marker string
    # postorder()/inorder() presumably come from the Tree base class --
    # TODO confirm against tree.py.
    bt.postorder()
    print("")
    bt.delete(9)
    print(bt.find_max().val)   # 8 after removing the old maximum
    bt.inorder()
| [
"736913978@qq.com"
] | 736913978@qq.com |
11e42a2253ad2931f70a97042fc8fd387c6443e2 | 57b6b52a84af17482c9949cf72c7f5adf55234f2 | /projecttwo/manage.py | 72e07e41f84a947a216341b141a4f4db3f689eaa | [] | no_license | dianamoraa47/Django-Framework | 987a9b4d1fa226c6f7f45d550a81ee50e78714f9 | e0970bd65e057188da464c29540777477228ddc6 | refs/heads/master | 2020-06-12T09:43:56.282404 | 2019-06-28T11:28:00 | 2019-06-28T11:28:00 | 194,261,457 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the 'projecttwo' project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projecttwo.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Chain the original failure so the real cause stays visible.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
# Standard Django entry point: dispatch to the CLI when run as a script.
if __name__ == '__main__':
    main()
| [
"dianamoraa47@gmail.com"
] | dianamoraa47@gmail.com |
8fd2bb63b0671a63754e55e0430e7618751c5806 | 6af9ac61c688291077db6e9d64a3dda1bf1e9532 | /disappearing_ninjas/disappearing_ninjas/settings.py | 647b954e013e30fb4e6a7e6587812dc4e9d2ef90 | [] | no_license | CodingDojoSeattlePythonMay2017/brad_sherman | febba090f00a3224957e95d0bbea7cbc6e4e27ab | 8b1418ab972220b52aec14ecfa4f6eba4df2d095 | refs/heads/master | 2021-01-20T12:50:05.200720 | 2017-05-22T20:42:15 | 2017-05-22T20:42:15 | 90,415,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,276 | py | """
Django settings for disappearing_ninjas project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and
# load it from an environment variable before any production deployment.
SECRET_KEY = '@mx_%)yaj)3c+sn6f2!rfegm&2ertxr18z^m-_g0!6c%i@g+6p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; must list real hosts in prod.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'apps.first_app',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'disappearing_ninjas.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'disappearing_ninjas.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Development default: a local SQLite file next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"noreply@github.com"
] | CodingDojoSeattlePythonMay2017.noreply@github.com |
f4439d92392a0d72267afab20182090f9366d62e | 44839c64ef4219964f7a4cedebccae9844e431df | /mdm_inventory/users/graphql/mutations/users.py | c27160e621e24a197a83d82c89482abb3335ea09 | [
"MIT"
] | permissive | TeamWalls/mdm-backend-django | 4b790fbaf598280db267fc16a6f5d31d125b65c8 | 4e23f9abc8531eb786d5e6cf958c9ffa8acd6b1d | refs/heads/main | 2023-02-21T06:31:59.609519 | 2021-01-17T17:10:14 | 2021-01-17T17:10:14 | 329,751,659 | 0 | 0 | MIT | 2021-01-17T17:10:16 | 2021-01-14T22:40:25 | Python | UTF-8 | Python | false | false | 4,832 | py | # django
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist, ValidationError
# graphene
import graphene
import graphql_jwt
# Auth
from graphql_jwt.decorators import login_required
#models
from mdm_inventory.users.models import User
# types
from mdm_inventory.users.graphql.types import UserType
#serializers
from mdm_inventory.users.serializers import (
UserCreateSerializer,
UserUpdateSerializer,
UserCreateSerializer
)
# utils
from mdm_inventory.utils.graphql.generic_mutation import GenericMutationSerializer
from mdm_inventory.utils.graphql.exceptions import ResponseError
# GraphQL input type shared by the CreateUser/UpdateUser mutations below.
# The Spanish description strings are exposed verbatim in the schema, so
# they are left untouched.
class InputUserData(graphene.InputObjectType):
    first_name = graphene.String(description="Primer Nombre")
    last_name = graphene.String(description="Apellido")
    email = graphene.String(description="correo electronico")
    username = graphene.String(description="Nombre de usuario")
    profile_picture = graphene.String(description="Foto de perfil")
    is_manager = graphene.Boolean(description="Gerente")
    is_supervisor = graphene.Boolean(description="SuperVisor")
    is_cashier = graphene.Boolean(description="Cajero")
    password = graphene.String(description="password")
    password_confirmation = graphene.String(description="password confirmation")
# Mutation: create a User through UserCreateSerializer (validation and
# persistence happen inside GenericMutationSerializer.perform_mutation).
class CreateUser(GenericMutationSerializer):
    class Arguments:
        input = InputUserData(description="Input User Data")
    user = graphene.Field(UserType)
    class Meta:
        model = User
        description = 'CreateUser in data base'
        serializer_class = UserCreateSerializer
    @classmethod
    def mutate(cls, root, info, **kwargs):
        # The message from perform_mutation is deliberately replaced with
        # a fixed success text.
        user, message, status = cls.perform_mutation(root, info, **kwargs)
        message = _("Usuario Creado")
        return cls(user=user, message=str(message), status=status)
# Mutation: update an existing User (update=True switches the generic
# serializer mutation into update mode; the target is selected by `id`).
class UpdateUser(GenericMutationSerializer):
    class Arguments:
        id = graphene.ID(description="Id User")
        input = InputUserData(description="Input User Data")
    user = graphene.Field(UserType)
    class Meta:
        model = User
        # NOTE(review): description still reads 'CreateUser' -- likely a
        # copy/paste leftover; changing it would alter the public schema.
        description = 'CreateUser in data base'
        serializer_class = UserUpdateSerializer
        update = True
    @classmethod
    def mutate(cls, root, info, **kwargs):
        # As in CreateUser, the returned message is overridden.
        user, message, status = cls.perform_mutation(root, info, **kwargs)
        message = _("Usuario Actualizado ")
        return cls(user=user, message=str(message), status=status)
# Input type for DeleterUser: just the primary key of the target user.
class InputDisableData(graphene.InputObjectType):
    id = graphene.Int(description="Id User ref pk")
# Mutation: delete a User (delete=True in Meta drives perform_mutation
# into delete mode; no user payload is returned).
class DeleterUser(GenericMutationSerializer):
    class Arguments:
        input = InputDisableData(description="Input User Data")
    class Meta:
        model = User
        description = 'Deleter in data base'
        delete = True
    @classmethod
    def mutate(cls, root, info, **kwargs):
        # Replace the generic message with a fixed deletion confirmation.
        message, status = cls.perform_mutation(root, info, **kwargs)
        message = _("Usuario Eliminado")
        return cls(message=str(message), status=status)
#Login Person
# JWT mutation from graphql_jwt, extended to also return the
# authenticated user alongside the token.
class CustomObtainJSONWebToken(graphql_jwt.JSONWebTokenMutation):
    user = graphene.Field(UserType)
    @classmethod
    def resolve(cls, root, info, **kwargs):
        # graphql_jwt has already authenticated the request by the time
        # resolve() runs, so the context user is the logged-in account.
        user_login = info.context.user
        return cls(user=user_login)
# Credentials payload for the Login mutation below.
class UserLoginInput(graphene.InputObjectType):
    email = graphene.String(description="Email user")
    password = graphene.String(description="password")
class Login(graphene.Mutation):
    """
    Mutacion for verification , password recovery and user verification
    """
    class Arguments:
        input = UserLoginInput(description=_("Input user data"))

    user = graphene.Field(UserType)
    token = graphene.String()
    exp_token = graphene.Int()

    @classmethod
    def mutate(cls, root, info, **kwargs):
        # Normalize the e-mail to lower case before delegating; the
        # password travels inside "input" untouched (the original also
        # bound it to an unused local, dropped here).
        email = kwargs["input"]["email"].lower()
        kwargs["input"]["email"] = email
        try:
            # Delegate credential checking / JWT creation to graphql_jwt.
            data = kwargs.pop("input")
            data_mutation = CustomObtainJSONWebToken.mutate(root, info, **data)
            user = data_mutation.user
            token = data_mutation.token
            exp = data_mutation.payload["exp"]
            # Refuse the login until the account has been verified.
            # (Rewritten from the original "if ... is True: pass / else:
            # raise" inversion; "is not True" keeps identical strictness.)
            if user.is_verified is not True:
                msg = _("Debes verificar tu usuario antes de empezar a usar mdm_inventory")
                raise ResponseError(message=str(msg), code="400", params={"is_verified": "False"})
        except ObjectDoesNotExist:
            # NOTE(review): bad credentials from graphql_jwt normally
            # surface as its own error type rather than ObjectDoesNotExist
            # -- confirm this branch is actually reachable.
            msg = _("Datos invalidos")
            raise ResponseError(message=str(msg), code="400")
        return cls(user=user, token=token, exp_token=exp)
from graphql_jwt.shortcuts import get_token
| [
"lm5708144@gmail.com"
] | lm5708144@gmail.com |
15e500c9c358be08430d57e616e598ea959391ab | c9b592f78623e5f661e024b5a7895ffce865988a | /chapter5/ex2.py | 769917623052639836b15973ac45453a765103d3 | [] | no_license | jabedude/python-class | 85c48a116ff7b5158ff49e6b18e80063e76179eb | 608fc04810ceff431daebbf415ef25c2b22878d6 | refs/heads/master | 2021-01-20T02:07:58.720531 | 2017-05-06T12:15:49 | 2017-05-06T12:15:49 | 89,377,877 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | #!/usr/bin/env python3
def greater_than(*args, num):
    """Return how many positional arguments are strictly greater than num."""
    return sum(1 for value in args if value > num)
# Demo: 5, 10 and 30 are the values above num=0, so this prints 3.
res = greater_than(5, -10, 10, -20, 30, num=0)
print(res)
| [
"sinisterpatrician@gmail.com"
] | sinisterpatrician@gmail.com |
de5d0e7e97e723d3ceb465bbbe581bf634a4f411 | 816c5da70ba67217c198d6bd05abc443e9f47511 | /template.py | f1d7da23c63fa28b308b2109bd59b9f992751f7f | [
"Apache-2.0"
] | permissive | elp2/advent_of_code_2020 | 80dd3723a22671503a661062cd3f1244246470d1 | 71e12e25769aa7d5154213077ffae595ad9a4019 | refs/heads/main | 2023-02-09T01:09:37.952831 | 2020-12-25T05:17:32 | 2020-12-25T05:17:32 | 315,207,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | from collections import defaultdict, deque
import re
# Advent-of-Code day being solved; both input files are read eagerly at
# import time and must exist next to this script.
CHALLENGE_DAY = "22"
REAL = open(CHALLENGE_DAY + ".txt").read()
SAMPLE = open(CHALLENGE_DAY + ".sample.txt").read()
# SAMPLE_EXPECTED = None
# SAMPLE_EXPECTED =
def parse_lines(raw):
    """Parse the day's puzzle input.

    Pick/uncomment one of the strategies below per puzzle. BUG FIX: the
    original body consisted only of comments, which is a SyntaxError (a
    def needs at least one statement); returning the raw text is the
    safe default until a real parser is chosen.
    """
    # Groups.
    # groups = raw.split("\n\n")
    # return list(map(lambda group: group.split("\n"), groups))
    # lines = raw.split("\n")
    # return lines # raw
    # return list(map(lambda l: l.split(" "), lines)) # words.
    # return list(map(int, lines))
    # return list(map(lambda l: l.strip(), lines)) # beware leading / trailing WS
    return raw
def solve(raw):
    """Template solver: parse raw input, then compute and return the answer."""
    parsed = parse_lines(raw)  # set a breakpoint here to verify parsing
    answer = 0  # TODO: real computation goes here
    return answer
sample = solve(SAMPLE)
# NOTE(review): SAMPLE_EXPECTED is still commented out above, so this
# comparison raises NameError until it is defined for the day's puzzle.
if sample != SAMPLE_EXPECTED:
    print("SAMPLE FAILED: ", sample, " != ", SAMPLE_EXPECTED)
    assert sample == SAMPLE_EXPECTED
print("\n*** SAMPLE PASSED ***\n")
solved = solve(REAL)
print("SOLUTION: ", solved)
# Copy the answer to the system clipboard via pandas.
import pandas as pd
df=pd.DataFrame([str(solved)])
df.to_clipboard(index=False,header=False)
print("COPIED TO CLIPBOARD")
| [
"891364+elp2@users.noreply.github.com"
] | 891364+elp2@users.noreply.github.com |
05f2f300257d5ca6375765b26379c1ae5bcd4984 | 3ec9d3aa7e59475683dba30a87ca68242a7ec181 | /cn/edgedetection/03Sample.py | 843592f93fc567466fb142220d9454d1c28724ac | [
"Apache-2.0"
] | permissive | Jasonandy/Python-X | 58bf36499572cdfb7d7bf80c6a3cd0c818f62c1e | 2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe | refs/heads/master | 2021-06-16T17:07:29.277404 | 2021-03-07T14:17:05 | 2021-03-07T14:17:05 | 175,353,402 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,419 | py | """
边缘检测示列
https://blog.csdn.net/HuangZhang_123/article/details/80511270
"""
import cv2
import numpy as np
def show_image(image_path):
    """Load an image, binarize it and draw several contour outlines.

    For every outer contour the function draws an axis-aligned bounding
    box, a minimum-area (rotated) rectangle and a minimum enclosing
    circle, then overlays the raw contours and shows the result in an
    OpenCV window until a key is pressed.
    """
    # Halve the image once with a Gaussian-pyramid step to speed things up.
    img = cv2.pyrDown(cv2.imread(image_path, cv2.IMREAD_UNCHANGED))
    # Binarize a grayscale copy -- findContours needs a binary image.
    ret, thresh = cv2.threshold(
        cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY), 127, 255,
        cv2.THRESH_BINARY)
    # Outer-most contours only, with compressed point storage.
    contours, hier = cv2.findContours(
        thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        # 1) Axis-aligned bounding rectangle (green, thickness 2).
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # 2) Minimum-area rotated rectangle (red, thickness 3), with the
        # corner coordinates normalized to integers.
        rotated = cv2.minAreaRect(contour)
        corners = np.int0(cv2.boxPoints(rotated))
        cv2.drawContours(img, [corners], 0, (0, 0, 255), 3)
        # 3) Minimum enclosing circle (green, thickness 2).
        (cx, cy), radius = cv2.minEnclosingCircle(contour)
        img = cv2.circle(img, (int(cx), int(cy)), int(radius), (0, 255, 0), 2)
    # 4) Trace the raw contours themselves in blue.
    cv2.drawContours(img, contours, -1, (255, 0, 0), 2)
    # Display until a key is pressed.
    cv2.imshow("contours", img)
    cv2.waitKey()
    cv2.destroyAllWindows()
def run():
    """Demo entry point: run contour detection on the bundled sample image."""
    # Other images tried during development:
    # "media/13.jpg", "media/lena/lena.jpg"
    sample_path = "media/sample/sample.png"
    show_image(sample_path)
# Run the demo only when this file is executed as a script.
if __name__ == '__main__':
    run()
| [
"jasonandy@hotmail.com"
] | jasonandy@hotmail.com |
c9c1b551a250613ffb85b581a47121c5e9319dc2 | 855c7f84876f66fe48fa3cfebcb2d96cb809ccce | /manage.py | 642ca99f5fc4b37a16fb35199b75f179159c8063 | [] | no_license | chifeng111/Appointment | 1a9fd11755a03112fa995ef4736761d9ee4c4b93 | 233c65a51fe4b346ddd2e4dc22adb7d233bd0faf | refs/heads/master | 2020-06-20T00:08:56.139597 | 2016-12-06T15:30:44 | 2016-12-06T15:30:44 | 74,894,926 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before anything imports them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Appointment.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"liaozhenhua1129@gmail.com"
] | liaozhenhua1129@gmail.com |
24db83579abf43746157b9ca44973bed9739e2cf | 907a4ba813ec1a536cfe8e9e0390c36df168af5b | /Wigle Mining/Parsers/basic.py | efeab38f4401065420d042433376997513a8ddb4 | [] | no_license | deyanlazarov/SkyLift | 7ec9cd372c2532110a7b4cb69b951b12ad263d7a | ed4c3733077fee66b442639103894bbb533b3fc3 | refs/heads/master | 2020-12-25T10:34:47.442272 | 2014-11-12T19:48:27 | 2014-11-12T19:48:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | import sys
import csv
#time, ?, ?, ?, ?, type, mac, name
#1410992732,Radio,181,2462,1152,Beacn,00:12:0e:85:70:58,Jane's Wireless
names = set()
devices = {}
routers = {}
with open(sys.argv[1], 'rb') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
if len(row) > 6:
time = row[0]
typ = row[5]
mac = row[6]
name = row[7]
if typ == 'Probe' and name != 'BROADCAST':
names.add(name)
if mac in devices:
# print devices[mac]
devices[mac].add(name)
if name in devices[mac] :
# print name + " is duplicate"
pass
else:
print name
else:
devices[mac] = set()
devices[mac].add(name)
for name in names:
print name
names_count = {}
for mac in devices.keys():
for name in devices[mac]:
if name in names_count:
names_count[name] += 1
else:
names_count[name] = 1
#prints mac address
# output = mac + ': ' + ', '.join(devices[mac])
# print output
#print names_count
## prints the unique names
#for w in sorted(names_count, key=names_count.get, reverse=True):
# print w + ': ' + str(names_count[w])
| [
"surya@suryamattu.com"
] | surya@suryamattu.com |
e95d8d628f0c6e23acbe5b3c4f57a605cb3ba946 | 27944731195a6c5e4ad1017d0bc12215cfa92ce4 | /manage.py | 676c3d692862013d1c6abd33527dd427a316a9e8 | [] | no_license | sreejithinapp/validify | caa981475100847ef49b8ca45788212d5f094cac | ea058268261fecd4290f52da3326288965e13388 | refs/heads/master | 2021-05-07T01:08:49.786200 | 2017-11-02T06:26:18 | 2017-11-02T06:26:18 | 110,222,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
import os
import sys
def _main(argv):
    """Point Django at the project settings and hand off to its CLI."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Validify.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The import may fail for reasons other than a missing Django;
        # only emit the helpful message when Django itself is absent,
        # otherwise re-raise the original error untouched.
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(argv)


if __name__ == "__main__":
    _main(sys.argv)
| [
"rajesh.bk@inapp.com"
] | rajesh.bk@inapp.com |
13e9b3fafe2f0f5e0947fec71bd1d9c4f1fd6730 | 2a171178942a19afe9891c2425dce208ae04348b | /kubernetes/client/models/v1_job_list.py | 4c27f86f781c5f929aaeab8ca1386fdec70302fc | [
"Apache-2.0"
] | permissive | ouccema/client-python | ac3f1dee1c5ad8d82f15aeecb87a2f5f219ca4f4 | d7f33ec53e302e66674df581904a3c5b1fcf3945 | refs/heads/master | 2021-01-12T03:17:54.274888 | 2017-01-03T22:13:14 | 2017-01-03T22:13:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,395 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.0-snapshot
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1JobList(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, items=None, metadata=None):
        """
        V1JobList - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Maps attribute name -> Swagger type; drives to_dict() below.
        self.swagger_types = {
            'items': 'list[V1Job]',
            'metadata': 'UnversionedListMeta'
        }

        # Maps attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'items': 'items',
            'metadata': 'metadata'
        }

        # NOTE(review): assigning the backing fields directly bypasses the
        # `items` setter, so a V1JobList may still be constructed with
        # items=None even though the setter rejects None.
        self._items = items
        self._metadata = metadata

    @property
    def items(self):
        """
        Gets the items of this V1JobList.
        Items is the list of Job.

        :return: The items of this V1JobList.
        :rtype: list[V1Job]
        """
        return self._items

    @items.setter
    def items(self, items):
        """
        Sets the items of this V1JobList.
        Items is the list of Job.

        :param items: The items of this V1JobList.
        :type: list[V1Job]
        """
        # `items` is a required field in the API schema.
        if items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")
        self._items = items

    @property
    def metadata(self):
        """
        Gets the metadata of this V1JobList.
        Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata

        :return: The metadata of this V1JobList.
        :rtype: UnversionedListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1JobList.
        Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata

        :param metadata: The metadata of this V1JobList.
        :type: UnversionedListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively serialise nested models/lists/dicts via their to_dict().
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # NOTE(review): compares __dict__ without an isinstance check, so
        # comparing against an object lacking __dict__ raises AttributeError.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
51e410a7583b82d254106376c125b43aa5f99007 | ed7e61c8eef7fb2213adeb67557d605470c17fb3 | /ML/confusion-matrix/split_two.py | b3bc65d93e39225a414ead9d46ec4d8d6b6fd697 | [] | no_license | MartinThoma/algorithms | 535840224323822f2ea6b7dd6f82a0fdd22a0ff9 | a251e9599b685dbf89c891f02d20fefd8538ead5 | refs/heads/master | 2023-02-23T17:58:10.913634 | 2023-02-21T05:58:59 | 2023-02-21T05:58:59 | 4,939,076 | 241 | 126 | null | 2023-02-16T05:16:23 | 2012-07-07T16:07:23 | Python | UTF-8 | Python | false | false | 6,693 | py | #!/usr/bin/env python
"""Split the classes into two equal-sized groups to maximize accuracy."""
import json
import os
import random
import numpy as np
random.seed(0)
import logging
import sys
from visualize import apply_permutation, plot_cm, read_symbols, swap, swap_1d
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
def calculate_split_accuracy(cm):
"""
Calculate the accuracy of the adjusted classifier.
The adjusted classifier is built by joining the first n/2 classes into one
group and the rest into another group.
"""
n = len(cm)
first = int(n / 2)
cm_small = np.zeros((2, 2))
for i in range(n):
class_i = int(i < first)
for j in range(n):
class_j = int(j < first)
cm_small[class_i][class_j] += cm[i][j]
return (float(cm_small[0][0] + cm_small[1][1]) / cm_small.sum())
def calculate_split_error(cm):
"""Calculate the error of 2 group split."""
return 1.0 - calculate_split_accuracy(cm)
def simulated_annealing(current_cm,
current_perm=None,
score=calculate_split_error,
steps=2 * 10**5,
temp=100.0,
cooling_factor=0.99,
deterministic=False):
"""
Optimize current_cm by randomly swapping elements.
Parameters
----------
current_cm : numpy array
current_perm : None or iterable, optional (default: None)
steps : int, optional (default: 2 * 10**4)
temp : float > 0.0, optional (default: 100.0)
Temperature
cooling_factor: float in (0, 1), optional (default: 0.99)
"""
assert temp > 0
assert cooling_factor > 0
assert cooling_factor < 1
n = len(current_cm)
if current_perm is None:
current_perm = list(range(n))
current_perm = np.array(current_perm)
# Debugging code
perm_exp = np.zeros((n, n), dtype=np.int)
for i in range(n):
for j in range(n):
perm_exp[i][j] = j
current_cm = apply_permutation(current_cm, current_perm)
perm_exp_current = apply_permutation(perm_exp, current_perm)
logging.debug(perm_exp_current[0])
print("apply permutation %s" % str(current_perm))
current_score = score(current_cm)
best_perm = current_perm
best_cm = current_cm
best_score = current_score
print("## Starting Score: {:0.2f}%".format(current_score * 100))
for step in range(steps):
tmp = np.array(current_cm, copy=True)
split_part = int(n / 2) - 1
i = random.randint(0, split_part)
j = random.randint(split_part + 1, n - 1)
perm = swap_1d(current_perm.copy(), i, j)
tmp = swap(tmp, i, j)
# tmp = apply_permutation(tmp, perm)
tmp_score = score(tmp)
if deterministic:
chance = 1.0
else:
chance = random.random()
temp *= 0.99
hot_prob = min(1, np.exp(-(tmp_score - current_score) / temp))
if chance <= hot_prob:
if best_score > tmp_score: # Minimize the score
best_perm = perm
best_cm = tmp
best_score = tmp_score
current_score = tmp_score
perm_exp_current = swap(perm_exp_current, i, j)
print(list(perm_exp_current[0]))
current_cm = tmp
logging.info(("Current: %0.2f%% (best: %0.2f%%, hot_prob=%0.2f%%, "
"step=%i)"),
(current_score * 100),
(best_score * 100),
(hot_prob * 100),
step)
return {'cm': best_cm, 'perm': list(perm_exp_current[0])}
def main(cm_file, perm_file, steps, labels_file):
"""Orchestrate."""
# Load confusion matrix
with open(cm_file) as f:
cm = json.load(f)
cm = np.array(cm)
# Load permutation
if os.path.isfile(perm_file):
print("loaded %s" % perm_file)
with open(perm_file) as data_file:
perm = json.load(data_file)
else:
perm = random.shuffle(list(range(len(cm))))
print("Score without perm: {:0.2f}%".format(calculate_split_error(cm) * 100))
result = simulated_annealing(cm, perm,
score=calculate_split_error,
deterministic=True,
steps=steps)
# First recursive step
# split_i = int(len(cm) / 2)
# cm = result['cm'][:split_i, :split_i]
# perm = list(range(split_i))
# result = simulated_annealing(cm, perm,
# score=calculate_split_error,
# deterministic=True,
# steps=steps)
print("Score: {}".format(calculate_split_error(result['cm'])))
print("Perm: {}".format(list(result['perm'])))
# Load labels
if os.path.isfile(labels_file):
with open(labels_file) as f:
symbols = json.load(f)
else:
symbols = read_symbols()
print("Symbols: {}".format([symbols[i] for i in result['perm']]))
plot_cm(result['cm'], zero_diagonal=True)
def get_parser():
"""Get parser object for script xy.py."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--cm",
dest="cm_file",
help=("path of a json file with a confusion matrix"),
metavar="cm.json",
default='confusion-matrix.json')
parser.add_argument("--perm",
dest="perm_file",
help=("path of a json file with a permutation to "
"start with"),
metavar="perm.json",
default="")
parser.add_argument("--labels",
dest="labels_file",
help=("path of a json file with a list of label "
"names"),
metavar="labels.json",
default="")
parser.add_argument("-n",
dest="n",
default=4 * 10**5,
type=int,
help="number of steps to iterate")
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.cm_file, args.perm_file, args.n, args.labels_file)
| [
"info@martin-thoma.de"
] | info@martin-thoma.de |
4b3f9feda60324e78a9be7aad56593b2ab7b748a | 00c58fe39d71f329230a133e6e4ec9f50023ba3d | /app/identidock.py | 2877bc0d2ee1a55f68ddb837934db4ede669af2d | [] | no_license | MtBlue81/identidock | 229f38f00500d49dc589548478cffab72626e7ad | 5848a8eb899716f46dc0f996df64c819806cc726 | refs/heads/master | 2021-07-20T19:34:55.886689 | 2017-10-30T04:02:26 | 2017-10-30T04:02:26 | 108,799,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | from flask import Flask, Response, request
import requests
import hashlib
import redis
import html
app = Flask(__name__)
cache = redis.StrictRedis(host='redis', port=6379, db=0)
salt = "UNIQUE_SALT"
default_name = 'Joe Bloggs'
@app.route('/', methods=['GET', 'POST'])
def main_page():
name = default_name
if request.method == 'POST':
name = html.escape(request.form['name'], quote=True)
salted_name = salt + name
name_hash = hashlib.sha256(salted_name.encode()).hexdigest()
header = '<html><head><title>Identidock</title></head><body>'
body = '''<form method="POST">
Hello <input type="text" name="name" value="{0}">
<input type="submit" value="submit">
</form>
<p>You look like a:
<img src="/monster/{1}"/>
'''.format(name, name_hash)
footer = '</body></html>'
return header + body + footer
@app.route('/monster/<name>')
def get_identicon(name):
name = html.escape(name, quote=True)
image = cache.get(name)
if image is None:
print("Cache miss", flush=True)
r = requests.get('http://dnmonster:8080/monster/' + name + '?size=80')
image = r.content
cache.set(name, image)
return Response(image, mimetype='image/png')
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| [
"aoyama@sansan.com"
] | aoyama@sansan.com |
ff387e92c9c6a86f4263cbb335c2ceb25f5dd756 | e9586485d778efc1938efa8f4c3d427288e4ed46 | /blog/urls.py | d1911a18ba62066ab872db548daa9433921e0643 | [] | no_license | nantaletracy/my-first-blog | 98ea6af2691343115e6a18480900863b7a5ed290 | d3f0043bc686ff6faaa21f61429711e509fae795 | refs/heads/master | 2020-03-27T19:27:15.568016 | 2018-09-01T12:58:25 | 2018-09-01T12:58:25 | 146,985,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | from django.urls import path
from . import views
# Route the blog root URL to the post-list view.
urlpatterns = [
    path('', views.post_list, name='post_list'),
]
| [
"nantaletracycynthia@gmail.com"
] | nantaletracycynthia@gmail.com |
4e40e93a445cfbd2aa655830d8a1cdbe6617f66a | 474fc018047eba79d5e7a544ac676a31365de9b6 | /dashboard/controllers/administrator/apis/educators_rating.py | 0d6218fb71a063c39c8e87452a392736388a3d1b | [] | no_license | moohhaammaad/GP | db9bee6e87cc523195f028a2a3e5736fdd2c64e2 | 68b6c488a9f38e0a9d2e2370c0e786b88cf005a2 | refs/heads/master | 2020-03-12T04:03:47.655135 | 2020-01-15T09:31:57 | 2020-01-15T09:31:57 | 130,437,251 | 0 | 0 | null | 2018-04-21T03:42:41 | 2018-04-21T03:42:41 | null | UTF-8 | Python | false | false | 718 | py | from django.http import JsonResponse
from dashboard.controllers.administrator.base import AdministratorBaseView
from dashboard.logic import *
class AdministratorEducatorsRatingApi(AdministratorBaseView):
    """JSON endpoint returning educator ratings, optionally filtered by
    department and academic year (both taken from the query string)."""

    def get(self, request, user_id):
        # Optional query-string filters; None means "no filter".
        department_id = request.GET.get('department_id')
        year_id = request.GET.get('year_id')

        ratings = administrator.get_educators_rating(
            department_id=department_id,
            year_id=year_id,
        )
        return JsonResponse({'result': list(ratings)})
| [
"midomedo33@gmail.com"
] | midomedo33@gmail.com |
5ede66c9648bc1b9d0917421a96337994e9eb7b9 | fabda2fec11a6212d706a5c2b8f6855225273a06 | /FeathersFightApp/migrations/0014_auto_20210916_1309.py | 94fc47944dbd77bd946ff71fdc9ffacbf2cb6adc | [] | no_license | Vitor-W-Espindola/Feather-s-Fight-with-Django | d4bb91dfda5b66404b1f26e64e7400d2f4229988 | d8b6e5ce56a8e6d6c1b05b1827db5c776e2da61d | refs/heads/main | 2023-08-14T06:35:28.813442 | 2021-09-22T14:32:47 | 2021-09-22T14:32:47 | 405,784,666 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | # Generated by Django 3.2.7 on 2021-09-16 13:09
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated.  NOTE(review): every default below is a *fixed*, naive
    # datetime captured when the migration was generated (typical symptom of
    # `default=datetime.now()` -- evaluated at model-definition time --
    # instead of `default=datetime.now` or `auto_now_add=True`); confirm
    # against the model definitions before relying on these defaults.

    dependencies = [
        ('FeathersFightApp', '0013_auto_20210916_0206'),
    ]

    operations = [
        migrations.AlterField(
            model_name='deleterequest',
            name='delete_request_date',
            field=models.DateTimeField(default=datetime.datetime(2021, 9, 16, 13, 9, 42, 714590)),
        ),
        migrations.AlterField(
            model_name='editrequest',
            name='edit_request_date',
            field=models.DateTimeField(default=datetime.datetime(2021, 9, 16, 13, 9, 42, 714303)),
        ),
        migrations.AlterField(
            model_name='publicationrequest',
            name='request_datetime',
            field=models.DateTimeField(default=datetime.datetime(2021, 9, 16, 13, 9, 42, 713999)),
        ),
    ]
| [
"vitor.w@edu.pucrs.br"
] | vitor.w@edu.pucrs.br |
1fc3ffd950b3d4ac931905b47906453a274768c3 | b87960239c88eee978861992f808fbd57f52e783 | /user.py | ba2373e7eaf930361e5306058a418c6aef3d9525 | [] | no_license | warlockJay/Sentiment-Analysis-on-ChatApp-with-Database-Integration-FULL-STACK- | f1931f8e464f746b9f5355c162dcdf42e65f2109 | a6f12a7cad9d03e31ff48487922df17d0772e885 | refs/heads/master | 2022-11-22T14:51:09.773158 | 2020-07-23T10:11:14 | 2020-07-23T10:11:14 | 281,914,648 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | from werkzeug.security import check_password_hash
class User:
    """Flask-Login style user object backed by a username, an e-mail address
    and a (werkzeug-hashed) password.

    NOTE(review): current Flask-Login expects ``is_authenticated`` etc. as
    properties; these staticmethods match the older callable API -- verify
    against the flask_login version in use.
    """

    def __init__(self, username, email, password):
        # `password` is expected to be a hash produced by werkzeug, not
        # plaintext -- check_password() compares against it.
        self.username, self.email, self.password = username, email, password

    @staticmethod
    def is_authenticated():
        """Any instantiated user counts as authenticated."""
        return True

    @staticmethod
    def is_active():
        """All accounts are considered active."""
        return True

    @staticmethod
    def is_anonymous():
        """Real users are never anonymous."""
        return False

    def get_id(self):
        """Return the identifier Flask-Login stores in the session."""
        return self.username

    def check_password(self, password_input):
        """True if *password_input* matches the stored password hash."""
        return check_password_hash(self.password, password_input)
"noreply@github.com"
] | warlockJay.noreply@github.com |
bacfdc1a5cd8a5abb14487ce99ce36f10832ae3e | e27da822638b4ede1cccbd880285374d8f45dcf2 | /test.py | 38739658c804dd2e703a7d6616e35e1409cc5c34 | [] | no_license | Jungjaewon/Single_Image_SuperResolution_via_Holistic_Attention_Network | e5b88706937009a028ed392497c5a85b5e8b82a9 | e6faca3df27f193c45efe2fcd885c73e22eca3f0 | refs/heads/main | 2023-01-29T00:53:10.264030 | 2020-12-17T01:04:52 | 2020-12-17T01:04:52 | 319,529,799 | 12 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | import torch
import torch.nn as nn
class Channel_Spatial_Attention_Module(nn.Module):
    """Channel-spatial attention (HAN CSAM): a 3-D convolution over the
    feature volume produces a sigmoid mask that gates the input residually."""

    def __init__(self):
        super(Channel_Spatial_Attention_Module, self).__init__()
        self.conv_3d = nn.Conv3d(1, 1, kernel_size=3, padding=1, stride=1)
        self.sigmoid = nn.Sigmoid()
        # Learnable residual weight, initialised to zero so the module
        # starts out as an exact identity mapping.
        self.scale = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        batch, channels, height, width = x.size()
        # Treat the channel axis as depth for the single-channel 3-D conv.
        volume = x.reshape(batch, 1, channels, height, width)
        mask = self.sigmoid(self.conv_3d(volume)).reshape(batch, channels, height, width)
        return (self.scale * mask) * x + x
class Layer_Attention_Module(nn.Module):
    """Layer attention (HAN LAM): similarity between the N stacked feature
    maps re-weights them; the result is added back residually and the layer
    and channel axes are merged for the downstream convolution."""

    def __init__(self):
        super(Layer_Attention_Module, self).__init__()
        self.softmax = nn.Softmax(dim=2)
        # Zero-initialised residual weight -> identity at start of training.
        self.scale = nn.Parameter(torch.zeros(1))

    def forward(self, feature_group):
        b, n, c, h, w = feature_group.size()
        flat = feature_group.view(b, n, c * h * w)

        # (b, n, n) inter-layer attention map, softmax-normalised per row.
        # NOTE(review): the second operand is a memory *view*, not a
        # transpose -- flat.view(b, c*h*w, n) != flat.transpose(1, 2);
        # confirm this is intended (standard attention uses the transpose).
        attention = self.softmax(torch.bmm(flat, flat.view(b, c * h * w, n)))

        attended = torch.bmm(attention, flat).view(b, n, c, h, w)
        attended = self.scale * attended + feature_group

        # Collapse (n, c) into a single channel axis.
        return attended.view(b, n * c, h, w)
if __name__ == '__main__':
    # Smoke test: run both attention modules on dummy feature maps.
    base = [torch.rand((3, 5, 10, 10)) for _ in range(10)]
    # print(torch.stack(base, dim=1).size())

    CSA = Channel_Spatial_Attention_Module()
    LA = Layer_Attention_Module()

    print(CSA(base[-1]).size())
    print(LA(torch.stack(base, dim=1)).size())

    feature_csa = CSA(base[-1])
    feature_la = LA(torch.stack(base, dim=1))
    # NOTE(review): CSA keeps the input shape (3, 5, 10, 10) while LA emits
    # (3, 50, 10, 10); those do not broadcast, so this addition raises --
    # confirm the intended combination of the two branches.
    feature_csa + feature_la
"woodcook486@naver.com"
] | woodcook486@naver.com |
54f126ade815193aaff99c1a75e1cf19095aebcc | 1694b9837aa7e4ef6dba893521ab7c1aad832536 | /foundry_backend/database/migrations/0015_property_acreage.py | 8a950f20b0f32109809771e560d317ddd987e6b7 | [] | no_license | willBoyd8/foundry_backend | e97451b39ae559e7fdf58b593d3f40de38a4477f | 0c20f7a0ee58904b69bc1b5c5a06755dd44b4244 | refs/heads/master | 2023-04-27T07:43:35.451945 | 2019-12-05T06:27:39 | 2019-12-05T06:27:39 | 208,946,749 | 2 | 0 | null | 2023-04-21T20:52:43 | 2019-09-17T03:01:08 | Python | UTF-8 | Python | false | false | 595 | py | # Generated by Django 2.2.5 on 2019-11-17 21:35
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds the required `acreage` DecimalField (2 decimal
    # places, validated to 0..999.99) to Property, with a one-off default
    # of 1.0 applied to existing rows (preserve_default=False).

    dependencies = [
        ('database', '0014_auto_20191116_2058'),
    ]

    operations = [
        migrations.AddField(
            model_name='property',
            name='acreage',
            field=models.DecimalField(decimal_places=2, default=1.0, max_digits=5, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(999.99)]),
            preserve_default=False,
        ),
    ]
| [
"noreply@github.com"
] | willBoyd8.noreply@github.com |
f6c293d06b8c76b853843fdd60b3b7470685085a | c1cb8cb93e06180453ad0cbb65ef2154f67591f2 | /day17/day17p2.py | 74a8b22ed8cf7fe40c646beb77605e9df6563751 | [] | no_license | kenkitts/advent_of_code | 1dd442ef5879e03b4bcf77e8ded9fdb0e8fa32ed | bd895d36520729238a26d0cbe3c7391003ca44b6 | refs/heads/main | 2023-03-10T17:03:46.932006 | 2021-02-16T20:53:07 | 2021-02-16T20:53:07 | 323,134,930 | 1 | 1 | null | 2020-12-24T23:03:59 | 2020-12-20T18:09:26 | Python | UTF-8 | Python | false | false | 2,416 | py | from collections import defaultdict
import time
start_time = time.time()  # wall-clock start for the timing report at the end

# Grid of cells: (x, y, z, w) -> active?  (defaultdict() is created with no
# factory here, so it behaves like a plain dict.)
map = defaultdict()
# Bounding box of the tracked region, per axis ('-x' = min x, 'x' = max x);
# the initial bounds cover the puzzle input slice at z = w = 0.
cube_d = {'-x':0,'x':7,'-y':0,'y':7,'-z':0,'z':0, '-w':0,'w':0}
# Parse the starting plane: '.' = inactive, anything else ('#') = active.
with open('input.txt','rt') as file:
    for x,line in enumerate(file):
        for y,char in enumerate(line.strip("\n")):
            if char == '.':
                map.update({(x,y,0,0):False})
            else:
                map.update({(x,y,0,0):True})
def expand_cube(map, cube_d):
    """Grow the tracked region by one cell in every direction.

    Every coordinate inside the enlarged bounding box that is not yet
    present is inserted as inactive (False); existing cells are left
    untouched.  Mutates both *map* and the *cube_d* bounds in place and
    returns *map*.
    """
    xs = range(cube_d['-x'] - 1, cube_d['x'] + 2)
    ys = range(cube_d['-y'] - 1, cube_d['y'] + 2)
    zs = range(cube_d['-z'] - 1, cube_d['z'] + 2)
    ws = range(cube_d['-w'] - 1, cube_d['w'] + 2)
    for x in xs:
        for y in ys:
            for z in zs:
                for w in ws:
                    map.setdefault((x, y, z, w), False)

    # Record the new, one-step-larger bounds.
    for low, high in (('-x', 'x'), ('-y', 'y'), ('-z', 'z'), ('-w', 'w')):
        cube_d[low] -= 1
        cube_d[high] += 1
    return map
def cycle(map, iterations=6):
    """Run the 4-D Conway-cube simulation for *iterations* steps.

    Each step first pads the grid with a one-cell border of inactive cells
    (mutating the module-level ``cube_d`` bounds via ``expand_cube``), then
    applies the rules to every tracked cell:
      * an active cell stays active with exactly 2 or 3 active neighbours;
      * an inactive cell becomes active with exactly 3 active neighbours.
    Returns the final {coordinate: bool} grid.
    """
    while iterations > 0:
        expand_cube(map, cube_d)
        new_map = defaultdict()
        for i in map:
            x, y, z, w = i[0], i[1], i[2], i[3]
            # Count active cells among the 80 surrounding neighbours
            # (cells outside the map read as None via .get -> inactive).
            active_neighbors = 0
            for xx in range(x-1, x+2):
                for yy in range(y-1, y+2):
                    for zz in range(z-1, z+2):
                        for ww in range(w-1, w+2):
                            if (xx, yy, zz, ww) == (x, y, z, w):
                                continue
                            if map.get((xx, yy, zz, ww)) is True:
                                active_neighbors += 1
            if map.get((x, y, z, w)) is True:
                if active_neighbors == 2 or active_neighbors == 3:
                    new_map[(x, y, z, w)] = True
                else:
                    new_map[(x, y, z, w)] = False
            if map.get((x, y, z, w)) is False:
                if active_neighbors == 3:
                    new_map[(x, y, z, w)] = True
                else:
                    new_map[(x, y, z, w)] = False
        # All updates are applied simultaneously: swap in the new grid.
        map = new_map.copy()
        iterations -= 1
    return map
# Run the full six-cycle boot sequence, then count the active cubes.
map = cycle(map)
count = 0
for i in map.values():
    if i is True:
        count += 1
stop_time = time.time()
total_time = stop_time - start_time
print('The answer to day 17 part 2 is {}. It took {} seconds to complete'.format(count,total_time))
"Kenneth.G.KittsJR@aexp.com"
] | Kenneth.G.KittsJR@aexp.com |
ce5acc3da09052bc865fd91bb2273a4a147c130e | 4a23908779804445aa382d89df582593945cdd3c | /src/sas/guiframe/pdfview.py | c1bd133929edf35a808644311bba824cbecc6a6f | [] | no_license | diffpy/srfit-sasview | 97930afca42b13774461d4993a80575f3e98c721 | 792e6be8e13f67657de88e5fb9a9afb91a750c30 | refs/heads/master | 2021-01-18T05:00:15.447038 | 2015-12-08T16:19:34 | 2015-12-08T16:19:34 | 44,706,210 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,219 | py | # Read PDF files by embeding the Adobe Acrobat Reader
# wx.activex module uses class ActiveX control
import wx
import os
if wx.Platform == '__WXMSW__':
from wx.lib.pdfwin import PDFWindow
from wx.lib.scrolledpanel import ScrolledPanel
# Multi-line, read-only text control with a horizontal scrollbar.
STYLE = wx.TE_MULTILINE|wx.TE_READONLY|wx.SUNKEN_BORDER|wx.HSCROLL
class TextPanel(ScrolledPanel):
    """
    Panel that contains the text
    """
    def __init__(self, parent, text=None):
        """
        :param parent: hosting frame; destroyed when a close event arrives
        :param text: string shown in the read-only control
        """
        ScrolledPanel.__init__(self, parent, id=-1)
        self.SetupScrolling()
        self.parent = parent
        self.text = text

        sizer = wx.BoxSizer(wx.VERTICAL)
        # Read-only, scrollable text control filling the whole panel.
        self.textctl = wx.TextCtrl(self, -1, size=(-1, -1), style=STYLE)
        self.textctl.SetValue(self.text)
        sizer.Add(self.textctl, proportion=1, flag=wx.EXPAND)
        self.SetSizer(sizer)
        self.SetAutoLayout(True)

        # Route the frame's close event through OnClose (classic-wx binding).
        wx.EVT_CLOSE(self.parent, self.OnClose)

    def OnClose(self, event):
        """
        Close panel
        """
        self.parent.Destroy()
class TextFrame(wx.Frame):
    """Top-level frame hosting a single read-only TextPanel."""

    def __init__(self, parent, id, title, text):
        """Create the frame and embed a TextPanel showing *text*.

        :param parent: parent window (may be None)
        :param text: string displayed in the panel
        """
        wx.Frame.__init__(self, parent, id, title,
                          wx.DefaultPosition, wx.Size(600, 830))
        TextPanel(self, text)  # the panel attaches itself to this frame
        self.SetFocus()
class PDFPanel(wx.Panel):
    """
    Panel that contains the pdf reader
    """
    def __init__(self, parent, path=None):
        """
        :param parent: hosting frame; destroyed by OnClose
        :param path: full path of the initially displayed PDF
        """
        wx.Panel.__init__(self, parent, id=-1)
        self.parent = parent
        self.path = path
        sizer = wx.BoxSizer(wx.VERTICAL)
        btnSizer = wx.BoxSizer(wx.HORIZONTAL)

        # Embedded ActiveX/Acrobat PDF control (Windows only, see module top).
        self.pdf = PDFWindow(self, style=wx.SUNKEN_BORDER)
        sizer.Add(self.pdf, proportion=1, flag=wx.EXPAND)

        btn = wx.Button(self, wx.NewId(), "Open PDF File")
        self.Bind(wx.EVT_BUTTON, self.OnOpenButton, btn)
        btnSizer.Add(btn, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
        # Show the initial document.
        self.pdf.LoadFile(self.path)

        btn = wx.Button(self, wx.NewId(), "Previous Page")
        self.Bind(wx.EVT_BUTTON, self.OnPrevPageButton, btn)
        btnSizer.Add(btn, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)

        btn = wx.Button(self, wx.NewId(), "Next Page")
        self.Bind(wx.EVT_BUTTON, self.OnNextPageButton, btn)
        btnSizer.Add(btn, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)

        btn = wx.Button(self, wx.NewId(), "Close")
        self.Bind(wx.EVT_BUTTON, self.OnClose, btn)
        btnSizer.Add(btn, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)

        btnSizer.Add((50,-1), proportion=2, flag=wx.EXPAND)
        sizer.Add(btnSizer, proportion=0, flag=wx.EXPAND)
        self.SetSizer(sizer)
        self.SetAutoLayout(True)

        # Route the hosting frame's close event through OnClose
        # (classic-wx style binding).
        wx.EVT_CLOSE(self.parent, self.OnClose)

    def OnOpenButton(self, event):
        """
        Open file button
        """
        # make sure you have PDF files available on your drive
        dlg = wx.FileDialog(self, wildcard="*.pdf")
        dlg.SetDirectory(os.path.dirname(self.path))
        if dlg.ShowModal() == wx.ID_OK:
            wx.BeginBusyCursor()
            file = dlg.GetPath()
            self.pdf.LoadFile(file)
            # Frame title becomes the file name up to its first dot.
            self.parent.SetTitle(os.path.basename(file.split('.')[0]))
            wx.EndBusyCursor()
        dlg.Destroy()

        # Let Panel know the file changed: Avoiding C++ error
        self.Update()

    def OnLoad(self, event=None, path=None):
        """
        Load a pdf file

        : Param path: full path to the file
        """
        self.pdf.LoadFile(path)

    def OnPrevPageButton(self, event):
        """
        Goes to Previous page
        """
        self.pdf.gotoPreviousPage()

    def OnNextPageButton(self, event):
        """
        Goes to Next page
        """
        self.pdf.gotoNextPage()

    def OnClose(self, event):
        """
        Close panel
        """
        self.parent.Destroy()
class PDFFrame(wx.Frame):
    """Top-level frame hosting a single PDFPanel."""

    def __init__(self, parent, id, title, path):
        """Create the frame and embed a PDFPanel for *path*.

        :param parent: parent window (may be None)
        :param path: full path of the pdf file
        """
        wx.Frame.__init__(self, parent, id, title,
                          wx.DefaultPosition, wx.Size(600, 830))
        PDFPanel(self, path)  # the panel attaches itself to this frame
class ViewApp(wx.App):
    """Minimal wx application that opens a single PDF viewer frame."""

    def OnInit(self):
        # No document is preloaded; the user picks one via "Open PDF File".
        frame = PDFFrame(None, -1, "PDFView", path=None)
        frame.Show(True)
        return True
if __name__ == "__main__":
    # Launch the standalone PDF viewer.
    ViewApp(0).MainLoop()
| [
"jhjcho@gmail.com"
] | jhjcho@gmail.com |
f1aa0caced9e2cf7cc85781d8e933e093f0ffabc | 9bb92edc8f60cf393d9ef47768524ca00f0cc3a1 | /model_trainer/Implicit_Arg2_extractor/trainer.py | 053f038f28be743a9a7edd5283e116854a3b2941 | [] | no_license | qkaren/CoNLL2016-CNN | e514e5970467edcaff3a3213b7ca0d69072ce378 | 86ee7d769c7aaf7fa9a93478e84c41b3fa13d483 | refs/heads/master | 2020-07-31T21:42:14.799674 | 2017-09-28T13:51:32 | 2017-09-28T13:51:32 | 73,591,629 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,502 | py | #coding:utf-8
from model_trainer.mallet_classifier import *
from model_trainer.Implicit_Arg2_extractor.make_feature_file import implicit_arg2_make_feature_file
from model_trainer.Implicit_Arg2_extractor.feature_functions import *
from pdtb_parse import PDTB_PARSE
from model_trainer.Implicit_Arg2_extractor import evaluation
from operator import itemgetter
class Trainer:
def __init__(self, classifier, model_path, feature_function_list,
train_feature_path ,dev_feature_path, dev_result_file_path):
self.classifier = classifier
self.model_path = model_path
self.feature_function_list = feature_function_list
self.train_feature_path = train_feature_path
self.dev_feature_path = dev_feature_path
self.dev_result_file_path = dev_result_file_path
def make_feature_file(self, train_pdtb_parse, dev_pdtb_parse):
print("make %s feature file ..." % ("train"))
implicit_arg2_make_feature_file(train_pdtb_parse, self.feature_function_list, self.train_feature_path)
print("make %s feature file ..." % ("dev"))
implicit_arg2_make_feature_file(dev_pdtb_parse, self.feature_function_list, self.dev_feature_path)
def train_mode(self):
classifier.train_model(self.train_feature_path, self.model_path)
def test_model(self):
classifier.test_model(self.dev_feature_path, self.dev_result_file_path, self.model_path)
def get_evaluation(self):
cm =evaluation.get_evaluation(self.dev_result_file_path)
cm.print_out()
Arg2_Acc = evaluation.get_Arg2_Acc()
print("Arg2_Acc: %.2f" % Arg2_Acc)
return Arg2_Acc
if __name__ == "__main__":
    # Feature set chosen by greedy forward selection on the dev set.
    # Earlier configurations, kept for reference:
    #   - full hand-crafted feature list                     -> MaxEnt,     Arg2 acc 76.24
    #   - {prev_curr_CP_production_rule,
    #      curr_next_CP_production_rule,
    #      is_NNP_WP, clause_word_num}                       -> NaiveBayes, Arg2 acc 77.01
    #   - the list below                                     -> MaxEnt,     Arg2 acc 77.41
    feature_function_list = [
        prev_curr_CP_production_rule,
        is_NNP_WP,
        is_curr_NNP_prev_PRP_or_NNP,
        clause_word_num,
        prev2_pos_lemma_verb,
        next_first,
        prev_last,
    ]

    # Train & dev PDTB parses.
    train_pdtb_parse = PDTB_PARSE(config.PARSERS_TRAIN_PATH_JSON, config.PDTB_TRAIN_PATH, config.TRAIN)
    dev_pdtb_parse = PDTB_PARSE(config.PARSERS_DEV_PATH_JSON, config.PDTB_DEV_PATH, config.DEV)

    # Train & dev feature output paths.
    train_feature_path = config.IMPLICIT_ARG2_TRAIN_FEATURE_OUTPUT_PATH
    dev_feature_path = config.IMPLICIT_ARG2_DEV_FEATURE_OUTPUT_PATH

    # Classifier, model location and dev prediction output.
    classifier = Mallet_classifier(MaxEnt())
    model_path = config.IMPLICIT_ARG2_CLASSIFIER_MODEL
    dev_result_file_path = config.IMPLICIT_ARG2_DEV_OUTPUT_PATH

    # ---- trainer ----
    trainer = Trainer(classifier, model_path, feature_function_list,
                      train_feature_path, dev_feature_path, dev_result_file_path)
    trainer.make_feature_file(train_pdtb_parse, dev_pdtb_parse)  # extract features
    trainer.train_mode()      # train
    trainer.test_model()      # predict on dev
    trainer.get_evaluation()  # report Arg2 accuracy

    # NOTE: a commented-out greedy forward feature-selection loop (which
    # produced the feature list above and wrote per-combination scores to
    # result.txt) was removed from here; recover it from version history
    # if the search ever needs to be re-run.
pass | [
"qkaren@mercury.mercury"
] | qkaren@mercury.mercury |
0c9102bbd125f3b6f41e16248615229506d6e220 | 8f1c6b052d69ec70583e439d464dd0bc1b170e3f | /tools/tensor_read.py | 40c8313b651733dbc15d06f567fd77388be107d0 | [] | no_license | recsysgroup/deep-match | 2d250e7ec90a30d46f14a4ff9a24bcf8d6b7840a | 971c540d56a7402af0442d9257da34ecc9433f60 | refs/heads/main | 2023-03-27T07:19:12.662327 | 2021-03-27T03:22:21 | 2021-03-27T03:22:21 | 338,772,344 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | from tensorflow.python import pywrap_tensorflow
import tensorflow as tf
# Command-line flags: `checkpoint` names the checkpoint file to inspect,
# `name` is the tensor whose values should be dumped.
flags = tf.flags
FLAGS = flags.FLAGS
# pai
flags.DEFINE_string("checkpoint", None, "")
flags.DEFINE_string("name", None, "")
def main(_):
    """List every tensor in a checkpoint, then dump one tensor's values.

    Args:
        _: unused argv remainder supplied by tf.app.run().

    Reads the checkpoint at FLAGS.checkpoint, prints each tensor name it
    contains, then prints the type and the per-row values of the tensor
    named FLAGS.name.
    """
    # Read data from checkpoint file
    reader = pywrap_tensorflow.NewCheckpointReader(FLAGS.checkpoint)
    var_to_shape_map = reader.get_variable_to_shape_map()
    # Print tensor name and values
    for key in var_to_shape_map:
        print("tensor_name: ", key)
    tensor = reader.get_tensor(FLAGS.name)
    # BUGFIX: `print type(tensor)` / `print (i)` were Python 2 print
    # statements; the rest of the file uses print() calls, and the old form
    # is a SyntaxError under Python 3.
    print(type(tensor))
    for i in tensor:
        print(i)
# Script entry point: tf.app.run() parses the flags and invokes main().
if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()
| [
"jiusheng.lsw@alibaba-inc.com"
] | jiusheng.lsw@alibaba-inc.com |
f5435f602b8973519150389a75dd7328fe65e570 | c0f808504dd3d7fd27c39f1503fbc14c1d37bf9f | /sources/scipy-scipy-414c1ab/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py | 6f7cd7acdb421fa1497d93d5b68da26ef2943b61 | [] | no_license | georgiee/lip-sync-lpc | 7662102d4715e4985c693b316a02d11026ffb117 | e931cc14fe4e741edabd12471713bf84d53a4250 | refs/heads/master | 2018-09-16T08:47:26.368491 | 2018-06-05T17:01:08 | 2018-06-05T17:01:08 | 5,779,592 | 17 | 4 | null | null | null | null | UTF-8 | Python | false | false | 6,310 | py | #!/usr/bin/env python
# Created by: Robert Cimrman, 05.12.2005
"""Benchamrks for umfpack module"""
from optparse import OptionParser
import time
import urllib
import gzip
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg.dsolve.umfpack as um
import scipy.linalg as nla
defaultURL = 'http://www.cise.ufl.edu/research/sparse/HBformat/'
usage = """%%prog [options] <matrix file name> [<matrix file name>, ...]
<matrix file name> can be a local or distant (gzipped) file
default url is:
%s
supported formats are:
triplet .. [nRow, nCol, nItem] followed by 'nItem' * [ir, ic, value]
hb .. Harwell-Boeing format N/A
""" % defaultURL
##
# 05.12.2005, c
def read_triplet( fd ):
    """Parse a sparse matrix in plain triplet format from an open stream.

    Expected layout: a "nRow nCol" header line, a line holding the number
    of entries, then one "row col value" line per entry.  Returns a
    scipy.sparse.csc_matrix of shape (nRow, nCol).
    """
    shape_fields = fd.readline().split()
    nRow, nCol = int(shape_fields[0]), int(shape_fields[1])
    nItem = int(fd.readline())
    ij = np.zeros((nItem, 2), np.int32)
    val = np.zeros((nItem,), np.float64)
    for idx, line in enumerate(fd.readlines()):
        fields = line.split()
        ij[idx] = int(fields[0]), int(fields[1])
        val[idx] = float(fields[2])
    return sp.csc_matrix((val, ij), dims=(nRow, nCol), nzmax=nItem)
##
# 06.12.2005, c
def read_triplet2( fd ):
    """Vectorised variant of read_triplet using scipy's array reader.

    NOTE(review): `io` is never imported in this module, so this function
    raises NameError as written; it appears to be dead code kept for
    reference (and `scipy.io.read_array` no longer exists in modern scipy).
    """
    nRow, nCol = map( int, fd.readline().split() )
    nItem = int( fd.readline() )
    ij, val = io.read_array( fd,
                             columns = [(0,1), (2,)],
                             atype = (np.int32, np.float64),
                             rowsize = nItem )
    mtx = sp.csc_matrix( (val, ij), dims = (nRow, nCol), nzmax = nItem )
    return mtx
formatMap = {'triplet' : read_triplet}
##
# 05.12.2005, c
def readMatrix( matrixName, options ):
    """Fetch (locally or over HTTP) and parse a sparse matrix file.

    Python 2 code: uses print statements and `urllib.urlretrieve`.  When
    `options.default_url` is set, `matrixName` is prefixed with
    `defaultURL`.  Gzipped files are transparently decompressed.  Raises
    ValueError for a format missing from `formatMap`.
    """
    if options.default_url:
        matrixName = defaultURL + matrixName
    print 'url:', matrixName
    if matrixName[:7] == 'http://':
        # Download to a temporary local file; urlretrieve returns its path.
        fileName, status = urllib.urlretrieve( matrixName )
##        print status
    else:
        fileName = matrixName
    print 'file:', fileName
    try:
        # Local name deliberately shadows this function with the parser
        # callable looked up from formatMap.
        readMatrix = formatMap[options.format]
    except:
        raise ValueError('unsupported format: %s' % options.format)
    print 'format:', options.format
    print 'reading...'
    if fileName.endswith('.gz'):
        fd = gzip.open( fileName )
    else:
        fd = open( fileName )
    mtx = readMatrix( fd )
    fd.close()
    print 'ok'
    return mtx
##
# 05.12.2005, c
def main():
    """Benchmark the umfpack solver on the matrices named on the command line.

    For each matrix, solves A x = b with a manufactured right-hand side
    b = A * ones (so the exact solution is all ones), recording wall time
    and the norms of the residual and of the solution error.  With -c the
    same system is also solved with scipy.sparse's solver; with -p the
    timings are plotted via pylab.  Python 2 script (print statements,
    time.clock()).
    """
    parser = OptionParser( usage = usage )
    parser.add_option( "-c", "--compare",
                       action = "store_true", dest = "compare",
                       default = False,
                       help = "compare with default scipy.sparse solver [default: %default]" )
    parser.add_option( "-p", "--plot",
                       action = "store_true", dest = "plot",
                       default = False,
                       help = "plot time statistics [default: %default]" )
    parser.add_option( "-d", "--default-url",
                       action = "store_true", dest = "default_url",
                       default = False,
                       help = "use default url [default: %default]" )
    parser.add_option( "-f", "--format", type = type( '' ),
                       dest = "format", default = 'triplet',
                       help = "matrix format [default: %default]" )
    (options, args) = parser.parse_args()
    if (len( args ) >= 1):
        matrixNames = args;
    else:
        parser.print_help(),
        return
    # Per-matrix accumulators: shapes, nonzero counts, [umfpack, scipy]
    # timings, and a 2x2 error matrix per run ([solver][residual, sol-error]).
    sizes, nnzs, times, errors = [], [], [], []
    legends = ['umfpack', 'sparse.solve']
    for ii, matrixName in enumerate( matrixNames ):
        print '*' * 50
        mtx = readMatrix( matrixName, options )
        sizes.append( mtx.shape )
        nnzs.append( mtx.nnz )
        tts = np.zeros( (2,), dtype = np.double )
        times.append( tts )
        err = np.zeros( (2,2), dtype = np.double )
        errors.append( err )
        print 'size              : %s (%d nnz)' % (mtx.shape, mtx.nnz)
        # Manufactured solution: b = A * 1, so x_exact is a vector of ones.
        sol0 = np.ones( (mtx.shape[0],), dtype = np.double )
        rhs = mtx * sol0
        umfpack = um.UmfpackContext()
        tt = time.clock()
        sol = umfpack( um.UMFPACK_A, mtx, rhs, autoTranspose = True )
        tts[0] = time.clock() - tt
        print "umfpack           : %.2f s" % tts[0]
        error = mtx * sol - rhs
        err[0,0] = nla.norm( error )
        print '||Ax-b||          :', err[0,0]
        error = sol0 - sol
        err[0,1] = nla.norm( error )
        print '||x - x_{exact}|| :', err[0,1]
        if options.compare:
            tt = time.clock()
            sol = sp.solve( mtx, rhs )
            tts[1] = time.clock() - tt
            print "sparse.solve      : %.2f s" % tts[1]
            error = mtx * sol - rhs
            err[1,0] = nla.norm( error )
            print '||Ax-b||          :', err[1,0]
            error = sol0 - sol
            err[1,1] = nla.norm( error )
            print '||x - x_{exact}|| :', err[1,1]
    if options.plot:
        try:
            import pylab
        except ImportError:
            raise ImportError("could not import pylab")
        times = np.array( times )
        print times
        pylab.plot( times[:,0], 'b-o' )
        if options.compare:
            pylab.plot( times[:,1], 'r-s' )
        else:
            del legends[1]
        print legends
        ax = pylab.axis()
        y2 = 0.5 * (ax[3] - ax[2])
        xrng = range( len( nnzs ) )
        # Annotate each matrix with its size and the summed error norms,
        # staggering the labels vertically along a sine wave.
        for ii in xrng:
            yy = y2 + 0.4 * (ax[3] - ax[2])\
                 * np.sin( ii * 2 * np.pi / (len( xrng ) - 1) )
            if options.compare:
                pylab.text( ii+0.02, yy,
                            '%s\n%.2e err_umf\n%.2e err_sp'
                            % (sizes[ii], np.sum( errors[ii][0,:] ),
                               np.sum( errors[ii][1,:] )) )
            else:
                pylab.text( ii+0.02, yy,
                            '%s\n%.2e err_umf'
                            % (sizes[ii], np.sum( errors[ii][0,:] )) )
            pylab.plot( [ii, ii], [ax[2], ax[3]], 'k:' )
        pylab.xticks( xrng, ['%d' % (nnzs[ii] ) for ii in xrng] )
        pylab.xlabel( 'nnz' )
        pylab.ylabel( 'time [s]' )
        pylab.legend( legends )
        pylab.axis( [ax[0] - 0.05, ax[1] + 1, ax[2], ax[3]] )
        pylab.show()
# Script entry point (Python 2 benchmark driver).
if __name__ == '__main__':
    main()
| [
"georgios@kaleadis.de"
] | georgios@kaleadis.de |
fc25356354bc680cf49d82450ed1864df13bc7cb | 18ccaa1160f49f0d91f1d9dc376f860aed8a9c2a | /tracpro/groups/tests/test_middleware.py | 170e207ec77587705841395a880fa174d72a1d05 | [
"BSD-3-Clause"
] | permissive | caktus/tracpro | bb6033b170b7a77cf9ac76b1be2779b71afa80e0 | 368f43e666d3c718843dffe934ba35ca859ebaf7 | refs/heads/develop | 2020-12-24T22:06:21.341755 | 2016-01-22T13:16:29 | 2016-01-22T13:16:29 | 50,186,576 | 0 | 0 | null | 2016-01-22T14:38:29 | 2016-01-22T14:38:28 | null | UTF-8 | Python | false | false | 8,083 | py | from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory
from tracpro.test import factories
from tracpro.test.cases import TracProTest
from ..middleware import UserRegionsMiddleware
from ..models import Region
class TestUserRegionsMiddleware(TracProTest):
    """Unit tests for UserRegionsMiddleware.

    Each test builds a bare request with Django's RequestFactory and calls
    the middleware's individual ``set_*`` hooks (or ``process_request``)
    directly, attaching any prerequisite attributes (user, org, session,
    user_regions, ...) onto the request by hand.
    """

    def setUp(self):
        super(TestUserRegionsMiddleware, self).setUp()
        self.middleware = UserRegionsMiddleware()
        self.org = factories.Org()
        self.user = factories.User()

    def get_request(self, **kwargs):
        # Build a GET request whose Host header carries the org subdomain;
        # every keyword argument is attached verbatim as a request attribute.
        request_kwargs = {'HTTP_HOST': "{}.testserver".format(self.org.subdomain)}
        request = RequestFactory().get("/", **request_kwargs)
        for key, value in kwargs.items():
            setattr(request, key, value)
        return request

    def make_regions(self):
        """Create a collection of nested regions."""
        # Tree: Uganda > (Kampala > Makerere, Entebbe);
        #       Kenya > (Nairobi > Inactive[is_active=False], Mombasa).
        self.region_uganda = factories.Region(
            org=self.org, name="Uganda")
        self.region_kampala = factories.Region(
            org=self.org, name="Kampala", parent=self.region_uganda)
        self.region_makerere = factories.Region(
            org=self.org, name="Makerere", parent=self.region_kampala)
        self.region_entebbe = factories.Region(
            org=self.org, name="Entebbe", parent=self.region_uganda)
        self.region_kenya = factories.Region(
            org=self.org, name="Kenya")
        self.region_nairobi = factories.Region(
            org=self.org, name="Nairobi", parent=self.region_kenya)
        self.region_mombasa = factories.Region(
            org=self.org, name="Mombasa", parent=self.region_kenya)
        self.region_inactive = factories.Region(
            org=self.org, name="Inactive", parent=self.region_nairobi,
            is_active=False)
        return Region.get_all(self.org)

    def test_variables_set(self):
        """Middleware should set several commonly-used region variables."""
        request = self.get_request(user=self.user, org=self.org, session={})
        self.middleware.process_request(request)
        self.assertTrue(hasattr(request, 'region'))
        self.assertTrue(hasattr(request, 'include_subregions'))
        self.assertTrue(hasattr(request, 'user_regions'))
        self.assertTrue(hasattr(request, 'data_regions'))

    def test_user_regions__unauthenticated(self):
        """User regions should be set to null for unauthenticated users."""
        request = self.get_request(user=AnonymousUser(), org=self.org)
        self.middleware.set_user_regions(request)
        self.assertIsNone(request.user_regions)

    def test_user_regions__no_org(self):
        """User regions should be set to null for non-org views."""
        request = self.get_request(user=self.user, org=None)
        self.middleware.set_user_regions(request)
        self.assertIsNone(request.user_regions)

    def test_user_regions(self):
        """User regions should be set to the value of get_all_regions."""
        self.make_regions()
        # Granting Kenya also grants its active descendants (Nairobi, Mombasa).
        self.region_kenya.users.add(self.user)
        request = self.get_request(user=self.user, org=self.org)
        self.middleware.set_user_regions(request)
        self.assertEqual(
            set(request.user_regions),
            set([self.region_kenya, self.region_nairobi, self.region_mombasa]))

    def test_include_subregions__default(self):
        """If key is not in the session, should default to True."""
        request = self.get_request(session={})
        self.middleware.set_include_subregions(request)
        self.assertTrue(request.include_subregions)

    def test_include_subregions__yes(self):
        """include_subregions should be retrieved from the session."""
        request = self.get_request(session={'include_subregions': True})
        self.middleware.set_include_subregions(request)
        self.assertTrue(request.include_subregions)

    def test_include_subregions__no(self):
        """include_subregions should be retrieved from the session."""
        request = self.get_request(session={'include_subregions': False})
        self.middleware.set_include_subregions(request)
        self.assertFalse(request.include_subregions)

    def test_data_regions__no_region(self):
        """If there is no current region, data_regions should be None."""
        request = self.get_request(user=self.user, region=None)
        self.middleware.set_data_regions(request)
        self.assertIsNone(request.data_regions)

    def test_data_regions__include_subregions(self):
        """Include all subregions user has access to if include_subregions is True."""
        self.make_regions()
        user_regions = Region.objects.filter(pk__in=(
            self.region_uganda.pk, self.region_kenya.pk, self.region_nairobi.pk))
        request = self.get_request(
            user=self.user, region=self.region_kenya, include_subregions=True,
            user_regions=user_regions)
        self.middleware.set_data_regions(request)
        # Mombasa is a subregion of Kenya but not in user_regions, so it is
        # excluded; Nairobi is both.
        self.assertEqual(
            set(request.data_regions),
            set([self.region_kenya, self.region_nairobi]))

    def test_data_regions__exclude_subregions(self):
        """Include only the current region if include_subregions is False."""
        self.make_regions()
        user_regions = Region.objects.filter(pk__in=(
            self.region_uganda.pk, self.region_kenya.pk, self.region_nairobi.pk))
        request = self.get_request(
            user=self.user, region=self.region_kenya, include_subregions=False,
            user_regions=user_regions)
        self.middleware.set_data_regions(request)
        self.assertEqual(
            set(request.data_regions),
            set([self.region_kenya]))

    def test_region__unauthenticated(self):
        """Current region should be None for an unauthenticated user."""
        request = self.get_request(user=AnonymousUser(), org=self.org)
        self.middleware.set_region(request)
        self.assertIsNone(request.region)

    def test_region__no_org(self):
        """Current region should be None if there is no current org."""
        request = self.get_request(user=self.user, org=None)
        self.middleware.set_region(request)
        self.assertIsNone(request.region)

    def test_region__not_set__admin(self):
        """If region_id is not in the session, admin will see All Regions."""
        self.make_regions()
        self.org.administrators.add(self.user)
        user_regions = Region.objects.filter(pk__in=(
            self.region_uganda.pk, self.region_kenya.pk, self.region_nairobi.pk))
        request = self.get_request(
            user=self.user, org=self.org, session={}, user_regions=user_regions)
        self.middleware.set_region(request)
        # None encodes "All Regions" for org administrators.
        self.assertIsNone(request.region)

    def test_region__not_set(self):
        """If region_id is not in the session, user will see first of their regions."""
        self.make_regions()
        user_regions = Region.objects.filter(pk=self.region_kenya.pk)
        request = self.get_request(
            user=self.user, org=self.org, session={}, user_regions=user_regions)
        self.middleware.set_region(request)
        self.assertEqual(request.region, self.region_kenya)

    def test_region__not_in_user_regions(self):
        """If region is not in user regions, return the first of the user's regions."""
        self.make_regions()
        user_regions = Region.objects.filter(pk=self.region_kenya.pk)
        request = self.get_request(
            user=self.user, org=self.org, session={'region_id': self.region_nairobi.pk},
            user_regions=user_regions)
        self.middleware.set_region(request)
        self.assertEqual(request.region, self.region_kenya)

    def test_region(self):
        """The region stored in the session should win when it is accessible."""
        self.make_regions()
        user_regions = Region.objects.filter(pk=self.region_kenya.pk)
        request = self.get_request(
            user=self.user, org=self.org, session={'region_id': self.region_kenya.pk},
            user_regions=user_regions)
        self.middleware.set_region(request)
        self.assertEqual(request.region, self.region_kenya)
| [
"rebecca@caktusgroup.com"
] | rebecca@caktusgroup.com |
f6e51d6c62e5a2656994ffe64f3dd8ac57d3e46d | 45d01fdd70a4a51135d6874c63176966cf063553 | /titanic_dnn.py | 39a6c72130fb0b14e89b882dc989676b34aac0dd | [] | no_license | LYoung-Hub/ML-Project | 74d86573e296d5994147219e7ddf08e38cd3ba1e | dedd860367fcba9ac318b54479fae42b15b85d05 | refs/heads/master | 2020-07-16T05:54:10.975956 | 2019-09-05T18:27:22 | 2019-09-05T18:27:22 | 205,733,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,939 | py | import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from keras.layers import Dense, Input
from keras.models import Model, load_model
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
def load_data(path):
    """Read a CSV file into a pandas DataFrame."""
    return pd.read_csv(path)
def modify_csv(path):
    """Re-read a prediction CSV, cast its 'Survived' column to int, and
    write the result to 'prediction_dnn.csv' (fixed output name)."""
    table = pd.read_csv(path)
    table['Survived'] = table['Survived'].astype(int)
    table.to_csv(path_or_buf='prediction_dnn.csv', index=False)
def cal_ratio():
    """Return the survival rate (mean of 'Survived') over titanic/train.csv."""
    return load_data('titanic/train.csv')['Survived'].mean()
class Titanic:
    """End-to-end Kaggle Titanic pipeline.

    Preprocesses the raw CSV data, trains a deep fully-connected Keras
    classifier on titanic/train.csv, and writes a submission file
    (prediction_dnn.csv) for titanic/test.csv.
    """

    # Scalers for the 'Age' and 'Fare' columns; class attributes, shared by
    # all instances (as in the original design).
    age_scaler = StandardScaler()
    fare_scaler = StandardScaler()

    def __init__(self):
        pass

    def data_pre_process(self, data, mode='test'):
        """Encode, impute and scale the raw passenger table.

        Args:
            data: DataFrame as read from train.csv/test.csv (mutated in place).
            mode: 'train' -> returns (features, labels); anything else
                returns (features, passenger_ids).

        Returns:
            Tuple whose first element is the feature matrix with columns
            [Pclass, Sex, Age, SibSp, Parch, Fare, Embarked] and whose
            second element depends on `mode`.
        """
        # variables selection, normalization
        data['Sex'] = data['Sex'].replace(['male', 'female'], [1, 0])
        data['Age'] = data['Age'].fillna(data['Age'].median())
        data['Embarked'] = data['Embarked'].replace(['C', 'S', 'Q'], [0, 1, 2])
        data['Embarked'] = data['Embarked'].fillna(3)  # 3 == unknown port
        data['Fare'] = data['Fare'].fillna(data['Fare'].median())
        feature = data[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].values
        if mode == 'train':
            # Column 2 is Age, column 5 is Fare.
            feature[:, 2] = np.reshape(self.age_scaler.fit_transform(np.reshape(feature[:, 2], (-1, 1))), (-1))
            feature[:, 5] = np.reshape(self.fare_scaler.fit_transform(np.reshape(feature[:, 5], (-1, 1))), (-1))
            label = data['Survived']
            return feature, label
        else:
            feature[:, 2] = np.reshape(self.age_scaler.fit_transform(np.reshape(feature[:, 2], (-1, 1))), (-1))
            # BUGFIX: the Fare column was previously scaled with age_scaler
            # in this branch; use fare_scaler, mirroring the train branch.
            # NOTE(review): both branches re-fit the scalers on the data they
            # receive; using transform() on test data would avoid leakage,
            # but that behavioral change is deliberately not made here.
            feature[:, 5] = np.reshape(self.fare_scaler.fit_transform(np.reshape(feature[:, 5], (-1, 1))), (-1))
            p_id = data['PassengerId']
            return feature, p_id

    def train(self):
        """Fit the MLP on titanic/train.csv.

        Checkpoints the best validation-loss weights to
        models/best_model.hdf5 and saves the final model to model_dnn.hdf5.
        """
        data = load_data('titanic/train.csv')
        feature, label = self.data_pre_process(data, 'train')
        # model structure: 7 inputs -> 21 x Dense(128) -> 64/32/16/8/4
        # funnel -> single sigmoid output
        input_data = Input(shape=(7, ))
        d = Dense(
            units=128,
            activation='relu'
        )(input_data)
        for i in range(0, 20):
            d = Dense(
                units=128,
                activation='relu'
            )(d)
        d = Dense(
            units=64,
            activation='relu'
        )(d)
        d = Dense(
            units=32,
            activation='relu'
        )(d)
        d = Dense(
            units=16,
            activation='relu'
        )(d)
        d = Dense(
            units=8,
            activation='relu'
        )(d)
        d = Dense(
            units=4,
            activation='relu'
        )(d)
        output = Dense(
            units=1,
            activation='sigmoid'
        )(d)
        model = Model(input_data, output)
        model.summary()
        adam = Adam(lr=0.0001)
        model.compile(
            optimizer=adam,
            loss='mse',
            metrics=['accuracy']
        )
        # callbacks: keep the best val_loss model, stop early, decay the LR
        callbacks = [
            ModelCheckpoint(
                filepath='models/best_model.hdf5',
                save_best_only=True,
                verbose=1,
                period=1,
                mode='auto',
                monitor='val_loss'
            ),
            EarlyStopping(
                monitor='val_loss',
                patience=20,
                mode='auto',
                verbose=1
            ),
            ReduceLROnPlateau(
                monitor='val_loss',
                patience=10,
                factor=0.1,
                mode='auto',
                min_lr=0.0,
                verbose=1
            )
        ]
        model.fit(
            x=feature,
            y=label,
            batch_size=1,
            epochs=10000,
            shuffle=True,
            validation_split=0.3,
            callbacks=callbacks,
            verbose=1
        )
        model.save('model_dnn.hdf5')

    def test(self):
        """Predict on titanic/test.csv and write prediction_dnn.csv.

        Returns:
            DataFrame with PassengerId and a float 0.0/1.0 'Survived'
            column (modify_csv() casts it to int afterwards).
        """
        data = load_data('titanic/test.csv')
        feature, p_id = self.data_pre_process(data)
        pre_id = p_id.reset_index(drop=True)
        model = load_model('models/best_model.hdf5')
        pre = model.predict(feature)
        # change to acceptable result: flatten (generalized from the
        # hard-coded 418-row reshape, so any test-set size works) and
        # threshold at the training-set survival rate instead of 0.5.
        pre = np.reshape(pre, -1)
        ratio = cal_ratio()
        pre = np.where(pre < ratio, 0.0, 1.0)
        # create csv
        prediction = pd.Series(data=pre, name='Survived').to_frame()
        result = pre_id.to_frame().join(prediction)
        result.to_csv(path_or_buf='prediction_dnn.csv', index=False)
        return result
# Entry point: train, write raw predictions, then cast the submission's
# 'Survived' column to int.
if __name__ == '__main__':
    ti = Titanic()
    ti.train()
    ti.test()
    modify_csv('prediction_dnn.csv')
| [
"yangliu2@caltech.edu"
] | yangliu2@caltech.edu |
4749b0a8d289a0bffffe9feb6bf473a6d619fda1 | dddddfc74ee69c5ba796c100298291398abef3ee | /ecommerceapp/products/serializers/__init__.py | 731713f4eac426fe0ecf1ecc58263781e22c00d7 | [] | no_license | faruk56-arch/Example-ecommerce-app | 1391704ca9dd849da8e63d74d6a062e957bfab89 | e00edfda3a9f66457f5b0d35e050b709a5facaf3 | refs/heads/main | 2023-08-20T15:53:55.490483 | 2021-10-29T13:27:58 | 2021-10-29T13:27:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | from .response.product_response import ProductReponseSerializer
from .request.create_product import CreateProductSerializer
| [
"jim.bienvenu@protonmail.com"
] | jim.bienvenu@protonmail.com |
c950717d1e6ed8a8f7faf513f103beacbf79a453 | a2a70494c68b3f9ca7f978a48119a4482f445a65 | /api/migrations/0001_initial.py | 2ed76955e1170c94f2174f9ea6bc97a805cf8529 | [] | no_license | nenadTod/secureCloud-viewer | 2283e7556482e9a934b70334283631226ac083fa | ab6594c21787661d30cd54a41895b694fc522348 | refs/heads/master | 2021-01-15T09:28:37.379609 | 2016-09-09T07:26:16 | 2016-09-09T07:26:16 | 58,070,936 | 2 | 0 | null | 2016-06-30T11:22:50 | 2016-05-04T17:30:17 | Python | UTF-8 | Python | false | false | 739 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-28 18:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Encryption',
fields=[
('id', models.CharField(max_length=64, primary_key=True, serialize=False)),
('public_key', models.CharField(max_length=3220)),
('private_key_part', models.CharField(max_length=3220)),
('recovery', models.CharField(max_length=3220)),
('password', models.CharField(max_length=3220)),
],
),
]
| [
"nemanja.miladinovic@live.com"
] | nemanja.miladinovic@live.com |
d81d2b43ac85608fa444af799aa07471d64e0421 | ed7176caf46fa282b1eb32b81dd1012042a667df | /FinalProject/kaan_code/lstm.py | 1409347c9f28e3dbfddad7f227a2ef02436b3704 | [] | no_license | xingziye/movie-reviews-sentiment | 072550123837a7da30b17aed687cf6518a855be8 | cff695e3aa241b10627c8d016114f3f8dc033c1e | refs/heads/master | 2021-08-28T02:27:30.460449 | 2017-12-11T03:28:47 | 2017-12-11T03:28:47 | 109,536,585 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,359 | py |
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords, movie_reviews
from nltk.stem import SnowballStemmer
from nltk.probability import FreqDist
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.linear_model import SGDClassifier
from sklearn import svm
from sklearn.model_selection import cross_val_score, train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Embedding, LSTM, GRU, SpatialDropout1D, Bidirectional
from keras.utils import np_utils
from keras.preprocessing import sequence
import numpy as np
import csv
import xgboost
np.random.seed(0)
class SentimentAnalysis:
    """Sentiment classification of movie-review phrases (Kaggle TSV format).

    Reads train/test TSV files, normalises the text (lowercase, tokenise,
    stopword removal, stemming, numeric-token removal), maps words to
    integer ids, pads the sequences, and evaluates one of several
    classifiers selected in run().
    """

    def readFile(self, filePath):
        """Read a tab-separated file and return (phrases, labels).

        Column 2 holds the phrase; column 3, present only in the training
        file, holds the sentiment label.
        """
        data = []
        y = []
        with open(filePath, 'r') as file:
            csvreader = csv.reader(file, delimiter='\t')
            next(csvreader)  # skip the header row
            for row in csvreader:
                data.append(row[2])
                if len(row) > 3:
                    y.append(row[3])
        return data, y

    def preprocess(self, data):
        """Normalise each phrase; returns a list of cleaned strings."""
        preprocessedCorpus = []
        for phrase in data:
            # All to lower case
            phrase = phrase.lower()
            # Split to tokens
            tokenizer = RegexpTokenizer(r'\w+')
            tokens = tokenizer.tokenize(phrase)
            # Stopword filtering
            nonstopTokens = [token for token in tokens if not token in self.stopWords]
            # Stemming
            stemmer = SnowballStemmer("english")
            for index, item in enumerate(nonstopTokens):
                stemmedWord = stemmer.stem(item)
                nonstopTokens[index] = stemmedWord
            # Remove numbers
            finalTokens = [token for token in nonstopTokens if not token.isnumeric()]
            # Add to corpus
            # BUGFIX: finalTokens was computed but the unfiltered
            # nonstopTokens was joined, so numeric tokens were never
            # actually removed; join the filtered list instead.
            preprocessedCorpus.append(" ".join(finalTokens))
        return preprocessedCorpus

    def extractFeatures(self, corpus):
        """Map each phrase to the list of integer ids of its words."""
        wordIds = []
        for phrase in corpus:
            wordIds.append([self.word2id[word] for word in phrase.split(" ")])
        return wordIds

    def classify(self, classifier_name):
        """Train and evaluate the named classifier.

        Sklearn models are scored with 5-fold cross-validation on
        (self.X, self.y); "XGBoost" runs xgboost.cv; "RNN" fits a
        bidirectional LSTM on the train/validation split built in run().
        """
        if classifier_name == "RandomForest":
            self.classifier = RandomForestClassifier(n_estimators=100)
            scores = cross_val_score(self.classifier, self.X, self.y, cv=5)
            print("Accuracy: %0.2f" %(scores.mean()))
        elif classifier_name == "SGD":
            self.classifier = SGDClassifier(loss="hinge", penalty="l2")
            scores = cross_val_score(self.classifier, self.X, self.y, cv=5)
            print("Accuracy: %0.2f" %(scores.mean()))
        elif classifier_name == "GradientBoosting":
            self.classifier = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
            scores = cross_val_score(self.classifier, self.X, self.y, cv=5)
            print("Accuracy: %0.2f" %(scores.mean()))
        elif classifier_name == "SVM":
            self.classifier = svm.LinearSVC()
            scores = cross_val_score(self.classifier, self.X, self.y, cv=5)
            print("Accuracy: %0.2f" %(scores.mean()))
        elif classifier_name == "XGBoost":
            params = {'max_depth':3, 'eta':1.0, 'silent':1, 'colsample_bytree':1,
                      'num_class':5, 'min_child_weight':2, 'objective':'multi:softprob'}
            numRounds = 4
            dataMatrix = xgboost.DMatrix(self.X, label=self.y, missing=-999)
            xgboost.cv(params, dataMatrix, numRounds, nfold=5, metrics={'merror'},
                       seed=0, callbacks=[xgboost.callback.print_evaluation(show_stdv=True)])
        elif classifier_name == "RNN":
            numLabels = 5
            trainLabels = np_utils.to_categorical(self.trainY, numLabels)
            testLabels = np_utils.to_categorical(self.testY, numLabels)
            self.classifier = Sequential()
            self.classifier.add(Embedding(len(self.vocabulary), 128))
            self.classifier.add(SpatialDropout1D(0.2))
            self.classifier.add(Bidirectional(LSTM(128)))
            self.classifier.add(Dense(numLabels))
            self.classifier.add(Activation('softmax'))
            self.classifier.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
            self.classifier.fit(self.trainX, trainLabels, validation_data=(self.testX, testLabels), epochs=5, batch_size=256, verbose=1)

    def run(self):
        """Execute the full pipeline: preprocess, featurize, classify."""
        # Preprocessing
        train_corpus = self.preprocess(self.trainData)
        test_corpus = self.preprocess(self.testData)
        # Generate dictionary
        wordFreq = FreqDist([word for phrase in train_corpus+test_corpus for word in phrase.split(" ")])
        self.vocabulary = list(wordFreq.keys())
        self.word2id = {word: i for i, word in enumerate(self.vocabulary)}
        # Extracting features
        self.X = self.extractFeatures(train_corpus)
        self.testData = self.extractFeatures(test_corpus)
        # Determine max sequence length: the 80th percentile of all lengths
        lenStats = sorted([len(phrase) for phrase in self.X+self.testData])
        maxLength = lenStats[int(len(lenStats)*0.8)]
        # Pad sequences
        self.X = sequence.pad_sequences(np.array(self.X), maxlen=maxLength)
        self.testData = sequence.pad_sequences(np.array(self.testData), maxlen=maxLength)
        # Split validation set
        self.trainX, self.testX, self.trainY, self.testY = train_test_split(self.X, self.y, test_size=0.1, random_state=0)
        # Classify
        classifier_name = "XGBoost"
        self.classify(classifier_name)

    def __init__(self, trainPath, testPath):
        """Load both TSV files and immediately run the pipeline."""
        self.trainData, self.y = self.readFile(trainPath)
        self.testData, _ = self.readFile(testPath)
        self.stopWords = set(stopwords.words("english"))
        self.run()
# Entry point: the constructor runs the whole pipeline
# (read -> preprocess -> featurize -> classify).
if __name__ == "__main__":
    pre = SentimentAnalysis("./train.tsv", "./test.tsv")
| [
"xingziye46@gmail.com"
] | xingziye46@gmail.com |
9d15a1a622b160728e64599a1b2a78f6bb562ab6 | e6cc0f8659d11fd6705a7fe991a2f2786f583a51 | /simulation/tao/opticalpar/inte.py | e751a238f2038bb7bb9d17893bf9370658661284 | [] | no_license | arlier/arlierwork | 9fbaf74a075a30de276dfc8246e84a56c5873f4c | 53d28318fadf476b0616dccd7de732117affb5a6 | refs/heads/master | 2020-04-27T20:14:45.272701 | 2019-05-09T07:00:01 | 2019-05-09T07:00:01 | 174,650,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | #!/usr/bin/env python
# -*-coding:utf-8 -*-
import numpy as np
from scipy import interpolate
import pylab as pl
# Read the EJ-200 emission spectrum (218 "wavelength intensity" rows) and
# interpolate it onto a 1 nm grid from 300 to 600 nm, plotting the curve and
# writing the sampled values to ej200out.txt.  Python 2 script
# (`print >> pf` redirection below).
# NOTE(review): the input file handle `f` is never closed; the name is later
# rebound to the interpolant returned by interp1d.
f = open('/media/tao/_dde_data/arlierwork/simulation/tao/opticalpar/ej200.txt')
s = f.readline()
a1=[]
a2=[]
count=0
while (count<218):
    arr=s.split(' ')
#    print arr
#    print arr[1]
#    a1=arr[0]
    a1.append(float(arr[0]))
    a2.append(float(arr[1].replace('\r\n',''))) # readline keeps the trailing newline by default
#    a2=arr[1].replace('\n','') # readline keeps the trailing newline by default
    s=f.readline()
    count+=1
print(a1)
print(a2)
#x=np.linspace(0,10,11)
#x=[ 0.  1.  2.  3.  4.  5.  6.  7.  8.  9. 10.]
x=np.array(a1)
#y=np.sin(x)
y=np.array(a2)
print(x)
#xnew=np.linspace(0,10,101)
xnew=np.linspace(300,600,301)
pl.figure(figsize=(8,8))
#axes = pl.plot(111)
fig=pl.plot(x,y,"o",label='the original data',c=(0,0.0,0.0),alpha=0.5)
pl.suptitle('EJ-200 emission spectrum and interpolation')
#pl.plot(x,y,"ro",label='the original data',c=(1,0.2,0.5),alpha=0.5)
pl.style.use('ggplot')
#pl.grid(axis='x')
pl.grid(True)
#pl.grid(color='g',linewidth=2,alpha=0.2,ls='--',lw=1)
#pl.axis([350,550,0,1])
pl.xlabel('wave length [nm]',color='k',fontsize=15,rotation=0)
pl.ylabel('light output [A.U]',color='k',fontsize=15,rotation=90)
#pl.xticks([420,440,460,480],['420','440','490','480'])
for kind in ["slinear"]:  # interpolation kind
#for kind in ["nearest","zero","slinear","quadratic","cubic"]:  # interpolation kinds
    # "nearest","zero" are step (staircase) interpolation
    # "slinear" is piecewise-linear interpolation
    # "quadratic","cubic" are 2nd/3rd order B-spline interpolation
    f=interpolate.interp1d(x,y,kind=kind)
    # 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of first, second or third order
    ynew=f(xnew)
    pl.plot(xnew,ynew,label=str(kind)+ ' interpolated data',color='g',markersize=.31)
pl.legend(loc="upper right")
pl.savefig("emissionej200.eps")
# Dump the interpolated curve as "wavelength value" pairs.
pf = open("ej200out.txt","w")
i=1
while i<=len(xnew):
    print >> pf, "%3d %0.5f" % (xnew[i-1], ynew[i-1])
    i += 1
pf.close()
pl.show()
| [
"1278118931@qq.com"
] | 1278118931@qq.com |
4d6aefd0642247c5b2935aa0f2469e88ddd516de | 46be054f2e4a5a0305dc5892c37778613fdc2024 | /function.py | 0831871ecadfe73380b4cc2364cad227051d855c | [] | no_license | YuqingDuan/Python_02 | ece24f1f3cfca95447dae5e2fa868d9915edd115 | 1852ac3f7689bc7ddd1f02dc279f408f7e54ebb1 | refs/heads/master | 2020-04-04T07:55:59.872903 | 2018-11-01T23:26:46 | 2018-11-01T23:26:46 | 155,765,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | #作用域
'''
i=10
print(i)
def func01():
j=11
j+=1
print(j)
func01()
def func02():
global k
k=12
k+=1
print(k)
func02()
'''
# Function definition and invocation
def abc():
    """Print a fixed demo string."""
    message = "abcd!"
    print(message)
abc()
# Formal vs. actual parameters
def function1(a, b):
    """Print the larger of the two values (ties print the second one)."""
    larger = a if a > b else b
    print(larger)
function1(3,4)
| [
"yduan@laurentian.ca"
] | yduan@laurentian.ca |
7e60a4df9930178e0ae0a8e732141a2219d3acd4 | a0cbbc57dd1b583ab66ce37ad8c6970e74a600ba | /raylab/policy/modules/model/stochastic/single.py | 0aa460ac8354ed4246fca21f0ef0ac8245a399ee | [
"MIT"
] | permissive | GapData/raylab | ccf6c39ea20d5568561207d92a4b9097657fb909 | c5e862334dc1f29a09b42286ddcc40e72c6eb3a2 | refs/heads/master | 2022-12-19T07:09:45.799180 | 2020-09-29T17:09:54 | 2020-09-29T17:09:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,157 | py | """NN modules for stochastic dynamics estimation."""
from dataclasses import dataclass
from typing import List
from typing import Tuple
import torch
import torch.nn as nn
from gym.spaces import Box
from torch import Tensor
import raylab.torch.nn as nnx
import raylab.torch.nn.distributions as ptd
from raylab.policy.modules.networks.mlp import StateActionMLP
from raylab.utils.types import TensorDict
SampleLogp = Tuple[Tensor, Tensor]
class StochasticModel(nn.Module):
    """Represents a stochastic model as a conditional distribution module.

    Composes two submodules: ``params`` maps an (obs, action) pair to a
    TensorDict of distribution parameters, and ``dist`` is a
    ConditionalDistribution consuming those parameters.  The
    ``@torch.jit.export`` decorators keep the non-forward methods callable
    on a scripted (TorchScript) module.
    """
    def __init__(
        self, params_module: nn.Module, dist_module: ptd.ConditionalDistribution
    ):
        super().__init__()
        self.params = params_module
        self.dist = dist_module
    def forward(self, obs, action) -> TensorDict:  # pylint:disable=arguments-differ
        # The forward pass only computes distribution parameters; sampling
        # and densities are the separate exported methods below.
        return self.params(obs, action)
    @torch.jit.export
    def sample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp:
        """
        Generates a sample_shape shaped sample or sample_shape shaped batch of
        samples if the distribution parameters are batched. Returns a (sample, log_prob)
        pair.
        """
        return self.dist.sample(params, sample_shape)
    @torch.jit.export
    def rsample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp:
        """
        Generates a sample_shape shaped reparameterized sample or sample_shape
        shaped batch of reparameterized samples if the distribution parameters
        are batched. Returns a (rsample, log_prob) pair.
        """
        return self.dist.rsample(params, sample_shape)
    @torch.jit.export
    def log_prob(self, next_obs: Tensor, params: TensorDict) -> Tensor:
        """
        Returns the log probability density/mass function evaluated at `next_obs`.
        """
        return self.dist.log_prob(next_obs, params)
    @torch.jit.export
    def cdf(self, next_obs: Tensor, params: TensorDict) -> Tensor:
        """Returns the cumulative density/mass function evaluated at `next_obs`."""
        return self.dist.cdf(next_obs, params)
    @torch.jit.export
    def icdf(self, prob, params: TensorDict) -> Tensor:
        """Returns the inverse cumulative density/mass function evaluated at `prob`."""
        return self.dist.icdf(prob, params)
    @torch.jit.export
    def entropy(self, params: TensorDict) -> Tensor:
        """Returns entropy of distribution."""
        return self.dist.entropy(params)
    @torch.jit.export
    def perplexity(self, params: TensorDict) -> Tensor:
        """Returns perplexity of distribution."""
        return self.dist.perplexity(params)
    @torch.jit.export
    def reproduce(self, next_obs, params: TensorDict) -> SampleLogp:
        """Produce a reparametrized sample with the same value as `next_obs`."""
        return self.dist.reproduce(next_obs, params)
    @torch.jit.export
    def deterministic(self, params: TensorDict) -> SampleLogp:
        """
        Generates a deterministic sample or batch of samples if the distribution
        parameters are batched. Returns a (rsample, log_prob) pair.
        """
        return self.dist.deterministic(params)
class ResidualMixin:
    """Overrides StochasticModel interface to model state transition residuals.

    The wrapped distribution operates on deltas ``next_obs - obs``: the
    conditioning observation is stashed in the parameter dict by ``forward``
    and added back to every generated sample (or subtracted from every
    query point before density evaluation).  Relies on the host class
    providing ``self.params`` and ``self.dist``.
    """
    # pylint:disable=missing-function-docstring,not-callable
    def forward(self, obs: Tensor, action: Tensor) -> TensorDict:
        params = self.params(obs, action)
        # Keep the conditioning observation so samples can be shifted back
        # into absolute-state space.
        params["obs"] = obs
        return params
    @torch.jit.export
    def sample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp:
        res, log_prob = self.dist.sample(params, sample_shape)
        return params["obs"] + res, log_prob
    @torch.jit.export
    def rsample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp:
        res, log_prob = self.dist.rsample(params, sample_shape)
        return params["obs"] + res, log_prob
    @torch.jit.export
    def log_prob(self, next_obs: Tensor, params: TensorDict) -> Tensor:
        return self.dist.log_prob(next_obs - params["obs"], params)
    @torch.jit.export
    def cdf(self, next_obs: Tensor, params: TensorDict) -> Tensor:
        return self.dist.cdf(next_obs - params["obs"], params)
    @torch.jit.export
    def icdf(self, prob, params: TensorDict) -> Tensor:
        residual = self.dist.icdf(prob, params)
        return params["obs"] + residual
    @torch.jit.export
    def reproduce(self, next_obs, params: TensorDict) -> SampleLogp:
        sample_, log_prob_ = self.dist.reproduce(next_obs - params["obs"], params)
        return params["obs"] + sample_, log_prob_
    @torch.jit.export
    def deterministic(self, params: TensorDict) -> SampleLogp:
        sample, log_prob = self.dist.deterministic(params)
        return params["obs"] + sample, log_prob
class DynamicsParams(nn.Module):
    """Composes an encoder and a parameter head into one dynamics network.

    Args:
        encoder: Module mapping state-action pairs to 1D feature vectors
        params: Module mapping those feature vectors to distribution parameters
    """

    def __init__(self, encoder: nn.Module, params: nn.Module):
        super().__init__()
        self.encoder = encoder
        self.params = params

    def forward(self, obs, actions):  # pylint:disable=arguments-differ
        # Encode the state-action pair, then map features to parameters.
        features = self.encoder(obs, actions)
        return self.params(features)
@dataclass
class MLPModelSpec(StateActionMLP.spec_cls):
    """Specifications for stochastic mlp model network.
    Inherits parameters from `StateActionMLP.spec_cls`.
    Args:
        units: Number of units in each hidden layer
        activation: Nonlinearity following each linear layer
        delay_action: Whether to apply an initial preprocessing layer on the
            observation before concatenating the action to the input.
        standard_scaler: Whether to transform the inputs of the NN using a
            standard scaling procedure (subtract mean and divide by stddev). The
            transformation mean and stddev should be fitted during training and
            used for both training and evaluation.
        fix_logvar_bounds: Whether to use fixed or dynamically adjusted
            bounds for the log-scale outputs of the network.
        input_dependent_scale: Whether to parameterize the Gaussian standard
            deviation as a function of the state and action
    """
    # Defaults: fixed log-variance bounds and state-action-dependent stddev.
    fix_logvar_bounds: bool = True
    input_dependent_scale: bool = True
class MLPModel(StochasticModel):
    """Stochastic model with multilayer perceptron state-action encoder.
    Attributes:
        params: NN module mapping obs-act pairs to obs dist params
        dist: NN module implementing the distribution API
        encoder: NN module used in `params` to map obs-act pairs to vector
            embeddings
    """
    spec_cls = MLPModelSpec
    def __init__(self, obs_space: Box, action_space: Box, spec: MLPModelSpec):
        encoder = StateActionMLP(obs_space, action_space, spec)
        # Head producing mean and (log-)scale for each observation dimension.
        params = nnx.NormalParams(
            encoder.out_features,
            obs_space.shape[0],
            input_dependent_scale=spec.input_dependent_scale,
            bound_parameters=not spec.fix_logvar_bounds,
        )
        if spec.fix_logvar_bounds:
            # Hard-coded log-variance bounds; safe here since no autograd
            # graph exists yet at construction time.
            params.max_logvar.fill_(2)
            params.min_logvar.fill_(-20)
        params = DynamicsParams(encoder, params)
        # Diagonal Gaussian over the observation vector (last dim is the event).
        dist = ptd.Independent(ptd.Normal(), reinterpreted_batch_ndims=1)
        super().__init__(params, dist)
        # Can only assign modules and parameters after calling nn.Module.__init__
        self.encoder = encoder
    def initialize_parameters(self, initializer_spec: dict):
        """Initialize all encoder parameters.
        Args:
            initializer_spec: Dictionary with mandatory `name` key corresponding
                to the initializer function name in `torch.nn.init` and optional
                keyword arguments.
        """
        self.encoder.initialize_parameters(initializer_spec)
class ResidualMLPModel(ResidualMixin, MLPModel):
    """Residual stochastic multilayer perceptron model.

    MRO note: `ResidualMixin` must precede `MLPModel` so its overrides of the
    sampling/evaluation methods take effect; the network then predicts
    observation deltas rather than absolute next observations.
    """
| [
"angelolovatto@gmail.com"
] | angelolovatto@gmail.com |
59128387488db0592ddb5fef863061a8952d1da3 | 929cdbe211fbf254e1ec8122f9b48fa32520232c | /analysisflow.py | 0f35da16f85c1ab0b53e1d567b3def8ec7103f46 | [] | no_license | arizzi/nail | c8edec306628cecd269ad9d4241100afdbf6a7fc | a5ba9aed1bcc266cd9d9a36167ce66e51d851e8f | refs/heads/master | 2023-05-11T15:55:34.038861 | 2023-05-05T12:56:42 | 2023-05-05T12:56:42 | 162,547,201 | 3 | 1 | null | 2023-02-22T15:40:31 | 2018-12-20T08:09:10 | Python | UTF-8 | Python | false | false | 3,501 | py | from .nail import *
# Construct the processing flow *before* any SetAlias call -- it was
# previously created after the first aliases, raising a NameError at import.
flow = SampleProcessing("")
flow.SetAlias("n(.*)", "\\1.size()", defaultPersitency=True)  # NOTE(review): keyword spelled "Persitency" here but "Persistency" below -- confirm nail's API
flow.SetAlias(
    "(.*)_p4", "{TLorentzVector ret; ret.SetPtEtaPhiM(\\1_pt,\\1_eta,\\1_phi,\\1_mass); return ret;}", defaultPersistency=False)
# SubCollection action
# BUG FIX: "\1"/"\2" were octal escapes (chars \x01/\x02); escaped backslashes
# restore the intended literal back-references.  NOTE(review): the pattern's
# "([\.*\])" still looks like an unterminated character class -- confirm intent.
flow.SetAlias("SelectedMuon_(.*)([\.*\])", "Muon_\\1[SelectedMuon[\\2]]")
# cuts value should not be hardcoded below but rather being declared here so that scans and optimizations are possible
flow.DefaultConfig(muIsoCut=0.13, muIdCut=3, muPtCut=25)
# Higgs to mumu reconstruction
# Maps to plain RDF VecOps
flow.DefineCollAttr("Muon_id", "Muon_tightId*3+Muon_looseId")
# this should generate some kind of wrapper/ref that can be used as the parent collection
flow.SubCollection("SelectedMuon", "Muon",
                   sel="Muon_iso < muIsoCut && Muon_id > muIdCut && Muon_pt > muPtCut")
flow.Filter("twoOppositeSignMuons",
            "nSelectedMuon==2 && SelectedMuon_charge[0]*SelectedMuon_charge[1] < 0")
# p4 should be handled somehow ... any syntax is ok such as p4(SelectedMuon[0]) or _p4 or .p4 etc..
flow.Define("Higgs", "p4at(SelectedMuon,0)+p4at(SelectedMuon,1)",
            requires=["twoOppositeSignMuons"])
# the following could work
# define p4at(x,y) ROOT::Math::PtEtaPhiMVector(x##_pt[y] , x##_eta[y], x##_phi[y], x##_mass[y])
# define p4(x) ROOT::Math::PtEtaPhiMVector(x##_pt , x##_eta, x##_phi, x##_mass)
# VBF Jets kinematics
flow.DefaultConfig(jetPtCut=25)
# BUG FIX: the C++ selection string below had an unbalanced "(" -- closed it.
flow.SubCollection("SelectedJet", "Jet",
                   "Jet_pt > jetPtCut && (Jet_muonIdx1 == -1 || Muon_iso[Jet_muonIdx1] > muIsoCut || Muon_id[Jet_muonIdx1] > 0)")
flow.Filter("twoJets", "nSelectedJet>=2")
flow.Define("Qjet1", "SelectedJet[0].p4()", requires=["twoJets"])
flow.Define("Qjet2", "SelectedJet[1].p4()", requires=["twoJets"])
flow.Define("qq", "Qjet1+Qjet2")
flow.Define("Mqq", "qq.M()")
flow.Define("qq_pt", "qq.Pt()")
flow.Define("qqDeltaEta", "TMath::Abs(Qjet1.Eta()-Qjet2.Eta())")
flow.Define("qqDeltaPhi", "TMath::Abs(Qjet1.DeltaPhi(Qjet2))")
# QQ vs ll kinematic
flow.Define(
    "ll_ystar", "Higgs.Rapidity() - (Qjet1.Rapidity() + Qjet2.Rapidity())")
flow.Define(
    "ll_zstar", " TMath::Abs( ll_ystar/ (Qjet1.Rapidity()-Qjet2.Rapidity() )) ")
flow.Define("DeltaEtaQQSum",
            "TMath::Abs(Qjet1.Eta()) + TMath::Abs(Qjet2.Eta())")
flow.Define("PhiZQ1", "TMath::Abs(Higgs.DeltaPhi(Qjet1))")
flow.Define("PhiZQ2", "TMath::Abs(Higgs.DeltaPhi(Qjet2))")
flow.Define("EtaHQ1", "TMath::Abs(Higgs.Eta() - Qjet1.Eta())")
flow.Define("EtaHQ2", "TMath::Abs(Higgs.Eta() - Qjet2.Eta())")
flow.Define("DeltaRelQQ", "(Qjet1+Qjet2).Pt()/( Qjet1.Pt()+Qjet2.Pt())")
flow.Define(
    "Rpt", "(Qjet1+Qjet2+ Higgs).Pt()/( Qjet1.Pt()+Qjet2.Pt() + Higgs.Pt())")
flow.DefaultConfig(higgsMassWindowWidth=15, mQQcut=400, nominalHMass=125.03)
flow.Filter("MassWindow", "abs(Higgs_m-nominalHMass)<higgsMassWindowWidth")
flow.Filter("SideBand", "! MassWindow")
flow.Filter("VBFRegion", "Mqq > mQQcut")
flow.Filter("SignalRegion", "VBFRegion && MassWindow")
# flow.Trainable("SBClassifier","evalMVA",["Higgs_pt","Higgs_m","Mqq","Rpt","DeltaRelQQ"],splitMode="TripleMVA",requires="VBFRegion")
print(flow.NeededInputs())
# flow.AddSystematic("MuScaleUp","Muon_pt","Muon_pt*1.01") #name, target, replacement
# flow.AddSystematic("HMassUncertainityUp","nominalHMass","125.1") #name, target, replacement
# flow.OptimizationScan("MuPtCutScan","muPtCut","30") #name, target, replacement
#from samples import background,signal,data
| [
"andrea.rizzi@cern.ch"
] | andrea.rizzi@cern.ch |
86fdb33960f6d66a9f2a7859616d8be12c31da46 | 41bd31aaee1038428b78a29e27d33d9639b90787 | /scripts/pat_gui.py | f63e45c026353a3c5f91a3ed79122677c68a46ee | [] | no_license | gregglegarda/WBC_Recognition_DEMO | fb603a1bcf58f0126baa5024b0276dc5342ea9a2 | c716113ddcb685b183229bdd3987e8aa63c541a1 | refs/heads/master | 2022-12-04T05:09:01.894837 | 2020-04-25T07:26:20 | 2020-04-25T07:26:20 | 242,291,853 | 0 | 2 | null | 2022-11-22T04:59:33 | 2020-02-22T06:28:12 | Python | UTF-8 | Python | false | false | 16,102 | py | from PyQt5.QtWidgets import (QMainWindow, QApplication, QComboBox, QDialog, QTableView,
QDialogButtonBox, QFormLayout, QGridLayout, QGroupBox, QHBoxLayout,
QLabel, QLineEdit, QMenu, QMenuBar, QPushButton, QSpinBox, QTextEdit,
QVBoxLayout,QMessageBox, QSizePolicy,QAbstractItemView)
from PyQt5.QtGui import QPalette,QColor, QIcon, QPixmap
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import csv
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import os
import ntpath
def runit(app, fn, ln,dob,ssn):
    # Create the patient window and enter the Qt event loop; returns the
    # window plus the event-loop exit code so the caller can clean up.
    gui = patient_gui(app, fn, ln,dob,ssn)
    run = app.exec_()
    return gui, run
def stop(run):
    # Terminate the process with the event loop's exit code.
    sys.exit(run)
class patient_gui(QMainWindow):
def __init__(self, app, pat_fn, pat_ln,pat_dob,pat_ssn):
self.app = app
super(patient_gui, self).__init__()
#=================VIEW IN TABLE INFO
self.firstname_info = pat_fn
self.lastname_info = pat_ln
self.dob_info = pat_dob
self.ssn_info = pat_ssn
##the main widget layout
self.widget = QtWidgets.QWidget()
self.setCentralWidget(self.widget)
self.widget.setLayout(QtWidgets.QGridLayout())
self.widget.layout().setContentsMargins(10, 10, 10, 10)
self.widget.layout().setSpacing(10)
self.setWindowTitle("Patient Results")
#self.widget.layout().setColumnMinimumWidth(0, 50)
#self.widget.layout().setColumnMinimumWidth(3, 50)
#self.widget.layout().setRowMinimumHeight(0, 50)
self.widget.layout().setRowMinimumHeight(1, 500)
self.showMaximized()
# THEME COLOR
self.setStyleSheet("QMainWindow {background-image: url(background/background.jpg)}")
print("Patient GUI Screen")
#=================== GROUPS ====================#
# Small group1
self.GroupBox1 = QGroupBox()
layout1 = QGridLayout()
self.GroupBox1.setLayout(layout1)
layout1.setSpacing(5)
self.widget.layout().addWidget(self.GroupBox1, 0, 0, 3, 2)
# Small group2
self.GroupBox2 = QGroupBox()
layout2 = QGridLayout()
self.GroupBox2.setLayout(layout2)
layout2.setSpacing(5)
self.widget.layout().addWidget(self.GroupBox2, 0, 2, 3, 2)
# Small group3 (in group box 2)
self.GroupBox3 = QGroupBox()
layout3 = QGridLayout()
self.GroupBox3.setLayout(layout3)
layout3.setContentsMargins(60, 10, 10, 10)
layout3.setSpacing(5)
layout2.addWidget(self.GroupBox3, 2, 0, 1, 3)
self.GroupBox3.setStyleSheet("QGroupBox {background-image: url(background/image.png)}")
# Small group4 (in group box 2)
#self.GroupBox4 = QGroupBox()
#layout4 = QGridLayout()
#self.GroupBox4.setLayout(layout4)
#layout4.setContentsMargins(60, 10, 10, 10)
#layout4.setSpacing(5)
#layout2.addWidget(self.GroupBox4, 2, 2, 1, 1)
#self.GroupBox4.setStyleSheet("QGroupBox {background-image: url(background/image.png)}")
#==================# LOGOUT BUTTON #==================#
button_logout = QPushButton('Logout')
button_logout.clicked.connect(self.logout_success)
self.widget.layout().addWidget(button_logout, 3, 3, 1,1)
# ==================# NORMAL AND ABNORMAL BUTTON #==================#
# View Differential BUTTON
#pushButtonNormalDiffs = QtWidgets.QPushButton(self.widget)
#pushButtonNormalDiffs.setText("View Differential Results")
# pushButtonNormalDiffs.clicked.setText(QColor.blue("Normal Results"))
#pushButtonNormalDiffs.clicked.connect(self.on_pushButtonLoad_clicked4)
#layout1.addWidget(pushButtonNormalDiffs, 0, 2, 1, 1)
#==================# TABLE DATABASE #==================#
filename2 = os.path.expanduser("~/Desktop/WBC_Recognition_DEMO/records/diff_records_outofrange.csv")
self.fileName2 = filename2
filename4 = os.path.expanduser("~/Desktop/WBC_Recognition_DEMO/records/diff_records_normal.csv")
self.fileName4 = filename4
filename5 = os.path.expanduser("~/Desktop/WBC_Recognition_DEMO/records/diff_records_abnormal.csv")
self.fileName5 = filename5
self.items_all = []
#self.on_pushButtonLoad_clicked4
#set model settings
self.model = QtGui.QStandardItemModel(self.widget)
self.model.setHorizontalHeaderLabels(['Accession ID', 'Acc Date', 'First Name', 'Last Name', 'DOB', 'SSN', 'EOS %', 'LYM %','MON %', 'NEU %', 'Initial Result', 'Final Result'])
self.tableView = QTableView(self.widget)
self.tableView.setModel(self.model)
self.tableView.horizontalHeader().setStretchLastSection(True)
self.tableView.setSortingEnabled(True)
self.model.rowsInserted.connect(lambda: QtCore.QTimer.singleShot(0, self.tableView.scrollToBottom))
self.tableView.setEditTriggers(QAbstractItemView.NoEditTriggers)
#set widths
self.tableView.setColumnWidth(0, 200)#id
self.tableView.setColumnWidth(1, 150)#date
self.tableView.setColumnWidth(2, 120)#first
self.tableView.setColumnWidth(3, 120)#last
self.tableView.setColumnWidth(4, 120)# DOB
self.tableView.setColumnWidth(5, 120)# SSN
self.tableView.setColumnWidth(6, 60)# E
self.tableView.setColumnWidth(7, 60)# L
self.tableView.setColumnWidth(8, 60)# M
self.tableView.setColumnWidth(9, 60) # N
self.tableView.setColumnWidth(10, 150) # Initial result
#hide some columns
self.tableView.setColumnHidden(2, True)
self.tableView.setColumnHidden(3, True)
self.tableView.setColumnHidden(4, True)
self.tableView.setColumnHidden(5, True)
self.tableView.setColumnHidden(6, True)
self.tableView.setColumnHidden(7, True)
self.tableView.setColumnHidden(8, True)
self.tableView.setColumnHidden(9, True)
self.tableView.setColumnHidden(10, False)
#layout1.addRow(self.tableView)
layout1.addWidget(self.tableView, 1, 0, 1, 4)
#show table on login
self.on_pushButtonLoad_clicked4()
# ==================# EDIT AND VIEW BUTTON #==================#
# Qlineedit
self.line_edit_viewImage = QLineEdit()
self.line_edit_viewImage.setPlaceholderText('Enter Accession ID')
self.line_edit_viewImage.mousePressEvent = lambda _: self.line_edit_viewImage.selectAll()
layout2.addWidget(self.line_edit_viewImage, 0, 0, 1, 2)
# view button
viewImage_button = QPushButton('View Differential')
viewImage_button.clicked.connect(self.button_find_specimen_clicked)
# layout2.addRow(self.line_edit_viewImage, viewImage_button)
layout2.addWidget(viewImage_button, 0, 2, 1, 1)
# ==================# END EDIT AND VIEW BUTTON #==================#
# ==================# SPECIMEN INFO BOX #==================#
self.specimen_info_label = QLabel()
self.specimen_info_label.setAlignment(QtCore.Qt.AlignTop)
self.specimen_info_label.setText(
'\t'
'\n\t'
'\n\t'
'\n\t'
'\n\t'
'\n\t')
layout3.addWidget(self.specimen_info_label, 0, 0, 1, 2)
# ==================# END OF SPECIMEN INFO BOX #==================#
# ==================# WBC RESULTS BOX #==================#
#self.specimen_results_label = QLabel()
#self.specimen_results_label.setAlignment(QtCore.Qt.AlignTop)
#self.specimen_results_label.setText(
#'\t'
#'\n\t'
#'\n\t'
#'\n\t'
#'\n\t'
#'\n\t')
#layout4.addWidget(self.specimen_results_label, 0, 0, 1, 2)
# ==================# END OF WBC RESULTS BOX #==================#
############################## FUNCTIONS #########################
# ==============# FUNCTION (VIEW DIFFERENTIAL RESULTS)#==============#
    @QtCore.pyqtSlot()
    def on_pushButtonLoad_clicked4(self):
        # Slot: (re)load the differential results table from all three CSVs.
        self.loadCsv4(self.fileName2, self.fileName4, self.fileName5)
def loadCsv4(self, fileName2, fileName4, fileName5):
while (self.model.rowCount() > 0):
self.model.removeRow(0)
try:
with open(fileName2, "r") as fileInput2:
for row2 in csv.reader(fileInput2):
if self.firstname_info == row2[2] and self.lastname_info == row2[3]:
row2.append('PENDING')
self.items_all = [
QtGui.QStandardItem(field2)
for field2 in row2
]
self.model.appendRow(self.items_all)
except:
print("No Out of range Database")
try:
with open(fileName4, "r") as fileInput4:
for row4 in csv.reader(fileInput4):
if self.firstname_info == row4[2] and self.lastname_info == row4[3]:
self.items_all = [
QtGui.QStandardItem(field4)
for field4 in row4
]
self.model.appendRow(self.items_all)
except:
print("No Normal Database")
try:
with open(fileName5, "r") as fileInput5:
for row5 in csv.reader(fileInput5):
if self.firstname_info == row5[2] and self.lastname_info == row5[3]:
self.items_all = [
QtGui.QStandardItem(field5)
for field5 in row5
]
self.model.appendRow(self.items_all)
except:
print("No Abnormal Database")
# ===============# LOGOUT FUNCTION#===============#
    def logout_success(self):
        # Confirm the logout with a modal message box, then close this window
        # (which hands control back to the login screen).
        msg = QMessageBox()
        msg.setText('Logged out successful')
        msg.exec_()
        ## go to login screen
        self.close()
        #self.app.quit()
# ===============# FIND IMAGE BUTTON AND VIEW#===============#
@QtCore.pyqtSlot()
def button_find_specimen_clicked(self):
# show the specimen info and results
editline = self.line_edit_viewImage.text()
pat_text_format = ('<p>'
'<br/><b><h3>PATIENT INFORMATION</h3></b>'
'Patient First Name {cc}'
'<br/>Patient Last Name {dd}'
'<br/>Date of Birth {ee}'
'<br/>Social Security Number {ff}'
'<br/>'
'<br/><b><h3>SPECIMEN INFORMATION</h3></b>'
'Accession ID {aa}'
'<br/>Accession Date/Time {bb}'
'<br/>'
'<br/><h3><b>SPECIMEN RESULT</b></h3>'
'INITIAL RESULT {kk}'
'<br/>Eosinophil % {gg}'
'<br/>Lymphocyte % {hh}'
'<br/>Monocyte % {ii}'
'<br/>Neutrophil % {jj}'
'<br/>'
'<br/><b><h3>DOCTORS COMMENTS</h3></b>'
'<html><body><pre style=font-family:Arial>{ll}</pre></body></html>'
'</p>')
############## OUT OF RANGE ##################
try:
aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll = self.readCsvForSpecInfo(self.fileName2, editline)
# show speciment info
self.specimen_info_label.setText(
pat_text_format
.format(aa=aa, bb=bb, cc=cc, dd=dd, ee=ee, ff=ff, gg=gg, hh=hh, ii=ii, jj=jj, kk=kk, ll=ll))
# show results
#self.specimen_results_label.setText(
#'Eosinophil %\t\t{gg}'
#'\nLymphocyte %\t\t{hh}'
#'\nMonocyte %\t\t{ii}'
#'\nNeutrophil %\t\t{jj}'
#'\n'
#'\nINITIAL RESULT\t\t\t{kk}'
#'\nDOCTORS COMMENTS\t\t\t{ll}'
#.format(gg=gg, hh=hh, ii=ii, jj=jj, kk=kk, ll=ll))
except:
print("Edit Line Empty for abnormal")
################ ABNORMAL ###################
try:
aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll = self.readCsvForSpecInfo(self.fileName5, editline)
# show speciment info
self.specimen_info_label.setText(pat_text_format
.format(aa=aa, bb=bb, cc=cc, dd=dd, ee=ee, ff=ff, gg=gg, hh=hh, ii=ii, jj=jj, kk=kk, ll=ll))
# show results
#self.specimen_results_label.setText(
#'Eosinophil %\t\t{gg}'
#'\nLymphocyte %\t\t{hh}'
#'\nMonocyte %\t\t{ii}'
#'\nNeutrophil %\t\t{jj}'
#'\n'
#'\nINITIAL RESULT\t\t\t{kk}'
#'\nDOCTORS COMMENTS\t\t\t{ll}'
#.format(gg=gg, hh=hh, ii=ii, jj=jj, kk=kk, ll=ll))
except:
print("Edit Line Empty for abnormal")
################ NORMAL ###################
try:
aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll = self.readCsvForSpecInfo(self.fileName4, editline)
# show speciment info
self.specimen_info_label.setText(pat_text_format
.format(aa=aa, bb=bb, cc=cc, dd=dd, ee=ee, ff=ff, gg=gg, hh=hh, ii=ii, jj=jj, kk=kk, ll=ll))
# show results
#self.specimen_results_label.setText(
#'Eosinophil %\t\t{gg}'
#'\nLymphocyte %\t\t{hh}'
#'\nMonocyte %\t\t{ii}'
#'\nNeutrophil %\t\t{jj}'
#'\n'
#'\nINITIAL RESULT\t\t\t{kk}'
#'\nDOCTORS COMMENTS\t\t\t{ll}'
#.format(gg=gg, hh=hh, ii=ii, jj=jj, kk=kk, ll=ll))
except:
print("Edit Line Empty for normal")
def path_leaf(self, path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def readCsvForSpecInfo(self, fileName, editline):
try:
with open(fileName, "r") as fileInput:
for entry in csv.reader(fileInput):
print(entry[0])
print(editline)
if (entry[0]) == editline:
aa = entry[0] #accID
bb = entry[1] #accDate
cc = entry[2] #FN
dd = entry[3] #LN
ee = entry[4] #DOB
ff = entry[5] #SSN
gg = entry[6] #E
hh = entry[7] #L
ii = entry[8] #M
jj = entry[9] #N
kk = entry[10] #Normality
try:
ll = entry[11] #Comments
except:
ll = 'PENDING'
return aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll
except:
print("Reading Specimen Info Failed")
# ===============# LOAD CSV AND CHECK FOR THE LOGIN INFO#===============# | [
"gregglegarda@gmail.com"
] | gregglegarda@gmail.com |
class User():
    """A site member with basic profile data and a login-attempt counter."""

    def __init__(self, first_name, last_name, age, bio):
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
        self.bio = bio
        self.login_attempts = 0

    def describe_user(self):
        """Print a one-line profile summary followed by the biography."""
        full_name = f"{self.first_name.title()} {self.last_name.title()}"
        print(f"{full_name} is a {self.age} year old user.")
        print(f"Their biography reads:\n{self.bio} \n")

    def greet_user(self):
        """Print a personalized greeting."""
        print(f"Hello, {self.first_name.title()}!\n")

    def increment_login_attempts(self):
        """Record one additional login attempt."""
        self.login_attempts += 1

    def reset_login_attempts(self):
        """Reset the login-attempt counter to zero."""
        self.login_attempts = 0
class Admin(User):
    """A privileged user that can moderate posts and other users."""

    def __init__(self, first_name, last_name, age, bio):
        super().__init__(first_name, last_name, age, bio)
        # BUG FIX: a missing comma previously fused 'can ban user' and
        # 'can unban user' into a single string via implicit concatenation.
        self.privileges = ['can add post', 'can delete post', 'can ban user',
                           'can unban user', 'can edit source']

    def show_privileges(self):
        """Print each of the admin's privileges on its own line."""
        print(self.first_name.title(), "has the following privileges:")
        for privilege in self.privileges:
            print("-", privilege)
        print('')
# Demo: create an admin and list their privileges.
admin = Admin('justin', 'serota', 28, 'Former Comp Sci Teacher')
admin.show_privileges()
| [
"noreply@github.com"
] | rlongo02.noreply@github.com |
9060cd355a4f7e4d37ac4e518dcd9a42465b0e0c | 058595b044f405cf89157f3d69d6c5758008c88f | /level.py | 72cebb6266822c4ef0c9b1a5a9c53fe7e902ab2b | [] | no_license | arikel/dm | 2f640fcd4fb28bf733749bfece10f978066f9f29 | db7951b7dd48ff66e98b6ee5c1845077c0eda1b6 | refs/heads/master | 2021-01-01T18:21:55.815143 | 2012-03-31T20:57:56 | 2012-03-31T20:57:56 | 3,884,737 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,877 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pygame
from player import *
pygame.mixer.init()
SoundDB = {}
SoundDB["bump"] = pygame.mixer.Sound("sounds/party_bump.wav")
SoundDB["door"] = pygame.mixer.Sound("sounds/dungeon_door.wav")
SoundDB["thud"] = pygame.mixer.Sound("sounds/dungeon_thud.wav")
SoundDB["swipe"] = pygame.mixer.Sound("sounds/attack_swipe.wav")
WALL_ELEMENTS = ["niche", "fountain", "lock", "vim", "button", "torch_holder"]
FLOOR_ELEMENTS = ["pit", "hidden_pit", "trigger", "hidden_trigger"]
ALL_ITEMS = ["box"]
#-------------------------------------------------------------------
# Item
#-------------------------------------------------------------------
class ThrownItem(object):
	def __init__(self, item, level, x, y, slot, direction, step=500, moves=5):
		# An item flying across the level.  (x, y) is the current tile and
		# `slot` one of its four sub-tile corners (NW/NE/SW/SE); `direction`
		# is one of N/S/E/W.  `step` is the flight delay in milliseconds and
		# `moves` how many sub-tile steps remain before the item drops.
		self.item = item
		self.x = x
		self.y = y
		self._level = level
		self.slot = slot
		self.direction = direction
		self.step = step
		self.moves = moves
		self.nextUpdate = pygame.time.get_ticks() + self.step
	def update(self, t=None):
		# Advance the flight one sub-tile step every `step` ms; drop the item
		# on the current tile when it runs out of moves or hits a closed tile.
		if not t:
			t = pygame.time.get_ticks()
		if t>= self.nextUpdate:
			if self.moves>0:
				x, y, slot = self.getNextPos()
				#if self._level.getTile(x, y).genre == "floor":
				if self._level.getTile(x, y)._open:
					#TODO : add test for hitting monster or player
					self.x = x
					self.y = y
					self.slot = slot
					self.nextUpdate = t + self.step
					self.moves -=1
				else:
					#TODO : add possible effect on wall or through door
					self._level.tiles[self.x][self.y].addItem(self.item, self.slot)
					self._level.removeThrownItem(self)
					self.moves = 0
					SoundDB["thud"].play()
			else:
				self._level.tiles[self.x][self.y].addItem(self.item, self.slot)
				self._level.removeThrownItem(self)
				SoundDB["thud"].play()
def getNextPos(self):
if self.direction == "N":
if self.slot == "NW":
x = self.x
y = self.y - 1
slot = "SW"
elif self.slot == "NE":
x = self.x
y = self.y - 1
slot = "SE"
elif self.slot == "SW":
x = self.x
y = self.y
slot = "NW"
elif self.slot == "SE":
x = self.x
y = self.y
slot = "NE"
if self.direction == "S":
if self.slot == "NW":
x = self.x
y = self.y
slot = "SW"
elif self.slot == "NE":
x = self.x
y = self.y
slot = "SE"
elif self.slot == "SW":
x = self.x
y = self.y + 1
slot = "NW"
elif self.slot == "SE":
x = self.x
y = self.y + 1
slot = "NE"
if self.direction == "W":
if self.slot == "NW":
x = self.x - 1
y = self.y
slot = "NE"
elif self.slot == "NE":
x = self.x
y = self.y
slot = "NW"
elif self.slot == "SW":
x = self.x - 1
y = self.y
slot = "SE"
elif self.slot == "SE":
x = self.x
y = self.y
slot = "SW"
if self.direction == "E":
if self.slot == "NW":
x = self.x
y = self.y
slot = "NE"
elif self.slot == "NE":
x = self.x + 1
y = self.y
slot = "NW"
elif self.slot == "SW":
x = self.x
y = self.y
slot = "SE"
elif self.slot == "SE":
x = self.x + 1
y = self.y
slot = "SW"
return (x, y, slot)
#-------------------------------------------------------------------
# Tile
#-------------------------------------------------------------------
class Tile(object):
	"""A single map square: floor by default, with item and element slots.

	`itemSlots` maps a corner name (NW/NE/SW/SE) to a stack of items lying
	there; `elemSlots` maps a side name to the TileElements (niche, button,
	...) attached to that side.  `_open` tells whether the party and thrown
	items may enter the square.
	"""
	def __init__(self, _level, x, y, _open=True):
		self._level = _level
		self._open = _open
		self.x = x
		self.y = y
		self.elemSlots = {}
		self.itemSlots = {}
		self.genre = "floor" # wall, door, hole
		self.frame = -1 # if self.frame >= 0 , tile has animation
		self.nbFrames = 1
		self.state = "sleep"
	#-------------------------------------------------------------------
	# items
	def addItem(self, item, slotName="NE"):
		"""Drop an existing item on the given corner slot."""
		self.itemSlots.setdefault(slotName, []).append(item)
	def makeItem(self, itemName, slotName="NE"):
		"""Instantiate an Item by name and drop it on the given corner slot."""
		self.itemSlots.setdefault(slotName, []).append(Item(itemName))
	def removeItem(self, slotName):
		"""Pop and return the topmost item of the slot, or False when empty."""
		if self.hasItem(slotName):
			return self.itemSlots[slotName].pop(-1)
		return False
	def hasItem(self, slotName):
		"""Return True when at least one item lies on the slot."""
		if slotName not in self.itemSlots:
			return False
		if len(self.itemSlots[slotName])>0:
			return True
		return False
	#-------------------------------------------------------------------
	# elements
	def addElement(self, elementName, slot):
		"""Attach a TileElement to a slot, ignoring duplicates of the same genre."""
		# BUG FIX: the niche check used to run *after* an empty list had been
		# created in elemSlots, leaving a phantom empty slot that made
		# hasElement() return True.  Check it first instead.
		if elementName == "niche" and self.genre == "floor":
			return
		if slot not in self.elemSlots:
			self.elemSlots[slot] = []
		else:
			# Don't add the same element genre twice on the same slot.
			for elem in self.elemSlots[slot]:
				if elem.genre == elementName:
					return
		self.elemSlots[slot].append(TileElement(elementName, slot))
	def removeElement(self, slot):
		"""Detach the topmost element of a slot, dropping the slot when empty."""
		# BUG FIX: this referenced the non-existent attribute `self.slots`
		# and misplaced a parenthesis (`len(x > 0)`); it could never run.
		if slot in self.elemSlots:
			if len(self.elemSlots[slot]) > 0:
				self.elemSlots[slot].pop(-1)
			if len(self.elemSlots[slot]) == 0:
				del self.elemSlots[slot]
	def hasElement(self):
		"""Return True when any element is attached to this tile."""
		if len(self.elemSlots)>0:
			return True
		return False
	def update(self, t):
		"""Animated subclasses (doors...) override this; plain tiles are static."""
		pass
	def getSaveData(self):
		"""Serialize the tile's attached elements, one text line per element."""
		data = ""
		if self.hasElement():
			for slot in self.elemSlots:
				for elem in self.elemSlots[slot]:
					elemtxt = "tileElement, %s, %s = %s, %s\n" % (self.x, self.y, elem.genre, elem.slot)
					data += elemtxt
		return data
#-------------------------------------------------------------------
# DoorTile
#-------------------------------------------------------------------
class DoorTile(Tile):
	"""An animated door tile; frames 0..4 go from fully closed to fully open."""
	def __init__(self, _level, x, y, _open=False):
		self._level = _level
		self._open = _open
		self.x = x
		self.y = y
		self.elemSlots = {}
		self.itemSlots = {}
		self.genre = "door"
		self.frame = 0 #tile has animation
		self.nbFrames = 5
		self.state = "closed" #
		self.nextUpdate = 0
		self.step = 300
		# NOTE(review): only the *keys* of `opposite` are used here -- a
		# switch is attached on every side of the door; the values look
		# like leftovers from an earlier design.
		opposite = {"N":"S", "E":"W", "S":"N","W":"E"}
		for k in opposite:
			self.addElement("switch", k)
	def update(self, t):
		# Advance the opening/closing animation one frame every `step` ms;
		# once the end frame is reached, unregister from the level's
		# per-frame update list.
		if t>= self.nextUpdate:
			if self.state == "opening":
				self.frame += 1
				if self.frame >= self.nbFrames:
					self.frame = self.nbFrames - 1
					self.state = "open"
					self._level.removeToUpdate(self.x, self.y)
				else:
					#SoundDB["door"].stop()
					SoundDB["door"].play()
			if self.state == "closing":
				self.frame -= 1
				if self.frame < 0:
					self.frame = 0
					self.state = "closed"
					self._level.removeToUpdate(self.x, self.y)
				else:
					#SoundDB["door"].stop()
					SoundDB["door"].play()
			# The door becomes passable once it is more than half open.
			if self.frame >=3:
				self._open = True
			else:
				self._open = False
			self.nextUpdate = t + self.step
#-------------------------------------------------------------------
# TileElement : special objects on tiles
#-------------------------------------------------------------------
class TileElement(object):
	"""A special fixture attached to one slot of a tile (niche, switch...).

	Attributes:
		genre: element type name (see WALL_ELEMENTS / FLOOR_ELEMENTS)
		slot: which side/corner of the tile the element occupies
		items: items currently stored inside the element
		acceptedItems: item genres the element may receive (empty = none)
	"""
	def __init__(self, genre, slot):
		self.genre = genre
		self.slot = slot
		self.items = []
		self.acceptedItems = []
	def addItem(self, item):
		# Silently ignore items this element cannot hold.
		if item.genre in self.acceptedItems:
			self.items.append(item)
	def removeItem(self):
		"""Pop and return the topmost stored item, or None when empty."""
		if len(self.items):
			return self.items.pop(-1)
		return None
	def onAction(self, action=None):
		"""React to a triggered action dict; currently just logs it."""
		if not action:
			return
		cmd = action["action"]
		# BUG FIX: was a Python-2 print statement, which is a SyntaxError
		# under Python 3; the single-argument print() form works on both.
		print("Action %s received" % (cmd))
def makeFloorTile(level, x, y):
	# Plain open floor square.
	return Tile(level, x, y)
def makeWallTile(level, x, y):
	# Solid, closed square.
	tile = Tile(level, x, y, False)
	tile.genre = "wall"
	return tile
def makeDoorTile(level, x, y):
	# Animated door square (starts closed).
	tile = DoorTile(level, x, y)
	return tile
def makeNicheTile(level, x, y, direction="N"):
	# Wall square with a niche element carved into the given side.
	tile = Tile(level, x, y, False)
	tile.genre = "wall"
	tile.direction = direction
	tile.addElement("niche", tile.direction)
	return tile
def makeTile(level, x, y, genre):
	"""Factory: build a tile of the requested genre, or None when unknown."""
	builders = {
		"floor": makeFloorTile,
		"wall": makeWallTile,
		"door": makeDoorTile,
	}
	builder = builders.get(genre)
	if builder is None:
		return None
	return builder(level, x, y)
#-------------------------------------------------------------------
# Level
#-------------------------------------------------------------------
class Level(object):
	def __init__(self, filename = None):
		# codes used in saved data
		self.dataCode = {}
		self.dataCode["floor"] = "0"
		self.dataCode["wall"] = "1"
		self.dataCode["door"] = "d"
		# tiles whose update() must run every frame (animated doors...)
		self.tilesToUpdate = []
		# items currently flying across the map
		self.thrownItems = []
		self.filename = filename
		if self.filename:
			self.load(self.filename)
def makeTileFromCode(self, code, x, y):
if code == "d":
genre = "door"
elif code == "0":
genre = "floor"
elif code == "1":
genre = "wall"
return makeTile(self, x, y, genre)
def extendX(self, n):# add column to level map
for i in range(n):
col = []
for y in range(self.Y):
col.append(makeWallTile(self, self.X+i, y))
self.tiles.append(col)
self.X += n
def extendY(self, n):# add line to level map
for x in range(self.X):
for i in range(n):
self.tiles[x].append(makeWallTile(self, x, self.Y+i))
self.Y += n
def reduceX(self, n):# remove column from level map
for i in range(n):
self.tiles.pop(-1)
self.X -= n
def reduceY(self, n):# remove line from level map
for x in range(self.X):
for i in range(n):
self.tiles[x].pop(-1)
self.Y -= n
def new(self, filename, x=30, y=20):
self.filename
self.X = x
self.Y = y
print "creating new level x = %s, y = %s" % (self.X, self.Y)
self.tiles = []
for x in range(self.X):
tilesCol = []
for y in range(self.Y):
#print "adding tile %s %s" % (x, y)
tileCode = "1"
tilesCol.append(makeWallTile(self, x, y))
self.tiles.append(tilesCol)
#-------------------------------------------------------------------
# load
def load(self, levelFile):
content = open(levelFile).read()
lines = content.split("\n")
for line in lines:
if len(line.split("=")) != 2:
#print "invalid line found %s" % (line)
continue
k, v = line.split("=")
if k.strip() == "x":
self.X = int(v.strip())
elif k.strip() == "y":
self.Y = int(v.strip())
elif k.strip() == "tilecode":
tilecode = v.strip()
i = 0
if len(tilecode) == self.X*self.Y:
self.tiles = []
for x in range(self.X):
tileCols = []
for y in range(self.Y):
code = tilecode[y*self.X+x]
tileCols.append(self.makeTileFromCode(code, x, y))
i += 1
self.tiles.append(tileCols)
#print "loaded tiles ok"
elif len(k.split(","))==3:
elems = k.split(",")
code, x, y = elems[0].strip(), int(elems[1].strip()), int(elems[2].strip())
if code == "tileElement":
genre, direction = v.split(",")
genre = genre.strip()
direction = direction.strip()
self.tiles[x][y].addElement(genre, direction)
elif code == "item":
pass
#-------------------------------------------------------------------
# save
def getSaveData(self):
data = ""
data += "x = %s\n" % self.X
data += "y = %s\n" % self.Y
data += "tilecode = "
for y in range(self.Y):
for x in range(self.X):
data = data + self.dataCode[self.tiles[x][y].genre]
#data += "\n"
data += "\n"
for x in range(self.X):
for y in range(self.Y):
tiledata = self.tiles[x][y].getSaveData()
if tiledata:
data += tiledata
return data
def save(self, filename):
f = open(filename, "w")
f.write(self.getSaveData())
f.close()
print "Level file saved as : %s" % (filename)
def isInLevel(self, x, y):
if not 0<=x<self.X:
return False
if not 0<=y<self.Y:
return False
return True
def getTile(self, x, y):
if not self.isInLevel(x, y):
return None
return self.tiles[x][y]
def isOpen(self, x, y):
if not self.isInLevel(x, y):
return False
if self.tiles[x][y]._open:
return True
return False
def addToUpdate(self, x, y):
if not self.isInLevel(x, y):
return False
if ((x, y) not in self.tilesToUpdate):
self.tilesToUpdate.append((x, y))
def removeToUpdate(self, x, y):
if ((x, y) in self.tilesToUpdate):
self.tilesToUpdate.remove((x, y))
def addItem(self, item, x, y, slot):
if not self.isInLevel(x, y):
return
tile = self.getTile(x, y)
if not tile:
return
tile.addItem(item, slot)
def removeItem(self, x, y, slot):
if not self.isInLevel(x, y):
return
tile = self.getTile(x, y)
if not tile:
return
tile.removeItem(slot)
def addThrownItem(self, item, x, y, slot, direction):
thrownItem = ThrownItem(item, self, x, y, slot, direction)
self.thrownItems.append(thrownItem)
def removeThrownItem(self, thrownItem):
if thrownItem in self.thrownItems:
self.thrownItems.remove(thrownItem)
| [
"arikel@arikel-desktop.(none)"
] | arikel@arikel-desktop.(none) |
4024e4ad97232a5b71e2795e3cf5bd9cfb1ad89f | de9609bf476451ead58c7440d37f4653f37d72e2 | /ATA/test_biomart.py | b6eabc500d74fc029493984e60e6d5ceaa155e1a | [] | no_license | carolinamonzo/random_ipythons | 801f7718cacddeb746b9fe90ba8322801682c27b | a03749abc78155774f45b5e07ad68970a36c1625 | refs/heads/master | 2022-12-21T03:22:21.058466 | 2022-12-20T13:53:00 | 2022-12-20T13:53:00 | 180,973,711 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | #! /usr/bin/python3.5
# Query Ensembl BioMart (GRCh37 archive) for genes overlapping three
# chromosome-10 positions and print the resulting table.
from biomart import BiomartServer
server = BiomartServer("http://grch37.ensembl.org/biomart")
# Log every request/response to stdout.
server.verbose = True
# Uncomment to list the databases available on the server.
#server.show_databases()
# Uncomment to list the datasets available on the server.
#server.show_datasets()
# Use the Ensembl human genes dataset.
genes = server.datasets['hsapiens_gene_ensembl']
# Uncomment to show all available filters and attributes of the dataset.
#genes.show_filters()
#genes.show_attributes()
response = genes.search({
    'filters':{
        'chromosomal_region':["10:100148059:100148060", "10:100456043:100456044", "10:100385714:100385715"]
    },
    'attributes':["chromosome_name", "start_position", "end_position", "ensembl_gene_id", "external_gene_name"]
})
# The response streams back as TSV; print each row as a list of fields.
for line in response.iter_lines():
    line = line.decode('utf-8')
    print(line.split("\t"))
| [
"noreply@github.com"
] | carolinamonzo.noreply@github.com |
7b8a2c1af45cd3340a68f6d140fa21379cb23002 | 7bccbeb5accc2f112b23b5b8302c1fd5e5899fe7 | /D51 - Internet Speed Twitter Complaint Bot/isp_tweet_bot.py | 1446f400a0a05b3e3170f19b8e01f2e1ab014d2e | [] | no_license | messierspheroid/instruction | 66f3214bba8a22a6def7a407fe5c925a2f603fd1 | b0fd8a115bfd6ae69259fa650e3aaca304c45516 | refs/heads/main | 2023-06-11T18:43:57.929513 | 2021-07-09T15:23:02 | 2021-07-09T15:23:02 | 384,462,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,956 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Selenium / site configuration for the speed-test bot.
chrome_driver_path = "C:\Development\chromedriver.exe"
speedtest_url = "https://www.speedtest.net/"
twitter_login_url = "https://twitter.com/login"
# SECURITY(review): credentials are hard-coded in source. Move them to
# environment variables or a secrets store, and rotate this password.
twitter_user = "messierspheroid"
twitter_pass = "*Applepass91*"
# Speeds (Mbps) the ISP plan promises; a tweet fires when the measured
# download or upload falls below these thresholds.
account_dl = 600
account_ul = 15
class InternetSpeedTwitterBot():
    """Measures internet speed on speedtest.net and, when it is below the
    paid-for plan, logs the result and tweets a complaint at the ISP."""
    def __init__(self, driver_path):
        # NOTE(review): the driver_path parameter is ignored; the global
        # chrome_driver_path is used instead — presumably a bug, confirm.
        self.driver = webdriver.Chrome(executable_path=chrome_driver_path)
        # Last measured speeds as strings scraped from the page.
        self.download_result_text = 0
        self.upload_result_text = 0
    def get_internet_speed(self):
        """Run one speedtest.net measurement and cache the two results."""
        self.driver.get(speedtest_url)
        self.driver.set_window_size(700, 500)
        time.sleep(3)
        start_speedtest_button = self.driver.find_element_by_xpath(
            "//*[@id='container']/div/div[3]/div/div/div/div[2]/div[3]/div[1]/a")
        start_speedtest_button.click()
        # Fixed wait for the test to finish; brittle if the test runs long.
        time.sleep(50)
        self.download_result_text = self.driver.find_element_by_xpath(
            "//*[@id='container']/div/div[3]/div/div/div/div[2]/div[3]/div[3]/div/div[3]/div/div/div[2]/div[1]/div[2]/div/div[2]/span").text
        print(self.download_result_text)
        self.upload_result_text = self.driver.find_element_by_xpath(
            "//*[@id='container']/div/div[3]/div/div/div/div[2]/div[3]/div[3]/div/div[3]/div/div/div[2]/div[1]/div[3]/div/div[2]/span").text
        print(self.upload_result_text)
    def tweet_at_provider(self):
        """If either speed is below the plan, append the measurement to a
        local log file, log into Twitter and post a complaint tweet."""
        if float(self.download_result_text) < account_dl or float(self.upload_result_text) < account_ul:
            # Keep a local CSV-style history of under-performing samples.
            with open("C:/Users/chade/Google Drive/xfinity_speeds.txt", mode="a") as data_file:
                data_file.write(f"{self.download_result_text}, {self.upload_result_text}\n")
            self.driver.get(twitter_login_url)
            time.sleep(3)
            t_user_input = self.driver.find_element_by_name("session[username_or_email]")
            t_user_input.send_keys(twitter_user)
            t_user_pass = self.driver.find_element_by_name("session[password]")
            t_user_pass.send_keys(twitter_pass)
            t_user_pass.send_keys(Keys.ENTER)
            time.sleep(3)
            # Compose and submit the tweet; XPaths are tied to Twitter's
            # React DOM and will break whenever the page layout changes.
            t_text_field = self.driver.find_element_by_xpath(
                "//*[@id='react-root']/div/div/div[2]/main/div/div/div/div[1]/div/div[2]/div/div[2]/div[1]/div/div/div/div[2]/div[1]/div/div/div/div/div/div/div/div/div/div[1]/div/div/div/div[2]/div/div/div/div")
            t_text_field.send_keys(f"Hey @Xfinity, why is my internet speed "
                                   f"{self.download_result_text}down/{self.upload_result_text}up when I pay for {account_dl}down/{account_ul}up?")
            time.sleep(2)
            t_button_post = self.driver.find_element_by_xpath(
                "//*[@id='react-root']/div/div/div[2]/main/div/div/div/div[1]/div/div[2]/div/div[2]/div[1]/div/div/div/div[2]/div[4]/div/div/div[2]/div[3]").click()
# Script entry point: measure once, then complain if below the paid plan.
bot = InternetSpeedTwitterBot(chrome_driver_path)
bot.get_internet_speed()
bot.tweet_at_provider()
| [
"chad.bjornberg@gmail.com"
] | chad.bjornberg@gmail.com |
789ef9d38702b2bb55490a4fcb0456870a1338aa | 7e019f3cb4e2b6aeb6d5b81d56a3f2fccda78702 | /rec/exceptions.py | 3a8f80a5efe86a0cc07cee4d12ca9dceb27bf4ce | [
"Apache-2.0"
] | permissive | 50mkw/mysite | 7d43d6ee73f275f656cea059a2e737a207be8fd8 | 9767208b81e029f3896467570516f19f26cb0f3f | refs/heads/master | 2022-12-09T21:23:46.953347 | 2019-06-26T02:49:08 | 2019-06-26T02:49:08 | 193,817,755 | 0 | 0 | NOASSERTION | 2022-12-08T05:17:15 | 2019-06-26T02:48:33 | Python | UTF-8 | Python | false | false | 323 | py | from django.core.exceptions import SuspiciousOperation
class DisallowedModelRecLookup(SuspiciousOperation):
    """Invalid filter was passed to rec view via URL querystring"""
    # Subclassing SuspiciousOperation makes Django answer with an
    # HTTP 400 (bad request) instead of a 500 when this is raised.
    pass
class DisallowedModelRecToField(SuspiciousOperation):
    """Invalid to_field was passed to rec view via URL query string"""
    # Subclassing SuspiciousOperation makes Django answer with an
    # HTTP 400 (bad request) instead of a 500 when this is raised.
    pass
| [
"patrick.yang@50mkw.com"
] | patrick.yang@50mkw.com |
aeacb4306279e72cab7c8fd20e25ca480c58c205 | 4d0266a4554a93bba316f70548c2f4916834b1a8 | /partitioned code/dhrec/autohistoryinfer.py | 8a9be06ba94e2dcf00b756ec20eed03174e536cf | [] | no_license | seferlab/DHREC | 864c2c2d4076f8de85697b097a90e2ce549db1cd | b8a9f5274321e14ecc486f4cad02417dbc5658ce | refs/heads/master | 2022-12-27T16:14:54.145795 | 2020-09-29T12:25:27 | 2020-09-29T12:25:27 | 299,609,232 | 0 | 0 | null | 2020-09-29T12:23:25 | 2020-09-29T12:23:24 | null | UTF-8 | Python | false | false | 21,522 | py | import networkx as nx
import sys
sys.path.append("./lib")
import os
import gzip
import cPickle
import random
import myutilities as myutil
import itertools
from InputOutput import InputOutput
from Trace import Trace
from Pbs import Pbs
import HistoryInferRunner
import HistOtherAlgos
def genOtherConfigFile(configfile,graphfile,algo,algoparams,snapshotfile,resultfile,smodel,trunfolder,inter):
    """Pickle the run configuration for the external-algorithm driver.

    The fields are dumped one after another into a gzip-compressed
    stream, in the exact order HistOtherAlgos.py unpickles them.
    """
    fields = (graphfile, algo, algoparams, snapshotfile, resultfile,
              smodel, trunfolder, inter)
    with gzip.open(configfile, "wb") as out:
        for field in fields:
            cPickle.dump(field, out)
def genHistoryConfigFile(configfile,vararr):
    """Write the plain-text "key: value" config read by HistoryInfer.py.

    Args:
       configfile: output path
       vararr: parameter dict; the special key "dists" maps Trace
          transition constants to distribution specs, everything else
          is written verbatim as one "key: value" line
    """
    # NOTE(review): the local name `file` shadows the builtin.
    with open(configfile,"w") as file:
        for var in vararr.keys():
            if var == "dists":
               for key in vararr[var].keys():
                   if key in [Trace.S2I, Trace.I2R, Trace.E2I, Trace.I2S, Trace.S2E]:
                      # Transition entries are (distname, paramlist):
                      # written as "KEY: distname p1 p2 ...".
                      paramstr = " ".join([str(item) for item in vararr[var][key][1]])
                      file.write("{0}: {1} {2}\n".format(key,vararr[var][key][0],paramstr))
                   elif key == Trace.SPROB:
                      file.write("{0}: {1}\n".format(key,vararr[var][key]))
            else:
                file.write("{0}: {1}\n".format(var,vararr[var]))
def getAlgoBlocks(prob,inter,infermode,smodel):
    # Return the list of (algorithm-name, parameter-dict) pairs to run for
    # the given problem type (dis/cont), intervention mode, inference mode
    # (Spreader/History) and spreading model. Many alternative selections
    # are kept around as commented-out lines for experimentation.
    if prob == "dis" and inter == "bound":
       if infermode == "Spreader": 
          algoblocks = [("MatroidSub",{"method":"search","ensemble":False}),("MatroidSub",{"method":"search","ensemble":True,"enscount":2}),("MatroidSub",{"method":"search","ensemble":True,"enscount":5})]
          algoblocks = [("second-logconcave-internal",{"ensemble":False,"approx":"reliability"}),("second-arbitrary-internal",{"ensemble":False,"method":"greedy"})]
          algoblocks = [("second-arbitrary-internal",{"ensemble":False,"method":"pipage"})]
          # NOTE(review): `algoblocks2` is never defined in this branch, so
          # the next line raises NameError at runtime — likely leftover from
          # the commented-out variants below; confirm intended algo set.
          algoblocks.extend(algoblocks2)
          otheralgos = [("RumorCentrality",{})]
          algoblocks.extend(otheralgos)
       elif infermode == "History":
          algoblocks = [("MatroidSub",{"method":"search","ensemble":False})]
          #algoblocks = [("second-logconcave",{"ensemble":False,"approx":"reliability"}),("second-arbitrary",{"ensemble":False,"method":"greedy"})] 
          #algoblocks = [("second-arbitrary",{"ensemble":False,"method":"pipage"})]
          #algoblocks = [("MatroidSub",{"method":"search","ensemble":False})]
          #algoblocks = [("Qsapmin",{"ensemble":False,"approx":"reliability"})]
          #algoblocks = [("Qsapmax",{"ensemble":False,"method":"pipage"})]
          #algoblocks.extend(algoblocks2)
          #indeblocks = [("Independent",{"ensemble":False})]
          #algoblocks.extend(indeblocks)
    elif prob == "dis" and inter == None:
       if infermode == "Spreader":
          algoblocks = [("GreedySubCoverSingle",{"iter":None,"objmethod":"log","ensemble":False}),("GreedySubCoverSingle",{"iter":None, "objmethod":"log","ensemble":True,"enscount":2})]
          #,("GreedySubCoverSingle",{"iter":None, "objmethod":"log","ensemble":True,"enscount":3})
          #algoblocks = [("FracCover",{"iter":None, "objmethod":"log","ensemble":False,"roundmethod":"random"})]
          #algoblocks = []
          # Model-specific approximation algorithms.
          if smodel in ["si","sir"]:
             appralgos = [("Pcdsvc",{"ensemble":False}),("Pcdsvc",{"ensemble":True,"enscount":2})]
             appralgos2 = [("Pcvc",{"ensemble":False}),("Pcvc",{"ensemble":True,"enscount":2})]
             algoblocks.extend(appralgos)
             algoblocks.extend(appralgos2)
          elif smodel == "seir":
             appralgos = [("MinCut",{"ensemble":False}),("MinCut",{"ensemble":True,"enscount":2})]
             algoblocks.extend(appralgos)
          #algoblocks = []
          #otheralgos = [("NetSleuth",{}),("RumorCentrality",{}),("KEffectors",{})]
          #otheralgos = [("NetSleuth",{})]
          otheralgos = [("RumorCentrality",{})]
          algoblocks.extend(otheralgos)
       elif infermode == "History":
          algoblocks = [("GreedySubCoverSingle",{"iter":None, "objmethod":"log","ensemble":False})]
          if smodel in ["si","sir"]:
             appralgos = [("Pcdsvc",{"ensemble":False}),("Pcvc",{"ensemble":False})]
          elif smodel == "seir":
             appralgos = [("MinCut",{"ensemble":False})]
          algoblocks.extend(appralgos)
          indeblocks = [("GreedyForward",{"ensemble":False})]
          algoblocks.extend(indeblocks)
    elif prob == "cont" and inter == "bound":
       algoblocks = [("greedy",{})]
    elif prob == "cont" and inter == None:
       algoblocks = [("greedy",{})]
    return algoblocks
def returnAlgoStr(algo,algoparams):
    # Identifier of the form "<algo>-<param1>-<param2>-..." built from
    # the parameter dict's values (in insertion order).
    parts = [algo]
    parts.extend(str(value) for value in algoparams.values())
    return "-".join(parts)
def genMainFolders(graphtraceinput,runresultinput,configinput,sideinput):
    """Build the standard folder layout for one experiment configuration.

    Returns (graphfolder, tracefolder, newtracefolder, configfolder,
    resultfolder, runfolder); the pbs/config/result/run folders are
    created on disk when missing.
    """
    graphfolderpref, tracepref, tracefolderpref, newtracepref, newtracefolderpref = graphtraceinput
    runpref, runfolderpref, resultpref, resultfolderpref = runresultinput
    configpref, configfolderpref, pbsfolder = configinput
    smodel, prob, evol, realdata, inter = sideinput
    # Shared "<realdata>_<evol>_<smodel>_<prob>_<inter>" suffix.
    suffix = "{0}_{1}_{2}_{3}_{4}".format(realdata, evol, smodel, prob, inter)
    graphfolder = "{0}/{1}_{2}_graphs".format(graphfolderpref, realdata, evol)
    tracefolder = "{0}/{1}_{2}_{3}_{4}_edge_{5}".format(tracefolderpref, tracepref, realdata, evol, smodel, prob)
    newtracefolder = "{0}/{1}_{2}".format(newtracefolderpref, newtracepref, suffix)
    configfolder = "{0}/{1}_{2}".format(configfolderpref, configpref, suffix)
    resultfolder = "{0}/{1}_{2}".format(resultfolderpref, resultpref, suffix)
    runfolder = "{0}/{1}_{2}".format(runfolderpref, runpref, suffix)
    for folder in (pbsfolder, configfolder, resultfolder, runfolder):
        if not os.path.exists(folder):
            os.makedirs(folder)
    return graphfolder, tracefolder, newtracefolder, configfolder, resultfolder, runfolder
def returnPathInfo(graphinput,traceinput):
    """returns path info

    Maps each eligible graph file to (filename, randomly chosen trace
    file). Graphs are filtered by problem/model prefix, by distribution
    (no weibull/rayleigh/powerlaw when inter is None) and, for synthetic
    data, by hard-coded parameter strings per spreading model.
    Args:
       graphinput:
       traceinput:
    Returns:
       path2info:
    """
    (graphfolder,realdata,evol,prob,smodel,filesamplecount,inter) = graphinput
    (tracefolder,fraccons,samplenoisestr,startcount) = traceinput
    path2info={}
    if realdata == "real" and evol == "static":
       for filename in myutil.listfiles(graphfolder):
           if filename.split("-")[0:2] != [prob,smodel]:
              continue
           filepath ="{0}/{1}".format(graphfolder,filename)
           if inter == None and (filename.find("weibull") != -1 or filename.find("rayleigh") != -1 or filename.find("powerlaw") != -1):
              continue
           #if filename.find("sn") != -1:
           #   if filename.find("sn1") == -1:
           #      continue
           #if filename.find("grid") != -1:
           #   continue
           filetracefolder = "{0}/{1}/{2}/{3}".format(tracefolder,filename,samplenoisestr,startcount)
           assert os.path.exists(filetracefolder)
           G = InputOutput.readGraphAndParams(filepath)
           # Convert fractional infected-count constraints to node counts.
           minicount = int(round(G.number_of_nodes() * fraccons[Trace.INFECTED]["min"]))
           maxicount = int(round(G.number_of_nodes() * fraccons[Trace.INFECTED]["max"]))
           sentfraccons = {Trace.INFECTED: {"min":minicount, "max":maxicount}}
           # Resample until one of the first two trace files is drawn.
           while True:
              filem = Trace.getRandomTraceFile(filetracefolder,sentfraccons)
              if filem not in ["0.plain","1.plain"]:
                 continue
              tracefile = "{0}/{1}".format(filetracefolder,filem)
              break
           #tracefile = "{0}/{1}".format(filetracefolder,Trace.getRandomTraceFile(filetracefolder,sentfraccons))
           path2info[filepath] = (filename,tracefile)
    elif realdata == "syn" and evol == "static":
       for filename in myutil.listfiles(graphfolder):
           filepath = "{0}/{1}".format(graphfolder,filename)
           if filename.split("-")[0:2] != [prob,smodel]:
              continue
           if inter == None and (filename.find("weibull") != -1 or filename.find("rayleigh") != -1 or filename.find("powerlaw") != -1):
              continue
           # Only the first two synthetic instances per parameter set.
           innum = int(filename.split("_")[-1].replace(".edgelist",""))
           if innum > 1:
              continue
           # Keep only graphs generated with the chosen parameter strings.
           if smodel == "si":
              if filename.find("sprob_1.0_1.0") != -1 and (filename.find("expo_0.2_1.0") != -1 or filename.find("expo_0.5_0.5") != -1 or filename.find("expo_0.1_0.5") != -1):
                 pass
              else:
                 continue
           elif smodel == "sir":
              if filename.find("sprob_1.0_1.0") != -1 and filename.find("s2i_expo_0.2_1.0") != -1 and filename.find("i2r_expo_0.8_1.0") != -1:
                 pass
              else:
                 continue
           elif smodel == "seir":
              if filename.find("sprob_1.0_1.0") != -1 and filename.find("s2e_expo_0.5_2.0") != -1 and filename.find("e2i_expo_0.5_2.0") != -1 and filename.find("i2r_expo_0.5_2.0") != -1:
                 pass
              else:
                 continue
           filetracefolder = "{0}/{1}/{2}/{3}".format(tracefolder,filename,samplenoisestr,startcount)
           assert os.path.exists(filetracefolder)
           G = InputOutput.readGraphAndParams(filepath)
           minicount = int(round(G.number_of_nodes() * fraccons[Trace.INFECTED]["min"]))
           maxicount = int(round(G.number_of_nodes() * fraccons[Trace.INFECTED]["max"]))
           sentfraccons = {Trace.INFECTED: {"min":minicount, "max":maxicount}}
           while True:
              filem = Trace.getRandomTraceFile(filetracefolder,sentfraccons)
              if filem not in ["0.plain","1.plain"]:
                 continue
              tracefile = "{0}/{1}".format(filetracefolder,filem)
              break
           #tracefile = "{0}/{1}".format(filetracefolder,Trace.getRandomTraceFile(filetracefolder,sentfraccons))
           path2info[filepath] = (filename,tracefile)
    return path2info
def runOtherAlgos(algofields,tracefields,otherfields):
    """runs other algos

    Prepares snapshot + config files for the baseline algorithms
    (NetSleuth/RumorCentrality/KEffectors) and submits a PBS job.
    Args:
       algofields:
       tracefields:
       otherfields:
    Returns:
    """
    # NOTE(review): this early return disables the entire function —
    # everything below is dead code; presumably a debug switch. Remove
    # the return to re-enable baseline runs.
    return
    algo,algoparams,runfolder = algofields
    noise,noisetype,timefrac,timecount,realdata,tracefile,tracestr,newtracefolder,tracefolder = tracefields
    complete,completeupto,path,graphfolder,configfolder,resultfolder = otherfields
    algostr = returnAlgoStr(algo,algoparams)
    parameterstr = "frac{0}-count{1}".format(timefrac,timecount)
    extensionstr = "-".join(path.replace("./","").split("/")[1:])
    tresultfolder = "{0}/{1}/{2}/{3}/{4}/{5}".format(resultfolder,extensionstr,tracestr,parameterstr,infermode,algostr)
    if not os.path.exists(tresultfolder):
       os.makedirs(tresultfolder)
    # Existing .hist result indices; -1 guarantees max()+1 starts at 0.
    indices = set([-1] + [int(myfile.replace(".hist","")) for myfile in myutil.listfiles(tresultfolder)])
    if complete and len(indices) >= completeupto + 1:
       return
    resultfile = "{0}/{1}.hist".format(tresultfolder,max(indices)+1)
    trunfolder = "{0}/{1}/{2}/{3}/{4}/{5}/{6}".format(runfolder,extensionstr,tracestr,parameterstr,infermode,algostr,max(indices)+1)
    if not os.path.exists(trunfolder):
       os.makedirs(trunfolder)
    tnewtracefolder = "{0}/{1}/{2}/{3}/{4}/{5}/{6}".format(newtracefolder,extensionstr,tracestr,parameterstr,infermode,algostr,max(indices)+1)
    if not os.path.exists(tnewtracefolder):
       os.makedirs(tnewtracefolder)
    snapshotfile = "{0}/infer.snapshot".format(tnewtracefolder)
    G = InputOutput.readGraphAndParams(path)
    assert timecount == 1
    # Fractions of the trace duration at which snapshots are observed.
    if infermode == "History":
       fracpoints = [(timefrac*index)/timecount for index in xrange(1,timecount+1)]
    elif infermode == "Spreader":
       fracpoints = [timefrac]
       for index in xrange(1,timecount):
           interval = (1.0-timefrac)/(timecount-1)
           fracpoints.append(timefrac+(index*interval))
    assert max(fracpoints) <= 1.00000001
    trace = InputOutput.readPlainTrace(tracefile,prob)
    maxtime = Trace.getMaxTraceTime(trace)
    obstimes = sorted(list(set([int(round(frac*maxtime)) for frac in fracpoints])))
    if 0 in obstimes:
       obstimes.remove(0)
    if len(obstimes) == 0:
       return
    curstates = [Trace.trace2Snapshot(trace,obstime,smodel,G) for obstime in obstimes]
    InputOutput.writeSnapshot(curstates,infermode,inter,smodel,obstimes,snapshotfile)
    configpath = "{0}/config_{1}_{2}_{3}_{4}_{5}-{6}.config".format(configfolder,extensionstr,infermode,algostr,parameterstr,tracestr,max(indices)+1)
    genOtherConfigFile(configpath,path,algo,algoparams,snapshotfile,resultfile,smodel,trunfolder,inter)
    code = "python HistOtherAlgos.py {0}".format(configpath)
    #os.system(code)
    #return
    # NOTE(review): both branches pick the same pool.
    if random.random() <= 0.5:
       pool = "pool2"
    else:
       pool = "pool2"
    pbsfilename = "{0}-{1}-{2}-{3}-{4}-{5}-{6}-{7}-{8}-{9}-{10}.pbs".format(realdata,evol,smodel,prob,inter,extensionstr,infermode,algostr,parameterstr,tracestr,max(indices)+1)
    Pbs.submitPbs(code,pbsfolder,pbsfilename,pool)
def runMyAlgos(algofields,tracefields,otherfields):
    """runs my algos

    Builds the snapshot + HistoryInfer config for one (graph, trace,
    algorithm, observation-parameters) combination and runs it.
    Args:
       algofields:
       tracefields:
       otherfields:
    Returns:
    """
    algo,algoparams,infermode,inter,runfolder = algofields
    noise,noisetype,timefrac,timecount,prob,smodel,evol,realdata,tracefile,tracestr,newtracefolder,tracefolder = tracefields
    complete,completeupto,path,printscore,graphfolder,configfolder,resultfolder = otherfields
    algostr = returnAlgoStr(algo,algoparams)
    parameterstr = "frac{0}-count{1}".format(timefrac,timecount)
    extensionstr = "-".join(path.replace("./","").split("/")[1:])
    tresultfolder = "{0}/{1}/{2}/{3}/{4}/{5}".format(resultfolder,extensionstr,tracestr,parameterstr,infermode,algostr)
    if not os.path.exists(tresultfolder):
       os.makedirs(tresultfolder)
    # Existing .hist result indices; -1 guarantees max()+1 starts at 0.
    indices = set([-1] + [int(myfile.replace(".hist","")) for myfile in myutil.listfiles(tresultfolder)])
    if complete and len(indices) >= completeupto + 1:
       return
    resultfile = "{0}/{1}.hist".format(tresultfolder,max(indices)+1)
    trunfolder = "{0}/{1}/{2}/{3}/{4}/{5}/{6}".format(runfolder,extensionstr,tracestr,parameterstr,infermode,algostr,max(indices)+1)
    if not os.path.exists(trunfolder):
       os.makedirs(trunfolder)
    tnewtracefolder = "{0}/{1}/{2}/{3}/{4}/{5}/{6}".format(newtracefolder,extensionstr,tracestr,parameterstr,infermode,algostr,max(indices)+1)
    if not os.path.exists(tnewtracefolder):
       os.makedirs(tnewtracefolder)
    snapshotfile = "{0}/infer.snapshot".format(tnewtracefolder)
    G = InputOutput.readGraphAndParams(path)
    # Fractions of the trace duration at which snapshots are observed.
    if infermode == "History":
       fracpoints = [(timefrac*index)/timecount for index in xrange(1,timecount+1)]
    elif infermode == "Spreader":
       fracpoints = [timefrac]
       for index in xrange(1,timecount):
           interval = (1.0-timefrac)/(timecount-1)
           fracpoints.append(timefrac+(index*interval))
    assert max(fracpoints) <= 1.00000001
    trace = InputOutput.readPlainTrace(tracefile,prob)
    maxtime = Trace.getMaxTraceTime(trace)
    obstimes = sorted(list(set([int(round(frac*maxtime)) for frac in fracpoints])))
    if 0 in obstimes:
       obstimes.remove(0)
    if len(obstimes) == 0:
       return
    print "observed times"
    print obstimes
    #if max(obstimes) < 3:
    #   return
    curstates = [Trace.trace2Snapshot(trace,obstime,smodel,G) for obstime in obstimes]
    InputOutput.writeSnapshot(curstates,infermode,inter,smodel,obstimes,snapshotfile)
    vararr = {"graphfile": path, "dist": prob, "runfolder":trunfolder}
    if inter == None:
       vararr["scoretimes"] = " ".join([str(time) for time in obstimes])
    # Python 2 idiom: copy each named local into vararr by exec.
    for item in ["snapshotfile","noise","noisetype","smodel","algo","inter","infermode","evol","printscore","resultfile","tracefile"]:
        exec('vararr["{0}"] = {0}'.format(item)) in locals(),globals()
    for param in algoparams.keys():
        vararr[param] = algoparams[param]
    configpath = "{0}/config_{1}_{2}_{3}_{4}_{5}-{6}.config".format(configfolder,extensionstr,infermode,algostr,parameterstr,tracestr,max(indices)+1)
    genHistoryConfigFile(configpath,vararr)
    code = "python HistoryInfer.py {0}".format(configpath)
    # Run locally and return; the PBS-submission path below is dead code.
    os.system(code)
    return
    if random.random() <= 0.5:
       pool = "pool2"
    else:
       pool = "pool2"
    pbsfilename = "{0}-{1}-{2}-{3}-{4}-{5}-{6}-{7}-{8}-{9}-{10}.pbs".format(realdata,evol,smodel,prob,inter,extensionstr,infermode,algostr,parameterstr,tracestr,max(indices)+1)
    Pbs.submitPbs(code,pbsfolder,pbsfilename,pool)
def assignParams():
    """Return the experiment parameter dictionary used by the main driver.

    Returns:
       paramdict: maps parameter names to values; string values are
          re-exported as module globals by the __main__ block.
    """
    return {
        # Folder-name prefixes.
        "newtracepref": "tracesnapshots",
        "tracepref": "traces",
        "configpref": "histconfig",
        "runpref": "run",
        "resultpref": "result",
        "resultfolderpref": ".",
        "runfolderpref": ".",
        "tracefolderpref": ".",
        "newtracefolderpref": ".",
        "graphfolderpref": ".",
        "configfolderpref": ".",
        "pbsfolder": "pbsfolder",
        # Experiment selection.
        "realdata": "syn",
        "evol": "static",
        "infermode": "History",  # alternative: "Spreader"
        "smodel": "si",  # alternatives: "seir", "sir", "sis", "samd", "mamd"
        "prob": "dis",  # alternative: "cont"
        "inter": None,  # alternative: "bound"
        # Observation-time grids.
        "startcounts": [1, 2, 3, 5, 7, 10],
        "maxtimefracs": [0.15, 0.26, 0.34, 0.4, 0.51, 0.6, 0.76, 0.9],
        "mintimefracs": [0.1, 0.15, 0.2, 0.26, 0.34, 0.4, 0.45, 0.51, 0.55, 0.6, 0.65, 0.7, 0.76, 0.8, 0.85, 0.9, 0.95],
        "timecounts": [3, 5],
        # Noise settings.
        "noises": [0.0],
        "noisetype": "StateChange",  # alternative: Normal noise
        "filesamplecount": 10,  # for synthetic data
        # Completion bookkeeping.
        "complete": True,
        "completeupto": 2,
        "fraccons": {Trace.INFECTED: {"min": 0.00001, "max": 1.01}},
        "printscore": None,  # e.g. "KenTauCorrelation", "Hausdorff"
    }
if __name__ == "__main__":
    # Driver: expand every (graph, trace, algorithm, observation-time)
    # combination and dispatch each to runMyAlgos / runOtherAlgos.
    paramdict = assignParams()
    # Python 2 idiom: promote every paramdict entry to a module global.
    for var in paramdict.keys():
        if type(paramdict[var]) == type(""):
           exec('{0}="{1}"'.format(var,paramdict[var]))
        else:
           exec('{0}={1}'.format(var,paramdict[var]))
    graphtraceinput = [graphfolderpref,tracepref,tracefolderpref,newtracepref,newtracefolderpref]
    runresultinput = [runpref,runfolderpref,resultpref,resultfolderpref]
    configinput = [configpref,configfolderpref,pbsfolder]
    sideinput = [smodel,prob,evol,realdata,inter]
    (graphfolder,tracefolder,newtracefolder,configfolder,resultfolder,runfolder) = genMainFolders(graphtraceinput,runresultinput,configinput,sideinput)
    # Shuffle so repeated partial runs cover different combinations first.
    mainparamlist = list(itertools.product(startcounts,noises))
    random.shuffle(mainparamlist)
    for startcount,noise in mainparamlist:
        samplenoisestr = "{0}-{1}-{2}".format(noisetype,noise,0)
        graphinput = [graphfolder,realdata,evol,prob,smodel,filesamplecount,inter]
        traceinput = [tracefolder,fraccons,samplenoisestr,startcount]
        path2info = returnPathInfo(graphinput,traceinput)
        algoblocks = getAlgoBlocks(prob,inter,infermode,smodel)
        # Spreader mode sweeps minimum fractions; History, maximum ones.
        if infermode == "Spreader":
           paramlist = list(itertools.product(algoblocks,mintimefracs,timecounts))
        elif infermode == "History":
           paramlist = list(itertools.product(algoblocks,maxtimefracs,timecounts))
        for path in path2info.keys():
            Gname,tracefile = path2info[path]
            tracestr = "-".join(tracefile.split("/")[-3:])
            for algoblock,timefrac,timecount in paramlist:
                algo,algoparams = algoblock
                # Baseline algorithms only support single-snapshot Spreader runs.
                if algo in ["NetSleuth","RumorCentrality","KEffectors"]:
                   assert infermode == "Spreader" and prob == "dis"
                   if timecount > 1:
                      continue
                   algofields = [algo,algoparams,runfolder]
                   tracefields = [noise,noisetype,timefrac,timecount,realdata,tracefile,tracestr,newtracefolder,tracefolder]
                   otherfields = [complete,completeupto,path,graphfolder,configfolder,resultfolder]
                   runOtherAlgos(algofields,tracefields,otherfields)
                else:
                   algofields = [algo,algoparams,infermode,inter,runfolder]
                   tracefields = [noise,noisetype,timefrac,timecount,prob,smodel,evol,realdata,tracefile,tracestr,newtracefolder,tracefolder]
                   otherfields = [complete,completeupto,path,printscore,graphfolder,configfolder,resultfolder]
                   runMyAlgos(algofields,tracefields,otherfields)
| [
"70752445+seferlab@users.noreply.github.com"
] | 70752445+seferlab@users.noreply.github.com |
c09a2bf2f183c1f2bd2e9836e0243db303ffa8f5 | a1b44fe98a41e945d3d4c69b84236c63e0b1d0b2 | /scenes/InstructionScene.py | c0d5d086cce1066b0ba2442cbf8060b9a6765d4e | [] | no_license | KornSiwat/RocketTyper | 61f7f345ce8688ec7e86f8fa90cff3672d84980f | f6a86f739be8093de9b0bdfedd1d2a49d66de06d | refs/heads/master | 2023-08-02T05:37:30.541654 | 2019-06-08T12:06:28 | 2019-06-08T12:06:28 | 178,116,841 | 0 | 0 | null | 2023-07-21T23:11:41 | 2019-03-28T03:10:57 | Python | UTF-8 | Python | false | false | 1,745 | py | import arcade
import sys
sys.path.append('..')
from models.Route import Route
from Config import Config
class InstructionScene():
    """How-to-play screen: gray panel with the instruction image and a
    header sprite; ESC returns to the menu via the router."""
    def __init__(self, router):
        self.width = Config.SCREEN_WIDTH
        self.height = Config.SCREEN_HEIGHT
        self.setup_assets()
        self.setup_header()
        self.setup_background()
        # Router used to switch back to other scenes.
        self.router = router
    def setup_assets(self):
        """Load all textures and the header sprite."""
        self.background = arcade.load_texture("images/background.png")
        self.gray_background = arcade.load_texture("images/result.png")
        self.header = arcade.Sprite()
        self.header.append_texture(arcade.load_texture('images/howtoplay.png'))
        self.instruction_pic = arcade.load_texture('images/instruction.png')
    def setup_header(self):
        """Center the header sprite above the panel."""
        self.header.center_x = self.width//2
        self.header.center_y = self.height//2 + 205
        self.header.set_texture(0)
    def setup_background(self):
        """Panel size: screen minus fixed margins."""
        self.background_width = self.width-250
        self.background_height = self.height-150
    def draw(self):
        self.draw_background()
        self.draw_header()
    def draw_texture(self, width, height, texture):
        """Draw *texture* centered on screen at the given size."""
        arcade.draw_texture_rectangle(self.width//2 , self.height//2, width=width, height=height,texture=texture)
    def draw_background(self):
        # Gray panel first, instruction image on top of it.
        self.draw_texture(width=self.background_width, height=self.background_height, texture=self.gray_background)
        self.draw_texture(width=self.background_width, height=self.background_height, texture=self.instruction_pic)
    def draw_header(self):
        self.header.draw()
    def update(self):
        # Static scene: nothing to animate.
        pass
    def on_key_press(self, key):
        """ESC leaves the instruction screen back to the menu."""
        if key == arcade.key.ESCAPE:
            self.router.change_route(Route.menu)
"ponpued@gmail.com"
] | ponpued@gmail.com |
67098321aad7457be0c6a6e2242e8815aa9205e0 | 9ac7fdc0512a3c971ef06e21cd5d359fcddadce4 | /gymkit/agent/gym_agent.py | af7324a3aabf10f67297926eba46291a9d9f7fe5 | [
"Apache-2.0"
] | permissive | marevol/gym-starter-kit | e6cdad6a7aedd2723d7e7ed65e31fbcb7ac3586b | 9c8298a035fd5cfe86fe5d62ebe0b82d58c2d768 | refs/heads/master | 2021-01-12T06:10:09.435163 | 2016-12-29T12:10:09 | 2016-12-29T12:10:09 | 77,323,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
class GymKitAgent(object):
    """Baseline agent for a Gym environment: ignores observations and
    always samples a random action. Usable as a context manager."""

    def __init__(self, env):
        # Keep the environment around for action-space sampling.
        self.env = env

    def act(self, observation):
        """Return a random action valid for the environment."""
        return self.env.action_space.sample()

    def fit(self, observation, reward, done, info):
        """Learning hook; the random agent has nothing to learn."""
        pass

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        pass
| [
"shinsuke@yahoo.co.jp"
] | shinsuke@yahoo.co.jp |
7320cdd4067aed5f5646095d06c93caac61698dc | 0b373e56daadc88ce20007fecd204f2f7ffaa736 | /chalicelib/web_scraper/catbirdnyc.py | 5c54b459ffe4b2c7adebd958024fc689333e1311 | [] | no_license | Auratenewyork/shipments | 3a9ced761b5068916bc9eb0497122a49966ff094 | 087ca5f425611366c58433010dc5a17048839caf | refs/heads/master | 2023-02-21T08:11:30.084649 | 2022-04-13T12:22:56 | 2022-04-13T12:22:56 | 242,126,702 | 0 | 1 | null | 2023-02-16T00:40:18 | 2020-02-21T11:46:27 | Python | UTF-8 | Python | false | false | 1,493 | py | import pickle
import requests
import boto3
from . import BUCKET
from lxml import html
s3 = boto3.client('s3', region_name='us-east-2')
def run():
    """Scrape every jewelry product page from catbirdnyc.com and store
    the pickled product list in the S3 bucket under key 'catbirdnyc'.

    Raises Exception if pagination runs past 50 pages (safety stop).
    """

    def get_value(element, path):
        # First XPath match under *element*, stripped; None when absent.
        # Bug fix: the original ignored *element* and read the enclosing
        # loop variable `item` instead (it also redefined this helper on
        # every while-iteration; it is now hoisted).
        val = element.xpath(path)
        if val:
            return val[0].strip()

    page = 1
    result = []
    while True:
        url = f'https://www.catbirdnyc.com/jewelry-catbird.html?p={page}'
        response = requests.get(url)
        tree = html.fromstring(response.content)
        products_grid = tree.xpath('//div[@class="products wrapper grid products-grid"]')
        if products_grid:
            products_grid = products_grid[0]
        else:
            # No product grid: we paged past the last catalog page.
            break

        products = products_grid.xpath('.//div[@class="product-item-info"]')

        for item in products:
            p = get_value(item, './/span[@class="price"]/text()')
            res = dict(
                # NOTE(review): p is None when no price node matches and
                # p[1:] would then raise TypeError — assumes every product
                # card carries a "$<amount>" price; confirm.
                price=p[1:],
                designer=get_value(item, './/div[@class="product-designer"]/text()'),
                image=get_value(item, './/img[@class="product-image-photo"]/@src'),
                link=get_value(item, './/a[@class="product-item-link"]/@href'),
                name=get_value(item, './/a[@class="product-item-link"]/text()'),
            )
            result.append(res)
        page += 1
        if page > 50:
            raise Exception("Something went wrong!")

    s3.put_object(Body=pickle.dumps(result), Bucket=BUCKET, Key='catbirdnyc')
if __name__ == "__main__":
run() | [
"roman.borodinov@uadevelopers.com"
] | roman.borodinov@uadevelopers.com |
a5ed20c3d34592d74fe5054fcf4d8d969d95d783 | bce91ed5baba4b27e14bf1e90146febffbb5d590 | /Experimental/yodel-master/demo/biquad_filter_design.py | 46f9efc622e7b22ad0af0efaa070037aba3d4f39 | [
"MIT"
] | permissive | jamin-hu/SoundsGood | c5b8848edf5d7bdaf718d94071b587db9a2b37fc | 399446597b8005e8d2e8f11bd43f579b84d8d123 | refs/heads/master | 2021-01-01T12:35:31.133133 | 2020-04-30T20:14:32 | 2020-04-30T20:14:32 | 239,282,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,631 | py | import yodel.filter
import yodel.analysis
import yodel.complex as dcx
import yodel.conversion as dcv
import math
import matplotlib.pyplot as plt
from matplotlib.widgets import RadioButtons, Slider
def impulse_response(bq, size):
    """Return the length-*size* impulse response of the filter *bq*."""
    stimulus = [0] * size
    stimulus[0] = 1  # unit impulse
    output = [0] * size
    bq.process(stimulus, output)
    return output
def frequency_response(response):
    """FFT of a time-domain signal; returns (real_part, imag_part) lists."""
    size = len(response)
    real_part = [0] * size
    imag_part = [0] * size
    yodel.analysis.FFT(size).forward(response, real_part, imag_part)
    return real_part, imag_part
def amplitude_response(spec_real, spec_imag, db=True):
    """Magnitude of a complex spectrum, in dB when *db* is True.

    Consistency fix: use the module aliases (dcx/dcv) declared at the
    top of this file; `yodel.complex.modulus` only worked incidentally
    through the `yodel` package attribute set by the aliased imports.
    """
    size = len(spec_real)
    amp = [0] * size
    for i in range(0, size):
        amp[i] = dcx.modulus(spec_real[i], spec_imag[i])
        if db:
            amp[i] = dcv.lin2db(amp[i])
    return amp
def phase_response(spec_real, spec_imag, degrees=True):
    """Phase of a complex spectrum, in degrees when *degrees* is True.

    Consistency fix: use the module alias (dcx) declared at the top of
    this file instead of the incidental `yodel.complex` dotted access.
    """
    size = len(spec_real)
    pha = [0] * size
    for i in range(0, size):
        pha[i] = dcx.phase(spec_real[i], spec_imag[i])
        if degrees:
            pha[i] = (pha[i] * 180.0 / math.pi)
    return pha
class BiquadSelector:
def __init__(self):
self._bq_fs = 48000
self._bq_fc = self._bq_fs/4
self._bq_q = 1.0 / math.sqrt(2.0)
self._bq_dbgain = 0
self._bq_size = 512
self._bq_plot_db = True
self._nfft = int(self._bq_size / 2)
self._impulse_response = [0] * self._bq_size
self._freq_response_real = [0] * self._bq_size
self._freq_response_imag = [0] * self._bq_size
self._response = [0] * self._bq_size
self._bq_filter = yodel.filter.Biquad()
self._biquad_type = 'Low Pass'
self.update_biquad()
self._create_plot()
self._create_plot_controls()
def _create_plot(self):
self._fig, self._ax = plt.subplots()
self._ax.set_title('Biquad Filter Design')
self._ax.grid()
plt.subplots_adjust(bottom=0.3)
self.update_biquad_response()
self._x_axis = [i*(self._bq_fs/2/self._nfft) for i in range(0, self._nfft)]
self._y_axis = self._response[0:self._nfft]
if self._bq_plot_db:
self._l_bot, = self._ax.plot(self._x_axis, [-100] * self._nfft, 'k')
self._l_top, = self._ax.plot(self._x_axis, [0] * self._nfft, 'k')
else:
self._l_bot, = self._ax.plot(self._x_axis, [- 180] * self._nfft, 'k')
self._l_top, = self._ax.plot(self._x_axis, [180] * self._nfft, 'k')
if self._bq_plot_db:
self._l_fc, = self._ax.plot([self._bq_fc] * 100, [i for i in range(-100, 0)], 'k')
else:
self._l_fc, = self._ax.plot([self._bq_fc] * int(2.0 * self._nfft), [(180*i/self._nfft) for i in range(-self._nfft, self._nfft)], 'k')
self._l_fr, = self._ax.plot(self._x_axis, self._y_axis, 'b')
self._rescale_plot()
def _create_plot_controls(self):
self._dbrax = plt.axes([0.12, 0.05, 0.13, 0.10])
self._dbradio = RadioButtons(self._dbrax, ('Amplitude', 'Phase'))
self._dbradio.on_clicked(self.set_plot_style)
self._rax = plt.axes([0.27, 0.03, 0.15, 0.20])
self._radio = RadioButtons(self._rax, ('Low Pass', 'High Pass', 'Band Pass', 'All Pass', 'Notch', 'Peak', 'Low Shelf', 'High Shelf'))
self._radio.on_clicked(self.set_biquad_type)
self._sfax = plt.axes([0.6, 0.19, 0.2, 0.03])
self._sqax = plt.axes([0.6, 0.12, 0.2, 0.03])
self._sdbax = plt.axes([0.6, 0.05, 0.2, 0.03])
self._fcslider = Slider(self._sfax, 'Cut-off frequency', 0, self._bq_fs/2, valinit = self._bq_fc)
self._qslider = Slider(self._sqax, 'Q factor', 0.01, 10.0, valinit = self._bq_q)
self._dbslider = Slider(self._sdbax, 'dB gain', -20.0, 20.0, valinit = self._bq_dbgain)
self._fcslider.on_changed(self.set_biquad_frequency_cutoff)
self._qslider.on_changed(self.set_biquad_q_factor)
self._dbslider.on_changed(self.set_biquad_dbgain)
def update_biquad(self):
if self._biquad_type == 'Low Pass':
self._bq_filter.low_pass(self._bq_fs, self._bq_fc, self._bq_q)
elif self._biquad_type == 'High Pass':
self._bq_filter.high_pass(self._bq_fs, self._bq_fc, self._bq_q)
elif self._biquad_type == 'Band Pass':
self._bq_filter.band_pass(self._bq_fs, self._bq_fc, self._bq_q)
elif self._biquad_type == 'All Pass':
self._bq_filter.all_pass(self._bq_fs, self._bq_fc, self._bq_q)
elif self._biquad_type == 'Notch':
self._bq_filter.notch(self._bq_fs, self._bq_fc, self._bq_q)
elif self._biquad_type == 'Peak':
self._bq_filter.peak(self._bq_fs, self._bq_fc, self._bq_q, self._bq_dbgain)
elif self._biquad_type == 'Low Shelf':
self._bq_filter.low_shelf(self._bq_fs, self._bq_fc, self._bq_q, self._bq_dbgain)
elif self._biquad_type == 'High Shelf':
self._bq_filter.high_shelf(self._bq_fs, self._bq_fc, self._bq_q, self._bq_dbgain)
def set_biquad_type(self, biquad_type):
self._biquad_type = biquad_type
self.update_biquad()
self._plot_frequency_response(False)
self._rescale_plot()
def set_biquad_frequency_cutoff(self, fc):
self._bq_fc = fc
self.update_biquad()
self._plot_frequency_response()
def set_biquad_q_factor(self, q):
self._bq_q = q
self.update_biquad()
self._plot_frequency_response()
def set_biquad_dbgain(self, dbgain):
self._bq_dbgain = dbgain
self.update_biquad()
self._plot_frequency_response()
def update_biquad_response(self):
self._impulse_response = impulse_response(self._bq_filter, self._bq_size)
self._freq_response_real, self._freq_response_imag = frequency_response(self._impulse_response)
if self._bq_plot_db:
self._response = amplitude_response(self._freq_response_real, self._freq_response_imag)
else:
self._response = phase_response(self._freq_response_real, self._freq_response_imag)
def set_plot_style(self, style):
if style == 'Phase':
self._bq_plot_db = False
elif style == 'Amplitude':
self._bq_plot_db = True
self._plot_range_limits(False)
self._plot_frequency_response(False)
self._rescale_plot()
def _plot_frequency_response(self, redraw = True):
self.update_biquad_response()
self._y_axis = self._response[0:self._nfft]
if self._bq_plot_db:
self._l_fc.set_xdata([self._bq_fc] * 100)
self._l_fc.set_ydata([i for i in range(-100, 0)])
else:
self._l_fc.set_xdata([self._bq_fc] * int(2.0 * self._nfft))
self._l_fc.set_ydata([(180*i/self._nfft) for i in range(-self._nfft, self._nfft)])
self._l_fr.set_ydata(self._y_axis)
if redraw:
plt.draw()
def _plot_range_limits(self, redraw = True):
if self._bq_plot_db:
self._l_bot.set_ydata([-100] * self._nfft)
self._l_top.set_ydata([0] * self._nfft)
else:
self._l_bot.set_ydata([- 180] * self._nfft)
self._l_top.set_ydata([180] * self._nfft)
if redraw:
plt.draw()
def _rescale_plot(self):
if self._bq_plot_db:
self._ax.set_ylim(-110, 20)
else:
self._ax.set_ylim(- 200, 200)
plt.draw()
bqs = BiquadSelector()
plt.show()
| [
"38098193+jamin-hu@users.noreply.github.com"
] | 38098193+jamin-hu@users.noreply.github.com |
c10bc0332037dd5d2303554b6314fb29c8dac6ab | 0a659109f93ac79f192c910ac6d8815d7c853ddb | /geoliberty/models.py | 21fd631500724036ea4016ebe0d6a0d5d9bbcae4 | [] | no_license | paulovianna/cadunico-pbf | dacf48f15e0e1faff052894b5b8810a06fdeafe7 | a1465dd4f550000c6c4dd86b59677c1fb7627add | refs/heads/master | 2021-03-22T04:44:31.208710 | 2017-06-02T17:09:57 | 2017-06-02T17:09:57 | 42,933,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,760 | py | # -*- coding: utf-8 -*-
from django.contrib.gis.db import models
# Classes Objetos Geograficos
class Mpoly(models.Model):
mpoly = models.MultiPolygonField('Multi Poligono')
class Meta:
abstract = True
class Poly(models.Model):
poly = models.PolygonField('Poligono')
class Meta:
abstract = True
class Linha(models.Model):
linha = models.LineStringField('Linha')
class Meta:
abstract = True
class Ponto(models.Model):
ponto = models.PointField('Ponto')
class Meta:
abstract = True
# Classes Banco de Dados Base
class Pais(Mpoly):
pais = models.CharField('País',max_length=64)
sigla = models.CharField(max_length=4)
class Meta:
verbose_name = 'País'
verbose_name_plural = 'Países'
def __unicode__(self):
return self.pais
def get_absolute_url(self):
return 'pais/%i/' % self.id
def get_name(self):
return self.pais
class Regiao(Mpoly):
pais = models.ForeignKey(Pais,verbose_name='País')
regiao = models.CharField('Região',max_length=64)
class Meta:
verbose_name = 'Região'
verbose_name_plural = 'Regiões'
def __unicode__(self):
return self.regiao
def get_absolute_url(self):
return 'regiao/%i/' % self.id
def get_name(self):
return self.regiao
class Uf(Mpoly):
regiao = models.ForeignKey(Regiao,verbose_name='Região')
uf = models.CharField('Unidade Federativa',max_length=64)
class Meta:
verbose_name = 'Unidades Federativa'
verbose_name_plural = 'Unidades Federativas'
def __unicode__(self):
return self.uf
def get_absolute_url(self):
return 'estado/%i/' % self.id
def get_name(self):
return self.uf
class MesoRegiao(Mpoly):
uf = models.ForeignKey(Uf,verbose_name='Unidade Federativa')
mesoRegiao = models.CharField('Mesorregião',max_length=64)
class Meta:
verbose_name = 'Mesorregião'
verbose_name_plural = 'Mesorregiões'
def __unicode__(self):
return self.mesoRegiao
def get_absolute_url(self):
return 'mesorregiao/%i/' % self.id
def get_name(self):
return self.mesoRegiao
class MicroRegiao(Mpoly):
mesoRegiao = models.ForeignKey(MesoRegiao,verbose_name='Mesorregião')
microRegiao = models.CharField('Microrregião',max_length=64)
class Meta:
verbose_name = 'Microrregião'
verbose_name_plural = 'Microrregiões'
def __unicode__(self):
return self.microRegiao
def get_absolute_url(self):
return 'microrregiao/%i/' % self.id
def get_name(self):
return self.microRegiao
class Municipio(Mpoly):
microRegiao = models.ForeignKey(MicroRegiao,verbose_name='Microrregião')
municipio = models.CharField('Município',max_length=64)
class Meta:
verbose_name = 'Município'
verbose_name_plural = 'Municípios'
ordering = ['municipio']
def __unicode__(self):
return self.municipio
def get_absolute_url(self):
return 'municipio/%i/' % self.id
def get_name(self):
return self.municipio
class RegiaoGeoPolitica(models.Model):
municipios = models.ManyToManyField(Municipio,verbose_name='Município')
regiaoGeoPolitica = models.CharField('Região Geopolítica',max_length=64)
class Meta:
verbose_name = 'Região Geopolítica'
verbose_name_plural = 'Regiões GeoPolíticas'
def __unicode__(self):
return self.regiaoGeoPolitica
def get_absolute_url(self):
return 'geopolitica/%i/' % self.id
# Classes Base para Pessoas
class Pessoa(models.Model):
denominacao = models.CharField('Denominação',max_length=64)
class Meta:
abstract = True
class PessoaFisica(Pessoa):
OPCOES_SEXO = (
('Masculino', 'Masculino'),
('Feminino', 'Feminino'),
)
cpf = models.CharField('CPF',max_length=32)
dataNascimento = models.DateField('Data de Nascimento')
sexo = models.CharField('Sexo',max_length=9,choices=OPCOES_SEXO)
class Meta:
abstract = True
def __unicode__(self):
return self.denominacao
def idade(self):
pass
class PessoaJuridica(Pessoa):
cnpj = models.CharField('CNPJ',max_length=32)
class Meta:
abstract = True | [
"paulo@innovareweb.com.br"
] | paulo@innovareweb.com.br |
940706ba72cd5a665d935be61a2efa9dee5e3d86 | 39544f56b5bd5a0400436f34808acb8d14f65b91 | /Reports/PS_Survey_Report.py | 60c65722b1428a2845f977e5a3ce13abe5ad6ef0 | [] | no_license | pbaranyai/GIS_python | c841b40ea7b4d7d32edf1a9ecd8ea67a88899441 | c72fe6c8d0b37910898eda162a8a3def7bff5dff | refs/heads/master | 2023-08-31T15:02:49.789076 | 2023-08-30T17:08:46 | 2023-08-30T17:08:46 | 217,299,224 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,581 | py | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# PS_Survey_Report.py
# Created on: 2019-07-01
# Updated on 2021-09-21
# Works in ArcGIS Pro
#
# Author: Phil Baranyai/GIS Manager
#
# Description:
# Report any new surveys that haven't been acknoledged via the data.
#
# ---------------------------------------------------------------------------
# Import modules
import sys
import arcpy
import datetime
import os
import traceback
import logging
# Stop geoprocessing log history in metadata (stops program from filling up geoprocessing history in metadata with every run)
arcpy.SetLogHistory(False)
# Setup error logging (configure logging location, type, and filemode -- overwrite every run)
logfile = r"R:\\GIS\\GIS_LOGS\\911\\PS_SurveyReport.log"
logging.basicConfig(filename= logfile, filemode='w', level=logging.DEBUG)
# Setup Date (and day/time)
date = datetime.date.today().strftime("%Y%m%d")
Day = time.strftime("%m-%d-%Y", time.localtime())
Time = time.strftime("%I:%M:%S %p", time.localtime())
try:
# Write Logfile (define logfile write process, each step will append to the log, if program is started over, it will wipe the log and re-start fresh)
def write_log(text, file):
f = open(file, 'a') # 'a' will append to an existing file if it exists
f.write("{}\n".format(text)) # write the text to the logfile and move to next line
return
except:
print ("\n Unable to write log file")
write_log("Unable to write log file", logfile)
sys.exit ()
#Database Connection Folder
Database_Connections = r"\\CCFILE\\anybody\\GIS\\ArcAutomations\\Database_Connections"
#Database variables:
AGOL_EDIT_PUB_PS = Database_Connections + "\\agol_edit_pub@ccsde.sde"
PS_Report_Fldr = r"R:\\GIS\\Public Safety\\Reports"
# Local variables:
NEW_ADDR_REQUESTS = AGOL_EDIT_PUB_PS + "\\CCSDE.AGOL_EDIT_PUB.ADDRESS_NEW_REQUESTS_AGOL_EDIT_PUB"
BUSINESS_DIRECTORY = AGOL_EDIT_PUB_PS + "\\CCSDE.AGOL_EDIT_PUB.BUSINESS_DIRECTORY_AGOL_EDIT_PUB"
BusinessDirectory_Excel = PS_Report_Fldr + "\\Business_Directory.xls"
start_time = time.time()
print ("============================================================================")
print ("Begining Public Safety Survey reports run: "+ str(Day) + " " + str(Time))
print ("Works in ArcGIS Pro")
print ("============================================================================")
write_log("============================================================================", logfile)
write_log("Begining Public Safety Survey reports run: "+ str(Day) + " " + str(Time), logfile)
write_log("Works in ArcGIS Pro", logfile)
write_log("============================================================================", logfile)
try:
# Clean up BusinessDirectory_Excel by deleting it (to stop excel sheets from filling up the folder, it will delete the old report before running a new one)
if arcpy.Exists(BusinessDirectory_Excel):
os.remove(BusinessDirectory_Excel)
print (BusinessDirectory_Excel + " found - table deleted")
write_log(BusinessDirectory_Excel + " found - table deleted", logfile)
except:
print ("\n Unable to delete Excel_reports, need to delete existing excel file manually and/or close program locking the file")
write_log("\n Unable to delete Excel_reports, need to delete existing excel file manually and/or close program locking the file", logfile)
logging.exception('Got exception on delete Excel_reports logged at:' + str(Day) + " " + str(Time))
raise
sys.exit ()
try:
# Make temp table from Business Directory FC in PublicSafety_TempFGDB (make a temporary table in memory to save space on the drive and the trouble of deleting it later, selection only the records that have the field CAD_UPDATED set to N)
BusinessDirectory_TBL = arcpy.MakeTableView_management(BUSINESS_DIRECTORY, "BusinessDirectory_TBL", "CAD_UPDATED = 'N'", "", "CREATED_DATE CREATED_DATE VISIBLE NONE;EDIT_DATE EDIT_DATE VISIBLE NONE;BUSINESS_NAME BUSINESS_NAME VISIBLE NONE;STREET_ADDRESS STREET_ADDRESS VISIBLE NONE;POST_OFFICE POST_OFFICE VISIBLE NONE;STATE STATE VISIBLE NONE;ZIPCODE ZIPCODE VISIBLE NONE;MUNICIPALITY MUNICIPALITY VISIBLE NONE;PHONE_NUMBER PHONE_NUMBER VISIBLE NONE;WEBSITE WEBSITE VISIBLE NONE;HOURS_OF_OPERATION HOURS_OF_OPERATION VISIBLE NONE;CONTACT_NAME CONTACT_NAME VISIBLE NONE;CONTACT_PHONE_NUMBER CONTACT_PHONE_NUMBER VISIBLE NONE;CAD_UPDATED CAD_UPDATED VISIBLE NONE;GIS_UPDATED GIS_UPDATED VISIBLE NONE;SHAPE SHAPE VISIBLE NONE;GlobalID GlobalID VISIBLE NONE;OBJECTID OBJECTID VISIBLE NONE;NEW_UPDATE NEW_UPDATE VISIBLE NONE;COMMENTS COMMENTS VISIBLE NONE;CONTACT_NAME_2 CONTACT_NAME_2 VISIBLE NONE;CONTACT_PHONE_2 CONTACT_PHONE_2 VISIBLE NONE;CONTACT_NAME_3 CONTACT_NAME_3 VISIBLE NONE;CONTACT_PHONE_3 CONTACT_PHONE_3 VISIBLE NONE;FAX_NUMBER FAX_NUMBER VISIBLE NONE")
except:
print ("\n Unable to make table view from Business Directory FC")
write_log("\n Unable to make table view from Business Directory FC", logfile)
logging.exception('Got exception on make table view from Business Directory FC logged at:' + str(Day) + " " + str(Time))
raise
sys.exit ()
try:
# Export table to excel (export temporary table out as excel workbook)
BusinessDirectory_Excel = arcpy.TableToExcel_conversion(BusinessDirectory_TBL, "R:/GIS/Public Safety/Reports/Business_Directory.xls", "ALIAS", "DESCRIPTION")
print ("\n Table exported out as R:/GIS/Public Safety/Reports/Business_Directory.xls")
write_log("\n Table exported out as R:/GIS/Public Safety/Reports/Business_Directory.xls",logfile)
except:
print ("\n Unable to export Business Directory table to excel in R:/GIS/Public Safety/Reports folder")
write_log("\n Unable to export Business Directory table to excel in R:/GIS/Public Safety/Reports folder", logfile)
logging.exception('Got exception on export Business Directory table to excel in R:/GIS/Public Safety/Reports folder logged at:' + str(Day) + " " + str(Time))
raise
sys.exit ()
Table_Views = [BusinessDirectory_TBL]
try:
# Delete views and temp files used in process (deletes temporary files from workspace)
for Views in Table_Views:
delete_input = Views
arcpy.Delete_management(delete_input, "")
print ("\n Table_Views deleted...")
write_log("\n Table_Views deleted...",logfile)
except:
print ("\n Unable to delete Table_Views")
write_log("\n Unable to delete Table_Views", logfile)
logging.exception('Got exception on delete Table_Views logged at:' + str(Day) + " " + str(Time))
raise
sys.exit ()
end_time = time.strftime("%I:%M:%S %p", time.localtime())
elapsed_time = time.time() - start_time
print ("==============================================================")
print ("\n Public Safety Survey reports COMPLETED: " + str(Day) + " " + str(end_time))
write_log("\n Public Safety Survey reports COMPLETED: " + str(Day) + " " + str(end_time), logfile)
print ("Elapsed time: " + time.strftime(" %H:%M:%S", time.gmtime(elapsed_time))+" // Program completed: " + str(Day) + " " + str(end_time))
write_log("Elapsed time: " + str (time.strftime(" %H:%M:%S", time.gmtime(elapsed_time))+" // Program completed: " + str(Day) + " " + str(end_time)), logfile)
print ("==============================================================")
write_log("\n +#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#", logfile)
del arcpy
sys.exit()
| [
"noreply@github.com"
] | pbaranyai.noreply@github.com |
c43f3fc077de3a17d699c5cb4c8416a0f23c88d5 | 315450354c6ddeda9269ffa4c96750783963d629 | /CMSSW_7_0_4/src/TotemDQMLite/GUI/scripts/.svn/text-base/reco_template_T1_cfg.py.svn-base | f60ffa0be9c54729e058dea21046f4747c66c5f4 | [] | no_license | elizamelo/CMSTOTEMSim | e5928d49edb32cbfeae0aedfcf7bd3131211627e | b415e0ff0dad101be5e5de1def59c5894d7ca3e8 | refs/heads/master | 2021-05-01T01:31:38.139992 | 2017-09-12T17:07:12 | 2017-09-12T17:07:12 | 76,041,270 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | import FWCore.ParameterSet.Config as cms
process = cms.Process("recoT1")
# Specify the maximum events to simulate
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
# Configure if you want to detail or simple log information.
# LoggerMax -- detail log info output including: errors.log, warnings.log, infos.log, debugs.log
# LoggerMin -- simple log info output to the standard output (e.g. screen)
process.load("Configuration.TotemCommon.LoggerMin_cfi")
# RawDataSource
process.load('TotemRawData.Readers.RawDataSource_cfi')
#process.source.fileNames.append('/project/gruppo1/totem/IP5_2015/Data/run_EVB-wn10_9261.000.vmeb')
process.source.fileNames.append('$input_file')
# Raw to digi conversion
process.load('TotemCondFormats.DAQInformation.DAQMappingSourceXML_cfi')
process.DAQMappingSourceXML.mappingFileNames.append('TotemCondFormats/DAQInformation/data/t1_all_run2.xml')
process.DAQMappingSourceXML.maskFileNames.append('TotemCondFormats/DAQInformation/test/T1DeadChannelsList_9255_onlyStrips.xml')
# Random number generator service
process.load("Configuration.TotemCommon.RandomNumbers_cfi")
################## STEP 1process.Raw2DigiProducer*process.TriggerBits
process.load('TotemRawData.RawToDigi.Raw2DigiProducer_cfi')
process.load("RecoTotemT1T2.T1MakeCluster.T1MakeCluster_cfi")
process.t1cluster.T1DigiVfatCollectionLabel = cms.InputTag("Raw2DigiProducer", "t1DataOutput")
process.t1cluster.ActivateDeadChannels = cms.bool(True)
process.load("RecoTotemT1T2.T1RecHit.T1RecHit_cfi")
process.t1rechit.T1DigiWireCollectionLabel = cms.InputTag("Raw2DigiProducer", "t1DataOutput")
process.load("RecoTotemT1T2.T1RoadProducer.T1RoadProducer_cfi")
process.t1roads.Alignment = cms.bool(True)
process.load("RecoTotemT1T2.T1TrackProducer2.T1TrackProducer2_cfi")
# Configure the output module (save the result in a file)
process.output = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('$output_file'),
outputCommands = cms.untracked.vstring('keep *')
)
process.path = cms.Path(
process.Raw2DigiProducer
*process.t1cluster
*process.t1rechit
# *process.t1roads
# *process.t1tracks2
)
process.outpath = cms.EndPath(process.output)
| [
"eliza@cern.ch"
] | eliza@cern.ch | |
88be688ae6f9601f9f00ee052d7c0ccd265e4c87 | 7e0c33140dca7f9e7e8cb23861c904d3561d713e | /example.py | 09f0506a4349b2776a685fe5848a0d57778589d0 | [] | no_license | christophajohns/buzzwrapper | be309eb8cbe44858a23f3cfdd398c5db152467ac | 537dede3b049fa2215f4034a35084ef25862ee96 | refs/heads/master | 2022-04-09T13:29:10.653318 | 2020-03-17T19:28:17 | 2020-03-17T19:28:17 | 165,684,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,234 | py | from buzzwrapper import Team, Monitor, Filter
import os
import itertools
import datetime
import pandas
import multiprocessing.dummy
def make_data_dir(source):
"""Make Directory for Output Data"""
data_dir = "data/" + source + "/"
if not os.path.exists(data_dir):
os.makedirs(data_dir)
return data_dir
def make_brand_dir(data_dir, corporate_brand):
"""Make Directory for Output Data"""
brand_dir = data_dir + corporate_brand + "/"
if not os.path.exists(brand_dir):
os.makedirs(brand_dir)
return brand_dir
def make_query_dir(brand_dir, short_query):
"""Make Directory for Output Data"""
return make_brand_dir(brand_dir, short_query)
def get_brands_dict(xl_file):
"""Makes list of dicts that holds data about brands like name, abbreviation and keywords"""
df = pandas.read_excel(xl_file)
corporate_brands = df.corporate_brand.unique()
brands = []
for corporate_brand in corporate_brands:
product_brands = df.loc[df.corporate_brand == corporate_brand]
pbrands = []
for prodcut_brand in product_brands.itertuples():
pbrand = {"name": prodcut_brand.brand, "keywords": prodcut_brand.keywords, "short": prodcut_brand.abbreviation}
pbrands.append(pbrand)
cb_dict = {"corporate_brand": corporate_brand, "brands": pbrands}
brands.append(cb_dict)
return brands
def print_progress(index, length, description, single_desc):
"""print progress and description (e.g. [1/10] sources: twitter)"""
print "[" + str(index+1) + "/" + str(length) + "] " + description + ": " + single_desc
def get_or_query(brands):
"""keyword query used for monitor, e.g. Abercrombie&Fitch OR Hollister OR Abercrombie Kids"""
or_query = "(" + ") OR (".join([brand["keywords"] for brand in brands]) + ")"
return or_query
def get_keyword_query(combination):
"""keyword query used for filter, e.g. Abercrombie&Fitch AND Hollister"""
keyword_query = "(" + ") AND (".join([brand["keywords"] for brand in combination]) + ")"
return keyword_query
def get_short_query(combination):
"""short version of the keyword query, e.g. A&F_HL for Abercrombie&Fitch AND Hollister"""
short_query = "_".join([brand["short"] for brand in combination])
return short_query
def chunks(l, n):
"""Return successive n-sized chunks from l as list."""
l_total = []
for i in range(0, len(l), n):
l_total.append(l[i:i + n])
return l_total
def get_combinations(brands):
combinations = []
for L in range(1, len(brands)+1):
for subset in itertools.combinations(brands, L):
combinations.append(list(subset))
return combinations
def get_monitor_data(combinations, title, source, keywords, data_dir, progress):
print progress + " monitors: Adding monitor for " + title + "..."
# add buzz monitor
new_monitor = Monitor(title=title, sources=source, languages=languages, keywords=keywords, start=start, end=end)
monitor_id = new_monitor.id
# start worker threads for parallel processing
pool = multiprocessing.dummy.Pool(processes=len(combinations))
res_list = []
# for each combination
for combi_index, combination in enumerate(combinations):
progress = "[" + str(combi_index+1) + "/" + str(len(combinations)) + "]"
# make keyword query
keyword_query = get_keyword_query(combination)
# make short query
short_query = get_short_query(combination)
# make a data directory
query_dir = make_query_dir(data_dir, short_query)
# save keyword query in txt-file
with open(query_dir + "keywords.txt", "w+") as txt_file: txt_file.write(keyword_query)
# get data for combination (parallel)
res = pool.apply_async(add_filter, (monitor_id, keyword_query, short_query, progress))
res_list.append((res, query_dir))
for (res, query_dir) in res_list:
filter = res.get()
get_filter_data(filter, query_dir)
pool.close()
pool.join()
# delete buzz monitor
new_monitor.delete()
def add_filter(monitor_id, keywords, title, progress):
print progress + " filters: Adding filter for " + title + "..."
# make filter
new_filter = Filter(monitor_id=monitor_id, title=title, keywords=keywords)
return new_filter
def get_filter_data(filter, query_dir):
# save volume_data to csv
print "Getting volume data for "+filter.title+"..."
filter.volume_to_csv(start=start, end=end, output_filename=query_dir+"volume_data.csv")
# save sentiment_data to csv
print "Getting sentiment data for "+filter.title+"..."
filter.sentiment_to_csv(start=start, end=end, output_filename=query_dir+"sentiment_data.csv")
# -- MAIN ----------------
if __name__ == '__main__':
# Start Time
start_time = datetime.datetime.now()
# number of usable free monitors
free_monitors = Team.get_free_monitors()
# Input
start = "2008-06-01"
end = "2018-06-01"
xl_file = "keywords.xlsx"
brands = get_brands_dict(xl_file)
sources = [
["twitter"],
# ["blogs"],
# ["forums"],
# ["reddit"],
# ["googleplus"],
# ["tumblr"],
# ["qq"],
# ["reviews"],
# ["news"],
# ["youtube"],
# ["blogs", "news"], # Control for Overlap between Blogs and News by having an additional monitor with sources "Blogs AND News"
]
languages = [
"en",
]
# for each source
for source_index, source in enumerate(sources):
# print progress and source (e.g. [1/10] sources: twitter)
print_progress(source_index, len(sources), "sources", " AND ".join(source))
# make data directory
data_dir = make_data_dir("&".join(source))
# start worker threads for parallel processing (max. free_monitors)
pool = multiprocessing.dummy.Pool(processes=free_monitors)
res_list = []
# for each corporate_brand
for cbrand_index, corporate_brand in enumerate(brands):
progress = "[" + str(cbrand_index+1) + "/" + str(len(brands)) + "]"
# make data directory
brand_dir = make_brand_dir(data_dir, corporate_brand["corporate_brand"])
# make OR query
pbrands = corporate_brand["brands"] # product_brands / brands
or_query = get_or_query(pbrands)
# save or query in txt-file
with open(brand_dir + "keywords.txt", "w+") as txt_file: txt_file.write(or_query)
# make combinations (due to filter limit of 50 per monitor we have to process the combinations in chunks of 50)
all_combinations = get_combinations(pbrands)
combi_chunks = chunks(all_combinations, 50)
# get data for every combination
for combinations in combi_chunks:
res = pool.apply_async(get_monitor_data, (combinations, corporate_brand["corporate_brand"], source, or_query, brand_dir, progress))
res_list.append(res)
for res in res_list:
res.get()
pool.close()
pool.join()
# Print Total Time elapsed
print datetime.datetime.now() - start_time
| [
"christophjohns@gmail.com"
] | christophjohns@gmail.com |
72a75369681d56095b6a30be9a7499409fa18cbe | f1eef9d69acf0689cac9df96decbd89676a479be | /searchAgents.py | 79c273bb5ff7c15ec820aeadcb6a8417e78fb831 | [] | no_license | enricooliva/AIND-Lab-Teaching-Pac-Man-to-Search | 41eb7cded694a4804db4a5ba4d87b013287b8a2f | 387466b17e8aef1de890aef10d8caea2036e749f | refs/heads/master | 2020-12-25T18:32:54.685194 | 2017-06-10T21:47:46 | 2017-06-10T21:47:46 | 93,966,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,883 | py | # searchAgents.py
# ---------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"""
This file contains all of the agents that can be selected to
control Pacman. To select an agent, use the '-p' option
when running pacman.py. Arguments can be passed to your agent
using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a searchFunction=depthFirstSearch
Commands to invoke other search strategies can be found in the
project description.
Please only change the parts of the file you are asked to.
Look for the lines that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the
project description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
import searchAgents
class GoWestAgent(Agent):
    "An agent that keeps heading West, stopping only when West is illegal."

    def getAction(self, state):
        "The agent receives a GameState (defined in pacman.py)."
        legal = state.getLegalPacmanActions()
        return Directions.WEST if Directions.WEST in legal else Directions.STOP
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
    """
    A general-purpose search agent: it looks up a search algorithm and a search
    problem type by name, computes a full plan once, then replays that plan one
    action at a time.

    By default it runs DFS on a PositionSearchProblem to reach location (1,1).
    Options for fn include:
      depthFirstSearch or dfs
      breadthFirstSearch or bfs

    Note: You should NOT change any code in SearchAgent
    """

    def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
        # Warning: some advanced Python magic is employed below to find the right
        # functions and problems by name via getattr.

        # Resolve the search algorithm; wire in a heuristic only if it accepts one.
        if fn not in dir(search):
            raise AttributeError(fn + ' is not a search function in search.py.')
        func = getattr(search, fn)
        takesHeuristic = 'heuristic' in func.func_code.co_varnames
        if not takesHeuristic:
            print('[SearchAgent] using function ' + fn)
            self.searchFunction = func
        else:
            # Look the heuristic up first in this module, then in search.py.
            if heuristic in dir(searchAgents):
                heur = getattr(searchAgents, heuristic)
            elif heuristic in dir(search):
                heur = getattr(search, heuristic)
            else:
                raise AttributeError(heuristic + ' is not a function in searchAgents.py or search.py.')
            print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic))
            # Bind the algorithm and heuristic together into a single callable.
            self.searchFunction = lambda searchProblem: func(searchProblem, heuristic=heur)

        # Resolve the search problem type by name (must end with 'Problem').
        if prob not in dir(searchAgents) or not prob.endswith('Problem'):
            raise AttributeError(prob + ' is not a search problem type in SearchAgents.py.')
        self.searchType = getattr(searchAgents, prob)
        print('[SearchAgent] using problem type ' + prob)

    def registerInitialState(self, state):
        """
        First sight of the board: build the search problem, run the search, and
        cache the resulting action sequence. All of the planning happens here.

        state: a GameState object (pacman.py)
        """
        if self.searchFunction is None:
            raise Exception("No search function provided for SearchAgent")
        t0 = time.time()
        searchProblem = self.searchType(state)  # Makes a new search problem
        self.actions = self.searchFunction(searchProblem)  # Find a path
        cost = searchProblem.getCostOfActions(self.actions)
        print('Path found with total cost of %d in %.1f seconds' % (cost, time.time() - t0))
        if hasattr(searchProblem, '_expanded'):
            print('Search nodes expanded: %d' % searchProblem._expanded)

    def getAction(self, state):
        """
        Replay the next action from the plan computed in registerInitialState,
        or Directions.STOP once the plan is exhausted.

        state: a GameState object (pacman.py)
        """
        # Lazily initialize the replay cursor on the first call.
        step = getattr(self, 'actionIndex', 0)
        self.actionIndex = step + 1
        if step < len(self.actions):
            return self.actions[step]
        return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
    """
    A search problem defines the state space, start state, goal test,
    successor function and cost function. This search problem can be
    used to find paths to a particular point on the pacman board.

    The state space consists of (x,y) positions in a pacman game.

    Note: this search problem is fully specified; you should NOT change it.
    """

    def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True):
        """
        Stores the start and goal.

        gameState: A GameState object (pacman.py)
        costFn: A function from a search state (tuple) to a non-negative number
        goal: A position in the gameState
        start: optional (x, y) start override; defaults to Pacman's position
        warn: print a warning when the layout does not look like a search maze
        """
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        if start is not None:
            self.startState = start
        self.goal = goal
        self.costFn = costFn
        if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
            # py3-style print() for consistency with the rest of the file
            print('Warning: this does not look like a regular search maze')

        # For display purposes
        self._visited, self._visitedlist, self._expanded = {}, [], 0

    def getStartState(self):
        """Returns the (x, y) start position."""
        return self.startState

    def isGoalState(self, state):
        isGoal = state == self.goal

        # For display purposes only
        if isGoal:
            self._visitedlist.append(state)
            import __main__
            if '_display' in dir(__main__):
                if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
                    __main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable

        return isGoal

    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

        For a given state, this returns a list of triples (successor, action,
        stepCost), where 'successor' is a successor to the current state,
        'action' is the action required to get there, and 'stepCost' is the
        incremental cost of expanding to that successor.
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x, y = state
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                nextState = (nextx, nexty)
                cost = self.costFn(nextState)
                successors.append((nextState, action, cost))

        # Bookkeeping for display purposes
        self._expanded += 1
        if state not in self._visited:
            self._visited[state] = True
            self._visitedlist.append(state)

        return successors

    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999
        """
        if actions is None:
            return 999999
        x, y = self.getStartState()
        cost = 0
        for action in actions:
            # Figure out the next state and see whether it's legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]:
                return 999999
            cost += self.costFn((x, y))
        return cost
class StayEastSearchAgent(SearchAgent):
    """
    Position-search agent whose cost function penalises the West side of the
    board: stepping into position (x, y) costs 1/2^x, so cheap paths stay East.
    """
    def __init__(self):
        self.searchFunction = search.uniformCostSearch
        def west_penalty(pos):
            # Exponentially cheaper the further East (larger x) we go.
            return .5 ** pos[0]
        self.searchType = lambda state: PositionSearchProblem(state, west_penalty)
class StayWestSearchAgent(SearchAgent):
    """
    Position-search agent whose cost function penalises the East side of the
    board: stepping into position (x, y) costs 2^x, so cheap paths stay West.
    """
    def __init__(self):
        self.searchFunction = search.uniformCostSearch
        def east_penalty(pos):
            # Exponentially more expensive the further East (larger x) we go.
            return 2 ** pos[0]
        self.searchType = lambda state: PositionSearchProblem(state, east_penalty)
def manhattanHeuristic(position, problem, info={}):
    """The Manhattan distance from *position* to the problem's goal."""
    goal = problem.goal
    return sum(abs(p - g) for p, g in zip(position, goal))
def euclideanHeuristic(position, problem, info={}):
    """The straight-line (Euclidean) distance from *position* to the goal."""
    dx = position[0] - problem.goal[0]
    dy = position[1] - problem.goal[1]
    return (dx * dx + dy * dy) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
class CornersProblem(search.SearchProblem):
    """
    This search problem finds paths through all four corners of a layout.

    A search state is a pair (position, visitedCorners): Pacman's (x, y)
    position plus the list of corners already reached on the path there.
    """

    def __init__(self, startingGameState):
        """
        Stores the walls, pacman's starting position and corners.
        """
        self.walls = startingGameState.getWalls()
        self.startingPosition = startingGameState.getPacmanPosition()
        top, right = self.walls.height - 2, self.walls.width - 2
        self.corners = ((1, 1), (1, top), (right, 1), (right, top))
        for corner in self.corners:
            if not startingGameState.hasFood(*corner):
                # py3-style print() for consistency with the rest of the file
                print('Warning: no food in corner ' + str(corner))
        self._expanded = 0  # Number of search nodes expanded

    def getStartState(self):
        """Start state: the starting position with no corners visited yet."""
        return (self.startingPosition, [])

    def isGoalState(self, state):
        """Goal when all four corners have been visited.

        Bug fix: the previous version appended to the state's visited-corner
        list in place; a goal test must not mutate the state it inspects.
        The returned value is unchanged.
        """
        node, visitedCorners = state[0], state[1]
        if node in self.corners:
            visited = set(visitedCorners)
            visited.add(node)
            return len(visited) == 4
        return False

    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

        For a given state, this returns a list of triples (successor, action,
        stepCost); a successor's corner list gains the new position when that
        position is a corner not yet visited.
        """
        successors = []
        x, y = state[0]
        visitedCorners = state[1]
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            # Copy the list so sibling successors never share corner lists.
            nextVisitedCorner = list(visitedCorners)
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                nextState = (nextx, nexty)
                if nextState in self.corners and nextState not in visitedCorners:
                    nextVisitedCorner.append(nextState)
                successors.append(((nextState, nextVisitedCorner), action, 1))

        self._expanded += 1  # Bookkeeping for display purposes
        return successors

    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999. This is implemented for you.
        """
        if actions is None:
            return 999999
        x, y = self.startingPosition
        for action in actions:
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]:
                return 999999
        return len(actions)
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem that you defined.

      state:   The current search state
               (a data structure you chose in your search problem)
      problem: The CornersProblem instance for this layout.

    This function should always return a number that is a lower bound
    on the shortest path from the state to a goal of the problem; i.e.
    it should be admissible (as well as consistent).

    Currently returns 0 — the trivial heuristic, which is always both
    admissible and consistent.
    """
    corners = problem.corners  # These are the corner coordinates
    walls = problem.walls      # These are the walls of the maze, as a Grid (game.py)
    # Removed a stray debug print() that cluttered search output.
    return 0  # Default to trivial solution
class AStarCornersAgent(SearchAgent):
    """A SearchAgent that runs A* on CornersProblem with cornersHeuristic."""
    def __init__(self):
        def astar_with_corners_heuristic(problem):
            return search.aStarSearch(problem, cornersHeuristic)
        self.searchFunction = astar_with_corners_heuristic
        self.searchType = CornersProblem
class FoodSearchProblem:
    """
    A search problem associated with finding a path that collects all of the
    food (dots) in a Pacman game.

    A search state is a tuple (pacmanPosition, foodGrid) where
      pacmanPosition: a tuple (x, y) of integers specifying Pacman's position
      foodGrid:       a Grid (see game.py) of True/False marking remaining food
    """
    def __init__(self, startingGameState):
        self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
        self.walls = startingGameState.getWalls()
        self.startingGameState = startingGameState
        self._expanded = 0          # number of search nodes expanded
        self.heuristicInfo = {}     # scratch space heuristics may reuse across calls

    def getStartState(self):
        return self.start

    def isGoalState(self, state):
        # Goal when no food remains anywhere on the grid.
        return state[1].count() == 0

    def getSuccessors(self, state):
        "Returns successor states, the actions they require, and a cost of 1."
        self._expanded += 1
        (x, y), foodGrid = state
        successors = []
        for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            dx, dy = Actions.directionToVector(direction)
            nx, ny = int(x + dx), int(y + dy)
            if self.walls[nx][ny]:
                continue
            remaining = foodGrid.copy()
            remaining[nx][ny] = False
            successors.append((((nx, ny), remaining), direction, 1))
        return successors

    def getCostOfActions(self, actions):
        """Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999"""
        x, y = self.getStartState()[0]
        cost = 0
        for action in actions:
            # figure out the next state and see whether it's legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]:
                return 999999
            cost += 1
        return cost
class AStarFoodSearchAgent(SearchAgent):
    """A SearchAgent for FoodSearchProblem using A* and your foodHeuristic."""
    def __init__(self):
        def astar_with_food_heuristic(problem):
            return search.aStarSearch(problem, foodHeuristic)
        self.searchFunction = astar_with_food_heuristic
        self.searchType = FoodSearchProblem
def foodHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.

    This heuristic must be consistent to ensure correctness: if A* ever finds a
    solution worse than uniform cost search does, the heuristic is not
    consistent (and probably not admissible).

    The state is a tuple (pacmanPosition, foodGrid) where foodGrid is a Grid
    (see game.py) of True/False; foodGrid.asList() yields food coordinates.
    The problem exposes e.g. problem.walls, and problem.heuristicInfo is a
    dict for caching values across calls.

    Current estimate: the number of food dots still on the grid (each dot
    needs at least one step to reach, so this is a lower bound).
    """
    position, foodGrid = state
    return len(foodGrid.asList())
class ClosestDotSearchAgent(SearchAgent):
    "Search for all food using a sequence of searches"

    def registerInitialState(self, state):
        """Greedily chain searches: walk to the closest dot until none remain."""
        self.actions = []
        currentState = state
        while currentState.getFood().count() > 0:
            nextPathSegment = self.findPathToClosestDot(currentState)  # The missing piece
            self.actions += nextPathSegment
            for action in nextPathSegment:
                legal = currentState.getLegalActions()
                if action not in legal:
                    t = (str(action), str(currentState))
                    # py3-style raise for consistency with the rest of the file
                    raise Exception('findPathToClosestDot returned an illegal move: %s!\n%s' % t)
                currentState = currentState.generateSuccessor(0, action)
        self.actionIndex = 0
        # py3-style print() for consistency with the rest of the file
        print('Path found with cost %d.' % len(self.actions))

    def findPathToClosestDot(self, gameState):
        "Returns a path (a list of actions) to the closest dot, starting from gameState"
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        util.raiseNotDefined()
class AnyFoodSearchProblem(PositionSearchProblem):
    """
    A search problem for finding a path to any food.

    This search problem is just like the PositionSearchProblem, but
    has a different goal test, which you need to fill in below. The
    state space and successor function do not need to be changed.

    The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
    inherits the methods of the PositionSearchProblem.

    You can use this search problem to help you fill in
    the findPathToClosestDot method.
    """
    def __init__(self, gameState):
        "Stores information from the gameState. You don't need to change this."
        # Store the food for later reference (the goal test should consult it)
        self.food = gameState.getFood()
        # Store info for the PositionSearchProblem (no need to change this)
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        # Every step costs 1 — any closest dot is as good as any other
        self.costFn = lambda x: 1
        # Display bookkeeping, same fields as PositionSearchProblem
        self._visited, self._visitedlist, self._expanded = {}, [], 0
    def isGoalState(self, state):
        """
        The state is Pacman's position. Fill this in with a goal test
        that will complete the problem definition.

        Exercise stub: raises until implemented (should return whether
        self.food has a dot at this position).
        """
        x,y = state
        "*** YOUR CODE HERE ***"
        util.raiseNotDefined()
##################
# Mini-contest 1 #
##################
class ApproximateSearchAgent(Agent):
    "Implement your contest entry here.  Change anything but the class name."
    def registerInitialState(self, state):
        "This method is called before any moves are made."
        # Exercise stub: intentionally empty until the contest entry is written.
        "*** YOUR CODE HERE ***"
    def getAction(self, state):
        """
        From game.py:
        The Agent will receive a GameState and must return an action from
        Directions.{North, South, East, West, Stop}

        Exercise stub: raises until implemented.
        """
        "*** YOUR CODE HERE ***"
        util.raiseNotDefined()
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search functions
    you have already built. The gameState can be any game state -- Pacman's
    position in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)

    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    # Bug fix: point1 is a tuple, so '... ' + point1 raised TypeError instead
    # of producing the intended assertion message (point2 already used str()).
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False)
    return len(search.bfs(prob))
"noreply@github.com"
] | enricooliva.noreply@github.com |
91183ac55065439e452437f781e63d0e4f189b7c | 99101ab8ddbd8dbf34d01bdfc4e15d56848ab6e7 | /chapter_4.py | 11ea288be64769686d5dbcfab994601569ad102b | [] | no_license | adambemski/book_think_in_python | 553a595c53475b3d927782f1776085df3a7bc3aa | 638225d26f1c6d8bae0ca755fcae1fd631685a92 | refs/heads/main | 2023-03-21T14:27:17.362329 | 2021-03-11T20:58:36 | 2021-03-11T20:58:36 | 345,181,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | import turtle
from official_solutions import polygon
"""
4.1
"""
class Artist:
    """Wraps a turtle to draw flower shapes (exercise 4.1)."""
    def __init__(self):
        # Each Artist draws with its own turtle instance.
        self.t = turtle.Turtle()
    def draw_flower(self, nr_of_petals, size):
        # Draws one petal from two arcs of radius size/2.
        # NOTE(review): nr_of_petals is currently unused, so only a single
        # petal is drawn -- presumably a loop over the petal count is missing.
        polygon.arc(self.t, r=(size / 2), angle=20)
        polygon.arc(self.t, r=(size / 2), angle=200)
if __name__ == '__main__':
    # Demo: draw a flower when run as a script.
    painter = Artist()
    painter.draw_flower(nr_of_petals=7, size=500)
| [
"adambemski@gmail.com"
] | adambemski@gmail.com |
f1dcb87b03d4e069b627083ae09884605b4e4f9d | f70983559e0aa1996e42ad18a1433a67e5a865b6 | /spark-bench.py | 020a1da8213d4a04a77159e6fd7d72fd5e564d5d | [
"MIT"
] | permissive | xmxm00/sparkbench | 44c10bb5482a5b519eb5a431bb96df8f4329f1b5 | 701097ea04093b920f65f4b008112753b8cd7460 | refs/heads/main | 2023-06-09T17:55:59.593301 | 2021-07-06T08:24:53 | 2021-07-06T08:24:53 | 381,943,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | import timeit
import argparse
from pyspark import SparkContext, SQLContext
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql.functions import from_json, desc, col
# Schema for treatment records: treatment name and its price.
txschema = StructType([
    StructField("treat_name", StringType(), False),
    StructField("price", LongType(), False)
])

# Schema for patient records: id, name, age and sex.
ptschema = StructType([
    StructField("uuid4", StringType(), False),
    StructField("patient_name", StringType(), False),
    StructField("age", IntegerType(), False),
    StructField("sex", StringType(), False)
])

# Schema for clinic records: name, address and phone number.
dcschema = StructType([
    StructField("clinic_name", StringType(), False),
    StructField("address", StringType(), False),
    StructField("telephone", StringType(), False)
])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-if", "--inputformat", help="input format", default="json")
args = parser.parse_args()
iteration = 1
dataType = args.inputformat
for i in range(0, iteration):
spark = SparkSession.builder.appName("Data Read Benchmark").getOrCreate()
df = spark.read.format(dataType).option("header", "true").load("./parsed/" + dataType)
spark.sparkContext.setLogLevel("ERROR")
start_time = timeit.default_timer()
print("Original Data")
df.show()
end_time = timeit.default_timer()
df = df.sort(desc("patient_name"), "date") # SQL을 이용한 정렬
print("Sort by patient_name and date")
df.show()
print("Groupby patient_name, sum of price")
df.groupBy("patient_name").sum("price").show() # filter
df.printSchema()
print(dataType.upper() + " : " + str(end_time - start_time))
# remove data
# add data
| [
"cmcm0012@gmail.com"
] | cmcm0012@gmail.com |
7d867585f3aa16e2239690844cb8f714a7bc9dea | f91e701de1749f21d3064e1c52037ca1716ee8b3 | /app/core/tests/test_models.py | 3197b44d2081df1d8bd846721e3dfdc1af0a2f68 | [
"MIT"
] | permissive | GrandArc/recipe-app-api | dbe3bc2dd5d74a246b9d8fbf809534f4109af897 | eed77bcd6900c5a7985a2aff7e24e30b03e9984f | refs/heads/main | 2023-04-25T03:45:06.814704 | 2021-05-11T10:53:47 | 2021-05-11T10:53:47 | 364,840,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,023 | py | from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@visualengineering.com', password='test123'):
    """Create a sample user for tests, with overridable credentials."""
    return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
    """Unit tests for the core user, tag and ingredient models."""

    def test_create_user_with_email_successful(self):
        """Test creating a new user with an email is successful"""
        email = 'test@visualengineering.com'
        password = 'TestPass123'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )

        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))

    def test_new_user_email_normalized(self):
        """Test the email for a new user is normalized"""
        # Domain part should be lower-cased by the custom manager.
        email = 'test@VISUALENGINEERING.COM'
        user = get_user_model().objects.create_user(email, 'blah123')

        self.assertEqual(user.email, email.lower())

    def test_new_user_invalid_email(self):
        """Test creating a user with no email raises error"""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')

    def test_create_new_superuser(self):
        """Test creating a new superuser"""
        user = get_user_model().objects.create_superuser(
            'test@visualengineering.com',
            'test123'
        )

        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)

    def test_tag_str(self):
        """Test the tag string representation"""
        tag = models.Tag.objects.create(
            user=sample_user(),
            name='Vegan'
        )

        self.assertEqual(str(tag), tag.name)

    def test_ingredient_str(self):
        """Test the ingredient string representation"""
        ingredient = models.Ingredient.objects.create(
            user=sample_user(),
            name='Cucumber'
        )

        self.assertEqual(str(ingredient), ingredient.name)
| [
"gr4nd_a@yahoo.com"
] | gr4nd_a@yahoo.com |
b4e990d93bfd4a2916201a75c53557884579150a | f62fd455e593a7ad203a5c268e23129473d968b6 | /python-watcherclient-1.0.0/watcherclient/osc/plugin.py | 5885de3b4873319fa9e70d64baded41315c15e6c | [
"Apache-2.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 1,854 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from osc_lib import utils
LOG = logging.getLogger(__name__)

# Defaults used by the OSC plugin registration below.
DEFAULT_API_VERSION = '1'
API_VERSION_OPTION = 'os_infra_optim_api_version'
API_NAME = 'infra-optim'
# Supported API versions mapped to their client implementation class paths.
API_VERSIONS = {
    '1': 'watcherclient.v1.client.Client',
}
def make_client(instance):
    """Returns an infra-optim service client.

    *instance* is the OSC ClientManager; the client class is selected from
    API_VERSIONS by the negotiated API version.
    """
    infraoptim_client_class = utils.get_client_class(
        API_NAME,
        instance._api_version[API_NAME],
        API_VERSIONS)
    LOG.debug('Instantiating infraoptim client: %s', infraoptim_client_class)

    client = infraoptim_client_class(
        os_watcher_api_version=instance._api_version[API_NAME],
        session=instance.session,
        region_name=instance._region_name,
    )

    return client
def build_option_parser(parser):
    """Hook to add global options.

    Registers --os-infra-optim-api-version (also read from the
    OS_INFRA_OPTIM_API_VERSION environment variable) and returns the parser.
    """
    parser.add_argument('--os-infra-optim-api-version',
                        metavar='<infra-optim-api-version>',
                        default=utils.env(
                            'OS_INFRA_OPTIM_API_VERSION',
                            default=DEFAULT_API_VERSION),
                        help=('Watcher API version, default=' +
                              DEFAULT_API_VERSION +
                              ' (Env: OS_INFRA_OPTIM_API_VERSION)'))
    return parser
| [
"gongwayne@hotmail.com"
] | gongwayne@hotmail.com |
eb8510ab7e413fd34a53def7260268846761e34a | bea5f18b3aa15bd3561abff49f672019a0f3cdbb | /v1/coins/views.py | 7c3c2272e4c9cabbdd22ecd4e33e075834ba87e4 | [] | no_license | collabro2017/ledgershield_backend | 7035ff076db05069f3e33ebc56cd57a8b588d05b | ee25b82c1f1aac8f182e9ff3aa857097f385747d | refs/heads/master | 2022-12-11T10:59:27.102957 | 2019-10-21T16:57:12 | 2019-10-21T16:57:12 | 144,188,378 | 0 | 0 | null | 2022-12-08T01:00:27 | 2018-08-09T18:16:11 | Python | UTF-8 | Python | false | false | 844 | py |
from rest_framework import generics, status, views
from rest_framework import permissions
from rest_framework.response import Response
from v1.coins.models import Coin, CoinPair
from v1.coins.serializers import CoinSerializer, CoinPairSerializer
class CoinListView(generics.ListAPIView):
    """Read-only list of all coins; publicly accessible."""
    serializer_class = CoinSerializer
    permission_classes = (permissions.AllowAny,)
    queryset = Coin.objects.all()

    # def get_queryset(self):
    #     return Coin.objects.filter(operational=True)
class CoinListViewBySource(generics.ListAPIView):
    """List coin pairs whose source coin matches the URL's `symbol` kwarg."""
    serializer_class = CoinPairSerializer
    permission_classes = (permissions.AllowAny,)
    queryset = CoinPair.objects.all()
    # lookup_field = 'symbol'

    def get_queryset(self):
        # Filter pairs by the source coin's symbol taken from the URL.
        symbol = self.kwargs.get('symbol')
        return CoinPair.objects.filter(source__symbol=symbol)
"geek96@outlook.com"
] | geek96@outlook.com |
3ef712c8922011991b8bc0405f58cf47b3092678 | 5c6507bee429f25101f8c040b24083314475c553 | /flask-fcgi/app.py | feaf6f4e1912bb391a1c43e337a69232527aeab7 | [
"MIT"
] | permissive | theodesp/examples | 39a8d1bfc28169510ec9dfe79c932fa2522a25f8 | e4a103c2ed7fac6a832e8a7da22eaec3a741b98e | refs/heads/master | 2020-04-02T06:43:57.570629 | 2018-10-22T15:37:09 | 2018-10-22T15:37:09 | 154,165,289 | 1 | 0 | MIT | 2018-10-22T15:11:19 | 2018-10-22T15:11:19 | null | UTF-8 | Python | false | false | 506 | py | import sys
import os
import logging
from flup.server.fcgi import WSGIServer
from flask import Flask
# Flask application instance served over FastCGI by main() below.
app = Flask(__name__)

@app.route('/')
def hello_world():
    """Root endpoint: returns a static greeting."""
    return 'Hello, World!'
def main(app):
    """Serve *app* over FastCGI on a unix socket until interrupted."""
    try:
        WSGIServer(app, bindAddress='./hello-world.sock', umask=0000).run()
    except (KeyboardInterrupt, SystemExit, SystemError):
        logging.info("Shutdown requested...exiting")
    except Exception:
        # Bug fix: `traceback` was referenced but never imported anywhere in
        # the module, so this handler itself raised NameError. Import locally.
        import traceback
        traceback.print_exc(file=sys.stdout)
if __name__ == '__main__':
    main(app)  # serve the Flask app over FastCGI
"theo.despoudis@teckro.com"
] | theo.despoudis@teckro.com |
b2531218e88feaad082a9cd50938c520c24ee64d | 83da02991d394f614cf0af4bfc48cd5983163b6c | /tienda_virtual/wsgi.py | 1ff7a7128cb759b3e487da7854886f5788e42523 | [] | no_license | anderson1198/contactos | dfe4a72320f3612319153bb0dc3630b3c93b9112 | 30601bc24e90942ede4c618f33d365fab81be957 | refs/heads/main | 2023-05-07T13:12:07.052759 | 2021-05-30T22:45:20 | 2021-05-30T22:45:20 | 366,799,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for tienda_virtual project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tienda_virtual.settings')

# WSGI entry point used by the application server.
application = get_wsgi_application()
| [
"andersonvid-478@gmail.com"
] | andersonvid-478@gmail.com |
7a9bb17945e25201112d76a1a968afc5169dc4f7 | f029279fb5562e2984adb2d3cafc407da0ace1ee | /pages/start_matter_main_page.py | 84ef0dba4a7f22d942bd945408dda76e2cdb6ee6 | [
"Apache-2.0"
] | permissive | akvinich/python_training | 2978cb06695153b215e06cbe90319de99b19f442 | 90f7504b5f885887e82da6ee17c9c36ba0a21e98 | refs/heads/master | 2020-03-20T19:28:06.820641 | 2018-06-17T15:32:42 | 2018-06-17T15:32:42 | 137,639,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | from selenium.webdriver.support.wait import WebDriverWait
class StartMatterMainPage:
    """Page object for the startmatter.com landing page.

    Exposes the contact-form controls (project type/kind radio buttons,
    text inputs and the submit button) as lazily-located properties.
    """

    def __init__(self, driver):
        self.driver = driver
        # Shared explicit-wait helper with a 10 second timeout.
        self.wait = WebDriverWait(driver, 10)

    def open(self):
        """Navigate to the landing page; returns self for chaining."""
        self.driver.get("https://startmatter.com/")
        return self

    # --- "project type" radio buttons (located via their <label for=...>) ---

    @property
    def mobileproject_radiobutton(self):
        return self.driver.find_element_by_css_selector("label[for=mobile]")

    @property
    def webproject_radiobutton(self):
        return self.driver.find_element_by_css_selector("label[for=web]")

    @property
    def otherproject_radiobutton(self):
        return self.driver.find_element_by_css_selector("label[for=other]")

    # --- "engagement kind" radio buttons ---

    @property
    def hourlykindproject_radiobutton(self):
        return self.driver.find_element_by_css_selector("label[for=hourly]")

    @property
    def hirekindproject_radiobutton(self):
        return self.driver.find_element_by_css_selector("label[for=hire]")

    @property
    def notsurekindproject_radiobutton(self):
        return self.driver.find_element_by_css_selector("label[for=not_sure]")

    @property
    def fixedkindproject_radiobutton(self):
        return self.driver.find_element_by_css_selector("label[for=fixed]")

    # --- contact form text inputs ---

    @property
    def name_input(self):
        return self.driver.find_element_by_id("name")

    @property
    def email_input(self):
        return self.driver.find_element_by_id("email")

    @property
    def country_input(self):
        return self.driver.find_element_by_id("country")

    @property
    def contact_input(self):
        return self.driver.find_element_by_id("contact")

    @property
    def sendrequest_button(self):
        return self.driver.find_element_by_css_selector("input.button-default.button-default_md_mobile")
| [
"vital1234mail@gmail.com"
] | vital1234mail@gmail.com |
b13aec275da4151009697accac9711e4949a705d | 76f549c062600a0c713315a9a7361ebb111178f8 | /Taller/Preguntas/PrimeraPregunta.py | 329f447c04d5a3317beecceb811b7df4bb35473d | [] | no_license | jorszs/AI | f612f26537fc3563dd2837c8f67801f091f7e3a0 | 05a839e6e115e7c6c9378e84d5ac7f50afe2870d | refs/heads/master | 2020-03-11T17:57:32.555978 | 2018-06-07T01:48:36 | 2018-06-07T01:48:36 | 130,162,558 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py |
import urllib3
def getNameNodes():
    """Parse links.csv and group TSP file names by their base name.

    The first line of the file is treated as a header and skipped. Each
    remaining line is stripped of spaces and newlines and grouped under the
    text before its first '.' (e.g. 'a280.tsp' -> key 'a280').

    Returns:
        dict mapping base name -> list of full file names, in file order.
    """
    res = {}
    # `with` guarantees the file is closed even if parsing raises.
    with open('links.csv', 'rt') as archivo:
        for i, linea in enumerate(archivo):
            k = linea.replace(' ', '').replace('\n', '')
            if i > 0:  # skip the header row
                base = k.split('.')[0]
                res.setdefault(base, []).append(k)
    return res
def getDataWeb(url):
    """Fetch *url* via HTTP GET and return the raw response body (bytes)."""
    http = urllib3.PoolManager()
    r = http.request('GET', url)
    r.status  # NOTE(review): no-op expression -- the status is never checked
    return r.data
def makeArchivos(archivos):
    """Download each TSP file and write its lines under archivos/<base>.txt.

    *archivos* is the dict produced by getNameNodes(): base name -> list of
    file names. Files with an extra extension segment (e.g. 'a280.opt.tour')
    are treated as optimum tours and written to <base>.opt.txt.
    """
    base = 'elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/'
    for k,v in archivos.items():
        for e in v:
            # NOTE(review): getDataWeb returns bytes, so str(...) yields a
            # "b'...'" repr rather than decoded text -- confirm this is
            # intentional (decode('utf-8') would give the plain content).
            data = str(getDataWeb(base + e))
            a =data.replace('\\n', ',')
            #b =a.replace('\\', '')
            j = a.split(',')
            if len(e.split('.')) > 2:
                # capture the optimum tour file (e.g. '<base>.opt.tour')
                f = open ('archivos/'+ k + '.opt'+'.txt','w')
                for elem in j:
                    f.write(elem + '\n')
                f.close()
            else:
                f = open ('archivos/'+ k +'.txt','w')
                for elem in j:
                    f.write(elem + '\n')
                f.close()
if __name__ == "__main__":
archivos = getNameNodes()
#print(archivos)
makeArchivos(archivos)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
d987fd7caf9cbc013370f9bd80f1a1e196798145 | 6eb02822eeb8a59e391234460a6afa923db7bf70 | /4651.py | b3706d35aaf309259e8932766f9d1c1821317d05 | [] | no_license | codingpen-io/codeup | 73efd7d403535a853ac1ccaf0585c311ccc38d15 | 20e4e298fed5aecd498233ec5a8c902dbeb60082 | refs/heads/master | 2023-04-08T19:24:51.372778 | 2021-04-27T03:05:27 | 2021-04-27T03:05:27 | 278,296,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | r = ['E', 'A', 'B', 'C', 'D']
for i in range(3):
arr = list(map(int, input().split()))
zero_count = 0
for n in arr:
if n == 0:
zero_count += 1
print(r[zero_count])
| [
"codingepen.io@gmail.com"
] | codingepen.io@gmail.com |
e73558eadc4dedb1c9a10509fcc0e2fa726e78d5 | 37ff794052b2341be00387f8f9176a6f091df6ec | /20.8 Modifying member variables.py | 97458ff0c67796a08ee62fee13dc636ff4830317 | [] | no_license | ryanjoya/Python-Practice-Unit-20-Classes | c565ce4cc8c4ee8b96bfcc40e61010b164209eae | 3a8ccc7e1f29dfd04c7eaeb42a1f407319970a7c | refs/heads/master | 2021-01-10T04:31:24.736563 | 2015-12-29T03:51:19 | 2015-12-29T03:51:19 | 48,151,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | class Car(object):
condition = "new"
def __init__(self, model, color, mpg):
self.model = model
self.color = color
self.mpg = mpg
def display_car(self):
return "This is a %s %s with %s MPG." % (self.color, self.model, str(self.mpg))
def drive_car(self):
self.condition = "used"
my_car = Car("DeLorean", "silver", 88)
print my_car.condition
my_car.drive_car()
print my_car.condition
| [
"ryanjoya@users.noreply.github.com"
] | ryanjoya@users.noreply.github.com |
36747e1f63d574bda386a20da262a3ea2291d5ab | 98bb51c8509d81b87ffa58aab6d03ca49924a8c3 | /src/models/ent.py | bda8b789c87946bff6a4bf026df9298cbdb936d7 | [] | no_license | TobiasUhmann/power | 8a6d55200830cf0467e099c7ce903b10a244efc3 | 76737bf0c991e21bad867f6f2cad8d98a4b4fdea | refs/heads/main | 2023-05-30T02:39:39.423723 | 2021-06-10T10:22:58 | 2021-06-10T10:22:58 | 326,040,753 | 0 | 0 | null | 2021-06-10T10:22:59 | 2021-01-01T19:15:08 | Python | UTF-8 | Python | false | false | 96 | py | from dataclasses import dataclass
@dataclass(frozen=True)
class Ent:
    """An immutable (id, label) entity record."""
    id: int    # unique entity identifier
    lbl: str   # human-readable label
| [
"tobias.uhmann@gmail.com"
] | tobias.uhmann@gmail.com |
f514f0c972565ebfc8022902b1abcc0fa242ca14 | 9d07335de5a17453bf8ae290d70993d7b20dddcd | /.history/dice_20210223203524.py | a9e39059915547dad83aa8fbce4c3cc0fedfd011 | [] | no_license | wh-debug/Matplotlib | 8d12291cd4135b3b42c185e6700f22c627ddb046 | b4f5bf63d977620f799d953c67b262c75344a1cb | refs/heads/master | 2023-03-14T10:09:33.602492 | 2021-02-23T13:51:21 | 2021-02-23T13:51:21 | 340,374,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | '''
Author: your name
Date: 2021-02-23 20:07:30
LastEditTime: 2021-02-23 20:35:24
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \Matplotlib\dice.py
'''
from make_plotly import Die
import matplotlib.pyplot as plt
x_values = [1, 2, 3, 4, 5, 6]
y_values = []
die = Die()
# Roll the die 1000 times and collect every result.
results = []
for roll_num in range(1000):
    result = die.roll()
    results.append(result)
# Tally how often each face came up.
frequencies = []
for value in range(1, die.num_sides+1):
    frequency = results.count(value)  # count occurrences of this face value
    frequencies.append(frequency)
print(frequencies)
| [
"1813763848@qq.com"
] | 1813763848@qq.com |
728f85ad0e20b4d1c04a48811f6c15e810d74d83 | ade56006172d349f20e3ca1c2b71b1cb55d9c4b9 | /api/models.py | 59e47e79867fa7bf61c1ade3a0d94ca4fe24665b | [] | no_license | Syldup/Wesh_API | be7c44c0b94755c019a66eadff19fcaaf016d3f7 | 697bf7d5e264e5e1c7475f033dec7a092a97376d | refs/heads/master | 2020-12-23T17:36:28.063242 | 2020-02-03T13:30:57 | 2020-02-03T13:30:57 | 237,220,726 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | from django.db import models
from django.contrib.auth.models import User
class CodePromo(models.Model):
code = models.CharField(primary_key=True, max_length=50, default='')
name = models.CharField(max_length=50, default='')
create_time = models.DateTimeField()
end_time = models.DateTimeField()
class History(models.Model):
code = models.ForeignKey(CodePromo, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
time = models.DateTimeField(auto_now=True)
| [
"syd9up@gmail.com"
] | syd9up@gmail.com |
e16640d95b8206c25008182920b8a3f19a890f2e | e2420a7abd9c1c3d286eefe623b19b9e3436fd49 | /ciphers/tests/test_caeser_cipher.py | 383ef6f6e343e941f2b294dbc02e29568273e197 | [] | no_license | sam-myers/gravity-falls-codes | 2974417f12d4e8737fd7a01cfff63584732de6d8 | f8e0bb41affdd0359c47fde18efcd25c836947c6 | refs/heads/master | 2022-07-14T23:08:28.025419 | 2019-04-25T07:14:41 | 2019-04-25T07:14:41 | 177,056,867 | 0 | 0 | null | 2019-05-04T10:00:09 | 2019-03-22T02:20:07 | Python | UTF-8 | Python | false | false | 466 | py | from ciphers.caeser import transform_text
def test_shift_zero():
    """A shift of 0 leaves the text unchanged."""
    assert transform_text("abc", 0) == "abc"
def test_shift_one():
    """A shift of +1 advances each letter by one."""
    assert transform_text("abc", 1) == "bcd"
def test_shift_negative_one():
    """A shift of -1 wraps 'a' around to 'z'."""
    assert transform_text("abc", -1) == "zab"
def test_punctuation_unchanged():
    """Non-letter characters pass through untouched."""
    assert transform_text(", .!?", 1) == ", .!?"
def test_undoes_itself():
    """Shifting by +n then -n round-trips the original text."""
    text = "foo bar biz baz!"
    assert transform_text(transform_text(text, 1), -1) == text
| [
"github@sammye.rs"
] | github@sammye.rs |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.