blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0fdb86c8d95ec238f669e4cd793c8b90cee446f0 | b48a1d5733d10c39a112698c2286ae1afb02f36a | /announce/management/commands/migrate_mailchimp_users.py | 7cce1d4f3b7e6e48acb8b65b386b435c2095820c | [
"MIT"
] | permissive | p2pu/learning-circles | ecb317aaa8620cb076ce45c42d055e89e6586516 | ae8de4df48aae0844fb50dca5c62c099b3b2b0a3 | refs/heads/master | 2023-08-19T19:18:09.198077 | 2023-08-10T09:23:58 | 2023-08-10T09:23:58 | 32,735,768 | 11 | 10 | MIT | 2023-08-10T09:30:04 | 2015-03-23T14:05:41 | Python | UTF-8 | Python | false | false | 1,191 | py | from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from announce.mailchimp import archive_members, list_members, batch_subscribe
from studygroups.models import Profile
import requests
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Add opted-in users to the Mailchimp audience.

    Users with ``communication_opt_in`` set, an active account and a
    confirmed email address are subscribed unless Mailchimp already lists
    their email with a live (non-unsubscribed, non-cleaned) status.
    """
    help = 'Synchronize mailchimp audience with users that opted in for communications'
    def handle(self, *args, **options):
        # get all mailchimp users
        mailchimp_members = list_members()
        # keep only members whose subscription is still live
        filter_subscribed = lambda x: x.get('status') not in ['unsubscribed', 'cleaned']
        mailchimp_members = filter(filter_subscribed, mailchimp_members)
        # compare emails case-insensitively
        emails = [member.get('email_address').lower() for member in mailchimp_members]
        # add all members with communication_opt_in == True to mailchimp
        subscribed = User.objects.filter(profile__communication_opt_in=True, is_active=True, profile__email_confirmed_at__isnull=False)
        to_sub = list(filter(lambda u: u.email.lower() not in emails, subscribed))
        print('{} users will be added to the mailchimp list'.format(len(to_sub)))
        batch_subscribe(to_sub)
| [
"dirkcuys@gmail.com"
] | dirkcuys@gmail.com |
87d5df6728743a0ba428c23dbaf1bb9006868efa | 45e391764fe478412361ae8695100ada658b215e | /Doctor's Secret.py | d6711c8ee1bdcc6b8366fa698a9bd3ca9142b5f1 | [] | no_license | yash12-cha/Hackerearth_Codes | b79f17cc4d05d1f369f93a82719a2104fb60794a | 8608dd367875047dea2cbf158d0f68acfde7b77e | refs/heads/main | 2023-05-08T22:47:20.758904 | 2021-06-03T05:30:21 | 2021-06-03T05:30:21 | 367,254,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | n, p = map(int, input().split())
# Medicine is indicated only when n is at most 23 and p lies in [500, 1000].
if n <= 23 and 500 <= p <= 1000:
    print("Take Medicine")
else:
    print("Don't take Medicine")
| [
"noreply@github.com"
] | noreply@github.com |
65b60e56c9365241b5fd54b5f8e8a41ad337171a | cde96466cd047daf012ba8e188104e4e8902caa5 | /Shelfy/app/venv/lib/python3.5/tempfile.py | e2508df71d9e1462854ec0d0e474ae953d2967ac | [] | no_license | Meetmshah/Shelfy | fb770b420cc078471f81a42ae9ce8afca53be475 | 3a3960ded061413ed4c0904827d816e44556220a | refs/heads/master | 2020-04-03T15:06:55.894712 | 2018-09-24T06:32:03 | 2018-09-24T06:32:03 | 155,350,985 | 0 | 0 | null | 2018-10-30T08:35:47 | 2018-10-30T08:35:46 | null | UTF-8 | Python | false | false | 55 | py | /Users/asheikhussain/anaconda/lib/python3.5/tempfile.py | [
"noreply@github.com"
] | noreply@github.com |
9c19ee33a1abc0d34b3c4dbfd9e704ca5e4534d0 | 3581090a1a334f1fe98fd60db08e4a543375e5ca | /SIRMAT/Fotografias/migrations/0002_fotografia_idmuestra.py | 6406e0580541013d3b80682c0e883805d68d1dc2 | [] | no_license | MauricioMiramontes/PrototipoSIRMAT | 4893213098ecc8931132ee64b49d35061db3c987 | 319c8f652ba82ea378f4ebf6b9be28f343e1df51 | refs/heads/main | 2023-08-01T01:50:53.193000 | 2021-09-20T23:54:34 | 2021-09-20T23:54:34 | 351,940,479 | 1 | 0 | null | 2021-06-04T04:00:14 | 2021-03-26T23:52:46 | Python | UTF-8 | Python | false | false | 568 | py | # Generated by Django 3.1.4 on 2021-05-26 05:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: links each Fotografia row to a
    # Muestra via a new ``idMuestra`` foreign key.
    dependencies = [
        ('Muestra', '0002_muestra_nombremuestra'),
        ('Fotografias', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='fotografia',
            name='idMuestra',
            # default=1 back-fills existing rows; preserve_default=False
            # drops that default once the migration has run.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='Muestra.muestra'),
            preserve_default=False,
        ),
    ]
| [
"mmiramontesr@gmail.com"
] | mmiramontesr@gmail.com |
0ee0fecb3d4df02cb0dc4970912acd258cfee73d | 549f00e84cf77f08b2d72b3a298174143e23222e | /pytest_echo.py | 025e4f09603e8814d066cc041fb8926ac1650558 | [
"MIT"
] | permissive | hugovk/pytest-echo | 9a94c6a246ae1803dd6b391f56c35a7d0472f209 | 939793448e7d7e80a356aafc4dbb58bbedbe7e2c | refs/heads/master | 2020-08-10T05:26:13.397571 | 2018-04-22T17:12:07 | 2018-04-22T17:12:07 | 214,268,306 | 0 | 0 | MIT | 2019-10-10T19:27:28 | 2019-10-10T19:27:28 | null | UTF-8 | Python | false | false | 5,239 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import fnmatch
import os
from pprint import pformat
import pkg_resources
from pkg_resources import DistributionNotFound
__version__ = '1.6.0'
def get_installed_distributions():
    """
    Return a list of installed Distribution objects.
    """
    return list(pkg_resources.working_set)
def get_attr(obj, attr, default='NOT FOUND'):
    """Resolve a possibly dotted *attr* path against *obj*.

    Each segment is tried, in order, as an object attribute, a sequence
    index, or a mapping key.  When a segment cannot be resolved the
    *default* takes its place; when resolving a segment raises, the
    stringified exception takes its place (and resolution continues on
    that string).

    >>> get_attr([0, 1, 2], '2')
    2
    >>> get_attr([0, 1, (21, 22)], '2.1')
    22
    >>> get_attr({'key': {'key': 11}}, 'key.key')
    11
    """
    current = obj
    for segment in attr.split('.'):
        try:
            if hasattr(current, segment):
                current = getattr(current, segment, default)
            elif isinstance(current, (list, tuple, set)):
                current = current[int(segment)]
            elif isinstance(current, dict):
                current = current[segment]
            else:
                current = default
        except Exception as exc:  # pragma: no cover
            # Mirror the original contract: failures become their message.
            current = str(exc)
    return current
def get_module_attribute(path):
    """
    Return an attribute's value based on its full dotted path.
    The attribute can be either a module attribute (ie. os.path.curdir)
    or an object attribute (ie. linecache.cache.__class__)
    Warning: be careful when using this function, as it loads every module
    on the path and that executes each module's top-level code.
    :param path: full path to the attribute
    :return: pformat()-ed value on success, stringified error on failure
    >>> print(get_module_attribute('linecache.cache.__class__'))
    <... 'dict'>
    >>> print(get_module_attribute('os.path.curdir'))
    '.'
    >>> print(get_module_attribute('wrong'))
    ('Unable to load %s', 'wrong')
    """
    parts = path.split('.')
    parent = ""
    pkg = None
    try:
        # Import progressively longer prefixes of the path until an
        # ImportError marks the boundary between modules and attributes.
        for i, part in enumerate(parts):
            try:
                if parent:
                    module_name = "%s.%s" % (parent, parts[i])
                else:
                    module_name = parts[i]
                pkg = __import__(module_name, fromlist=[parent])
                parent = module_name
            except ImportError:
                # `part` is not a module: resolve the remainder as
                # attributes of the last module imported successfully.
                if hasattr(pkg, part):
                    return pformat(get_attr(pkg, ".".join(parts[i:])))
                raise Exception('Unable to load %s', path)
    except Exception as e:
        # NOTE: errors are returned as strings rather than raised.
        return str(e)
def get_env(var_name):
    """Return [(name, value), ...] for environment variables matching *var_name*.

    A ``*`` makes the name a glob matched against every variable;
    otherwise the single variable is looked up, with ``"<not set>"`` as
    the placeholder value when it is absent.
    """
    if '*' not in var_name:
        return [(var_name, os.environ.get(var_name, "<not set>"))]
    return [
        (key, value)
        for key, value in os.environ.items()
        if fnmatch.fnmatch(key, var_name)
    ]
def get_version(package_name):
    """Return [(name, version), ...] for *package_name*.

    A ``*`` treats the name as a glob matched against every installed
    distribution; otherwise the single package is resolved via
    _get_version().
    """
    if '*' in package_name:
        targets = [(i.key, i.version)
                   for i in get_installed_distributions()
                   if fnmatch.fnmatch(i.key, package_name)]
    else:
        targets = [(package_name, _get_version(package_name))]
    return targets
def _get_version(package_name):
    """Best-effort lookup of a single package's version string.

    Tries setuptools metadata first, then imports the package and probes
    common version attributes.  Implicitly returns None when the package
    imports but exposes no recognizable version attribute.
    """
    # Preferred: ask pkg_resources (setuptools metadata).
    try:
        import pkg_resources
        return pkg_resources.require(package_name)[0].version
    except (ImportError, AttributeError, TypeError, DistributionNotFound):
        pass
    # Fallback: import the package and look for a version attribute.
    try:
        pkg = __import__(package_name)
    except ImportError:
        return '<unable to load package>'
    for attr_name in ('get_version', '__version__', 'VERSION', 'version'):
        if hasattr(pkg, attr_name):
            attr = getattr(pkg, attr_name)
            if callable(attr):
                return attr()
            else:
                return attr
def pytest_report_header(config):
    """Build the extra report-header text for the --echo-* options.

    Returns None implicitly when no echo options were requested, so
    pytest adds nothing to the header.
    """
    ret = []
    if config.option.echo_envs:
        ret.append("Environment:")
        data = []
        for k in config.option.echo_envs:
            data.extend(get_env(k))
        ret.append("\n".join([" %s: %s" % (k, v)
                              for k, v in sorted(data)]))
    if config.option.echo_versions:
        ret.append("Package version:")
        data = []
        for k in config.option.echo_versions:
            data.extend(get_version(k))
        ret.append("\n".join([" %s: %s" % (k, v)
                              for k, v in sorted(data)]))
    if config.option.echo_attribues:
        ret.append("Inspections:")
        ret.append("\n".join([" %s: %s" % (k, get_module_attribute(k))
                              for k in config.option.echo_attribues]))
    if ret:
        return "\n".join(ret)
def pytest_addoption(parser):
    """Register the --echo-env/--echo-version/--echo-attr options."""
    group = parser.getgroup("general")
    for flag, dest, help_text in (
        ('--echo-env', 'echo_envs', 'environment to print'),
        ('--echo-version', 'echo_versions', 'package version to print'),
        ('--echo-attr', 'echo_attribues', 'attribute to print (full path)'),
    ):
        group.addoption(flag, action='append', dest=dest,
                        default=[], help=help_text)
| [
"s.apostolico@gmail.com"
] | s.apostolico@gmail.com |
fbb938e8e3878525bac3bbe915afa3db565344c2 | a58f1ed0f14c2addbf95cc5113c2da9341c05b97 | /bin/rstpep2html.py | 1ae9975e6cee4615c7401f7bada26cc3b8c4d0d1 | [] | no_license | squidish/dgax_app | 4469733dbb20b08c1c85dd61be6fbeb904ff26f1 | 484bb518530da4f0632a17f251cf0638b265a263 | refs/heads/master | 2021-04-28T16:52:20.568151 | 2018-02-21T07:01:44 | 2018-02-21T07:01:44 | 122,023,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | #!/Users/Williams/Scripts/Python/virtualenvironment/my_gdax_app/bin/python
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
# Locale setup is best-effort; fall back to the "C" locale silently.
# (Indentation restored: the try-body below was flattened in this copy,
# which made the file unparseable. Bare `except:` narrowed to Exception
# so SystemExit/KeyboardInterrupt are not swallowed.)
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    pass

from docutils.core import publish_cmdline, default_description

# Front end: convert reStructuredText PEP documents to (X)HTML.
description = ('Generates (X)HTML from reStructuredText-format PEP files. '
               + default_description)

publish_cmdline(reader_name='pep', writer_name='pep_html',
                description=description)
| [
"root@Christophers-MacBook-Pro.local"
] | root@Christophers-MacBook-Pro.local |
423679755baa5d60a9bbbf0e873e813f13e5b092 | 9ba33919959f8cdb722682296e94c7ddad8e9410 | /load-test/locustfile.py | 8d8d96dd8a10608067b036c8c68fd241ab87c70a | [
"MIT"
] | permissive | madclumsil33t/atat | 59657549a9fa9561ec640e64035987d0c6c21a5c | 290b4adc58791e95dac73ad17ec6645f55307609 | refs/heads/main | 2023-04-02T16:37:03.567866 | 2020-12-18T21:26:58 | 2020-12-18T21:26:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,806 | py | import os
import re
import string
from functools import wraps
from random import choice, choices, randrange
from uuid import uuid4
import names
from locust import SequentialTaskSet, between, task
from locust.contrib.fasthttp import FastHttpUser
from pyquery import PyQuery as pq
# Basic-auth credentials for the target environment (empty when unused).
USERNAME = os.getenv("ATAT_BA_USERNAME", "")
PASSWORD = os.getenv("ATAT_BA_PASSWORD", "")
# Ability to disable SSL verification for bad cert situations
DISABLE_VERIFY = os.getenv("DISABLE_VERIFY", "true").lower() == "true"
# matcher used in extracting the v4 UUID id from a url path
ENTITY_ID_MATCHER = re.compile(
    ".*\/?(?:portfolios|applications|task_orders)\/([0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}).*",
    re.I,
)
# percent chance (out of 100) that the named action happens on a task run
NEW_PORTFOLIO_CHANCE = 10
NEW_APPLICATION_CHANCE = 10
NEW_TASK_ORDER_CHANCE = 10
NEW_LOGOUT_CHANCE = 10
# DoD ids currently held by live simulated users (kept unique).
dod_ids = set()
def update_user_profile(user, client, parent):
    """Scrape the /user form, tweak the phone number, and POST it back.

    Form values are read from the rendered Vue attributes, so the posted
    body mirrors what the real UI would submit.
    """
    # get csrf token
    user_url = "/user"
    response = client.get(user_url)
    csrf_token = get_csrf_token(response)
    d = pq(response.text)
    # get values from input form elements
    keys = (x.attr("name") for x in d(f"[initial-value]").items())
    update_user_body = {k: d(f"[key='{k}']").attr("initial-value") for k in keys}
    # get values from non-input form elements
    keys = (x.attr("name") for x in d(f"[v-bind\:initial-value]").items())
    update_user_body.update(
        # [1:-1] strips the surrounding quotes of the bound expression
        {k: d(f"[key='{k}']").attr("v-bind:initial-value")[1:-1] for k in keys}
    )
    # update phone number and add csrf token
    update_user_body.update(
        {
            "csrf_token": csrf_token,
            "phone_number": "".join(choices(string.digits, k=10)),
            "email": user.email,
            "citizenship": user.citizenship,
            "service_branch": user.service_branch,
            "designation": user.designation,
        }
    )
    # post new values for user profiles
    client.post(user_url, update_user_body, headers={"Referer": parent.host + user_url})
def create_application(client, parent, portfolio_id):
    """Walk the three-step application wizard inside *portfolio_id*.

    Creates the application, names four environments, then adds one team
    member with roles in the first two environments.
    """
    # get new application page for csrf token
    create_app_url = f"/portfolios/{portfolio_id}/applications/new"
    response = client.get(create_app_url)
    csrf_token = get_csrf_token(response)
    # create new application
    response = client.post(
        create_app_url,
        {
            "name": f"Load Test Created - {''.join(choices(string.ascii_letters, k=5))}",
            "description": "Description",
            "csrf_token": csrf_token,
        },
        headers={"Referer": parent.host + create_app_url},
    )
    csrf_token = get_csrf_token(response)
    # get application id (from the redirect target URL)
    application_id = extract_id(response._request.get_full_url())
    # set up application environments
    create_environments_url = f"/applications/{application_id}/new/step_2"
    response = client.post(
        create_environments_url + f"?portfolio_id={portfolio_id}",
        {
            "environment_names-0": "Development",
            "environment_names-1": "Testing",
            "environment_names-2": "Staging",
            "environment_names-3": "Production",
            "csrf_token": csrf_token,
        },
        headers={"Referer": parent.host + create_environments_url},
    )
    # get environments' ids from step 3 of application creation
    create_team_members_url = f"/applications/{application_id}/new/step_3"
    response = client.get(create_team_members_url)
    csrf_token = get_csrf_token(response)
    d = pq(response.text)
    env_0_id = d("#environment_roles-0-environment_id").val()
    env_1_id = d("#environment_roles-1-environment_id").val()
    # create application member
    client.post(
        create_team_members_url + f"?application_id={application_id}",
        {
            "user_data-first_name": "Bob",
            "user_data-last_name": "Smith",
            "user_data-dod_id": "1234567890",
            "user_data-email": "user@email.com",
            "environment_roles-0-environment_id": env_0_id,
            "environment_roles-0-role": "ADMIN",
            "environment_roles-0-environment_name": "First Env",
            "environment_roles-1-environment_id": env_1_id,
            "environment_roles-1-role": "No Access",
            "environment_roles-1-environment_name": "Second Env",
            "perms_env_mgmt": True,
            "perms_team_mgmt": True,
            "csrf_token": csrf_token,
        },
        headers={"Referer": parent.host + create_team_members_url},
    )
def create_portfolio(client, parent):
    """Create a randomly named portfolio and return its id (a UUID string)."""
    # get portfolios page for csrf token
    response = client.get("/portfolios/new")
    csrf_token = get_csrf_token(response)
    # create new portfolio
    portfolios_url = "/portfolios"
    response = client.post(
        portfolios_url,
        {
            "name": f"Load Test Created - {''.join(choices(string.ascii_letters, k=5))}",
            "defense_component": "army",
            "description": "Test",
            "csrf_token": csrf_token,
        },
        headers={"Referer": parent.host + portfolios_url},
    )
    # id is parsed from the URL the POST redirected to
    return extract_id(response._request.get_full_url())
def create_task_order(client, parent, portfolio_id):
    """Drive the task-order wizard end to end for *portfolio_id*.

    Uploads a placeholder PDF, sets a random TO number, fills one CLIN,
    then signs and submits the task order.
    """
    # get init page for csrf token
    response = client.get(f"/portfolios/{portfolio_id}/task_orders/form/step_1")
    csrf_token = get_csrf_token(response)
    # submit TO pdf file
    upload_task_order_pdf_url = f"/portfolios/{portfolio_id}/task_orders/form/step-1"
    response = client.post(
        upload_task_order_pdf_url,
        {
            "pdf-filename": "sample.pdf",
            "pdf-object_name": uuid4(),
            "csrf_token": csrf_token,
        },
        headers={"Referer": parent.host + upload_task_order_pdf_url},
    )
    csrf_token = get_csrf_token(response)
    # get TO ID (from the redirect target URL)
    task_order_id = extract_id(response._request.get_full_url())
    # set TO number (13-17 random digits)
    number = "".join(choices(string.digits, k=choice(range(13, 18))))
    set_task_order_number_url = f"/task_orders/{task_order_id}/form/step_2"
    response = client.post(
        set_task_order_number_url,
        {"number": number, "csrf_token": csrf_token},
        headers={"Referer": parent.host + set_task_order_number_url},
    )
    csrf_token = get_csrf_token(response)
    # set TO parameters
    clins_number = "".join(choices(string.digits, k=4))
    task_orders_step_3 = f"/task_orders/{task_order_id}/form/step_3"
    response = client.post(
        task_orders_step_3,
        {
            "csrf_token": csrf_token,
            "clins-0-number": clins_number,
            "clins-0-jedi_clin_type": "JEDI_CLIN_1",
            "clins-0-total_amount": 100,
            "clins-0-obligated_amount": 50,
            "clins-0-start_date": "01/11/2020",
            "clins-0-end_date": "01/11/2021",
        },
        headers={"Referer": parent.host + task_orders_step_3},
    )
    csrf_token = get_csrf_token(response)
    # submit TO
    submit_task_order_url = f"/task_orders/{task_order_id}/submit"
    client.post(
        submit_task_order_url,
        {"csrf_token": csrf_token, "signature": "y", "confirm": "y",},
        headers={"Referer": parent.host + submit_task_order_url},
    )
def get_portfolios(client):
    """Return the href of every portfolio link on the home page."""
    page = pq(client.get("/home").text)
    links = []
    for anchor in page(".sidenav__link").items():
        links.append(anchor.attr("href"))
    return links
def get_applications(client, portfolio_id):
    """Return the href of every application listed on the portfolio page."""
    page = pq(client.get(f"/portfolios/{portfolio_id}/applications").text)
    hrefs = []
    for anchor in page(".portfolio-applications .accordion__header-text a").items():
        hrefs.append(anchor.attr("href"))
    return hrefs
def has_task_orders(client, portfolio_id):
    """True when the portfolio's funding page shows no empty-state panel."""
    page = pq(client.get(f"/portfolios/{portfolio_id}/task_orders").text)
    empty_state = page(".portfolio-funding .empty-state")
    return not empty_state
def get_csrf_token(response):
    """Extract the CSRF token value from an HTML form response."""
    document = pq(response.text)
    return document("#csrf_token").val()
def extract_id(path):
    """Pull the v4 UUID out of a portfolio/application/task-order URL path."""
    match = ENTITY_ID_MATCHER.match(path)
    assert match is not None, f"Could not find id in {path}"
    return match.group(1) if match else None
def get_new_dod_id():
    """Generate a not-yet-used 10-digit DoD id (always starting with '1')."""
    while True:
        candidate = "1" + "".join(choice(string.digits) for _ in range(9))
        if candidate in dod_ids:
            continue
        # Reserve the id so concurrent users stay unique.
        dod_ids.add(candidate)
        return candidate
def get_new_name():
    """Return [first, last] parts of a randomly generated full name."""
    full_name = names.get_full_name()
    return full_name.split(" ")
def login_as(user, client):
    """Log in via the dev user endpoint; record success on user.logged_in."""
    result = client.get(
        f"/dev-new-user?first_name={user.first_name}&last_name={user.last_name}&dod_id={user.dod_id}"
    )
    # NOTE(review): status is compared as a string here - confirm the
    # FastHttp response exposes it that way rather than as an int.
    user.logged_in = result.status == "200" or result.status == "302"
def log_out(user, client):
    """Hit the logout endpoint and mark the user as logged out."""
    client.get("/logout")
    user.logged_in = False
def user_status(f):
    """Decorator for TaskSet tasks: log in first if needed, maybe log out after.

    Assumes the wrapped callable's first positional argument is the
    TaskSet, so its ``client`` and ``user`` are reachable via args[0].
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        task_set = args[0]
        client = task_set.client
        user = task_set.user
        if not user.logged_in:
            result = client.get(f"/login-local?dod_id={user.dod_id}")
            user.logged_in = result.status == "200" or result.status == "302"
        f(*args, **kwargs)
        # Randomly end the session so the login flow is also exercised.
        if randrange(0, 100) < NEW_LOGOUT_CHANCE:
            log_out(user, client)
    return decorated_function
class UserBehavior(SequentialTaskSet):
    """Sequential task flow for one simulated ATAT user.

    Each @user_status task logs the user in on demand and may randomly
    log the user out afterwards (see the decorator).
    """
    def on_start(self):
        # Honour DISABLE_VERIFY for environments with bad certificates.
        self.client.verify = not DISABLE_VERIFY
        login_as(self.user, self.client)
    @user_status
    @task
    def user_profile(self):
        update_user_profile(self.user, self.client, self.parent)
    @user_status
    @task
    def portfolio(self):
        # Pick an existing portfolio, or occasionally create a new one.
        client = self.client
        portfolio_links = get_portfolios(client)
        if not portfolio_links or randrange(0, 100) < NEW_PORTFOLIO_CHANCE:
            self.user.portfolio_id = create_portfolio(client, self.parent)
        else:
            self.user.portfolio_id = extract_id(choice(portfolio_links))
    @user_status
    @task
    def application(self):
        # Occasionally add an application to the current portfolio.
        client = self.client
        portfolio_id = self.user.portfolio_id
        application_links = get_applications(client, portfolio_id)
        if not application_links or randrange(0, 100) < NEW_APPLICATION_CHANCE:
            create_application(client, self.parent, portfolio_id)
    @user_status
    @task
    def task_order(self):
        # Ensure the portfolio has a task order, sometimes adding another.
        if (
            not has_task_orders(self.client, self.user.portfolio_id)
            or randrange(0, 100) < NEW_TASK_ORDER_CHANCE
        ):
            create_task_order(self.client, self.parent, self.user.portfolio_id)
    def on_stop(self):
        log_out(self.user, self.client)
class ATATUser(FastHttpUser):
    """Locust user that runs UserBehavior with a fresh synthetic identity."""
    tasks = [UserBehavior]
    wait_time = between(3, 9)
    def on_start(self):
        # Build a unique synthetic identity for this simulated user.
        dod_id = get_new_dod_id()
        first_name, last_name = get_new_name()
        self.dod_id = dod_id
        self.first_name = first_name
        self.last_name = last_name
        self.email = "".join([first_name.lower(), last_name.lower(), "@loadtest.atat"])
        self.service_branch = choice(
            [
                "air_force",
                "army",
                "marine_corps",
                "navy",
                "space_force",
                "ccmd_js",
                "dafa",
                "osd_psas",
                "other",
            ]
        )
        self.citizenship = choice(["United States", "Foreign National", "Other"])
        self.designation = choice(["military", "civilian", "contractor"])
        self.logged_in = False
        self.portfolio_id = None
    def on_stop(self):
        # Release the DoD id so a later simulated user may reuse it.
        dod_ids.remove(self.dod_id)
if __name__ == "__main__":
    # if run as the main file directly, spin up a single locust user
    ATATUser().run()
| [
"tom.chandler-ctr@friends.dds.mil"
] | tom.chandler-ctr@friends.dds.mil |
6e9327902f07f2b117493cc77e7ea2cdb9842385 | 4d77ad757d344ffb47b610a0e3ea393834fb4442 | /pattern 14.py | 94543268a4705d4c1e004c3a33545e2e9898f5e2 | [] | no_license | codebluff/Pattern-Programming | 8494a15ab765afd02b6d3bd9a959e8ecbbbb0bf4 | 08ccf37e573cedfee92d33a7954364f5938169d0 | refs/heads/master | 2020-12-02T01:09:07.920977 | 2019-12-30T03:13:39 | 2019-12-30T03:13:39 | 230,838,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | n = ['A', 'B', 'C', 'D', 'E']
# Print an expanding triangle: row i contains the first i + 1 letters of n.
for row in range(1, len(n) + 1):
    for letter in n[:row]:
        print(letter, end=" ")
    print()
"codebluff101@gmail.com"
] | codebluff101@gmail.com |
805535843094f8434fa9cfb5c22c4c9c99ef2185 | 2c2d2405929b026ac4de77d34538cec623dee5eb | /codes/SRN/models/modules/loss.py | 844e09818490d48d1b9b375b12a65032b32c4075 | [] | no_license | greitzmann/DASR | 9d709cf031561897722f1553842af05fca36855e | f85b22ada54344fd0d94ba31ae596427cb9b5c5b | refs/heads/master | 2023-01-01T12:26:11.563140 | 2020-10-15T16:03:26 | 2020-10-15T16:03:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,636 | py | import random
import torch
import torch.nn as nn
import sys
from PerceptualSimilarity.models import util as ps
# Define GAN loss: [vanilla | lsgan | wgan-gp]
class GANLoss(nn.Module):
    """Adversarial loss supporting 'vanilla', 'lsgan' and 'wgan-gp' flavours.

    For 'vanilla'/'lsgan' the discriminator output is compared against a
    constant real/fake label tensor; for 'wgan-gp' the critic score is
    used directly (negated for real targets).
    """
    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
        super(GANLoss, self).__init__()
        self.gan_type = gan_type.lower()
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val
        if self.gan_type == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif self.gan_type == 'lsgan':
            self.loss = nn.MSELoss()
        elif self.gan_type == 'wgan-gp':
            def wgan_loss(input, target):
                # `target` is a boolean: True means maximise the critic score.
                return -input.mean() if target else input.mean()
            self.loss = wgan_loss
        else:
            raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))
    def get_target_label(self, input, target_is_real):
        """Label tensor (or boolean flag for WGAN-GP) matching *input*."""
        if self.gan_type == 'wgan-gp':
            return target_is_real
        fill_value = self.real_label_val if target_is_real else self.fake_label_val
        return torch.empty_like(input).fill_(fill_value)
    def forward(self, input, target_is_real):
        return self.loss(input, self.get_target_label(input, target_is_real))
class GradientPenaltyLoss(nn.Module):
    """WGAN-GP gradient penalty: mean((||d critic / d interp||_2 - 1)^2).

    Keeps a reusable all-ones `grad_outputs` buffer sized lazily to match
    the critic output.
    """
    def __init__(self, device=torch.device('cpu')):
        super(GradientPenaltyLoss, self).__init__()
        self.register_buffer('grad_outputs', torch.Tensor())
        self.grad_outputs = self.grad_outputs.to(device)
    def get_grad_outputs(self, input):
        # Resize the cached ones-buffer only when the shape changes.
        if self.grad_outputs.size() != input.size():
            self.grad_outputs.resize_(input.size()).fill_(1.0)
        return self.grad_outputs
    def forward(self, interp, interp_crit):
        # interp: interpolated samples (requires grad); interp_crit: critic
        # scores for them. create_graph=True so the penalty itself is
        # differentiable during the discriminator update.
        grad_outputs = self.get_grad_outputs(interp_crit)
        grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp, \
            grad_outputs=grad_outputs, create_graph=True, retain_graph=True, only_inputs=True)[0]
        grad_interp = grad_interp.view(grad_interp.size(0), -1)
        grad_interp_norm = grad_interp.norm(2, dim=1)
        loss = ((grad_interp_norm - 1)**2).mean()
        return loss
class PerceptualLossLPIPS(nn.Module):
    """Mean LPIPS perceptual distance between two image batches.

    normalize=True tells the LPIPS network inputs are in [0, 1].
    """
    def __init__(self):
        super(PerceptualLossLPIPS, self).__init__()
        self.loss_network = ps.PerceptualLoss(use_gpu=torch.cuda.is_available())
    def forward(self, x, y):
        return self.loss_network.forward(x, y, normalize=True).mean()
class PerceptualLoss(nn.Module):
    """LPIPS loss with optional random (shared) rotation/flip augmentation.

    The same transform is applied to both x and y, so the distance is
    computed between consistently augmented pairs.  Assumes NCHW input
    (spatial dims 2 and 3).
    """
    def __init__(self, rotations=False, flips=False):
        super(PerceptualLoss, self).__init__()
        self.loss = PerceptualLossLPIPS()
        self.rotations = rotations
        self.flips = flips
    def forward(self, x, y):
        if self.rotations:
            # Rotate both inputs by the same random multiple of 90 degrees.
            k_rot = random.choice([-1, 0, 1])
            x = torch.rot90(x, k_rot, [2, 3])
            y = torch.rot90(y, k_rot, [2, 3])
        if self.flips:
            # Independently coin-flip a vertical and a horizontal flip.
            if random.choice([True, False]):
                x = torch.flip(x, (2,))
                y = torch.flip(y, (2,))
            if random.choice([True, False]):
                x = torch.flip(x, (3,))
                y = torch.flip(y, (3,))
        return self.loss(x, y)
def generator_loss(labels, wasserstein=False, weights=None):
    """Weighted generator adversarial loss over one or more discriminator outputs.

    *labels* may be a single tensor or a list of tensors; *weights*
    defaults to a uniform 1/len(labels) split.
    """
    if not isinstance(labels, list):
        labels = (labels,)
    if weights is None:
        weights = [1.0 / len(labels)] * len(labels)
    if wasserstein:
        terms = (w * torch.mean(-lbl) for lbl, w in zip(labels, weights))
    else:
        terms = (w * torch.mean(-torch.log(lbl + 1e-8)) for lbl, w in zip(labels, weights))
    return sum(terms, 0.0)
def discriminator_loss(reals, fakes, wasserstein=False, grad_penalties=None, weights=None):
    """Weighted discriminator/critic loss over one or more (real, fake) pairs.

    In Wasserstein mode each term is -E[real] + E[fake] + gradient
    penalty; otherwise the standard log loss with a 1e-8 stabiliser.
    """
    reals = reals if isinstance(reals, list) else (reals,)
    fakes = fakes if isinstance(fakes, list) else (fakes,)
    if weights is None:
        weights = [1.0 / len(fakes)] * len(fakes)
    if wasserstein:
        if not isinstance(grad_penalties, list):
            grad_penalties = (grad_penalties,)
        terms = (
            w * (-r.mean() + f.mean() + gp)
            for r, f, w, gp in zip(reals, fakes, weights, grad_penalties)
        )
    else:
        terms = (
            w * (-torch.log(r + 1e-8).mean() - torch.log(1 - f + 1e-8).mean())
            for r, f, w in zip(reals, fakes, weights)
        )
    return sum(terms, 0.0)
if __name__ == '__main__':
    # Smoke test: instantiating the wrapper loads the LPIPS network.
    a = PerceptualLossLPIPS()
"516488199@qq.com"
] | 516488199@qq.com |
d1660437d7cc1d437db44a397725e49216966700 | eefc47dcb8377239c34134024be8783a9e3b5f44 | /bimdata_api_client/models/raw_system.py | 3d6644047f00fe509b01b9df9dfbe5ddcdf9b50d | [] | no_license | Mike-FR/python-api-client | 4fea5afcd942ebdf6dca174e2d38afaeed71eee4 | 54b2b090cbbf127cf8ac0f17c3492e6d0e1c7f29 | refs/heads/master | 2023-06-29T13:07:30.438434 | 2021-07-28T09:08:54 | 2021-07-28T09:08:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,617 | py | # coding: utf-8
"""
BIMData API
BIMData API is a tool to interact with your models stored on BIMData’s servers. Through the API, you can manage your projects, the clouds, upload your IFC files and manage them through endpoints. # noqa: E501
The version of the OpenAPI document: v1
Contact: support@bimdata.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from bimdata_api_client.configuration import Configuration
class RawSystem(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> declared OpenAPI type.
    openapi_types = {
        'uuid': 'str',
        'name': 'str',
        'description': 'str',
        'object_type': 'str'
    }
    # Maps attribute name -> JSON key in the API payload.
    attribute_map = {
        'uuid': 'uuid',
        'name': 'name',
        'description': 'description',
        'object_type': 'object_type'
    }
    def __init__(self, uuid=None, name=None, description=None, object_type=None, local_vars_configuration=None):  # noqa: E501
        """RawSystem - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._uuid = None
        self._name = None
        self._description = None
        self._object_type = None
        self.discriminator = None
        # Assignments below run through the property setters (validation).
        self.uuid = uuid
        self.name = name
        self.description = description
        self.object_type = object_type
    @property
    def uuid(self):
        """Gets the uuid of this RawSystem.  # noqa: E501
        :return: The uuid of this RawSystem.  # noqa: E501
        :rtype: str
        """
        return self._uuid
    @uuid.setter
    def uuid(self, uuid):
        """Sets the uuid of this RawSystem.
        :param uuid: The uuid of this RawSystem.  # noqa: E501
        :type: str
        """
        # uuid is required and must be a non-empty string when
        # client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and uuid is None:  # noqa: E501
            raise ValueError("Invalid value for `uuid`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                uuid is not None and len(uuid) < 1):
            raise ValueError("Invalid value for `uuid`, length must be greater than or equal to `1`")  # noqa: E501
        self._uuid = uuid
    @property
    def name(self):
        """Gets the name of this RawSystem.  # noqa: E501
        :return: The name of this RawSystem.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this RawSystem.
        :param name: The name of this RawSystem.  # noqa: E501
        :type: str
        """
        self._name = name
    @property
    def description(self):
        """Gets the description of this RawSystem.  # noqa: E501
        :return: The description of this RawSystem.  # noqa: E501
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this RawSystem.
        :param description: The description of this RawSystem.  # noqa: E501
        :type: str
        """
        self._description = description
    @property
    def object_type(self):
        """Gets the object_type of this RawSystem.  # noqa: E501
        :return: The object_type of this RawSystem.  # noqa: E501
        :rtype: str
        """
        return self._object_type
    @object_type.setter
    def object_type(self, object_type):
        """Sets the object_type of this RawSystem.
        :param object_type: The object_type of this RawSystem.  # noqa: E501
        :type: str
        """
        self._object_type = object_type
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RawSystem):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, RawSystem):
            return True
        return self.to_dict() != other.to_dict()
| [
"infra@bimdata.io"
] | infra@bimdata.io |
7891607f1409d3289cc4425dcad2510306aa61e2 | c9a15adca3434b463310e80051da369efb622fc0 | /VirtualSim/virtualsim.py | 0f75193140d15a25dbfbc4e12701356c921d579e | [
"MIT"
] | permissive | iandouglas96/swarm-thesis | b6c5f531acafd17e50099e61708347cc14474d35 | dbafa2343a75c4de26b0ae19d08a033b34570287 | refs/heads/master | 2021-01-23T06:39:42.514997 | 2018-05-08T17:43:22 | 2018-05-08T17:43:22 | 86,386,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | #set our main window size
#have to do this first
from kivy.config import Config
Config.set('graphics', 'width', '800')
Config.set('graphics', 'height', '800')
import os
from constants import *
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.clock import Clock
from Queue import Queue
from serialinterface import SerialInterface
from nodefield import NodeField
class VirtualSim(FloatLayout):
    """Root widget of the simulator UI.

    Bridges a virtual serial port (which the external controller hooks
    into) to the on-screen NodeField widget.
    """
    def __init__(self, **kwargs):
        """Open the serial bridge and start polling it for commands."""
        super(VirtualSim, self).__init__(**kwargs)
        # open a serial port for controller to hook into
        self.ser = SerialInterface()
        # display the name of the serial port to hook into
        self.ids.serial_label.text = self.ser.get_port_name()
        # set up a timer to regularly check for incoming commands over the serial port
        # NOTE(review): 1/10 is 0.1 s on Python 3 but 0 under Python 2 integer
        # division -- confirm the intended poll interval.
        self.cmd_check = Clock.schedule_interval(self.check_for_commands, 1/10)
    def check_for_commands(self, dt):
        """Drain pending serial packets and forward each to the node field.

        Byte 1 of a packet is the command id; bytes 2+ are its payload.
        """
        while (self.ser.has_packets()):
            cmd = self.ser.get_next_packet()
            #print "received: "+str([hex(ord(c)) for c in cmd])
            self.ids.node_field.process_cmd(ord(cmd[1]), cmd[2:])
    def gen_adjacencies(self):
        """Pause the simulation (the adjacency dump itself is commented out)."""
        #print self.ids.node_field.gen_adjacencies()
        self.ids.node_field.pause_sim()
class VirtualSimApp(App):
    """Kivy application wrapper; the root widget is a VirtualSim."""
    def build(self):
        return VirtualSim()
if __name__ == '__main__':
    # Launch the simulator; the window size was already configured via
    # kivy.Config at import time near the top of this file.
    VirtualSimApp().run()
| [
"iandouglas96@gmail.com"
] | iandouglas96@gmail.com |
ed9e656edfd4ecfefb842663afcc673709df3a1b | bedfa479971b5efa6d81ac0c1bce4546cc8ea38c | /trainings/shared/response_object.py | cc8b54fc7b8a9070544a0e7a1077cfdeefed3eb7 | [
"MIT"
] | permissive | ferminhg/training-logger | 952f070856f664955c4267af45ddb9f0931e6a05 | bb9a1bfbfd52cbd6db89360407a89e404b113644 | refs/heads/master | 2020-05-25T05:38:41.783488 | 2019-05-26T18:37:41 | 2019-05-26T18:37:41 | 187,653,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | class ResponseSuccess(object):
def __init__(self, value=None):
    """Wrap *value* as the payload of a successful use-case response."""
    self.value = value
def __nonzero__(self):
    """A success response is always truthy (Python 2 truth protocol)."""
    return True

# Python 3 uses __bool__ for truthiness; alias it to the same method.
__bool__ = __nonzero__
class ResponseFailure(object):
    """Failure result of a use case: always falsy, carries a type and message."""

    RESOURCE_ERROR = 'ResourceError'
    PARAMETERS_ERROR = 'ParametersError'
    SYSTEM_ERROR = 'SystemError'

    def __init__(self, type_, message):
        self.type = type_
        self.message = self._format_message(message)

    def _format_message(self, msg):
        # Exceptions are rendered as "ClassName: text"; anything else
        # (typically a plain string) passes through untouched.
        if isinstance(msg, Exception):
            return "{}: {}".format(type(msg).__name__, str(msg))
        return msg

    @property
    def value(self):
        """Serializable view of this failure."""
        return {'type': self.type, 'message': self.message}

    def __nonzero__(self):
        """A failure response is always falsy (Python 2 truth protocol)."""
        return False

    # Python 3 truthiness uses __bool__; alias the same method.
    __bool__ = __nonzero__

    @classmethod
    def build_resource_error(cls, message=None):
        return cls(cls.RESOURCE_ERROR, message)

    @classmethod
    def build_system_error(cls, message=None):
        return cls(cls.SYSTEM_ERROR, message)

    @classmethod
    def build_parameters_error(cls, message=None):
        return cls(cls.PARAMETERS_ERROR, message)

    @classmethod
    def build_from_invalid_request_object(cls, invalid_request_object):
        """Collapse a request object's error list into one parameters error."""
        lines = []
        for err in invalid_request_object.errors:
            lines.append("{}: {}".format(err['parameter'], err['message']))
        return cls.build_parameters_error("\n".join(lines))
"fermin.hdez@gmail.com"
] | fermin.hdez@gmail.com |
f8195af9586f981a99e5f8f499d281e5b620001c | 8131081196d91e9a63521e6a5c059e08adceb167 | /chapter1.py | 76383e5d2405327a733d7dd9dbe9da2008318f0c | [] | no_license | kdliaokueida/ComputerVisionIntro | 6a863c2e73b64d30dde6f87c2c83c45fe602e1bc | 6bf29989d9a8c32bbbe43a27f1b120598bcb8762 | refs/heads/master | 2023-03-21T08:37:49.648794 | 2021-03-06T19:24:49 | 2021-03-06T19:24:49 | 345,176,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | import cv2
import numpy as np

print("Package Imported")

# Open the default webcam and configure the capture stream.
cap = cv2.VideoCapture(0)
cap.set(3, 640)   # property 3: frame width
cap.set(4, 480)   # property 4: frame height
cap.set(10, 100)  # property 10: brightness

while True:
    success, img = cap.read()
    if img is not None:
        cv2.imshow("Webcam", img)
    # Quit when the user presses 'q' in the preview window.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Fix: release the camera handle and close the preview window on exit
# (the original script leaked both).
cap.release()
cv2.destroyAllWindows()
| [
"kdliao@Kueida-MBP.attlocal.net"
] | kdliao@Kueida-MBP.attlocal.net |
de4b7353a05a64aa211b5f3691c26d78f0e97ea8 | d44499739ea1169aef4d8ad596fdd5888ac6bcbd | /main.py | 7b05ed3ef6d78f8a17901fc630cbdcc40d5fbd68 | [] | no_license | Kindred393/4thtermprojects | 810fe290a830037ab4930acb9b92f88638ddfe80 | 6a63a72c31f78b9e89c3283749d94a8b2b26d570 | refs/heads/main | 2023-04-29T21:12:02.459020 | 2021-05-17T16:34:52 | 2021-05-17T16:34:52 | 360,211,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,500 | py | # KidsCanCode - Game Development with Pygame video series
# Jumpy! (a platform game) - Part 18
# Video link: https://youtu.be/i0PaigPo6KM
# scrolling cloud background
# Art from Kenney.nl
# Happy Tune by http://opengameart.org/users/syncopika
# Yippee by http://opengameart.org/users/snabisch
import pygame as pg
import random
from settings import *
from sprites import *
from os import path
class Game:
def __init__(self):
# initialize game window, etc
pg.init()
pg.mixer.init()
self.screen = pg.display.set_mode((WIDTH, HEIGHT))
pg.display.set_caption(TITLE)
self.clock = pg.time.Clock()
self.running = True
self.font_name = pg.font.match_font(FONT_NAME)
self.load_data()
def load_data(self):
# load high score
self.dir = path.dirname(__file__)
with open(path.join(self.dir, HS_FILE), 'r') as f:
try:
self.highscore = int(f.read())
except:
self.highscore = 0
# load spritesheet image
img_dir = path.join(self.dir, 'img')
self.spritesheet = Spritesheet(path.join(img_dir, SPRITESHEET))
# cloud images
self.cloud_images = []
for i in range(1, 4):
self.cloud_images.append(pg.image.load(path.join(img_dir, 'cloud{}.png'.format(i))).convert())
# load sounds
self.snd_dir = path.join(self.dir, 'snd')
self.jump_sound = pg.mixer.Sound(path.join(self.snd_dir, 'Jump33.wav'))
self.boost_sound = pg.mixer.Sound(path.join(self.snd_dir, 'Boost16.wav'))
def new(self):
# start a new game
self.score = 0
self.all_sprites = pg.sprite.LayeredUpdates()
self.platforms = pg.sprite.Group()
self.powerups = pg.sprite.Group()
self.mobs = pg.sprite.Group()
self.clouds = pg.sprite.Group()
self.player = Player(self)
for plat in PLATFORM_LIST:
Platform(self, *plat)
self.mob_timer = 0
pg.mixer.music.load(path.join(self.snd_dir, 'Happy Tune.ogg'))
for i in range(8):
c = Cloud(self)
c.rect.y += 500
self.run()
def run(self):
# Game Loop
pg.mixer.music.play(loops=-1)
self.playing = True
while self.playing:
self.clock.tick(FPS)
self.events()
self.update()
self.draw()
pg.mixer.music.fadeout(500)
def update(self):
# Game Loop - Update
self.all_sprites.update()
# spawn a mob?
now = pg.time.get_ticks()
if now - self.mob_timer > 5000 + random.choice([-1000, -500, 0, 500, 1000]):
self.mob_timer = now
Mob(self)
# hit mobs?
mob_hits = pg.sprite.spritecollide(self.player, self.mobs, False, pg.sprite.collide_mask)
if mob_hits:
self.playing = False
# check if player hits a platform - only if falling
if self.player.vel.y > 0:
hits = pg.sprite.spritecollide(self.player, self.platforms, False)
if hits:
lowest = hits[0]
for hit in hits:
if hit.rect.bottom > lowest.rect.bottom:
lowest = hit
if self.player.pos.x < lowest.rect.right + 10 and \
self.player.pos.x > lowest.rect.left - 10:
if self.player.pos.y < lowest.rect.centery:
self.player.pos.y = lowest.rect.top
self.player.vel.y = 0
self.player.jumping = False
# if player reaches top 1/4 of screen
if self.player.rect.top <= HEIGHT / 4:
if random.randrange(100) < 15:
Cloud(self)
self.player.pos.y += max(abs(self.player.vel.y), 2)
for cloud in self.clouds:
cloud.rect.y += max(abs(self.player.vel.y / 2), 2)
for mob in self.mobs:
mob.rect.y += max(abs(self.player.vel.y), 2)
for plat in self.platforms:
plat.rect.y += max(abs(self.player.vel.y), 2)
if plat.rect.top >= HEIGHT:
plat.kill()
self.score += 10
# if player hits powerup
pow_hits = pg.sprite.spritecollide(self.player, self.powerups, True)
for pow in pow_hits:
if pow.type == 'boost':
self.boost_sound.play()
self.player.vel.y = -BOOST_POWER
self.player.jumping = False
# Die!
if self.player.rect.bottom > HEIGHT:
for sprite in self.all_sprites:
sprite.rect.y -= max(self.player.vel.y, 10)
if sprite.rect.bottom < 0:
sprite.kill()
if len(self.platforms) == 0:
self.playing = False
# spawn new platforms to keep same average number
while len(self.platforms) < 6:
width = random.randrange(50, 100)
Platform(self, random.randrange(0, WIDTH - width),
random.randrange(-75, -30))
def events(self):
# Game Loop - events
for event in pg.event.get():
# check for closing window
if event.type == pg.QUIT:
if self.playing:
self.playing = False
self.running = False
if event.type == pg.KEYDOWN:
if event.key == pg.K_SPACE:
self.player.jump()
if event.type == pg.KEYUP:
if event.key == pg.K_SPACE:
self.player.jump_cut()
def draw(self):
# Game Loop - draw
self.screen.fill(BGCOLOR)
self.all_sprites.draw(self.screen)
self.draw_text(str(self.score), 22, WHITE, WIDTH / 2, 15)
# *after* drawing everything, flip the display
pg.display.flip()
def show_start_screen(self):
# game splash/start screen
pg.mixer.music.load(path.join(self.snd_dir, 'Yippee.ogg'))
pg.mixer.music.play(loops=-1)
self.screen.fill(BGCOLOR)
self.draw_text(TITLE, 48, WHITE, WIDTH / 2, HEIGHT / 4)
self.draw_text("Arrows to move, Space to jump", 22, WHITE, WIDTH / 2, HEIGHT / 2)
self.draw_text("Press a key to play", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
self.draw_text("High Score: " + str(self.highscore), 22, WHITE, WIDTH / 2, 15)
pg.display.flip()
self.wait_for_key()
pg.mixer.music.fadeout(500)
def show_go_screen(self):
# game over/continue
if not self.running:
return
pg.mixer.music.load(path.join(self.snd_dir, 'Yippee.ogg'))
pg.mixer.music.play(loops=-1)
self.screen.fill(BGCOLOR)
self.draw_text("GAME OVER", 48, WHITE, WIDTH / 2, HEIGHT / 4)
self.draw_text("Score: " + str(self.score), 22, WHITE, WIDTH / 2, HEIGHT / 2)
self.draw_text("Press a key to play again", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
if self.score > self.highscore:
self.highscore = self.score
self.draw_text("NEW HIGH SCORE!", 22, WHITE, WIDTH / 2, HEIGHT / 2 + 40)
with open(path.join(self.dir, HS_FILE), 'w') as f:
f.write(str(self.score))
else:
self.draw_text("High Score: " + str(self.highscore), 22, WHITE, WIDTH / 2, HEIGHT / 2 + 40)
pg.display.flip()
self.wait_for_key()
pg.mixer.music.fadeout(500)
def wait_for_key(self):
waiting = True
while waiting:
self.clock.tick(FPS)
for event in pg.event.get():
if event.type == pg.QUIT:
waiting = False
self.running = False
if event.type == pg.KEYUP:
waiting = False
def draw_text(self, text, size, color, x, y):
font = pg.font.Font(self.font_name, size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
self.screen.blit(text_surface, text_rect)
g = Game()
g.show_start_screen()
while g.running:
g.new()
g.show_go_screen()
pg.quit() | [
"noreply@github.com"
] | noreply@github.com |
9c68f21e289ac893f938e83bb2be5f054a2a7561 | 2f6c3e78de825b14cc6d471ba231724d819b7436 | /tasks/ogle.py | 2c1ff53316fd09c5f8108a519f3f81fdfee981f0 | [
"MIT"
] | permissive | astrocatalogs/supernovae | 3f685d447b56c741081acffc6de0c9818149bb47 | 9585d2ae053f15fa91ab5900b5ae962c6a508037 | refs/heads/master | 2023-03-12T12:19:01.300505 | 2023-03-10T16:45:53 | 2023-03-10T16:45:53 | 62,802,442 | 42 | 18 | MIT | 2023-03-14T20:39:37 | 2016-07-07T11:42:13 | Python | UTF-8 | Python | false | false | 6,846 | py | """Import tasks for OGLE.
"""
import os
import re
from astrocats.catalog.utils import is_number, jd_to_mjd, pbar, uniq_cdl
from bs4 import BeautifulSoup, NavigableString, Tag
from decimal import Decimal
from ..supernova import SUPERNOVA
def do_ogle(catalog):
    """Import OGLE-IV transient candidates into *catalog*.

    Scrapes each yearly OGLE transients HTML page for candidate names,
    claimed types and ATel references, then downloads and ingests the
    I-band light-curve .dat file for every non-nova, non-duplicate entry.
    """
    task_str = catalog.get_current_task_str()
    basenames = [
        'transients', 'transients/2015', 'transients/2014b', 'transients/2014',
        'transients/2013', 'transients/2012'
    ]
    oglenames = []
    # Per-page flag: only the live 'transients' page is refreshed in
    # --update mode.
    # NOTE(review): this list has 5 entries but basenames has 6, so
    # ogleupdate[5] would raise IndexError in update mode -- confirm.
    ogleupdate = [True, False, False, False, False]
    for b, bn in enumerate(pbar(basenames, task_str)):
        if catalog.args.update and not ogleupdate[b]:
            continue
        filepath = os.path.join(catalog.get_current_task_repo(), 'OGLE-')
        filepath += bn.replace('/', '-') + '-transients.html'
        htmltxt = catalog.load_url(
            'http://ogle.astrouw.edu.pl/ogle4/' + bn + '/transients.html',
            filepath)
        if not htmltxt:
            continue
        soup = BeautifulSoup(htmltxt, 'html5lib')
        links = soup.findAll('a')
        breaks = soup.findAll('br')
        # Collect the per-object light-curve links and local cache names.
        datalinks = []
        datafnames = []
        for a in links:
            if a.has_attr('href'):
                if '.dat' in a['href']:
                    datalinks.append('http://ogle.astrouw.edu.pl/ogle4/' + bn +
                                     '/' + a['href'])
                    datafnames.append(
                        bn.replace('/', '-') + '-' + a['href'].replace('/',
                                                                       '-'))
        # ec indexes into datalinks/datafnames, advanced per 'Ra,Dec=' block.
        ec = -1
        reference = 'OGLE-IV Transient Detection System'
        refurl = 'http://ogle.astrouw.edu.pl/ogle4/transients/transients.html'
        for bi, br in enumerate(pbar(breaks, task_str)):
            sibling = br.nextSibling
            if 'Ra,Dec=' in sibling:
                line = sibling.replace('\n', '').split('Ra,Dec=')
                name = line[0].strip()
                ec += 1
                if 'NOVA' in name or 'dupl' in name:
                    continue
                if name in oglenames:
                    continue
                oglenames.append(name)
                name = catalog.add_entry(name)
                # Walk forward through siblings until the next 'Ra,Dec='
                # marker, harvesting the claimed type and any ATel link.
                mySibling = sibling.nextSibling
                atelref = ''
                claimedtype = ''
                while 'Ra,Dec=' not in mySibling:
                    if isinstance(mySibling, NavigableString):
                        if not claimedtype and 'class=' in str(mySibling):
                            claimedtype = re.sub(r'\([^)]*\)', '',
                                                 str(mySibling).split('=')[-1])
                            claimedtype = claimedtype.replace('SN', '').strip()
                            if claimedtype == '-':
                                claimedtype = ''
                    if isinstance(mySibling, Tag):
                        atela = mySibling
                        if (atela and atela.has_attr('href') and
                                'astronomerstelegram' in atela['href']):
                            atelref = atela.contents[0].strip()
                            atelurl = atela['href']
                    mySibling = mySibling.nextSibling
                    if mySibling is None:
                        break
                # nextSibling = sibling.nextSibling
                # if ((isinstance(nextSibling, Tag) and
                #      nextSibling.has_attr('alt') and
                #      nextSibling.contents[0].strip() != 'NED')):
                #     radec = nextSibling.contents[0].strip().split()
                # else:
                #     radec = line[-1].split()
                # ra = radec[0]
                # dec = radec[1]
                fname = os.path.join(catalog.get_current_task_repo(),
                                     'OGLE/') + datafnames[ec]
                csvtxt = catalog.load_url(datalinks[ec], fname)
                lcdat = csvtxt.splitlines()
                sources = [
                    catalog.entries[name].add_source(
                        name=reference, url=refurl)
                ]
                catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name,
                                                   sources[0])
                if atelref and atelref != 'ATel#----':
                    sources.append(catalog.entries[name].add_source(
                        name=atelref, url=atelurl))
                sources = uniq_cdl(sources)
                # Discovery year is encoded in the OGLE designation
                # ('OGLE-YYYY...' or 'OGLEYY...').
                if name.startswith('OGLE'):
                    if name[4] == '-':
                        if is_number(name[5:9]):
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.DISCOVER_DATE, name[5:9], sources)
                    else:
                        if is_number(name[4:6]):
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.DISCOVER_DATE, '20' + name[4:6],
                                sources)
                # RA and Dec from OGLE pages currently not reliable
                # catalog.entries[name].add_quantity(SUPERNOVA.RA, ra, sources)
                # catalog.entries[name].add_quantity(SUPERNOVA.DEC, dec,
                #                                    sources)
                if claimedtype and claimedtype != '-':
                    catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                       claimedtype, sources)
                elif ('SN' not in name and
                      SUPERNOVA.CLAIMED_TYPE not in catalog.entries[name]):
                    catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                       'Candidate', sources)
                # Ingest the light curve: JD, magnitude, error per row;
                # mag > 90 is a sentinel for missing data, e_mag of -1 or
                # > 10 marks an upper limit.
                for row in lcdat:
                    row = row.split()
                    mjd = str(jd_to_mjd(Decimal(row[0])))
                    magnitude = row[1]
                    if float(magnitude) > 90.0:
                        continue
                    e_mag = row[2]
                    upperlimit = False
                    if e_mag == '-1' or float(e_mag) > 10.0:
                        e_mag = ''
                        upperlimit = True
                    catalog.entries[name].add_photometry(
                        time=mjd,
                        u_time='MJD',
                        band='I',
                        magnitude=magnitude,
                        e_magnitude=e_mag,
                        system='Vega',
                        source=sources,
                        upperlimit=upperlimit)
            if catalog.args.update:
                catalog.journal_entries()
            if catalog.args.travis and bi >= catalog.TRAVIS_QUERY_LIMIT:
                break
    catalog.journal_entries()
    return
| [
"guillochon@gmail.com"
] | guillochon@gmail.com |
bb0a1ffc03e0ff7632f90464defb6602e6c4567d | 3f784f82d78adfbc28c9a67afa08c84adc4a0278 | /charts/venv/Scripts/pip3-script.py | 460ee9b68c1b5158d1149685d1920f3e4c0a4295 | [] | no_license | zyrrrr/pythonanywhere | 25ed4c613de76620fad6f8b7c46a3157e754a52d | 4324de0a4978c51cec7dcdd6d504d553fa8eff91 | refs/heads/master | 2020-12-04T19:15:53.599525 | 2020-01-05T13:34:12 | 2020-01-05T13:34:12 | 231,878,464 | 0 | 0 | null | null | null | null | ISO-8859-7 | Python | false | false | 420 | py | #!E:\Desktop\ΑΦΟώΒό\charts\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"1020444587@qq.com"
] | 1020444587@qq.com |
275bb25eea944a6bb647046ca261fbca80c5a230 | 59881241c4dc42f3838ed1fa55c048f3f3cad456 | /chileApp/serializers.py | 8534f972d0e89a16494953af9907dc8dd0ccd330 | [
"MIT"
] | permissive | giovannicadiz/Chile-Api-Django-Rest-Framework | efc474166a434d055b8cd43287780724efdbbaa4 | f67d5e0862624223dfde731b2f63ec7bb79e6fbd | refs/heads/master | 2021-08-08T09:09:05.449079 | 2017-11-10T03:15:29 | 2017-11-10T03:15:29 | 109,915,485 | 0 | 0 | null | 2017-11-08T03:07:00 | 2017-11-08T02:10:05 | Python | UTF-8 | Python | false | false | 574 | py | from chileApp.models import Region, Provincia, Comuna
from rest_framework import serializers
class RegionSerializer(serializers.ModelSerializer):
    """Serializes a Region (Chilean administrative region)."""
    class Meta:
        model = Region
        # Expose the id, display name and ordinal of the region.
        fields = ('region_id', 'region_nombre', 'region_ordinal')
class ProvinciaSerializer(serializers.ModelSerializer):
    """Serializes a Provincia, including its parent region id."""
    class Meta:
        model = Provincia
        fields = ('provincia_id', 'provincia_nombre', 'region_id')
class ComunaSerializer(serializers.ModelSerializer):
    """Serializes a Comuna, including its parent provincia id."""
    class Meta:
        model = Comuna
        fields = ('comuna_id', 'comuna_nombre', 'provincia_id')
"giovannicadiz@gmail.com"
] | giovannicadiz@gmail.com |
2ea36cd8fa8a015b6306eb459d6fd37155a29a56 | 04f6faa8843961bc9424cc339598ddfed876d9d1 | /OAsestem/login/migrations/0001_initial.py | 1a961cc742438bf97c463ab4c6a696760bf851d5 | [] | no_license | dearxiaoxue/AID | bff19327e3d2f38c25948570aeb02cae7dd1530f | 99e9a8ddfbb1680426cf13a0f1b9f3dfb8c9a51c | refs/heads/master | 2020-06-18T18:01:05.053171 | 2019-07-11T12:21:03 | 2019-07-11T12:21:03 | 196,390,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-06-27 11:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the `users` table for the login app."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Users',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=30)),
                ('password', models.CharField(max_length=20)),
                ('intro', models.CharField(max_length=20)),
            ],
            options={
                'verbose_name': '账户',
                'verbose_name_plural': '账户',
                'db_table': 'users',
            },
        ),
    ]
| [
"923999458@qq"
] | 923999458@qq |
1456749f8112ef261d1267355ffa482ee9219a0d | 315473760c1bdc2410cc5110250edfa6a4d4010d | /tm/as2-eval.py | 15184a9eabea72ab9b52e6ff0ab83d9afbee0b09 | [] | no_license | seven7e/2019Papers | 5935cac88b893a3437a26ec4f65f05f08eff2209 | 0238467d9101afb2e8e65538c52fcf8a8338930f | refs/heads/master | 2022-11-15T13:55:46.558296 | 2020-07-06T03:51:11 | 2020-07-06T03:51:11 | 199,423,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,463 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
from glob import glob
from tqdm import tqdm
from collections import Counter, defaultdict
import numpy as np
from numpy.linalg import norm
import pandas as pd
from as2 import load_pkl, save_pkl, Record, MetaData, Post, \
stem_word, flatten3d, foreach3d, lemmatizer
import matplotlib.pyplot as plt
from wordcloud import WordCloud
MAX_FONT_SIZE = 80
def show_summary(dataset):
    '''This function describes the summary of dataset for human inspection.
    It's not necessary for the mining process.

    Saves a bar/histogram figure per attribute under img/.

    Parameters
    --------------
    dataset : list of Record
        The blog dataset
    '''
    # One row of metadata per blogger, plus derived per-blogger counts.
    df = pd.DataFrame([d.meta for d in dataset])
    df['blog_count'] = [len(d.posts) for d in dataset]
    df['char_count'] = [sum(len(p.text) for p in d.posts) for d in dataset]
    # print(df)
    print(df.describe(include='all'))
    print('{} possible values for "gender": {}'.format(
        len(df.gender.unique()), ', '.join(sorted(df.gender.unique()))))
    # print('{} possible values for "{}": {}'.format(
    #     len(df.age.unique()), ', '.join(sorted(df.age.unique()))))
    print('{} possible values for category: {}'.format(
        len(df.category.unique()), ', '.join(sorted(df.category.unique()))))
    print('{} possible values for zodiac: {}'.format(
        len(df.zodiac.unique()), ', '.join(sorted(df.zodiac.unique()))))
    # Gender distribution.
    plt.rcParams.update({'font.size': 20})
    # df.hist()
    df['gender'].value_counts().plot(kind='bar')
    plt.xticks(rotation=0)
    # plt.show()
    plt.gcf().tight_layout()
    plt.savefig('img/show-gender.png')
    # Category distribution.
    plt.rcParams.update({'font.size': 10})
    plt.clf()
    df['category'].value_counts().plot(kind='bar')
    # plt.xticks(rotation=45)
    # plt.yscale('log')
    plt.gcf().tight_layout()
    plt.savefig('img/show-category.png')
    # Zodiac distribution.
    plt.rcParams.update({'font.size': 18})
    plt.clf()
    df['zodiac'].value_counts().plot(kind='bar')
    plt.xticks(rotation=90)
    plt.gcf().tight_layout()
    plt.savefig('img/show-zodiac.png')
    # Age histogram.
    plt.rcParams.update({'font.size': 20})
    plt.clf()
    age = df['age']
    # bins = np.linspace(age.min(), age.max(), 20)
    df['age'].hist(bins=20)
    # plt.xticks(bins)
    plt.gcf().tight_layout()
    plt.savefig('img/show-age.png')
    # Blog and character counts on log-spaced bins (heavy-tailed).
    plt.clf()
    cnt = df['blog_count']
    logbins = np.logspace(np.log10(cnt.min()),np.log10(cnt.max()), 20)
    cnt.hist(bins=logbins)
    plt.xscale('log')
    plt.gcf().tight_layout()
    plt.savefig('img/show-blog-count.png')
    plt.clf()
    cnt = df['char_count']
    logbins = np.logspace(np.log10(cnt.min()),np.log10(cnt.max()), 20)
    cnt.hist(bins=logbins)
    plt.xscale('log')
    plt.gcf().tight_layout()
    plt.savefig('img/show-char-count.png')
    # Joint gender x (age<=20 vs >20) group sizes.
    plt.clf()
    df['gender_age'] = [g + '\n' + ('<=20' if a <= 20 else '>20') \
        for (g, a) in zip(df['gender'], df['age'])]
    # bins = np.linspace(age.min(), age.max(), 20)
    df['gender_age'].value_counts()[[2, 3, 1, 0]].plot(kind='bar')
    # plt.xticks(bins)
    plt.xticks(rotation=0)
    plt.gcf().tight_layout()
    plt.savefig('img/show-gender-age.png')
def calc_stem_map():
    '''Map word stem back to the most representative word so we can display valid
    English words in the word cloud, and also for calculating the coherence score.

    Returns (and pickles to stem2word.pkl) a dict
    stem -> up to 10 (lemma, count) pairs, most frequent first.
    '''
    print('building map from stem to words ...')
    docs = load_pkl('tokenised_docs.pkl')
    # stem -> Counter of lemmatized surface forms.
    stem2word = defaultdict(lambda *_, **__: Counter())
    def _helper(w):
        s = stem_word(w)
        stem2word[s][lemmatizer.lemmatize(w.lower())] += 1
    # print(stem2word)
    print('calculating map...')
    foreach3d(_helper, docs)
    out = {}
    for k, cnt in stem2word.items():
        out[k] = cnt.most_common(10)
    # print(out)
    save_pkl(out, 'stem2word.pkl')
    return out
## Colour functions for word cloud
def color_black(word, *args, **kwargs):
    """Word-cloud colour callback that paints every word solid black."""
    black = '#000000'
    return black
def grey_color_func(word, font_size, position, orientation, random_state=None, **kwargs):
    """Word-cloud colour callback: darker grey for larger fonts.

    NOTE(review): with MAX_FONT_SIZE == 80, the integer division
    ``(80 - font_size) // 80`` is 0 for any font_size >= 1, so every word
    renders at lightness 0% (black). A percentage scale such as
    ``(MAX_FONT_SIZE - font_size) * 100 // MAX_FONT_SIZE`` may have been
    intended -- confirm before changing.
    """
    # return 'hsl(0, 0%, {:d}%)'.format(np.random.randint(10, 60))
    return 'hsl(0, 0%, {:d}%)'.format((MAX_FONT_SIZE - font_size) // (MAX_FONT_SIZE * 1))
# Lazily-loaded cache: stem -> [(word, count), ...] (filled in eval_topics).
STEM2WORD = None
def eval_topics(fpath, method='tf', top_k=2, num_words_in_topic=10):
    '''Evaluate topics by:
    1. plotting the word cloud
    2. calculating the diagnostic and coherence metrics

    :param fpath: JSON file of per-group topic lists keyed by method
    :param method: 'tf' or 'tfidf' scoring variant to read
    :param top_k: number of topics kept per group
    :param num_words_in_topic: words shown per topic (topic name included)
    :return: metrics list from calc_coherence_all
    '''
    with open(fpath, encoding='utf8') as f:
        result = json.load(f)
    global STEM2WORD
    if STEM2WORD is None:
        STEM2WORD = load_pkl('stem2word.pkl')
    def _2w(w):
        # Map a stem back to its most frequent surface word, if known.
        if w in STEM2WORD:
            return STEM2WORD[w][0][0]
        else:
            return w
    topics_formatted = {}
    for group, topics2 in result.items():
        # print(group)
        topics = topics2[method]
        topics_formatted[group] = []
        for i, topic in enumerate(topics[:top_k]):
            topic_name = _2w(topic['topic'])
            words = {}
            words.update((_2w(kw[0]), kw[1]) for kw in topic['keywords'][:(num_words_in_topic-1)])
            if method == 'tf':
                words[topic_name] = topic['score']
            else:
                try:
                    words[topic_name] = topic['keywords'][0][1] * 2 # fake frequency for display
                except IndexError:
                    words[topic_name] = 1
            topics_formatted[group].append((topic_name, words))
    print(topics_formatted)
    plot_topics(topics_formatted, method=method)
    return calc_coherence_all(topics_formatted, method=method)
# Module-level cache for the LSA word-vector dict (see load_lsa).
LSA = None
def load_lsa():
    """Load (and memoize) the LSA model as a dict word -> 300-d np.ndarray.

    Tries the pickled cache first; on failure, rebuilds it from the
    SEMILAR TASA model text files and pickles the result.
    """
    global LSA
    if LSA is None:
        print('loading LSA model...')
        try:
            LSA = load_pkl('lsa.pkl')
        except:
            print('failed')
            print('loading LSA model...')
            with open('semilar/LSA-MODELS/LSA-MODEL-TASA-LEMMATIZED-DIM300/voc.txt') as f:
                vocab = [x.strip() for x in f]
            # print(vocab[:10])
            print('vocab size:', len(vocab))
            with open('semilar/LSA-MODELS/LSA-MODEL-TASA-LEMMATIZED-DIM300/lsaModel.txt') as f:
                vec = [np.array([float(x) for x in line.split()]) for line in f]
            print('vector size:', len(vec), len(vec[0]))
            # print(vec[0])
            LSA = {w: v for w, v in zip(vocab, vec)}
            save_pkl(LSA, 'lsa.pkl')
    return LSA
# Module-level cache: (word_a, word_b) -> PMI score (see load_wiki_pmi).
WIKI_PMI = None
def load_wiki_pmi():
    """Load (and memoize) Wikipedia PMI scores keyed by word pair.

    Tries the pickled cache first; on failure, parses the raw
    semilar/wiki-pmi/* files and pickles the result.
    """
    global WIKI_PMI
    if WIKI_PMI is None:
        print('loading wiki PMI model...')
        try:
            WIKI_PMI = load_pkl('wiki-pmi.pkl')
        except:
            print('load from original files...')
            WIKI_PMI = {}
            for fname in tqdm(glob('semilar/wiki-pmi/*')):
                with open(fname) as f:
                    # Skip the four header lines of each file.
                    next(f)
                    next(f)
                    next(f)
                    next(f)
                    for line in f:
                        a, b, s = line.strip().split()
                        s = float(s)
                        WIKI_PMI[(a, b)] = s
                # break
            # print(WIKI_PMI)
            save_pkl(WIKI_PMI, 'wiki-pmi.pkl')
    return WIKI_PMI
# Module-level cache: word -> GloVe embedding vector (see load_glove).
GLOVE = None
def load_glove(ndim=100):
    """Load (and memoize) GloVe embeddings as a dict word -> np.ndarray.

    :param ndim: embedding dimensionality; selects glove.6B.{ndim}d.txt
    """
    global GLOVE
    if GLOVE is None:
        print('loading glove embeddings...')
        try:
            GLOVE = load_pkl('glove{}.pkl'.format(ndim))
        except:
            print('failed')
            GLOVE = {}
            fname = 'embeddings/glove.6B.{}d.txt'.format(ndim)
            print('load from file', fname)
            with open(fname) as f:
                for line in f:
                    arr = line.strip().split()
                    # NOTE: the comprehension variable f shadows the file
                    # handle name, but only inside the comprehension scope.
                    GLOVE[arr[0].strip()] = np.array([float(f) for f in arr[1:]])
                    # break
            # print(GLOVE)
            save_pkl(GLOVE, 'glove{}.pkl'.format(ndim))
    return GLOVE
def cosine_similarity(a, b):
    """Cosine of the angle between 1-D vectors *a* and *b*."""
    denom = norm(a) * norm(b)
    return np.dot(a, b) / denom
def lsa_score(wi, wj):
    """Cosine similarity of the two words' LSA vectors (KeyError if OOV)."""
    vectors = load_lsa()
    return cosine_similarity(vectors[wi], vectors[wj])
def pmi_score(wi, wj):
    """Wikipedia PMI for a word pair; tries both orders (KeyError if absent)."""
    pmi = load_wiki_pmi()
    try:
        return pmi[(wi, wj)]
    except KeyError:
        return pmi[(wj, wi)]
def glove_score(wi, wj):
    """Cosine similarity of the two words' GloVe embeddings (KeyError if OOV)."""
    embeddings = load_glove()
    return cosine_similarity(embeddings[wi], embeddings[wj])
def calc_coherence(words, f):
    """Average pairwise association score over all word pairs.

    :param words: words of one topic
    :param f: pairwise scorer (e.g. lsa_score, glove_score); may raise
        KeyError for out-of-vocabulary pairs, which are skipped with a
        warning
    :return: mean score over the scorable pairs, or 0.0 when no pair
        could be scored (fixes a ZeroDivisionError in that case)
    """
    n = 0
    score = 0.0
    for i in range(len(words)):
        for j in range(i+1, len(words)):
            try:
                score += f(words[i], words[j])
            except KeyError:
                print('warning: cannot find pairwise association: {} {}' \
                    .format(words[i], words[j]))
                continue
            n += 1
            # print(i, j, words[i], words[j], score, n)
    if n == 0:
        # Every pair was out of vocabulary; report zero coherence
        # instead of dividing by zero.
        return 0.0
    return score / n
def calc_word_length(words):
    """Mean character length of the given words."""
    total = 0
    for w in words:
        total += len(w)
    return total / len(words)
# Lazily-built Counter of corpus word frequencies (see calc_topic_size).
WORD_COUNT = None
def calc_topic_size(words):
    """Total corpus frequency of the topic's words.

    Builds the corpus word-count table on first use from the pickled
    intermediate data.
    """
    global WORD_COUNT
    if WORD_COUNT is None:
        docs, _ = load_pkl('intermediate_data.pkl')
        WORD_COUNT = Counter(w for w, t in flatten3d(docs))
    return sum(WORD_COUNT[w] for w in words)
def calc_coherence_all(topics_all, method):
    """Compute diagnostic metrics for every formatted topic.

    :param topics_all: dict group -> [(topic_name, {word: freq}), ...]
    :param method: label of the scoring variant (not used in computation here)
    :return: list of tuples
        (group, index, topic_name, words, lsa, we_glove, word_length, topic_size)
    """
    metrics = []
    for group, topics in topics_all.items():
        # print(group)
        for i, (topic_name, words_freq) in enumerate(topics):
            # Coherence needs at least two words to form a pair.
            if len(words_freq) <= 1:
                continue
            words = sorted(words_freq.keys())
            print('topic:', topic_name, words)
            lsa = calc_coherence(words, lsa_score)
            print('lsa score', lsa)
            # pmi = calc_coherence(words, pmi_score)
            # print('pmi score', pmi)
            we_glove = calc_coherence(words, glove_score)
            print('we glove score', we_glove)
            wl = calc_word_length(words)
            print('avg word length', wl)
            ts = calc_topic_size(words)
            print('avg topic size', ts)
            metrics.append((group, i, topic_name, words, lsa, we_glove, wl, ts))
    return metrics
def print_metrics_as_table(metrics, fpath):
    """Write the per-topic metrics as LaTeX table rows to *fpath*.

    Each metrics entry is (group, index, topic, keywords, lsa, we_glove,
    word_length, topic_size). The group label is only printed on its
    first (index 0) row; a full \\hline is drawn after index 1 (two
    topics per group), otherwise a \\cline over columns 2-6.
    """
    group_labels = {
        'less_or_20': '20 or younger',
        'over_20': 'over 20',
        'all': 'everyone',
    }
    with open(fpath, 'w') as out:
        for group, i, topic, keywords, lsa, we_glove, wl, ts in metrics:
            label = ' ' if i > 0 else group_labels.get(group, group)
            row = f'{label} & {topic} & {ts//1000:,}k & {wl:.1f} & {lsa:.3f} & {we_glove:.3f} \\\\\n'
            out.write(row)
            separator = '\\hline\n' if i == 1 else '\\cline{2-6}\n'
            out.write(separator)
def plot_topics(topics_json, method):
    '''Plot word cloud for the keywords in each topic.

    :param topics_json: dict group -> [(topic_name, {word: freq}), ...]
    :param method: scoring variant label, used in the output file name
    '''
    for group, topics in topics_json.items():
        # print(group)
        for i, (topic_name, words) in enumerate(topics):
            print('topic: ', topic_name, 'number of keywords:', len(words))
            wc = WordCloud(background_color="white",
                           max_font_size=80,
                           max_words=len(words)+1,
                           color_func=grey_color_func)
            wc.generate_from_frequencies(words)
            # show
            plt.clf()
            plt.imshow(wc, interpolation="bilinear")
            plt.axis("off")
            # plt.show()
            # plt.title(topic_name, y=-0.25, fontsize=20, fontname='Times New Roman')
            plt.title(topic_name, y=-0.25, fontsize=20, fontname='Georgia')
            plt.gcf().tight_layout()
            # NOTE(review): the format string has 3 placeholders but 4
            # arguments -- topic_name is silently ignored by str.format.
            fig_path = 'img/{}-{}-{}.png'.format(group, method, i+1, topic_name)
            print('drawing ' + fig_path)
            plt.savefig(fig_path)
def main():
    """Command-line dispatcher: 'show', 'eval <topics.json>' or 'stem2word'."""
    cmd = sys.argv[1]
    if cmd == 'show':
        # Dataset summary figures and statistics.
        show_summary(load_pkl('blogs.pkl'))
        # show_summary(load_pkl('blogs-10.pkl'))
    elif cmd == 'eval':
        # Evaluate both scoring variants and emit LaTeX metric tables.
        fpath = sys.argv[2]
        metrics_tf = eval_topics(fpath, top_k=2, method='tf')
        metrics_tfidf = eval_topics(fpath, top_k=2, method='tfidf')
        print('tf metrics', metrics_tf)
        print_metrics_as_table(metrics_tf, 'metrics-tf.tex')
        print('tfidf metrics', metrics_tfidf)
        print_metrics_as_table(metrics_tfidf, 'metrics-tfidf.tex')
    elif cmd == 'stem2word':
        calc_stem_map()
if __name__ == '__main__':
    # Script entry point; see main() for the supported sub-commands.
    main()
| [
"nanoix9@gmail.com"
] | nanoix9@gmail.com |
467dcabde18edb99fd16a2cec7e947fea61b963f | 38e3cce7f03adc4e430a7987dfcd3433d90d5811 | /networkler.py | 7bc67d1998a05ab73f289e4dd6ead0e61d3e07a2 | [] | no_license | Hasokeyk/dc_tts-turkish-implementation-Turkce-metinden-sese- | c740e1fb05b92639a61ab7912cc7696749669477 | 918a8f8f5bed26f92653122c0338eb6ccc780d00 | refs/heads/master | 2022-02-20T21:48:11.582194 | 2019-09-25T18:49:27 | 2019-09-25T18:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,762 | py | # Mert Hacıahmetoğlu
# 03.08.2019
"""hyperparameters script"""
#---------------------------------------------------------------------
from __future__ import print_function
from hiperparametreler import Hiperparametreler as hp
from moduller import *
import tensorflow as tf
def TextEnc(L, training=True):
    '''Text encoder: character ids -> attention keys and values.

    Args:
      L: Text inputs. (B, N)

    Return:
      K: Keys. (B, N, d)
      V: Values. (B, N, d)
    '''
    # i numbers the layers so every variable scope is unique.
    i = 1
    # Character embedding over the Turkish alphabet (hp.harfler).
    tensor = embed(L,
                   vocab_size=len(hp.harfler),
                   num_units=hp.e,
                   scope="embed_{}".format(i)); i += 1
    tensor = conv1d(tensor,
                    filters=2*hp.d,
                    size=1,
                    rate=1,
                    dropout_rate=hp.dropout_rate,
                    activation_fn=tf.nn.relu,
                    training=training,
                    scope="C_{}".format(i)); i += 1
    tensor = conv1d(tensor,
                    size=1,
                    rate=1,
                    dropout_rate=hp.dropout_rate,
                    training=training,
                    scope="C_{}".format(i)); i += 1
    # Two stacks of four highway-conv blocks with dilation rates 1,3,9,27.
    for _ in range(2):
        for j in range(4):
            tensor = hc(tensor,
                        size=3,
                        rate=3**j,
                        dropout_rate=hp.dropout_rate,
                        activation_fn=None,
                        training=training,
                        scope="HC_{}".format(i)); i += 1
    for _ in range(2):
        tensor = hc(tensor,
                    size=3,
                    rate=1,
                    dropout_rate=hp.dropout_rate,
                    activation_fn=None,
                    training=training,
                    scope="HC_{}".format(i)); i += 1
    for _ in range(2):
        tensor = hc(tensor,
                    size=1,
                    rate=1,
                    dropout_rate=hp.dropout_rate,
                    activation_fn=None,
                    training=training,
                    scope="HC_{}".format(i)); i += 1
    # Split the 2d channels into keys and values.
    K, V = tf.split(tensor, 2, -1)
    return K, V
def AudioEnc(S, training=True):
    '''Audio encoder: mel frames -> attention queries (causal convolutions).

    Args:
      S: melspectrogram. (B, T/r, n_mels)

    Returns
      Q: Queries. (B, T/r, d)
    '''
    # i numbers the layers so every variable scope is unique.
    i = 1
    tensor = conv1d(S,
                    filters=hp.d,
                    size=1,
                    rate=1,
                    padding="CAUSAL",
                    dropout_rate=hp.dropout_rate,
                    activation_fn=tf.nn.relu,
                    training=training,
                    scope="C_{}".format(i)); i += 1
    tensor = conv1d(tensor,
                    size=1,
                    rate=1,
                    padding="CAUSAL",
                    dropout_rate=hp.dropout_rate,
                    activation_fn=tf.nn.relu,
                    training=training,
                    scope="C_{}".format(i)); i += 1
    tensor = conv1d(tensor,
                    size=1,
                    rate=1,
                    padding="CAUSAL",
                    dropout_rate=hp.dropout_rate,
                    training=training,
                    scope="C_{}".format(i)); i += 1
    # Two stacks of four causal highway-conv blocks, dilation 1,3,9,27.
    for _ in range(2):
        for j in range(4):
            tensor = hc(tensor,
                        size=3,
                        rate=3**j,
                        padding="CAUSAL",
                        dropout_rate=hp.dropout_rate,
                        training=training,
                        scope="HC_{}".format(i)); i += 1
    for _ in range(2):
        tensor = hc(tensor,
                    size=3,
                    rate=3,
                    padding="CAUSAL",
                    dropout_rate=hp.dropout_rate,
                    training=training,
                    scope="HC_{}".format(i)); i += 1
    return tensor
def Attention(Q, K, V, mononotic_attention=False, prev_max_attentions=None):
    '''Scaled dot-product attention between audio queries and text keys.

    Args:
      Q: Queries. (B, T/r, d)
      K: Keys. (B, N, d)
      V: Values. (B, N, d)
      mononotic_attention: A boolean. At training, it is False.
      prev_max_attentions: (B,). At training, it is set to None.

    Returns:
      R: [Context Vectors; Q]. (B, T/r, 2d)
      alignments: (B, N, T/r)
      max_attentions: (B, T/r)
    '''
    # Attention logits, scaled by 1/sqrt(d).
    A = tf.matmul(Q, K, transpose_b=True) * tf.rsqrt(tf.to_float(hp.d))
    if mononotic_attention:  # for inference
        # Mask out keys outside a forward window of width
        # hp.attention_win_size starting at the previous argmax position,
        # forcing roughly monotonic alignment.
        key_masks = tf.sequence_mask(prev_max_attentions, hp.max_N)
        reverse_masks = tf.sequence_mask(hp.max_N - hp.attention_win_size - prev_max_attentions, hp.max_N)[:, ::-1]
        masks = tf.logical_or(key_masks, reverse_masks)
        masks = tf.tile(tf.expand_dims(masks, 1), [1, hp.max_T, 1])
        # Large negative padding so masked logits vanish in the softmax.
        paddings = tf.ones_like(A) * (-2 ** 32 + 1)  # (B, T/r, N)
        A = tf.where(tf.equal(masks, False), A, paddings)
    A = tf.nn.softmax(A)  # (B, T/r, N)
    max_attentions = tf.argmax(A, -1)  # (B, T/r)
    R = tf.matmul(A, V)
    # Concatenate the context vectors with the queries along channels.
    R = tf.concat((R, Q), -1)

    alignments = tf.transpose(A, [0, 2, 1])  # (B, N, T/r)
    return R, alignments, max_attentions
def AudioDec(R, training=True):
'''
Args:
R: [Context Vectors; Q]. (B, T/r, 2d)
Returns:
Y: Melspectrogram predictions. (B, T/r, n_mels)
'''
i = 1
tensor = conv1d(R,
filters=hp.d,
size=1,
rate=1,
padding="CAUSAL",
dropout_rate=hp.dropout_rate,
training=training,
scope="C_{}".format(i)); i += 1
for j in range(4):
tensor = hc(tensor,
size=3,
rate=3**j,
padding="CAUSAL",
dropout_rate=hp.dropout_rate,
training=training,
scope="HC_{}".format(i)); i += 1
for _ in range(2):
tensor = hc(tensor,
size=3,
rate=1,
padding="CAUSAL",
dropout_rate=hp.dropout_rate,
training=training,
scope="HC_{}".format(i)); i += 1
for _ in range(3):
tensor = conv1d(tensor,
size=1,
rate=1,
padding="CAUSAL",
dropout_rate=hp.dropout_rate,
activation_fn=tf.nn.relu,
training=training,
scope="C_{}".format(i)); i += 1
# mel_hats
logits = conv1d(tensor,
filters=hp.mels_s,
size=1,
rate=1,
padding="CAUSAL",
dropout_rate=hp.dropout_rate,
training=training,
scope="C_{}".format(i)); i += 1
Y = tf.nn.sigmoid(logits) # mel_hats
return logits, Y
def SSRN(Y, training=True):
'''
Args:
Y: Melspectrogram Predictions. (B, T/r, n_mels)
Returns:
Z: Spectrogram Predictions. (B, T, 1+n_fft/2)
'''
i = 1 # number of layers
# -> (B, T/r, c)
tensor = conv1d(Y,
filters=hp.c,
size=1,
rate=1,
dropout_rate=hp.dropout_rate,
training=training,
scope="C_{}".format(i)); i += 1
for j in range(2):
tensor = hc(tensor,
size=3,
rate=3**j,
dropout_rate=hp.dropout_rate,
training=training,
scope="HC_{}".format(i)); i += 1
for _ in range(2):
# -> (B, T/2, c) -> (B, T, c)
tensor = conv1d_transpose(tensor,
scope="D_{}".format(i),
dropout_rate=hp.dropout_rate,
training=training,); i += 1
for j in range(2):
tensor = hc(tensor,
size=3,
rate=3**j,
dropout_rate=hp.dropout_rate,
training=training,
scope="HC_{}".format(i)); i += 1
# -> (B, T, 2*c)
tensor = conv1d(tensor,
filters=2*hp.c,
size=1,
rate=1,
dropout_rate=hp.dropout_rate,
training=training,
scope="C_{}".format(i)); i += 1
for _ in range(2):
tensor = hc(tensor,
size=3,
rate=1,
dropout_rate=hp.dropout_rate,
training=training,
scope="HC_{}".format(i)); i += 1
# -> (B, T, 1+n_fft/2)
tensor = conv1d(tensor,
filters=1+hp.fft_s//2,
size=1,
rate=1,
dropout_rate=hp.dropout_rate,
training=training,
scope="C_{}".format(i)); i += 1
for _ in range(2):
tensor = conv1d(tensor,
size=1,
rate=1,
dropout_rate=hp.dropout_rate,
activation_fn=tf.nn.relu,
training=training,
scope="C_{}".format(i)); i += 1
logits = conv1d(tensor,
size=1,
rate=1,
dropout_rate=hp.dropout_rate,
training=training,
scope="C_{}".format(i))
Z = tf.nn.sigmoid(logits)
return logits, Z | [
"noreply@github.com"
] | noreply@github.com |
92636880ee00dcdaf6082a42f6967c44fa8b6054 | 9bcd8a8de7e34ab52f44070c171e2e12e52e9775 | /setup.py | c7ab57d1d127894b45df406d8c76bdb98355363e | [
"BSD-2-Clause"
] | permissive | miracle2k/localtodo | c419bf5cd8aa5fd6092420577c6155a3d418cd1d | 8598a073d9fe466832b6a952a0b1dc20603d0e7d | refs/heads/master | 2022-04-30T13:36:50.211348 | 2022-03-21T18:45:16 | 2022-03-21T18:45:16 | 5,198,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | #!/usr/bin/env python
# coding: utf-8
from setuptools import setup
setup(
name='localtodo',
url='https://github.com/miracle2k/localtodo',
version='1.0',
license='BSD',
author=u'Michael Elsdörfer',
author_email='michael@elsdoerfer.com',
description=
'.gitignore local todo files, but sync them through Dropbox.',
py_modules=['localtodo'],
install_requires=['docopt==0.4.1'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python'
],
entry_points="""[console_scripts]\nlocaltodo = localtodo:run\n""",
) | [
"michael@elsdoerfer.com"
] | michael@elsdoerfer.com |
ce1b27305c6bd0b8a926dbe2218abbb8f297e24b | 5cde21d3eb1667152d4aa7fe489f15339db89551 | /maple/community/process/__init__.py | 1e2c945b04a925467c68181e7a269746fc3c10b2 | [] | no_license | SunmoonSan/PythonDaily | cce0b82c9bfe8e57dc26d8bcb722e165302cf4b0 | c0a95c2ece1b3cb6ef00d1b096fef14892de1ce6 | refs/heads/master | 2022-04-02T13:08:41.692131 | 2020-01-19T12:48:38 | 2020-01-19T12:48:38 | 115,420,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @desc : Created by San on 2019/12/13 17:23
| [
"18814184179@163.com"
] | 18814184179@163.com |
65a0a3a64109442716a5a600edec18ed6380230c | 63c9d7103ad5cff38b8d120078a37e81d7f57fd1 | /nested_list.py | 8c205a4eadfc63ae333c79efbe13107109a0eddd | [] | no_license | Priyanka-Kothmire/python | 9a854ee3637ec8f4ce8702af9d90c21e0f53949a | fb950f8a18f302ea3451a5580a86256a0d77dfd9 | refs/heads/main | 2023-06-14T22:34:07.973805 | 2021-07-11T14:40:46 | 2021-07-11T14:40:46 | 384,970,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | # l = [1, 2, [3, 4, [5, 6]], 7, 8, [9, [10]]]
l=["s", "l", ["d", "w", ["e", "q"]], "f", "g", ["k", ["p"]]]
output = []
i=0
while i<len(l):
j=0
while j<len(l(i)):
output.append(l[i][j])
j=j+1
i=i+1
print(output) | [
"noreply@github.com"
] | noreply@github.com |
ed5aaf4d9c069dfae5c52ce541ca6227e507404e | 358aaf68f3c60ebbbd86b3bc66d4e6c098bcb39e | /fonts/tsvga_et4000_8x16.py | ec50d5db6c710d173aebde5e57c209dc19065ccd | [
"MIT"
] | permissive | ccccmagicboy/st7735_mpy | d2de0046abd81978d5176dace45a40758377af82 | b15f1bde69fbe6e0eb4931c57e71c136d8e7f024 | refs/heads/master | 2022-08-28T23:18:04.353733 | 2020-05-28T04:19:21 | 2020-05-28T04:19:21 | 254,869,035 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,759 | py | """converted from ..\fonts\TSVGA_ET4000_8x16.bin """
WIDTH = 8
HEIGHT = 16
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x66\x66\x66\x24\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x6c\x6c\xfe\x6c\x6c\x6c\xfe\x6c\x6c\x00\x00\x00\x00'\
b'\x18\x18\x7c\xc6\xc2\xc0\x7c\x06\x06\x86\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\xc2\xc6\x0c\x18\x30\x60\xc6\x86\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x6c\x38\x76\xdc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x30\x30\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x3c\xff\x3c\x66\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x18\x30\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x7e\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xce\xde\xf6\xe6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x18\x38\x78\x18\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x0c\x18\x30\x60\xc0\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xfe\x0c\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xc0\xfc\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x38\x60\xc0\xc0\xfc\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x06\x0c\x18\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7e\x06\x06\x06\x0c\x78\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x06\x0c\x18\x30\x60\x30\x18\x0c\x06\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7e\x00\x00\x7e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xde\xde\xde\xdc\xc0\x7c\x00\x00\x00\x00'\
b'\x00\x00\x10\x38\x6c\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xde\xc6\xc6\x66\x3a\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x1e\x0c\x0c\x0c\x0c\x0c\xcc\xcc\xcc\x78\x00\x00\x00\x00'\
b'\x00\x00\xe6\x66\x66\x6c\x78\x78\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xc6\xee\xfe\xfe\xd6\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xe6\xf6\xfe\xde\xce\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xd6\xde\x7c\x0c\x0e\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x6c\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x60\x38\x0c\x06\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7e\x7e\x5a\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xd6\xd6\xd6\xfe\xee\x6c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\x6c\x7c\x38\x38\x7c\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x3c\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x86\x0c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x3c\x30\x30\x30\x30\x30\x30\x30\x30\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x80\xc0\xe0\x70\x38\x1c\x0e\x06\x02\x00\x00\x00\x00'\
b'\x00\x00\x3c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x3c\x00\x00\x00\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00'\
b'\x30\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x78\x6c\x66\x66\x66\x66\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x3c\x6c\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xfe\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x64\x60\xf0\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\xcc\x7c\x0c\xcc\x78\x00'\
b'\x00\x00\xe0\x60\x60\x6c\x76\x66\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x06\x06\x00\x0e\x06\x06\x06\x06\x06\x06\x66\x66\x3c\x00'\
b'\x00\x00\xe0\x60\x60\x66\x6c\x78\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x38\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xec\xfe\xd6\xd6\xd6\xd6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x7c\x60\x60\xf0\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00'\
b'\x00\x00\x00\x00\x00\xdc\x76\x66\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\x60\x38\x0c\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x10\x30\x30\xfc\x30\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x66\x66\x66\x66\x3c\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\x6c\x38\x38\x38\x6c\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\x7e\x06\x0c\xf8\x00'\
b'\x00\x00\x00\x00\x00\xfe\xcc\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x38\x6c\xc6\xc6\xc6\xfe\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
| [
"cuiwei_cv@163.com"
] | cuiwei_cv@163.com |
8838064973dcf235bd1744d1dadead87051a80ea | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_widest.py | 8492170c13955906272fe657ccf1e56cec9420c8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py |
from xai.brain.wordbase.adjectives._wide import _WIDE
#calss header
class _WIDEST(_WIDE, ):
def __init__(self,):
_WIDE.__init__(self)
self.name = "WIDEST"
self.specie = 'adjectives'
self.basic = "wide"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2fb12aec00a48be35b262b6bf6cbd7fb176cb67c | 3d7b4b097a1ddf08aadc0cc8552fb64b374e17a3 | /firstsite/conversion/views.py | 668f57e175d65986fe6df8882afca55ba243d30a | [] | no_license | kahihia/OnlineSite | f3e89034e0a99530238c512c5902200f4e48db8b | 58644cd2a23716c249cd5977749608a630ee94ae | refs/heads/master | 2021-01-21T10:22:20.981608 | 2017-01-01T11:49:21 | 2017-01-01T11:49:21 | 91,687,052 | 1 | 0 | null | 2017-05-18T11:48:31 | 2017-05-18T11:48:31 | null | UTF-8 | Python | false | false | 457 | py | from django.shortcuts import render
from django.http import HttpResponse
from interpay.models import User
def index(request):
return render(request, 'conversion/index.html')
# Create your views here
def detail(request, trans_id):
p = User.objects.get(first_name = "salman")
b = models.BankAccount.objects.get(owner = p)
c = models.BankAccount.objects.all()
return HttpResponse("You're looking at transaction %s balance." % b.balance)
| [
"sepehrabdous@yahoo.com"
] | sepehrabdous@yahoo.com |
8b2c8aa97d01bb2c4395c2ca6d60e9f7e707861a | b6b23a10e97dbdf6f6b5532caa5620862f0513ce | /Libraries/NanoPB/_archive/generator/nanopb_generator.py | 9e85728641f607d9e72c991273d116e32f2c747e | [
"Zlib"
] | permissive | kwbbpc/arduino-old-2 | fb77ceef395232222549ea33cbc6835733ec54d0 | 3eeb21ecb27511368023113e69203c21506f90d6 | refs/heads/master | 2020-04-23T05:55:52.737285 | 2019-02-16T03:51:12 | 2019-02-16T03:51:12 | 170,956,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,267 | py | #!/usr/bin/python
'''Generate header file for nanopb from a ProtoBuf FileDescriptorSet.'''
nanopb_version = "nanopb-0.2.4"
try:
import google.protobuf.descriptor_pb2 as descriptor
import google.protobuf.text_format as text_format
except:
print
print "*************************************************************"
print "*** Could not import the Google protobuf Python libraries ***"
print "*** Try installing package 'python-protobuf' or similar. ***"
print "*************************************************************"
print
raise
try:
import nanopb_pb2
except:
print
print "***************************************************************"
print "*** Could not import the precompiled nanopb_pb2.py. ***"
print "*** Run 'make' in the 'generator' folder to update the file.***"
print "***************************************************************"
print
raise
# ---------------------------------------------------------------------------
# Generation of single fields
# ---------------------------------------------------------------------------
import time
import os.path
# Values are tuple (c type, pb type, encoded size)
FieldD = descriptor.FieldDescriptorProto
datatypes = {
FieldD.TYPE_BOOL: ('bool', 'BOOL', 1),
FieldD.TYPE_DOUBLE: ('double', 'DOUBLE', 8),
FieldD.TYPE_FIXED32: ('uint32_t', 'FIXED32', 4),
FieldD.TYPE_FIXED64: ('uint64_t', 'FIXED64', 8),
FieldD.TYPE_FLOAT: ('float', 'FLOAT', 4),
FieldD.TYPE_INT32: ('int32_t', 'INT32', 5),
FieldD.TYPE_INT64: ('int64_t', 'INT64', 10),
FieldD.TYPE_SFIXED32: ('int32_t', 'SFIXED32', 4),
FieldD.TYPE_SFIXED64: ('int64_t', 'SFIXED64', 8),
FieldD.TYPE_SINT32: ('int32_t', 'SINT32', 5),
FieldD.TYPE_SINT64: ('int64_t', 'SINT64', 10),
FieldD.TYPE_UINT32: ('uint32_t', 'UINT32', 5),
FieldD.TYPE_UINT64: ('uint64_t', 'UINT64', 10)
}
class Names:
'''Keeps a set of nested names and formats them to C identifier.'''
def __init__(self, parts = ()):
if isinstance(parts, Names):
parts = parts.parts
self.parts = tuple(parts)
def __str__(self):
return '_'.join(self.parts)
def __add__(self, other):
if isinstance(other, (str, unicode)):
return Names(self.parts + (other,))
elif isinstance(other, tuple):
return Names(self.parts + other)
else:
raise ValueError("Name parts should be of type str")
def __eq__(self, other):
return isinstance(other, Names) and self.parts == other.parts
def names_from_type_name(type_name):
'''Parse Names() from FieldDescriptorProto type_name'''
if type_name[0] != '.':
raise NotImplementedError("Lookup of non-absolute type names is not supported")
return Names(type_name[1:].split('.'))
def varint_max_size(max_value):
'''Returns the maximum number of bytes a varint can take when encoded.'''
for i in range(1, 11):
if (max_value >> (i * 7)) == 0:
return i
raise ValueError("Value too large for varint: " + str(max_value))
assert varint_max_size(0) == 1
assert varint_max_size(127) == 1
assert varint_max_size(128) == 2
class EncodedSize:
'''Class used to represent the encoded size of a field or a message.
Consists of a combination of symbolic sizes and integer sizes.'''
def __init__(self, value = 0, symbols = []):
if isinstance(value, (str, Names)):
symbols = [str(value)]
value = 0
self.value = value
self.symbols = symbols
def __add__(self, other):
if isinstance(other, (int, long)):
return EncodedSize(self.value + other, self.symbols)
elif isinstance(other, (str, Names)):
return EncodedSize(self.value, self.symbols + [str(other)])
elif isinstance(other, EncodedSize):
return EncodedSize(self.value + other.value, self.symbols + other.symbols)
else:
raise ValueError("Cannot add size: " + repr(other))
def __mul__(self, other):
if isinstance(other, (int, long)):
return EncodedSize(self.value * other, [str(other) + '*' + s for s in self.symbols])
else:
raise ValueError("Cannot multiply size: " + repr(other))
def __str__(self):
if not self.symbols:
return str(self.value)
else:
return '(' + str(self.value) + ' + ' + ' + '.join(self.symbols) + ')'
def upperlimit(self):
if not self.symbols:
return self.value
else:
return 2**32 - 1
class Enum:
def __init__(self, names, desc, enum_options):
'''desc is EnumDescriptorProto'''
self.options = enum_options
self.names = names + desc.name
if enum_options.long_names:
self.values = [(self.names + x.name, x.number) for x in desc.value]
else:
self.values = [(names + x.name, x.number) for x in desc.value]
self.value_longnames = [self.names + x.name for x in desc.value]
def __str__(self):
result = 'typedef enum _%s {\n' % self.names
result += ',\n'.join([" %s = %d" % x for x in self.values])
result += '\n} %s;' % self.names
return result
class Field:
def __init__(self, struct_name, desc, field_options):
'''desc is FieldDescriptorProto'''
self.tag = desc.number
self.struct_name = struct_name
self.name = desc.name
self.default = None
self.max_size = None
self.max_count = None
self.array_decl = ""
self.enc_size = None
# Parse field options
if field_options.HasField("max_size"):
self.max_size = field_options.max_size
if field_options.HasField("max_count"):
self.max_count = field_options.max_count
if desc.HasField('default_value'):
self.default = desc.default_value
# Check field rules, i.e. required/optional/repeated.
can_be_static = True
if desc.label == FieldD.LABEL_REQUIRED:
self.rules = 'REQUIRED'
elif desc.label == FieldD.LABEL_OPTIONAL:
self.rules = 'OPTIONAL'
elif desc.label == FieldD.LABEL_REPEATED:
self.rules = 'REPEATED'
if self.max_count is None:
can_be_static = False
else:
self.array_decl = '[%d]' % self.max_count
else:
raise NotImplementedError(desc.label)
# Decide the C data type to use in the struct.
if datatypes.has_key(desc.type):
self.ctype, self.pbtype, self.enc_size = datatypes[desc.type]
elif desc.type == FieldD.TYPE_ENUM:
self.pbtype = 'ENUM'
self.ctype = names_from_type_name(desc.type_name)
if self.default is not None:
self.default = self.ctype + self.default
self.enc_size = 5 # protoc rejects enum values > 32 bits
elif desc.type == FieldD.TYPE_STRING:
self.pbtype = 'STRING'
if self.max_size is None:
can_be_static = False
else:
self.ctype = 'char'
self.array_decl += '[%d]' % self.max_size
self.enc_size = varint_max_size(self.max_size) + self.max_size
elif desc.type == FieldD.TYPE_BYTES:
self.pbtype = 'BYTES'
if self.max_size is None:
can_be_static = False
else:
self.ctype = self.struct_name + self.name + 't'
self.enc_size = varint_max_size(self.max_size) + self.max_size
elif desc.type == FieldD.TYPE_MESSAGE:
self.pbtype = 'MESSAGE'
self.ctype = self.submsgname = names_from_type_name(desc.type_name)
self.enc_size = None # Needs to be filled in after the message type is available
else:
raise NotImplementedError(desc.type)
if field_options.type == nanopb_pb2.FT_DEFAULT:
if can_be_static:
field_options.type = nanopb_pb2.FT_STATIC
else:
field_options.type = nanopb_pb2.FT_CALLBACK
if field_options.type == nanopb_pb2.FT_STATIC and not can_be_static:
raise Exception("Field %s is defined as static, but max_size or max_count is not given." % self.name)
if field_options.type == nanopb_pb2.FT_STATIC:
self.allocation = 'STATIC'
elif field_options.type == nanopb_pb2.FT_CALLBACK:
self.allocation = 'CALLBACK'
self.ctype = 'pb_callback_t'
self.array_decl = ''
else:
raise NotImplementedError(field_options.type)
def __cmp__(self, other):
return cmp(self.tag, other.tag)
def __str__(self):
if self.rules == 'OPTIONAL' and self.allocation == 'STATIC':
result = ' bool has_' + self.name + ';\n'
elif self.rules == 'REPEATED' and self.allocation == 'STATIC':
result = ' size_t ' + self.name + '_count;\n'
else:
result = ''
result += ' %s %s%s;' % (self.ctype, self.name, self.array_decl)
return result
def types(self):
'''Return definitions for any special types this field might need.'''
if self.pbtype == 'BYTES' and self.allocation == 'STATIC':
result = 'typedef struct {\n'
result += ' size_t size;\n'
result += ' uint8_t bytes[%d];\n' % self.max_size
result += '} %s;\n' % self.ctype
else:
result = None
return result
def default_decl(self, declaration_only = False):
'''Return definition for this field's default value.'''
if self.default is None:
return None
ctype, default = self.ctype, self.default
array_decl = ''
if self.pbtype == 'STRING':
if self.allocation != 'STATIC':
return None # Not implemented
array_decl = '[%d]' % self.max_size
default = str(self.default).encode('string_escape')
default = default.replace('"', '\\"')
default = '"' + default + '"'
elif self.pbtype == 'BYTES':
if self.allocation != 'STATIC':
return None # Not implemented
data = self.default.decode('string_escape')
data = ['0x%02x' % ord(c) for c in data]
default = '{%d, {%s}}' % (len(data), ','.join(data))
if declaration_only:
return 'extern const %s %s_default%s;' % (ctype, self.struct_name + self.name, array_decl)
else:
return 'const %s %s_default%s = %s;' % (ctype, self.struct_name + self.name, array_decl, default)
def tags(self):
'''Return the #define for the tag number of this field.'''
identifier = '%s_%s_tag' % (self.struct_name, self.name)
return '#define %-40s %d\n' % (identifier, self.tag)
def pb_field_t(self, prev_field_name):
'''Return the pb_field_t initializer to use in the constant array.
prev_field_name is the name of the previous field or None.
'''
result = ' PB_FIELD2(%3d, ' % self.tag
result += '%-8s, ' % self.pbtype
result += '%s, ' % self.rules
result += '%s, ' % self.allocation
result += '%s, ' % ("FIRST" if not prev_field_name else "OTHER")
result += '%s, ' % self.struct_name
result += '%s, ' % self.name
result += '%s, ' % (prev_field_name or self.name)
if self.pbtype == 'MESSAGE':
result += '&%s_fields)' % self.submsgname
elif self.default is None:
result += '0)'
elif self.pbtype in ['BYTES', 'STRING'] and self.allocation != 'STATIC':
result += '0)' # Arbitrary size default values not implemented
else:
result += '&%s_default)' % (self.struct_name + self.name)
return result
def largest_field_value(self):
'''Determine if this field needs 16bit or 32bit pb_field_t structure to compile properly.
Returns numeric value or a C-expression for assert.'''
if self.pbtype == 'MESSAGE':
if self.rules == 'REPEATED' and self.allocation == 'STATIC':
return 'pb_membersize(%s, %s[0])' % (self.struct_name, self.name)
else:
return 'pb_membersize(%s, %s)' % (self.struct_name, self.name)
return max(self.tag, self.max_size, self.max_count)
def encoded_size(self, allmsgs):
'''Return the maximum size that this field can take when encoded,
including the field tag. If the size cannot be determined, returns
None.'''
if self.allocation != 'STATIC':
return None
if self.pbtype == 'MESSAGE':
for msg in allmsgs:
if msg.name == self.submsgname:
encsize = msg.encoded_size(allmsgs)
if encsize is None:
return None # Submessage size is indeterminate
# Include submessage length prefix
encsize += varint_max_size(encsize.upperlimit())
break
else:
# Submessage cannot be found, this currently occurs when
# the submessage type is defined in a different file.
# Instead of direct numeric value, reference the size that
# has been #defined in the other file.
encsize = EncodedSize(self.submsgname + 'size')
# We will have to make a conservative assumption on the length
# prefix size, though.
encsize += 5
elif self.enc_size is None:
raise RuntimeError("Could not determine encoded size for %s.%s"
% (self.struct_name, self.name))
else:
encsize = EncodedSize(self.enc_size)
encsize += varint_max_size(self.tag << 3) # Tag + wire type
if self.rules == 'REPEATED':
# Decoders must be always able to handle unpacked arrays.
# Therefore we have to reserve space for it, even though
# we emit packed arrays ourselves.
encsize *= self.max_count
return encsize
class ExtensionRange(Field):
def __init__(self, struct_name, range_start, field_options):
'''Implements a special pb_extension_t* field in an extensible message
structure. The range_start signifies the index at which the extensions
start. Not necessarily all tags above this are extensions, it is merely
a speed optimization.
'''
self.tag = range_start
self.struct_name = struct_name
self.name = 'extensions'
self.pbtype = 'EXTENSION'
self.rules = 'OPTIONAL'
self.allocation = 'CALLBACK'
self.ctype = 'pb_extension_t'
self.array_decl = ''
self.default = None
self.max_size = 0
self.max_count = 0
def __str__(self):
return ' pb_extension_t *extensions;'
def types(self):
return None
def tags(self):
return ''
def encoded_size(self, allmsgs):
# We exclude extensions from the count, because they cannot be known
# until runtime. Other option would be to return None here, but this
# way the value remains useful if extensions are not used.
return EncodedSize(0)
class ExtensionField(Field):
def __init__(self, struct_name, desc, field_options):
self.fullname = struct_name + desc.name
self.extendee_name = names_from_type_name(desc.extendee)
Field.__init__(self, self.fullname + 'struct', desc, field_options)
if self.rules != 'OPTIONAL':
self.skip = True
else:
self.skip = False
self.rules = 'OPTEXT'
def tags(self):
'''Return the #define for the tag number of this field.'''
identifier = '%s_tag' % self.fullname
return '#define %-40s %d\n' % (identifier, self.tag)
def extension_decl(self):
'''Declaration of the extension type in the .pb.h file'''
if self.skip:
msg = '/* Extension field %s was skipped because only "optional"\n' % self.fullname
msg +=' type of extension fields is currently supported. */\n'
return msg
return 'extern const pb_extension_type_t %s;\n' % self.fullname
def extension_def(self):
'''Definition of the extension type in the .pb.c file'''
if self.skip:
return ''
result = 'typedef struct {\n'
result += str(self)
result += '\n} %s;\n\n' % self.struct_name
result += ('static const pb_field_t %s_field = \n %s;\n\n' %
(self.fullname, self.pb_field_t(None)))
result += 'const pb_extension_type_t %s = {\n' % self.fullname
result += ' NULL,\n'
result += ' NULL,\n'
result += ' &%s_field\n' % self.fullname
result += '};\n'
return result
# ---------------------------------------------------------------------------
# Generation of messages (structures)
# ---------------------------------------------------------------------------
class Message:
def __init__(self, names, desc, message_options):
self.name = names
self.fields = []
for f in desc.field:
field_options = get_nanopb_suboptions(f, message_options, self.name + f.name)
if field_options.type != nanopb_pb2.FT_IGNORE:
self.fields.append(Field(self.name, f, field_options))
if len(desc.extension_range) > 0:
field_options = get_nanopb_suboptions(desc, message_options, self.name + 'extensions')
range_start = min([r.start for r in desc.extension_range])
if field_options.type != nanopb_pb2.FT_IGNORE:
self.fields.append(ExtensionRange(self.name, range_start, field_options))
self.packed = message_options.packed_struct
self.ordered_fields = self.fields[:]
self.ordered_fields.sort()
def get_dependencies(self):
'''Get list of type names that this structure refers to.'''
return [str(field.ctype) for field in self.fields]
def __str__(self):
result = 'typedef struct _%s {\n' % self.name
if not self.ordered_fields:
# Empty structs are not allowed in C standard.
# Therefore add a dummy field if an empty message occurs.
result += ' uint8_t dummy_field;'
result += '\n'.join([str(f) for f in self.ordered_fields])
result += '\n}'
if self.packed:
result += ' pb_packed'
result += ' %s;' % self.name
if self.packed:
result = 'PB_PACKED_STRUCT_START\n' + result
result += '\nPB_PACKED_STRUCT_END'
return result
def types(self):
result = ""
for field in self.fields:
types = field.types()
if types is not None:
result += types + '\n'
return result
def default_decl(self, declaration_only = False):
result = ""
for field in self.fields:
default = field.default_decl(declaration_only)
if default is not None:
result += default + '\n'
return result
def fields_declaration(self):
result = 'extern const pb_field_t %s_fields[%d];' % (self.name, len(self.fields) + 1)
return result
def fields_definition(self):
result = 'const pb_field_t %s_fields[%d] = {\n' % (self.name, len(self.fields) + 1)
prev = None
for field in self.ordered_fields:
result += field.pb_field_t(prev)
result += ',\n'
prev = field.name
result += ' PB_LAST_FIELD\n};'
return result
def encoded_size(self, allmsgs):
'''Return the maximum size that this message can take when encoded.
If the size cannot be determined, returns None.
'''
size = EncodedSize(0)
for field in self.fields:
fsize = field.encoded_size(allmsgs)
if fsize is None:
return None
size += fsize
return size
# ---------------------------------------------------------------------------
# Processing of entire .proto files
# ---------------------------------------------------------------------------
def iterate_messages(desc, names = Names()):
'''Recursively find all messages. For each, yield name, DescriptorProto.'''
if hasattr(desc, 'message_type'):
submsgs = desc.message_type
else:
submsgs = desc.nested_type
for submsg in submsgs:
sub_names = names + submsg.name
yield sub_names, submsg
for x in iterate_messages(submsg, sub_names):
yield x
def iterate_extensions(desc, names = Names()):
'''Recursively find all extensions.
For each, yield name, FieldDescriptorProto.
'''
for extension in desc.extension:
yield names, extension
for subname, subdesc in iterate_messages(desc, names):
for extension in subdesc.extension:
yield subname, extension
def parse_file(fdesc, file_options):
    '''Takes a FileDescriptorProto and returns tuple (enums, messages, extensions).'''
    if fdesc.package:
        base_name = Names(fdesc.package.split('.'))
    else:
        base_name = Names()

    # Top-level enums.
    enums = []
    for enum in fdesc.enum_type:
        opts = get_nanopb_suboptions(enum, file_options, base_name + enum.name)
        enums.append(Enum(base_name, enum, opts))

    # Messages (including nested ones) plus any enums declared inside them.
    messages = []
    for names, message in iterate_messages(fdesc, base_name):
        msg_opts = get_nanopb_suboptions(message, file_options, names)
        messages.append(Message(names, message, msg_opts))
        for enum in message.enum_type:
            opts = get_nanopb_suboptions(enum, msg_opts, names + enum.name)
            enums.append(Enum(names, enum, opts))

    # Extensions, unless their options mark them as ignored.
    extensions = []
    for names, extension in iterate_extensions(fdesc, base_name):
        opts = get_nanopb_suboptions(extension, file_options, names)
        if opts.type != nanopb_pb2.FT_IGNORE:
            extensions.append(ExtensionField(names, extension, opts))

    # Fix field default values where enum short names are used.
    for enum in enums:
        if enum.options.long_names:
            continue
        # value_longnames and values are parallel lists; map long -> short.
        short_by_long = dict(zip(enum.value_longnames,
                                 (value[0] for value in enum.values)))
        for message in messages:
            for field in message.fields:
                if field.default in short_by_long:
                    field.default = short_by_long[field.default]

    return enums, messages, extensions
def toposort2(data):
    '''Topological sort.
    From http://code.activestate.com/recipes/577413-topological-sort/
    This function is under the MIT license.

    data maps each item to the set of items it depends on.  Items are
    yielded in dependency order; within each batch of mutually independent
    items they are yielded in sorted order.  Raises AssertionError if the
    dependency graph contains a cycle.
    '''
    # 'reduce' is a builtin on Python 2 but lives in functools on Python 3;
    # importing it from functools works on both, so this generator no
    # longer NameErrors under Python 3.
    from functools import reduce
    for k, v in data.items():
        v.discard(k) # Ignore self dependencies
    # Items that appear only as dependencies get an empty dependency set.
    extra_items_in_deps = reduce(set.union, data.values(), set()) - set(data.keys())
    data.update(dict([(item, set()) for item in extra_items_in_deps]))
    while True:
        ordered = set(item for item,dep in data.items() if not dep)
        if not ordered:
            break
        for item in sorted(ordered):
            yield item
        # Drop the emitted items and remove them from remaining dep sets.
        data = dict([(item, (dep - ordered)) for item,dep in data.items()
                     if item not in ordered])
    assert not data, "A cyclic dependency exists amongst %r" % data
def sort_dependencies(messages):
    '''Yield the given Messages so that dependencies come before dependents.'''
    by_name = {}
    deps = {}
    for msg in messages:
        key = str(msg.name)
        by_name[key] = msg
        deps[key] = set(msg.get_dependencies())
    for msgname in toposort2(deps):
        # toposort2 also yields names of external dependencies that are not
        # in this message list; skip those.
        if msgname in by_name:
            yield by_name[msgname]
def make_identifier(headername):
    '''Make #ifndef identifier that contains uppercase A-Z and digits 0-9.

    Every character that is not alphanumeric is replaced by an underscore.
    '''
    return ''.join(c if c.isalnum() else '_' for c in headername.upper())
def generate_header(dependencies, headername, enums, messages, extensions, options):
    '''Generate content for a header file.
    Generates strings, which should be concatenated and stored to file.

    dependencies: .proto file names whose generated headers must be included.
    headername: basename of the header being generated (used for the guard).
    enums/messages/extensions: parsed objects from parse_file().
    options: command line options (libformat/genformat/extension are used).
    '''
    yield '/* Automatically generated nanopb header */\n'
    yield '/* Generated by %s at %s. */\n\n' % (nanopb_version, time.asctime())
    # Include guard derived from the header file name.
    symbol = make_identifier(headername)
    yield '#ifndef _PB_%s_\n' % symbol
    yield '#define _PB_%s_\n' % symbol
    try:
        yield options.libformat % ('pb.h')
    except TypeError:
        # no %s specified - use whatever was passed in as options.libformat
        yield options.libformat
    yield '\n'
    # Headers generated from the .proto files this one depends on.
    for dependency in dependencies:
        noext = os.path.splitext(dependency)[0]
        yield options.genformat % (noext + '.' + options.extension + '.h')
        yield '\n'
    yield '#ifdef __cplusplus\n'
    yield 'extern "C" {\n'
    yield '#endif\n\n'
    yield '/* Enum definitions */\n'
    for enum in enums:
        yield str(enum) + '\n\n'
    yield '/* Struct definitions */\n'
    # Structs must be emitted in dependency order so that embedded message
    # types are defined before use.
    for msg in sort_dependencies(messages):
        yield msg.types()
        yield str(msg) + '\n\n'
    if extensions:
        yield '/* Extensions */\n'
        for extension in extensions:
            yield extension.extension_decl()
        yield '\n'
    yield '/* Default values for struct fields */\n'
    for msg in messages:
        yield msg.default_decl(True)
    yield '\n'
    yield '/* Field tags (for use in manual encoding/decoding) */\n'
    for msg in sort_dependencies(messages):
        for field in msg.fields:
            yield field.tags()
    for extension in extensions:
        yield extension.tags()
    yield '\n'
    yield '/* Struct field encoding specification for nanopb */\n'
    for msg in messages:
        yield msg.fields_declaration() + '\n'
    yield '\n'
    yield '/* Maximum encoded size of messages (where known) */\n'
    for msg in messages:
        msize = msg.encoded_size(messages)
        # encoded_size() returns None when the size is unbounded/unknown.
        if msize is not None:
            identifier = '%s_size' % msg.name
            yield '#define %-40s %s\n' % (identifier, msize)
    yield '\n'
    yield '#ifdef __cplusplus\n'
    yield '} /* extern "C" */\n'
    yield '#endif\n'
    # End of header
    yield '\n#endif\n'
def generate_source(headername, enums, messages, extensions, options):
    '''Generate content for a source file.

    Yields string fragments; concatenate and write them to the .pb.c file.
    Emits default value definitions, the pb_field_t arrays, extension
    definitions, and compile-time limit checks.
    '''
    # Tuple of string types on this interpreter: 'unicode' exists only on
    # Python 2, so probe for it instead of referencing it unconditionally
    # (which raised NameError on Python 3).
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)

    yield '/* Automatically generated nanopb constant definitions */\n'
    yield '/* Generated by %s at %s. */\n\n' % (nanopb_version, time.asctime())
    yield options.genformat % (headername)
    yield '\n'

    for msg in messages:
        yield msg.default_decl(False)
    yield '\n\n'

    for msg in messages:
        yield msg.fields_definition() + '\n\n'

    for ext in extensions:
        yield ext.extension_def() + '\n'

    # Add checks for numeric limits
    if messages:
        # BUG FIX: this lambda previously referenced the stale loop variable
        # 'msg' left over from the loops above instead of its own parameter,
        # so every message got the same count and max() picked an arbitrary
        # message.  It must count the required fields of its argument 'm'.
        count_required_fields = lambda m: len([f for f in m.fields if f.rules == 'REQUIRED'])
        largest_msg = max(messages, key = count_required_fields)
        largest_count = count_required_fields(largest_msg)
        if largest_count > 64:
            yield '\n/* Check that missing required fields will be properly detected */\n'
            yield '#if PB_MAX_REQUIRED_FIELDS < %d\n' % largest_count
            yield '#error Properly detecting missing required fields in %s requires \\\n' % largest_msg.name
            yield ' setting PB_MAX_REQUIRED_FIELDS to %d or more.\n' % largest_count
            yield '#endif\n'

    # Determine the largest numeric field descriptor value, and collect
    # symbolic size expressions that must be range-checked at C compile time.
    worst = 0
    worst_field = ''
    checks = []
    checks_msgnames = []
    for msg in messages:
        checks_msgnames.append(msg.name)
        for field in msg.fields:
            status = field.largest_field_value()
            if isinstance(status, string_types):
                # Symbolic expression: defer the check to a STATIC_ASSERT.
                checks.append(status)
            elif status > worst:
                worst = status
                worst_field = str(field.struct_name) + '.' + str(field.name)

    if worst > 255 or checks:
        yield '\n/* Check that field information fits in pb_field_t */\n'
        if worst < 65536:
            yield '#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)\n'
            if worst > 255:
                yield '#error Field descriptor for %s is too large. Define PB_FIELD_16BIT to fix this.\n' % worst_field
            else:
                assertion = ' && '.join(str(c) + ' < 256' for c in checks)
                msgs = '_'.join(str(n) for n in checks_msgnames)
                yield 'STATIC_ASSERT((%s), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_%s)\n'%(assertion,msgs)
            yield '#endif\n\n'
        if worst > 65535 or checks:
            yield '#if !defined(PB_FIELD_32BIT)\n'
            if worst > 65535:
                yield '#error Field descriptor for %s is too large. Define PB_FIELD_32BIT to fix this.\n' % worst_field
            else:
                assertion = ' && '.join(str(c) + ' < 65536' for c in checks)
                msgs = '_'.join(str(n) for n in checks_msgnames)
                yield 'STATIC_ASSERT((%s), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_%s)\n'%(assertion,msgs)
            yield '#endif\n'

    # Add check for sizeof(double)
    has_double = False
    for msg in messages:
        for field in msg.fields:
            if field.ctype == 'double':
                has_double = True

    if has_double:
        yield '\n'
        yield '/* On some platforms (such as AVR), double is really float.\n'
        yield ' * These are not directly supported by nanopb, but see example_avr_double.\n'
        yield ' * To get rid of this error, remove any double fields from your .proto.\n'
        yield ' */\n'
        yield 'STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)\n'

    yield '\n'
# ---------------------------------------------------------------------------
# Options parsing for the .proto files
# ---------------------------------------------------------------------------
from fnmatch import fnmatch
def read_options_file(infile):
    '''Parse a separate options file to list:
    [(namemask, options), ...]

    Each non-empty, non-comment line has the form
    "<namemask> <option text>", where the option text is parsed as a
    NanoPBOptions text-format message.
    '''
    results = []
    for raw_line in infile:
        stripped = raw_line.strip()
        # Skip blanks and both '//' and '#' style comment lines.
        if not stripped or stripped.startswith(('//', '#')):
            continue
        parts = stripped.split(None, 1)
        opts = nanopb_pb2.NanoPBOptions()
        text_format.Merge(parts[1], opts)
        results.append((parts[0], opts))
    return results
class Globals:
    '''Ugly global variables, should find a good way to pass these.'''
    # Set from the --verbose command line flag; enables the option
    # debug printout in get_nanopb_suboptions().
    verbose_options = False
    # List of (namemask, NanoPBOptions) pairs read from the separate
    # .options file by read_options_file(); empty when no file exists.
    separate_options = []
def get_nanopb_suboptions(subdesc, options, name):
    '''Get copy of options, and merge information from subdesc.

    subdesc: descriptor (field/file/message/enum) whose options to merge.
    options: NanoPBOptions inherited from the enclosing scope; not modified.
    name: Names instance identifying subdesc, matched against the name
          masks from the separate .options file.
    Returns a new NanoPBOptions object.
    Raises Exception if subdesc.options is of an unknown descriptor type.
    '''
    new_options = nanopb_pb2.NanoPBOptions()
    new_options.CopyFrom(options)

    # Handle options defined in a separate file.
    # FIX: the loop variable used to be named 'options', shadowing the
    # parameter above; renamed to avoid that latent confusion.
    dotname = '.'.join(name.parts)
    for namemask, separate_opts in Globals.separate_options:
        if fnmatch(dotname, namemask):
            new_options.MergeFrom(separate_opts)

    # Handle options defined in .proto: pick the nanopb extension type that
    # matches the kind of descriptor we were given.
    if isinstance(subdesc.options, descriptor.FieldOptions):
        ext_type = nanopb_pb2.nanopb
    elif isinstance(subdesc.options, descriptor.FileOptions):
        ext_type = nanopb_pb2.nanopb_fileopt
    elif isinstance(subdesc.options, descriptor.MessageOptions):
        ext_type = nanopb_pb2.nanopb_msgopt
    elif isinstance(subdesc.options, descriptor.EnumOptions):
        ext_type = nanopb_pb2.nanopb_enumopt
    else:
        raise Exception("Unknown options type")

    if subdesc.options.HasExtension(ext_type):
        ext = subdesc.options.Extensions[ext_type]
        new_options.MergeFrom(ext)

    if Globals.verbose_options:
        # Parenthesized single-argument print works on Python 2 and 3 alike.
        print("Options for " + dotname + ":")
        print(text_format.MessageToString(new_options))

    return new_options
# ---------------------------------------------------------------------------
# Command line interface
# ---------------------------------------------------------------------------
import sys
import os.path
from optparse import OptionParser
# Command line option definitions, shared by both the standalone CLI
# (main_cli) and the protoc plugin entry point (main_plugin).
optparser = OptionParser(
    usage = "Usage: nanopb_generator.py [options] file.pb ...",
    epilog = "Compile file.pb from file.proto by: 'protoc -ofile.pb file.proto'. " +
             "Output will be written to file.pb.h and file.pb.c.")
optparser.add_option("-x", dest="exclude", metavar="FILE", action="append", default=[],
    help="Exclude file from generated #include list.")
optparser.add_option("-e", "--extension", dest="extension", metavar="EXTENSION", default="pb",
    help="Set extension to use instead of 'pb' for generated files. [default: %default]")
optparser.add_option("-f", "--options-file", dest="options_file", metavar="FILE", default="%s.options",
    help="Set name of a separate generator options file.")
optparser.add_option("-Q", "--generated-include-format", dest="genformat",
    metavar="FORMAT", default='#include "%s"\n',
    help="Set format string to use for including other .pb.h files. [default: %default]")
optparser.add_option("-L", "--library-include-format", dest="libformat",
    metavar="FORMAT", default='#include <%s>\n',
    help="Set format string to use for including the nanopb pb.h header. [default: %default]")
optparser.add_option("-q", "--quiet", dest="quiet", action="store_true", default=False,
    help="Don't print anything except errors.")
optparser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
    help="Print more information.")
# -s options are merged into a top-level NanoPBOptions in process_file().
optparser.add_option("-s", dest="settings", metavar="OPTION:VALUE", action="append", default=[],
    help="Set generator option (max_size, max_count etc.).")
def process_file(filename, fdesc, options):
    '''Process a single file.
    filename: The full path to the .proto or .pb source file, as string.
    fdesc: The loaded FileDescriptorSet, or None to read from the input file.
    options: Command line options as they come from OptionsParser.

    Returns a dict:
        {'headername': Name of header file,
         'headerdata': Data for the .h header file,
         'sourcename': Name of the source code file,
         'sourcedata': Data for the .c source code file
        }
    '''
    toplevel_options = nanopb_pb2.NanoPBOptions()
    for s in options.settings:
        text_format.Merge(s, toplevel_options)

    if not fdesc:
        # Use a context manager so the descriptor file is closed promptly
        # instead of being leaked until garbage collection.
        with open(filename, 'rb') as descfile:
            data = descfile.read()
        fdesc = descriptor.FileDescriptorSet.FromString(data).file[0]

    # Check if there is a separate .options file
    try:
        optfilename = options.options_file % os.path.splitext(filename)[0]
    except TypeError:
        # No %s specified, use the filename as-is
        optfilename = options.options_file

    if options.verbose:
        # Parenthesized single-argument print works on Python 2 and 3 alike.
        print('Reading options from ' + optfilename)

    if os.path.isfile(optfilename):
        # Mode 'rU' was deprecated and removed in Python 3.11; plain 'r'
        # already gives universal newline handling on Python 3.
        with open(optfilename, 'r') as optfile:
            Globals.separate_options = read_options_file(optfile)
    else:
        Globals.separate_options = []

    # Parse the file
    file_options = get_nanopb_suboptions(fdesc, toplevel_options, Names([filename]))
    enums, messages, extensions = parse_file(fdesc, file_options)

    # Decide the file names
    noext = os.path.splitext(filename)[0]
    headername = noext + '.' + options.extension + '.h'
    sourcename = noext + '.' + options.extension + '.c'
    headerbasename = os.path.basename(headername)

    # List of .proto files that should not be included in the C header file
    # even if they are mentioned in the source .proto.
    excludes = ['nanopb.proto', 'google/protobuf/descriptor.proto'] + options.exclude
    dependencies = [d for d in fdesc.dependency if d not in excludes]

    headerdata = ''.join(generate_header(dependencies, headerbasename, enums,
                                         messages, extensions, options))
    sourcedata = ''.join(generate_source(headerbasename, enums,
                                         messages, extensions, options))

    return {'headername': headername, 'headerdata': headerdata,
            'sourcename': sourcename, 'sourcedata': sourcedata}
def main_cli():
    '''Main function when invoked directly from the command line.

    Parses command line arguments, processes each input .pb file and
    writes the generated .pb.h / .pb.c files next to it.  Exits with
    status 1 when no input files were given.
    '''
    options, filenames = optparser.parse_args()

    if not filenames:
        optparser.print_help()
        sys.exit(1)

    if options.quiet:
        options.verbose = False

    Globals.verbose_options = options.verbose

    for filename in filenames:
        results = process_file(filename, None, options)

        if not options.quiet:
            # Parenthesized single-argument print works on Python 2 and 3.
            print("Writing to " + results['headername'] + " and " + results['sourcename'])

        # Close the output files deterministically instead of leaking the
        # handles until garbage collection (important on non-refcounting
        # interpreters and for prompt flushing).
        with open(results['headername'], 'w') as header_file:
            header_file.write(results['headerdata'])
        with open(results['sourcename'], 'w') as source_file:
            source_file.write(results['sourcedata'])
def main_plugin():
    '''Main function when invoked as a protoc plugin.

    Reads a serialized CodeGeneratorRequest from stdin, generates header
    and source data for every requested file, and writes a serialized
    CodeGeneratorResponse to stdout.
    '''
    import plugin_pb2
    import shlex

    request = plugin_pb2.CodeGeneratorRequest.FromString(sys.stdin.read())
    options, _ = optparser.parse_args(shlex.split(request.parameter))

    # We can't go printing stuff to stdout
    Globals.verbose_options = False
    options.verbose = False
    options.quiet = True

    response = plugin_pb2.CodeGeneratorResponse()
    for filename in request.file_to_generate:
        # Find the descriptor(s) matching this file name.
        for fdesc in request.proto_file:
            if fdesc.name != filename:
                continue
            results = process_file(filename, fdesc, options)

            header = response.file.add()
            header.name = results['headername']
            header.content = results['headerdata']

            source = response.file.add()
            source.name = results['sourcename']
            source.content = results['sourcedata']

    sys.stdout.write(response.SerializeToString())
if __name__ == '__main__':
    # Check if we are running as a plugin under protoc
    # protoc invokes plugins as executables named 'protoc-gen-<name>', so
    # the invocation name in argv[0] tells the two modes apart.
    if 'protoc-gen-' in sys.argv[0]:
        main_plugin()
    else:
        main_cli()
| [
"k4ls4ydog"
] | k4ls4ydog |
d044824c9b43cb3683ca050c720f6d3b10ba6526 | bc59c6fb1cbc2a4505f505a267f806cbe074b6c0 | /myproject/ajax.py | dd56cb321e2987f32da44606d8916eaacae873b3 | [] | no_license | micheledinanni/miniIPIP_test | 0449f54e171a8a6dcd7a285bb93497abffd7c1bf | 59723cfde237bee3ca000b5fc232912f6550bc4e | refs/heads/master | 2022-05-16T11:44:50.552889 | 2020-06-06T13:15:15 | 2020-06-06T13:15:15 | 160,211,850 | 2 | 2 | null | 2022-04-22T21:00:21 | 2018-12-03T15:23:47 | JavaScript | UTF-8 | Python | false | false | 374 | py | import json
from .logger import logger_file
from django.http import JsonResponse
def validate(request, id):
    """Return the contents of the shared status file as a JSON response.

    request: Django HttpRequest (only used for routing).
    id: URL parameter; currently unused but kept for URLconf compatibility
        (renaming it would break keyword-based URL dispatch).

    On any failure the error is logged via logger_file() and a JSON error
    payload with HTTP status 500 is returned.  The original code returned
    None on error, which makes Django raise "view didn't return an
    HttpResponse" instead of reporting the real problem.
    """
    try:
        # The 'with' block closes the file automatically; the old explicit
        # jsonfile.close() inside it was redundant.
        with open('myproject/ajax_files/status.json', 'r') as jsonfile:
            data = json.load(jsonfile)
        return JsonResponse(data)
    except Exception as e:
        error = str(e)
        logger_file(error)
        return JsonResponse({'error': error}, status=500)
| [
"michele.dinanni@libero.it"
] | michele.dinanni@libero.it |
f56ffcbf9e73013af4c6531cb180dc41e524665c | 2a882fde8266a381ce405c990516cc01ba4317d2 | /Mendel's First Law.py | f62359a3402bf7fedea29910920493ff8d98dd3c | [] | no_license | chandlerklein/Rosalind | acdf5c2c3e0134b8ff792ef525e5c8a55ab9db5f | 3587f596e063a0bef310203714f15fe05c3b0eb8 | refs/heads/main | 2023-08-16T04:14:41.997407 | 2021-09-16T23:45:52 | 2021-09-16T23:45:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # http://rosalind.info/problems/iprb/
# Rosalind IPRB: given counts of homozygous dominant, heterozygous and
# homozygous recessive individuals, print the probability that a random
# mating pair produces offspring showing the dominant phenotype.
with open('datasets/rosalind_iprb.txt') as handle:
    dom, het, rec = map(float, handle.readline().split())

population = dom + het + rec

# Probability of drawing each parent pair (without replacement) times the
# chance that pair yields a homozygous-recessive child.
pair_rec_rec = (rec / population) * ((rec - 1) / (population - 1))
pair_het_het = (het / population) * ((het - 1) / (population - 1))
pair_het_rec = ((het / population) * (rec / (population - 1))) + ((rec / population) * (het / (population - 1)))

recessive_child = pair_rec_rec + pair_het_het * 0.25 + pair_het_rec * 0.5
print(1 - recessive_child)
| [
"kleinc16@gmail.com"
] | kleinc16@gmail.com |
24f5f53ce96d023d4e27da65ff4e68c9dfcdb81d | 223faad72af5096c71f28808f156e4179aa185c6 | /paddle.py | 1db4122ed1848b9cd8cbae754d347f572160e560 | [] | no_license | AlexandrDaultukaev/pong_game | 6ed4d80eb477bdda884c9b1da523a1899ff85515 | 0a3c642461b462c9c65d16bfe85c2b4f83120230 | refs/heads/master | 2023-06-24T05:49:47.144943 | 2021-07-27T17:59:24 | 2021-07-27T17:59:24 | 390,080,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | from turtle import Turtle
class Paddle(Turtle):
    """A white rectangular pong paddle that moves vertically in 50 px steps.

    The stored (x, y) position is kept in sync with the on-screen turtle,
    and vertical movement stops once y reaches +/-300 (the last allowed
    step starts from +/-250).
    """

    def __init__(self, x, y):
        super().__init__()
        self.x = x
        self.y = y
        self.penup()
        self.shape("square")
        self.shapesize(stretch_wid=5, stretch_len=0.8)
        self.goto(x=self.x, y=self.y)
        self.color("white")

    def _snap_to_position(self):
        """Move the turtle to the currently stored coordinates."""
        self.goto(x=self.x, y=self.y)

    def up(self):
        """Step 50 px upwards unless the paddle is already at the top edge."""
        if self.y < 250:
            self.y += 50
            self._snap_to_position()

    def down(self):
        """Step 50 px downwards unless the paddle is already at the bottom edge."""
        if self.y > -250:
            self.y -= 50
            self._snap_to_position()
| [
"shyra31102000@mail.ru"
] | shyra31102000@mail.ru |
73456206a8ae6e65b0f1cf53ecb7a0b8b0cde6d6 | 18fa5f5f7ee625253240681fb4b7509d48a6adf7 | /python/books/writing-solid-python-code-91-suggestions/16.py | 3288669ba2653de2f9fb52e77513a817ff661505 | [] | no_license | heyulin1989/language | 849fa3619505d08e107379fc123ce6c178d5e2d4 | ee4b5db615bfd52ad4313da170f7bf6391b9ffa8 | refs/heads/master | 2021-07-05T15:46:11.706373 | 2021-07-02T10:12:54 | 2021-07-02T10:12:54 | 79,528,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | #coding:utf8
"""
# vim: set fileencoding=utf8
from __future__ import unnicode_literals
"""
a="Hi"
b="Hi"
print a is b
print a == b
print "a = ",id(a)," b = ",id(b)
# 一但有空格就会不在一个内存中
a1 = "I am"
b1 = "I am"
print a1 is b1
print a1 == b1
print "a1 = ",id(a1)," b = ",id(b1)
"""
is 表示的是对象标示符(object identity): 两个对象是否在同一个内存空间
== equal 两个对象的值是否相等
"""
| [
"heyulin_1989@163.com"
] | heyulin_1989@163.com |
3c5b887cbc4cdb2d6567919559b3ccd0c61bf851 | f6d271233b8a91f63c30148cdcdb86341ba45c2f | /external/TriBITS/tribits/python_utils/snapshot-dir.py | 577f9746a955bdf93ab040a8cb553ce61281078e | [
"MIT",
"BSD-2-Clause"
] | permissive | wawiesel/BootsOnTheGround | 09e7884b9df9c1eb39a4a4f3d3f8f109960aeca0 | bdb63ae6f906c34f66e995973bf09aa6f2999148 | refs/heads/master | 2021-01-11T20:13:10.304516 | 2019-10-09T16:44:33 | 2019-10-09T16:44:33 | 79,069,796 | 4 | 1 | MIT | 2019-10-09T16:44:34 | 2017-01-16T00:47:11 | CMake | UTF-8 | Python | false | false | 2,715 | py | #!/usr/bin/env python
# @HEADER
# ************************************************************************
#
# TriBITS: Tribal Build, Integrate, and Test System
# Copyright 2013 Sandia Corporation
#
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Corporation nor the names of the
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************
# @HEADER
import sys
import os
#
# Resolve the real location of this script (following any symlink through
# which it was invoked) and put its python/ subdirectory on the import path.
#
_script_path = os.path.realpath(os.path.abspath(sys.argv[0]))
scriptsDir = os.path.dirname(_script_path) + "/python"
sys.path.insert(0, scriptsDir)

#
# Import and run
#
import SnapshotDir

# When SNAPSHOT_DIR_DUMMY_DEFAULTS is set, feed placeholder directories to
# the driver (used to exercise the help/dummy mode).
snapshotDirDummyDefaults = os.environ.get("SNAPSHOT_DIR_DUMMY_DEFAULTS", "")
defaultOptions = None
if snapshotDirDummyDefaults:
    defaultOptions = SnapshotDir.DefaultOptions()
    defaultOptions.origDir = "<orig-dir>"
    defaultOptions.destDir = "<dest-dir>"

success = SnapshotDir.snapshotDirMainDriver(sys.argv[1:], defaultOptions)
sys.exit(0 if success else 1)
| [
"ww5@ornl.gov"
] | ww5@ornl.gov |
6bac8cbe7d5c8f648531bd8b9df7cea5d5121a28 | 4562339920161ad5d2bbf65324318ce8df747559 | /Python_Functions/unitEdit.py | 05725a447e60b6ce18b4eb77ee689cc6868fe5a4 | [] | no_license | JLO64/Hardware-Donations-Desktop-App | 4dfb41874717888b5d10dcae6f82e9927ee2dcec | d238dbb6c30721f79cfae58d2cc32f4bf3fc4982 | refs/heads/master | 2020-09-16T05:12:36.184053 | 2020-04-16T19:15:06 | 2020-04-16T19:15:06 | 223,663,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,713 | py | import boto3, json, getpass, os, click
import terminalColor, settingsJson, fileFunctions, unitEdit, browseDatabase, settings
import array as arr
from pyautogui import typewrite
try: import readline
except: settingsJson.externalEditor = True
try:
from tkinter import Tk
from tkinter.filedialog import askopenfilename
except: settingsJson.guiMode = False
def unitEditOptions(responseJson, unitID): #unit options user is given
    """Interactive top-level menu for one unit.

    responseJson: backend response dict; must contain a 'unitInfo' dict.
    unitID: '<category>-<number>' identifier of the unit being shown.

    Loops until the user picks Exit or deletes the unit.  Invalid input
    (and any other error) is swallowed by the broad except and simply
    re-prompts.  Setting intDecision back to 0 forces another pass
    through the menu loop.
    """
    intDecision = 0
    listOfOptions =[". Edit Entry", ". Upload New Photo", ". Download Unit Photos", ". Download Unit Label", ". Download Unit PDF", ". Delete Unit", ". Exit"]
    while ( (intDecision < 1 ) or (intDecision > len(listOfOptions)) ):
        try:
            # Re-print the unit info page before every prompt.
            printUnitInfo(responseJson, unitID)
            print("\nWhat do you want to do with this unit?")
            for i in range( len(listOfOptions) ): terminalColor.printBlueString( str(i+1) + listOfOptions[i] )
            intDecision = int(input())
            if ( (intDecision < 1) or (intDecision > len(listOfOptions)) ): terminalColor.printRedString("Invalid Input")
            elif ( listOfOptions[intDecision-1] == ". Exit"): break
            elif ( listOfOptions[intDecision-1] == ". Edit Entry"):
                intDecision = 0
                # unitEditEntry returns the refreshed backend response.
                responseJson = unitEditEntry(responseJson, "Editing Existing Unit")
            elif ( listOfOptions[intDecision-1] == ". Upload New Photo"):
                intDecision = 0
                uploadNewPhoto(unitID)
            elif ( listOfOptions[intDecision-1] == ". Delete Unit"):
                # On successful deletion intDecision stays in range, which
                # ends the while loop; on failure the menu is shown again.
                if deleteUnit(unitID): terminalColor.printGreenString("Unit Deleted")
                else: intDecision = 0
            elif ( listOfOptions[intDecision-1] == ". Download Unit Photos"):
                intDecision = 0
                try:
                    unitInfo = responseJson["unitInfo"]
                    # The placeholder URL means no photo was ever uploaded.
                    if unitInfo["Photo_URL"] == "https://hardware-donations-database-gamma.s3-us-west-1.amazonaws.com/Misc_Items/noPhotoFound.png":
                        terminalColor.printRedString("No unit photos uploaded")
                    else: downloadUnitPhoto(responseJson)
                except:
                    terminalColor.printRedString("Unable to download unit photos")
            elif ( listOfOptions[intDecision-1] == ". Download Unit Label"):
                intDecision = 0
                try:
                    downloadUnitLabel(unitID)
                except:
                    # If the label does not exist yet, try generating it
                    # once and downloading again.
                    try:
                        createNewUnitLabel(unitID)
                        downloadUnitLabel(unitID)
                    except:
                        terminalColor.printRedString("Unable to download unit label")
            elif ( listOfOptions[intDecision-1] == ". Download Unit PDF"):
                intDecision = 0
                try:
                    downloadUnitPDF(unitID)
                except:
                    # Same retry pattern as the label: generate, then fetch.
                    try:
                        createNewUnitPDF(unitID, responseJson)
                        downloadUnitPDF(unitID)
                    except:
                        terminalColor.printRedString("Unable to download unit PDF")
        except:
            intDecision = 0
            terminalColor.printRedString("Invalid Input")
def unitEditEntry(responseJson, typeOfEditing): #User selects what category they want to edit
    """Interactive field-editing menu for a unit.

    responseJson: backend response dict containing 'unitInfo'.
    typeOfEditing: "Editing Existing Unit" (changes are uploaded on save)
        or "Creating New Unit" (all fields must be filled; the collected
        dict is returned with Unit_ID set to "OkToUpload").

    Pending edits are accumulated in stuffToUpdate until the user saves
    or exits.  listOfOptions (menu labels) and listOfCategories (the
    matching database keys) are parallel lists; the last two options
    (Exit / Save and Exit) have no category.
    """
    unitInfo = responseJson["unitInfo"]
    intDecision = 0
    listOfOptions = [". Location", ". Status", ". User ID",". Manufacturer",". Model",". ARK-OS Version", ". Original Operating System", ". CPU Model", ". CPU GHz",". CPU Threads",". CPU Architecture",". RAM GB",". RAM Slots",". RAM Type", ". HDD GB", ". HDD Port",". HDD Speed",". USB Ports",". Audio Ports",". Display Ports",". External Disk Drives",". Networking",". Other Ports", ". Comments", ". Exit", ". Save and Exit"]
    listOfCategories = ["Location", "Status", "UserID", "Manufacturer", "Model", "ARK-OS_Version", "Operating System", "CPU Type", "CPU GHz", "CPU Threads","CPU Architecture","RAM","RAM Slots","RAM Type", "HDD", "HDD Port","HDD Speed","USB Ports","Audio Ports","Display Ports","Disk Drive","Networking","Ports", "Comments"]
    stuffToUpdate = {}
    while ( (intDecision < 1 ) or (intDecision > len(listOfOptions)) ):
        try:
            print("\nWhat section do you want to edit?")
            # Categories with pending edits are shown in green.
            for i in range( len(listOfOptions) - 1):
                if ( len(listOfCategories) > i and listOfCategories[i] in stuffToUpdate ): terminalColor.printGreenRegString( str(i+1) + listOfOptions[i] )
                else: terminalColor.printBlueString( str(i+1) + listOfOptions[i] )
            if len(stuffToUpdate) > 0: terminalColor.printBlueString( str(len(listOfOptions)) + listOfOptions[len(listOfOptions) - 1] ) #Prints "Save and Exit"
            intDecision = int(input())
            # NOTE(review): 20207864 looks like a hidden shortcut that marks
            # every field "[REDACTED]" — confirm this is intentional.
            if ( (intDecision) == 20207864):
                for i in listOfCategories:
                    stuffToUpdate[i] = "[REDACTED]"
            elif ( (intDecision < 1) or (intDecision > len(listOfOptions)) ): terminalColor.printRedString("Invalid Input")
            elif ( listOfOptions[intDecision-1] == ". Exit" ):
                # Ensure the caller can always read responseJson["Unit_ID"].
                try: testVariable = responseJson["Unit_ID"]
                except: responseJson["Unit_ID"] = "bad"
                return responseJson
            elif ( listOfOptions[intDecision-1] == ". Save and Exit" ) and len(stuffToUpdate) > 0:
                if typeOfEditing == "Editing Existing Unit":
                    if verifyUploadData(stuffToUpdate): return uploadUnitUpdate(stuffToUpdate, unitInfo["Unit_ID"])
                    else:
                        intDecision = 0
                elif typeOfEditing == "Creating New Unit":
                    # A new unit needs all 24 categories filled in.
                    if len(stuffToUpdate) > 23:
                        if verifyUploadData(stuffToUpdate):
                            stuffToUpdate["Unit_ID"] = "OkToUpload"
                            return stuffToUpdate
                        else:
                            intDecision = 0
                    else:
                        intDecision = 0
                        terminalColor.printRedString("Please fill out all fields before creating a new unit")
            elif ( listOfOptions[intDecision-1] == ". Comments"):
                intDecision = 0
                # Prefer a pending edit over the stored value.
                try: oldComments = stuffToUpdate["Comments"]
                except: oldComments = unitInfo["Comments"]
                newComments = click.edit(oldComments)
                stuffToUpdate["Comments"] = newComments
            elif checkIfCategoryHasLists(listOfCategories[intDecision-1]):
                # Categories with a fixed list of choices get their own
                # picker; all others fall through to free-text editing.
                category = listOfCategories[intDecision-1]
                intDecision = 0
                originalData = unitInfo[category]
                try: oldData = stuffToUpdate[category]
                except: oldData = unitInfo[category]
                if category == "RAM Type": newData = changeRAMType(originalData, oldData)
                elif category == "CPU Architecture": newData = changeCPUArchitecture(originalData, oldData)
                elif category == "ARK-OS_Version": newData = changeARKOSVersion(originalData, oldData)
                elif category == "Location": newData = changeUnitLocation(originalData, oldData)
                # Reverting to the stored value removes the pending edit.
                if newData == originalData and category in stuffToUpdate: del stuffToUpdate[category]
                elif not newData == originalData: stuffToUpdate[category] = newData
            else:
                stuffToUpdate = editTextEntry(stuffToUpdate, unitInfo, listOfCategories[intDecision-1])
                intDecision = 0
        except:
            intDecision = 0
            terminalColor.printRedString("Invalid Input")
def uploadUnitUpdate(stuffToUpdate, unitID): #Connects to AWS lambda to update unit data
    """Push edited unit fields to the backend Lambda and return its reply.

    stuffToUpdate: dict of {category: new value} to persist.
    unitID: identifier of the unit being updated.
    Returns the parsed Lambda response; callers read its 'unitInfo' entry.
    (The old code also bound the 'unitInfo' entry to an unused local —
    that dead assignment has been removed.)
    """
    payload = dict(key1=settingsJson.key1, key2=settingsJson.key2, key3=settingsJson.key3, type="unit_update", unitID=unitID, updateInfo=stuffToUpdate)
    response = settings.initiateLambdaClient().invoke(
        FunctionName='arn:aws:lambda:us-west-1:105369739187:function:HDPasswordCheck',
        InvocationType='RequestResponse',
        Payload=json.dumps(payload),
    )
    return json.loads(response['Payload'].read())
def printUnitInfo(responseJson, unitID): #Prints out data on units
    """Pretty-print one unit's info page to the terminal.

    responseJson: backend response dict containing 'unitInfo'.
    unitID: '<category>-<number>' identifier used for the page title.
    Any missing key aborts the printout with a generic error message
    (the broad except below intentionally keeps the menu alive).
    """
    try:
        unitInfo = responseJson["unitInfo"]
        print("\nInfo Page For " + unitID )
        terminalColor.printCyanString(" " + unitID )
        print( terminalColor.generateYellowString( " Unit Category: " ) + unitInfo["Category"])
        # The unit number is everything after the first '-' in the ID.
        print( terminalColor.generateYellowString( " Unit Number: " ) + unitID.split("-",1)[1] )
        print( terminalColor.generateYellowString( " Location: " ) + unitInfo["Location"])
        print( terminalColor.generateYellowString( " Status: " ) + unitInfo["Status"])
        print( terminalColor.generateYellowString( " User ID: " ) + unitInfo["UserID"])
        terminalColor.printCyanString( " System Info")
        print( terminalColor.generateYellowString( " Manufacturer: " ) + unitInfo["Manufacturer"])
        print( terminalColor.generateYellowString( " Model: " ) + unitInfo["Model"])
        print( terminalColor.generateYellowString( " ARK-OS Version: " ) + unitInfo["ARK-OS_Version"])
        print( terminalColor.generateYellowString( " Original Operating System: " ) + unitInfo["Operating System"])
        terminalColor.printCyanString(" CPU")
        print( terminalColor.generateYellowString( " CPU Model: " ) + unitInfo["CPU Type"])
        print( terminalColor.generateYellowString( " CPU GHz: " ) + unitInfo["CPU GHz"])
        print( terminalColor.generateYellowString( " CPU Threads: " ) + unitInfo["CPU Threads"])
        print( terminalColor.generateYellowString( " CPU Architecture: " ) + unitInfo["CPU Architecture"])
        terminalColor.printCyanString( " RAM")
        print( terminalColor.generateYellowString( " RAM GB: " ) + unitInfo["RAM"])
        print( terminalColor.generateYellowString( " RAM Slots: " ) + unitInfo["RAM Slots"])
        print( terminalColor.generateYellowString( " RAM Type: " ) + unitInfo["RAM Type"])
        terminalColor.printCyanString( " HDD")
        print( terminalColor.generateYellowString( " HDD Size: " ) + unitInfo["HDD"])
        print( terminalColor.generateYellowString( " HDD Port: " ) + unitInfo["HDD Port"])
        print( terminalColor.generateYellowString( " HDD Speed: " ) + unitInfo["HDD Speed"])
        terminalColor.printCyanString( " Ports")
        print( terminalColor.generateYellowString( " USB Ports: " ) + unitInfo["USB Ports"])
        print( terminalColor.generateYellowString( " Audio Ports: " ) + unitInfo["Audio Ports"])
        print( terminalColor.generateYellowString( " Display Ports: " ) + unitInfo["Display Ports"])
        print( terminalColor.generateYellowString( " External Disk Drives: " ) + unitInfo["Disk Drive"])
        print( terminalColor.generateYellowString( " Networking: " ) + unitInfo["Networking"])
        print( terminalColor.generateYellowString( " Other Ports: " ) + unitInfo["Ports"])
        print( terminalColor.generateCyanString( " Comments: ") + unitInfo["Comments"])
    except:
        terminalColor.printRedString("Unable to print all data")
def downloadUnitLabel(unitID): #downloads unit label from AWS S3
    """Build the S3 URL of the unit's QR label PNG and let the user save it."""
    base = "https://hardware-donations-database-gamma.s3-us-west-1.amazonaws.com/Unit_Photos/"
    category = unitID.split("-", 1)[0]
    labelURL = "%s%s_Units/%s/%s_QR_Label.png" % (base, category, unitID, unitID)
    fileFunctions.chooseFolderToSaveFile([labelURL, unitID + " Label", ".png", "Labels"])
def downloadUnitPhoto(responseJson): #downloads unit photo from AWS S3
    """Save the unit's uploaded photo; the URL comes from the response JSON."""
    info = responseJson["unitInfo"]
    save_args = [info["Photo_URL"], info["Unit_ID"] + "_Photo", "unknown", "Unit Photos"]
    fileFunctions.chooseFolderToSaveFile(save_args)
def downloadUnitPDF(unitID): #downloads unit pdf from AWS S3
    """Build the S3 URL of the unit's info-page PDF and let the user save it."""
    base = "https://hardware-donations-database-gamma.s3-us-west-1.amazonaws.com/Unit_Photos/"
    category = unitID.split("-", 1)[0]
    pdfURL = "%s%s_Units/%s/%s_Info_Page.pdf" % (base, category, unitID, unitID)
    fileFunctions.chooseFolderToSaveFile([pdfURL, unitID + " Info Page", ".pdf", "PDFs"])
def createNewUnitLabel(unitID):
    """Invoke the HDLabelGenerator AWS Lambda to generate a QR label for *unitID*.

    Parameters:
        unitID (str): unit identifier of the form "<TYPE>-<NUMBER>".

    Returns:
        str: URL of the generated QR label image.  The original code
        computed this value but silently discarded it; returning it is
        backward-compatible (callers that ignore the result are unaffected).
    """
    # The original chained assignments ("itemType = unitType = ...") rebound
    # a meaningless `unitType` name twice; split the ID once instead.
    itemType, itemNumber = unitID.split("-", 1)
    payload = dict(itemType=itemType, itemNumber=itemNumber)
    response = settings.initiateLambdaClient().invoke(
        FunctionName='arn:aws:lambda:us-west-1:105369739187:function:HDLabelGenerator',
        InvocationType='RequestResponse',
        Payload=json.dumps(payload),
    )
    result = json.loads(response['Payload'].read())
    return result["qrLabelURL"]
def createNewUnitPDF(unitID, unitInfo):
    """Invoke the PDF-generator AWS Lambda to build an info page for *unitID*."""
    requestBody = dict(unitID=unitID, unitInfo=unitInfo)
    lambdaClient = settings.initiateLambdaClient()
    # Fire-and-forget: the Lambda stores the PDF in S3; the response is unused.
    lambdaClient.invoke(
        FunctionName='arn:aws:lambda:us-west-1:105369739187:function:Hardware-Donations-PDF-Generator',
        InvocationType='RequestResponse',
        Payload=json.dumps(requestBody),
    )
def changeUnitLocation(original, current):
    """Interactively prompt for the unit's location.

    Shows the original and currently-pending values, then a numbered menu.
    Returns the chosen location, or *current* unchanged on "Cancel".
    """
    unitLocations = ["Unknown","Donated","Site 1(Bosco Tech)","Site 2(Roosevelt)","Site 3(ELAC)", "Cancel"]
    while True:
        try:
            print("\nWhere is this unit located?")
            print("Original Location Version Data: " + original)
            print("Current Location Version Data: " + current)
            for number, location in enumerate(unitLocations, start=1):
                terminalColor.printBlueString(str(number) + ". " + location)
            choice = int(input())
            if choice < 1 or choice > len(unitLocations):
                terminalColor.printRedString("Invalid Input")
            elif unitLocations[choice - 1] == "Cancel":
                return current
            else:
                return unitLocations[choice - 1]
        except:
            # Non-numeric input lands here; re-prompt.
            terminalColor.printRedString("Invalid Input")
def changeARKOSVersion(original, current):
    """Interactively prompt for the unit's installed ARK-OS version.

    Shows the original and currently-pending values, then a numbered menu.
    Returns the chosen version, or *current* unchanged on "Cancel".
    """
    arkosVersions = ["Unknown","None","Experimental","v1.0.6","v1.1.2","v1.2.1","v2.0.1 \"Bosco\"","v2.1.0 \"Bosco Tech\"", "Cancel"]
    while True:
        try:
            print("\nWhat version of ARK-OS does this unit have installed?")
            print("Original ARK-OS Version Data: " + original)
            print("Current ARK-OS Version Data: " + current)
            for number, version in enumerate(arkosVersions, start=1):
                terminalColor.printBlueString(str(number) + ". " + version)
            choice = int(input())
            if choice < 1 or choice > len(arkosVersions):
                terminalColor.printRedString("Invalid Input")
            elif arkosVersions[choice - 1] == "Cancel":
                return current
            else:
                return arkosVersions[choice - 1]
        except:
            # Non-numeric input lands here; re-prompt.
            terminalColor.printRedString("Invalid Input")
def changeCPUArchitecture(original, current):
    """Interactively prompt for the unit's CPU architecture.

    Shows the original and currently-pending values, then a numbered menu.
    Returns the chosen architecture, or *current* unchanged on "Cancel".
    """
    cpuArchitectures = ["Unknown","64-Bit","32-Bit","PowerPC","Cancel"]
    while True:
        try:
            print("\nWhat CPU Architecture does this unit have?")
            print("Original CPU Architecture Data: " + original)
            print("Current CPU Architecture Data: " + current)
            for number, arch in enumerate(cpuArchitectures, start=1):
                terminalColor.printBlueString(str(number) + ". " + arch)
            choice = int(input())
            if choice < 1 or choice > len(cpuArchitectures):
                terminalColor.printRedString("Invalid Input")
            elif cpuArchitectures[choice - 1] == "Cancel":
                return current
            else:
                return cpuArchitectures[choice - 1]
        except:
            # Non-numeric input lands here; re-prompt.
            terminalColor.printRedString("Invalid Input")
def changeRAMType(original, current):
    """Interactively prompt for the unit's RAM type.

    Shows the original and currently-pending values, then a numbered menu.
    Returns the chosen RAM type, or *current* unchanged on "Cancel".
    """
    ramType = ["Unknown","Other","DDR RAM","DDR2 RAM","DDR3 RAM","DDR4 RAM","DDR SDRAM","DDR2 SDRAM","DDR3 SDRAM","DDR4 SDRAM","Cancel"]
    while True:
        try:
            print("\nWhat type of RAM does this unit have?")
            print("Original RAM Type Data: " + original)
            print("Current RAM Type Data: " + current)
            for number, ram in enumerate(ramType, start=1):
                terminalColor.printBlueString(str(number) + ". " + ram)
            choice = int(input())
            if choice < 1 or choice > len(ramType):
                terminalColor.printRedString("Invalid Input")
            elif ramType[choice - 1] == "Cancel":
                return current
            else:
                return ramType[choice - 1]
        except:
            # Non-numeric input lands here; re-prompt.
            terminalColor.printRedString("Invalid Input")
def editTextEntry(stuffToUpdate, unitInfo, category):
    """Prompt the user to edit one free-text *category* field.

    Returns an updated copy of the pending-changes dict: the entry is
    removed when the value is reverted to the original, set when changed,
    and left alone otherwise.  Accepted lengths are 2..70 characters.
    """
    pending = stuffToUpdate.copy()
    originalData = unitInfo[category]
    # Edit the pending value when one exists, else start from the original.
    oldData = pending.get(category, unitInfo[category])
    print("Original " + category + " Data: " + originalData)
    newData = methodOfEditingString(category + ": ", oldData)
    while not (2 <= len(newData) <= 70):
        terminalColor.printRedString("The data you entered is too long or short")
        newData = methodOfEditingString(category + ": ", oldData)
    if newData == originalData:
        if category in pending:
            del pending[category]
    else:
        pending[category] = newData
    return pending
def deleteUnit(unitID):
    """Ask the HDPasswordCheck AWS Lambda to delete *unitID*.

    Returns True on success, False on failure (including any local error).
    """
    try:
        request = dict(key1=settingsJson.key1, key2=settingsJson.key2, key3=settingsJson.key3, type="unit_delete", unitID=unitID)
        lambdaResponse = settings.initiateLambdaClient().invoke(
            FunctionName='arn:aws:lambda:us-west-1:105369739187:function:HDPasswordCheck',
            InvocationType='RequestResponse',
            Payload=json.dumps(request),
        )
        body = json.loads(lambdaResponse['Payload'].read())
        if not body["result"]:
            terminalColor.printRedString("Failed to delete unit: " + body["reason"])
        return body["result"]
    except:
        # Deliberate best-effort: any failure (network, auth, bad JSON)
        # is reported to the caller as an unsuccessful delete.
        return False
def checkIfCategoryHasLists(category):
    """Return True when *category* is edited via a fixed choice list
    rather than free text.

    Parameters:
        category (str): field name as shown in the unit-info table.

    Returns:
        bool: True for the four list-backed categories, False otherwise.
    """
    # A membership test replaces the verbose if/return True/return False
    # chain; a tuple constant avoids rebuilding a list on every call.
    return category in ("RAM Type", "CPU Architecture", "ARK-OS_Version", "Location")
def verifyUploadData(jsonToUpload):
    """Show the pending changes and ask the user to confirm the upload.

    Parameters:
        jsonToUpload (dict): category -> new value, all string values.

    Returns:
        bool: True to upload, False to abort.  Re-prompts on any other answer.
    """
    # The original kept an `uploadDataOk` flag that was never written; an
    # explicit infinite loop with early returns expresses the same control flow.
    while True:
        for key in jsonToUpload:
            print(terminalColor.generateYellowString(key) + ": " + jsonToUpload[key])
        # Fixed the duplicated words in the prompt ("want to want to").
        print("\nDo you want to upload these changes?[Yes/No]")
        strDecision = input()
        if strDecision.lower() == "yes" or strDecision.lower() == "y":
            return True
        elif strDecision.lower() == "no" or strDecision.lower() == "n":
            return False
        else:
            terminalColor.printRedString("Invalid Input")
def methodOfEditingString(prompt, dataToEdit):
    """Edit *dataToEdit* either in the user's external editor (via click)
    or inline with readline pre-fill, depending on the settings flag."""
    if not settingsJson.externalEditor:
        return rlinput(prompt, dataToEdit)
    # click.edit opens $EDITOR; strip the newlines the editor adds.
    return click.edit(dataToEdit).replace("\n", "")
def rlinput(prompt, prefill=''):
    """input() with *prefill* already typed into the line, so the user can
    edit the existing value in place (uses a readline startup hook)."""
    def seedLine():
        readline.insert_text(prefill)
    readline.set_startup_hook(seedLine)
    try:
        return input(prompt)
    finally:
        # Always clear the hook, even if input() raises (e.g. EOF/interrupt).
        readline.set_startup_hook()
def uploadNewPhoto(unitID):
    """Confirm, let the user pick a local JPEG/PNG, upload it to S3 and
    record the new photo URL on the unit via uploadUnitUpdate."""
    print("\nAre you sure you want to replace the unit photo?(Yes/No)")
    answer = input()
    # Guard clauses: bail out early on every failure path.
    if answer.lower() != "yes" and answer.lower() != "y":
        terminalColor.printRedString("Upload canceled")
        return
    Tk().withdraw()
    filepath = askopenfilename()
    filename = filepath.split("/")[-1]
    filetype = filename.split(".")[-1]
    filename = filename.replace(" ", "")
    if filetype.lower() not in ("jpg", "jpeg", "png"):
        terminalColor.printRedString("Invalid file type. Please upload a \"JPEG\" or \"PNG\" file")
        return
    uploadLocation = fileFunctions.uploadToS3(unitID, filename, filepath)
    if uploadLocation == "false":
        terminalColor.printRedString("Upload failed")
        return
    stuffToUpdate = dict(Photo_URL=uploadLocation)
    uploadUnitUpdate(stuffToUpdate, unitID)
"64julianlopez@gmail.com"
] | 64julianlopez@gmail.com |
54ca30c8c40c9bb999bd83d71d9f182a0c7739ae | 7c9f984e3f3fa8e251e0fa8e65107dc0d7101002 | /get_graph_data.py | 50d7d11777dc6397af2cb243ca321e70c354962b | [] | no_license | ChaitMat/world_cup_2019_prediction_dashboard | a63bf1e979f832b566949e6fccf009cdd6e57e2f | e7df8113f43c2c51f283bee68f11efaf1342e25f | refs/heads/master | 2022-12-11T16:12:41.439861 | 2019-07-08T11:28:57 | 2019-07-08T11:28:57 | 191,324,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | import pandas as pd
import sqlite3
import plotly.graph_objs as go
from get_rank import get_rank_table
from display_individual_data import dispIndvData
def getGraphData():
    """Build one plotly Scatter trace per ranked player, plotting the
    cumulative league points over the match sequence."""
    rank_table = get_rank_table()
    traces = []
    for emno, player_name in zip(rank_table['EMNO'], rank_table['Name']):
        league = dispIndvData(emno)[0]
        league.dropna(inplace=True)
        traces.append(
            go.Scatter(
                x=league["Match"],
                y=league["Points"].cumsum(),
                name=player_name,
                mode='lines',
                marker={'size': 8, "opacity": 0.6, "line": {'width': 0.5}},
            )
        )
    return traces
return trace
| [
"chaitanya.mathkar@gmail.com"
] | chaitanya.mathkar@gmail.com |
db7e79e1527c31a36fa86ad326bd52e9ecab29d8 | 430cfece27c54180baf29b3199a67f79fe7d155c | /examples/projections/cyl/cyl_oblique_mercator_3.py | cd9f17fd91016137b34dc329604e31ca38d5372f | [
"BSD-3-Clause"
] | permissive | JamieJQuinn/pygmt | 139f25a3f4280b2d2d43c3fa63179437a9227d31 | 9269fbcb2fc7fca2d5c412acdb794be375c260ab | refs/heads/main | 2023-08-24T16:19:27.673739 | 2021-10-29T09:51:44 | 2021-10-29T09:51:44 | 384,119,354 | 0 | 0 | BSD-3-Clause | 2021-07-08T12:37:21 | 2021-07-08T12:37:21 | null | UTF-8 | Python | false | false | 1,013 | py | r"""
Oblique Mercator
================
Oblique configurations of the cylinder give rise to the oblique Mercator
projection. It is particularly useful when mapping regions of large lateral
extent in an oblique direction. Both parallels and meridians are complex
curves. The projection was developed in the early 1900s by several workers.
**oc**\|\ **oC**\ *lon0/lat0/lonp/latp/scale*\ [**+v**] or
**Oc**\|\ **OC**\ *lon0/lat0/lonp/latp/width*\ [**+v**]
The projection is set with **o** or **O**. The central meridian is set
by *lon0/lat0*. The projection pole is set by *lonp/latp* in option three.
Align the y-axis with the optional **+v**. The figure size is set
with *scale* or *width*.
"""
import pygmt

# Build the example figure: a coastline map of the Caribbean region drawn
# with an oblique Mercator projection (origin + projection pole variant).
fig = pygmt.Figure()
# Using the origin projection pole
fig.coast(
    projection="Oc280/25.5/22/69/12c",
    # Set bottom left and top right coordinates of the figure with "+r"
    region="270/20/305/25+r",
    frame="afg",
    land="gray",
    shorelines="1/thin",
    water="lightblue",
)
fig.show()
| [
"noreply@github.com"
] | noreply@github.com |
f0d990d45a27fde720efb4dff618a7fd5ef391b2 | 8600ea155f279e5a8dfe5a1926038511f6b6a7ea | /sale_crm/wizard/__init__.py | 7c43908361fc6be916f06247bd16776a8e4c1776 | [] | no_license | MarkNorgate/addons-EAD | c2fff89ab16fce3ba19fbe433ee5863705a6f4e5 | 840f28642b5d328e4b86839c413e5164622295a5 | refs/heads/master | 2020-04-23T22:11:00.164438 | 2015-07-22T12:24:53 | 2015-07-22T12:24:53 | 39,501,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import makesale
import makecase
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"mark.norgate@affinity-digital.com"
] | mark.norgate@affinity-digital.com |
a35d4a56654493c1d685072f3d7735f1c90b45fd | 6853a26eb5604dc015f1b95fed3b64ef099ae37b | /main.py | 0c1a27436453fdb0229895568e4464689b9321eb | [] | no_license | filipppp/binomialkoefficient-manim | e056a94178e8fa712178e10c254ba43574ca8335 | e9adb6d0d3be9487142634a16f001f2c22321fb9 | refs/heads/master | 2022-07-31T10:13:51.461909 | 2020-05-16T01:30:33 | 2020-05-16T01:30:33 | 264,057,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,342 | py | from manimlib.imports import *
import sys
sys.path.append("E:\\3b1b\\")
from customs import MarkingMobject, ZoomInAndOut
class PermutationScene(Scene):
    """Manim scene: introduces permutations by seating 5 named students on
    2 chairs, then derives the count 5*4 and the formula n!/(n-k)!."""
    def construct(self):
        # All SVGs from students
        svg_m = SVGMobject("student")
        svg_r = svg_m.copy()
        svg_r.next_to(svg_m, 2*RIGHT)
        svg_rr = svg_r.copy()
        svg_rr.next_to(svg_r, 2*RIGHT)
        svg_l = SVGMobject("student")
        svg_l.next_to(svg_m, 2*LEFT)
        svg_ll = svg_l.copy()
        svg_ll.next_to(svg_l, 2*LEFT)
        svgs = [svg_ll, svg_l, svg_m, svg_r, svg_rr]
        # Animate Rectangles including moving to the bottom and changing colors
        self.play(DrawBorderThenFill(svg_m, rate_func=linear))
        self.wait(2)
        self.play(ReplacementTransform(svg_m.copy(), svg_r), ReplacementTransform(svg_m.copy(), svg_l), ReplacementTransform(svg_m.copy(), svg_rr), ReplacementTransform(svg_m.copy(), svg_ll))
        self.play(ApplyMethod(svg_m.set_color, GREEN), ApplyMethod(svg_rr.set_color, YELLOW), ApplyMethod(svg_r.set_color, RED), ApplyMethod(svg_l.set_color, BLUE), ApplyMethod(svg_ll.set_color, PURPLE))
        # Shrink the row of students and push it toward the bottom of the frame.
        def ZoomInAndMove(mob):
            mob.shift(0.35*BOTTOM)
            mob.scale(0.5)
            return mob
        self.play(ApplyFunction(ZoomInAndMove, VGroup(*svgs)), run_time=1)
        self.wait(2)
        # Create Titles for student svgs and align + write them
        text_m = TextMobject("Eve")
        text_r = TextMobject("Alice")
        text_rr = TextMobject("Bob")
        text_l = TextMobject("Daniel")
        text_ll = TextMobject("Ted")
        texts = [text_ll, text_l, text_m, text_r, text_rr]
        for i, text in enumerate(texts):
            text.next_to(svgs[i], 0.1*TOP)
            text.scale(0.5)
        self.play(*[Write(text) for text in texts])
        self.wait(5)
        # Create squares to show different permutations
        square_l = Square().shift(1.2*LEFT).shift(0.35*TOP)
        square_r = Square().shift(1.2*RIGHT).shift(0.35*TOP)
        self.play(FadeInFromDown(square_l), FadeInFromDown(square_r))
        self.wait(3)
        # Algorithm for permutations
        # Each student takes the left chair in turn; every other student
        # cycles through the right chair, enumerating all ordered pairs.
        for i in range(len(svgs)):
            main_group = Group(svgs[i], texts[i])
            main_pos = main_group.get_center()
            self.play(main_group.move_to, square_l)
            for j in range(len(svgs)):
                if i != j:
                    second_group = Group(svgs[j], texts[j])
                    second_pos = second_group.get_center()
                    self.play(second_group.move_to, square_r, run_time=0.3)
                    self.play(second_group.move_to, second_pos, run_time=0.3)
            self.play(main_group.move_to, main_pos, run_time=1)
        left_side_wo_symbol = Group(*svgs, *texts, square_l, square_r)
        sum_symbol = TexMobject("\sum").scale(2.2).next_to(left_side_wo_symbol, 1.7*LEFT).set_y(0)
        left_side = Group(left_side_wo_symbol, sum_symbol)
        equal = TexMobject("=").scale(3).shift(0.7*RIGHT)
        result = TexMobject("5", "\cdot", "4").scale(3).next_to(equal, 5*RIGHT)
        # Show Result
        self.wait(2)
        self.play(left_side_wo_symbol.scale, 0.7, run_time=1)
        self.play(Write(sum_symbol))
        self.play(left_side.to_edge, LEFT, run_time= 1)
        self.play(Write(equal), Write(result))
        self.wait(2)
        # Explain Result
        # Highlight helpers: enlarge + color a square, then undo it exactly.
        def start_indication(mob):
            mob.scale_in_place(1.2)
            mob.set_color(YELLOW)
            return mob
        def end_indication(mob):
            mob.scale_in_place(1/1.2)
            mob.set_color(WHITE)
            return mob
        self.play(ApplyFunction(start_indication, square_l))
        self.wait(2)
        self.play(ShowCreationThenDestructionAround(Group(*svgs, *texts)), ShowCreationThenDestructionAround(result[0]), run_time=3)
        self.wait(2)
        self.play(ApplyFunction(end_indication, square_l))
        self.wait(3)
        # Explain Second Part of the result
        temp_group = Group(svgs[0], texts[0])
        temp_pos = temp_group.get_center()
        self.play(ApplyMethod(temp_group.move_to, square_l))
        self.wait(2)
        self.play(ApplyFunction(start_indication, square_r))
        self.wait(2)
        self.play(ShowCreationThenDestructionAround(Group(*svgs[1:], *texts[1:])), ShowCreationThenDestructionAround(result[2]), run_time=3)
        self.wait(2)
        self.play(ApplyFunction(end_indication, square_r))
        self.play(ApplyMethod(temp_group.move_to, temp_pos))
        self.wait(3)
        # add braces
        n_brace = Brace(Group(*svgs)).set_color(BLUE)
        n_brace_text = n_brace.get_tex("n"," = 5").set_color(BLUE)
        self.play(GrowFromCenter(n_brace), FadeIn(n_brace_text))
        self.wait(11)
        k_brace = Brace(Group(square_l, square_r), UP).set_color(GREEN)
        k_brace_text = k_brace.get_tex("k"," = 2").set_color(GREEN)
        self.play(GrowFromCenter(k_brace), FadeIn(k_brace_text))
        self.wait(11)
        main_brace = Brace(result).set_color(RED)
        main_text = main_brace.get_tex("{n", "! ", "\over", "(", "n","-","k",")!}", " = ", "{ ", "5 \cdot 4 ", "\cdot", "3 \cdot 2 \cdot 1", "\over", "3 \cdot 2 \cdot 1}").scale(1.3)
        # n
        main_text[0].set_color(BLUE)
        main_text[4].set_color(BLUE)
        # k
        main_text[6].set_color(GREEN)
        original_left_pos = main_text[:8].get_center()
        main_text[:8].move_to(main_text.get_center())
        self.play(GrowFromCenter(main_brace), FadeIn(Group(main_text[1:4], main_text[5], main_text[7])), ReplacementTransform(n_brace_text[0].copy(), main_text[0]), ReplacementTransform(n_brace_text[0].copy(), main_text[4]), ReplacementTransform(k_brace_text[0].copy(), main_text[6]))
        self.wait(8)
        self.play(ReplacementTransform(result.copy(), main_text[10]), FadeIn(main_text[8:10]), FadeIn(main_text[11:]), ApplyMethod(main_text[:8].move_to, original_left_pos))
        self.wait(4)
        # Cancel the common factors (original comment: "Kürzen")
        cross1 = Line(main_text[-1].get_left(), main_text[-1].get_right()).set_color(RED)
        cross2 = Line(main_text[-3].get_left(), main_text[-3].get_right()).set_color(RED)
        self.play(GrowFromEdge(cross1, LEFT), GrowFromEdge(cross2, LEFT))
        self.play(FadeOut(cross1), FadeOut(cross2), FadeOut(main_text[9]), FadeOut(main_text[11:]))
        self.play(ApplyMethod(main_text[10].next_to, Group(main_text[9:]).get_left()))
        self.play(Indicate(main_text[10]))
        self.wait(0.2)
        self.play(Indicate(result))
        self.wait(4)
        # Show similarity
        def scale_up_and_move(mob):
            mob.scale_in_place(2)
            mob.move_to(ORIGIN)
            return mob
        # Biggest part finished (original comment: "GRÖ?TEN PART FERTIG")
        self.play(FadeOut(left_side), FadeOut(Group(main_brace, k_brace, n_brace, n_brace_text, k_brace_text, main_text[8:11], result, equal)), ApplyFunction(scale_up_and_move, main_text[:8]))
        self.wait(2)
        self.play(ApplyMethod(main_text[:8].scale_in_place, 0.6))
        self.wait(2)
class CompareGeneralizationScene(Scene):
    """Manim scene: extends n!/(n-k)! to n!/(k!*(n-k)!) and isolates the
    newly introduced k! term."""
    def construct(self):
        general = MarkingMobject(r"{n! \over (n - k)!} ", r"\XYEQ {n! \over ", "k!", "\cdot (n-k)!}",).scale(1.56)
        # Center the left-hand side first, then slide it back while the
        # right-hand side fades in.
        left_eq = general[:7]
        original_pos = left_eq.get_center()
        left_eq.move_to(ORIGIN)
        k_factorial = general[10:12]
        self.play(ApplyMethod(left_eq.move_to, original_pos), FadeIn(general[7:]))
        self.wait(2)
        self.play(ShowCreationThenDestructionAround(general[10:12]), run_time=6)
        self.wait(2)
        # Keep only the k! term for the next scene's discussion.
        self.play(ApplyMethod(k_factorial.move_to, ORIGIN), FadeOut(general[:10]), FadeOut(general[12:]))
        self.wait(2)
class WhatIsKFactorialScene(Scene):
    """Manim scene: poses the question of what k! counts — the number of
    rearrangements of one combination that are "the same"."""
    def construct(self):
        equation = MarkingMobject("k!", "=", r"\platzhalter ???").scale(1.56)
        definition = TextMobject("How often can you rearrange a", "certain combination")
        definition_p2 = TextMobject(" so it's still the ", "same", "one?").next_to(definition, 0.5*BOTTOM)
        k_factorial = equation[:2]
        original_k_pos = k_factorial.get_center()
        k_factorial.move_to(ORIGIN)
        self.play(ApplyMethod(k_factorial.move_to, original_k_pos), FadeIn(equation[2]))
        self.wait(1)
        self.play(Write(equation[3:]))
        self.wait(1)
        # Drop the placeholder "???" and move k! up to make room for the text.
        self.play(ApplyMethod(k_factorial.move_to, ORIGIN+0.35*TOP), FadeOut(equation[2:]))
        self.wait(1)
        self.play(Write(definition))
        self.play(Write(definition_p2))
        self.wait(3)
        # Emphasize the key words of the definition.
        self.play(Indicate(definition[1]))
        self.wait(1.5)
        self.play(Indicate(definition_p2[1]))
        self.wait(9)
class ComparisonScene(Scene):
    """Manim scene: shows that two orderings of the same two students
    count as one combination (2 arrangements = 1 combination)."""
    def construct(self):
        # First arrangement: red student left, blue student right.
        square_l = Square().shift(1.2 * LEFT)
        square_r = Square().shift(1.2 * RIGHT)
        main_svg = SVGMobject("student").move_to(square_l).set_color(RED).scale(0.5)
        second_svg = SVGMobject("student").move_to(square_r).set_color(BLUE).scale(0.5)
        group1 = Group(square_l, square_r, main_svg, second_svg).shift(4*LEFT).scale(0.5)
        plus_symbol = TexMobject("+").next_to(group1, 3*RIGHT).scale(2)
        # Second arrangement: the same two students, seats swapped.
        square_l2 = Square().shift(1.2 * LEFT)
        square_r2 = Square().shift(1.2 * RIGHT)
        main_svg2 = SVGMobject("student").move_to(square_l2).set_color(BLUE).scale(0.5)
        second_svg2 = SVGMobject("student").move_to(square_r2).set_color(RED).scale(0.5)
        group2 = Group(square_l2, square_r2, main_svg2, second_svg2).scale(0.5)
        equals = TexMobject("=").shift(2.5*RIGHT).scale(2)
        two = TexMobject("2").next_to(equals, 2.5*RIGHT).scale(2)
        one = TexMobject("1").next_to(equals, 2.5 * RIGHT).scale(2)
        cross = SVGMobject("cross").move_to(one).set_color(RED).scale(0.5)
        self.play(FadeInFromDown(group1), FadeInFromDown(group2), FadeInFromDown(plus_symbol), run_time=1)
        self.wait(2)
        self.play(FadeInFromDown(equals), FadeInFromDown(two))
        self.wait(6)
        # Cross out the "2": as combinations these are NOT two distinct outcomes.
        self.play(Write(cross))
        self.wait(1)
        self.play(FadeOut(cross))
        self.wait(3)
        self.play(ReplacementTransform(two, one))
        self.wait(1)
        # Recompose: both arrangements are equal to each other.
        self.play(ApplyMethod(equals.move_to, ORIGIN), ApplyMethod(group2.move_to, ORIGIN+2*RIGHT), ApplyMethod(group1.move_to, ORIGIN+2*LEFT), FadeOut(one), FadeOut(plus_symbol))
        self.wait(1)
        self.play(Indicate(group1), Indicate(group2))
        self.wait(0.5)
        self.play(Indicate(equals))
        self.wait(4)
class FormulaScene(Scene):
    """Manim scene: presents the binomial coefficient (n choose k), links
    n and k to the verbal definition, then writes the full formula."""
    def construct(self):
        binom = MarkingMobject("{n \choose k}", "= {n! \over k! \cdot (n - k)!}").scale_in_place(2)
        definition = TextMobject('"On how many ways can you arrange ', "$n$", "Students on ", "$k$", 'chairs?"').shift(0.3*BOTTOM)
        # Color-code: n is blue, k is green, in both the symbol and the text.
        definition[1].set_color(BLUE)
        definition[3].set_color(GREEN)
        binom[:5].set_color(WHITE)
        binom[2][0].set_color(BLUE)
        binom[2][1].set_color(GREEN)
        original_pos = binom[:5].get_center()
        binom[:5].set_x(0).set_y(0).shift(0.5*RIGHT)
        self.play(FadeIn(binom[:5]))
        self.wait(27)
        self.play(ApplyMethod(binom[:5].shift, 0.3*TOP))
        self.wait(1)
        self.play(Write(definition))
        # Pulse the matching n's and k's together (custom ZoomInAndOut anim).
        self.play(ZoomInAndOut(definition[1]), ZoomInAndOut(binom[2][0]))
        self.play(ZoomInAndOut(definition[3]), ZoomInAndOut(binom[2][1]))
        self.wait(2)
        self.play(FadeOutAndShiftDown(definition))
        self.wait(2)
        self.play(ApplyMethod(binom[:5].move_to, original_pos))
        self.wait(0.5)
        # Reveal the right-hand side of the formula.
        self.play(Write(binom[5:]))
        self.wait(10)
class ExplainKFactorialScene(Scene):
    """Manim scene: shows why k! counts the internal rearrangements —
    3 students on 3 chairs give 3*2*1 orders, which collapses to k!."""
    def construct(self):
        square_l = Square()
        square_m = Square()
        square_r = Square()
        person_l = SVGMobject("student").set_color(RED)
        person_m = SVGMobject("student").set_color(GREEN)
        person_r = SVGMobject("student").set_color(BLUE)
        squares = VGroup(square_l, square_m, square_r).arrange_submobjects(RIGHT)
        text_m = TextMobject("Eve")
        text_r = TextMobject("Alice")
        text_l = TextMobject("Daniel")
        texts = [text_l, text_m, text_r]
        persons_arr = [person_l, person_m, person_r]
        for i, text in enumerate(texts):
            text.next_to(persons_arr[i], 0.3*TOP)
            text.scale(1)
        persons = VGroup(VGroup(person_l, text_l), VGroup(person_m, text_m), VGroup(person_r, text_r)).scale(0.6)
        persons[0].move_to(square_l)
        persons[2].move_to(square_r)
        persons[1].move_to(square_m)
        # Explain Result
        # Highlight helpers: enlarge + color a square, then undo it exactly.
        def start_indication(mob):
            mob.scale_in_place(1.1)
            mob.set_color(YELLOW)
            return mob
        def end_indication(mob):
            mob.scale_in_place(1/1.1)
            mob.set_color(WHITE)
            return mob
        # The equation is created fully transparent and revealed piecewise.
        equation = TexMobject("=", "3", "\cdot 2", "\cdot 1", "k", "!").scale(2)
        equation[4].set_color(GREEN)
        equation.set_opacity(0)
        self.play(FadeIn(squares), FadeIn(persons))
        self.wait(2)
        self.play(ShowCreationThenDestructionAround(VGroup(squares, persons)), run_time=2)
        self.wait(2)
        self.play(ApplyMethod(VGroup(squares, persons).arrange_submobjects, BOTTOM))
        self.play(ApplyMethod(persons[0].shift, RIGHT), ApplyMethod(persons[2].shift, LEFT))
        self.wait(2)
        self.play(ApplyMethod(VGroup(VGroup(squares, persons), equation).arrange_submobjects, RIGHT))
        self.wait(2)
        equation.shift(RIGHT)
        self.play(ApplyMethod(equation[0].set_opacity, 1))
        # Chair 1: 3 candidates -> reveal the factor "3".
        self.play(ApplyFunction(start_indication, square_l))
        self.wait(1)
        self.play(ShowCreationThenDestructionAround(persons), run_time=3)
        self.play(ReplacementTransform(persons.copy(), equation[1]), ApplyMethod(equation[1].set_opacity, 1))
        self.wait(0.5)
        self.play(ApplyFunction(end_indication, square_l))
        self.play(persons[0].move_to, square_l)
        self.wait(1)
        # Chair 2: 2 remaining candidates -> reveal "* 2".
        self.play(ApplyFunction(start_indication, square_m))
        self.wait(1)
        self.play(ShowCreationThenDestructionAround(persons[1:]), run_time=2)
        self.play(ReplacementTransform(persons[1:].copy(), equation[2]), ApplyMethod(equation[2].set_opacity, 1))
        self.wait(0.5)
        self.play(ApplyFunction(end_indication, square_m))
        self.play(persons[1].move_to, square_m)
        self.wait(1)
        # Chair 3: 1 remaining candidate -> reveal "* 1".
        self.play(ApplyFunction(start_indication, square_r))
        self.wait(1)
        self.play(ShowCreationThenDestructionAround(persons[2:]), run_time=1)
        self.play(ReplacementTransform(persons[2:].copy(), equation[3]), ApplyMethod(equation[3].set_opacity, 1))
        self.wait(0.5)
        self.play(ApplyFunction(end_indication, square_r))
        self.wait(2)
        self.play(persons[2].move_to, square_r)
        self.play(VGroup(persons, squares).set_y, 0)
        self.wait(1)
        # show brace
        k_brace = Brace(Group(square_l, square_m, square_r), UP).set_color(GREEN)
        k_brace_text = k_brace.get_tex("k"," = 3").set_color(GREEN)
        self.play(GrowFromCenter(k_brace), FadeIn(k_brace_text))
        self.wait(1)
        # Collapse "3 * 2 * 1" into the symbolic "k!".
        left_pos_eq = equation[1:4].get_left()
        equation[4:].move_to(left_pos_eq).shift(RIGHT)
        self.play(ReplacementTransform(equation[1:4], equation[4:]), ApplyMethod(equation[4:].set_opacity, 1))
        self.wait(5)
class ContinuedPermutationScene(Scene):
    """Manim scene: reuses the 5-students/2-chairs setup, divides the
    5*4 arrangements by k! to reach n!/(k!*(n-k)!) = 10 combinations."""
    def construct(self):
        # All SVGs from students
        svg_m = SVGMobject("student")
        svg_r = svg_m.copy()
        svg_r.next_to(svg_m, 2*RIGHT)
        svg_rr = svg_r.copy()
        svg_rr.next_to(svg_r, 2*RIGHT)
        svg_l = SVGMobject("student")
        svg_l.next_to(svg_m, 2*LEFT)
        svg_ll = svg_l.copy()
        svg_ll.next_to(svg_l, 2*LEFT)
        svgs = [svg_ll, svg_l, svg_m, svg_r, svg_rr]
        # Animate Rectangles including moving to the bottom and changing colors
        self.play(DrawBorderThenFill(svg_m, rate_func=linear))
        self.wait(2)
        self.play(ReplacementTransform(svg_m.copy(), svg_r), ReplacementTransform(svg_m.copy(), svg_l), ReplacementTransform(svg_m.copy(), svg_rr), ReplacementTransform(svg_m.copy(), svg_ll))
        self.play(ApplyMethod(svg_m.set_color, GREEN), ApplyMethod(svg_rr.set_color, YELLOW), ApplyMethod(svg_r.set_color, RED), ApplyMethod(svg_l.set_color, BLUE), ApplyMethod(svg_ll.set_color, PURPLE))
        # Shrink the row of students and push it toward the bottom of the frame.
        def ZoomInAndMove(mob):
            mob.shift(0.35*BOTTOM)
            mob.scale(0.5)
            return mob
        self.play(ApplyFunction(ZoomInAndMove, VGroup(*svgs)), run_time=1)
        self.wait(2)
        # Create Titles for student svgs and align + write them
        text_m = TextMobject("Eve")
        text_r = TextMobject("Alice")
        text_rr = TextMobject("Bob")
        text_l = TextMobject("Daniel")
        text_ll = TextMobject("Ted")
        texts = [text_ll, text_l, text_m, text_r, text_rr]
        for i, text in enumerate(texts):
            text.next_to(svgs[i], 0.1*TOP)
            text.scale(0.5)
        self.play(*[Write(text) for text in texts])
        self.wait(3)
        # Create squares to show different permutations
        square_l = Square().shift(1.2*LEFT).shift(0.35*TOP)
        square_r = Square().shift(1.2*RIGHT).shift(0.35*TOP)
        self.play(FadeInFromDown(square_l), FadeInFromDown(square_r))
        # Algorithm for permutations
        # NOTE(review): the immediate `break` deliberately skips the pair
        # animation copied from PermutationScene; the loop body below it
        # is dead code kept from the earlier scene.
        for i in range(len(svgs)):
            break
            main_group = Group(svgs[i], texts[i])
            main_pos = main_group.get_center()
            self.play(main_group.move_to, square_l)
            for j in range(len(svgs)):
                if i != j:
                    second_group = Group(svgs[j], texts[j])
                    second_pos = second_group.get_center()
                    self.play(second_group.move_to, square_r, run_time=0.3)
                    self.play(second_group.move_to, second_pos, run_time=0.3)
            self.play(main_group.move_to, main_pos, run_time=1)
        left_side_wo_symbol = Group(*svgs, *texts, square_l, square_r)
        sum_symbol = TexMobject("\sum").scale(2.2).next_to(left_side_wo_symbol, 1.7*LEFT).set_y(0)
        left_side = Group(left_side_wo_symbol, sum_symbol)
        equal = TexMobject("=").scale(3).shift(0.7*RIGHT)
        result = TexMobject("5", "\cdot", "4").scale(3).next_to(equal, 5*RIGHT)
        real_result = TexMobject("5 \cdot 4", "\over 2 \cdot 1").scale_in_place(2).move_to(result).shift(0.5*UP)
        calculated_result = TexMobject("10").scale_in_place(3).move_to(result)
        # Show Result
        self.wait(2)
        self.play(left_side_wo_symbol.scale, 0.7, run_time=1)
        self.play(Write(sum_symbol))
        self.play(left_side.to_edge, LEFT, run_time= 1)
        self.play(Write(equal), Write(result))
        self.wait(2)
        # Explain Result
        # Highlight helpers: enlarge + color a square, then undo it exactly.
        def start_indication(mob):
            mob.scale_in_place(1.2)
            mob.set_color(YELLOW)
            return mob
        def end_indication(mob):
            mob.scale_in_place(1/1.2)
            mob.set_color(WHITE)
            return mob
        self.play(ApplyFunction(start_indication, square_l))
        self.wait(2)
        self.play(ShowCreationThenDestructionAround(Group(*svgs, *texts)), ShowCreationThenDestructionAround(result[0]), run_time=3)
        self.wait(2)
        self.play(ApplyFunction(end_indication, square_l))
        self.wait(3)
        # Explain Second Part of the result
        temp_group = Group(svgs[0], texts[0])
        temp_pos = temp_group.get_center()
        self.play(ApplyMethod(temp_group.move_to, square_l))
        self.wait(2)
        self.play(ApplyFunction(start_indication, square_r))
        self.wait(2)
        self.play(ShowCreationThenDestructionAround(Group(*svgs[1:], *texts[1:])), ShowCreationThenDestructionAround(result[2]), run_time=3)
        self.wait(2)
        self.play(ApplyFunction(end_indication, square_r))
        self.play(ApplyMethod(temp_group.move_to, temp_pos))
        self.wait(3)
        # add braces
        k_brace = Brace(Group(square_l, square_r), UP).set_color(GREEN)
        k_brace_text = k_brace.get_tex("k"," = 2").set_color(GREEN)
        self.play(GrowFromCenter(k_brace), FadeIn(k_brace_text))
        self.wait(2)
        n_brace = Brace(Group(*svgs)).set_color(BLUE)
        n_brace_text = n_brace.get_tex("n"," = 5").set_color(BLUE)
        self.play(GrowFromCenter(n_brace), FadeIn(n_brace_text))
        self.wait(2)
        # Two versions of the formula: with an explicit /k! stacked below,
        # and the compact n!/(k!(n-k)!) form they merge into.
        main_brace = Brace(result).set_color(RED)
        main_text = main_brace.get_tex("{n", "! ", "\over", "k", "!\cdot", "(", "n","-","k",")!}", "\over ", "k", "!").scale(1.3)
        main_text_real_formula = main_brace.get_tex("{n", "! ", "\over", "k", "!\cdot", "(", "n","-","k",")!}")
        # n
        main_text[0].set_color(BLUE)
        main_text[6].set_color(BLUE)
        main_text_real_formula[0].set_color(BLUE)
        main_text_real_formula[6].set_color(BLUE)
        # k
        main_text[3].set_color(GREEN)
        main_text[8].set_color(GREEN)
        main_text[11].set_color(GREEN)
        main_text_real_formula[3].set_color(GREEN)
        main_text_real_formula[8].set_color(GREEN)
        main_text_real_formula2 = main_text_real_formula.copy()
        before_x = main_text[5:10].get_x()
        main_text[5:10].set_x(main_text.get_center()[0])
        original_y = main_text.get_y()
        main_text_real_formula[5:10].set_x(main_text.get_x())
        self.play(GrowFromCenter(main_brace), FadeIn(main_text_real_formula[:3]), FadeIn(main_text_real_formula[5:]))
        self.wait(7)
        self.play(Indicate(result))
        self.wait(13)
        self.play(ReplacementTransform(main_text_real_formula[:3], main_text[:3]),
            ReplacementTransform(main_text_real_formula[5:], main_text[5:10]),
            FadeIn(main_text[10:]))
        main_text_real_formula[5:10].set_x(before_x)
        self.wait(6)
        # Restore size/position after the merge into the compact formula.
        def scale_and_set(mob):
            mob.scale_in_place(1.3)
            mob.set_y(original_y)
            return mob
        self.play(ReplacementTransform(main_text[11:13], main_text_real_formula2[3:5]),
            FadeOut(main_text[10]),
            ReplacementTransform(main_text[:3], main_text_real_formula2[:3]),
            ReplacementTransform(main_text[5:10], main_text_real_formula2[5:]),
            FadeOut(main_text[10]))
        self.play(ApplyFunction(scale_and_set, main_text_real_formula2))
        self.wait(3)
        # Evaluate numerically: 5*4 / (2*1) = 10.
        self.play(ReplacementTransform(result, real_result[0]), FadeIn(real_result[1]))
        self.wait(2)
        self.play(ReplacementTransform(real_result, calculated_result))
        self.wait(2)
        self.play(Indicate(calculated_result))
        self.wait(4)
        self.play(Indicate(main_text_real_formula2))
        self.wait(1)
        # Clear the stage and leave only the final formula, centered.
        def move_and_scale(mob):
            mob.scale_in_place(2)
            mob.move_to(ORIGIN)
            return mob
        self.play(ApplyFunction(move_and_scale, main_text_real_formula2), FadeOut(left_side), FadeOut(equal), FadeOut(calculated_result), FadeOut(Group(k_brace_text, k_brace, n_brace_text, n_brace, main_brace)))
        self.wait(2)
| [
""
] | |
377b21ca6961646b114df319987e5f99ad8f28bd | a1b158abe0188203873ec5e37c71c9cd6452196b | /freeze.py | 8787aea86853cc94f8e9be1e728beea5ea251aee | [] | no_license | taiki-cano/kabochappiNote | d13891338e4f412d6825d5fac8034e035a34a0c6 | 86e4dd819c4560c695fe84f1761c82045aed9afd | refs/heads/master | 2023-03-24T17:52:33.200703 | 2021-03-23T09:15:16 | 2021-03-23T09:15:16 | 350,598,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from flask_frozen import Freezer
from myapp import app
freezer = Freezer(app)
if __name__ == "__main__":
freezer.freeze()
| [
"bolu@outlook.jp"
] | bolu@outlook.jp |
fce854fa476c7f67e6ea810e5a2980fb700b79e3 | 2342744291c27d4501f53accf48678e22dfcfec7 | /scripts/download_and_regrid/Download_PRISMdata.py | 288ff86a7ba09a341b26f42b74865bb5b07de2bc | [] | no_license | l5d1l5/tree_mortality_from_vod | dc77858ac5bb690bc2dbae9d0eaa45793c8b0a99 | 0111df5ad1b61db121470a76b1f1a403f453bd89 | refs/heads/master | 2022-12-30T23:59:41.616552 | 2020-10-20T01:30:52 | 2020-10-20T01:30:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | import urllib
import os
import sys
from dirs import MyDir
#data=['ppt','tmax','tmean']
#year_range=range(2005,2017)
#month_range=range(1,13)
#link='http://services.nacse.org/prism/data/public/4km/'
#for file in data:
# os.chdir(MyDir+'/PRISM')
# if not(os.path.isdir(MyDir+'/PRISM/'+file)):
# os.mkdir(file)
# os.chdir(MyDir+'/PRISM/'+file)
# for year in year_range:
# for month in month_range:
# linkname=link+file+'/%d%02d'%(year,month)
# filename='PRISM_%s_stable_4kmM2_%d%02d_bil.zip'%(file,year,month)
# if not(os.path.isfile(filename)):
# urllib.urlretrieve(linkname,filename)
##-----------------------------------------------------------------------------
data=['vpdmax']
year_range=range(2005,2017)
month_range=range(1,13)
day_range=range(1,32)
link='http://services.nacse.org/prism/data/public/4km/'
for variable in data:
os.chdir(MyDir+'/PRISM')
if not(os.path.isdir(MyDir+'/PRISM/'+variable)):
os.mkdir(variable)
os.chdir(MyDir+'/PRISM/'+variable)
for year in year_range:
for month in month_range:
for day in day_range:
sys.stdout.write('\r'+'Processing data for %d %02d %02d...'%(year,month,day))
sys.stdout.flush()
linkname=link+variable+'/%d%02d%02d'%(year,month,day)
filename='PRISM_%s_stable_4kmM2_%d%02d%02d_bil.zip'%(variable,year,month,day)
if not(os.path.isfile(filename)):
urllib.urlretrieve(linkname,filename) | [
"kkraoj@gmail.com"
] | kkraoj@gmail.com |
7051e4a16a1737b784cb8b5336898af39dcf1074 | b3f1f5b51f801bfdb640fd1225072d1316e911bc | /modules/figures.py | d07f16a9ee34f9f13baa347a443a1cef9eca6a80 | [
"MIT"
] | permissive | WolfGang1710/Figures_diffraction | 97d500a5ab885ac37e822cba0020592296143bef | 82b89ee55859f9eb408d91ca65dc5032a6423fdd | refs/heads/master | 2022-12-04T04:18:41.195561 | 2022-11-22T17:58:41 | 2022-11-22T17:58:41 | 266,510,501 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,022 | py | """
Title: Projet IPT - Diffraction DI
Description: Ce programme permet de créer les figures de diffraction
de la lumière et de les enregistrer.
"""
#pylint: disable=invalid-name
#===Importation des modules ===
import matplotlib.pyplot as plt
def save(image, nom_figure, cmap, chemin_dossier, nom_fichier, extension='.png', affichage=False):
"""
Génère une figure contenant une image
grâce au module matplotlib et la sauvegarde dans le dossier
chemin_dossier avec le nom_fichier.extension (extension vaut
par défaut .png)
- image est une matrice. (list)
- nom_figure est le nom qui sera affiché sur la fenêtre si
affichage = True (str)
-cmap est la colormap à utiliser pour les couleurs. (str)
- nom_fichier (str)
- extension (str)
- affichage (bool)
"""
#Création de la figure, la fenêtre a pour titre 'nom_figure'
figure = plt.figure(str(nom_figure))
#Paramétrage de l'image
plt.imshow(image, cmap=str(cmap), interpolation='nearest')
plt.axis('off') #Désactivation des axes
#Enregistrement de l'image ; paramètres tight permet d'avoir
#peu de contour blanc
figure.savefig(str(chemin_dossier)+str(nom_fichier)+str(extension), bbox_inches='tight')
#Affichage de la photo ou non
if affichage == True:
print("Affichage.")
plt.show() #Affichage
plt.clf() #Remise à zéro de la fenêtre
def s_save(L_image, cmap, chemin_dossier, L_nom_fichier, L_titre='', L_figure='', extension='.png', affichage=False):
"""
Génère autant de figure qu'il y d'élément dans L_image
grâce au module matplotlib et les sauvegarde dans le dossier
chemin_dossier avec le nom_fichier.extension
(extension vaut par défaut .png).
- L_image est une liste de matrices d'image. (list)
- cmap est la colormap à utiliser pour les couleurs. (str)
- chemin_dossier (str).
- L_nom_fichier est la liste contenant les différents noms utilisés
pour enregistrer les photos générées
- L_titres est optionnel, liste contenant le titre à afficher
au-dessous de chaque image.
- L_figure est optionnel, liste contenant les différents noms à
afficher en titre de fenêtre si affichage = True
- affichage, optionnel - booléan
/! on considère que les liste sont correctement ordonnées :
L_nom_fichier[i] est le nom de l'image L_image[i] etc.
"""
#Si l'argument L_titre n'est pas donné, on le transforme en liste
if L_titre == '':
L_titre = []
for _ in range(len(L_image)):
L_titre.append('')
#Si l'argument L_figure n'est pas donné, on le transforme en liste
if L_figure == '':
L_figure = []
for _ in range(len(L_image)):
L_figure.append('')
index = 0
for image in L_image: #On boucle sur toutes les images de la liste
#On créé une fenêtre pour cjauqe image ayant pour titre
#"L_figure[index]"
figure = plt.figure(str(L_figure[index]))
#Paramétrage de l'image
plt.imshow(image, cmap=str(cmap), interpolation='nearest')
plt.axis('off') #Désactivation des axes
plt.title(str(L_titre[index])) #Titre de l'image "L_titre[index]"
#Enregistrement de l'image ; paramètres tight permet d'avoir
#peu de contour blanc
figure.savefig(str(chemin_dossier)+"/"+str(L_nom_fichier[index])+str(extension), bbox_inches='tight')
index += 1 #On passage à l'image suivante
#Affichage ou non
if affichage == True:
print("Affichage.")
plt.show() #Affichage
plt.clf() #Remise à zéro de la fenêtre
def subplot_save(L_image, nb_ligne, nb_colonne, cmap, chemin_dossier, nom_fichier, nom_figure='', L_titre='', extension='.png', affichage=False):
"""
Génère une image contenant tout les images de la liste L_image
grâce au module matplotlib et les sauvegarde dans chemin_dossier
avec le nom nom_fichier.extension (extension ='.png' par défaut)
- L_image est une liste de matrices d'image. (list)
- nb_ligne (int). On divise la fenêtre en nb_lignes lignes.
- nb_colonne (int). On divise la fenêtre en nb_colonne colonne.
- cmap est la colormap à utiliser pour les couleurs. (str)
- chemin_dossier (str).
- nom_fichier est le nom pour enregistrer les photos générées dans
la même fenêtre.
- nom_figure (str). Nom affiché sur la fenêtre.
- L_titres est optionnel, liste contenant le titre à afficher
au-dessous de chaque image.
- L_figure est optionnel, liste contenant les différents noms à
afficher en titre de fenêtre si affichage = True
- affichage, optionnel - booléan
/! on considère que les liste sont correctement ordonnées :
L_nom_fichier[i] est le nom de l'image L_image[i] etc.
"""
index = 1 #Position de l'image sur la figure (qui ne peut valoir 0)
figure = plt.figure(str(nom_figure)) #Création de la figure
#Si l'argument L_titre n'est pas donné, on le transforme en liste
if L_titre == '':
L_titre = []
for _ in range(len(L_image)):
L_titre.append('')
#Chaque image dans L_image se voit attribuer une place sur la figure
for image in L_image:
#On défini cette place dans la fenêtre
figure.add_subplot(int(str(nb_ligne)+str(nb_colonne)+str(index)))
#Paramétrage de la figure
plt.imshow(image, cmap=str(cmap), interpolation='nearest')
plt.axis('off') #Désactivation des axes
plt.title(str(L_titre[index-1])) #Titre image "L_titre[index-1]"
index += 1 #On passe à la figure suivante
#Enregistrement de l'image ; paramètres tight permet d'avoir
#peu de contour blanc
figure.savefig(str(chemin_dossier)+"/"+str(nom_fichier)+str(extension), bbox_inches='tight')
if affichage == True:
print("Affichage.")
plt.show() #Affichage
plt.clf() #Remise à zéro de la fenêtre
| [
"noreply@github.com"
] | noreply@github.com |
2a6b8636981543229b79a083e04eade5f4e1d75d | 7c998fd8a0f300ccd79aa8366b3ea9f75b7239af | /misc/speeding_ticket.py | 8c5e30a786c7cfdfdb0af41647f70a0d7bd390d4 | [] | no_license | mattchrlw/competitive-programming | 811b0b8b9732f9f6e56809129c6dbce8be271f0a | 0e965bcd237072a4459270f729d8bfe11d63e8bc | refs/heads/main | 2023-02-15T20:17:47.230618 | 2021-01-06T07:16:17 | 2021-01-06T07:16:17 | 316,663,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | fin = open("speeding.in", "r")
fout = open("speeding.out","w")
output = ""
speed = []
limit = []
n, m = [int(i) for i in fin.readline().split()]
for i in range(n):
l, s = [int(i) for i in fin.readline().split()]
for _ in range(l):
limit.append(s)
for i in range(m):
l, s = [int(i) for i in fin.readline().split()]
for _ in range(l):
speed.append(s)
fin.close()
fout.write(str(max(0, max([speed[i] - limit[i] for i in range(len(limit))]))))
fout.close() | [
"mattchrlw@gmail.com"
] | mattchrlw@gmail.com |
954b5f123263a27d2de8144a7805c66055a3a6b8 | a47cd75fdbea15c076e70a3312a6e4cf09aec492 | /菜鸟实例12--判断闰年.py | 3dc1939fb761bef531d82c3b20363884c21a1c24 | [] | no_license | iam66/sele_project_test1 | 38af85c0ed2c696d72184e92b180933f538c7850 | a5ab25cf431a10695ab50b50a85920f943b0768e | refs/heads/master | 2020-03-22T19:58:08.205296 | 2018-07-12T11:23:34 | 2018-07-12T11:23:34 | 140,564,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | year = int(input('请输入年份:'))
if (year % 4==0 and year % 100 !=0) or year%400==0:
print('{0}是闰年'.format(year))
else:
print('{0}是平年'.format(year))
| [
"765007536@qq.com"
] | 765007536@qq.com |
f7fb65690822efe6f71aad076d707f92528ec6ec | 55591cff4b4dbfec91ac8e498652c2ad60bfdffe | /datahub/activity_stream/interaction/serializers.py | 222e2a50af6ecc115e0552613b574b504ad24d4b | [
"MIT"
] | permissive | alixedi/data-hub-api-cd-poc | 2caf6575cced33f0b2b22582d8bdcd60e99ba7d8 | a5e5ea45bb496c0d2a06635864514af0c7d4291a | refs/heads/develop | 2020-12-26T05:52:39.575158 | 2020-01-30T15:27:27 | 2020-01-30T15:27:27 | 237,407,350 | 0 | 0 | MIT | 2020-02-07T12:38:47 | 2020-01-31T10:25:34 | null | UTF-8 | Python | false | false | 3,897 | py | from datahub.activity_stream.serializers import ActivitySerializer
from datahub.interaction.models import Interaction
class InteractionActivitySerializer(ActivitySerializer):
"""Interaction serialiser for activity stream."""
KINDS_JSON = {
Interaction.KINDS.interaction: 'Interaction',
Interaction.KINDS.service_delivery: 'ServiceDelivery',
}
class Meta:
model = Interaction
def _get_project_context(self, project):
return {} if project is None else {
'id': f'dit:DataHubInvestmentProject:{project.pk}',
'type': 'dit:InvestmentProject',
'name': project.name,
'url': project.get_absolute_url(),
}
def _get_event_context(self, event):
return {} if event is None else {
'id': f'dit:DataHubEvent:{event.pk}',
'type': 'dit:Event',
'dit:eventType': {'name': event.event_type.name},
'name': event.name,
'startTime': event.start_date,
'endTime': event.end_date,
'dit:team': self._get_team(event.lead_team),
'url': event.get_absolute_url(),
}
def _get_context(self, instance):
if instance.kind == Interaction.KINDS.interaction:
context = self._get_project_context(instance.investment_project)
elif instance.kind == Interaction.KINDS.service_delivery:
context = self._get_event_context(instance.event)
else:
context = {}
return context
def _get_adviser_with_team(self, participant):
adviser = self._get_adviser(participant.adviser)
if participant.team is not None:
adviser['dit:team'] = self._get_team(participant.team)
return adviser
def _get_dit_participants(self, participants):
return [
self._get_adviser_with_team(participant)
for participant in participants.all()
if participant.adviser is not None
]
def _get_team(self, team):
return {} if team is None else {
'id': f'dit:DataHubTeam:{team.pk}',
'type': ['Group', 'dit:Team'],
'name': team.name,
}
def to_representation(self, instance):
"""
Serialize the interaction as per Activity Stream spec:
https://www.w3.org/TR/activitystreams-core/
"""
interaction_id = f'dit:DataHubInteraction:{instance.pk}'
interaction = {
'id': f'{interaction_id}:Announce',
'type': 'Announce',
'published': instance.created_on,
'generator': self._get_generator(),
'object': {
'id': interaction_id,
'type': ['dit:Event', f'dit:{self.KINDS_JSON[instance.kind]}'],
'startTime': instance.date,
'dit:status': instance.status,
'dit:archived': instance.archived,
'dit:subject': instance.subject,
'attributedTo': [
self._get_company(instance.company),
*self._get_dit_participants(instance.dit_participants),
*self._get_contacts(instance.contacts),
],
'url': instance.get_absolute_url(),
},
}
context = self._get_context(instance)
if context:
interaction['object']['context'] = [context]
if (
instance.kind == Interaction.KINDS.interaction
and instance.communication_channel is not None
):
interaction['object']['dit:communicationChannel'] = {
'name': instance.communication_channel.name,
}
if instance.service is not None:
interaction['object']['dit:service'] = {
'name': instance.service.name,
}
return interaction
| [
"alixedi@gmail.com"
] | alixedi@gmail.com |
5bf92804ebbb44951df8edec22035ecaa5639594 | e2216a22541b9bd1bd3f87ed2fd55e6f7b141dfe | /examples/sauerbraten_espaim.py | 47c326359f23f9e7939f4565ca94067f8eb90afd | [] | no_license | ALittlePatate/STW | 88866444dda70e5a70d0c9acc64f660391969d8d | 565d2055eccf1fb95347f47c35a7728809b95136 | refs/heads/master | 2023-02-16T15:29:29.095226 | 2021-01-19T16:35:02 | 2021-01-19T16:35:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,239 | py | import sys
from math import atan2, asin, pi
from pymeow import *
class Offsets:
EntityList = 0x346C90
PlayerCount = 0x346C9C
ViewMatrix = 0x32D040
GameMode = 0x26f6c0
Health = 0x178
Armor = 0x180
State = 0x77
Name = 0x274
Team = 0x378
ViewAngles = 0x3C
class Entity:
def __init__(self, addr):
self.addr = addr
self.hpos3d = read_vec3(mem, addr)
self.fpos3d = vec3(
self.hpos3d["x"], self.hpos3d["y"], self.hpos3d["z"] - 15
)
self.health = read_int(mem, addr + Offsets.Health)
self.armor = read_int(mem, addr + Offsets.Armor)
self.team = read_string(mem, addr + Offsets.Team)
self.alive = read_byte(mem, addr + Offsets.State) == 0
self.name = read_string(mem, addr + Offsets.Name)
self.distance = 0.0
self.hpos2d = vec2()
self.fpos2d = vec2()
try:
mem = process_by_name("sauerbraten.exe")
base = mem["baseaddr"]
overlay = overlay_init("Cube 2: Sauerbraten")
local = None
except Exception as e:
sys.exit(e)
def is_team_game():
return read_byte(mem, base + Offsets.GameMode) in \
[2, 4, 6, 8, 10, 11, 12, 17, 13, 14, 18, 15, 16, 19, 20, 21, 22]
def get_ents():
player_count = read_int(mem, base + Offsets.PlayerCount)
if player_count > 1:
ent_buffer = read_ints64(mem, read_int64(mem, base + Offsets.EntityList), player_count)
try:
global local
local = Entity(ent_buffer[0])
except:
return
vm = read_floats(mem, base + Offsets.ViewMatrix, 16)
for e in ent_buffer[1:]:
try:
ent = Entity(e)
if ent.alive:
if is_team_game() and ent.team == local.team:
continue
ent.hpos2d = wts_ogl(overlay, vm, ent.hpos3d)
ent.fpos2d = wts_ogl(overlay, vm, ent.fpos3d)
ent.distance = int(vec3_distance(local.hpos3d, ent.hpos3d) / 3)
yield ent
except:
continue
def aim_bot(ent_vecs):
src = local.hpos3d
dst = vec3_closest(src, ent_vecs)
angle = vec2()
angle["x"] = -atan2(dst["x"] - src["x"], dst["y"] - src["y"]) / pi * 180.0
angle["y"] = asin((dst["z"] - src["z"]) / vec3_distance(src, dst)) * (180.0 / pi)
write_vec2(mem, local.addr + Offsets.ViewAngles, angle)
def main():
set_foreground("Cube 2: Sauerbraten")
font = font_init(10, "Tahoma")
while overlay_loop(overlay):
overlay_update(overlay)
if key_pressed(35):
overlay_close(overlay)
ent_vecs = list()
for e in get_ents():
ent_vecs.append(e.hpos3d)
head = e.fpos2d["y"] - e.hpos2d["y"]
width = head / 2
center = width / -2
font_x = e.hpos2d["x"] - len(e.name)
alpha_box(
e.hpos2d["x"] + center,
e.hpos2d["y"],
width,
head + 5,
rgb("red"),
rgb("black"),
0.15,
)
font_print(
font,
font_x,
e.hpos2d["y"] + 10,
e.name,
rgb("white")
)
font_print(
font,
font_x,
e.fpos2d["y"] - 10,
f"{e.health} ({e.armor})",
rgb("green") if e.health > 50 else rgb("red")
)
font_print(
font,
font_x,
e.fpos2d["y"] - 23,
str(e.distance),
rgb("white")
)
dashed_line(
overlay["midX"],
0,
e.fpos2d["x"],
e.fpos2d["y"],
1,
rgb("orange"),
)
if key_pressed(88) and ent_vecs:
aim_bot(ent_vecs)
overlay_deinit(overlay)
if __name__ == "__main__":
main()
| [
"shucks@web.com"
] | shucks@web.com |
371a5524cd7cf37c6e5684e417e269098e0914b9 | 8674d7e3ff78e98ade68d3f45e46d730ecf6970c | /python/visualizer.py | 9685b9e22688c20a5ba7911fc74c994ddc2062f3 | [] | no_license | adrvdw/DataProcessing | 3a58caa47d0349f91a4152f70ae201780ddd8f7e | 7b1383d56ce46176d9ed395a529ec3448fda0cc4 | refs/heads/master | 2020-04-05T04:50:13.006459 | 2018-12-17T20:48:26 | 2018-12-17T20:48:26 | 156,569,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | #!/usr/bin/env python
# Name: Ad Ruigrok van der Werve
# Student number: 11323760
"""
This script visualizes data obtained from a .csv file
"""
import csv
import matplotlib.pyplot as plt
import numpy
# Global constants for the input file, first and last year
INPUT_CSV = "movies.csv"
START_YEAR = 2008
END_YEAR = 2018
# Global dictionary for the data
data_dict = {str(key): [] for key in range(START_YEAR, END_YEAR)}
if __name__ == "__main__":
with open(INPUT_CSV, newline='') as csvfile:
movies = csv.reader(csvfile)
year = int(START_YEAR)
counter = 0
for row in movies:
rating = row[1]
year = row[2]
if counter > 1:
data_dict[year].append(rating)
counter += 1
average_list = []
for key in range(START_YEAR, END_YEAR):
string_list = data_dict[str(key)]
float_list = []
for value in string_list:
value = float(value)
float_list.append(value)
length = float(len(float_list))
summed = sum(float_list)
average_list.append(summed / length)
plt.xticks(range(START_YEAR, END_YEAR))
plt.yticks([8.1, 8.2, 8.3, 8.4, 8.5, 8.6, 8.7, 8.8, 8.9])
plt.plot(range(START_YEAR, END_YEAR), average_list)
plt.axis([START_YEAR -1 , END_YEAR, 8.1, 8.9])
plt.grid()
plt.show()
| [
"adruigrokvanderwerve@wcw-stud-145-109-34-86.wireless.uva.nl"
] | adruigrokvanderwerve@wcw-stud-145-109-34-86.wireless.uva.nl |
73cc3ea2b59f3696ec7a086dedd0fccfeca6d5cb | b244140226aa627ac0784f603e2df3d1a95be3d1 | /python_test/crawling.py | 9423f0d7d180135c193f8471df705c5402d34ef8 | [] | no_license | sontaku/learning_elk | 7720f2e3cfa54c2c1975e1f2e1a5a77a2f0fcabd | 8f453b3f05f91a6e984c0a9d0cd6fddef87c5a14 | refs/heads/main | 2023-06-04T11:43:47.310004 | 2021-06-22T11:44:24 | 2021-06-22T11:44:24 | 368,138,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,510 | py | import requests
from pandas import DataFrame
from bs4 import BeautifulSoup
import re
from datetime import datetime
import os
import pymysql
date = str(datetime.now())
date = date[:date.rfind(':')].replace(' ', '_')
date = date.replace(':', '시') + '분'
query = input('검색 키워드를 입력하세요 : ')
news_num = int(input('총 필요한 뉴스기사 수를 입력해주세요(숫자만 입력) : '))
query = query.replace(' ', '+')
news_url = 'https://search.naver.com/search.naver?where=news&sm=tab_jum&query={}'
req = requests.get(news_url.format(query))
soup = BeautifulSoup(req.text, 'html.parser')
news_dict = {}
idx = 0
cur_page = 1
print()
print('크롤링 중...')
while idx < news_num:
### 네이버 뉴스 웹페이지 구성이 바뀌어 태그명, class 속성 값 등을 수정함(20210126) ###
table = soup.find('ul', {'class': 'list_news'})
li_list = table.find_all('li', {'id': re.compile('sp_nws.*')})
area_list = [li.find('div', {'class': 'news_area'}) for li in li_list]
a_list = [area.find('a', {'class': 'news_tit'}) for area in area_list]
print('테스트', a_list)
for n in a_list[:min(len(a_list), news_num - idx)]:
news_dict[idx] = {'title': n.get('title'),
'url': n.get('href'),
'category': '금융'
}
print(news_dict[idx])
idx += 1
cur_page += 1
pages = soup.find('div', {'class': 'sc_page_inner'})
next_page_url = [p for p in pages.find_all('a') if p.text == str(cur_page)][0].get('href')
req = requests.get('https://search.naver.com/search.naver' + next_page_url)
soup = BeautifulSoup(req.text, 'html.parser')
print('크롤링 완료')
# conn = pymysql.connect(host='DB 서버 IP', user='계정', password='비번', db='데이터베이스명', charset='utf8', autocommit=True,cursorclass=pymysql.cursors.DictCursor)
conn = pymysql.connect(host='localhost', user='elkt', password='elkt', db='elktest', charset='utf8', autocommit=True,cursorclass=pymysql.cursors.DictCursor)
print('연결성공')
cursor = conn.cursor()
print(news_dict)
for key, value in news_dict.items():
print(key, value)
print(value['title'])
print(value['url'])
parse = re.sub('[-=#/\:$}""]', " ", value['title'])
print("테스트입니다", parse)
sql = 'REPLACE INTO news(news_id ,title ,url, category, currdate) VALUES (news_id,"{0}","{1}","{2}",sysdate())'.format(parse, value['url'], value['category'])
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
| [
"jyson19@gmail.com"
] | jyson19@gmail.com |
736887a4862a68ddb38a06f891def851858936db | 9d8acc20d2ee1d1957849dfb71c22e0dae2d8c5c | /baomoicrawl/venv/Lib/site-packages/twisted/test/test_ftp_options.py | ef567bbb49bfce24ee4cb271b1a59b1a8730dd7a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | thuy4tbn99/TranTruongThuy_17021178_Nhom4_Crawler | b0fdedee2942a12d9f64dfed93f43802dc5ab340 | 87c8c07433466bbc43a24ea089f75baeb467c356 | refs/heads/master | 2022-11-27T21:36:33.917491 | 2020-08-10T23:24:42 | 2020-08-10T23:24:42 | 286,583,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,765 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.tap.ftp}.
"""
from twisted.trial.unittest import TestCase
from twisted.cred import credentials, error
from twisted.tap.ftp import Options
from twisted.python import versions
from twisted.python.filepath import FilePath
class FTPOptionsTests(TestCase):
"""
Tests for the command line option parser used for C{twistd ftp}.
"""
usernamePassword = (b'iamuser', b'thisispassword')
def setUp(self):
"""
Create a file with two users.
"""
self.filename = self.mktemp()
f = FilePath(self.filename)
f.setContent(b':'.join(self.usernamePassword))
self.options = Options()
def test_passwordfileDeprecation(self):
"""
The C{--password-file} option will emit a warning stating that
said option is deprecated.
"""
self.callDeprecated(
versions.Version("Twisted", 11, 1, 0),
self.options.opt_password_file, self.filename)
def test_authAdded(self):
"""
The C{--auth} command-line option will add a checker to the list of
checkers
"""
numCheckers = len(self.options['credCheckers'])
self.options.parseOptions(['--auth', 'file:' + self.filename])
self.assertEqual(len(self.options['credCheckers']), numCheckers + 1)
def test_authFailure(self):
"""
The checker created by the C{--auth} command-line option returns a
L{Deferred} that fails with L{UnauthorizedLogin} when
presented with credentials that are unknown to that checker.
"""
self.options.parseOptions(['--auth', 'file:' + self.filename])
checker = self.options['credCheckers'][-1]
invalid = credentials.UsernamePassword(self.usernamePassword[0], 'fake')
return (checker.requestAvatarId(invalid)
.addCallbacks(
lambda ignore: self.fail("Wrong password should raise error"),
lambda err: err.trap(error.UnauthorizedLogin)))
def test_authSuccess(self):
"""
The checker created by the C{--auth} command-line option returns a
L{Deferred} that returns the avatar id when presented with credentials
that are known to that checker.
"""
self.options.parseOptions(['--auth', 'file:' + self.filename])
checker = self.options['credCheckers'][-1]
correct = credentials.UsernamePassword(*self.usernamePassword)
return checker.requestAvatarId(correct).addCallback(
lambda username: self.assertEqual(username, correct.username)
)
| [
"thuy4tbn99@gmail.com"
] | thuy4tbn99@gmail.com |
b7283bfaeee4aac17256bbfa8a86fb9d0f0e1c8c | 6bc88ea8e817736a085bf386d4afbbf73a1b961f | /mutation/migrations/0001_initial.py | 66c77b51fdc85ca5386d75c4ef1826b0c1211cb4 | [] | no_license | Dodant/DB_First_Project | c0ad9b4f51db71435888e7324533431ee6b383ad | 37b99e4aeb4658cd2dea32f27d44bc99957c9b86 | refs/heads/master | 2020-06-05T08:02:02.569126 | 2019-06-17T15:09:43 | 2019-06-17T15:09:43 | 192,369,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,442 | py | # Generated by Django 2.2.2 on 2019-06-17 11:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Allo_Syndrome',
fields=[
('allo_syn_code', models.IntegerField(primary_key=True, serialize=False)),
('allo_syn_name', models.CharField(max_length=30)),
('allo_syn_desc', models.TextField(default='Fill')),
('allo_syn_fre', models.CharField(max_length=30, null=True)),
],
),
migrations.CreateModel(
name='Auto_Syndrome',
fields=[
('auto_syn_code', models.IntegerField(primary_key=True, serialize=False)),
('auto_syn_name', models.CharField(max_length=35)),
('auto_syn_desc', models.TextField(default='Fill')),
('auto_syn_fre', models.CharField(max_length=30, null=True)),
],
),
migrations.CreateModel(
name='Autosome',
fields=[
('auto_no', models.IntegerField(primary_key=True, serialize=False)),
('auto_desc', models.TextField(default='Fill')),
],
),
migrations.CreateModel(
name='XYLinked',
fields=[
('allo_mut_code', models.IntegerField(primary_key=True, serialize=False)),
('linked', models.CharField(max_length=10)),
('allo_syn_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mutation.Allo_Syndrome')),
],
),
migrations.CreateModel(
name='Auto_Mutation',
fields=[
('auto_mut_code', models.IntegerField(primary_key=True, serialize=False)),
('mutation_type', models.CharField(choices=[('Deletion', 'Deletion'), ('Duplication', 'Duplication'), ('Inversion', 'Inversion'), ('Translocation', 'Translocation')], max_length=15)),
('auto_sec', models.IntegerField(blank=True, null=True)),
('auto_pri', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mutation.Autosome')),
('auto_syn_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mutation.Auto_Syndrome')),
],
),
]
| [
"ohho0728@naver.com"
] | ohho0728@naver.com |
3e5b84a9c8b44a20e8220b2f08183b6df5e4c710 | 936701969d2e58b2b767ba788784138c0c5dee95 | /todo_api/todos/serializers.py | a0aa85a8a16b54a53d117254ed4938e8b2f7a6be | [] | no_license | marciomarquessouza/todolist | 1adb0079ddfe282e34c4b135d7220f42226243c9 | 6e599138dc650960d12a85015ae57efb7a4b255b | refs/heads/master | 2020-03-13T03:35:47.007022 | 2019-02-12T20:21:37 | 2019-02-12T20:21:37 | 130,946,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | # todos/serializers.py
from rest_framework import serializers
from . import models
class TodoSerializer(serializers.ModelSerializer):
class Meta:
fields = (
'id',
'title',
'description',
)
model = models.Todo
| [
"marciomultimedia@gmail.com"
] | marciomultimedia@gmail.com |
12573798bb2cd66c19a6b323e9b7162707cd59a6 | 9da129ec93a6fd2c5f65b57a0faec21d8eb80720 | /Term_NSI/devoirs/4-dm2/Corrigé/S4/E4.py | 82d370f886007cdf745140140d3a4edd805448e6 | [] | no_license | FranckCHAMBON/ClasseVirtuelle | 79bd4e4114d27ca3792b46b1fb384c394397b329 | 48fe9883ee6763e79e561537bc0ed7048339b457 | refs/heads/master | 2022-08-22T15:35:48.128508 | 2021-04-28T07:21:04 | 2021-04-28T07:21:04 | 249,953,475 | 3 | 4 | null | 2022-08-05T09:28:10 | 2020-03-25T10:52:47 | HTML | UTF-8 | Python | false | false | 532 | py | """
Prologin: Entrainement 2003
Exercice: 4 - Initiales
https://prologin.org/train/2003/semifinal/initiales
"""
def initiales(texte):
"""Renvoie l'initiale de chaque mots de la phrase.
>>> initiales('Rentre Avec tes pieds')
RATP
"""
lettre=""
for i, c in enumerate(texte):
if c!=" " and (i==0 or (i > 0 and texte[i-1] == " ")):
lettre += c.upper()
print(lettre)
# Test
import doctest
doctest.testmod()
# Entrées
nb_lettre = int(input())
texte=input()
# Sortie
initiales(texte) | [
"franck.chambon@ac-aix-marseille.fr"
] | franck.chambon@ac-aix-marseille.fr |
c864394b9237eba1f5b927f0badf3a4f4273881d | 14c006e1f9441efd9a33ae717e02ae2f15a0b350 | /scoreboard.py | 4a64ab5ff75d1bc863a1b27a172905e174c20fe5 | [] | no_license | johng7765/Space-Invaders | 1cfa4a2a819aeb0c4f5619ae914d432bce99194e | 3d7d322b33a71335299d8096b8c890e3972f77ae | refs/heads/master | 2020-03-30T18:10:39.511692 | 2018-10-09T00:06:27 | 2018-10-09T00:06:27 | 151,487,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,071 | py | # scoreboard.py
# Created by: John Gawlik
# Campus ID: 889752424
# Due: September 21st, 2018
########################################################################################################################
import pygame.font
from pygame.sprite import Group
from ship import Ship
class Scoreboard():
"""A class to report scoring information."""
def __init__(self, ai_settings, screen, stats):
"""Initialize scorekeeping attributes."""
self.screen = screen
self.screen_rect = screen.get_rect()
self.ai_settings = ai_settings
self.stats = stats
# Font settings for scoring information
self.text_color = (0, 255, 0)
self.font = pygame.font.SysFont(None, 48)
# Prepare the initial score image.
self.prep_score()
self.prep_high_score()
self.prep_level()
self.prep_ships()
def prep_high_score(self):
"""Turn the high score into a rendered image."""
high_score = int(round(self.stats.high_score, -1))
high_score_str = "High Score: " + "{:,}".format(high_score)
self.high_score_image = self.font.render(high_score_str, True, self.text_color, self.ai_settings.bg_color)
# Center the high score at the top of the screen.
self.high_score_rect = self.high_score_image.get_rect()
self.high_score_rect.centerx = self.screen_rect.centerx
self.high_score_rect.top = self.score_rect.top
def prep_level(self):
"""Turn the level into a rendered image."""
level_str = "L: " + (str(self.stats.level))
self.level_image = self.font.render(level_str, True, self.text_color, self.ai_settings.bg_color)
# Position the level below the score.
self.level_rect = self.level_image.get_rect()
self.level_rect.right = self.score_rect.right
self.level_rect.top = self.score_rect. bottom + 10
def prep_score(self):
"""Turn the score into a rendered image."""
rounded_score = int(round(self.stats.score, -1))
score_str = "Score: " + "{:,}".format(rounded_score)
self.score_image = self.font.render(score_str, True, self.text_color, self.ai_settings.bg_color)
# Display the score at the top right of the screen.
self.score_rect = self.score_image.get_rect()
self.score_rect.right = self.screen_rect.right - 20
self.score_rect.top = 20
def prep_ships(self):
"""Show how many ships are left."""
self.ships = Group()
for ship_number in range(self.stats.ships_left):
ship = Ship(self.ai_settings, self.screen)
ship.rect.x = 10 + ship_number * (ship.rect.width + 10)
ship.rect.y = 10
self.ships.add(ship)
def show_score(self):
"""Draw scores and ships to the screen."""
self.screen.blit(self.score_image, self.score_rect)
self.screen.blit(self.high_score_image, self.high_score_rect)
self.screen.blit(self.level_image, self.level_rect)
self.ships.draw(self.screen)
| [
"johng7765@csu.fullerton.edu"
] | johng7765@csu.fullerton.edu |
e55655e807a18e9ab1404bb26635d96b384e723a | 6125e3cc779742ce582207b82f82d0b477033846 | /unit_test_debug/test_sd_convergence.py | 6f298399d4ecf2f25206c3e453d0500ad59882f8 | [] | no_license | awalenkiewicz/parcel | c0753ef8a102e56d6d7cde05912d2c56d96b600b | 261f9bbe5b8d392faf853262dd88f273c8015919 | refs/heads/master | 2021-01-19T18:15:31.459804 | 2016-10-13T09:25:44 | 2016-10-13T09:25:44 | 70,793,641 | 0 | 0 | null | 2016-10-13T10:01:30 | 2016-10-13T10:01:30 | null | UTF-8 | Python | false | false | 2,557 | py | import sys
sys.path.insert(0, "../")
sys.path.insert(0, "./")
sys.path.insert(0, "plots/comparison/")
from parcel import parcel
from libcloudphxx import common
from chem_conditions import parcel_dict
from scipy.io import netcdf
import numpy as np
import pytest
import os, glob
import subprocess
import math
import copy
sd_conc_list = [64, 128, 256, 512, 1024, 2*1024, 4*1024, 8*1024, 16*1024, 32*1204]
@pytest.fixture(scope="module")
def data(request):
    """
    Run one parcel simulation per initial super-droplet concentration in
    sd_conc_list and collect, for each run, the sd_conc value and the total
    dry mass of the aerosol at t = 0.

    Returns a dict: {"sd_conc": [...], "dry_mass": [...]}.
    All NetCDF output files are removed by a module-scoped finalizer.
    """
    # copy options from chem_conditions
    p_dict = copy.deepcopy(parcel_dict)
    # ... and modify them for the current test
    p_dict['chem_dsl'] = True
    # Very short column: only the initial (t=0) state matters for this test.
    p_dict['z_max'] = .05
    p_dict['outfreq'] = 1
    # 3rd moment of dry radius, 26 log-spaced bins.
    p_dict['out_bin'] = '{"drad": {"rght": 1e-6, "left": 1e-10, "drwt": "dry", "lnli": "log", "nbin": 26, "moms": [3]}}'
    # lists to store sd_conc and the total dry mass at t=0 of each test run
    out_sd_conc = []
    out_m3_dry = []
    for sd_conc in sd_conc_list:
        print "sd_conc = ", sd_conc
        p_dict['outfile'] = "convergence_test_sd_conc=" + str(sd_conc) + ".nc"
        p_dict['sd_conc'] = sd_conc
        # run parcel simulation
        parcel(**p_dict)
        # read data
        f_out = netcdf.netcdf_file(p_dict['outfile'], "r")
        mom3_init = f_out.variables["drad_m3"][0,:]
        rhod_init = f_out.variables["rhod"][0]  # NOTE(review): unused below
        chem_rho = getattr(f_out, "chem_rho")
        # initial dry mass of aerosol [kg/kg dry air]:
        # sum over bins of the 3rd moment, times 4/3*pi*rho.
        ini = mom3_init.sum() * 4./3 * math.pi * chem_rho
        out_sd_conc.append(sd_conc)
        out_m3_dry.append(ini)
    data = {"sd_conc" : out_sd_conc, "dry_mass" : out_m3_dry}
    # removing all netcdf files after all tests
    def removing_files():
        for file in glob.glob("convergence_test_sd_conc*"):
            subprocess.call(["rm", file])
    request.addfinalizer(removing_files)
    return data
def test_timestep_print(data, eps=3e-3):
    """
    Check that the total dry-aerosol mass (the sum of the 3rd moment of the
    dry radii) does not change too much between runs started with different
    initial super-droplet concentrations.
    """
    masses = np.asarray(data["dry_mass"]).reshape(len(data["dry_mass"]))
    # Mean dry mass over all sd_conc runs; each run must lie within rtol=eps.
    av = masses.sum() / masses.shape[0]
    for mass in masses:
        assert np.isclose(av, mass, atol=0, rtol=eps), "difference: " + str((av - mass) / av)
| [
"ajaruga@igf.fuw.edu.pl"
] | ajaruga@igf.fuw.edu.pl |
ff8f76cc0d6c522541e6954dfadddf86f058148d | d9c24b7e995babaa37a338168e4ec9897f261a09 | /main.py | 287e2109b589341b5961b0ff126fb756809acab0 | [] | no_license | ChaseStruse/SecureFileStorage | 957ab15cb767eb76da41d4b71906a40163801915 | 56561243510177b451c19e8fcd9c9a31ad133d8a | refs/heads/master | 2020-08-28T18:39:43.896399 | 2019-10-27T02:33:28 | 2019-10-27T02:33:28 | 217,787,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,710 | py | import sqlite3
import os
ADMIN_PASS = 'password4321' #super duper hard password
DB_NAME = 'userdb'
def menu():
    """Print the command menu shown between operations."""
    banner = (
        "****************************************",
        "*        WELCOME TO YOUR DATABASE      *",
        "****************************************",
        "* Choose from the following commands:  *",
        "*                                      *",
        "* Add = Adds file to database          *",
        "* Open = Opens file from database      *",
        "* Delete = Deletes file from database  *",
        "* QUIT = Quits the program             *",
        "****************************************",
    )
    for line in banner:
        print(line)
def createTable(csr, database):
    """Create the 'files' table used to track stored files.

    Parameters:
        csr      -- an open sqlite3 cursor
        database -- the sqlite3 connection the cursor belongs to; the new
                    schema is committed before returning
    """
    schema = '''
        CREATE TABLE files
        (
            id INTEGER PRIMARY KEY,
            file_name TEXT,
            file_type TEXT,
            file_password TEXT,
            file_directory TEXT
        )
        '''
    csr.execute(schema)
    database.commit()
    print("Database has been created")
def addFile(fileName, fileType, filePassword, fileDir, csr, db):
    """Insert one file record into the 'files' table and commit.

    The INSERT is parameterized, so caller-supplied values are never
    interpolated into the SQL text.
    """
    record = (fileName, fileType, filePassword, fileDir)
    statement = '''
        INSERT INTO files (file_name, file_type, file_password, file_directory)
        VALUES (?,?,?,?)
        '''
    csr.execute(statement, record)
    db.commit()
def openFile(fileName, csr):
    """Look up a stored file by name and print the first 1 KiB of its contents.

    Parameters:
        fileName -- value of the file_name column to search for
        csr      -- an open sqlite3 cursor on the files database

    Prints a message and returns if no matching row exists (the original
    crashed with a TypeError on the None returned by fetchone()).
    """
    csr.execute(
        '''
        SELECT file_directory
        FROM files
        WHERE file_name=?
        ''', (fileName,)
    )
    row = csr.fetchone()
    if row is None:
        print("No file named " + fileName + " was found in the database.\n")
        return
    print("File that is opening: " + row[0] + "\n")
    # Low-level os.open/os.read mirrors the original behaviour (prints the
    # raw bytes); the descriptor is now closed even if the read fails --
    # previously it was leaked on every call.
    fd = os.open(row[0], os.O_RDONLY)
    try:
        print(os.read(fd, 1024))
    finally:
        os.close(fd)
# ---------------------------------------------------------------------------
# Interactive entry point.
# NOTE(review): the admin password is hard-coded and compared in plain text.
# ---------------------------------------------------------------------------
userEnteredPassword = input("Password: ")
while userEnteredPassword != ADMIN_PASS:
    print("Password was entered incorrectly, please retry.\n")
    userEnteredPassword = input("Password: ")
db = sqlite3.connect(DB_NAME)
cursor = db.cursor()
try:
    # Probe for the 'files' table; on a fresh database the SELECT fails and
    # the schema is created.  (Narrowed from a bare except to sqlite3.Error.)
    cursor.execute(''' SELECT id FROM files ''')
except sqlite3.Error:
    createTable(cursor, db)
menu()
userInput = input("")
while userInput != "QUIT":
    if userInput == "Add":
        fileName = input("Enter the file name: ")
        fileType = input("Enter the file type: ")
        filePassword = input("Enter the file password: ")
        fileDirectory = input("Enter the file directory: ")
        addFile(fileName, fileType, filePassword, fileDirectory, cursor, db)
    elif userInput == "Open":
        fileName = input("Enter file name: ")
        openFile(fileName, cursor)
    # NOTE(review): the menu advertises "Delete", but it is not implemented.
    # Re-prompt after every command; previously an unrecognized command left
    # userInput unchanged and the loop spun forever without reading input.
    menu()
    userInput = input("")
db.close()
| [
"chasestruse@gmail.com"
] | chasestruse@gmail.com |
d0ac379c9f0dfe3d593d8e268be2b785888ab442 | f2c0a279e3641b4b455472c4f960e48abeef0880 | /manage.py | dd171e4f3acf2bf0a4fccd99c1ca7fdba36c42b7 | [] | no_license | vk-ai/DjangoAssignment | 38269e0b774f81664822ac687f3d8763a30b0db8 | 51ab559adbc0f8540775522c071f599aed89b61e | refs/heads/master | 2022-04-30T09:15:12.241582 | 2020-04-28T04:40:52 | 2020-04-28T04:40:52 | 259,443,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line utility.

    Points Django at this project's settings module, then hands the
    process's argv to Django's command dispatcher.
    """
    # Only set the settings module if the caller has not already chosen one.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BolSellerApp.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint, keeping the original as __cause__.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"vinaykm.mails@gmail.com"
] | vinaykm.mails@gmail.com |
9562bc0b7e2dcc38f7a84b31462b6d5fd5598619 | 3c898b1aec7009110c63504d5a56e31914625d1b | /acrylamid/filters/rstx_youtube.py | 1866872d0d44360221e12addf431c60a545739b3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | MagicSword/acrylamid | e294b151ed6305f37fc5a5fdd4f1f0fb999a22f7 | 6f34bc5fb2175af1103aec7a910ef48a6842de03 | refs/heads/master | 2021-01-16T21:30:58.564719 | 2012-06-22T16:00:50 | 2012-06-22T16:45:38 | 4,817,948 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | # -*- encoding: utf-8 -*-
#
# Copyright 2012 posativ <info@posativ.org>. All rights reserved.
# License: BSD Style, 2 clauses. see acrylamid/__init__.py
from docutils import nodes
from docutils.parsers.rst import Directive, directives
match = ['youtube', 'yt']
def align(argument):
    """Validate the ``:align:`` option; only left/center/right are accepted."""
    return directives.choice(argument, ('left', 'center', 'right'))
class YouTube(Directive):
    """reStructuredText directive that creates an embed object to display
    a video from Youtube (:options: are optional).

    Usage example::

        .. youtube:: ZPJlyRv_IGI
           :start: 34
           :align: center
           :height: 1280
           :width: 720
           :ssl:
    """
    required_arguments = 1
    optional_arguments = 0
    option_spec = {
        'height': directives.length_or_unitless,
        'width': directives.length_or_percentage_or_unitless,
        'border': directives.length_or_unitless,
        'align': align,
        'start': int,
        'ssl': directives.flag,
    }
    has_content = False

    def run(self):
        """Build the <iframe> markup and return it as a raw HTML node."""
        # CSS margin values realising each alignment choice.
        alignments = {
            'left': '0',
            'center': '0 auto',
            'right': '0 0 0 auto',
        }
        uri = ('https://' if 'ssl' in self.options else 'http://') \
              + 'www.youtube-nocookie.com/embed/' + self.arguments[0]
        self.options.setdefault('start', 0)
        if self.options['start']:
            # Fix: YouTube only honours the start offset as a ?start= query
            # parameter on the embed URL; the bare start="..." attribute the
            # original emitted on the <iframe> is ignored by browsers.
            uri += '?start=%i' % self.options['start']
        self.options['uri'] = uri
        self.options['align'] = alignments[self.options.get('align', 'center')]
        self.options.setdefault('width', '680px')
        self.options.setdefault('height', '382px')
        self.options.setdefault('border', 0)
        YT_EMBED = """<iframe width="%(width)s" height="%(height)s" src="%(uri)s" \
frameborder="%(border)s" style="display: block; margin: %(align)s;" \
class="video" allowfullscreen></iframe>"""
        return [nodes.raw('', YT_EMBED % self.options, format='html')]
def makeExtension():
    """Entry point used by the filter loader; exposes the directive class."""
    return YouTube
| [
"info@posativ.org"
] | info@posativ.org |
54e305cc17a0c2232390a83ffbdeb8ed103b393e | 219d7cf7cf00b778ff1a5709406c144fcf2132f3 | /First Steps in Coding - Lab/07. Projects Creation.py | 7ca4becdbaa4bc97c0ff32e779c16f999679df79 | [] | no_license | SilviaKoynova/Softuni-Programming-Basics-Python | e8e175419383815c65c4e110fdb2b752d940e887 | 0dfef0850f2cb8471dfee1af89f137be4e887cb8 | refs/heads/main | 2023-07-13T00:35:09.389302 | 2021-08-27T07:43:45 | 2021-08-27T07:43:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | name = input()
projects = int(input())
need_hours = projects * 3
print(f"The architect {name} will need {need_hours} hours to complete {projects} project/s.")
| [
"noreply@github.com"
] | noreply@github.com |
f5d4a03f72cfbfd9afb8e3bd6721a179ada550f4 | a1ee833ae14e978cef8cf2bbabb7dfbf9e01d90b | /11 if_elif_else condition/example05.py | 4a0945ebdbb8ff0f4432d332a48cfdc1a5bdd182 | [] | no_license | AbhishekKunwar17/pythonexamples | eafc157b7b9092993effa9ba30b03b98c94f8262 | 05a739d19c050ed6b434ce02a51c34eb3f317b5a | refs/heads/master | 2023-05-01T18:22:59.738951 | 2021-04-29T19:29:24 | 2021-04-29T19:29:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | var1 = 5 # 5 -10 25
if var1 > 12:
print("var1 is above 12,")
if var1 > 20:
print("and also above 20")
else:
print("var1 is not above 20") | [
"ksrinivas0575@gmail.com"
] | ksrinivas0575@gmail.com |
8b84aac7a2c36c742e100e367bbaa2be7d8bd071 | 43bb8504e1a552ff4fa10975190e4228b0f3ab4a | /calibration.py | a8ae76cc8aa3e9b23f53ff6bb50b48275cef4f9d | [] | no_license | darraghmoran11/FYP_StereoVision | 716d2ed7fa9c8119498a220d35a250382937ebf7 | 6ddd59e71e9064a9edbb69f735a66ecbaa959f06 | refs/heads/master | 2021-01-11T01:03:54.408224 | 2017-04-04T00:39:02 | 2017-04-04T00:39:02 | 71,081,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | import numpy as np
import cv2
import glob
# Termination criteria for the iterative corner refinement:
# stop after 30 iterations or once the correction drops below 0.001.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
# (7 x 6 inner-corner chessboard grid; Z = 0 for the planar target)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
# Every .jpg in the working directory is treated as a calibration image.
images = glob.glob('*.jpg')
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (7,6),None)
    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        # NOTE(review): the refined corners returned by cornerSubPix are
        # discarded; this relies on the input array being refined in place --
        # confirm against the cv2 version in use.
        cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
        imgpoints.append(corners)
        # Draw and display the corners for half a second per image.
        cv2.drawChessboardCorners(img, (7,6), corners,ret)
        cv2.imshow('img',img)
        cv2.waitKey(500)
cv2.destroyAllWindows()
"darraghmoran11@hotmail.com"
] | darraghmoran11@hotmail.com |
8e8df293aaca82bc4d7fcb58a03a63985dc58a74 | a626e80e926593409140089b32b525481e2e3e20 | /alchemy/report/views.py | 665eee1e52880dd0867ffe70037e1bd0963f8fb9 | [] | no_license | zmoog/alchemy | bc88ee68b2687e700d4e3107029fbb4468e23579 | 3f392e7f0a969e00897c10642c355afef6c483dd | refs/heads/master | 2021-07-02T22:05:22.478304 | 2013-02-09T13:21:22 | 2013-02-09T13:21:22 | 55,659 | 2 | 2 | null | 2021-06-10T20:22:53 | 2008-09-23T17:17:35 | Python | UTF-8 | Python | false | false | 4,831 | py | from django.shortcuts import get_object_or_404, render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db.models import Q, Sum
from django.template import RequestContext
import datetime
from cash.models import Account, Transfer
def report(request):
    """
    Redirect to the report for the current month.
    """
    today = datetime.datetime.now()
    target = reverse('report-month',
                     kwargs={'year': today.year, 'month': today.month})
    return HttpResponseRedirect(target)
def report_year(request, year):
    """
    Build the yearly report view for ``year``.
    """
    transfers = Transfer.objects.filter(
        validity_date__year=int(year)).order_by('-validity_date')
    balance, income, expense, rebate, qs = _do_balance(transfers)
    context = {
        'year': int(year),
        'months': qs.dates('validity_date', 'month', order="DESC"),
        'expense_list': expense,
        'income_list': income,
        'rebate_list': rebate,
        'balance': balance,
    }
    return render_to_response(
        'report/report_year.html',
        context,
        context_instance=RequestContext(request))
def report_month(request, year, month):
    """
    Build the monthly report view for ``year``/``month``.
    """
    y, m = int(year), int(month)
    transfers = Transfer.objects.filter(
        validity_date__year=y,
        validity_date__month=m).order_by('-validity_date')
    balance, income, expense, rebate, qs = _do_balance(transfers)
    context = {
        'year': y,
        'month': m,
        'first_day': datetime.date(y, m, 1),
        'months': qs.dates('validity_date', 'month', order="DESC"),
        'expense_list': expense,
        'income_list': income,
        'rebate_list': rebate,
        'balance': balance,
    }
    return render_to_response(
        'report/report_month.html',
        context,
        context_instance=RequestContext(request))
def report_day(request, year, month, day):
    """
    Build the report view for a single day.
    """
    y, m, d = int(year), int(month), int(day)
    transfers = Transfer.objects.filter(
        validity_date__year=y,
        validity_date__month=m,
        validity_date__day=d).order_by('-validity_date')
    balance, income, expense, rebate, qs = _do_balance(transfers)
    # NOTE: unlike the month/year views, the day template is not handed
    # 'rebate_list' (matching the original behaviour).
    context = {
        'year': y,
        'month': m,
        'day': d,
        'first_day': datetime.date(y, m, d),
        'months': qs.dates('validity_date', 'month', order="DESC"),
        'expense_list': expense,
        'income_list': income,
        'balance': balance,
    }
    return render_to_response(
        'report/report_day.html',
        context,
        context_instance=RequestContext(request))
def _do_balance(queryset):
    """
    Classify the transfers in ``queryset`` into income / expense / rebate
    buckets and compute the overall balance.

    Only transfers touching an expense ('ex') or income ('in') account are
    considered.  Returns a 5-tuple
        (balance, income, expense, rebate, final_queryset)
    where income/expense/rebate are dicts holding a running total ('tot')
    and the matching 'transfers'; expense additionally carries per-account
    aggregates ('x' keyed by account id, 'y' as a flat list with 'id'
    injected into each entry).
    """
    income = { 'tot': 0, 'transfers': []}
    expense = { 'tot': 0, 'transfers': [], 'x': {} }
    rebate = { 'tot': 0, 'transfers': []}
    final_queryset = queryset.select_related().filter(
        Q(destination__type='ex') |
        Q(source__type='in') |
        Q(source__type='ex'))
    for transfer in final_queryset:
        # Money flowing INTO an expense account counts as an expense.
        if transfer.destination.type == 'ex':
            expense['transfers'].append(transfer)
            expense['tot'] += transfer.amount
            # Per-account aggregation (dict.has_key: Python 2 code).
            if expense['x'].has_key(transfer.destination.id):
                expense['x'][transfer.destination.id]['tot'] += transfer.amount
            else:
                expense['x'][transfer.destination.id] = { 'name': transfer.destination.name, 'tot': transfer.amount }
        # Money flowing OUT of an income account counts as income.
        if transfer.source.type == 'in':
            income['transfers'].append(transfer)
            income['tot'] += transfer.amount
        # Money flowing OUT of an expense account is a rebate; it also
        # reduces that account's aggregated expense.
        if transfer.source.type == 'ex':
            rebate['transfers'].append(transfer)
            rebate['tot'] += transfer.amount
            if expense['x'].has_key(transfer.source.id):
                expense['x'][transfer.source.id]['tot'] -= transfer.amount
            else:
                expense['x'][transfer.source.id] = { 'name': transfer.source.name, 'tot': -transfer.amount }
    else:
        # for/else: runs once the loop completes (there is no break),
        # flattening the per-account dict into a list with the id inlined.
        expense['y'] = list()
        for key in expense['x'].keys():
            x = expense['x'][key]
            x['id'] = key
            expense['y'].append(x)
    balance = income['tot'] - expense['tot'] + rebate['tot']
    return balance, income, expense, rebate, final_queryset
"maurizio.branca@gmail.com"
] | maurizio.branca@gmail.com |
7de217fd92e8d0892384b908bfab2fc26f614a13 | 7e27c916749fc15cbf59f96b5d7291148ac246cc | /views/grid_display.py | af0df438bb89da5288df809169b110530c19442b | [] | no_license | CellularAutomata110/SupercomputingChallengeExploringSimpleSpatialModelsOfRiverFloods | e422603c537bb933cf3227a285af7889ab2dc9eb | d4be8f36d718e9dad3ef9d83cea9c21585ff9a26 | refs/heads/main | 2023-03-30T09:05:31.398455 | 2021-04-11T04:45:12 | 2021-04-11T04:45:12 | 356,766,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,609 | py | from base import *
import pygame
import math
import numpy
from models.emulated_grid import EmulatedSimulationGrid
'''
Classes for creating and updating pygame surfaces based on simulation data.
'''
class SurfaceDisplay(object):
    '''
    Abstract interface for displays of surface CRNs.

    To plug a custom surface display into the simulator, subclass this and
    override __init__, render, and update_node.

    Concrete subclasses must also expose these attributes:

      * display_width:  total pixel width of the surface display; for a
                        square grid this is
                        max(min_x, <# columns> * <cell width in pixels>).
      * display_height: total pixel height of the surface display; for a
                        square grid this is
                        max(min_y, <# rows> * <cell height in pixels>).
      * grid:           the surface being simulated -- it does not need to
                        be a literal grid, just an iterable collection of
                        nodes (ideally with *some* structure).

    The hosting window reserves some space for its own elements (buttons,
    legend); the min_x and min_y constructor arguments give the minimum
    size your display must fill, or the layout will misbehave.
    '''

    def __init__(self, grid, colormap, min_x = 0, min_y = 0, pixels_per_node=5,
                 display_text = False):
        '''
        Params:
            grid:     object holding the nodes of the surface (e.g. a
                      SquareGrid for a square grid).
            colormap: dict mapping states (strings) to display colors
                      (RGB 3-tuples, or anything else pygame accepts).
            min_x, min_y: minimum pixel size of the surface-display portion
                      of the window; honour these or expect display issues.
            pixels_per_node: scaling factor, usually the pixel size of a
                      single node.
            display_text: when True, blit each node's state text onto it.
        '''
        raise NotImplementedError("You need to override the constructor for Surface.")

    def render(self, parent_surface, x_pos = 0, y_pos = 0):
        '''
        Blit the entire surface onto its parent; called once when the
        simulation starts.

        Params:
            parent_surface: pygame.Surface for the whole window.
            x_pos, y_pos:   upper-left corner of this display relative to
                            parent_surface; stay inside the display region.
        '''
        raise NotImplementedError("You need to override the 'render' method of Surface.")

    def update_node(self, node):
        '''
        Redraw a single node (or do whatever is needed) after its state
        changed; called with the node that changed.
        '''
        raise NotImplementedError("You need to override the 'update_node' method of Surface.")
class SquareGridDisplay(object):
    '''
    Displays a SquareGrid object as a colored grid.
    '''
    def __init__(self, grid, colormap, min_x = 0, min_y = 0, pixels_per_node=5,
                 display_text = False):
        '''
        Parameters:
            grid: The SquareGrid object displayed
            colormap: Dictionary defining what colors are assigned to each state
            min_x, min_y: Minimum x and y sizes, in pixels, of the surface
                            created. If the natural size of the grid isn't big
                            enough, the grid will be centered and whitespace
                            will be added to fill the excess.
            pixels_per_node: Width and height of each node, in pixels, either as
                                a pair of integers (for arbitrary form factors)
                                or as a single integer (for square nodes)
            display_text: If True, will display each node with a text overlay of
                            that node's state. Otherwise, will only display the
                            color of the node.
        '''
        self.debug = False
        # Constants
        self.grid_buffer = 5  # pixel margin around the grid on every side
        # Argument inits
        self.grid = grid
        self.colormap = colormap
        self.min_x = min_x
        self.min_y = min_y
        # Property assignment: also sets node_width/node_height (see below).
        self.pixels_per_node = pixels_per_node
        self.display_text = display_text
        self.recalculate_display_sizes()
    def recalculate_display_sizes(self):
        # Natural size = buffer on both sides + one node-size per grid cell,
        # but never smaller than the minimum required by the host window.
        self.display_width = max((2 * self.grid_buffer +
                                 self.grid.x_size * self.node_width), self.min_x)
        self.display_height = max((2 * self.grid_buffer +
                                 self.grid.y_size * self.node_height), self.min_y)
    def pixels_per_node():
        # Property factory: the locals() returned (doc/fget/fset/fdel) become
        # the keyword arguments of property() below.
        doc = "The number of pixels used to display a single node in the grid."
        def fget(self):
            return (self.node_width, self.node_height)
        def fset(self, value):
            # A single int means square nodes; a 2-sequence sets (w, h).
            if isinstance(value, int):
                self.node_width = value
                self.node_height = value
            elif (isinstance(value, list) or isinstance(value, tuple)) and \
                 len(value) == 2:
                self.node_width = value[0]
                self.node_height = value[1]
            else:
                raise Exception("Invalid argument for pixels_per_node: " +
                                str(value))
            # Display size depends on node size, so keep it in sync.
            self.recalculate_display_sizes()
        def fdel(self):
            del self.node_width
            del self.node_height
        return locals()
    pixels_per_node = property(**pixels_per_node())
    def render(self, parent_surface, x_pos = 0, y_pos = 0):
        '''
        Set up the display and make the first render. This must be called before
        any other updates.
        parent_surface: The surface onto which this grid will be displayed.
        x_pos, y_pos: X and Y coordinates of the upper-left corner of this
                        grid relative to parent_surface.
        '''
        self.x_pos = x_pos
        self.y_pos = y_pos
        # Create display surface (a view into the parent, not a copy).
        if self.debug:
            print("Displaying grid display at position (" + str(x_pos) + "," +
                  str(y_pos) + ") with width " + str(self.display_width) +
                  " and height " + str(self.display_height) + ".")
        self.parent_surface = parent_surface
        self.display_surface = parent_surface.subsurface(
                    (x_pos, y_pos, self.display_width, self.display_height))
        # Initial render: draw every node once.
        for node in self.grid:
            self.update_node(node)
    def update_node(self, node):
        '''
        Redraw a specified node.
        '''
        new_rect = self.make_node_rectangle(node)
        node_color = self.colormap[node.state]
        pygame.draw.rect(self.display_surface, node_color, new_rect)
        if self.display_text:
            # Overlay the state string, centered on the node's rectangle.
            node_text_surface = self.make_node_text(node)
            text_rect = node_text_surface.get_rect()
            text_rect.center = new_rect.center
            self.display_surface.blit(node_text_surface,
                                      text_rect)
    def make_node_rectangle(self, node):
        # Compute the pixel rectangle for this node.  When the grid's
        # natural size is smaller than the minimum, it is centered in the
        # available space; otherwise it sits at the fixed buffer offset.
        x = node.position[0]
        y = node.position[1]
        if self.grid.x_size * self.node_width < self.min_x:
            x_buffer = (self.min_x - self.grid.x_size*self.node_width)/2
            x_pos = x_buffer + x * self.node_width
        else:
            x_pos = self.grid_buffer + x * self.node_width
        if self.grid.y_size * self.node_height < self.min_y:
            y_buffer = (self.min_y - self.grid.y_size*self.node_height)/2
            y_pos = y_buffer + y * self.node_height
        else:
            y_pos = self.grid_buffer + y * self.node_height
        if self.debug:
            print("Creating new rectangle with position (" + str(x_pos) + "," +
                  str(y_pos) + "), height " + str(self.node_height) +
                  ", and width " + str(self.node_width) + ".")
        return pygame.Rect(x_pos, y_pos, self.node_width, self.node_height)
    def make_node_text(self, node):
        # Pick black or white text for contrast against the node's color.
        BLACK = (0,0,0)
        WHITE = (255,255,255)
        node_color = self.colormap[node.state]
        if sum(node_color) < 150:
            text_color = WHITE
        else:
            text_color = BLACK
        # Font size scales with node width (30% of it).
        font = pygame.font.SysFont('monospace', int(0.3*self.pixels_per_node[0]))
        text_surface = font.render(node.state, True, text_color)
        return text_surface
#end class SquareGridDisplay
class HexGridDisplay(object):
    '''
    Displays a HexGrid object as a colored honeycomb.
    '''
    def __init__(self, grid, colormap, min_x = 0, min_y = 0, pixels_per_node=.5,
                 display_text = False):
        '''
        Parameters:
            grid: The HexGrid object displayed
            colormap: Dictionary defining what colors are assigned to each state
            min_x, min_y: Minimum x and y sizes, in pixels, of the surface
                            created. If the natural size of the grid isn't big
                            enough, the grid will be centered and whitespace
                            will be added to fill the excess.
            pixels_per_node: Width and height of each node, in pixels, either as
                                a pair of integers (for arbitrary form factors)
                                or as a single integer (for square nodes)
            display_text: If True, will display each node with a text overlay of
                            that node's state. Otherwise, will only display the
                            color of the node.

        NOTE(review): the default pixels_per_node=.5 is a float, which the
        pixels_per_node setter rejects (it accepts only an int or a
        2-sequence) -- callers presumably always pass an explicit value;
        confirm.
        '''
        self.debug = False
        # Constants
        self.grid_buffer = .5
        # Argument inits
        self.grid = grid
        self.colormap = colormap
        self.min_x = min_x
        self.min_y = min_y
        self.pixels_per_node = pixels_per_node
        self.display_text = display_text
        self.recalculate_display_sizes()
    def recalculate_display_sizes(self):
        # NOTE(review): the grid size is hard-coded (900 x 500); the computed
        # sizes are kept commented out with a "need to change depending on
        # region" note -- this looks like a region-specific hack; confirm.
        #need to change depending on region
        self.total_grid_width = 900 #int(2 * self.grid_buffer + \
                                #(self.grid.x_size+0.5) * self.node_width)
        # Height = top buffer + bottom buffer + (height of one whole hex) +
        #   (row-height for each except the first row)
        self.total_grid_height = 500 #int(2 * self.grid_buffer + \
                                 #self.node_width / math.cos(math.pi/6) +
                                 #(self.grid.y_size-1) * self.node_height)
        # Total height and width must be at least big enough to fit other
        # elements of the UI.
        self.display_width = max(self.total_grid_width, self.min_x)
        self.display_height = max(self.total_grid_height, self.min_y)
    def pixels_per_node():
        # Property factory: locals() (doc/fget/fset/fdel) feeds property().
        doc = "The width, in pixels, of a single hex in the grid. " + \
              "Setting this also sets the row height (the number of vertical "+\
              "pixels added by adding a row)."
        def fget(self):
            return (self.node_width, self.node_height)
        def fset(self, value):
            # NOTE(review): the supplied value is partially ignored --
            # node_width is pinned to 7 (int case) or 3/3 (pair case);
            # looks like a debugging/region hack, confirm before relying on it.
            if isinstance(value, int):
                self.node_width = 7
                self.node_height = value/ 2 / math.tan(math.pi/6)
            elif (isinstance(value, list) or isinstance(value, tuple)) and \
                 len(value) == 2:
                self.node_width = 3#value[0]
                self.node_height = 3#value[1]
            else:
                raise Exception("Invalid argument for pixels_per_node: " +
                                str(value))
            self.recalculate_display_sizes()
        def fdel(self):
            del self.node_width
            del self.node_height
        return locals()
    pixels_per_node = property(**pixels_per_node())
    def render(self, parent_surface, x_pos = 0, y_pos = 0):
        '''
        Set up the display and make the first render. This must be called before
        any other updates.
        parent_surface: The surface onto which this grid will be displayed.
        x_pos, y_pos: X and Y coordinates of the upper-left corner of this
                        grid relative to parent_surface.
        '''
        self.x_pos = x_pos
        self.y_pos = y_pos
        # Create display surface (a view into the parent, not a copy).
        if self.debug:
            print("Displaying grid display at position (" + str(x_pos) + "," +
                  str(y_pos) + ") with width " + str(self.display_width) +
                  " and height " + str(self.display_height) + ".")
        self.parent_surface = parent_surface
        self.display_surface = parent_surface.subsurface(
                    (x_pos, y_pos, self.display_width, self.display_height))
        # Initial render: draw every node once.
        for node in self.grid:
            self.update_node(node)
    def update_node(self, node):
        '''
        Redraw a specified node.
        '''
        new_hex = self.make_node_hex(node)
        # State is coerced int -> round -> str -> numpy.str_ before lookup;
        # presumably to match numpy-string colormap keys for numeric states
        # -- confirm against how the colormap is built.
        node_color = self.colormap[numpy.str_((str(round(int(node.state)))))]
        # Fill the hex, then outline it in black (closed polyline).
        pygame.draw.polygon(self.display_surface, node_color, new_hex)
        pygame.draw.lines(self.display_surface, (0,0,0), True, new_hex)
        if self.display_text:
            node_text_surface = self.make_node_text(node)
            text_rect = node_text_surface.get_rect()
            text_rect.center = self.get_center(node)
            self.display_surface.blit(node_text_surface,
                                      text_rect)
    def make_node_hex(self, node):
        '''
        Returns the list of vertices of the hex at the node's position.
        '''
        x_pos, y_pos = self.get_center(node)
        # a: center-to-vertex distance; b: center-to-edge (apothem-based)
        # offset, both derived from the hex width and 30-degree geometry.
        a = self.node_width * 0.5 / math.cos(math.pi/6.0)
        b = self.node_width * 0.5 * math.tan(math.pi/6.0)
        vertex_list = [(x_pos, y_pos + a),
                       (x_pos + 0.5 * self.node_width, y_pos + b),
                       (x_pos + 0.5 * self.node_width, y_pos - b),
                       (x_pos, y_pos - a),
                       (x_pos - 0.5 * self.node_width, y_pos - b),
                       (x_pos - 0.5 * self.node_width, y_pos + b)]
        # pygame wants integer pixel coordinates.
        vertex_list = list(map(lambda pair: (int(pair[0]), int(pair[1])),
                           vertex_list))
        if self.debug:
            print("Making new polygon (hex) with the following vertices: " + \
                  str(vertex_list))
        return vertex_list
    def get_center(self, node):
        '''
        Returns the coordinates (in pixels) of the center of this node.
        '''
        x = node.position[0]
        y = node.position[1]
        # Grid might be floating in a space required by other UI elements.
        # If so, add a buffer to each side.
        # if self.total_grid_width < self.min_x:
        #     x_buffer = (self.min_x - self.total_grid_width)/2
        # else:
        #     x_buffer = 0
        # if self.total_grid_height < self.min_y:
        #     y_buffer = (self.min_y - self.total_grid_height)/2
        # else:
        #     y_buffer = 0
        # Odd rows are shifted half a hex to the right (honeycomb offset).
        x_pos = (x + 0.5*(y%2) + 0.5) * self.node_width
        y_pos = self.node_width * math.tan(math.pi/6.0) + \
                y * self.node_width / 2.0 / math.tan(math.pi/6.0)
        if self.debug:
            print("Calculated center of node (%d, %d) at (%d, %d)." % \
                  (x, y, x_pos, y_pos))
        return (x_pos, y_pos)
    def make_node_text(self, node):
        # Pick black or white text for contrast against the node's color.
        # NOTE(review): unlike update_node, the raw node.state is used as the
        # colormap key here -- confirm both key forms exist in the colormap.
        BLACK = (0,0,0)
        WHITE = (255,255,255)
        node_color = self.colormap[node.state]
        if sum(node_color) < 150:
            text_color = WHITE
        else:
            text_color = BLACK
        font = pygame.font.SysFont('monospace', 10)
        text_surface = font.render(node.state, True, text_color)
        return text_surface
#end class HexGridDisplay
class ParallelEmulatedSquareGridDisplay(object):
'''
Displays an underlying grid and the grid it emulates (i.e., the underlying
process of a game of life automaton and the automoton it emulates)
side-by-side.
Assumes that each emulated cell is determined by the value of a fixed
position in the underlying grid; that the first character of the
representative cell's state gives the emulated cell's state; and that the
emulated cell's state is constant until the representative cell gains a
new state beginning with anything other than "B".
'''
    def __init__(self, grid, colormap, emulation_colormap, horizontal_buffer,
                 vertical_buffer, cell_height, cell_width,
                 representative_cell_x, representative_cell_y, min_x = 0,
                 min_y = 0, pixels_per_node=5, display_text = False):
        '''
        Parameters:
            grid: The SquareGrid object displayed
            colormap: Dictionary defining what colors are assigned to each state
            emulation_colormap: Same as colormap, but for the emulated grid.
            horizontal_buffer, vertical_buffer:
                        The number of underlying grid nodes on the left/right
                        and top/bottom of the underlying grid that aren't
                        included in the emulation (boundary conditions)
            cell_width, cell_height: The number of underlying grid nodes
                        in an emulated node in each direction.
            representative_cell_x, representative_cell_y:
                        The location, within each emulated cell, of the
                        underlying grid cell containing information about the
                        emulated cell.
            min_x, min_y: Minimum x and y sizes, in pixels, of the surface
                            created. If the natural size of the grid isn't big
                            enough, the grid will be centered and whitespace
                            will be added to fill the excess.
            pixels_per_node: Width and height of each node, in pixels, either as
                                a pair of integers (for arbitrary form factors)
                                or as a single integer (for square nodes)
            display_text: If True, will display each node with a text overlay of
                            that node's state. Otherwise, will only display the
                            color of the node.
        '''
        # Make sure pygame is initialized before any surfaces are created.
        pygame.init()
        # Constants
        self.grid_buffer = 5  # pixel margin around the display
        self.debug = False
        # Argument inits
        self.grid = grid
        self.horizontal_buffer = horizontal_buffer
        self.vertical_buffer = vertical_buffer
        # Note the (height, width) parameter order in the signature.
        self.emulated_cell_height = cell_height
        self.emulated_cell_width = cell_width
        self.representative_cell_x = representative_cell_x
        self.representative_cell_y = representative_cell_y
        self.emulation_colormap = emulation_colormap
        self.colormap = colormap
        self.min_x = min_x
        self.min_y = min_y
        # Property assignment: also derives node_width/node_height.
        self.pixels_per_node = pixels_per_node
        self.display_text = display_text
        self.recalculate_display_sizes()
    def recalculate_display_sizes(self):
        # Width holds TWO grids (underlying + emulated, side by side), hence
        # the factor of 2; both dimensions are clamped to the UI minimums.
        self.display_width = max((self.grid_buffer +
                                 2 * self.grid.x_size * self.node_width ),
                                 self.min_x)
        self.display_height = max((2 * self.grid_buffer +
                                 self.grid.y_size * self.node_height), self.min_y)
    def pixels_per_node():
        # Property factory: the locals() returned here (doc/fget/fset/fdel)
        # become the keyword arguments of property() below.
        doc = "The number of pixels used to display a single node in the grid."
        def fget(self):
            return (self.node_width, self.node_height)
        def fset(self, value):
            # A single int means square nodes; a 2-sequence sets (w, h).
            if isinstance(value, int):
                self.node_width = value
                self.node_height = value
            elif (isinstance(value, list) or isinstance(value, tuple)) and \
                 len(value) == 2:
                self.node_width = value[0]
                self.node_height = value[1]
            else:
                raise Exception("Invalid argument for pixels_per_node: " +
                                str(value))
            # Display size depends on node size, so keep it in sync.
            self.recalculate_display_sizes()
        def fdel(self):
            del self.node_width
            del self.node_height
        return locals()
    pixels_per_node = property(**pixels_per_node())
def render(self, parent_surface, x_pos = 0, y_pos = 0):
'''
Set up the display and make the first render. This must be called before
any other updates.
parent_surface: The surface onto which this grid will be displayed.
x_p os, y_pos: X and Y coordinates of the upper-left corner of this
grid relative to parent_surface.
'''
self.x_pos = x_pos
self.y_pos = y_pos
# Create display surface
if self.debug:
print("Displaying grid display at position (" + str(x_pos) + "," +
str(y_pos) + ") with width " + str(self.display_width) +
" and height " + str(self.display_height) + ".")
self.parent_surface = parent_surface
self.display_surface = parent_surface.subsurface(
(x_pos, y_pos, self.display_width, self.display_height))
# Initial render
for x in range(self.grid.x_size):
for y in range(self.grid.y_size):
self.update_node_at_position(x, y)
def update_node_at_position(self, x, y):
self.update_node(self.grid.getnode(x,y))
def update_node(self, node):
'''
Redraw a specified node, and its emulated node if that emulated node
changed state
'''
new_rect = self.make_node_rectangle(node)
#print("HELLLLOOOO")
#print(node.state)
node_color = self.colormap[node.state]
pygame.draw.rect(self.display_surface, node_color, new_rect)
if self.display_text:
node_text_surface = self.make_node_text(node)
text_rect = node_text_surface.get_rect()
text_rect.center = new_rect.center
self.display_surface.blit(node_text_surface,
text_rect)
# Update the emulated node if necessary
x = (node.position[0]-self.horizontal_buffer) % self.emulated_cell_width
y = (node.position[1]-self.vertical_buffer) % self.emulated_cell_height
if x == self.representative_cell_x and y == self.representative_cell_y:
represented_x = (node.position[0]-self.horizontal_buffer) \
/ self.emulated_cell_width
represented_y = (node.position[1]-self.vertical_buffer) \
/ self.emulated_cell_height
new_rect = self.make_emulated_node_rectangle(represented_x,
represented_y,
node.state[0])
if node.state[0] in self.emulation_colormap:
node_color = self.emulation_colormap[node.state[0]]
else:
node_color = self.emulation_colormap['B']
pygame.draw.rect(self.display_surface, node_color, new_rect)
    def make_node_rectangle(self, node):
        # Build the pygame.Rect for `node` in the left-hand (raw) grid.
        x = node.position[0]
        y = node.position[1]
        # NOTE(review): display_width is max(natural_width, min_x) (see
        # recalculate_display_sizes), so display_width < min_x can never be
        # true and this centering branch looks dead -- confirm intent.
        if self.display_width < self.min_x:
            x_buffer = (self.min_x - self.grid.x_size*self.node_width*2 + \
                        self.horizontal_buffer)/2
            x_pos = x_buffer + x * self.node_width
        else:
            x_pos = self.grid_buffer + x * self.node_width
        # Same centering logic for the vertical axis (same caveat as above).
        if self.display_height < self.min_y:
            y_buffer = (self.min_y-self.grid.y_size*self.node_height)/2
            y_pos = y_buffer + y * self.node_height
        else:
            y_pos = self.grid_buffer + y * self.node_height
        if self.debug:
            print("Creating new rectangle with position (" + str(x_pos) + "," +
                  str(y_pos) + "), height " + str(self.node_height) +
                  ", and width " + str(self.node_width) + ".")
        # The /2 above yields a float on Python 3; pygame.Rect truncates it.
        return pygame.Rect(x_pos, y_pos, self.node_width, self.node_height)
    def make_emulated_node_rectangle(self, x, y, state):
        # Build the pygame.Rect for emulated cell (x, y) in the right-hand
        # (emulated) grid.  Each emulated cell covers emulated_cell_width x
        # emulated_cell_height raw nodes.  `state` is currently unused here.
        # NOTE(review): as in make_node_rectangle, display_width is
        # max(natural, min_x), so this centering branch looks unreachable.
        if self.display_width < self.min_x:
            x_buffer = (self.min_x - self.grid.x_size*self.node_width*2 + \
                        self.horizontal_buffer)/2
        else:
            x_buffer = self.grid_buffer
        # Offset past the raw grid plus the horizontal gap between grids.
        x_pos = x_buffer + self.grid.x_size*self.node_width + \
                self.node_width*self.horizontal_buffer + \
                x*self.node_width*self.emulated_cell_width
        if self.display_height < self.min_y:
            y_buffer = (self.min_y-self.grid.y_size*self.node_height)/2
        else:
            y_buffer = self.grid_buffer
        y_pos = y_buffer + (self.node_height * self.vertical_buffer) + \
                (y * self.node_height * self.emulated_cell_height)
        return pygame.Rect(x_pos, y_pos,
                           self.node_width*self.emulated_cell_width,
                           self.node_height*self.emulated_cell_height)
def make_node_text(self, node):
BLACK = (0,0,0)
WHITE = (255,255,255)
node_color = self.colormap[node.state]
if sum(node_color) < 150:
text_color = WHITE
else:
text_color = BLACK
font = pygame.font.SysFont('monospace', 10)
text_surface = font.render(node.state, True, text_color)
return text_surface
#end class ParallelEmulatedSquareGridDisplay | [
"noreply@github.com"
] | noreply@github.com |
8f9ff357b818a3c095780bb3540d6dd24fb3c113 | be0f79609f0edbcab6d793967f839dae385e4f4c | /leetcode_problems/file2/test40.py | 021e18433943ccaee46b625a9a453463780bf67a | [] | no_license | vt-dataengineer/leetcode | 73c9044b7ccb7b69b41987febd6a05a1a70a05be | cb88e3e4c546f9b99a343b5fc58e487233a2fac5 | refs/heads/master | 2022-11-21T05:56:02.165724 | 2020-07-20T06:47:15 | 2020-07-20T06:47:15 | 281,036,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | # Input: "leetcode"
# Output: "leotcede"
if __name__=='__main__':
s = 'leetcode'
vow = ['a','e','i','o','u']
for x in s:
if x in vow:
print(x)
| [
"vishalthakur1712@gmail.com"
] | vishalthakur1712@gmail.com |
46775a2d62f1f88e33a39be1b2ef415a002147ee | 370c4c61262b6e05303e4f71466b4621435e4474 | /001.py | b1f198a70bdd76ef99e9afce7cf0d79592a7e185 | [] | no_license | GustavoJatene/practice | b2a0b83fc986d47c8f148cc909969a84cee84d85 | 41b18be18630bf7bcd0e0be3cbe19156b16acc72 | refs/heads/master | 2023-03-10T04:36:32.905721 | 2021-02-25T15:44:16 | 2021-02-25T15:44:16 | 272,723,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | import emoji
# Print "OLÁ MUNDO" followed by an earth emoji resolved from its :alias:.
print(emoji.emojize('OLÁ MUNDO :earth_americas:',use_aliases=True))
"jateneg@gmail.com"
] | jateneg@gmail.com |
b57d3ac479d3d244d3fc9de810ed1fea625cc212 | 07dade27902fd7999430feb8f04216e86eae2b30 | /pyseir/load_data.py | 0906cdb60c4bc73012035c852a53801148db7916 | [
"MIT"
] | permissive | paulirish/covid-data-model | a96f90a37cf9674bdb77f1bd07df953e2de34a59 | b93ae5d598b8378f9c1f2698e3162f87136cde74 | refs/heads/master | 2022-04-25T17:38:21.547505 | 2020-04-21T21:15:56 | 2020-04-21T21:15:56 | 257,725,678 | 1 | 0 | MIT | 2020-04-21T21:59:50 | 2020-04-21T21:59:49 | null | UTF-8 | Python | false | false | 18,762 | py | import os
import logging
import pandas as pd
import numpy as np
import urllib.request
import requests
import re
import io
import us
import zipfile
import json
from libs.datasets import NYTimesDataset
from libs.datasets.dataset_utils import AggregationLevel
from libs.datasets import CovidTrackingDataSource
from pyseir.utils import get_run_artifact_path, RunArtifact
from functools import lru_cache
from enum import Enum
# States whose hospitalization feeds are known-bad and must be skipped.
FAULTY_HOSPITAL_DATA_STATES = ('IN',)
# Local cache directory for downloaded datasets: <repo root>/pyseir_data.
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'pyseir_data')
class HospitalizationDataType(Enum):
    """Flavor of hospitalization time series available for a region."""
    # Running total of all hospital admissions to date.
    CUMULATIVE_HOSPITALIZATIONS = 'cumulative_hospitalizations'
    # Census of patients hospitalized on each date.
    CURRENT_HOSPITALIZATIONS = 'current_hospitalizations'
def hampel_filter__low_outliers_only(input_series, window_size=5, n_sigmas=2):
    """
    Hampel filter that suppresses *downward* outliers only.

    Each point is compared with the median of a window of its neighbors;
    points that drop more than n_sigmas scaled median absolute deviations
    *below* that median are replaced by the window median.  Upward spikes
    are left untouched.  This is a very conservative filter used to clean
    dips in counts that should be positive (e.g. Arkansas case data).

    Parameters
    ----------
    input_series: array
        Series to filter; not modified in place.
    window_size: int
        Half-width of the neighbor window.
    n_sigmas: float
        Rejection threshold in units of the scaled MAD.

    Returns
    -------
    filtered: array
        Copy of input_series with low outliers replaced by window medians.
    indices: list(int)
        Positions that were replaced.
    """
    GAUSSIAN_SCALE = 1.4826  # converts the MAD into a std-dev estimate
    filtered = input_series.copy()
    replaced = []
    for idx in range(window_size, len(input_series) - window_size):
        window = input_series[(idx - window_size):(idx + window_size)]
        window_median = np.median(window)
        scaled_mad = GAUSSIAN_SCALE * np.median(np.abs(window - window_median))
        # Only reject points that fall *below* the local median.
        if window_median - input_series[idx] > n_sigmas * scaled_mad:
            filtered[idx] = window_median
            replaced.append(idx)
    return filtered, replaced
def load_zip_get_file(url, file, decoder='utf-8'):
    """
    Load a zipfile from a URL and extract a single file. Note that this is
    not ideal and may fail for large files since the files must fit in memory.

    Parameters
    ----------
    url: str
        URL to read from.
    file: str
        Filename to pull out of the zipfile.
    decoder: str
        Usually None for raw bytes or 'utf-8', or 'latin1'

    Returns
    -------
    file_buffer: io.BytesIO or io.StringIO
        The file buffer for the requested file if decoder is None else return
        a decoded StringIO.
    """
    # Context managers close the network connection and the zip handle even
    # when the archive is corrupt or `file` is missing (the previous
    # implementation leaked both on every call).
    with urllib.request.urlopen(url) as remote:
        zip_in_memory = io.BytesIO(remote.read())
    with zipfile.ZipFile(zip_in_memory) as zf:
        byte_string = zf.read(file)
    if decoder:
        return io.StringIO(byte_string.decode(decoder))
    return io.BytesIO(byte_string)
def cache_county_case_data():
    """
    Download the NYT county-level covid case time series and cache it to
    DATA_DIR as a pickle, keeping only rows with a FIPS code.
    """
    logging.info('Downloading covid case data')
    # NYT dataset
    nyt_url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'
    df = pd.read_csv(nyt_url, dtype='str')
    df['date'] = pd.to_datetime(df['date'])
    for col in ('cases', 'deaths'):
        df[col] = df[col].astype(int)
    df = df[df['fips'].notnull()]
    df.to_pickle(os.path.join(DATA_DIR, 'covid_case_timeseries.pkl'))
def cache_hospital_beds():
    """
    Download the Definitive Healthcare hospital bed / ICU capacity GeoJSON
    and cache its per-facility properties to DATA_DIR as a pickle.

    See: https://services7.arcgis.com/LXCny1HyhQCUSueu/arcgis/rest/services/Definitive_Healthcare_Hospitals_Beds_Hospitals_Only/FeatureServer/0
    """
    logging.info('Downloading ICU capacity data.')
    url = 'http://opendata.arcgis.com/datasets/f3f76281647f4fbb8a0d20ef13b650ca_0.geojson'
    tmp_file = urllib.request.urlretrieve(url)[0]
    with open(tmp_file) as f:
        geojson = json.load(f)
    records = [feature['properties'] for feature in geojson['features']]
    df = pd.DataFrame(records)
    # Normalize column names and drop identifiers we never use.
    df.columns = [name.lower() for name in df.columns]
    df = df.drop(['objectid', 'state_fips', 'cnty_fips'], axis=1)
    df.to_pickle(os.path.join(DATA_DIR, 'icu_capacity.pkl'))
def cache_mobility_data():
    """
    Download the Descartes Labs mobility dataset and cache county-level
    m50 and m50_index series (one numpy array per county) to DATA_DIR.

    Source: https://github.com/descarteslabs/DL-COVID-19
    """
    logging.info('Downloading mobility data.')
    url = 'https://raw.githubusercontent.com/descarteslabs/DL-COVID-19/master/DL-us-mobility-daterow.csv'

    dtypes = {
        'country_code': str,
        'admin_level': int,
        'admin1': str,
        'admin2': str,
        'fips': str,
        'samples': int,
        'm50': float,
        'm50_index': float}
    df = pd.read_csv(filepath_or_buffer=url, parse_dates=['date'], dtype=dtypes)

    # admin_level == 2 rows are counties; aggregate each measure into one
    # list (then numpy array) of daily values per county.
    counties = df.query('admin_level == 2')
    for measure in ('m50', 'm50_index'):
        series = counties[['fips', 'date', measure]] \
            .groupby('fips').agg(list).reset_index()
        series[measure] = series[measure].apply(np.array)
        series.to_pickle(os.path.join(DATA_DIR, 'mobility_data__%s.pkl' % measure))
def cache_public_implementations_data():
    """
    Cache county-level public intervention implementation dates to DATA_DIR.

    Pulled from https://github.com/JieYingWu/COVID-19_US_County-level_Summaries
    """
    logging.info('Downloading public implementations data')
    url = 'https://raw.githubusercontent.com/JieYingWu/COVID-19_US_County-level_Summaries/master/raw_data/national/public_implementations_fips.csv'

    # SECURITY NOTE(review): verify=False disables TLS certificate
    # verification for this download -- confirm it is still required.
    data = requests.get(url, verify=False).content.decode('utf-8')
    # Upstream dates look like "23-Mar"; append the year so pandas can parse
    # them.
    data = re.sub(r',(\d+)-(\w+)', r',\1-\2-2020', data)  # NOTE: This assumes the year 2020
    date_cols = [
        'stay at home',
        '>50 gatherings',
        '>500 gatherings',
        'public schools',
        'restaurant dine-in',
        'entertainment/gym',
        'Federal guidelines',
        'foreign travel ban']
    df = pd.read_csv(io.StringIO(data), parse_dates=date_cols, dtype='str').drop(['Unnamed: 1', 'Unnamed: 2'], axis=1)
    # Normalize column names into valid identifiers (drop '>', replace
    # spaces and '/').
    df.columns = [col.replace('>', '').replace(' ', '_').replace('/', '_').lower() for col in df.columns]
    # Zero-pad FIPS codes to the canonical 5 digits.
    df.fips = df.fips.apply(lambda x: x.zfill(5))
    df.to_pickle(os.path.join(DATA_DIR, 'public_implementations_data.pkl'))
@lru_cache(maxsize=32)
def load_county_case_data():
    """
    Return the NYT county-level case/death time series for the USA.

    Returns
    -------
    : pd.DataFrame
    """
    timeseries = NYTimesDataset.load().timeseries()
    county_subset = timeseries.get_subset(AggregationLevel.COUNTY, country='USA')
    return county_subset.get_data(country='USA')
@lru_cache(maxsize=1)
def load_state_case_data():
    """
    Return the NYT state-level case/death time series for the USA.

    Returns
    -------
    : pd.DataFrame
    """
    timeseries = NYTimesDataset.load().timeseries()
    state_subset = timeseries.get_subset(AggregationLevel.STATE, country='USA')
    return state_subset.get_data(country='USA')
@lru_cache(maxsize=32)
def load_county_metadata():
    """
    Return county-level metadata (age distributions, populations, etc.)
    from the cached county_metadata.json, with the full state name filled
    in from each county's FIPS prefix.

    Returns
    -------
    : pd.DataFrame
    """
    metadata = pd.read_json(os.path.join(DATA_DIR, 'county_metadata.json'), dtype={'fips': 'str'})
    # The first two digits of a county FIPS identify its state.
    metadata.loc[:, 'state'] = metadata['fips'].apply(
        lambda fips: us.states.lookup(fips[:2]).name)
    return metadata
@lru_cache(maxsize=32)
def load_county_metadata_by_state(state):
    """
    Generate a dataframe that contains county metadata aggregated at state
    level.

    Parameters
    ----------
    state: str or list(str)
        Name of state (or states) to load the metadata for.  Falsy values
        aggregate every state.

    Returns
    -------
    state_metadata: pd.DataFrame
        One row per state: summed age histogram, population-weighted
        densities, total population, list of county FIPS, and age bin edges.
    """
    # aggregate into state level metadata
    state_metadata = load_county_metadata()

    if state:
        state = [state] if not isinstance(state, list) else state
        state_metadata = state_metadata[state_metadata.state.isin(state)]

    # Convert per-capita densities into totals so they can be summed across
    # counties; they are converted back to weighted averages below.
    density_measures = ['housing_density', 'population_density']
    for col in density_measures:
        state_metadata.loc[:, col] = state_metadata[col] * state_metadata['total_population']

    # Sum the per-county age histograms into one histogram per state.
    age_dist = state_metadata.groupby('state')['age_distribution'] \
        .apply(lambda l: np.stack(np.array(l)).sum(axis=0))
    density_info = state_metadata.groupby('state').agg(
        {'population_density': lambda x: sum(x),
         'housing_density': lambda x: sum(x),
         'total_population': lambda x: sum(x),
         'fips': list})
    # Age bin edges: keep one set per state (first county's).
    age_bins = state_metadata[['state', 'age_bin_edges']].groupby('state').first()
    state_metadata = pd.concat([age_dist, density_info, age_bins], axis=1)

    # Back to population-weighted average densities.
    for col in density_measures:
        state_metadata[col] /= state_metadata['total_population']

    return state_metadata
@lru_cache(maxsize=32)
def load_ensemble_results(fips):
    """
    Retrieve ensemble results for a given state or county fips code.

    Parameters
    ----------
    fips: str
        State or county FIPS to load.

    Returns
    -------
    ensemble_results: dict
    """
    path = get_run_artifact_path(fips, RunArtifact.ENSEMBLE_RESULT)
    with open(path) as f:
        return json.load(f)
@lru_cache(maxsize=32)
def load_county_metadata_by_fips(fips):
    """
    Generate a dictionary for a county which includes county metadata merged
    with hospital capacity data.

    Parameters
    ----------
    fips: str

    Returns
    -------
    county_metadata: dict
        Dictionary of metadata for the county. The keys are:
            ['state', 'county', 'total_population', 'population_density',
            'housing_density', 'age_distribution', 'age_bin_edges',
            'num_licensed_beds', 'num_staffed_beds', 'num_icu_beds',
            'bed_utilization', 'potential_increase_in_bed_capac']
    """
    county_metadata = load_county_metadata()
    hospital_bed_data = load_hospital_data()

    # Not all counties have hospital data.
    # Hospital rows are per-facility; sum them to county level.
    # NOTE(review): summing 'bed_utilization' (a rate) across facilities
    # looks suspect -- confirm the intended aggregation.
    hospital_bed_data = hospital_bed_data[
        ['fips',
         'num_licensed_beds',
         'num_staffed_beds',
         'num_icu_beds',
         'bed_utilization',
         'potential_increase_in_bed_capac']].groupby('fips').sum()

    # Left-merge so counties without hospital rows still appear (with NaNs).
    county_metadata_merged = county_metadata.merge(hospital_bed_data, on='fips', how='left').set_index('fips').loc[fips].to_dict()
    # Normalize non-string scalar values to plain Python floats.
    for key, value in county_metadata_merged.items():
        if np.isscalar(value) and not isinstance(value, str):
            county_metadata_merged[key] = float(value)
    return county_metadata_merged
@lru_cache(maxsize=32)
def load_new_case_data_by_fips(fips, t0):
    """
    Get daily new case and death counts for a county.

    Parameters
    ----------
    fips: str
        County fips to lookup.
    t0: datetime
        Datetime to offset by.

    Returns
    -------
    times: array(float)
        List of float days since t0 for the case and death counts below
    observed_new_cases: array(int)
        Array of new cases observed each day.
    observed_new_deaths: array(int)
        Array of new deaths observed each day.
    """
    all_county_data = load_county_case_data()
    county_data = all_county_data[all_county_data['fips'] == fips]
    times_new = (county_data['date'] - t0).dt.days.iloc[1:]
    # Daily increments of the cumulative series.
    new_cases = np.diff(county_data['cases'].values)
    new_deaths = np.diff(county_data['deaths'].values)
    # Reporting corrections occasionally produce tiny negative daily
    # counts; clip them to zero to keep downstream fitting simple.
    return times_new, new_cases.clip(min=0), new_deaths.clip(min=0)
@lru_cache(maxsize=32)
def load_hospitalization_data(fips, t0):
    """
    Obtain hospitalization data for a county. We clip because there are
    sometimes negatives either due to data reporting or corrections in case
    count. These are always tiny so we just make downstream easier to work
    with by clipping.

    Parameters
    ----------
    fips: str
        County fips to lookup.
    t0: datetime
        Datetime to offset by.

    Returns
    -------
    times: array(float)
        List of float days since t0 for the hospitalization data.
    observed_hospitalizations: array(int)
        Array of hospitalizations observed each day.
    type: HospitalizationDataType
        Specifies cumulative or current hospitalizations.
    """
    hospitalization_data = CovidTrackingDataSource.local().timeseries()\
        .get_subset(AggregationLevel.COUNTY, country='USA', fips=fips) \
        .get_data(country='USA', fips=fips)

    if len(hospitalization_data) == 0:
        return None, None, None

    times_new = (hospitalization_data['date'].dt.date - t0.date()).dt.days.values

    # Prefer the daily census when available; fall back to cumulative.
    if (hospitalization_data['current_hospitalized'] > 0).any():
        return times_new, \
               hospitalization_data['current_hospitalized'].values.clip(min=0), \
               HospitalizationDataType.CURRENT_HOSPITALIZATIONS
    elif (hospitalization_data['cumulative_hospitalized'] > 0).any():
        cumulative = hospitalization_data['cumulative_hospitalized'].values.clip(min=0)
        # Reporting glitches occasionally make the cumulative series dip.
        # Enforce monotonicity with a reverse running-minimum: the previous
        # single forward pass could leave multi-step dips non-monotone
        # (e.g. [5, 4, 3] became [4, 3, 3]).
        cumulative = np.minimum.accumulate(cumulative[::-1])[::-1]
        return times_new, cumulative, HospitalizationDataType.CUMULATIVE_HOSPITALIZATIONS
    else:
        return None, None, None
@lru_cache(maxsize=32)
def load_hospitalization_data_by_state(state, t0):
    """
    Obtain hospitalization data for a state. We clip because there are
    sometimes negatives either due to data reporting or corrections in case
    count. These are always tiny so we just make downstream easier to work
    with by clipping.

    Parameters
    ----------
    state: str
        State to lookup.
    t0: datetime
        Datetime to offset by.

    Returns
    -------
    times: array(float) or NoneType
        List of float days since t0 for the hospitalization data.
    observed_hospitalizations: array(int) or NoneType
        Array of hospitalizations observed each day.
    type: HospitalizationDataType
        Specifies cumulative or current hospitalizations.
    """
    abbr = us.states.lookup(state).abbr
    hospitalization_data = CovidTrackingDataSource.local().timeseries()\
        .get_subset(AggregationLevel.STATE, country='USA', state=abbr) \
        .get_data(country='USA', state=abbr)

    # Skip states whose feeds are known to be unreliable.
    if len(hospitalization_data) == 0 or abbr in FAULTY_HOSPITAL_DATA_STATES:
        return None, None, None

    times_new = (hospitalization_data['date'].dt.date - t0.date()).dt.days.values

    # Prefer the daily census when available; fall back to cumulative.
    if (hospitalization_data['current_hospitalized'] > 0).any():
        return times_new, \
               hospitalization_data['current_hospitalized'].values.clip(min=0), \
               HospitalizationDataType.CURRENT_HOSPITALIZATIONS
    elif (hospitalization_data['cumulative_hospitalized'] > 0).any():
        cumulative = hospitalization_data['cumulative_hospitalized'].values.clip(min=0)
        # Reporting glitches occasionally make the cumulative series dip.
        # Enforce monotonicity with a reverse running-minimum: the previous
        # single forward pass could leave multi-step dips non-monotone
        # (e.g. [5, 4, 3] became [4, 3, 3]).
        cumulative = np.minimum.accumulate(cumulative[::-1])[::-1]
        return times_new, cumulative, HospitalizationDataType.CUMULATIVE_HOSPITALIZATIONS
    else:
        return None, None, None
@lru_cache(maxsize=32)
def load_new_case_data_by_state(state, t0):
    """
    Get data for new cases at state level, with Hampel-filtered low
    outliers (e.g. negative reporting corrections) dropped from the case
    series.

    Parameters
    ----------
    state: str
        State full name.
    t0: datetime
        Datetime to offset by.

    Returns
    -------
    times: array(float)
        List of float days since t0 for the case and death counts below
    observed_new_cases: array(int)
        Array of new cases observed each day.
    observed_new_deaths: array(int)
        Array of new deaths observed each day.
    """
    _state_case_data = load_state_case_data()
    state_case_data = _state_case_data[_state_case_data['state'] == us.states.lookup(state).abbr]
    times_new = (state_case_data['date'] - t0).dt.days.iloc[1:]
    observed_new_cases = state_case_data['cases'].values[1:] - state_case_data['cases'].values[:-1]
    observed_new_deaths = state_case_data['deaths'].values[1:] - state_case_data['deaths'].values[:-1]

    _, filter_idx = hampel_filter__low_outliers_only(observed_new_cases, window_size=5, n_sigmas=2)
    # Hoist the exclusion set and the times list out of the comprehensions:
    # the original rebuilt list(filter_idx) / list(times_new) on every
    # iteration, which is quadratic in the series length.
    excluded = set(filter_idx)
    keep_idx = np.array([i for i in range(len(times_new)) if i not in excluded])
    times_list = list(times_new)
    times_new = [int(times_list[idx]) for idx in keep_idx]

    return times_new, np.array(observed_new_cases[keep_idx]).clip(min=0), observed_new_deaths.clip(min=0)[keep_idx]
def load_hospital_data():
    """
    Return hospital level data. Note that this must be aggregated by stcountyfp
    to obtain county level estimates.

    Returns
    -------
    : pd.DataFrame
    """
    path = os.path.join(DATA_DIR, 'icu_capacity.pkl')
    return pd.read_pickle(path)
@lru_cache(maxsize=1)
def load_mobility_data_m50():
    """
    Return the cached Descartes Labs mobility data (raw m50, unnormalized).

    Returns
    -------
    : pd.DataFrame
    """
    path = os.path.join(DATA_DIR, 'mobility_data__m50.pkl')
    return pd.read_pickle(path)
@lru_cache(maxsize=1)
def load_mobility_data_m50_index():
    """
    Return the cached mobility data normalized to baseline m50, indexed by
    FIPS.  Per https://github.com/descarteslabs/DL-COVID-19, "normal" m50
    is defined over 2020-02-17 to 2020-03-07.

    Returns
    -------
    : pd.DataFrame
    """
    path = os.path.join(DATA_DIR, 'mobility_data__m50_index.pkl')
    return pd.read_pickle(path).set_index('fips')
@lru_cache(maxsize=1)
def load_public_implementations_data():
    """
    Return the cached public intervention implementation dates, indexed by
    FIPS.

    Returns
    -------
    : pd.DataFrame
    """
    path = os.path.join(DATA_DIR, 'public_implementations_data.pkl')
    return pd.read_pickle(path).set_index('fips')
def load_whitelist():
    """
    Load the whitelist result.

    Returns
    -------
    whitelist: pd.DataFrame
        DataFrame containing a whitelist of product features for counties.
    """
    # The whitelist artifact path does not depend on the fips argument;
    # '06' is a placeholder.
    path = get_run_artifact_path(
        fips='06',
        artifact=RunArtifact.WHITELIST_RESULT)
    return pd.read_json(path, dtype={'fips': str})
def cache_all_data():
    """
    Download every remote dataset and cache it locally under DATA_DIR.
    """
    cache_county_case_data()
    cache_hospital_beds()
    cache_mobility_data()
    cache_public_implementations_data()
if __name__ == '__main__':
    # Running this module directly refreshes every cached dataset.
    cache_all_data()
| [
"noreply@github.com"
] | noreply@github.com |
275f44d420d97869a4286ce6d22606aee4bdb17f | ab2c197a1dd7555008334dcacaea5e655be52c7c | /pcapfile/test/linklayer_test.py | b944a648fbc379ec37942c4fb53f6c49c6194afb | [
"ISC"
] | permissive | joedoyle/pypcapfile | 7d9d2a18fb44636cd5511985dbff526dc98612e5 | 6bfe49a0fb53a99afd69105379b517b87fc11a5a | refs/heads/master | 2021-01-18T00:44:53.330362 | 2015-06-02T14:08:34 | 2015-06-02T14:08:34 | 36,510,907 | 0 | 0 | null | 2015-05-29T14:57:35 | 2015-05-29T14:57:35 | null | UTF-8 | Python | false | false | 1,322 | py | #!/usr/bin/env python
"""
This is the test case for the savefile.
"""
import unittest
from pcapfile import savefile
from pcapfile import linklayer
from pcapfile.protocols.linklayer import ethernet
class TestCase(unittest.TestCase):
"""
Validate the linklayer utility functions.
"""
capfile = None
def init_capfile(self, layers=0):
"""
Initialise capture file.
"""
self.capfile = savefile.load_savefile(open('test_data/test.pcap', 'r'),
layers=layers)
@classmethod
def setUpClass(cls):
"""
Print an intro to identify this test suite when running multiple tests.
"""
print '[+] loading toplevel linklayer utility tests'
def test_constructor_lookup(self):
"""
Ensure the proper validation function is passed from the constructor
lookup.
"""
self.assertEqual(ethernet.Ethernet, linklayer.clookup(1))
def test_lookup(self):
"""
Test the strings returned by the short lookup functions.
"""
self.assertEqual('LINKTYPE_ETHERNET', linklayer.lookup(1),
'invalid long name')
self.assertEqual('ethernet', linklayer.slookup(1),
'invalid short name')
| [
"coder@kyleisom.net"
] | coder@kyleisom.net |
1b764ce502d2ceb934a00dc5bde05d886b4c5859 | 6f6f63efcf69d250b12f3fb76a3c0a343a5a7814 | /ops/polygon_geo/polygon_geo.py | fd802b160f7d69de40791d582f9781ef532dbd7d | [
"MIT"
] | permissive | YangHai-1218/Anchor_free_detection_rotation | ed73bc8b3a43d6328285039eabf7d95d77115b3a | 358d9f5df1beabc7a05a352d2cfa2283b17825a9 | refs/heads/master | 2023-06-04T06:35:02.502907 | 2021-06-22T13:22:11 | 2021-06-22T13:22:11 | 294,360,446 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from . import polygon_geo_cpu
def polygon_iou(poly1, poly2):
    '''
    Compute pairwise IoU between rotated-box polygons by delegating to the
    compiled polygon_geo_cpu extension.

    poly1: det bboxes shape (N,9) the last one is score, ndarray
    poly2: gt bboxes shape (M,8), ndarray
    '''
    return polygon_geo_cpu.polygon_iou(poly1, poly2)
| [
"haiyang@haiyangdeMacBook-Pro.local"
] | haiyang@haiyangdeMacBook-Pro.local |
512fe181a6ee75a9cfb44789babd62b308ce3f28 | 56c70c3972a448d228843d112992fdf8177e1f14 | /peiqi.py | 784926d12370c14644d593e7c9c701103841019f | [] | no_license | XGodLike/BusinessClassification | fb189f0f769a4f2afb8837a6fd9230e1d1b070fa | d4967f678f25a98d4c24bf018e649c87e50b0081 | refs/heads/master | 2020-04-14T19:15:15.366528 | 2019-01-30T02:20:15 | 2019-01-30T02:20:15 | 164,051,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,529 | py |
import turtle as t
# Draws Peppa Pig ("peiqi") with the stdlib turtle module.  Each section
# below positions the pen (pu/goto/seth/fd) and then draws one body part,
# using begin_fill/end_fill for solid shapes.
t.pensize(4)
t.hideturtle()
t.colormode(255)
t.color((255, 155, 192), "pink")
t.setup(840, 500)
t.speed(10)
# Nose
t.pu()
t.goto(-100, 100)
t.pd()
t.seth(-30)
t.begin_fill()
a = 0.4
for i in range(120):
    if 0 <= i < 30 or 60 <= i < 90:
        a = a + 0.08
        t.lt(3)  # turn left 3 degrees
        t.fd(a)  # move forward by step length a
    else:
        a = a - 0.08
        t.lt(3)
        t.fd(a)
t.end_fill()
t.pu()
t.seth(90)
t.fd(25)
t.seth(0)
t.fd(10)
t.pd()
t.pencolor(255, 155, 192)
t.seth(10)
t.begin_fill()
t.circle(5)
t.color(160, 82, 45)
t.end_fill()
t.pu()
t.seth(0)
t.fd(20)
t.pd()
t.pencolor(255, 155, 192)
t.seth(10)
t.begin_fill()
t.circle(5)
t.color(160, 82, 45)
t.end_fill()
# Head
t.color((255, 155, 192), "pink")
t.pu()
t.seth(90)
t.fd(41)
t.seth(0)
t.fd(0)
t.pd()
t.begin_fill()
t.seth(180)
t.circle(300, -30)
t.circle(100, -60)
t.circle(80, -100)
t.circle(150, -20)
t.circle(60, -95)
t.seth(161)
t.circle(-300, 15)
t.pu()
t.goto(-100, 100)
t.pd()
t.seth(-30)
a = 0.4
for i in range(60):
    if 0 <= i < 30 or 60 <= i < 90:
        a = a + 0.08
        t.lt(3)  # turn left 3 degrees
        t.fd(a)  # move forward by step length a
    else:
        a = a - 0.08
        t.lt(3)
        t.fd(a)
t.end_fill()
# Ears
t.color((255, 155, 192), "pink")
t.pu()
t.seth(90)
t.fd(-7)
t.seth(0)
t.fd(70)
t.pd()
t.begin_fill()
t.seth(100)
t.circle(-50, 50)
t.circle(-10, 120)
t.circle(-50, 54)
t.end_fill()
t.pu()
t.seth(90)
t.fd(-12)
t.seth(0)
t.fd(30)
t.pd()
t.begin_fill()
t.seth(100)
t.circle(-50, 50)
t.circle(-10, 120)
t.circle(-50, 56)
t.end_fill()
# Eyes
t.color((255, 155, 192), "white")
t.pu()
t.seth(90)
t.fd(-20)
t.seth(0)
t.fd(-95)
t.pd()
t.begin_fill()
t.circle(15)
t.end_fill()
t.color("black")
t.pu()
t.seth(90)
t.fd(12)
t.seth(0)
t.fd(-3)
t.pd()
t.begin_fill()
t.circle(3)
t.end_fill()
t.color((255, 155, 192), "white")
t.pu()
t.seth(90)
t.fd(-25)
t.seth(0)
t.fd(40)
t.pd()
t.begin_fill()
t.circle(15)
t.end_fill()
t.color("black")
t.pu()
t.seth(90)
t.fd(12)
t.seth(0)
t.fd(-3)
t.pd()
t.begin_fill()
t.circle(3)
t.end_fill()
# Cheek
t.color((255, 155, 192))
t.pu()
t.seth(90)
t.fd(-95)
t.seth(0)
t.fd(65)
t.pd()
t.begin_fill()
t.circle(30)
t.end_fill()
# Mouth
t.color(239, 69, 19)
t.pu()
t.seth(90)
t.fd(15)
t.seth(0)
t.fd(-100)
t.pd()
t.seth(-80)
t.circle(30, 40)
t.circle(40, 80)
# Body
t.color("red", (255, 99, 71))
t.pu()
t.seth(90)
t.fd(-20)
t.seth(0)
t.fd(-78)
t.pd()
t.begin_fill()
t.seth(-130)
t.circle(100, 10)
t.circle(300, 30)
t.seth(0)
t.fd(230)
t.seth(90)
t.circle(300, 30)
t.circle(100, 3)
t.color((255, 155, 192), (255, 100, 100))
t.seth(-135)
t.circle(-80, 63)
t.circle(-150, 24)
t.end_fill()
# Arms
t.color((255, 155, 192))
t.pu()
t.seth(90)
t.fd(-40)
t.seth(0)
t.fd(-27)
t.pd()
t.seth(-160)
t.circle(300, 15)
t.pu()
t.seth(90)
t.fd(15)
t.seth(0)
t.fd(0)
t.pd()
t.seth(-10)
t.circle(-20, 90)
t.pu()
t.seth(90)
t.fd(30)
t.seth(0)
t.fd(237)
t.pd()
t.seth(-20)
t.circle(-300, 15)
t.pu()
t.seth(90)
t.fd(20)
t.seth(0)
t.fd(0)
t.pd()
t.seth(-170)
t.circle(20, 90)
# Feet
t.pensize(10)
t.color((240, 128, 128))
t.pu()
t.seth(90)
t.fd(-75)
t.seth(0)
t.fd(-180)
t.pd()
t.seth(-90)
t.fd(40)
t.seth(-180)
t.color("black")
t.pensize(15)
t.fd(20)
t.pensize(10)
t.color((240, 128, 128))
t.pu()
t.seth(90)
t.fd(40)
t.seth(0)
t.fd(90)
t.pd()
t.seth(-90)
t.fd(40)
t.seth(-180)
t.color("black")
t.pensize(15)
t.fd(20)
# Tail
t.pensize(4)
t.color((255, 155, 192))
t.pu()
t.seth(90)
t.fd(70)
t.seth(0)
t.fd(95)
t.pd()
t.seth(0)
t.circle(70, 20)
t.circle(10, 330)
t.circle(70, 30) | [
"junjunjiejie2008@163.com"
] | junjunjiejie2008@163.com |
68d457de3574e3a383d41023a4b8129e7e990d10 | ebc297aef9ccdab91c3ccfbcb3329405ad4d251c | /tests/test_url_builder.py | af90b9da5e6583cdfc5eda812865044e054caad9 | [] | no_license | DimitriKouliche/ma-scraper | fe0d6f6cecab7b7f8b66c879e82b87da57a1a3e4 | a1fe55e018d0f31967c2de046dbcd172c875ee9d | refs/heads/master | 2023-02-03T07:31:57.001670 | 2020-10-08T11:59:14 | 2020-10-08T11:59:14 | 301,973,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | from scraper.url_builder import UrlBuilder
def test_extract_listings():
    # NOTE(review): the test name says "extract_listings" but the assertion
    # exercises UrlBuilder.get_district_url -- consider renaming for clarity.
    # Verifies the district URL for page 5 of place_id 1 (sale, apartments).
    url_builder = UrlBuilder()
    assert url_builder.get_district_url(5, 1) == "https://www.meilleursagents.com/annonces/achat/search/" \
                                                 "?place_ids=1&transaction_types=TRANSACTION_TYPE.SELL&" \
                                                 "item_types=ITEM_TYPE.APARTMENT&page=5"
| [
"dimitri.kouliche@ias.u-psud.fr"
] | dimitri.kouliche@ias.u-psud.fr |
ac30fb953e3a083b5c9dcb7334591d43bbaa1f57 | d2a2cfcd6629f85652949ea41cb0c19532620204 | /bin/wheel | eeca3ac5fa1d9a0e5a0ec03880f72221f48f75e0 | [] | no_license | thsus2296/test | 6709ba23f14c83b758b2caf6dde98bbb14536c3b | 86fcd2ccc239a7ad20c1a9c763d45cc5121409e1 | refs/heads/master | 2020-06-16T21:55:36.408322 | 2019-07-08T01:10:24 | 2019-07-08T01:10:24 | 195,714,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | #!/home/inyong/dev/trydjango/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    # Standard setuptools console-script shim: strip a trailing
    # "-script.py(w)" or ".exe" suffix from argv[0] so usage messages show
    # the bare command name, then dispatch to wheel's CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"inyong@localhost.localdomain"
] | inyong@localhost.localdomain | |
93a9c900af72de0f3b35f389f388bc247916e3ae | 27a1c516b2caf387fbc0da6af873961e8f3452b9 | /gmplot/drawables/marker_info_window.py | 4f89c8e313627057508b00a9f0f3f7413f449186 | [
"MIT"
] | permissive | tirkarthi/gmplot | b2bccb2662a03f0331f6256e93286c90bd6be254 | 0979c51467e25cfe870668d3990ce7885e317f85 | refs/heads/master | 2022-12-05T17:38:01.355034 | 2020-07-03T19:09:29 | 2020-07-03T19:09:29 | 280,912,510 | 0 | 0 | MIT | 2020-07-19T17:09:20 | 2020-07-19T17:09:19 | null | UTF-8 | Python | false | false | 1,178 | py | class _MarkerInfoWindow(object):
def __init__(self, content):
'''
Args:
content (str): HTML content to be displayed in this info window.
'''
self._content = content.replace("'", "\\'").replace("\n", "\\n") # (escape single quotes and newlines)
def write(self, w, context, marker_name):
'''
Write the info window that attaches to the given marker on click.
Args:
w (_Writer): Writer used to write the info window.
context (_Context): Context used to keep track of what was drawn to the map.
marker_name (str): JavaScript name of the marker that should display this info window.
'''
w.write('''
var {info_window_name} = new google.maps.InfoWindow({{
content: '{content}'
}});
{marker_name}.addListener('click', function() {{
{info_window_name}.open(map, {marker_name});
}});
'''.format(
info_window_name='info_window_%d' % context.num_info_markers,
marker_name=marker_name,
content=self._content
))
w.write()
| [
"frslm@users.noreply.github.com"
] | frslm@users.noreply.github.com |
a468ccdf43c987d187ed161205454ce4aef48536 | 7aa6a5e94c9dea6686a55f316b78a91e8e301ad0 | /31-nato-dictionary/codewars_unittest/__init__.py | 67596a1dec6777ad40ea468b610619eb0038ff53 | [] | no_license | marinasupernova/codewars | 3ede0d61693462551112bee1019c34396d91b2d9 | fcea73a6cf564159a7fc776edc47cf57fab121df | refs/heads/main | 2023-04-26T07:17:56.149954 | 2021-05-31T06:01:13 | 2021-05-31T06:01:13 | 345,687,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | from .test_runner import CodewarsTestRunner
| [
"marina.de.suvorova@gmail.com"
] | marina.de.suvorova@gmail.com |
b9cea96bd1fe04ff6d961295ea869a78c3e571e4 | dfab6798ece135946aebb08f93f162c37dd51791 | /core/luban/db/models.py | e30723a4165928648d1c38b81f47e476985bd1ca | [] | no_license | yxqd/luban | 405f5f7dcf09015d214079fe7e23d644332be069 | 00f699d15c572c8bf160516d582fa37f84ac2023 | refs/heads/master | 2020-03-20T23:08:45.153471 | 2012-05-18T14:52:43 | 2012-05-18T14:52:43 | 137,831,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | # -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2011 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# example base class of model
# from sqlalchemy.ext.declarative import declarative_base
# Base = declarative_base()
# XXX: thinking of use metaclass...
class ModelCollector:
    # NOTE(review): this has the (name, bases, attributes) signature of a
    # metaclass __new__, but the class does not inherit from `type`, so
    # super().__new__(cls, name, ...) would fail if used standalone — it is
    # presumably meant to be mixed into a metaclass (see the "thinking of use
    # metaclass" note above in the file); confirm before instantiating directly.
    def __new__(cls, name, bases, attributes, **kwds):
        # the created class
        created = super().__new__(cls, name, bases, attributes, **kwds)
        # Auto-register every class built through this collector in the
        # module-level model_registry.
        model_registry.register(created)
        return created
class ModelRegistry:
    """Name -> class lookup table for registered db models.

    Registered classes are exposed as attributes of the registry, e.g.
    ``model_registry.MyModel``.
    """

    def __init__(self):
        # Maps a model's class name to the class object itself.
        self.models = {}

    def register(self, cls):
        """Record *cls* under its class name."""
        self.models[cls.__name__] = cls

    def __getattr__(self, name):
        # Attribute access falls back to the registry mapping.
        # NOTE(review): an unknown name raises KeyError rather than
        # AttributeError, which breaks hasattr()-style probing.
        return self.models[name]
model_registry = ModelRegistry()
# method to load all db models in a python sub-package
def loadModels(subpkg):
    """Recursively import every module inside the sub-package *subpkg*.

    Walks the package tree and imports each module found, so that any
    module-level registration side effects (e.g. ModelCollector) run.
    Modules whose spec cannot be located are reported and skipped; import
    errors raised *inside* a located module still propagate, as before.
    """
    import pkgutil
    import importlib.util
    prefix = subpkg.__name__ + '.'
    for _finder, module_name, _is_pkg in pkgutil.walk_packages(subpkg.__path__, prefix):
        # find_spec() replaces the deprecated loader.find_module() probe
        # (removed in Python 3.12); it likewise returns None when the
        # module cannot be located.
        spec = importlib.util.find_spec(module_name)
        if spec is None:
            print("%s not found" % module_name)
        else:
            importlib.import_module(module_name)
    return
# End of file
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
3f12c6a48e213cf41ddf3a04479036c49f11fa1b | b3343d5842f5f2fdee5a726b933709fc1bad7d32 | /top/api/base.py | dadb5f48f18295ee86abdb80dbbfc13992115533 | [] | no_license | llv8/syl | ae307e4f544b5160d80b23b129c0ec65b5900fa1 | 1fdb1011d1339e272accab0d37a9e73e9827daf4 | refs/heads/master | 2021-06-04T09:57:34.139322 | 2016-06-29T08:51:56 | 2016-06-29T08:51:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,416 | py | # -*- coding: utf-8 -*-
'''
Created on 2012-7-3
@author: lihao
'''
try: import httplib
except ImportError:
import http.client as httplib
import urllib
import time
import hashlib
import json
import top
import itertools
import mimetypes
'''
定义一些系统变量
'''
SYSTEM_GENERATE_VERSION = "taobao-sdk-python-20160511"
P_APPKEY = "app_key"
P_API = "method"
P_SESSION = "session"
P_ACCESS_TOKEN = "access_token"
P_VERSION = "v"
P_FORMAT = "format"
P_TIMESTAMP = "timestamp"
P_SIGN = "sign"
P_SIGN_METHOD = "sign_method"
P_PARTNER_ID = "partner_id"
P_CODE = 'code'
P_SUB_CODE = 'sub_code'
P_MSG = 'msg'
P_SUB_MSG = 'sub_msg'
N_REST = '/router/rest'
def sign(secret, parameters):
    #===========================================================================
    # Compute the TOP request signature.
    # @param secret: app secret used to salt the payload on both ends
    # @param parameters: dict of request parameters, or an already-concatenated
    #                    string payload
    # Returns the upper-case hex MD5 digest.
    #===========================================================================
    # If parameters is dict-like, concatenate "<key><value>" pairs sorted by
    # key and wrap the result in the secret, per the TOP signing protocol.
    if hasattr(parameters, "items"):
        # sorted() works on both Python 2 key lists and Python 3 key views;
        # the previous keys().sort() idiom breaks on Python 3.
        keys = sorted(parameters.keys())
        parameters = "%s%s%s" % (secret,
            str().join('%s%s' % (key, parameters[key]) for key in keys),
            secret)
    digest = hashlib.md5(parameters).hexdigest().upper()
    return digest
def mixStr(pstr):
    """Coerce *pstr* to a native string.

    Python 2: unicode text is encoded to UTF-8 bytes; anything else goes
    through str(). Python 3 (where `unicode` does not exist): non-str values
    fall through to str() instead of raising NameError as the original did.
    """
    if isinstance(pstr, str):
        return pstr
    try:
        # Python 2 only: encode unicode text to UTF-8 bytes.
        if isinstance(pstr, unicode):
            return pstr.encode('utf-8')
    except NameError:
        # Python 3: no `unicode` builtin; fall through to str().
        pass
    return str(pstr)
class FileItem(object):
    """A named attachment destined for a multipart upload.

    Attributes:
        filename: name reported for the uploaded file (may be None).
        content: the attachment payload (may be None).
    """

    def __init__(self, filename=None, content=None):
        self.filename = filename
        self.content = content
class MultiPartForm(object):
    """Accumulate the data to be used when posting a form."""
    def __init__(self):
        # (name, value) pairs added via add_field().
        self.form_fields = []
        # (field_name, filename, mimetype, body) tuples added via add_file().
        self.files = []
        # Fixed boundary string that separates the multipart sections.
        self.boundary = "PYTHON_SDK_BOUNDARY"
        return
    def get_content_type(self):
        # Content-Type header value advertising the boundary used by __str__().
        return 'multipart/form-data; boundary=%s' % self.boundary
    def add_field(self, name, value):
        """Add a simple field to the form data."""
        self.form_fields.append((name, str(value)))
        return
    def add_file(self, fieldname, filename, fileHandle, mimetype=None):
        """Add a file to be uploaded.

        fileHandle must be file-like: its entire content is read here.
        The MIME type is guessed from the filename when not supplied.
        """
        body = fileHandle.read()
        if mimetype is None:
            mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
        # mixStr() coerces every piece to a native (byte) string —
        # Python 2 semantics; on Python 3 mixed str/bytes would not join.
        self.files.append((mixStr(fieldname), mixStr(filename), mixStr(mimetype), mixStr(body)))
        return
    def __str__(self):
        """Return a string representing the form data, including attached files."""
        # Build a list of lists, each containing "lines" of the
        # request. Each part is separated by a boundary string.
        # Once the list is built, return a string where each
        # line is separated by '\r\n'.
        parts = []
        part_boundary = '--' + self.boundary
        # Add the form fields
        parts.extend(
            [ part_boundary,
              'Content-Disposition: form-data; name="%s"' % name,
              'Content-Type: text/plain; charset=UTF-8',
              '',
              value,
            ]
            for name, value in self.form_fields
            )
        # Add the files to upload
        parts.extend(
            [ part_boundary,
              'Content-Disposition: file; name="%s"; filename="%s"' % \
                 (field_name, filename),
              'Content-Type: %s' % content_type,
              'Content-Transfer-Encoding: binary',
              '',
              body,
            ]
            for field_name, filename, content_type, body in self.files
            )
        # Flatten the list and add closing boundary marker,
        # then return CR+LF separated data
        flattened = list(itertools.chain(*parts))
        flattened.append('--' + self.boundary + '--')
        flattened.append('')
        return '\r\n'.join(flattened)
class TopException(Exception):
    #===========================================================================
    # Business-level error: raised when the gateway answers with an
    # error_response payload. All fields are populated by the caller
    # (RestApi.getResponse) after parsing that payload.
    #===========================================================================
    def __init__(self):
        self.errorcode = None
        self.message = None
        self.subcode = None
        self.submsg = None
        self.application_host = None
        self.service_host = None

    def __str__(self, *args, **kwargs):
        # Render every field on one line, space separated, via mixStr()
        # so None and non-string values are shown safely.
        fields = (
            "errorcode=" + mixStr(self.errorcode),
            "message=" + mixStr(self.message),
            "subcode=" + mixStr(self.subcode),
            "submsg=" + mixStr(self.submsg),
            "application_host=" + mixStr(self.application_host),
            "service_host=" + mixStr(self.service_host),
        )
        return " ".join(fields)
class RequestException(Exception):
    #===========================================================================
    # Transport-level error: raised by RestApi.getResponse() when the HTTP
    # call to the gateway returns a non-200 status.
    #===========================================================================
    pass
class RestApi(object):
    #===========================================================================
    # Base class for TOP (Taobao Open Platform) REST API requests.
    #
    # Subclasses declare request parameters as instance attributes and
    # override getapiname() (and optionally getMultipartParas() /
    # getTranslateParas()).
    # NOTE(review): this module still targets Python 2 overall (httplib,
    # urllib.urlencode); only py2-compatible cleanups are applied here.
    #===========================================================================
    def __init__(self, domain='gw.api.taobao.com', port=80):
        #=======================================================================
        # Initialize the base class.
        # @param domain: gateway host name or IP the request is sent to
        # @param port:   gateway port
        #=======================================================================
        self.__domain = domain
        self.__port = port
        self.__httpmethod = "POST"
        # Pull app credentials from the globally configured app info, if any.
        # NOTE(review): when top.getDefaultAppInfo() is falsy, __app_key and
        # __secret are never set and getResponse() will fail with
        # AttributeError later — confirm that is the intended behavior.
        if top.getDefaultAppInfo():
            self.__app_key = top.getDefaultAppInfo().appkey
            self.__secret = top.getDefaultAppInfo().secret

    def get_request_header(self):
        """Default HTTP headers for a form-encoded POST request."""
        return {
            'Content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
            "Cache-Control": "no-cache",
            "Connection": "Keep-Alive",
        }

    def set_app_info(self, appinfo):
        #=======================================================================
        # Set the app credentials used to sign requests.
        # @param appinfo: result of top.appinfo(appkey, secret)
        #=======================================================================
        self.__app_key = appinfo.appkey
        self.__secret = appinfo.secret

    def getapiname(self):
        """API method name; overridden by each generated request class."""
        return ""

    def getMultipartParas(self):
        """Names of attributes to send as multipart file parts (override)."""
        return []

    def getTranslateParas(self):
        """Attribute-name -> wire-name translation table (override)."""
        return {}

    def _check_requst(self):
        # Hook for subclasses to validate the request before sending;
        # intentionally a no-op. (Name typo "requst" kept for compatibility.)
        pass

    def getResponse(self, authrize=None, timeout=30):
        #=======================================================================
        # Execute the request and return the decoded JSON response.
        # @param authrize: optional session/access token
        # @param timeout:  socket timeout in seconds
        # Raises RequestException on a non-200 HTTP status and TopException
        # when the gateway returns an error_response payload.
        #=======================================================================
        connection = httplib.HTTPConnection(self.__domain, self.__port, False, timeout)
        sys_parameters = {
            P_FORMAT: 'json',
            P_APPKEY: self.__app_key,
            P_SIGN_METHOD: "md5",
            P_VERSION: '2.0',
            # Millisecond timestamp. int() auto-promotes to long on Python 2,
            # so the removed-in-py3 long() builtin is unnecessary.
            P_TIMESTAMP: str(int(time.time() * 1000)),
            P_PARTNER_ID: SYSTEM_GENERATE_VERSION,
            P_API: self.getapiname(),
        }
        if authrize is not None:
            sys_parameters[P_SESSION] = authrize
        application_parameter = self.getApplicationParameters()
        # The signature covers system and application parameters together.
        sign_parameter = sys_parameters.copy()
        sign_parameter.update(application_parameter)
        sys_parameters[P_SIGN] = sign(self.__secret, sign_parameter)
        connection.connect()
        header = self.get_request_header()
        if self.getMultipartParas():
            # Multipart upload: plain parameters become form fields and every
            # declared multipart attribute is attached as a file part.
            form = MultiPartForm()
            for key, value in application_parameter.items():
                form.add_field(key, value)
            for key in self.getMultipartParas():
                fileitem = getattr(self, key)
                if fileitem and isinstance(fileitem, FileItem):
                    # NOTE(review): MultiPartForm.add_file() calls .read() on
                    # its third argument, so FileItem.content is expected to
                    # be a file-like object here — confirm.
                    form.add_file(key, fileitem.filename, fileitem.content)
            body = str(form)
            header['Content-type'] = form.get_content_type()
        else:
            body = urllib.urlencode(application_parameter)
        url = N_REST + "?" + urllib.urlencode(sys_parameters)
        connection.request(self.__httpmethod, url, body=body, headers=header)
        response = connection.getresponse()
        # Equality (not identity) comparison of the status code; `is not 200`
        # only worked by accident via CPython's small-int cache.
        if response.status != 200:
            raise RequestException('invalid http status ' + str(response.status) + ',detail body:' + response.read())
        result = response.read()
        jsonobj = json.loads(result)
        if "error_response" in jsonobj:
            error = TopException()
            error_response = jsonobj["error_response"]
            if P_CODE in error_response:
                error.errorcode = error_response[P_CODE]
            if P_MSG in error_response:
                error.message = error_response[P_MSG]
            if P_SUB_CODE in error_response:
                error.subcode = error_response[P_SUB_CODE]
            if P_SUB_MSG in error_response:
                error.submsg = error_response[P_SUB_MSG]
            error.application_host = response.getheader("Application-Host", "")
            error.service_host = response.getheader("Location-Host", "")
            raise error
        return jsonobj

    def getApplicationParameters(self):
        """Collect request parameters from this instance's attributes.

        Skips dunder keys, name-mangled internals (_RestApi__*), multipart
        fields and None values. A single leading underscore is stripped so
        reserved words can be used as attribute names.
        """
        application_parameter = {}
        for key, value in self.__dict__.items():
            if not key.startswith("__") and not key in self.getMultipartParas() and not key.startswith("_RestApi__") and value is not None:
                if key.startswith("_"):
                    application_parameter[key[1:]] = value
                else:
                    application_parameter[key] = value
        # Apply the translation table to work around reserved-keyword names.
        # Iterate over a snapshot of the keys: the dict is mutated in the
        # loop, and the original iteritems() iteration raised RuntimeError
        # whenever a translation actually applied.
        translate_parameter = self.getTranslateParas()
        for key in list(application_parameter.keys()):
            if key in translate_parameter:
                application_parameter[translate_parameter[key]] = application_parameter[key]
                del application_parameter[key]
        return application_parameter
| [
"venlv2046@gmail.com"
] | venlv2046@gmail.com |
3e488c1e6d8440ad53c140620d92ef2e370ce8d9 | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cv/crnn_seq2seq_ocr/export.py | 5e3a5b228456dd05aac02549e97f25cb124b1625 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 2,428 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
export.
"""
import os
import numpy as np
from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net, export
from src.attention_ocr import AttentionOCRInfer
from src.model_utils.config import config
from src.model_utils.device_adapter import get_device_id
def get_model():
    '''Build the inference network, load its checkpoint and export the graph.'''
    # Configure the execution device before constructing any network ops.
    context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, device_id=get_device_id())
    net = AttentionOCRInfer(
        config.eval_batch_size,
        int(config.img_width / 4),
        config.encoder_hidden_size,
        config.decoder_hidden_size,
        config.decoder_output_size,
        config.max_length,
        config.dropout_p)
    # Restore trained weights from the checkpoint located next to this script.
    ckpt_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), config.checkpoint_path)
    param_dict = load_checkpoint(ckpt_file)
    load_param_into_net(net, param_dict)
    net.set_train(False)
    print("Checkpoint loading Done!")
    # Placeholder inputs handed to export(): an all-zero image batch, the
    # initial decoder token (GO symbol) and a zeroed decoder hidden state.
    go_id = config.characters_dictionary.go_id
    dummy_images = Tensor(np.zeros(
        (config.eval_batch_size, 3, config.img_height, config.img_width), dtype=np.float32))
    dummy_hidden = Tensor(np.zeros(
        (1, config.eval_batch_size, config.decoder_hidden_size), dtype=np.float16))
    dummy_input = Tensor((np.ones((config.eval_batch_size, 1)) * go_id).astype(np.int32))
    # Positional order must stay (images, decoder_input, decoder_hidden).
    export(net, dummy_images, dummy_input, dummy_hidden,
           file_name=config.file_name, file_format=config.file_format)
if __name__ == '__main__':
get_model()
| [
"chenhaozhe1@huawei.com"
] | chenhaozhe1@huawei.com |
bfcf8014c00faabd8828474c77a4d96497ba9a38 | da54cb56e69ca730156156ca70a720cfbd7723ea | /others/coffee_plackett/mindsdb_acc.py | 78dfe064c969ce8ad89c969d29d4efe3ffbc3f23 | [
"MIT"
] | permissive | setohe0909/mindsdb-examples | b4f7908aa0c96fc0ea0721931f95bc4960bc2867 | 04fc9b4ad9bb8e960a996e1c4eab1e6054bca8ff | refs/heads/master | 2022-11-27T00:21:16.114913 | 2020-08-06T15:33:17 | 2020-08-06T15:33:17 | 296,442,864 | 1 | 0 | MIT | 2020-09-17T21:08:52 | 2020-09-17T21:08:51 | null | UTF-8 | Python | false | false | 237 | py | from mindsdb_native import Predictor
# Train a MindsDB predictor on data.tsv, learning to predict every column
# listed in to_predict from the remaining columns of the dataset.
mdb = Predictor(name='coffee_predictor')
mdb.learn(from_data='data.tsv', to_predict=['Coffe_Malt', 'Chocolat', 'Gold', 'Medium_Barley', 'Dark_Barley', 'Dandelion', 'Beets', 'Chicory_Roots', 'Figs'])
| [
"george@cerebralab.com"
] | george@cerebralab.com |
38eaa72b941d96798a70a6dda2b4584e8d01c6e4 | 031dbb2a3ea47a0483db310db9f98796cc83c500 | /787_Cheapest Flights Within K Stops.py | 6023db6bc0143b13eabbfbbe62dfaa8852018733 | [] | no_license | Shwan-Yu/Data_Structures_and_Algorithms | 429fb127983e32931f2168f44ef1484c1cc4c87f | 9126c2089e41d4d7fd3a204115eba2b5074076ad | refs/heads/master | 2020-03-27T11:46:59.947303 | 2019-08-23T15:15:21 | 2019-08-23T15:15:21 | 146,507,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | class Solution(object):
def findCheapestPrice(self, n, flights, src, dst, K):
"""
:type n: int
:type flights: List[List[int]]
:type src: int
:type dst: int
:type K: int
:rtype: int
"""
if not flights: return 0
dp = [float("inf")] * n
dp[src] = 0
for k in range(K+1):
dp_cur = dp[:]
for (a, i, price) in flights:
dp_cur[i] = min(dp_cur[i], dp[a] + price)
dp = dp_cur
return dp[dst] if dp[dst] != float("inf") else -1
| [
"noreply@github.com"
] | noreply@github.com |
32c8fda23246b3e7a8c4cfa7a0b9b7ab104e3b81 | f03881a594d559949d5b96527a5a6c7a18d89ed7 | /0lab/src/main.py | 017c8d1b3bf5dae645a18d06b13d9f98e1575617 | [] | no_license | pkmxier/AI | 7954c25bd424f666d664e83b607cbba6c67a7444 | 8d80f471c37f4bc69939feea905a1d5c3cb67607 | refs/heads/master | 2020-06-05T04:32:17.027091 | 2019-06-17T10:31:00 | 2019-06-17T10:31:00 | 192,313,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,729 | py | numbers = [
1695128485402083763773247025508607781296883851800934596605324477902989989672390098441314233687038522543796524362932674511659084990877094461405769068305253980165481952276151264282270169307424982451349364468884452626363366332792106697498300154504289109043538314722171490851577202002936469515837846884472685701320555954675270470981711883452876152967636160722991943031737727674462234803964546522349706678813412341712703190842025567979822278829254837642753739546649159,
1342124472692680814864696039831657201341930170537490888948185909193404926961223536479474040666460885376378742819197105748650798865879482304771084236256654384273799702336475969647451700146534430052845833553984165082992842025899656567922774484313656763793347645762888379692094136645059343771155043690219662830401572931468290058814304724439824204259980816710796240835274604462076123461698477384443713751284482994607430755155834233283681253132280989394989471961817143,
1626570592384034401231059859408455254810050911431145580773817320385445678597776695068312796145258618012655448521816316108022278762520239267979899184627816793656580906379077745825130933420781919802013703405155696033529555793998359389173755887366857329131343206148632506258546398761725587714083008828347727071434771944964364829767905778186912771898431501076025378480110831840332479020783206206190405100394982218769269156393531604603604142841091039265485070414672259,
1503349990631350512794289684313078245040080234793749288284388102811529318651334186314509247654009172580006457439356152132860210881356046271699329201253058433036162321674305188211885111628223749659497716686816384033178613797568618927173752800692795231622235434934335500496599315357786595208816213489429090618724729416131746965336847080815801599020573311110511374951097277607310799592097578622368423440718164595720091899427036135539095740807639167195995008580910433,
1250171497372227982026555999675170108947918951378367343470923483104158597216632066586300921566811265776465427395026458151240042366061271512107752586681699923914902061886213022544496783070727061083763996630816279869169194623169255711135422521925444135939014878277515299870536875962948267973899545621728547726545192382593936985574978881305949487523233148677106330650818223443955800622774189936635106363035784698216185461573761714766211607812695281252356674432444279,
1598756544210860812002683252504666631284038535154979340910964824673923578639226397918134429192737005854188177977059177858243855990803981275665690912975534091041361701843465578101733863479781680791655959578320442108371634048374313524202193198694894536452471646868825144743014452957912743920239954473534374422647748020165306769379396190044599513110393062461302839244356754741065320775011514774723155863731595182892822790709843296375075272651902641460504103291775361,
1611765569148804856242867384258680719850010286298191204635154152942043219729044752688614748313611454546572520541736997794001687127300182565577523301374576898637465463079329544247774787283512154983161737116562645744234565727709746364114005583231547967023025414569413122447328040416970845309432217530722433341506166879058135267652737561086239915598233931006566824074208096468336520404693863268533117447729991162579236036416014409092228354404809885779998800076550137,
1417746786978750765038783443201694837693058007147135007928583192144256946704223659049475898042715778235153026085212635256089348105695559658585619676085161346482180413625910718554772936888311138851281270033905970826200499692827568755840858440733991917454028255326174744965696470393644713091831508787116372289467266084564443305079980286049350362289761393863307795187974797187985957533461476088825816395922558727920330066823211210594296302676261707432217348305112187,
1589686907858960532293041950259807409089116075774905924811928369729308516275072914492447303882343619014782861146277474256634429223573812679829885857722514236789773758073602382754296398746760528620467135686904091857677298686613353160501421254539364215543462330529173823254785957892596743971469331053694628704719897511634494080726384449311911326430543608031846181210590808073104043168515626922519393683917981873633828053068169750353137412342101092326814001286079931,
1447056357743040318789862961227509104744799081494678612383291986984923519316446287708049077918224656527429543673229364351887183390807262752423117298211041934655152276599225431751671588895981517419026471542932448198944496908361633132707640798039356570950500607895014150658740782042073630261733525635192524773901831150453706661904186439905176584194604732140346858078193623357352146946016549476780491073212953994660770169348211445199019386069469845306185323206439961,
1262485504020168731000842257581537957328326497522478405002465359648875356810280292244547618070727524417592419776792612058732594852983180148665064058817407866064291179552422627557683886828462061069447032164569235069818669414169882863307032697282802157247652797734392044016320040859257401114524063142894607111829574025600918893253339517061607978068475589931239014683019592991614837523358909806258991077646147246997493894736434495372693444001308001278879395788963879,
1916242087180680156861712994509728052535159091128844805658679025296716559404434664811725619186652725901325774649017594144788360637407178476936316915220758144535681964371311657071750970414707218112222280453951875213591639735019844579642622014874212594838041457800464921182345127496460888250084171815540351211745813542192969624108567504481905290317359415752535077985931507909722167364312980099834023023021212767107040301344392783417575981002593796696074442689507301,
1960344000673448010109966123798259138788312223000110285444138984687043682091918437726564873652655959337927213942829283843615252926281789196372471730892422452230531118265385923148587364956392045025267762404119597838874471039017253236308306374541274375355671500991196394524509192278487473429022067848460150114918996838415401644820324493941862061208584686840594025223786924079444262714095490301772077126395790235999836003971290616988894725373002042174148527448991721,
1688432268535652536976161544225404933352917348466880741646555236080940468369390533777566901374863846088926302716704958253349013465017171687476514345454080829512228091554390695242226222710223271367480753308157792549868681240943730184545304781633011043927327584175094195702062946904306735673354996415907014195550590550472255345961637249641012928019098518193363458415691802243705768503786649882426739768062694678022813527067727278842446759998639312587246098493677573,
1669812028211114876035741593474021802212340044740884701344271270195832085856797314936725609969919892880432470047684464541567265336806788958402625350522072215356887542345091965364412714416147723007824852940439210347535492079930938715301851663504907633271178215986687496281673059795431500200800112337374888657642932011377010779739631990411714885737361714602715397638982646136481630238419481808864438911371804085212946840198558441479176256832689600476668930865222709,
1416908444771934114327236064335695175033855568724514723276090909238902249450761163116179298370097637736609598746978539681190806175023739437824949790203114195447287621192162052863911370030281253311582477023859027984818679108239267600763411891113578181938978341368763677855534685413427437290239276573078365437316891195505584463642669716112936728373088553385902864359218933750627440521470476774178793413097754328106876810009083432628213288672194420754620920548851129,
1510938584302514746068687680359138712084826869531749833816152536107029956694378228665014484809993284680636465045336584670006512692482057168858805251730522412435575537047638759183849437861169582174353100616760861442083338911162982978018654609073487455618344725646474341106448770186119465437436805540314573902315148010605642969399036239279990866481377552631038345038332671300460449150826133047599402952702220438132324240801480483055996850135609380612773088576264939,
1540622509490817949053524649165981982362710138590145680108367660592300942107455572128874985313317596143790569145958434047726221469334726361045510333507710004199345529052825110132429789384438990706927549015685843329393401520154237372079098668324817011296825302825102856143574580211014626530080895107030337934771878554891597286701169436904639362768184131040154699128645755841829998901472679565730259098946642540287337083284263287432616094697258993945232767013781501
]
def gcd(a, b):
while b:
a %= b
a, b = b, a
return a
myNumber = numbers.pop(8)
for num in numbers:
divisor = gcd(myNumber, num)
if divisor != 1:
print(myNumber / divisor, divisor)
break
| [
"noreply@github.com"
] | noreply@github.com |
e6b92eab9e39bcef0174059d7dc7e026466d654c | 8d22dc8acc1eaf46bc14456d9d4bc52d32a71553 | /cogs/owner.py | f60cb43974339ec043d73861df9dc9bf9f8b781a | [] | no_license | OneUXBrasil/OneUX-Bot | 0b47435981faf8336bedca1ff68f434fc141fbf9 | 8593cbe5d3cdf7675efce05275f48e40c4c52fce | refs/heads/main | 2023-01-29T07:20:48.107749 | 2020-12-12T23:34:29 | 2020-12-12T23:34:29 | 320,945,591 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,348 | py | import discord
import time, datetime, psutil, platform
from discord.ext.commands import *
from random import randint
class Owner(Cog):
    # Owner/utility commands cog: uptime, bot info, extension management
    # (load/unload/reload) and an owner-only eval console.
    # User-facing strings are in Portuguese and are left untouched.
    def __init__(self, bot):
        self.bot = bot
        # Wall-clock start used by the uptime/botinfo commands.
        self.start_time = time.time()
        # Embed accent color shared by every command in this cog.
        self.color = 0x7A4EF9
    @cooldown(2, 10, BucketType.user)
    @guild_only()
    @command(aliases=["upt"])
    async def uptime(self, ctx):
        # Report how long the bot has been online as H:MM:SS.
        current_time = time.time()
        difference = int(round(current_time - self.start_time))
        text = str(datetime.timedelta(seconds=difference))
        return await ctx.send(embed=discord.Embed(color=self.color,description="Estou online à %ss" %(text)))
    @cooldown(2, 10, BucketType.user)
    @guild_only()
    @command()
    async def botinfo(self, ctx):
        # Build an embed with basic bot info (id, ping, uptime) and host
        # stats (CPU/memory via psutil, OS, Python/discord.py versions).
        embed = discord.Embed(color=self.color, timestamp= ctx.message.created_at, description=
        "**🔻 Informações Básicas:\n\n**"
        "🔹 **ID:** %s\n🔹 **Ping:** %s\n🔹 **Uptime:** %s\n\n"
        "**🔻 Informações Técnicas:\n\n**"
        "🔸 **CPU:** %s\n🔸 **Memória:** %s\n🔸 **Sistema:** %s\n🔸 **Programação:** %s\n"
        "\n🔸 👑 Bot Desenvolvido por: <@296428519947370499>" % (
        ctx.me.id, str(round(self.bot.latency*1000)) + "ms", str(datetime.timedelta(seconds=int(round(time.time() - self.start_time)))) + "s",
        str(psutil.cpu_percent()) + "%", str(psutil.virtual_memory().percent) + "%", platform.system(),
        f"Python 3.7 (Discord.py {discord.__version__})"))
        embed.set_thumbnail(url=str(ctx.me.avatar_url))
        embed.set_author(icon_url=str(ctx.author.avatar_url), name=ctx.me.name)
        embed.set_footer(text="OneUX © 2020", icon_url=ctx.me.avatar_url)
        return await ctx.send(embed=embed)
    @guild_only()
    @command(aliases=["rld"])
    @is_owner()
    async def reload(self, ctx, ext:str=None):
        # Unload then re-load the cogs.<ext> extension; reports success or
        # the failure reason in an embed.
        try:
            self.bot.unload_extension("cogs.%s" %(ext))
            self.bot.load_extension("cogs.%s" %(ext))
        except Exception as error:
            return await ctx.send(embed=discord.Embed(color=self.color, description="Falha ao recarregar o módulo %s.\nErro: %s" %(ext, error)))
        else:
            return await ctx.send(embed=discord.Embed(color=self.color, description="O módulo %s foi recarregado." %(ext)))
    @guild_only()
    @command(aliases=["uld"])
    @is_owner()
    async def unload(self, ctx, ext:str=None):
        # Unload the cogs.<ext> extension.
        try:
            self.bot.unload_extension("cogs.%s" %(ext))
        except Exception as error:
            return await ctx.send(embed=discord.Embed(color=self.color, description="Falha ao descarregar o módulo %s.\nErro: %s" %(ext, error)))
        else:
            return await ctx.send(embed=discord.Embed(color=self.color, description="O módulo %s foi descarregado." %(ext)))
    @guild_only()
    @command(aliases=["lda"])
    @is_owner()
    async def load(self, ctx, ext:str=None):
        # Load the cogs.<ext> extension.
        try:
            self.bot.load_extension("cogs.%s" %(ext))
        except Exception as error:
            return await ctx.send(embed=discord.Embed(color=self.color, description="Falha ao carregar o módulo %s.\nErro: %s" %(ext, error)))
        else:
            return await ctx.send(embed=discord.Embed(color=self.color, description="O módulo %s foi carregado." %(ext)))
    @is_owner()
    @command(aliases=["evap", "debugger", "debug"])
    async def eval(self, ctx, *, cmd=None):
        # Owner-only debug console: evaluates `cmd` with eval() and echoes
        # the input and result in an embed.
        # NOTE(review): eval() of arbitrary text is inherently dangerous even
        # behind is_owner(); consider restricting or removing in production.
        f = discord.Embed(color=self.color, description=f"**Console OneUX**")
        try:
            if "bot." in cmd:
                # Rewrite "bot." so the expression resolves against self.bot.
                Debugger = (eval(cmd.replace("bot.", "self.bot.")))
                f.add_field(name="Input:", value=f"```py\n{cmd}```")
                f.add_field(name="Output:", value=f"```py\n{Debugger}```")
                await ctx.send(embed=f)
            elif "await " in cmd:
                # Strip "await " from the text, then await the coroutine that
                # eval() returns.
                Debugger = (await eval(cmd.replace("await ", "")))
                f.add_field(name="Input:", value=f"```py\n{cmd}```")
                f.add_field(name="Output:", value=f"```py\n{Debugger}```")
                await ctx.send(embed=f)
            else:
                Debugger = (eval(cmd))
                f.add_field(name="Input:", value=f"```py\n{cmd}```")
                f.add_field(name="Output:", value=f"```py\n{Debugger}```")
                await ctx.send(embed=f)
        except Exception as e:
            f = discord.Embed(color=self.color, description=f"**Console OneUX**")
            f.add_field(name="Input:", value=f"```py\n{cmd}```")
            f.add_field(name="Output:", value=f"```py\n{repr(e)}```")
            await ctx.send(embed=f)
        except discord.HTTPException:
            # NOTE(review): unreachable — the broader `except Exception` above
            # always matches first; also repr() below is called with no
            # argument and would raise TypeError if this ever ran.
            f.add_field(name="Input:", value=f"```py\n{cmd}```")
            f.add_field(name="Output:", value=f"```py\n{repr()}```")
def setup(bot):
bot.add_cog(Owner(bot)) | [
"noreply@github.com"
] | noreply@github.com |
233569d86185a253ad9a4b58e4346737fd05216a | 3a27766a6fcfec26970300a324aec6e6893618a1 | /main/urls.py | 6920733584fe925d68fc8202a1f61de919406406 | [] | no_license | sasori0607/iphone_w | b1011176473f2949c7b112c0d7caec9edd905d22 | 7215646a3f0b6d700e90cd9f9908d8eef3a22bbb | refs/heads/main | 2023-08-16T10:05:39.493365 | 2021-09-26T21:54:32 | 2021-09-26T21:54:32 | 376,524,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | """main URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('content.urls'), name='home'),
path('admin/', admin.site.urls),
path('shop/', include('shop.urls'), name='shop')
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"cacopu060798@gmail.com"
] | cacopu060798@gmail.com |
713f486d6e5e3cec83efb4e96f4bf631482ec52e | 1149320697083005200e8df0ceb34bb3a4fef061 | /sat.spec | 3a0652551e7009a77e826b952cebb65e1ba46f4f | [] | no_license | avillalobosd/SATbot | f1757403bf42a5828f648ec7bc9d9dd249bca1e8 | cd2c78f9e46b393566e74fc9711293918ea3d085 | refs/heads/main | 2023-08-22T11:40:51.262991 | 2021-10-14T18:51:08 | 2021-10-14T18:51:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | spec | # -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['sat.py'],
pathex=['O:\\PAGINAS\\SAT - 2021\\SAT-BOT'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='sat',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True )
| [
"noreply@github.com"
] | noreply@github.com |
b275e025bbf7161c2cb2daeeebb72fded4430968 | 68799236b3d607a91192d1ab7bb9707272444731 | /nodes - generation.py | 4afa150f11eb63237cf34118fe6a185c45eea7a8 | [] | no_license | DiegoL1M4/PROJECT__NIC-Protocol | 6727c17d9599039a1569bb5b84974325845c8a00 | 550bc17fdee16e0074c9a89f04de0bd73c51d676 | refs/heads/master | 2020-06-29T03:03:35.824342 | 2019-08-06T20:43:16 | 2019-08-06T20:43:16 | 200,420,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # -*- coding: utf-8 -*-
import numpy as np

# Simulation constants.  bateria/radio/ch/ch_count are kept for use by the
# companion simulation scripts; only num_nodes and area drive the generation.
bateria = 0.5
radio = 75.0
ch = 0
ch_count = 0
num_nodes = 100
area = 100

# Write one "<id>/<x>/<y>/<flag>" record per node, with coordinates drawn
# uniformly at random inside the area x area square (rounded to 2 decimals).
with open('nodes.txt', 'w') as file:
    for node_id in range(1, num_nodes + 1):
        coord_x = round(np.random.uniform(0, area), 2)
        coord_y = round(np.random.uniform(0, area), 2)
        record = "{}/{}/{}/{}\n".format(node_id, coord_x, coord_y, 0)
        file.write(record)
| [
"noreply@github.com"
] | noreply@github.com |
1b4fe453ca79824f64731d48bde6c32e9fbcf4a9 | 137329b85e6dea7a445bbbbee8b6d1477ff3c3b7 | /account/migrations/0002_auto_20210205_0220.py | f9a5804a8d70ac51a17c30c4f12fb9f516f7019f | [] | no_license | isaiaholadapo/django-blog | 4545084eed4a072105331347628f44e0bcfe1544 | 5b8dfa1c17abee6ff136ead4eaa4c6d8b90d81de | refs/heads/main | 2023-03-02T09:58:53.321243 | 2021-02-06T07:01:33 | 2021-02-06T07:01:33 | 335,270,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # Generated by Django 3.1.5 on 2021-02-05 01:20
# Generated by Django 3.1.5 on 2021-02-05 01:20

from django.db import migrations, models


class Migration(migrations.Migration):

    # Builds on the initial account migration.
    dependencies = [
        ('account', '0001_initial'),
    ]

    operations = [
        # `date_joined` is now stamped automatically once, at row creation.
        migrations.AlterField(
            model_name='account',
            name='date_joined',
            field=models.DateTimeField(auto_now_add=True, verbose_name='date joined'),
        ),
        # `last_login` is now refreshed automatically on every save.
        migrations.AlterField(
            model_name='account',
            name='last_login',
            field=models.DateTimeField(auto_now=True, verbose_name='last login'),
        ),
    ]
| [
"isaiaholadapo18@gmail.com"
] | isaiaholadapo18@gmail.com |
95c18cf28b8e463178f2178a08467aa780703687 | 6c3b6d35df9bf7cacbbee2aa4dddc46cd463c998 | /survey/utils.py | f256727c8fd35e62d078edaf2f685f6698960282 | [] | no_license | YuriyShashurin/SF_Final_project_backend | 752d28658b520e69cfe930f4571abbe7e9278bd4 | eb2f46f73ca4687673eddf5ab7094f57dd88c747 | refs/heads/master | 2023-09-05T09:11:25.373117 | 2021-11-09T15:44:35 | 2021-11-09T15:44:35 | 367,711,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py |
def jwt_response_payload_handler(token, user=None, request=None):
    """Build the JWT authentication response payload.

    Returns the issued token together with basic identifying fields of the
    authenticated user, so the client does not need a follow-up profile
    request after logging in.
    """
    payload = {'token': token}
    payload['username'] = user.username
    payload['user_id'] = user.id
    payload['email'] = user.email
    return payload
| [
"shashurin.edu@mail.ru"
] | shashurin.edu@mail.ru |
b2be745e9e96cf02e43aae6b8f4a59f77e2cc754 | 10eca9782a9f025b0afb5cb32a47d36fef1d1278 | /chatbot/tests/basic_test.py | 1ff7b4471e8eecf7775c2e1f3abea8f80c6a87d4 | [] | no_license | superxiaoqiang/chatbot-cs160 | d1dd1f8b694f9adf06419cef66c07fb34f514c7f | 3674f78b874ab371340b675e0e9c03d3665ce3e9 | refs/heads/master | 2020-12-31T06:10:54.299393 | 2010-03-16T23:52:28 | 2010-03-16T23:52:28 | 56,666,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,038 | py | """Unit test for chatbot
Part of the chatbot project for cs160.
"""
from inputparser import *
import unittest
ip = InputParser()
class inputTest(unittest.TestCase):
    """Unit tests for InputParser.parse, one test per utterance category.

    The parser instance `ip` is created at module level.

    Fix: the cuisine test was previously also named ``testInputSingleMore``,
    which silently shadowed the "single (more)" test above it so that test
    never ran.  It is now named ``testInputSingleCuisine``.
    """

    def testInputList(self):
        """list input"""
        response = ip.parse("List the 5 most expensive restaurants in Manhattan, New York.")
        self.assertEqual(response['type'], 'list')
        self.assertEqual(response['price'], 'expensive')

    def testInputSingleMore(self):
        """single (more) input"""
        response = ip.parse("I want to know more about Gilt.")
        self.assertEqual(response['type'], 'single-detail')
        self.assertEqual(response['restaurant'], 'Gilt')

    def testInputSingleCuisine(self):
        """single (cuisine) input"""
        response = ip.parse("I want a good Mexican restaurant.")
        self.assertEqual(response['type'], 'single-cuisine')
        self.assertEqual(response['cuisine'], 'Mexican')
        response = ip.parse("What is a good Mexican restaurant?")
        self.assertEqual(response['type'], 'single-cuisine')
        self.assertEqual(response['cuisine'], 'Mexican')

    def testInputQuit(self):
        """quit input"""
        response = ip.parse("quit")
        self.assertEqual(response['type'], 'quit')

    def testInputGreeting(self):
        """greeting input"""
        response = ip.parse("Hi")
        # assertEqual on the value itself gives a clearer failure message
        # than asserting `x == y` against True.
        self.assertEqual(response['type'], 'greeting')

    def testInputConfirmation(self):
        """confirmation input"""
        response = ip.parse("Yes, go ahead")
        self.assertEqual(response['type'], 'confirmation')
        response = ip.parse("Ok, good")
        self.assertEqual(response['type'], 'confirmation')
        response = ip.parse("Sure, go ahead")
        self.assertEqual(response['type'], 'confirmation')
        response = ip.parse("Yeah")
        self.assertEqual(response['type'], 'confirmation')
# Allow running this test module directly: `python basic_test.py`.
if __name__ == "__main__":
    unittest.main()
| [
"paul.craciunoiu@39997754-0b0c-11df-a6e1-f7bfe9b6b944"
] | paul.craciunoiu@39997754-0b0c-11df-a6e1-f7bfe9b6b944 |
e96ad110a9cb5d04e3c1a4249b69f8ae56335ce5 | 7970354f8435f9fdc79662cc36ba68b6755d23c5 | /tests/calculators/test_np_activeCells.py | c9bccb553e8895ecb3ba2c3627b0647376ebcd99 | [] | no_license | calumroy/HTM | f197c763662d55decea3e99f66c84fa6d3ade4cf | 1f5cc8c9e533c991e87901ee313e93be76af481b | refs/heads/master | 2022-12-08T11:02:04.684706 | 2018-04-15T05:39:46 | 2018-04-15T05:39:46 | 8,181,199 | 2 | 0 | null | 2022-12-07T23:36:30 | 2013-02-13T15:02:51 | Python | UTF-8 | Python | false | false | 7,826 | py | from HTM_calc import np_inhibition
import numpy as np
import math
import random
import theano.tensor as T
from theano import function
from theano import scan
from HTM_calc import np_activeCells as nac
class test_theano_predictCells:
    # NOTE(review): despite the name, this test exercises the numpy
    # active-cells calculator (np_activeCells), not a predict-cells one.
    def setUp(self):
        '''
        '''
    # Helper functions for the Main function.
    def updateActiveCols(self, numColumns):
        # Draw a random binary active-columns vector (used interactively).
        activeColumns = np.random.randint(2, size=(numColumns))
        print "activeColumns = \n%s" % activeColumns
        return activeColumns

    def test_case1(self):
        '''
        Test the numpy active Cells calculator class.
        '''
        # HTM layer geometry: 1x3 columns, 4 cells each.
        numRows = 1
        numCols = 3
        cellsPerColumn = 4
        numColumns = numRows * numCols
        maxSegPerCell = 3
        maxSynPerSeg = 3
        connectPermanence = 0.3
        activationThreshold = 1
        minNumSynThreshold = 1
        minScoreThreshold = 1
        newSynPermanence = 0.3
        # NOTE(review): connectPermanence is assigned twice; this 0.2 value
        # is the one actually passed to the calculator below.
        connectPermanence = 0.2
        timeStep = 1
        # Create the distalSynapse 5d tensor holding the information of the distal synapses.
        # Shape is (columns, cellsPerColumn, maxSegPerCell, maxSynPerSeg, 3)
        # where the last axis holds [end column, end cell, permanence].
        distalSynapses = np.array(
            [[[[[1., 1., 0.2],
                [0., 1., 0.3],
                [2., 2., 0.7]],
               [[2., 2., 0.1],
                [2., 1., 0.8],
                [2., 0., 0.2]],
               [[0., 3., 0.3],
                [1., 2., 0.8],
                [1., 1., 0.4]]],
              [[[2., 2., 0.5],
                [2., 0., 0.1],
                [0., 2., 0.4]],
               [[1., 1., 0.9],
                [1., 3., 0.3],
                [1., 2., 0.4]],
               [[1., 2., 0.3],
                [1., 1., 1. ],
                [0., 2., 0.7]]],
              [[[0., 3., 0.3],
                [0., 1., 0.8],
                [2., 3., 0.7]],
               [[1., 1., 0.7],
                [2., 2., 0.2],
                [0., 1., 0.6]],
               [[0., 2., 0.5],
                [1., 0., 1. ],
                [0., 2., 0.9]]],
              [[[1., 0., 0.6],
                [2., 1., 0.2],
                [1., 3., 0.7]],
               [[2., 3., 0.6],
                [0., 1., 0.9],
                [1., 0., 0.5]],
               [[0., 1., 0.9],
                [1., 3., 0.3],
                [2., 2., 1. ]]]],
             [[[[2., 0., 0.4],
                [0., 2., 0.7],
                [2., 2., 1. ]],
               [[2., 1., 0.1],
                [0., 2., 0.4],
                [1., 2., 0.1]],
               [[2., 3., 0.9],
                [1., 1., 0.3],
                [1., 1., 0.5]]],
              [[[1., 2., 0.9],
                [2., 2., 0.3],
                [1., 2., 0.8]],
               [[1., 1., 0.3],
                [0., 2., 1. ],
                [1., 2., 1. ]],
               [[1., 2., 0.8],
                [0., 1., 0. ],
                [2., 1., 0.2]]],
              [[[0., 1., 0. ],
                [2., 2., 0.3],
                [1., 3., 0.8]],
               [[0., 0., 0.3],
                [1., 0., 0.9],
                [2., 2., 0.9]],
               [[1., 3., 0.4],
                [0., 3., 0. ],
                [0., 3., 1. ]]],
              [[[1., 2., 1. ],
                [1., 2., 0.1],
                [2., 2., 0.9]],
               [[1., 2., 0.1],
                [1., 0., 0.5],
                [1., 1., 0.9]],
               [[0., 3., 0. ],
                [2., 2., 0.6],
                [1., 1., 0.5]]]],
             [[[[0., 3., 0.5],
                [2., 1., 0.5],
                [0., 1., 0.3]],
               [[0., 1., 0.3],
                [1., 2., 0.7],
                [0., 1., 0.6]],
               [[0., 3., 0.3],
                [2., 1., 0.7],
                [0., 0., 0.2]]],
              [[[0., 3., 0.1],
                [1., 3., 0. ],
                [0., 2., 0.1]],
               [[1., 3., 0.6],
                [2., 1., 0.4],
                [0., 2., 0.6]],
               [[1., 3., 1. ],
                [2., 2., 0.5],
                [2., 3., 0.7]]],
              [[[0., 3., 0. ],
                [1., 0., 0.8],
                [1., 1., 0.9]],
               [[0., 2., 1. ],
                [2., 2., 0.8],
                [0., 1., 0.3]],
               [[1., 2., 0.2],
                [1., 0., 0.6],
                [1., 3., 0.7]]],
              [[[2., 0., 0.1],
                [2., 1., 0.7],
                [2., 2., 0.9]],
               [[2., 2., 0. ],
                [2., 3., 0.4],
                [1., 0., 0.6]],
               [[2., 1., 0.5],
                [2., 2., 0.1],
                [2., 2., 0.5]]]]]
            )
        # Create the predictive cells defining the timestep when the cells where last predicting.
        # Each cell stores the last 2 timesteps when it was predicting.
        predictCells = np.array(
            [[[1, 0],
              [0, 0],
              [0, 0],
              [0, 0]],
             [[1, 0],
              [1, 2],
              [1, 0],
              [1, 0]],
             [[1, 1],
              [2, 2],
              [1, 2],
              [2, 1]]])
        # Per-cell list of segment indices that were active (same layout).
        activeSeg = np.array(
            [[[1, 0, 0],
              [0, 0, 0],
              [0, 0, 0],
              [0, 0, 0]],
             [[1, 0, 0],
              [1, 2, 0],
              [1, 0, 0],
              [1, 0, 0]],
             [[1, 1, 0],
              [2, 2, 0],
              [1, 2, 0],
              [2, 1, 0]]])
        # Set the active columns
        activeColumns = np.array([1, 0, 0])
        # print "activeCells = \n%s" % activeCells
        print "distalSynapses = \n%s" % distalSynapses
        print "predictCells = \n%s" % predictCells

        actCellsCalc = nac.activeCellsCalculator(numColumns,
                                                 cellsPerColumn,
                                                 maxSegPerCell,
                                                 maxSynPerSeg,
                                                 minNumSynThreshold,
                                                 minScoreThreshold,
                                                 newSynPermanence,
                                                 connectPermanence)
        #import ipdb; ipdb.set_trace()
        # Run through calculator
        test_iterations = 2
        for i in range(test_iterations):
            timeStep += 1
            if timeStep % 20 == 0:
                print timeStep
            print "timeStep = \n%s" % timeStep
            activeCells, learnCells = actCellsCalc.updateActiveCells(timeStep,
                                                                     activeColumns,
                                                                     predictCells,
                                                                     activeSeg,
                                                                     distalSynapses)
            print "activeCells = \n%s" % activeCells
            print "learnCells = \n%s" % learnCells
            # Change the active columns and active cells and run again.
            activeColumns = np.array([1, 1, 0])

        # Expected Results: per cell, the last two timesteps it was
        # active/learning (-1 means never).
        ex_activeCells = np.array(
            [[[ 2, 3],
              [-1, -1],
              [-1, -1],
              [-1, -1]],
             [[-1, -1],
              [ 3, -1],
              [-1, -1],
              [-1, -1]],
             [[-1, -1],
              [-1, -1],
              [-1, -1],
              [-1, -1]]])
        ex_learnCells = np.array(
            [[[ 2, 3],
              [-1, -1],
              [-1, -1],
              [-1, -1]],
             [[-1, -1],
              [ 3, -1],
              [-1, -1],
              [-1, -1]],
             [[-1, -1],
              [-1, -1],
              [-1, -1],
              [-1, -1]]])
        # Make sure the uninhibted columns are equal to the above
        # predetermined test results.
        assert np.array_equal(ex_activeCells, activeCells)
        assert np.array_equal(ex_learnCells, learnCells)
| [
"calum@calumroy.com"
] | calum@calumroy.com |
a683c1f4c81d2952675346854e2f80efb8473601 | 37ba3d21dcb6edd21e48dbb7f12591ac3590ab64 | /python_problems_competitive/ten_kinds_of_people.py | 4ef27f518e9cd247adcaa9041da10f96bc2643ec | [] | no_license | Hygens/hackerearth_hackerrank_solutions | 2feaedec255a85792d305bb8ff35675254a03f2a | 86cc4c9ca4d5246f24db8cda93400f1d7ee00882 | refs/heads/master | 2021-07-06T15:56:28.906533 | 2020-04-28T22:45:56 | 2020-04-28T22:45:56 | 55,160,498 | 1 | 2 | null | 2020-10-01T06:48:09 | 2016-03-31T15:15:36 | Python | UTF-8 | Python | false | false | 331 | py | r,c = map(int,input().split(' '))
l = []
for i in range(r):
l.append(list(input().strip()))
n = int(input().strip())
for _ in range(n):
r1,c1,r2,c2 = map(int,input().split(' '))
if l[r1-1][c1-1]==l[r2-1][c2-1]=='0': print('binary')
elif l[r1-1][c1-1]==l[r2-1][c2-1]=='1': print('decimal')
else: print('neither') | [
"noreply@github.com"
] | noreply@github.com |
484d7007aa18126e562a439f5ddb39f19a4e0ea8 | 908655251066427f654ee33ebdf804f9f302fcc3 | /Tests/CartPoleAST/CartPoleNdRewardt/MultiCartPoleNd_RLNonInter.py | 7bde5b51f00c93349bfd677128345b1493e7a0c2 | [] | no_license | maxiaoba/MCTSPO | be567f80f1dcf5c35ac857a1e6690e1ac599a59d | eedfccb5a94e089bd925b58f3d65eef505378bbc | refs/heads/main | 2023-07-05T02:20:16.752650 | 2021-07-06T06:04:40 | 2021-07-06T06:04:40 | 381,811,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,181 | py | import os
os.environ["CUDA_VISIBLE_DEVICES"]="-1" #just use CPU
# from garage.tf.algos.trpo import TRPO
from garage.baselines.zero_baseline import ZeroBaseline
from mylab.envs.tfenv import TfEnv
from garage.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from garage.tf.policies.gaussian_lstm_policy import GaussianLSTMPolicy
from garage.tf.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer, FiniteDifferenceHvp
from garage.misc import logger
from mylab.rewards.ast_reward import ASTReward
from mylab.envs.ast_env import ASTEnv
from mylab.simulators.policy_simulator import PolicySimulator
from CartPoleNd.cartpole_nd import CartPoleNdEnv
from mylab.algos.trpo import TRPO
import os.path as osp
import argparse
# from example_save_trials import *
import tensorflow as tf
import joblib
import math
import numpy as np
import mcts.BoundedPriorityQueues as BPQ
import csv
# Logger Params
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default="cartpole")
parser.add_argument('--n_trial', type=int, default=5)
parser.add_argument('--trial_start', type=int, default=0)
parser.add_argument('--n_itr', type=int, default=2500)
parser.add_argument('--batch_size', type=int, default=4000)
parser.add_argument('--snapshot_mode', type=str, default="gap")
parser.add_argument('--snapshot_gap', type=int, default=500)
parser.add_argument('--log_dir', type=str, default='./Data/AST/RLNonInter')
parser.add_argument('--args_data', type=str, default=None)
args = parser.parse_args()
top_k = 10
max_path_length = 100
interactive = False
tf.set_random_seed(0)
sess = tf.Session()
sess.__enter__()
# Instantiate the env
env_inner = CartPoleNdEnv(nd=10,use_seed=False)
data = joblib.load("../CartPole/Data/Train/itr_50.pkl")
policy_inner = data['policy']
reward_function = ASTReward()
simulator = PolicySimulator(env=env_inner,policy=policy_inner,max_path_length=max_path_length)
env = TfEnv(ASTEnv(interactive=interactive,
simulator=simulator,
sample_init_state=False,
s_0=[0.0, 0.0, 0.0 * math.pi / 180, 0.0],
reward_function=reward_function,
))
# Create policy
policy = GaussianLSTMPolicy(name='lstm_policy',
env_spec=env.spec,
hidden_dim=128,
use_peepholes=True)
with open(osp.join(args.log_dir, 'total_result.csv'), mode='w') as csv_file:
fieldnames = ['step_count']
for i in range(top_k):
fieldnames.append('reward '+str(i))
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for trial in range(args.trial_start,args.trial_start+args.n_trial):
# Create the logger
log_dir = args.log_dir+'/'+str(trial)
tabular_log_file = osp.join(log_dir, 'process.csv')
text_log_file = osp.join(log_dir, 'text.txt')
params_log_file = osp.join(log_dir, 'args.txt')
logger.set_snapshot_dir(log_dir)
logger.set_snapshot_mode(args.snapshot_mode)
logger.set_snapshot_gap(args.snapshot_gap)
logger.log_parameters_lite(params_log_file, args)
if trial > args.trial_start:
old_log_dir = args.log_dir+'/'+str(trial-1)
logger.pop_prefix()
logger.remove_text_output(osp.join(old_log_dir, 'text.txt'))
logger.remove_tabular_output(osp.join(old_log_dir, 'process.csv'))
logger.add_text_output(text_log_file)
logger.add_tabular_output(tabular_log_file)
logger.push_prefix("["+args.exp_name+'_trial '+str(trial)+"]")
np.random.seed(trial)
params = policy.get_params()
sess.run(tf.variables_initializer(params))
baseline = ZeroBaseline(env_spec=env.spec)
optimizer = ConjugateGradientOptimizer(hvp_approach=FiniteDifferenceHvp(base_eps=1e-5))
top_paths = BPQ.BoundedPriorityQueue(top_k)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=args.batch_size,
step_size=0.1,
n_itr=args.n_itr,
store_paths=True,
optimizer= optimizer,
max_path_length=max_path_length,
top_paths = top_paths,
plot=False,
)
algo.train(sess=sess, init_var=False)
row_content = dict()
row_content['step_count'] = args.n_itr*args.batch_size
i = 0
for (r,action_seq) in algo.top_paths:
row_content['reward '+str(i)] = r
i += 1
writer.writerow(row_content) | [
"xiaobaima@DNab421bb2.stanford.edu"
] | xiaobaima@DNab421bb2.stanford.edu |
fce86ff8bcc8ef3f7eb2614633191a9d4fd1e56b | 3b9c53041d6f2ceda05200ecf374b044743c8e54 | /graphistician/weights.py | 329ada54d9a63e7a2bae36d611756abce15a272c | [] | no_license | M0h3eN/pyhawkes | 9549c1db062ad0cb9fea45c5c2862c1a4e7077a8 | 4a790a6ff93bb7c19032d8763c0536c5cc49b486 | refs/heads/master | 2023-06-12T08:01:26.775188 | 2019-08-01T13:44:37 | 2019-08-01T13:44:37 | 382,609,070 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,592 | py |
import numpy as np
from pybasicbayes.distributions import Gaussian, GaussianFixedMean, GaussianFixedCov
from pybasicbayes.abstractions import GibbsSampling
from graphistician.abstractions import GaussianWeightDistribution
class FixedGaussianWeightDistribution(GaussianWeightDistribution, GibbsSampling):
    """Gaussian weight model with fixed (non-learned) mean and covariance.

    Off-diagonal weights share one Gaussian (mu, sigma); the self-weights on
    the diagonal may use a separate Gaussian (mu_self, sigma_self).  Since the
    parameters are fixed, resampling and hyperparameter initialization are
    no-ops.
    """
    def __init__(self, N, B, mu, sigma, mu_self=None, sigma_self=None):
        """
        :param N:  number of nodes
        :param B:  weight dimensionality
        :param mu: (B,) fixed mean of the off-diagonal weights
        :param sigma: (B,B) fixed covariance of the off-diagonal weights
        :param mu_self, sigma_self: optional parameters for the diagonal
            (self-connection) weights; default to mu/sigma.
        """
        super(FixedGaussianWeightDistribution, self).__init__(N)
        self.B = B

        assert mu.shape == (B,)
        self.mu = mu

        assert sigma.shape == (B,B)
        self.sigma = sigma

        self._gaussian = Gaussian(mu, sigma)

        if mu_self is not None and sigma_self is not None:
            self._self_gaussian = Gaussian(mu_self, sigma_self)
        else:
            self._self_gaussian = self._gaussian

    @property
    def Mu(self):
        """(N,N,B) tensor of weight means; diagonal uses the self-Gaussian."""
        mu = self._gaussian.mu
        Mu = np.tile(mu[None,None,:], (self.N, self.N, 1))
        for n in range(self.N):
            Mu[n,n,:] = self._self_gaussian.mu
        return Mu

    @property
    def Sigma(self):
        """(N,N,B,B) tensor of weight covariances; diagonal uses the self-Gaussian."""
        sig = self._gaussian.sigma
        Sig = np.tile(sig[None,None,:,:], (self.N, self.N, 1, 1))
        for n in range(self.N):
            Sig[n,n,:,:] = self._self_gaussian.sigma
        return Sig

    def initialize_from_prior(self):
        # Parameters are fixed; nothing to sample.
        pass

    def initialize_hypers(self, W=None):
        # Accept the observed weight matrix for signature compatibility with
        # the other weight models (whose initialize_hypers(W) is called with
        # the data); the parameters here are fixed, so it is ignored.
        # W defaults to None for backward compatibility with no-arg callers.
        pass

    def log_prior(self):
        # Fixed parameters carry no prior mass.
        return 0

    def sample_predictive_parameters(self):
        """Return (Murow, Mucol, Lrow, Lcol) for an (N+1)-th node: tiled
        fixed mean and Cholesky factor of the fixed covariance."""
        Murow = Mucol = np.tile(self._gaussian.mu[None,:], (self.N+1, 1))
        Lrow = Lcol = np.tile(self._gaussian.sigma_chol[None,:,:], (self.N+1, 1, 1))
        return Murow, Mucol, Lrow, Lcol

    def resample(self, A, W):
        # Parameters are fixed; Gibbs resampling is a no-op.
        pass
class NIWGaussianWeightDistribution(GaussianWeightDistribution, GibbsSampling):
    """
    Gaussian weight distribution with a normal inverse-Wishart prior.

    One NIW-Gaussian governs the off-diagonal weights and a second one the
    self-connections on the diagonal.
    """
    # TODO: Specify the self weight parameters in the constructor
    def __init__(self, N, B=1, mu_0=None, Sigma_0=None, nu_0=None, kappa_0=None):
        """
        :param N: number of nodes
        :param B: weight dimensionality
        :param mu_0, Sigma_0, nu_0, kappa_0: NIW hyperparameters (defaults:
            zero mean, identity scale, nu_0 = B + 2, kappa_0 = 1).
        """
        super(NIWGaussianWeightDistribution, self).__init__(N)
        self.B = B

        if mu_0 is None:
            mu_0 = np.zeros(B)
        if Sigma_0 is None:
            Sigma_0 = np.eye(B)
        if nu_0 is None:
            nu_0 = B + 2
        if kappa_0 is None:
            kappa_0 = 1.0

        self._gaussian = Gaussian(mu_0=mu_0, sigma_0=Sigma_0,
                                  nu_0=nu_0, kappa_0=kappa_0)

        # Special case self-weights (along the diagonal)
        self._self_gaussian = Gaussian(mu_0=mu_0, sigma_0=Sigma_0,
                                       nu_0=nu_0, kappa_0=kappa_0)

    @property
    def Mu(self):
        # (N,N,B) means; diagonal entries come from the self-Gaussian.
        mu = self._gaussian.mu
        Mu = np.tile(mu[None,None,:], (self.N, self.N, 1))
        for n in range(self.N):
            Mu[n,n,:] = self._self_gaussian.mu
        return Mu

    @property
    def Sigma(self):
        # (N,N,B,B) covariances; diagonal entries come from the self-Gaussian.
        sig = self._gaussian.sigma
        Sig = np.tile(sig[None,None,:,:], (self.N, self.N, 1, 1))
        for n in range(self.N):
            Sig[n,n,:,:] = self._self_gaussian.sigma
        return Sig

    def initialize_from_prior(self):
        self._gaussian.resample()
        self._self_gaussian.resample()

    def initialize_hypers(self, W):
        # Empirical-Bayes initialization: center the NIW hyperparameters on
        # the observed weights' mean/variance (diagonal entries separately).
        # self.B = W.shape[2]
        mu_0 = W.mean(axis=(0,1))
        sigma_0 = np.diag(W.var(axis=(0,1)))
        self._gaussian.mu_0 = mu_0
        self._gaussian.sigma_0 = sigma_0
        self._gaussian.resample()
        # self._gaussian.nu_0 = self.B + 2

        W_self = W[np.arange(self.N), np.arange(self.N)]
        self._self_gaussian.mu_0 = W_self.mean(axis=0)
        self._self_gaussian.sigma_0 = np.diag(W_self.var(axis=0))
        self._self_gaussian.resample()
        # self._self_gaussian.nu_0 = self.B + 2

    def log_prior(self):
        # NIW log density of (mu, Sigma) for both Gaussians.
        from graphistician.internals.utils import normal_inverse_wishart_log_prob
        lp = 0
        lp += normal_inverse_wishart_log_prob(self._gaussian)
        lp += normal_inverse_wishart_log_prob(self._self_gaussian)
        return lp

    def sample_predictive_parameters(self):
        # Predictive mean/Cholesky rows and columns for a hypothetical
        # (N+1)-th node; its self-connection uses the self-Gaussian.
        Murow = Mucol = np.tile(self._gaussian.mu[None,:], (self.N+1, 1))
        Lrow = Lcol = np.tile(self._gaussian.sigma_chol[None,:,:], (self.N+1, 1, 1))
        Murow[-1,:] = self._self_gaussian.mu
        Mucol[-1,:] = self._self_gaussian.mu
        Lrow[-1,:,:] = self._self_gaussian.sigma_chol
        Lcol[-1,:,:] = self._self_gaussian.sigma_chol
        return Murow, Mucol, Lrow, Lcol

    def resample(self, A, W):
        # Resample the Normal-inverse Wishart prior over mu and W
        # given W for which A=1
        A_offdiag = A.copy()
        np.fill_diagonal(A_offdiag, 0)
        A_ondiag = A * np.eye(self.N)
        self._gaussian.resample(W[A_offdiag==1])
        self._self_gaussian.resample(W[A_ondiag==1])
class LowRankGaussianWeightDistribution(GaussianWeightDistribution, GibbsSampling):
    """
    Low rank weight matrix (i.e. BPMF from Mnih and Salakhutidnov)
    """
    # Placeholder: this model has not been implemented yet.
    def __init__(self, N, dim):
        raise NotImplementedError
class SBMGaussianWeightDistribution(GaussianWeightDistribution, GibbsSampling):
    """
    A stochastic block model is a clustered network model with
    C:          Number of blocks
    m[c]:       Probability that a node belongs block c
    mu[c,c']:   Mean weight from node in block c to node in block c'
    Sig[c,c']:  Cov of weight from node in block c to node in block c'

    It has hyperparameters:
    pi:         Parameter of Dirichlet prior over m
    mu0, nu0, kappa0, Sigma0: Parameters of NIW prior over (mu,Sig)
    """

    # TODO: Specify the self weight parameters in the constructor
    def __init__(self, N, B=1,
                 C=2, pi=10.0,
                 mu_0=None, Sigma_0=None, nu_0=None, kappa_0=None,
                 special_case_self_conns=True):
        """
        Initialize SBM with parameters defined above.
        """
        super(SBMGaussianWeightDistribution, self).__init__(N)
        self.B = B

        assert isinstance(C, int) and C >= 1, "C must be a positive integer number of blocks"
        self.C = C

        if isinstance(pi, (int, float)):
            self.pi = pi * np.ones(C)
        else:
            assert isinstance(pi, np.ndarray) and pi.shape == (C,), "pi must be a sclar or a C-vector"
            self.pi = pi

        # Block probabilities m and per-node block assignments c.
        self.m = np.random.dirichlet(self.pi)
        self.c = np.random.choice(self.C, p=self.m, size=(self.N))

        if mu_0 is None:
            mu_0 = np.zeros(B)
        if Sigma_0 is None:
            Sigma_0 = np.eye(B)
        if nu_0 is None:
            nu_0 = B + 2
        if kappa_0 is None:
            kappa_0 = 1.0

        # One NIW-Gaussian per ordered block pair (c -> c').
        self._gaussians = [[Gaussian(mu_0=mu_0, nu_0=nu_0,
                                     kappa_0=kappa_0, sigma_0=Sigma_0)
                            for _ in range(C)]
                           for _ in range(C)]

        # Special case self-weights (along the diagonal)
        self.special_case_self_conns = special_case_self_conns
        if special_case_self_conns:
            self._self_gaussian = Gaussian(mu_0=mu_0, sigma_0=Sigma_0,
                                           nu_0=nu_0, kappa_0=kappa_0)

    @property
    def _Mu(self):
        # (C,C,B) block-pair means.
        return np.array([[self._gaussians[c1][c2].mu
                          for c2 in range(self.C)]
                         for c1 in range(self.C)])

    @property
    def _Sigma(self):
        # (C,C,B,B) block-pair covariances.
        return np.array([[self._gaussians[c1][c2].sigma
                          for c2 in range(self.C)]
                         for c1 in range(self.C)])

    @property
    def Mu(self):
        """
        Get the NxNxB matrix of weight means
        :return:
        """
        _Mu = self._Mu
        Mu = _Mu[np.ix_(self.c, self.c)]
        if self.special_case_self_conns:
            for n in range(self.N):
                Mu[n,n] = self._self_gaussian.mu
        return Mu

    @property
    def Sigma(self):
        """
        Get the NxNxBxB matrix of weight covariances
        :return:
        """
        _Sigma = self._Sigma
        Sigma = _Sigma[np.ix_(self.c, self.c)]
        if self.special_case_self_conns:
            for n in range(self.N):
                Sigma[n,n] = self._self_gaussian.sigma
        return Sigma

    def initialize_from_prior(self):
        self.m = np.random.dirichlet(self.pi)
        self.c = np.random.choice(self.C, p=self.m, size=(self.N))

        for c1 in range(self.C):
            for c2 in range(self.C):
                self._gaussians[c1][c2].resample()
        if self.special_case_self_conns:
            self._self_gaussian.resample()

    def initialize_hypers(self, W):
        # Empirical-Bayes NIW hyperparameters from the observed weights;
        # sigma_0 is scaled so the NIW mean covariance matches the data.
        mu_0 = W.mean(axis=(0,1))
        sigma_0 = np.diag(W.var(axis=(0,1)))
        for c1 in range(self.C):
            for c2 in range(self.C):
                nu_0 = self._gaussians[c1][c2].nu_0
                self._gaussians[c1][c2].mu_0 = mu_0
                self._gaussians[c1][c2].sigma_0 = sigma_0 * (nu_0 - self.B - 1) / self.C
                self._gaussians[c1][c2].resample()

        if self.special_case_self_conns:
            W_self = W[np.arange(self.N), np.arange(self.N)]
            self._self_gaussian.mu_0 = W_self.mean(axis=0)
            self._self_gaussian.sigma_0 = np.diag(W_self.var(axis=0))
            self._self_gaussian.resample()

        # Cluster the neurons based on their rows and columns
        from sklearn.cluster import KMeans
        features = np.hstack((W[:,:,0], W[:,:,0].T))
        km = KMeans(n_clusters=self.C)
        km.fit(features)
        self.c = km.labels_.astype(np.int)

        print("Initial c: ", self.c)

    def _get_mask(self, A, c1, c2):
        # Boolean (N,N) mask of present edges from block c1 to block c2.
        mask = ((self.c==c1)[:,None] * (self.c==c2)[None,:])
        mask &= A.astype(np.bool)
        if self.special_case_self_conns:
            # `True - eye` acts as logical NOT of the identity here
            # (legacy numpy idiom): exclude the diagonal.
            mask &= True - np.eye(self.N, dtype=np.bool)
        return mask

    def log_likelihood(self, A, W):
        # Sum the block-pair Gaussian likelihoods of all present edges.
        N = self.N
        assert A.shape == (N,N)
        assert W.shape == (N,N,self.B)

        ll = 0
        for c1 in range(self.C):
            for c2 in range(self.C):
                mask = self._get_mask(A, c1, c2)
                ll += self._gaussians[c1][c2].log_likelihood(W[mask]).sum()

        if self.special_case_self_conns:
            mask = np.eye(self.N).astype(np.bool) & A.astype(np.bool)
            ll += self._self_gaussian.log_likelihood(W[mask]).sum()

        return ll

    def log_prior(self):
        """
        Compute the log likelihood of a set of SBM parameters

        :param x:    (m,p,v) tuple
        :return:
        """
        from scipy.stats import dirichlet
        from graphistician.internals.utils import normal_inverse_wishart_log_prob
        lp = 0

        # Get the log probability of the block probabilities
        lp += dirichlet(self.pi).logpdf(self.m)

        # Get the prior probability of the Gaussian parameters under NIW prior
        for c1 in range(self.C):
            for c2 in range(self.C):
                lp += normal_inverse_wishart_log_prob(self._gaussians[c1][c2])

        if self.special_case_self_conns:
            lp += normal_inverse_wishart_log_prob(self._self_gaussian)

        # Get the probability of the block assignments
        lp += (np.log(self.m)[self.c]).sum()
        return lp

    # NOTE(review): mutable default `size=[]` — harmless here since `size`
    # is never used or mutated, but consider `size=None`.
    def rvs(self, size=[]):
        # Sample a network given m, c, p
        W = np.zeros((self.N, self.N, self.B))

        for c1 in range(self.C):
            for c2 in range(self.C):
                blk = (self.c==c1)[:,None] * (self.c==c2)[None,:]
                W[blk] = self._gaussians[c1][c2].rvs(size=blk.sum())

        if self.special_case_self_conns:
            for n in range(self.N):
                W[n,n] = self._self_gaussian.rvs()

        return W

    def sample_predictive_parameters(self):
        # Sample a new cluster assignment
        c2 = np.random.choice(self.C, p=self.m)
        cext = np.concatenate((self.c, [c2]))

        Murow = np.array([self._gaussians[c2][c1].mu for c1 in cext])
        Lrow = np.array([self._gaussians[c2][c1].sigma_chol for c1 in cext])
        Mucol = np.array([self._gaussians[c1][c2].mu for c1 in cext])
        Lcol = np.array([self._gaussians[c1][c2].sigma_chol for c1 in cext])

        if self.special_case_self_conns:
            Murow[-1] = Mucol[-1] = self._self_gaussian.mu
            Lrow[-1] = Lcol[-1] = self._self_gaussian.sigma_chol

        return Murow, Mucol, Lrow, Lcol

    ###
    ### Implement Gibbs sampling for SBM
    ###
    def resample(self, A, W):
        # One Gibbs sweep: block-pair Gaussians, then assignments, then m.
        self.resample_mu_and_Sig(A, W)
        self.resample_c(A, W)
        self.resample_m()

    def resample_mu_and_Sig(self, A, W):
        """
        Resample p given observations of the weights
        """
        Abool = A.astype(np.bool)

        for c1 in range(self.C):
            for c2 in range(self.C):
                mask = self._get_mask(Abool, c1, c2)
                self._gaussians[c1][c2].resample(W[mask])

        # Resample self connection
        if self.special_case_self_conns:
            mask = np.eye(self.N, dtype=np.bool) & Abool
            self._self_gaussian.resample(W[mask])

    def resample_c(self, A, W):
        """
        Resample block assignments given the weighted adjacency matrix
        """
        from pybasicbayes.util.stats import sample_discrete_from_log
        if self.C == 1:
            return

        Abool = A.astype(np.bool)
        c_init = self.c.copy()

        # Reference implementation: per-neighbor loop (kept for validation
        # against the vectorized version below).
        def _evaluate_lkhd_slow(n1, cn1):
            ll = 0
            # Compute probability for each incoming and outgoing
            for n2 in range(self.N):
                cn2 = self.c[n2]
                # If we are special casing the self connections then
                # we can just continue if n1==n2 since its weight has
                # no bearing on the cluster assignment
                if n2 == n1:
                    # Self connection
                    if self.special_case_self_conns:
                        continue
                    ll += self._gaussians[cn1][cn1].log_likelihood(W[n1,n1]).sum()
                else:
                    # p(W[n1,n2] | c) and p(W[n2,n1] | c), only if there is a connection
                    if A[n1,n2]:
                        ll += self._gaussians[cn1][cn2].log_likelihood(W[n1,n2]).sum()
                    if A[n2,n1]:
                        ll += self._gaussians[cn2][cn1].log_likelihood(W[n2,n1]).sum()
            return ll

        # Vectorized likelihood of assigning node n1 to block cn1.
        def _evaluate_lkhd(n1, cn1):
            chat = self.c.copy()
            chat[n1] = cn1

            # Compute log lkhd for each pair of blocks
            ll = 0
            for c2 in range(self.C):
                # Outgoing connections
                out_mask = (chat == c2) & Abool[n1,:]
                if self.special_case_self_conns:
                    out_mask[n1] = False
                ll += self._gaussians[cn1][c2].log_likelihood(W[n1,out_mask]).sum()

                # Handle incoming connections
                # Exclude self connection since it would have been handle above
                in_mask = (chat == c2) & Abool[:,n1]
                in_mask[n1] = False
                ll += self._gaussians[c2][cn1].log_likelihood(W[in_mask,n1]).sum()
            return ll

        # Sample each assignment in order
        for n1 in range(self.N):
            # Compute unnormalized log probs of each connection
            lp = np.zeros(self.C)

            # Prior from m
            lp += np.log(self.m)

            # Likelihood from network
            for cn1 in range(self.C):
                ll = _evaluate_lkhd(n1, cn1)
                # ll_slow = _evaluate_lkhd_slow(n1, cn1)
                # assert np.allclose(ll,ll_slow)
                lp[cn1] += ll

            # Resample from lp
            self.c[n1] = sample_discrete_from_log(lp)

        # Count up the number of changes in c:
        # print "delta c: ", np.sum(1-(self.c==c_init))

    def resample_m(self):
        """
        Resample m given c and pi
        """
        # Conjugate Dirichlet-multinomial update.
        pi = self.pi + np.bincount(self.c, minlength=self.C)
        self.m = np.random.dirichlet(pi)
class SBMGaussianWeightSharedCov(SBMGaussianWeightDistribution):
    """
    SBM weight model in which all block pairs share a single covariance:
    each block pair keeps its own mean (GaussianFixedCov), while one global
    GaussianFixedMean models the shared covariance of the residuals.
    """
    def __init__(self, N, B=1,
                 C=3, pi=10.0,
                 mu_0=None, Sigma_0=None, nu_0=None,
                 special_case_self_conns=True):
        super(SBMGaussianWeightSharedCov, self).\
            __init__(N, B=B, C=C, pi=pi,
                     mu_0=mu_0, Sigma_0=Sigma_0, nu_0=nu_0,
                     special_case_self_conns=special_case_self_conns)

        if mu_0 is None:
            mu_0 = np.zeros(B)
        if Sigma_0 is None:
            Sigma_0 = np.eye(B)
        if nu_0 is None:
            nu_0 = B + 2

        # Shared covariance of the mean-centered weights.
        self._cov_model = GaussianFixedMean(mu=np.zeros(B),
                                            nu_0=nu_0, lmbda_0=Sigma_0)

        # Per-block-pair means, all tied to the shared covariance.
        self._gaussians = [[GaussianFixedCov(mu_0=mu_0, sigma_0=np.eye(B),
                                             sigma=self._cov_model.sigma)
                            for _ in range(C)]
                           for _ in range(C)]

    def resample_mu_and_Sig(self, A, W):
        """
        Resample p given observations of the weights
        """
        Abool = A.astype(np.bool)

        for c1 in range(self.C):
            for c2 in range(self.C):
                mask = self._get_mask(Abool, c1, c2)
                self._gaussians[c1][c2].resample(W[mask])

        # Resample self connection
        if self.special_case_self_conns:
            mask = np.eye(self.N, dtype=np.bool) & Abool
            self._self_gaussian.resample(W[mask])

        # Resample covariance from the residuals of all off-diagonal edges.
        A_offdiag = Abool.copy()
        np.fill_diagonal(A_offdiag, False)
        W_cent = (W - self.Mu)[A_offdiag]
        self._cov_model.resample(W_cent)

        # Update gaussians
        # Propagate the new shared covariance back into each block pair.
        for c1 in range(self.C):
            for c2 in range(self.C):
                self._gaussians[c1][c2].sigma = self._cov_model.sigma

    def log_prior(self):
        """
        Compute the log likelihood of a set of SBM parameters

        :param x:    (m,p,v) tuple
        :return:
        """
        from scipy.stats import dirichlet
        lp = 0

        # Get the log probability of the block probabilities
        lp += dirichlet(self.pi).logpdf(self.m)

        # Get the prior probability of the Gaussian parameters under NIW prior
        # NOTE(review): the NIW terms below are intentionally disabled; only
        # the Dirichlet and assignment terms contribute.
        # for c1 in range(self.C):
        #     for c2 in range(self.C):
        #         lp += normal_inverse_wishart_log_prob(self._gaussians[c1][c2])
        #
        # if self.special_case_self_conns:
        #     lp += normal_inverse_wishart_log_prob(self._self_gaussian)

        # Get the probability of the block assignments
        lp += (np.log(self.m)[self.c]).sum()
        return lp

    def initialize_hypers(self, W):
        # Empirical-Bayes initialization from the observed weights.
        mu_0 = W.mean(axis=(0,1))
        sigma_0 = np.diag(W.var(axis=(0,1)))

        # Set the global cov
        nu_0 = self._cov_model.nu_0
        self._cov_model.sigma_0 = sigma_0 * (nu_0 - self.B - 1)

        # Set the mean
        for c1 in range(self.C):
            for c2 in range(self.C):
                self._gaussians[c1][c2].mu_0 = mu_0
                self._gaussians[c1][c2].sigma = self._cov_model.sigma_0
                self._gaussians[c1][c2].resample()

        if self.special_case_self_conns:
            W_self = W[np.arange(self.N), np.arange(self.N)]
            self._self_gaussian.mu_0 = W_self.mean(axis=0)
            self._self_gaussian.sigma_0 = np.diag(W_self.var(axis=0))
            self._self_gaussian.resample()

        # Cluster the neurons based on their rows and columns
        from sklearn.cluster import KMeans
        features = np.hstack((W[:,:,0], W[:,:,0].T))
        km = KMeans(n_clusters=self.C)
        km.fit(features)
        self.c = km.labels_.astype(np.int)

        print("Initial c: ", self.c)
class LatentDistanceGaussianWeightDistribution(GaussianWeightDistribution, GibbsSampling):
    """
    Latent-distance model for Gaussian edge weights.

    l_n ~ N(0, eta I)
    W_{n', n} ~ N(-||l_{n'} - l_{n}||_2^2 + b, sigma)  for n' != n

    Self connections (the diagonal of W) are modeled by a separate
    Gaussian with a normal-inverse-Wishart prior.
    """
    def __init__(self, N, B=1, dim=2,
                 b=0.5,
                 sigma=None, Sigma_0=None, nu_0=None,
                 mu_self=0.0, eta=0.01):
        """
        Initialize the latent-distance model with parameters defined above.

        :param N: number of nodes.
        :param B: weight dimensionality (the HMC sampler assumes B == 1).
        :param dim: dimensionality of the latent embedding space.
        :param b: scalar offset added to the negative squared distance.
        :param sigma: optional initial weight covariance.
        :param Sigma_0: inverse-Wishart scale prior (defaults to I_B).
        :param nu_0: inverse-Wishart degrees of freedom (defaults to B + 2).
        :param mu_self: prior mean for the self-connection weights.
        :param eta: prior variance of the latent locations.
        """
        super(LatentDistanceGaussianWeightDistribution, self).__init__(N)
        self.B = B
        self.dim = dim
        self.b = b
        self.eta = eta
        self.L = np.sqrt(eta) * np.random.randn(N, dim)
        if Sigma_0 is None:
            Sigma_0 = np.eye(B)
        if nu_0 is None:
            nu_0 = B + 2
        self.cov = GaussianFixedMean(mu=np.zeros(B), sigma=sigma, lmbda_0=Sigma_0, nu_0=nu_0)
        # Special case self-weights (along the diagonal)
        self._self_gaussian = Gaussian(mu_0=mu_self*np.ones(B),
                                       sigma_0=Sigma_0,
                                       nu_0=nu_0,
                                       kappa_0=1.0)

    @property
    def D(self):
        # Pairwise *squared* Euclidean distances between latent locations, (N, N).
        return ((self.L[:, None, :] - self.L[None, :, :]) ** 2).sum(2)

    @property
    def Mu(self):
        # Per-edge mean weight: -distance^2 + b, tiled over the B weight
        # dimensions; the diagonal is replaced with the self-connection mean.
        Mu = -self.D + self.b
        Mu = np.tile(Mu[:, :, None], (1, 1, self.B))
        for n in range(self.N):
            Mu[n, n, :] = self._self_gaussian.mu
        return Mu

    @property
    def Sigma(self):
        # Shared covariance for every off-diagonal edge; the diagonal uses
        # the self-connection covariance.
        sig = self.cov.sigma
        Sig = np.tile(sig[None, None, :, :], (self.N, self.N, 1, 1))
        for n in range(self.N):
            Sig[n, n, :, :] = self._self_gaussian.sigma
        return Sig

    def initialize_from_prior(self):
        # Draw fresh latent locations and a fresh covariance from the prior.
        self.L = np.sqrt(self.eta) * np.random.randn(self.N, self.dim)
        self.cov.resample()

    def initialize_hypers(self, W):
        # Optimize the initial locations to explain the observed weights.
        self._optimize_L(np.ones((self.N, self.N)), W)

    def log_prior(self):
        """
        Compute the log prior probability of L, b, eta and the covariances.
        """
        from graphistician.internals.utils import \
            normal_inverse_wishart_log_prob, \
            inverse_wishart_log_prob
        from scipy.stats import norm, invgamma
        lp = 0
        # Hyperpriors on eta and b, spherical Gaussian prior on L.
        lp += invgamma.logpdf(self.eta, 1, 1)
        lp += norm.logpdf(self.b, 0, 1)
        lp += norm.logpdf(self.L, 0, 1).sum()
        lp += inverse_wishart_log_prob(self.cov)
        lp += normal_inverse_wishart_log_prob(self._self_gaussian)
        return lp

    def _hmc_log_probability(self, L, b, A, W):
        """
        Compute the log probability as a function of L.
        This allows us to take the gradients wrt L using autograd.

        :param L: (N, dim) latent locations.
        :param b: scalar mean offset.
        :param A: (N, N) adjacency matrix.
        :param W: (N, N, B) observed weights.
        """
        assert self.B == 1
        import autograd.numpy as anp
        # Compute pairwise squared distances and the implied edge means.
        L1 = anp.reshape(L, (self.N, 1, self.dim))
        L2 = anp.reshape(L, (1, self.N, self.dim))
        Mu = -anp.sum((L1-L2)**2, axis=2) + b
        # Only off-diagonal, present edges contribute to the likelihood.
        Aoff = A * (1-anp.eye(self.N))
        X = (W - Mu[:, :, None]) * Aoff[:, :, None]
        # Gaussian log likelihood with precision 1/sigma.
        Sig = self.cov.sigma[0, 0]
        Lmb = 1./Sig
        lp = anp.sum(-0.5 * X**2 * Lmb)
        # Log prior of L under spherical Gaussian prior
        lp += -0.5 * anp.sum(L * L / self.eta)
        # Log prior of b under standard Gaussian prior
        lp += -0.5 * b ** 2
        return lp

    def sample_predictive_parameters(self):
        # Append one new location drawn from the prior, then return the
        # row/column means and Cholesky factors for the extended model.
        Lext = \
            np.vstack((self.L, np.sqrt(self.eta) * np.random.randn(1, self.dim)))
        # Mean over the extended space; the last entry is the new node.
        D = ((Lext[:, None, :] - Lext[None, :, :])**2).sum(2)
        Mu = -D + self.b
        Mu_row = np.tile(Mu[-1, :][:, None], (1, self.B))
        Mu_row[-1] = self._self_gaussian.mu
        Mu_col = Mu_row.copy()
        L = np.linalg.cholesky(self.cov.sigma)
        L_row = np.tile(L[None, :, :], (self.N+1, 1, 1))
        L_row[-1] = np.linalg.cholesky(self._self_gaussian.sigma)
        L_col = L_row.copy()
        return Mu_row, Mu_col, L_row, L_col

    def resample(self, A, W):
        # One Gibbs sweep over all latent variables given (A, W).
        self._resample_L(A, W)
        self._resample_b(A, W)
        self._resample_cov(A, W)
        self._resample_self_gaussian(A, W)
        self._resample_eta()

    def _resample_L(self, A, W):
        """
        Resample the locations given A using HMC.
        """
        from autograd import grad
        from hips.inference.hmc import hmc
        lp = lambda L: self._hmc_log_probability(L, self.b, A, W)
        dlp = grad(lp)
        stepsz = 0.005
        nsteps = 10
        self.L = hmc(lp, dlp, stepsz, nsteps, self.L.copy(),
                     negative_log_prob=False)

    def _optimize_L(self, A, W):
        """
        Optimize the locations given A with BFGS (used for initialization).
        """
        import autograd.numpy as anp
        from autograd import grad
        from scipy.optimize import minimize
        lp = lambda Lflat: \
            -self._hmc_log_probability(anp.reshape(Lflat, (self.N, 2)),
                                       self.b, A, W)
        dlp = grad(lp)
        res = minimize(lp, np.ravel(self.L), jac=dlp, method="bfgs")
        self.L = np.reshape(res.x, (self.N, 2))

    def _resample_b_hmc(self, A, W):
        """
        Resample the distance dependence offset with HMC.
        """
        # TODO: We could sample from the exact Gaussian conditional
        from autograd import grad
        from hips.inference.hmc import hmc
        lp = lambda b: self._hmc_log_probability(self.L, b, A, W)
        dlp = grad(lp)
        stepsz = 0.0001
        nsteps = 10
        b = hmc(lp, dlp, stepsz, nsteps,
                np.array(self.b),
                negative_log_prob=False)
        self.b = float(b)
        print("b: ", self.b)

    def _resample_b(self, A, W):
        """
        Resample the distance dependence offset in closed form.

        W ~ N(mu, sigma) = N(-D + b, sigma)  implies  W + D ~ N(b, sigma).
        With b ~ N(0, 1) the Gaussian conditional is available exactly.
        """
        D = self.D
        sigma = self.cov.sigma[0, 0]
        # np.bool was removed from NumPy (1.24); plain bool is the replacement.
        Aoff = (A * (1-np.eye(self.N))).astype(bool)
        X = (W + D[:, :, None])[Aoff]
        # Now X ~ N(b, sigma)
        mu0, sigma0 = 0.0, 1.0
        N = X.size
        sigma_post = 1./(1./sigma0 + N/sigma)
        mu_post = sigma_post * (mu0 / sigma0 + X.sum()/sigma)
        self.b = mu_post + np.sqrt(sigma_post) * np.random.randn()

    def _resample_cov(self, A, W):
        # Resample the shared covariance from off-diagonal, present edges.
        # (The original `True - np.eye(..., dtype=np.bool)` relied on boolean
        # subtraction, which modern NumPy rejects; use logical negation.)
        Mu = self.Mu
        mask = (~np.eye(self.N, dtype=bool)) & A.astype(bool)
        self.cov.resample(W[mask] - Mu[mask])

    def _resample_self_gaussian(self, A, W):
        # Resample the self-connection Gaussian from present diagonal entries.
        mask = np.eye(self.N, dtype=bool) & A.astype(bool)
        self._self_gaussian.resample(W[mask])

    def _resample_eta(self):
        """
        Resample eta under an inverse gamma prior, eta ~ IG(1, 1).
        """
        L = self.L
        a_prior = 1.0
        b_prior = 1.0
        a_post = a_prior + L.size / 2.0
        b_post = b_prior + (L**2).sum() / 2.0
        from scipy.stats import invgamma
        self.eta = invgamma.rvs(a=a_post, scale=b_post)

    def plot(self, A, W, ax=None, L_true=None):
        """
        If dim == 2, plot the embedded nodes and the connections between them.

        :param L_true: If given, rotate the inferred locations to match L_true.
        :return: the matplotlib axis that was drawn on.
        """
        import matplotlib.pyplot as plt
        import matplotlib.cm as cm
        # Color each edge by its (first) weight component.
        # NOTE(review): cm.get_cmap is deprecated/removed in recent Matplotlib;
        # switch to matplotlib.colormaps["RdBu"] when upgrading.
        cmap = cm.get_cmap("RdBu")
        W_lim = abs(W[:, :, 0]).max()
        W_rel = (W[:, :, 0] - (-W_lim)) / (2*W_lim)
        assert self.dim == 2, "Can only plot for D==2"
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111, aspect="equal")
        # If true locations are given, rotate L to match L_true
        L = self.L.copy()
        if L_true is not None:
            from graphistician.internals.utils import compute_optimal_rotation
            R = compute_optimal_rotation(L, L_true)
            L = L.dot(R)
        # Plot the edges between nodes
        for n1 in range(self.N):
            for n2 in range(self.N):
                if A[n1, n2]:
                    ax.plot([L[n1, 0], L[n2, 0]],
                            [L[n1, 1], L[n2, 1]],
                            '-', color=cmap(W_rel[n1, n2]),
                            lw=1.0)
        # Scatter plot the node embeddings
        ax.plot(L[:, 0], L[:, 1], 's', color='k', markerfacecolor='k', markeredgecolor='k')
        # Get extreme feature values
        b = np.amax(abs(L)) + L[:].std() / 2.0
        # Plot grids for origin
        ax.plot([0, 0], [-b, b], ':k', lw=0.5)
        ax.plot([-b, b], [0, 0], ':k', lw=0.5)
        # Set the limits
        ax.set_xlim([-b, b])
        ax.set_ylim([-b, b])
        # Labels
        ax.set_xlabel('Latent Dimension 1')
        ax.set_ylabel('Latent Dimension 2')
        plt.show()
        return ax
"mohsenhadianpour@gmail.com"
] | mohsenhadianpour@gmail.com |
17f42d961d07ae670ad5a9895a53fe22b9e5e27b | 3def27e101ca346af6b30247769719f5cd5a27c0 | /indiaos/config/docs.py | 95939bb230467499359e5175e7a089a84d88ecaa | [
"MIT"
] | permissive | anto-christo/indiaos | 9fb94527092570981288b42a05001cf33b61b522 | 4b029cf86e49dcabad852312293e6fa5116d4155 | refs/heads/master | 2020-09-13T19:50:15.187112 | 2019-11-20T08:30:13 | 2019-11-20T08:30:13 | 222,887,034 | 1 | 0 | NOASSERTION | 2019-11-20T08:24:27 | 2019-11-20T08:24:26 | null | UTF-8 | Python | false | false | 313 | py | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/indiaos"
# docs_base_url = "https://[org_name].github.io/indiaos"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
    """Populate website template context: set the brand shown in the navbar."""
    setattr(context, "brand_html", "IndiaOS")
| [
"scm.mymail@gmail.com"
] | scm.mymail@gmail.com |
9ccab3e9d9e733de3424fdc912d659bc0939faca | c5a36f358c0635f218f9239ad3a7e932fa4d64cd | /google_search.py | 099fab95bc0b411c30301f8e857f91a65e6ddd0c | [] | no_license | vijama1/VoiceControlProject-1 | e0121104e6145d18f79cf0c2d3513f2b80e657a9 | 634765207752073b89624954bc7694d4ffaacea8 | refs/heads/master | 2020-07-09T13:34:08.131022 | 2018-06-03T05:47:59 | 2018-06-03T05:47:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | # imports
import webbrowser
import tts
import SpeechRego as sr
#------------------------------------------------------------------------------------
# search gogle function
def search_google(filtered_query):
    """Open a Google search for the meaningful words in a voice query.

    filtered_query -- list of lowercase words from the recognized speech.
    Returns 1 once a search tab has been opened.
    """
    result = 0
    # filler/command words that should not be part of the search string
    remv_keywords = ['search','google','on','about','the','find','ok','do','a']
    # final list of words to be searched
    final_key=[]
    # keep only the words that are not filler keywords
    for word in filtered_query:
        if word in remv_keywords:
            pass
        else:
            final_key.append(word)
    # if no keyword was given, ask the user (by voice) and recurse
    if final_key == [] :
        tts.convert_text_n_speak("What should i search")
        user_input = sr.get_audio_to_text()
        user_input = user_input.lower().strip().split()
        result = search_google(user_input)
    # result != 0 means the recursive call already searched; avoid running twice
    if result == 0 :
        # build the space-separated search string
        search_words=str()
        for word in final_key:
            search_words = search_words + ' ' + word
        print('searching for '+search_words)
        webbrowser.open_new_tab('https://www.google.com/search?q='+search_words)
        tts.convert_text_n_speak("Search results ready")
        result = 1
    return result
| [
"kumbhaj.abhishek1289@gmail.com"
] | kumbhaj.abhishek1289@gmail.com |
3917b640dda76239b40a71a612881b23d4d36377 | 59bf003a0397e2b6db1d474a0afaa90e52fb8ec9 | /problems/lc_211.py | 45f2ff7c58a2dbbf989703bbf647f32c5d65253f | [] | no_license | 224nth/leetcode | cd3c43a6173196628e1495e13776cd7c2a0a52f0 | 63bd7c91ef747ca553e374422aa96df14329702d | refs/heads/master | 2022-07-15T03:31:34.704770 | 2020-05-21T01:28:34 | 2020-05-21T01:28:34 | 259,326,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | import string
class Node:
    """A trie node: one character, an end-of-word flag, and 26 child slots."""

    def __init__(self, c, end=False):
        self.c = c      # the character stored at this node
        self.end = end  # True if a word terminates at this node
        # One slot per lowercase letter; None means "no child yet".
        # Stored per instance: the original used a single class-level list
        # shared by every node (a mutable-class-attribute trap).
        self.children = [None] * 26

    def add_children(self):
        """Populate all 26 child slots with fresh nodes, one per letter."""
        self.children = [Node(c) for c in string.ascii_lowercase]
class WordDictionary:
    """LeetCode 211: trie-backed dictionary with '.' single-letter wildcard."""

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.start = Node('-1')

    def addWord(self, word: str) -> None:
        """
        Adds a word into the data structure.
        """
        temp = self.start
        for i in range(len(word)):
            if temp.children[ord(word[i])-ord('a')] is None:
                temp.add_children()
            temp = temp.children[ord(word[i])-ord('a')]
            if i+1 == len(word):
                temp.end = True

    def search(self, word: str) -> bool:
        """
        Returns if the word is in the data structure. A word could contain
        the dot character '.' to represent any one letter.
        """
        return self._search_from(self.start, word, 0)

    def _search_from(self, node, word: str, i: int) -> bool:
        # Recursive helper: match word[i:] starting at `node`.
        if i == len(word):
            return node.end
        c = word[i]
        if c == '.':
            # Wildcard: try every existing child. (The original indexed
            # children[ord('.') - ord('a')] == children[-51], an out-of-range
            # negative index that raised IndexError instead of matching.)
            return any(child is not None and self._search_from(child, word, i + 1)
                       for child in node.children)
        child = node.children[ord(c) - ord('a')]
        return child is not None and self._search_from(child, word, i + 1)
# Ad-hoc smoke test (runs on import; there is no __main__ guard).
d = WordDictionary()
d.addWord("bad")
d.addWord("dad")
d.addWord("mad")
print(d.search("pad"))
print(d.search("bad"))
# NOTE(review): this query uses the '.' wildcard; verify search() handles '.'
# rather than indexing children[ord('.') - ord('a')].
print(d.search(".add"))
"nthapa@patreon.com"
] | nthapa@patreon.com |
01045278e80a129bf605929c5312c1b151934d0d | c2dccc91370a52570ed294af4b864e0459ec713c | /loaddata.py | 117025eb85ab92fcbfee390e42af050fe5dba779 | [] | no_license | gaozhengjie/teaching-document-management-system | 9d49abec512d04937696bb42ad6aab99d8c9dc1e | 927b7d3741d3faafe9a27b189757fd0c0dd40afb | refs/heads/master | 2020-04-30T22:01:40.858749 | 2019-03-24T13:03:24 | 2019-03-24T13:03:24 | 177,107,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | import pymysql
def loadDataSet():
host = "localhost"
user = "root"
passwd = "1q2w3e"
database = "file_manage"
charset = 'utf8'
use_unicode = True
# 打开数据库连接
db_conn = pymysql.connect(host=host, user=user, passwd=passwd, db=database, charset=charset, use_unicode=use_unicode)
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = db_conn.cursor()
cursor.execute("SELECT user_id FROM file_manage_app_user_item GROUP BY user_id;")
user_id_list = cursor.fetchall()
data = []
for user_id in user_id_list:
# print(news_id[0])
cursor.execute("SELECT file_id FROM file_manage_app_user_item WHERE user_id=%s;", user_id[0])
file_id_list = cursor.fetchall()
file = []
for each_file in file_id_list:
file.append(each_file[0])
data.append(file)
cursor.execute("SELECT file_id FROM file_manage_app_user_item GROUP BY file_id;")
file_id_list = cursor.fetchall()
candidates = []
for each_file in file_id_list:
candidates.append(each_file[0])
db_conn.close() # 关闭数据库连接
return data, candidates
| [
"noreply@github.com"
] | noreply@github.com |
fe0b5b9dd5b24d4e2028f101536f601fef3323df | 63be9d3c74144bb4f575bbc3eae65bdd92b51dab | /NeRFs/TorsoNeRF/run_nerf.py | bd991db35882a9705577db8d73aa4c005c7a0d82 | [] | no_license | bruinxiong/AD-NeRF | c2e059b83e342d1a34394d8373e0a4a3c1968f24 | a1dc83f27b6f757069ec27dbd81851a3f027d8d8 | refs/heads/master | 2023-06-30T10:31:31.724279 | 2021-07-23T10:14:24 | 2021-07-23T10:14:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,138 | py | from load_audface import load_audface_data, load_test_data
import os
import sys
import numpy as np
import imageio
import json
import random
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm, trange
from natsort import natsorted
import cv2
from run_nerf_helpers import *
device = torch.device('cuda', 0)
device_torso = torch.device('cuda', 0)
np.random.seed(0)
DEBUG = False
def rot_to_euler(R):
    """
    Convert a batch of rotation matrices to Euler angles.

    :param R: (batch, 3, 3) rotation matrices (extra rows/cols are ignored).
    :returns: (batch, 3) Euler angles.
    """
    batch_size, _, _ = R.shape
    # Allocate on the same device as the input. The original hard-coded
    # .cuda(), which crashed on CPU-only machines and mismatched devices
    # whenever R lived on a non-default GPU.
    e = torch.ones((batch_size, 3), device=R.device)

    R00 = R[:, 0, 0]
    R01 = R[:, 0, 1]
    R02 = R[:, 0, 2]
    R12 = R[:, 1, 2]
    R22 = R[:, 2, 2]

    e[:, 2] = torch.atan2(R00, -R01)
    e[:, 1] = torch.asin(-R02)
    e[:, 0] = torch.atan2(R22, R12)
    return e
def pose_to_euler_trans(poses):
    """Convert (batch, 3/4, 4) pose matrices to (batch, 6) [euler(3), trans(3)]."""
    # rot_to_euler only reads entries [:, i, j] with i, j < 3, so passing the
    # full pose matrix is fine.
    e = rot_to_euler(poses)
    t = poses[:, :3, 3]  # translation column
    return torch.cat((e, t), dim=1)
def euler2rot(euler_angle):
    """Convert a batch of (rx, ry, rz) Euler angles to (batch, 3, 3) rotations."""
    n = euler_angle.shape[0]
    rx = euler_angle[:, 0].reshape(-1, 1, 1)
    ry = euler_angle[:, 1].reshape(-1, 1, 1)
    rz = euler_angle[:, 2].reshape(-1, 1, 1)
    ones = torch.ones((n, 1, 1), dtype=torch.float32,
                      device=euler_angle.device)
    zeros = torch.zeros_like(ones)
    cx, sx = rx.cos(), rx.sin()
    cy, sy = ry.cos(), ry.sin()
    cz, sz = rz.cos(), rz.sin()
    # Each inner cat builds one column (dim 1), the outer cat stacks the
    # three columns along dim 2, giving (n, 3, 3) per-axis rotations.
    rot_x = torch.cat((
        torch.cat((ones, zeros, zeros), 1),
        torch.cat((zeros, cx, sx), 1),
        torch.cat((zeros, -sx, cx), 1),
    ), 2)
    rot_y = torch.cat((
        torch.cat((cy, zeros, -sy), 1),
        torch.cat((zeros, ones, zeros), 1),
        torch.cat((sy, zeros, cy), 1),
    ), 2)
    rot_z = torch.cat((
        torch.cat((cz, -sz, zeros), 1),
        torch.cat((sz, cz, zeros), 1),
        torch.cat((zeros, zeros, ones), 1),
    ), 2)
    # Compose: R = Rx @ (Ry @ Rz), batched.
    return rot_x @ (rot_y @ rot_z)
def batchify(fn, chunk):
    """Constructs a version of 'fn' that applies to smaller batches.

    If chunk is None, fn is returned unchanged; otherwise the wrapper
    splits the input along dim 0 into chunk-sized pieces, applies fn to
    each, and concatenates the results.
    """
    if chunk is None:
        return fn

    def batched(inputs):
        pieces = [fn(inputs[start:start + chunk])
                  for start in range(0, inputs.shape[0], chunk)]
        return torch.cat(pieces, 0)
    return batched
def run_network(inputs, viewdirs, aud_para, fn, embed_fn, embeddirs_fn, netchunk=1024*64):
    """Prepares inputs and applies network 'fn'.

    inputs    -- (..., 3) sampled 3-D points
    viewdirs  -- per-ray viewing directions expanded to inputs' shape, or None
    aud_para  -- audio feature vector, broadcast to every sampled point
    fn        -- the NeRF MLP
    embed_fn / embeddirs_fn -- positional-encoding functions
    netchunk  -- number of points pushed through the MLP at once
    """
    inputs_flat = torch.reshape(inputs, [-1, inputs.shape[-1]])
    embedded = embed_fn(inputs_flat)
    # Attach the (shared) audio code to every embedded sample point.
    aud = aud_para.unsqueeze(0).expand(inputs_flat.shape[0], -1)
    embedded = torch.cat((embedded, aud), -1)
    if viewdirs is not None:
        input_dirs = viewdirs[:, None].expand(inputs.shape)
        input_dirs_flat = torch.reshape(input_dirs, [-1, input_dirs.shape[-1]])
        embedded_dirs = embeddirs_fn(input_dirs_flat)
        embedded = torch.cat([embedded, embedded_dirs], -1)
    # Run the MLP in chunks to bound memory, then restore the input shape.
    outputs_flat = batchify(fn, netchunk)(embedded)
    outputs = torch.reshape(outputs_flat, list(
        inputs.shape[:-1]) + [outputs_flat.shape[-1]])
    return outputs
def batchify_rays(rays_flat, bc_rgb, aud_para, chunk=1024*32, **kwargs):
    """Render rays in smaller minibatches to avoid OOM.

    rays_flat -- (N, 8 or 11) packed rays (origin, dir, near, far[, viewdir])
    bc_rgb    -- (N, 3) background pixel colors aligned with the rays
    aud_para  -- audio feature conditioning the NeRF
    chunk     -- number of rays passed to render_rays per call
    """
    all_ret = {}
    for i in range(0, rays_flat.shape[0], chunk):
        ret = render_rays(rays_flat[i:i+chunk], bc_rgb[i:i+chunk],
                          aud_para, **kwargs)
        # Accumulate the per-chunk outputs keyed by name.
        for k in ret:
            if k not in all_ret:
                all_ret[k] = []
            all_ret[k].append(ret[k])
    all_ret = {k: torch.cat(all_ret[k], 0) for k in all_ret}
    return all_ret
def render_dynamic_face(H, W, focal, cx, cy, chunk=1024*32, rays=None, bc_rgb=None, aud_para=None,
                        c2w=None, ndc=True, near=0., far=1.,
                        use_viewdirs=False, c2w_staticcam=None,
                        **kwargs):
    """Render audio-conditioned rays, compositing onto a known background.

    Either pass c2w (render a full H x W image) or rays (a prepared ray
    batch). bc_rgb supplies the background colors, aud_para the audio
    conditioning code. Returns [rgb_map, disp_map, acc_map, last_weight,
    rgb_map_fg, extras_dict].
    """
    if c2w is not None:
        # special case to render full image
        rays_o, rays_d = get_rays(H, W, focal, c2w, cx, cy, c2w.device)
        bc_rgb = bc_rgb.reshape(-1, 3)
    else:
        # use provided ray batch
        rays_o, rays_d = rays
    if use_viewdirs:
        # provide ray directions as input
        viewdirs = rays_d
        if c2w_staticcam is not None:
            # special case to visualize effect of viewdirs
            rays_o, rays_d = get_rays(H, W, focal, c2w_staticcam, cx, cy)
        viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True)
        viewdirs = torch.reshape(viewdirs, [-1, 3]).float()
    sh = rays_d.shape  # [..., 3]
    if ndc:
        # for forward facing scenes
        rays_o, rays_d = ndc_rays(H, W, focal, 1., rays_o, rays_d)
    # Create ray batch
    rays_o = torch.reshape(rays_o, [-1, 3]).float()
    rays_d = torch.reshape(rays_d, [-1, 3]).float()
    near, far = near * \
        torch.ones_like(rays_d[..., :1]), far * \
        torch.ones_like(rays_d[..., :1])
    rays = torch.cat([rays_o, rays_d, near, far], -1)
    if use_viewdirs:
        rays = torch.cat([rays, viewdirs], -1)
    # Render and reshape
    all_ret = batchify_rays(rays, bc_rgb, aud_para, chunk, **kwargs)
    for k in all_ret:
        k_sh = list(sh[:-1]) + list(all_ret[k].shape[1:])
        all_ret[k] = torch.reshape(all_ret[k], k_sh)
    k_extract = ['rgb_map', 'disp_map', 'acc_map', 'last_weight', 'rgb_map_fg']
    ret_list = [all_ret[k] for k in k_extract]
    ret_dict = {k: all_ret[k] for k in all_ret if k not in k_extract}
    return ret_list + [ret_dict]
def render(H, W, focal, cx, cy, chunk=1024*32, rays=None, c2w=None, ndc=True,
           near=0., far=1.,
           use_viewdirs=False, c2w_staticcam=None,
           **kwargs):
    """Render rays
    NOTE(review): this looks like the vanilla-NeRF render path kept from the
    original codebase. It calls batchify_rays(rays, chunk, **kwargs), but
    batchify_rays in this file requires (rays_flat, bc_rgb, aud_para, chunk),
    and it calls get_rays without the device argument used elsewhere —
    confirm it is unused (render_dynamic_face is the live path) before
    calling it.
    Args:
      H: int. Height of image in pixels.
      W: int. Width of image in pixels.
      focal: float. Focal length of pinhole camera.
      chunk: int. Maximum number of rays to process simultaneously. Used to
        control maximum memory usage. Does not affect final results.
      rays: array of shape [2, batch_size, 3]. Ray origin and direction for
        each example in batch.
      c2w: array of shape [3, 4]. Camera-to-world transformation matrix.
      ndc: bool. If True, represent ray origin, direction in NDC coordinates.
      near: float or array of shape [batch_size]. Nearest distance for a ray.
      far: float or array of shape [batch_size]. Farthest distance for a ray.
      use_viewdirs: bool. If True, use viewing direction of a point in space in model.
      c2w_staticcam: array of shape [3, 4]. If not None, use this transformation matrix for
       camera while using other c2w argument for viewing directions.
    Returns:
      rgb_map: [batch_size, 3]. Predicted RGB values for rays.
      disp_map: [batch_size]. Disparity map. Inverse of depth.
      acc_map: [batch_size]. Accumulated opacity (alpha) along a ray.
      extras: dict with everything returned by render_rays().
    """
    if c2w is not None:
        # special case to render full image
        rays_o, rays_d = get_rays(H, W, focal, c2w, cx, cy)
    else:
        # use provided ray batch
        rays_o, rays_d = rays
    if use_viewdirs:
        # provide ray directions as input
        viewdirs = rays_d
        if c2w_staticcam is not None:
            # special case to visualize effect of viewdirs
            rays_o, rays_d = get_rays(H, W, focal, c2w_staticcam, cx, cy)
        viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True)
        viewdirs = torch.reshape(viewdirs, [-1, 3]).float()
    sh = rays_d.shape  # [..., 3]
    if ndc:
        # for forward facing scenes
        rays_o, rays_d = ndc_rays(H, W, focal, 1., rays_o, rays_d)
    # Create ray batch
    rays_o = torch.reshape(rays_o, [-1, 3]).float()
    rays_d = torch.reshape(rays_d, [-1, 3]).float()
    near, far = near * \
        torch.ones_like(rays_d[..., :1]), far * \
        torch.ones_like(rays_d[..., :1])
    rays = torch.cat([rays_o, rays_d, near, far], -1)
    if use_viewdirs:
        rays = torch.cat([rays, viewdirs], -1)
    # Render and reshape
    all_ret = batchify_rays(rays, chunk, **kwargs)
    for k in all_ret:
        k_sh = list(sh[:-1]) + list(all_ret[k].shape[1:])
        all_ret[k] = torch.reshape(all_ret[k], k_sh)
    k_extract = ['rgb_map', 'disp_map', 'acc_map']
    ret_list = [all_ret[k] for k in k_extract]
    ret_dict = {k: all_ret[k] for k in all_ret if k not in k_extract}
    return ret_list + [ret_dict]
def render_path(render_poses, aud_paras, bc_img, hwfcxy,
                chunk, render_kwargs, gt_imgs=None, savedir=None, render_factor=0):
    """Render a sequence of poses with per-frame audio conditioning.

    render_poses  -- iterable of camera-to-world matrices
    aud_paras     -- per-frame audio features, indexed alongside the poses
    bc_img        -- background image composited behind the face
    hwfcxy        -- (H, W, focal, cx, cy) intrinsics
    render_factor -- if non-zero, downsample H/W/focal by this factor for speed
    Returns stacked numpy arrays (rgbs, disps, last_weights, rgb_fgs).
    """
    H, W, focal, cx, cy = hwfcxy
    if render_factor != 0:
        # Render downsampled for speed
        H = H//render_factor
        W = W//render_factor
        focal = focal/render_factor
    rgbs = []
    disps = []
    last_weights = []
    rgb_fgs = []
    t = time.time()
    for i, c2w in enumerate(tqdm(render_poses)):
        print(i, time.time() - t)
        t = time.time()
        rgb, disp, acc, last_weight, rgb_fg, _ = render_dynamic_face(
            H, W, focal, cx, cy, chunk=chunk, c2w=c2w[:3,
                                                      :4], aud_para=aud_paras[i], bc_rgb=bc_img,
            **render_kwargs)
        rgbs.append(rgb.cpu().numpy())
        disps.append(disp.cpu().numpy())
        last_weights.append(last_weight.cpu().numpy())
        rgb_fgs.append(rgb_fg.cpu().numpy())
        # if i == 0:
        #     print(rgb.shape, disp.shape)
        """
        if gt_imgs is not None and render_factor==0:
            p = -10. * np.log10(np.mean(np.square(rgb.cpu().numpy() - gt_imgs[i])))
            print(p)
        """
        if savedir is not None:
            rgb8 = to8b(rgbs[-1])
            filename = os.path.join(savedir, '{:03d}.png'.format(i))
            imageio.imwrite(filename, rgb8)
    rgbs = np.stack(rgbs, 0)
    disps = np.stack(disps, 0)
    last_weights = np.stack(last_weights, 0)
    rgb_fgs = np.stack(rgb_fgs, 0)
    return rgbs, disps, last_weights, rgb_fgs
def create_nerf(args, ext, dim_aud, device_spec=torch.device('cuda', 0), with_audatt=False):
    """Instantiate NeRF's MLP model.

    args        -- parsed config (see config_parser)
    ext         -- substring used to locate checkpoint files in the log dir
    dim_aud     -- dimensionality of the audio feature fed to FaceNeRF
    device_spec -- device for the models and embedders
    with_audatt -- also restore the audio-attention network state if True
    Returns (render_kwargs_train, render_kwargs_test, start, grad_vars,
    optimizer, learned_codes_dict, AudNet_state, optimizer_aud_state,
    AudAttNet_state).
    """
    embed_fn, input_ch = get_embedder(
        args.multires, args.i_embed, device=device_spec)
    input_ch_views = 0
    embeddirs_fn = None
    if args.use_viewdirs:
        embeddirs_fn, input_ch_views = get_embedder(
            args.multires_views, args.i_embed, device=device_spec)
    # Extra channel for the coarse-to-fine pipeline when fine sampling is on.
    output_ch = 5 if args.N_importance > 0 else 4
    skips = [4]
    model = FaceNeRF(D=args.netdepth, W=args.netwidth,
                     input_ch=input_ch, dim_aud=dim_aud,
                     output_ch=output_ch, skips=skips,
                     input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device_spec)
    grad_vars = list(model.parameters())
    model_fine = None
    if args.N_importance > 0:
        model_fine = FaceNeRF(D=args.netdepth_fine, W=args.netwidth_fine,
                              input_ch=input_ch, dim_aud=dim_aud,
                              output_ch=output_ch, skips=skips,
                              input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device_spec)
        grad_vars += list(model_fine.parameters())

    def network_query_fn(inputs, viewdirs, aud_para, network_fn): \
        return run_network(inputs, viewdirs, aud_para, network_fn,
                           embed_fn=embed_fn, embeddirs_fn=embeddirs_fn, netchunk=args.netchunk)
    # Create optimizer
    optimizer = torch.optim.Adam(
        params=grad_vars, lr=args.lrate, betas=(0.9, 0.999))
    start = 0
    basedir = args.basedir
    expname = args.expname
    ##########################
    # Load checkpoints
    if args.ft_path is not None and args.ft_path != 'None':
        ckpts = [args.ft_path]
    else:
        ckpts = [os.path.join(basedir, expname, f) for f in natsorted(
            os.listdir(os.path.join(basedir, expname))) if ext in f]
    print('Found ckpts', ckpts)
    learned_codes_dict = None
    AudNet_state = None
    optimizer_aud_state = None
    AudAttNet_state = None
    if len(ckpts) > 0 and not args.no_reload:
        # Resume from the newest matching checkpoint.
        ckpt_path = ckpts[-1]
        print('Reloading from', ckpt_path)
        # NOTE(review): map_location uses the module-level `device`, not
        # device_spec — confirm this is intended when they differ.
        ckpt = torch.load(ckpt_path, map_location=device)
        start = ckpt['global_step']
        optimizer.load_state_dict(ckpt['optimizer_state_dict'])
        AudNet_state = ckpt['network_audnet_state_dict']
        optimizer_aud_state = ckpt['optimizer_aud_state_dict']
        if with_audatt:
            AudAttNet_state = ckpt['network_audattnet_state_dict']
        # Load model
        model.load_state_dict(ckpt['network_fn_state_dict'])
        if model_fine is not None:
            model_fine.load_state_dict(ckpt['network_fine_state_dict'])
    ##########################
    render_kwargs_train = {
        'network_query_fn': network_query_fn,
        'perturb': args.perturb,
        'N_importance': args.N_importance,
        'network_fine': model_fine,
        'N_samples': args.N_samples,
        'network_fn': model,
        'use_viewdirs': args.use_viewdirs,
        'white_bkgd': args.white_bkgd,
        'raw_noise_std': args.raw_noise_std,
    }
    # NDC only good for LLFF-style forward facing data
    if args.dataset_type != 'llff' or args.no_ndc:
        print('Not ndc!')
        render_kwargs_train['ndc'] = False
        render_kwargs_train['lindisp'] = args.lindisp
    # Test-time rendering: deterministic sampling, no density noise.
    render_kwargs_test = {
        k: render_kwargs_train[k] for k in render_kwargs_train}
    render_kwargs_test['perturb'] = False
    render_kwargs_test['raw_noise_std'] = 0.
    return render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer, learned_codes_dict, \
        AudNet_state, optimizer_aud_state, AudAttNet_state
def raw2outputs(raw, z_vals, rays_d, bc_rgb, raw_noise_std=0, white_bkgd=False, pytest=False):
    """Transforms model's predictions to semantically meaningful values.
    Args:
        raw: [num_rays, num_samples along ray, 4]. Prediction from model.
        z_vals: [num_rays, num_samples along ray]. Integration time.
        rays_d: [num_rays, 3]. Direction of each ray.
        bc_rgb: [num_rays, 3]. Background pixel colors; the final sample's
            color is replaced by these so rays composite onto the background.
    Returns:
        rgb_map: [num_rays, 3]. Estimated RGB color of a ray.
        disp_map: [num_rays]. Disparity map. Inverse of depth map.
        acc_map: [num_rays]. Sum of weights along each ray.
        weights: [num_rays, num_samples]. Weights assigned to each sampled color.
        depth_map: [num_rays]. Estimated distance to object.
        rgb_map_fg: [num_rays, 3]. Foreground-only color (last sample excluded).
    """
    def raw2alpha(raw, dists, act_fn=F.relu): return 1. - \
        torch.exp(-(act_fn(raw)+1e-6)*dists)

    dists = z_vals[..., 1:] - z_vals[..., :-1]
    # torch.tensor (not the legacy torch.Tensor constructor, which rejects
    # CUDA devices) so the "infinite" last interval lives on the right device.
    dists = torch.cat([dists, torch.tensor([1e10], device=z_vals.device).expand(
        dists[..., :1].shape)], -1)  # [N_rays, N_samples]
    dists = dists * torch.norm(rays_d[..., None, :], dim=-1)

    rgb = torch.sigmoid(raw[..., :3])  # [N_rays, N_samples, 3]
    # Replace the last sample's color with the known background color.
    rgb = torch.cat((rgb[:, :-1, :], bc_rgb.unsqueeze(1)), dim=1)
    noise = 0.
    if raw_noise_std > 0.:
        # Regularizing noise on the predicted density, created on raw's
        # device (the original allocated it on CPU and crashed on GPU runs).
        noise = torch.randn(raw[..., 3].shape, device=raw.device) * raw_noise_std

        # Overwrite randomly sampled data if pytest
        if pytest:
            np.random.seed(0)
            noise = np.random.rand(*list(raw[..., 3].shape)) * raw_noise_std
            noise = torch.tensor(noise, dtype=torch.float32, device=raw.device)

    alpha = raw2alpha(raw[..., 3] + noise, dists)  # [N_rays, N_samples]
    # Standard NeRF compositing: weight = alpha * prod(1 - alpha) up the ray.
    weights = alpha * \
        torch.cumprod(
            torch.cat([torch.ones((alpha.shape[0], 1), device=alpha.device), 1.-alpha + 1e-10], -1), -1)[:, :-1]
    rgb_map = torch.sum(weights[..., None] * rgb, -2)  # [N_rays, 3]
    rgb_map_fg = torch.sum(weights[:, :-1, None]*rgb[:, :-1, :], -2)

    depth_map = torch.sum(weights * z_vals, -1)
    disp_map = 1./torch.max(1e-10 * torch.ones_like(depth_map),
                            depth_map / torch.sum(weights, -1))
    acc_map = torch.sum(weights, -1)

    if white_bkgd:
        rgb_map = rgb_map + (1.-acc_map[..., None])

    return rgb_map, disp_map, acc_map, weights, depth_map, rgb_map_fg
def render_rays(ray_batch,
                bc_rgb,
                aud_para,
                network_fn,
                network_query_fn,
                N_samples,
                retraw=False,
                lindisp=False,
                perturb=0.,
                N_importance=0,
                network_fine=None,
                white_bkgd=False,
                raw_noise_std=0.,
                verbose=False,
                pytest=False):
    """Volumetric rendering.
    Args:
      ray_batch: array of shape [batch_size, ...]. All information necessary
        for sampling along a ray, including: ray origin, ray direction, min
        dist, max dist, and unit-magnitude viewing direction.
      bc_rgb: [batch_size, 3]. Background pixel colors for these rays.
      aud_para: audio feature conditioning the NeRF.
      network_fn: function. Model for predicting RGB and density at each point
        in space.
      network_query_fn: function used for passing queries to network_fn.
      N_samples: int. Number of different times to sample along each ray.
      retraw: bool. If True, include model's raw, unprocessed predictions.
      lindisp: bool. If True, sample linearly in inverse depth rather than in depth.
      perturb: float, 0 or 1. If non-zero, each ray is sampled at stratified
        random points in time.
      N_importance: int. Number of additional times to sample along each ray.
        These samples are only passed to network_fine.
      network_fine: "fine" network with same spec as network_fn.
      white_bkgd: bool. If True, assume a white background.
      raw_noise_std: ...
      verbose: bool. If True, print more debugging info.
    Returns:
      rgb_map: [num_rays, 3]. Estimated RGB color of a ray. Comes from fine model.
      disp_map: [num_rays]. Disparity map. 1 / depth.
      acc_map: [num_rays]. Accumulated opacity along each ray. Comes from fine model.
      raw: [num_rays, num_samples, 4]. Raw predictions from model.
      rgb0: See rgb_map. Output for coarse model.
      disp0: See disp_map. Output for coarse model.
      acc0: See acc_map. Output for coarse model.
      z_std: [num_rays]. Standard deviation of distances along ray for each
        sample.
    """
    N_rays = ray_batch.shape[0]
    # Unpack the ray batch: origin, direction, optional viewdirs, bounds.
    rays_o, rays_d = ray_batch[:, 0:3], ray_batch[:, 3:6]  # [N_rays, 3] each
    viewdirs = ray_batch[:, -3:] if ray_batch.shape[-1] > 8 else None
    bounds = torch.reshape(ray_batch[..., 6:8], [-1, 1, 2])
    near, far = bounds[..., 0], bounds[..., 1]  # [-1,1]
    t_vals = torch.linspace(0., 1., steps=N_samples, device=rays_o.device)
    if not lindisp:
        z_vals = near * (1.-t_vals) + far * (t_vals)
    else:
        z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals))
    z_vals = z_vals.expand([N_rays, N_samples])
    if perturb > 0.:
        # get intervals between samples
        mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
        upper = torch.cat([mids, z_vals[..., -1:]], -1)
        lower = torch.cat([z_vals[..., :1], mids], -1)
        # stratified samples in those intervals
        t_rand = torch.rand(z_vals.shape, device=rays_o.device)
        # Pytest, overwrite u with numpy's fixed random numbers
        if pytest:
            np.random.seed(0)
            t_rand = np.random.rand(*list(z_vals.shape))
            t_rand = torch.Tensor(t_rand).to(rays_o.device)
        # Pin the last sample so the background sample stays at the far bound.
        t_rand[..., -1] = 1.0
        z_vals = lower + (upper - lower) * t_rand
    pts = rays_o[..., None, :] + rays_d[..., None, :] * \
        z_vals[..., :, None]  # [N_rays, N_samples, 3]

    # raw = run_network(pts)
    raw = network_query_fn(pts, viewdirs, aud_para, network_fn)
    rgb_map, disp_map, acc_map, weights, depth_map, rgb_map_fg = raw2outputs(
        raw, z_vals, rays_d, bc_rgb, raw_noise_std, white_bkgd, pytest=pytest)
    if N_importance > 0:
        # Keep the coarse-model outputs, then resample along the ray
        # proportionally to the coarse weights (hierarchical sampling).
        rgb_map_0, disp_map_0, acc_map_0, last_weight_0, rgb_map_fg_0 = \
            rgb_map, disp_map, acc_map, weights[..., -1], rgb_map_fg
        z_vals_mid = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
        z_samples = sample_pdf(
            z_vals_mid, weights[..., 1:-1], N_importance, det=(perturb == 0.), pytest=pytest)
        z_samples = z_samples.detach()
        z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)
        pts = rays_o[..., None, :] + rays_d[..., None, :] * \
            z_vals[..., :, None]
        run_fn = network_fn if network_fine is None else network_fine
        raw = network_query_fn(pts, viewdirs, aud_para, run_fn)
        rgb_map, disp_map, acc_map, weights, depth_map, rgb_map_fg = raw2outputs(
            raw, z_vals, rays_d, bc_rgb, raw_noise_std, white_bkgd, pytest=pytest)
    ret = {'rgb_map': rgb_map, 'disp_map': disp_map,
           'acc_map': acc_map, 'rgb_map_fg': rgb_map_fg}
    if retraw:
        ret['raw'] = raw
    if N_importance > 0:
        ret['rgb0'] = rgb_map_0
        ret['disp0'] = disp_map_0
        ret['acc0'] = acc_map_0
        ret['z_std'] = torch.std(z_samples, dim=-1, unbiased=False)  # [N_rays]
        # NOTE(review): 'last_weight' is only set when N_importance > 0, yet
        # render_dynamic_face always extracts it — confirm N_importance > 0
        # is guaranteed by the configs.
        ret['last_weight'] = weights[..., -1]
        ret['last_weight0'] = last_weight_0
        ret['rgb_map_fg0'] = rgb_map_fg_0
    for k in ret:
        if (torch.isnan(ret[k]).any() or torch.isinf(ret[k]).any()) and DEBUG:
            print(f"! [Numerical Error] {k} contains nan or inf.")
    return ret
def config_parser():
    """Build the configargparse parser for this audio-driven NeRF script.

    Covers network size, optimization, rendering, dataset and logging
    options; values may come from the command line or a --config file.

    NOTE(review): --no_batching, --use_viewdirs and --white_bkgd use
    action='store_false' (i.e. they default to True and the flag turns the
    option OFF), which reads backwards for these names -- confirm intent
    before changing any of them.
    """
    import configargparse
    parser = configargparse.ArgumentParser()
    parser.add_argument('--config', is_config_file=True,
                        help='config file path')
    parser.add_argument("--expname", type=str,
                        help='experiment name')
    parser.add_argument("--basedir", type=str, default='./logs/',
                        help='where to store ckpts and logs')
    parser.add_argument("--datadir", type=str, default='./data/llff/fern',
                        help='input data directory')
    # training options
    parser.add_argument("--netdepth", type=int, default=8,
                        help='layers in network')
    parser.add_argument("--netwidth", type=int, default=256,
                        help='channels per layer')
    parser.add_argument("--netdepth_fine", type=int, default=8,
                        help='layers in fine network')
    parser.add_argument("--netwidth_fine", type=int, default=256,
                        help='channels per layer in fine network')
    parser.add_argument("--N_rand", type=int, default=1024,
                        help='batch size (number of random rays per gradient step)')
    parser.add_argument("--lrate", type=float, default=5e-4,
                        help='learning rate')
    parser.add_argument("--lrate_decay", type=int, default=500,
                        help='exponential learning rate decay (in 1000 steps)')
    parser.add_argument("--chunk", type=int, default=1024,
                        help='number of rays processed in parallel, decrease if running out of memory')
    parser.add_argument("--netchunk", type=int, default=1024*64,
                        help='number of pts sent through network in parallel, decrease if running out of memory')
    parser.add_argument("--no_batching", action='store_false',
                        help='only take random rays from 1 image at a time')
    parser.add_argument("--no_reload", action='store_true',
                        help='do not reload weights from saved ckpt')
    parser.add_argument("--ft_path", type=str, default=None,
                        help='specific weights npy file to reload for coarse network')
    parser.add_argument("--N_iters", type=int, default=400000,
                        help='number of iterations')
    # rendering options
    parser.add_argument("--N_samples", type=int, default=64,
                        help='number of coarse samples per ray')
    parser.add_argument("--N_importance", type=int, default=128,
                        help='number of additional fine samples per ray')
    parser.add_argument("--perturb", type=float, default=1.,
                        help='set to 0. for no jitter, 1. for jitter')
    parser.add_argument("--use_viewdirs", action='store_false',
                        help='use full 5D input instead of 3D')
    parser.add_argument("--i_embed", type=int, default=0,
                        help='set 0 for default positional encoding, -1 for none')
    parser.add_argument("--multires", type=int, default=10,
                        help='log2 of max freq for positional encoding (3D location)')
    parser.add_argument("--multires_views", type=int, default=4,
                        help='log2 of max freq for positional encoding (2D direction)')
    parser.add_argument("--raw_noise_std", type=float, default=0.,
                        help='std dev of noise added to regularize sigma_a output, 1e0 recommended')
    parser.add_argument("--render_only", action='store_true',
                        help='do not optimize, reload weights and render out render_poses path')
    parser.add_argument("--render_test", action='store_true',
                        help='render the test set instead of render_poses path')
    parser.add_argument("--render_factor", type=int, default=0,
                        help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')
    # training options
    parser.add_argument("--precrop_iters", type=int, default=0,
                        help='number of steps to train on central crops')
    parser.add_argument("--precrop_frac", type=float,
                        default=.5, help='fraction of img taken for central crops')
    # dataset options
    parser.add_argument("--dataset_type", type=str, default='audface',
                        help='options: llff / blender / deepvoxels')
    parser.add_argument("--testskip", type=int, default=1,
                        help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
    # deepvoxels flags
    parser.add_argument("--shape", type=str, default='greek',
                        help='options : armchair / cube / greek / vase')
    # blender flags
    parser.add_argument("--white_bkgd", action='store_false',
                        help='set to render synthetic data on a white bkgd (always use for dvoxels)')
    parser.add_argument("--half_res", action='store_true',
                        help='load blender synthetic data at 400x400 instead of 800x800')
    # face flags
    parser.add_argument("--with_test", type=int, default=0,
                        help='whether to test')
    parser.add_argument("--dim_aud", type=int, default=64,
                        help='dimension of audio features for NeRF')
    parser.add_argument("--dim_aud_body", type=int, default=64,
                        help='dimension of audio features for NeRF')
    parser.add_argument("--sample_rate", type=float, default=0.95,
                        help="sample rate in a bounding box")
    parser.add_argument("--near", type=float, default=0.3,
                        help="near sampling plane")
    parser.add_argument("--far", type=float, default=0.9,
                        help="far sampling plane")
    parser.add_argument("--test_pose_file", type=str, default='transforms_train.json',
                        help='test pose file')
    parser.add_argument("--aud_file", type=str, default='aud.npy',
                        help='test audio deepspeech file')
    parser.add_argument("--win_size", type=int, default=16,
                        help="windows size of audio feature")
    parser.add_argument("--smo_size", type=int, default=8,
                        help="window size for smoothing audio features")
    parser.add_argument('--test_size', type=int, default=-1,
                        help='test size')
    parser.add_argument('--aud_start', type=int, default=0,
                        help='test audio start pos')
    parser.add_argument('--test_save_folder', type=str, default='test_aud_rst',
                        help='folder to store test result')
    # llff flags
    parser.add_argument("--factor", type=int, default=8,
                        help='downsample factor for LLFF images')
    parser.add_argument("--no_ndc", action='store_true',
                        help='do not use normalized device coordinates (set for non-forward facing scenes)')
    parser.add_argument("--lindisp", action='store_true',
                        help='sampling linearly in disparity rather than depth')
    parser.add_argument("--spherify", action='store_true',
                        help='set for spherical 360 scenes')
    parser.add_argument("--llffhold", type=int, default=8,
                        help='will take every 1/N images as LLFF test set, paper uses 8')
    # logging/saving options
    parser.add_argument("--i_print", type=int, default=100,
                        help='frequency of console printout and metric loggin')
    parser.add_argument("--i_img", type=int, default=500,
                        help='frequency of tensorboard image logging')
    parser.add_argument("--i_weights", type=int, default=10000,
                        help='frequency of weight ckpt saving')
    parser.add_argument("--i_testset", type=int, default=10000,
                        help='frequency of testset saving')
    parser.add_argument("--i_video", type=int, default=50000,
                        help='frequency of render_poses video saving')
    return parser
def train():
    """Train (or, with --with_test, render) a two-stage audio-driven NeRF.

    A "head" NeRF is driven directly by smoothed audio features; a "torso"
    NeRF is driven by audio plus an embedding of the head pose. In this
    function only the torso branch is optimized (optimizer_torso.step());
    the head checkpoint 'head.tar' is loaded and kept as-is -- presumably a
    torso fine-tuning stage, confirm against the project's training recipe.
    """
    parser = config_parser()
    args = parser.parse_args()
    # Load data
    if args.with_test == 1:
        poses, auds, bc_img, hwfcxy, aud_ids, torso_pose = \
            load_test_data(args.datadir, args.aud_file,
                           args.test_pose_file, args.testskip, args.test_size, args.aud_start)
        torso_pose = torch.as_tensor(torso_pose).to(device_torso).float()
        com_images = np.zeros(1)
    else:
        com_images, poses, auds, bc_img, hwfcxy, sample_rects, \
            i_split = load_audface_data(args.datadir, args.testskip)
    if args.with_test == 0:
        i_train, i_val = i_split
    near = args.near
    far = args.far
    # Cast intrinsics to right types
    H, W, focal, cx, cy = hwfcxy
    H, W = int(H), int(W)
    hwf = [H, W, focal]
    hwfcxy = [H, W, focal, cx, cy]
    # Create log dir and copy the config file
    basedir = args.basedir
    expname = args.expname
    os.makedirs(os.path.join(basedir, expname), exist_ok=True)
    f = os.path.join(basedir, expname, 'args.txt')
    with open(f, 'w') as file:
        for arg in sorted(vars(args)):
            attr = getattr(args, arg)
            file.write('{} = {}\n'.format(arg, attr))
    if args.config is not None:
        f = os.path.join(basedir, expname, 'config.txt')
        with open(f, 'w') as file:
            file.write(open(args.config, 'r').read())
    # Create nerf model (head branch, checkpoint 'head.tar')
    render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer, \
        learned_codes, AudNet_state, optimizer_aud_state, AudAttNet_state = create_nerf(
            args, 'head.tar', args.dim_aud, device, True)
    global_step = start
    AudNet = AudioNet(args.dim_aud, args.win_size).to(device)
    AudAttNet = AudioAttNet().to(device)
    optimizer_Aud = torch.optim.Adam(
        params=list(AudNet.parameters()), lr=args.lrate, betas=(0.9, 0.999))
    if AudNet_state is not None:
        AudNet.load_state_dict(AudNet_state)
    if AudAttNet_state is not None:
        print('load audattnet')
        AudAttNet.load_state_dict(AudAttNet_state)
    if optimizer_aud_state is not None:
        optimizer_Aud.load_state_dict(optimizer_aud_state)
    bds_dict = {
        'near': near,
        'far': far,
    }
    render_kwargs_train.update(bds_dict)
    render_kwargs_test.update(bds_dict)
    # Move training data to GPU
    bc_img = torch.Tensor(bc_img).to(device).float()/255.0
    poses = torch.Tensor(poses).to(device).float()
    auds = torch.Tensor(auds).to(device).float()
    num_frames = com_images.shape[0]
    embed_fn, input_ch = get_embedder(3, 0)
    # Torso conditioning = audio slice + positional embedding of rotation+translation.
    dim_torso_signal = args.dim_aud_body + 2*input_ch
    # Create torso nerf model (checkpoint 'body.tar')
    render_kwargs_train_torso, render_kwargs_test_torso, start, grad_vars_torso, optimizer_torso, \
        learned_codes_torso, AudNet_state_torso, optimizer_aud_state_torso, _ = create_nerf(
            args, 'body.tar', dim_torso_signal, device_torso)
    # NOTE(review): this overwrites the head branch's `start`/`global_step`
    # with the torso checkpoint's step counter.
    global_step = start
    AudNet_torso = AudioNet(args.dim_aud_body, args.win_size).to(device_torso)
    optimizer_Aud_torso = torch.optim.Adam(
        params=list(AudNet_torso.parameters()), lr=args.lrate, betas=(0.9, 0.999))
    if AudNet_state_torso is not None:
        AudNet_torso.load_state_dict(AudNet_state_torso)
    if optimizer_aud_state_torso is not None:
        optimizer_Aud_torso.load_state_dict(optimizer_aud_state_torso)
    bds_dict = {
        'near': near,
        'far': far,
    }
    render_kwargs_train_torso.update(bds_dict)
    render_kwargs_test_torso.update(bds_dict)
    # Render-only path: smooth audio features over a sliding window, render
    # head + torso per frame, composite, and write both jpgs and an AVI.
    if args.with_test:
        print('RENDER ONLY')
        with torch.no_grad():
            testsavedir = os.path.join(basedir, expname, args.test_save_folder)
            os.makedirs(testsavedir, exist_ok=True)
            print('test poses shape', poses.shape)
            smo_half_win = int(args.smo_size / 2)
            auds_val = []
            for i in range(poses.shape[0]):
                # Zero-pad the audio window at sequence boundaries.
                left_i = i - smo_half_win
                right_i = i + smo_half_win
                pad_left, pad_right = 0, 0
                if left_i < 0:
                    pad_left = -left_i
                    left_i = 0
                if right_i > poses.shape[0]:
                    pad_right = right_i - poses.shape[0]
                    right_i = poses.shape[0]
                auds_win = auds[left_i:right_i]
                if pad_left > 0:
                    auds_win = torch.cat(
                        (torch.zeros_like(auds_win)[:pad_left], auds_win), dim=0)
                if pad_right > 0:
                    auds_win = torch.cat(
                        (auds_win, torch.zeros_like(auds_win)[:pad_right]), dim=0)
                auds_win = AudNet(auds_win)
                aud_smo = AudAttNet(auds_win)
                auds_val.append(aud_smo)
            auds_val = torch.stack(auds_val, 0)
            adjust_poses = poses.clone()
            adjust_poses_torso = poses.clone()
            et = pose_to_euler_trans(adjust_poses_torso)
            embed_et = torch.cat(
                (embed_fn(et[:, :3]), embed_fn(et[:, 3:])), dim=-1).to(device_torso)
            signal = torch.cat((auds_val[..., :args.dim_aud_body].to(
                device_torso), embed_et.squeeze()), dim=-1)
            t_start = time.time()
            vid_out = cv2.VideoWriter(os.path.join(testsavedir, 'result.avi'),
                                      cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 25, (W, H))
            for j in range(poses.shape[0]):
                rgbs, disps, last_weights, rgb_fgs = \
                    render_path(adjust_poses[j:j+1], auds_val[j:j+1],
                                bc_img, hwfcxy, args.chunk, render_kwargs_test)
                rgbs_torso, disps_torso, last_weights_torso, rgb_fgs_torso = \
                    render_path(torso_pose.unsqueeze(
                        0), signal[j:j+1], bc_img.to(device_torso), hwfcxy, args.chunk, render_kwargs_test_torso)
                # Composite: head RGB attenuated by torso transmittance plus torso foreground.
                rgbs_com = rgbs*last_weights_torso[..., None] + rgb_fgs_torso
                rgb8 = to8b(rgbs_com[0])
                # OpenCV expects BGR, hence the channel reversal.
                vid_out.write(rgb8[:, :, ::-1])
                filename = os.path.join(
                    testsavedir, str(aud_ids[j]) + '.jpg')
                imageio.imwrite(filename, rgb8)
                print('finished render', j)
            print('finished render in', time.time()-t_start)
            vid_out.release()
            return
    N_rand = args.N_rand
    use_batching = not args.no_batching
    if use_batching:
        # For random ray batching
        print('get rays')
        rays = np.stack([get_rays_np(H, W, focal, p, cx, cy)
                         for p in poses[:, :3, :4]], 0)  # [N, ro+rd, H, W, 3]
        print('done, concats')
        # [N, ro+rd+rgb, H, W, 3]
        rays_rgb = np.concatenate([rays, com_images[:, None]], 1)
        # [N, H, W, ro+rd+rgb, 3]
        rays_rgb = np.transpose(rays_rgb, [0, 2, 3, 1, 4])
        rays_rgb = np.stack([rays_rgb[i]
                             for i in i_train], 0)  # train images only
        # [(N-1)*H*W, ro+rd+rgb, 3]
        rays_rgb = np.reshape(rays_rgb, [-1, 3, 3])
        rays_rgb = rays_rgb.astype(np.float32)
        print('shuffle rays')
        np.random.shuffle(rays_rgb)
        print('done')
        i_batch = 0
    if use_batching:
        rays_rgb = torch.Tensor(rays_rgb).to(device)
    N_iters = args.N_iters + 1
    print('Begin')
    print('TRAIN views are', i_train)
    print('VAL views are', i_val)
    start = start + 1
    for i in trange(start, N_iters):
        time0 = time.time()
        # Sample random ray batch
        if use_batching:
            # Random over all images
            batch = rays_rgb[i_batch:i_batch+N_rand]  # [B, 2+1, 3*?]
            batch = torch.transpose(batch, 0, 1)
            batch_rays, target_s = batch[:2], batch[2]
            i_batch += N_rand
            if i_batch >= rays_rgb.shape[0]:
                print("Shuffle data after an epoch!")
                rand_idx = torch.randperm(rays_rgb.shape[0])
                rays_rgb = rays_rgb[rand_idx]
                i_batch = 0
        else:
            # Random from one image
            img_i = np.random.choice(i_train)
            target_com = torch.as_tensor(imageio.imread(
                com_images[img_i])).to(device).float()/255.0
            pose = poses[img_i, :3, :4]
            # Torso is always rendered from the first pose.
            pose_torso = poses[0, :3, :4].to(device_torso)
            rect = sample_rects[img_i]
            aud = auds[img_i]
            # Same sliding-window audio smoothing as in the render-only path.
            smo_half_win = int(args.smo_size/2)
            left_i = img_i - smo_half_win
            right_i = img_i + smo_half_win
            pad_left, pad_right = 0, 0
            if left_i < 0:
                pad_left = -left_i
                left_i = 0
            if right_i > i_train.shape[0]:
                pad_right = right_i-i_train.shape[0]
                right_i = i_train.shape[0]
            auds_win = auds[left_i:right_i]
            if pad_left > 0:
                auds_win = torch.cat(
                    (torch.zeros_like(auds_win)[:pad_left], auds_win), dim=0)
            if pad_right > 0:
                auds_win = torch.cat(
                    (auds_win, torch.zeros_like(auds_win)[:pad_right]), dim=0)
            auds_win = AudNet(auds_win)
            aud_smo = AudAttNet(auds_win)
            aud_smo_torso = aud_smo.to(device_torso)[..., :args.dim_aud_body]
            et = pose_to_euler_trans(poses[img_i].unsqueeze(0))
            embed_et = torch.cat(
                (embed_fn(et[:, :3]), embed_fn(et[:, 3:])), dim=1).to(device_torso)
            signal = torch.cat((aud_smo_torso, embed_et.squeeze()), dim=-1)
            if N_rand is not None:
                rays_o, rays_d = get_rays(
                    H, W, focal, pose, cx, cy, device)  # (H, W, 3), (H, W, 3)
                rays_o_torso, rays_d_torso = get_rays(
                    H, W, focal, pose_torso, cx, cy, device_torso)
                if i < args.precrop_iters:
                    dH = int(H//2 * args.precrop_frac)
                    dW = int(W//2 * args.precrop_frac)
                    coords = torch.stack(
                        torch.meshgrid(
                            torch.linspace(H//2 - dH, H//2 + dH - 1, 2*dH),
                            torch.linspace(W//2 - dW, W//2 + dW - 1, 2*dW)
                        ), -1)
                    if i == start:
                        print(
                            f"[Config] Center cropping of size {2*dH} x {2*dW} is enabled until iter {args.precrop_iters}")
                else:
                    coords = torch.stack(torch.meshgrid(torch.linspace(
                        0, H-1, H), torch.linspace(0, W-1, W)), -1)  # (H, W, 2)
                coords = torch.reshape(coords, [-1, 2])  # (H * W, 2)
                if args.sample_rate > 0:
                    # NOTE(review): `rect` from sample_rects is discarded and
                    # replaced by the fixed lower half of the image here.
                    rect = [0, H/2, W, H/2]
                    rect_inds = (coords[:, 0] >= rect[0]) & (
                        coords[:, 0] <= rect[0] + rect[2]) & (
                        coords[:, 1] >= rect[1]) & (
                        coords[:, 1] <= rect[1] + rect[3])
                    coords_rect = coords[rect_inds]
                    coords_norect = coords[~rect_inds]
                    rect_num = int(N_rand*float(rect[2])*rect[3]/H/W)
                    norect_num = N_rand - rect_num
                    select_inds_rect = np.random.choice(
                        coords_rect.shape[0], size=[rect_num], replace=False)  # (N_rand,)
                    # (N_rand, 2)
                    select_coords_rect = coords_rect[select_inds_rect].long()
                    select_inds_norect = np.random.choice(
                        coords_norect.shape[0], size=[norect_num], replace=False)  # (N_rand,)
                    # (N_rand, 2)
                    select_coords_norect = coords_norect[select_inds_norect].long(
                    )
                    select_coords = torch.cat(
                        (select_coords_norect, select_coords_rect), dim=0)
                else:
                    select_inds = np.random.choice(
                        coords.shape[0], size=[N_rand], replace=False)  # (N_rand,)
                    select_coords = coords[select_inds].long()
                    norect_num = 0
                rays_o = rays_o[select_coords[:, 0],
                                select_coords[:, 1]]  # (N_rand, 3)
                rays_d = rays_d[select_coords[:, 0],
                                select_coords[:, 1]]  # (N_rand, 3)
                batch_rays = torch.stack([rays_o, rays_d], 0)
                bc_rgb = bc_img[select_coords[:, 0],
                                select_coords[:, 1]]
                rays_o_torso = rays_o_torso[select_coords[:, 0],
                                            select_coords[:, 1]]  # (N_rand, 3)
                rays_d_torso = rays_d_torso[select_coords[:, 0],
                                            select_coords[:, 1]]  # (N_rand, 3)
                batch_rays_torso = torch.stack([rays_o_torso, rays_d_torso], 0)
                # NOTE(review): duplicate assignment -- bc_rgb was already set above.
                bc_rgb = bc_img[select_coords[:, 0],
                                select_coords[:, 1]]
                bc_rgb_torso = bc_rgb.to(device_torso)
                target_s_com = target_com[select_coords[:, 0],
                                          select_coords[:, 1]]  # (N_rand, 3)
        #####  Core optimization loop  #####
        rgb, disp, acc, last_weight, rgb_fg, extras = \
            render_dynamic_face(H, W, focal, cx, cy, chunk=args.chunk, rays=batch_rays,
                                aud_para=aud_smo, bc_rgb=bc_rgb,
                                verbose=i < 10, retraw=True,
                                ** render_kwargs_train)
        rgb_torso, disp_torso, acc_torso, last_weight_torso, rgb_fg_torso, extras_torso = \
            render_dynamic_face(H, W, focal, cx, cy, chunk=args.chunk, rays=batch_rays_torso,
                                aud_para=signal, bc_rgb=bc_rgb_torso,
                                verbose=i < 10, retraw=True,
                                **render_kwargs_train_torso)
        # Composite head behind torso before computing the photometric loss.
        rgb_com = rgb * \
            last_weight_torso.to(device)[..., None] + rgb_fg_torso.to(device)
        optimizer_torso.zero_grad()
        img_loss_com = img2mse(rgb_com, target_s_com)
        # NOTE(review): `trans` and `split_weight` are computed but never used.
        trans = extras['raw'][..., -1]
        split_weight = float(1.0)
        loss = img_loss_com
        psnr = mse2psnr(img_loss_com)
        if 'rgb0' in extras_torso:
            rgb_com0 = extras['rgb0'] * \
                extras_torso['last_weight0'].to(
                    device)[..., None] + extras_torso['rgb_map_fg0'].to(device)
            img_loss0 = img2mse(rgb_com0, target_s_com)
            loss = loss + img_loss0
        loss.backward()
        # NOTE(review): only the torso optimizer steps; optimizer, optimizer_Aud
        # and optimizer_Aud_torso never step even though their LRs are updated below.
        optimizer_torso.step()
        # NOTE: IMPORTANT!
        ###   update learning rate   ###
        decay_rate = 0.1
        # NOTE(review): multiplier is 1500 although --lrate_decay's help text
        # says "in 1000 steps" -- confirm which is intended.
        decay_steps = args.lrate_decay * 1500
        new_lrate = args.lrate * (decay_rate ** (global_step / decay_steps))
        #print('cur_rate', new_lrate)
        for param_group in optimizer.param_groups:
            param_group['lr'] = new_lrate
        for param_group in optimizer_Aud.param_groups:
            param_group['lr'] = new_lrate
        for param_group in optimizer_torso.param_groups:
            param_group['lr'] = new_lrate
        for param_group in optimizer_Aud_torso.param_groups:
            param_group['lr'] = new_lrate
        ################################
        dt = time.time()-time0
        # Rest is logging
        if i % args.i_weights == 0:
            path = os.path.join(basedir, expname, '{:06d}_head.tar'.format(i))
            torch.save({
                'global_step': global_step,
                'network_fn_state_dict': render_kwargs_train['network_fn'].state_dict(),
                'network_fine_state_dict': render_kwargs_train['network_fine'].state_dict(),
                'network_audnet_state_dict': AudNet.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'optimizer_aud_state_dict': optimizer_Aud.state_dict(),
                'network_audattnet_state_dict': AudAttNet.state_dict(),
            }, path)
            path = os.path.join(basedir, expname, '{:06d}_body.tar'.format(i))
            torch.save({
                'global_step': global_step,
                'network_fn_state_dict': render_kwargs_train_torso['network_fn'].state_dict(),
                'network_fine_state_dict': render_kwargs_train_torso['network_fine'].state_dict(),
                'network_audnet_state_dict': AudNet_torso.state_dict(),
                'optimizer_state_dict': optimizer_torso.state_dict(),
                'optimizer_aud_state_dict': optimizer_Aud_torso.state_dict(),
            }, path)
            print('Saved checkpoints at', path)
        if i % args.i_testset == 0 and i > 0:
            testsavedir = os.path.join(
                basedir, expname, 'testset_{:06d}'.format(i))
            os.makedirs(testsavedir, exist_ok=True)
            print('test poses shape', poses[i_val].shape)
            aud_torso = AudNet(
                auds[i_val])[..., :args.dim_aud_body].to(device_torso)
            et = pose_to_euler_trans(poses[i_val])
            embed_et = torch.cat(
                (embed_fn(et[:, :3]), embed_fn(et[:, 3:])), dim=1).to(device_torso)
            signal = torch.cat((aud_torso, embed_et.squeeze()), dim=-1)
            auds_val = AudNet(auds[i_val])
            with torch.no_grad():
                for j in range(auds_val.shape[0]):
                    rgbs, disps, last_weights, rgb_fgs = \
                        render_path(poses[i_val][j:j+1], auds_val[j:j+1],
                                    bc_img, hwfcxy, args.chunk, render_kwargs_test)
                    rgbs_torso, disps_torso, last_weights_torso, rgb_fgs_torso = \
                        render_path(poses[0].to(device_torso).unsqueeze(0),
                                    signal[j:j+1], bc_img.to(
                                        device_torso), hwfcxy, args.chunk, render_kwargs_test_torso)
                    rgbs_com = rgbs * \
                        last_weights_torso[..., None] + rgb_fgs_torso
                    rgb8 = to8b(rgbs_com[0])
                    filename = os.path.join(
                        testsavedir, '{:03d}.jpg'.format(j))
                    imageio.imwrite(filename, rgb8)
            print('Saved test set')
        if i % args.i_print == 0:
            tqdm.write(
                f"[TRAIN] Iter: {i} Loss: {img_loss_com.item()} PSNR: {psnr.item()}")
        global_step += 1
if __name__ == '__main__':
    # Make every newly created tensor a CUDA float tensor by default
    # (requires a CUDA-capable GPU to be present).
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    train()
| [
"gyd2011@mail.ustc.edu.cn"
] | gyd2011@mail.ustc.edu.cn |
e8bf3df832fd868ec608b2b76bee94a023982c76 | 5174294cc3401c418d98b14b5a475a08f0b6eadd | /searchlight/i18n.py | e2ba2955c8d21d35ead9b0a11da3a095c179dbe4 | [
"Apache-2.0"
] | permissive | khushbuparakh/searchlight | 3e01927b50629b50a36e209fe34fded474a4a1db | 2c2f71fc08a13f757eb126faa8f8e8f714e32dd4 | refs/heads/master | 2020-12-13T09:01:10.717304 | 2016-04-07T01:34:09 | 2016-04-07T01:34:09 | 55,662,195 | 0 | 0 | Apache-2.0 | 2023-08-02T01:27:31 | 2016-04-07T03:56:00 | Python | UTF-8 | Python | false | false | 1,114 | py | # Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_i18n import * # noqa
# Factory producing translation helpers scoped to the "searchlight" domain.
_translators = TranslatorFactory(domain='searchlight')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
| [
"lakshmi.sampath@hp.com"
] | lakshmi.sampath@hp.com |
83eb8569cc3356e75bba882839e75f14402a7d1c | f5df5f68ddbd1b5dbb004401b901505358032a64 | /practice (21).py | 2595b539dd0e13c0e8954e3dbb0c551d94debd55 | [] | no_license | GeginV/Iterations-Python | c29308351a8205372060f577540b618cd172db93 | 1d23d78a6d93dde6c7d49676d7f79b68cf269c66 | refs/heads/main | 2023-03-26T18:17:01.310389 | 2021-03-29T08:09:03 | 2021-03-29T08:09:03 | 352,561,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | L = [int(input()) %2 == 0 for i in range(5)]
print(L) | [
"noreply@github.com"
] | noreply@github.com |
def ch4_1():
    """Grid travel-plan simulation (exercise ch.4-1).

    Reads a grid size N and a space-separated list of moves (L/R/U/D),
    starts at (1, 1), and ignores any move that would leave the N x N grid.
    Unrecognised move letters fall back to "down", matching the original
    if/elif/else chain.

    Returns the final (row, column) position as a tuple.

    Bug fix: the boundary check previously hard-coded a 5x5 board even
    though N is read from input; it now honours N.
    """
    position = (1, 1)
    # Direction offsets as (row, col).
    offsets = {"R": (0, 1), "L": (0, -1), "U": (-1, 0), "D": (1, 0)}
    N = int(input("Space : "))
    plan_list = input("Plans : ").split(" ")
    for move in plan_list:
        dmove = offsets.get(move, (1, 0))
        candidate = (position[0] + dmove[0], position[1] + dmove[1])
        # Skip moves that would step outside the 1..N board.
        if candidate[0] < 1 or candidate[1] < 1 or candidate[0] > N or candidate[1] > N:
            continue
        position = candidate
    return position
def ch4_2():
    """Game-development simulation (exercise ch.4-2).

    Reads an N x M map (0 = land, 1 = sea -- presumably; confirm against the
    problem statement), a start cell (x, y) and a heading `head`, then walks
    until blocked on all four sides and returns the number of cells visited.
    """
    N, M = list(map(int,input("N M : ").split(" ")))
    x,y,head = list(map(int,input("x y head : ").split(" ")))
    position = [x,y]
    # Direction vectors as [dx, dy]; map_array is indexed [y][x] below.
    dup = [0,-1]
    ddown = [0,1]
    dright = [1,0]
    dleft = [-1,0]
    map_array = []
    for n in range(N):
        map_array.append(list(map(int,input().split(" "))))
    rotation = 0
    # Mark the start cell as visited by writing 1 into the map.
    # NOTE(review): visited cells become indistinguishable from sea cells.
    map_array[position[1]][position[0]] = 1
    count = 1
    while (True):
        # head: 0=up, 1=right, 2=down, 3=left (see assignments below).
        if head == 0:
            dmove = dup
        elif head == 1:
            dmove = dright
        elif head == 2:
            dmove = ddown
        else:
            dmove = dleft
        temp_position = [position[0]+dmove[0],position[1]+dmove[1]]
        if map_array[temp_position[1]][temp_position[0]] == 1:
            # Blocked: turn and count the rotation. head += 1 cycles
            # up -> right -> down -> left, i.e. clockwise.
            # NOTE(review): after four blocked turns the loop simply ends;
            # confirm whether the problem requires a backward step instead.
            rotation += 1
            if rotation >3:
                break
            head += 1
            head %= 4
        else:
            position = temp_position
            map_array[position[1]][position[0]] = 1
            rotation = 0
            count += 1
    # NOTE(review): no bounds check on temp_position -- relies on the map
    # border being sea (1); confirm the input guarantees this.
    return count
if __name__ == "__main__":
    # Dispatch to the requested exercise; any value other than 1 or 2 runs
    # ch4_1 without printing its result (mirrors the original behaviour).
    selection = int(input("Test Case : "))
    if selection == 1:
        print(ch4_1())
    elif selection == 2:
        print(ch4_2())
    else:
        ch4_1()
"59599991+wh-jung0522@users.noreply.github.com"
] | 59599991+wh-jung0522@users.noreply.github.com |
5e58b6483a21d1dcda87883dadabb128dcf9cdbe | 4ed038a638725ac77731b0b97ddd61aa37dd8d89 | /cairis/gui/SecurityPatternDialog.py | 08a80cb3e41d28e481cae171536b5d583ce0b767 | [
"Apache-2.0"
] | permissive | RachelLar/cairis_update | 0b784101c4aff81ff0390328eb615e335301daa2 | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | refs/heads/master | 2021-01-19T06:25:47.644993 | 2016-07-11T20:48:11 | 2016-07-11T20:48:11 | 63,103,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,874 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from SecurityPatternPanel import SecurityPatternPanel
from cairis.core.SecurityPatternParameters import SecurityPatternParameters
import DialogClassParameters
class SecurityPatternDialog(wx.Dialog):
  """Modal dialog for creating or editing a security pattern.

  Collects the pattern's name, context, problem, solution, concern
  associations and requirements from an embedded SecurityPatternPanel and
  exposes the committed values as SecurityPatternParameters via
  parameters().
  """

  def __init__(self,parent,parameters):
    # parameters: dialog-class parameters supplying id(), label() and createFlag().
    wx.Dialog.__init__(self,parent,parameters.id(),parameters.label(),style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(400,500))
    self.thePatternName = ''
    self.thePatternContext = ''
    self.thePatternProblem = ''
    self.thePatternSolution = ''
    self.theConcernAssociations = []
    self.theRequirements = []
    # -1 marks a pattern that has not been persisted yet.
    self.thePatternId = -1
    self.panel = 0
    self.buildControls(parameters)
    self.commitVerb = 'Add'

  def buildControls(self,parameters):
    # Build the embedded pattern panel and bind the commit button.
    mainSizer = wx.BoxSizer(wx.VERTICAL)
    self.panel = SecurityPatternPanel(self)
    self.panel.buildControls(parameters.createFlag())
    mainSizer.Add(self.panel,1,wx.EXPAND)
    self.SetSizer(mainSizer)
    wx.EVT_BUTTON(self,SECURITYPATTERN_BUTTONCOMMIT_ID,self.onCommit)

  def load(self,pattern):
    # Populate the dialog from an existing pattern and switch to edit mode.
    self.thePatternId = pattern.id()
    self.panel.loadControls(pattern)
    self.commitVerb = 'Edit'

  def displayError(self,message,caption):
    # Helper: show a modal error message (factored out of onCommit, which
    # previously duplicated this dialog code four times).
    dlg = wx.MessageDialog(self,message,caption,wx.OK)
    dlg.ShowModal()
    dlg.Destroy()

  def onCommit(self,evt):
    # Pull the current values out of the panel, validate the mandatory
    # fields, and close the dialog on success.
    commitLabel = self.commitVerb + ' security pattern'
    nameCtrl = self.FindWindowById(SECURITYPATTERN_TEXTNAME_ID)
    contextCtrl = self.FindWindowById(SECURITYPATTERN_TEXTCONTEXT_ID)
    problemCtrl = self.FindWindowById(SECURITYPATTERN_TEXTPROBLEM_ID)
    solutionCtrl = self.FindWindowById(SECURITYPATTERN_TEXTSOLUTION_ID)
    concernsCtrl = self.FindWindowById(SECURITYPATTERN_LISTPATTERNSTRUCTURE_ID)
    reqsCtrl = self.FindWindowById(SECURITYPATTERN_LISTREQUIREMENTS_ID)
    self.thePatternName = nameCtrl.GetValue()
    self.thePatternContext = contextCtrl.GetValue()
    self.thePatternProblem = problemCtrl.GetValue()
    self.thePatternSolution = solutionCtrl.GetValue()
    self.theConcernAssociations = concernsCtrl.associations()
    self.theRequirements = reqsCtrl.requirements()
    # Uniform validation loop replaces four duplicated, inconsistently
    # structured if/elif blocks; behaviour (message text, order, early
    # return) is unchanged.
    mandatoryFields = [
      (self.thePatternName,'Pattern name cannot be empty'),
      (self.thePatternContext,'Context cannot be empty'),
      (self.thePatternProblem,'Problem cannot be empty'),
      (self.thePatternSolution,'Solution cannot be empty')]
    for fieldValue,errorMsg in mandatoryFields:
      if len(fieldValue) == 0:
        self.displayError(errorMsg,commitLabel)
        return
    self.EndModal(SECURITYPATTERN_BUTTONCOMMIT_ID)

  def parameters(self):
    # Package the committed values; id is -1 for a brand-new pattern.
    parameters = SecurityPatternParameters(self.thePatternName,self.thePatternContext,self.thePatternProblem,self.thePatternSolution,self.theRequirements,self.theConcernAssociations)
    parameters.setId(self.thePatternId)
    return parameters
| [
"shamal.faily@googlemail.com"
] | shamal.faily@googlemail.com |
91a20661265981b49a7a777f11bf1c5c352b1cbf | c614d006cedca85e20d26ea4f80b1d57632b76ad | /mission_control/navigator_missions/nav_missions/totem.py | 29fdd183a661e5aa8dca05b2fc107b88109f8b5c | [] | no_license | LucasBA/Navigator | b404d90bac931a1c7e0a5101c63d17253a5d72d5 | 1edd25fe801821631e88444f8f79ca37f980ee8d | refs/heads/master | 2020-04-05T18:58:41.617025 | 2016-08-31T16:59:10 | 2016-08-31T16:59:10 | 64,950,985 | 0 | 0 | null | 2016-08-04T16:39:29 | 2016-08-04T16:39:29 | null | UTF-8 | Python | false | false | 1,193 | py | #!/usr/bin/env python
import txros # txros is required for all missions since it's the backbone on which we build missions.
@txros.util.cancellableInlineCallbacks
def main(navigator):
    """Totem mission skeleton: drive to the competition waypoint, search for
    totems while avoiding buoys, then visit each discovered totem.

    NOTE(review): this mission looks unfinished -- `SetBool` is not imported
    in this file, service-client handles are indexed like result lists, and
    no service call is ever actually made; confirm the intended txros
    service API before relying on this.
    """
    totem_list = navigator._node_handle.get_service_client('/mission/totem/list', SetBool)
    totem_list = navigator._node_handle.get_service_client('/mission/totem/find', totem_list)
    center = [0,0,0]#GPS waypoint given by competition
    #GPS frame to enu
    yield navigator.move(center).go()
    iteration = 0
    buoy_list = [0]
    while (totem_list[0]==False):
        totem_list = navigator._node_handle.get_service_client('/mission/totem/find', [totem_list,iteration])
        buoy_list = navigator._node_handle.get_service_client('/oa/buoy/find')
        waypoints = navigator._node_handle.get_service_client('/mission/totem/search', buoy_list, center)
        # Bug fix: `for i in len(waypoints)` iterates an int -> TypeError.
        for i in range(len(waypoints)):
            yield navigator.move(waypoints[i])
        iteration+=1
    for i in totem_list:
        waypoints = navigator._node_handle.get_service_client('/mission/totem/search', buoy_list, totem_list[i])
        # Bug fix: range() here too, and a distinct index variable so the
        # outer loop's `i` is not clobbered.
        for j in range(len(waypoints)):
            yield navigator.move(waypoints[j])
# NOTE(review): module-level Python 2 print statement -- it runs at import
# time and is a SyntaxError under Python 3; confirm this file targets Python 2.
print "Done!"
"lbassettaudain@ufl.edu"
] | lbassettaudain@ufl.edu |
a8cc921f9a8bbdd365acf5d3c14a6a650814c348 | 3493496a09ce9c4820ebcaabeaf7694900bf60a8 | /old_stuff/old_tf1_model/discriminator.py | 7d12c296ab9346b8e30b37f234163b0a3c6954c5 | [] | no_license | schmidtdominik/ArtGAN | 1ab1ed8c4b5ed0f01caa02b5568b18a8f45c24ac | e3751cbc860a18492c8a59e10088e7149435015b | refs/heads/master | 2020-08-03T11:30:50.927451 | 2020-02-16T23:04:35 | 2020-02-16T23:04:35 | 211,737,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,496 | py | import tensorflow as tf
#def batch_norm(layer):
# return tf.layers.batch_normalization(layer, training=True)
def discriminator_256x256(image_256x256=None, features_256x256=None):
    """256x256 discriminator stage: optional fromRGB projection, two 3x3
    leaky-ReLU convs (16 -> 32 channels), 2x average-pool, then delegate to
    the 128x128 stage. Pass either an RGB image or pre-computed features."""
    with tf.variable_scope("GAN/Discriminator/discriminator_256x256", reuse=tf.AUTO_REUSE):
        x = features_256x256
        if image_256x256 is not None:
            # Project the RGB input into this stage's feature space.
            x = tf.layers.conv2d(image_256x256, 16, (1, 1), strides=(1, 1), padding='same',
                                 activation=tf.nn.leaky_relu, name='fromRGB_256x256')
        x = tf.layers.conv2d(x, 16, (3, 3), strides=(1, 1), padding='same',
                             activation=tf.nn.leaky_relu, name='conv_256x256_0')
        x = tf.layers.conv2d(x, 32, (3, 3), strides=(1, 1), padding='same',
                             activation=tf.nn.leaky_relu, name='conv_256x256_1')
        x = tf.layers.average_pooling2d(x, (2, 2), (2, 2), padding='same')
        return discriminator_128x128(features_128x128=x)
def discriminator_128x128(image_128x128=None, features_128x128=None):
    """128x128 discriminator stage: optional fromRGB projection, two 3x3
    leaky-ReLU convs (32 -> 64 channels), 2x average-pool, then delegate to
    the 64x64 stage. Pass either an RGB image or pre-computed features."""
    with tf.variable_scope("GAN/Discriminator/discriminator_128x128", reuse=tf.AUTO_REUSE):
        x = features_128x128
        if image_128x128 is not None:
            # Project the RGB input into this stage's feature space.
            x = tf.layers.conv2d(image_128x128, 32, (1, 1), strides=(1, 1), padding='same',
                                 activation=tf.nn.leaky_relu, name='fromRGB_128x128')
        x = tf.layers.conv2d(x, 32, (3, 3), strides=(1, 1), padding='same',
                             activation=tf.nn.leaky_relu, name='conv_128x128_0')
        x = tf.layers.conv2d(x, 64, (3, 3), strides=(1, 1), padding='same',
                             activation=tf.nn.leaky_relu, name='conv_128x128_1')
        x = tf.layers.average_pooling2d(x, (2, 2), (2, 2), padding='same')
        return discriminator_64x64(features_64x64=x)
def discriminator_64x64(image_64x64=None, features_64x64=None):
with tf.variable_scope("GAN/Discriminator/discriminator_64x64", reuse=tf.AUTO_REUSE):
if image_64x64 is not None:
features_64x64 = tf.layers.conv2d(image_64x64, 64, (1, 1), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='fromRGB_64x64')
conv_64x64_0 = tf.layers.conv2d(features_64x64, 64, (3, 3), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='conv_64x64_0')
#conv_64x64_0 = batch_norm(conv_64x64_0)
conv_64x64_1 = tf.layers.conv2d(conv_64x64_0, 128, (3, 3), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='conv_64x64_1')
#conv_64x64_1 = batch_norm(conv_64x64_1)
resize_32x32 = tf.layers.average_pooling2d(conv_64x64_1, (2, 2), (2, 2), padding='same')
return discriminator_32x32(features_32x32=resize_32x32)
def discriminator_32x32(image_32x32=None, features_32x32=None):
with tf.variable_scope("GAN/Discriminator/discriminator_32x32", reuse=tf.AUTO_REUSE):
if image_32x32 is not None:
features_32x32 = tf.layers.conv2d(image_32x32, 128, (1, 1), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='fromRGB_32x32')
conv_32x32_0 = tf.layers.conv2d(features_32x32, 128, (3, 3), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='conv_32x32_0')
#conv_32x32_0 = batch_norm(conv_32x32_0)
conv_32x32_1 = tf.layers.conv2d(conv_32x32_0, 256, (3, 3), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='conv_32x32_1')
#conv_32x32_1 = batch_norm(conv_32x32_1)
resize_16x16 = tf.layers.average_pooling2d(conv_32x32_1, (2, 2), (2, 2), padding='same')
return discriminator_16x16(features_16x16=resize_16x16)
def discriminator_16x16(image_16x16=None, features_16x16=None):
with tf.variable_scope("GAN/Discriminator/discriminator_16x16", reuse=tf.AUTO_REUSE):
if image_16x16 is not None:
features_16x16 = tf.layers.conv2d(image_16x16, 256, (1, 1), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='fromRGB_16x16')
conv_16x16_0 = tf.layers.conv2d(features_16x16, 256, (3, 3), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='conv_16x16_0')
#conv_16x16_0 = batch_norm(conv_16x16_0)
conv_16x16_1 = tf.layers.conv2d(conv_16x16_0, 512, (3, 3), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='conv_16x16_1')
#conv_16x16_1 = batch_norm(conv_16x16_1)
resize_8x8 = tf.layers.average_pooling2d(conv_16x16_1, (2, 2), (2, 2), padding='same')
return discriminator_8x8(features_8x8=resize_8x8)
def discriminator_8x8(image_8x8=None, features_8x8=None):
with tf.variable_scope("GAN/Discriminator/discriminator_8x8", reuse=tf.AUTO_REUSE):
if image_8x8 is not None:
features_8x8 = tf.layers.conv2d(image_8x8, 512, (1, 1), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='fromRGB_8x8')
conv_8x8_0 = tf.layers.conv2d(features_8x8, 512, (3, 3), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='conv_8x8_0')
#conv_8x8_0 = batch_norm(conv_8x8_0)
conv_8x8_1 = tf.layers.conv2d(conv_8x8_0, 512, (3, 3), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='conv_8x8_1')
#conv_8x8_1 = batch_norm(conv_8x8_1)
resize_4x4 = tf.layers.average_pooling2d(conv_8x8_1, (2, 2), (2, 2), padding='same')
return discriminator_4x4(features_4x4=resize_4x4)
def discriminator_4x4(image_4x4=None, features_4x4=None):
with tf.variable_scope("GAN/Discriminator/discriminator_4x4", reuse=tf.AUTO_REUSE):
if image_4x4 is not None:
features_4x4 = tf.layers.conv2d(image_4x4, 512, (1, 1), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='fromRGB_4x4')
# Todo: Minibatch stddev 1 layer --> concat with rest of features
# features_4x4 = tf.concat ... features_4x4, minibatch_stddev
conv_4x4_0 = tf.layers.conv2d(features_4x4, 512, (3, 3), strides=(1, 1), padding='same', activation=tf.nn.leaky_relu, name='conv_4x4_0')
#conv_4x4_0 = batch_norm(conv_4x4_0)
conv_4x4_1 = tf.layers.conv2d(conv_4x4_0, 512, (4, 4), strides=(1, 1), padding='valid', activation=tf.nn.leaky_relu, name='conv_4x4_1')
#conv_4x4_1 = batch_norm(conv_4x4_1)
fc0 = tf.layers.dense(conv_4x4_1, 1, name='fc0')
return fc0
| [
"schmidtdominik30@gmail.com"
] | schmidtdominik30@gmail.com |
7ec64ebfbf6b63a38518ca287dedb3a612f1cc58 | d889c12217bad2c8c9b49ecff6661575434c2071 | /simetricni-ne-evklidski.py | 191da9820fee2de800d0636bc4503194dba00c19 | [] | no_license | timotejvesel/Ant-colony-on-TSP | 53c9123f3c0a21a0facd1176bfc100516612e14f | 07eec5cbd275467ceb1b199e4895d9828cd7d129 | refs/heads/master | 2020-04-08T13:19:16.419456 | 2019-01-06T13:11:19 | 2019-01-06T13:11:19 | 159,385,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,946 | py | from random import *
import time
start = time.time()
import numpy
import math
def trikotna(file, n):
    """Read a lower-triangular distance matrix from *file* into a full
    symmetric n x n matrix.

    The file must contain the n*(n+1)/2 entries of the lower triangle
    (row by row, diagonal included) separated by arbitrary whitespace.

    Args:
        file: path of the text file holding the triangular matrix.
        n: number of nodes (rows/columns of the resulting matrix).

    Returns:
        An n x n list of lists with razdalja[i][j] == razdalja[j][i].
    """
    with open(file) as f:
        values = [float(token) for line in f for token in line.split()]
    razdalja = [[0] * n for _ in range(n)]
    index = 0
    for i in range(n):
        for j in range(i + 1):  # lower triangle, diagonal included
            # Mirror each entry so the matrix comes out symmetric.
            razdalja[i][j] = values[index]
            razdalja[j][i] = values[index]
            index += 1
    return razdalja
def izberi_pot(ant, vozlisce, neobiskana, pher, verjetnosti, razdalja, seznam, n, a, b, q0):
    """Pick the next node for *ant* currently standing at node *vozlisce*.

    For every unvisited node i (neobiskana[ant][i] == 1) the ACO
    desirability pher[vozlisce][i]**a * (1 / (razdalja[vozlisce][i] + 1))**b
    is computed (the +1 guards against division by zero).  If one node holds
    more than a q0 fraction of the total desirability it is chosen greedily;
    otherwise a node is drawn at random weighted by desirability.

    Returns:
        A single-element list [node], matching random.choices().
    """
    vsota = 0
    for i in range(n):
        if neobiskana[ant][i] == 1:
            verjetnosti[i] = (pher[vozlisce][i]**a)*((1/(razdalja[vozlisce][i]+1))**b)
            # BUG FIX: the normalising sum must accumulate the same quantity
            # as verjetnosti[i]; the old code summed pher**a * razdalja**b,
            # which *grows* with distance and made the greedy q0 threshold
            # compare incommensurable values.
            vsota += verjetnosti[i]
        else:
            verjetnosti[i] = 0
    for i in range(n):
        if verjetnosti[i] > q0*vsota:  # greedy exploitation step
            return [i]
    return choices(seznam, verjetnosti)
def main():
    """Run one complete Ant Colony Optimization pass (1000 iterations) on the
    br17 asymmetric TSP instance and return (best length, best tour).

    The tour is returned as a list of (from_node, to_node) edges.
    NOTE(review): reads "br17.txt" from the working directory and sets the
    module globals `inp` and `tj` for timing.
    """
    a = 2.5  # exponent applied to pheromone in izberi_pot
             # (original comment said "importance of distance" — the a/b
             # labels appear swapped relative to the code; verify)
    b = 7    # exponent applied to inverse distance in izberi_pot
    c = 0.001  # initial pheromone level on every edge
    s = 0.1  # local evaporation rate (weakens freshly used edges)
    p = 0.08  # global update rate towards the best-so-far tour
    q0 = 0.9  # greediness threshold for izberi_pot
    m = 10  # number of ants
    q = 0.1  # amount of pheromone deposited by one ant
    n = 17  #int(input()) # number of cities (fixed for problem br17)
    razdalja = [[0]*n for i in range(n)]
    pher = [[c]*n for i in range(n)]
    verjetnosti = [0]*n
    seznam = [i for i in range(n)]
    lokacija = [0]*m  # current node of ant i
    dolzina_poti = [0]*m  # distance travelled so far by ant i
    pher_dodan = [[0]*n for i in range(n)]
    min_dolzina = math.inf
    opt_pot = []
    razdalja = trikotna("br17.txt",n)
    korak = 0
    global inp
    inp = time.time()
    while korak < 1000:
        korak += 1
        pot = [[] for i in range(m)]  # edge list built by each ant
        neobiskana = [[1]*n for i in range(m)]  # per-ant unvisited flags:
            # 1 = unvisited, 0 = visited, 2 = starting node (closed at the end)
        pher_dodan = [[0]*n for i in range(n)]  # pheromone added along the best tour
        dolzina_poti = [0]*m
        for i in range(m):
            # Each ant starts from a random node; mark it with 2 so the tour
            # can be closed back to it on the last step.
            lokacija[i] = randint(0,n-1)
            neobiskana[i][lokacija[i]] = 2
        for i in range(n):  # simulate one step of every ant's tour
            for j in range(m):
                if i == n-1:
                    # Last step: return to the starting node (flagged 2).
                    for k in range(n):
                        if neobiskana[j][k] == 2:
                            f = k
                    dolzina_poti[j] += razdalja[lokacija[j]][f]
                    pot[j].append((lokacija[j],f))
                else:
                    f = izberi_pot(j,lokacija[j],neobiskana,pher,verjetnosti,razdalja,seznam,n,a,b,q0)[0]
                    dolzina_poti[j] += razdalja[lokacija[j]][f]
                    pot[j].append((lokacija[j],f))
                    lokacija[j] = f
                    neobiskana[j][f] = 0
                # Local pheromone update: decay the just-used edge towards the
                # initial level c (kept symmetric).
                pher[pot[j][i][0]][pot[j][i][1]] = (1-s)*pher[pot[j][i][0]][pot[j][i][1]] + s*c
                pher[pot[j][i][1]][pot[j][i][0]] = pher[pot[j][i][0]][pot[j][i][1]]
        for i in range(m):
            # Track the best tour found across all iterations.
            if dolzina_poti[i] == min(dolzina_poti):
                if (dolzina_poti[i] < min_dolzina):
                    min_dolzina = dolzina_poti[i]
                    opt_pot = pot[i]
        for i in range(n-1):
            # Deposit extra pheromone along the best-so-far tour.
            # NOTE(review): the reverse edge copies the current pher value
            # instead of the deposit p*(q/min_dolzina) — looks unintended;
            # confirm before relying on symmetric updates here.
            pher_dodan[opt_pot[i][0]][opt_pot[i][1]] = p*(q/min_dolzina)
            pher_dodan[opt_pot[i][1]][opt_pot[i][0]] = pher[opt_pot[i][0]][opt_pot[i][1]]
        for i in range(n):
            for j in range(n):
                # Global evaporation plus the best-tour deposit.
                pher[i][j] = (1-p)*pher[i][j] + pher_dodan[i][j]
        for i in range(n-1):
            pher_dodan[opt_pot[i][0]][opt_pot[i][1]] = 0
            pher_dodan[opt_pot[i][1]][opt_pot[i][0]] = 0
    global tj
    tj= time.time()
    print("Ant Colony " + str(tj-inp))
    return min_dolzina, opt_pot
def average(stevilo):
    """Run the colony *stevilo* times and report the results.

    Prints the run index and tour length of every run, then the total wall
    time, the mean best length, and the shortest tour found.

    Args:
        stevilo: how many independent runs of main() to perform.

    Returns:
        (mean best length, shortest length, shortest tour).
    """
    total = 0
    best_tour = []
    best_len = math.inf
    for run in range(stevilo):
        print(run)
        length, tour = main()
        print(length)
        total += length
        if length < best_len:
            best_len, best_tour = length, tour
    elapsed = time.time() - start
    print(f"skupni čas{elapsed}")
    print(f"Povprečna dolžina {total/stevilo}")
    print(f"Dolžina najboljše poti {best_len}")
    print("Najboljša pot ")
    print(best_tour)
    return total/stevilo, best_len, best_tour
# --- script entry point --------------------------------------------------
# Ask the user for the number of independent algorithm runs, then execute
# them and print the averaged/best results.  (One run takes roughly four
# seconds on problem br17.)
print("Določite število iteracij algoritma")
print("Ena iteracija traja približno 4 sekunde (Problem br17)")
A = int(input())
average(A)
| [
"noreply@github.com"
] | noreply@github.com |
edb68685f9b28d0e3a8ffe8feaaa3c085fdf9521 | 8b3c910ce9452610023c8f314c82cd36b718b1ae | /cogs/EventController.py | 13697e57510147985e83c9541d814902e98159f2 | [] | no_license | AEnterprise/TriBot-HypeSquad | 0a750712566a8894dac9b2a3037a8c2294f93f4c | efd19d4877969fe80c98d53bf46c23841237d51d | refs/heads/master | 2020-03-31T21:30:55.515828 | 2018-10-11T11:51:37 | 2018-10-11T11:51:37 | 152,583,512 | 0 | 0 | null | 2018-10-11T11:51:56 | 2018-10-11T11:51:56 | null | UTF-8 | Python | false | false | 1,759 | py | import asyncio
import importlib
import datetime
import os
from subprocess import Popen
import subprocess
from discord.ext import commands
from discord import utils
from utils import Util, Configuration
class EventControl:
def __init__(self, bot):
self.bot:commands.Bot = bot
@commands.command(hidden=True)
async def start(self, ctx: commands.Context):
"""Starts the event!"""
heads = [187606096418963456, 298618155281154058, 169197827002466304, 263495765270200320, 117101067136794628, 164475721173958657, 191793155685744640]
channel = ctx.bot.get_channel(int(Configuration.getConfigVar(ctx.guild.id, "SUBMISSION_CHANNEL")))
everyone = None
if ctx.author.id not in heads:
return
for role in channel.guild.roles:
if role.id == channel.guild.id:
everyone = role
await channel.set_permissions(everyone, read_messages=True)
await ctx.send("Event has started!")
@commands.command(hidden=True)
async def end(self, ctx:commands.Context):
"""Ends the event!"""
heads = [187606096418963456, 298618155281154058, 169197827002466304, 263495765270200320, 117101067136794628, 164475721173958657, 191793155685744640]
channel = ctx.bot.get_channel(int(Configuration.getConfigVar(ctx.guild.id, "SUBMISSION_CHANNEL")))
everyone = None
if ctx.author.id not in heads:
return
for role in channel.guild.roles:
if role.id == channel.guild.id:
everyone = role
await channel.set_permissions(everyone, read_messages=False)
await ctx.send("Event has ended!")
def setup(bot):
    """Extension entry point used by discord.py's load_extension."""
    cog = EventControl(bot)
    bot.add_cog(cog)
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.