blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ba3fa88b6a2b401c8ad9cba50fff6ed7e0e78f72 | 1444d4164a00048d7909f0d398b2b40f6a0e83cf | /base/forms.py | 7fec7e694d6bd6b5a346be818bdeb977cdd0074c | [] | no_license | kaliqwang/temp-project | a4ff733c9d878b2db71ac91c143728ef64c401d3 | 2202019b310fd1d252ba19910d39fc66a6805f59 | refs/heads/master | 2021-01-15T08:05:49.917076 | 2016-09-12T01:46:25 | 2016-09-12T01:46:25 | 62,323,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | from django import forms
from categories.models import *
# Reusable optional Category dropdown shared by the app's forms: the empty
# choice renders as 'General (default)' and the widget carries Bootstrap's
# form-control class.
category_form_field = forms.ModelChoiceField(required=False, queryset=Category.objects.all(), empty_label='General (default)', widget=forms.Select(attrs={'class': 'form-control'}))
| [
"kaliqwang@yahoo.com"
] | kaliqwang@yahoo.com |
c37e80edfe6457d891f7033eb84a80f4921c502d | 1b3f5baa3e68efb9759ff0d275a6579315a46db5 | /code/Sina_spider3/Sina_spider3/scrapy_redis/dupefilter.py | 69be3b1a0cba86db42cb548f8602aa6491266e5f | [] | no_license | udengcnf/caonong | 8dd9fdf260a633fddfc1c023fe0f4d53c6da0715 | ab955a25b2412d37200da7cb397b7c3a49ee5d82 | refs/heads/master | 2022-12-12T08:52:28.267050 | 2018-10-09T08:08:12 | 2018-10-09T08:08:12 | 152,211,242 | 0 | 1 | null | 2022-12-08T00:48:19 | 2018-10-09T07:57:40 | Python | UTF-8 | Python | false | false | 1,492 | py | import time
import re
from scrapy.dupefilters import BaseDupeFilter
from . import connection
class RFPDupeFilter(BaseDupeFilter):
    """Redis-based request duplication filter.

    Deduplicates profile-info requests (URLs matching "<uid>/info") using
    Redis bitmaps: the bit numbered by the uid is set the first time a
    profile is seen and checked on every later request.
    NOTE: this module is Python 2 code (print statements, integer division).
    """
    def __init__(self, server, key):
        """Initialize duplication filter
        Parameters
        ----------
        server : Redis instance
        key : str
            Where to store fingerprints
        """
        self.server = server
        self.key = key
    @classmethod
    def from_settings(cls, settings):
        # A fresh timestamped key per run, so a restarted crawl never reuses
        # the previous run's fingerprint bitmaps.
        server = connection.from_settings_filter(settings)
        key = "dupefilter:%s" % int(time.time())
        return cls(server, key)
    @classmethod
    def from_crawler(cls, crawler):
        return cls.from_settings(crawler.settings)
    def request_seen(self, request):
        # Only "<uid>/info" URLs are deduplicated; any other URL falls
        # through and implicitly returns None (treated as not-seen).
        uid = re.findall('(\d+)/info', request.url)
        if uid:
            uid = int(uid[0])
            # uid / 4000000000 picks a bitmap shard (Python 2 integer
            # division) and uid % 4000000000 is the bit offset inside it —
            # presumably to stay below Redis's per-key bit-offset limit.
            # TODO confirm the 4e9 shard size rationale.
            isExist = self.server.getbit(self.key + str(uid / 4000000000), uid % 4000000000)
            if isExist == 1:
                print 'dupefilter :', uid, True
                return True
            else:
                # First sighting: record the uid and let the request through.
                self.server.setbit(self.key + str(uid / 4000000000), uid % 4000000000, 1)
                print 'dupefilter :', uid, False
                return False
    def close(self, reason):
        """Delete data on close. Called by scrapy's scheduler"""
        self.clear()
    def clear(self):
        """Clears fingerprints data"""
        self.server.delete(self.key)
| [
"udengcnf@163.com"
] | udengcnf@163.com |
9dbce5a205715864fe1a7c7716aa7609d6d60015 | ee9c71b8d2fcd5c2e5e43e4b43b52660a3a86f0d | /tests/test_service/test_models.py | 4b6b7439ccbc130041d1dbd4a4b0d3bbc7df3d8f | [] | no_license | amenegola/via-challenge | adc0b716044933ceaa68975fc25c34a494c60d6b | 28cb6965ece35b0590df4dfc7ea079747b8cfad2 | refs/heads/master | 2023-04-05T03:08:26.119080 | 2021-04-26T01:55:44 | 2021-04-26T01:55:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py |
import pytest
from app.models.payload import PredictionPayload
from app.models.prediction import PredictionResult
from app.services.models import IrisModel
def test_prediction(test_client) -> None:
    """A well-formed iris payload should yield a PredictionResult."""
    payload = PredictionPayload.parse_obj({
        "sepal_length": 5.1,
        "sepal_width": 3.5,
        "petal_length": 1.4,
        "petal_width": 0.2})
    model = IrisModel()
    outcome = model.predict(payload)
    assert isinstance(outcome, PredictionResult)
def test_prediction_no_payload(test_client) -> None:
    """predict() must raise ValueError when called without a payload.

    Fix: the previous version asserted on ``result`` *inside* the
    ``pytest.raises`` block; once the expected exception was raised that
    line could never execute, so it was dead code and has been removed.
    """
    hpm = IrisModel()
    with pytest.raises(ValueError):
        hpm.predict(None)
| [
"afonsomenegola@gmail.com"
] | afonsomenegola@gmail.com |
063580b8ea1f404affc36ba54723626f0fb970b9 | e1249dd27d7bd6c60540247f905408a0fb4978b3 | /news/tests.py | d1787c110f86b2d38b5ca930248acb2529904b47 | [] | no_license | Kibet1816/Mtribune | fd6426047abbb80c76902fd3127fd42f2505cf7f | c01e96a9b4a191efaf649fa0fd415c77c08918f2 | refs/heads/master | 2021-09-09T15:50:51.465564 | 2019-07-19T06:09:28 | 2019-07-19T06:09:28 | 197,712,168 | 0 | 0 | null | 2021-09-08T01:08:16 | 2019-07-19T06:08:18 | Python | UTF-8 | Python | false | false | 1,991 | py | import datetime as dt
from django.test import TestCase
from .models import Editor, Article, tags
# Create your tests here.
class EditorTestClass(TestCase):
    """Unit tests for the Editor model."""

    def setUp(self):
        """Build (but do not save) a fresh Editor before each test."""
        self.editor = Editor(
            first_name='Denis', last_name='Kibet', email='kibet@gmail.com')

    def test_instance(self):
        """The fixture should be an Editor instance."""
        self.assertTrue(isinstance(self.editor, Editor))

    def test_save_method(self):
        """save_editor() should persist the record to the database."""
        self.editor.save_editor()
        saved = Editor.objects.all()
        self.assertTrue(len(saved) > 0)

    def test_delete_method(self):
        """delete_editor() should remove a previously saved record."""
        self.editor.save_editor()
        self.editor.delete_editor()
        remaining = Editor.objects.all()
        self.assertTrue(len(remaining) == 0)
class ArticleTestClass(TestCase):
    """
    Test class for the Article module
    """
    def setUp(self):
        """
        Create an Editor, a tag, and a tagged Article fixture for each test.
        """
        self.rule = Editor(first_name='Ja', last_name='Rule',
                           email='ja@rule.com')
        self.rule.save_editor()
        self.new_tag = tags(name='testing')
        self.new_tag.save()
        self.new_article = Article(
            title='Test Article', post='This is a random test Post', editor=self.rule)
        self.new_article.save()
        self.new_article.tags.add(self.new_tag)
    def tearDown(self):
        # Wipe every fixture so tests stay independent of each other.
        Editor.objects.all().delete()
        tags.objects.all().delete()
        Article.objects.all().delete()
    def test_get_news_today(self):
        # The fixture article is created "now", so todays_news() must find it.
        today_news = Article.todays_news()
        self.assertTrue(len(today_news) > 0)
    def test_get_news_by_date(self):
        # NOTE(review): the fixture is created at run time, but this date is
        # hard-coded — the test presumably only passes when run on/around
        # 2019-07-16. Confirm Article.date_news semantics and fix if flaky.
        test_date = '2019-07-16'
        date = dt.datetime.strptime(test_date, '%Y-%m-%d').date()
        news_by_date = Article.date_news(date)
        self.assertTrue(len(news_by_date) > 0)
"kibet1816@gmail.com"
] | kibet1816@gmail.com |
c0c816ccd585d12a4a4cdc8bb717b7b915727bee | 39c872f5a3bc6d4854cc551bb63b28ce9dfc4343 | /week8/informatics/4/d.py | 93e1183add93195361b424d5ef0e5e47e1f37321 | [] | no_license | Torebekov/Web | d7110b9f4955650e1261bb75286167d68e3382d1 | 5b6a5ce573554de4998e3337d23aef99f41bf88f | refs/heads/master | 2021-04-07T15:23:52.905545 | 2020-04-09T17:43:45 | 2020-04-09T17:43:45 | 248,686,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | n = int(input())
arr = list(map(int, input().split()))
# Count adjacent pairs that strictly increase (n is read on the line above).
ascents = 0
for i in range(1, n):
    if arr[i] > arr[i - 1]:
        ascents += 1
print(ascents)
"Torebek.arman14@gmail.com"
] | Torebek.arman14@gmail.com |
ce91687d35a53c3509f9896e895a9ad39d8ed9c7 | 17bb12cfab49f1890d1a64b1fe9281523d588931 | /lib/python2.7/site-packages/django/test/simple.py | cca4319a3dba5e1d44fe74cee23da5d9ddb5614a | [] | no_license | jagsgill/319-server | 7515f629a3f7f8b633e25c881a7fdab7fc898c06 | 368d840aa0d28b861d2e9a6fad1abf25e2121124 | refs/heads/master | 2021-01-21T03:38:59.763685 | 2016-05-27T06:06:18 | 2016-05-27T06:06:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,969 | py | import unittest as real_unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner
from django.utils import unittest
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
# Public names exported by ``from django.test.simple import *``. The trailing
# comma is essential: without it __all__ is a plain string, and a wildcard
# import iterates it character by character and fails with ImportError.
__all__ = ('DjangoTestSuiteRunner',)

# The module name for tests outside models.py
TEST_MODULE = 'tests'

# Shared doctest output checker used by build_suite()/build_test() below.
doctestOutputChecker = OutputChecker()
def get_tests(app_module):
    """Return the app's ``tests`` module, or None if the app has none.

    ``app_module`` is the app's models module. An ImportError caused by a
    *missing* tests module yields None; an ImportError raised *inside* an
    existing tests module is re-raised so the failure isn't silently hidden.
    """
    parts = app_module.__name__.split('.')
    prefix, last = parts[:-1], parts[-1]
    try:
        test_module = import_module('.'.join(prefix + [TEST_MODULE]))
    except ImportError:
        # Couldn't import test_models.py. Was it due to a missing file, or
        # due to an import error in a test_models.py that actually exists?
        # app_module either points to a models.py file, or models/__init__.py
        # Tests are therefore either in same directory, or one level up
        if last == 'models':
            app_root = import_module('.'.join(prefix))
        else:
            app_root = app_module
        if not module_has_submodule(app_root, TEST_MODULE):
            test_module = None
        else:
            # The module exists, so there must be an import error in the test
            # module itself.
            raise
    return test_module
def build_suite(app_module):
    """
    Create a complete Django test suite for the provided application module.

    Collects, in order: unit tests and doctests from the app's models module,
    then unit tests and doctests from its parallel ``tests`` module, if one
    exists. A module-level ``suite()`` callable takes precedence over
    automatic discovery in either module.
    """
    suite = unittest.TestSuite()
    # Load unit and doctests in the models.py module. If module has
    # a suite() method, use it. Otherwise build the test suite ourselves.
    if hasattr(app_module, 'suite'):
        suite.addTest(app_module.suite())
    else:
        suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
            app_module))
        try:
            suite.addTest(doctest.DocTestSuite(app_module,
                                               checker=doctestOutputChecker,
                                               runner=DocTestRunner))
        except ValueError:
            # No doc tests in models.py
            pass
    # Check to see if a separate 'tests' module exists parallel to the
    # models module
    test_module = get_tests(app_module)
    if test_module:
        # Load unit and doctests in the test_models.py module. If module has
        # a suite() method, use it. Otherwise build the test suite ourselves.
        if hasattr(test_module, 'suite'):
            suite.addTest(test_module.suite())
        else:
            suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
                test_module))
            try:
                suite.addTest(doctest.DocTestSuite(
                    test_module, checker=doctestOutputChecker,
                    runner=DocTestRunner))
            except ValueError:
                # No doc tests in test_models.py
                pass
    return suite
def build_test(label):
    """
    Construct a test case with the specified label. Label should be of the
    form model.TestClass or model.TestClass.test_method. Returns an
    instantiated test or test suite corresponding to the label provided.

    Raises ValueError if the label is malformed or matches neither a
    TestCase nor a doctest.
    """
    parts = label.split('.')
    if len(parts) < 2 or len(parts) > 3:
        raise ValueError("Test label '%s' should be of the form app.TestCase "
                         "or app.TestCase.test_method" % label)
    #
    # First, look for TestCase instances with a name that matches
    #
    app_module = get_app(parts[0])
    test_module = get_tests(app_module)
    TestClass = getattr(app_module, parts[1], None)
    # Couldn't find the test class in models.py; look in test_models.py
    if TestClass is None:
        if test_module:
            TestClass = getattr(test_module, parts[1], None)
    try:
        if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
            if len(parts) == 2: # label is app.TestClass
                try:
                    return unittest.TestLoader().loadTestsFromTestCase(
                        TestClass)
                except TypeError:
                    raise ValueError(
                        "Test label '%s' does not refer to a test class"
                        % label)
            else: # label is app.TestClass.test_method
                return TestClass(parts[2])
    except TypeError:
        # TestClass isn't a TestClass - it must be a method or normal class
        pass
    #
    # If there isn't a TestCase, look for a doctest that matches
    #
    tests = []
    for module in app_module, test_module:
        try:
            doctests = doctest.DocTestSuite(module,
                                            checker=doctestOutputChecker,
                                            runner=DocTestRunner)
            # Now iterate over the suite, looking for doctests whose name
            # matches the pattern that was given
            for test in doctests:
                if test._dt_test.name in (
                        '%s.%s' % (module.__name__, '.'.join(parts[1:])),
                        '%s.__test__.%s' % (
                            module.__name__, '.'.join(parts[1:]))):
                    tests.append(test)
        except ValueError:
            # No doctests found.
            pass
    # If no tests were found, then we were given a bad test label.
    if not tests:
        raise ValueError("Test label '%s' does not refer to a test" % label)
    # Construct a suite out of the tests that matched.
    return unittest.TestSuite(tests)
def partition_suite(suite, classes, bins):
    """Recursively sort the tests in *suite* into *bins* by test type.

    ``classes`` is a sequence of test-case types; ``bins`` is a sequence of
    TestSuites with one extra slot. A test whose type matches ``classes[i]``
    lands in ``bins[i]``; anything unmatched falls through to ``bins[-1]``.
    Nested TestSuites are flattened by recursion.
    """
    for member in suite:
        if isinstance(member, unittest.TestSuite):
            partition_suite(member, classes, bins)
            continue
        for index, kind in enumerate(classes):
            if isinstance(member, kind):
                bins[index].addTest(member)
                break
        else:
            # No class matched: the catch-all bin gets it.
            bins[-1].addTest(member)

def reorder_suite(suite, classes):
    """Return a flat TestSuite with tests grouped by type.

    Tests whose type matches ``classes[0]`` come first, then ``classes[1]``,
    and so on; tests matching none of the classes are appended last.
    """
    buckets = [unittest.TestSuite() for _ in range(len(classes) + 1)]
    partition_suite(suite, classes, buckets)
    primary = buckets[0]
    for overflow in buckets[1:]:
        primary.addTests(overflow)
    return primary
def dependency_ordered(test_databases, dependencies):
    """
    Reorder test_databases into an order that honors the dependencies
    described in TEST_DEPENDENCIES.

    ``test_databases`` is a sequence of ``(signature, (db_name, aliases))``
    pairs; ``dependencies`` maps an alias to the aliases it depends on.
    Raises ImproperlyConfigured on any circular dependency.
    """
    ordered = []
    resolved = set()
    # Map each DB signature to the union of its aliases' dependencies.
    deps_by_sig = {}
    # Sanity check - no DB can depend on its own alias.
    for sig, (_, aliases) in test_databases:
        combined = set()
        for alias in aliases:
            combined.update(dependencies.get(alias, []))
        if not combined.isdisjoint(aliases):
            raise ImproperlyConfigured(
                "Circular dependency: databases %r depend on each other, "
                "but are aliases." % aliases)
        deps_by_sig[sig] = combined
    pending = list(test_databases)
    while pending:
        progressed = False
        deferred = []
        # Resolve every DB whose dependencies have all been satisfied;
        # defer the rest to a later pass.
        for sig, (db_name, aliases) in pending:
            if deps_by_sig[sig].issubset(resolved):
                resolved.update(aliases)
                ordered.append((sig, (db_name, aliases)))
                progressed = True
            else:
                deferred.append((sig, (db_name, aliases)))
        if not progressed:
            raise ImproperlyConfigured(
                "Circular dependency in TEST_DEPENDENCIES")
        pending = deferred
    return ordered
class DjangoTestSuiteRunner(object):
    """Orchestrates a full Django test run.

    The entry point is run_tests(); every other method is an overridable
    hook covering one phase: environment setup, suite construction,
    test-database creation, execution, and teardown.
    """
    def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
        # NOTE(review): failfast defaults to True here, so runs stop at the
        # first failure unless callers pass failfast=False — confirm this is
        # the intended default for this project.
        self.verbosity = verbosity
        self.interactive = interactive
        self.failfast = failfast
    def setup_test_environment(self, **kwargs):
        # Patch Django's test hooks, force DEBUG off, and install the
        # SIGINT handler so Ctrl-C lets the current test finish.
        setup_test_environment()
        settings.DEBUG = False
        unittest.installHandler()
    def build_suite(self, test_labels, extra_tests=None, **kwargs):
        """Build the suite from labels ('app', 'app.TestCase', or
        'app.TestCase.test_method'); no labels means every installed app."""
        suite = unittest.TestSuite()
        if test_labels:
            for label in test_labels:
                if '.' in label:
                    suite.addTest(build_test(label))
                else:
                    app = get_app(label)
                    suite.addTest(build_suite(app))
        else:
            for app in get_apps():
                suite.addTest(build_suite(app))
        if extra_tests:
            for test in extra_tests:
                suite.addTest(test)
        # Run plain TestCases first, doctests and everything else after.
        return reorder_suite(suite, (unittest.TestCase,))
    def setup_databases(self, **kwargs):
        """Create the test databases, honoring mirrors, shared-signature
        aliases and TEST_DEPENDENCIES ordering. Returns the state that
        teardown_databases() needs to restore things."""
        from django.db import connections, DEFAULT_DB_ALIAS
        # First pass -- work out which databases actually need to be created,
        # and which ones are test mirrors or duplicate entries in DATABASES
        mirrored_aliases = {}
        test_databases = {}
        dependencies = {}
        default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
        for alias in connections:
            connection = connections[alias]
            if connection.settings_dict['TEST_MIRROR']:
                # If the database is marked as a test mirror, save
                # the alias.
                mirrored_aliases[alias] = (
                    connection.settings_dict['TEST_MIRROR'])
            else:
                # Store a tuple with DB parameters that uniquely identify it.
                # If we have two aliases with the same values for that tuple,
                # we only need to create the test database once.
                item = test_databases.setdefault(
                    connection.creation.test_db_signature(),
                    (connection.settings_dict['NAME'], set())
                )
                item[1].add(alias)
                if 'TEST_DEPENDENCIES' in connection.settings_dict:
                    dependencies[alias] = (
                        connection.settings_dict['TEST_DEPENDENCIES'])
                else:
                    # Non-default DBs implicitly depend on the default one.
                    if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
                        dependencies[alias] = connection.settings_dict.get(
                            'TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])
        # Second pass -- actually create the databases.
        old_names = []
        mirrors = []
        for signature, (db_name, aliases) in dependency_ordered(
                test_databases.items(), dependencies):
            test_db_name = None
            # Actually create the database for the first connection
            for alias in aliases:
                connection = connections[alias]
                if test_db_name is None:
                    test_db_name = connection.creation.create_test_db(
                        self.verbosity, autoclobber=not self.interactive)
                    destroy = True
                else:
                    # Later aliases with the same signature just reuse the
                    # database created above (no second create/destroy).
                    connection.settings_dict['NAME'] = test_db_name
                    destroy = False
                old_names.append((connection, db_name, destroy))
        # Point each mirror alias at the NAME of the database it mirrors.
        for alias, mirror_alias in mirrored_aliases.items():
            mirrors.append((alias, connections[alias].settings_dict['NAME']))
            connections[alias].settings_dict['NAME'] = (
                connections[mirror_alias].settings_dict['NAME'])
        return old_names, mirrors
    def run_suite(self, suite, **kwargs):
        # Plain-text runner; verbosity/failfast come from __init__.
        return unittest.TextTestRunner(
            verbosity=self.verbosity, failfast=self.failfast).run(suite)
    def teardown_databases(self, old_config, **kwargs):
        """
        Destroys all the non-mirror databases.
        """
        old_names, mirrors = old_config
        for connection, old_name, destroy in old_names:
            if destroy:
                connection.creation.destroy_test_db(old_name, self.verbosity)
    def teardown_test_environment(self, **kwargs):
        # Undo setup_test_environment() in reverse order.
        unittest.removeHandler()
        teardown_test_environment()
    def suite_result(self, suite, result, **kwargs):
        # Exit status: total number of failed/errored tests.
        return len(result.failures) + len(result.errors)
    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        """
        Run the unit tests for all the test labels in the provided list.
        Labels must be of the form:
         - app.TestClass.test_method
            Run a single specific test method
         - app.TestClass
            Run all the test methods in a given class
         - app
            Search for doctests and unittests in the named application.
        When looking for tests, the test runner will look in the models and
        tests modules for the application.
        A list of 'extra' tests may also be provided; these tests
        will be added to the test suite.
        Returns the number of tests that failed.
        """
        self.setup_test_environment()
        suite = self.build_suite(test_labels, extra_tests)
        old_config = self.setup_databases()
        result = self.run_suite(suite)
        self.teardown_databases(old_config)
        self.teardown_test_environment()
        return self.suite_result(suite, result)
| [
"eliasjf@britecontent.com"
] | eliasjf@britecontent.com |
7bbf39d1e8473191af66738cca5df93645bda953 | 5233e3750f6780c4223b59f69652fe126719b44c | /tests/test_utils/perf/scheduler_dag_execution_timing.py | 0998a979497fa74e49ad40d3b73e946a66a51958 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | wojsamjan/airflow | 4cbbf534609cd3f8e000e56adff3ed95e37e0941 | 2938acd817561c79674ca333b83ee1972248df98 | refs/heads/main | 2023-07-20T02:13:34.748062 | 2021-08-04T13:44:06 | 2021-08-04T13:44:06 | 389,624,894 | 2 | 0 | Apache-2.0 | 2021-07-26T12:29:22 | 2021-07-26T12:29:22 | null | UTF-8 | Python | false | false | 10,987 | py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import gc
import os
import statistics
import sys
import textwrap
import time
from argparse import Namespace
from operator import attrgetter
import click
# Cap on active DagRuns; not referenced elsewhere in this module —
# presumably imported by the perf DAG definitions. TODO confirm before use.
MAX_DAG_RUNS_ALLOWED = 1
class ShortCircuitExecutorMixin:
    """
    Mixin class to manage the scheduler state during the performance test run.

    Watches task completions flowing through the executor and flags the
    scheduler to stop once every watched DAG has finished the requested
    number of runs.
    """
    def __init__(self, dag_ids_to_watch, num_runs):
        super().__init__()
        self.num_runs_per_dag = num_runs
        self.reset(dag_ids_to_watch)
    def reset(self, dag_ids_to_watch):
        """
        Capture the value that will determine when the scheduler is reset.
        """
        # One counter per DAG: decremented as its runs complete.
        self.dags_to_watch = {
            dag_id: Namespace(
                waiting_for=self.num_runs_per_dag,
                # A "cache" of DagRun row, so we don't have to look it up each
                # time. This is to try and reduce the impact of our
                # benchmarking code on runtime,
                runs={},
            )
            for dag_id in dag_ids_to_watch
        }
    def change_state(self, key, state, info=None):
        """
        Change the state of scheduler by waiting till the tasks is complete
        and then shut down the scheduler after the task is complete
        """
        from airflow.utils.state import State
        super().change_state(key, state, info=info)
        # key is the TaskInstance key tuple; only dag_id/execution_date matter.
        dag_id, _, execution_date, __ = key
        if dag_id not in self.dags_to_watch:
            return
        # This fn is called before the DagRun state is updated, so we can't
        # check the DR.state - so instead we need to check the state of the
        # tasks in that run
        run = self.dags_to_watch[dag_id].runs.get(execution_date)
        if not run:
            import airflow.models
            # odd `list()` is to work across Airflow versions.
            run = list(airflow.models.DagRun.find(dag_id=dag_id, execution_date=execution_date))[0]
            self.dags_to_watch[dag_id].runs[execution_date] = run
        if run and all(t.state == State.SUCCESS for t in run.get_task_instances()):
            # This run is done: forget its cache entry and count it off.
            self.dags_to_watch[dag_id].runs.pop(execution_date)
            self.dags_to_watch[dag_id].waiting_for -= 1
            if self.dags_to_watch[dag_id].waiting_for == 0:
                self.dags_to_watch.pop(dag_id)
            if not self.dags_to_watch:
                # Signal the scheduler loop to exit (reaches into SchedulerJob
                # private internals — relies on processor_agent._done).
                self.log.warning("STOPPING SCHEDULER -- all runs complete")
                self.scheduler_job.processor_agent._done = True
                return
        self.log.warning(
            "WAITING ON %d RUNS", sum(map(attrgetter('waiting_for'), self.dags_to_watch.values()))
        )
def get_executor_under_test(dotted_path):
    """
    Create and return a MockExecutor

    Returns a ShortCircuitExecutor class that mixes ShortCircuitExecutorMixin
    into the executor type named by *dotted_path*; the literal string
    "MockExecutor" selects the test-suite stub executor.
    """
    from airflow.executors.executor_loader import ExecutorLoader
    if dotted_path == "MockExecutor":
        from tests.test_utils.mock_executor import MockExecutor as executor
    else:
        executor = ExecutorLoader.load_executor(dotted_path)
    # NOTE(review): in the "MockExecutor" branch `executor` is bound to the
    # class itself, so type(executor) yields its *metaclass* rather than the
    # executor class — confirm this branch builds the intended base.
    executor_cls = type(executor)
    # Change this to try other executors
    class ShortCircuitExecutor(ShortCircuitExecutorMixin, executor_cls):
        """
        Placeholder class that implements the inheritance hierarchy
        """
        # Set by main() after the SchedulerJob is constructed.
        scheduler_job = None
    return ShortCircuitExecutor
def reset_dag(dag, session):
    """Wipe all execution state for *dag* and un-pause it.

    Un-pauses the DagModel row, then deletes every DagRun, TaskInstance and
    TaskFail record for the DAG so each benchmark repeat starts clean.
    """
    import airflow.models as models

    target_id = dag.dag_id

    # Un-pause the DAG so the scheduler will pick it up again.
    dag_model = models.DagModel
    session.query(dag_model).filter(dag_model.dag_id == target_id).update({'is_paused': False})

    # Remove all execution history, in the same order as the original code.
    for model in (models.DagRun, models.TaskInstance, models.TaskFail):
        session.query(model).filter(model.dag_id == target_id).delete()
def pause_all_dags(session):
    """
    Pause all Dags

    Flips ``is_paused`` on every DagModel row — presumably so the benchmark
    controls exactly which DAGs the scheduler runs (reset_dag un-pauses the
    ones under test).
    """
    from airflow.models.dag import DagModel
    session.query(DagModel).update({'is_paused': True})
def create_dag_runs(dag, num_runs, session):
    """
    Create `num_runs` of dag runs for sub-sequent schedules

    Each run is created in RUNNING state with a scheduled-style run_id, at
    consecutive schedule intervals starting from the DAG's first interval.
    """
    from airflow.utils import timezone
    from airflow.utils.state import State
    # Version compatibility: newer Airflow exposes DagRunType for the run_id
    # prefix; older versions expose DagRun.ID_PREFIX instead.
    try:
        from airflow.utils.types import DagRunType
        id_prefix = f'{DagRunType.SCHEDULED.value}__'
    except ImportError:
        from airflow.models.dagrun import DagRun
        id_prefix = DagRun.ID_PREFIX
    last_dagrun_at = None
    for _ in range(num_runs):
        # Walk the schedule forward one interval at a time.
        next_info = dag.next_dagrun_info(last_dagrun_at)
        last_dagrun_at = next_info.data_interval.start
        dag.create_dagrun(
            run_id=f"{id_prefix}{last_dagrun_at.isoformat()}",
            execution_date=last_dagrun_at,
            start_date=timezone.utcnow(),
            state=State.RUNNING,
            external_trigger=False,
            session=session,
        )
@click.command()
@click.option('--num-runs', default=1, help='number of DagRun, to run for each DAG')
@click.option('--repeat', default=3, help='number of times to run test, to reduce variance')
@click.option(
    '--pre-create-dag-runs',
    is_flag=True,
    default=False,
    help='''Pre-create the dag runs and stop the scheduler creating more.
    Warning: this makes the scheduler do (slightly) less work so may skew your numbers. Use sparingly!
    ''',
)
@click.option(
    '--executor-class',
    default='MockExecutor',
    help=textwrap.dedent(
        '''
        Dotted path Executor class to test, for example
        'airflow.executors.local_executor.LocalExecutor'. Defaults to MockExecutor which doesn't run tasks.
        '''
    ),
)
@click.argument('dag_ids', required=True, nargs=-1)
def main(num_runs, repeat, pre_create_dag_runs, executor_class, dag_ids):
    """
    This script can be used to measure the total "scheduler overhead" of Airflow.
    By overhead we mean if the tasks executed instantly as soon as they are
    executed (i.e. they do nothing) how quickly could we schedule them.
    It will monitor the task completion of the Mock/stub executor (no actual
    tasks are run) and after the required number of dag runs for all the
    specified dags have completed all their tasks, it will cleanly shut down
    the scheduler.
    The dags you run with need to have an early enough start_date to create the
    desired number of runs.
    Care should be taken that other limits (DAG max_active_tasks, pool size etc) are
    not the bottleneck. This script doesn't help you in that regard.
    It is recommended to repeat the test at least 3 times (`--repeat=3`, the
    default) so that you can get somewhat-accurate variance on the reported
    timing numbers, but this can be disabled for longer runs if needed.
    """
    # Turn on unit test mode so that we don't do any sleep() in the scheduler
    # loop - not needed on main, but this script can run against older
    # releases too!
    os.environ['AIRFLOW__CORE__UNIT_TEST_MODE'] = 'True'
    os.environ['AIRFLOW__CORE__MAX_ACTIVE_TASKS_PER_DAG'] = '500'
    # Set this so that dags can dynamically configure their end_date
    os.environ['AIRFLOW_BENCHMARK_MAX_DAG_RUNS'] = str(num_runs)
    os.environ['PERF_MAX_RUNS'] = str(num_runs)
    if pre_create_dag_runs:
        os.environ['AIRFLOW__SCHEDULER__USE_JOB_SCHEDULE'] = 'False'
    # Airflow imports happen only after the env vars above are set —
    # presumably so configuration is read with these values in effect.
    from airflow.jobs.scheduler_job import SchedulerJob
    from airflow.models.dagbag import DagBag
    from airflow.utils import db
    dagbag = DagBag()
    dags = []
    with db.create_session() as session:
        pause_all_dags(session)
        for dag_id in dag_ids:
            dag = dagbag.get_dag(dag_id)
            dag.sync_to_db(session=session)
            dags.append(dag)
            reset_dag(dag, session)
            # Verify the DAG's end_date lands exactly on the num_runs-th
            # schedule after its start; otherwise the watcher would never
            # see the expected number of runs complete.
            next_run_date = dag.normalize_schedule(dag.start_date or min(t.start_date for t in dag.tasks))
            for _ in range(num_runs - 1):
                next_run_date = dag.following_schedule(next_run_date)
            end_date = dag.end_date or dag.default_args.get('end_date')
            if end_date != next_run_date:
                message = (
                    f"DAG {dag_id} has incorrect end_date ({end_date}) for number of runs! "
                    f"It should be "
                    f" {next_run_date}"
                )
                sys.exit(message)
            if pre_create_dag_runs:
                create_dag_runs(dag, num_runs, session)
    ShortCircuitExecutor = get_executor_under_test(executor_class)
    executor = ShortCircuitExecutor(dag_ids_to_watch=dag_ids, num_runs=num_runs)
    scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor)
    executor.scheduler_job = scheduler_job
    total_tasks = sum(len(dag.tasks) for dag in dags)
    # Optional profiling: attach py-spy to this process when PYSPY is set.
    if 'PYSPY' in os.environ:
        pid = str(os.getpid())
        filename = os.environ.get('PYSPY_O', 'flame-' + pid + '.html')
        os.spawnlp(os.P_NOWAIT, 'sudo', 'sudo', 'py-spy', 'record', '-o', filename, '-p', pid, '--idle')
    times = []
    # Need a lambda to refer to the _latest_ value for scheduler_job, not just
    # the initial one
    code_to_test = lambda: scheduler_job.run()
    for count in range(repeat):
        # GC is disabled during the timed region to reduce timing noise.
        gc.disable()
        start = time.perf_counter()
        code_to_test()
        times.append(time.perf_counter() - start)
        gc.enable()
        print("Run %d time: %.5f" % (count + 1, times[-1]))
        if count + 1 != repeat:
            # Reset all state between repeats and build a fresh SchedulerJob.
            with db.create_session() as session:
                for dag in dags:
                    reset_dag(dag, session)
                executor.reset(dag_ids)
                scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor)
                executor.scheduler_job = scheduler_job
    print()
    print()
    msg = "Time for %d dag runs of %d dags with %d total tasks: %.4fs"
    if len(times) > 1:
        print(
            (msg + " (±%.3fs)")
            % (num_runs, len(dags), total_tasks, statistics.mean(times), statistics.stdev(times))
        )
    else:
        print(msg % (num_runs, len(dags), total_tasks, times[0]))
    print()
    print()
| [
"noreply@github.com"
] | noreply@github.com |
28a679d2bbcfff95fd757e00e51007662facaf2b | d5a1e7287dac6ca08805668f43b94ed2894cb429 | /venv/wal_steam/bin/symilar | b0402d79ee9922bb28ab89d8dd4c95c79b60ce59 | [
"MIT"
] | permissive | codysork/wal-steam | 0268cbe6e68af0486542d78ab929a0efc895f596 | deafd81fafdb0e4f2f03a18bc596d172f8ffd3bf | refs/heads/master | 2022-04-11T21:39:31.768861 | 2020-04-11T05:03:18 | 2020-04-11T05:03:18 | 254,550,452 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | #!/home/cody/Code/active_projects/wal_steam/venv/wal_steam/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
# Console-script stub: strip setuptools' "-script.pyw"/".exe" wrapper suffix
# from argv[0] so messages show the plain command name, then delegate to
# pylint's similar-code checker (its exit status propagates via sys.exit).
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_symilar())
| [
"codysork@tutanota.com"
] | codysork@tutanota.com | |
3ddcb05e08a3005dc4531a8c18cde61f46d65228 | 54328dddfad0d1d6f10b83a7a103fceef1fa20f3 | /examples/4_write_smart_contracts.py | a80e04a3823029d6f861cb050936886c2edc62d8 | [] | no_license | fengjianli007/learn-web3.py | 34104aa869bbb8adce81e977ff778a0565520126 | d989d5119831e41bc79598e021dce462b67edf5b | refs/heads/main | 2023-03-18T09:52:48.132882 | 2021-02-01T09:51:09 | 2021-02-01T09:51:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | import json
from web3 import Web3
# Set up web3 connection with Ganache
ganache_url = "http://127.0.0.1:7545"
web3 = Web3(Web3.HTTPProvider(ganache_url))
# TODO: Deploy the Greeter contract to Ganache with remix.ethereum.org
# Set a default account to sign transactions - this account is unlocked with Ganache
web3.eth.defaultAccount = web3.eth.accounts[0]
# Greeter contract ABI
abi = json.loads('[{"constant":false,"inputs":[{"name":"_greeting","type":"string"}],"name":"setGreeting","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"greet","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"greeting","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"}]')
# Greeter contract address - convert to checksum address
address = web3.toChecksumAddress('') # FILL ME IN
# Initialize contract
contract = web3.eth.contract(address=address, abi=abi)
# Read the default greeting
print(contract.functions.greet().call())
# Set a new greeting
tx_hash = contract.functions.setGreeting('anyeongggg!!!').transact()
print(web3.toHex(tx_hash))
# Wait for transaction to be mined
web3.eth.waitForTransactionReceipt(tx_hash)
# Display the new greeting value
print('Updated contract greeting: {}'.format(contract.functions.greet().call()))
| [
"noreply@github.com"
] | noreply@github.com |
0e3f366f9b2f023474aa0f26b034f046a6e738bd | 4ade37d929b07b1eea07337b9cc843661a66e6d0 | /trails/feeds/nothink.py | f40ae15122ffc7c0e6f962eac4765945bd5dded1 | [
"MIT"
] | permissive | Dm2333/maltrail | bade5c99583b99f4ad1128aef295e95c977d82b1 | 2f32e0c3ff65544fc07ad3787d4d9b210f975b85 | refs/heads/master | 2021-04-12T10:44:25.125653 | 2018-03-20T11:50:40 | 2018-03-20T11:50:40 | 126,193,051 | 1 | 0 | MIT | 2018-03-21T14:40:05 | 2018-03-21T14:40:03 | Python | UTF-8 | Python | false | false | 674 | py | #!/usr/bin/env python
"""
Copyright (c) 2014-2018 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
from core.common import retrieve_content
__url__ = "http://www.nothink.org/blacklist/blacklist_malware_irc.txt"
__check__ = "Malware IRC"
__info__ = "potential malware site"
__reference__ = "nothink.org"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line] = (__info__, __reference__)
return retval
| [
"miroslav.stampar@gmail.com"
] | miroslav.stampar@gmail.com |
04b84eff2004079efaec07d2daa7735e3fc45d0f | 0239d5a1d32807766740c0e80c098660d944b8ab | /support_functions.py | 975ab1aee0c230ab749d12a90adeba1eb610f1e4 | [] | no_license | etzimopoulos/Job-descriptions-topic-modelling-using-NLP | c4d46ca7b00abb040b0471740bc6e6a863f6dbfc | ae30c0b0251ec66656fd317d6b7446fa3a766e87 | refs/heads/master | 2023-02-11T21:20:02.752507 | 2021-01-07T05:33:28 | 2021-01-07T05:33:28 | 303,058,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,264 | py | import re
import pandas as pd
import spacy
from io import StringIO
from html.parser import HTMLParser
# Function to extract "e-mail" from dataframe column
# Input:
# * Dataframe "df"
# * Column "df_column" to extract e-mail from
# Output: Enriched dataframe with addional column labelled "Email"
def extract_email_from_column(df, df_column):
info = []
for text in df_column:
#email = re.findall(r'\w+@\w+.\w{3}',text)[:1]
#email = re.findall(r'\S+@\S+', text)[:1]
email = re.findall("([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)", text)[:1]
info.append(email)
# Add extracted list of emails to dataframe as a column
df['Email'] = pd.DataFrame(info)
return df
# Function to extract "company name" from "E-mail" dataframe column
# Input:
# * Dataframe "df"
# * Column "df_column" to extract company name from
# Output: Enriched dataframe with addional column labelled "Company"
def extract_company_from_column(df, df_column):
comp_name = []
for email in df_column:
if email == None:
name = 'None'
comp_name.append(name)
else:
name = email[ email.find("@")+1 : email.find(".")]
comp_name.append(name)
# Add extracted list of Company names as new dataframe Column
df["Company"] = pd.DataFrame(comp_name)
return df
# Function to display basic entity information
# Input:
# * NLP document
# Output:
# * No output
# * Shows on screen basic entity information
def show_entities(document):
if document.ents:
for ent in document.ents:
print(ent.text+' - '+ent.label_+' - '+str(spacy.explain(ent.label_)))
else:
print('No named entities found.')
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
#
# Function to print out the "n" top words in a topic
# Input:
# * NLP model
# * Feature names
# * Number of top words in a topic
# Output:
# * Prints on screen top n words per topic
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
# print(f'THE TOP {n_top_words} WORDS FOR TOPIC #{topic_idx}')
message = "\nThe top %d words for topic #%d are:" % (n_top_words, topic_idx)
message += " ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]])
print(message)
print()
def return_top_words(model, feature_names, n_top_words):
t_topics = []
for topic_idx, topic in enumerate(model.components_):
t_topics[topic_idx].append([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])
top_topics = pd.DataFrame(t_topics)
return top_topics
# Source code from following link
# https://stackoverflow.com/questions/753052/strip-html-from-strings-in-python
class MLStripper(HTMLParser):
def __init__(self):
super().__init__()
self.reset()
self.strict = False
self.convert_charrefs= True
self.text = StringIO()
def handle_data(self, d):
self.text.write(d)
def get_data(self):
return self.text.getvalue()
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
# Function to remove HTML tags, "\\r" and "\xa0" from dataframe column
# Input:
# * Dataframe "df"
# * Column "df_column" to extract company name from
# Output: Enriched dataframe with addional column labelled "JD"
#
# Alternative Regex
# import re
# clean = lambda sentence: re.sub(r"(?:\\+r)+|[^ \w.\\]+", "", sentence)
def clean_job_description(df, df_column):
new_row = []
for row in df_column:
t = strip_tags(row) # Strip HTML tags using strig_tag() function
t1 = t.replace('\\r','') # Remove all "\\r" characters from text
t2 = t1.replace('\xa0',' ') # Remove all "\xa0" characters from text
t3 = t2.replace('\\n', '')
new_row.append(t3)
# Add updated Job Description column as new dataframe Column
df["Job Description"] = pd.DataFrame(new_row)
return df
| [
"etzimopoulos@DESKTOP-HI9P6E4.localdomain"
] | etzimopoulos@DESKTOP-HI9P6E4.localdomain |
5b9c711cc45e6a97a52a2ff2383cc9df5707e3b5 | 00a3db4204f764b2d4e49867b198c0203b3e7732 | /retrain.py | 92607ee0fcd71fdcd712efb583ebf9aff10cb6a7 | [] | no_license | Madhavraob/tensorflow | 555397b7146753234299c26654156e2e99c7f47e | 6b37014ceeb7e1bd025f2fa6c162b4dd9597e859 | refs/heads/master | 2020-03-20T05:55:16.610336 | 2018-06-24T04:15:36 | 2018-06-24T04:15:36 | 137,231,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55,284 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# NOTICE: This work was derived from tensorflow/examples/image_retraining
# and modified to use TensorFlow Hub modules.
# pylint: disable=line-too-longr
"""Simple transfer learning with image modules from TensorFlow Hub.
This example shows how to train an image classifier based on any
TensorFlow Hub module that computes image feature vectors. By default,
it uses the feature vectors computed by Inception V3 trained on ImageNet.
See https://github.com/tensorflow/hub/blob/master/docs/modules/image.md
for more options.
The top layer receives as input a 2048-dimensional vector (assuming
Inception V3) for each image. We train a softmax layer on top of this
representation. If the softmax layer contains N labels, this corresponds
to learning N + 2048*N model parameters for the biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. (For a working example,
download http://download.tensorflow.org/example_images/flower_photos.tgz
and run tar xzf flower_photos.tgz to unpack it.)
Once your images are prepared, and you have pip-installed tensorflow-hub and
a sufficiently recent version of tensorflow, you can run the training with a
command like this:
```bash
python retrain.py --image_dir ~/flower_photos
```
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the tensorflow/examples/label_image sample code.
By default this script will use the highly accurate, but comparatively large and
slow Inception V3 model architecture. It's recommended that you start with this
to validate that you have gathered good training data, but if you want to deploy
on resource-limited platforms, you can try the `--tfhub_module` flag with a
Mobilenet model. For more information on Mobilenet, see
https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
For example:
Run floating-point version of Mobilenet:
```bash
python retrain.py --image_dir ~/flower_photos \
--tfhub_module https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/1
```
Run Mobilenet, instrumented for quantization:
```bash
python retrain.py --image_dir ~/flower_photos/ \
--tfhub_module https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/quantops/feature_vector/1
```
These instrumented models can be converted to fully quantized mobile models via
TensorFlow Lite.
There are different Mobilenet models to choose from, with a variety of file
size and latency options.
- The first number can be '100', '075', '050', or '025' to control the number
of neurons (activations of hidden layers); the number of weights (and hence
to some extent the file size and speed) shrinks with the square of that
fraction.
- The second number is the input image size. You can choose '224', '192',
'160', or '128', with smaller sizes giving faster speeds.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
To use with Tensorflow Serving, run this tool with --saved_model_dir set
to some increasingly numbered export location under the model base path, e.g.:
```bash
python retrain.py (... other args as before ...) \
--saved_model_dir=/tmp/saved_models/$(date +%s)/
tensorflow_model_server --port=9000 --model_name=my_image_classifier \
--model_base_path=/tmp/saved_models/
```
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
from datetime import datetime
import hashlib
import os.path
import random
import re
import sys
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
FLAGS = None
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
# The location where variable checkpoints will be stored.
CHECKPOINT_NAME = '/tmp/_retrain_checkpoint'
# A module is understood as instrumented for quantization with TF-Lite
# if it contains any of these ops.
FAKE_QUANT_OPS = ('FakeQuantWithMinMaxVars',
'FakeQuantWithMinMaxVarsPerChannel')
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
An OrderedDict containing an entry for each label subfolder, with images
split into training, testing, and validation sets within each label.
The order of items defines the class indices.
"""
if not tf.gfile.Exists(image_dir):
tf.logging.error("Image directory '" + image_dir + "' not found.")
return None
result = collections.OrderedDict()
sub_dirs = sorted(x[0] for x in tf.gfile.Walk(image_dir))
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
tf.logging.info("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(tf.gfile.Glob(file_glob))
if not file_list:
tf.logging.warning('No files found')
continue
if len(file_list) < 20:
tf.logging.warning(
'WARNING: Folder has less than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
tf.logging.warning(
'WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put an image in, the data set creator has a way of
# grouping photos that are close variations of each other. For example
# this is used in the plant disease data set to group multiple pictures of
# the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(tf.compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
def get_image_path(image_lists, label_name, index, image_dir, category):
"""Returns a path to an image for a label at the given index.
Args:
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category, module_name):
"""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
module_name: The name of the image module being used.
Returns:
File system path string to an image that meets the requested parameters.
"""
module_name = (module_name.replace('://', '~') # URL scheme.
.replace('/', '~') # URL and Unix paths.
.replace(':', '~').replace('\\', '~')) # Windows paths.
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '_' + module_name + '.txt'
def create_module_graph(module_spec):
"""Creates a graph and loads Hub Module into it.
Args:
module_spec: the hub.ModuleSpec for the image module being used.
Returns:
graph: the tf.Graph that was created.
bottleneck_tensor: the bottleneck values output by the module.
resized_input_tensor: the input images, resized as expected by the module.
wants_quantization: a boolean, whether the module has been instrumented
with fake quantization ops.
"""
height, width = hub.get_expected_image_size(module_spec)
with tf.Graph().as_default() as graph:
resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])
m = hub.Module(module_spec)
bottleneck_tensor = m(resized_input_tensor)
wants_quantization = any(node.op in FAKE_QUANT_OPS
for node in graph.as_graph_def().node)
return graph, bottleneck_tensor, resized_input_tensor, wants_quantization
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
decoded_image_tensor: Output of initial image resizing and preprocessing.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
# First decode the JPEG image, resize it, and rescale the pixel values.
resized_input_values = sess.run(decoded_image_tensor,
{image_data_tensor: image_data})
# Then run it through the recognition network.
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: resized_input_values})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Create a single bottleneck file."""
tf.logging.info('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index,
image_dir, category)
if not tf.gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = tf.gfile.FastGFile(image_path, 'rb').read()
try:
bottleneck_values = run_bottleneck_on_image(
sess, image_data, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor)
except Exception as e:
raise RuntimeError('Error during processing file %s (%s)' % (image_path,
str(e)))
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, module_name):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The output tensor for the bottleneck values.
module_name: The name of the image module being used.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category, module_name)
if not os.path.exists(bottleneck_path):
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
did_hit_error = False
try:
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
except ValueError:
tf.logging.warning('Invalid float found, recreating bottleneck')
did_hit_error = True
if did_hit_error:
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
# Allow exceptions to propagate here, since they shouldn't happen after a
# fresh creation
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: OrderedDict of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The penultimate output layer of the graph.
module_name: The name of the image module being used.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(
sess, image_lists, label_name, index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
tf.logging.info(
str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, module_name):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
module_name: The name of the image module being used.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
bottlenecks.append(bottleneck)
ground_truths.append(label_index)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
bottlenecks.append(bottleneck)
ground_truths.append(label_index)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not tf.gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = tf.gfile.FastGFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
# sending running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: distorted_image_data})
bottleneck_values = np.squeeze(bottleneck_values)
bottlenecks.append(bottleneck_values)
ground_truths.append(label_index)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness, module_spec):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
graph.
module_spec: The hub.ModuleSpec for the image module being used.
Returns:
The jpeg input layer and the distorted result tensor.
"""
input_height, input_width = hub.get_expected_image_size(module_spec)
input_depth = hub.get_num_image_channels(module_spec)
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
# Convert from full range of uint8 to range [0,1] of float32.
decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,
tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(shape=[],
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, input_width)
precrop_height = tf.multiply(scale_value, input_height)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, axis=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[input_height, input_width, input_depth])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(shape=[],
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
return jpeg_data, distort_result
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor,
quantize_layer, is_training):
"""Adds a new softmax and fully-connected layer for training and eval.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://www.tensorflow.org/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
quantize_layer: Boolean, specifying whether the newly added layer should be
instrumented for quantization with TF-Lite.
is_training: Boolean, specifying whether the newly add layer is for training
or eval.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
batch_size, bottleneck_tensor_size = bottleneck_tensor.get_shape().as_list()
assert batch_size is None, 'We want to work with arbitrary batch size.'
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor,
shape=[batch_size, bottleneck_tensor_size],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(
tf.int64, [batch_size], name='GroundTruthInput')
# Organizing the following ops so they are easier to see in TensorBoard.
layer_name = 'final_retrain_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value = tf.truncated_normal(
[bottleneck_tensor_size, class_count], stddev=0.001)
layer_weights = tf.Variable(initial_value, name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
# The tf.contrib.quantize functions rewrite the graph in place for
# quantization. The imported model graph has already been rewritten, so upon
# calling these rewrites, only the newly added final layer will be
# transformed.
if quantize_layer:
if is_training:
tf.contrib.quantize.create_training_graph()
else:
tf.contrib.quantize.create_eval_graph()
tf.summary.histogram('activations', final_tensor)
# If this is an eval graph, we don't need to add loss ops or an optimizer.
if not is_training:
return None, None, bottleneck_input, ground_truth_input, final_tensor
with tf.name_scope('cross_entropy'):
cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy(
labels=ground_truth_input, logits=logits)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
train_step = optimizer.minimize(cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(prediction, ground_truth_tensor)
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
def run_final_eval(train_session, module_spec, class_count, image_lists,
jpeg_data_tensor, decoded_image_tensor,
resized_image_tensor, bottleneck_tensor):
"""Runs a final evaluation on an eval graph using the test data set.
Args:
train_session: Session for the train graph with the tensors below.
module_spec: The hub.ModuleSpec for the image module being used.
class_count: Number of classes
image_lists: OrderedDict of training images for each label.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_image_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
"""
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(train_session, image_lists,
FLAGS.test_batch_size,
'testing', FLAGS.bottleneck_dir,
FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, FLAGS.tfhub_module))
(eval_session, _, bottleneck_input, ground_truth_input, evaluation_step,
prediction) = build_eval_session(module_spec, class_count)
test_accuracy, predictions = eval_session.run(
[evaluation_step, prediction],
feed_dict={
bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth
})
tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
(test_accuracy * 100, len(test_bottlenecks)))
if FLAGS.print_misclassified_test_images:
tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i]:
tf.logging.info('%70s %s' % (test_filename,
list(image_lists.keys())[predictions[i]]))
def build_eval_session(module_spec, class_count):
"""Builds an restored eval session without train operations for exporting.
Args:
module_spec: The hub.ModuleSpec for the image module being used.
class_count: Number of classes
Returns:
Eval session containing the restored eval graph.
The bottleneck input, ground truth, eval step, and prediction tensors.
"""
# If quantized, we need to create the correct eval graph for exporting.
eval_graph, bottleneck_tensor, resized_input_tensor, wants_quantization = (
create_module_graph(module_spec))
eval_sess = tf.Session(graph=eval_graph)
with eval_graph.as_default():
# Add the new layer for exporting.
(_, _, bottleneck_input,
ground_truth_input, final_tensor) = add_final_retrain_ops(
class_count, FLAGS.final_tensor_name, bottleneck_tensor,
wants_quantization, is_training=False)
# Now we need to restore the values from the training graph to the eval
# graph.
tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME)
evaluation_step, prediction = add_evaluation_step(final_tensor,
ground_truth_input)
return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input,
evaluation_step, prediction)
def save_graph_to_file(graph, graph_file_name, module_spec, class_count):
"""Saves an graph to file, creating a valid quantized one if necessary."""
sess, _, _, _, _, _ = build_eval_session(module_spec, class_count)
graph = sess.graph
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with tf.gfile.FastGFile(graph_file_name, 'wb') as f:
f.write(output_graph_def.SerializeToString())
def prepare_file_system():
# Set up the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
if FLAGS.intermediate_store_frequency > 0:
ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)
return
def add_jpeg_decoding(module_spec):
"""Adds operations that perform JPEG decoding and resizing to the graph..
Args:
module_spec: The hub.ModuleSpec for the image module being used.
Returns:
Tensors for the node to feed JPEG data into, and the output of the
preprocessing steps.
"""
input_height, input_width = hub.get_expected_image_size(module_spec)
input_depth = hub.get_num_image_channels(module_spec)
jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
# Convert from full range of uint8 to range [0,1] of float32.
decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,
tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
resize_shape = tf.stack([input_height, input_width])
resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
resized_image = tf.image.resize_bilinear(decoded_image_4d,
resize_shape_as_int)
return jpeg_data, resized_image
def export_model(module_spec, class_count, saved_model_dir):
"""Exports model for serving.
Args:
module_spec: The hub.ModuleSpec for the image module being used.
class_count: The number of classes.
saved_model_dir: Directory in which to save exported model and variables.
"""
# The SavedModel should hold the eval graph.
sess, in_image, _, _, _, _ = build_eval_session(module_spec, class_count)
graph = sess.graph
with graph.as_default():
inputs = {'image': tf.saved_model.utils.build_tensor_info(in_image)}
out_classes = sess.graph.get_tensor_by_name('final_result:0')
outputs = {
'prediction': tf.saved_model.utils.build_tensor_info(out_classes)
}
signature = tf.saved_model.signature_def_utils.build_signature_def(
inputs=inputs,
outputs=outputs,
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
# Save out the SavedModel.
builder = tf.saved_model.builder.SavedModelBuilder(saved_model_dir)
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.
DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature
},
legacy_init_op=legacy_init_op)
builder.save()
def main(_):
# Needed to make sure the logging output is visible.
# See https://github.com/tensorflow/tensorflow/issues/3047
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.image_dir:
tf.logging.error('Must set flag --image_dir.')
return -1
# Prepare necessary directories that can be used during training
prepare_file_system()
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
tf.logging.error('Only one valid folder of images found at ' +
FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
# Set up the pre-trained graph.
module_spec = hub.load_module_spec(FLAGS.tfhub_module)
graph, bottleneck_tensor, resized_image_tensor, wants_quantization = (
create_module_graph(module_spec))
# Add the new layer that we'll be training.
with graph.as_default():
(train_step, cross_entropy, bottleneck_input,
ground_truth_input, final_tensor) = add_final_retrain_ops(
class_count, FLAGS.final_tensor_name, bottleneck_tensor,
wants_quantization, is_training=True)
with tf.Session(graph=graph) as sess:
# Initialize all weights: for the module to their pretrained values,
# and for the newly added retraining layer to random initial values.
init = tf.global_variables_initializer()
sess.run(init)
# Set up the image decoding sub-graph.
jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(module_spec)
if do_distort_images:
# We will be applying distortions, so set up the operations we'll need.
(distorted_jpeg_data_tensor,
distorted_image_tensor) = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness, module_spec)
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
FLAGS.bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, FLAGS.tfhub_module)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, _ = add_evaluation_step(final_tensor, ground_truth_input)
# Merge all the summaries and write them out to the summaries_dir
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(
FLAGS.summaries_dir + '/validation')
# Create a train saver that is used to restore values into an eval graph
# when exporting models.
train_saver = tf.train.Saver()
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every
# time with distortions applied, or from the cache stored on disk.
if do_distort_images:
(train_bottlenecks,
train_ground_truth) = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
(train_bottlenecks,
train_ground_truth, _) = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.tfhub_module)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run(
[merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %
(datetime.now(), i, train_accuracy * 100))
tf.logging.info('%s: Step %d: Cross entropy = %f' %
(datetime.now(), i, cross_entropy_value))
# TODO: Make this use an eval graph, to avoid quantization
# moving averages being updated by the validation set, though in
# practice this makes a negligable difference.
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.tfhub_module))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
# Store intermediate results
intermediate_frequency = FLAGS.intermediate_store_frequency
if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)
and i > 0):
# If we want to do an intermediate save, save a checkpoint of the train
# graph, to restore into the eval graph.
train_saver.save(sess, CHECKPOINT_NAME)
intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +
'intermediate_' + str(i) + '.pb')
tf.logging.info('Save intermediate result to : ' +
intermediate_file_name)
save_graph_to_file(graph, intermediate_file_name, module_spec,
class_count)
# After training is complete, force one last save of the train checkpoint.
train_saver.save(sess, CHECKPOINT_NAME)
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
run_final_eval(sess, module_spec, class_count, image_lists,
jpeg_data_tensor, decoded_image_tensor, resized_image_tensor,
bottleneck_tensor)
# Write out the trained graph and labels with the weights stored as
# constants.
tf.logging.info('Save final result to : ' + FLAGS.output_graph)
if wants_quantization:
tf.logging.info('The model is instrumented for quantization with TF-Lite')
save_graph_to_file(graph, FLAGS.output_graph, module_spec, class_count)
with tf.gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if FLAGS.saved_model_dir:
export_model(module_spec, class_count, FLAGS.saved_model_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type=str,
default='/images/',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type=str,
default='/tmp/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--intermediate_output_graphs_dir',
type=str,
default='/tmp/intermediate_graph/',
help='Where to save the intermediate graphs.'
)
parser.add_argument(
'--intermediate_store_frequency',
type=int,
default=0,
help="""\
How many steps to store intermediate graph. If "0" then will not
store.\
"""
)
parser.add_argument(
'--output_labels',
type=str,
default='/tmp/output_labels.txt',
help='Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=4000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=10,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=100,
help='How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default=False,
help="""\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default='/tmp/bottleneck',
help='Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type=str,
default='final_result',
help="""\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default=False,
help="""\
Whether to randomly flip half of the training images horizontally.\
""",
action='store_true'
)
parser.add_argument(
'--random_crop',
type=int,
default=0,
help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type=int,
default=0,
help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
parser.add_argument(
'--tfhub_module',
type=str,
default=(
'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1'),
help="""\
Which TensorFlow Hub module to use.
See https://github.com/tensorflow/hub/blob/master/docs/modules/image.md
for some publicly available ones.\
""")
parser.add_argument(
'--saved_model_dir',
type=str,
default='',
help='Where to save the exported graph.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) | [
"madhavrao039@gmail.com"
] | madhavrao039@gmail.com |
9e3734912b799939a9397bacbaef09fc026224bd | 6d59cfe648a1aa4bfcb6deffad161e0c91a2e018 | /python/fibonacci.py | fe8f7ce85408c0c9ce137ba168869288c9d0e350 | [] | no_license | jclane/beginner-project-solutions | cf0b3c823c0748d15d288e97325deb6aceecb649 | bbf714dc5656d26653facfdfad14d2f878f9e71a | refs/heads/master | 2022-08-27T02:17:35.467041 | 2021-10-21T20:39:45 | 2021-10-21T20:39:45 | 144,029,894 | 2 | 5 | null | 2022-08-19T09:40:38 | 2018-08-08T15:01:25 | Python | UTF-8 | Python | false | false | 985 | py | import sys
def loop_fibonacci(n):
sequence = []
count = n
num1 = 0
num2 = 1
i = 1;
while (i < count):
result = num1 + num2
num1 = num2
num2 = result
sequence.append(result)
i += 1
return sequence[-1]
def recursive_fibonacci(n):
if (n < 2):
return n
return recursive_fibonacci(n-1) + recursive_fibonacci(n-2)
while True:
loop_or_rescursive = input("Use loop method or rescursive method? >> [enter 'l' or 'r'] ")
if (loop_or_rescursive.lower() == 'r'):
response = input("Enter a number or 'q' to quit >> ")
if (response.lower() != 'q'):
print(recursive_fibonacci(int(response)))
else:
System.exit()
if (loop_or_rescursive.lower() == 'l'):
response = input("Enter a number or 'q' to quit >> ")
if (response.lower() != 'q'):
print(loop_fibonacci(int(response)))
else:
System.exit()
| [
"justin@justinlane.me"
] | justin@justinlane.me |
9733520e6f04dfdc874e0b809d31c3aa548e2f2e | 65d78065d21d82e66df6fedfb059bef60b3eadb0 | /get-transcript/run-get-transcript.py | 75f4cb5a742d8cf4b88f0d379e5c525607805afc | [] | no_license | jamie7533/OkZoomer-Transcript | a3e404270024a8b6ce626c55bcb5e53f20d64f70 | dd56cd0ccea7c945f05c17c2b62675eedfdc5eb1 | refs/heads/main | 2023-01-22T06:24:32.080244 | 2020-12-05T14:09:31 | 2020-12-05T14:09:31 | 310,171,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | import get_transcript as gt
# steps before running:
# 1) run localhost:8080 server
# 2) zoom.us/oauth/token?response_type=code&client_id=FEc1Rq0JTi2MFfHNH94DgA&redirect_uri=http://localhost:8080
# 3) make sure id, key, secret, and code all correct
# example meetingIDs: 92023192477 (07/31 Chemistry Expo, unreachable), 94923151321 (11/05 and 11/10!!!)
# limitations:
# 1) must update code before each run, instead of every hour, or is the hour just for the token?
# 2) only getting meetings from the last month
# (self, meeting_id, client_key=None, client_secret=None, code=None, access_token=None)
spongebob = gt.Transcript(meeting_id=94923151321, # J's 11/10 meeting
client_key="FEc1Rq0JTi2MFfHNH94DgA",
client_secret="WECczlqk1PZLmmwzt1c1n43hcmw7lHDJ",
code="bd0Wey4dh8_wcT7DtCgTKKr-HDFlXouZA") # update before each run
spongebob.GetTranscript()
| [
"lee.jamie.july@gmail.com"
] | lee.jamie.july@gmail.com |
42d8d8a5983a916bfb5a4b569ba7a0f5a573d60e | 825c04efbe29987a572d93ae58674c4a4d9eff02 | /hack_chat_related/test.py | 193160bee9cb1e4a2e669a2ea3e4cbd203b9a848 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | Cynthia7979/tools-programs | ba969e6a93e9d2648d99ad059a924d242444889f | 2ede956cdc141009e998aff73741751b0af82166 | refs/heads/main | 2023-08-17T22:53:34.937503 | 2023-08-15T03:54:13 | 2023-08-15T03:54:13 | 131,550,964 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | import hackchat
import re
import random
def try_reply(chat, message, sender):
print(f'{message} ~{sender}')
if message == '/help':
chat.send_message('Unknown command. Type "/help" for help.')
if message.lower() == '@helpbot mod me' and '@HelpBot' in message:
chat._send_packet({'cmd': 'addmod', 'trip': sender})
chat.send_message('Modded.')
if re.search('[0-9]+d[0-9]+', message):
print('matched')
xdx = re.search('[0-9]+d[0-9]+', message).group(0)
multiply = int(xdx[:xdx.find('d')])
dice_range = int(xdx[xdx.find('d')+1:])
chat.send_message(f'{xdx} = {multiply*random.randint(0,dice_range)}')
def welcome(chat, user):
chat.send_message(f'{user}, welcome to the chat!!!')
main_chat = hackchat.HackChat('HelpBot', 'cynthia!')
# test()
main_chat.on_message += [try_reply]
main_chat.on_join += [welcome]
main_chat.run()
| [
"CynthiaWang7979@hotmail.com"
] | CynthiaWang7979@hotmail.com |
95e6efc21f874e8cbb14105844d0c4e9889e9913 | 87633b2791a1f6993f9889da0e3aef3d87515bd8 | /wallet/models.py | 40996486017d49a70ef2c6f92e5e3169e9373f33 | [] | no_license | skhemka/myWallet | ab9d0ac9a4b363f80f74dcd587b8fc87cd07563e | 79fa4c0e73e72d8c4b9160161e31ada33abf9038 | refs/heads/master | 2021-01-18T13:58:55.009681 | 2014-10-23T04:05:59 | 2014-10-23T04:05:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | from django.db import models
# Create your models here.
class Wallet(models.Model):
#style has unique id
style = models.AutoField(primary_key=True,unique=True)
#name is a string
name = models.CharField(max_length=20)
#creating a tuple of sizes
SIZES = (
('Tiny', 'Tiny'),
('Normal', 'Normal'),
('Huge', 'Huge'),
)
size = models.CharField(max_length=10, choices=SIZES)
#creating a tuple of colors
COLORS = (
('Black', 'Black'),
('Beige', 'Beige'),
('White', 'White'),
('Red', 'Red')
)
color = models.CharField(max_length=10, choices=COLORS)
#creating tuple of card slots
l1 = range(2,16)
l2 = range(2,16)
l = zip(l1,l2)
card_slots = models.IntegerField(choices=l)
#creating tuple for YES/NO
YN = [('Yes', 'Yes'),('No','No')]
id_slot = models.CharField(max_length=3, choices=YN)
coin_pocket = models.CharField(max_length=3, choices=YN)
#creating tuple for money slots
D = [(1,1),(2,2)]
money_slots = models.SmallIntegerField(choices=D)
embossed_name = models.CharField(max_length = 10,
help_text = "Not more than 10 characters")
#to make object pretty
def __str__(self):
return str(self.style) + " " + self.name
| [
"skhemka@cmu.edu"
] | skhemka@cmu.edu |
c8405e6db823f6caa4f4fa53b1e56b1d815544a4 | 7185c84a4f05b00b57306a7284a86f9a2e55bfbf | /tests/test_trade.py | 1fa4f1384c441ab98ef36092bbec452d8ceb1dca | [
"BSD-3-Clause"
] | permissive | namdori61/epymetheus | 39cbdfa99f73876561fe24cddf16ed8aade153b6 | 3df4409449d2990a28984880771cbbcbd90f32b1 | refs/heads/main | 2023-06-03T20:48:25.411104 | 2021-06-26T02:34:38 | 2021-06-26T02:34:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,670 | py | from tabnanny import check
import numpy as np
import pandas as pd
import pytest
from epymetheus import Trade
from epymetheus import trade
from epymetheus.benchmarks import RandomStrategy
from epymetheus.datasets import make_randomwalk
from epymetheus.trade import check_trade
class TestTrade:
# handmade universe
universe_hand = pd.DataFrame(
{
"A": [3.0, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0],
"B": [2.0, 7.0, 1.0, 8.0, 2.0, 8.0, 1.0],
}
)
def test_init_array(self):
t = trade("A", lot=1.0)
assert isinstance(t.asset, np.ndarray)
assert isinstance(t.lot, np.ndarray)
t = trade("A", lot=[1.0])
assert isinstance(t.asset, np.ndarray)
assert isinstance(t.lot, np.ndarray)
t = trade(["A"], lot=[1.0])
assert isinstance(t.asset, np.ndarray)
assert isinstance(t.lot, np.ndarray)
t = trade(["A", "B"], lot=1.0)
t = trade(["A", "B"], lot=[1.0, 2.0])
assert isinstance(t.asset, np.ndarray)
assert isinstance(t.lot, np.ndarray)
def test_init_shape(self):
t = trade("A", lot=1.0)
assert t.asset.shape == (1,)
assert t.lot.shape == (1,)
t = trade(["A"], lot=1.0)
assert t.asset.shape == (1,)
assert t.lot.shape == (1,)
t = trade("A", lot=[1.0])
assert t.asset.shape == (1,)
assert t.lot.shape == (1,)
t = trade(["A", "B"], lot=1.0)
assert t.asset.shape == (2,)
assert t.lot.shape == (2,)
t = trade(["A", "B"], lot=[1.0])
assert t.asset.shape == (2,)
assert t.lot.shape == (2,)
t = trade(["A", "B"], lot=[1.0, 2.0])
assert t.asset.shape == (2,)
assert t.lot.shape == (2,)
def test_repr(self):
t = trade("A")
assert repr(t) == "trade(['A'], lot=[1.])"
t = trade("A", lot=2, take=3.0, stop=-3.0, entry="B0", exit="B1")
assert (
repr(t) == "trade(['A'], lot=[2], entry=B0, exit=B1, take=3.0, stop=-3.0)"
)
def test_array_value_value_hand(self):
t = [2.0, -3.0] * trade(["A", "B"], entry=1, exit=3)
result = t.array_value(self.universe_hand)
expect = np.array(
[
[6.0, -6.0],
[2.0, -21.0],
[8.0, -3.0],
[2.0, -24.0],
[10.0, -6.0],
[18.0, -24.0],
[4.0, -3.0],
]
)
assert np.allclose(result, expect)
t = [-3.0, 2.0] * trade(["B", "A"], entry=1, exit=3)
result = t.array_value(universe=self.universe_hand)
expect = expect[:, [1, 0]]
assert np.allclose(result, expect)
def test_array_value_value_zero(self):
t = 0.0 * trade(["A", "B"], entry=1, exit=3)
result = t.array_value(self.universe_hand)
expect = np.zeros_like(self.universe_hand.iloc[:, :2])
assert np.allclose(result, expect)
@pytest.mark.parametrize("seed", [0])
def test_array_value_linearity_add(self, seed):
np.random.seed(seed)
universe = self.universe_hand
lot0, lot1 = np.random.randn(2), np.random.randn(2)
t0 = list(lot0) * trade(["A", "B"], entry=1, exit=3)
t1 = list(lot1) * trade(["A", "B"], entry=1, exit=3)
ta = list(lot0 + lot1) * trade(["A", "B"], entry=1, exit=3)
result = ta.array_value(universe)
expect = t0.array_value(universe) + t1.array_value(universe)
assert np.allclose(result, expect)
@pytest.mark.parametrize("a", [-2.0, -1.0, 0.0, 1.0, 2.0])
@pytest.mark.parametrize("seed", [0])
def test_array_value_linearity_mul(self, a, seed):
np.random.seed(seed)
universe = self.universe_hand
lot0 = np.random.randn(2)
t0 = list(lot0) * trade(["A", "B"], entry=1, exit=3)
ta = list(a * lot0) * trade(["A", "B"], entry=1, exit=3)
result = ta.array_value(universe)
expect = a * t0.array_value(universe)
assert np.allclose(result, expect)
@pytest.mark.parametrize("seed", [0])
def test_final_pnl_lineality_add(self, seed):
np.random.seed(seed)
universe = self.universe_hand
lot0, lot1 = np.random.randn(2), np.random.randn(2)
t0 = list(lot0) * trade(["A", "B"], entry=1, exit=3)
t1 = list(lot1) * trade(["A", "B"], entry=1, exit=3)
ta = list(lot0 + lot1) * trade(["A", "B"], entry=1, exit=3)
t0.execute(universe)
t1.execute(universe)
ta.execute(universe)
result = ta.final_pnl(universe)
expect = t0.final_pnl(universe) + t1.final_pnl(universe)
assert np.allclose(result, expect)
def test_nonexitent(self):
"""non-existent asset, entry, exit"""
universe = pd.DataFrame({"A": range(10)})
with pytest.raises(KeyError):
_ = trade("NONEXISTENT").execute(universe)
with pytest.raises(KeyError):
_ = trade("A", entry=99).execute(universe)
with pytest.raises(KeyError):
_ = trade("A", entry=0, exit=99).execute(universe)
@pytest.mark.parametrize("a", [-2.0, -1.0, 0.0, 1.0, 2.0])
@pytest.mark.parametrize("seed", [0])
def test_final_pnl_linearity_mul(self, a, seed):
np.random.seed(seed)
universe = self.universe_hand
lot0 = np.random.randn(2)
t0 = list(lot0) * trade(["A", "B"], entry=1, exit=3)
ta = list(a * lot0) * trade(["A", "B"], entry=1, exit=3)
t0.execute(universe)
ta.execute(universe)
result = ta.final_pnl(universe)
expect = a * t0.final_pnl(universe)
assert np.allclose(result, expect)
# --- load and dump ---
def test_load_history(self):
    """Trade.load_history groups rows by trade_id into Trade objects.

    Rows 0 and 1 share trade_id 0 and collapse into a single two-asset
    trade; row 2 (trade_id 1) becomes a one-asset trade.  Per-row fields
    (lot) stay per-asset, per-trade fields (entry/exit/take/stop) are
    shared.
    """
    history = pd.DataFrame(
        {
            "trade_id": [0, 0, 1],
            "asset": [0, 1, 2],
            "lot": [1, 2, 3],
            "entry": [0, 0, 1],
            "close": [0, 0, 1],
            "exit": [100, 100, 101],
            "take": [10, 10, 11],
            "stop": [-10, -10, -11],
        }
    )
    trades = Trade.load_history(history)
    assert trades == [
        trade([0, 1], lot=[1, 2], entry=0, exit=100, take=10, stop=-10),
        trade(2, lot=3, entry=1, exit=101, take=11, stop=-11),
    ]
def test_to_dict(self):
    """to_dict serializes asset/lot/entry/exit, plus `close` once it is set."""
    single = 2.0 * trade("A", entry=1, exit=3)
    assert single.to_dict() == {"asset": ["A"], "entry": 1, "exit": 3, "lot": [2.0]}

    multi = [1.0, 2.0] * trade(["A", "B"], entry=1, exit=3)
    assert multi.to_dict() == {"asset": ["A", "B"], "entry": 1, "exit": 3, "lot": [1.0, 2.0]}

    # Setting `close` simulates an executed trade; it must now be serialized too.
    multi.close = 5
    assert multi.to_dict() == {
        "asset": ["A", "B"],
        "entry": 1,
        "exit": 3,
        "lot": [1.0, 2.0],
        "close": 5,
    }
def test_to_json(self):
    """to_json emits the fields in asset, lot, entry, exit order."""
    cases = [
        (
            2.0 * trade("A", entry=1, exit=3),
            '{"asset": ["A"], "lot": [2.0], "entry": 1, "exit": 3}',
        ),
        (
            [1.0, 2.0] * trade(["A", "B"], entry=1, exit=3),
            '{"asset": ["A", "B"], "lot": [1.0, 2.0], "entry": 1, "exit": 3}',
        ),
    ]
    for t, expected in cases:
        assert t.to_json() == expected
def test_from_json(self):
    """load_json round-trips the output of to_json back to an equal trade."""
    for original in (
        2.0 * trade("A", entry=1, exit=3),
        [1.0, 2.0] * trade(["A", "B"], entry=1, exit=3),
    ):
        assert original == Trade.load_json(original.to_json())
# --- operations ---
def test_eq(self):
    """Equality compares asset, lot, entry, exit, take and stop.

    Lots compare numerically, so scalar / list / ndarray and int / float
    spellings of the same lot are all considered equal.
    """
    # default lot (1) in its various numeric spellings
    t = trade("A")
    assert t == trade("A")
    assert t == trade("A", lot=[1])
    assert t == trade("A", lot=[1.0])
    assert t != trade("A", lot=-1)
    assert t != trade("A", lot=[-1.0])

    # explicit scalar lot
    t = trade("A", lot=2)
    assert t == trade("A", lot=2)
    assert t == trade("A", lot=2.0)
    assert t == trade("A", lot=[2])
    assert t == trade("A", lot=[2.0])
    assert t == trade("A", lot=np.array([2]))
    assert t == trade("A", lot=np.array([2.0]))
    assert t != trade("A", lot=-1)
    assert t != trade("A", lot=[-1.0])
    assert t != trade("A", lot=np.array([-1]))
    assert t != trade("A", lot=np.array([-1.0]))

    # vector lot over two assets
    t = trade(["A", "B"], lot=[1, 2])
    assert t == trade(["A", "B"], lot=[1, 2])
    assert t == trade(["A", "B"], lot=[1.0, 2.0])
    assert t == trade(["A", "B"], lot=np.array([1, 2]))
    assert t == trade(["A", "B"], lot=np.array([1.0, 2.0]))
    assert t != trade(["A", "B"], lot=1.0)
    assert t != trade(["A", "B"], lot=[-1.0, 2.0])
    assert t != trade(["A", "B"], lot=[1.0, -1.0])

    # the remaining fields must match exactly
    t = trade(["A", "B"], lot=[1, 2], entry=1)
    assert t == trade(["A", "B"], lot=[1, 2], entry=1)
    assert t != trade(["A", "B"], lot=[1, 2], entry=2)
    t = trade(["A", "B"], lot=[1, 2], exit=1)
    assert t == trade(["A", "B"], lot=[1, 2], exit=1)
    assert t != trade(["A", "B"], lot=[1, 2], exit=2)
    t = trade(["A", "B"], lot=[1, 2], take=1)
    assert t == trade(["A", "B"], lot=[1, 2], take=1)
    assert t != trade(["A", "B"], lot=[1, 2], take=2)
    t = trade(["A", "B"], lot=[1, 2], stop=1)
    assert t == trade(["A", "B"], lot=[1, 2], stop=1)
    assert t != trade(["A", "B"], lot=[1, 2], stop=2)
@pytest.mark.parametrize("a", [-2.0, -1.0, 0.0, 1.0, 2.0])
def test_mul(self, a):
    """Left and right scalar multiplication both scale the lot vector."""
    single = trade("A", entry=0, exit=1, take=2.0, stop=-3.0)
    expected_single = trade("A", lot=[a], entry=0, exit=1, take=2.0, stop=-3.0)
    assert a * single == expected_single
    assert single * a == expected_single

    multi = trade(["A", "B"], lot=[1.0, -2.0], entry=0, exit=1, take=2.0, stop=-3.0)
    expected_multi = trade(
        ["A", "B"], lot=[a, -2.0 * a], entry=0, exit=1, take=2.0, stop=-3.0
    )
    assert a * multi == expected_multi
    assert multi * a == expected_multi
def test_neg(self):
    """Negating a trade flips the sign of the lot and keeps every other field."""
    t = trade("A", entry=0, exit=1, take=2.0, stop=-3.0)
    assert -t == trade("A", lot=[-1.0], entry=0, exit=1, take=2.0, stop=-3.0)
@pytest.mark.parametrize("a", [-2.0, -1.0, 1.0, 2.0])
def test_truediv(self, a):
    """Dividing a trade by a scalar divides every lot by that scalar."""
    single = trade("A", entry=0, exit=1, take=2.0, stop=-3.0)
    assert single / a == trade("A", lot=[1 / a], entry=0, exit=1, take=2.0, stop=-3.0)

    multi = trade(["A", "B"], lot=[1.0, -2.0], entry=0, exit=1, take=2.0, stop=-3.0)
    assert multi / a == trade(
        ["A", "B"], lot=[1 / a, -2.0 / a], entry=0, exit=1, take=2.0, stop=-3.0
    )
class TestCheckTrade:
    """check_trade must reject every kind of malformed trade with ValueError."""

    def test(self):
        universe = pd.DataFrame({"A": [100, 101, 102]}, index=[0, 1, 2])
        # Builders are deferred so construction happens inside pytest.raises,
        # exactly as in-line construction would.
        builders = (
            lambda: trade("NONEXISTENT_ASSET"),      # unknown asset
            lambda: trade("A", entry=99),            # entry outside the index
            lambda: trade("A", entry=0, exit=99),    # exit outside the index
            lambda: trade("A", lot=[np.nan]),        # non-finite lot
            lambda: trade("A", lot=[np.inf]),        # non-finite lot
            lambda: trade("A", take=-1.0),           # take must be non-negative
            lambda: trade("A", stop=1.0),            # stop must be non-positive
        )
        for build in builders:
            with pytest.raises(ValueError):
                check_trade(build(), universe)
# @pytest.mark.parametrize("seed", params_seed)
# def test_execute_0_0(seed):
# """
# Test `trade.execute` sets `trade.close_bar` correctly.
# Setup
# -----
# - trade.take is None
# - trade.stop is None
# - trade.exit is not None
# Expected Result
# ---------------
# trade.close_bar == universe.exit
# """
# # exit is not None
# universe = make_randomwalk(seed=seed)
# trade = make_random_trade(universe, seed=seed)
# trade.execute(universe)
# assert trade.close_bar == trade.exit
# @pytest.mark.parametrize("seed", params_seed)
# def test_execute_0_1(seed):
# """
# Test `trade.execute` sets `trade.close_bar` correctly.
# Setup
# -----
# - trade.take is None
# - trade.stop is None
# - trade.exit is None
# Expected Result
# ---------------
# trade.close_bar == universe.bars[-1]
# """
# # exit is not None
# universe = make_randomwalk(seed=seed)
# trade = make_random_trade(universe, seed=seed)
# trade.exit = None
# trade.execute(universe)
# assert trade.close_bar == universe.bars[-1]
# @pytest.mark.parametrize("seed", params_seed)
# def test_execute(seed):
# """
# Test `trade.execute` sets `trade.close_bar` correctly.
# Setup
# -----
# - trade.take is None
# - trade.stop is None
# - trade.exit is None
# Expected Result
# ---------------
# trade.close_bar == universe.bars[-1]
# """
# # exit is not None
# universe = make_randomwalk(seed=seed)
# trade = make_random_trade(universe, seed=seed)
# trade.exit = None
# trade.execute(universe)
# assert trade.close_bar == universe.bars[-1]
# # @pytest.mark.parametrize('seed', params_seed)
# # @pytest.mark.parametrize('n_bars', params_n_bars)
# # @pytest.mark.parametrize('const', params_const)
# # def test_execute(seed, n_bars, const):
# # period = n_samples // 10
# # shift = np.random.randint(period)
# # prices = pd.DataFrame({
# # 'Asset0': const + make_sin(n_bars=n_bars, period=period, shift=shift)
# # })
# # universe = prices
# # trade = ep.trade('Asset0', lot=1.0, )
# # def test_execute_take():
# # universe = pd.DataFrame({"Asset0": np.arange(100, 200)}
# # trade = ep.trade("Asset0", lot=1.0, take=1.9, entry=1, exit=5)
# # trade.execute(universe)
# # assert trade.close_bar == 3
# # assert np.array_equal(trade.final_pnl(universe), [103 - 101])
# # trade = ep.trade("Asset0", lot=2.0, take=3.8, entry=1, exit=5)
# # trade.execute(universe)
# # assert trade.close_bar == 3
# # assert np.array_equal(trade.final_pnl(universe), [2 * (103 - 101)])
# # trade = ep.trade("Asset0", lot=1.0, take=1000, entry=1, exit=5)
# # trade.execute(universe)
# # assert trade.close_bar == 5
# # assert np.array_equal(trade.final_pnl(universe), [105 - 101])
# # def test_execute_stop():
# # universe = prices=pd.DataFrame({"Asset0": np.arange(100, 0, -1)})
# # trade = ep.trade("Asset0", lot=1.0, stop=-1.9, entry=1, exit=5)
# # trade.execute(universe)
# # assert trade.close_bar == 3
# # assert np.array_equal(trade.final_pnl(universe), [97 - 99])
# # trade = ep.trade("Asset0", lot=2.0, stop=-3.8, entry=1, exit=5)
# # trade.execute(universe)
# # assert trade.close_bar == 3
# # assert np.array_equal(trade.final_pnl(universe), [2 * (97 - 99)])
# # trade = ep.trade("Asset0", lot=1.0, stop=-1000, entry=1, exit=5)
# # trade.execute(universe)
# # assert trade.close_bar == 5
# # assert np.array_equal(trade.final_pnl(universe), [95 - 99])
# # TODO both take and stop
# # TODO short position
# # TODO multiple orders
# # def test_execute_takestop():
# # pass
| [
"noreply@github.com"
] | noreply@github.com |
a02f2f0bfd5d5ad48cdb800b64fc936725445b41 | a463913fa1d9e4dc307bc324b9b326f022bb3d04 | /setup.py | 2cf0569c81a94a08301c8453e1da0cd1b83a9709 | [
"Apache-2.0"
] | permissive | ognibit/sudoku-solver | a9ae832443017cba09ccf95676de2b492f9263a3 | 1c47b80b36b4bd57a11a4084e04defd849531782 | refs/heads/master | 2022-07-11T03:26:12.115835 | 2019-08-21T13:24:39 | 2019-08-21T13:24:39 | 203,585,842 | 0 | 0 | Apache-2.0 | 2022-06-21T22:35:46 | 2019-08-21T13:04:41 | Python | UTF-8 | Python | false | false | 239 | py | from setuptools import find_packages, setup
# Package metadata for the sudoku solver.  The project uses the "src layout":
# importable code lives under src/, so both package_dir and find_packages
# must point there.
setup(
    name='sudoku',
    version='0.1',
    description='Sudoku resolver',
    author='Omar Rampado',
    author_email='omar@ognibit.it',
    package_dir={'': 'src'},  # map the root package namespace to src/
    packages=find_packages(where='src'),
)
"rampado.omar@gmail.com"
] | rampado.omar@gmail.com |
5a7763b78d952b82ede1266770bbc6d491f04581 | 9095c97832eb826596f45f15e0212e37632eb99b | /cogs/destiny.py | 61fe3b2690a637287aefa10f27b66710bd8d6f95 | [
"MIT"
] | permissive | jgayfer/spirit | 87f6b0d9a64d5774be7d63e3b2b41440f3a9bc57 | 024bc60727f92fb742daf0d28a3465a56820a7c0 | refs/heads/master | 2021-08-18T16:07:56.156560 | 2020-03-12T17:18:02 | 2020-06-30T23:46:11 | 97,080,410 | 37 | 26 | MIT | 2020-06-30T23:46:13 | 2017-07-13T04:37:20 | Python | UTF-8 | Python | false | false | 2,049 | py | from datetime import datetime
import asyncio
from discord.ext import commands
import discord
import pydest
from cogs.utils.message_manager import MessageManager
from cogs.utils import constants
class Destiny:
    """Discord cog exposing Destiny 2 commands (currently only `nightfall`)."""

    def __init__(self, bot):
        # bot.destiny is presumably a pydest client -- TODO confirm against
        # the bot's setup code.
        self.bot = bot

    @commands.command()
    @commands.cooldown(rate=2, per=5, type=commands.BucketType.user)
    async def nightfall(self, ctx):
        """Display the weekly nightfall info"""
        manager = MessageManager(ctx)
        await ctx.channel.trigger_typing()

        # Fetch the public milestones; fail politely if the API is down.
        try:
            weekly = await self.bot.destiny.api.get_public_milestones()
        except pydest.PydestException as e:
            await manager.send_message("Sorry, I can't seem retrieve the nightfall info right now")
            return await manager.clean_messages()

        # Bungie signals success with ErrorCode 1; anything else is an error.
        if weekly['ErrorCode'] != 1:
            await manager.send_message("Sorry, I can't seem retrieve the nightfall info right now")
            return await manager.clean_messages()

        # '2171429505' appears to be the weekly-nightfall milestone hash --
        # NOTE(review): confirm against the Destiny 2 manifest.
        nightfall_hash = weekly['Response']['2171429505']['availableQuests'][0]['activity']['activityHash']
        nightfall = await self.bot.destiny.decode_hash(nightfall_hash, 'DestinyActivityDefinition')

        # Build one "**name** - description" line per nightfall challenge.
        challenges = ""
        for entry in nightfall['challenges']:
            challenge = await self.bot.destiny.decode_hash(entry['objectiveHash'], 'DestinyObjectiveDefinition')
            challenge_name = challenge['displayProperties']['name']
            challenge_description = challenge['displayProperties']['description']
            challenges += "**{}** - {}\n".format(challenge_name, challenge_description)

        # Assemble and send the embed with name, flavor text, icon and challenges.
        e = discord.Embed(title='{}'.format(nightfall['displayProperties']['name']), colour=constants.BLUE)
        e.description = "*{}*".format(nightfall['displayProperties']['description'])
        e.set_thumbnail(url=('https://www.bungie.net' + nightfall['displayProperties']['icon']))
        e.add_field(name='Challenges', value=challenges)

        await manager.send_embed(e)
        await manager.clean_messages()
| [
"gayfer.james@gmail.com"
] | gayfer.james@gmail.com |
314a9d16b72183371e44376024ab7a29a3ee36ef | 9ec244e57b023e2515ca94768d1581f950c6b4c3 | /Course_3/extracting_from_json.py | 2d1e0bacabd67646d2fc8c0d4400d48b828089eb | [] | no_license | pstatonvt/python | 5e73d56d641dec7b1990645cf3a33ee25dc4c5ba | 5c33c9bf0444e46e3a668b905313e9563dab50f0 | refs/heads/master | 2018-11-11T09:49:58.927745 | 2018-04-05T02:28:02 | 2018-04-05T02:28:02 | 119,099,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | '''
Read the JSON data from that URL using urllib and then parse and extract the comment counts from the JSON data, compute the sum of the numbers in the file and enter the sum below:
'''
import urllib.request, urllib.parse, urllib.error
import json
# Download the JSON document, parse it, and sum the per-comment counts.
url = 'http://py4e-data.dr-chuck.net/comments_72209.json'
print("Location entered...")
print("Retrieving", url)
url_handle = urllib.request.urlopen(url)
data = url_handle.read().decode()
info = json.loads(data)
print("Retrieved " + str(len(data)) + " characters")
# Each entry of info["comments"] is a dict with "name" and "count" keys;
# collect the counts directly instead of indexing by position.
counts = [comment["count"] for comment in info["comments"]]
print("Sum:", sum(counts))
| [
"30532152+pstatonvt@users.noreply.github.com"
] | 30532152+pstatonvt@users.noreply.github.com |
6c36391267af20d2d0df7f255c2d1d4f98c496d0 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part003650.py | 2809c442b3ba17c08e9f9aa9bc7b006e27b8a3e8 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,946 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher38258(CommutativeMatcher):
    """Machine-generated matchpy many-to-one matcher (sympy Rubi rules).

    Do not edit the matching logic by hand: the `# State NNNNN` comments
    refer to nodes of the generated matching automaton, and statement
    order is significant.
    """
    _instance = None
    # pattern id -> (subpattern id, multiset of constraints, sequence variables)
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.2.2.2.0', 1, 1, S(0)), Add)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Add
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Lazily-constructed process-wide singleton.
        if CommutativeMatcher38258._instance is None:
            CommutativeMatcher38258._instance = CommutativeMatcher38258()
        return CommutativeMatcher38258._instance

    @staticmethod
    def get_match_iter(subject):
        # Walk the generated automaton, yielding (pattern_index, substitution)
        # for every way the subject matches pattern 0 (x**j*f).
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 38257
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i2.2.1.2.2.2.1.0', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 38259
            if len(subjects) >= 1 and isinstance(subjects[0], Pow):
                tmp2 = subjects.popleft()
                subjects3 = deque(tmp2._args)
                # State 38260
                if len(subjects3) >= 1:
                    tmp4 = subjects3.popleft()
                    subst2 = Substitution(subst1)
                    try:
                        subst2.try_add_variable('i2.2.1.1', tmp4)
                    except ValueError:
                        pass
                    else:
                        pass
                        # State 38261
                        if len(subjects3) >= 1:
                            tmp6 = subjects3.popleft()
                            subst3 = Substitution(subst2)
                            try:
                                subst3.try_add_variable('i2.2.1.2', tmp6)
                            except ValueError:
                                pass
                            else:
                                pass
                                # State 38262
                                if len(subjects3) == 0:
                                    pass
                                    # State 38263
                                    if len(subjects) == 0:
                                        pass
                                        # 0: x**j*f
                                        yield 0, subst3
                            subjects3.appendleft(tmp6)
                    subjects3.appendleft(tmp4)
                subjects.appendleft(tmp2)
        if len(subjects) >= 1 and isinstance(subjects[0], Mul):
            tmp8 = subjects.popleft()
            associative1 = tmp8
            associative_type1 = type(tmp8)
            subjects9 = deque(tmp8._args)
            # Delegate the commutative Mul arguments to the nested matcher.
            matcher = CommutativeMatcher38265.get()
            tmp10 = subjects9
            subjects9 = []
            for s in tmp10:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp10, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 38270
                    if len(subjects) == 0:
                        pass
                        # 0: x**j*f
                        yield 0, subst1
            subjects.appendleft(tmp8)
        return
        yield
from .generated_part003651 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
a4df1412e80429c8ca05612b28da392af78863c4 | 06fa1aefc051ee0d6c325afef13dfcc14e52c6e4 | /ulmo/runs/SSL/MODIS/v2/ssl_modis_v2.py | 26d4899ae5c17be07d9a144ad971c07e06b78342 | [] | no_license | cxzhangqi/ulmo | 98e7b2783720b13f0e31a8bdf6ae70ab2a217bc7 | c1c570e75332243b8a2a16a8d6c68544e1ba02cd | refs/heads/main | 2023-06-30T19:15:04.279986 | 2021-07-31T19:45:51 | 2021-07-31T19:45:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,898 | py | """ Module for Ulmo analysis on VIIRS 2013"""
import os
import numpy as np
import time
import h5py
import numpy as np
from tqdm.auto import trange
import argparse
import h5py
import umap
from ulmo import io as ulmo_io
from ulmo.utils import catalog as cat_utils
from ulmo.ssl import analysis as ssl_analysis
from ulmo.ssl.util import adjust_learning_rate
from ulmo.ssl.util import set_optimizer, save_model
from ulmo.ssl import latents_extraction
from ulmo.ssl.train_util import Params, option_preprocess
from ulmo.ssl.train_util import modis_loader, set_model
from ulmo.ssl.train_util import train_model
from IPython import embed
def parse_option():
    """Parse the command-line arguments for training/evaluation/UMAP runs.

    Returns:
        argparse.Namespace: the parsed arguments (opt_path, func_flag, debug).
    """
    arg_parser = argparse.ArgumentParser("argument for training.")
    arg_parser.add_argument("--opt_path", type=str, help="path of 'opt.json' file.")
    arg_parser.add_argument(
        "--func_flag",
        type=str,
        help="flag of the function to be execute: 'train' or 'evaluate' or 'umap'.",
    )
    # JFH Should the default now be true with the new definition.
    arg_parser.add_argument('--debug', default=False, action='store_true', help='Debug?')
    return arg_parser.parse_args()
def ssl_v2_umap(debug=False, orig=False):
    """Run a UMAP analysis on all the MODIS L2 data.

    Fits a 2-D UMAP embedding on the 2010 training latents, then transforms
    the latents of every year found in the bucket and writes the (U0, U1)
    coordinates into the main MODIS table.

    Args:
        debug (bool, optional): use the old debug latents file. Defaults to False.
        orig (bool, optional): unused here -- NOTE(review): dead parameter?
    """
    # Load table
    tbl_file = 's3://modis-l2/Tables/MODIS_L2_std.parquet'
    modis_tbl = ulmo_io.load_main_table(tbl_file)
    modis_tbl['U0'] = 0.
    modis_tbl['U1'] = 0.

    # Train the UMAP
    # Split: pp_type 1 = training cutouts, 0 = validation cutouts
    train = modis_tbl.pp_type == 1
    valid = modis_tbl.pp_type == 0
    y2010 = modis_tbl.pp_file == 's3://modis-l2/PreProc/MODIS_R2019_2010_95clear_128x128_preproc_std.h5'
    valid_tbl = modis_tbl[valid & y2010].copy()

    # Latents file (subject to move)
    if debug:
        latents_train_file = 's3://modis-l2/SSL_MODIS_R2019_2010_latents_v2/modis_R2019_2010_latents_last_v2.h5'
    else:
        latents_train_file = 's3://modis-l2/SSL/SSL_v2_2012/latents/MODIS_R2019_2010_95clear_128x128_latents_std.h5'

    # Load em in
    basefile = os.path.basename(latents_train_file)
    if not os.path.isfile(basefile):
        print("Downloading latents (this is *much* faster than s3 access)...")
        ulmo_io.download_file_from_s3(basefile, latents_train_file)
        print("Done")
    hf = h5py.File(basefile, 'r')
    latents_train = hf['modis_latents_v2_train'][:]
    latents_valid = hf['modis_latents_v2_valid'][:]
    print("Latents loaded")

    # Check: one latent vector per validation row of the 2010 table
    assert latents_valid.shape[0] == len(valid_tbl)

    # Fit the 2-D embedding on the training latents only.
    print("Running UMAP..")
    reducer_umap = umap.UMAP()
    latents_mapping = reducer_umap.fit(latents_train)
    print("Done..")

    # Loop on em all
    latent_files = ulmo_io.list_of_bucket_files('modis-l2',
                                                prefix='SSL/SSL_v2_2012/latents/')
    for latents_file in latent_files:
        basefile = os.path.basename(latents_file)
        # Year is parsed from the fixed filename layout MODIS_R2019_YYYY_...
        year = int(basefile[12:16])
        # Download?
        if not os.path.isfile(basefile):
            print(f"Downloading {latents_file} (this is *much* faster than s3 access)...")
            # NOTE(review): this downloads latents_train_file (the 2010 file),
            # not latents_file -- looks like a bug; confirm before relying on it.
            ulmo_io.download_file_from_s3(basefile, latents_train_file)
            print("Done")
        # Load and apply
        hf = h5py.File(basefile, 'r')
        '''
        if 'train' in hf.keys():
            latents_train = hf['train'][:]
            train_embedding = latents_mapping.transform(latents_train)
        '''
        # THIS LINE IS WRONG. FIX WHEN THE REST IS FIXED
        latents_valid = hf['train'][:]
        valid_embedding = latents_mapping.transform(latents_valid)

        # Save to table
        # NOTE(review): embed() drops into an interactive IPython shell --
        # leftover debugging breakpoint.
        embed(header='118 of ssl modis 2012')
        yidx = modis_tbl.pp_file == f's3://modis-l2/PreProc/MODIS_R2019_{year}_95clear_128x128_preproc_std.h5'
        valid_idx = valid & yidx
        modis_tbl.loc[valid_idx, 'U0'] = valid_embedding[:,0]
        modis_tbl.loc[valid_idx, 'U1'] = valid_embedding[:,1]
        '''
        train_idx = train & yidx
        if np.sum(train_idx) > 0:
            modis_tbl.loc[train_idx, 'U0'] = train_embedding[:,0]
            modis_tbl.loc[train_idx, 'U1'] = train_embedding[:,1]
        '''
        hf.close()
        # Clean up
        os.remove(basefile)

    # Vet
    assert cat_utils.vet_main_table(valid_tbl, cut_prefix='modis_')
def main_train(opt_path: str):
    """Train the SSL model, checkpointing locally and to s3.

    Args:
        opt_path (str): Path + filename of options file (json)
    """
    # loading parameters json file
    opt = Params(opt_path)
    opt = option_preprocess(opt)

    # build data loader
    train_loader = modis_loader(opt)

    # build model and criterion
    model, criterion = set_model(opt, cuda_use=opt.cuda_use)

    # build optimizer
    optimizer = set_optimizer(opt, model)

    # training routine
    for epoch in trange(1, opt.epochs + 1):
        adjust_learning_rate(opt, optimizer, epoch)

        # train for one epoch
        time1 = time.time()
        # NOTE(review): `loss` is only assigned, never used -- logging it
        # alongside the epoch time would be more informative.
        loss = train_model(train_loader, model, criterion,
                           optimizer, epoch, opt, cuda_use=opt.cuda_use)
        time2 = time.time()
        print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))

        # Periodic checkpoint every opt.save_freq epochs.
        if epoch % opt.save_freq == 0:
            # Save locally
            save_file = 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch)
            save_model(model, optimizer, opt, epoch, save_file)
            # Save to s3
            s3_file = os.path.join(
                opt.s3_outdir, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
            ulmo_io.upload_file_to_s3(save_file, s3_file)

    # save the last model local
    save_file = 'last.pth'
    save_model(model, optimizer, opt, opt.epochs, save_file)
    # Save to s3
    s3_file = os.path.join(opt.s3_outdir, 'last.pth')
    ulmo_io.upload_file_to_s3(save_file, s3_file)
def main_evaluate(opt_path, model_file,
                  preproc='_std', debug=False):
    """Obtain the latents of the trained model for all of MODIS.

    For every pre-processed file in the bucket whose name contains `preproc`,
    download it, run the model over its 'train' (if present) and 'valid'
    partitions, upload the resulting latents file to s3, and delete the
    local copy.

    Args:
        opt_path: (str) option file path.
        model_file: (str) s3 filename of the model checkpoint.
        preproc: (str, optional) substring selecting which PreProc files to use.
        debug: (bool, optional) if True, only process the first matching file.
    """
    opt = option_preprocess(Params(opt_path))

    # Fetch the model checkpoint from s3.
    model_base = os.path.basename(model_file)
    ulmo_io.download_file_from_s3(model_base, model_file)

    # Select the pre-processed data files matching `preproc`.
    all_pp_files = ulmo_io.list_of_bucket_files('modis-l2', 'PreProc')
    pp_files = [ifile for ifile in all_pp_files if preproc in ifile]

    # Loop on files
    key_train, key_valid = "train", "valid"
    if debug:
        pp_files = pp_files[0:1]
    for ifile in pp_files:
        # Bug fix: the original printed the literal text "Working on ifile".
        print(f"Working on {ifile}")
        data_file = os.path.basename(ifile)
        if not os.path.isfile(data_file):
            ulmo_io.download_file_from_s3(
                data_file, f's3://modis-l2/PreProc/{data_file}')

        # A file may or may not carry a 'train' partition.
        with h5py.File(data_file, 'r') as file:
            train = 'train' in file.keys()

        # Latents are written locally, then pushed to the configured s3 folder.
        latents_file = data_file.replace('_preproc', '_latents')
        latents_path = os.path.join(opt.latents_folder, latents_file)

        if train:
            print("Starting train evaluation")
            latents_extraction.model_latents_extract(
                opt, data_file, 'train', model_base, latents_file, key_train)
            print("Extraction of Latents of train set is done.")

        print("Starting valid evaluation")
        latents_extraction.model_latents_extract(
            opt, data_file, 'valid', model_base, latents_file, key_valid)
        print("Extraction of Latents of valid set is done.")

        # Push to s3
        print("Uploading to s3..")
        ulmo_io.upload_file_to_s3(latents_file, latents_path)

        # Remove the (large) local data file to save disk space.
        os.remove(data_file)
        print(f'{data_file} removed')
if __name__ == "__main__":
    # Parse the CLI arguments and dispatch on --func_flag; the three modes
    # are mutually exclusive, so an elif chain is equivalent to the
    # original independent ifs.
    args = parse_option()

    if args.func_flag == 'train':
        print("Training Starts.")
        main_train(args.opt_path)
        print("Training Ends.")
    elif args.func_flag == 'evaluate':
        print("Evaluation Starts.")
        main_evaluate(args.opt_path,
                      's3://modis-l2/SSL/SSL_v2_2012/last.pth',
                      debug=args.debug)
        print("Evaluation Ends.")
    elif args.func_flag == 'umap':
        print("UMAP Starts.")
        ssl_v2_umap(debug=args.debug)
        print("UMAP Ends.")
| [
"xavier@ucolick.org"
] | xavier@ucolick.org |
a72f0bb734f78996b6c8a0a8578a918645788692 | 25753fa4df2ef0137d579aa57a3e5a94dc0208ec | /hw2/hw2plots.py | 0ed6d58f2f45d3ceeceaae79133be80d8aa47416 | [] | no_license | yunqingjia/ae4132 | 9b37145c4bfda051890a9ec0e6b4adc5480627a9 | a6ffa4178d18dbb3ce6ce626432da9df83ed1a3f | refs/heads/main | 2023-04-14T06:44:47.178260 | 2021-04-15T22:15:36 | 2021-04-15T22:15:36 | 339,821,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,793 | py | '''
Spring 2021 AE 4132 HW2
author: yjia67
last updated: 02-24-2021
'''
import numpy as np
import matplotlib.pyplot as plt
class FEAHW2():
    """Plot helpers for AE 4132 homework 2 (FEA, Rayleigh-Ritz problems)."""

    def __init__(self):
        pass

    # Problem 1.4
    def p1_4(self, x):
        """Set up the bar problem and compute the nodal coefficients.

        Args:
            x: sample points (currently unused -- plotting was left unfinished).
        """
        # Define the input parameters for the specific case
        P = 400  # N
        q = 100  # N/m
        L = 2  # m
        A = 0.0003  # m^2
        E = 70e9  # Pa
        v = 0.3
        n = 50  # elements
        le = L/n  # length of each element
        ke = E*A/le  # axial stiffness of one element
        u = self.p1_compute_u(ke, n)
        #######################################################
        # I was able to compute all the coefficients
        # but ran out of time trying to figure out how to plot
        #######################################################
        # Plot the piecewise functions
        # x_lst = np.arange()
        # y_lst = np.array([])
        # for i in range(n-1):
        #     x = np.arange(i*le, (i+1)*le, 1)
        #     print(x)
        #     y = np.array([self.p1_uhat(xi, u[i], u[i+1], i, le) for xi in x])
        #     x_lst = np.append(x_lst, x)
        #     y_lst = np.append(y_lst, y)
        # print(x_lst.shape)

    def p1_compute_u(self, ke, n):
        '''
        Compute the u_i values from the matrix that represent the system of equations
        of the partial derivatives of Pi_hat

        The coefficient matrix is formatted as the following:
        [[ 0, 0, 0, 0, ... , 0, 0],
         [ 0, 2, -1, 0, ... , 0, 0],
         [ 0, -1, 2, -1, ... , 0, 0],
         ... ...
         [ 0, 0, 0, 0, ... , -1, 1]]

        input: ke = spring constant
               n = # of elements
        output: u = 1D np.Array of coefficients in linear approximation
        '''
        # Initialize an NxN matrix where N = # of elements
        mat = np.zeros((n, n))
        # Loop from 1:n and populate the matrix and solve for constants (u_i)
        # Since we know u1 = 0 from the B.C., the first column will remain 0
        for i in range(1, n-1):
            mat[i][i-1:i+2] = [-1, 2, -1]
        # Manually set the first column of the second row to 0
        # Manually set the last row of the matrix to [..., -1, 1]
        # Multiply everything by ke & remove the first row and first column (all 0s)
        mat[1][0] = 0
        mat[-1][-2:] = [-1, 1]
        mat = ke*mat[1:, 1:]
        # NOTE(review): np.linalg.eig returns eigenvalues/eigenvectors, not
        # the solution of a linear system -- np.linalg.solve was presumably
        # intended; confirm against the assignment's derivation.
        u, v = np.linalg.eig(mat)
        # Insert u1=0 at the start of the array
        u = np.insert(u, 0, 0.0)
        return u

    # Define the piecewise displacement function
    def p1_uhat(self, x, u1, u2, i, le):
        # Linear interpolation of the displacement on element i: [i*le, (i+1)*le].
        uhat = u1 + (u2-u1)/le*(x-i*le)
        return uhat

    # Problem 2 Plotting: call on the functions
    def p2(self, x, N1, N2):
        '''
        Plotting everything on the same plot:
        Case 1: Quadratic Rayleigh-Ritz
                Governing Equation
                Linear Rayleigh-Ritz
        Case 2: same dealio
        '''
        # NOTE(review): "quad. RR" and "gov. eqn" plot the same mapping
        # (N1, resp. N2) for each case -- intentional overlay or placeholder?
        plt.plot(x, list(map(N1, x)), color='k', ls='-', label='1: quad. RR')
        plt.plot(x, list(map(N1, x)), color='r', ls='--', label='1: gov. eqn')
        plt.plot(x, np.ones(len(x))*66, color='m', label='1: lin. RR')
        plt.plot(x, list(map(N2, x)), color='b', ls='-', label='2: quad. RR')
        plt.plot(x, list(map(N2, x)), color='y', ls='--', label='2: gov. eqn')
        plt.plot(x, np.ones(len(x))*24, color='c', label='2: lin. RR')
        plt.xlabel(r'$x$ $(in)$')
        plt.ylabel(r'$N(x)$ $(lb_f)$')
        plt.xticks(np.arange(min(x), max(x)+1, 6))
        plt.legend()
        plt.title('HW2 Problem 2')
        plt.savefig('hw2p2.png')
        plt.show()

    # Problem 2 Case 1: piecewise axial force, kink at x = 48 in (4 ft)
    def p2_N1(self, x):
        if (x <= 4*12):
            return 90-x
        else:
            return -6+x

    # Problem 2 Case 2: piecewise axial force, kink at x = 48 in (4 ft)
    def p2_N2(self, x):
        if (x <= 4*12):
            return 24-x
        else:
            return -72+x

    # Problem 3
    def p3(self, b, P1, P2):
        """Plot the potential energy Pi(beta) for the two trial families."""
        plt.plot(b, list(map(P1, b)), color='k', ls='-', label='1: quad. RR')
        plt.plot(b, list(map(P2, b)), color='b', ls='-', label='2: trig. RR')
        plt.xlabel(r'$\beta$')
        plt.ylabel(r'$\Pi(x)$')
        plt.legend()
        plt.title('HW2 Problem 3')
        plt.savefig('hw2p3.png')
        plt.show()

    def p3_pi1(self, beta):
        # Potential energy of the quadratic trial function family.
        return -5/216*np.square(1+3*beta)

    def p3_pi2(self, beta):
        # Potential energy of the trigonometric trial function family.
        pi = np.pi
        return -80*np.square(-2+pi+pi*beta)/(3*pi**6)
if __name__ == '__main__':
    # Run all three homework problems in sequence.
    hw2 = FEAHW2()
    ### PROBLEM 1 ###
    x1 = np.arange(0.0, 2.0, 0.01)  # bar coordinate, meters
    hw2.p1_4(x1)
    ### PROBLEM 2 ###
    x2 = np.arange(0.0, 8.0*12, 0.01)  # 8 ft expressed in inches
    hw2.p2(x2, hw2.p2_N1, hw2.p2_N2)
    ### PROBLEM 3 ###
    beta = np.arange(-10, 10, 0.01)  # Rayleigh-Ritz parameter sweep
    hw2.p3(beta, hw2.p3_pi1, hw2.p3_pi2)
| [
"yjia16@outlook.com"
] | yjia16@outlook.com |
fdf4b28c90591a0e42da76b3766885884aa09407 | 2c3944f909009446547c3c11c01bcee71ecc6813 | /nova/virt/driver.py | 34dc5f544cbe20d640282bc338774d79ed69ed92 | [
"Apache-2.0"
] | permissive | prathyu1/openstack-nova | f5a5eb3ca8c120f1e70d6af90d48f4c80f666817 | 7551d299d331fe5eddf1691c4cdd030710e945b3 | refs/heads/master | 2020-12-24T13:08:01.944193 | 2011-08-19T10:06:30 | 2011-08-19T10:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,041 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver base-classes:
(Beginning of) the contract that compute drivers must follow, and shared
types that support that contract
"""
from nova.compute import power_state
class InstanceInfo(object):
    """Lightweight (name, power state) record describing a guest VM."""

    def __init__(self, name, state):
        # name: instance name as reported by the hypervisor driver
        self.name = name

        # Reject unknown state codes early; valid codes come from
        # nova.compute.power_state.  (Note: `assert` disappears under -O.)
        assert state in power_state.valid_states(), "Bad state: %s" % state
        self.state = state
class ComputeDriver(object):
"""Base class for compute drivers.
Lots of documentation is currently on fake.py.
"""
def init_host(self, host):
"""Adopt existing VM's running here"""
raise NotImplementedError()
def get_info(self, instance_name):
"""Get the current status of an instance, by name (not ID!)
Returns a dict containing:
:state: the running state, one of the power_state codes
:max_mem: (int) the maximum memory in KBytes allowed
:mem: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
"""
raise NotImplementedError()
def list_instances(self):
raise NotImplementedError()
def list_instances_detail(self):
"""Return a list of InstanceInfo for all registered VMs"""
raise NotImplementedError()
def spawn(self, instance, network_info, block_device_mapping=None):
"""Launch a VM for the specified instance"""
raise NotImplementedError()
def destroy(self, instance, network_info, cleanup=True):
"""Destroy (shutdown and delete) the specified instance.
The given parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name.
The work will be done asynchronously. This function returns a
task that allows the caller to detect when it is complete.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
"""
raise NotImplementedError()
def reboot(self, instance, network_info):
"""Reboot specified VM"""
raise NotImplementedError()
def snapshot_instance(self, context, instance_id, image_id):
raise NotImplementedError()
def get_console_pool_info(self, console_type):
raise NotImplementedError()
def get_console_output(self, instance):
raise NotImplementedError()
def get_ajax_console(self, instance):
raise NotImplementedError()
def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
raise NotImplementedError()
def get_host_ip_addr(self):
raise NotImplementedError()
def attach_volume(self, context, instance_id, volume_id, mountpoint):
raise NotImplementedError()
def detach_volume(self, context, instance_id, volume_id):
raise NotImplementedError()
def compare_cpu(self, context, cpu_info):
raise NotImplementedError()
def migrate_disk_and_power_off(self, instance, dest):
"""Transfers the VHD of a running instance to another host, then shuts
off the instance copies over the COW disk"""
raise NotImplementedError()
def snapshot(self, instance, image_id):
"""Create snapshot from a running VM instance."""
raise NotImplementedError()
def finish_resize(self, instance, disk_info):
"""Completes a resize, turning on the migrated instance"""
raise NotImplementedError()
def revert_resize(self, instance):
"""Reverts a resize, powering back on the instance"""
raise NotImplementedError()
def pause(self, instance, callback):
"""Pause VM instance"""
raise NotImplementedError()
def unpause(self, instance, callback):
"""Unpause paused VM instance"""
raise NotImplementedError()
def suspend(self, instance, callback):
"""suspend the specified instance"""
raise NotImplementedError()
def resume(self, instance, callback):
"""resume the specified instance"""
raise NotImplementedError()
def rescue(self, instance, callback, network_info):
"""Rescue the specified instance"""
raise NotImplementedError()
def unrescue(self, instance, callback, network_info):
"""Unrescue the specified instance"""
raise NotImplementedError()
def update_available_resource(self, ctxt, host):
"""Updates compute manager resource info on ComputeNode table.
This method is called when nova-compute launches, and
whenever admin executes "nova-manage service update_resource".
:param ctxt: security context
:param host: hostname that compute manager is currently running
"""
raise NotImplementedError()
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method):
"""Spawning live_migration operation for distributing high-load.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
"""
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
raise NotImplementedError()
def refresh_provider_fw_rules(self, security_group_id):
"""See: nova/virt/fake.py for docs."""
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance"""
pass
def ensure_filtering_rules_for_instance(self, instance_ref):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
and firewalls are inevitable on destination host.
( Waiting only for filtering rules to hypervisor,
since filtering rules to firewall rules can be set faster).
Concretely, the below method must be called.
- setup_basic_filtering (for nova-basic, etc.)
- prepare_instance_filter(for nova-instance-instance-xxx, etc.)
to_xml may have to be called since it defines PROJNET, PROJMASK.
but libvirt migrates those value through migrateToURI(),
so , no need to be called.
Don't use thread for this method since migration should
not be started when setting-up filtering rules operations
are not completed.
:params instance_ref: nova.db.sqlalchemy.models.Instance object
"""
raise NotImplementedError()
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance"""
raise NotImplementedError()
def set_admin_password(self, context, instance_id, new_pass=None):
"""Set the root/admin password for an instance on this server."""
raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
"""Create a file on the VM instance. The file path and contents
should be base64-encoded.
"""
raise NotImplementedError()
def agent_update(self, instance, url, md5hash):
"""Update agent on the VM instance."""
raise NotImplementedError()
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance"""
pass
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plugs in VIFs to networks."""
raise NotImplementedError()
| [
"brad@nicira.com"
] | brad@nicira.com |
628c8032ad010e825f937c0191ed5e5aff74e426 | f2b4e7e228165ab0d3180e1b2fdebaa491814101 | /merge_sort_algo.py | 29e538fbbca300b6acd4056cd438b3ad7833a3df | [] | no_license | abhijith365/python-small-projects | a446f999d01cc441fd85734093fd88d30349c25e | d4bc8a867581d86bd70acc1192565266d4654ed6 | refs/heads/main | 2023-04-12T09:16:06.454703 | 2021-05-08T15:43:53 | 2021-05-08T15:43:53 | 353,774,236 | 11 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | def merge(list1, list2):
new_list = list()
a = 0
b = 0
while a < len(list1) and b < len(list2):
if list1[a] < list2[b]:
new_list.append(list1[a])
a += 1
else:
new_list.append(list2[b])
b += 1
while a < len(list1):
new_list.append(list1[a])
a += 1
while b < len(list2):
new_list.append(list2[b])
b += 1
return new_list
def merge_sort(input_list):
if len(input_list) <= 1:
return input_list
else:
mid = len(input_list) // 2
left = merge_sort(input_list[:mid])
right = merge_sort(input_list[mid:])
new_list = merge(left, right)
return new_list
a = [10, 5, 6, 30, 32, 22, 55, 60, 100, 99, 88, 76, 98]
print(merge_sort(a))
| [
"noreply@github.com"
] | noreply@github.com |
a92c3d27cff1afd868912161f01af88540ff3aa4 | feafb15c0f6953cd1e6ac46ed7ea04bdb72cb545 | /prep.py | a79434a2f1cc3ad382668e323a49b9a0e3c9705d | [] | no_license | danielgildea/FFNN4POSTagging | 074dd1f6e29227702d1fc2e1a0f0ca83b1bee5df | c266663bb596e66ea20533e1b5392e8cb5f4f466 | refs/heads/master | 2021-07-25T03:31:10.421596 | 2017-11-07T17:03:09 | 2017-11-07T17:03:09 | 109,767,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py |
import os, sys, json
def gen_vocab(path):
vocab = {'#pad#':0, '<s>':1, '</s>':2, '<unk>':3, }
tags = {}
for line in open(path, 'rU'):
for i, x in enumerate(line.strip().split()):
if i == 0:
continue
elif i%2 == 1:
x = x.lower()
if x not in vocab: vocab[x] = len(vocab)
else:
if x not in tags: tags[x] = len(tags)
return vocab, tags
def make_indices(path, vocab, tags):
data = []
for line in open(path, 'rU'):
cur_data = []
line = line.strip().split()
for i in range(1, len(line), 2):
x = line[i].lower()
x = vocab[x] if x in vocab else vocab['<unk>']
y = line[i+1]
if y not in tags:
tags[y] = len(tags)
y = tags[y]
cur_data.append((x,y))
data.append(cur_data)
data_x = []
data_y = []
st, ed = vocab['<s>'], vocab['</s>']
for sent in data:
for i in range(len(sent)):
x = []
x.append(st if i-2 < 0 else sent[i-2][0])
x.append(st if i-1 < 0 else sent[i-1][0])
x.append(sent[i][0])
x.append(ed if i+1 >= len(sent) else sent[i+1][0])
x.append(ed if i+2 >= len(sent) else sent[i+2][0])
data_x.append(x)
data_y.append([sent[i][1],])
return data_x, data_y
vocab, tags = gen_vocab('../data/pos/train')
train_x, train_y = make_indices('../data/pos/train', vocab, tags)
test_x, test_y = make_indices('../data/pos/test', vocab, tags)
dev_x, dev_y = make_indices('../data/pos/dev', vocab, tags)
data = {'train_x':train_x, 'train_y':train_y, 'test_x':test_x, 'test_y':test_y, 'dev_x':dev_x, 'dev_y':dev_y, 'vocab':vocab, 'tags':tags,}
json.dump(data, open('data.json','wb'))
| [
"noreply@github.com"
] | noreply@github.com |
7a7ac53b9fac6c2c985f1f6895ea8395a7a81456 | 66549903b5d456594cdf1bd8d6b2eb7d732d59e2 | /k_means/k_means_cluster.py | 44a2d5a8677d8bb78cc8af245bd43465f079af53 | [] | no_license | softsankar/ud120-projects-master | d17dcc1bfc94cd6953ff802d7abd08c1398e75a9 | 1394183396d8ecdb507629158c4c3198f6a84320 | refs/heads/master | 2021-05-12T03:04:44.990961 | 2018-01-16T00:40:08 | 2018-01-16T00:40:08 | 117,607,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,439 | py | #!/usr/bin/python
"""
Skeleton code for k-means clustering mini-project.
"""
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
import math
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
""" some plotting code designed to help you visualize your clusters """
### plot each cluster with a different color--add more colors for
### drawing more than five clusters
colors = ["b", "c", "k", "m", "g"]
for ii, pp in enumerate(pred):
plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])
### if you like, place red stars over points that are POIs (just for funsies)
if mark_poi:
for ii, pp in enumerate(pred):
if poi[ii]:
plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
plt.xlabel(f1_name)
plt.ylabel(f2_name)
plt.savefig(name)
plt.show()
def findMaxNMin(data,feature):
print "find MaxNMin for {} ... started.".format(feature)
filteredDict = dict((k,v) for k,v in data.items() if not math.isnan(float(v[feature])))
mxDict = max(list(v[feature] for k,v in filteredDict.items()))
mnDict = min(list(v[feature] for k,v in filteredDict.items()))
print "Max value of {} is {} ".format(feature,mxDict)
print "Min value of {} is {} ".format(feature,mnDict)
print "find MaxNMin for {} ... completed.".format(feature)
### load in the dict of dicts containing all the data on each person in the dataset
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
poi = "poi"
features_list = [poi, feature_1, feature_2]
data = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data )
findMaxNMin(data_dict,feature_2)
findMaxNMin(data_dict,feature_1)
### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (as it's currently written, the line below assumes 2 features)
for f1, f2 in finance_features:
plt.scatter( f1, f2 )
plt.show()
from sklearn.cluster import KMeans
### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
kmeans = KMeans(n_clusters=2, random_state=0)
pred = kmeans.fit_predict(data)
### rename the "name" parameter when you change the number of features
### so that the figure gets saved to a different file
try:
Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
print ("no predictions object named pred found, no clusters to plot")
feature_3 = "total_payments"
features_list.append(feature_3)
data = featureFormat(data_dict,features_list)
poi, finance_features = targetFeatureSplit( data )
pred = kmeans.fit_predict(data)
try:
Draw(pred, finance_features, poi, mark_poi=False, name="clusters_f3.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
print ("no predictions object named pred found, no clusters to plot")
| [
"sn6123@us.att.com"
] | sn6123@us.att.com |
8e28090f58f278acd4f9e0769f6ef83b18ffa38a | 6ef829a3294bb89d826ec1123bd53f9fe8d4bfbb | /logger/migrations/0001_initial.py | 3a0ef490382c02b142ab348a9670827a8f40c927 | [] | no_license | dynoto/iplogger-django | 7d8e826d2885b0386dbb2680b9b87348a48fc481 | 1f1c3046370f26cef6a3288dd07ee2f0838c9e0e | refs/heads/master | 2021-01-21T05:55:04.230329 | 2013-10-31T15:21:07 | 2013-10-31T15:21:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,496 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Peer'
db.create_table(u'logger_peer', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ip_address', self.gf('django.db.models.fields.CharField')(max_length=64)),
('country', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('client_type', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
))
db.send_create_signal(u'logger', ['Peer'])
# Adding model 'Torrent'
db.create_table(u'logger_torrent', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('torrent_hash', self.gf('django.db.models.fields.CharField')(max_length=250)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250)),
('category', self.gf('django.db.models.fields.CharField')(max_length=250, blank=True)),
('filename', self.gf('django.db.models.fields.CharField')(max_length=250, blank=True)),
('published', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('spidered', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal(u'logger', ['Torrent'])
# Adding model 'Activity'
db.create_table(u'logger_activity', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('torrent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['logger.Torrent'])),
('peer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['logger.Peer'])),
('download_speed', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('upload_speed', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
))
db.send_create_signal(u'logger', ['Activity'])
def backwards(self, orm):
# Deleting model 'Peer'
db.delete_table(u'logger_peer')
# Deleting model 'Torrent'
db.delete_table(u'logger_torrent')
# Deleting model 'Activity'
db.delete_table(u'logger_activity')
models = {
u'logger.activity': {
'Meta': {'object_name': 'Activity'},
'download_speed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'peer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['logger.Peer']"}),
'torrent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['logger.Torrent']"}),
'upload_speed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'logger.peer': {
'Meta': {'object_name': 'Peer'},
'client_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'logger.torrent': {
'Meta': {'object_name': 'Torrent'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'peers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['logger.Peer']", 'through': u"orm['logger.Activity']", 'symmetrical': 'False'}),
'published': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'spidered': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'torrent_hash': ('django.db.models.fields.CharField', [], {'max_length': '250'})
}
}
complete_apps = ['logger'] | [
"dynamic.dante@gmail.com"
] | dynamic.dante@gmail.com |
7b06d2e6bac8c5b2bf1173dada4466de94cc00f7 | 9d8721c19c55067d99fdd66e62bcb89a081fe7a3 | /Non-binary PS/step3.py | 12f50718e3365c610e1e1c22942f94d9c426ace2 | [] | no_license | GALI17/Non-Binary | a80b4b4ec9465bb1130e6351bbbdab867144e03a | 0426c2708168d2b47645fbf033e35c713f163d7b | refs/heads/master | 2020-05-25T13:38:59.485783 | 2019-05-21T12:04:54 | 2019-05-21T12:04:54 | 187,826,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,663 | py | import os
import numpy as np
input_file = []
real_file = []
inputData = "G:\\KY2\\p\\"
realData = "G:\\python_demo\\res\\res_p\\"
all_real = []
def read_1(inp): # ok
path_dir = os.listdir(inp)
for allDir in path_dir:
child1 = os.path.join('%s%s' % (inp, allDir))
print(child1)
input_file.append(child1)
def read_2(real): # ok
path_dir = os.listdir(real)
for allDir in path_dir:
child1 = os.path.join('%s%s' % (real, allDir))
#print(child1)
txt_dir = os.listdir(child1)
for eachTxt in txt_dir:
child2 = os.path.join(child1 + "\\" + eachTxt)
print(child2)
real_file.append(child2)
def add_update_real(real): # ok
add = []
for i in range(len(real)):
print("i :")
print(i)
real_tempt = np.loadtxt(real[i], delimiter=' ') # <class 'numpy.ndarray'>
print(real_tempt)
if (i+1) % 3 != 0:
add.extend(real_tempt) # <class 'list'>
print("if add:")
print(add)
else:
add.extend(real_tempt)
del_repeat = list(set(add))
np.set_printoptions(suppress=True)
out = np.transpose(del_repeat)
print("out: ")
print(out)
all_real.append(out)
add = []
print("else add:")
print(add)
print(i)
print(real[i])
print(i-1)
print(real[i-1])
print(i-2)
print(real[i-2])
np.savetxt(real[i - 2], out, fmt="%d", delimiter=' ')
np.savetxt(real[i - 1], out, fmt="%d", delimiter=' ')
np.savetxt(real[i], out, fmt="%d", delimiter=' ')
def update_each_input(inp, all_real): # ok
for i in range(len(inp)):
real_tempt = all_real[i]
print(real_tempt)
out_list = []
for j in range(len(real_tempt)):
tempt = int(real_tempt[j])
print(tempt)
#S
#x = np.loadtxt(inp[i], dtype=int, delimiter=" ", usecols=tempt,)
#P
x = np.loadtxt(inp[i], delimiter=" ", usecols=tempt, )
out_list.append(x)
print(out_list)
np.set_printoptions(suppress=True)
out = np.transpose(out_list)
np.savetxt(inp[i], out, fmt="%s", delimiter=" ")
if __name__ == "__main__":
#约减实现
read_1(inputData)
read_2(realData)
print(input_file)
print(real_file)
#更新real
add_update_real(real_file)
#更新聚类前的原始数据
print(all_real)
update_each_input(input_file, all_real)
| [
"402152175@qq.com"
] | 402152175@qq.com |
d3a11910ed3fdea69de7c645452f9f813ff830b1 | 241fc4d8d2eb75eae0fa0bf2cd72a32f8ddfcfd7 | /terminal_monitoring/terminal/models/accesspoint.py | 979ff7b5b97f860440a3c730cb82463ec84d4613 | [] | no_license | armadarama/terminal_monitoring | 9c1174a6bcd3b6a731fdf973daf82139914869b1 | 0df300b96395d1ddeaf524d9215350bb2e11440d | refs/heads/master | 2021-01-21T15:13:37.074101 | 2017-06-23T09:43:36 | 2017-06-23T09:43:36 | 95,378,461 | 1 | 0 | null | 2017-06-25T18:33:15 | 2017-06-25T18:33:15 | null | UTF-8 | Python | false | false | 348 | py | from django.db import models
class Accesspoint(models.Model):
name = models.CharField(max_length=100, default ="", null=False, blank=False)
ip_adress = models.CharField(max_length=50, default ="", null=False, blank=False)
site = models.ForeignKey('Site', on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return self.name | [
"noreply@github.com"
] | noreply@github.com |
9378d7d14ff7c7d642bdcdd49c810545cc6a1e2e | 71885763aa3fcad7ea183a712d01f8abe37825c1 | /testcasesFB.py | 94ae1c0e526df5cbabb38a2a98bcce45d26c91c2 | [] | no_license | rjchand07/Fb_Login_automation_selenium | 1339dd0e036108762c11d790afe63a227d1ffb16 | 0146b96a4b99400f17c52b986c1fef47575c1c74 | refs/heads/master | 2020-12-02T14:04:14.337108 | 2019-12-31T05:30:40 | 2019-12-31T05:30:40 | 231,031,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | from selenium import webdriver
import unittest
from pages.loginpage import LoginPage
from pages.logoutpage import HomePage
import HtmlTestRunner
class FB_Login(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Chrome(executable_path="C:/Drivers/chromedriver_win32/chromedriver.exe")
cls.driver.implicitly_wait(5)
cls.driver.maximize_window()
def test_Valid_Login(self):
driver = self.driver
self.driver.get("http://facebook.com")
Log_in = LoginPage(driver)
Log_in.enter_username("tardtypical@gmail.com")
Log_in.enter_password("01120090020k")
Log_in.click_login()
Log_out = HomePage(driver)
Log_out.click_navigator()
Log_out.click_logout()
@classmethod
def tearDownClass(cls):
cls.driver.close()
cls.driver.quit()
print("Test is Completed")
if __name__ == '__main__':
unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output='C:/Users/Shiva Ravi Raja/Desktop/logos'))
| [
"noreply@github.com"
] | noreply@github.com |
0f785c69999e4751d3f76e4a4133d914656979c7 | 2918007c9002ce1ead3f076ff0cf2f91ab647963 | /work/utils/tiler/tiler_helpers.py | 70dece559ee7764027dbb0ef2a89d971a8ccabde | [] | no_license | MrMaksimize/geostack | 3fa7297585efa708fb5a3e1a38bc05220f0d3c7f | c0044575361a545d8e229d24fbf8c9c0f1b4ccc6 | refs/heads/master | 2021-09-04T10:17:05.858700 | 2018-01-17T22:43:52 | 2018-01-17T22:43:52 | 112,034,950 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | import os
import json
def check_environ_vars():
""" Make sure database environment variables are present """
## Check all environment variables are defined
env_vars = ['DB_NAME', 'DB_USER', 'DB_PORT', 'DB_HOST', 'DB_PASSWORD']
for env_var in env_vars:
if env_var not in os.environ:
raise ValueError("Necessary environment variable not set not: ", env_var)
def check_file(file_name):
""" Check a file exists """
if not os.path.isfile(file_name):
raise OSError(file_name + " does not exist")
def add_tippecanoe_config(output_path, layer_config):
""" Given a configuration, add the configuration to a given GeoJSON file """
print("\n Rewriting GeoJSON to add tippecanoe options")
with open(output_path, 'r+') as geojson_file:
geojson = json.load(geojson_file)
for feature in geojson["features"]:
feature["tippecanoe"] = {}
if "layer" in layer_config:
feature["tippecanoe"]["layer"] = str(layer_config["layer"])
if "maxzoom" in layer_config:
feature["tippecanoe"]["maxzoom"] = int(layer_config["maxzoom"])
if "minzoom" in layer_config:
feature["tippecanoe"]["minzoom"] = int(layer_config["minzoom"])
geojson_file.seek(0)
geojson_file.write(json.dumps(geojson))
geojson_file.truncate()
def absolute_file_paths(directory):
""" Get a generator of all the absolute paths for files in a directory """
for dirpath, _, filenames in os.walk(directory):
for f in filenames:
yield os.path.abspath(os.path.join(dirpath, f))
| [
"max@maksimize.com"
] | max@maksimize.com |
0dc314ba39b82b2b3583fde6190975d292d52f80 | 8d5b5acec80889f08584e3b7b0dcc86534420ccd | /tests/ArrayTest.py | 5f914d1f7a22c47b4455ffdaba239744ebc2143a | [
"MIT"
] | permissive | dpep/py_rekey | 5f4a460afa7ba2bcc87a466f9df313503d2cf3d8 | 99d2d7ffe1bdc68f583a504c9943e30bb3717d14 | refs/heads/master | 2023-02-09T02:57:57.831488 | 2023-01-31T23:12:09 | 2023-01-31T23:12:09 | 79,070,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | #!/usr/bin/env python
import os
import sys
import unittest
sys.path = [ os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')) ] + sys.path
from rekey import rekey
class ArrayTest(unittest.TestCase):
def test_basic(self):
data = [
{'k': 'a', 'v': 2},
{'k': 'b', 'v': 4},
{'k': 'c', 'v': 6},
]
self.assertEqual(
{'a': 2, 'b': 4, 'c': 6},
rekey(data, 'k', 'v')
)
data = [
{'k': 1},
{'k': 2},
{'k': 3},
]
self.assertEqual(
[1, 2, 3],
rekey(data, None, 'k')
)
def test_indicies(self):
data = [
[0, 1, 2],
[5, 6, 7],
]
self.assertEqual(
[0, 5],
rekey(data, None, 0)
)
def test_builtin_fn(self):
data = [
[1],
[1, 2],
[1, 2, 3],
]
self.assertEqual(
[1, 2, 3],
rekey(data, None, len)
)
def test_fn(self):
def double(val):
return val * 2
self.assertEqual(
[2, 4, 6],
rekey([1, 2, 3], None, double)
)
def test_lamba(self):
self.assertEqual(
[2, 4, 6],
rekey([1, 2, 3], None, lambda x: x * 2)
)
if __name__ == '__main__':
unittest.main()
| [
"pepper.daniel@gmail.com"
] | pepper.daniel@gmail.com |
b8d7a99ad4e5d9a13b4ce30cd3d4f23c799f5ddd | 6e928e1651713f945c980bca6d6c02ac5dce249a | /task1/3.py | 7cd2b6493d849db45fc01607283f0cb988c3dd8e | [] | no_license | Akzhan12/pp2 | 97334158b442383df32583ee6c0b9cab92a3ef45 | 56e33fd9119955ea8349172bf3f2cc5fbd814142 | refs/heads/main | 2023-06-28T08:30:11.068397 | 2021-07-29T08:34:43 | 2021-07-29T08:34:43 | 337,359,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | a = list(map(int, input().strip().split()))
print(*a[::-1]) | [
"noreply@github.com"
] | noreply@github.com |
2a62f1bef54bfd2cb7615ca2e9e0483f7ca9fd76 | 5ab2ccf70fddd30ea88155f2a5adb0711bf3dc9a | /Chap10/factorsingles.py | 5d413a283dcbbe5de549074b7b5cbee0eafea399 | [] | no_license | jdukosse/LOI_Python_course-SourceCode | 32d66fd79344e9ab9412a6da373f2093b39cad92 | bf13907dacf5b6e95f84885896c8f478dd208011 | refs/heads/master | 2020-12-05T23:27:53.862508 | 2020-01-24T13:42:28 | 2020-01-24T13:42:28 | 232,276,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | n = int(input("Please enter a positive integer: "))
factors = [x for x in range(1, n + 1) if n % x == 0]
print("Factors of", n, ":", factors)
| [
"jdukosse@hotmail.com"
] | jdukosse@hotmail.com |
dd7f146df693ac042cde1345a5080c70862c344e | 222a7d69a78f1350772c9c8bfb0b36c640e5cd6e | /MarlinJobs/CalibrationConfigFiles/Stage59Config_5x5_30x30.py | 2b94d6d91472c95d504b20257b87d7e3b5afb347 | [] | no_license | StevenGreen1/JERDetailed | 2a8cb30ec32781791ba163e5125bcdb87239e9a4 | 27ed19dc0930570f16019b2c7820ae715dd0ec57 | refs/heads/master | 2021-01-17T06:55:11.384992 | 2016-08-10T14:41:38 | 2016-08-10T14:41:38 | 44,620,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | # Calibration config file for testing
# Digitisation Constants - ECal
CalibrECal = 42.3662496409
# Digitisation Constants - HCal
CalibrHCalBarrel = 50.3504586994
CalibrHCalEndcap = 55.6419000329
CalibrHCALOther = 30.5873671511
# Digitisation Constants - Muon Chamber
CalibrMuon = 56.7
# MIP Peak position in directed corrected SimCaloHit energy distributions
# used for realistic ECal and HCal digitisation options
CalibrECalMIP = 0.0001475
CalibrHCalMIP = 0.0004925
# MIP Peak position in directed corrected CaloHit energy distributions
# used for MIP definition in PandoraPFA
ECalToMIPCalibration = 153.846
HCalToMIPCalibration = 36.1011
MuonToMIPCalibration = 10.101
# EM and Had Scale Settings
ECalToEMGeVCalibration = 1.00215973193
HCalToEMGeVCalibration = 1.00215973193
ECalToHadGeVCalibration = 1.12219237098
HCalToHadGeVCalibration = 1.05372579725
# Pandora Threshold Cuts
ECalMIPThresholdPandora = 0.5
HCalMIPThresholdPandora = 0.3
# Hadronic Energy Truncation in HCal PandoraPFA
MaxHCalHitHadronicEnergy = 1000000.0
# Timing ECal
ECalBarrelTimeWindowMax = 300.0
ECalEndcapTimeWindowMax = 300.0
# Timing HCal
HCalBarrelTimeWindowMax = 300.0
HCalEndcapTimeWindowMax = 300.0
| [
"sg1sg2sg3@hotmail.co.uk"
] | sg1sg2sg3@hotmail.co.uk |
9461f02ac4fdcbf48b760055e18b17a595c5d8e0 | 5451997d7b691679fd213d6473b21f184a5c9402 | /pymaze/wsgi.py | 4aff83a8a210e68f9e6d3d976da790c63895747e | [
"MIT"
] | permissive | TerryHowe/pymaze | 9ba54c7d328abf94f6709593795a587f28be752b | a5b7e90b5019a5f99a7f80317796ace72ca0754f | refs/heads/master | 2022-05-01T07:39:17.896430 | 2022-04-23T10:41:48 | 2022-04-23T10:41:48 | 89,522,507 | 1 | 0 | MIT | 2022-04-23T10:41:49 | 2017-04-26T20:13:13 | Python | UTF-8 | Python | false | false | 390 | py | """
WSGI config for pymaze project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pymaze.settings")
application = get_wsgi_application()
| [
"terrylhowe@gmail.com"
] | terrylhowe@gmail.com |
91b0e721835c085fa2a63efd9e75126f6092da46 | d1f9a4e40cd3529114c3b0109716ed5240776ef6 | /pondiBackend/wsgi.py | 38dfe9586cd065774850e73fcbd8813f18d5bef8 | [] | no_license | UCLA-Creative-Labs/pondi-backend | c7b6a0ffa0f94cd675a5960f7cb3d52d53bc4e33 | 1da935e9d7f80ec6328190f83b4b03b7d20343b6 | refs/heads/master | 2022-11-01T15:27:42.657872 | 2019-03-09T20:42:28 | 2019-03-09T20:42:28 | 168,284,898 | 0 | 2 | null | 2022-10-18T03:20:17 | 2019-01-30T05:30:02 | Python | UTF-8 | Python | false | false | 404 | py | """
WSGI config for pondiBackend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pondiBackend.settings')
application = get_wsgi_application()
| [
"donle22599@g.ucla.edu"
] | donle22599@g.ucla.edu |
2c34a179a28321b7c74dce48f2090b771156f159 | 546b40c438935fa32fc954152c6f7d7e5c515950 | /project/manage.py | 152fb29152193fa9e9daa3b3774e487a9c9343a0 | [] | no_license | xj-sun/gjango_project_reminder | fc8732fc4595c65cc749bdd8fae219af53da81e9 | f2f1309868330876afcbe3b7a3140de21886109f | refs/heads/master | 2023-01-12T07:40:25.271229 | 2020-11-02T23:06:12 | 2020-11-02T23:06:12 | 309,514,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | #!/usr/bin/env python
import os
import sys
import pymysql
pymysql.install_as_MySQLdb()
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WeatherProject.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"xj.sun77@gmail.com"
] | xj.sun77@gmail.com |
5eff0169132197e41737862349d9ad181777010a | fe8f7febac1ff93b829256cdfd0be69e94498c76 | /python/fluent_python/code/clockdeco_param.py | 4700886d4acf8383701a414070e3f4635df7f784 | [] | no_license | bioShaun/notebook | c438eba1d29b736704c3f5325faf15ad61a1e9d5 | ce5f477a78554ed0d4ea5344057c19e32eb6c2b8 | refs/heads/master | 2020-03-26T16:16:06.458545 | 2018-08-23T00:54:53 | 2018-08-23T00:54:53 | 145,090,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | import time
import functools
DEFAULT_FMT = '[{elapsed:0.8f}s] {name}({args}) -> {result}'
def clock(fmt=DEFAULT_FMT):
def decorate(func):
def clocked(*_args, **kwargs):
t0 = time.time()
_result = func(*_args, **kwargs)
elapsed = time.time() - t0
name = func.__name__
arg_lst = []
if _args:
arg_lst.append(', '.join(repr(arg) for arg in _args))
if kwargs:
pairs = ['%s=%r' % (k, w)
for k, w in sorted(kwargs.items * ())]
arg_lst.append(', '.join(pairs))
args = ', '.join(arg_lst)
result = repr(_result)
print(fmt.format(**locals()))
return _result
return clocked
return decorate
if __name__ == '__main__':
@clock()
def snooze(seconds):
time.sleep(seconds)
for i in range(3):
snooze(.123)
| [
"ricekent@163.com"
] | ricekent@163.com |
7522538dc5536b972327606ff3f23566c717f5f1 | 90b82685728eca7e3c27e486f55edc8d53683b36 | /Two Sum II - Input array is sorted/method2.py | 1bab4ef673a061f124c4c1b14d750a549a25f647 | [] | no_license | breathfisheva/Algorithm | 5ead5da8499e4d9d975629846f790b8328dcd678 | 559a83e60b2dd8a427ade127d471a923c16b0c43 | refs/heads/master | 2020-04-16T23:34:09.172743 | 2019-04-30T09:30:06 | 2019-04-30T09:30:06 | 166,016,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | # numbers = [2,7,11,15], target =17 [1,2]
class Solution:
def twoSum(self, numbers: 'List[int]', target: 'int') -> 'List[int]':
num_dict = {}
for i in range(len(numbers)):
difference = target - numbers[i]
if difference not in num_dict:
num_dict
print(Solution().twoSum([0,0,3,4],0)) | [
"hongliu_goodluck@sina.com"
] | hongliu_goodluck@sina.com |
188cc3859ee548a752611803d9bf62dc492fb892 | ed5605829992fba080872c02396038e00713abd2 | /PCchatbot/timer_v3.py | 935189283716fd784a019867775d7f3226d40aca | [] | no_license | BonHyuck/kakao-chatbot | 0c39936b754e1702c892bc6eae7747073669c775 | c7609aad8f6656685ef0ea5225619f4ba9de38b2 | refs/heads/master | 2023-03-09T21:09:22.635059 | 2021-02-26T02:26:22 | 2021-02-26T02:26:22 | 342,438,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,719 | py | import time, win32con, win32api, win32gui
# 시간 맞춰 메시지 보내기용
import datetime
import schedule
import time
import requests
import json
from random import choice
################################
# API를 가져올 URL
# api_url = 'https://chatbotkakao.herokuapp.com/api/'
# # 카톡창 이름, (활성화 상태의 열려있는 창)
# kakao_opentalk_name = 'SSAFY 4기 서울 3반(A반)'
#kakao_opentalk_name = '구본혁'
# # 채팅방에 메시지 전송
def kakao_sendtext(chatroom_name, text):
# # 핸들 _ 채팅방
hwndMain = win32gui.FindWindow( None, chatroom_name)
hwndEdit = win32gui.FindWindowEx( hwndMain, None, "RichEdit50W", None)
# hwndListControl = win32gui.FindWindowEx( hwndMain, None, "EVA_VH_ListControl_Dblclk", None)
win32api.SendMessage(hwndEdit, win32con.WM_SETTEXT, 0, text)
SendReturn(hwndEdit)
# # 엔터
def SendReturn(hwnd):
win32api.PostMessage(hwnd, win32con.WM_KEYDOWN, win32con.VK_RETURN, 0)
time.sleep(0.01)
win32api.PostMessage(hwnd, win32con.WM_KEYUP, win32con.VK_RETURN, 0)
# # 채팅방 열기
def open_chatroom(chatroom_name):
# # 채팅방 목록 검색하는 Edit (채팅방이 열려있지 않아도 전송 가능하기 위하여)
hwndkakao = win32gui.FindWindow(None, "카카오톡")
hwndkakao_edit1 = win32gui.FindWindowEx( hwndkakao, None, "EVA_ChildWindow", None)
hwndkakao_edit2_1 = win32gui.FindWindowEx( hwndkakao_edit1, None, "EVA_Window", None)
hwndkakao_edit2_2 = win32gui.FindWindowEx( hwndkakao_edit1, hwndkakao_edit2_1, "EVA_Window", None)
hwndkakao_edit3 = win32gui.FindWindowEx( hwndkakao_edit2_2, None, "Edit", None)
# # Edit에 검색 _ 입력되어있는 텍스트가 있어도 덮어쓰기됨
win32api.SendMessage(hwndkakao_edit3, win32con.WM_SETTEXT, 0, chatroom_name)
time.sleep(1) # 안정성 위해 필요
SendReturn(hwndkakao_edit3)
time.sleep(1)
# 결과를 담을 배열
result = []
birthday_result = []
exam_result = []
# 서버에서 저장된 API 받아오기
def get_api():
# API 주소
address = 'https://chatbotkakao.herokuapp.com/api/'
# 리스트 형태로 온다.
chatInfos = requests.get(address).json()
# 리스트 비우기
result[:] = []
# 리스트에 딕셔너리 객체 넣어주기
for chatInfo in chatInfos:
chat_hour = str(chatInfo['chat_hour'])
chat_minute = str(chatInfo['chat_minute'])
if 0 <= chatInfo['chat_hour'] <= 9:
chat_hour = '0'+str(chatInfo['chat_hour'])
if 0 <= chatInfo['chat_minute'] <= 9:
chat_minute = '0'+str(chatInfo['chat_minute'])
new_dict = {
'message':chatInfo['message'],
'chat_time':chat_hour+':'+chat_minute,
'send_to': chatInfo['send_to']
}
result.append(new_dict)
# 생일 축하 멘트
ment = [
'''
|
/ ̄ ̄ ̄\
/ ∧ \
│ / 川\ │
\/┏┻┓ \/
。゛#┃생┃゛。
゛,。┃일┃#。゛
。゜#┃축┃゛。゛
,*。┃하┃゜。#
#゜。┃해┃゜*。
┃☆┃
┗┯┛
∧∧ │
(*´∀`)│
/ ⊃
''',
'''
iiiiiiii
┏━♡♡━┓
┏"━☆━☆━"┓
♡-생일축하해-♡
★☆:+.♡.+:★☆
''',
'''
┏┓┏┓。・゚゚・。。゚💖
┃┗┛ appy💜
┃┏┓┃ birth✿
┗┛┗┛ day*゚✾
。.。.。.。💛
''',
'''
∧_∧
(。・ω・。)つ━☆・*。
⊂ ノ ・゜
しーJ °。+ * 。
.・゜
゜。゚゚・。・゚゚。
゚。 。゚
゚・。・゚
''',
'''
♪∧,,∧
♪∧,,∧・ ω・)
∧,,∧・ ω・) )っ
(・ ω・) )っ__フ
(っ )っ__フ(_/彡
( __フ(_/彡
(_/彡♪
'''
]
week_date = ['월', '화', '수', '목', '금', '토', '일', ]
# 서버에 저장된 메시지 보내기
def send_message():
global result, birthday_result, exam_result
# 현재 시각
nowTime = datetime.datetime.now().strftime('%H:%M')
#오늘 날짜
nowDate = datetime.datetime.now().strftime('%m-%d')
# 리스트가 비어있거나 현재 시각이 새벽 5시 30분일때 서버 통신
# 매일 보내는 메시지
if len(result) == 0 or nowTime == '05:30':
get_api()
print("API 다운로드!!")
print(result)
# 리스트가 비어있지 않을 때 작동
if len(result) > 0:
# 서버에 있는 각 정보 뜯기
for chatInfo in result:
# 시간이 일치한다면
if chatInfo['chat_time'] == nowTime:
# 저장된 카톡방 열기
open_chatroom(chatInfo['send_to'])
# 메시지 입력후 전송
kakao_sendtext(chatInfo['send_to'], chatInfo['message'])
################################################################################################################
# 생일 메시지, 자정을 기준으로 구동
if len(birthday_result) == 0 or nowTime == '00:00':
# 리스트 비우기
birthday_result[:] = []
# Json 파일 읽어오기
with open('birthday.json', 'r', encoding='UTF8') as json_file:
json_data = json.load(json_file)
birthday_result = json_data['birthday']
for birthday in birthday_result:
birthday['count'] = 0
# 반복 돌기
for birthday in birthday_result:
# Json에서 날짜 만들기
birthdate = birthday['birth_month'] + '-' + birthday['birth_day']
# 오늘 날짜이면서 아직 메시지 보낸적 없음
if birthdate == nowDate and birthday['count'] == 0:
# 메시지 보냈다고 표시
birthday['count'] += 1
# 메시지 생성
message = """{}월 {}일은 {}님의 생일입니다!
🎉생일 축하해요 {}님🎂
{}
♥서울 3반 일동♥""".format(birthday['birth_month'], birthday['birth_day'], birthday['name'], birthday['name'], choice(ment))
# 보내는 곳
open_chatroom("#잡담방 SSAFY 4기 3(A)반")
# 보내기!
kakao_sendtext("#잡담방 SSAFY 4기 3(A)반", message)
################################################################################################################
# 시험 일정!
if len(exam_result) == 0 or nowTime == '18:00':
# 리스트 비우기
exam_result[:] = []
# Json 파일 읽어오기
with open('exam.json', 'r', encoding='UTF8') as json_file:
json_data = json.load(json_file)
exam_result = json_data['exam']
for exam in exam_result:
exam['count'] = 0
# 가장 가까이에 있는 시험만 출력해야한다.
closest_exam_date = ""
closest_exam = {}
# 배열 반복
for exam in exam_result:
# 날짜가 공란이거나 더 가까운 시험이 있으면
if closest_exam_date == "" or (time.mktime(datetime.datetime.strptime(exam['exam_date'], "%Y-%m-%d").timetuple()) < time.mktime(datetime.datetime.strptime(closest_exam_date, "%Y-%m-%d").timetuple()) and datetime.datetime.strptime(datetime.datetime.now().strftime('%Y-%m-%d'), '%Y-%m-%d') >= datetime.datetime.strptime(closest_exam_date, '%Y-%m-%d')):
# 대체
closest_exam_date = exam['exam_date']
closest_exam = exam
continue
# 남은 날짜 계산
count_days = -1
if closest_exam_date != "":
count_days = datetime.datetime.strptime(closest_exam_date, '%Y-%m-%d') - datetime.datetime.strptime(datetime.datetime.now().strftime('%Y-%m-%d'), '%Y-%m-%d')
exam_message = """{} {}일 남았다!!
{} 공부하자~~!!
👉 {}/{}({}) 과목평가
평가일시 : {}
평가과목 : {}""".format(closest_exam['exam_type'], count_days.days, closest_exam['exam_subject'], closest_exam_date[5:7], closest_exam_date[8:], week_date[datetime.datetime.strptime(closest_exam_date, '%Y-%m-%d').weekday()], closest_exam_date, closest_exam['exam_subject'])
if closest_exam['count'] == 0:
print(closest_exam['count'])
closest_exam['count'] += 1
# 보내는 곳
open_chatroom("#잡담방 SSAFY 4기 3(A)반")
# 보내기!
kakao_sendtext("#잡담방 SSAFY 4기 3(A)반", exam_message)
################################################################
# 1분 뒤에 다시 뵙겠습니다.
time.sleep(60)
# 다시 돌기
send_message()
# def main():
# open_chatroom(kakao_opentalk_name) # 채팅방 열기
# text = """~ 8:59 까지
# 👉 입실체크
# 👉 건강설문
# - 온라인/오프라인 제대로 체크하기
# - 체온체크 (온라인시 “온라인 수업일” 로 체크하기)"""
# # text = """👉 퇴실체크
# # 👉 건강설문"""
# kakao_sendtext(kakao_opentalk_name, text) # 메시지 전송
# def wake_up():
# nowTime = datetime.datetime.now().strftime('%H:%M')
# main()
if __name__ == '__main__':
# send_message(get_api(api_url))
send_message()
# while True:
# schedule.run_pending()
# time.sleep(1)
# schedule.every().day.at('08:30').do(wake_up)
# while True:
# schedule.run_pending()
# time.sleep(1) | [
"qhsgur0126@gmail.com"
] | qhsgur0126@gmail.com |
fa73989a846dd0e102e265866470599140b05bf7 | 4f348d157a7c299168221840ddd9103a324cd9b3 | /python/lib/python3.6/site-packages/wolframalpha/__init__.py | 0df9c535bf46eb9b5ab25df6015842b72666dda9 | [
"MIT"
] | permissive | Leovaldez42/IRI | 61c47ee166a9a07c78a8b40ab3ba689359e0f1c7 | 97358a3c15558c333c099657f77ed7e39542a40e | refs/heads/master | 2022-12-13T02:45:44.053132 | 2022-05-21T05:43:51 | 2022-05-21T05:43:51 | 248,300,523 | 0 | 1 | MIT | 2022-12-08T03:51:12 | 2020-03-18T17:37:54 | Python | UTF-8 | Python | false | false | 5,174 | py | import itertools
import json
from six.moves import urllib, map
import xmltodict
from jaraco.itertools import always_iterable
from . import compat
compat.fix_HTTPMessage()
class Client(object):
"""
Wolfram|Alpha v2.0 client
Pass an ID to the object upon instantiation, then
query Wolfram Alpha using the query method.
"""
def __init__(self, app_id):
self.app_id = app_id
def query(self, input, params=(), **kwargs):
"""
Query Wolfram|Alpha using the v2.0 API
Allows for arbitrary parameters to be passed in
the query. For example, to pass assumptions:
client.query(input='pi', assumption='*C.pi-_*NamedConstant-')
To pass multiple assumptions, pass multiple items
as params:
params = (
('assumption', '*C.pi-_*NamedConstant-'),
('assumption', 'DateOrder_**Day.Month.Year--'),
)
client.query(input='pi', params=params)
For more details on Assumptions, see
https://products.wolframalpha.com/api/documentation.html#6
"""
data = dict(input=input, appid=self.app_id,)
data = itertools.chain(params, data.items(), kwargs.items())
query = urllib.parse.urlencode(tuple(data))
url = 'https://api.wolframalpha.com/v2/query?' + query
resp = urllib.request.urlopen(url)
assert resp.headers.get_content_type() == 'text/xml'
assert resp.headers.get_param('charset') == 'utf-8'
return Result(resp)
class ErrorHandler(object):
def __init__(self, *args, **kwargs):
super(ErrorHandler, self).__init__(*args, **kwargs)
self._handle_error()
def _handle_error(self):
if 'error' not in self:
return
template = 'Error {error[code]}: {error[msg]}'
raise Exception(template.format(**self))
class Document(dict):
_attr_types = {}
"Override the types from the document"
@classmethod
def from_doc(cls, doc):
"""
Load instances from the xmltodict result. Always return
an iterable, even if the result is a singleton.
"""
return map(cls, always_iterable(doc))
def __getattr__(self, name):
type = self._attr_types.get(name, lambda x: x)
attr_name = '@' + name
try:
val = self[name] if name in self else self[attr_name]
except KeyError:
raise AttributeError(name)
return type(val)
class Assumption(Document):
@property
def text(self):
text = self.template.replace('${desc1}', self.description)
try:
text = text.replace('${word}', self.word)
except Exception:
pass
return text[: text.index('. ') + 1]
class Warning(Document):
pass
class Image(Document):
"""
Holds information about an image included with an answer.
"""
_attr_types = dict(height=int, width=int,)
class Subpod(Document):
"""
Holds a specific answer or additional information relevant to said answer.
"""
_attr_types = dict(img=Image.from_doc,)
def xml_bool(str_val):
"""
>>> xml_bool('true')
True
>>> xml_bool('false')
False
"""
return bool(json.loads(str_val))
class Pod(ErrorHandler, Document):
"""
Groups answers and information contextualizing those answers.
"""
_attr_types = dict(position=float, numsubpods=int, subpod=Subpod.from_doc,)
@property
def subpods(self):
return self.subpod
@property
def primary(self):
return '@primary' in self and xml_bool(self['@primary'])
@property
def texts(self):
"""
The text from each subpod in this pod as a list.
"""
return [subpod.plaintext for subpod in self.subpod]
@property
def text(self):
return next(iter(self.subpod)).plaintext
class Result(ErrorHandler, Document):
"""
Handles processing the response for the programmer.
"""
_attr_types = dict(pod=Pod.from_doc,)
def __init__(self, stream):
doc = xmltodict.parse(stream, dict_constructor=dict)['queryresult']
super(Result, self).__init__(doc)
@property
def info(self):
"""
The pods, assumptions, and warnings of this result.
"""
return itertools.chain(self.pods, self.assumptions, self.warnings)
@property
def pods(self):
return self.pod
@property
def assumptions(self):
return Assumption.from_doc(self.get('assumptions'))
@property
def warnings(self):
return Warning.from_doc(self.get('warnings'))
def __iter__(self):
return self.info
def __len__(self):
return sum(1 for _ in self.info)
@property
def results(self):
"""
The pods that hold the response to a simple, discrete query.
"""
return (pod for pod in self.pods if pod.primary or pod.title == 'Result')
@property
def details(self):
"""
A simplified set of answer text by title.
"""
return {pod.title: pod.text for pod in self.pods}
| [
"gaurav4.037@gmail.com"
] | gaurav4.037@gmail.com |
e7e3c115506553ab1cbc5ca31ff9c0144325dd24 | 16e266cf50a712ed29a4097e34504aac0281e6cb | /Functions/venv/lib/python3.6/site-packages/_TFL/_SDG/_C/Macro.py | 75f2950512e90bf9922859188d30c81a9164101c | [
"BSD-3-Clause"
] | permissive | felix-ogutu/PYTHON-PROJECTS | 9dd4fdcfff6957830587b64c5da3b5c3ade3a27e | 8c1297dbda495078509d06a46f47dc7ee60b6d4e | refs/heads/master | 2023-06-05T04:41:36.727376 | 2021-06-25T20:36:52 | 2021-06-25T20:36:52 | 380,348,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,540 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2007 TTTech Computertechnik AG. All rights reserved
# Schönbrunnerstraße 7, A--1040 Wien, Austria. office@tttech.com
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# TFL.SDG.C.Macro
#
# Purpose
# C-macro definitions
#
# Revision Dates
# 11-Aug-2004 (MG) Creation
# 12-Aug-2004 (MG) `Macro_Block.children_group_names` added
# 12-Aug-2004 (MG) Convert the `args` paremeter from `None` to `""` and
# from `""` to `None` for backward compatibility
# 12-Aug-2004 (MG) `description` added to formats
# 13-Aug-2004 (CT) `Macro.c_format` simplified
# (`%(name)s` instead of `%(::.name:)s`)
# 24-Aug-2004 (CT) Spurious space after macro name removed from `h_format`
# and `c_format`
# 24-Aug-2004 (MG) `Macro_Block.children_group_names` removed
# 7-Oct-2004 (CED) `Define_Constant` added
# 8-Feb-2005 (CED) `apidoc_tex_format` defined here and necessary changes
# made
# 9-Feb-2005 (MBM/CED) formal changes to `apidoc_tex_format`
# 22-Feb-2005 (MBM) Removed <> from index entry
# 24-Feb-2005 (MBM) Changed index entry structure
# 9-Aug-2005 (CT) Call to `tex_quoted` added
# 30-Oct-2006 (CED) `Preprocessor_Error` added
# 9-Mar-2007 (CED) Accepting integer as value of `Define_Constant`
# 17-Apr-2007 (CED) `Define_Constant` improved to print parantheses around
# `value`
# 23-Jul-2007 (CED) Activated absolute_import
# 06-Aug-2007 (CED) Future import removed again
# 26-Feb-2012 (MG) `__future__` imports added
# ««revision-date»»···
#--
from __future__ import absolute_import, division, print_function, unicode_literals
from _TFL import TFL
import _TFL._SDG._C.Node
import _TFL._SDG._C.Statement
import _TFL.tex_quoted
import textwrap
class _Macro_ (TFL.SDG.C.Node) :
"""Base class of all preprocessor commands (defines, if, ifdef, ...)"""
cgi = None
def _update_scope (self, scope) :
### why do we need this ???? MGL, 11-Aug-2004
self.scope = scope
for c in self.children :
c._update_scope (scope)
# end def _update_scope
# end class _Macro_
class Macro (_Macro_, TFL.SDG.Leaf) :
"""C-macro defintion"""
init_arg_defaults = dict \
( name_len = 0
, scope = TFL.SDG.C.C
, args = None
, lines = None
)
front_args = ("name", "args")
rest_args = "lines"
m_head = ""
h_format = c_format = """
#%(m_head)s%(name)s%(:head=(¡tail=):.args:)s %(:sep_eol= \\:.lines:)s
>%(::*description:)s
"""
def __init__ (self, * args, ** kw) :
self.__super.__init__ (* args, ** kw)
if self.args is None :
self.args = ""
elif self.args == "" :
self.args = None
# end def __init__
# end class Macro
class Define (Macro) :
"""A C-macro #define stament"""
m_head = "define "
init_arg_defaults = dict \
( def_file = "unknown"
, explanation = ""
)
_apidoc_head = \
"""%(::@_name_comment:)-{output_width - indent_anchor}s
\\hypertarget{%(name)s}{}
\\subsubsection{\\texttt{%(name)s}}
\\index{FT-COM API>\\texttt{%(name)s}}
\\ttindex{%(name)s}
\\begin{description}
>\\item %(::*description:)s \\\\
>\\item \\textbf{File:} \\\\ \\texttt{%(def_file)s} \\\\
"""
_apidoc_tail = \
""">%(::>@_explanation:)-{output_width - indent_anchor}s
\\end{description}
>
"""
_apidoc_middle = \
""">\\item \\textbf{Function declaration:} \\\\
>>\\texttt{%(name)s (%(args)s)} \\\\
"""
apidoc_tex_format = "".join \
( [ _apidoc_head
, _apidoc_middle
, _apidoc_tail
]
)
def _name_comment (self, ** kw) :
format_prec = int (kw ["format_prec"])
result = \
( "%% --- %s %s"
% ( self.name
, "-" * ( format_prec - len (self.name) - 7
)
)
)
return [result]
# end def _name_comment
def _explanation (self, ** kw) :
if not self.explanation :
yield ""
return
yield "\\item \\textbf{Description:} \\\\"
format_prec = max (int (kw ["format_prec"]), 4)
wrapper = textwrap.TextWrapper (width = format_prec)
for l in wrapper.wrap (TFL.tex_quoted (self.explanation)) :
yield l
# end def _explanation
# end class Define
class Define_Constant (Define) :
"""A C-macro #define stament, defining a constant value"""
init_arg_defaults = dict \
( name_len = 0
, scope = TFL.SDG.C.C
, name = None
, value = None
)
front_args = ("name", "value")
h_format = c_format = """
#%(m_head)s%(name)s %(:head=(¡tail=):.value:)s
>%(::*description:)s
"""
_apidoc_middle = \
""">\\item \\textbf{Value:} %(value)s
"""
apidoc_tex_format = "".join \
( [ Define._apidoc_head
, _apidoc_middle
, Define._apidoc_tail
]
)
_autoconvert = dict \
( value = lambda s, k, v : str (v)
)
# end class Define_Constant
class Macro_Block (_Macro_, TFL.SDG.C.Stmt_Group) :
"""Block of macro definitions"""
Ancestor = TFL.SDG.C.Stmt_Group
# end class Macro_Block
class Preprocessor_Error (_Macro_) :
"""A C preprocessor error statement"""
m_head = "error "
init_arg_defaults = dict \
( scope = TFL.SDG.C.HC
, error_msg = ""
)
front_args = ("error_msg", )
h_format = c_format = """
#%(m_head) s%(error_msg)s
"""
# end class Preprocessor_Error
if __name__ != "__main__" :
TFL.SDG.C._Export ("*", "_Macro_")
### __END__ TFL.SDG.C.Macro | [
"you@example.com"
] | you@example.com |
7775be82e021226fc10fea241d6eba2f833ed71c | 88cfb75d37784ac3e82eb4fa1aa5e59d98216318 | /unet/main.py | bac148462d0bd86e0e8b19a4a38fceffcf2e251b | [] | no_license | waveflow-team/MRI_RL | 9d71a708e4637cf0818d1e0266796b76bc5b9abd | 7c60a1caa025b13f1aa41e4c71e0006248ebe2e9 | refs/heads/master | 2023-04-30T22:19:37.039595 | 2021-05-16T15:05:08 | 2021-05-16T15:05:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,112 | py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import pathlib
import random
import shutil
import time
from collections import defaultdict
import numpy as np
import cv2
import torch
import torchvision
from tensorboardX import SummaryWriter
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.autograd import Variable
import sys
sys.path.append('..')
from utils import PSNR, SSIM, NMSE, DC, computePSNR, computeSSIM, computeNMSE
from unet_model import UnetModel
from args import Args
sys.path.append('../fastMRI/')
from subsample import MaskFunc
from dataset import MRIDataset
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def create_datasets(args):
from config import config
train_data = MRIDataset(image_set='train', transform=False, config=config)
dev_data = MRIDataset(image_set='test', transform=False, config=config)
return dev_data, train_data
def create_data_loaders(args):
dev_data, train_data = create_datasets(args)
display_data = []#[dev_data[i] for i in range(0, len(dev_data), len(dev_data) // 16)]
train_loader = DataLoader(
dataset=train_data,
batch_size=args.batch_size,
shuffle=True,
num_workers=0,
pin_memory=True,
)
dev_loader = DataLoader(
dataset=dev_data,
#batch_size=args.batch_size,
batch_size=10,
num_workers=1,
pin_memory=False,
)
#display_loader = DataLoader(
# dataset=display_data,
# batch_size=16,
# num_workers=8,
# pin_memory=True,
#)
return train_loader, dev_loader, None#display_loader
def train_epoch(args, epoch, model, data_loader, optimizer, writer):
model.train()
avg_loss = 0.
start_epoch = start_iter = time.perf_counter()
global_step = epoch * len(data_loader)
iter = 0
while True:
for _, data in enumerate(data_loader):
iter += 1
target, input, _ = data
input = Variable(input).cuda()
target = Variable(target).cuda()
output = model(input)
loss = F.l1_loss(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss = 0.99 * avg_loss + 0.01 * float(loss.data) if iter > 0 else loss.item()
writer.add_scalar('TrainLoss', float(loss.data), global_step + iter)
if iter % args.report_interval == 0:
logging.info(
f'Epoch = [{epoch:3d}/{args.num_epochs:3d}] '
f'Iter = [{iter:4d}/{len(data_loader):4d}] '
f'Loss = {float(loss.data):.4g} Avg Loss = {avg_loss:.4g} '
f'Time = {time.perf_counter() - start_iter:.4f}s',
)
start_iter = time.perf_counter()
if args.iters_per_epoch and iter == args.iters_per_epoch:
break
return avg_loss, time.perf_counter() - start_epoch
def evaluate(args, epoch, model, data_loader, writer):
model.eval()
losses = []
start = time.perf_counter()
early_break = True
PSNR_dict = defaultdict(list)
SSIM_dict = defaultdict(list)
NMSE_dict = defaultdict(list)
count = 0
for i, (target, input, mask) in enumerate(data_loader):
ori_image = target.numpy()
previous_image = input.numpy()
mask = mask.numpy()
count += 1
print(count)
if early_break and count == 101:
break
if count % 100 == 0:
print('tested: ', count)
input = Variable(input, volatile=True).cuda()
target = Variable(target).cuda()
output = model(input)#.squeeze(1)
loss = F.mse_loss(output, target, size_average=False)
losses.append(float(loss.data))
image = output.cpu().data.numpy()
for ii in range(image.shape[0]):
m = min(float(np.min(ori_image[ii, 0])), 0)
def rescale(x):
return (x - m) / (6 - m)
ori_image[ii, 0] = rescale(ori_image[ii, 0])
previous_image[ii, 0] = rescale(previous_image[ii, 0])
image[ii, 0] = rescale(image[ii, 0])
image_with_DC = DC(ori_image[ii, 0], image[ii, 0], mask[ii])
for k in range(2):
key = ['wo', 'DC'][k]
tmp_image = [image[ii, 0], image_with_DC][k]
PSNR_dict[key].append(computePSNR(ori_image[ii, 0], previous_image[ii, 0], tmp_image))
SSIM_dict[key].append(computeSSIM(ori_image[ii, 0], previous_image[ii, 0], tmp_image))
NMSE_dict[key].append(computeNMSE(ori_image[ii, 0], previous_image[ii, 0], tmp_image))
cv2.imwrite('unet_results/'+str(i)+'_'+str(ii)+'.bmp', np.concatenate((ori_image[ii, 0], previous_image[ii, 0], image[ii, 0], np.abs(ori_image[ii, 0] - image[ii, 0]) * 10), axis=1) * 255)
writer.add_scalar('Dev_Loss', np.mean(losses), epoch)
for key in PSNR_dict.keys():
PSNR_list, SSIM_list, NMSE_list = map(lambda x: x[key], [PSNR_dict, SSIM_dict, NMSE_dict])
print('number of test images: ', len(PSNR_list))
psnr_res = np.mean(np.array(PSNR_list), axis=0)
ssim_res = np.mean(np.array(SSIM_list), axis=0)
nmse_res = np.mean(np.array(NMSE_list), axis=0)
print('PSNR', psnr_res)
print('SSIM', ssim_res)
print('NMSE', nmse_res)
return np.mean(losses), time.perf_counter() - start
def save_model(args, exp_dir, epoch, model, optimizer, best_dev_loss, is_new_best):
torch.save(
{
'epoch': epoch,
'args': args,
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'best_dev_loss': best_dev_loss,
'exp_dir': exp_dir
},
f=exp_dir / 'model.pt'
)
if is_new_best:
shutil.copyfile(exp_dir / 'model.pt', exp_dir / 'best_model.pt')
def build_model(args):
model = UnetModel(
in_chans=1,
out_chans=1,
chans=args.num_chans,
num_pool_layers=args.num_pools,
drop_prob=args.drop_prob
).cuda()#to(args.device)
return model
def load_model(checkpoint_file):
checkpoint = torch.load(checkpoint_file)
args = checkpoint['args']
model = build_model(args)
if args.data_parallel:
model = torch.nn.DataParallel(model)
model.load_state_dict(checkpoint['model'])
optimizer = build_optim(args, model.parameters())
optimizer.load_state_dict(checkpoint['optimizer'])
return checkpoint, model, optimizer
def build_optim(args, params):
optimizer = torch.optim.RMSprop(params, args.lr, weight_decay=args.weight_decay)
return optimizer
def main(args):
args.exp_dir.mkdir(parents=True, exist_ok=True)
writer = SummaryWriter(args.exp_dir / 'summary')
if args.test:
checkpoint, model, optimizer = load_model(args.checkpoint)
start_epoch = checkpoint['epoch']
del checkpoint
elif args.resume:
checkpoint, model, optimizer = load_model(args.checkpoint)
args = checkpoint['args']
best_dev_loss = checkpoint['best_dev_loss']
start_epoch = checkpoint['epoch']
del checkpoint
else:
model = build_model(args)
if args.data_parallel:
model = torch.nn.DataParallel(model)
optimizer = build_optim(args, model.parameters())
best_dev_loss = 1e9
start_epoch = 0
logging.info(args)
logging.info(model)
train_loader, dev_loader, display_loader = create_data_loaders(args)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_step_size, args.lr_gamma)
for epoch in range(start_epoch, args.num_epochs):
if args.test:
print('evaluating')
dev_loss, dev_time = evaluate(args, epoch, model, dev_loader, writer)
exit()
scheduler.step(epoch)
train_loss, train_time = train_epoch(args, epoch, model, train_loader, optimizer, writer)
if (epoch + 1) % 5 == 0:
#dev_loss, dev_time = evaluate(args, epoch, model, dev_loader, writer)
is_new_best = True #dev_loss < best_dev_loss
best_dev_loss = 0 #min(best_dev_loss, dev_loss)
save_model(args, args.exp_dir, epoch, model, optimizer, best_dev_loss, is_new_best)
logging.info(
'saved',
#f'Epoch = [{epoch:4d}/{args.num_epochs:4d}] TrainLoss = {train_loss:.4g} '
#f'DevLoss = {dev_loss:.4g} TrainTime = {train_time:.4f}s DevTime = {dev_time:.4f}s',
)
writer.close()
def create_arg_parser():
parser = Args()
parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
parser.add_argument('--num-chans', type=int, default=32, help='Number of U-Net channels')
parser.add_argument('--batch-size', default=16, type=int, help='Mini batch size')
parser.add_argument('--num-epochs', type=int, default=50, help='Number of training epochs')
parser.add_argument('--iters-per-epoch', type=int, default=0, help='Number of iterations per epoch')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--lr-step-size', type=int, default=40,
help='Period of learning rate decay')
parser.add_argument('--lr-gamma', type=float, default=0.1,
help='Multiplicative factor of learning rate decay')
parser.add_argument('--weight-decay', type=float, default=0.,
help='Strength of weight decay regularization')
parser.add_argument('--report-interval', type=int, default=40, help='Period of loss reporting')
parser.add_argument('--data-parallel', action='store_true',
help='If set, use multiple GPUs using data parallelism')
parser.add_argument('--device', type=str, default='cuda',
help='Which device to train on. Set to "cuda" to use the GPU')
parser.add_argument('--exp-dir', type=pathlib.Path, default='checkpoints',
help='Path where model and results should be saved')
parser.add_argument('--resume', action='store_true',
help='If set, resume the training from a previous model checkpoint. '
'"--checkpoint" should be set with this')
parser.add_argument('--checkpoint', type=str,
help='Path to an existing checkpoint. Used along with "--resume"')
parser.add_argument('--test', action='store_true', default=False)
return parser
if __name__ == '__main__':
args = create_arg_parser().parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
main(args)
| [
"869228108@qq.com"
] | 869228108@qq.com |
f7aae61ca9fb68f5eef8a568456a9cbeba341313 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4461294.3.spec | e94005b8acdac761a61712abf74af485df3afd67 | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,999 | spec | {
"id": "mgm4461294.3",
"metadata": {
"mgm4461294.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 3202869,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 311,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 47,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/100.preprocess.removed.fna.gz"
},
"150.dereplication.info": {
"compression": null,
"description": null,
"size": 778,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/150.dereplication.info"
},
"150.dereplication.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 3202872,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/150.dereplication.passed.fna.gz"
},
"150.dereplication.passed.fna.stats": {
"compression": null,
"description": null,
"size": 311,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/150.dereplication.passed.fna.stats"
},
"150.dereplication.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 50,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/150.dereplication.removed.fna.gz"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 4895,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 3202865,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 1830,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 311,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/299.screen.passed.fna.stats"
},
"350.genecalling.coding.faa.gz": {
"compression": "gzip",
"description": null,
"size": 2108466,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/350.genecalling.coding.faa.gz"
},
"350.genecalling.coding.faa.stats": {
"compression": null,
"description": null,
"size": 122,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/350.genecalling.coding.faa.stats"
},
"350.genecalling.coding.fna.gz": {
"compression": "gzip",
"description": null,
"size": 2985893,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/350.genecalling.coding.fna.gz"
},
"350.genecalling.coding.fna.stats": {
"compression": null,
"description": null,
"size": 315,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/350.genecalling.coding.fna.stats"
},
"350.genecalling.info": {
"compression": null,
"description": null,
"size": 714,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/350.genecalling.info"
},
"425.usearch.rna.fna.gz": {
"compression": "gzip",
"description": null,
"size": 354214,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/425.usearch.rna.fna.gz"
},
"425.usearch.rna.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/425.usearch.rna.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 354917,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 2758,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 46,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 49129,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 17935,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 14881,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 135878,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/450.rna.sims.gz"
},
"450.rna.sims.info": {
"compression": null,
"description": null,
"size": 1376,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/450.rna.sims.info"
},
"550.cluster.aa90.faa.gz": {
"compression": "gzip",
"description": null,
"size": 2053494,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/550.cluster.aa90.faa.gz"
},
"550.cluster.aa90.faa.stats": {
"compression": null,
"description": null,
"size": 122,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/550.cluster.aa90.faa.stats"
},
"550.cluster.aa90.info": {
"compression": null,
"description": null,
"size": 1080,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/550.cluster.aa90.info"
},
"550.cluster.aa90.mapping": {
"compression": null,
"description": null,
"size": 39670,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/550.cluster.aa90.mapping"
},
"550.cluster.aa90.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/550.cluster.aa90.mapping.stats"
},
"640.loadAWE.info": {
"compression": null,
"description": null,
"size": 114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/640.loadAWE.info"
},
"650.superblat.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 5002524,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.expand.lca.gz"
},
"650.superblat.expand.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 2572055,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.expand.ontology.gz"
},
"650.superblat.expand.protein.gz": {
"compression": "gzip",
"description": null,
"size": 5348131,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.expand.protein.gz"
},
"650.superblat.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 2224457,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.sims.filter.gz"
},
"650.superblat.sims.gz": {
"compression": "gzip",
"description": null,
"size": 11207948,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.sims.gz"
},
"650.superblat.sims.info": {
"compression": null,
"description": null,
"size": 1343,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.sims.info"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 3596622,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 43897,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 1592364,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 2357826,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 1763366,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 18935197,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 894,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 120,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 157,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 113,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 800,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 2749,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 61,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 8883,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 13688,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 4837,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 1033,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 23134,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 87,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 48044,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.species.stats"
}
},
"id": "mgm4461294.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4461294.3"
}
},
"raw": {
"mgm4461294.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4461294.3"
}
}
} | [
"jared.wilkening@gmail.com"
] | jared.wilkening@gmail.com |
92351aa4578e54f2dbb89bf7349b16cd1eb86635 | c77517d7737ca0c478df697a81b719afe4670730 | /src/extractor/v2ph_downloader.py | 3065776beda545566d8b05e5ec7dfefdb0932314 | [] | no_license | fakegit/Hitomi-Downloader-issues | e7e30aa33b7367c1195d6678f4685fb779c373e1 | 87239ca7d2fdb6c75260256255284bb2ec4ac172 | refs/heads/master | 2023-08-31T05:37:45.708795 | 2023-08-18T04:52:05 | 2023-08-18T04:52:05 | 216,877,228 | 0 | 0 | null | 2023-08-18T07:56:37 | 2019-10-22T17:57:31 | Python | UTF-8 | Python | false | false | 2,745 | py | #coding:utf8
import downloader
from utils import get_ext, LazyUrl, Downloader, try_n, clean_title, get_print, print_error
import ree as re
from translator import tr_
from timee import sleep
import errors
from ratelimit import limits, sleep_and_retry
import clf2
def setPage(url, p):
    """Return *url* with its query string replaced by ``?page=p``.

    Page 1 is the canonical page and carries no query string at all.
    """
    base_url = url.split('?', 1)[0]
    if p <= 1:
        return base_url
    return '{}?page={}'.format(base_url, p)
def getPage(url):
    """Extract the ``page`` query parameter from *url*; default to 1 when absent."""
    match = re.find('page=([0-9]+)', url)
    return int(match) if match else 1
class Image:
    """One downloadable image in an album; URL resolution is rate-limited."""

    def __init__(self, url, referer, p):
        self._url = url
        # Zero-padded sequence number keeps files sorted on disk.
        self.filename = '{:04}{}'.format(p, get_ext(url))
        self.url = LazyUrl(referer, self.get, self)

    @sleep_and_retry
    @limits(4, 1)
    def get(self, _):
        """Return the real image URL (called lazily by the download queue)."""
        return self._url
class Downloader_v2ph(Downloader):
    """Downloader plugin for v2ph.com photo albums."""
    type = 'v2ph'
    URLS = ['v2ph.com/album/']
    MAX_CORE = 4
    MAX_PARALLEL = 1
    display_name = 'V2PH'
    ACCEPT_COOKIES = [r'(.*\.)?v2ph\.com']
    def init(self):
        # Solve the anti-bot challenge once and reuse the resulting session
        # for every subsequent request.
        self.session = clf2.solve(self.url)['session']
    @classmethod
    def fix_url(cls, url):
        # Normalize the album URL by stripping any query string (e.g. ?page=N).
        return url.split('?')[0]
    def read(self):
        # Collect every image URL in the album, then set the task title.
        info = get_info(self.url, self.session)
        for img in get_imgs(self.url, self.session, info['title'], self.cw):
            self.urls.append(img.url)
        self.title = clean_title(info['title'])
@try_n(2)
def get_info(url, session):
    """Fetch the album page and return its metadata (currently just the title)."""
    soup = read_soup(url, session)
    return {'title': soup.find('h1').text.strip()}
# Rate-limited (1 request per 5 seconds) and retried (up to 4 attempts) fetch
# that returns the parsed page.
@try_n(4)
@sleep_and_retry
@limits(1, 5)
def read_soup(url, session):
    return downloader.read_soup(url, session=session)
def get_imgs(url, session, title, cw=None):
    """Walk the album's pages and collect Image objects for every photo.

    Stops when the pagination widget shows no further pages, when a guest
    user hits the page limit, or after a hard cap of 1000 pages.  Raises
    errors.LoginRequired when even the first page has no photo list.
    """
    print_ = get_print(cw)
    imgs = []
    for p in range(1, 1001):
        url = setPage(url, p)
        print_(url)
        try:
            soup = read_soup(url, session)
        except Exception as e:
            # A failure on a later page ends the walk gracefully; a failure
            # on the very first page is a real error and is re-raised.
            if p > 1:
                print_(print_error(e))
                break
            else:
                raise e
        view = soup.find('div', class_='photos-list')
        if view is None:
            if p == 1:
                raise errors.LoginRequired()
            else:
                break # Guest user
        for img in view.findAll('img'):
            img = img.attrs['data-src']
            img = Image(img, url, len(imgs))
            imgs.append(img)
        # Read the highest page number visible in the pagination widget.
        pgn = soup.find('ul', class_='pagination')
        ps = [getPage(a.attrs['href']) for a in pgn.findAll('a')] if pgn else []
        if not ps or p >= max(ps):
            print('max p')
            break
        msg = '{} {} ({} / {})'.format(tr_('읽는 중...'), title, p, max(ps))
        if cw:
            cw.setTitle(msg)
        else:
            print(msg)
    return imgs
| [
"mangrovn@gmail.com"
] | mangrovn@gmail.com |
7063c350be3106e041dfdd46c8437d1c9a94ed5b | 478085586c1024fa9e5f29adfdfa36d2e81ab4a9 | /DBExternal/sheets/admin.py | 6aa187642ddc97e9782a736782afd078a7e53f05 | [] | no_license | lokitparas/dbext | 7936c0818cb3b0b7f58d6bf7d1e0e7349aa923e8 | 960e4858802359431149661f49e32f26bfc40fba | refs/heads/master | 2021-01-10T07:35:35.287299 | 2015-10-25T08:14:07 | 2015-10-25T08:14:07 | 44,876,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from django.contrib import admin
from sheets.models import User
class UserAdmin(admin.ModelAdmin):
list_display = ('user_name', 'full_name', 'email_id')
search_fields = ('user_name', 'full_name','email_id')
admin.site.register(User,UserAdmin)
# Register your models here.
| [
"lokit95@gmail.com"
] | lokit95@gmail.com |
52045a258db9a2bcca56b5f377b9c294935e8f3f | 7b7059a0c2933a4a2d93eef21753efa3cb881ea3 | /pythonclub/club/tests.py | 9be61160b40e8469bea19c1ae10bfba4bcf141b6 | [] | no_license | zacharyforreal/ITC172W19 | 09371efb799513c427e513a57085c8f6f5835ac8 | 78c4c62dfe1e5dc307e45f90d9cc6bfea2994780 | refs/heads/master | 2020-04-15T18:58:28.598149 | 2019-03-04T20:23:56 | 2019-03-04T20:23:56 | 164,932,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,684 | py | from django.test import TestCase
from .models import Meeting, MeetingMinutes
from .forms import MeetingForm
from datetime import datetime
from django.urls import reverse
# Create your tests here.
class MeetingTest(TestCase):
    """Unit tests for the Meeting model."""

    def test_stringOutput(self):
        # str() of a meeting should be its title.
        m = Meeting(meetingtitle='March Meeting')
        self.assertEqual(str(m), m.meetingtitle)

    def test_tablename(self):
        # The model should map onto the 'meeting' database table.
        self.assertEqual(str(Meeting._meta.db_table), 'meeting')
class MeetingDateTest(TestCase):
    """Unit tests for the MeetingDate model.

    NOTE(review): MeetingDate is not in this file's imports (only Meeting and
    MeetingMinutes are) -- confirm and import it from .models.
    """
    def test_stringOutput(self):
        meetingday = MeetingDate(meetingtime='Meeting')
        self.assertEqual(str(meetingday), meetingday.meetingtime)
    def test_tablename(self):
        # Fixed: this previously asserted against TechType, a class that is
        # neither imported nor related to this test case; the 'meetingdate'
        # table belongs to MeetingDate.
        self.assertEqual(str(MeetingDate._meta.db_table), 'meetingdate')
class ResourceTest(TestCase):
    """Unit tests for the Resource model."""

    def test_stringOutput(self):
        # str() of a resource should be its review title.
        res = Resource(reviewtitle='March Meeting')
        self.assertEqual(str(res), res.reviewtitle)

    def test_tablename(self):
        # The model should map onto the 'resource' database table.
        self.assertEqual(str(Resource._meta.db_table), 'resource')
#testing a view
class TestIndex(TestCase):
    """View tests for the site index page."""

    def test_view_url_accessible_by_name(self):
        resp = self.client.get(reverse('index'))
        self.assertEqual(resp.status_code, 200)

    def test_view_uses_correct_template(self):
        resp = self.client.get(reverse('index'))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'club/index.html')
class TestGetMeeting(TestCase):
    """View tests for the meetings listing page."""
    def test_view_url_exists_at_desired_location(self):
        response = self.client.get('/club/meetings')
        self.assertEqual(response.status_code, 200)
    def test_view_url_accessible_by_name(self):
        response = self.client.get(reverse('getmeetings'))
        self.assertEqual(response.status_code, 200)
    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('getmeetings'))
        self.assertEqual(response.status_code, 200)
        # NOTE(review): the template path says 'reviews/meetings.html' although
        # this is the club app -- confirm against the view's actual template.
        self.assertTemplateUsed(response, 'reviews/meetings.html')
class New_Product_Form_Test(TestCase):
    """Validation tests for MeetingForm."""
    # Valid Form Data
    def test_meetingForm_is_valid(self):
        form = MeetingForm(data={'meetingtitle': "Music study", 'meetingday': "15th", 'user': "zack", 'entrydate': "2018-12-17", 'meetingURL':"http:microsoft.com", 'meetingdescription':"lightweight laptop" })
        self.assertTrue(form.is_valid())
    # Invalid Form Data
    def test_UserForm_invalid(self):
        # Fixed: this previously constructed an undefined ProductForm with the
        # same *valid* data as the positive test above, so assertFalse could
        # never pass.  An empty payload leaves required fields missing, which
        # makes the form invalid.
        form = MeetingForm(data={})
        self.assertFalse(form.is_valid())
"noreply@github.com"
] | noreply@github.com |
67fe0dfcb5c153b4c32facb723119ab674916e38 | 0d24433894b0b2955a351fdf63a10173b948b3fc | /teafacto/models/kb/kmm.py | 7ca1f4c4f4c097e27c9903aa3c6c9e5f35652957 | [] | no_license | linxiexiong/teafacto | 9209bea80bd76d84c18b7f8afb353b61f0fba8b2 | 1c749ee66dc21c2efe6b4d105f227c35ae969815 | refs/heads/master | 2021-06-16T15:16:40.064465 | 2017-05-05T18:25:42 | 2017-05-05T18:25:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,256 | py | from math import ceil
import numpy as np
import theano
from teafacto.core.trainutil import SGDBase, Saveable, Profileable, Normalizable, Predictor, uniform
from theano import tensor as T
from teafacto.blocks.seq.rnn import RNUBase
__author__ = 'denis'
# Knowledge Model - Margin objective
# all models here assume single index space
class KMM(SGDBase, Predictor, Profileable, Saveable):
    """Knowledge model trained with a max-margin (hinge) ranking objective.

    Scores a positive path ending against a corrupted (negative) ending and
    minimizes the hinge loss with the configured margin.  Entities and
    relations share a single index space of size ``vocabsize``.
    """
    def __init__(self, vocabsize=10, numrels=0, negrate=1, margin=1.0, **kw):
        # vocabsize: size of the shared entity+relation index space
        # numrels:   number of relation indices (occupying the end of the space)
        # negrate:   negative samples generated per positive example
        # margin:    hinge-loss margin between positive and negative scores
        super(KMM, self).__init__(**kw)
        self.vocabsize = vocabsize
        self.negrate = negrate
        self.margin = margin
        self.numrels = numrels
    @property
    def printname(self):
        return super(KMM, self).printname + "+n"+str(self.negrate)
    def defproblem(self):
        # Build the symbolic training problem: inputs, raw error, total cost.
        pdot, ndot, inps = self.defmodel()
        tErr = self.geterr(pdot, ndot)
        tReg = self.getreg()
        tCost = tErr + tReg
        return inps, tErr, tCost
    def ___train(self, trainX, labels, evalinter=10): # X: z, x, y, v OR r, s, o, v
        # NOTE(review): the triple-underscore name suggests this older training
        # loop was deliberately disabled -- confirm before removing it.
        self.batsize = int(ceil(trainX.shape[0]*1./self.numbats))
        self.tbatsize = theano.shared(np.int32(self.batsize))
        pdot, ndot, inps = self.defmodel()
        tErr = self.geterr(pdot, ndot)
        tReg = self.getreg()
        #embed()
        tCost = tErr + tReg
        #showgraph(tCost)
        #embed() # tErr.eval({inps[0]: [0], inps[1]:[10], gold: [1]})
        trainf = self.gettrainf(inps, [tErr, tCost], tCost)
        validf = self.getvalidf(inps, [tErr])
        err = self.trainloop(trainf=self.getbatchloop(trainf, self.getsamplegen(trainX, labels)),
                             evalinter=evalinter,
                             normf=self.getnormf(),
                             validf=validf)
        return err
    def defmodel(self):
        # Symbolic inputs: start entity, relation path, true and corrupted targets.
        sidx = T.ivector("sidx")
        pathidxs = T.imatrix("pathidxs")
        zidx, nzidx = T.ivectors("zidx", "nzidx") # rhs corruption only
        dotp, ndotp = self.definnermodel(sidx, pathidxs, zidx, nzidx)
        return dotp, ndotp, [sidx, pathidxs, zidx, nzidx]
    def definnermodel(self, sidx, pathidxs, zidx, nzidx):
        raise NotImplementedError("use subclass")
    def getreg(self, regf=lambda x: T.sum(x**2), factor=1./2):
        # L2 regularization over all parameters, weighted by self.twreg.
        return factor * reduce(lambda x, y: x + y,
                               map(lambda x: regf(x) * self.twreg,
                                   self.parameters))
    def geterr(self, pdot, ndot): # max margin
        # Hinge loss: penalize negatives scoring within `margin` of positives.
        comp = T.clip(self.margin - pdot + ndot, 0, np.infty)
        return T.sum(comp)
    @property
    def ownparameters(self):
        return []
    @property
    def depparameters(self):
        return []
    def getnormf(self):
        # No post-update normalization by default; subclasses may override.
        return None
    def getsamplegen(self, data, labels, onebatch=False):
        # Returns a closure producing minibatches with `negrate` corrupted
        # targets (uniformly sampled indices) per positive example.
        batsize = self.batsize if not onebatch else data.shape[0]
        negrate = self.negrate
        def samplegen():
            nonzeroidx = sorted(np.random.randint(0, data.shape[0], size=(batsize,)).astype("int32"))
            trainXsample = data[nonzeroidx, :].astype("int32")
            trainXsample = np.repeat(trainXsample, negrate, axis=0)
            labelsample = labels[nonzeroidx].astype("int32")
            labelsample = np.repeat(labelsample, negrate, axis=0)
            corruptedlabels = np.random.randint(0, self.vocabsize, size=(batsize,)).astype("int32")
            for i in range(negrate-1):
                corruptedlabels = np.append(corruptedlabels, np.random.randint(0, self.vocabsize, size=(batsize,)).astype("int32"), axis=0)
            return [trainXsample[:, 0], trainXsample[:, 1:], labelsample, corruptedlabels] # start, path, target, bad_target
        return samplegen
    def getpredictfunction(self):
        # Compile a scoring function over (start, path, target) triples.
        pdot, _, inps = self.defmodel()
        scoref = theano.function(inputs=[inps[0], inps[1], inps[2]], outputs=pdot)
        def pref(s, path, o):
            args = [np.asarray(i).astype("int32") for i in [s, path, o]]
            return scoref(*args)
        return pref
class EKMM(KMM, Normalizable):
    """KMM with a shared embedding matrix W over the whole index space.

    Subclasses define how the path is traversed (``traverse``) and how a
    candidate target is scored against the final state (``membership``).
    """
    def __init__(self, dim=10, **kw):
        # dim: embedding dimensionality.
        super(EKMM, self).__init__(**kw)
        self.dim = dim
        self.initvars()
    def initvars(self):
        # One embedding row per index (entities and relations alike).
        self.W = theano.shared(uniform((self.vocabsize, self.dim)), name="W")
        return [self.W]
    def getnormf(self):
        # Optional L2 row normalization of the embedding matrices after updates.
        if self._normalize is True:
            upds = []
            for normparam in self.getnormparams():
                norms = normparam.norm(2, axis=1).reshape((normparam.shape[0], 1))
                upds.append((normparam, normparam/norms))
            return theano.function(inputs=[], outputs=[], updates=upds)
        else:
            return None
    def getnormparams(self):
        return [self.W]
    @property
    def printname(self):
        return super(EKMM, self).printname + "+E" + str(self.dim)+"D"
    @property
    def ownparameters(self):
        return [self.W]
    def embed(self, *idxs):
        # Look up one or more index vectors in W; returns a tuple for multiple.
        if len(idxs) == 1:
            return self.W[idxs[0], :]
        else:
            return tuple(map(lambda x: self.W[x, :], idxs))
    def definnermodel(self, sidx, pathidxs, zidx, nzidx):#pathemb: (batsize, seqlen), *emb: (batsize)
        # Scan the traversal over the path, then score the true and corrupted
        # targets against the final state.
        om, _ = theano.scan(fn=self.traverse,
                            sequences=pathidxs.T, # --> (seqlen, batsize)
                            outputs_info=[None] + self.start(sidx)
                            )
        om = om[0] # --> (seqlen, batsize, dim)
        om = om[-1, :, :] # --> (batsize, dim)
        dotp = self.membership(zidx, om, pathidxs)
        ndotp = self.membership(nzidx, om, pathidxs)
        return dotp, ndotp
    def start(self, sidx):
        # Initial scan state: the start entity's embedding.
        return [self.embed(sidx)]
    def traverse(self, x_t, *states):
        raise NotImplementedError("use subclass")
    def membership(self, t, h_tm1, rels):
        raise NotImplementedError("use subclass")
class DistMemberEKMM(EKMM):
    """Scores membership by negative squared Euclidean distance to the target."""

    def membership(self, t, h, rels):
        target = self.embed(t)
        diff = h - target
        return -T.sum(T.sqr(diff), axis=1)
class DotMemberEKMM(EKMM):
    """Scores membership by the dot product of state and target embedding."""

    def membership(self, t, h, rels):
        target = self.embed(t)
        return T.batched_dot(target, h)
class CosMemberEKMM(EKMM):
    """Scores membership by cosine similarity between state and target."""

    def membership(self, t, h, rels):
        target = self.embed(t)
        norm_product = target.norm(2, axis=1) * h.norm(2, axis=1)
        return T.batched_dot(target, h) / norm_product
class AddEKMM(DistMemberEKMM):  # TransE
    """TransE-style traversal: add the relation embedding to the state."""

    def traverse(self, x_t, *h_tm1):  # x_t: (batsize, dim), h_tm1: (batsize, dim)
        h = self.embed(x_t) + h_tm1[0]
        return [h, h]
class AddAddEKMM(DistMemberEKMM): # TransD with m=n
    """TransD-style model: per-entity and per-relation projection vectors
    build a dynamic mapping before the relation translation is added."""
    def __init__(self, innerdim=None, **kw):
        super(AddAddEKMM, self).__init__(**kw)
        self.innerdim = self.dim if innerdim is None else innerdim
    def initvars(self):
        super(AddAddEKMM, self).initvars()
        # X: entity projection vectors; Wr: relation translations;
        # Xr: relation projection vectors.
        # NOTE(review): Xr is created with name="X" (same as self.X above) --
        # confirm this duplicate shared-variable name is not a typo.
        self.X = theano.shared(uniform((self.vocabsize, self.dim)), name="X")
        self.Wr = theano.shared(uniform((self.numrels, self.dim)), name="Wr")
        self.Xr = theano.shared(uniform((self.numrels, self.dim)), name="X")
    @property
    def ownparameters(self):
        return super(AddAddEKMM, self).ownparameters + [self.X, self.Wr, self.Xr]
    def getnormparams(self):
        return super(AddAddEKMM, self).getnormparams() + [self.X, self.Wr, self.Xr]
    def start(self, sidx):
        # Thread both the entity embedding and its projection vector as state.
        return [self.W[sidx, :], self.X[sidx, :]]
    def traverse(self, x_t, *states):
        # Shift shared-space indices into relation-table indices.
        x_t = x_t - self.vocabsize + self.numrels
        h_tm1, h_ptm1 = states
        r = self.Wr[x_t, :]
        rp = self.Xr[x_t, :]
        emod = T.batched_dot(h_tm1, h_ptm1)
        h_tm1 = T.concatenate([h_tm1, T.zeros((h_tm1.shape[0], r.shape[1] - h_tm1.shape[1]), dtype="float32")], axis=1) # pad with zeros
        hproj = (h_tm1.T + rp.T * emod).T
        h = hproj + r
        return [h, h, h]
    def membership(self, t, h, rels):
        # Project the candidate target with the path's last relation before
        # computing the negative squared distance.
        temb = self.embed(t)
        lastrelidx = rels[:, -1] - self.vocabsize + self.numrels
        rp = self.Xr[lastrelidx, :]
        emod = T.batched_dot(temb, self.X[t, :])
        temb = T.concatenate([temb, T.zeros((temb.shape[0], rp.shape[1] - temb.shape[1]), dtype="float32")], axis=1) # pad with zeros
        tproj = (temb.T + rp.T * emod).T
        return -T.sum(T.sqr(h - tproj), axis=1)
class GateAddEKMM(DistMemberEKMM):
    """TransE variant gating the previous state with a per-relation sigmoid."""

    def __init__(self, **kw):
        super(GateAddEKMM, self).__init__(**kw)
        self.R = theano.shared(uniform((self.numrels, self.dim)), name="R")

    def traverse(self, x_t, *h_tm1):
        prev = h_tm1[0]
        # relation indices live at the end of the shared vocab space
        relidx = x_t - self.vocabsize + self.numrels
        gate = T.nnet.sigmoid(self.R[relidx, :])
        h = prev * gate + self.embed(x_t)
        return [h, h]

    @property
    def ownparameters(self):
        return super(GateAddEKMM, self).ownparameters + [self.R]
class FracAddEKMM(GateAddEKMM):
    """Gated convex-style mix of previous state and relation embedding."""

    def traverse(self, x_t, *h_tm1):
        prev = h_tm1[0]
        relidx = x_t - self.vocabsize + self.numrels
        gate = T.nnet.sigmoid(self.R[relidx, :])
        h = prev * gate + (1 - gate) * self.embed(x_t)
        return [h, h]
class EModAddEKMM(DistMemberEKMM):  # better than TransE
    """Adds the relation embedding modulated by a gate computed from the state."""

    def __init__(self, **kw):
        super(EModAddEKMM, self).__init__(**kw)
        self.T = theano.shared(uniform((self.dim, self.dim)), name="T")

    @property
    def ownparameters(self):
        return super(EModAddEKMM, self).ownparameters + [self.T]

    def traverse(self, x_t, *h_tm1):
        prev = h_tm1[0]
        gate = T.nnet.sigmoid(T.dot(prev, self.T))
        h = prev + self.embed(x_t) * gate
        return [h, h]
class EModRFracAddEKMM(DistMemberEKMM):
    """State-gated mix of the relation's general (W) and relation-table (X)
    embeddings, added to the previous state."""
    def initvars(self):
        super(EModRFracAddEKMM, self).initvars()
        # X: second embedding table for relations; F: gate projection matrix.
        self.X = theano.shared(uniform((self.numrels, self.dim)), name="X")
        self.F = theano.shared(uniform((self.dim, self.dim)), name="F")
    @property
    def ownparameters(self):
        return super(EModRFracAddEKMM, self).ownparameters + [self.X, self.F]
    def getnormparams(self):
        return super(EModRFracAddEKMM, self).getnormparams() + [self.X]
    def traverse(self, x_t, *h_tm1):
        h_tm1 = h_tm1[0]
        xemb1 = self.W[x_t, :]
        xemb2 = self.X[x_t - self.vocabsize + self.numrels, :]
        # Gate depends only on the current state.
        gate = T.dot(h_tm1, self.F)
        gate = T.nnet.sigmoid(gate)
        add = xemb2 * (1-gate) + xemb1 * gate
        h = h_tm1 + add
        return [h, h]
class EModRFrac3AddEKMM(DistMemberEKMM):
    """Three-way state-gated mix of relation embeddings (W, X and Y tables)
    added to the previous state."""
    def initvars(self):
        super(EModRFrac3AddEKMM, self).initvars()
        # X, Y: extra relation embedding tables; F, G: gate projections.
        self.X = theano.shared(uniform((self.numrels, self.dim)), name="X")
        self.Y = theano.shared(uniform((self.numrels, self.dim)), name="Y")
        self.F = theano.shared(uniform((self.dim, self.dim)), name="F")
        self.G = theano.shared(uniform((self.dim, self.dim)), name="G")
    @property
    def ownparameters(self):
        return super(EModRFrac3AddEKMM, self).ownparameters + [self.X, self.Y, self.F, self.G]
    def getnormparams(self):
        return super(EModRFrac3AddEKMM, self).getnormparams() + [self.X, self.Y]
    def traverse(self, x_t, *h_tm1):
        h_tm1 = h_tm1[0]
        xemb1 = self.W[x_t, :]
        xemb2 = self.X[x_t - self.vocabsize + self.numrels, :]
        xemb3 = self.Y[x_t - self.vocabsize + self.numrels, :]
        # Both gates depend only on the current state.
        gate1 = T.dot(h_tm1, self.F)
        gate1 = T.nnet.sigmoid(gate1)
        gate2 = T.dot(h_tm1, self.G)
        gate2 = T.nnet.sigmoid(gate2)
        add = (xemb2 * (1-gate1) + xemb1 * gate1) * gate2 + (1-gate2) * xemb3
        h = h_tm1 + add
        return [h, h]
class ERModAddEKMM(DistMemberEKMM):
    """Gate depends on both the relation embedding and the current state."""

    def __init__(self, **kw):
        super(ERModAddEKMM, self).__init__(**kw)
        self.R = theano.shared(uniform((self.dim, self.dim)), name="R")
        self.T = theano.shared(uniform((self.dim, self.dim)), name="T")

    @property
    def ownparameters(self):
        return super(ERModAddEKMM, self).ownparameters + [self.R, self.T]

    def traverse(self, x_t, *h_tm1):
        prev = h_tm1[0]
        xemb = self.embed(x_t)
        gate = T.nnet.sigmoid(T.dot(xemb, self.R) + T.dot(prev, self.T))
        h = prev + xemb * gate
        return [h, h]
class RModEModAddEKMM(DistMemberEKMM): ############## -- ***** --
    """Per-relation matrix transforms the state into a gate that modulates
    the added relation embedding."""
    def initvars(self):
        super(RModEModAddEKMM, self).initvars()
        # One (dim x dim) gate matrix per relation.
        self.R = theano.shared(uniform((self.numrels, self.dim, self.dim)), name="R")
    @property
    def ownparameters(self):
        return super(RModEModAddEKMM, self).ownparameters + [self.R]
    def traverse(self, x_t, *h_tm1):
        h_tm1 = h_tm1[0]
        xembs = self.embed(x_t)
        rmod = self.R[x_t-self.vocabsize+self.numrels, :, :]
        emod = T.batched_dot(h_tm1, rmod)
        gate = T.nnet.sigmoid(emod)
        ad = xembs * gate
        h = h_tm1 + ad
        return [h, h]
class ERModRFracAddEKMM(DistMemberEKMM):
    """Gate from state plus a per-relation offset mixes the relation's two
    embeddings (W and X tables) before adding to the state."""
    def initvars(self):
        super(ERModRFracAddEKMM, self).initvars()
        # R: per-relation gate offset; X: second relation embedding table.
        self.R = theano.shared(uniform((self.numrels, self.dim)))
        self.X = theano.shared(uniform((self.numrels, self.dim)))
    @property
    def ownparameters(self):
        return super(ERModRFracAddEKMM, self).ownparameters + [self.X, self.R]
    def getnormparams(self):
        return super(ERModRFracAddEKMM, self).getnormparams() + [self.X]
    def traverse(self, x_t, *h_tm1):
        h_tm1 = h_tm1[0]
        xembs1 = self.W[x_t, :]
        xembs2 = self.X[x_t-self.vocabsize+self.numrels, :]
        rmod = self.R[x_t-self.vocabsize+self.numrels, :]
        emod = h_tm1 + rmod
        gate = T.nnet.sigmoid(emod)
        add = xembs2 * (1-gate) + xembs1 * gate
        h = h_tm1 + add
        return [h, h]
class RModEModRFracAddEKMM(DistMemberEKMM):
    """Per-relation matrix gates the state, and the gate mixes the relation's
    two embeddings (W and X tables) before adding to the state."""
    def initvars(self):
        super(RModEModRFracAddEKMM, self).initvars()
        # R: one (dim x dim) gate matrix per relation; X: second relation table.
        self.R = theano.shared(uniform((self.numrels, self.dim, self.dim)))
        self.X = theano.shared(uniform((self.numrels, self.dim)))
    @property
    def ownparameters(self):
        return super(RModEModRFracAddEKMM, self).ownparameters + [self.X, self.R]
    def getnormparams(self):
        return super(RModEModRFracAddEKMM, self).getnormparams() + [self.X]
    def traverse(self, x_t, *h_tm1):
        h_tm1 = h_tm1[0]
        xembs1 = self.W[x_t, :]
        xembs2 = self.X[x_t-self.vocabsize+self.numrels, :]
        rmod = self.R[x_t-self.vocabsize+self.numrels, :, :]
        emod = T.batched_dot(h_tm1, rmod)
        gate = T.nnet.sigmoid(emod)
        add = xembs2 * (1-gate) + xembs1 * gate
        h = h_tm1 + add
        return [h, h]
class VecMulEKMM(DotMemberEKMM):  # Bilinear Diag
    """DistMult-style traversal: elementwise product with the relation embedding."""

    def traverse(self, x_t, *h_tm1):  # x_t: (batsize, dim), h_tm1: (batsize, dim)
        h = h_tm1[0] * self.embed(x_t)
        return [h, h]
class VecMulEKMMDist(DistMemberEKMM, VecMulEKMM):
    # VecMul traversal combined with distance-based membership via the MRO.
    pass
class MatMulEKMM(DotMemberEKMM): # RESCAL
    """RESCAL-style traversal: per-relation matrix multiplies the state."""
    def __init__(self, **kw):
        super(MatMulEKMM, self).__init__(**kw)
        # Relation matrices initialized uniformly in [-0.5, 0.5).
        offset = 0.5
        scale = 1.
        self.R = theano.shared((np.random.random((self.numrels, self.dim, self.dim)).astype("float32")-offset)*scale, name="R")
    @property
    def ownparameters(self):
        return super(MatMulEKMM, self).ownparameters + [self.R]
    def traverse(self, x_t, *h_tm1): # x_t : (batsize, dim, dim), h_tm1 : (batsize, dim)
        h_tm1 = h_tm1[0]
        h = T.batched_dot(self.embedR(x_t-self.vocabsize+self.numrels), h_tm1)
        return [h, h]
    def embedR(self, idxs): # pathidxs: (batsize)
        # Look up one relation matrix per batch element.
        return self.R[idxs, :] # return: (batsize, dim, dim)
class MatMulEKMMCos(CosMemberEKMM, MatMulEKMM):
    # RESCAL traversal scored with cosine membership via the MRO.
    pass
class TransAddEKMM(DotMemberEKMM):
    """Project the state into an inner space, translate there, and project back,
    all with per-relation matrices/vectors."""
    def __init__(self, innerdim=10, **kw):
        # innerdim: dimensionality of the intermediate (projected) space.
        super(TransAddEKMM, self).__init__(**kw)
        offset = 0.5
        scale = 1.
        self.innerdim = innerdim
        # Rtrans: dim -> innerdim projection; Radd: translation in inner space;
        # Rtransinv: innerdim -> dim back-projection.  All per-relation.
        self.Rtrans = theano.shared((np.random.random((self.numrels, self.dim, self.innerdim)).astype("float32")-offset)*scale, name="Rtrans")
        self.Radd = theano.shared((np.random.random((self.numrels, self.innerdim)).astype("float32")-offset)*scale, name="Radd")
        self.Rtransinv = theano.shared((np.random.random((self.numrels, self.innerdim, self.dim)).astype("float32")-offset)*scale, name="Rtransinv")
    @property
    def ownparameters(self):
        return super(TransAddEKMM, self).ownparameters + [self.Rtrans, self.Radd, self.Rtransinv]
    def traverse(self, x_t, *h_tm1):
        h_tm1 = h_tm1[0]
        x_t = x_t - self.vocabsize + self.numrels
        h = T.batched_dot(T.batched_dot(h_tm1, self.Rtrans[x_t, :]) + self.Radd[x_t, :], self.Rtransinv[x_t, :])
        return [h, h]
class RNNEKMM(DotMemberEKMM):
    """Traversal driven by a pluggable recurrent unit, attached with `model + rnnu`."""
    def traverse(self, x_t, *h_tm1): # x_t: (batsize, dim), h_tm1: (batsize, dim)
        h_tm1 = h_tm1[0]
        return self.rnnu.rec(self.embed(x_t), h_tm1)
    @property
    def printname(self):
        return super(RNNEKMM, self).printname + "+" + self.rnnu.__class__.__name__
    @property
    def depparameters(self):
        # Parameters owned by the attached recurrent unit.
        return self.rnnu.parameters
    def __add__(self, other):
        # `model + rnnu` attaches the recurrent unit to this model.
        if isinstance(other, RNUBase):
            self.rnnu = other
            self.onrnnudefined()
            return self
        else:
            # NOTE(review): super(EKMM, ...) skips EKMM in the MRO; confirm this
            # is intended rather than super(RNNEKMM, ...).
            return super(EKMM, self).__add__(other)
    def onrnnudefined(self):
        # Hook for subclasses to run once an RNN unit has been attached.
        pass
class ERNNEKMM(RNNEKMM):
    """RNN traversal fed with relation-table indices instead of embeddings."""

    def traverse(self, x_t, *h_tm1):
        relidx = x_t - self.vocabsize + self.numrels
        return self.rnnu.rec(relidx, h_tm1[0])
class RNNEOKMM(RNNEKMM): # is this still useful? TODO
    """RNN traversal with a separate linear output projection before scoring."""
    def onrnnudefined(self):
        self.initwout()
    def initwout(self):
        # Output projection from the RNN's inner dimension back to embedding space.
        offset = 0.5
        scale = 0.1
        self.Wout = theano.shared((np.random.random((self.rnnu.innerdim, self.dim)).astype("float32")-offset)*scale, name="Wout")
    def membership(self, o, t):
        # NOTE(review): signature differs from EKMM.membership(t, h, rels) --
        # confirm how this override is wired into definnermodel.
        om = T.dot(o, self.Wout)
        return T.batched_dot(om, t)
    def membership_add(self, o, t):
        # Distance-based alternative to the dot-product membership above.
        om = T.dot(o, self.Wout)
        return -T.sum(T.sqr(om - t), axis=1)
    @property
    def ownparameters(self):
        return super(RNNEOKMM, self).ownparameters + [self.Wout]
    @property
    def printname(self):
        return super(RNNEKMM, self).printname + "+" + self.rnnu.__class__.__name__ + ":" + str(self.rnnu.innerdim) + "D"
"lukovnik@drogon.iai.uni-bonn.de"
] | lukovnik@drogon.iai.uni-bonn.de |
92bba575db131ecb81e258e799c8fe97176c7229 | 790000422363c1f25856a893ee18c057c93b97c5 | /venv/Scripts/django-admin.py | 191f3a9c0b2534ec50f17d728bc2ba2fb5f3ba65 | [] | no_license | TaurusTTT/pythonProject | 677f8806a2c66d0e1c36d01496ba7ff539e0bfc1 | 161eca3d378cd09cd4c197a8e645a3b67ba9cd9c | refs/heads/main | 2023-05-01T04:53:30.727376 | 2021-05-21T06:33:33 | 2021-05-21T06:33:33 | 369,411,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | #!e:\pythonproject\venv\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
# Vendored Django compatibility shim: django-admin.py was deprecated in 3.1.
# On Django 4.0+ the deprecation-warning class no longer exists, so importing
# it fails and we raise a clearer removal message instead.
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Warn, then delegate to the regular django-admin entry point.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"2259167291@qq.com"
] | 2259167291@qq.com |
902f951e4f1ddeb0e5cb517b5c5edece567b9a8a | ee749b299eea02f1b75c7eb4f88634337eda61f3 | /canvas.py | 44ec4c501076a23d2db2c7ae87e06078bc2632aa | [] | no_license | CommanderAsdasd/coregl-python | 807bcd4c27916850cd716a57a34b982d082f5b4a | 1b7bdf1e57c5cc5bf25448bac3ab02975988c068 | refs/heads/master | 2021-05-26T21:03:42.501566 | 2012-11-30T18:17:33 | 2012-11-30T18:17:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | from PyQt4 import QtCore
from PyQt4.QtOpenGL import *
from OpenGL.GL import *
from time import time
# Set this 'None' to refresh as rapidly as possible
ThrottleFps = 60
class Canvas(QGLWidget):
    """OpenGL drawing surface that repaints itself on a timer.

    Requests an OpenGL 3.2 core-profile context when the installed PyQt
    build supports it (QGLFormat.setVersion exists); otherwise falls back
    to the default legacy context.
    """
    def __init__(self, parent):
        if hasattr(QGLFormat, 'setVersion'):
            f = QGLFormat(); f.setVersion(3, 2)
            f.setProfile(QGLFormat.CoreProfile)
            c = QGLContext(f, None)
            QGLWidget.__init__(self, c, parent)
        else:
            QGLWidget.__init__(self, parent)
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.updateGL)
        # A 0 ms interval means "fire as often as the event loop allows".
        interval = 1000.0 / ThrottleFps if ThrottleFps else 0
        self.timer.start( interval )
    def paintGL(self):
        # Qt paint callback: delegate to the overridable draw() hook.
        self.draw()
    def updateGL(self):
        # Timer slot: draw, then request a widget repaint.
        self.draw()
        self.update()
    def draw(self):
        # Subclasses override this with the actual GL drawing code.
        pass
| [
"philiprideout@gmail.com"
] | philiprideout@gmail.com |
03c89f87bc946fe9d2a1f054e5f392aa88cc88c2 | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /carbon/common/script/net/httpAuth.py | 4e0d808e60ebe4b4b14cadffc1f8dc510f115517 | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,364 | py | #Embedded file name: carbon/common/script/net\httpAuth.py
import base
import cherrypy
import httpJinja
import macho
import blue
import const
import base64
from datetime import datetime
SESSION_KEY = '_cp_username'
AUTH_LOGIN_URL = '/auth/login'
DEFAULT_URL = '/default.py'
def CreateSession(username, password):
    """Create a machoNet session and attach an ESP sub-session that carries
    the raw credentials (read later by the server-pages layer)."""
    session = base.CreateSession()
    session.esps = ESPSession(None, session.sid)
    session.esps.contents['username'] = username
    session.esps.contents['password'] = password
    return session
def EndSession():
    """Delete the CherryPy session object and expire its session cookie."""
    cherrypy.session.delete()
    cherrypy.lib.sessions.expire()
def CheckCredentials(username, password):
    """Verify username/password against the authentication service.

    Returns an error-message string on failure, or None on success (the
    authenticated macho session is installed into the CherryPy session as
    a side effect).
    """
    sess = CreateSession(username, password)
    if macho.mode == 'client':
        # Client mode has no authentication service: accept locally.
        cherrypy.session['machoSession'] = sess
        return
    auth = base.GetServiceSession('cherry').ConnectToAnyService('authentication')
    sptype = const.userConnectTypeServerPages
    try:
        sessstuff, _ = auth.Login(sess.sid, username, password, None, sptype, cherrypy.request.remote.ip)
    except UserError:
        return u'Incorrect username or password'
    except Exception:
        return u'Incorrect username or password'
    session = CreateSession(username, password)  # NOTE(review): result unused; looks redundant -- confirm
    sessstuff['role'] |= sess.role
    for otherSession in base.FindSessions('userid', [sessstuff['userid']]):
        # Enforce a single live session per user: close pre-existing ones.
        otherSession.LogSessionHistory('Usurped by user %s via HTTP using local authentication' % username)
        base.CloseSession(otherSession)
    cherrypy.session['machoSession'] = sess
    sess.SetAttributes(sessstuff)
def CheckAuth(*args, **kwargs):
    """CherryPy before_handler hook: redirect to the login page unless the
    session carries a username and every 'auth.require' condition passes."""
    assets = cherrypy.request.config.get('tools.staticdir.dir')
    cherrypy.request.beginTime = datetime.now()
    if assets not in cherrypy.request.path_info:
        conditions = cherrypy.request.config.get('auth.require', None)
        if conditions is not None:
            pathInfo = cherrypy.request.path_info
            if len(cherrypy.request.query_string):
                pathInfo = '%s?%s' % (pathInfo, cherrypy.request.query_string)
            if pathInfo in [AUTH_LOGIN_URL, DEFAULT_URL]:
                authLogin = AUTH_LOGIN_URL
            else:
                # Remember where the user was headed (base64 in the URL) so
                # the login page can bounce back after authentication.
                authLogin = '%s?from_page=%s' % (AUTH_LOGIN_URL, base64.urlsafe_b64encode(pathInfo))
            username = cherrypy.session.get(SESSION_KEY)
            if username:
                cherrypy.request.login = username
                # Each condition is a zero-argument callable; any failure
                # sends the user to the login page.
                for condition in conditions:
                    if not condition():
                        raise cherrypy.HTTPRedirect(authLogin)
            else:
                raise cherrypy.HTTPRedirect(authLogin)
# Register CheckAuth as a CherryPy tool so it runs before each request handler.
cherrypy.tools.auth = cherrypy.Tool('before_handler', CheckAuth)
def Require(*conditions):
    """Decorator that appends access-control *conditions* to a handler's
    CherryPy config under the 'auth.require' key (consumed by CheckAuth)."""
    def decorate(f):
        # Create the per-handler config dict on first use, then grow the
        # condition list in place so stacked decorators accumulate.
        if not hasattr(f, '_cp_config'):
            f._cp_config = dict()
        f._cp_config.setdefault('auth.require', []).extend(conditions)
        return f
    return decorate
def MemberOf(groupName):
    """Build a zero-argument credential check for group membership.

    Bug fix: this previously did ``return check()``, evaluating the check
    once at setup time and storing a boolean, while CheckAuth expects each
    stored condition to be a *callable* it can invoke per request (as
    NameIs and AllOf provide).  Return the function itself instead.
    """
    def check():
        # Stub group database: only user 'joe' belongs to group 'admin'.
        return cherrypy.request.login == 'joe' and groupName == 'admin'
    return check
def NameIs(required_username):
    """Condition: the logged-in user must be exactly *required_username*."""
    def check():
        return required_username == cherrypy.request.login
    return check
def AnyOf(*conditions):
    """Return a condition that passes when at least one of *conditions* does.

    Bug fix: this previously did ``return check()``, evaluating all the
    conditions once at setup time and returning a boolean, while CheckAuth
    invokes each stored condition per request.  Return the callable itself,
    mirroring AllOf.
    """
    def check():
        # Short-circuits on the first passing condition.
        for condition in conditions:
            if condition():
                return True
        return False
    return check
def AllOf(*conditions):
    """Return a condition that passes only when every one of *conditions*
    passes (vacuously true with no conditions)."""
    def check():
        # all() short-circuits on the first failing condition, exactly like
        # the explicit loop it replaces.
        return all(condition() for condition in conditions)
    return check
class ESPSession:
    """Per-web-session state bag for the ESP (server pages) layer."""
    def __init__(self, owner, sid):
        self.codePage = 0
        self.contents = {}  # arbitrary key/value storage (e.g. credentials)
        self.LCID = 0
        self.sessionID = sid
        self.timeout = 20  # presumably minutes -- TODO confirm
        self.authenticated = 0
        self.username = ''
        self.password = ''
        self.owner = owner
        self.flatkokudeig = blue.os.GetWallclockTimeNow()  # creation wallclock time
        self.remappings = {}
class AuthController(object):
    """CherryPy controller exposing the login/logout pages (/auth/...)."""
    __guid__ = 'httpAuth.AuthController'
    def on_login(self, username):
        """Called on successful login"""
        pass
    def on_logout(self, username):
        """Called on logout"""
        pass
    def get_loginform(self, username, msg = None, from_page = '/'):
        """Build the template context dict for the login form page."""
        sp = cherrypy.sm.GetService('SP')
        try:
            background_color = sp.Color()
        except Exception:
            # NOTE(review): the fallback repeats the very call that failed;
            # looks like a decompilation artifact or latent bug -- confirm.
            background_color = sp.Color()
        return {'msg': msg,
         'style': 'background-color: %s; color: black' % background_color,
         'sp': sp.Title(),
         'server': cherrypy.prefs.clusterName,
         'generate_time': datetime.now() - cherrypy.request.beginTime,
         'username': 'sp' if prefs.clusterMode == 'LOCAL' else ''}
    @cherrypy.expose
    @cherrypy.tools.jinja(template='AuthController_login.html')
    def login(self, username = None, password = None, from_page = '/'):
        """Render the login form when no credentials were posted; otherwise
        validate them and redirect back to the originally requested page."""
        if username is None or password is None:
            return self.get_loginform('', from_page=from_page)
        error_msg = CheckCredentials(username, password)
        if error_msg:
            return self.get_loginform(username, error_msg, from_page)
        cherrypy.session.regenerate()
        # Mark the CherryPy session as authenticated for CheckAuth.
        cherrypy.session[SESSION_KEY] = cherrypy.request.login = username
        self.on_login(username)
        if from_page != '/':
            # from_page round-trips through the URL base64-encoded (see CheckAuth).
            from_page = base64.urlsafe_b64decode(str(from_page))
        raise cherrypy.HTTPRedirect(from_page or '/')
    @cherrypy.expose
    def logout(self, from_page = '/'):
        """Clear the login marker, close any macho session, and redirect."""
        sess = cherrypy.session
        username = sess.get(SESSION_KEY, None)
        sess[SESSION_KEY] = None
        if username:
            cherrypy.request.login = None
            self.on_logout(username)
        if 'machoSession' in cherrypy.session:
            sess = cherrypy.session['machoSession']
            sess.LogSessionHistory('Web session closed by logging out %s' % str(session.userid))
            base.CloseSession(sess)
            EndSession()
        raise cherrypy.HTTPRedirect(from_page or '/')
# Names re-exported into the engine's global service namespace.
exports = {'httpAuth.CreateSession': CreateSession,
 'httpAuth.EndSession': EndSession,
 'httpAuth.CheckCredentials': CheckCredentials,
 'httpAuth.CheckAuth': CheckAuth,
 'httpAuth.Require': Require,
 'httpAuth.MemberOf': MemberOf,
 'httpAuth.NameIs': NameIs,
 'httpAuth.AnyOf': AnyOf,
 'httpAuth.AllOf': AllOf}
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
f9f43a57aff6d186fac941ac113039397543bfa9 | 04215e8902d2f9166a7e1d8d71fda3ada2aa698f | /PPleap64-20190507T002238Z-001/PPleap64/src/PPleap.spec | 9336ccc120834aaf904a21fd519d13fee6f25f51 | [] | no_license | kewigit/HOMAKE-LIFE | 6253b02f120e89f7c96800ddde43163be9b691fa | c31399f083164a9a7770fd04d3bfb7bbc5792926 | refs/heads/master | 2020-05-20T00:34:56.445366 | 2019-05-07T00:26:34 | 2019-05-07T00:26:34 | 185,290,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | spec | # -*- mode: python -*-
# PyInstaller build spec for PPleap (run with `pyinstaller PPleap.spec`).
# No bytecode encryption is used.
block_cipher = None
# Analyze the entry script and collect its imports, binaries and data files.
a = Analysis(['PPleap.py'],
             pathex=['C:\\leappy\\PPleap\\src'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Bundle the pure-Python modules into the PYZ archive.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# Build a console executable; binaries, zipfiles and datas are packed
# directly into the EXE (single-file layout).
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='PPleap',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          runtime_tmpdir=None,
          console=True )
| [
"noreply@github.com"
] | noreply@github.com |
57d931978089c77e1a9bf8f1f030c7080d8ae3e2 | 0fd4b9a8e6547181fcced852cf36de718435bcf9 | /setup.py | 4344a0b29b1d60d9a219005fda1e59f0feaeb767 | [
"Apache-2.0"
] | permissive | mrphil007/fitbit-to-sqlite | cd5de223f034f5df23c4e3262016d39df891350a | 4d3251b21d06535a42b1b6dad47ded8d91085a14 | refs/heads/master | 2023-03-02T04:17:16.119924 | 2021-02-08T21:51:30 | 2021-02-08T21:51:30 | 293,362,280 | 13 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | from setuptools import setup
import os
VERSION = "0.6"
def get_long_description():
    """Read and return README.md (located next to this file) as text."""
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, "README.md"), encoding="utf8") as fp:
        return fp.read()
setup(
name="fitbit-to-sqlite",
description="Save data from Fitbit Takeout to an SQLite database",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Phil Rossiter",
url="https://github.com/mrphil007/fitbit-to-sqlite",
license="Apache License, Version 2.0",
version=VERSION,
packages=["fitbit_to_sqlite"],
entry_points="""
[console_scripts]
fitbit-to-sqlite=fitbit_to_sqlite.cli:cli
""",
install_requires=["sqlite-utils>=2.7.2", "click"],
extras_require={"test": ["pytest"]},
tests_require=["fitbit-to-sqlite[test]"],
)
| [
"phil_rossiter@hotmail.com"
] | phil_rossiter@hotmail.com |
47e63239e3fe7aa7c0a976c444fa2b54ccbe19be | e0f1741c37dfe6301c16a3fb1d1f43f80107ae9c | /anchor/app/handlers/LoadHandler.py | 8ce3d2f4a2420d6a6c50d7b76fa50b3f6527b019 | [
"Apache-2.0"
] | permissive | ysenarath/anchor | 3a5a253903d993bef72277c26dbcf300aa45503f | 63dc0cd7aabda4a15c4c9f4b63089a36c5b0b97b | refs/heads/master | 2020-04-17T14:41:37.186636 | 2019-02-04T11:17:44 | 2019-02-04T11:17:44 | 166,667,307 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | class LoadHandler(object):
    def __init__(self, app, container):
        # app: application object (exposes .platform); container: window
        # wrapper that owns the navigation bar and the embedded CEF widget.
        self.app = app
        # True until the first page load starts; gates one-time focus fixes.
        self.initial_app_loading = True
        self.container = container
    def OnLoadingStateChange(self, **_):
        # CEF callback: refresh the navigation bar's widget states.
        self.container.navigation_bar.updateState()
    def OnLoadStart(self, browser, **_):
        # CEF callback fired when a load begins: mirror the current URL into
        # the navigation bar's address field.
        self.container.navigation_bar.url.setText(browser.GetUrl())
        if self.initial_app_loading:
            self.container.navigation_bar.cef_widget.setFocus()
            # Temporary fix no. 2 for focus issue on Linux (Issue #284)
            if self.app.platform == 'LINUX':
                print('[qt.py] LoadHandler.OnLoadStart: keyboard focus fix no. 2 (Issue #284)')
                browser.SetFocus(True)
            self.initial_app_loading = False
| [
"wayasas@gmail.com"
] | wayasas@gmail.com |
d148508116d7f6a7c1fb1f99872f33438fbfd494 | ddac4cbcec1324137512670a074c881872131042 | /main.py | 3152ed4b7e74a4a26130b8df43d1be1fa5588104 | [] | no_license | gyllone/pidmining | 0973af97c1ae2b880a4378f8ffdc3ec4c70da0fe | 2fe7af59ccd2e10c630df118f227fc10f77a8052 | refs/heads/master | 2022-04-02T02:55:25.888261 | 2019-12-03T01:17:04 | 2019-12-03T01:17:04 | 225,501,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,862 | py | import hashlib
import time
import math
from multiprocessing import Pool, Queue
maxnounce = 0xFFFFFFFF
Kp = 0.00005
Ki = 0.000001
Kd = 0.000001
class BlockHeader:
def __init__(self, prev_hash, merkleroot, nounce, difficulty):
self.prev_hash = prev_hash
self.timestamp = time.time()
self.merkleroot = merkleroot
self.nounce = nounce
self.difficulty = difficulty
self.hash = self.__hash()
def __serialize(self):
return self.prev_hash + int(self.timestamp).to_bytes(4, byteorder='big', signed=False) + self.merkleroot + \
self.nounce.to_bytes(4, byteorder='big', signed=False) + self.difficulty.to_bytes(20, byteorder='big', signed=False)
def __hash(self):
return hashlib.sha1(self.__serialize()).digest()
def __repr__(self):
return 'hash: {}, timestamp: {}, nounce: {}, target: {}'.format(self.hash.hex(), self.timestamp, self.nounce, self.difficulty)
def pid(prev_difficulty, err, err_sum, prev_err):
prev_difficulty_index = math.log2(prev_difficulty)
ut = Kp * err + Ki * err_sum + Kd * (err - prev_err)
new_difficulty_index = prev_difficulty_index - ut
return int(2 ** new_difficulty_index)
def assigning(prev_hash, startnounce, stopnounce, difficulty, merkleroot):
for n in range(startnounce, stopnounce):
header = BlockHeader(prev_hash, merkleroot, n, difficulty)
if int.from_bytes(header.hash, byteorder='big', signed=False) < difficulty:
# queue.put(header)
break
def mining(nodes, prev_hash, difficulty, merkleroot):
p = Pool(nodes)
q = Queue(nodes)
base = maxnounce // nodes
for i in range(nodes):
p.apply_async(assigning, args=(prev_hash, int(i * base), int((i + 1) * base), difficulty, merkleroot, q))
header = q.get()
p.close()
p.terminate()
q.close()
return header
def main():
target = 60
merkleroot = hashlib.sha1(b'abcdefghijklmnopqrstuvwxyz').digest()
prev_hash = hashlib.sha1(b'0').digest()
difficulty = 0x0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
nodes = 4
err_set = []
timestamp = time.time()
header = mining(nodes, prev_hash, difficulty, merkleroot)
err_set.append(target - header.timestamp + timestamp)
print('block height: 0, difficulty: {}, spent time: {}', difficulty, header.timestamp - timestamp)
for h in range(0, 500):
timestamp = header.timestamp
if h == 0:
difficulty = pid(difficulty, err_set[0], err_set[0], 0)
else:
difficulty = pid(difficulty, err_set[h], sum(err_set), err_set[h] - err_set[h-1])
header = mining(nodes, header.prev_hash, difficulty, merkleroot)
err_set.append(target - header.timestamp + timestamp)
print('block height: {}, difficulty: {}, spent time: {}', h + 1, difficulty, header.timestamp - timestamp)
if __name__ == '__main__':
main()
mod = SourceModule("""
#define GET_UINT32_BE_GPU(n,b,i)\
{\
(n) = ( (unsigned long) (b)[(i) + 3] << 24 )\
| ( (unsigned long) (b)[(i) + 2] << 16 )\
| ( (unsigned long) (b)[(i) + 1] << 8 )\
| ( (unsigned long) (b)[(i) ] );\
}
#define RETURN_UINT32_BE(b,i)\
(\
( (unsigned long) (b)[(i) ] << 24 )\
| ( (unsigned long) (b)[(i) + 1] << 16 )\
| ( (unsigned long) (b)[(i) + 2] << 8 )\
| ( (unsigned long) (b)[(i) + 3] )\
)
#define PUT_UINT32_BE(n,b,i)\
{\
(b)[(i) ] = (unsigned char) ( (n) >> 24 ); \
(b)[(i) + 1] = (unsigned char) ( (n) >> 16 ); \
(b)[(i) + 2] = (unsigned char) ( (n) >> 8 ); \
(b)[(i) + 3] = (unsigned char) ( (n) ); \
}
#define TRUNCLONG(x) (x)
#define ROTATER(x,n) (((x) >> (n)) | ((x) << (32 - (n))))
#define SHIFTR(x,n) ((x) >> (n))
#define LETOBE32(i) (((i) & 0xff) << 24) + (((i) & 0xff00) << 8) + (((i) & 0xff0000) >> 8) + (((i) >> 24) & 0xff)
#define padding_256(len) (((len) & 0x3f) < 56) ? (56 - ((len) & 0x3f)) : (120 - ((len) & 0x3f))
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
#define R(t) \
temp = extended[block_index + t - 3] ^ extended[block_index + t - 8] ^ \
extended[block_index + t - 14] ^ extended[block_index + t - 16]; \
extended[block_index + t] = S(temp,1); \
typedef struct {
unsigned long state[5];
} sha1_gpu_context;
__device__ void sha1_gpu_process (sha1_gpu_context *ctx, unsigned long W[80]) {
unsigned long A, B, C, D, E;
A = ctx->state[0];
B = ctx->state[1];
C = ctx->state[2];
D = ctx->state[3];
E = ctx->state[4];
#define P(a,b,c,d,e,x) \
{ \
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \
}
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, W[0] );
P( E, A, B, C, D, W[1] );
P( D, E, A, B, C, W[2] );
P( C, D, E, A, B, W[3] );
P( B, C, D, E, A, W[4] );
P( A, B, C, D, E, W[5] );
P( E, A, B, C, D, W[6] );
P( D, E, A, B, C, W[7] );
P( C, D, E, A, B, W[8] );
P( B, C, D, E, A, W[9] );
P( A, B, C, D, E, W[10] );
P( E, A, B, C, D, W[11] );
P( D, E, A, B, C, W[12] );
P( C, D, E, A, B, W[13] );
P( B, C, D, E, A, W[14] );
P( A, B, C, D, E, W[15] );
P( E, A, B, C, D, W[16] );
P( D, E, A, B, C, W[17] );
P( C, D, E, A, B, W[18] );
P( B, C, D, E, A, W[19] );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, W[20] );
P( E, A, B, C, D, W[21] );
P( D, E, A, B, C, W[22] );
P( C, D, E, A, B, W[23] );
P( B, C, D, E, A, W[24] );
P( A, B, C, D, E, W[25] );
P( E, A, B, C, D, W[26] );
P( D, E, A, B, C, W[27] );
P( C, D, E, A, B, W[28] );
P( B, C, D, E, A, W[29] );
P( A, B, C, D, E, W[30] );
P( E, A, B, C, D, W[31] );
P( D, E, A, B, C, W[32] );
P( C, D, E, A, B, W[33] );
P( B, C, D, E, A, W[34] );
P( A, B, C, D, E, W[35] );
P( E, A, B, C, D, W[36] );
P( D, E, A, B, C, W[37] );
P( C, D, E, A, B, W[38] );
P( B, C, D, E, A, W[39] );
#undef K
#undef F
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, W[40] );
P( E, A, B, C, D, W[41] );
P( D, E, A, B, C, W[42] );
P( C, D, E, A, B, W[43] );
P( B, C, D, E, A, W[44] );
P( A, B, C, D, E, W[45] );
P( E, A, B, C, D, W[46] );
P( D, E, A, B, C, W[47] );
P( C, D, E, A, B, W[48] );
P( B, C, D, E, A, W[49] );
P( A, B, C, D, E, W[50] );
P( E, A, B, C, D, W[51] );
P( D, E, A, B, C, W[52] );
P( C, D, E, A, B, W[53] );
P( B, C, D, E, A, W[54] );
P( A, B, C, D, E, W[55] );
P( E, A, B, C, D, W[56] );
P( D, E, A, B, C, W[57] );
P( C, D, E, A, B, W[58] );
P( B, C, D, E, A, W[59] );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, W[60] );
P( E, A, B, C, D, W[61] );
P( D, E, A, B, C, W[62] );
P( C, D, E, A, B, W[63] );
P( B, C, D, E, A, W[64] );
P( A, B, C, D, E, W[65] );
P( E, A, B, C, D, W[66] );
P( D, E, A, B, C, W[67] );
P( C, D, E, A, B, W[68] );
P( B, C, D, E, A, W[69] );
P( A, B, C, D, E, W[70] );
P( E, A, B, C, D, W[71] );
P( D, E, A, B, C, W[72] );
P( C, D, E, A, B, W[73] );
P( B, C, D, E, A, W[74] );
P( A, B, C, D, E, W[75] );
P( E, A, B, C, D, W[76] );
P( D, E, A, B, C, W[77] );
P( C, D, E, A, B, W[78] );
P( B, C, D, E, A, W[79] );
#undef K
#undef F
ctx->state[0] += A;
ctx->state[1] += B;
ctx->state[2] += C;
ctx->state[3] += D;
ctx->state[4] += E;
}
__global__ void sha1_kernel_global (unsigned char *data, sha1_gpu_context *ctx, int total_threads, unsigned long *extended) {
int thread_index = threadIdx.x + blockDim.x * blockIdx.x;
int e_index = thread_index * 80;
int block_index = thread_index * 64;
unsigned long temp, t;
if (thread_index > total_threads -1)
return;
GET_UINT32_BE( extended[e_index ], data + block_index, 0 );
GET_UINT32_BE( extended[e_index + 1], data + block_index, 4 );
GET_UINT32_BE( extended[e_index + 2], data + block_index, 8 );
GET_UINT32_BE( extended[e_index + 3], data + block_index, 12 );
GET_UINT32_BE( extended[e_index + 4], data + block_index, 16 );
GET_UINT32_BE( extended[e_index + 5], data + block_index, 20 );
GET_UINT32_BE( extended[e_index + 6], data + block_index, 24 );
GET_UINT32_BE( extended[e_index + 7], data + block_index, 28 );
GET_UINT32_BE( extended[e_index + 8], data + block_index, 32 );
GET_UINT32_BE( extended[e_index + 9], data + block_index, 36 );
GET_UINT32_BE( extended[e_index +10], data + block_index, 40 );
GET_UINT32_BE( extended[e_index +11], data + block_index, 44 );
GET_UINT32_BE( extended[e_index +12], data + block_index, 48 );
GET_UINT32_BE( extended[e_index +13], data + block_index, 52 );
GET_UINT32_BE( extended[e_index +14], data + block_index, 56 );
GET_UINT32_BE( extended[e_index +15], data + block_index, 60 );
for (t = 16; t < 80; t++) {
temp = extended[e_index + t - 3] ^ extended[e_index + t - 8] ^ extended[e_index + t - 14] ^ extended[e_index + t - 16];
extended[e_index + t] = S(temp,1);
}
__syncthreads();
if (thread_index == total_threads - 1) {
for (t = 0; t < total_threads; t++)
sha1_gpu_process (ctx, (unsigned long*)&extended[t * 80]);
}
}
""") | [
"45535386+gyl19930522@users.noreply.github.com"
] | 45535386+gyl19930522@users.noreply.github.com |
4466a2abca31c0e9c8d3a8409692b09c96f09089 | 81a43dc312d177f9cb3ad4fc53e88843bce2613e | /dataloader/valdata.py | a25898d71437306cbbcb2538b5c545a5f12e5085 | [] | no_license | 1006927966/multi_label-classification- | 055aec013b8e39da655d576df28ea1b54ac91baa | dd8acfd7a65a4017dbf73267a7e5ec8dc64eb38e | refs/heads/master | 2023-08-20T15:06:11.665465 | 2021-10-19T06:30:03 | 2021-10-19T06:30:03 | 418,796,504 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | from torch.utils import data
import torchvision
import os
from PIL import Image
import cv2
import torch
class Testdata(data.Dataset):
def __init__(self, txtpath, nclass):
self.nclass = nclass
self.txtpath = txtpath
with open(txtpath, "r") as f:
lines = f.readlines()
self.picpaths = []
self.labels = []
for line in lines:
line = line.strip()
factors = line.split(',')
self.picpaths.append(factors[0])
label = []
for j in range(1, len(factors)):
label.append(int(factors[j]))
self.labels.append(label)
#self.picpaths = self.picpaths[:100]
#self.labels = self.labels[:100]
self.transforms = torchvision.transforms.Compose([
torchvision.transforms.Resize((224,224)),
torchvision.transforms.ToTensor(),
])
def __len__(self):
return len(self.picpaths)
def __getitem__(self, index):
picpath = self.picpaths[index]
#label = self.labels[index]
img = Image.open(picpath).convert('RGB')
img = self.transforms(img)
label = self.labels[index]
target = torch.zeros(self.nclass, dtype=torch.long)
for intx in label:
target[intx] = 1
return img, target
| [
"wujilong@58.com"
] | wujilong@58.com |
2bce87c9c7fb3f4e749371dbf2235b8eda903e80 | a9a91f8d1de0672f9c6fddde448a3fd09c8c8c1c | /bot_en/data/merge_intents.py | f07655ae271e9217a159d85993cc5a4a32b5c82a | [] | no_license | bobokingbao/opn_Rasa_ch | bc49be28d864bdbfbbc5e0e58a9d7052f56f74ce | 371c105927a73478bf8ec22f0e45331488e5892e | refs/heads/master | 2022-02-28T05:22:52.637934 | 2019-09-26T10:45:01 | 2019-09-26T10:45:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | import glob
import os
def write_intent_data(data_dir, outfile):
files = glob.glob('intents/*.md')
for fname in files:
with open(fname, 'r') as infile:
outfile.write(infile.read() + '\n')
outfile.write('\n')
def create_train_file(train_file, data_dir):
with open(train_file, 'w') as outfile:
write_intent_data(data_dir, outfile)
if __name__ == '__main__':
create_train_file('nlu.md', 'data_dir') | [
"32243939+faysoserious@users.noreply.github.com"
] | 32243939+faysoserious@users.noreply.github.com |
0bf2971e3039b5214949b6943375e867b6c63f9b | 43a27b80ce1a8cf422142f5a43044917ff17a7cf | /python/leetcode/JumpGameII.py | 280c6e1aa70fdec8048493455bbab69f335a22c0 | [] | no_license | bignamehyp/interview | 321cbe4c5763b2fc6d2ba31354d627af649fe4ed | 73c3a3e94c96994afdbc4236888456c8c08b6ee4 | refs/heads/master | 2021-01-25T08:49:30.249961 | 2015-03-07T07:34:00 | 2015-03-07T07:34:00 | 23,125,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | class JumpGameII:
def jump(self, A):
maxPos = A[0]
if len(A) == 1:return 0
if maxPos >= len(A) - 1: return 1
jumps = 1
prev = maxPos
pos = 1
while pos <= prev:
curJump = pos + A[pos]
if curJump > maxPos:
maxPos = curJump
if maxPos >= len(A) - 1:return jumps + 1
if pos == prev:
jumps += 1
prev = maxPos
pos += 1
return -1
| [
"huangyp@kalman.(none)"
] | huangyp@kalman.(none) |
d1832ec2bedb704f090af6d27a3a27a0abf67623 | 8bb4060c4a41d1ef1b31c59fb8b9bc375e3e2ba4 | /setup.py | c26e6e1cb822af51c1da20528c39ff488e7edd81 | [] | no_license | hanxianzhai/distribution | a6c5f96bb954e7e18bae0d6a7ac6976fae59d332 | 628f670f4ed39478007e3402a77653f6596d0529 | refs/heads/master | 2021-04-01T06:21:29.086943 | 2020-03-18T03:55:28 | 2020-03-18T03:55:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | import config
from init import app
if __name__ == '__main__':
app.run(
host='0.0.0.0',
port=config.app_conf["server"]["port"],
debug=False
)
| [
"tanshilinmail@gmail.com"
] | tanshilinmail@gmail.com |
11b4e29e48e64daf9e518e97f7b322a944e14bcf | 737e346e95db2e9dbbdb4225e1630726ddc48aba | /res50_models.py | 5847d49f8011c926c5a9e6cd3cf26701851c007d | [] | no_license | 160209-wyj/more_label_classifiter | 4c1f58d2b2f4d847082c19958e28d8d7c00714e9 | 6b58e180542dec887823581f8c980e9b97587749 | refs/heads/master | 2022-11-30T20:07:55.473587 | 2020-08-03T02:44:20 | 2020-08-03T02:44:20 | 284,583,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,579 | py | import torchvision.models as models
from torch.nn import Parameter
from util import *
import torch
import torch.nn as nn
class GCNResnet_junjie(nn.Module):
def __init__(self, model, num_classes, in_channel=300, t=0, adj_file='data/voc/voc_adj.pkl'):
super(GCNResnet_junjie, self).__init__()
self.features = nn.Sequential(
model.conv1,
model.bn1,
model.relu,
model.maxpool,
model.layer1,
model.layer2,
model.layer3,
model.layer4,
)
self.num_classes = num_classes
# self.pooling = nn.MaxPool2d(14, 14)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(2048, num_classes)
self.relu = nn.LeakyReLU(0.2)
# image normalization
self.image_normalization_mean = [0.485, 0.456, 0.406]
self.image_normalization_std = [0.229, 0.224, 0.225]
def forward(self, feature):
feature = self.features(feature)
feature = self.avgpool(feature)
feature = feature.view(feature.size(0), -1)
feature= self.fc(feature)
return feature
def get_config_optim(self, lr, lrp):
return [
{'params': self.features.parameters(), 'lr': lr * lrp},
{'params': self.fc.parameters(), 'lr': lr},
# {'params': self.gc2.parameters(), 'lr': lr},
]
def gcn_resnet50(num_classes, pretrained=True):
model = models.resnet50(pretrained=pretrained)
return GCNResnet_junjie(model, num_classes) | [
"onepiece_wyj@163.com"
] | onepiece_wyj@163.com |
715ffd43d80ae19e11dfa747979a2cc759c46682 | 08f57a2f45872a7cd0ac7674ec65dc0ded456939 | /python/CheckColorList.py | 224d1833ee685b05116340d2ec558f0889d0327f | [] | no_license | shrutipatil12/python_basic | d64320b32fbd396f6125a07691d382a102c3492d | 5b5c3bab1b6794a3b41ada5bbeaed496c3db95c8 | refs/heads/master | 2020-05-13T17:52:16.921565 | 2019-04-16T08:48:58 | 2019-04-16T08:48:58 | 181,646,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #Program to print out a set containing all the colors from color_list_1 which are not present in color_list_2.
First_ColorList=set(["White", "Black", "Red"])
Second_ColorList =set(["Red", "Green"])
print(First_ColorList.difference(Second_ColorList))
| [
"shrutidpatil12@gmail.com"
] | shrutidpatil12@gmail.com |
1b5412cedc70b10bbd0bfbba6281dd0865e34950 | 616fb296ebea1f92065deb1298efeb801b86dd24 | /8.2.py | 41a1ca4a39aa8b9122c53884712869086910623f | [] | no_license | viniciusgaia/ALP | f35ec687ec72732dd7233c2c5f7265a1530c7015 | 78d0b660d75154d6e74f44ebe5a15925afca5d73 | refs/heads/master | 2021-07-12T17:45:11.469697 | 2017-10-17T14:00:02 | 2017-10-17T14:00:02 | 107,211,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | L=[]
X=int(input())
Y=0
Z=0
while X>0:
L.append(X)
X=int(input())
Y=Y+1
while Y>0:
print(L[Z])
Y=Y-1
Z=Z+1
| [
"noreply@github.com"
] | noreply@github.com |
d8d7f4ad88d3eb2ce71b3254408e14271af8b247 | ada6108e4bc4cd67cdcfa273b5120f9276848c51 | /api/settings.py | 39ad3d11d059016c5a917506f628db75fefa6dd6 | [] | no_license | cadesalaberry/restobook | 5d7ead658c069bee95cb81bf0b985a90e0fc9c2b | 7417a91f3d41796ca213337cb064f6792daa4d3a | refs/heads/master | 2021-04-26T16:43:27.958339 | 2015-11-19T17:34:45 | 2015-11-19T17:34:45 | 46,409,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | # Let's just use the local mongod instance. Edit as needed.
# Please note that MONGO_HOST and MONGO_PORT could very well be left
# out as they already default to a bare bones local 'mongod' instance.
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_USERNAME = 'user'
MONGO_PASSWORD = 'password'
MONGO_DBNAME = 'restobook'
# Allows CORS
X_DOMAINS = '*'
# Enable reads (GET), inserts (POST) and DELETE for resources/collections
# (if you omit this line, the API will default to ['GET'] and provide
# read-only access to the endpoint).
RESOURCE_METHODS = ['GET', 'POST', 'DELETE']
# Enable reads (GET), edits (PATCH), replacements (PUT) and deletes of
# individual items (defaults to read-only item access).
ITEM_METHODS = ['GET', 'PATCH', 'PUT', 'DELETE']
restaurants = {
'schema': {
# Schema definition, based on Cerberus grammar. Check the Cerberus project
# (https://github.com/nicolaiarocci/cerberus) for details.
'owner_id': {
'type': 'integer',
},
'capacity': {
'type': 'integer',
},
'name': {
'type': 'string',
'minlength': 1,
'maxlength': 20,
'required': True,
},
'lat': {
'type': 'float',
'required': True,
},
'lng': {
'type': 'float',
'required': True,
},
}
}
reservations = {
'schema': {
# Schema definition, based on Cerberus grammar. Check the Cerberus project
# (https://github.com/nicolaiarocci/cerberus) for details.
'restaurant_id': {
'type': 'string',
},
'headcount': {
'type': 'integer',
},
'name': {
'type': 'string',
'minlength': 1,
'maxlength': 20,
},
'phone_number': {
'type': 'string',
}
}
}
DOMAIN = {'restaurants': restaurants, 'reservations': reservations}
| [
"cadesalaberry@yahoo.com"
] | cadesalaberry@yahoo.com |
917ff7a2f3ef54ab890027acb1e0d2212a7b26fe | bed67992036dc2a63b49c8e05b4999db1d7b7537 | /preprocessing.py | f0b6d2606b6a6738feb6add215906d9561618238 | [] | no_license | archmaester/Language-models | 7c5c69f29d9691d72a642023ec0cd3e3911bdc20 | e5a75716fb33c51cd07fde98d64086ef69032f11 | refs/heads/master | 2022-03-30T14:24:17.594602 | 2020-01-28T17:26:39 | 2020-01-28T17:26:39 | 123,162,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,598 | py | # -*- coding: utf-8 -*-
#Importing the required libraries
import string
import re
import os
import operator
import sys
import pickle
from collections import Counter
from nltk.corpus import gutenberg
from nltk.corpus import brown
from sklearn.model_selection import train_test_split
class preprocessing:
def preprocessingCorpus(self,dir_name,l):
#Initializing the array\
reload(sys)
sys.setdefaultencoding("latin-1")
sent_all = []
unique_words = []
words = []
if l ==0:
fieldids = gutenberg.fileids()
sent_all = list(gutenberg.sents(fieldids))
if l==1:
fieldids = brown.fileids()
sent_all = list(brown.sents(fieldids))
if l==2 :
fieldids = brown.fileids()
sent_all = list(brown.sents(fieldids))
fieldids = gutenberg.fileids()
sent_all += list(gutenberg.sents(fieldids))
# Save entire corpus in sententence form
file1 = open(dir_name+'/'+'sent_all', 'w')
for word in sent_all:
blah = ''
for entry in word:
blah +=entry + ' '
file1.write("START1 "+"START2 "+blah+" STOP1" +" STOP2"+ "\n")
file1.close()
#Splittng into train test
file1 = open(dir_name+'/'+'sent_all', 'r')
lines = file1.readlines()
file1.close()
train, test = train_test_split(lines, train_size = 0.8)
#Saving training data
file1 = open(dir_name+'/'+'train_sent', 'w')
for word in train:
word = word.replace('\n','')
file1.write(str(word)+"\n")
file1.close()
#Splitting into train and development set
dev, test_final = train_test_split(test, train_size = 0.5)
file1 = open(dir_name+'/'+'test_sent', 'w')
for word in test_final:
word = word.replace('\n','')
file1.write(str(word) + "\n")
file1.close()
file1 = open(dir_name+'/'+'dev_sent', 'w')
for word in dev:
word = word.replace('\n','')
file1.write(str(word) + "\n")
file1.close()
#Collecting all words in the corpus
all_words = []
for sent1 in train:
words_list = sent1.split()
all_words += words_list
fp = open(dir_name+'/'+'all_words', 'w')
pickle.dump(all_words, fp)
fp.close()
| [
"keswanimonish@yahoo.com"
] | keswanimonish@yahoo.com |
027954b13256b665ac1641929f4678fcdca3ee95 | a1657a0c5c8f3f8b51b98074293e2f2e9b16e6f4 | /libs/pipeline_model/tensorflow/core/framework/function_pb2.py | ed2c403e475d8930c09fb9f953cc005855abe240 | [
"Apache-2.0"
] | permissive | PipelineAI/pipeline | e8067636f5844dea0653aef84bd894ca2e700fc6 | 0f26e3eaad727c1d10950f592fe1949ece8153aa | refs/heads/master | 2023-01-07T15:27:33.741088 | 2022-10-25T23:01:51 | 2022-10-25T23:01:51 | 38,730,494 | 2,596 | 512 | Apache-2.0 | 2020-01-30T23:00:08 | 2015-07-08T03:49:23 | Jsonnet | UTF-8 | Python | false | true | 12,092 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/function.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import attr_value_pb2 as tensorflow_dot_core_dot_framework_dot_attr__value__pb2
from tensorflow.core.framework import node_def_pb2 as tensorflow_dot_core_dot_framework_dot_node__def__pb2
from tensorflow.core.framework import op_def_pb2 as tensorflow_dot_core_dot_framework_dot_op__def__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/function.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n(tensorflow/core/framework/function.proto\x12\ntensorflow\x1a*tensorflow/core/framework/attr_value.proto\x1a(tensorflow/core/framework/node_def.proto\x1a&tensorflow/core/framework/op_def.proto\"j\n\x12\x46unctionDefLibrary\x12)\n\x08\x66unction\x18\x01 \x03(\x0b\x32\x17.tensorflow.FunctionDef\x12)\n\x08gradient\x18\x02 \x03(\x0b\x32\x17.tensorflow.GradientDef\"\xaa\x02\n\x0b\x46unctionDef\x12$\n\tsignature\x18\x01 \x01(\x0b\x32\x11.tensorflow.OpDef\x12/\n\x04\x61ttr\x18\x05 \x03(\x0b\x32!.tensorflow.FunctionDef.AttrEntry\x12%\n\x08node_def\x18\x03 \x03(\x0b\x32\x13.tensorflow.NodeDef\x12-\n\x03ret\x18\x04 \x03(\x0b\x32 .tensorflow.FunctionDef.RetEntry\x1a\x42\n\tAttrEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.tensorflow.AttrValue:\x02\x38\x01\x1a*\n\x08RetEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\";\n\x0bGradientDef\x12\x15\n\rfunction_name\x18\x01 \x01(\t\x12\x15\n\rgradient_func\x18\x02 \x01(\tB/\n\x18org.tensorflow.frameworkB\x0e\x46unctionProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_attr__value__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_node__def__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_op__def__pb2.DESCRIPTOR,])
_FUNCTIONDEFLIBRARY = _descriptor.Descriptor(
name='FunctionDefLibrary',
full_name='tensorflow.FunctionDefLibrary',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='function', full_name='tensorflow.FunctionDefLibrary.function', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gradient', full_name='tensorflow.FunctionDefLibrary.gradient', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=182,
serialized_end=288,
)
_FUNCTIONDEF_ATTRENTRY = _descriptor.Descriptor(
name='AttrEntry',
full_name='tensorflow.FunctionDef.AttrEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.FunctionDef.AttrEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.FunctionDef.AttrEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=479,
serialized_end=545,
)
_FUNCTIONDEF_RETENTRY = _descriptor.Descriptor(
name='RetEntry',
full_name='tensorflow.FunctionDef.RetEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.FunctionDef.RetEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.FunctionDef.RetEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=589,
)
_FUNCTIONDEF = _descriptor.Descriptor(
name='FunctionDef',
full_name='tensorflow.FunctionDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='signature', full_name='tensorflow.FunctionDef.signature', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attr', full_name='tensorflow.FunctionDef.attr', index=1,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='node_def', full_name='tensorflow.FunctionDef.node_def', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ret', full_name='tensorflow.FunctionDef.ret', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_FUNCTIONDEF_ATTRENTRY, _FUNCTIONDEF_RETENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=291,
serialized_end=589,
)
_GRADIENTDEF = _descriptor.Descriptor(
name='GradientDef',
full_name='tensorflow.GradientDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='function_name', full_name='tensorflow.GradientDef.function_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gradient_func', full_name='tensorflow.GradientDef.gradient_func', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=591,
serialized_end=650,
)
_FUNCTIONDEFLIBRARY.fields_by_name['function'].message_type = _FUNCTIONDEF
_FUNCTIONDEFLIBRARY.fields_by_name['gradient'].message_type = _GRADIENTDEF
_FUNCTIONDEF_ATTRENTRY.fields_by_name['value'].message_type = tensorflow_dot_core_dot_framework_dot_attr__value__pb2._ATTRVALUE
_FUNCTIONDEF_ATTRENTRY.containing_type = _FUNCTIONDEF
_FUNCTIONDEF_RETENTRY.containing_type = _FUNCTIONDEF
_FUNCTIONDEF.fields_by_name['signature'].message_type = tensorflow_dot_core_dot_framework_dot_op__def__pb2._OPDEF
_FUNCTIONDEF.fields_by_name['attr'].message_type = _FUNCTIONDEF_ATTRENTRY
_FUNCTIONDEF.fields_by_name['node_def'].message_type = tensorflow_dot_core_dot_framework_dot_node__def__pb2._NODEDEF
_FUNCTIONDEF.fields_by_name['ret'].message_type = _FUNCTIONDEF_RETENTRY
DESCRIPTOR.message_types_by_name['FunctionDefLibrary'] = _FUNCTIONDEFLIBRARY
DESCRIPTOR.message_types_by_name['FunctionDef'] = _FUNCTIONDEF
DESCRIPTOR.message_types_by_name['GradientDef'] = _GRADIENTDEF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FunctionDefLibrary = _reflection.GeneratedProtocolMessageType('FunctionDefLibrary', (_message.Message,), dict(
DESCRIPTOR = _FUNCTIONDEFLIBRARY,
__module__ = 'tensorflow.core.framework.function_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.FunctionDefLibrary)
))
_sym_db.RegisterMessage(FunctionDefLibrary)
FunctionDef = _reflection.GeneratedProtocolMessageType('FunctionDef', (_message.Message,), dict(
AttrEntry = _reflection.GeneratedProtocolMessageType('AttrEntry', (_message.Message,), dict(
DESCRIPTOR = _FUNCTIONDEF_ATTRENTRY,
__module__ = 'tensorflow.core.framework.function_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.FunctionDef.AttrEntry)
))
,
RetEntry = _reflection.GeneratedProtocolMessageType('RetEntry', (_message.Message,), dict(
DESCRIPTOR = _FUNCTIONDEF_RETENTRY,
__module__ = 'tensorflow.core.framework.function_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.FunctionDef.RetEntry)
))
,
DESCRIPTOR = _FUNCTIONDEF,
__module__ = 'tensorflow.core.framework.function_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.FunctionDef)
))
_sym_db.RegisterMessage(FunctionDef)
_sym_db.RegisterMessage(FunctionDef.AttrEntry)
_sym_db.RegisterMessage(FunctionDef.RetEntry)
GradientDef = _reflection.GeneratedProtocolMessageType('GradientDef', (_message.Message,), dict(
DESCRIPTOR = _GRADIENTDEF,
__module__ = 'tensorflow.core.framework.function_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GradientDef)
))
_sym_db.RegisterMessage(GradientDef)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\016FunctionProtosP\001\370\001\001'))
_FUNCTIONDEF_ATTRENTRY.has_options = True
_FUNCTIONDEF_ATTRENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_FUNCTIONDEF_RETENTRY.has_options = True
_FUNCTIONDEF_RETENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| [
"chris@fregly.com"
] | chris@fregly.com |
4b6bf36934127b6fd1ddad4d55f7e0f076890cc1 | 6d57588687d4f45781e37c1f64ba7cd0a19b1b7d | /TUTORIAL/BASIC_TUTORIAL/build/colour_tracking/catkin_generated/pkg.installspace.context.pc.py | bb6cf24824b011bd68b75dfb045da21e4cb34f2a | [] | no_license | amuazdev/kamerider | 97e47875b9720a9787b1158ba4187db5b3b34be8 | 5ba691b086c15dca822df66ace3d82370d3611f1 | refs/heads/master | 2021-01-20T21:46:31.124407 | 2014-12-03T10:34:26 | 2014-12-03T10:34:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Values below are substituted by catkin from the package's CMake
# configuration; empty substitutions collapse to empty lists/strings.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []  # exported include dirs (none)
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')  # run dependencies, space separated (none)
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []  # exported libraries (none)
PROJECT_NAME = "colour_tracking"
PROJECT_SPACE_DIR = "/home/seow/kamerider/TUTORIAL/BASIC_TUTORIAL/install"  # install-space prefix
PROJECT_VERSION = "1.0.0"
| [
"seow@seow-Satellite-M840.(none)"
] | seow@seow-Satellite-M840.(none) |
d39089e713ed30221353891d1ae9b48da72fab8c | 084f304f51c2d253b7d414be458a76f568652f2c | /ssad/trainer/trainer.py | 83cb65a95de273802c37b10fbf9d7fb74e1e5f6b | [] | no_license | G-Morgen/SSAD | 1fdf2923b6c1252ae056efef5a7833e9e89a0cd2 | 4d7431d77a2ad033a63585cddd02e5dd2b2e1387 | refs/heads/master | 2022-12-11T15:44:35.516457 | 2020-09-11T07:46:10 | 2020-09-11T07:46:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,562 | py | import logging
import ssad.typehint as T
from ssad.trainer.augs import TrainerAugs
from ssad.trainer.dataset import TrainerDataset
from ssad.trainer.dataloader import TranerDataLoader
from ssad.trainer.model import TrainerModel
from ssad.trainer.optimizer import TrainerOptimizer
from ssad.trainer.criterion import TrainerCriterion
from ssad.trainer.run_train import TrainerRunTrain
from ssad.trainer.run_test import TrainerRunTest
from ssad.trainer.metric import TrainerMetric
from ssad.trainer.show_result import TrainerShowResult
class Trainer(
    TrainerAugs,
    TrainerDataset,
    TranerDataLoader,
    TrainerModel,
    TrainerOptimizer,
    TrainerCriterion,
    TrainerRunTrain,
    TrainerRunTest,
    TrainerMetric,
    TrainerShowResult,
):
    """Training facade assembled from the per-concern trainer mixins.

    Each mixin contributes one ``init_*`` factory plus run/metric/plot
    helpers; this class wires them together from a single config object.
    """

    def __init__(self, cfg: T.DictConfig) -> None:
        """Build augmentations, datasets, loaders, models, optimizers and
        criteria for every split/model type declared below.

        Args:
            cfg: hydra/omegaconf-style configuration consumed by the mixins.
        """
        super().__init__()
        self.cfg = cfg
        self.log = logging.getLogger(__name__)
        # Per-split data pipeline, built split-by-split in the same order
        # as before (augs, then dataset, then dataloader for each split).
        self.augs, self.dataset, self.dataloader = {}, {}, {}
        for split in ("S", "C", "test"):
            self.augs[split] = self.init_augs(split)
            self.dataset[split] = self.init_dataset(split)
            self.dataloader[split] = self.init_dataloader(split)
        # Per-model training components for the two trained model types.
        self.model, self.optimizer, self.criterion = {}, {}, {}
        for kind in ("S", "C"):
            self.model[kind] = self.init_model(kind)
            self.optimizer[kind] = self.init_optimizer(kind)
            self.criterion[kind] = self.init_criterion(kind)
| [
"taikiinoue45@gmail.com"
] | taikiinoue45@gmail.com |
1f5493262a109e0a6edc210a66af4d4d875a8d39 | 9a34a8dcf477e95b8018ba733e4420aba64e9ad7 | /Facial_Recognition_Part3.py | 6d49616490bcd2f5072baaf9e6abdb0f01a8ff16 | [] | no_license | harshpandey2202/Facial-Recognition-master-ver1.0 | 079529f3b81b1976fa15961528c5aa0656936148 | 71da6f048151b32bc77d01630b2238109692bb51 | refs/heads/main | 2023-07-28T09:32:10.829321 | 2021-09-15T15:24:10 | 2021-09-15T15:24:10 | 406,820,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,192 | py | import cv2
import numpy as np
from os import listdir
from os.path import isfile, join
# --- Training-data collection ---------------------------------------------
# Every file in faces/ is read as one grayscale training image; its index in
# the directory listing is used as its (arbitrary) integer label.
data_path = 'faces/'
onlyfiles = [f for f in listdir(data_path) if isfile(join(data_path,f))]
Training_Data, Labels = [], []
for i, files in enumerate(onlyfiles):
    image_path = data_path + onlyfiles[i]
    images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    # NOTE(review): cv2.imread returns None for unreadable files, which
    # would corrupt the training array — assumes faces/ holds only valid
    # images produced by the companion capture script; confirm.
    Training_Data.append(np.asarray(images, dtype=np.uint8))
    Labels.append(i)
Labels = np.asarray(Labels, dtype=np.int32)
# Train a Local Binary Patterns Histograms recognizer on the samples.
model = cv2.face.LBPHFaceRecognizer_create()
model.train(np.asarray(Training_Data), np.asarray(Labels))
print("Model Training Complete!!!!!")
# Haar cascade used by face_detector() below to locate faces per frame.
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def face_detector(img, size = 0.5):
    """Detect faces in a BGR frame, draw boxes, and return the face ROI.

    Args:
        img: BGR frame (numpy array) straight from the camera.
        size: unused; kept so existing callers remain compatible.

    Returns:
        (img, roi): the frame with a rectangle drawn around every detected
        face, plus the last detected face cropped and resized to 200x200.
        When no face is found, returns (img, []).
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    # detectMultiScale returns an empty tuple when nothing is found.  The
    # original test `faces is ()` only worked by accident of CPython's
    # empty-tuple interning (and is a SyntaxWarning from Python 3.8 on);
    # a length check is correct for both the tuple and ndarray cases.
    if len(faces) == 0:
        return img, []
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
        roi = img[y:y + h, x:x + w]
        roi = cv2.resize(roi, (200, 200))
    return img, roi
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
image, face = face_detector(frame)
try:
face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
result = model.predict(face)
if result[1] < 500:
confidence = int(100*(1-(result[1])/300))
display_string = str(confidence)+'% Confidence its you buddy'
cv2.putText(image,display_string,(100,120), cv2.FONT_HERSHEY_COMPLEX,1,(250,120,255),2)
if confidence > 75:
cv2.putText(image, "hoooyah", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
cv2.imshow('Face Cropper', image)
else:
cv2.putText(image, "Locked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
cv2.imshow('Face Cropper', image)
except:
cv2.putText(image, "Face Not Found", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
cv2.imshow('Face Cropper', image)
pass
if cv2.waitKey(1)==13:
break
cap.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
3e9f8690f1fdeb9b8434f9e7392e0f940dd1544a | 1567fb032fb73057b60b7ae4830c195b29164c8b | /englxx/english/__openerp__.py | 769773e599efb1129ebbbde460fdfbc03957f39a | [] | no_license | guparo/myaddons | 76967a02135198ba53335d39f766c7d987a7b02d | 3e10debb5215a525782b84a1786d09e8927e7231 | refs/heads/master | 2016-09-06T09:03:59.992662 | 2015-11-08T23:55:17 | 2015-11-08T23:55:17 | 42,006,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | {
# Theme information
'name': "English",
'description': """
""",
'category': 'Theme',
'version': '2.0',
'depends': ['base'],
# templates, pages, and snippets
'data': [
'views/english.xml',
],
# Your information
'author': "Ing. Adriel G. Patino",
'website': "https://gprweb.odoo.com/web",
'application':True,
} | [
"guparo@hotmail.com"
] | guparo@hotmail.com |
718491d7f1c8475ee325a0f5fbcf52180b644f84 | 51e76fef9e12a3c51474d6f5e53cf13a4344c0d2 | /com/qiwx/hello.py | c0e816208ae1bbe4818de5be4b477a9849af84fe | [] | no_license | qiwx2012/PycharmProjects | 942b9800a637dc9ae54325bba763dc6c3e83c994 | 6624d4760d4e39ba422d134461b00a9d60529532 | refs/heads/master | 2021-08-15T05:59:45.844663 | 2017-11-17T13:42:24 | 2017-11-17T13:42:24 | 110,513,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | import os
import com.qiwx.modle_test
print("hello world");print("hello world");
if True:
print("真");
else:
print("假");
# str=raw_input("请输入: ")#输入函数
#
# print("你输入的内容是:"+str)
# str = input("请输入:");
# # print ("你输入的内容是: "+str)
# print ("你输入的内容是:", str)
try:
fo = open("D:\\a.txt", "w+")
except:
print( "打开文件失败")
else:
print ("打开成功")
# fo.write("www.baidu.comd你好\n")
# fo.close()
# os.rename("d:\\a.txt","d:\\b.txt")
# os.mkdir("test")
# file=open("test","a+")
# open("test\c.txt","a+")
print(os.getcwd())
# os.removedirs("test")
a=com.qiwx.modle_test.sum(10)
print(a)
| [
"qiwx@jingzhengu.com"
] | qiwx@jingzhengu.com |
63539348bba3fde558235a97e5430cf24506d30e | 4ef0dd7433679b6098eb83f28a5d99c0c5f697ec | /firstdjango/firstdjango/urls.py | 6c396bbb5f221dabc286d68977e23a7a54e76ab8 | [] | no_license | chadeddington/django-intro | 6e8d6fa5c21f6531f14712a59314d3935f9c1055 | e7214720fe53cad0d3e2c36e6f6a9aaff2ca5a39 | refs/heads/master | 2021-01-10T13:28:47.967104 | 2016-01-10T05:23:11 | 2016-01-10T05:23:11 | 49,354,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | from django.conf.urls import include, url
from django.contrib import admin
from inventory import views
urlpatterns = [
# Examples:
# url(r'^$', 'firstdjango.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.index, name='index'),
url(r'^item/(?P<id>\d+)/', views.item_detail, name='item_detail'),
url(r'^admin/', include(admin.site.urls)),
]
| [
"edd11001@byui.edu"
] | edd11001@byui.edu |
a647bbaa9c5647bcc76693b3373a5c1ca73ef960 | 45c59a36ee268507577faf01c32a1e67951e7ac4 | /keycontrol.py | f3b3c9db1c5e43c9c6c64bd5fa2a5371bcdaaae9 | [] | no_license | Neuralis-AI/4-as-RaspberryPi-Jetson-Robotarm | 3e551b963108a155ba987f6e6a7a82c7c0e52867 | 777fee532d447301fae58bcf9a3678fca8dc1492 | refs/heads/main | 2022-12-29T13:24:05.738962 | 2020-10-16T19:31:44 | 2020-10-16T19:31:44 | 303,527,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,462 | py | import keyboard
import time
import sys, termios, tty, os
import RPi.GPIO as GPIO
import pigpio
def getch():
    """Read one character from stdin in raw mode (no Enter required).

    The terminal attributes are always restored, even if the read is
    interrupted by a signal or exception.
    """
    stdin_fd = sys.stdin.fileno()
    saved_attrs = termios.tcgetattr(stdin_fd)
    try:
        tty.setraw(stdin_fd)
        return sys.stdin.read(1)
    finally:
        termios.tcsetattr(stdin_fd, termios.TCSADRAIN, saved_attrs)
# BCM pin numbers for the four servo channels of the robot arm.
# (Variable names are Dutch: grijper=gripper, boven/onder=up/down,
# links/rechts=left/right, voor/achter=forward/backward.)
grijperPIN = 17 # Board pin 11, BCM pin 17
bovenoPIN = 27 # Board pin 13, BCM pin 27
linksrPIN = 22 # Board pin 15, BCM pin 22
vooraPIN = 23 # Board pin 16, BCM pin 23
pwm = pigpio.pi()
pwm.set_mode(grijperPIN, pigpio.OUTPUT) # switch the GPIO pin to output (default is input)
pwm.set_mode(bovenoPIN, pigpio.OUTPUT)
pwm.set_mode(linksrPIN, pigpio.OUTPUT)
pwm.set_mode(vooraPIN, pigpio.OUTPUT)
pwm.set_PWM_frequency( grijperPIN, 50 ) # gripper
pwm.set_PWM_frequency( bovenoPIN, 50 ) # up/down
pwm.set_PWM_frequency( linksrPIN, 50 ) # left/right
pwm.set_PWM_frequency( vooraPIN, 50 ) # forward/backward
pwm.set_servo_pulsewidth( grijperPIN, 500 ) ; # Move gripper to position 0 (closed)
pwm.set_servo_pulsewidth( bovenoPIN, 500 ) ; # Move up/down to position 0 (all the way up)
pwm.set_servo_pulsewidth( linksrPIN, 500 ) ; # Move left/right to position 0 (all the way left)
pwm.set_servo_pulsewidth( vooraPIN, 500 ) ; # Move forward/backward to position 0 (fully forward)
# The variables below track the current position of each servo.
# The positioning is linear: 500 is one extreme, 1500 the middle, 2500 the other.
grijper = 500
bovenonder = 500
linksrechts = 500
voorachter = 500
try: # Try
    while True: # From here on, listen to the keyboard for movement
        pwm.set_servo_pulsewidth( grijperPIN, grijper ) ; # Set gripper to the value of "grijper"
        pwm.set_servo_pulsewidth( bovenoPIN, bovenonder ) ; # Set up/down to its stored value
        pwm.set_servo_pulsewidth( linksrPIN, linksrechts ) ; # Set left/right to its stored value
        pwm.set_servo_pulsewidth( vooraPIN, voorachter ) ; # Set forward/backward to its stored value
        char = getch() # Read which key was pressed (blocks until a keypress)
        if (char == "a"): # If "a" was pressed
            if (linksrechts < 2500): # and left/right is below 2500 (the maximum)
                linksrechts += 500 # add 500 (one step) to the previous left/right value
                pwm.set_servo_pulsewidth( linksrPIN, linksrechts ) ; # apply the new value (+1 step) and move the arm there
        if (char == "d"): # If "d" was pressed
            if (linksrechts > 500): # and left/right is above 500 (the minimum)
                linksrechts -= 500 # subtract 500 (one step) from the previous left/right value
                pwm.set_servo_pulsewidth( linksrPIN, linksrechts ) ; # apply the new value (-1 step) and move the arm there
        if (char == "q"):
            if (grijper < 2500):
                grijper += 500
                pwm.set_servo_pulsewidth( grijperPIN, grijper ) ; # Set gripper to the value of "grijper"
        if (char == "e"):
            if (grijper > 500):
                grijper -= 500
                pwm.set_servo_pulsewidth( grijperPIN, grijper ) ; # Set gripper to the value of "grijper"
        if (char == "w"):
            if (voorachter < 2500):
                voorachter += 500
                pwm.set_servo_pulsewidth( vooraPIN, voorachter ) ; # Set forward/backward to the value of "voorachter"
        if (char == "s"):
            if (voorachter > 500):
                voorachter -= 500
                pwm.set_servo_pulsewidth( vooraPIN, voorachter ) ; # Set forward/backward to the value of "voorachter"
        if (char == "r"):
            if (bovenonder < 2500):
                bovenonder += 500
                pwm.set_servo_pulsewidth( bovenoPIN, bovenonder ) ; # Set up/down to the value of "bovenonder"
        if (char == "f"):
            if (bovenonder > 500):
                bovenonder -= 500
                pwm.set_servo_pulsewidth( bovenoPIN, bovenonder ) ; # Set up/down to the value of "bovenonder"
        else:
            # NOTE(review): this `else` binds to the `if (char == "f")` just
            # above, so "p" is only handled because "p" != "f" — fragile
            # structure; confirm it was intended.
            if (char == "p"):
                # NOTE(review): GPIO.cleanup() is RPi.GPIO while the servos
                # are driven via pigpio — verify this actually releases the
                # pigpio channels.
                GPIO.cleanup() # Switch off all GPIO pins
                exit() # stop the script
except KeyboardInterrupt: # When the script is stopped (with CTRL+C)
    GPIO.cleanup() # Restore GPIOs to their default state
| [
"noreply@github.com"
] | noreply@github.com |
e30511824f1a5a7e47e2e3e9cf66480227c6ff48 | d051f250e2997d4714a59f8a62459d5b5212711a | /viewfitz.py | b6efdf7a33b4256248488a739853f69fe2ca3bb7 | [] | no_license | AlexGKim/NearbySupernovaFactory | 7a45f59285d91f687c46b703b39ec6df3256cca1 | 5c7db9be397d85a26aaf9196f9e75d2f3aab8998 | refs/heads/master | 2021-01-23T21:43:55.620413 | 2018-10-22T15:27:04 | 2018-10-22T15:27:04 | 56,637,164 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,393 | py | #!/usr/bin/env python
import pickle
import pystan
import matplotlib.pyplot as plt
from matplotlib import rc
import corner
from matplotlib.backends.backend_pdf import PdfPages
import numpy
import sncosmo
import scipy
import cPickle
import matplotlib as mpl
import sivel
# NOTE(review): this script uses Python 2 print statements further down, so
# it must run under Python 2; `normed=` in plt.hist is likewise the old
# matplotlib keyword (later renamed `density=`).
mpl.rcParams['font.size'] = 18
ext=''
# --- Load pickled fit results ---------------------------------------------
# Each pickle holds a (samples, extra) tuple; only the samples are kept.
f = open('temp18'+ext+'.pkl','rb')
(fit18,_) = pickle.load(f)
f = open('temp18'+ext+'_free.pkl','rb')
(fit18_free,_) = pickle.load(f)
f = open('temp18'+ext+'_salt2.pkl','rb')
(fit18_salt2,_) = pickle.load(f)
f = open('temp19'+ext+'.pkl','rb')
(fit19,_) = pickle.load(f)
f = open('temp20'+ext+'.pkl','rb')
(fit20,_) = pickle.load(f)
f = open('temp20'+ext+'_salt2.pkl','rb')
(fit20_salt2,_) = pickle.load(f)
f = open('temp20_ccm.pkl','rb')
(fit20_ccm,_) = pickle.load(f)
f = open('temp11'+ext+'.pkl','rb')
(fit11,_) = pickle.load(f)
# --- Histogram of all AV posterior samples (constant-R models) ------------
plt.hist(fit18_salt2['AV'].flatten(),histtype='step', stacked='false', \
    label=r'$R=const$ SALT2',bins=numpy.arange(-.25,1.8,0.1),normed=True)
plt.hist(fit18['AV'].flatten(),histtype='step', stacked='false', \
    label=r'$R=const$ Hsiao',bins=numpy.arange(-.25,1.8,0.1),normed=True)
# plt.hist(fit18_free['AV'].flatten(),histtype='step', stacked='false', \
#     label=r'$R=const$ free',bins=numpy.arange(-.25,1.8,0.1),normed=True)
# plt.hist(fit19['AV'].flatten(),histtype='step', stacked='false', \
#     label=r'$\ln{R}\sim \mathcal{N}$',bins=numpy.arange(-.25,1.8,0.1),normed=True)
# plt.hist(fit20['AV'].flatten(),histtype='step', stacked='false', \
#     label=r'$R$-free',bins=numpy.arange(-.25,1.8,0.1),normed=True)
# plt.hist(fit20_salt2['AV'].flatten(),histtype='step', stacked='false', \
#     label=r'$R$-free S2',bins=numpy.arange(-.25,1.8,0.1),normed=True)
plt.xlabel(r'$A_V$')
plt.legend()
plt.tight_layout()
pp = PdfPages('output18'+ext+'/AVs_hist.pdf')
plt.savefig(pp,format='pdf')
pp.close()
plt.close()
# --- Histogram of per-object posterior-median AV --------------------------
plt.hist(numpy.median(fit18_salt2['AV'],axis=0),label=r'$R=const$ SALT2',histtype='step',bins=numpy.arange(-0.2,1.8,0.1),normed=True)
plt.hist(numpy.median(fit18['AV'],axis=0),label=r'$R=const$ Hsiao',histtype='step',bins=numpy.arange(-0.2,1.8,0.1),normed=True)
# plt.hist(numpy.median(fit19['AV'],axis=0),label=r'$\ln{R}\sim \mathcal{N}$',histtype='step',bins=numpy.arange(-0.2,1.8,0.1),normed=True)
# plt.hist(numpy.median(fit20['AV'],axis=0),label=r'$R$-free',histtype='step',bins=numpy.arange(-0.2,1.8,0.1),normed=True)
# plt.hist(numpy.median(fit20_salt2['AV'],axis=0),label=r'$R$-free S2',histtype='step',bins=numpy.arange(-0.2,1.8,0.1),normed=True)
# plt.hist(numpy.median(fit20_ccm['AV'],axis=0),label=r'$R$-free CCM',histtype='step',bins=numpy.arange(-0.2,1.8,0.1))
plt.hist(numpy.median(fit11['gamma'][:,2][:,None]*fit11['k'],axis=0),label=r'$\gamma_0 k_0$',histtype='step',bins=numpy.arange(-0.2,1.8,0.1),normed=True)
plt.xlabel(r'$A_V$')
plt.legend()
plt.tight_layout()
pp = PdfPages('output18'+ext+'/AVs_mode_hist.pdf')
plt.savefig(pp,format='pdf')
pp.close()
plt.close()
# plt.hist(numpy.median(fit20_ccm['AV']-fit20['AV'],axis=0),histtype='step')
# # plt.hist(numpy.median(fit20_ccm['AV'],axis=0),label=r'$R$-free CCM',histtype='step',bins=numpy.arange(-0.2,1.8,0.1))
# plt.xlabel(r'$A_V(CCM) - A_V(F99)$')
# pp = PdfPages('output18'+ext+'/AVs_mode_hist_comp.pdf')
# plt.savefig(pp,format='pdf')
# pp.close()
# plt.close()
# --- Half-widths of the 68% credible intervals of AV per model ------------
(ymin, ymax) = numpy.percentile(fit18['AV'],(50-34,50+34),axis=0)
f18perc=(ymax-ymin)/2
(ymin, ymax) = numpy.percentile(fit19['AV'],(50-34,50+34),axis=0)
f19perc=(ymax-ymin)/2
(ymin, ymax) = numpy.percentile(fit20['AV'],(50-34,50+34),axis=0)
f20perc=(ymax-ymin)/2
(ymin, ymax) = numpy.percentile(fit20_ccm['AV'],(50-34,50+34),axis=0)
f20_ccmperc=(ymax-ymin)/2
(ymin, ymax) = numpy.percentile(fit11['gamma'][:,2][:,None]*fit11['k'],(50-34,50+34),axis=0)
f11perc=(ymax-ymin)/2
# NOTE(review): four format slots but five arguments below — f11perc.mean()
# is silently dropped by str.format; confirm whether a fifth slot is missing.
print "{:6.2f} {:6.2f} {:6.2f} {:6.2f}".format(f18perc.mean(),f19perc.mean(),f20perc.mean(),f20_ccmperc.mean(),f11perc.mean())
# --- Monte Carlo RV draws from the fit19 lognormal population model -------
n=10000
choice = numpy.random.randint(0,len(fit19['lnRV_mn']),size=n)
raw = numpy.random.normal(0,1,size=n)
fit19RV = numpy.exp(fit19['lnRV_mn'][choice] + raw*fit19['lnRV_sig'][choice])
# Median and 68% interval of RV for the constant-R fits (Python 2 prints).
shit=numpy.percentile(1./fit18_salt2['RVinv'],(50,50-34,50+34))
print shit[0],shit[0]-shit[1],shit[2]-shit[0]
shit= numpy.percentile(1./fit18['RVinv'],(50,50-34,50+34))
print shit[0],shit[0]-shit[1],shit[2]-shit[0]
# --- Histogram of RV posterior samples ------------------------------------
plt.hist(1./fit18_salt2['RVinv'].flatten(),histtype='step', stacked='false', \
    label=r'$R=const$ SALT2',bins=numpy.arange(2,4.2,0.05),normed=True)
plt.hist(1./fit18['RVinv'].flatten(),histtype='step', stacked='false', \
    label=r'$R=const$ Hsiao',bins=numpy.arange(2,4.2,0.05),normed=True)
# plt.hist(1./fit18_free['RVinv'].flatten(),histtype='step', stacked='false', \
#     label=r'$R=const$ free',bins=numpy.arange(0,8.2,0.1),normed=True)
# plt.hist(fit19RV,histtype='step', stacked='false', \
#     label=r'$\ln{R}\sim \mathcal{N}$',bins=numpy.arange(0,8.2,0.1),normed=True)
# plt.hist(numpy.median(fit20['RV'],axis=0),histtype='step', stacked='false', \
#     label=r'$R$-free',bins=numpy.arange(0,8.2,0.25),normed=True)
# plt.hist(numpy.median(fit20_salt2['RV'],axis=0),histtype='step', stacked='false', \
#     label=r'$R$-free S2',bins=numpy.arange(0,8.2,0.25),normed=True)
# plt.hist(numpy.median(fit20_ccm['RV'],axis=0),histtype='step', stacked='false', \
#     label=r'$R$-free CCM',bins=numpy.arange(0,8.2,0.1),normed=True)
plt.xlabel(r'$R_V$')
plt.legend()
plt.tight_layout()
pp = PdfPages('output18'+ext+'/RVs_hist.pdf')
plt.savefig(pp,format='pdf')
pp.close()
plt.close()
# plt.hist(numpy.median(fit20_ccm['RV']-fit20['RV'],axis=0),histtype='step', stacked='false',normed=True,color='red')
# plt.xlabel(r'$R_V(CCM)-R_V(F99)$')
# pp = PdfPages('output18'+ext+'/RVs_hist_comp.pdf')
# plt.savefig(pp,format='pdf')
# pp.close()
# plt.close()
# plt.hist(numpy.median(fit20_ccm['AV']-fit20['AV'],axis=0),histtype='step')
# # plt.hist(numpy.median(fit20_ccm['AV'],axis=0),label=r'$R$-free CCM',histtype='step',bins=numpy.arange(-0.2,1.8,0.1))
# plt.xlabel(r'$A_V(CCM) - A_V(F99)$')
# pp = PdfPages('output18'+ext+'/AVs_mode_hist_comp.pdf')
# plt.savefig(pp,format='pdf')
# pp.close()
# plt.close()
# --- AV vs RV scatter with 68% error bars for the R-free model ------------
(y, ymin, ymax) = numpy.percentile(fit20['RV'],(50,50-34,50+34),axis=0)
(x, xmin, xmax) = numpy.percentile(fit20['AV'],(50,50-34,50+34),axis=0)
plt.errorbar(x, y, xerr=[x-xmin,xmax-xmin],yerr=[y-ymin,ymax-y],fmt='.',alpha=0.15,color='b')
plt.scatter(x, y,marker='o',alpha=0.3,s=4,c='b')
# plt.xlim((-0.5,0.75))
# plt.ylim((-4,6))
plt.xlabel(r'$A_V^F$')
plt.ylabel(r'$R_V^F$')
plt.tight_layout()
pp = PdfPages('output18'+ext+'/AVRV.pdf')
plt.savefig(pp,format='pdf')
pp.close()
plt.close()
# (y, ymin, ymax) = numpy.percentile(fit20_ccm['RV']-fit20['RV'],(50,50-34,50+34),axis=0)
# (x, xmin, xmax) = numpy.percentile(fit20_ccm['AV']-fit20['AV'],(50,50-34,50+34),axis=0)
# dx = (xmax-xmin)/2
# print numpy.sum(x/dx**2)/numpy.sum(1/dx**2)
# print numpy.sqrt(1/numpy.sum(1/dx**2))
# dy = (ymax-ymin)/2
# print numpy.sum(y/dy**2)/numpy.sum(1/dy**2)
# print numpy.sqrt(1/numpy.sum(1/dy**2))
# plt.errorbar(x, y, xerr=[x-xmin,xmax-xmin],yerr=[y-ymin,ymax-ymin],fmt='.',alpha=0.15,color='b')
# plt.scatter(x, y,marker='o',alpha=0.3,s=4,c='b')
# plt.xlim((-0.5,0.75))
# plt.ylim((-4,6))
# plt.xlabel(r'$A_V^C - A_V^F$')
# plt.ylabel(r'$R_V^C-R_V^F$')
# pp = PdfPages('output18'+ext+'/CCMF99.pdf')
# plt.savefig(pp,format='pdf')
# pp.close()
# plt.close()
"akim@251.51.243.131.in-addr.dhcp.lbl.gov"
] | akim@251.51.243.131.in-addr.dhcp.lbl.gov |
f4083d9caacac46d82b5af00e2b6de6a7e873ede | 8182619b9ad4fb1ed0a6cac43eb7e7732edf2dd8 | /code/seperate_SQuAD.py | bbcd271b2b8d7ac9a59b6e1a5448e979bd374c9c | [] | no_license | asd36952/baseword | 24769856dc910654ebef2db142e66415b313561f | dd0350015bcd47e42de2d8d7c87088247726b771 | refs/heads/master | 2020-03-19T06:05:49.255390 | 2018-09-20T12:30:07 | 2018-09-20T12:30:07 | 135,990,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,110 | py | import spacy
import json
from stanfordcorenlp import StanfordCoreNLP
import os
import copy
# open StanfordCoreNLP Server by
# java -Djava.io.tmpdir=/home/asd36952/tmp -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9001 -timeout 15000
nlp = StanfordCoreNLP('http://localhost', port=9001)
with open("../data/SQuAD/train-v2.0.json") as f:
    data = json.load(f)
# The rewritten dataset keeps every original article and appends the
# per-sentence variants built below.
new_data = {'version': 'v2.0_sentence_combine', 'data': copy.deepcopy(data['data'])}
for elem in data['data']:
    article = {'title': elem['title'], 'paragraphs': []}
    for p in elem['paragraphs']:
        # Sentence-split the paragraph and parse CoreNLP's plain-text report
        # into per-sentence texts plus [start, end] character spans.
        report = nlp.annotate(p['context'], {'annotators': 'ssplit', 'outputFormat': 'text'})
        sentences = []
        spans = []
        in_sentence = False      # currently inside a "Sentence #..." section
        expect_first = True      # the next token line opens a new span
        buf = ""
        for line in report.split("\n"):
            if not in_sentence:
                if line.startswith("Sentence #") and line.endswith("tokens):"):
                    in_sentence = True
                continue
            fields = line.split(" ")
            if len(fields) > 2:
                # Token line: the first token fixes the span start, every
                # later token pushes the span end forward.
                if expect_first:
                    begin_offset = int(fields[-2][len("CharacterOffsetBegin="):])
                    expect_first = False
                else:
                    end_offset = int(fields[-1][len("CharacterOffsetEnd="):-1])
            else:
                # Short line: sentence text or the blank section separator.
                # The pending sentence's span is flushed lazily, once the
                # next short line shows up.
                if len(sentences) > len(spans):
                    spans.append([begin_offset, end_offset])
                buf += line
                if line == "":
                    sentences.append(buf)
                    in_sentence = False
                    expect_first = True
                    buf = ""
        spans.append([begin_offset, end_offset])
        for s_idx in range(len(sentences)):
            para = {'context': sentences[s_idx]}
            kept_qas = []
            lo, hi = spans[s_idx]
            for qa in p['qas']:
                found = False
                local_answers = []
                pool = qa['plausible_answers'] if qa['is_impossible'] else qa['answers']
                for ans in pool:
                    if lo <= ans['answer_start'] <= hi:
                        if not qa['is_impossible']:
                            entry = {'text': ans['text'], 'answer_start': (ans['answer_start'] - lo)}
                            if entry not in local_answers:
                                local_answers.append(entry)
                        found = True
                if found:
                    if qa['is_impossible']:
                        kept_qas.append({'is_impossible': qa['is_impossible'],
                                         'answers': [],
                                         'plausible_answers': local_answers,
                                         'id': qa['id'],
                                         'question': qa['question']})
                    else:
                        kept_qas.append({'is_impossible': qa['is_impossible'],
                                         'answers': local_answers,
                                         'id': qa['id'],
                                         'question': qa['question']})
            if kept_qas:
                para['qas'] = kept_qas
                article['paragraphs'].append(para)
    if article['paragraphs']:
        new_data['data'].append(article)
nlp.close()
with open("../data/SQuAD/train-v2.0_sentence_combine.json", "w") as f:
    json.dump(new_data, f)
| [
"asd36952@gmail.com"
] | asd36952@gmail.com |
ca5d6b2879751e7673fa68b88318d020bd236dcd | 97be3873520588917e25c716fa9f5a1ae560d3a8 | /DAMIAN_COLOMA/rango3.py | 3d693fb7d628e3e61f0db6a3cd34eba2aa39dcc9 | [] | no_license | Kiaradamiancoloma/T07_DAMIAN-MENDOZA | e8c6fd661d3af23181f0dcd7a5aed339d281d196 | e3b949c88da8ae6d124fb0307aebf6d2be7e11a1 | refs/heads/master | 2020-11-25T03:48:32.384142 | 2019-12-17T03:38:29 | 2019-12-17T03:38:29 | 228,488,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | #programa que pida una sentencia al usuario
#shows it 10 times on screen
#Declaration
sentencia=""
#input (the prompt shown to the user is intentionally in Spanish)
sentencia=input("introduce una sentencia:")
for i in range(10):
    print(sentencia)
#end_for
| [
"kdamianc@unprg.edu.pe"
] | kdamianc@unprg.edu.pe |
4536077050ca081f63e917e475246ef43ecdfeb1 | d20261590a01d7de3964b00eb11747fcc6e92230 | /train.py | 8d14dd4d124a5eaf087663b99844bcec31b1c392 | [
"MIT"
] | permissive | lcpizzo/RE-BERT | 58573fe28797f674cc59b820293da294d263eabc | 45da3ee9094e317e353ab871b110dc378724e940 | refs/heads/main | 2023-05-06T08:56:36.947516 | 2021-06-02T20:40:12 | 2021-06-02T20:40:12 | 338,139,457 | 0 | 0 | NOASSERTION | 2021-02-11T20:06:51 | 2021-02-11T20:06:51 | null | UTF-8 | Python | false | false | 9,593 | py | # RE-BERT training model
# Adapted from LCF-BERT (ABSA-Pytorch)
import logging
import argparse
import math
import os
import sys
import random
import numpy
from sklearn import metrics
from time import strftime, localtime
from transformers import BertModel
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
from data_utils import build_tokenizer, build_embedding_matrix, Tokenizer4Bert, IOBDataset
from models.re_bert import RE_BERT
from models.lcf_bert import LCF_BERT
# Root logger at INFO, echoed to stdout; main() later adds a file handler too.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
class Instructor:
    """Builds the RE-BERT model and the IOB training set from ``opt`` and
    drives the training loop via :meth:`run`. Adapted from LCF-BERT
    (ABSA-PyTorch).
    """
    def __init__(self, opt):
        # opt: argparse.Namespace produced by main(), already enriched with
        # model_class / inputs_cols / initializer / optimizer / device.
        self.opt = opt
        tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
        bert = BertModel.from_pretrained(opt.pretrained_bert_name)
        self.model = opt.model_class(bert, opt).to(opt.device)
        self.trainset = IOBDataset(opt.dataset_file['train'], tokenizer)
        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
    def _print_args(self):
        # Log trainable vs. frozen parameter counts and every CLI option.
        n_trainable_params, n_nontrainable_params = 0, 0
        for p in self.model.parameters():
            n_params = torch.prod(torch.tensor(p.shape))
            if p.requires_grad:
                n_trainable_params += n_params
            else:
                n_nontrainable_params += n_params
        logger.info('> n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))
        logger.info('> training arguments:')
        for arg in vars(self.opt):
            logger.info('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))
    def _reset_params(self):
        # Re-initialise every non-BERT submodule: weight matrices with the
        # configured initializer, 1-D params (biases) uniformly in [-stdv, stdv].
        for child in self.model.children():
            if type(child) != BertModel: # skip bert params
                for p in child.parameters():
                    if p.requires_grad:
                        if len(p.shape) > 1:
                            self.opt.initializer(p)
                        else:
                            stdv = 1. / math.sqrt(p.shape[0])
                            torch.nn.init.uniform_(p, a=-stdv, b=stdv)
    def _train(self, criterion, optimizer, train_data_loader, val_data_loader):
        """Run the epoch loop, checkpoint after every epoch, and return the
        path of the last checkpoint written (None when num_epoch == 0).

        NOTE(review): val_data_loader is never read here, and max_val_acc /
        max_val_f1 / max_val_epoch are never updated after their initial 0,
        so the "early stop" below fires unconditionally once i_epoch reaches
        opt.patience — no validation-based model selection takes place.
        """
        max_val_acc = 0
        max_val_f1 = 0
        max_val_epoch = 0
        global_step = 0
        path = None
        for i_epoch in range(self.opt.num_epoch):
            logger.info('>' * 100)
            logger.info('epoch: {}'.format(i_epoch+1))
            n_correct, n_total, loss_total = 0, 0, 0
            # switch model to training mode
            self.model.train()
            for i_batch, batch in enumerate(train_data_loader):
                global_step += 1
                # clear gradient accumulators
                optimizer.zero_grad()
                inputs = [batch[col].to(self.opt.device) for col in self.opt.inputs_cols]
                outputs = self.model(inputs)
                targets = batch['polarity'].to(self.opt.device)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()
                # Running accuracy / mean loss over the epoch so far.
                n_correct += (torch.argmax(outputs, -1) == targets).sum().item()
                n_total += len(outputs)
                loss_total += loss.item() * len(outputs)
                if global_step % self.opt.log_step == 0:
                    train_acc = n_correct / n_total
                    train_loss = loss_total / n_total
                    logger.info('loss: {:.4f}'.format(train_loss))
            # Checkpoint once per epoch.
            if not os.path.exists('trained_models'):
                os.mkdir('trained_models')
            path_name = 'trained_models/{0}_{1}_iob_epoch_'+str(i_epoch+1)+'.model'
            path = path_name.format(self.opt.model_name, self.opt.dataset)
            torch.save(self.model.state_dict(), path)
            logger.info('>> saved: {}'.format(path))
            if i_epoch - max_val_epoch >= self.opt.patience:
                print('>> early stop.')
                break
        return path
    def _evaluate_acc_f1(self, data_loader):
        """Accuracy and macro-F1 (labels 0/1/2) over *data_loader*.

        NOTE(review): defined but not invoked anywhere inside this class.
        """
        n_correct, n_total = 0, 0
        t_targets_all, t_outputs_all = None, None
        # switch model to evaluation mode
        self.model.eval()
        with torch.no_grad():
            for i_batch, t_batch in enumerate(data_loader):
                t_inputs = [t_batch[col].to(self.opt.device) for col in self.opt.inputs_cols]
                t_targets = t_batch['polarity'].to(self.opt.device)
                t_outputs = self.model(t_inputs)
                n_correct += (torch.argmax(t_outputs, -1) == t_targets).sum().item()
                n_total += len(t_outputs)
                # Accumulate predictions/targets across batches for F1.
                if t_targets_all is None:
                    t_targets_all = t_targets
                    t_outputs_all = t_outputs
                else:
                    t_targets_all = torch.cat((t_targets_all, t_targets), dim=0)
                    t_outputs_all = torch.cat((t_outputs_all, t_outputs), dim=0)
        acc = n_correct / n_total
        f1 = metrics.f1_score(t_targets_all.cpu(), torch.argmax(t_outputs_all, -1).cpu(), labels=[0, 1, 2], average='macro')
        return acc, f1
    def run(self):
        """Build loss/optimizer/loader, re-init non-BERT params, and train.

        NOTE(review): the training loader is passed as both train and "val"
        loader — see _train, which ignores the latter anyway.
        """
        # Loss and Optimizer
        criterion = nn.CrossEntropyLoss()
        _params = filter(lambda p: p.requires_grad, self.model.parameters())
        optimizer = self.opt.optimizer(_params, lr=self.opt.lr, weight_decay=self.opt.l2reg)
        train_data_loader = DataLoader(dataset=self.trainset, batch_size=self.opt.batch_size, shuffle=True)
        self._reset_params()
        best_model_path = self._train(criterion, optimizer, train_data_loader, train_data_loader)
def main():
    """Parse CLI hyper-parameters, resolve them to concrete classes and
    callables on the namespace, seed RNGs if requested, and launch training.
    """
    # Hyper Parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='RE_BERT', type=str)
    parser.add_argument('--dataset', default='evernote', type=str, help='app_name')
    parser.add_argument('--train_file', type=str)
    parser.add_argument('--optimizer', default='adam', type=str)
    parser.add_argument('--initializer', default='xavier_uniform_', type=str)
    parser.add_argument('--lr', default=2e-5, type=float, help='try 5e-5, 2e-5 for BERT, 1e-3 for others')
    parser.add_argument('--dropout', default=0.1, type=float)
    parser.add_argument('--l2reg', default=0.01, type=float)
    parser.add_argument('--num_epoch', default=1, type=int, help='try larger number for non-BERT models')
    parser.add_argument('--batch_size', default=16, type=int, help='try 16, 32, 64 for BERT models')
    parser.add_argument('--log_step', default=10, type=int)
    parser.add_argument('--bert_dim', default=768, type=int)
    parser.add_argument('--pretrained_bert_name', default='bert-base-uncased', type=str)
    parser.add_argument('--max_seq_len', default=80, type=int)
    parser.add_argument('--polarities_dim', default=3, type=int)
    parser.add_argument('--hops', default=3, type=int)
    parser.add_argument('--patience', default=5, type=int)
    parser.add_argument('--device', default=None, type=str, help='e.g. cuda:0')
    parser.add_argument('--seed', default=None, type=int, help='set seed for reproducibility')
    parser.add_argument('--valset_ratio', default=0, type=float, help='set ratio between 0 and 1 for validation support')
    # The following parameters are only valid for the lcf-bert model
    parser.add_argument('--local_context_focus', default='cdm', type=str, help='local context focus mode, cdw or cdm')
    parser.add_argument('--alpha', default=3, type=int, help='relative distance (LOCAL CONTEXT)')
    opt = parser.parse_args()
    opt.SRD = opt.alpha
    # Deterministic runs when a seed is supplied.
    if opt.seed is not None:
        random.seed(opt.seed)
        numpy.random.seed(opt.seed)
        torch.manual_seed(opt.seed)
        torch.cuda.manual_seed(opt.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        os.environ['PYTHONHASHSEED'] = str(opt.seed)
    # Lookup tables mapping CLI strings to concrete classes / callables.
    model_classes = {
        'RE_BERT': RE_BERT,
    }
    dataset_files = {}
    dataset_files[opt.dataset] = {}
    dataset_files[opt.dataset]['train'] = opt.train_file
    input_colses = {
        'RE_BERT': ['concat_bert_indices', 'concat_segments_indices', 'text_bert_indices', 'aspect_bert_indices'],
    }
    initializers = {
        'xavier_uniform_': torch.nn.init.xavier_uniform_,
        'xavier_normal_': torch.nn.init.xavier_normal_,
        'orthogonal_': torch.nn.init.orthogonal_,
    }
    optimizers = {
        'adadelta': torch.optim.Adadelta, # default lr=1.0
        'adagrad': torch.optim.Adagrad, # default lr=0.01
        'adam': torch.optim.Adam, # default lr=0.001
        'adamax': torch.optim.Adamax, # default lr=0.002
        'asgd': torch.optim.ASGD, # default lr=0.01
        'rmsprop': torch.optim.RMSprop, # default lr=0.01
        'sgd': torch.optim.SGD,
    }
    opt.model_class = model_classes[opt.model_name]
    opt.dataset_file = dataset_files[opt.dataset]
    opt.inputs_cols = input_colses[opt.model_name]
    opt.initializer = initializers[opt.initializer]
    opt.optimizer = optimizers[opt.optimizer]
    opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \
        if opt.device is None else torch.device(opt.device)
    # Also log to a timestamped file alongside stdout.
    log_file = '{}-{}-{}.log'.format(opt.model_name, opt.dataset, strftime("%y%m%d-%H%M", localtime()))
    logger.addHandler(logging.FileHandler(log_file))
    ins = Instructor(opt)
    ins.run()
if __name__ == '__main__':
    # Parse CLI options and launch training.
    main()
| [
"ricardo.marcacini@gmail.com"
] | ricardo.marcacini@gmail.com |
822c231cf8d32a36c2a9f6df488f6181ea21b234 | f4b43fe66d74ce5c37d61c446301241f7bf6394d | /src/com/kaiyouhu/pygmo/icq/icq_algo1.py | 68e6fba7db0a6375a141938a8193e0b13947bc8b | [
"MIT"
] | permissive | KaiyouHu/Evolutionary | 6cb6042db9b670c3a5a35e9a7661dc5be6997e90 | 2dd8e3e1d093779261c8d0d7071e3c69afa7f239 | refs/heads/master | 2020-04-17T17:55:29.121825 | 2019-04-14T14:01:38 | 2019-04-14T14:01:38 | 166,804,620 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,875 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: kaiyouhu@gmail.com
# Algorithm 1 shows the evolutionary algorithm
# used for optimizing diversity in 2018-Evolutionary
# Diversity Optimization Using Multi-Objective Indicators.
import math
import pygmo as pg
import numpy as np
from matplotlib.patches import Circle
import matplotlib.pyplot as plt
class Point(object):
    """A 2-D point with public ``x`` and ``y`` coordinates."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def point(self):
        """Return the coordinates as a two-element list ``[x, y]``."""
        return [self.x, self.y]
    def __str__(self):
        return '({0}, {1})'.format(self.x, self.y)
def enpoint(pointlist):
    """Build a Point from the first two entries of *pointlist*."""
    return Point(pointlist[0], pointlist[1])
def cirpoint(point_x):
    """Map an integer angle index (arc-minutes over [0, 90] degrees) onto
    the circle (x-1)^2 + (y-1)^2 = 1 and return the resulting Point.
    """
    theta = point_x / (60 * 180) * np.pi
    return Point(1 - np.cos(theta), 1 - np.sin(theta))
# produce a mutation point
def mutationpoint(random):
    """Return the circle Point for the first sampled angle index in *random*.

    Delegates to cirpoint() instead of duplicating its trigonometry, so the
    angle-index -> point mapping lives in exactly one place.
    """
    return cirpoint(random[0])
# Pick the population variant with the highest hypervolume: index 0 of
# hypvalue is the unchanged population, index i (i >= 1) is the population
# with member i-1 replaced by the mutant point.
def listmax(pList, hypvalue, random):
    """Apply the best single-point mutation to *pList* (in place).

    hypvalue[0] is the hypervolume of the current population; hypvalue[i]
    (i >= 1) is the hypervolume with member i-1 replaced by the mutant
    derived from random[0]. If some replacement strictly beats the status
    quo, the first best one is applied. Returns *pList*.

    (Cleaned up: the original carried several no-op ``pList = pList`` /
    ``j = j`` assignments and a hand-rolled while-loop argmax.)
    """
    best = 0
    for i in range(1, len(hypvalue)):
        if hypvalue[best] < hypvalue[i]:
            best = i
    if best != 0:
        pList[best - 1] = cirpoint(random[0]).point()
    return pList
# draw a single point
def drawpoint(plt, point):
    """Plot *point* (an object with .x/.y attributes) as a blue square."""
    plt.plot(point.x, point.y, 'bs')
    pass
# end drawpoint
# draw the reference circle arc
def drawcircle(plt):
    """Plot the lower arc of (x-1)^2 + (y-1)^2 = 1 for x in [0, 1] and
    label the axes. *plt* is the matplotlib.pyplot module.
    """
    # define circle
    x = np.linspace(0, 1, 50)
    y = 1 - np.sqrt(1 - (x - 1) * (x - 1))
    plt.plot(x, y, label='circle')
    plt.xlabel('X', fontsize=14)
    plt.ylabel('Y', fontsize=14)
    # define circle
# draw every population member
def drawcirclepoint(plt, pList):
    """Plot each ``[x, y]`` pair in *pList* as a green triangle marker."""
    # define circle point
    for point in pList:
        # angle = point_x / (60 * 180) * np.pi
        x = point[0]
        y = point[1]
        # print('x = %f and y = %f'%(x, y))
        plt.plot(x, y, 'g^')
    # define circle point
    pass
# population hypervolume
def hypsinglecompute(pList, ref_point):
    """Hypervolume of population *pList* w.r.t. *ref_point* (via pygmo)."""
    return pg.hypervolume(pList).compute(ref_point)
# population + 1 hypervolume
def hypcompute(pList, random, ref_point):
    """Hypervolume of the current population and of every single-point
    replacement variant.

    Returns ``(lList, hypvalue)``: ``lList[0]`` is *pList* itself and
    ``lList[i]`` (i >= 1) is a copy with member i-1 replaced by the mutant
    point from ``random[0]``; ``hypvalue`` holds the matching hypervolumes
    w.r.t. *ref_point*.
    """
    lList = [pList]
    for i in range(len(pList)):
        variant = pList.copy()
        variant[i] = cirpoint(random[0]).point()
        lList.append(variant)
    hypvalue = [pg.hypervolume(pop).compute(ref_point.point()) for pop in lList]
    return lList, hypvalue
def algosimple():
    """Evolutionary diversity-optimisation demo on the circle.

    Keeps a population of 8 points on (x-1)^2 + (y-1)^2 = 1, repeatedly
    proposes one mutant point and applies the single replacement that
    maximises hypervolume w.r.t. the reference point (1, 1). Snapshots are
    plotted whenever the iteration count is a power of 5.

    NOTE(review): ``math.log(i, 5).is_integer()`` is a float computation
    and may miss some exact powers of 5 due to rounding.
    """
    # define problem
    # excute the farthest point (1, 1) to (x-1)^2 + (y-1)^2 = 1
    # define refer point
    ref_point = Point(1, 1)
    # random point from radius and init group
    randoms = np.random.randint(0, 90 * 60, 8)
    pList = []
    for point_x in randoms:
        point = cirpoint(point_x).point()
        # print(point)
        pList.append(point)
    i = 1
    k = 1
    plt.rcParams.update({'figure.max_open_warning': 0})
    while i < 2000:
        flag = 0
        if math.log(i, 5).is_integer():
            flag = 1
        else:
            flag = 0
        if flag:
            plt.figure(figsize=(6, 18), dpi=80)
            plt.figure(k)
            k = k + 1
            # i-311
            plt.subplot(311)
            plt.title('Cycle-' + str(i) + '-1', fontsize=14)
            drawpoint(plt, ref_point)
            drawcircle(plt)
            drawcirclepoint(plt, pList=pList)
        # hypervolume compute (NOTE(review): the result is discarded here)
        hypsinglecompute(pList=pList, ref_point=ref_point.point())
        # hypervolumn compute
        if flag:
            # i - 312
            plt.subplot(312)
            plt.title('Cycle-' + str(i) + '-2', fontsize=14)
            drawcircle(plt)
            drawcirclepoint(plt, pList)
            drawpoint(plt, ref_point)
        # mutation point
        random = np.random.randint(0, 90 * 60, 1)
        mutatpoint = mutationpoint(random)
        if flag:
            plt.plot(mutatpoint.x, mutatpoint.y, 'bs')
        # pop + 1 hypervolumn compute
        lList, hypvalue = hypcompute(pList, random, ref_point=ref_point)
        # keep the best single replacement (or no change)
        pList = listmax(pList, hypvalue, random)
        # i - 313
        if flag:
            plt.subplot(313)
            plt.title('Cycle-' + str(i) + '-3', fontsize=14)
            drawcircle(plt)
            drawcirclepoint(plt, pList)
            drawpoint(plt, ref_point)
        i = i + 1
        # pop mutation
        # plt.tight_layout()
    # loop until hyper nearly not change
    # change each children graph distance
    plt.show()
    pass
def algocon():
    """Placeholder for a continuous variant of the algorithm (unimplemented)."""
    pass
def drawcir1():
    """Demo: plot the upper arc of the unit circle x^2 + y^2 = 1."""
    x = np.linspace(0, 1, 50)
    y = np.sqrt(1 - x * x)
    plt.plot(x, y, label='circle')
    plt.xlabel('X', fontsize=14)
    plt.ylabel('Y', fontsize=14)
    plt.title('x^2+y^2=1', fontsize=14)
    plt.show()
def drawcir2():
    """Demo: draw a filled unit circle patch centred at the origin."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cir1 = Circle(xy=(0.0, 0.0), radius=1, alpha=0.5)
    ax.add_patch(cir1)
    x, y = 0, 0
    ax.plot(x, y, 'ro')
    plt.axis('scaled')
    plt.axis('equal')
    plt.show()
def drawcir3():
    """Demo: two figures with multiple subplots, each marking point (1, 1)."""
    plt.figure(figsize=(8, 8), dpi=80)
    plt.figure(1)
    plt.subplot(211)
    plt.plot(1, 1, 'bs')
    plt.subplot(212)
    plt.plot(1, 1, 'bs')
    plt.tight_layout()
    plt.figure(2)
    plt.subplot(221)
    plt.plot(1, 1, 'bs')
    plt.subplot(224)
    plt.plot(1, 1, 'bs')
    plt.tight_layout()
    plt.show()
    pass
if __name__ == "__main__":
    # Run the diversity-optimisation demo (drawcir3 is an unused plot demo).
    # drawcir3()
    algosimple()
    pass
| [
"kaiyouhu@gmail.com"
] | kaiyouhu@gmail.com |
a55e6dd8f6599beefbce85b4bc9676cbcb715e9d | cdf724225ac99ab92f54734d412ffea56e184579 | /sum3.py | d6dd47f5c0c99eee9c2c585011b71364e657e67c | [] | no_license | BillBrazerZhang/LeetMySolutions | 994d9a66c8dc4caa219a7fdd201b660c3d0312f4 | 3ed257c31f5eca3da5e28f684c8d69ce363ae4c6 | refs/heads/master | 2020-03-28T11:04:47.115194 | 2018-09-11T17:27:46 | 2018-09-11T17:27:46 | 148,175,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
nums.sort()
for i in range(len(nums)-2):
if nums[i] > 0:
break
if i > 0 and nums[i] == nums[i-1]:
continue
l, r = i+1, len(nums)-1
while l < r:
s = nums[i] + nums[l] + nums[r]
if s > 0:
r -= 1
elif s < 0:
l += 1
else:
res.append([nums[i],nums[l],nums[r]])
while nums[l+1] == nums[l]:
l += 1
while nums[r-1] == nums[r]:
r -= 1
l += 1
r -= 1
return res
# Ad-hoc smoke test: print the zero-sum triplets of a sample list.
a = Solution()
out = a.threeSum([-1,-2,0,4,2,1,-1])
print(out)
| [
"931053259@qq.com"
] | 931053259@qq.com |
c465884d567497e9ae951e45904530afaca452a4 | 7d58a9c73835bd7206011a6a098abdfdc1c6290f | /streamers/apps.py | d9be14d1aa9f537b99d9c82fb6504a23482befd0 | [] | no_license | perna/live-coding-calendar | 0a15139b6062aa8cd3fc3bc3e5732eb9be7f9a29 | 3cefee921123ea36fafbee927fde5951416c4701 | refs/heads/master | 2022-09-17T07:56:21.653471 | 2020-05-19T01:49:46 | 2020-05-19T01:49:46 | 265,101,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class StreamersConfig(AppConfig):
    """Django application configuration for the ``streamers`` app."""
    name = 'streamers'
| [
"andersonmeira@outlook.com"
] | andersonmeira@outlook.com |
38115bbbde5eccbb9da1ee4b815a45adc9218485 | a847c161a8fb523c0500a32ea07957a59a3d26a7 | /venv/Scripts/easy_install-script.py | 2affe41652cbd3558bfbd7d6635a87d99ed43b2a | [] | no_license | mrmm2703/spotify-data-analyser | ac3bd4b1eeb8c81a949d7c595af1d78fd3a9a575 | 5e87b7270c68b5b45e760705e5bbcdf950e29552 | refs/heads/master | 2022-11-11T13:47:37.406199 | 2020-07-04T22:36:50 | 2020-07-04T22:36:50 | 277,195,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #!C:\Users\mrahm\PycharmProjects\SpotifyDataAnalyser\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
# Auto-generated setuptools console-script wrapper (venv artifact);
# not meant to be edited by hand.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"35401592+mrmm2703@users.noreply.github.com"
] | 35401592+mrmm2703@users.noreply.github.com |
90a57dd681903b99a7c918852839cb708b7a7b9d | ff98245b7b504f3dc2212f9b34d169a36fbd463e | /notify/utils.py | c7e7d05c0537a0c3d35b9f6c91009a2c3616b6ec | [] | no_license | OpenJUB/roomer | 909748193004a21995f8c4650f3192f2cd65700c | 2d60ec8f5aeb47db47fd3ed9105b0543aa599be7 | refs/heads/master | 2023-02-05T17:29:23.342657 | 2018-05-06T07:44:16 | 2018-05-06T07:44:16 | 58,051,191 | 1 | 1 | null | 2023-02-02T02:48:31 | 2016-05-04T12:43:46 | Python | UTF-8 | Python | false | false | 1,414 | py | # What do we want?
# Function to mail one user
# Function to mail a set of users
# Basic mail template
# Reply-To header
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.conf import settings
class Notification(object):
    """Base class for the housing e-mails (plain text + HTML).

    Subclasses override the class attributes (subject fragment and template
    paths) and :meth:`get_context`; :meth:`send` renders both templates and
    mails the result to ``self.user.email``.
    """
    sender_address = "USG Housing <housing@ju-u.sg>"
    mail_title = "Empty"
    txt_template = "notify/base.txt"
    html_template = "notify/base.html"
    def __init__(self, user):
        self.user = user
    def get_context(self):
        """Extra template context; subclasses override this."""
        return {}
    def send(self):
        # Common context entries are layered on top of the subclass context.
        context = self.get_context()
        context['domain'] = settings.EMAIL_DOMAIN
        context['user'] = self.user
        subject = "USG Housing: " + self.mail_title
        body_text = render_to_string(self.txt_template, context)
        body_html = render_to_string(self.html_template, context)
        send_mail(subject, body_text, self.sender_address,
                  [self.user.email], html_message=body_html)
class InboxNotification(Notification):
    """Notification sent to the receiver of a new roommate request."""
    mail_title = "New roommate request"
    txt_template = "notify/inbox.txt"
    html_template = "notify/inbox.html"
    def __init__(self, room_request):
        # Address the mail to the request's receiver.
        super(InboxNotification, self).__init__(room_request.receiver)
        self.room_request = room_request
def get_context(self):
return {'request': self.room_request} | [
"x32000@gmail.com"
] | x32000@gmail.com |
6a405e8f55909b6ed9222b949bef9230edd24b17 | abfa0fcab2bc9a9c3cccbc3a8142cdd4b2a66ee9 | /698-Partition to K Equal Sum Subsets.py | 8aceeaa11fdcd8709c3a984236173baf0a4fbd70 | [] | no_license | JinnieJJ/leetcode | 20e8ccf3f8919028c53e0f0db86bcc2fbc7b6272 | 26c6ee936cdc1914dc3598c5dc74df64fa7960a1 | refs/heads/master | 2021-04-15T09:18:08.450426 | 2021-03-06T01:53:27 | 2021-03-06T01:53:27 | 126,275,814 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | class Solution:
def canPartitionKSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
sums = [0] * k
subsum = sum(nums) / k
nums.sort(reverse=True)
l = len(nums)
def walk(i):
if i == l:
return len(set(sums)) == 1
for j in range(k):
sums[j] += nums[i]
if sums[j] <= subsum and walk(i+1):
return True
sums[j] -= nums[i]
if sums[j] == 0:
break
return False
return walk(0)
| [
"noreply@github.com"
] | noreply@github.com |
037c8b61d186d7d441688db7e43383624627ba9d | 07b67e1268a2bad53c8fdb98e55c5271d0b65f5c | /maze.py | f4590c0c9f5d43f5d6fe96e8f083862e19f52a5e | [] | no_license | puszolek/pythonCD | 93bcd506126694308268d4348a021af56c95671e | 51bab35c115bf5c3e6219a54bb2e78f5f35a477d | refs/heads/master | 2020-04-29T07:38:43.916853 | 2019-06-01T11:43:58 | 2019-06-01T11:43:58 | 175,960,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | import pgzrun
# Pygame Zero window settings and game objects.
TITLE = "Maze runner"
WIDTH = 600
HEIGHT = 800
RUCH = 5  # movement step in pixels per frame ("ruch" is Polish for "move")
CZERWONY = (255, 0, 0)  # RGB red ("czerwony" is Polish for "red")
alien = Actor('alien', anchor=('center', 'center'))
gorna_sciana = Rect((0,0), (WIDTH, 30))  # top wall
dolna_sciana = Rect((0,HEIGHT-30), (WIDTH, HEIGHT))  # bottom wall
def draw():
    """Pygame Zero draw hook: clear the screen, draw the player and walls."""
    screen.fill('black')
    alien.draw()
    screen.draw.filled_rect(gorna_sciana, CZERWONY)
    screen.draw.filled_rect(dolna_sciana, CZERWONY)
def update():
    """Pygame Zero update hook: arrow keys move the alien by RUCH pixels.

    NOTE(review): the elif chain honours only one key per frame, so
    diagonal movement is impossible — presumably intended; confirm.
    """
    if keyboard.left:
        alien.x -= RUCH
    elif keyboard.right:
        alien.x += RUCH
    elif keyboard.up:
        alien.y -= RUCH
    elif keyboard.down:
        alien.y += RUCH
# Hand control to Pygame Zero's game loop.
pgzrun.go()
| [
"noreply@github.com"
] | noreply@github.com |
278d03131baf2cb82d60e4823f4f5a6a1b57b993 | 6cc266e5b179ad84aaad3d1afc804b5e6f71738b | /src/python/aetherinstinct/utils/args.py | 929b3ce9618ae9dc96c890402b1d1253d7f3e44a | [
"MIT"
] | permissive | roadnarrows-robotics/aetherinstinct | 9a369f10724e506561e0b2b45b772819e260f37f | 29ddde4878c2fa6dac902145cb8ef9da4d61b5c6 | refs/heads/master | 2023-08-23T02:48:06.188463 | 2021-10-27T19:24:15 | 2021-10-27T19:24:15 | 202,207,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,053 | py | """
Command-line argument parsing with structured printing.
See the argparse python module for details to extend argument parsing.
\LegalBegin
Copyright 2019-2020 Aether Instinct LLC. All Rights Reserved
Licensed under the MIT License (the "License").
You may not use this file except in compliance with the License. You may
obtain a copy of the License at:
https://opensource.org/licenses/MIT
The software is provided "AS IS", without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability,
fitness for a particular purpose and noninfringement. in no event shall the
authors or copyright holders be liable for any claim, damages or other
liability, whether in an action of contract, tort or otherwise, arising from,
out of or in connection with the software or the use or other dealings in the
software.
\LegalEnd
"""
import argparse
import textwrap
class SmartFormatter(argparse.RawTextHelpFormatter):
    """Help formatter that can keep an author's explicit line breaks.

    Help strings beginning with the literal prefix ``'R|'`` are emitted
    with their embedded newlines preserved; anything else falls back to
    the standard argparse word-wrapping.
    """
    def _split_lines(self, text, width):
        """Split *text* into a list of help lines (max *width* columns)."""
        if text.startswith('R|'):
            # Drop the marker and honour the embedded line breaks.
            return text[2:].splitlines()
        # Default behaviour: HelpFormatter wraps to *width*.
        return argparse.HelpFormatter._split_lines(self, text, width)
def add_subparsers(argparser, helptext):
    """Return *argparser*'s subparsers object, creating it on first use.

    An ArgumentParser may only call ``add_subparsers()`` once, so the
    created object is cached on the parser itself as
    ``argparser.subparsers`` and reused on later calls (the *helptext* of
    later calls is ignored).
    """
    if getattr(argparser, 'subparsers', None) is None:
        argparser.subparsers = argparser.add_subparsers(help=helptext)
    return argparser.subparsers
| [
"robin.knight@roadnarrows.com"
] | robin.knight@roadnarrows.com |
f5b6996f6d110579dab1debed30100c651f25190 | 4a01ffb82c91e40003dc2af2d262f745c8de0ad4 | /Autokey/Autokey/arrow_shift_right_righthand.py | 88f4d53800ba3a6eb885db75b723d7369de00d6e | [] | no_license | fire3420/Program_settings | c4810da8e36a8e4a096baad80081c3d82c06545b | 83d8b8a72e47373c12bd72e41267833069073cd4 | refs/heads/master | 2021-11-22T14:02:17.066582 | 2021-09-22T06:47:16 | 2021-09-22T06:47:16 | 245,595,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | # Enter script code
# AutoKey macro: emit Shift+Right (extend selection one character right).
output='<shift>+<right>'
keyboard.send_keys(output)
| [
"fire3420@gmail.com"
] | fire3420@gmail.com |
e3355a62cfa3419230804429ae53ad3575c2c690 | f462b2b37559bfb56737497c61b86b334e9495ec | /carina_ATPG/dft_def.py | b46d651f690dd98d21e46c7d6ad16e00798e6d21 | [] | no_license | archubw/spec | 5e223060ec84ed86e4e9cb1909e46a4eb0685650 | a69f45b16e1bf4a8c2f5fcbf8a315e26235270b2 | refs/heads/master | 2023-03-23T07:41:09.694070 | 2020-03-03T06:08:55 | 2020-03-03T06:08:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,589 | py | import os,re, time
import jinja2
# Jinja2 environment rooted at the current working directory; the four
# template files below must exist there when this module is imported.
templateLoader = jinja2.FileSystemLoader(searchpath="./")
templateEnv = jinja2.Environment(loader=templateLoader)
sdf_template = templateEnv.get_template("sdf_annotate.v_template")
system_f_template = templateEnv.get_template("system.f_template")
inter_i_template = templateEnv.get_template("inter.i_template")
run_sim_template = templateEnv.get_template("run_sim.template")
def get_netlist(block, pd_csv):
    """Return the path of *block*'s latest netlist (``<dir>/<block>.sv``).

    *pd_csv* is a pandas DataFrame indexed by block name with a ``netlist``
    column holding the directory. Returns None (after printing a message)
    when the netlist file is missing or empty.
    """
    path = pd_csv.loc[block, "netlist"] + "/" + block + ".sv"
    # Guard: the file must exist and be non-empty.
    if not (os.path.exists(path) and os.path.getsize(path) > 0):
        print("{} does not exist".format(path))
        return None
    return path
def get_tsdb(block, pd_csv):
    """Return the (``dft_mb``, ``outData_mb``) paths under *block*'s tsdb dir.

    *pd_csv* is a pandas DataFrame indexed by block name with a ``tsdb``
    column holding the base directory.
    """
    base = pd_csv.loc[block, "tsdb"]
    return (base + "/dft_mb", base + "/outData_mb")
def get_refer_atpg(block, pd_csv):
    ''' This def will get the latest reference ATPG script of block from csv file
    1. This csv file must be imported from pandas
    2. The column name must be *atpg_ref*
    3. This def will return the *atpg_ref* value for *block*'''
    return pd_csv.loc[block, "atpg_ref"]
def gen_sdf_file(dir_name, block, pat, mode):
    """Render the four SDF simulation files for *block* into *dir_name*.

    Files produced (all carrying the ``<pat>_<mode>`` suffix):
    ``sdf_annotate_*.v``, ``system_*.f``, ``inter_*.i`` and ``run_sim_*``.
    Every template receives block/pat/mode as its render context.

    (Refactored: the original repeated the same render-print-write sequence
    four times; the file list below keeps the exact same names and order.)
    """
    suffix = pat + "_" + mode
    jobs = [
        (sdf_template, "sdf_annotate_" + suffix + ".v"),
        (system_f_template, "system_" + suffix + ".f"),
        (inter_i_template, "inter_" + suffix + ".i"),
        (run_sim_template, "run_sim_" + suffix),
    ]
    for template, file_name in jobs:
        out_name = dir_name + "/" + file_name
        print("Generating : {}".format(out_name))
        with open(out_name, "w") as f:
            f.write(template.render(block=block, pat=pat, mode=mode))
class DFT_REVIEW_ATPG():
    """Parse one Tessent/ATPG log file and expose the extracted scan-chain,
    coverage, pattern-count, run-time and clock-domain figures via getters.
    For every metric, the last occurrence in the log wins.
    """
    def __init__(self, block, log_file):
        """Scan *log_file* line by line and record the metric values.

        NOTE(review): *block* is currently unused; the parameter is kept so
        existing ``DFT_REVIEW_ATPG(block, log)`` call sites remain valid.
        """
        self.start_time = time.time()
        # Scan-chain metrics
        self.scan_cells = 0
        self.scan_chains = 0
        self.longest_chain = 0
        # ATPG metrics
        self.test_coverage = 0
        self.test_pattern = 0
        self.test_atpg_time = 0
        # Clock domains: {clock_name: period}
        self.clock_list = {}
        self.chain_identified = re.compile(r"(?P<scan_cells>\d+) scan cells have been identified in (?P<scan_chains>\d+) scan chains.")
        # (the group name keeps its historical typo "longes_scan_lenght")
        self.longest_chain_length = re.compile(r"Longest scan chain has (?P<longes_scan_lenght>\d+) scan cells.")
        self.test_coverage_re = re.compile(r"test_coverage\s+([\d.%]+)")
        self.test_pattern_re = re.compile(r"#test_patterns\s+(?P<test_pattern>\d+)")
        self.test_atpg_time_re = re.compile(r"CPU_time \(secs\)\s+(?P<cpu_time>[\d.]+)")
        self.clock_domain_re = re.compile(r"command: add_clocks [01] (?P<clock_name>[\S]+) -period (?P<clock_period>[\d.]+)")
        print("Processing logfile : {}".format(log_file))
        with open(log_file, "r") as f:
            for line in f:
                # Each pattern is searched once per line (the original code
                # ran every matching search twice).
                m = self.chain_identified.search(line)
                if m:
                    self.scan_cells = int(m.group("scan_cells"))
                    self.scan_chains = int(m.group("scan_chains"))
                m = self.longest_chain_length.search(line)
                if m:
                    self.longest_chain = int(m.group("longes_scan_lenght"))
                if self.test_coverage_re.search(line):
                    # Keep the last number on the line (the coverage value).
                    _tc = re.findall(r'[\d.]+', line)
                    self.test_coverage = float(_tc[-1])
                m = self.test_pattern_re.search(line)
                if m:
                    self.test_pattern = int(m.group("test_pattern"))
                m = self.test_atpg_time_re.search(line)
                if m:
                    self.test_atpg_time = float(m.group("cpu_time"))
                m = self.clock_domain_re.search(line)
                if m:
                    self.clock_list[m.group("clock_name")] = float(m.group("clock_period"))
        print("Script done after {} sec ".format(time.time()-self.start_time))
    def get_scan_cells(self):
        """Total number of identified scan cells."""
        return self.scan_cells
    def get_scan_chain(self):
        """Number of scan chains."""
        return self.scan_chains
    def get_longest_chain_length(self):
        """Length (in cells) of the longest scan chain."""
        return self.longest_chain
    def get_test_coverage(self):
        """Test coverage in percent."""
        return self.test_coverage
    def get_test_pattern(self):
        """Number of generated test patterns."""
        return self.test_pattern
    def get_test_aptg_time(self):
        """ATPG CPU time in seconds (name keeps its historical typo)."""
        return self.test_atpg_time
    def get_clock_domain(self):
        """Mapping of clock name -> period."""
        return self.clock_list
def test_review_scan_chain():
    """Regression-check DFT_REVIEW_ATPG against four known ATPG log files."""

    def check(parser, cells, chains, longest, coverage, patterns, cpu_time):
        # Assertion order deliberately mirrors the original hand-written checks.
        assert parser.get_scan_cells() == cells
        assert parser.get_scan_chain() == chains
        assert parser.get_longest_chain_length() == longest
        assert parser.get_test_coverage() == coverage
        assert parser.get_test_pattern() == patterns
        assert parser.get_test_aptg_time() == cpu_time

    loc_int = DFT_REVIEW_ATPG("test", "log.atpg_tdf_loc_edt.int")
    check(loc_int, 106283, 360, 302, 70.28, 8312, 3129.9)
    assert loc_int.get_clock_domain()["i_clock"] == 5

    check(DFT_REVIEW_ATPG("test", "log.atpg_tdf_los_edt.int"),
          106283, 360, 302, 76.34, 12509, 6530.0)
    check(DFT_REVIEW_ATPG("test", "log.atpg_dcsa_edt.int"),
          106283, 360, 302, 99.28, 1634, 431.3)
    check(DFT_REVIEW_ATPG("test", "log.atpg_dcsa_edt.ext"),
          6250, 25, 282, 78.61, 55, 148.1)
if __name__ == "__main__":
    # NOTE(review): gen_sdf_file is not defined in the visible portion of this
    # module — presumably provided earlier in the file; confirm before relying on it.
    gen_sdf_file("test", "BLOCK", "int_serial")
gen_sdf_file("test", "BLOCK", "int_parallel") | [
"ngohongtiep@gmail.com"
] | ngohongtiep@gmail.com |
a9fdf9bba26d8d16049e97b55c3565160bbc4345 | d0649156b8d0f43f2c56e01a017cd22823d0c17d | /static/zoddockapp/migrations/0012_coupon.py | 2d31c184fdfe8704124d725e3667b5e9dde9df08 | [] | no_license | Masozera/Django-Ecommerce-Site | 579cc4606ff60693a3957479125f2fb2c113286a | be00c2ac9f505def240771b20ec5daa4150e4303 | refs/heads/main | 2023-02-16T06:25:44.217546 | 2021-01-08T06:14:59 | 2021-01-08T06:14:59 | 309,270,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # Generated by Django 3.1.3 on 2021-01-07 11:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the ``Coupon`` model."""
    dependencies = [
        ('zoddockapp', '0011_auto_20210107_0055'),
    ]
    operations = [
        migrations.CreateModel(
            name='Coupon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=15)),  # the coupon's code string (max 15 chars)
            ],
        ),
    ]
| [
"masozeravictor@gmail.com"
] | masozeravictor@gmail.com |
f984f6c5cb1ec6e7c28b3348ce58cd076ce640af | 2633341946717144a40aa7e01997f8e120e4708a | /Microdados_env/bin/pip3.6 | 1ebc3c7b3c4ac960edfe73a751f6d542078eb9a1 | [] | no_license | fmasanori/Analise-de-Dados-INEP | 2a713d0071d592df2413ae501f34970679c9be52 | a440a050ed6e63efbeca64722fe6fa9f59d4c2d6 | refs/heads/master | 2020-09-25T05:55:05.814580 | 2019-12-03T20:03:53 | 2019-12-03T20:03:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | 6 | #!/home/joaomedeiros/MicrodadosINEP/Projeto__env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.main import main
if __name__ == '__main__':
    # Generated pip console-script shim: strip a trailing "-script.py(w)"/".exe"
    # suffix so pip reports a clean program name, then delegate to pip's main().
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"jv.medeiros.gallina@gmail.com"
] | jv.medeiros.gallina@gmail.com |
1f0f69d04585b8216b8268a4c3dc0e5868618db7 | 2dd560dc468af0af4ca44cb4cd37a0b807357063 | /Leetcode/1289. Minimum Falling Path Sum II/solution2.py | e9ebe9c9ba9a53af13d879fb8d254dac546a99d0 | [
"MIT"
] | permissive | hi0t/Outtalent | 460fe4a73788437ba6ce9ef1501291035c8ff1e8 | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | refs/heads/master | 2023-02-26T21:16:56.741589 | 2021-02-05T13:36:50 | 2021-02-05T13:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | class Solution:
def minFallingPathSum(self, arr: List[List[int]]) -> int:
m = len(arr)
n = len(arr[0])
@lru_cache(None)
def count(i: int, j: int) -> int:
if i >= m: return 0
m1 = m2 = inf
k1 = k2 = 0
for k in range(n):
if j == k: continue
if arr[i][k] < m1:
m2 = m1
m1 = arr[i][k]
k2 = k1
k1 = k
elif arr[i][k] < m2:
m2 = arr[i][k]
k2 = k
return min(m1 + count(i + 1, k1), m2 + count(i + 1, k2))
return count(0, -1)
| [
"info@crazysquirrel.ru"
] | info@crazysquirrel.ru |
9dfaf0509cd3d6fbd7492758c17e1697f340ea68 | cb4938dd6f0920fe7e248775736545ca97a82a90 | /mysite/settings.py | 54c83a0aa5016bad0043a5103a0fefdc4102fcaa | [] | no_license | Ibrahima829/my-first-blog | a3617616a638dcee12c3a8dd4d37b619b0ce4e0c | c775dd0312776a55456692cf959eb3947f00e4c3 | refs/heads/master | 2020-11-26T03:21:58.439032 | 2020-01-09T01:57:23 | 2020-01-09T01:57:23 | 228,951,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,289 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p3$2k_b3hwu5hnhzn=8^je)i3*%!c=q49$ksbv#kfbq)q!%)mi'
# NOTE(review): this key is committed to source control — rotate it and load it
# from an environment variable before any production deployment.
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []  # must list the served hostnames once DEBUG is False
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog.apps.BlogConfig',  # project-local blog application
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"ne18064@azec.ac.jp"
] | ne18064@azec.ac.jp |
8e11f4f47bcc9920ff00d8d8335fac826b8cc5b6 | 82ad02db4e5a52610e42ef9ae0349447d403aecf | /cloudmesh/RESTClient/lib/python3.6/warnings.py | dca323329b9993bc152f9e24c15fc6ca7bb8fef6 | [] | no_license | cybertraining-dsc/hid-sp18-524 | c10820b923e8c21d05859f1162338900e0a91dee | 3d6e4b67f6b11792b4f311b8a471eaae7c09edaf | refs/heads/master | 2022-11-29T21:15:17.163374 | 2018-05-09T18:40:50 | 2018-05-09T18:40:50 | 287,411,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | /home/hao/.pyenv/versions/3.6.4/lib/python3.6/warnings.py | [
"tian4damien@gmail.com"
] | tian4damien@gmail.com |
0a8e4fd31c5356ee3f4dfba423680ebe181f9d4b | 97a075ae8c23fdae35f314ce51151afcfd10d1c5 | /SimpleEgzampleofRectFunction.py | 1e9067649fb15a96a0bc643d9ca140f882ff2921 | [] | no_license | Kotmin/Understanding-neural-networks-with-sentdex | 4a0985496ec650918c107657b0bdea831e0ff07c | 6b00d9e910c24902e23d201ce0416f5c272aaf8c | refs/heads/master | 2023-07-12T10:21:25.300382 | 2021-08-13T01:43:39 | 2021-08-13T01:43:39 | 387,929,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | import numpy as np
np.random.seed(0)
X = [ [1, 2 ,3 ,2.5],
[2.0,5.0,-1.0,2.0],
[-1.5, 2.7, 3.3 , -0.8]]
inputs = [0,2,-1,3.3,-2.7,1.1,2.2,-100]
output = []
for i in inputs:
output.append(max(0,i))
print(output) | [
"70173732+Kotmin@users.noreply.github.com"
] | 70173732+Kotmin@users.noreply.github.com |
1e4806f3da02024209018420eeba717b7075f943 | 179f86c2fbc213d25c21b6488d059b435def2923 | /geomldl/wsgi.py | ff8d5b0007b9770f3d96cc9d5bd44a253e7431e2 | [] | no_license | mt-lids-sefi/geomldm | ec2781f5c53bb93b700eaf7906aa564827456bcb | 7763fd316d920e85b8e6b8361531606a7fd10a2b | refs/heads/master | 2020-04-29T14:39:51.703350 | 2019-03-20T12:24:17 | 2019-03-20T12:24:17 | 176,203,713 | 0 | 0 | null | 2019-03-20T12:24:18 | 2019-03-18T04:21:08 | Python | UTF-8 | Python | false | false | 391 | py | """
WSGI config for geomldl project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'geomldl.settings')
application = get_wsgi_application()  # module-level callable imported by the WSGI server
| [
"tejedamarcia@gmail.com"
] | tejedamarcia@gmail.com |
7e1d66c0f2227add3fc6fe096afb431bb7980d92 | 67f4facf8c778c3db51f8e24f59466714377e75e | /ecommerce/src/accounts/views.py | 9775dd360ec5b4fd7f012db60fa0ff58a76fc18e | [] | no_license | abdelmachti/Ecommerce_Django | 0bfe4df1c50a64c749aae636de94004646911b01 | 9ec6d93be45078173e5278a1f4f77e5aa9325a22 | refs/heads/master | 2021-02-08T19:40:16.694232 | 2020-05-04T23:10:25 | 2020-05-04T23:10:25 | 244,185,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,018 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, get_user_model
from django.views.generic import CreateView , FormView
from django.utils.http import is_safe_url
from .forms import LoginForm, RegisterForm, GuestForm
from .models import GuestEmail
from .signals import user_logged_in
# Create your views here.
def guest_register_view(request):
    """Create a GuestEmail from the posted form and stash its id in the session.

    On a valid submission the new row's id is stored under
    ``session['guest_email_id']`` (LoginView clears that key once a real user
    logs in).  A same-host ``next`` redirect is followed when supplied;
    everything else falls back to ``/register/``.
    """
    form = GuestForm(request.POST or None)
    # ``next`` may arrive via the query string or the posted form.
    # (renamed from ``next``, which shadowed the builtin; dropped the unused
    # ``context`` dict — this view never renders a template.)
    next_param = request.GET.get('next')
    next_post = request.POST.get('next')
    redirect_path = next_param or next_post or None
    if form.is_valid():
        email = form.cleaned_data.get("email")
        new_guest_email = GuestEmail.objects.create(email=email)
        request.session['guest_email_id'] = new_guest_email.id
        # Only follow redirects that stay on this host (open-redirect guard).
        if is_safe_url(redirect_path, request.get_host()):
            return redirect(redirect_path)
        return redirect("/register/")
    return redirect("/register/")
class LoginView(FormView):
    """Email/password login backed by ``LoginForm``.

    On success: logs the user in, emits the ``user_logged_in`` signal,
    discards any guest-checkout session key, and follows a same-host
    ``next`` redirect (otherwise ``/``).  On failure the form is re-rendered.
    """

    form_class = LoginForm
    template_name = 'accounts/login.html'
    success_url = '/'

    def form_valid(self, form):
        request = self.request
        # ``next`` may come from the query string or the posted form.
        next_ = request.GET.get('next')
        next_post = request.POST.get('next')
        redirect_path = next_ or next_post or None
        email = form.cleaned_data.get("email")
        password = form.cleaned_data.get("password")
        user = authenticate(request, username=email, password=password)
        if user is not None:
            login(request, user)
            user_logged_in.send(user.__class__, instance=user, request=request)
            try:
                # A real account supersedes any guest-checkout identity.
                del request.session['guest_email_id']
            except KeyError:  # was a bare ``except`` — only "key absent" is expected
                pass
            # Only follow redirects that stay on this host (open-redirect guard).
            if is_safe_url(redirect_path, request.get_host()):
                return redirect(redirect_path)
            return redirect("/")
        return super(LoginView, self).form_invalid(form)
""" def login_page(request):
form = LoginForm(request.POST or None)
context={
"title":"Login",
"form":form
}
#print("User logged in")
#print(request.user.is_authenticated)
next = request.GET.get('next')
next_post= request.POST.get('next')
redirect_path = next or next_post or None
if form.is_valid():
#print(form.cleaned_data)
username=form.cleaned_data.get("username")
password=form.cleaned_data.get("password")
user= authenticate(request, username=username, password=password)
#print(user)
if user is not None:
login(request, user)
try:
del request.session['guest_email_id']
except:
pass
#Redirect to a success page.
#context ['form'] = LoginForm()
if is_safe_url(redirect_path, request.get_host()):
return redirect(redirect_path)
else:
return redirect("/")
else:
#Return an invalid login erro message
print("invalid login")
return render(request, "accounts/login.html", context) """
class RegisterView(CreateView):
    """Sign-up view: renders ``RegisterForm`` and redirects to the login page."""
    form_class = RegisterForm
    template_name = 'accounts/register.html'
    success_url = '/login/'
""" User = get_user_model()
def register_page(request):
form = RegisterForm(request.POST or None)
context={
"title":"Register",
"form":form
}
if form.is_valid():
#print(form.cleaned_data)
#username=form.cleaned_data.get("username")
#password=form.cleaned_data.get("password")
#email=form.cleaned_data.get("email")
#new_user= User.objects.create_user(username,email,password)
#print(new_user)
form.save()
return render(request, "accounts/register.html", context) """ | [
"machtiabdel@gmail.com"
] | machtiabdel@gmail.com |
a1e52a91e357303211b4c8fe930b7a9191201e08 | 48140ff3c69fe22ecd7eed084ffd308522f861ee | /src/data.py | 12c033411fd23691b43338b649f2c5f875600216 | [] | no_license | attilusleung/CornellCreatives | 1d9e9ae0a766ebba80dfb177dde794e036eb2859 | 48bab38856e64b6a1dbad8e7afcfb2c40cb86d5b | refs/heads/master | 2020-04-09T03:28:27.179758 | 2018-12-03T04:35:26 | 2018-12-03T04:35:26 | 159,983,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,516 | py | from flask_sqlalchemy import SQLAlchemy
import datetime
from secrets import token_hex
from bcrypt import checkpw, hashpw, gensalt
from exceptions import *
# Shared SQLAlchemy handle; every model below subclasses db.Model.
db = SQLAlchemy()
#TODO: REPLACE kwargs.get with kwags[<something>] for initializers
#that need to throw an error
class User(db.Model):
    """
    A model that describes users registered in the app.

    Users are identified through their netid and can offer services (the
    ``services`` relationship).  Each row also stores the bcrypt password
    hash plus the current session token, its expiry, and a renew token.
    """
    netid = db.Column(db.String(7), primary_key = True)
    name = db.Column(db.String, nullable = False)
    avatar = db.Column(db.Integer, nullable = False, default = 0) #TODO: MAKE IT LINK TO A FILE
    services = db.relationship('Service', cascade = 'delete')
    #tracking = db.relationship('Track', cascade = 'delete') #check this
    #verified = db.Column(db.Boolean, nullable = False, default = False)
    password = db.Column(db.String, nullable = False)  # bcrypt hash, never plaintext
    session = db.Column(db.String, unique = True)  # current session token
    expiration = db.Column(db.DateTime)  # session expiry timestamp
    renew = db.Column(db.String, unique = True)  # token used to refresh a session

    def password_hash(self, value):
        """Hash ``value`` with bcrypt and store it as this user's password."""
        self.password = hashpw(value.encode("utf8"), gensalt())

    def __init__(self, **kwargs):
        # Required fields ('netid', 'name', 'password') raise KeyError if missing.
        self.netid = kwargs['netid']
        self.name = kwargs['name']
        self.avatar = kwargs.get('avatar', 0)
        #TODO: Throw an error when no such avatar is provided
        self.password_hash(kwargs['password'])
        # Create one Service row per requested service name.
        # (Removed leftover debug print() calls that ran on every registration.)
        if kwargs.get('services') is not None:
            for s in kwargs.get('services', []):
                service = Service(user = self.netid, service = s)
                db.session.add(service)
        #TODO: Don't autogenerate session, but verify netid is valid first (through email)
        self.renew_session()

    def generate_verification_link(self):
        #TODO: Verify netid before registering
        pass

    def renew_session(self):
        """Issue fresh, globally unique session and renew tokens (1 hour expiry)."""
        while True:
            sessiontok = token_hex(64)
            renewtok = token_hex(64)
            # Retry until both tokens are unused by any other row.
            if User.query.filter_by(session = sessiontok).first() is None and \
                    User.query.filter_by(renew = renewtok).first() is None:
                self.session = sessiontok
                self.expiration = datetime.datetime.now() + datetime.timedelta(seconds = 3600)
                self.renew = renewtok
                break

    def verify_password(self, password):
        """Return True when ``password`` matches the stored bcrypt hash."""
        return checkpw(password.encode('utf8'), self.password)

    def verify_session(self, session):
        """Return True when ``session`` is the current token and unexpired."""
        return session == self.session and datetime.datetime.now() < self.expiration

    def verify_renew(self, renew):
        """Return True when ``renew`` matches the stored renew token."""
        return renew == self.renew

    def serialize_data(self):
        """Return the public profile: netid, name, avatar and service names."""
        services = []
        for s in Service.query.filter_by(user = self.netid):
            services.append(str(s))
        return {
            'netid': self.netid,
            'name': self.name,
            'avatar': self.avatar,
            'services': services
        }

    def serialize_session(self):
        """Return the session token, its expiry (as text) and the renew token."""
        return {
            'session': self.session,
            'expiration': str(self.expiration),
            'renew': self.renew
        }

    def __str__(self):
        return self.netid
class Service(db.Model):
    """A single service (e.g. 'tutor') offered by one user.

    Rows are created from ``User.__init__``; ``SERVICES`` lists the service
    names the app currently recognises (validation is commented out below).
    """
    #TODO: Foreign Key Error Handling? Do we need this? All services are initialized in user init block anyways
    #TODO: UPDATe SERVICES
    SERVICES = ['tutor', 'photographer', 'programmer', 'videographer', 'artist'] #Put all services here
    id = db.Column(db.Integer, primary_key = True)
    user = db.Column(db.String(7), db.ForeignKey('user.netid'), nullable = False)
    service = db.Column(db.String(80), nullable = False)
    def __init__(self, **kwargs):
        #Call this in a try except block
        #if kwargs.get('service') not in Service.SERVICES:
        #    raise InvalidServiceError()
        self.user = kwargs.get('user')
        self.service = kwargs.get('service')
    def serialize(self):
        """Return a plain-dict view of this row (user netid + service name)."""
        return {
            'user': self.user,
            'service': self.service
        }
    def __str__(self):
        return self.service
#TODO: TRACKING
"""
class Track(db.Model):
#TODO: Foreign Key Error Handling
id = db.Column(db.Integer, primary_key = True)
trackerid = db.Column(db.String(7), db.ForeignKey('user.netid'), nullable = False)
trackingid = db.Column(db.String(7), db.ForeignKey('user.netid'), nullable = False)
#tracker = db.relationship("User", foreign_keys = "Track.tracker")
#tracking = db.relationship("User", foreign_keys = "Track.tracking")
def __init__(self, **kwargs):
self.trackerid = kwargs.get('tracker')
self.trackingid = kwargs.get('tracking')
def serialize(self):
return {
'tracker': self.tracker,
'tracking': self.tracking
}
def __str__(self):
return self.tracking
"""
| [
"aqcrazyboy@cornell.edu"
] | aqcrazyboy@cornell.edu |
207a9b2451f25fc9bd95d56c22d2aee58a576cad | 2cbd06a38bdedff491e25b7cff464d91a7abcb11 | /store/migrations/0001_initial.py | 778a8f6d2c523376ab8ab1ca15f1af1251489f39 | [] | no_license | yrarjun59/Python-Django-Ecommerce-Website | 6577f623f361298681dd494990fb791f07651fd6 | 5b3d2691158fe083ff7e9fbdff70d266f0ca65cc | refs/heads/master | 2023-05-12T13:59:52.200885 | 2021-06-02T06:20:07 | 2021-06-02T06:20:07 | 342,809,376 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,275 | py | # Generated by Django 3.0.3 on 2020-06-29 06:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, null=True)),
('email', models.CharField(max_length=200, null=True)),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_ordered', models.DateTimeField(auto_now_add=True)),
('complete', models.BooleanField(default=False)),
('transaction_id', models.CharField(max_length=100, null=True)),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.Customer')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('price', models.FloatField()),
('digital', models.BooleanField(blank=True, default=False, null=True)),
],
),
migrations.CreateModel(
name='ShippingAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=200)),
('city', models.CharField(max_length=200)),
('state', models.CharField(max_length=200)),
('zip_code', models.CharField(max_length=200)),
('date_added', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.Customer')),
('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.Order')),
],
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(blank=True, default=0, null=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.Order')),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.Product')),
],
),
]
| [
"yrarjun59@gmail.com"
] | yrarjun59@gmail.com |
7a79bff67cf9d6148338e6e1465395f08c394acb | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv2/lib/python3.8/site-packages/ansible/module_utils/facts/hardware/aix.py | 442f4a95486811ff2e1f40a4627017bb23de131b | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 9,947 | py | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
class AIXHardware(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'AIX'
def populate(self, collected_facts=None):
hardware_facts = {}
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
vgs_facts = self.get_vgs_facts()
mount_facts = self.get_mount_facts()
devices_facts = self.get_device_facts()
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(vgs_facts)
hardware_facts.update(mount_facts)
hardware_facts.update(devices_facts)
return hardware_facts
def get_cpu_facts(self):
cpu_facts = {}
cpu_facts['processor'] = []
rc, out, err = self.module.run_command("/usr/sbin/lsdev -Cc processor")
if out:
i = 0
for line in out.splitlines():
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
cpu_facts['processor_count'] = int(i)
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
data = out.split(' ')
cpu_facts['processor'] = data[1]
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
if out:
data = out.split(' ')
cpu_facts['processor_cores'] = int(data[1])
return cpu_facts
def get_memory_facts(self):
memory_facts = {}
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
for line in out.splitlines():
data = line.split()
if 'memory pages' in line:
pagecount = int(data[0])
if 'free pages' in line:
freecount = int(data[0])
memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.splitlines()
data = lines[1].split()
swaptotal_mb = int(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
memory_facts['swaptotal_mb'] = swaptotal_mb
memory_facts['swapfree_mb'] = int(swaptotal_mb * (100 - percused) / 100)
return memory_facts
def get_dmi_facts(self):
dmi_facts = {}
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
dmi_facts['firmware_version'] = data[1].strip('IBM,')
lsconf_path = self.module.get_bin_path("lsconf")
if lsconf_path:
rc, out, err = self.module.run_command(lsconf_path)
if rc == 0 and out:
for line in out.splitlines():
data = line.split(':')
if 'Machine Serial Number' in line:
dmi_facts['product_serial'] = data[1].strip()
if 'LPAR Info' in line:
dmi_facts['lpar_info'] = data[1].strip()
if 'System Model' in line:
dmi_facts['product_name'] = data[1].strip()
return dmi_facts
def get_vgs_facts(self):
"""
Get vg and pv Facts
rootvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk0 active 546 0 00..00..00..00..00
hdisk1 active 546 113 00..00..00..21..92
realsyncvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk74 active 1999 6 00..00..00..00..06
testvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk105 active 999 838 200..39..199..200..200
hdisk106 active 999 599 200..00..00..199..200
"""
vgs_facts = {}
lsvg_path = self.module.get_bin_path("lsvg")
xargs_path = self.module.get_bin_path("xargs")
cmd = "%s -o | %s %s -p" % (lsvg_path, xargs_path, lsvg_path)
if lsvg_path and xargs_path:
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc == 0 and out:
vgs_facts['vgs'] = {}
for m in re.finditer(r'(\S+):\n.*FREE DISTRIBUTION(\n(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*)+', out):
vgs_facts['vgs'][m.group(1)] = []
pp_size = 0
cmd = "%s %s" % (lsvg_path, m.group(1))
rc, out, err = self.module.run_command(cmd)
if rc == 0 and out:
pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)', out).group(1)
for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*', m.group(0)):
pv_info = {'pv_name': n.group(1),
'pv_state': n.group(2),
'total_pps': n.group(3),
'free_pps': n.group(4),
'pp_size': pp_size
}
vgs_facts['vgs'][m.group(1)].append(pv_info)
return vgs_facts
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
# AIX does not have mtab but mount command is only source of info (or to use
# api calls to get same info)
mount_path = self.module.get_bin_path('mount')
rc, mount_out, err = self.module.run_command(mount_path)
if mount_out:
for line in mount_out.split('\n'):
fields = line.split()
if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
if re.match('^/', fields[0]):
# normal mount
mount_facts['mounts'].append({'mount': fields[1],
'device': fields[0],
'fstype': fields[2],
'options': fields[6],
'time': '%s %s %s' % (fields[3], fields[4], fields[5])})
else:
# nfs or cifs based mount
# in case of nfs if no mount options are provided on command line
# add into fields empty string...
if len(fields) < 8:
fields.append("")
mount_facts['mounts'].append({'mount': fields[2],
'device': '%s:%s' % (fields[0], fields[1]),
'fstype': fields[3],
'options': fields[7],
'time': '%s %s %s' % (fields[4], fields[5], fields[6])})
return mount_facts
def get_device_facts(self):
device_facts = {}
device_facts['devices'] = {}
lsdev_cmd = self.module.get_bin_path('lsdev', True)
lsattr_cmd = self.module.get_bin_path('lsattr', True)
rc, out_lsdev, err = self.module.run_command(lsdev_cmd)
for line in out_lsdev.splitlines():
field = line.split()
device_attrs = {}
device_name = field[0]
device_state = field[1]
device_type = field[2:]
lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name]
rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args)
for attr in out_lsattr.splitlines():
attr_fields = attr.split()
attr_name = attr_fields[0]
attr_parameter = attr_fields[1]
device_attrs[attr_name] = attr_parameter
device_facts['devices'][device_name] = {
'state': device_state,
'type': ' '.join(device_type),
'attributes': device_attrs
}
return device_facts
class AIXHardwareCollector(HardwareCollector):
    # Registers AIXHardware as the hardware-fact source for the 'AIX' platform.
    _platform = 'AIX'
    _fact_class = AIXHardware
| [
"sifang@cisco.com"
] | sifang@cisco.com |
66ccd59ba1664f4dac23f2184a0658a7b08954f6 | db27667a4a4014f46e904b1c57be40ba97c629a9 | /Code/二 CH2基础语法和数据类型/First.py | 861e22eca1afa8927026c268136fd11f9d027793 | [] | no_license | jiningzhao/PythonBasic | 61d5db1438064e3f122ac993367565b33a1be1b7 | c12dfc3182f0f7a9b00a3b61b72e468567c4f6eb | refs/heads/master | 2022-11-28T16:17:36.202859 | 2019-10-21T14:46:20 | 2019-10-21T14:46:20 | 216,591,310 | 0 | 1 | null | 2022-11-16T16:31:58 | 2019-10-21T14:39:05 | Python | UTF-8 | Python | false | false | 27 | py |
# CH2 basics: simple assignments and in-place multiplication.
b = 60  # defined but not used in this snippet
a = 5
c = 4
c *= a  # same as c = c * a, so c becomes 20
print(c) | [
"jiningzhao@newbanker.cn"
] | jiningzhao@newbanker.cn |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.