blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24f2de63f6fe12b2e69518221df7bc7cef282fb6 | 078e35f6b03e4e7a9616f2335a740109d8292176 | /examples/adwords/v201609/advanced_operations/add_ad_customizer.py | f3c8da4ffc6854a0fdba2a28bd13a0f160fd0adb | [
"Apache-2.0"
] | permissive | parander/googleads-python-lib | 5f5b09e8adf7d733bddca314f6aa624b60c5abde | bc1bdff2d58fdc7cf4f09b879c68757c5b9b3abc | refs/heads/master | 2021-01-12T16:36:44.861582 | 2017-02-27T04:27:18 | 2017-02-27T04:27:18 | 71,418,777 | 0 | 0 | null | 2016-10-20T02:38:33 | 2016-10-20T02:38:32 | null | UTF-8 | Python | false | false | 7,140 | py | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds an ad customizer feed.
Associates the feed with customer and adds an ad that uses the feed to populate
dynamic data.
"""
from datetime import datetime
from uuid import uuid4
# Import appropriate classes from the client library.
from googleads import adwords
from googleads import errors
# Feed names must be unique per account, so suffix with a random UUID.
FEED_NAME = 'Interplanetary Feed Name %s' % uuid4()
# Placeholder ad group IDs; replace with real IDs before running.
ADGROUPS = [
    'INSERT_ADGROUP_ID_1_HERE',
    'INSERT_ADGROUP_ID_2_HERE'
]
def CreateAdsWithCustomizations(client, adgroup_ids, feed_name):
    """Creates ExpandedTextAds that use ad customizations for specified AdGroups.

    Args:
      client: an AdWordsClient instance.
      adgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.
      feed_name: the name of the feed used to apply customizations.

    Raises:
      GoogleAdsError: if no ExpandedTextAds were added.
    """
    # Service used to mutate ads.
    adgroup_ad_service = client.GetService('AdGroupAdService')

    # Placeholders such as {=feed.Name} are resolved per ad group at serve
    # time from the matching customizer feed items.
    ad_template = {
        'xsi_type': 'ExpandedTextAd',
        'headlinePart1': 'Luxury Cruise to {=%s.Name}' % feed_name,
        'headlinePart2': 'Only {=%s.Price}' % feed_name,
        'description': 'Offer ends in {=countdown(%s.Date)}!' % feed_name,
        'finalUrls': ['http://www.example.com'],
    }

    # The same ad is attached to every ad group; when the ads serve they show
    # different values because they match different feed items.
    operations = []
    for group_id in adgroup_ids:
        operations.append({
            'operator': 'ADD',
            'operand': {
                'adGroupId': group_id,
                'ad': ad_template
            }
        })

    response = adgroup_ad_service.mutate(operations)

    if response and 'value' in response:
        for ad in response['value']:
            print ('Created an ad with ID \'%s\', type \'%s\', and status \'%s\'.'
                   % (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
    else:
        raise errors.GoogleAdsError('No ads were added.')
def CreateCustomizerFeed(client, feed_name):
    """Creates a new AdCustomizerFeed.

    Args:
      client: an AdWordsClient instance.
      feed_name: the name for the new AdCustomizerFeed.

    Returns:
      The new AdCustomizerFeed.

    Raises:
      GoogleAdsError: if no feeds were added.
    """
    # Service used to create ad customizer feeds.
    ad_customizer_feed_service = client.GetService('AdCustomizerFeedService')

    # Declare the feed and the three attributes that the ads will reference.
    customizer_feed = {
        'feedName': feed_name,
        'feedAttributes': [
            {'type': 'STRING', 'name': 'Name'},
            {'type': 'STRING', 'name': 'Price'},
            {'type': 'DATE_TIME', 'name': 'Date'}
        ]
    }

    feed_service_operation = {
        'operator': 'ADD',
        'operand': customizer_feed
    }

    response = ad_customizer_feed_service.mutate([feed_service_operation])

    if response and 'value' in response:
        feed = response['value'][0]
        feed_data = {
            'feedId': feed['feedId'],
            'nameId': feed['feedAttributes'][0]['id'],
            'priceId': feed['feedAttributes'][1]['id'],
            'dateId': feed['feedAttributes'][2]['id']
        }
        # Bug fixes: the '%' formatting is now applied to the string itself
        # (the original applied it to the result of print and only parsed at
        # all because of Python 2's print statement), and the missing space
        # between "attribute" and "ID" in the message was restored.
        print ('Feed with name \'%s\' and ID %s was added with:\n'
               '\tName attribute ID %s and price attribute ID %s and date '
               'attribute ID %s'
               % (feed['feedName'], feed['feedId'], feed_data['nameId'],
                  feed_data['priceId'], feed_data['dateId']))
        return feed
    else:
        raise errors.GoogleAdsError('No feeds were added')
def CreateCustomizerFeedItems(client, adgroup_ids, ad_customizer_feed):
    """Creates FeedItems for the specified AdGroups.

    These FeedItems contain values to use in ad customizations for the
    AdGroups.

    Args:
      client: an AdWordsClient instance.
      adgroup_ids: a list containing two AdGroup Ids.
      ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems
          with.

    Raises:
      GoogleAdsError: if no FeedItems were added.
    """
    # Service used to create the feed items.
    feed_item_service = client.GetService('FeedItemService')

    today = datetime.now()
    time_format = '%Y%m%d %H%M%S'

    # One feed item per ad group: Mars on the 1st, Venus on the 15th of the
    # current month.
    schedule = [
        ('Mars', '$1234.56', datetime(today.year, today.month, 1, 0, 0)),
        ('Venus', '$1450.00', datetime(today.year, today.month, 15, 0, 0)),
    ]

    feed_item_operations = [
        CreateFeedItemAddOperation(name, price, when.strftime(time_format),
                                   adgroup_ids[index], ad_customizer_feed)
        for index, (name, price, when) in enumerate(schedule)
    ]

    response = feed_item_service.mutate(feed_item_operations)

    if 'value' in response:
        for feed_item in response['value']:
            print('Added FeedItem with ID %d.' % feed_item['feedItemId'])
    else:
        raise errors.GoogleAdsError('No FeedItems were added.')
def CreateFeedItemAddOperation(name, price, date, adgroup_id,
                               ad_customizer_feed):
    """Creates a FeedItemOperation.

    The generated FeedItemOperation will create a FeedItem with the specified
    values and AdGroupTargeting when sent to FeedItemService.mutate.

    Args:
      name: the value for the name attribute of the FeedItem.
      price: the value for the price attribute of the FeedItem.
      date: the value for the date attribute of the FeedItem.
      adgroup_id: the ID of the ad_group to target with the FeedItem.
      ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems
          with.

    Returns:
      A new FeedItemOperation for adding a FeedItem.
    """
    # Pair each of the feed's first three attribute IDs with its value, in
    # the fixed order (name, price, date) used when the feed was declared.
    attribute_ids = [attr['id']
                     for attr in ad_customizer_feed['feedAttributes'][:3]]
    attribute_values = [
        {'feedAttributeId': attr_id, 'stringValue': value}
        for attr_id, value in zip(attribute_ids, (name, price, date))
    ]

    feed_item = {
        'feedId': ad_customizer_feed['feedId'],
        'adGroupTargeting': {
            'TargetingAdGroupId': adgroup_id
        },
        'attributeValues': attribute_values
    }

    return {'operator': 'ADD', 'operand': feed_item}
def main(client, adgroup_ids, feed_name=FEED_NAME):
    """Runs the full example: feed setup, feed items, then customized ads."""
    # One customizer feed per account can back all ads.
    feed = CreateCustomizerFeed(client, feed_name)
    # Populate the feed with the values we'd like the ads to render.
    CreateCustomizerFeedItems(client, adgroup_ids, feed)
    # All set: create ads that reference the feed by name.
    CreateAdsWithCustomizations(client, adgroup_ids, feed_name)
if __name__ == '__main__':
    # Initialize client object from the local googleads.yaml storage file.
    adwords_client = adwords.AdWordsClient.LoadFromStorage()
    # Run against the placeholder ad group IDs declared above.
    main(adwords_client, ADGROUPS)
| [
"msaniscalchi@users.noreply.github.com"
] | msaniscalchi@users.noreply.github.com |
629585562843f773778c17fec9276488963e4e18 | 515e7d6e5756e3922df0b874b241c8b0744b4570 | /packs/python_packs.py | 1d34ff441b4097d542aca3c6d08a9dd2b0ef7e4d | [] | no_license | mjgpy3/udm_script | d77f4904df62e33c72f690cdf4049a1118be105b | d04802d21797fa6ed03cfc35c955bcc6d028f1c2 | refs/heads/master | 2021-01-23T11:40:25.415072 | 2013-07-30T16:53:31 | 2013-07-30T16:53:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | #!/usr/bin/env python
# Created by Michael Gilliland
# Date: Fri Jan 25 16:47:44 EST 2013
#
#
from package_container import PackageContainer
# Human-readable package names mapped to their apt package identifiers.
packages = {'Pygame': 'python-pygame',
            'Sympy': 'python-sympy',
            'Numpy': 'python-numpy',
            'Scipy': 'python-scipy',
            'Virtualenv': 'python-virtualenv',
            'PIP': 'python-pip',
            'Django': 'python-django',
            'Pychecker': 'pychecker',
            'IPython': 'ipython',
            'IDLE': 'idle',
            'Epydoc': 'python-epydoc',
            'Sphinx': 'python-sphinx',
            'SQLAlchemy': 'python-sqlalchemy',
            'Requests': 'python-requests',
            'Flask': 'python-flask',
            'Python Dev': 'python-dev',
            'Beautiful Soup': 'python-beautifulsoup',
            'Jython': 'jython',
            'Cython': 'cython',
            'PyPy': 'pypy',
            'Python Openoffice': 'python-openoffice',
            'CX Freeze': 'cx-freeze'}

# Packages not available through apt; installed via the listed shell commands.
special_package_instructions = {'sh': ['pip install sh'],
                                'Selenium': ['pip install selenium']}

# Bundle everything into a named container consumed by the installer script.
container = PackageContainer("Python", 'python', packages, special_package_instructions)
| [
"mjg.py3@gmail.com"
] | mjg.py3@gmail.com |
e36f86f692711d3516598a57f325dc3781d9a3e0 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/task/deployctx.py | 5ec363019c9a320d6f2cdd11ef473a166e344841 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,835 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class DeployCtx(Mo):
    """
    Mo doc not defined in techpub!!!
    """
    # Machine-generated MO metadata; registers the class, its RN format,
    # access masks and capability flags with the cobra SDK.
    meta = ClassMeta("cobra.model.task.DeployCtx")

    meta.moClassName = "taskDeployCtx"
    meta.rnFormat = "TaskDeployCtx"
    meta.category = MoCategory.REGULAR
    meta.label = "DeployCtxTask"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False

    # Allowed child MO classes and their RN prefixes.
    meta.childClasses.add("cobra.model.tag.Tag")
    meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
    meta.childClasses.add("cobra.model.tag.Annotation")

    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))

    # This MO can only live directly under the root of the MIT.
    meta.parentClasses.add("cobra.model.top.Root")

    meta.rnPrefixes = [
        ('TaskDeployCtx', False),
    ]

    # Property metadata (names, ids, categories and legal values).
    prop = PropMeta("str", "annotation", "annotation", 51688, PropCategory.REGULAR)
    prop.label = "Annotation. Suggested format orchestrator:value"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("annotation", prop)

    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "extMngdBy", "extMngdBy", 51689, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "undefined"
    prop._addConstant("msc", "msc", 1)
    prop._addConstant("undefined", "undefined", 0)
    meta.props.add("extMngdBy", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("uid", prop)

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # This class has no naming properties, so no naming values are passed.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
4518d0a2fb6618907b910ed981d40befde7a5cc4 | 81d4411216885ddd5ad19703e52a9918b198f061 | /set_esxi.py | 71f35136dceea171ec7837508b976b0cda9ab6dc | [] | no_license | Itaiweisman/set_esxi_host | 4f9276eb7f3fb0d0162ca147f372957058237c80 | 0b6efc4c43b5d41a3ebe6251b623e7c58079beef | refs/heads/master | 2020-04-30T11:18:43.982271 | 2019-03-20T19:08:49 | 2019-03-20T19:08:49 | 176,798,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py |
import requests,json,getpass
def get_host_id(box, name, auth):
    """Look up a host by name on the box and return its id, or None.

    Args:
      box: management box hostname or IP.
      name: host name to resolve.
      auth: (username, password) tuple for HTTP basic auth.
    """
    url = "http://{}/api/rest/hosts?name={}".format(box, name)
    hosts = requests.get(url=url, auth=auth).json()
    # Bug fix: use .get() so a response without 'result'/'error' keys does
    # not raise KeyError (the original indexed hosts['error'] directly).
    if hosts.get('result'):
        return hosts['result'][0]['id']
    if hosts.get('error'):
        print("****ERROR::******")
        print(hosts['error']['message'])
    else:
        print("Failure: Host {} Not Found".format(name))
    return None
def change_host_type(box, auth, host_id):
    """Set the given host's type to 'esxi'; return True on success."""
    headers = {'Content-Type': 'application/json'}
    body = {'host_type': 'esxi'}
    url = "http://{}/api/rest/hosts/{}".format(box, host_id)
    change = requests.put(auth=auth, data=json.dumps(body), url=url,
                          headers=headers)
    # Bug fix: guard against a missing 'error' key instead of raising
    # KeyError on a successful response.
    error = change.json().get('error')
    if error:
        print(error['message'])
        return False
    return True
try:
    # Interactively collect connection details and the target host name.
    box = raw_input("Enter box name/Ip:")
    username = raw_input("Enter Usename:")
    password = getpass.getpass('Password:')
    host = raw_input("Enter ESXi Host Name:")
    auth = (username, password)
    # Resolve the host name to its REST id, then flip its type to 'esxi'.
    host_id = get_host_id(box, host, auth)
    if (host_id):
        if (change_host_type(box, auth, host_id)):
            print "Success!"
        else:
            print "Failure!"
except Exception as E:
    # Catch-all so an interactive run always reports rather than tracebacks.
    print "Caught Exception:", E
"iweisman@infinidat.com"
] | iweisman@infinidat.com |
884306ac61e7bb8daf4a0dbc6659c8a307541a3f | 35d38520fb59bf6f7353c660f3082ad09313d616 | /autodroid/setup.py | 0f239771f654cb194fc4839a9d8c705371f423a1 | [
"Apache-2.0"
] | permissive | AndyZhu1991/AutoDroid | bca037e0f6bbec39a0932786f935e8d860219f19 | 0c6121513b9e6b2d6e8506eea58748afd73e003d | refs/heads/master | 2021-12-07T03:42:59.746655 | 2021-11-19T16:01:56 | 2021-11-19T16:01:56 | 242,977,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from setuptools import setup, find_packages
# Package metadata for AutoDroid; sources live under src/ (src-layout).
setup(
    name='AutoDroid',
    version='0.1',
    packages=find_packages(where="src"),
    package_dir={"": "src"})
"stdzhu@gmail.com"
] | stdzhu@gmail.com |
9b801e87ac7dc8e013dd99b87a004a11b8c5268e | 0372eb2446d6463f835f82cbc24d34fd8655236b | /relso/utils/data_utils.py | 75d6d8c80e9eab93c4843e96b55905cff017f126 | [
"Apache-2.0"
] | permissive | KrishnaswamyLab/ReLSO-Guided-Generative-Protein-Design-using-Regularized-Transformers | 82cb2f7578f5adafaded8dfd8b4a91d025ff6b4f | f2fee841da406bb38f080af4445b5f790af31c43 | refs/heads/main | 2023-07-25T12:35:37.502064 | 2023-07-06T00:24:55 | 2023-07-06T00:24:55 | 436,740,631 | 67 | 10 | Apache-2.0 | 2023-07-06T00:24:56 | 2021-12-09T19:42:41 | Python | UTF-8 | Python | false | false | 5,606 | py |
"""
Helper functions for data processing
"""
import numpy as np
import torch
#-----------
# CoNSTANTS
# -----------
# One-hot rows for the 20 canonical amino acids, plus:
#   "X" — unknown residue, encoded as a uniform 0.05 over all 20 positions;
#   "J" — padding symbol, encoded as all zeros.
ENCODING = {"I":[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "L":[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "V":[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "F":[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "M":[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "C":[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "A":[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "G":[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "P":[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "T":[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "S":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "Y":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            "W":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
            "Q":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
            "N":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            "H":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
            "E":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
            "D":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
            "K":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            "R":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
            "X":[0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05,
                 0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05],
            "J":[0, 0, 0, 0, 0, 0, 0, 0 ,0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}

# Amino-acid letter -> integer index (20 canonical + X unknown + J padding).
SEQ2IND = {"I":0,
           "L":1,
           "V":2,
           "F":3,
           "M":4,
           "C":5,
           "A":6,
           "G":7,
           "P":8,
           "T":9,
           "S":10,
           "Y":11,
           "W":12,
           "Q":13,
           "N":14,
           "H":15,
           "E":16,
           "D":17,
           "K":18,
           "R":19,
           "X":20,
           "J":21}

# Inverse mapping: integer index -> amino-acid letter.
IND2SEQ = {ind: AA for AA, ind in SEQ2IND.items()}
#-----------
# FUNCTIONS
# -----------
""" Get padded sequence from indices """
def inds_to_seq(seq):
    """Map a sequence of integer indices back to amino-acid letters."""
    return list(map(IND2SEQ.__getitem__, seq))
""" Get indices of sequence """
def seq_to_inds(seq):
    """Map amino-acid letters to their integer indices."""
    return list(map(SEQ2IND.__getitem__, seq))
""" Get one-hot representation of padded sequence"""
def get_rep(seq):
    """One-hot encode a padded letter sequence; result shape (20, len(seq))."""
    onehot = torch.tensor([ENCODING[residue] for residue in seq])
    # .t() is equivalent to torch.transpose(x, 0, 1) for a 2-D tensor.
    return onehot.t()
""" Get one-hot representation from indices of sequence"""
def get_rep_inds(seq):
    """One-hot encode a sequence given as integer indices.

    Returns a (20, len(seq)) tensor, matching get_rep.
    """
    # Bug fix: ENCODING is keyed by amino-acid letters, so indexing it with
    # an integer always raised KeyError; translate each index back to its
    # letter via IND2SEQ first.
    temp = torch.tensor([ENCODING[IND2SEQ[int(ind)]] for ind in seq])
    return torch.transpose(temp, 0, 1)
""" Pad input sequence to length 20 on either side"""
def pad(seq):
    """Center-pad *seq* with 'J' characters to a total length of 20.

    When the padding is odd, the extra 'J' goes on the right.
    """
    slack = (20 - len(seq)) / 2
    left = int(np.floor(slack))
    right = int(np.ceil(slack))
    return 'J' * left + ''.join(seq) + 'J' * right
"""
Load Ens_grad dataset
Args: inputted as a .csv/pandas DataFrame with sequences and targets
Returns: reps (converts sequence to indices) and targets (enrichment vals) as torch tensors
"""
def load_raw_giff_data(input_data):
    """Convert an Ens_grad DataFrame into (reps, targets) torch tensors.

    reps holds the integer-index encoding of each 'CDR3' sequence; targets
    holds the 'enrichment' values as floats.
    """
    enrich = torch.from_numpy(np.array(input_data["enrichment"]))
    targets = torch.cat(
        [t.unsqueeze(dim=0).type("torch.FloatTensor") for t in list(enrich)], 0)

    encoded = [torch.tensor(seq_to_inds(s)) for s in list(input_data['CDR3'])]
    reps = torch.cat(
        [e.type("torch.LongTensor").unsqueeze(dim=0) for e in encoded])
    return reps, targets
def load_raw_mut_data(input_data):
    """Convert a (sequence, target) two-column DataFrame into torch tensors."""
    raw_targets = torch.from_numpy(np.array(input_data.iloc[:, 1]))
    targets = torch.cat(
        [t.unsqueeze(dim=0).type("torch.FloatTensor")
         for t in list(raw_targets)], 0)

    reps = torch.cat(
        [torch.tensor(seq_to_inds(s)).type("torch.LongTensor").unsqueeze(dim=0)
         for s in list(input_data.iloc[:, 0])])
    return reps, targets
def load_raw_happy_data(input_data, target_col, log_bool):
    """Build (reps, targets) tensors from the 'happy' dataset.

    Wildcard '*' residues in 'cdr_sequence' are mapped to 'X'; targets are
    log-transformed when log_bool is truthy.
    """
    raw_targets = torch.from_numpy(input_data[target_col].to_numpy())
    targets = torch.cat(
        [t.unsqueeze(dim=0).type("torch.FloatTensor")
         for t in list(raw_targets)], 0)
    if log_bool:
        targets = torch.log(targets)

    encoded = []
    for sequence in list(input_data['cdr_sequence']):
        # replace wildcard character
        sequence = sequence.replace('*', 'X')
        encoded.append(torch.tensor(seq_to_inds(sequence)))
    reps = torch.cat(
        [e.type("torch.LongTensor").unsqueeze(dim=0) for e in encoded])
    return reps, targets
def load_raw_tape_data(input_data):
    """Convert a TAPE-style (sequence, target) DataFrame into torch tensors."""
    raw_targets = list(torch.from_numpy(input_data.iloc[:, 1].to_numpy()))
    targets = torch.cat(
        [t.unsqueeze(dim=0).type("torch.FloatTensor") for t in raw_targets], 0)

    reps = torch.cat(
        [torch.tensor(seq_to_inds(s)).type("torch.LongTensor").unsqueeze(dim=0)
         for s in list(input_data.iloc[:, 0])])
    return reps, targets
def load_alphabet_dict(alphabet_loc):
    """Read one symbol per line from *alphabet_loc* and return a mapping
    from symbol to its index in the sorted alphabet."""
    with open(alphabet_loc) as handle:
        symbols = handle.read().splitlines()
    return {symbol: index for index, symbol in enumerate(sorted(symbols))}
###############################
# FEATURIZING SEQUENCES
###############################
# def conv_set_ | [
"ec_lab@vpn172022125223.its.yale.internal"
] | ec_lab@vpn172022125223.its.yale.internal |
33781b012962db67557b18ef19e88feb1d14df07 | d9fa7fcf004d2efb97fe483d64b709ad9d3970a7 | /Keras_Wordembedings.py | d71840ae9240d91ba660b4df0bde8c05c139b55b | [] | no_license | jyotirepo/CNN | a5443e7a02639202eb61c8797260c71f27ecaaee | cde6a31d33e3af424de0264781093a49a521fcb3 | refs/heads/master | 2023-05-02T20:26:18.058442 | 2021-05-22T19:14:45 | 2021-05-22T19:14:45 | 369,862,039 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 23 00:01:37 2021
@author: jysethy
"""
from tensorflow.keras.preprocessing.text import one_hot
### Example sentences to embed.
sent=[ 'the glass of milk',
     'the glass of juice',
     'the cup of tea',
     'I am a good boy',
     'I am a good developer',
     'understand the meaning of words',
     'your videos are good',]

# Vocabulary size used for hashing each word to an integer id.
voc_size = 10000

# One-hot (hashing-trick) representation: each sentence becomes a list of ids.
onehot_repr=[one_hot(words,voc_size)for words in sent]
print(onehot_repr)

# Word-embedding representation
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
import numpy as np

# Left-pad ('pre') every sentence's id list to a fixed length of 8.
sent_length=8
embedded_docs=pad_sequences(onehot_repr,padding='pre',maxlen=sent_length)
print(embedded_docs)

# dimension (number of features for each word)
dim=10  # NOTE(review): defined but unused — the Embedding layer hard-codes 10 below
model = Sequential()
model.add(Embedding(voc_size,10,input_length=sent_length))
model.compile('adam','mse')
model.summary()
| [
"jsethy2010@gmail.com"
] | jsethy2010@gmail.com |
47a188a86366a4a071ac861cfc91a2039adbcfee | 267fe0eac66548850d541d26d74e6662d304d577 | /ImageGeneration.py | 37af7236a0f5e72926775d14c0ca8987c2f32149 | [] | no_license | NotOdayakaShulk/ShulkBot | 93fa3c6e31f500dfa93cb89148466018c6a8b755 | a15d22e5bbccc245892fdd23ce5d776f3f922c4a | refs/heads/master | 2020-07-09T12:52:09.935608 | 2019-08-24T16:42:19 | 2019-08-24T16:42:19 | 203,972,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,022 | py | # -*- coding: utf-8 -*-
import numpy as np
from PIL import Image, ImageFilter, ImageFont, ImageDraw
import random
class VisionPic:
    """Renders a 'future vision' overlay: a tag carrying an enemy icon, a
    text label and a damage number, composited over a blue-tinted frame."""

    # Output canvas and tag geometry, in pixels.
    imgHeight = 720
    imgWidth = 1280
    tagHeight = 80
    tagWidth = 932
    tagPos = ( imgWidth - tagWidth, 70 )
    fontSize = 40
    txtBoxWidth = 320
    txtBoxTopLeft = ( 180, int((tagHeight - fontSize) / 2) )
    dmgBoxTopLeft = ( 764, int((tagHeight - fontSize) / 2) )

    def __init__( self, fontPath ):
        # Blank RGBA canvas, the tag template image, and the label font.
        self.img = Image.new( "RGBA", (VisionPic.imgWidth, VisionPic.imgHeight) )
        self.tag = Image.open( "tag.png" )
        self.font = ImageFont.truetype(fontPath, VisionPic.fontSize)

    # Truncate the text (appending "...") so it fits inside the tag's text box.
    def TruncateText ( self, text ):
        draw = ImageDraw.Draw( self.tag )
        if draw.textsize( text , self.font )[0] <= VisionPic.txtBoxWidth :
            return text
        while draw.textsize( text + "...", self.font )[0] > VisionPic.txtBoxWidth:
            text = text[0:len(text) - 1]
        return text + "..."

    # Draw outlined text: paint the background color at eight one-step
    # offsets around pos, then the foreground color on top.
    def BorderText ( self, text, pos, img, font, bw, frontColor, bgColor ):
        draw = ImageDraw.Draw(img)
        draw.font = font
        pos = np.array( pos )
        draw.text(pos-(-bw, -bw), text, bgColor)
        draw.text(pos-(-bw, +bw), text, bgColor)
        draw.text(pos-(+bw, -bw), text, bgColor)
        draw.text(pos-(+bw, +bw), text, bgColor)
        draw.text(pos-(0, -bw), text, bgColor)
        draw.text(pos-(0, +bw), text, bgColor)
        draw.text(pos-(-bw, 0), text, bgColor)
        draw.text(pos-(+bw, 0), text, bgColor)
        draw.text(pos, text, frontColor)
        return draw

    # Generate the future-vision tag image: a grayscale circular enemy icon
    # plus outlined text and damage labels pasted onto the tag template.
    def GenerateTagImage( self, enemyIcon, text, damage ):
        text = self.TruncateText(text)
        tag = self.tag.copy()
        draw = ImageDraw.Draw( tag )
        # Circular alpha mask, slightly blurred for a soft edge.
        mask = Image.new("L", (64, 64), 0)
        drawMask = ImageDraw.Draw(mask)
        drawMask.ellipse( (0, 0, 64, 64), fill=255)
        maskBulr = mask.filter(ImageFilter.GaussianBlur(2))
        enemyIcon = enemyIcon.convert('L').resize((64, 64))
        tag.paste(enemyIcon, (54, 8), maskBulr)
        draw = self.BorderText( text, VisionPic.txtBoxTopLeft, tag,
                                self.font,2, "white", "black")
        draw = self.BorderText( str(damage), VisionPic.dmgBoxTopLeft,
                                tag, self.font, 2, "white", "black")
        # NOTE(review): resizing to tag.size is a no-op size-wise; presumably
        # only the ANTIALIAS resample pass is wanted — confirm.
        self.tag = tag.resize(tag.size, Image.ANTIALIAS)

    # Generate the translucent blue effect covering the whole frame.
    def makeEffect(self):
        self.effect = Image.new("RGBA", (VisionPic.imgWidth, VisionPic.imgHeight),
                                (126, 158, 183, 255))

    def GenerateVisionImage(self, img):
        # Blend the source frame with the blue effect, then paste the tag.
        self.makeEffect()
        img = img.convert("RGBA")
        img = img.resize( self.effect.size )
        ret = Image.blend(img, self.effect, 0.25)
        ret.paste(self.tag, VisionPic.tagPos, mask = self.tag)
        return ret
return ret
if __name__ == "__main__":
    # Smoke test: needs a local font file, test.jpg and icon.jpg to exist.
    vision = VisionPic(" -- appropriate font path -- ")
    image = Image.open("test.jpg")
    # NOTE(review): convert() returns a new image; the result is discarded
    # here and below — likely `image = image.convert("RGBA")` was intended.
    image.convert("RGBA")
    icon = Image.open("icon.jpg")
    icon.convert("RGBA")
    vision.GenerateTagImage( icon,"うんうんうんち", 114514 )
    vision.GenerateVisionImage( image ).show()
| [
"ouhare@icloud.com"
] | ouhare@icloud.com |
1f850d25b6fb00e2da6a8dadaeab4e60e0509438 | 83376bda216b39a25851ee37b66aadeadf53b55c | /examples/features_per_sample_comparison.py | d71665ff56a76e6d2f7f9d224ca845b671c63ff7 | [] | no_license | lucasplagwitz/auto_cpca | 710f5cdcd85f3fc14865a740ee99db37425fbbfc | ce6031d875b1ba4c7d5caad227ad4b9ad101d3d9 | refs/heads/main | 2023-07-15T20:24:46.048329 | 2021-08-27T21:51:18 | 2021-08-27T21:51:18 | 398,742,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,614 | py | # adapted example from sklearn:
# https://scikit-learn.org/stable/auto_examples/classification/plot_lda.html#sphx-glr-auto-examples-classification-plot-lda-py
import numpy as np
import matplotlib.pyplot as plt
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectPercentile
from auto_cpca import AutoCPCA
train_test_ratio = 0.5
n_samples = [200, 500] # samples for testing
n_averages = 20 # how often to repeat classification
n_features_max = 2000 # maximum number of features
step = 150 # step size for the calculation
fig, ax = plt.subplots(1, 2)
for num_c, n_samples in enumerate([200, 500]):
acc_clf = [[],[],[],[],[]]
scoring_clf = [[],[],[],[],[]]
n_features_range = range(100, n_features_max + 1, step)
for n_features in n_features_range:
print(f"-- {n_features} --")
scoring_clf = [[],[],[],[],[]]
for i in range(n_averages):
clf0 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto')
clf1 = Pipeline(
[("AutoCPCA", AutoCPCA(n_components=min(n_features // 20, 20), preprocess_with_pca_dim=1000)),
("SVC", SVC())])
clf2 = Pipeline([("FS", SelectPercentile()), ("SVC", SVC())])
clf3 = SVC()
clf4 = Pipeline([("PCA", PCA()), ("SVC", SVC())])
X, y = make_classification(n_samples, n_features,
n_informative=n_features//5,
n_redundant=0,
n_repeated=0,
n_clusters_per_class=3,
hypercube=False,
n_classes=8,
class_sep=40,
random_state=i,
)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=train_test_ratio)
for num in range(5):
clf = eval(f"clf{num}")
clf.fit(X_train, y_train)
scoring_clf[num].append(clf.score(X_test, y_test))
for num in range(5):
acc_clf[num].append(np.mean(scoring_clf[num]))
features_samples_ratio = np.array(n_features_range) / int(n_samples*train_test_ratio)
ax[num_c].plot(features_samples_ratio, acc_clf[0], linewidth=2, color='navy')
ax[num_c].plot(features_samples_ratio, acc_clf[1], linewidth=2, color='gold')
ax[num_c].plot(features_samples_ratio, acc_clf[2], linewidth=2, color='red')
ax[num_c].plot(features_samples_ratio, acc_clf[3], linewidth=2, color='green')
ax[num_c].plot(features_samples_ratio, acc_clf[4], linewidth=2, color='purple')
if num_c == 0:
ax[num_c].set_ylabel('Classification accuracy')
ax[num_c].set_xlabel('n_features / n_samples')
ax[num_c].set_title(f"{int(n_samples*train_test_ratio)} train samples")
plt.subplots_adjust(bottom=0.3, wspace=0.33)
plt.legend(labels=["LDA with Ledoit Wolf", 'AutoCPCA->SVC', 'FS->SVC', 'SVC', 'PCA->SVC'],loc='upper center',
bbox_to_anchor=(-.15, -0.2), fancybox=True, shadow=True, ncol=3)
plt.savefig("../demo/performance_feature_sample_ratio.png")
| [
"l.plagwitz@uni-muenster.de"
] | l.plagwitz@uni-muenster.de |
306169c51708eb9ebd6f3a4715d52aaf5b2f46c0 | 09c87fe780df6d1f9eb33799ed516a0bbd7ab1e3 | /Research/async play/wxasync1.py | 938fa1c468981bdc521f7644434f52312729c2b3 | [] | no_license | abulka/pynsource | 8ad412b85dc1acaeb83d7d34af8cc033c6baba91 | 979436525c57fdaeaa832e960985e0406e123587 | refs/heads/master | 2023-04-13T12:58:02.911318 | 2023-04-11T09:56:32 | 2023-04-11T09:56:32 | 32,249,425 | 271 | 46 | null | 2022-10-10T04:36:57 | 2015-03-15T07:21:43 | Python | UTF-8 | Python | false | false | 1,429 | py | import wx
from wxasync import AsyncBind, WxAsyncApp, StartCoroutine
import asyncio
from asyncio.events import get_event_loop
import time
class TestFrame(wx.Frame):
def __init__(self, parent=None):
super(TestFrame, self).__init__(parent)
vbox = wx.BoxSizer(wx.VERTICAL)
button1 = wx.Button(self, label="Submit")
self.edit = wx.StaticText(self, style=wx.ALIGN_CENTRE_HORIZONTAL|wx.ST_NO_AUTORESIZE)
self.edit_timer = wx.StaticText(self, style=wx.ALIGN_CENTRE_HORIZONTAL|wx.ST_NO_AUTORESIZE)
vbox.Add(button1, 2, wx.EXPAND|wx.ALL)
vbox.AddStretchSpacer(1)
vbox.Add(self.edit, 1, wx.EXPAND|wx.ALL)
vbox.Add(self.edit_timer, 1, wx.EXPAND|wx.ALL)
self.SetSizer(vbox)
self.Layout()
AsyncBind(wx.EVT_BUTTON, self.async_callback, button1)
StartCoroutine(self.update_clock, self)
async def async_callback(self, event):
self.edit.SetLabel("Button clicked")
await asyncio.sleep(1)
self.edit.SetLabel("Working")
await asyncio.sleep(1)
self.edit.SetLabel("Completed")
async def update_clock(self):
while True:
self.edit_timer.SetLabel(time.strftime('%H:%M:%S'))
await asyncio.sleep(0.5)
app = WxAsyncApp()
frame = TestFrame()
frame.Show()
app.SetTopWindow(frame)
loop = get_event_loop()
loop.run_until_complete(app.MainLoop())
| [
"abulka@gmail.com"
] | abulka@gmail.com |
3b908a68a40bde74242b3caab16d882a5a2f90ee | 057aab8f9c5689fe2580aa43c13d0e0469cd5feb | /fooler/migrations/0003_auto_20151019_0806.py | 8f805d85ee24732959aa834c52183fad560243a2 | [] | no_license | freshmilk15/foolishboy | 95d594ab99c8ca7914187e1562567423f7b67989 | a5f487000e768bfe40e55feccc6f779f5f619909 | refs/heads/master | 2021-01-10T11:44:23.024531 | 2015-11-09T15:50:34 | 2015-11-09T15:50:34 | 45,245,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('fooler', '0002_userprofile'),
]
operations = [
migrations.AlterField(
model_name='question',
name='qtime',
field=models.DateTimeField(auto_now=True),
preserve_default=True,
),
]
| [
"kerwin15@126.com"
] | kerwin15@126.com |
322a5464f69564f230074c929f48a2d6eefd007e | cacd2f52749ecc76f87183e7a1a849d09c582ac7 | /venv/bin/pip3.6 | cd98ed11cf5641b94b5f7ca8befa223132a5b945 | [] | no_license | arthurpx77670/PROJECT_COVDPX | 59b9ec005ee229245ff9a90170e49bbb64e1babc | 82e9879d34056ba099007fda749a4bf6ef7c410a | refs/heads/master | 2021-04-24T06:06:46.893120 | 2020-04-05T16:16:18 | 2020-04-05T16:16:18 | 250,089,789 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | 6 | #!/home/eisti/PycharmProjects/PROJECT_COVDPX/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.6'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.6')()
)
| [
"perringaux@eisti.eu"
] | perringaux@eisti.eu |
1048dc6f234e2af5a67f902db68e427fc5fc1052 | 713fc732a037447897092722647e28cb7a9711a8 | /app/api_1_0/users.py | 6023ebe9c2fdc0cdc7ed2a6926ee3bff2e0c65dd | [] | no_license | jkachhadia/StatsBoy | 9612eec07b44cf34f76c63eddbb085daa7869640 | ad9bb1f921dcb4c74b1ba842b015445c1e0abe33 | refs/heads/master | 2021-01-18T00:46:20.848151 | 2016-07-26T22:09:10 | 2016-07-26T22:09:10 | 64,026,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | from flask import jsonify, request, current_app, url_for
from . import api
from ..models import User, Post
@api.route('/users/<int:id>')
def get_user(id):
user = User.query.get_or_404(id)
return jsonify(user.to_json())
@api.route('/users/<int:id>/posts/')
def get_user_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['BLOGPOLE_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_user_posts', page=page-1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_user_posts', page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/users/<int:id>/timeline/')
def get_user_followed_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['BLOGPOLE_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_user_followed_posts', page=page-1,
_external=True)
next = None
if pagination.has_next:
next = url_for('api.get_user_followed_posts', page=page+1,
_external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
| [
"jaykachhadia@hotmail.com"
] | jaykachhadia@hotmail.com |
f160905d816728acf5ab28b38fe37cd56249ef23 | a95aebf977058d32fa4298e35939fb5813f11276 | /nn/layers.py | f339ba6e01b645a013632b3b8d3cd2e47a1ae2a2 | [
"MIT"
] | permissive | CV-IP/uqvi | f6e595c60ab86eb00c3b221d24f7300a4f872839 | 2534c26c41a4745e98d4b12d66270691002d1a5f | refs/heads/master | 2022-12-22T20:47:44.140964 | 2020-10-03T17:40:17 | 2020-10-03T17:40:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | py | import os
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _triple
from nn.bayes_conv import BayesConv3d, BayesConv2d
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel, stride, padding=1, bayes = False):
super(ConvBlock, self).__init__()
if bayes:
self.conv = nn.Sequential(
nn.InstanceNorm3d(in_channels),
nn.ReLU(inplace=True),
BayesConv3d(in_channels, out_channels, kernel_size=kernel, stride=stride, padding=padding, bias=False))
else:
self.conv = nn.Sequential(
nn.InstanceNorm3d(in_channels),
nn.ReLU(inplace=True),
nn.Conv3d(in_channels, out_channels, kernel_size=kernel, stride=stride, padding=padding, bias=False))
def forward(self, x):
x = self.conv(x)
return x
class BasicDownBlock(nn.Module):
def __init__(self, in_ch, out_ch, downsample, bayes=False):
super(BasicDownBlock, self).__init__()
if downsample:
str = 2
else:
str = 1
self.conv_1 = ConvBlock(in_ch, out_ch, kernel=3, stride=str, bayes=bayes)
self.conv_2 = ConvBlock(out_ch, out_ch, kernel=3, stride=1, bayes=bayes)
self.down = None
if downsample:
self.down = ConvBlock(in_ch, out_ch, kernel=1, stride=2, padding=0, bayes=False)
def forward(self, inp):
x = self.conv_1(inp)
x = self.conv_2(x)
if self.down is not None:
return x + self.down(inp)
else:
return x + inp
class BasicUpBlock(nn.Module):
def __init__(self, in_ch, out_ch, bayes=False):
super(BasicUpBlock, self).__init__()
self.upsample = nn.Sequential(
ConvBlock(in_ch, out_ch, kernel=1, stride=1, padding=0, bayes=False),
nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)
)
self.conv_1 = ConvBlock(out_ch, out_ch, kernel=3, stride=1, bayes=bayes)
self.conv_2 = ConvBlock(out_ch, out_ch, kernel=3, stride=1, bayes=bayes)
def forward(self, inp, skip_connection=None):
x = self.upsample(inp)
if skip_connection is not None:
x = x + skip_connection
x1 = self.conv_1(x)
x1 = self.conv_2(x1)
return x1 + x | [
"noreply@github.com"
] | noreply@github.com |
d78ec7a24eeaf6b851e023391de0374e52f65f9a | def8eea03f1f0923eb5c8548dec55d6b8af3cc63 | /calibrateKDEGlobal.py | fd4b4a9a2ca1882f83f3a7293f852fcff5ff371a | [] | no_license | nalipour/TimepixCalibration | 5bf668c6005e955a89819f8c6d59c85119f1bf11 | 4893b6c34922064c9b9347f00a4fe78285200b82 | refs/heads/master | 2021-01-18T14:45:05.517492 | 2015-06-22T16:50:52 | 2015-06-22T16:50:52 | 32,589,720 | 0 | 0 | null | 2015-03-20T14:54:04 | 2015-03-20T14:54:03 | null | UTF-8 | Python | false | false | 14,559 | py | # will use kde method to finding most likely TOT
# for the global calibration
from optparse import OptionParser
from scipy.stats import gaussian_kde
import numpy as np
import matplotlib.pyplot as plt
import ROOT as R
from os import environ
import getpass
import Constants as C
parser = OptionParser()
parser.add_option("-b", "--assembly",
help="Assembly name", dest="ASSEMBLY")
parser.add_option("-s", "--source",
help="Source name", dest="SOURCE")
(options, args) = parser.parse_args()
if(options.ASSEMBLY):
assembly=options.ASSEMBLY
else :
print "Please specify assembly"
print "choose from", C.known_assemblies
parser.print_help()
exit()
if assembly not in C.known_assemblies:
print "Assembly not recognised"
print "choose from", C.known_assemblies
exit()
if(options.SOURCE):
source=options.SOURCE
else :
print "Please specify source"
print "choose from", C.known_sources
parser.print_help()
exit()
if source not in C.known_sources:
print "Source not recognised"
print "choose from", C.known_sources
exit()
if source in C.LNLS_sources and assembly != "A06-W0110":
print "Source only available for assembly A06-W0110"
print "please reconsider input"
exit()
# wanted to use kde_scipy function, but wouldn't work wth my scipy version
# this is a workaround which does the same
def findMostLikelyTOT(assembly,source,llim,ulim):
# Load data
home = environ['HOME']
base = "%s/eos/clicdp/data/VertexCalibration" %home
assembly_start = assembly.split("-")[0]
if source == "Fe":
rootfile = R.TFile("%s/%s/%s_SinglePixelCalibration/Fe55_%s_spc.root"%(base,assembly,assembly_start,assembly))
elif source == "Am":
rootfile = R.TFile("%s/%s/%s_SinglePixelCalibration/Am241_%s_spc.root"%(base,assembly,assembly_start,assembly))
elif source == "Cd":
rootfile = R.TFile("%s/%s/%s_SinglePixelCalibration/Cd109_%s_spc.root"%(base,assembly,assembly_start,assembly))
elif source == "CuInXRF":
if assembly == "B06-W0125":
rootfile = R.TFile("%s/%s/%s_SinglePixelCalibration/Cu_In_%s_spc.root"%(base,assembly,assembly_start,assembly))
else:
rootfile = R.TFile("%s/%s/CuIn_%s.root" %(base,assembly,assembly))
elif source == "Co":
if assembly == "B06-W0125":
rootfile = R.TFile("%s/%s/%s_SinglePixelCalibration/Co57_%s_spc.root"%(base,assembly,assembly_start,assembly))
else:
rootfile = R.TFile("%s/%s/Co57_%s.root" %(base,assembly,assembly))
elif source == "CoXRF":
rootfile = R.TFile("%s/LNLS_Analysis/SinglePixelAnalysis/root_files/%s-25V_CoXRF_CalibTree.root" %(base,assembly))
elif source == "CrXRF":
rootfile = R.TFile("%s/LNLS_Analysis/SinglePixelAnalysis/root_files/%s-25V_CrXRF_CalibTree.root" %(base,assembly))
elif source == "CuXRF":
rootfile = R.TFile("%s/LNLS_Analysis/SinglePixelAnalysis/root_files/%s-25V_CuXRF_CalibTree.root" %(base,assembly))
elif source == "FeXRF":
rootfile = R.TFile("%s/LNLS_Analysis/SinglePixelAnalysis/root_files/%s-25V_FeXRF_CalibTree.root" %(base,assembly))
elif source == "MnXRF":
rootfile = R.TFile("%s/LNLS_Analysis/SinglePixelAnalysis/root_files/%s-25V_MnXRF_CalibTree.root" %(base,assembly))
elif source == "NiXRF":
rootfile = R.TFile("%s/LNLS_Analysis/SinglePixelAnalysis/root_files/%s-25V_NiXRF_CalibTree.root" %(base,assembly))
elif source == "TiXRF":
rootfile = R.TFile("%s/LNLS_Analysis/SinglePixelAnalysis/root_files/%s-25V_TiXRF_CalibTree.root" %(base,assembly))
elif source == "VXRF":
rootfile = R.TFile("%s/LNLS_Analysis/SinglePixelAnalysis/root_files/%s-25V_VXRF_CalibTree.root" %(base,assembly))
tree = rootfile.Get("pixels")
print "got tree"
# Set up junk file to appease ROOT
username = getpass.getuser()
junkfile = R.TFile("/tmp/%s/junkfile_%s_%s.root" %(username,assembly,source),"RECREATE")
# Just keep the events we need
t2 = tree.CopyTree("tot < %i && tot > %i" %(ulim,llim))
print "copied tree"
peak_tots = []
peak_amps = []
peak_is = []
peak_loweris = []
peak_upperis = []
peak_ents = []
peak_lowersigmas = []
peak_uppersigmas = []
nsteps = 100
x_grid = np.linspace(llim, ulim, nsteps)
step_size = (ulim-llim)/float(nsteps-1)
ent = t2.GetEntries()
print "Will load", ent, "entries"
tot = []
if ent > 1:
tot = np.zeros(ent, dtype=float)
t2.Branch('tot', tot, 'tot/F')
for i in xrange(ent):
if i%1000000==0:
print ".....loading", i
t2.GetEvent(i)
tot[i] = t2.tot
try:
print "trying"
# The KDE calculation
density = gaussian_kde(tot)
if density.silverman_factor() > 0.1:
density.covariance_factor = density.silverman_factor
print "Bandwidth determined from Silverman factor:", density.silverman_factor()
else:
density.covariance_factor = lambda: 0.1
print "Bandwidth set at 0.1 (Silverman factor too small: %f)" %density.silverman_factor()
density._compute_covariance()
workaround = density(x_grid)
# Find peaks by finding where gradient passes through 0
grad = np.gradient(workaround)
last_grad = grad[0]
for i in xrange(1,len(grad)):
this_grad = grad[i]
if (last_grad > 0 and this_grad < 0):
peak_tots.append((x_grid[i-1] + x_grid[i]) / 2.)
peak_amps.append((workaround[i-1] + workaround[i]) / 2.)
peak_is.append(i)
last_grad = this_grad
for i in xrange(len(peak_tots)):
print "Peak found at TOT", peak_tots[i], "with amplitude", peak_amps[i], "at xgrid", peak_is[i]
# Filter peaks to take only highest
print "pre filter", peak_amps,peak_tots,peak_is
sorted_peaks = sorted(zip(peak_amps,peak_tots,peak_is),reverse=1)
if source in ["Fe","Cd","CoXRF","CrXRF","CuXRF","FeXRF","MnXRF","NiXRF","TiXRF","VXRF"]:
peak_amps = [sorted_peaks[0][0]]
peak_tots = [sorted_peaks[0][1]]
peak_is = [sorted_peaks[0][2]]
if source in ["Co","CuInXRF"]:
peak_amps = [sorted_peaks[0][0],sorted_peaks[1][0]]
peak_tots = [sorted_peaks[0][1],sorted_peaks[1][1]]
peak_is = [sorted_peaks[0][2],sorted_peaks[1][2]]
if source == "Am":
peak_amps = [sorted_peaks[0][0],sorted_peaks[1][0],sorted_peaks[2][0]]
peak_tots = [sorted_peaks[0][1],sorted_peaks[1][1],sorted_peaks[2][1]]
peak_is = [sorted_peaks[0][2],sorted_peaks[1][2],sorted_peaks[2][2]]
print "post filter", peak_amps,peak_tots,peak_is
# Put peaks back on TOT order
ordered_peaks = sorted(zip(peak_tots,peak_amps,peak_is))
peak_tots = [peaki[0] for peaki in ordered_peaks]
peak_amps = [peaki[1] for peaki in ordered_peaks]
peak_is = [peaki[2] for peaki in ordered_peaks]
print "post ordering", peak_amps,peak_tots,peak_is
# Calculate the upper and lower i for each peak
for i in xrange(len(peak_tots)):
if i == 0 and i == len(peak_tots)-1:
peak_loweris.append(0)
peak_upperis.append(nsteps)
elif i == 0:
peak_loweris.append(0)
peak_upperis.append((peak_is[i] + peak_is[i+1])/2.)
elif i == len(peak_tots)-1:
peak_loweris.append((peak_is[i-1] + peak_is[i])/2.)
peak_upperis.append(nsteps)
else:
peak_loweris.append((peak_is[i-1] + peak_is[i])/2.)
peak_upperis.append((peak_is[i] + peak_is[i+1])/2.)
print "peak_is", peak_is
print "peak_loweris", peak_loweris
print "peak_upperis", peak_upperis
# Calculate the entries in each peak
for i in xrange(len(peak_tots)):
if i == 0 and i == len(peak_tots)-1:
peak_ents.append(int(t2.GetEntries("tot > %i && tot < %i" %(llim,ulim))))
elif i == 0:
peak_ents.append(int(t2.GetEntries("tot > %i && tot < %i" %(llim,(peak_tots[i] + peak_tots[i+1])/2.))))
elif i == len(peak_tots)-1:
peak_ents.append(int(t2.GetEntries("tot > %i && tot < %i" %((peak_tots[i-1] + peak_tots[i])/2.,ulim))))
else:
peak_ents.append(int(t2.GetEntries("tot > %i && tot < %i" %((peak_tots[i-1] + peak_tots[i])/2.,(peak_tots[i] + peak_tots[i+1])/2.))))
print "peak_ents =", peak_ents
# Calculate the uncertainty on each peak position
for i in xrange(len(peak_tots)):
maxindex = peak_is[i]
lowerindex = peak_is[i]-1
while (np.trapz(workaround[lowerindex:maxindex],x=x_grid[lowerindex:maxindex]) < 0.341*2*np.trapz(workaround[peak_loweris[i]:maxindex],x=x_grid[peak_loweris[i]:maxindex])) and (lowerindex>0):
lowerindex=lowerindex-1
lowersigma = (x_grid[maxindex]-x_grid[lowerindex]) / R.sqrt(peak_ents[i])
peak_lowersigmas.append(lowersigma)
maxindex = peak_is[i]+1
upperindex = peak_is[i]+2
while (np.trapz(workaround[maxindex:upperindex],x=x_grid[maxindex:upperindex]) < 0.341*2*np.trapz(workaround[maxindex:peak_upperis[i]],x=x_grid[maxindex:peak_upperis[i]])) and (upperindex<(nsteps-1)):
upperindex=upperindex+1
uppersigma = (x_grid[upperindex]-x_grid[maxindex]) / R.sqrt(peak_ents[i])
peak_uppersigmas.append(uppersigma)
except np.linalg.linalg.LinAlgError as err:
peak_tots.append(0.)
peak_lowersigmas.append(0.)
peak_uppersigmas.append(0.)
else:
peak_tots.append(0.)
peak_lowersigmas.append(0.)
peak_uppersigmas.append(0.)
# Make plots
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
ax.tick_params(axis='x', pad=20)
plt.xticks(np.arange(llim,ulim+1,(ulim-llim)/5.))
ax.set_xlabel('TOT (ADC)')
for i in xrange(len(peak_tots)):
ax.text(0.01, 0.99 - (i*0.1), r'Peak: $%i \pm ^{%0.2f} _{%0.2f} \pm %0.2f$' %(peak_tots[i],peak_uppersigmas[i],peak_lowersigmas[i],step_size/2.),
verticalalignment='top', horizontalalignment='left',
transform=ax.transAxes,
fontsize=40)
ax.hist(tot, bins=100,fc='gray',alpha=0.3,normed=True)
ax.plot(x_grid, workaround, color='blue', alpha=0.5, lw=3)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(40)
fig.tight_layout()
fig.savefig("plots/KDEPeaks/Global/%s_%s_GlobalSpectrum.pdf" %(assembly,source))
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
ax.tick_params(axis='x', pad=20)
ax.set_xlabel('TOT (ADC)')
ax.plot(x_grid, grad, color='red', alpha=0.5, lw=3)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(40)
fig.tight_layout()
fig.savefig("plots/KDEPeaks/Global/%s_%s_GlobalSpectrumDeriv.pdf" %(assembly,source))
# Write results to txt file
f = open('results/kde/%s_%s_GlobalResults.txt' %(assembly,source), 'w')
if len(peak_tots) == 1:
f.write('%f \t %f \t %f \t %f \n' %(peak_tots[0],peak_lowersigmas[0],peak_uppersigmas[0],step_size/2.))
elif len(peak_tots) == 2:
f.write('%f \t %f \t %f \t %f \t %f \t %f \t %f \n' %(peak_tots[0],peak_lowersigmas[0],peak_uppersigmas[0],peak_tots[1],peak_lowersigmas[1],peak_uppersigmas[1],step_size/2.))
else:
f.write('%f \t %f \t %f \t %f \t %f \t %f \t %f \t %f \t %f \t %f \n' %(peak_tots[0],peak_lowersigmas[0],peak_uppersigmas[0],peak_tots[1],peak_lowersigmas[1],peak_uppersigmas[1],peak_tots[2],peak_lowersigmas[2],peak_uppersigmas[2],step_size/2.))
f.close()
print "finished", assembly, source
def getLimits(assembly,source):
if assembly == "A06-W0110":
if source == "Fe": limits = [0,400]
if source == "Am": limits = [0,1500]
if source == "Cd": limits = [0,800]
if source == "CuInXRF": limits = [0,1000]
if source == "Co": limits = [0,700]
if source == "CoXRF": limits = [0,400]
if source == "CrXRF": limits = [0,400]
if source == "CuXRF": limits = [0,400]
if source == "FeXRF": limits = [0,400]
if source == "MnXRF": limits = [0,400]
if source == "NiXRF": limits = [0,400]
if source == "TiXRF": limits = [0,400]
if source == "VXRF": limits = [0,400]
if assembly == "B06-W0125":
if source == "Fe": limits = [0,700]
if source == "Am": limits = [0,2800]
if source == "Cd": limits = [0,1700]
if source == "CuInXRF": limits = [0,1700]
if source == "Co": limits = [0,1100]
if assembly == "B07-W0125":
if source == "Fe": limits = [0,500]
if source == "Am": limits = [0,1500]
if source == "Cd": limits = [0,900]
if source == "CuInXRF": limits = [0,1000]
if source == "Co": limits = [0,600]
if assembly == "C04-W0110":
if source == "Fe": limits = [0,500]
if source == "Am": limits = [0,1400]
if source == "Cd": limits = [0,800]
if source == "CuInXRF": limits = [0,700]
if source == "Co": limits = [0,600]
if assembly == "D09-W0126":
if source == "Fe": limits = [0,600]
if source == "Am": limits = [0,1800]
if source == "Cd": limits = [0,1200]
if source == "CuInXRF": limits = [0,1100]
if source == "Co": limits = [0,800]
if assembly == "L04-W0125":
if source == "Fe": limits = [0,500]
if source == "Am": limits = [0,2000]
if source == "Cd": limits = [0,1100]
if source == "CuInXRF": limits = [0,1200]
if source == "Co": limits = [0,700]
return limits
limits = getLimits(assembly,source)
findMostLikelyTOT(assembly,source,limits[0],limits[1])
| [
"sophie.redford@gmail.com"
] | sophie.redford@gmail.com |
a069e590eabbac4bb1d1542c31087dfa21eb1e84 | ddcea8e8bd8f50a6e11dc4e22040c68692b3e3c4 | /create_tables.py | 0bbd861c02d45b9f731fe3ad3f09e420463dc84b | [] | no_license | dvee/udacity-data-warehouse-aws | c2451e3c288798f736ad7decf8d9444d686263b2 | 4d0fcd99da5cabb657128fdbba19fd247d16a41f | refs/heads/master | 2020-07-06T15:29:48.453216 | 2019-09-08T19:42:41 | 2019-09-08T19:42:41 | 203,067,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import configparser
import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def drop_tables(cur, conn):
"""Drop all tables if they exist"""
for query in drop_table_queries:
cur.execute(query)
conn.commit()
def create_tables(cur, conn):
"""Create all tables"""
for query in create_table_queries:
cur.execute(query)
conn.commit()
def main():
config = configparser.ConfigParser()
config.read('dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
drop_tables(cur, conn)
create_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main()
| [
"david@guestfolio.com"
] | david@guestfolio.com |
44685bc6126a0ae65a4a1fe07c203845687041f2 | 9d66d325a8cbda44a763d09f6d849155215a7eda | /snippets/serializers.py | 8b6686afeb2fb3dd37b0637ca5d3029800879e73 | [] | no_license | canhazn/rest-course | b082f8ae7848c27f49808df15dfc6cf487dfbe33 | fe067826cd821c8630bafe2c594c2742ba029ac3 | refs/heads/master | 2023-08-23T17:22:54.433605 | 2020-07-08T08:58:44 | 2020-07-08T08:58:44 | 277,531,447 | 0 | 0 | null | 2021-09-22T19:23:41 | 2020-07-06T12:07:32 | Python | UTF-8 | Python | false | false | 776 | py | from rest_framework import serializers
from snippets.models import Snippet
from django.contrib.auth.models import User
class SnippetSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
highlight = serializers.HyperlinkedIdentityField(
view_name='snippet-highlight', format='html')
class Meta:
model = Snippet
fields = ['id', 'title', 'highlight', 'code', 'linenos',
'language', 'style', 'owner']
class UserSerializer(serializers.HyperlinkedModelSerializer):
snippets = serializers.HyperlinkedRelatedField(
view_name="snippet-detail", read_only=True, many=True)
class Meta:
model = User
fields = ['id', 'username', 'snippets']
| [
"canhazn@gmail.com"
] | canhazn@gmail.com |
36760817da871664ff0f2462b9109898302b92f3 | 4a4d87489649bf9b9dd8db5977485c3b64c0b521 | /Scripts/get_objgraph.py | 5ffc9cdf4d88756dfd314876dc042f8bb158e6d3 | [] | no_license | laichunkit0903/text_classification | a3e725ebd05b2185e5b818e3624e27256e87127d | 7de450e896d5ff0e64170e004d84d470dcf93508 | refs/heads/master | 2022-04-20T11:18:08.192247 | 2020-04-13T01:29:40 | 2020-04-13T01:29:40 | 131,254,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | #!c:\users\luther\desktop\mpd\01tont~2\mpd_te~2\scripts\python.exe
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2008-2016 California Institute of Technology.
# Copyright (c) 2016-2017 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/pathos/browser/dill/LICENSE
"""
use objgraph to plot the reference paths for types found in dill.types
"""
#XXX: useful if could read .pkl file and generate the graph... ?
import dill as pickle
#pickle.debug.trace(True)
#import pickle
# get all objects for testing
from dill import load_types
load_types(pickleable=True,unpickleable=True)
from dill import objects
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
print ("Please provide exactly one type name (e.g. 'IntType')")
msg = "\n"
for objtype in list(objects.keys())[:40]:
msg += objtype + ', '
print (msg + "...")
else:
objtype = str(sys.argv[-1])
obj = objects[objtype]
try:
import objgraph
objgraph.show_refs(obj, filename=objtype+'.png')
except ImportError:
print ("Please install 'objgraph' to view object graphs")
# EOF
| [
"warless0903@gmail.com"
] | warless0903@gmail.com |
e3da8e40e1e2faa8a8ab45b5257ebe4fcc46b755 | a27cde8f9b353136f7d6824405a2086dee149c7f | /processpics.py | d3cc4cc02f8d491290baeac03674044ebc8cc846 | [] | no_license | akabaker/processpics | edd74798730502117590b49930ebb4c04851830d | 801e4763d16fc56037463d8b943107223baa06b7 | refs/heads/master | 2016-09-06T17:49:41.951336 | 2013-01-04T21:20:27 | 2013-01-04T21:20:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | #!/usr/bin/python
import asyncore
import pyinotify
import os
from SimpleCV import Image, Color
class EventHandler(pyinotify.ProcessEvent):
def color_distance(self, img, output_file):
result = img.colorDistance(Color.WHITE)
result.save(output_file)
def find_lines(self, img, output_file):
lines = img.findLines(threshold=200, minlinelength=100, cannyth1=100, cannyth2=200)
lines.draw(color=Color.RED)
img.save(output_file)
def process_IN_CREATE(self, event):
extension = event.name.split('.')[1]
if extension:
try:
print "Processing:", event.name
img = Image(event.pathname)
output_file = os.path.join('/tmp/', event.name)
self.find_lines(img, output_file)
print "Output:", output_file
except IOError as e:
print "Can't open this file {0}: {1}".format(e.errno, e.strerror)
else:
print "Skipping this file.."
wm = pyinotify.WatchManager() # Watch Manager
notifier = pyinotify.AsyncNotifier(wm, EventHandler())
wdd = wm.add_watch('/home/beor/Pictures/eye-fi', pyinotify.IN_CREATE, rec=True)
asyncore.loop()
| [
"akabaker@gmail.com"
] | akabaker@gmail.com |
f2b7264c36abf79d6fb76f1e26a17863e07ec4f4 | 9e7ab3bfab5a1c31bcce2b0501e1bed80a28d091 | /__init__.py | 536521bfe2234d5c484133231476ded8fdfc37b3 | [
"MIT"
] | permissive | ctb/vamb | 0009a18a32bddc7c3c32b9547cdd76fb1132aa17 | 06fecb93ec837e50383d78b5dc46fa4345ffca40 | refs/heads/master | 2020-04-14T00:15:35.879811 | 2018-12-20T12:03:17 | 2018-12-20T12:29:31 | 163,529,044 | 0 | 0 | null | 2018-12-29T17:31:28 | 2018-12-29T17:31:28 | null | UTF-8 | Python | false | false | 1,166 | py | """Variational Autoencoder for Metagenomic Binning
Vamb does what it says on the tin - bins metagenomes using a variational autoencoder.
Vamb contains the following modules:
vamb.vambtools
vamb.filtercontigs
vamb.parsecontigs
vamb.parsebam
vamb.encode
vamb.cluster
vamb.benchmark
To get it running, you need:
- A FASTA file of contigs or genes (contigs are better)
- BAM files of reads mapped to the FASTA file
- Python v >= 3.5 with some modules installed:
pytorch
numpy
pysam
General workflow:
1) Filter contigs by size using vamb.vambtools.filtercontigs
2) Map reads to contigs to obtain BAM file
3) Calculate TNF of contigs using vamb.parsecontigs
4) Create RPKM table from BAM files using vamb.parsebam
5) Train autoencoder using vamb.encode
6) Cluster latent representation using vamb.cluster
"""
__authors__ = 'Jakob Nybo Nissen', 'Simon Rasmussen'
__licence__ = 'MIT'
__version__ = (1, 0)
import sys as _sys
if _sys.version_info[:2] < (3, 5):
raise ImportError('Python version must be >= 3.5')
from . import vambtools
from . import parsebam
from . import parsecontigs
from . import cluster
from . import benchmark
from . import encode
| [
"jakobnybonissen@gmail.com"
] | jakobnybonissen@gmail.com |
907417fa48b08213c3e051da3d7ee7ae5c1ecf4a | 82646fb7fe40db6dcdf238548128f7b633de94c0 | /workspace/Python/Crawler/CookieToFile.py | 0e70b36dc42628037e532b82d407573e996e1b1e | [] | no_license | jtahstu/iCode | a7873618fe98e502c1e0e2fd0769d71b3adac756 | 42d0899945dbc1bab98092d21a1d946137a1795e | refs/heads/master | 2021-01-10T22:55:47.677615 | 2016-10-23T12:42:38 | 2016-10-23T12:42:38 | 70,316,051 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | import cookielib
import urllib2
#设置保存cookie的文件,同级目录下的cookie.txt
filename = 'cookie.txt'
#声明一个MozillaCookieJar对象实例来保存cookie,之后写入文件
cookie = cookielib.MozillaCookieJar(filename)
#利用urllib2库的HTTPCookieProcessor对象来创建cookie处理器
handler = urllib2.HTTPCookieProcessor(cookie)
#通过handler来构建opener
opener = urllib2.build_opener(handler)
#创建一个请求,原理同urllib2的urlopen
response = opener.open("http://www.baidu.com/")
#保存cookie到文件
cookie.save(ignore_discard=True, ignore_expires=True) | [
"root@jtahstu.com"
] | root@jtahstu.com |
a391dd863ff7a6a1cb59879e6ca2c7fde6313763 | 43bf100bbb15a3c0c4d2b6f7fe521f10393ecbae | /networkUDP.py | 79edc4c0cf4f07e840963f7493db3d4db6250916 | [] | no_license | HourGlss/Arena | 1a70e80b3b2f5078033c238f7db728e7a240629e | dfe016ca4867550ca083bfa3ceb35bdd7a6da34c | refs/heads/master | 2020-05-05T04:44:56.561441 | 2019-04-12T06:33:02 | 2019-04-12T06:33:02 | 179,723,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,441 | py | import socket
import pickle
import random
import time
import config
import threading
import sys
class Network:
uid = None
sleep_time = .016
to_send = None
to_send_lock = False
last_received = None
last_received_lock = False
stop = False
def __init__(self):
self.host = config.host_ip # For this to work on your machine this must be equal to the ipv4 address of the machine running the server
# You can find this address by typing ipconfig in CMD and copying the ipv4 address. Again this must be the servers
# ipv4 address. This feild will be the same for all your clients.
self.incoming_port = 5556
self.outgoing_port = 5555
self.incoming_addr = (self.host, self.incoming_port)
self.outgoing_addr = (self.host, self.outgoing_port)
outgoing = threading.Thread(target=self.outgoing)
outgoing.start()
incoming = threading.Thread(target=self.incoming)
incoming.start()
def set_uid(self, uid):
self.uid = uid
def incoming(self):
incoming = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
incoming.bind(("", self.incoming_port))
while True:
if not self.last_received_lock:
self.last_received_lock = True
data_rec, addr_rec = incoming.recvfrom(1024)
self.last_received = pickle.loads(data_rec)
# print(data_rec)
# print(self.last_received)
if self.uid is None:
self.set_uid(self.last_received[0]['uid'])
self.last_received_lock = False
time.sleep(self.sleep_time)
if self.stop:
# print("xxx I BROKE THE LOOP")
break
def stop_networking(self):
# print("Calling stop")
self.stop = True
def outgoing(self):
outgoing = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
if not self.to_send_lock:
# print("within outgoing, wasnt locked")
self.to_send_lock = True
if self.to_send is not None:
# print("data was actually sent")
pickled = pickle.dumps(self.to_send)
outgoing.sendto(pickled, self.outgoing_addr)
self.to_send_lock = False
time.sleep(self.sleep_time)
if self.stop:
# print("xxx I BROKE THE LOOP")
break
def send(self, data_to_send):
# print("net-udp trying to send")
if not self.to_send_lock:
# print("wasnt locked")
self.to_send_lock = True
self.to_send = data_to_send
self.to_send_lock = False
return True
return False
def receive(self):
# print("receive is being called")
if not self.last_received_lock:
self.last_received_lock = True
# print("it's not locked")
if self.last_received is not None:
# print("It isnt none")
data_to_return = self.last_received
# print(data_to_return)
self.last_received = None
else:
# print("It is none")
self.last_received_lock = False
return False
self.last_received_lock = False
return data_to_return
return False
#FIRST POC
# UDP_IP = "127.0.0.1"
# UDP_PORT = 5555
# MESSAGE = "Hello, World!"
#
# sock = socket.socket(socket.AF_INET, # Internet
# socket.SOCK_DGRAM) # UDP
# sock.sendto(MESSAGE.encode(), (UDP_IP, UDP_PORT))
# data,addr = sock.recvfrom(2048)
# print(data.decode())
#SECOND POC
# N = Network()
# last_sent = None
# while True:
# now = time.time()
# if last_sent == None or now - last_sent >= .06:
# data_to_send = {"x": random.randint(0, 1024), 'y': random.randint(0, 768),'mouse_x':random.randint(0, 768),'mouse_y':random.randint(0, 768)}
# response = False
# response = N.send(data_to_send)
# if response:
# # print("data was Qd to be sent")
# pass
# received = N.recieve()
# if received != False:
# # print("was received",received)
# pass
#
# # print("My UID is {}".format(N.uid))
# last_sent = now
| [
"azbairos@gmail.com"
] | azbairos@gmail.com |
daffcd2c71e1aa642d272207dca6fb0e42a37757 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /smith/bert/tokenization.py | dc88d1b4e3bbfaae01a5a7e0f295c7f14bd70f27 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 13,084 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
from absl import flags
import six
import tensorflow.compat.v1 as tf
# Module-level flag: when enabled, vocab tokens matching the reserved
# "[unusedNN]" pattern below bypass wordpiece tokenization entirely.
FLAGS = flags.FLAGS
flags.DEFINE_bool(
    "preserve_unused_tokens", False,
    "If True, Wordpiece tokenization will not be applied to words in the vocab."
)
# Matches BERT's reserved placeholder tokens, e.g. "[unused0]", "[unused99]".
_UNUSED_TOKEN_RE = re.compile("^\\[unused\\d+\\]$")
def preserve_token(token, vocab):
  """Decides whether `token` should bypass wordpiece splitting.

  A token is preserved only when the --preserve_unused_tokens flag is on,
  the token is present in `vocab`, and it looks like a reserved
  "[unusedNN]" placeholder.
  """
  eligible = FLAGS.preserve_unused_tokens and token in vocab
  return eligible and _UNUSED_TOKEN_RE.search(token) is not None
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
  """Raises ValueError when `do_lower_case` contradicts the checkpoint name.

  The casing is chosen by the user and is not recorded in bert_config.json,
  so it is inferred heuristically from the well-known pretrained checkpoint
  directory names. Unknown checkpoint names pass silently.
  """
  if not init_checkpoint:
    return
  match = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
  if match is None:
    return
  model_name = match.group(1)
  lower_models = frozenset([
      "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
      "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
  ])
  cased_models = frozenset([
      "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
      "multi_cased_L-12_H-768_A-12"
  ])
  # (actual_flag, case_name, opposite_flag) for the detected mismatch.
  if model_name in lower_models and not do_lower_case:
    mismatch = ("False", "lowercased", "True")
  elif model_name in cased_models and do_lower_case:
    mismatch = ("True", "cased", "False")
  else:
    return
  actual_flag, case_name, opposite_flag = mismatch
  raise ValueError(
      "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
      "However, `%s` seems to be a %s model, so you "
      "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
      "how the model was pre-training. If this error is wrong, please "
      "just comment out this check." % (actual_flag, init_checkpoint,
                                        model_name, case_name, opposite_flag))
def convert_to_unicode(text):
  """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

  Undecodable byte sequences are silently dropped (errors="ignore").

  Args:
    text: `str`/`bytes` on Python 3, `str`/`unicode` on Python 2.

  Returns:
    The text as a unicode string.

  Raises:
    ValueError: if `text` is not a supported string type, or the interpreter
      is neither Python 2 nor Python 3.
  """
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      # Python 2 `str` is a byte string; decode it to unicode.
      return text.decode("utf-8", "ignore")
    elif isinstance(text, unicode):
      return text
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
  """Returns text encoded in a way suitable for print or `tf.logging`.

  These functions want `str` for both Python2 and Python3, but in one case
  it's a Unicode string and in the other it's a byte string — so unlike
  `convert_to_unicode`, on Python 2 this *encodes* unicode to utf-8 bytes.
  """
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text
    elif isinstance(text, unicode):
      # Python 2 `print` wants byte strings.
      return text.encode("utf-8")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
  """Reads a vocabulary file (one token per line) into an OrderedDict.

  Ids are assigned in file order, starting at 0; a duplicate token keeps
  its first id. Lines are stripped of surrounding whitespace.
  """
  vocab = collections.OrderedDict()
  with tf.gfile.GFile(vocab_file, "r") as reader:
    for line in reader:
      token = convert_to_unicode(line).strip()
      if token not in vocab:
        vocab[token] = len(vocab)
  return vocab
def convert_by_vocab(vocab, items):
  """Maps each element of `items` through the `vocab` dict, preserving order."""
  return [vocab[item] for item in items]
def convert_tokens_to_ids(vocab, tokens):
  # Thin convenience wrapper: token strings -> integer ids.
  return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
  # Thin convenience wrapper: integer ids -> token strings.
  return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
  """Trims `text` and splits it on runs of whitespace.

  Returns an empty list for empty or all-whitespace input.
  """
  stripped = text.strip()
  return stripped.split() if stripped else []
class FullTokenizer(object):
  """End-to-end tokenizer: basic splitting followed by wordpiece splitting."""

  def __init__(self, vocab_file, do_lower_case=True):
    self.vocab = load_vocab(vocab_file)
    # Reverse mapping for id -> token lookups.
    self.inv_vocab = {idx: tok for tok, idx in self.vocab.items()}
    self.basic_tokenizer = BasicTokenizer(
        do_lower_case=do_lower_case, vocab=self.vocab)
    self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

  def tokenize(self, text):
    """Splits `text` into wordpieces; protected [unusedNN] tokens pass through."""
    split_tokens = []
    for token in self.basic_tokenizer.tokenize(text):
      if preserve_token(token, self.vocab):
        split_tokens.append(token)
      else:
        split_tokens.extend(self.wordpiece_tokenizer.tokenize(token))
    return split_tokens

  def convert_tokens_to_ids(self, tokens):
    return convert_by_vocab(self.vocab, tokens)

  def convert_ids_to_tokens(self, ids):
    return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
  """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

  def __init__(self, do_lower_case=True, vocab=tuple()):
    """Constructs a BasicTokenizer.

    Args:
      do_lower_case: Whether to lower case the input.
      vocab: A container of tokens to not mutate during tokenization.
    """
    self.do_lower_case = do_lower_case
    self.vocab = vocab

  def tokenize(self, text):
    """Tokenizes a piece of text.

    Pipeline: drop invalid chars / normalize whitespace, space out CJK
    characters, whitespace-split, then (unless the token is protected by
    `preserve_token`) optionally lower-case + strip accents, and split on
    punctuation.
    """
    text = convert_to_unicode(text)
    text = self._clean_text(text)
    # This was added on November 1st, 2018 for the multilingual and Chinese
    # models. This is also applied to the English models now, but it doesn't
    # matter since the English models were not trained on any Chinese data
    # and generally don't have any Chinese data in them (there are Chinese
    # characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia.).
    text = self._tokenize_chinese_chars(text)
    orig_tokens = whitespace_tokenize(text)
    split_tokens = []
    for token in orig_tokens:
      # Tokens such as "[unused0]" are kept verbatim when the flag is set.
      if preserve_token(token, self.vocab):
        split_tokens.append(token)
        continue
      if self.do_lower_case:
        token = token.lower()
        token = self._run_strip_accents(token)
      split_tokens.extend(self._run_split_on_punc(token))
    # Re-join and re-split to flatten the per-token lists produced above.
    output_tokens = whitespace_tokenize(" ".join(split_tokens))
    return output_tokens

  def _run_strip_accents(self, text):
    """Strips accents from a piece of text."""
    # NFD decomposition separates base characters from combining marks
    # (category "Mn"), which are then dropped.
    text = unicodedata.normalize("NFD", text)
    output = []
    for char in text:
      cat = unicodedata.category(char)
      if cat == "Mn":
        continue
      output.append(char)
    return "".join(output)

  def _run_split_on_punc(self, text):
    """Splits punctuation on a piece of text.

    Each punctuation character becomes its own token; maximal runs of
    non-punctuation characters stay together.
    """
    chars = list(text)
    i = 0
    start_new_word = True
    output = []  # list of char-lists; each inner list is one output token
    while i < len(chars):
      char = chars[i]
      if _is_punctuation(char):
        output.append([char])
        start_new_word = True
      else:
        if start_new_word:
          output.append([])
        start_new_word = False
        output[-1].append(char)
      i += 1
    return ["".join(x) for x in output]

  def _tokenize_chinese_chars(self, text):
    """Adds whitespace around any CJK character."""
    output = []
    for char in text:
      cp = ord(char)
      if self._is_chinese_char(cp):
        output.append(" ")
        output.append(char)
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)

  def _is_chinese_char(self, cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
        (cp >= 0x3400 and cp <= 0x4DBF) or  #
        (cp >= 0x20000 and cp <= 0x2A6DF) or  #
        (cp >= 0x2A700 and cp <= 0x2B73F) or  #
        (cp >= 0x2B740 and cp <= 0x2B81F) or  #
        (cp >= 0x2B820 and cp <= 0x2CEAF) or
        (cp >= 0xF900 and cp <= 0xFAFF) or  #
        (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
      return True
    return False

  def _clean_text(self, text):
    """Performs invalid character removal and whitespace cleanup on text."""
    output = []
    for char in text:
      cp = ord(char)
      # Drop NUL, the Unicode replacement character, and control characters.
      if cp == 0 or cp == 0xfffd or _is_control(char):
        continue
      if _is_whitespace(char):
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)
class WordpieceTokenizer(object):
  """Runs WordPiece tokenization."""

  def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
    self.vocab = vocab  # token -> id mapping; only membership is used here
    self.unk_token = unk_token  # emitted for unmatchable or oversized words
    self.max_input_chars_per_word = max_input_chars_per_word

  def tokenize(self, text):
    """Tokenizes a piece of text into its word pieces.

    This uses a greedy longest-match-first algorithm to perform tokenization
    using the given vocabulary.

    For example:
      input = "unaffable"
      output = ["un", "##aff", "##able"]

    Args:
      text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.

    Returns:
      A list of wordpiece tokens.
    """
    text = convert_to_unicode(text)
    output_tokens = []
    for token in whitespace_tokenize(text):
      chars = list(token)
      # Guard against pathological inputs: overly long "words" become UNK.
      if len(chars) > self.max_input_chars_per_word:
        output_tokens.append(self.unk_token)
        continue
      is_bad = False  # set when some suffix cannot be matched at all
      start = 0
      sub_tokens = []
      while start < len(chars):
        end = len(chars)
        cur_substr = None
        # Greedy longest match: shrink [start, end) until a vocab hit.
        while start < end:
          substr = "".join(chars[start:end])
          if start > 0:
            substr = "##" + substr  # continuation-piece marker
          if substr in self.vocab:
            cur_substr = substr
            break
          end -= 1
        if cur_substr is None:
          is_bad = True
          break
        sub_tokens.append(cur_substr)
        start = end
      # If any part of the word failed to match, the whole word becomes UNK.
      if is_bad:
        output_tokens.append(self.unk_token)
      else:
        output_tokens.extend(sub_tokens)
    return output_tokens
def _is_whitespace(char):
"""Checks whether `char` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `char` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
e0967cf8441070bc9a2723496d0ceb00cb79f645 | e4b15796f8ff22ec4ac1da0f06594f241a7f0116 | /tes1.py | a6798d7da0538e3af2a9092188f6036ef6467220 | [] | no_license | Bejjoeqq/flask_api | 394b67ff6999733c9fd544da1d71c04f5de55222 | bec0c94a9304e2bb5c9687752a6f4d8d337f5f7e | refs/heads/main | 2023-04-09T14:45:32.396773 | 2021-04-10T09:31:44 | 2021-04-10T09:31:44 | 339,671,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | x = "Hello world"
def asd(xx):
    """Reports whether the substring "Hello" occurs in `xx`."""
    contains_hello = "Hello" in xx
    return contains_hello
def function1(s):
    """Calls the callable `s` with "Hello" and prints the result."""
    result = s("Hello")
    print(result)
def function(s):
    """Calls the callable `s` with ("Hello", "a") and prints the result."""
    result = s("Hello", "a")
    print(result)
if __name__ == '__main__':
    # function(lambda xx,y: "Hello" in xx)
    # Bug fix: the original line `xx = [80 if "Hello" in x]` was a
    # SyntaxError — a conditional expression requires an `else` branch.
    # Interpreted as "a list containing 80 when x mentions Hello, else
    # empty" — NOTE(review): original intent is unclear, confirm.
    xx = [80] if "Hello" in x else []
"aldhiya.rozak@gmail.com"
] | aldhiya.rozak@gmail.com |
c441260c06dae91d6641ae9d9d73cf55928c8d6e | 3f0a446f493951693af0e6f44fa8076b7522a2fb | /ga_v3.py | a55948cf4f0a5f235cdd9c4537673fda6923a895 | [] | no_license | by-student-2017/eam_database_fit | f74fb2c8504f709e677b1a2c4c9e34c688a3930c | a74006c402bd46550b67dc27a9284c7dd1d262e2 | refs/heads/master | 2023-03-24T07:12:44.277706 | 2021-03-02T03:49:50 | 2021-03-02T03:49:50 | 281,506,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,809 | py | import random
from deap import creator, base, tools, algorithms
import numpy
import numpy as np
import commands
import sys
#----------------------------------------------------------------------
# --- File names and external tool commands (Python 2 script) -------------
file_tmp = 'EAM_code.tmp'
file_inp = 'EAM_code'
lammps_adress = "lmp"
cif2cell_adress = "cif2cell"
# NOTE(review): `setenv` is csh syntax; run through getoutput's /bin/sh it
# fails and does not affect this process's environment — confirm intent.
commands.getoutput("setenv OMP_NUM_THREADS 1")
num_core = commands.getoutput("grep 'core id' /proc/cpuinfo | sort -u | wc -l")
#pwscf_adress = "mpirun -np "+str(num_core)+" --allow-run-as-root pw.x"
#pwscf_adress = "mpirun -np "+str(num_core)+" pw.x"
pwscf_adress = "mpirun -np 2 pw.x"
# Element symbol being fitted, parsed from "atomtype = 'Xx'" in EAM.input.
satom = commands.getoutput("grep \"atomtype\" EAM.input | sed -e \"s/.*=//\" -e \"s/'//g\"")
commands.getoutput("chmod +x ./cfg2vasp/cfg2vasp")
commands.getoutput("chmod +x pwscf2force")
commands.getoutput("chmod +x setinp")
commands.getoutput("./setinp")
commands.getoutput("mkdir cfg")
commands.getoutput("mkdir work")
commands.getoutput("echo -n > energy.dat")
temp_K = commands.getoutput("awk '{if($2==\"temp\"){print $4}}' in.lmp")
print "Lammps MD: "+temp_K+" K"
target = [0,0,0] # dummy data
y_str = [0] # dummy data
# Atom count from the lammps data file; per-atom force buffers below hold
# lammps (suffix l) and pwscf (suffix p) x/y/z components.
natom = commands.getoutput("awk '{if($2==\"atoms\"){print $1}}' data.in")
fxl = numpy.ones(int(natom)+1)
fyl = numpy.ones(int(natom)+1)
fzl = numpy.ones(int(natom)+1)
fxp = numpy.ones(int(natom)+1)
fyp = numpy.ones(int(natom)+1)
fzp = numpy.ones(int(natom)+1)
#----------------------------------------------------------------------
# Locate this element's parameter section in EAM_code.init and read the
# 22 initial EAM parameters from fixed line offsets below it.
print "read parameters from EAM_code.init"
nline = commands.getoutput("grep -n "+str(satom)+" EAM_code.init | head -1 | sed -e \"s/:.*//g\"")
print "read line: "+nline
check_satom = commands.getoutput("awk '{if(NR=="+str(nline)+"+0){print $1}}' EAM_code.init | head -1")
print "fit element: "+check_satom
# fitting parameters
x0 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+1){print $1}}' EAM_code.init | head -1"))
x1 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+2){print $1}}' EAM_code.init | head -1"))
x2 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+3){print $1}}' EAM_code.init | head -1"))
x3 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+4){print $1}}' EAM_code.init | head -1"))
x4 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+5){print $1}}' EAM_code.init | head -1"))
x5 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+6){print $1}}' EAM_code.init | head -1"))
x6 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+7){print $1}}' EAM_code.init | head -1"))
x7 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+8){print $1}}' EAM_code.init | head -1"))
x8 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+9){print $1}}' EAM_code.init | head -1"))
x9 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+10){print $1}}' EAM_code.init | head -1"))
x10 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+11){print $1}}' EAM_code.init | head -1"))
x11 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+12){print $1}}' EAM_code.init | head -1"))
x12 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+13){print $1}}' EAM_code.init | head -1"))
x13 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+14){print $1}}' EAM_code.init | head -1"))
x14 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+15){print $1}}' EAM_code.init | head -1"))
x15 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+16){print $1}}' EAM_code.init | head -1"))
x16 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+17){print $1}}' EAM_code.init | head -1"))
x17 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+18){print $1}}' EAM_code.init | head -1"))
x18 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+19){print $1}}' EAM_code.init | head -1"))
x19 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+20){print $1}}' EAM_code.init | head -1"))
# x20 and x21 live further down the section (offsets +23 and +26).
x20 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+23){print $1}}' EAM_code.init | head -1"))
x21 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+26){print $1}}' EAM_code.init | head -1"))
#print "initial parameters: ",x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21
x = [x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21]
print "initial parameters: ",x
count = 0
#----------------------------------------------------------------------
# DEAP setup: single objective with weight -1.0 (minimization despite the
# "FitnessMax" name); individuals are numpy arrays of the 22 parameters.
creator.create("FitnessMax", base.Fitness, weights=(-1.0,))
creator.create("Individual", numpy.ndarray, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
n_gene = 22 # number of parameters
min_ind = numpy.ones(n_gene) * -1.0
max_ind = numpy.ones(n_gene) * 1.0
# Search window: +/- 10% around each initial parameter value.
for i in range(n_gene):
	#min_ind[i] = b1[i][0]
	#max_ind[i] = b1[i][1]
	min_ind[i] = float(x[i]) - float(x[i])*0.1
	max_ind[i] = float(x[i]) + float(x[i])*0.1
	print "search area of paramter "+str(i)+": "+str(min_ind[i])+" | "+str(max_ind[i])
#----------------------------------------------------------------------
def create_ind_uniform(min_ind, max_ind):
	"""Draws one individual: gene i is uniform in [min_ind[i], max_ind[i]].

	Args:
		min_ind: per-gene lower bounds.
		max_ind: per-gene upper bounds (paired with min_ind by position).

	Returns:
		A plain list of floats, one per gene.
	"""
	# Rewritten as a comprehension; the original loop also shadowed the
	# `min`/`max` builtins with its loop variable names.
	return [random.uniform(lo, hi) for lo, hi in zip(min_ind, max_ind)]
#----------------------------------------------------------------------
# Individuals are sampled uniformly within [min_ind, max_ind] per gene;
# a population is a list of such individuals.
toolbox.register("create_ind", create_ind_uniform, min_ind, max_ind)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.create_ind)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
#----------------------------------------------------------------------
#def evalOneMax(individual):
# return sum(individual),
#----------------------------------------------------------------------
def evalOneMax(individual):
	"""Fitness: mismatch between lammps (candidate EAM) and PWscf reference.

	Substitutes the 22 candidate parameters into the Zhou04 EAM generator
	template, rebuilds the potential, runs lammps, and compares per-atom
	forces and total energy against a PWscf (DFT) reference; additionally
	penalizes discontinuities of the embedding function F(rho) at three
	spline boundaries. Returns a 1-tuple (DEAP convention).
	"""
	print "------------------------"
	global count
	count += 1
	print count
	# Substitute candidate parameters for the placeholder names in the
	# template. NOTE(review): plain substring replacement — placeholder
	# names must not occur inside each other or in ordinary template text.
	fi = open(file_tmp,'r')
	text = fi.read().replace('re',str(individual[0]).replace("[","").replace("]",""))
	text = text.replace('fe',str(individual[1]).replace("[","").replace("]",""))
	text = text.replace('rhoe1',str(individual[2]).replace("[","").replace("]",""))
	text = text.replace('rhoe2',str(individual[3]).replace("[","").replace("]",""))
	text = text.replace('alpha',str(individual[4]).replace("[","").replace("]",""))
	text = text.replace('beta',str(individual[5]).replace("[","").replace("]",""))
	text = text.replace('Ap',str(individual[6]).replace("[","").replace("]",""))
	text = text.replace('Bp',str(individual[7]).replace("[","").replace("]",""))
	text = text.replace('kappa',str(individual[8]).replace("[","").replace("]",""))
	text = text.replace('lambda',str(individual[9]).replace("[","").replace("]",""))
	text = text.replace('Fn0',str(individual[10]).replace("[","").replace("]",""))
	text = text.replace('Fn1',str(individual[11]).replace("[","").replace("]",""))
	text = text.replace('Fn2',str(individual[12]).replace("[","").replace("]",""))
	text = text.replace('Fn3',str(individual[13]).replace("[","").replace("]",""))
	text = text.replace('F0',str(individual[14]).replace("[","").replace("]",""))
	text = text.replace('F1',str(individual[15]).replace("[","").replace("]",""))
	text = text.replace('F2',str(individual[16]).replace("[","").replace("]",""))
	text = text.replace('F3',str(individual[17]).replace("[","").replace("]",""))
	text = text.replace('eta',str(individual[18]).replace("[","").replace("]",""))
	text = text.replace('Fep',str(individual[19]).replace("[","").replace("]",""))
	text = text.replace('F4',str(individual[20]).replace("[","").replace("]",""))
	text = text.replace('rhol',str(individual[21]).replace("[","").replace("]",""))
	# NOTE(review): missing parentheses — this references the method but
	# does not close the file.
	fi.close
	with open(file_inp,'w') as f:
		print >> f, text
	# Regenerate the tabulated EAM potential from the filled-in template.
	commands.getoutput("./Zhou04_EAM_2 < EAM.input")
	# Every 9000th evaluation rebuilds the expensive PWscf (DFT) reference
	# from a fresh lammps snapshot; otherwise reuse the existing reference.
	if (count % 9000) == 1:
		commands.getoutput(lammps_adress+" < in.lmp")
		commands.getoutput("cp ./cfg/run.50.cfg run.50.cfg")
		commands.getoutput("./cfg2vasp/cfg2vasp run.50.cfg")
		commands.getoutput("python ./vasp2cif/vasp2cif.py run.50.vasp")
		commands.getoutput(cif2cell_adress+" run.50.vasp.cif --no-reduce -p pwscf --pwscf-pseudo-PSLibrary-libdr=\"./potentials\" --setup-all --k-resolution=0.48 --pwscf-force=yes --pwscf-stress=yes --pwscf-run-type=scf -o pw.in")
		commands.getoutput(pwscf_adress+" < pw.scf.in")
		commands.getoutput(cif2cell_adress+" run.50.vasp.cif --no-reduce -p pwscf --pwscf-pseudo-PSLibrary-libdr=\"./potentials\" --setup-all --k-resolution=0.18 --pwscf-force=yes --pwscf-stress=yes --pwscf-run-type=scf -o pw.in")
		commands.getoutput(pwscf_adress+" < pw.scf.in > pw.out")
		commands.getoutput("./pwscf2force >> config_potfit")
		commands.getoutput(cif2cell_adress+" run.50.vasp.cif --no-reduce -p lammps -o data_fix.in")
		commands.getoutput(lammps_adress+" < in.lmp_fix")
		commands.getoutput("mv data.in.restart data.in")
		#
		commands.getoutput("./pwscf2force > config")
	else:
		commands.getoutput(lammps_adress+" < in.lmp_fix")
	# Pressure/stress comparison is currently disabled; diffp stays 0 and
	# its term contributes nothing to the fitness below.
	# 1 bar = 0.0001 GPa
	# stress = -pressure
	#pxxl = commands.getoutput("awk '{if($1==\"pxxl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
	#pyyl = commands.getoutput("awk '{if($1==\"pyyl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
	#pzzl = commands.getoutput("awk '{if($1==\"pzzl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
	#pxyl = commands.getoutput("awk '{if($1==\"pxyl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
	#pxzl = commands.getoutput("awk '{if($1==\"pxzl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
	#pyzl = commands.getoutput("awk '{if($1==\"pyzl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
	#pxxp = commands.getoutput("awk '{if($1==\"#S\"){print -$2}}' config")
	#pyyp = commands.getoutput("awk '{if($1==\"#S\"){print -$3}}' config")
	#pzzp = commands.getoutput("awk '{if($1==\"#S\"){print -$4}}' config")
	#pxyp = commands.getoutput("awk '{if($1==\"#S\"){print -$5}}' config")
	#pxzp = commands.getoutput("awk '{if($1==\"#S\"){print -$6}}' config")
	#pyzp = commands.getoutput("awk '{if($1==\"#S\"){print -$7}}' config")
	#diffpxx = (float(pxxl) - float(pxxp))/(float(pxxp)+0.000000101)*100.0/6.0
	#diffpyy = (float(pyyl) - float(pyyp))/(float(pyyp)+0.000000101)*100.0/6.0
	#diffpzz = (float(pzzl) - float(pzzp))/(float(pzzp)+0.000000101)*100.0/6.0
	#diffpxy = (float(pxyl) - float(pxyp))/(float(pxyp)+0.000000101)*100.0/6.0
	#diffpxz = (float(pxzl) - float(pxzp))/(float(pxzp)+0.000000101)*100.0/6.0
	#diffpyz = (float(pyzl) - float(pyzp))/(float(pyzp)+0.000000101)*100.0/6.0
	#diffp = abs(diffpxx) + abs(diffpyy) + abs(diffpzz) + abs(diffpxy) + abs(diffpxz) + abs(diffpyz)
	#print "lammps: "+str(pxxl)+", "+str(pyyl)+", "+str(pzzl)+", "+str(pxyl)+", "+str(pxzl)+", "+str(pyzl)+" [eV/A^3]"
	#print "pwscf: "+str(pxxp)+", "+str(pyyp)+", "+str(pzzp)+", "+str(pxyp)+", "+str(pxzp)+", "+str(pyzp)+" [eV/A^3]"
	#print "P diff (%): "+str(diffp)
	#print "---------------"
	diffp = 0.0
	# Per-atom force comparison: lammps trajectory vs PWscf "config" file.
	# The 0.000000101 term guards the relative error against division by 0.
	difffx = 0.0
	difffy = 0.0
	difffz = 0.0
	difff = 0.0
	for i in range(int(natom)):
		fxl[i] = commands.getoutput("awk '{if(NR==10+"+str(i)+"){printf \"%10.8f\",$7}}' trajectory.lammpstrj")
		fyl[i] = commands.getoutput("awk '{if(NR==10+"+str(i)+"){printf \"%10.8f\",$8}}' trajectory.lammpstrj")
		fzl[i] = commands.getoutput("awk '{if(NR==10+"+str(i)+"){printf \"%10.8f\",$9}}' trajectory.lammpstrj")
		fxp[i] = commands.getoutput("awk '{if(NR==11+"+str(i)+"){print $5}}' config")
		fyp[i] = commands.getoutput("awk '{if(NR==11+"+str(i)+"){print $6}}' config")
		fzp[i] = commands.getoutput("awk '{if(NR==11+"+str(i)+"){print $7}}' config")
		difffx = (float(fxl[i]) - float(fxp[i]))/(float(fxp[i])+0.000000101)*100.0/3.0/float(natom)
		difffy = (float(fyl[i]) - float(fyp[i]))/(float(fyp[i])+0.000000101)*100.0/3.0/float(natom)
		difffz = (float(fzl[i]) - float(fzp[i]))/(float(fzp[i])+0.000000101)*100.0/3.0/float(natom)
		difff = difff + abs(difffx) + abs(difffy) + abs(difffz)
	print "lammps: "+str(fxl[0])+" : "+str(fyl[0])+" : "+str(fzl[0])
	print "PWscf: "+str(fxp[0])+" : "+str(fyp[0])+" : "+str(fzp[0])
	print "force diff (%): "+str(difff)
	print "---------------"
	# Total-energy comparison (PWscf reports energy per atom; scale up).
	lammps_get_data = "grep \"Total Energy\" log.lammps | tail -1 | awk '{printf \"%-20.10f\",$4}'"
	lmpe = commands.getoutput(lammps_get_data)
	pwe = commands.getoutput("awk '{if($1==\"#E\"){print $2}}' config")
	pwe = float(pwe) * float(natom)
	print "lammps: "+str(lmpe)+" [eV]"
	print "PWscf: "+str(pwe)+" [eV]"
	diffe = float(pwe) - float(lmpe)
	print "diff: "+str(diffe)+" [eV]"
	diffea = float(diffe)/float(natom)
	print "diff/atom: "+str(diffea)+" [eV/atom]"
	commands.getoutput("echo "+str(count)+" "+str(diffe)+" >> energy.dat")
	# Continuity check of the embedding function F(rho) at its three spline
	# boundaries (rhoe*rhol, rhoe, 1.15*rhoe): sample F just below and just
	# above each boundary from the plotted table and penalize the jump.
	rhoin = float(individual[2])*float(individual[21])
	rhoout = float(individual[2])*1.15
	print "---------------"
	print "F boundary 1, rho: "+str(rhoin)
	print "F boundary 2, rho: "+str(individual[2])
	print "F boundary 3, rho: "+str(rhoout)
	commands.getoutput("cp "+satom+"_Zhou04.eam.alloy"+" Xx_Zhou04.eam.alloy")
	commands.getoutput("./plot")
	rhoin1 = commands.getoutput("cat F.plt | awk '{if($1<"+str(rhoin)+"){print $2}}' | tail -2 | head -1")
	rhoin2 = commands.getoutput("cat F.plt | awk '{if($1>"+str(rhoin)+"){print $2}}' | head -2 | tail -1")
	rhoe1 = commands.getoutput("cat F.plt | awk '{if($1<"+str(individual[2])+"){print $2}}' | tail -2 | head -1")
	rhoe2 = commands.getoutput("cat F.plt | awk '{if($1>"+str(individual[2])+"){print $2}}' | head -2 | tail -1")
	rhoout1 = commands.getoutput("cat F.plt | awk '{if($1<"+str(rhoout)+"){print $2}}' | tail -2 | head -1")
	rhoout2 = commands.getoutput("cat F.plt | awk '{if($1>"+str(rhoout)+"){print $2}}' | head -2 | tail -1")
	print "F near boundary 1, F: "+str(rhoin1)+" | "+str(rhoin2)+" | diff "+str(float(rhoin1) - float(rhoin2))
	print "F near boundary 2, F: "+str(rhoe1)+" | "+str(rhoe2)+" | diff "+str(float(rhoe1) - float(rhoe2))
	print "F near boundary 3, F: "+str(rhoout1)+" | "+str(rhoout2)+" | diff "+str(float(rhoout1) - float(rhoout2))
	print "---------------"
	# Weighted sum of squared errors; smaller is better (weight is -1.0).
	y = (abs(diffea)**2 + 1000*abs(float(rhoin1) - float(rhoin2))**2 + 1000*abs(float(rhoe1) - float(rhoe2))**2 + 1000*abs(float(rhoout1) - float(rhoout2))**2 + 0.0000002*abs(diffp)**2 + 0.0000010*abs(difff)**2)
	print "Evaluate: ", y
	#print "Parameters: ", individual
	print "Parameters: x0 = "+"[ "+str(individual[0])+","+str(individual[1])+","+str(individual[2])+","+str(individual[3])+","+str(individual[4])+","+str(individual[5])+","+str(individual[6])+","+str(individual[7])+","+str(individual[8])+","+str(individual[9])+","+str(individual[10])+","+str(individual[11])+","+str(individual[12])+","+str(individual[13])+","+str(individual[14])+","+str(individual[15])+","+str(individual[16])+","+str(individual[17])+","+str(individual[18])+","+str(individual[19])+","+str(individual[20])+","+str(individual[21])+" ]"
	print "------------------------"
	return y,
#----------------------------------------------------------------------
def cxTwoPointCopy(ind1, ind2):
	"""Two-point crossover for numpy-array individuals.

	Swaps the slice [cxpoint1:cxpoint2) between ind1 and ind2 in place and
	returns both. The explicit .copy() calls are required because numpy
	slices are views: without them the second assignment would read data
	already overwritten by the first.
	"""
	size = len(ind1)
	cxpoint1 = random.randint(1, size)
	cxpoint2 = random.randint(1, size-1)
	if (cxpoint2 >= cxpoint1):
		cxpoint2 += 1
	else:
		cxpoint1, cxpoint2 = cxpoint2, cxpoint1
	# Bug fix: the original assigned to ind2[cxpoint2:cxpoint2] — an empty
	# slice — which raises ValueError for numpy arrays and never performs
	# the swap. Both sides must use the same [cxpoint1:cxpoint2] window.
	ind1[cxpoint1:cxpoint2], ind2[cxpoint1:cxpoint2] = \
		ind2[cxpoint1:cxpoint2].copy(), ind1[cxpoint1:cxpoint2].copy()
	return ind1, ind2
#----------------------------------------------------------------------
def mutUniformDbl(individual, min_ind, max_ind, indpb):
	"""Uniform mutation: gene i is resampled in [min_ind[i], max_ind[i]]
	with independent probability `indpb`. Mutates `individual` in place.

	Returns:
		A 1-tuple (individual,), as DEAP mutation operators must.
	"""
	# enumerate/zip replaces the py2-only xrange and avoids shadowing the
	# `min`/`max` builtins as the original loop did.
	for i, (lo, hi) in enumerate(zip(min_ind, max_ind)):
		if random.random() < indpb:
			individual[i] = random.uniform(lo, hi)
	# Bug fix: the original returned the misspelled name `indivisual`,
	# which raised NameError whenever this operator was invoked.
	return individual,
#----------------------------------------------------------------------
# GA operator wiring. NOTE(review): the custom cxTwoPointCopy and
# mutUniformDbl defined above are never registered — the builtin
# tools.cxTwoPoint / tools.mutFlipBit are used instead; mutFlipBit flips
# genes as booleans, which looks questionable for real-valued EAM
# parameters — confirm this is intended.
toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
#----------------------------------------------------------------------
def main():
	"""Runs the GA (DEAP eaSimple): population 300, 500 generations.

	Returns (final population, statistics object, hall of fame).
	"""
	# Fixed seed makes runs reproducible.
	random.seed(64)
	pop = toolbox.population(n=300)
	# Track the single best individual ever seen (numpy-aware equality).
	hof = tools.HallOfFame(1, similar=numpy.array_equal)
	stats = tools.Statistics(lambda ind: ind.fitness.values)
	stats.register("avg", numpy.mean)
	stats.register("std", numpy.std)
	stats.register("min", numpy.min)
	stats.register("max", numpy.max)
	# cxpb/mutpb: per-individual crossover/mutation probabilities.
	algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=500, stats=stats, halloffame=hof)
	return pop, stats, hof
#----------------------------------------------------------------------
# Script entry point.
if (__name__ == "__main__"):
	main()
#----------------------------------------------------------------------
| [
"studentsctest@gmail.com"
] | studentsctest@gmail.com |
bbcdb1fdabe813c0995be9185442d197293f334f | 750d39f243eaefb2d7f0e088f34dd382f920575f | /semester1/untitled1.py | dce725e8b87d13598297a2a259e5da25e307a9b1 | [] | no_license | owsiankakamila/Python | 3a146eb354f167326ad044d3ed9a0907188d0e4b | c589ae0afe5d53040d3f9eaef523664bec724bcf | refs/heads/master | 2021-05-12T02:26:31.067880 | 2018-04-28T08:30:26 | 2018-04-28T08:30:26 | 117,585,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | """import time
def silniaIt (n):
silnia =1
for i in range (2,n+1):
silnia*=i
return silnia
def silniaRek (n):
if n==1 or n==0:
return 1
def fibRek (n):
if n==1:
return 1
elif n==0:
return 0
return fibRek(n-2)+fibRek(n-1)
def fibIt (n):
a, b = 0, 1
if a<n:
a, b = b, a+b
return a
k=20
start = time.clock()
silniaRek(k)
end=time.clock()
total = end-start
print ("sit=",total)"""
# Print the integers 0 through 9, one per line.
for i in range(10):
    print(i)
"owsiankakamila@gmail.com"
] | owsiankakamila@gmail.com |
19f8f0b36d7b5c449bd0feb2eb457df69467626e | 8887a391584e381b526fc4c695b34d3e3b1a2180 | /terminate_instances_startingwith_brooklyn_name.py | 0a17f9bad18495abeeae17cff2bbd72c1d17c272 | [] | no_license | omkarm07/terraform | 62e674964b9f06301c261faa6e634b0c0355a33b | 9c8e8c1c40e64a4c73db95e95dd4edd20456c516 | refs/heads/master | 2020-04-06T14:51:49.853561 | 2018-11-14T16:31:22 | 2018-11-14T16:31:22 | 157,557,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | import boto3
ec2 = boto3.resource('ec2')
ec2client=boto3.client('ec2')
def lambda_handler(event,context):
print(str(event))
response = ec2client.describe_instances(
Filters=[
{
'Name': 'tag:Name',
'Values': ['brooklyn*']
}
]
)
instancelist = []
for reservation in (response["Reservations"]):
for instance in reservation["Instances"]:
instancelist.append(instance["InstanceId"])
ec2.instances.filter(InstanceIds=instancelist).terminate()
return instancelist | [
"mangalekaromkar@gmail.com"
] | mangalekaromkar@gmail.com |
1657edac0a3ad08f94e7d80f503141227f980e7c | 2870af1dcdaa7e96e6ca50b87c97bd2be146520b | /api/main/utilities/constants.py | 467953c1770c4767a03e631d39415fe9da9e70b3 | [] | no_license | elsh32/search-analytics | 3edc530ab23c0c1f5cfa8de3619625caab7147c8 | aa143608c4ac709f26ba424f9f1acdd25b14f86d | refs/heads/master | 2023-01-02T23:27:37.013151 | 2020-10-27T07:15:51 | 2020-10-27T07:15:51 | 307,502,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | # -*- coding: UTF-8 -*-
import os
# Documentation settings
SWAGGERUI_URL = '/v1/doc'
SCHEMA_URL = '/static/v1/swagger.json'
PAGE_TITLE = 'TeraJob XP API Doc'
SUPPORTED_SUBMIT_METHODS = ['get', 'post', 'put', 'delete']
SWAGGER_CLIENT_REALM = 'MyTeraJobAPIRealm'
SWAGGER_CLIENT_ID = 'MyTeraJobAPIID'
SWAGGER_CLIENT_SECRET = 'A1Zsez4327?kdgdie436$@fd'
SWAGGER_CONF = {'supportedSubmitMethods': SUPPORTED_SUBMIT_METHODS,
'client_realm': SWAGGER_CLIENT_REALM,
'client_id': SWAGGER_CLIENT_ID,
'client_secret': SWAGGER_CLIENT_SECRET
}
FAVICON_URL = 'faveicon.ico'
U_DOC_KEY = 'A1Zsez4327kdgdie436fd'
LOGGER = {
'ANALYTICS_RESSOURCE': 'analytics_ressource',
'LOG_INGESTOR': 'log_ingestor',
'SEARCH_ANALYTICS_SERVICE': 'search_analytics_service',
'ANALYTICS_COMPUTER': 'analytics_computer'
}
HTTP_STATUS = {
'HTTP_400': 400,
'HTTP_500': 500,
'HTTP_202': 202,
'HTTP_200': 200,
'HTTP_401': 401,
'HTTP_403': 403
}
ERROR_TITLES = {
'HTTP_400': 'Forbidden',
'HTTP_500': 'Unhandled Exception',
'HTTP_202': 'Accepted',
'HTTP_200': 'Ok',
'HTTP_401': 'Unauthorized',
'HTTP_403': 'Forbidden'
}
ERROR_CODE = {
'BAD_DATA_DATE': 'Ensure your provided date is at least one of the following format: %Y-%m-%d %H:%M:%S, %Y-%m-%d %H:%M, %Y-%m-%d %H, %Y-%m-%d, %Y-%m, %Y',
'BAD_DATA_INT': 'The size parameter must be of type Integer'
}
| [
"elshaddhai@MacBook-Pro-de-Elshaddhai.local"
] | elshaddhai@MacBook-Pro-de-Elshaddhai.local |
63f89291194b4ee7dec018100eb9a16583e433dd | 5d96286b3002ded31aa847863697e37dbfa5f47f | /Lesson1/find_lines.py | 1438d2f22c5ab6b636c91e941bc5e6672e9e7d57 | [] | no_license | mengzhongren10/self-driving | 6d6738e9e2572e50ad40b44fcd2a6bad94454ba7 | 8523a4666ef7c642ad6ddc572a7ebac2e4665435 | refs/heads/master | 2020-06-13T03:16:49.894332 | 2019-06-30T15:02:57 | 2019-06-30T15:02:57 | 194,515,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,565 | py | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
#image = mpimg.imread('test_images/solidWhiteRight.jpg')
#
#print('This image is:',type(image),'with dimensions:',image.shape)
#plt.imshow(image)
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    Bug fix: the original converted the module-level global ``image``
    instead of the ``img`` argument, so the function could never be
    applied to any other frame.
    """
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img,low_threshold,high_threshold):
    """Apply the Canny edge detector; *img* must be single-channel grayscale.

    The larger threshold detects the strong, obvious edges (usually broken
    and discontinuous on their own); the smaller threshold is then used to
    connect those interrupted edge segments together.
    """
    return cv2.Canny(img,low_threshold,high_threshold)
def gaussian_blur(img,kernel_size):
    """Smooth *img* with a kernel_size x kernel_size Gaussian kernel.

    Neighbouring pixels are re-weighted with a Gaussian (bell-shaped)
    scheme so that closer pixels contribute more, suppressing noise
    before edge detection.
    """
    return cv2.GaussianBlur(img,(kernel_size,kernel_size),0)
def region_of_interest(img,vertices):
    """Keep only the polygonal region of *img* defined by *vertices*.

    Pixels inside the polygon(s) are preserved; everything else is
    blacked out.
    """
    # Start from an all-black mask the same size as the input.
    mask = np.zeros_like(img)
    # The fill colour must match the channel count of the input image.
    if len(img.shape) > 2:
        channel_count = img.shape[2]
        ignore_mask_color = (255,)*channel_count
    else:
        ignore_mask_color = 255
    # Paint the polygon(s) white on the mask.
    cv2.fillPoly(mask,vertices,ignore_mask_color)
    # Keep the image only where the mask pixels are non-zero.
    masked_image = cv2.bitwise_and(img,mask)
    return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """Draw every detected line segment onto *img* in-place.

    Each entry of *lines* is a one-element array holding the segment end
    points (x1, y1, x2, y2), as produced by cv2.HoughLinesP.  (A more
    elaborate variant that averages the segments into single left/right
    lane lines was sketched here originally; this keeps the simple
    per-segment rendering.)
    """
    for segment in lines:
        x1, y1, x2, y2 = segment[0]
        cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run the probabilistic Hough transform and render the segments.

    ``img`` should be the output of a Canny transform.  Returns a black
    3-channel image of the same size with the detected segments drawn on it.

    cv2.HoughLinesP votes with a random subset of the edge points instead
    of all of them, so the threshold can be lower than for the classic
    Hough transform.  ``min_line_len`` drops segments shorter than that
    many pixels; ``max_line_gap`` merges two collinear segments whose gap
    is smaller than it.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), min_line_len, max_line_gap)
    line_img = np.zeros((img.shape[0],img.shape[1],3),dtype=np.uint8)
    draw_lines(line_img,lines)
    return line_img
def weighted_img(img,inital_img,α=0.8, β=1., γ=0.):
    """Blend the line overlay *img* onto *inital_img*.

    The result is computed as inital_img * α + img * β + γ; both images
    must have the same shape.  A larger α keeps the background clearer,
    a larger β makes the drawn lines stand out more.
    """
    return cv2.addWeighted(inital_img,α,img,β,γ)
import os
#os.listdir("test_images/")
def line_detect(image):
    """Full lane-detection pipeline for a single RGB frame.

    Returns (edges, masked_edges, result): the raw Canny edge map, the
    edge map restricted to the road-shaped region of interest, and the
    original frame with the detected lane segments overlaid.
    """
    gray = grayscale(image)
    kernel_size = 5
    blur_gray = gaussian_blur(gray,kernel_size)
    low_threshold = 10
    high_threshold = 150
    edges = canny(blur_gray,low_threshold,high_threshold)
    imshape = image.shape
    # Trapezoidal region of interest covering roughly the lower 40% of the frame.
    vertices = np.array([[(0,imshape[0]),(int(0.45*imshape[1]),int(0.6*imshape[0])),
                      (int(0.6*imshape[1]),int(0.6*imshape[0])), (imshape[1],imshape[0])]], dtype=np.int32)
    masked_edges = region_of_interest(edges,vertices)
    rho = 2 # distance resolution in pixels of the Hough grid
    theta = np.pi/180 # angular resolution in radians of the Hough grid
    threshold = 15 # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 40 #minimum number of pixels making up a line
    max_line_gap = 20 # maximum gap in pixels between connectable line segments
    line_image = hough_lines(masked_edges,rho,theta,threshold,min_line_length,max_line_gap)
    result = weighted_img(line_image,image,α=0.8, β=1.)
    return edges,masked_edges,result
import glob
# Batch demo: run the pipeline over every test image and show a 2x2 comparison
# (original / canny edges / masked edges / final overlay) per image.
new_path = os.path.join("data/test_images/","*.jpg")
for infile in glob.glob(new_path):
    image = mpimg.imread(infile)
    edges,masked_edges,result = line_detect(image)
    plt.figure(figsize=(20,10))
    plt.subplot(221)
    plt.title("original image")
    plt.imshow(image)
    plt.subplot(222)
    plt.title("canny")
    plt.imshow(edges,cmap = "gray")
    plt.subplot(223)
    plt.title("masked image")
    plt.imshow(masked_edges,cmap = "gray")
    plt.subplot(224)
    plt.title("result")
    plt.imshow(result)
# Bug fix: this line originally read "rom moviepy.editor import ..." (missing
# the leading "f"), a syntax error that prevented the whole script from running.
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
    """Per-frame hook for VideoFileClip: return the frame with lane lines drawn.

    NOTE: the output must be a 3-channel color image.
    """
    edges, masked_edges, result = line_detect(image)
    return result
white_output = 'out/test_videos_output/solidWhiteRight.mp4'
clip1 = VideoFileClip("data/test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image)
white_clip.write_videofile(white_output, audio=False)
# Render an inline HTML5 player for the processed video (notebook use).
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
| [
"noreply@github.com"
] | noreply@github.com |
1673ff7542d2797db9dd145ac79b90d92445bc9e | 90cc2bc9ee9184f28c235663e8688100665d0e85 | /timer.py | f49ef09bee82d5942eae05d40b536ba67e7730ff | [] | no_license | defalt-x/WPCH-3301 | 963e96aaa5c1ca6007d99fa9057df3bb6fb00492 | c169e6204a373cee2bcd1aeebc0a718fb6b86a98 | refs/heads/main | 2023-03-16T21:36:27.822318 | 2021-03-10T21:56:51 | 2021-03-10T21:56:51 | 344,626,622 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | import timeit
def timer(number, repeat):
    """Decorator factory that benchmarks the decorated callable at decoration time.

    Runs ``func`` ``number`` times per trial for ``repeat`` trials via
    ``timeit.repeat``, prints the mean trial time, and returns ``func``
    unchanged so the decorated name remains callable.

    Bug fix: the original ``wrapper`` returned ``None``, so applying
    ``@timer(...)`` replaced the decorated function with ``None``.
    """
    def wrapper(func):
        runs = timeit.repeat(func, number=number, repeat=repeat)
        print(sum(runs) / len(runs))
        return func
    return wrapper
"noreply@github.com"
] | noreply@github.com |
d6de5d30d6176e66e5de84c83fb49393801d39e2 | 1283942c15e320c50ad891cb0dc29238d4819d99 | /src/algorithm.py | 70a49108b874f3a4ebbab2736b1e0277efe7a1aa | [] | no_license | qlyseven/source-code | c147a8d018aab6149dc0539e1ae3fd657d56e472 | 3dda02439f729891182cb6db483f24df0c100163 | refs/heads/master | 2021-05-07T21:35:00.124857 | 2018-08-31T07:23:49 | 2018-08-31T07:23:49 | 109,031,434 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,108 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 20 15:12:09 2017
最小斯坦纳树实现
@author: Yanchao
"""
import queue
import numpy as np
import time
import json
from apiGraph import apiGraph
from steinerTree import STree
from steinerTree import SteinerTreeQueue
MAX_WEIGHT = 999
class MinimalSteinerTree:
    """Group Steiner tree search over the API graph.

    For every (root node, covered-keyword subset) state it keeps the best
    partial tree (STree) found so far, and grows/merges the trees
    best-first (priority queue ordered by weight) until one tree covers
    every requested keyword.
    """
    def __init__(self, graph, categories):
        # graph: adjacency structure exposing .dimension and .neighbors(v)
        # categories: per-node list of category strings (index = node id)
        self.graph = graph
        self.numOfNodes = graph.dimension
        self.categories = categories
        # trees_dict[root][keySet] -> best STree known for that state
        self.trees_dict = {}
    def run(self, keywords):
        '''
        Find a minimal Steiner tree that covers all of *keywords*.

        keywords: list of keyword (category) strings
        Returns the lowest-weight covering STree, or None when the
        keywords cannot all be covered.
        '''
        num_of_keywords = len(keywords)
        que = queue.PriorityQueue()
        # One bit per keyword; a tree is complete when keySet == all_in.
        all_in = (1 << num_of_keywords) - 1
        for v in range(self.numOfNodes):
            # Seed the queue with every node covering at least one keyword.
            keySet = self.calcKeySet(v, keywords, num_of_keywords)
            if keySet > 0:
                tree = STree(keySet, v)
                que.put(tree)
                self.addTree(v, tree)
        while not que.empty():
            tree = que.get()
            if tree.keySet == all_in:
                return tree
            # "grow" step: extend by one edge to each neighbour when that
            # yields a cheaper tree for the same keyword subset.
            v = tree.root
            neighbors = self.graph.neighbors(v)
            for u in neighbors:
                t = self.getTree(u, tree.keySet)
                u_weight = MAX_WEIGHT if t is None \
                            else t.weight
                if (tree.weight + 1) < u_weight:
                    newTree = tree.grow(u, self.calcKeySet(u, keywords, num_of_keywords))
                    que.put(newTree)
                    self.addTree(u, newTree)
                    # the root of `tree` is unchanged, so trees_dict needs no update
            # "merge" step: combine with trees rooted at the same node that
            # cover a disjoint keyword subset.
            trees = self.trees_dict.get(v)
            if trees is None:
                continue
            # Adding to the dict while iterating it would raise, so newly
            # merged trees are collected first and registered afterwards.
            newTrees = []
            for key in trees.keys():
                t = trees[key]
                if t.keySet & tree.keySet == 0:
                    union_keySet = t.keySet | tree.keySet
                    union_tree = self.getTree(v, union_keySet)
                    union_weight = MAX_WEIGHT \
                                    if union_tree is None \
                                    else union_tree.weight
                    if t.weight + tree.weight - 1 < union_weight:
                        newTree = tree.merge(t)
                        que.put(newTree)
                        newTrees.append(newTree)
        for t in newTrees:
            self.addTree(v, t)
        return None
    def calcKeySet(self, v, keywords, num_of_keywords):
        '''Bitmask of which *keywords* appear among node *v*'s categories.'''
        category_set = set(self.categories[v])
        keySet = 0;
        for i in range(num_of_keywords):
            if keywords[i] in category_set:
                keySet |= (1 << i)
        return keySet
    def addTree(self, root, tree):
        ''' Register *tree* as the best known tree for (root, tree.keySet). '''
        trees = self.trees_dict.get(root)
        if trees is None:
            trees = {}
            self.trees_dict[root] = trees
        trees[tree.keySet] = tree
    def getTree(self, root, keySet):
        '''Return the stored tree for (root, keySet), or None if absent.'''
        trees = self.trees_dict.get(root)
        if trees is None:
            return None
        return trees.get(keySet)
def test():
    """Smoke test: build the graph from the bundled dataset and run one search."""
    graph = apiGraph(json.load(open('../dataset/graph.json')))
    categories = json.load(open('../dataset/api_categories.json'))
    category_list = json.load(open('../dataset/category_list.json'))
    minimal_steiner = MinimalSteinerTree(graph, categories)
    count_options = [2, 3, 4, 5, 6]
    num_of_options = len(count_options)
    keywords = ['England', 'Home Automation', 'Barcodes', 'Web Site Management', 'Metadata', 'Classifieds']
    begin = time.time()
    min_tree = minimal_steiner.run(keywords);
    if min_tree is None:
        print('weight:%d, time:%fs' % (0, time.time() - begin))
        # Bug fix: return here -- the original fell through and then
        # dereferenced min_tree.weight, raising AttributeError on None.
        return
    print('weight:%d, time:%fs' % (min_tree.weight, time.time() - begin))
# test() | [
"lianyongqi@gmail.com"
] | lianyongqi@gmail.com |
4191d7dfb542685e97556fdb5988ca82b5b9dc42 | bb970b66e4033e83e98bfe31c9d0f05f8583bc7f | /blog/urls.py | 561f28b29877d65faf31f48da0f53e7d7096f59e | [] | no_license | goyal-aman/Django-BlogSite | b06462c2e5afa3cad24f2087f734c89ed7ddc0be | 70545267726979041bac11132c8a659e1852e7ab | refs/heads/master | 2020-12-04T23:25:07.951375 | 2020-01-05T15:11:14 | 2020-01-05T15:11:14 | 231,934,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | from django.urls import path
from . import views
# URL routes for the blog app: list/detail views, post CRUD, and a static about page.
urlpatterns = [
    path('', views.PostListView.as_view(), name="blog-home"),
    path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'),
    path('post/new/', views.PostCreateView.as_view(), name='post-create'),
    # NOTE(review): the update/delete routes lack a trailing slash, unlike the
    # others -- confirm this is intended (APPEND_SLASH redirects do not fire on POST).
    path('post/<int:pk>/update', views.PostUpdateView.as_view(), name='post-update'),
    path('post/<int:pk>/delete', views.PostDeleteView.as_view(), name='post-delete'),
    path('about/', views.about, name="blog-about")
"amangoyal8110@gmail.com"
] | amangoyal8110@gmail.com |
25949d3e870b9fe8550ea390091bfa9208927a6a | 85ed0e0fb9dc78322ab2a9d00f37b09c6e6ce947 | /djangoPractice/practiceVenv/bin/pip | edfdf12750184fd0ab37e7c6f0e46ad3e4df87fa | [] | no_license | PDXCodeGuildJan/kris_kuchinka_repo | 83c1f5a657e1573688ad36f7ab13b52937763042 | 751ff3c75e0c950a473a18105c4b3e6a17db1104 | refs/heads/master | 2021-01-10T15:59:22.480964 | 2016-04-09T21:46:19 | 2016-04-09T21:46:19 | 49,224,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | #!/Users/kriskuchinka/Google_Drive/code_guild/kris_kuchinka_repo/djangoPractice/practiceVenv/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console entry point for pip inside this virtualenv.
import re
import sys
from pip import main
if __name__ == '__main__':
    # Normalise argv[0] (strip "-script.pyw"/".exe" suffixes) before delegating to pip.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"kriskuchinka@gmail.com"
] | kriskuchinka@gmail.com | |
5d66d81428f8fc2de66cc99b42088d438df42218 | 350eef7ff3914e318374501560e905ec1cb7fbce | /Problema1/Cliente.py | 296b34c3f8f7640be3826f0747c37cbf3f242c75 | [] | no_license | aldom7673/AplicacionesParaComunicacionesRed | b1e1ee16dfd054de472864b41f55e18f842c880b | 5ffb99b06e8725be774da5547f64eaca00aaa1f0 | refs/heads/master | 2020-12-21T20:21:44.007302 | 2020-06-29T17:13:58 | 2020-06-29T17:13:58 | 236,547,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,220 | py | # Aplicaciones para comunicaciones en red
# Autores:
# Martell Fuentes Ambar Desirée
# Mendoza Morales Aldo Daniel
import socket
import pickle
import os
import datetime
import threading
import sys
import speech_recognition as sr
from Personaje import *
personajes = []  # global character roster, filled by CargarPersonajes()
BUFFER_SIZE = 1024  # maximum bytes read per socket message
# NOTE(review): this unpack raises ValueError at import time when fewer than
# two arguments are given -- it runs before the usage check further below.
HOST, PORT = sys.argv[1:3]
def CargarPersonajes():
    """Fill the global *personajes* roster with the fixed character set."""
    fichas = [
        ("Carla", "Negro", "Cafes", "Blanca", "Nada", "Mujer"),
        ("Matilda", "Rubio", "Azules", "Morena", "Nada", "Mujer"),
        ("Maria", "Rubio", "Azules", "Blanca", "Lentes", "Mujer"),
        ("Samuel", "Negro", "Cafes", "Morena", "Nada", "Hombre"),
        ("Eduardo", "Negro", "Azules", "Blanca", "Sombrero", "Hombre"),
        ("Bob", "Rubio", "Azules", "Blanca", "Corbata", "Hombre"),
        ("Patricio", "Rojo", "Cafes", "Morena", "Nada", "Hombre"),
        ("Jorge", "Rubio", "Verdes", "Morena", "Sombrero", "Hombre"),
        ("Jessica", "Rojo", "Verdes", "Blanca", "Nada", "Mujer"),
        ("Camila", "Negro", "Cafes", "Morena", "Lentes", "Mujer"),
        ("Paulina", "Rojo", "Azules", "Morena", "Sombrero", "Mujer"),
    ]
    for ficha in fichas:
        personajes.append(Personaje(*ficha))
def MostrarPersonajes():
    """Print the description of every character currently loaded."""
    for figura in personajes:
        figura.DescripcionPersonaje()
def ObtenerMensajeVoz():
    """Capture one utterance from the microphone and return its transcription.

    Redraws the screen (character roster plus previous guesses), records
    audio, and sends it to Google Speech Recognition.  Returns "" when
    recognition fails for any reason.

    NOTE(review): reads the module-level global ``tiros_anteriores``, which
    is only assigned inside the main script below -- calling this before the
    game loop starts would raise NameError; confirm call order.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source)
        os.system( "clear" )
        MostrarPersonajes()
        MostrarTiros(tiros_anteriores)
        print( "Es tu turno de adivinar el personaje\nEscuchando ... ")
        audio = r.listen(source)
    try:
        return r.recognize_google(audio)
    except Exception as e:
        return ""
def ObtenerCaracteristica( texto ):
    """Parse a recognised utterance into a [category, value] guess.

    Checks, in order: a feature phrase ("ojo"/"cabello"/"piel"/"genero"
    followed by its value), an accessory phrase ("... tiene <accesorio>"),
    a bare gender word, and finally a character name.  Returns "" when
    nothing matches.
    """
    texto = texto.lower()
    accesorios = ["nada", "lentes", "sombrero", "corbata"]
    nombres = ["Carla","Matilda","Maria","Samuel","Eduardo","Bob","Patricio","Jorge","Jessica", "Camila", "Paulina"]
    generos = ["mujer", "hombre"]
    caracteristicas = [" ojo", " cabello", " piel", " genero"]
    for indice, marca in enumerate(caracteristicas):
        if marca in texto:
            # The word right after the feature keyword is taken as its value.
            resto = texto.split(marca)[1]
            palabras = resto.split(" ")
            clave = marca + "s" if indice == 0 else marca  # "ojo" -> "ojos"
            return [clave.replace(" ", ""), palabras[1]]
    if "tiene" in texto:
        resto = texto.split("tiene")[1]
        for accesorio in accesorios:
            if accesorio in resto:
                return ["accesorio", accesorio]
        return ["accesorio", "nada"]
    for genero in generos:
        if genero in texto:
            return ["genero", genero]
    for nombre in nombres:
        if nombre.lower() in texto:
            return ["nombre", nombre]
    return ""
def MostrarTiros(tiros):
    """Print the guesses made so far; stays silent when there are none."""
    if not tiros:
        return
    print( "\tTiros hasta el momento: ")
    for tiro in tiros:
        # Each entry is (attribute, value, server verdict).
        print( "\t\t" + tiro[0] + " " + tiro[1] + ": " + tiro[2] )
# Main client flow: connect, wait in the lobby until all players join, then
# loop on pickled state messages from the server until the game ends.
# NOTE(review): pickle.loads on data received over the network executes
# arbitrary code if the server is untrusted -- consider a safe format (JSON).
if len(sys.argv) != 3:
    print( "usage:", sys.argv[0], "<host> <port>" )
    sys.exit(1)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as TCPClientSocket:
    TCPClientSocket.connect((HOST, int(PORT) ))
    CargarPersonajes()
    # Lobby phase: the server sends the number of missing players; "0" starts the game.
    while(True):
        jugadoresFaltantes = TCPClientSocket.recv(100)
        os.system( "clear" )
        if( jugadoresFaltantes.decode() == "0"):
            print( "Todos los jugadores se han unido ..." )
            break
        else:
            print( "Esperando a " + jugadoresFaltantes.decode() + " jugadores ..." )
    tiros_anteriores = []
    while(True):
        print( "Esperando datos del servidor" )
        # dato layout: [MY_TURN?, WHOSE_TURN, PREVIOUS_SHOTS, GAME_OVER?, RESULT_TEXT, CHARACTER]
        dato = pickle.loads( TCPClientSocket.recv(BUFFER_SIZE) )
        if ( dato[3] ):
            break
        if( dato[2] != ""):
            tiros_anteriores = dato[2]
        if( dato[0] ):
            # Our turn: keep listening until speech yields a recognisable guess.
            while(True):
                texto = ObtenerMensajeVoz()
                if (texto != ""):
                    tiroCliente = ObtenerCaracteristica( texto )
                    if( tiroCliente != ""):
                        TCPClientSocket.sendall( pickle.dumps(tiroCliente) )
                        resultado = TCPClientSocket.recv(BUFFER_SIZE)
                        break
                print(texto)
                input( "Intentalo de nuevo. Pulsa enter para continuar ..." )
        else:
            # Someone else's turn: just redraw the board and wait.
            os.system( "clear" )
            MostrarPersonajes()
            MostrarTiros(tiros_anteriores)
            print( "Esperando a que el jugador " + str(dato[1]) + " termine su turno." )
    # Game over: reveal the character and the final result text.
    os.system( "clear" )
    MostrarPersonajes()
    print( "El personaje era: " + dato[5] )
    print( dato[4] )
| [
"aldom7673@gmail.com"
] | aldom7673@gmail.com |
edebd772a2cb1a6122d38b8eddec11971bfcf74a | 3f15edc4afd3d8f813aaf2cd71bcde26a9ff9540 | /users/migrations/0001_initial.py | 22e9a1daf28ce840e9594825ff2ea24dbb6c3a0f | [] | no_license | kanikamital0606/Custom-user-model-Admin-Testing | 7650d8522edf11daea469af24fce016454b7598b | 610e6bcb0309827f6ab9e57169d78585364353c6 | refs/heads/master | 2023-07-21T23:42:54.708033 | 2021-08-30T23:01:30 | 2021-08-30T23:01:30 | 401,090,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | # Generated by Django 3.2.6 on 2021-08-30 21:36
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema (auto-generated by Django 3.2.6): creates the custom NewUser model."""

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='NewUser',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
                ('user_name', models.CharField(max_length=100, unique=True)),
                ('first_name', models.CharField(blank=True, max_length=100)),
                ('start_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('about', models.TextField(blank=True, max_length=500, verbose_name='about')),
                ('is_staff', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"kanikamittal0661996@gmail.com"
] | kanikamittal0661996@gmail.com |
99adf5f481c8618518c16bf2bd97075314355ca7 | 6ea51617130ddb6372eb77c402051bcbf3403b6a | /BinarySortTree.py | 070e0f50efb90b65596aa3eb6c23084701cf2790 | [] | no_license | ghsszwsxxn/LearnPython | b9646e69f4d668575c8a5f93e60160157a70fd10 | e347e7cd62ab57c80e5c6539e108217246de7783 | refs/heads/master | 2020-05-14T13:38:35.276298 | 2019-04-17T05:10:12 | 2019-04-17T05:10:12 | 181,817,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,300 | py | class Node:
def __init__(self,value):
self.value = value
self.left = None
self.right = None
class Tree:
    """A binary sort (search) tree built from Node objects.

    Values smaller than a node go to its left subtree, larger ones to the
    right; duplicate values are silently ignored.
    """

    def __init__(self, values=None):
        self.root = None
        if values is not None:
            for value in values:
                self.add(value)

    def add(self, value):
        """Insert *value* into the tree; duplicates are ignored."""
        node = Node(value)
        if self.root is None:
            self.root = node
            return
        InsertPoint = self.findInsertPoint(self.root, node)
        # Bug fix: findInsertPoint returns None when the value already
        # exists; the original then dereferenced InsertPoint.value and
        # raised AttributeError on any duplicate insert.
        if InsertPoint is None:
            return
        if node.value > InsertPoint.value:
            InsertPoint.right = node
        if node.value < InsertPoint.value:
            InsertPoint.left = node

    def findInsertPoint(self, p, target):
        '''
        Find the node under which *target* should be attached.

        :param p: current node being compared
        :param target: node awaiting insertion
        :return: the attachment point, or None when target.value already exists
        '''
        # Recursion terminates at a duplicate or at a free child slot.
        if p.value == target.value:
            return None
        if target.value < p.value and p.left is None:
            return p
        if target.value > p.value and p.right is None:
            return p
        # Otherwise descend into the matching subtree.
        if target.value < p.value:
            return self.findInsertPoint(p.left, target)
        if target.value > p.value:
            return self.findInsertPoint(p.right, target)

    def pprint(self, node):
        '''Print the tree in pre-order (root, left, right).'''
        if node is None:
            return
        print(node.value, end=" ")
        self.pprint(node.left)
        self.pprint(node.right)

    def mprint(self, node):
        '''Print the tree in in-order (left, root, right) -- sorted order.'''
        if node is None:
            return
        self.mprint(node.left)
        print(node.value, end=" ")
        self.mprint(node.right)

    def bprint(self, node):
        '''Print the tree in post-order (left, right, root).'''
        if node is None:
            return
        self.bprint(node.left)
        self.bprint(node.right)
        print(node.value, end=" ")
# Demo: build a sample tree and print the three depth-first traversal orders.
tree = Tree([10,5,2,6,15,12,14,13])
print("\n前序遍历")  # pre-order
tree.pprint(tree.root)
print("\n中序遍历")  # in-order (sorted)
tree.mprint(tree.root)
print("\n后序遍历")  # post-order
tree.bprint(tree.root)
"gushiyipef@163.com"
] | gushiyipef@163.com |
dac7cb078abc3e81edeb99b4138581a79720c52f | d45daff15670807b2b81ea4de24dbf718e87645d | /ibot/__init__.py | 0f4f23ef09de6ba1d07344933fcac97cbddf0f49 | [
"MIT"
] | permissive | PayneWinn/MissHome-master | 434906ae816bd66dfe588a4b99dd50edb0828b50 | 7fb414f5b00309f3b272a7be552843244515ae5a | refs/heads/master | 2020-04-18T18:01:03.349550 | 2019-01-26T09:45:15 | 2019-01-26T09:45:15 | 167,671,754 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | # -*- coding: utf-8 -*-
__author__ = 'wenjie wu'
__email__ = 'wuwenjie718@gmail.com'
__version__ = '0.1.0'
| [
"wupeng@myhexin.com"
] | wupeng@myhexin.com |
e7ff40b024ecd7f924643344662d1c6f8eed185e | febd5a0a7b08e85bd6602b92f2928201fa6fad77 | /bbc_simple/core/data_handler.py | 6e5b1a52ee9aec5085df978f9187aea970355eff | [] | no_license | quvox/bbc-simple | 0f7fffac329ce727c4d89d621cb2e2ca22a42a46 | 0455abd689f2a0afaeffe612e4eb566e792d5661 | refs/heads/develop | 2020-03-18T00:54:47.179956 | 2018-07-17T09:48:10 | 2018-07-17T09:48:10 | 134,118,780 | 1 | 3 | null | 2018-06-28T13:02:09 | 2018-05-20T04:11:09 | Python | UTF-8 | Python | false | false | 18,348 | py | # -*- coding: utf-8 -*-
"""
Copyright (c) 2018 quvox.net
This code is based on that in bbc-1 (https://github.com/beyond-blockchain/bbc1.git)
"""
import mysql.connector
import traceback
import logging
import os
import sys
sys.path.extend(["../../", os.path.abspath(os.path.dirname(__file__))])
from bbc_simple.core import bbclib
from bbc_simple.core.message_key_types import to_2byte, PayloadType, KeyType
# Column definitions (name, SQL type) for the three per-domain tables
# created in DataHandler._db_setup.
transaction_tbl_definition = [
    ["transaction_id", "BLOB"], ["transaction_data", "BLOB"],
]

asset_info_definition = [
    ["id", "INTEGER"],
    ["transaction_id", "BLOB"], ["asset_group_id", "BLOB"], ["asset_id", "BLOB"], ["user_id", "BLOB"],
]

topology_info_definition = [
    ["id", "INTEGER"], ["base", "BLOB"], ["point_to", "BLOB"]
]
class DataHandler:
"""DB and storage handler"""
    def __init__(self, networking=None, default_config=None, config=None, workingdir=None, domain_id=None):
        # Keep handles to the owning networking layer and its core/stats/logger.
        self.networking = networking
        self.core = networking.core
        self.stats = networking.core.stats
        self.logger = networking.logger
        self.domain_id = domain_id
        # Short printable form of the domain id; used as the default DB name.
        self.domain_id_str = bbclib.convert_id_to_string(domain_id)[:16]
        self.config = config
        self.working_dir = workingdir
        self.db_adaptor = None
        self._db_setup(default_config)
def _db_setup(self, default_config):
"""Setup DB"""
if 'db' in self.config:
dbconf = self.config['db']
db_name = dbconf.get("db_name", self.domain_id_str)
db_addr = dbconf.get("db_addr", "127.0.0.1")
db_port = dbconf.get("db_port", 3306)
db_user = dbconf.get("db_user", "user")
db_pass = dbconf.get("db_pass", "pass")
db_rootuser = dbconf.get("db_rootuser", "root")
db_rootpass = dbconf.get("db_rootpass", "password")
table_engine = dbconf.get("engine", "MyISAM")
else:
db_name = default_config.get("db_name", self.domain_id_str)
db_addr = default_config.get("db_addr", "127.0.0.1")
db_port = default_config.get("db_port", 3306)
db_user = default_config.get("db_user", "user")
db_pass = default_config.get("db_pass", "pass")
db_rootuser = default_config.get("db_rootuser", "root")
db_rootpass = default_config.get("db_rootpass", "password")
table_engine = default_config.get("engine", "MyISAM")
self.db_adaptor = MysqlAdaptor(self, db_name=db_name, server_info=(db_addr, db_port, db_user, db_pass),
engine=table_engine)
self.db_adaptor.open_db(db_rootuser, db_rootpass)
self.db_adaptor.create_table('transaction_table', transaction_tbl_definition, primary_key=0, indices=[0])
self.db_adaptor.create_table('asset_info_table', asset_info_definition, primary_key=0, indices=[0, 1, 2, 3, 4])
self.db_adaptor.create_table('topology_table', topology_info_definition, primary_key=0, indices=[0, 1, 2])
    def exec_sql(self, sql=None, args=(), commit=False, fetch_one=False):
        """Execute sql sentence

        Args:
            sql (str): SQL string
            args (list): Args for the SQL
            commit (bool): If True, commit is performed (and nothing is fetched)
            fetch_one (bool): If True, fetch just one record

        Returns:
            list: fetched records.  Note that with fetch_one=True the single
                row's columns come back as a flat list (list(row)), not a
                one-row list.  Returns [] after a commit or when nothing
                matched, and None when the statement raised an exception.
        """
        self.stats.update_stats_increment("data_handler", "exec_sql", 1)
        try:
            if len(args) > 0:
                self.db_adaptor.db_cur.execute(sql, args)
            else:
                self.db_adaptor.db_cur.execute(sql)
            if commit:
                self.db_adaptor.db.commit()
                ret = None
            else:
                if fetch_one:
                    ret = self.db_adaptor.db_cur.fetchone()
                else:
                    ret = self.db_adaptor.db_cur.fetchall()
        except:
            # NOTE(review): a bare except also swallows KeyboardInterrupt /
            # SystemExit -- narrowing to Exception (or mysql errors) is worth
            # considering.
            self.logger.error(traceback.format_exc())
            traceback.print_exc()
            self.stats.update_stats_increment("data_handler", "fail_exec_sql", 1)
            return None
        if ret is None:
            return []
        else:
            return list(ret)
def get_asset_info(self, txobj):
"""Retrieve asset information from transaction object
Args:
txobj (BBcTransaction): transaction object to analyze
Returns:
list: list of list [asset_group_id, asset_id, user_id, False, file_digest]
"""
info = list()
for idx, evt in enumerate(txobj.events):
ast = evt.asset
if ast is not None:
info.append((evt.asset_group_id, ast.asset_id, ast.user_id))
for idx, rtn in enumerate(txobj.relations):
ast = rtn.asset
if rtn.asset is not None:
info.append((rtn.asset_group_id, ast.asset_id, ast.user_id))
return info
def _get_topology_info(self, txobj):
"""Retrieve topology information from transaction object
This method returns (from, to) list that describe the topology of transactions
Args:
txobj (BBcTransaction): transaction object to analyze
Returns:
list: list of tuple (base transaction_id, pointing transaction_id)
"""
info = list()
for reference in txobj.references:
info.append((txobj.transaction_id, reference.transaction_id)) # (base, point_to)
for idx, rtn in enumerate(txobj.relations):
for pt in rtn.pointers:
info.append((txobj.transaction_id, pt.transaction_id)) # (base, point_to)
return info
def insert_transaction(self, txdata, txobj=None):
"""Insert transaction data and its asset files
Either txdata or txobj must be given to insert the transaction.
Args:
txdata (bytes): serialized transaction data
txobj (BBcTransaction): transaction object to insert
Returns:
set: set of asset_group_ids in the transaction
"""
self.stats.update_stats_increment("data_handler", "insert_transaction", 1)
if txobj is None:
txobj = self.core.validate_transaction(txdata)
if txobj is None:
return None
if not self._insert_transaction_into_a_db(txobj):
return None
asset_group_ids = set()
for asset_group_id, asset_id, user_id in self.get_asset_info(txobj):
asset_group_ids.add(asset_group_id)
return asset_group_ids
    def _insert_transaction_into_a_db(self, txobj):
        """Insert transaction data into the transaction table of the specified DB

        Also records the derived asset-info rows and topology (base, point_to)
        edges for the transaction.

        Args:
            txobj (BBcTransaction): transaction object to insert

        Returns:
            bool: True if the transaction row insert succeeded.  NOTE(review):
                the follow-up asset/topology inserts are not checked -- confirm
                partial failure is acceptable.
        """
        if txobj.transaction_data is None:
            txobj.serialize()
        ret = self.exec_sql(sql="INSERT INTO transaction_table VALUES (%s,%s)" % (self.db_adaptor.placeholder,
                                                                self.db_adaptor.placeholder),
                            args=(txobj.transaction_id, txobj.transaction_data), commit=True)
        if ret is None:
            return False
        for asset_group_id, asset_id, user_id in self.get_asset_info(txobj):
            self.exec_sql(sql="INSERT INTO asset_info_table(transaction_id, asset_group_id, asset_id, user_id) "
                              "VALUES (%s, %s, %s, %s)" % (
                self.db_adaptor.placeholder, self.db_adaptor.placeholder,
                self.db_adaptor.placeholder, self.db_adaptor.placeholder),
                          args=(txobj.transaction_id, asset_group_id, asset_id, user_id), commit=True)
        for base, point_to in self._get_topology_info(txobj):
            self.exec_sql(sql="INSERT INTO topology_table(base, point_to) VALUES (%s, %s)" %
                              (self.db_adaptor.placeholder, self.db_adaptor.placeholder),
                          args=(base, point_to), commit=True)
        return True
def remove(self, transaction_id, txobj=None):
"""Delete all data regarding the specified transaction_id
This method requires either transaction_id or txobj.
Args:
transaction_id (bytes): target transaction_id
txobj (BBcTransaction): transaction object to remove
"""
if transaction_id is None:
return
if txobj is None:
txdata = self.exec_sql(sql="SELECT * FROM transaction_table WHERE transaction_id = %s" %
self.db_adaptor.placeholder, args=(transaction_id,))
txobj = bbclib.BBcTransaction(deserialize=txdata[0][1])
elif txobj.transaction_id != transaction_id:
return
self._remove_transaction(txobj)
    def _remove_transaction(self, txobj):
        """Remove transaction from DB

        Deletes the transaction row itself plus every topology edge the
        transaction contributed.  NOTE(review): the matching rows in
        asset_info_table are not deleted here -- confirm that is intended.
        """
        self.exec_sql(sql="DELETE FROM transaction_table WHERE transaction_id = %s" % self.db_adaptor.placeholder,
                      args=(txobj.transaction_id,), commit=True)
        for base, point_to in self._get_topology_info(txobj):
            self.exec_sql(sql="DELETE FROM topology_table WHERE base = %s AND point_to = %s" %
                          (self.db_adaptor.placeholder,self.db_adaptor.placeholder),
                          args=(base, point_to), commit=True)
def search_transaction(self, transaction_id=None, asset_group_id=None, asset_id=None, user_id=None,
                       direction=0, count=1):
    """Search transaction data

    When multiple conditions are given, they are combined with AND.

    Args:
        transaction_id (bytes): target transaction_id
        asset_group_id (bytes): asset_group_id that target transactions should have
        asset_id (bytes): asset_id that target transactions should have
        user_id (bytes): user_id that target transactions should have
        direction (int): 0: descend, 1: ascend
        count (int): The maximum number of transactions to retrieve (capped at 20)
    Returns:
        dict: mapping from transaction_id to BBcTransaction object,
        or None when a transaction_id was given but not found
    """
    if transaction_id is not None:
        txinfo = self.exec_sql(
            sql="SELECT * FROM transaction_table WHERE transaction_id = %s" % self.db_adaptor.placeholder,
            args=(transaction_id,))
        if len(txinfo) == 0:
            return None
    else:
        dire = "DESC"
        if direction == 1:
            dire = "ASC"
        conditions = list()
        if asset_group_id is not None:
            conditions.append("asset_group_id = %s " % self.db_adaptor.placeholder)
        if asset_id is not None:
            conditions.append("asset_id = %s " % self.db_adaptor.placeholder)
        if user_id is not None:
            conditions.append("user_id = %s " % self.db_adaptor.placeholder)
        sql = "SELECT * from asset_info_table "
        # BUGFIX: the original emitted "WHERE" unconditionally, producing
        # invalid SQL ("... WHERE ORDER BY ...") when no filter was supplied.
        if conditions:
            sql += "WHERE " + "AND ".join(conditions)
        sql += "ORDER BY id %s" % dire
        if count > 0:
            # Hard cap to keep result sets small.
            if count > 20:
                count = 20
            sql += " limit %d" % count
        sql += ";"
        args = list(filter(lambda a: a is not None, (asset_group_id, asset_id, user_id)))
        ret = self.exec_sql(sql=sql, args=args)
        txinfo = list()
        # Resolve each matching asset record to its full transaction row.
        for record in ret:
            tx = self.exec_sql(
                sql="SELECT * FROM transaction_table WHERE transaction_id = %s" % self.db_adaptor.placeholder,
                args=(record[1],))
            if tx is not None and len(tx) == 1:
                txinfo.append(tx[0])
    result_txobj = dict()
    for txid, txdata in txinfo:
        txobj = bbclib.BBcTransaction(deserialize=txdata)
        result_txobj[txid] = txobj
    return result_txobj
def count_transactions(self, asset_group_id=None, asset_id=None, user_id=None):
    """Count distinct transactions that match the given conditions.

    When multiple conditions are given, they are combined with AND.

    Args:
        asset_group_id (bytes): asset_group_id that target transactions should have
        asset_id (bytes): asset_id that target transactions should have
        user_id (bytes): user_id that target transactions should have
    Returns:
        int: the number of transactions
    """
    conditions = list()
    if asset_group_id is not None:
        conditions.append("asset_group_id = %s " % self.db_adaptor.placeholder)
    if asset_id is not None:
        conditions.append("asset_id = %s " % self.db_adaptor.placeholder)
    if user_id is not None:
        conditions.append("user_id = %s " % self.db_adaptor.placeholder)
    sql = "SELECT count( DISTINCT transaction_id ) from asset_info_table "
    # BUGFIX: only emit WHERE when at least one condition exists; the original
    # produced a dangling "WHERE " (invalid SQL) when called with no arguments.
    if conditions:
        sql += "WHERE " + "AND ".join(conditions)
    args = list(filter(lambda a: a is not None, (asset_group_id, asset_id, user_id)))
    ret = self.exec_sql(sql=sql, args=args)
    return ret[0][0]
def search_transaction_topology(self, transaction_id, traverse_to_past=True):
    """Look up topology records anchored at the given transaction.

    Args:
        transaction_id (bytes): base transaction_id
        traverse_to_past (bool): True: search backward (to past), False: search forward (to future)
    Returns:
        list: matching rows of the topology table, or None if no id was given
    """
    if transaction_id is None:
        return None
    # Backward traversal matches the "base" column, forward traversal "point_to".
    column = "base" if traverse_to_past else "point_to"
    return self.exec_sql(
        sql="SELECT * FROM topology_table WHERE %s = %s" % (column, self.db_adaptor.placeholder),
        args=(transaction_id,))
class DbAdaptor:
    """Abstract interface that concrete DB adaptors implement."""

    def __init__(self, handler=None, db_name=None):
        """Remember the owning handler and derive the physical DB name."""
        self.handler = handler
        self.db = None
        self.db_cur = None
        # Every domain DB is namespaced with a "dom" prefix.
        self.db_name = "dom" + db_name
        self.placeholder = ""

    def open_db(self, rootuser, rootpass):
        """Open the DB (no-op in the base class)."""
        pass

    def create_table(self, tbl, tbl_definition, primary_key=0, indices=[]):
        """Create a table (no-op in the base class)."""
        pass

    def check_table_existence(self, tblname):
        """Check whether the table exists or not (no-op in the base class)."""
        pass
class MysqlAdaptor(DbAdaptor):
    """DB adaptor for MySQL."""

    def __init__(self, handler=None, db_name=None, server_info=None, engine="MyISAM"):
        """Store connection parameters.

        Args:
            handler: owning storage handler (provides exec_sql and a logger)
            db_name (str): logical domain DB name ("dom"-prefixed by the base class)
            server_info (tuple): (address, port, user, password)
            engine (str): MySQL storage engine used for newly created tables
        """
        super(MysqlAdaptor, self).__init__(handler, db_name)
        self.placeholder = "%s"
        self.db_addr = server_info[0]
        self.db_port = server_info[1]
        self.db_user = server_info[2]
        self.db_pass = server_info[3]
        self.table_engine = engine

    def open_db(self, rootuser, rootpass):
        """Create the database (and grants) if needed, then open the working connection.

        Args:
            rootuser (str): administrative MySQL user used for the one-time setup
            rootpass (str): password of the administrative user
        """
        db = None
        db_cur = None
        try:
            db = mysql.connector.connect(
                host=self.db_addr, port=self.db_port,
                user=rootuser, password=rootpass, charset='utf8'
            )
            db_cur = db.cursor(buffered=True)
            db_cur.execute("show databases like '%s'" % self.db_name)
            if len(db_cur.fetchall()) == 0:
                db_cur.execute("create database %s" % self.db_name)
                grant_sql = "GRANT ALL ON %s.* TO '%s'@'%%';" % (self.db_name, self.db_user)
                db_cur.execute(grant_sql)
        except Exception as e:
            self.handler.logger.error(e)
        finally:
            # BUGFIX: close() was called unconditionally, so a failed connect()
            # raised AttributeError on the still-None handles here, masking the
            # real error logged above.
            if db_cur is not None:
                db_cur.close()
            if db is not None:
                db.close()
        # Regular (non-root) connection used for all subsequent statements.
        self.db = mysql.connector.connect(
            host=self.db_addr,
            port=self.db_port,
            db=self.db_name,
            user=self.db_user,
            password=self.db_pass,
            charset='utf8'
        )
        self.db_cur = self.db.cursor(buffered=True)

    def create_table(self, tbl, tbl_definition, primary_key=0, indices=None):
        """Create a table if it does not already exist.

        Args:
            tbl (str): table name
            tbl_definition (list): schema of the table [["column_name", "data type"], ...]
            primary_key (int): index (column) of the primary key of the table
            indices (list): column indices to build secondary indexes on
        """
        # BUGFIX: avoid the shared mutable default argument ([]).
        if indices is None:
            indices = []
        if len(self.check_table_existence(tbl)) == 1:
            return
        sql = "CREATE TABLE %s " % tbl
        sql += "("
        defs = list()
        for d in tbl_definition:
            if d[0] == "id":
                defs.append("%s %s AUTO_INCREMENT NOT NULL" % (d[0], d[1]))
            else:
                defs.append("%s %s" % (d[0], d[1]))
        sql += ",".join(defs)
        # MySQL requires an explicit key prefix length for BLOB/TEXT columns.
        if tbl_definition[primary_key][1] in ["BLOB", "TEXT"]:
            sql += ", PRIMARY KEY (%s(32))" % tbl_definition[primary_key][0]
        else:
            sql += ", PRIMARY KEY (%s)" % tbl_definition[primary_key][0]
        sql += ") CHARSET=utf8 ENGINE=%s;" % self.table_engine
        self.handler.exec_sql(sql=sql, commit=True)
        for idx in indices:
            if tbl_definition[idx][1] in ["BLOB", "TEXT"]:
                self.handler.exec_sql(sql="ALTER TABLE %s ADD INDEX (%s(32));" % (tbl, tbl_definition[idx][0]), commit=True)
            else:
                self.handler.exec_sql(sql="ALTER TABLE %s ADD INDEX (%s);" % (tbl, tbl_definition[idx][0]), commit=True)

    def check_table_existence(self, tblname):
        """Return the result rows of a "show tables like" probe for *tblname*."""
        sql = "show tables from %s like '%s';" % (self.db_name, tblname)
        return self.handler.exec_sql(sql=sql)
| [
"takeshi@quvox.net"
] | takeshi@quvox.net |
ca586000f7193415f95a368349263123602879e5 | c38ac2d483cd46c39d23fe69706aac1ba17adb1e | /atari_test.py | 5f8ee5644a5f20be361d9abf07584e2ccb99a216 | [] | no_license | oussrock/release1 | 5690d7359ecba7b4f629d5710a1c4ae94880a581 | eef3558e75a074aaee68b814724573035fbaec19 | refs/heads/main | 2023-01-31T15:51:22.499556 | 2020-12-17T05:15:39 | 2020-12-17T05:15:39 | 321,779,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,565 | py | import gym
import numpy as np
import cv2
import neat
import pickle
import random
import noise
# Atari environment under evolution; swap in the commented line for Pinball.
env = gym.make('Breakout-v0')
#env = gym.make('VideoPinball-v0')
imagearray = []  # NOTE(review): unused at module level — eval_genomes rebinds a local of the same name
def eval_genomes(genomes, config):
    """NEAT fitness function: play one episode per genome.

    Each genome's recurrent network maps a downscaled, noise-corrupted
    screen frame to one of the environment's discrete actions; the summed
    episode reward becomes the genome's fitness.
    """
    for genome_id, genome in genomes:
        observation = env.reset()
        inputx, inputy, inputColour = env.observation_space.shape
        # Downscale the frame 8x in each dimension to shrink the network input.
        inputx = int (inputx / 8)
        inputy = int (inputy / 8)
        net = neat.nn.recurrent.RecurrentNetwork.create(genome, config)
        fitness_current = 0
        frame = 0
        counter = 0
        game_done = False
        while not game_done :
            frame += 1
            factor = 0.5
            # Corrupt the observation with noise before resizing and flattening it.
            observation = np.uint8(noise.noisy(observation,factor))
            observation = cv2.resize(observation, (inputx, inputy))
            #observation = cv2.cvtColor(observation, cv2.COLOR_RGB2GRAY)
            imagearray = np.ndarray.flatten(observation)
            nnOutput = net.activate(imagearray)
            # Choose the action whose output neuron fired strongest.
            numerical_input = nnOutput.index(max(nnOutput))
            observation, reward, game_done, info = env.step(numerical_input)
            fitness_current += reward
            # counter tracks frames since the last reward (stagnation cutoff).
            if reward > 0 :
                counter = 0
            else :
                counter += 1
            env.render()
            # End the episode after 300 rewardless frames to avoid stalling forever.
            if game_done or counter == 300:
                game_done= True
                print (genome_id, fitness_current, counter)
        genome.fitness = fitness_current
# Load NEAT hyperparameters and evolve a population, checkpointing every 10 generations.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, 'config3.txt')
pop = neat.Population(config)
pop.add_reporter(neat.Checkpointer(10))
winner = pop.run(eval_genomes)
# Persist the best genome (pickle protocol 1) for later replay.
with open('winner_BreakOut123.pkl', 'wb') as output :
    pickle.dump(winner, output, 1)
| [
"ouss@MacBook-Air-de-Ouss.local"
] | ouss@MacBook-Air-de-Ouss.local |
8a88dc253b9b57d290d39ad5b23cb3244b4c8a9e | 12bd445907da2950dc1495b1e4d31f3af861718e | /Aprendendo Python/cursopythonudamy/aula3.py | 633579cc1ee6127071dd41b4e06d9780f4586542 | [
"MIT"
] | permissive | JlucasS777/Aprendendo-Python | a9d8a4a6149e942b78aaa0d23f3d249f93b1f5b2 | a3a960260070f0d604c27fbbc41578a6ab11edb5 | refs/heads/main | 2023-08-10T18:22:24.935251 | 2021-10-11T01:50:14 | 2021-10-11T01:50:14 | 415,745,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | '''
str - string
'''
print("Essa é uma 'string' (str).")  # quotes inside the text must differ from the enclosing quotes to be accepted
print("essa é uma 'string' (str).")
print("Esse é meu teste de \"(string).\"")  # the backslash makes Python ignore (escape) the following quote character
print(r"Esse é meu texto com \n(str). veja que não pulou linha porque tem um r na frente das aspas")  # raw string: \n is printed literally, no newline
| [
"92265682+JlucasS777@users.noreply.github.com"
] | 92265682+JlucasS777@users.noreply.github.com |
a98861179cec2687753f1bbd895f2aea1f551798 | 1aefa304f794c1ed9e06ce71248206098c756cf3 | /Django_Assignments/userauth_assignment/userauth_assignment/urls.py | 2ff85cdcbde74c5815e5277909f196ebe52546f4 | [] | no_license | dilipksahu/django_class | 333233bbced5491d886687b5990c8836dac2f145 | a044c4a079c61a6a6de05674103e8a9ba2b4d28c | refs/heads/master | 2023-01-10T07:40:44.713361 | 2020-11-10T15:26:33 | 2020-11-10T15:26:33 | 282,398,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | """userauth_assignment URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# Route table: Django matches request paths against these patterns in order.
urlpatterns = [
    path('admin/', admin.site.urls),  # built-in admin site
    path('',include('userauth.urls')),  # delegate all remaining URLs to the userauth app
]
| [
"sahud048@gmail.com"
] | sahud048@gmail.com |
54bbd219f19c1ed9466ccdbb26db23e887394dba | 6cb11cb804f316d16efa083effb3def1c2cab57c | /22.py | c55af12e976c5a84557d4af19a98af4e455b732f | [] | no_license | davemolk/python_practice | 8879cd5bdcb77c3d84ff5c7f961fda1cd48b2f93 | 91d3e411b32f3a4a29d60148b352b91ce8e1d11b | refs/heads/main | 2023-08-01T12:57:45.779824 | 2021-09-18T16:54:11 | 2021-09-18T16:54:11 | 400,767,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | '''
return True if array has two consecutive 2s, otherwise return false
'''
import re
def has22(nums):
pattern = r"[2]{2}"
regex = re.compile(pattern)
match = regex.findall(("".join(str(el) for el in nums)))
return True if match else False
print(has22([1, 2, 2])) | [
"davemolk@gmail.com"
] | davemolk@gmail.com |
11deadc0af53a9084726550a317406e15a6976a7 | ba834139d9e608e093b3407eec382ef3eece652e | /2015/15/15.py | 583ba22af029b5616b70b18ff42a1d6365eb8f98 | [] | no_license | pberczi/advent-of-code | 815e044193a769fba0829d6720fbe1ec40d83b6d | ce3470bcb5e240eee3b0eee76f7ceed5c75c0b44 | refs/heads/master | 2021-06-09T06:29:27.475941 | 2016-12-26T22:44:19 | 2016-12-26T22:44:19 | 75,329,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,486 | py | #!/usr/bin/python
import argparse
# parse command line args
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--problem', metavar='n', type=int, default=1, help='part of the question to solve')
parser.add_argument('input', metavar='input_path', type=str, nargs='?', default='input.txt', help='path to input file')
args = parser.parse_args()
data = []
with open(args.input, 'r') as f:
data = f.read().splitlines()
ingredients = []
for line in data:
line = line.replace(',', ' ')
item = line.split()
ingredient = {}
ingredient['name'] = item[0][:-1]
ingredient['capacity'] = int(item[2])
ingredient['durability'] = int(item[4])
ingredient['flavor'] = int(item[6])
ingredient['texture'] = int(item[8])
ingredient['calories'] = int(item[10])
ingredients.append(ingredient)
amounts = [0 for i in ingredients]
teaspoons = 100
max_cost = 0
for i in range(teaspoons):
for j in range(teaspoons - i):
for k in range(teaspoons - (i + j)):
l = teaspoons - (i + j + k)
capacity = ingredients[0]['capacity'] * i
capacity += ingredients[1]['capacity'] * j
capacity += ingredients[2]['capacity'] * k
capacity += ingredients[3]['capacity'] * l
capacity = max(capacity, 0)
durability = ingredients[0]['durability'] * i
durability += ingredients[1]['durability'] * j
durability += ingredients[2]['durability'] * k
durability += ingredients[3]['durability'] * l
durability = max(durability, 0)
flavor = ingredients[0]['flavor'] * i
flavor += ingredients[1]['flavor'] * j
flavor += ingredients[2]['flavor'] * k
flavor += ingredients[3]['flavor'] * l
flavor = max(flavor, 0)
texture = ingredients[0]['texture'] * i
texture += ingredients[1]['texture'] * j
texture += ingredients[2]['texture'] * k
texture += ingredients[3]['texture'] * l
texture = max(texture, 0)
calories = ingredients[0]['calories'] * i
calories += ingredients[1]['calories'] * j
calories += ingredients[2]['calories'] * k
calories += ingredients[3]['calories'] * l
cost = capacity * durability * flavor * texture
max_cost = max(cost, max_cost) if calories == 500 else max_cost
print max_cost | [
"p.berczi@gmail.com"
] | p.berczi@gmail.com |
72a8cd5ebea5dd6e2d817f0399ac92cdda59e064 | c8ad75a289ff932490e77978ced8b3e17184b58f | /ch2/tasks_proj/tests/func/test_unique_id_3.py | 4eeede0af9321b7d5c29058b5a576a1b29261f90 | [] | no_license | albertfougy/pytest-tutorial | 1afb57be285ef301df68b120add5859d70e50a6e | 43981d51401c553acf4e5e4127573cf3ae2e18c7 | refs/heads/master | 2020-04-26T10:04:30.371789 | 2019-08-30T02:11:13 | 2019-08-30T02:11:13 | 173,476,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | import pytest
import tasks
from tasks import Task
@pytest.mark.skip(tasks.__version__<'0.2.0',
reason='misunderstood the API')
def test_unique_id_1():
"""Calling unique_id() twice should return different numbers."""
id_1 = tasks.unique_id()
id_2 = tasks.unique_id()
assert id_1 != id_2
def test_unique_id_2():
"""unique_id() twice should return an unused id"""
ids = []
ids.append(tasks.add(Task('one')))
ids.append(tasks.add(Task('two')))
ids.append(tasks.add(Task('three')))
# grab a unique id
uid = tasks.unique_id()
# make sure it isn't in the list of existing ids
assert uid not in ids
@pytest.fixture(autouse=True)
def initialized_tasks_db(tmpdir):
"""Connect to db before testing, disconnect after."""
tasks.start_tasks_db(str(tmpdir), 'tiny')
yield
tasks.stop_tasks_db()
| [
"albert@fougy.com"
] | albert@fougy.com |
e2952709033cf2a1d7e5a63e8c6a58896b6a2830 | b4f60aafb3a1ac97d2bd5db186a8acba87d5fc89 | /loop.py | 4c65224f982e509f99ace6dcb4840a6965291705 | [] | no_license | arvindanand1123/twitter-weather-bot | da28d6ab4ca5558d22aa2d453bdcbe7eda824f39 | cc6e6680ffd67fc2248e03935c0f003e84104551 | refs/heads/main | 2023-05-03T15:56:04.713218 | 2021-05-23T04:22:56 | 2021-05-23T04:22:56 | 365,045,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,408 | py | import os
import random
import json
from pathlib import Path
import csv
import tweepy
import logging
import time
import config
from dotenv import load_dotenv
import requests
load_dotenv('t.env')
print("Get credentials")
consumer_key = os.getenv("CONSUMER_KEY")
consumer_secret = os.getenv("CONSUMER_SECRET")
access_token = os.getenv("ACCESS_TOKEN")
access_token_secret = os.getenv("ACCESS_TOKEN_SECRET")
print("Authenticate")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
l = 'https://fwdyykezdk.execute-api.us-east-1.amazonaws.com/api/tweet'
def check_mentions(api, keywords, since_id):
    """Scan new mentions and respond to each one.

    Mentions containing one of *keywords* receive a canned help reply; all
    other top-level mentions are forwarded to the weather API endpoint.

    Args:
        api: authenticated tweepy.API client
        keywords (list[str]): lowercase trigger words for the help response
        since_id (int): only mentions newer than this tweet id are processed
    Returns:
        int: highest mention id seen (pass back in on the next poll)
    """
    logger.info("Retrieving mentions")
    new_since_id = since_id
    for tweet in tweepy.Cursor(api.mentions_timeline, since_id=since_id).items():
        new_since_id = max(tweet.id, new_since_id)
        uname = tweet.user.screen_name
        # Skip replies so the bot only reacts to top-level mentions.
        if tweet.in_reply_to_status_id is not None:
            continue
        if any(keyword in tweet.text.lower() for keyword in keywords):
            logger.info(f"Answering to {tweet.user.screen_name}")
            api.update_status(
                status=('@' + uname + " To use Weather Bot, just tweet @ us, use a phrase like 'What's the weather like in <LOCATION>'. It doesn't matter how you ask for the weather, as long as you do so discernibly and include the location."),
                in_reply_to_status_id=tweet.id,
            )
        else:
            # Forward the raw query to the endpoint, which posts the forecast reply.
            try:
                payload = {"query":tweet.text.lower(), "uname": tweet.user.screen_name ,"id":tweet.id}
                header = {'Content-Type': 'application/json'}
                r = requests.post(l, json=payload, headers=header)
                logger.info(("Payload return", r.json()))
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
                # consider catching requests.RequestException / ValueError instead.
                api.update_status(
                    status=('@' + uname + " Seems like the query was not understood, please try asking about the weather. Try tweeting 'help' or 'support'."),
                    in_reply_to_status_id=tweet.id,
                )
    return new_since_id
def main():
    """Poll the mentions timeline forever, handling new mentions each pass."""
    since_id = 1
    while True:
        # Carry the newest processed id forward so mentions aren't handled twice.
        since_id = check_mentions(api, ["help", "support"], since_id)
        logger.info("Waiting...")
        time.sleep(5)


if __name__ == "__main__":
    main()
"arvindanand1123@gmail.com"
] | arvindanand1123@gmail.com |
d512af4252ed1c0fc71cf00b79ca41907c8012dd | ed7b67259c8b90422009773884245a032a07a4f2 | /gallery/views.py | 3edd97854a314482ad4e6af89994c8d1056d982d | [] | no_license | harclemadscam/fourth-milestone-project | bfcca5633b7c71f42adbc2279121d98f923b55a9 | 9e1a90794365ce5fbebeffc02b965a0fb03afedb | refs/heads/master | 2023-01-22T06:04:21.661195 | 2020-11-30T21:24:04 | 2020-11-30T21:24:04 | 292,605,409 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,378 | py | from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import GalleryImage
from .forms import UploadImageForm
from shop.models import Product
def _paginate(queryset, page, per_page=48):
    """Return one page of *queryset*, falling back to the first page on a
    non-integer page number and to the last page when out of range."""
    paginator = Paginator(queryset, per_page)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)


def gallery(request):
    """Render the public gallery: product photos plus standalone gallery images.

    The two sections are paginated independently via the 'page1' and 'page2'
    query parameters. (Refactor: the duplicated pagination boilerplate was
    extracted into _paginate.)
    """
    products = Product.objects.exclude(image1__exact='')
    images = GalleryImage.objects.filter(is_shown=True)

    products = _paginate(products, request.GET.get('page1'))
    images = _paginate(images, request.GET.get('page2'))

    context = {
        'products': products,
        'images': images
    }

    return render(request, 'gallery/gallery.html', context)
@login_required
def add_image(request):
    """Let a superuser upload a new gallery image."""
    if not request.user.is_superuser:
        return redirect(reverse('gallery'))

    if request.method != 'POST':
        form = UploadImageForm()
    else:
        form = UploadImageForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect(reverse('gallery'))

    # Re-render with a blank form (GET) or with validation errors (failed POST).
    return render(request, 'gallery/add_image.html', {'form': form})
@login_required
def edit_image(request, id):
    """Let a superuser edit an existing gallery image's details."""
    if not request.user.is_superuser:
        return redirect(reverse('gallery'))

    image = get_object_or_404(GalleryImage, id=id)
    if request.method == 'POST':
        # NOTE(review): the bound form omits request.FILES — confirm whether
        # replacing the image file on edit is supposed to work.
        form = UploadImageForm(request.POST, instance=image)
        if form.is_valid():
            form.save()
            return redirect(reverse('gallery'))
    else:
        form = UploadImageForm(instance=image)

    return render(request, 'gallery/edit_image.html', {'form': form, 'image': image})
@login_required
def delete_image(request, id):
    """Remove a gallery image (superuser only).

    NOTE(review): deletion currently happens on a plain GET with no
    confirmation step — consider restricting this view to POST.
    """
    if not request.user.is_superuser:
        return redirect(reverse('gallery'))

    get_object_or_404(GalleryImage, id=id).delete()
    return redirect(reverse('gallery'))
| [
"joeapplegate1993@gmail.com"
] | joeapplegate1993@gmail.com |
207624651c4804e33116fd0fc11465432a0c6c49 | 6a6031fe5c6d96e95a6df3d0dd2f99060a487bbb | /crush/templatetags/base_extras.py | 654980636ed4358cb4d7214c5254b36ec22b3fff | [] | no_license | http417/Crushmaven | aa50eb570b8fa1ec2507695aaca1d12c13511571 | 2591919437c9d320c8afae6396640de0e18bc77c | refs/heads/master | 2021-09-15T04:36:19.868419 | 2018-05-26T04:16:01 | 2018-05-26T04:16:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | from django import template
#from django.core.urlresolvers import reverse
register = template.Library()
@register.simple_tag
def navactive(request, url):
    """Return "active" when *url* occurs in the current request path, else "inactive".

    request.path is the current URL minus the host name. Note this is a
    substring test, not an exact path-segment match.
    """
    return "active" if url in request.path else "inactive"
| [
"chris.h.cheng@gmail.com"
] | chris.h.cheng@gmail.com |
b8062923bcce159d9c13a87c2d2cfbac3806dabb | 7206403e1c4fd7a508d874a20b4295e62140a455 | /scorePredictor.py | 4b2a47f75f93ed5c368ee666c6ed937a7b88969a | [] | no_license | TWilkinson257/FPL_Model | 473b5731248b4bcc743558e570c72d5f1ceb88fb | 6127aff2ffc55dc31184fd0ed0ae14479561e145 | refs/heads/main | 2023-01-19T01:19:12.461627 | 2020-11-23T21:28:40 | 2020-11-23T21:28:40 | 310,086,866 | 0 | 0 | null | 2020-11-19T12:11:06 | 2020-11-04T18:35:01 | Python | UTF-8 | Python | false | false | 3,317 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 16:44:11 2020
@author: thomaswilkinson
"""
# import requests
import pandas as pd
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVR
import csv
from itertools import permutations
from math import factorial
import numpy as np
# Load the fixtures dataset and keep only completed (90-minute) matches,
# using the two difficulty ratings as features and the score as the target.
fn = 'fixtures.csv'
fixtures = read_csv(fn)
fixtures_h = fixtures[['team_a_difficulty','team_h_difficulty', 'team_h_score' ]][fixtures['minutes'].values == 90]
fixtures_a = fixtures[['team_a_difficulty','team_h_difficulty', 'team_a_score' ]][fixtures['minutes'].values == 90]
# print(fixtures_h.head(10))
# print(fixtures_a.head(10))

# Team names in id order; team ids in the fixtures file are 1-based
# (see the teams[... - 1] lookups below).
teams = ['Arsenal', 'Aston Villa', 'Brighton', 'Burnley', 'Chelsea', 'Crystal Palace', 'Everton', 'Fulham',
        'Leicester', 'Leeds', 'Liverpool', 'Man City', 'Man Utd', 'Newcastle', 'Sheffield Utd',
        'Southampton', 'Spurs', 'West Brom', 'West Ham', 'Wolves']

# Split-out validation dataset - home scores
array = fixtures_h.values
X = array[:,0:2]
y = array[:,2]
# X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20, random_state=1)

# Support-vector regressor predicting the home score from the difficulty pair.
model_h = SVR()
model_h.fit(X, y)

# kfold = StratifiedKFold(n_splits=2, random_state=1, shuffle=True)
# cv_results = cross_val_score(model_h, X_train, Y_train, cv=kfold, scoring='accuracy')

# Split-out validation dataset - away scores
array = fixtures_a.values
X = array[:,0:2]
y = array[:,2]

#model definition (away-score regressor)
model_a = SVR()
model_a.fit(X, y)

# test_fix = [4, 2]
# test_h_score = model_h.predict([test_fix])
# test_a_score = model_a.predict([test_fix])
# print('%.0f : %.0f' %(test_h_score, test_a_score))

# Predict a home and away score for every fixture row.
pred_h_score = np.array([])
pred_a_score = np.array([])
for row in range(len(fixtures)):
    h_diff = fixtures['team_h_difficulty'][row]
    a_diff = fixtures['team_a_difficulty'][row]
    pred_h_score = np.append(pred_h_score,model_h.predict([[h_diff,a_diff]]))
    pred_a_score = np.append(pred_a_score,model_a.predict([[h_diff,a_diff]]))
    # pred_h_score.append(model_h.predict([[h_diff,a_diff]]))
    # pred_a_score.append(model_a.predict([[h_diff,a_diff]]))

# Print predicted scorelines for the selected gameweek only.
current_gw = 9
gw_fixt = fixtures[(fixtures['event'].values == current_gw)]
for row in range(len(pred_h_score[fixtures['event'].values == current_gw])):
    # print(teams[gw_fixt['team_h'].values[row]-1])
    # print(teams[gw_fixt['team_a'].values[row]-1])
    print('%s %.0f : %.0f %s' %(teams[gw_fixt['team_h'].values[row]-1], pred_h_score[fixtures['event'].values == current_gw][row],
                                pred_a_score[fixtures['event'].values == current_gw][row], teams[gw_fixt['team_a'].values[row]-1]))
    # print(pred_a_score[fixtures['event'].values == 7][row])
| [
"noreply@github.com"
] | noreply@github.com |
7170e2bb514b2c2566288e66eca9acbc6b74fe58 | 1afc04f4cdbe515299d1974c92401628b060e0e9 | /dataDriver/yamlData.py | 001734bbe9e7a7bdec36b9ae2c06d52183cd62e7 | [] | no_license | liangjk1/appium- | 956aaa0302e56d0723e5dfab6062630969670f7d | d1c6fa42b537511d09025240998c07c053e1ced3 | refs/heads/master | 2023-08-12T16:02:11.183179 | 2021-09-21T10:05:37 | 2021-09-21T10:05:37 | 408,764,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/3/13 13:57
# @Author : ZY
# @File : yamlData.py
# @Project : APP
import yaml
# Read a YAML file and return the parsed data
def readYaml(path):
    """Load the YAML document at *path* and return the parsed object."""
    with open(path, "r", encoding='utf-8') as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
# Write data out as a YAML file
def writeYaml(path, content):
    """Serialize *content* as YAML to *path*, keeping non-ASCII characters as-is."""
    with open(path, 'w', encoding="utf-8") as stream:
        yaml.dump(content, stream, allow_unicode=True)
| [
"364390447@qq.com"
] | 364390447@qq.com |
5be380346b03e9b8dbc4a4556b1cdf25b6546a22 | 482467f7875513440ccc9fb5ee5755214137e8df | /homeassistant/components/essent/sensor.py | e77b256abb73e0848e3bc706a896cb5b694aa063 | [
"Apache-2.0"
] | permissive | Watemlifts/home-assistant | fbf16d91489f9ab472b1fda928fc472f99d2b057 | 6e414983738d9495eb9e4f858e3e98e9e38869db | refs/heads/dev | 2023-07-21T06:38:40.212969 | 2023-07-15T09:33:07 | 2023-07-15T09:33:07 | 195,134,511 | 4 | 0 | Apache-2.0 | 2023-07-15T09:33:08 | 2019-07-03T22:34:49 | Python | UTF-8 | Python | false | false | 3,671 | py | """Support for Essent API."""
from datetime import timedelta
from pyessent import PyEssent
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_PASSWORD, CONF_USERNAME, ENERGY_KILO_WATT_HOUR)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
SCAN_INTERVAL = timedelta(hours=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Essent platform.

    Creates one EssentMeter sensor per meter/tariff combination found in the
    account; shows a persistent notification when no readings are available.
    """
    username = config[CONF_USERNAME]
    password = config[CONF_PASSWORD]

    essent = EssentBase(username, password)
    meters = []
    for meter in essent.retrieve_meters():
        data = essent.retrieve_meter_data(meter)
        # One sensor per tariff of this meter (e.g. the keys of 'LVR').
        for tariff in data['values']['LVR'].keys():
            meters.append(EssentMeter(
                essent,
                meter,
                data['type'],
                tariff,
                data['values']['LVR'][tariff]['unit']))

    if not meters:
        # No usable readings: tell the user how to enable them instead of
        # silently adding nothing.
        hass.components.persistent_notification.create(
            'Couldn\'t find any meter readings. '
            'Please ensure Verbruiks Manager is enabled in Mijn Essent '
            'and at least one reading has been logged to Meterstanden.',
            title='Essent', notification_id='essent_notification')
        return

    add_devices(meters, True)
class EssentBase():
    """Shared credential/state holder for the Essent sensors."""

    def __init__(self, username, password):
        """Store the credentials and fetch the initial meter data."""
        self._username = username
        self._password = password
        self._meter_data = {}
        self.update()

    def retrieve_meters(self):
        """Return the identifiers of all meters with cached data."""
        return self._meter_data.keys()

    def retrieve_meter_data(self, meter):
        """Return the cached reading data for one meter."""
        return self._meter_data[meter]

    @Throttle(timedelta(minutes=30))
    def update(self):
        """Refresh the cached meter data from the Essent API (throttled)."""
        essent = PyEssent(self._username, self._password)
        for ean in essent.get_EANs():
            # Only keep meters that actually return a reading.
            reading = essent.read_meter(ean, only_last_meter_reading=True)
            if reading:
                self._meter_data[ean] = reading
class EssentMeter(Entity):
    """One sensor per Essent meter/tariff combination."""

    def __init__(self, essent_base, meter, meter_type, tariff, unit):
        """Keep a reference to the shared data object and this sensor's identity."""
        self._state = None
        self._essent_base = essent_base
        self._meter = meter
        self._type = meter_type
        self._tariff = tariff
        self._unit = unit

    @property
    def name(self):
        """Friendly name built from the meter type and tariff."""
        return "Essent {} ({})".format(self._type, self._tariff)

    @property
    def state(self):
        """Most recently fetched meter reading."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Unit reported by the API, with kWh normalized to the HA constant."""
        return ENERGY_KILO_WATT_HOUR if self._unit.lower() == 'kwh' else self._unit

    def update(self):
        """Fetch the latest reading for this meter/tariff."""
        # Ensure the shared cache isn't stale (the base object throttles itself).
        self._essent_base.update()
        records = self._essent_base.retrieve_meter_data(self._meter)['values']['LVR'][self._tariff]['records']
        # Take the first record's value as the current state.
        self._state = next(iter(records.values()))
| [
"amelchio@nogoto.net"
] | amelchio@nogoto.net |
aca4ef43490ac175cf6befc99c587dfb24ff24e0 | cdbbea573c45a73fa6c958944624093a2d23a7f3 | /10a.py | 991a8f8a518884338939d9fb116c7e12f986a125 | [] | no_license | tomasolodun/lab11 | 947302574adfc91fd1e1d752c2de1f8cd914407f | 1b435405e8608fcf92ffdc6d91bc8aeb62e96063 | refs/heads/master | 2022-08-30T12:03:21.384616 | 2020-05-28T10:35:18 | 2020-05-28T10:35:18 | 267,560,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | """а) Дан файл f, компоненти якого є цілими числами. Отримати в файлі g всі
компоненти файлу f що є простими числами. Простим називається число, що більше за 1
та не має інших дільників, окрім 1 та самого себе)"""
def get_integers(path="C:\\Users\\Toma\Desktop\\NewFold\\f.txt"):
    """Read the input file and return all whitespace-separated tokens.

    Args:
        path (str): file to read (defaults to the original hard-coded location)
    Returns:
        list[str]: every number in the file, as strings.
    """
    # BUGFIX: the original rebound the result inside a per-line loop, so only
    # the tokens of the *last* line survived; read the whole file instead.
    with open(path) as file_start:
        return file_start.read().split()
def is_prime(n):
    """Return True if *n* is prime (greater than 1 with no divisors but 1 and itself)."""
    # BUGFIX: the original returned True as soon as ONE candidate failed to
    # divide n (so is_prime(9) was True) and returned None for n == 2; only
    # declare n prime after checking every candidate divisor.
    if n < 2:
        return False
    for x in range(2, n):
        if n % x == 0:
            return False
    return True
# Copy every prime from f.txt into g.txt, space-separated.
data = get_integers()
with open("C:\\Users\\Toma\Desktop\\NewFold\\g.txt", "w") as file_end:
    for i in data:
        if is_prime(int(i)) == True:
            file_end.write(i + ' ')
    # NOTE(review): redundant — the with block already closes the file.
    file_end.close()
"noreply@github.com"
] | noreply@github.com |
e6342768aa89cbdf4cfcb7ef99d2ea39ecacd3a6 | 0f5cca9568b34b877eb4e9e19e584bc532a5f8ee | /failure_scripts/NSBL_excel_rosters.py | bfa3ffd2a1750a0ac2811022867a98a0ab0a0316 | [
"MIT"
] | permissive | Connor-R/NSBL | 1743c3b4af3f3eb1532e9bf063c71c7a313710ac | c9b399ac995cfe43c2d8fd09890c63a9b333f53e | refs/heads/master | 2022-08-12T05:43:57.228690 | 2022-08-07T07:19:20 | 2022-08-07T07:19:20 | 77,431,477 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | import xlrd
from py_db import db
import argparse
import NSBL_helpers as helper
db = db('NSBL')
def process(curr_year):
    """Import the NSBL roster spreadsheet into the `excel_rosters` table.

    Reads one worksheet per team (sheet indices 4-33) from Rosters.xlsx,
    parses each contract row into a dict and bulk-inserts them per team.
    NOTE(review): written for Python 2 (print statements) and the xlrd API.
    """
    rosters_link = '/Users/connordog/Dropbox/Desktop_Files/Baseball/Rosters.xlsx'
    # Season games-played is derived as half the recorded league GS total --
    # presumably because each game has two starting pitchers; TODO confirm.
    season_gp = db.query("SELECT gs FROM processed_league_averages_pitching WHERE year = %s" % (curr_year))
    if season_gp == ():
        season_gp = 0
    else:
        season_gp = float(season_gp[0][0])/2
    workbook = xlrd.open_workbook(rosters_link)
    # iterate through all team sheets
    for index in range(4, 34):
        team_name = workbook.sheet_names()[index]
        print team_name
        team_abbs, primary_abb = helper.get_team_abbs(team_name.upper())
        entries = []
        team_sheet = workbook.sheet_by_index(index)
        # get a maximum row for each sheet: the roster ends at the
        # 'Waived Players' marker row.
        # NOTE(review): max_row stays unbound (NameError below) if the marker
        # is missing from a sheet -- verify the spreadsheet always has it.
        for row in range(1,100):
            if team_sheet.cell(row,1).value == 'Waived Players':
                max_row = row
                break
        position = ''
    # Section header rows ('Pitchers', 'Catchers', ...) set the position
    # for the player rows that follow them.
        for row in range(8,max_row):
            if team_sheet.cell(row, 1).value == 'Pitchers':
                position = 'p'
            if team_sheet.cell(row, 1).value == 'Catchers':
                position = 'c'
            if team_sheet.cell(row, 1).value == 'Infielders':
                position = 'if'
            if team_sheet.cell(row, 1).value == 'Outfielders':
                position = 'of'
            entered_name = team_sheet.cell(row, 1).value
            # Disambiguate the two Will Smiths: the catcher is stored as
            # 'D. Smith, Will'.
            if position == 'c' and entered_name == 'Smith, Will':
                entered_name = 'D. Smith, Will'
            player_name, first_name, last_name = name_parser(entered_name, primary_abb)
            # Only rows with both a contract year and a salary are player rows.
            if team_sheet.cell(row, 2).value not in ('Year','') and team_sheet.cell(row, 3).value not in ('Salary', ''):
                salary = team_sheet.cell(row, 3).value
                year = team_sheet.cell(row, 2).value
                expires = team_sheet.cell(row, 4).value
                opt = team_sheet.cell(row, 5).value
                NTC = team_sheet.cell(row, 8).value
                salary_counted = team_sheet.cell(row, 9).value
                entry = {'year':curr_year, 'gp':season_gp, 'player_name':player_name, "fname":first_name, "lname":last_name, "team_abb":primary_abb, "position":position, "salary":salary, "contract_year":year, "expires":expires, "opt":opt, "NTC":NTC, "salary_counted":salary_counted, "entered_name":entered_name}
                # print entry
                entries.append(entry)
        if entries != []:
            db.insertRowDict(entries, 'excel_rosters', replace=True, insertMany=True, rid=0)
            db.conn.commit()
def name_parser(reverse_name, primary_abb):
    """Convert a "Last, First" roster string into (player_name, first_name, last_name).

    `primary_abb` is accepted for interface compatibility; it is only relevant
    when `player_map` carries overrides for specific parsed names.

    Bug fix: `first_name` used to be the *list* `reverse_name.split(', ')[1:]`
    instead of a string, so a Python list leaked into the `fname` field of
    every roster entry; it is now joined back into a single string.
    """
    # Manual name overrides keyed by parsed player name (currently empty).
    player_map = {
    }
    parts = reverse_name.split(', ')
    last_name = parts[0]
    first_name = ' '.join(parts[1:])
    player_name = ' '.join(reversed(parts))
    if player_name in player_map:
        first_name, last_name = player_map.get(player_name)
        player_name = first_name + ' ' + last_name
    return player_name, first_name, last_name
# Command-line entry point: `--year` selects the season to import (default 2020).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--year',type=int,default=2020)
    args = parser.parse_args()
process(args.year) | [
"connor.reed.92@gmail.com"
] | connor.reed.92@gmail.com |
4fe3e9d27a8d4cade72644c78c088f7bdddbeca3 | 7a4d54af2de65facd360212765de13ada863ec85 | /raspberry/raspberry.py | e1fc025662cc648c08ddb52270e4535c4eefb7a0 | [] | no_license | smahdavi4/RaspberryPi-CCTV | 1ae659e7b285b9ef858f184e7a279b3239761af4 | da2a52264fa18a67c145a68f85c5381b52580ea1 | refs/heads/master | 2023-06-21T14:12:51.085508 | 2021-08-04T15:17:05 | 2021-08-04T15:17:05 | 392,726,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,570 | py | import io
import logging
import time
from datetime import datetime
try:
from picamera import PiCamera
except ImportError:
logging.error("Can't import picamera. Please make sure picamera is installed and you run the code on raspberrypi.")
import numpy as np
from PIL import Image
from models import Schedules
from server_utils import server_uploader
from tgbot.tg_sender import send_image
from utils import get_schedules, get_subscribers
class ImageManager:
    """Keeps a short history of quantized camera frames and flags motion.

    Frames are reduced to coarse grayscale bins so sensor noise does not
    register as change; two thresholds distinguish "a frame worth storing"
    from "a suspicious amount of change".
    """

    # How many quantized frames to keep in the rolling history.
    MAX_IMAGE_BUFFER = 20
    # Grayscale bin edges: 0, 30, 60, ..., 240.
    IMAGE_QUANTIZATION_BINS = np.arange(0, 256, 30)
    # Fraction of pixels that must differ for "new" / "suspicious".
    NEW_IMAGE_THRESHOLD = 0.1
    SUSPICIOUS_IMAGE_THRESHOLD = 0.25

    def __init__(self):
        # Most-recent frame is at the end of the list.
        self.images = []

    def clear(self):
        """Drop the whole frame history."""
        self.images.clear()

    def quantize(self, arr_rgb):
        """Collapse an (H, W, 3) RGB array to an (H, W) array of bin values.

        Bug fix: ``np.digitize`` returns ``len(bins)`` for values >= the last
        bin edge (240), so ``bins[inds]`` used to raise IndexError on bright
        frames; the index is now clamped to the last bin.
        """
        bins = ImageManager.IMAGE_QUANTIZATION_BINS
        arr = arr_rgb.mean(axis=2)
        inds = np.digitize(arr, bins)
        inds = np.minimum(inds, len(bins) - 1)
        return bins[inds]

    def diff_images(self, im1, im2):
        """Return (sum of absolute differences, fraction of differing pixels)."""
        diff_num = (im1 != im2).sum() / np.prod(im1.shape)
        diff_pix = np.abs(im1 - im2).sum()
        return diff_pix, diff_num

    def next_image(self, new_image: np.ndarray):
        """Append a quantized frame to the history, evicting the oldest one."""
        self.images.append(self.quantize(new_image))
        # np.save("{}.npy".format(datetime.now().strftime('%d-%m-%Y-%H-%M-%S')), new_image)
        if len(self.images) > ImageManager.MAX_IMAGE_BUFFER:
            self.images.pop(0)

    def is_image_new(self, new_image: np.ndarray):
        """True when the frame differs enough from the last stored frame
        (always True while the history is empty)."""
        quantized_img = self.quantize(new_image)
        if len(self.images) == 0:
            return True
        last_image = self.images[-1]
        _, diff_num_pixels = self.diff_images(quantized_img, last_image)
        return diff_num_pixels > ImageManager.NEW_IMAGE_THRESHOLD

    def is_image_suspicious(self, new_image: np.ndarray):
        """True when the frame differs drastically from the last stored frame
        (always False while the history is empty)."""
        quantized_img = self.quantize(new_image)
        if len(self.images) == 0:
            return False
        last_image = self.images[-1]
        _, diff_num_pixels = self.diff_images(quantized_img, last_image)
        return diff_num_pixels > ImageManager.SUSPICIOUS_IMAGE_THRESHOLD
class CCTV:
    """Drives the Raspberry Pi camera: captures frames while a schedule is
    active, uploads changed frames and alerts subscribers on drastic change."""

    def __init__(self):
        self.camera = None
        self._is_camera_open = False
        self._image_manager = ImageManager()
        self._open_camera()

    def _open_camera(self):
        """Create the PiCamera handle (no-op when already open)."""
        if self._is_camera_open:
            return
        self.camera = PiCamera()
        self._is_camera_open = True

    def _close_camera(self):
        """Release the camera so other processes can use it."""
        if self._is_camera_open:
            self.camera.close()
            self._is_camera_open = False

    def take_photo(self):
        """Capture one JPEG frame; returns (raw JPEG bytes, RGB ndarray)."""
        stream = io.BytesIO()
        # with open('temp.jpg', 'rb') as f:
        # data = f.read()
        self.camera.capture(stream, format='jpeg')
        stream.seek(0)
        # data = io.BytesIO(data)
        return stream.read(), np.asarray(Image.open(stream))

    @staticmethod
    def is_active_schedule():
        """True when the current local time falls inside any configured schedule."""
        schedules = get_schedules()
        date_now = datetime.now()
        for schedule in schedules:
            week_ok = schedule.weekday == Schedules.WEEKDAYS[date_now.weekday()] or schedule.weekday == Schedules.All
            time_ok = schedule.start_time <= date_now.time() <= schedule.end_time
            if week_ok and time_ok:
                return True
        return False

    def broadcast_photo(self, photo_data: bytes, photo_array: np.ndarray):
        """Upload a changed frame; also alert subscribers when the change is
        drastic. Always records the frame in the history afterwards."""
        image_date = datetime.now()
        if self._image_manager.is_image_new(photo_array):  # Send it to storage server
            server_uploader.send_image(image_data=photo_data, image_date=image_date)
            if self._image_manager.is_image_suspicious(photo_array):
                for chat_id in get_subscribers():  # Send it to subscribers
                    try:
                        send_image(chat_id=chat_id, image_data=photo_data, caption=image_date.strftime('%d/%m/%Y %H:%M:%S'))
                    except Exception as e:
                        # One unreachable subscriber must not stop the others.
                        logging.exception(e)
        self._image_manager.next_image(photo_array)

    def start_watching(self):
        """Main loop: capture and broadcast roughly every 2 seconds while a
        schedule is active; otherwise release the camera and reset history."""
        while True:
            if self.is_active_schedule():
                try:
                    self._open_camera()
                    photo_data, photo_array = self.take_photo()
                    self.broadcast_photo(photo_data=photo_data, photo_array=photo_array)
                except Exception as e:
                    logging.error("Can't take image or broadcast it. Please check the error bellow.")
                    logging.exception(e)
            else:
                self._image_manager.clear()
                self._close_camera()
            time.sleep(2)
# Module-level singleton: constructing CCTV opens the camera at import time.
# NOTE(review): if the picamera import above failed, this raises NameError.
cctv = CCTV()
| [
"smahdavi4@gmail.com"
] | smahdavi4@gmail.com |
43df5f301b18273899ac00d28d4e1e587283aacb | 8dd7641c05bfeb6a9b0634575ea79589b412e680 | /Project_1/Old Code/sklearn.py | 8d8df757723b404c5bf6baf273e5926f5ace97ad | [] | no_license | Bobbycookie/Python_Class_Concordia | a1962a3ccff88a667fed79aec874c95dd6570b22 | dcb3fafbab6405c376bc65468ab617a908fb0a2d | refs/heads/master | 2020-08-30T13:54:20.263802 | 2019-11-18T14:24:52 | 2019-11-18T14:24:52 | 218,400,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | from sklearn.datasets import load_wine
# Bug fix: the module was imported as `ll_p2` but every call below referenced
# `my_script`, which raised NameError on the first use; alias it so both the
# import and the call sites agree.
import ll_p2 as my_script

# Load the sklearn wine dataset and massage it into the same layout as the
# course's .data files: class label first, then the 13 feature columns.
wine = load_wine()
target = wine.target
target = target.tolist()
# add 1 to target to make it like the .data file (classes 1..3 instead of 0..2)
target = [x + 1 for x in target]
data = wine.data
data = data.tolist()
# combine target as the first column of each data row
# (plain loop instead of a list comprehension used only for its side effects)
for idx in range(len(target)):
    data[idx].insert(0, target[idx])
output_list, headers = my_script.check_for_headers(data)
# convert the row-oriented list into a column-oriented dictionary
list_of_col_names = output_list.pop(0)
output_dict = [[row[i] for row in output_list] for i in range(len(list_of_col_names))]
output_dict = {list_of_col_names[idx]: item for idx, item in enumerate(output_dict)}
my_script.pairsplot(output_dict)
| [
"35229441+Bobbycookie@users.noreply.github.com"
] | 35229441+Bobbycookie@users.noreply.github.com |
7c057affec94a126275eba7d408fa3d7a9d47d01 | 733dea504a1cea20d1320014e79acbef1920ac5a | /hip_Project/hip/admin.py | f8afeb5a85de845cbac08f7e8eb4aeb0cd8a4c11 | [] | no_license | sabinaya/Hip-as-Django-Application | a6d4dc99ec3bd209e09da9507e49a0d2bc96e236 | 5e847065cbb1cc5b678485cc4fb5ac3245259258 | refs/heads/master | 2021-01-10T04:35:06.191334 | 2015-05-26T06:37:58 | 2015-05-26T06:37:58 | 36,115,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.contrib import admin
from hip.models import board
admin.site.register(board) | [
"abinaya.saravanan1105@gmail.com"
] | abinaya.saravanan1105@gmail.com |
df71c458465bd721cdce4709887f33bc9815e9a6 | 42e7dc544d7d9a5712b48f687f72f7e4730e4e03 | /app/religion/service.py | 0771beb5b9ec77e2a91f4eab2db0607836bb873d | [] | no_license | philipphager/tuk2-teen-pregnancy-api | bd76dc7309aa31aaa31b8311b6eec5234d2a5670 | 4a49c8499921c5308bde3e8e58f6462ff75f9b22 | refs/heads/master | 2021-04-30T04:08:53.008228 | 2018-02-23T00:22:38 | 2018-02-23T00:22:38 | 121,530,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | from app.database.cache import get_from_cache, add_to_cache
from app.database.database import execute_query
def get_religiousness_by_state():
    """Return the religiousness-by-state rows, serving from cache when possible.

    The SQL text itself is the cache key; on a miss the query is executed
    against the database and the result stored for subsequent calls.
    """
    sql = get_query()
    cached = get_from_cache(sql)
    print("DB Query: " + sql)
    if cached is not None:
        return cached
    rows = execute_query(sql)
    add_to_cache(sql, rows)
    return rows
def get_query():
    """Return the SQL that selects every row of the "Religions2010" table."""
    # Plain string literal: the previous f-string prefix had no placeholders
    # (pylint W1309); the returned text is byte-identical.
    return '''
    SELECT *
    FROM "Religions2010"
    '''
| [
"philipp.konstantin.hager@gmail.com"
] | philipp.konstantin.hager@gmail.com |
58ed9aadc5d1a6db2c5d8fb1b7a24e1067ab9743 | d9e43cd880f456254d5f640555079149f9d680c0 | /lie-detector2.py | b147de1d0bc975e25b130b92646e9b2b1c424a0f | [
"MIT"
] | permissive | mahimajeslani26/rnn-lie-detector | 1def32e26a5da29c9ad9bc0bd1949efea7b00966 | 0c4a2b12ac3feed7f14a0d88b84b66bdddd8c42f | refs/heads/master | 2020-03-19T00:02:19.913717 | 2018-06-02T13:30:04 | 2018-06-02T13:30:04 | 135,448,651 | 0 | 0 | null | 2018-05-30T13:42:32 | 2018-05-30T13:42:31 | null | UTF-8 | Python | false | false | 12,651 | py | import os
import fnmatch
import numpy as np
from sklearn.utils import shuffle
from sklearn.preprocessing import normalize
import tensorflow as tf
from tensorflow.contrib.rnn import BasicLSTMCell, BasicRNNCell, GRUCell
# Command-line hyper-parameters (TF1 flags); mirrored into Params below.
tf.flags.DEFINE_float("lr", 0.001, "learning rate")
tf.flags.DEFINE_integer('epochs', 5, 'number of epoch')
tf.flags.DEFINE_integer("hidden_size", 256, "hidden size for each layer")
tf.flags.DEFINE_integer('batch_size', 1, 'batch size')
tf.flags.DEFINE_integer('eval_every', 200,
                        'evaluation after number of train steps')
tf.flags.DEFINE_bool('normalize', False, 'normalize feature data')
tf.flags.DEFINE_float('dropout', 0.2, 'dropout rate')
tf.flags.DEFINE_string('model', 'GRU', 'RNN, GRU or LSTM')
tf.flags.DEFINE_string('data_dir', 'data', 'directory of original data files')
tf.flags.DEFINE_string('test_data_dir', 'test_data', 'directory of testing data files')
tf.flags.DEFINE_string('log_dir', 'tmp/runs/', 'directory to save log file')
tf.flags.DEFINE_bool('per_frame', True, 'RNN on per frame (row) data instead '
                                        'of taking the whole MFCC vector ')
FLAGS = tf.app.flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
class Params(object):
    """ hyper-parameters: snapshot of the command-line flags plus fields
    filled in at runtime (train_steps/eval_steps in main(), max_length in
    generate_data/generate_test_data). """
    lr = FLAGS.lr
    epochs = FLAGS.epochs
    hidden_size = FLAGS.hidden_size
    batch_size = FLAGS.batch_size
    # Computed in main() from dataset size; 0 until then.
    train_steps = 0
    eval_steps = 0
    eval_every = FLAGS.eval_every
    normalize = FLAGS.normalize
    dropout = FLAGS.dropout
    model = FLAGS.model
    data_dir = FLAGS.data_dir
    test_data_dir = FLAGS.test_data_dir
    log_dir = FLAGS.log_dir
    # Three classes: 0=True, 1=Lie Up, 2=Lie Down.
    num_classes = 3
    # Number of MFCC coefficients per frame.
    feature_length = 13
    # Longest MFCC sequence seen; set as a side effect of data loading.
    max_length = 0
    per_frame = FLAGS.per_frame
def generate_data(params):
    """ Extract data and transcript from FLAGS.data_dir
    Note: 0 indicate True, 1 indicate Lie Up, 2 indicate Lie Down for labels

    Walks every speaker directory, loads one MFCC matrix (.npy) per
    transcript line, zero-pads all matrices to the longest sequence
    (side effect: sets params.max_length) and returns shuffled
    (features, labels, sequence_length).
    """
    if not os.path.exists(params.data_dir):
        print("Data directory %s not found" % params.data_dir)
        exit()
    features = []
    labels = []
    sequence_length = []
    for subdir, dirs, files in os.walk(params.data_dir):
        for speaker in dirs:
            with open(os.path.join(
                    params.data_dir, speaker, 'transcripts.txt'), 'r') as f:
                transcripts = f.readlines()
            if not transcripts:
                continue
            files = sorted(fnmatch.filter(os.listdir(
                os.path.join(params.data_dir, speaker)), '*npy'))
            assert len(transcripts) == len(files)
            for i in range(len(transcripts)):
                # read MFCC vector from npy file
                # NOTE(review): uses FLAGS.data_dir while the walk above uses
                # params.data_dir -- identical values today, but should be unified.
                features.append(np.load(
                    os.path.join(FLAGS.data_dir, speaker, files[i])))
                # read label from transcripts: T=true, LU=lie up, LD=lie down
                label = transcripts[i].split()[1]
                if label.startswith('T'):
                    labels.append(0)
                elif label.startswith('LU'):
                    labels.append(1)
                elif label.startswith('LD'):
                    labels.append(2)
                else:
                    print("Incorrect label: %s" % label)
                    exit()
    # add padding to create equal length MFCC vectors
    params.max_length = max([feature.shape[0] for feature in features])
    for i in range(len(features)):
        # pad vectors with zero rows; remember the true length separately
        padding = params.max_length - features[i].shape[0]
        sequence_length.append(features[i].shape[0])
        features[i] = np.vstack(
            (features[i], np.zeros(shape=(padding, params.feature_length))))
    # convert to ndarray
    features, labels = np.asarray(features), np.asarray(labels)
    # normalize features
    if params.normalize:
        shape = features.shape
        # normalize function only takes 2D matrix
        features = np.reshape(features, newshape=(shape[0], shape[1] * shape[2]))
        features = normalize(features, norm='l2')
        features = np.reshape(features, newshape=shape)
    assert features.shape[0] == labels.shape[0] == len(sequence_length)
    # randomly shuffle data (fixed seed for reproducibility)
    features, labels, sequence_length = \
        shuffle(features, labels, sequence_length, random_state=1)
    return features, labels, sequence_length
def generate_test_data(params):
    """ Extract data and transcript from FLAGS.test_data_dir
    Note: 0 indicate True, 1 indicate Lie Up, 2 indicate Lie Down for labels

    Same pipeline as generate_data() but over the held-out test directory.
    NOTE(review): this duplicates generate_data almost line-for-line; the two
    should eventually share one parameterized helper.
    """
    if not os.path.exists(params.test_data_dir):
        print("Test Data directory %s not found" % params.test_data_dir)
        exit()
    features = []
    labels = []
    sequence_length = []
    for subdir, dirs, files in os.walk(params.test_data_dir):
        for speaker in dirs:
            with open(os.path.join(
                    params.test_data_dir, speaker, 'transcripts.txt'), 'r') as f:
                transcripts = f.readlines()
            if not transcripts:
                continue
            files = sorted(fnmatch.filter(os.listdir(
                os.path.join(params.test_data_dir, speaker)), '*npy'))
            assert len(transcripts) == len(files)
            for i in range(len(transcripts)):
                # read MFCC vector from npy file
                # NOTE(review): FLAGS.test_data_dir vs params.test_data_dir --
                # identical values today, but should be unified.
                features.append(np.load(
                    os.path.join(FLAGS.test_data_dir, speaker, files[i])))
                # read label from transcripts: T=true, LU=lie up, LD=lie down
                label = transcripts[i].split()[1]
                if label.startswith('T'):
                    labels.append(0)
                elif label.startswith('LU'):
                    labels.append(1)
                elif label.startswith('LD'):
                    labels.append(2)
                else:
                    print("Incorrect label: %s" % label)
                    exit()
    # add padding to create equal length MFCC vectors
    params.max_length = max([feature.shape[0] for feature in features])
    for i in range(len(features)):
        # pad vectors with zero rows; remember the true length separately
        padding = params.max_length - features[i].shape[0]
        sequence_length.append(features[i].shape[0])
        features[i] = np.vstack(
            (features[i], np.zeros(shape=(padding, params.feature_length))))
    # convert to ndarray
    features, labels = np.asarray(features), np.asarray(labels)
    # normalize features
    if params.normalize:
        shape = features.shape
        # normalize function only takes 2D matrix
        features = np.reshape(features, newshape=(shape[0], shape[1] * shape[2]))
        features = normalize(features, norm='l2')
        features = np.reshape(features, newshape=shape)
    assert features.shape[0] == labels.shape[0] == len(sequence_length)
    # randomly shuffle data (fixed seed for reproducibility)
    features, labels, sequence_length = \
        shuffle(features, labels, sequence_length, random_state=1)
    return features, labels, sequence_length
def metric_fn(labels, predictions):
    """Build the evaluation metric ops: accuracy, precision and recall."""
    metrics = {}
    metrics['eval_accuracy'] = tf.metrics.accuracy(labels, predictions)
    metrics['eval_precision'] = tf.metrics.precision(labels, predictions)
    metrics['eval_recall'] = tf.metrics.recall(labels, predictions)
    return metrics
def rnn(features, mode, params):
    """ Recurrent model

    Builds an RNN/GRU/LSTM (params.model) over the MFCC input and returns
    per-class logits of shape (batch_size, num_classes).
    NOTE(review): the per_frame branch reads sequence_length[0], which is
    only correct for batch_size == 1 -- confirm before raising batch size.
    """
    if params.model == "LSTM":
        cell = BasicLSTMCell(params.hidden_size)
    elif params.model == "GRU":
        cell = GRUCell(params.hidden_size)
    else:
        cell = BasicRNNCell(params.hidden_size)
    initial_state = cell.zero_state(params.batch_size, dtype=tf.float64)
    if params.per_frame:
        # convert input from (batch_size, max_time, ...) to
        # (max_time, batch_size, ...)
        inputs = tf.transpose(features['feature'], [1, 0, 2])
        sequence_length = tf.reshape(
            features['sequence_length'],
            shape=(params.batch_size,)
        )
        outputs, state = tf.nn.dynamic_rnn(
            cell,
            inputs=inputs,
            initial_state=initial_state,
            sequence_length=sequence_length,
            time_major=True
        )
        # get output from the last state (last real frame, before padding)
        outputs = outputs[features['sequence_length'][0] - 1]
    else:
        # reshape MFCC vector to fit in one time step
        inputs = tf.reshape(
            features['feature'],
            shape=(1, params.batch_size, params.max_length * params.feature_length)
        )
        outputs, state = tf.nn.dynamic_rnn(
            cell,
            inputs=inputs,
            initial_state=initial_state,
            time_major=True
        )
        outputs = tf.reshape(
            outputs,
            shape=(params.batch_size, params.hidden_size)
        )
    # apply dropout (active only in TRAIN mode)
    dropout = tf.layers.dropout(
        outputs,
        rate=params.dropout,
        training=mode == tf.estimator.ModeKeys.TRAIN
    )
    # linear projection to class logits; softmax is applied by the caller
    logits = tf.layers.dense(
        dropout,
        units=params.num_classes,
        activation=None
    )
    return logits
def model_fn(features, labels, mode, params):
    """ Estimator model function

    Wires the rnn() logits into loss, Adam train op, debug prints and
    TensorBoard summaries, and returns the EstimatorSpec for the given mode.
    NOTE(review): tf.Print is a deprecated TF1 debugging op -- the wrapped
    tensors print their values every time they are evaluated.
    """
    logits = rnn(features, mode, params)
    predictions = tf.argmax(tf.nn.softmax(logits), axis=-1)
    predictions = tf.Print(predictions, [predictions])
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels,
            logits=logits
        )
    )
    loss = tf.Print(loss, [loss])
    train_op = tf.train.AdamOptimizer(params.lr).minimize(
        loss=loss,
        global_step=tf.train.get_global_step()
    )
    # metrics summary
    tf.summary.text('prediction', tf.as_string(predictions))
    tf.summary.text('label', tf.as_string(labels))
    accuracy = tf.metrics.accuracy(labels, predictions)
    tf.summary.scalar('training_accuracy', accuracy[1])
    precision = tf.metrics.precision(labels, predictions)
    tf.summary.scalar('training_precision', precision[1])
    recall = tf.metrics.recall(labels, predictions)
    tf.summary.scalar('training_recall', recall[1])
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            eval_metric_ops=metric_fn(labels, predictions)
        )
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op
    )
def main():
    """Load train/test data, build input pipelines and run
    tf.estimator.train_and_evaluate for the configured number of epochs."""
    # initialize model parameters
    params = Params()
    # check if log directory exist
    if not os.path.exists(params.log_dir):
        os.makedirs(params.log_dir)
    features, labels, sequence_length = generate_data(params)
    test_features, test_labels, test_sequence_length = generate_test_data(params)
    # index of training and testing data split
    # split1 = int(len(labels) * 0.8)
    # split2 = int(len(labels) * 0.1)
    # calculate the amount of train and test steps
    params.train_steps = int(len(labels) / params.batch_size) * params.epochs
    params.eval_steps = int((len(test_labels)) / params.batch_size)
    # params.predict_steps = int(split2 / params.batch_size)
    print(params.train_steps)
    print(params.eval_steps)
    # Input functions close over the arrays loaded above.
    def train_input_fn(params):
        dataset = tf.data.Dataset.from_tensor_slices((
            {
                'feature': features[:],
                'sequence_length': sequence_length[:]
            },
            labels[:]
        ))
        # repeat() so training can run for multiple epochs
        dataset = dataset.repeat().batch(params.batch_size)
        x, y = dataset.make_one_shot_iterator().get_next()
        return x, y
    def eval_input_fn(params):
        dataset = tf.data.Dataset.from_tensor_slices((
            {
                'feature': test_features[:],
                'sequence_length': test_sequence_length[:]
            },
            test_labels[:]
        ))
        dataset = dataset.batch(params.batch_size)
        x, y = dataset.make_one_shot_iterator().get_next()
        return x, y
    # setup Estimator configuration
    config = tf.estimator.RunConfig(
        save_checkpoints_steps=params.eval_every
    )
    # define Estimator class for model
    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=params.log_dir,
        config=config,
        params=params
    )
    train_spec = tf.estimator.TrainSpec(
        input_fn=train_input_fn,
        max_steps=params.train_steps
    )
    eval_spec = tf.estimator.EvalSpec(
        input_fn=eval_input_fn,
        steps=params.eval_steps
    )
    # train and evaluate model
    tf.estimator.train_and_evaluate(
        estimator=estimator,
        train_spec=train_spec,
        eval_spec=eval_spec
    )
# Script entry point.
if __name__ == "__main__":
    main()
| [
"mahimajeslani26@gmail.com"
] | mahimajeslani26@gmail.com |
e3c4271ba3bc0635994d4d7e72b837d7e636eeea | d298f70abb0a83ef1db858b97dc648320250a8a0 | /mainScreen.py | de6b7027e05ab2d87f0ac9370952a718cac9045a | [] | no_license | lunhan/eeg-game | fdcdb7be9eda1c8a52a794c0bcbffea523040040 | d0d2cb2de31bb02aec7769d93686941b47653ec3 | refs/heads/main | 2023-09-01T04:52:28.372818 | 2021-09-28T10:14:08 | 2021-09-28T10:14:08 | 410,726,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,007 | py | import pygame
import pygame.freetype
from pygame.sprite import Sprite
from pygame.rect import Rect
from enum import Enum
#For the other class, write it into an individiual file and import like this
from UIElement import UIElement
# Load and scale the UI artwork once at import time.
img = pygame.image.load("img/nosignal_v1.png")  # "no signal" indicator
img = pygame.transform.scale(img, (60,60))
main_img = pygame.image.load('img/gameMain.png')  # title-screen artwork
main_img = pygame.transform.scale(main_img, (260, 260))
logo = pygame.image.load("img/logo.png")  # window/taskbar icon
logo = pygame.transform.scale(logo, (32, 32))
rule = pygame.image.load("img/rule.png")  # how-to-play instructions
rule = pygame.transform.scale(rule, (633, 513))
# Colors we are going to reuse (RGB tuples).
BLUE = (106, 159, 181)
BG_1 = (83, 228, 179)  # mint green background
TXT_1 = (0, 0, 0)  # black text
PINK = (234, 208, 209)
WHITE = (255, 255, 255)
# The function below could write into the class
# def create_surface_with_text(text, font_size, text_rgb, bg_rgb):
# The class below could write into an independent file
# class UIElement(Sprite):
class GameState(Enum):
    # Identifies which screen the main loop should show next; each screen
    # function returns one of these values.
    QUIT = -1
    TITLE = 0  # main page
    NEWGAME = 1
    FINISH = 2
    INFO = 3
def main():
    # Entry point: create the window, then hand control between the screen
    # functions until one of them returns GameState.QUIT.
    pygame.init()
    '''change: the size of main window and elememt position'''
    screen = pygame.display.set_mode((1000, 800)) #windows_size
    game_state = GameState.TITLE #start with main(title) screen
    #set app name on top bar
    pygame.display.set_caption("EEG-Game")
    #set icon:
    pygame.display.set_icon(logo)
    # create a ui element
    # quit_btn = UIElement(
    #     center_position=(500, 650), #make sure our element will be center//The center right justfy 500 and top justify 700
    #     font_size=30,
    #     bg_rgb=BLUE,
    #     text_rgb=WHITE,
    #     text="Quit",
    #     action=GameState.QUIT,
    # )
    # main loop
    while True:
        #The function below has move to independent def refere to different pages
        '''
        mouse_up = False
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONUP and event.button == 1:
                mouse_up = True
        screen.fill(BLUE)
        ui_action = quit_btn.update(pygame.mouse.get_pos(), mouse_up)
        if ui_action is not None:
            return
        quit_btn.draw(screen)
        pygame.display.flip()
        '''
        # Each screen function blocks until the user picks the next state.
        if game_state == GameState.TITLE:
            game_state = title_screen(screen)
        if game_state == GameState.NEWGAME:
            game_state = play_level(screen)
        if game_state == GameState.FINISH:
            game_state = game_finish(screen)
        if game_state == GameState.INFO:
            game_state = game_info(screen)
        if game_state == GameState.QUIT:
            pygame.quit()
            return
def title_screen(screen):  # to have our buttons, check the main loop
    # Title page: Start / How to play / Quit. Blocks until a button is
    # clicked and returns the corresponding GameState.
    # start and quit button will be here
    start_btn = UIElement(
        center_position=(500, 470),
        font_size=40,
        bg_rgb=BG_1,
        text_rgb=TXT_1,
        text="Start",
        action=GameState.NEWGAME,
    )
    quit_btn = UIElement(
        center_position=(500, 670),
        font_size=30,
        bg_rgb=BG_1,
        text_rgb=TXT_1,
        text="Quit",
        action=GameState.QUIT,
    )
    info_btn = UIElement(
        center_position=(500, 570),
        font_size=30,
        bg_rgb=BG_1,
        text_rgb=TXT_1,
        text="How to play",
        action=GameState.INFO,
    )
    buttons = [start_btn, info_btn, quit_btn]
    while True:
        # A click is registered on left-mouse-button release.
        mouse_up = False
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONUP and event.button == 1:
                mouse_up = True
        screen.fill(BG_1)  # re-draw the background
        screen.blit(img, (930, 10))
        screen.blit(main_img, (370, 100))
        for button in buttons:
            ui_action = button.update(pygame.mouse.get_pos(), mouse_up)
            if ui_action is not None:
                return ui_action
            button.draw(screen)
        pygame.display.flip()
def play_level(screen):
    """Run the gameplay screen; returns GameState.FINISH when its button is clicked."""
    finish_button = UIElement(
        center_position=(140, 770),
        font_size=20,
        bg_rgb=BLUE,
        text_rgb=TXT_1,
        text="Game finished",
        action=GameState.FINISH,
    )
    while True:
        # A click is registered on left-mouse-button release.
        clicked = any(
            event.type == pygame.MOUSEBUTTONUP and event.button == 1
            for event in pygame.event.get()
        )
        screen.fill(BLUE)
        next_state = finish_button.update(pygame.mouse.get_pos(), clicked)
        if next_state is not None:
            return next_state
        finish_button.draw(screen)
        pygame.display.flip()
def game_finish(screen):
    """Show the game-over screen; returns GameState.TITLE when the button is clicked."""
    back_to_title = UIElement(
        center_position=(500, 500),
        font_size=20,
        bg_rgb=BLUE,
        text_rgb=WHITE,
        text="Game Over, Back to main page",
        action=GameState.TITLE,
    )
    while True:
        released = False
        for evt in pygame.event.get():
            released = released or (
                evt.type == pygame.MOUSEBUTTONUP and evt.button == 1
            )
        screen.fill(BLUE)
        action = back_to_title.update(pygame.mouse.get_pos(), released)
        if action is not None:
            return action
        back_to_title.draw(screen)
        pygame.display.flip()
def game_info(screen):
    # How-to-play page: shows the rules image plus a "Back to main" button;
    # returns GameState.TITLE when that button is clicked.
    info_btn = UIElement(
        center_position=(500, 700),
        font_size=30,
        bg_rgb=PINK,
        text_rgb=TXT_1,
        text="Back to main",
        action=GameState.TITLE,
    )
    while True:
        mouse_up = False
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONUP and event.button == 1:
                mouse_up = True
        screen.fill(PINK)
        ui_action = info_btn.update(pygame.mouse.get_pos(), mouse_up)
        if ui_action is not None:
            return ui_action
        info_btn.draw(screen)
        # Rules image is drawn after the button, on every frame.
        screen.blit(rule, (193, 80))
        pygame.display.flip()
# call main when the script is run
if __name__ == "__main__":
main() | [
"mlunhan97@gmail.com"
] | mlunhan97@gmail.com |
db0620b89039a20cdd6ab7feef05be0ce277ccd0 | f1f240d94600a6d267c30235ef9ad7a3efc488fb | /GosProgect/wsgi.py | 6a993185f74774c0ee1d32e66113db55465de74e | [] | no_license | JolyBin/EAC | 3cc576690e82ec876f08cffb09e9dcee34e24593 | 03d6b5e4e7beb52d7757464151d5a961118fbb71 | refs/heads/master | 2020-06-19T16:22:24.161033 | 2019-07-14T04:11:53 | 2019-07-14T04:11:53 | 196,781,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for GosProgect project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings module unless one is already configured.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'GosProgect.settings')
# WSGI servers (gunicorn, mod_wsgi, ...) import this module and call `application`.
application = get_wsgi_application()
| [
"vladavenir@mail.ru"
] | vladavenir@mail.ru |
a0e0bfbddd2d9003785d592b78d9b8475e63b70c | 097eae4e0190da97570ae7db748fca306f977fbd | /py/learn/test/class/example.py | f8642e40064bba601cac875200d08370551f363f | [] | no_license | jiaolj/other | 42257c593495d97ab98b9a9af00d3791ccce7a57 | 78d0366cbd599f4dde7bf6e44ca4cfc373132418 | refs/heads/master | 2021-05-24T04:14:03.829126 | 2016-08-28T07:40:49 | 2016-08-28T07:40:49 | 64,064,262 | 0 | 1 | null | 2020-07-23T17:05:36 | 2016-07-24T12:25:56 | JavaScript | UTF-8 | Python | false | false | 358 | py | # -*- coding: utf-8 -*-
class b(object):
    """Base class holding a single integer counter attribute ``t``."""

    def __init__(self):
        # Counter starts at 2.
        self.t = 2

    def getb(self):
        # Bump the counter by one.
        self.t = self.t + 1
class a(b):
    # ---- If __init__ is not declared, the base class __init__ is inherited;
    # declaring it here only demonstrates where custom attributes would go.
    def __init__(self):
        b.__init__(self)
    def get(self):
        print 1
# Demo: the subclass instance uses the counter inherited from b.
temp=a()
temp.getb()
print temp.t | [
"841232468@qq.com"
] | 841232468@qq.com |
f3d109ee8baa41ca18eaa3f3d511d490209b0c12 | 0619b1ba176456c4b62d78d6a72fc4d9a9084287 | /thesite/communication_app/forms.py | 4eabf764b1d1037b42e5497319e87205eb1f6f36 | [
"Apache-2.0"
] | permissive | jacinda/petwitter | c13dd43a5b76786f5d5c5c3f29420153cb5a16c7 | ea7ffa16b8d8b1207f04ace619b31dba4efc45bc | refs/heads/master | 2021-01-13T06:38:31.439749 | 2015-04-15T17:25:03 | 2015-04-15T17:25:03 | 33,678,730 | 0 | 0 | null | 2015-04-09T16:02:42 | 2015-04-09T16:02:40 | Python | UTF-8 | Python | false | false | 699 | py | from django import forms
import communication_app.models
class PetForm(forms.ModelForm):
    """ModelForm for Pet exposing only the name, styled as a Bootstrap input."""

    class Meta:
        model = communication_app.models.Pet
        fields = ['name']

    def __init__(self, *args, **kwargs):
        super(PetForm, self).__init__(*args, **kwargs)
        # Replace the default widget with one carrying the Bootstrap class.
        widget_attrs = {'class': 'form-control'}
        self.fields['name'].widget = forms.TextInput(attrs=widget_attrs)
class UpdateForm(forms.ModelForm):
    """ModelForm for Update exposing only the text, styled as a Bootstrap input."""

    class Meta:
        model = communication_app.models.Update
        fields = ['text']

    def __init__(self, *args, **kwargs):
        super(UpdateForm, self).__init__(*args, **kwargs)
        # Replace the default widget with one carrying the Bootstrap class.
        widget_attrs = {'class': 'form-control'}
        self.fields['text'].widget = forms.TextInput(attrs=widget_attrs)
| [
"asheesh@asheesh.org"
] | asheesh@asheesh.org |
e16794ecbbdb589fe47bb9e06f800086e8447133 | 05e6440fe5c9e5f84d6af6dd577adda823d65b54 | /sktime/transformations/series/acf.py | 52609eecd0f20dd735a1c255c71001cc1e4c3219 | [
"BSD-3-Clause"
] | permissive | astrojuanlu/sktime | f3e7e0c52b5013dbb7dfb39ca0f7a8744a96a4e7 | 9ca7587bedc861b039a3d9a822ed7ad03ba935ad | refs/heads/main | 2023-06-28T13:22:48.161020 | 2021-07-17T08:35:36 | 2021-07-17T08:35:36 | 386,907,232 | 1 | 0 | BSD-3-Clause | 2021-07-17T10:28:49 | 2021-07-17T10:28:49 | null | UTF-8 | Python | false | false | 4,316 | py | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
"""
Auto-correlation transformations.
Module :mod:`sktime.transformations.series` implements auto-correlation
transformers.
"""
__author__ = ["Afzal Ansari"]
__all__ = ["AutoCorrelationTransformer", "PartialAutoCorrelationTransformer"]
import pandas as pd
from statsmodels.tsa.stattools import acf
from statsmodels.tsa.stattools import pacf
from sktime.transformations.base import _SeriesToSeriesTransformer
from sktime.utils.validation.series import check_series
class AutoCorrelationTransformer(_SeriesToSeriesTransformer):
    """
    Auto-correlation transformer.

    Thin wrapper around ``statsmodels.tsa.stattools.acf``; the constructor
    arguments are forwarded to it unchanged.

    Parameters
    ----------
    adjusted : bool, optional (default=False)
        Use n - k instead of n as denominator in the autocovariance.
    n_lags : int, optional (default=None)
        Number of lags to return autocorrelation for.
    qstat : bool, optional (default=False)
        Also compute the Ljung-Box Q statistics.
    fft : bool, optional (default=False)
        Compute the ACF via FFT.
    missing : str, optional (default="none")
        How NaNs are handled ('none', 'raise', 'conservative', 'drop').

    Example
    -------
    >>> from sktime.transformations.series.acf import AutoCorrelationTransformer
    >>> from sktime.datasets import load_airline
    >>> y = load_airline()
    >>> transformer = AutoCorrelationTransformer(n_lags=12)
    >>> y_hat = transformer.fit_transform(y)
    """

    _tags = {"univariate-only": True, "fit-in-transform": True}

    def __init__(
        self,
        adjusted=False,
        n_lags=None,
        qstat=False,
        fft=False,
        missing="none",
    ):
        self.adjusted = adjusted
        self.n_lags = n_lags
        self.qstat = qstat
        self.fft = fft
        self.missing = missing
        super(AutoCorrelationTransformer, self).__init__()

    def transform(self, Z, X=None):
        """Transform data.

        Parameters
        ----------
        Z : pd.Series
            Series to transform
        X : pd.DataFrame, optional (default=None)
            Exogenous data used in transformation

        Returns
        -------
        Zt : pd.Series
            Transformed series
        """
        self.check_is_fitted()
        z = check_series(Z, enforce_univariate=True)
        # Passing an alpha value other than None would return confidence intervals
        # and break the signature of the series-to-series transformer
        zt = acf(
            z,
            adjusted=self.adjusted,
            nlags=self.n_lags,
            qstat=self.qstat,
            fft=self.fft,
            alpha=None,
            missing=self.missing,
        )
        return pd.Series(zt)
class PartialAutoCorrelationTransformer(_SeriesToSeriesTransformer):
    """
    Partial auto-correlation transformer.

    Thin wrapper around ``statsmodels.tsa.stattools.pacf``.

    Parameters
    ----------
    n_lags : int, optional (default=None)
        largest lag for which pacf is returned
    method : str {'ywadjusted', 'ywmle', 'ols'}
        specifies which method for the calculations to use:

        - yw or ywadjusted : yule walker with bias correction in denominator
          for acovf. Default.
        - ywm or ywmle : yule walker without bias correction
        - ols - regression of time series on lags of it and on constant
        - ld or ldunbiased : Levinson-Durbin recursion with bias correction
        - ldb or ldbiased : Levinson-Durbin recursion without bias correction

    Example
    -------
    >>> from sktime.transformations.series.acf import PartialAutoCorrelationTransformer
    >>> from sktime.datasets import load_airline
    >>> y = load_airline()
    >>> transformer = PartialAutoCorrelationTransformer(n_lags=12)
    >>> y_hat = transformer.fit_transform(y)
    """

    _tags = {"univariate-only": True, "fit-in-transform": True}

    def __init__(
        self,
        n_lags=None,
        method="ywadjusted",
    ):
        self.n_lags = n_lags
        self.method = method
        super(PartialAutoCorrelationTransformer, self).__init__()

    def transform(self, Z, X=None):
        """Transform data.

        Parameters
        ----------
        Z : pd.Series
            Series to transform
        X : pd.DataFrame, optional (default=None)
            Exogenous data used in transformation

        Returns
        -------
        Zt : pd.Series
            Transformed series
        """
        self.check_is_fitted()
        z = check_series(Z, enforce_univariate=True)
        # Passing an alpha value other than None would return confidence intervals
        # and break the signature of the series-to-series transformer
        zt = pacf(z, nlags=self.n_lags, method=self.method, alpha=None)
        return pd.Series(zt)
| [
"noreply@github.com"
] | noreply@github.com |
55bf5b769ce8bafe053fe39564ed13cc2e3360c2 | 974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184 | /sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2022_10_01_preview/operations/_component_containers_operations.py | 53f9c4e0361d8779eb7f2a89e6c4f5d84f60bf39 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | gaoyp830/azure-sdk-for-python | 4816f04c554dcffb7510a6b7044b0c86a2dd32e1 | 1c66defa502b754abcc9e5afa444ca03c609342f | refs/heads/master | 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 | MIT | 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null | UTF-8 | Python | false | false | 22,292 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
skip = kwargs.pop('skip', None) # type: Optional[str]
list_view_type = kwargs.pop('list_view_type', None) # type: Optional[Union[str, "_models.ListViewType"]]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if skip is not None:
query_parameters['$skip'] = _SERIALIZER.query("skip", skip, 'str')
if list_view_type is not None:
query_parameters['listViewType'] = _SERIALIZER.query("list_view_type", list_view_type, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class ComponentContainersOperations(object):
"""ComponentContainersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name, # type: str
workspace_name, # type: str
skip=None, # type: Optional[str]
list_view_type=None, # type: Optional[Union[str, "_models.ListViewType"]]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ComponentContainerResourceArmPaginatedResult"]
"""List component containers.
List component containers.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param skip: Continuation token for pagination.
:type skip: str
:param list_view_type: View type for including/excluding (for example) archived entities.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
:keyword api_version: Api Version. The default value is "2022-10-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentContainerResourceArmPaginatedResult or
the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ComponentContainerResourceArmPaginatedResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ComponentContainerResourceArmPaginatedResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
api_version=api_version,
skip=skip,
list_view_type=list_view_type,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
api_version=api_version,
skip=skip,
list_view_type=list_view_type,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ComponentContainerResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete container.
Delete container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name.
:type name: str
:keyword api_version: Api Version. The default value is "2022-10-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
api_version=api_version,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ComponentContainer"
"""Get container.
Get container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name.
:type name: str
:keyword api_version: Api Version. The default value is "2022-10-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ComponentContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ComponentContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}'} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
body, # type: "_models.ComponentContainer"
**kwargs # type: Any
):
# type: (...) -> "_models.ComponentContainer"
"""Create or update container.
Create or update container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name.
:type name: str
:param body: Container entity to create or update.
:type body: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:keyword api_version: Api Version. The default value is "2022-10-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ComponentContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'ComponentContainer')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ComponentContainer', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ComponentContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}'} # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
10b0d6c77a5a22b76ba2d6593ccd3657539ce9fd | 4a36b5979b0753b32cff3956fd97fb8ed8b11e84 | /1.0/_downloads/469209d8040c0923f6b4f925074d58d7/evoked_topomap.py | f677e3d7f02abfe8f6f3546a99379b408253479f | [] | permissive | mne-tools/mne-tools.github.io | 8aac7ae10bf2faeeb875b9a351a5530dc0e53154 | 495e878adc1ef3374e3db88604504d7542b01194 | refs/heads/main | 2023-09-03T07:06:00.660557 | 2023-09-03T04:10:18 | 2023-09-03T04:10:18 | 35,639,371 | 12 | 16 | BSD-3-Clause | 2023-05-05T19:04:32 | 2015-05-14T22:04:23 | HTML | UTF-8 | Python | false | false | 5,921 | py | # -*- coding: utf-8 -*-
"""
.. _ex-evoked-topomap:
========================================
Plotting topographic maps of evoked data
========================================
Load evoked data and plot topomaps for selected time points using multiple
additional options.
"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
# Tal Linzen <linzen@nyu.edu>
# Denis A. Engeman <denis.engemann@gmail.com>
# Mikołaj Magnuski <mmagnuski@swps.edu.pl>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
# %%
# sphinx_gallery_thumbnail_number = 5
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
print(__doc__)
path = sample.data_path()
fname = path / 'MEG' / 'sample' / 'sample_audvis-ave.fif'
# load evoked corresponding to a specific condition
# from the fif file and subtract baseline
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
# %%
# Basic :func:`~mne.viz.plot_topomap` options
# -------------------------------------------
#
# We plot evoked topographies using :func:`mne.Evoked.plot_topomap`. The first
# argument, ``times`` allows to specify time instants (in seconds!) for which
# topographies will be shown. We select timepoints from 50 to 150 ms with a
# step of 20ms and plot magnetometer data:
times = np.arange(0.05, 0.151, 0.02)
evoked.plot_topomap(times, ch_type='mag', time_unit='s')
# %%
# If times is set to None at most 10 regularly spaced topographies will be
# shown:
evoked.plot_topomap(ch_type='mag', time_unit='s')
# %%
# We can use ``nrows`` and ``ncols`` parameter to create multiline plots
# with more timepoints.
all_times = np.arange(-0.2, 0.5, 0.03)
evoked.plot_topomap(all_times, ch_type='mag', time_unit='s',
ncols=8, nrows='auto')
# %%
# Instead of showing topographies at specific time points we can compute
# averages of 50 ms bins centered on these time points to reduce the noise in
# the topographies:
evoked.plot_topomap(times, ch_type='mag', average=0.05, time_unit='s')
# %%
# We can plot gradiometer data (plots the RMS for each pair of gradiometers)
evoked.plot_topomap(times, ch_type='grad', time_unit='s')
# %%
# Additional :func:`~mne.viz.plot_topomap` options
# ------------------------------------------------
#
# We can also use a range of various :func:`mne.viz.plot_topomap` arguments
# that control how the topography is drawn. For example:
#
# * ``cmap`` - to specify the color map
# * ``res`` - to control the resolution of the topographies (lower resolution
# means faster plotting)
# * ``outlines='skirt'`` to see the topography stretched beyond the head circle
# * ``contours`` to define how many contour lines should be plotted
evoked.plot_topomap(times, ch_type='mag', cmap='Spectral_r', res=32,
outlines='skirt', contours=4, time_unit='s')
# %%
# If you look at the edges of the head circle of a single topomap you'll see
# the effect of extrapolation. There are three extrapolation modes:
#
# - ``extrapolate='local'`` extrapolates only to points close to the sensors.
# - ``extrapolate='head'`` extrapolates out to the head circle.
# - ``extrapolate='box'`` extrapolates to a large box stretching beyond the
# head circle.
#
# The default value ``extrapolate='auto'`` will use ``'local'`` for MEG sensors
# and ``'head'`` otherwise. Here we show each option:
extrapolations = ['local', 'head', 'box']
fig, axes = plt.subplots(figsize=(7.5, 4.5), nrows=2, ncols=3)
# Here we look at EEG channels, and use a custom head sphere to get all the
# sensors to be well within the drawn head surface
for axes_row, ch_type in zip(axes, ('mag', 'eeg')):
for ax, extr in zip(axes_row, extrapolations):
evoked.plot_topomap(0.1, ch_type=ch_type, size=2, extrapolate=extr,
axes=ax, show=False, colorbar=False,
sphere=(0., 0., 0., 0.09))
ax.set_title('%s %s' % (ch_type.upper(), extr), fontsize=14)
fig.tight_layout()
# %%
# More advanced usage
# -------------------
#
# Now we plot magnetometer data as topomap at a single time point: 100 ms
# post-stimulus, add channel labels, title and adjust plot margins:
evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
size=6, res=128, title='Auditory response',
time_unit='s')
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
# %%
# We can also highlight specific channels by adding a mask, to e.g. mark
# channels exceeding a threshold at a given time:
# Define a threshold and create the mask
mask = evoked.data > 1e-13
# Select times and plot
times = (0.09, 0.1, 0.11)
evoked.plot_topomap(times, ch_type='mag', time_unit='s', mask=mask,
mask_params=dict(markersize=10, markerfacecolor='y'))
# %%
# Or by manually picking the channels to highlight at different times:
times = (0.09, 0.1, 0.11)
_times = ((np.abs(evoked.times - t)).argmin() for t in times)
significant_channels = [
('MEG 0231', 'MEG 1611', 'MEG 1621', 'MEG 1631', 'MEG 1811'),
('MEG 2411', 'MEG 2421'),
('MEG 1621')]
_channels = [np.in1d(evoked.ch_names, ch) for ch in significant_channels]
mask = np.zeros(evoked.data.shape, dtype='bool')
for _chs, _time in zip(_channels, _times):
mask[_chs, _time] = True
evoked.plot_topomap(times, ch_type='mag', time_unit='s', mask=mask,
mask_params=dict(markersize=10, markerfacecolor='y'))
# %%
# Animating the topomap
# ---------------------
#
# Instead of using a still image we can plot magnetometer data as an animation,
# which animates properly only in matplotlib interactive mode.
# sphinx_gallery_thumbnail_number = 9
times = np.arange(0.05, 0.151, 0.01)
fig, anim = evoked.animate_topomap(
times=times, ch_type='mag', frame_rate=2, time_unit='s', blit=False)
| [
"dan@mccloy.info"
] | dan@mccloy.info |
698bd901f3f50bfff119ca3f858802997929a2c9 | 9cd922d37c992379962885da86f0f0c43ef47a5a | /test/test_board_filler.py | e041cf1393781801fd13249a2a58447fbf3063ad | [] | no_license | bbattino/chess-engine | d7ef975dcf7b40b37def6a6d92a422153733f8fc | 9975d28d2ebeb6ad517710a3a17b00dddb96fc86 | refs/heads/master | 2020-04-09T15:17:41.199328 | 2018-12-14T07:51:33 | 2018-12-15T23:29:49 | 160,421,457 | 0 | 0 | null | 2018-12-11T17:18:13 | 2018-12-04T21:32:01 | Python | UTF-8 | Python | false | false | 2,003 | py | from unittest import TestCase
from tools.board_filler import BoardFiller
from pieces import Bishop, King, Knight, Pawn, Queen, Rook
from config import WHITE, BLACK
class TestBoardFiller(TestCase):
def test_fill_from_text(self):
board_filler = BoardFiller()
board = board_filler.fill(text='rnbqk.../p......../......../......../......../......../P......./RNBQK...')
self.assert_right_piece(board, 0, 0, Rook, WHITE)
self.assert_right_piece(board, 1, 0, Knight, WHITE)
self.assert_right_piece(board, 2, 0, Bishop, WHITE)
self.assert_right_piece(board, 3, 0, Queen, WHITE)
self.assert_right_piece(board, 4, 0, King, WHITE)
self.assert_right_piece(board, 0, 1, Pawn, WHITE)
self.assert_right_piece(board, 0, 7, Rook, BLACK)
self.assert_right_piece(board, 1, 7, Knight, BLACK)
self.assert_right_piece(board, 2, 7, Bishop, BLACK)
self.assert_right_piece(board, 3, 7, Queen, BLACK)
self.assert_right_piece(board, 4, 7, King, BLACK)
self.assert_right_piece(board, 0, 6, Pawn, BLACK)
def test_invalid_board(self):
with self.assertRaises(Exception) as context:
board_filler = BoardFiller()
board_filler.fill(text='r')
self.assertTrue(hasattr(context, 'exception'))
with self.assertRaises(Exception) as context:
board_filler = BoardFiller()
board_filler.fill(text='Xnbqk.../p......../......../......../......../......../P......./RNBQK...')
self.assertTrue(hasattr(context, 'exception'))
def assert_right_piece(self, board, x, y, class_, color):
piece = board[x][y]
self.assertTrue(piece, msg='the board is empty in position x={} y={}'.format(x, y))
self.assertTrue(
isinstance(piece, class_),
msg='the piece in position x={} y={} must be an instance of {}. {} found'.format(x, y, class_, type(piece))
)
self.assertEqual(piece.color, color)
| [
"benjibattino@gmail.com"
] | benjibattino@gmail.com |
7e1029ad59d5a3c4e3e7636aa5802f22953086cd | e15d63ccde04e7458bff5af1bdad63a5c699b489 | /example/Transformer_vision/2dpose/vit/multi_branch/config.py | 5582a68fa5d82c8142ce319cab34a1901077d3e7 | [
"WTFPL"
] | permissive | ddddwee1/TorchSUL | 775b6a2b1e4ab7aac25a3f0411de83affc257af5 | 6c7cd41b14fc8b746983e8b981d1ba4d08370ca2 | refs/heads/master | 2023-08-21T15:21:24.131718 | 2023-08-18T09:37:56 | 2023-08-18T09:37:56 | 227,628,298 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import numpy as np
# size
inp_size = 224
out_size = 56
base_sigma = 2.5
num_pts = 17
pairs = [[0,1], [1,2],[2,3], [0,4], [4,5],[5,6], [0,7],[7,8],[8,9],[9,10], [8,11],[11,12],[12,13],[8,14],[14,15],[15,16]]
# augmentation
rotation = 0
min_scale = 1 # this controls largest size
max_scale = 1 # this controls smallest sise
max_translate = 0
blur_prob = 0.0
blur_size = [7, 11, 15, 21]
blur_type = ['vertical','horizontal','mean']
# training
data_root = '/data/pose/mpii/images/'
max_epoch = 300
init_lr = 0.0005
decay = 0.0001
momentum = 0.9
lr_epoch = [150,250]
save_interval = 1
# extra
distributed = True
scale_var = 19.2
angle_var = np.pi
| [
"cy960823@outlook.com"
] | cy960823@outlook.com |
210f27376ef8118f72d9360119b3fc0f752cc8b3 | 6c82b4bfac34ddefd8ee65066827418b9a13a60b | /service_note/migrations/0012_auto_20191021_2307.py | 0977d9e32d41630c03ae1cd5057ba5b4d2121ce6 | [] | no_license | yegorkowalew/memo | 467956cab57bb36089077f28471508d9ff8b7688 | 305536cf40a3447e04aff3899e73824e5e346cfd | refs/heads/master | 2022-07-18T20:50:07.214745 | 2019-11-07T14:52:30 | 2019-11-07T14:52:30 | 218,546,734 | 0 | 0 | null | 2022-06-21T23:18:12 | 2019-10-30T14:29:17 | JavaScript | UTF-8 | Python | false | false | 1,341 | py | # Generated by Django 2.2.6 on 2019-10-21 20:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('service_note', '0011_auto_20191021_2238'),
]
operations = [
migrations.AlterField(
model_name='waybill',
name='coefficient',
field=models.FloatField(blank=True, null=True, verbose_name='Коэффициент'),
),
migrations.AlterField(
model_name='waybill',
name='nomenclature',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Номенклатура'),
),
migrations.AlterField(
model_name='waybill',
name='shop_recipient',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='ЦехПолучатель'),
),
migrations.AlterField(
model_name='waybill',
name='stock',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Склад'),
),
migrations.AlterField(
model_name='waybill',
name='warehouse_recipient',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='СкладПолучатель'),
),
]
| [
"C:\\Users\\i.kovalenko\\AppData\\Roaming\\The Bat!"
] | C:\Users\i.kovalenko\AppData\Roaming\The Bat! |
955a39cdc24af3e5a79dc08200d4650a71ddbd18 | bf6dc488aeead3e40c2c638d011005e554579455 | /log.py | 5e1fd11e885b0b7b7e0e5308fbd93b08f5098a82 | [
"MIT"
] | permissive | joelbryla/robot-replay | f1247137d7d8581bdaf69d6a110e1510cff3c15f | 61ded4750e2a5729ba487be0f25e5d8a0dc9088f | refs/heads/master | 2021-09-08T13:39:13.017044 | 2018-03-10T00:03:35 | 2018-03-10T00:04:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | import csv
import time
from networktables import NetworkTables
class Log:
    """Record all NetworkTables entries to a timestamped CSV file."""

    def __init__(self, team):
        """Connect to the robot of *team* and open the log file.

        The file name embeds the current wall-clock time plus (when the
        FMS provides one) the match type, so logs from different matches
        do not collide.
        """
        NetworkTables.startClientTeam(team)
        match_type = NetworkTables.getTable("FMSInfo").getValue("MatchType",
                                                                defaultValue="")
        self.file_name = "logs/" + time.strftime("%H-%M-%S %d-%m-%y") + \
                         str(match_type) + ".csv"
        # Keep the underlying file handle so rows can be flushed after every
        # log() call and the handle is not leaked to the garbage collector.
        self._out = open(self.file_name, "w", newline="")
        self.writer = csv.writer(self._out, delimiter=",")

    @property
    def reader(self):
        """A fresh csv.reader over the log file (kept for backward compat).

        The original stored one reader at construction time; once iterated
        it was exhausted and every later read returned nothing.
        """
        return csv.reader(open(self.file_name, "r", newline=""), delimiter=",")

    def log(self):
        """Get all entries from NetworkTables and log to csv file."""
        entries = NetworkTables.getEntries("")
        current_time = time.monotonic()
        for entry in entries:
            self.writer.writerow([entry.key, entry.value, current_time])
        # Flush so read() (and post-crash inspection) sees the latest rows.
        self._out.flush()

    def read(self):
        """Return every row logged so far as a list of lists.

        Opens a fresh reader on each call, fixing the bug where a second
        read() returned [] because the single csv.reader was exhausted.
        """
        with open(self.file_name, "r", newline="") as f:
            return list(csv.reader(f, delimiter=","))
| [
"dan.ben.oconnell@gmail.com"
] | dan.ben.oconnell@gmail.com |
c157b99f15cf4b7b2d4bd05ea5b0e5f89507cf3a | 07bb913fea5e0f1e65e35a7ca5c594fa1d144eb8 | /publishconf.py | ab389e79f3df4349f62293bf934b3def399eb94a | [] | no_license | jbzdak/pwzn-lessons | 8373552fabb260593cf612a27bf821d7b70b452d | 5ca58dba6220259b170c8a689a10338122c4eefd | refs/heads/master | 2021-04-05T20:48:56.447870 | 2020-03-19T20:36:08 | 2020-03-19T20:36:08 | 248,600,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# Previous deployment target (S3 bucket), kept for reference:
#SITEURL = 'http://pwzn.s3-website-us-east-1.amazonaws.com'
SITEURL = 'http://db.fizyka.pw.edu.pl/pwzn'
# Published builds use absolute URLs based on SITEURL.
RELATIVE_URLS = False
# Atom feeds are only generated for the published build.
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
# Wipe the output directory before each publish so stale pages disappear.
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
"jbzdak@gmail.com"
] | jbzdak@gmail.com |
86fc6d64004c5a38bc8473c6ad519119557660d2 | f771dd92c30c871e2b8fbdb1a006a75cacd26a16 | /users/migrations/0029_auto_20150227_0026.py | 2fb4d1415ba5ac6707c64212b457af179e61f5de | [] | no_license | guitoof/tasdurab | cefdc232544e852735af20eb85a4ed70b8faa9ae | 9c7233de6d94b22f30c6d8e72b4f0e75775a866d | refs/heads/master | 2020-04-14T23:49:03.775981 | 2015-03-19T11:32:14 | 2015-03-19T11:32:14 | 30,129,522 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adjusts two fields on the `user` model.

    dependencies = [
        ('users', '0028_auto_20150216_0026'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='first_name',
            field=models.CharField(max_length=255, verbose_name='Pr\xe9nom'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='group',
            # NOTE(review): the hard-coded default primary key (2) assumes
            # that users.Group row exists in every database - confirm.
            field=models.ForeignKey(default=2, blank=True, to='users.Group', null=True, verbose_name=b'Promo'),
            preserve_default=True,
        ),
    ]
| [
"guillaume.diallo@ensta-paristech.fr"
] | guillaume.diallo@ensta-paristech.fr |
052a765b4308597a3d16d02f22e77d3c03272c1c | 7610515d7bdd1e5e7a2de4fc70f52f3c1898812e | /budgetApp/wsgi.py | 270088a2599db9ed302e8a50cf6eb6a89a9f6a47 | [] | no_license | ClaudioG0/budgetAppSite | 2e6fa2f765e9a47b0f8563c6c528a048f4d983da | 83cffea630948fe1c44b233da2f0abea9d05832d | refs/heads/main | 2022-12-28T03:41:57.416574 | 2020-10-11T16:30:22 | 2020-10-11T16:30:22 | 303,162,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
WSGI config for budgetApp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already does.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'budgetApp.settings')
# Module-level WSGI callable picked up by servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
2cfb8721541468c142df79a782cc54ba17d46985 | b1cc02fe7dcc24f940547d02e7f3f3339e4d7ebf | /assignment1/.env/bin/pygmentize | d22149cbf5107dada46b3cd4a6fe2a2596f14bff | [] | no_license | wweichn/cs234 | 51c0226d5ca814b0473cf2fedeb84805eb722ea9 | 4bad9c28c5d4f772cf99085b9cad53dad478f4f4 | refs/heads/master | 2021-08-16T10:29:29.553815 | 2017-11-19T15:42:09 | 2017-11-19T15:42:09 | 111,301,647 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | #!/Users/ww/codework/cs234/assignment1/.env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pygments.cmdline import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py(w)" / ".exe" suffix from argv[0] so
    # usage/help output shows the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"1436664083@qq.com"
] | 1436664083@qq.com | |
2f1462be3f29b5ddc93d058062150d802f915cac | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinciDev_v38r1p1/InstallArea/x86_64-slc6-gcc49-opt/python/StrippingArchive/Stripping23/StrippingQEE/StrippingH24Mu.py | f8c83a16c95755164e65577006aff13f275fff10 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,741 | py | '''
Module for construction of h-->MuMuMuMu stripping selection
Exported symbols (use python help!):
- H24MuLineConf
Based on Bsmumu stripping lines
'''
__author__ = ['Xabier Cid Vidal']
__date__ = '11/22/2013'
__all__ = ('H24MuLineConf',
'default_name',
'default_config'
)
from Gaudi.Configuration import *
from Configurables import FilterDesktop, CombineParticles
from PhysSelPython.Wrappers import Selection, DataOnDemand
from StrippingConf.StrippingLine import StrippingLine
from StrippingUtils.Utils import LineBuilder
#from Configurables import OfflineVertexFitter
# Default stripping-line name prefix used by H24MuLineConf.
default_name = 'H24Mu'

#### This is the dictionary of all tunable cuts ########
# Units are noted inline (MeV, mm); chi2-type quantities are dimensionless.
default_config={
    'NAME': default_name,
    'BUILDERTYPE' : 'H24MuLineConf',
    'WGs' : [ 'QEE' ],
    'STREAMS' : [ 'Leptonic' ],
    'CONFIG':{'DefaultPostscale' : 1,
              'PromptLinePrescale' : 1,
              'SimpleLinePrescale' : 1,
              'DetachedLinePrescale' : 1,
              'LooseLinePrescale' : 0.01,
              'MuTrackChi2DoF' : 3,
              'MupTprompt' : 375, #MeV
              'MupTdetached' : 250, #MeV
              'MuGhostProb' : 0.4,
              'MuMaxIPchi2' : 3,
              'MuMinIPchi2' : 1,
              'MuPIDdll' : -3, # muon combDLL
              'MuNShared' : 3, # muon NShared
              'A1maxMass' : 2000, #MeV
              'A1Doca' : 0.2, #mm
              'A1DocaTight' : 0.1, #mm
              'A1Vchi2' : 7.5,
              'A1Vchi2Tight' : 1,
              'A1Dira' : 0,
              'A1maxIPchi2' : 25,
              'A1FDChi2' : 4,
              'HmaxDOCA' : 0.75, #mm
              'HmaxDOCATight' : 0.25, #mm
              'HVchi2' : 10,
              'HVchi2Tight' : 2,
              'HpT' : 1200, #MeV
              'MuTrackChi2DoF_loose' : 10,
              'MupT_loose' : 0,
              'MuMaxIPchi2_loose' : 1000000,
              'A1maxMass_loose' : 5000, #MeV
              'A1Doca_loose' : 10, #mm
              'A1Vchi2_loose' : 20,
              'HmaxDOCA_loose' : 1000000, #mm
              'HpT_loose' : 300, #MeV
              'HVchi2_loose' : 50
              }
    }
class H24MuLineConf(LineBuilder) :
    """
    Builder of:
     - H-> mumumumu stripping lines: prompt, detached and control,
    Usage:
    >>> config = { .... }
    >>> Conf = H24MuLinesConf('Test',config)
    >>> myLines = Conf.lines
    >>> for line in myLines:
    >>>    print line.name(), line.outputLocation()
    The lines can be used directly to build a StrippingStream object.
    Exports as instance data members:
    selPrompt : nominal prompt H24mu stripping line
    selSimple : nominal simple H24mu stripping line (no pT, IP cuts)
    selDetached : nominal detached H24mu stripping line
    selLoose : loose H24mu stripping line to understand systematics (prescaled)
    promptLine : Stripping line made from selPrompt
    simpleLine : Stripping line made from selSimple
    detachedLine : Stripping line made from selDetached
    looseLine : Stripping line made from selLoose
    lines : list of lines: [ promptLine, simpleLine, detachedLine, looseLine ]
    """
    # Every key that may appear in the CONFIG dict; LineBuilder validates
    # the supplied configuration against this tuple.
    __configuration_keys__ = (
        'DefaultPostscale',
        'PromptLinePrescale',
        'SimpleLinePrescale',
        'DetachedLinePrescale',
        'LooseLinePrescale',
        'MuTrackChi2DoF',
        'MuPIDdll',
        'MuNShared',
        'MupTprompt',
        'MupTdetached',
        'MuMaxIPchi2',
        'MuMinIPchi2',
        'MuGhostProb',
        'A1maxMass',
        'A1Doca',
        'A1Vchi2',
        'A1DocaTight',
        'A1Vchi2Tight',
        'A1Dira',
        'A1maxIPchi2',
        'A1FDChi2',
        'HmaxDOCA',
        'HpT',
        'HVchi2',
        'HmaxDOCATight',
        'HVchi2Tight',
        'MuTrackChi2DoF_loose',
        'MupT_loose',
        'MuMaxIPchi2_loose',
        'A1maxMass_loose',
        'A1Doca_loose',
        'A1Vchi2_loose',
        'HmaxDOCA_loose',
        'HpT_loose',
        'HVchi2_loose'
        )

    def __init__(self,
                 name = default_name,
                 config = None,
                 debug_cuts = 0):
        # debug_cuts: when truthy, the makeA1/makeDefault helpers print the
        # LoKi cut strings they build (useful when tuning the config dict).
        LineBuilder.__init__(self, name, config)
        prompt_name=name+'Prompt'
        simple_name=name+'Simple'
        detached_name=name+'Detached'
        loose_name=name+'Loose'
        self.config_dict = config
        self.debug_cuts = debug_cuts
        # Build the four selections; `type` selects the cut set
        # (0 prompt, 1 simple, 2 detached, 3 loose).
        self.selPrompt = self.makeDefault(prompt_name,type = 0)
        self.selSimple = self.makeDefault(simple_name,type = 1)
        self.selDetached = self.makeDefault(detached_name,type = 2)
        self.selLoose = self.makeDefault(loose_name,type = 3)
        # Isolation cone variables (three cone sizes) + vertex isolation,
        # written as extra info on the stripped candidates.
        ExtraInfoTools = [{'Type' : 'ConeVariables',
                           'ConeNumber' : 1,
                           'ConeAngle' : 1.0,
                           'Variables' : ['angle', 'mult','p','pt',
                                          'ptasy','pasy']},
                          {'Type' : 'ConeVariables',
                           'ConeNumber' : 2,
                           'ConeAngle' : 1.5,
                           'Variables' : ['angle', 'mult','p','pt',
                                          'ptasy','pasy']},
                          {'Type' : 'ConeVariables',
                           'ConeNumber' : 3,
                           'ConeAngle' : 2.0,
                           'Variables' : ['angle', 'mult','p','pt',
                                          'ptasy','pasy']},
                          {'Type' : 'VertexIsolation'}]
        # The A1 (dimuon) selections are registered as instance attributes
        # by makeDefault; fetch them back so the extra info is also computed
        # for the intermediate dimuon candidates.
        ExtraInfoDaughters = {"prompt" : [getattr(self,"A1"+prompt_name)],
                              "simple" : [getattr(self,"A1"+simple_name)],
                              "detached": [getattr(self,"A1"+detached_name)],
                              "loose" : [getattr(self,"A1"+loose_name)]}
        self.promptLine = StrippingLine(prompt_name+"Line",
                                        prescale = config['PromptLinePrescale'],
                                        postscale = config['DefaultPostscale'],
                                        # algos = [ self.selPrompt ],
                                        selection = self.selPrompt,
                                        ExtraInfoTools = ExtraInfoTools,
                                        ExtraInfoSelections = ExtraInfoDaughters["prompt"],
                                        MDSTFlag = True,
                                        RequiredRawEvents = ["Muon"]
                                        )
        self.simpleLine = StrippingLine(simple_name+"Line",
                                        prescale = config['SimpleLinePrescale'],
                                        postscale = config['DefaultPostscale'],
                                        # algos = [ self.selSimple ],
                                        selection = self.selSimple,
                                        ExtraInfoTools = ExtraInfoTools,
                                        ExtraInfoSelections = ExtraInfoDaughters["simple"],
                                        MDSTFlag = True,
                                        RequiredRawEvents = ["Muon"]
                                        )
        self.detachedLine = StrippingLine(detached_name+"Line",
                                          prescale = config['DetachedLinePrescale'],
                                          postscale = config['DefaultPostscale'],
                                          # algos = [ self.selDetached ],
                                          selection = self.selDetached,
                                          ExtraInfoTools = ExtraInfoTools,
                                          ExtraInfoSelections = ExtraInfoDaughters["detached"],
                                          MDSTFlag = True,
                                          RequiredRawEvents = ["Muon"]
                                          )
        ## no need for mdst or raw data in the loose line...
        self.looseLine = StrippingLine(loose_name+"Line",
                                       prescale = config['LooseLinePrescale'],
                                       postscale = config['DefaultPostscale'],
                                       # algos = [ self.selLoose ],
                                       selection = self.selLoose,
                                       ExtraInfoTools = ExtraInfoTools,
                                       ExtraInfoSelections = ExtraInfoDaughters["loose"],
                                       )
        self.registerLine(self.promptLine)
        self.registerLine(self.simpleLine)
        self.registerLine(self.detachedLine)
        # The loose line is built but deliberately not registered.
        #self.registerLine(self.looseLine)

    def makeA1(self,name,type) :
        """
        Prompt A1 selection
        Arguments:
        name : name of the Selection.
        type : 0 (prompt), 1 (simple), 2 (detached), 3 (loose)
        """
        A1 = CombineParticles("Combine"+name)
        # KS0 is used as a stand-in container for the light dimuon state.
        A1.DecayDescriptor = "KS0 -> mu+ mu-"
        # prompt
        if type==0:
            A1.DaughtersCuts = { "mu+" : "(TRCHI2DOF < %(MuTrackChi2DoF)s ) "\
                                 "& ( TRGHOSTPROB < %(MuGhostProb)s ) " \
                                 "& (PT > %(MupTprompt)s * MeV ) "\
                                 "& (MIPCHI2DV(PRIMARY)< %(MuMaxIPchi2)s )" %self.config_dict }
            A1.CombinationCut = "(AM < %(A1maxMass)s * MeV ) "\
                                "& (AMAXDOCA('')<%(A1Doca)s * mm)" %self.config_dict
            A1.MotherCut = "(VFASPF(VCHI2)< %(A1Vchi2)s ) "\
                           "& (MM < %(A1maxMass)s * MeV)" %self.config_dict
        # simple: tighten DOCA and Vchi2, tighten muID cut
        elif type==1:
            A1.DaughtersCuts = { "mu+" : "(TRCHI2DOF < %(MuTrackChi2DoF)s ) "\
                                 "& ( TRGHOSTPROB < %(MuGhostProb)s ) " \
                                 "& (PIDmu > %(MuPIDdll)s ) "\
                                 "& (PPINFO(LHCb.ProtoParticle.MuonNShared,99999)<= %(MuNShared)s ) " %self.config_dict }
            A1.CombinationCut = "(AM < %(A1maxMass)s * MeV ) "\
                                "& (AMAXDOCA('')<%(A1DocaTight)s * mm)" %self.config_dict
            A1.MotherCut = "(VFASPF(VCHI2)< %(A1Vchi2Tight)s ) "\
                           "& (MM < %(A1maxMass)s * MeV)" %self.config_dict
        #detached
        elif type==2:
            #A1.addTool( OfflineVertexFitter )
            #A1.ParticleCombiners.update( { "" : "OfflineVertexFitter"} )
            #A1.ReFitPVs = True
            A1.DaughtersCuts = { "mu+" : "(TRCHI2DOF < %(MuTrackChi2DoF)s ) "\
                                 "& (PT > %(MupTdetached)s * MeV ) "\
                                 "& ( TRGHOSTPROB < %(MuGhostProb)s ) " \
                                 "& (MIPCHI2DV(PRIMARY)> %(MuMinIPchi2)s )" %self.config_dict }
            A1.CombinationCut = "(AM < %(A1maxMass)s * MeV ) "\
                                "& (AMAXDOCA('')<%(A1Doca)s * mm)" %self.config_dict
            A1.MotherCut = "(VFASPF(VCHI2)< %(A1Vchi2)s ) "\
                           "& (MM < %(A1maxMass)s * MeV)" \
                           "& (BPVDIRA > %(A1Dira)s )" \
                           "& (BPVIPCHI2() < %(A1maxIPchi2)s )" \
                           "& (BPVVDCHI2 > %(A1FDChi2)s )" %self.config_dict
        #loose
        else:
            A1.DaughtersCuts = { "mu+" : "(TRCHI2DOF < %(MuTrackChi2DoF_loose)s ) "\
                                 "& (PT > %(MupT_loose)s * MeV ) "\
                                 "& (MIPCHI2DV(PRIMARY)< %(MuMaxIPchi2_loose)s )" %self.config_dict }
            A1.CombinationCut = "(AM < %(A1maxMass_loose)s * MeV ) "\
                                "& (AMAXDOCA('')<%(A1Doca_loose)s * mm)" %self.config_dict
            A1.MotherCut = "(VFASPF(VCHI2)< %(A1Vchi2_loose)s ) "\
                           "& (MM < %(A1maxMass_loose)s * MeV)" %self.config_dict
        _stdAllLooseMuons = DataOnDemand(Location = "Phys/StdAllLooseMuons/Particles")
        if self.debug_cuts:
            print "DEBUG - A1 cuts for type", type
            print A1.DaughtersCuts
            print A1.MotherCut
            print A1.CombinationCut
        return Selection ("Sel"+name,
                          Algorithm = A1,
                          RequiredSelections = [ _stdAllLooseMuons ])

    def makeDefault(self,name,type=0) :
        """
        H-->A0(mumu)A0(mumu) selection.
        Arguments:
        name : name of the Selection.
        type : 0 (prompt), 1 (simple), 2 (detached), 3 (loose)
        """
        # Note: also registers the dimuon selection as self.A1<name>, which
        # __init__ later retrieves via getattr for the extra-info tools.
        SelA1 = self.makeA1("A1"+name,type)
        setattr(self,"A1"+name,SelA1)
        H25 = CombineParticles("Combine_H25"+name)
        H25.DecayDescriptor = "H_10 -> KS0 KS0"
        H25.DaughtersCuts = {}
        # simple: do not cut in pT, cut tighter in DOCA, VCHI2
        if type==1:
            H25.CombinationCut = "(AMAXDOCA('')< %(HmaxDOCATight)s * mm )" %self.config_dict
            H25.MotherCut = "(VFASPF(VCHI2)< %(HVchi2Tight)s )" %self.config_dict
        # loose: loosen all cuts
        elif type==3:
            H25.CombinationCut = "(AMAXDOCA('')< %(HmaxDOCA_loose)s * mm )" %self.config_dict
            H25.MotherCut = "(PT > %(HpT_loose)s * MeV ) "\
                            "& (VFASPF(VCHI2)< %(HVchi2_loose)s ) " %self.config_dict
        # prompt or detached
        else:
            H25.CombinationCut = "(AMAXDOCA('')< %(HmaxDOCA)s * mm )" %self.config_dict
            H25.MotherCut = "(PT > %(HpT)s * MeV ) "\
                            "& (VFASPF(VCHI2)< %(HVchi2)s ) " %self.config_dict
        if self.debug_cuts:
            print "DEBUG - H cuts for type", type
            print H25.MotherCut
            print H25.CombinationCut
        return Selection( "SelH4mu"+name,
                          Algorithm = H25,
                          RequiredSelections=[SelA1] )
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
e11afb3d340822f1faf019a307652e3f4669ad18 | a7597f2b3879732eaebce164d1b84e6172847777 | /HW1/DecisionTree_Iris.py | 80a000496087f6de348817e4d1be751f463a5769 | [] | no_license | ss87021456/2017-NCTU-Machine-Learning | 17164ba33c7253708e10a8490e446cc3b32865b0 | 02206365e6335a99d9d50b227efdb216782e18cf | refs/heads/master | 2021-09-04T02:21:56.029114 | 2018-01-14T16:07:34 | 2018-01-14T16:07:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,463 | py | from Node import Node
from util import *
from math import log
from random import shuffle
def find_impure_leaf(node):
    """Return the first impure leaf in a pre-order walk of the tree, or None."""
    stack = [node]
    while stack:
        current = stack.pop()
        if current is None:
            continue
        if current.leaf and not current.pure:
            return current
        # Push right before left so the left subtree is explored first,
        # matching the recursive pre-order traversal this replaces.
        stack.append(current.right)
        stack.append(current.left)
    return None
def find_threshold_spiltIdx(data):
    """Scan every feature column and return (threshold, feature_index)
    giving the lowest weighted entropy split of *data*.

    Labels are assumed to sit in the last column, so only data[0][:-1]
    feature columns are scanned.  The misspelled name ("spilt",
    "feather") is kept because callers depend on it.
    """
    best_feather_index = -1
    best_entropy = float('inf')
    best_threshold = float('inf')
    for i in range(len(data[0][:-1])):
        (entropy, threshold) = cal_lowest_entropy(data, i)
        if entropy < best_entropy:
            best_feather_index = i
            best_entropy = entropy
            best_threshold = threshold
    return (best_threshold, best_feather_index)
def cal_lowest_entropy(data, feature_index):
    """Return (entropy, threshold) of the best binary split of *data*
    on column *feature_index*.

    Candidate thresholds are the midpoints between consecutive sorted
    feature values; each candidate is scored by the size-weighted sum of
    the two partitions' entropies.
    """
    sort_data = sort_by_axis(data, axis = feature_index)
    best_entropy = float('inf')
    best_threshold = float('inf')
    current_entropy = float('inf')
    current_threshold = float('inf')
    for i in range(0, len(data)):
        if i < len(data)-1 :
            current_threshold = (sort_data[i][feature_index] + sort_data[i+1][feature_index])/2
            #print current_threshold
        # NOTE(review): on the last iteration the previous threshold is
        # re-evaluated (i == len(data)-1 keeps current_threshold), which is
        # redundant but harmless.
        (left, right) = split(sort_data, current_threshold, feature_index)
        current_entropy = cal_entropy(left) * float(len(left))/float(len(data)) + cal_entropy(right) * float(len(right))/float(len(data))
        if current_entropy < best_entropy:
            best_entropy = current_entropy
            best_threshold = current_threshold
    return (best_entropy, best_threshold)
def cal_entropy(data):
    """Shannon entropy (natural log) of the class labels in *data*.

    Labels live in the last column of each datapoint.  Anything that is
    neither 'setosa' nor 'versicolor' is tallied in the third bucket,
    exactly like the original three-way count.
    """
    tallies = [0, 0, 0]
    for datapoint in data:
        label = datapoint[-1]
        if label == 'setosa':
            bucket = 0
        elif label == 'versicolor':
            bucket = 1
        else:
            bucket = 2
        tallies[bucket] += 1
    total = float(len(data))
    if total == 0:
        return 0.0
    return -sum((c / total) * log(c / total) for c in tallies if c)
def split(data, threshold, feature_index):
    """Partition *data* on column *feature_index* around *threshold*.

    Returns (left, right): left holds datapoints whose feature value is
    <= threshold, right holds the rest.  Relative order is preserved.
    """
    left = [dp for dp in data if dp[feature_index] <= threshold]
    right = [dp for dp in data if dp[feature_index] > threshold]
    return (left, right)
def ID3_algorithm(root):
    """Grow a decision tree in place from *root* (a Node).

    Repeatedly finds an impure leaf, picks the lowest-entropy
    (threshold, feature) split for its data, and replaces the leaf by an
    internal node with two child leaves, until every leaf is pure.
    """
    current_node = find_impure_leaf(root)
    count = 0  # number of splits performed (kept only for debugging)
    while current_node != None:
        (threshold, feature_index) = find_threshold_spiltIdx(current_node.data)
        (left, right) = split(current_node.data, threshold, feature_index)
        current_node.set_threshold(threshold)
        current_node.set_threshold_idx(feature_index)
        left_node = Node(left)
        right_node = Node(right)
        current_node.left = left_node
        current_node.right = right_node
        current_node.leaf = False
        # Re-scan from the root; the new children may themselves be impure.
        current_node = find_impure_leaf(root)
        count += 1
    #print "done construct ID3_tree!"
def predict(datapoint, Tree):
    """Walk *Tree* from the root and return the label of the reached leaf."""
    node = Tree
    while not node.pure:
        # Descend left when the split feature is at or below the threshold.
        goes_left = datapoint[node.threshold_idx] <= node.threshold
        node = node.left if goes_left else node.right
    return node.label
def calc_error(dataset, Tree):
    """Evaluate *Tree* on *dataset*.

    Returns (accuracy, precision, recall) where precision and recall are
    per-class lists ordered [setosa, versicolor, virginica].

    Improvements over the original: the 3x3 if/elif confusion-matrix
    ladder is collapsed into one one-vs-rest update, and the precision /
    recall divisions are guarded so a class that is never predicted (or
    never present) yields 0.0 instead of raising ZeroDivisionError.
    """
    # Assumes the three iris labels; any other label raises ValueError,
    # whereas the original silently ignored unknown predictions.
    labels = ('setosa', 'versicolor', 'virginica')
    errors = 0
    num_samples = len(dataset)
    true_positive = [0.0, 0.0, 0.0]
    false_positive = [0.0, 0.0, 0.0]
    false_negative = [0.0, 0.0, 0.0]
    true_negative = [0.0, 0.0, 0.0]
    for datapoint in dataset:
        prediction = predict(datapoint, Tree)
        ground_truth = datapoint[-1]
        if ground_truth != prediction:
            errors += 1
        pred_idx = labels.index(prediction)
        true_idx = labels.index(ground_truth)
        # One-vs-rest confusion counts for every class in a single pass.
        for i in range(3):
            if i == pred_idx and i == true_idx:
                true_positive[i] += 1
            elif i == pred_idx:
                false_positive[i] += 1
            elif i == true_idx:
                false_negative[i] += 1
            else:
                true_negative[i] += 1
    accuracy = 1 - (float(errors) / float(num_samples))
    precision = [0.0, 0.0, 0.0]
    recall = [0.0, 0.0, 0.0]
    for i in range(3):
        predicted_i = true_positive[i] + false_positive[i]
        actual_i = true_positive[i] + false_negative[i]
        precision[i] = true_positive[i] / predicted_i if predicted_i else 0.0
        recall[i] = true_positive[i] / actual_i if actual_i else 0.0
    return accuracy, precision, recall
def main():
    """Run repeated 5-fold cross-validation of the ID3 tree on iris.csv
    and print average accuracy plus per-class precision/recall."""
    # NOTE(review): file handle is never closed; Python 2 style throughout.
    file = open('./0310120/iris.csv')
    data = []
    # loading iris dataset
    for idx ,line in enumerate(file):
        line = line.strip("\r\n")
        if idx > 0:
            data.append([float(element) for element in line.split(',')[:-1]])
            data[idx].append(line.split(',')[-1])
        else:
            data.append(line.split(','))
    # define attributes
    attributes = data[0]
    data.remove(attributes)
    #print data
    # Standardize features in place (helper from util).
    std_normalize(data)
    #print "after standard normalize"
    #print data
    test_time = 1      # how many shuffled repetitions of the 5-fold CV
    k_fold_time = 5    # number of folds produced by k_fold_dataset
    total = 0
    total_precise = [0.0,0.0,0.0]
    total_recall = [0.0,0.0,0.0]
    for _ in range(test_time):
        shuffle(data)
        training_set, testing_set = k_fold_dataset(data)
        acc = 0
        acc_pre = [0.0,0.0,0.0]
        acc_rec = [0.0,0.0,0.0]
        for i in range(k_fold_time):
            ID3_Tree = Node(training_set[i])
            ID3_algorithm(ID3_Tree)
            accuracy, precision, recall = calc_error(testing_set[i],ID3_Tree)
            for j in range(3):
                acc_pre[j] += precision[j]
                acc_rec[j] += recall[j]
            acc += accuracy
        for k in range(3):
            total_precise[k] += acc_pre[k]/k_fold_time
            total_recall[k] += acc_rec[k]/k_fold_time
        total += acc/k_fold_time
    #print "\nSummary after ",test_time," times of 5-fold-validation:\n"
    # Averages over all repetitions: accuracy, then precision/recall per class.
    print"{:.3f}".format(total/test_time)
    print"{:.3f} {:.3f}".format(total_precise[0]/test_time,total_recall[0]/test_time)
    print"{:.3f} {:.3f}".format(total_precise[1]/test_time,total_recall[1]/test_time)
    print"{:.3f} {:.3f}".format(total_precise[2]/test_time,total_recall[2]/test_time)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
fe613a4f0bd227097fb0d3eccffee13acbcc5e74 | 0ab65e2b803fa275ba43078c007b7e2d2929dc4f | /pro_gan_pytorch/PRO_GAN.py | 7be2c9c59db663881f76eb3de1d801da21b7db78 | [
"MIT"
] | permissive | joebradly/pro_gan_pytorch | 2fb00b0b0cd983fe9269955a0affd30711950b86 | ad51fc3da9cd352c43a362652627e177d0887152 | refs/heads/master | 2020-04-03T10:03:55.205374 | 2018-10-19T08:45:06 | 2018-10-19T08:45:06 | 155,183,005 | 0 | 0 | MIT | 2018-10-29T09:12:24 | 2018-10-29T09:12:24 | null | UTF-8 | Python | false | false | 25,516 | py | """ Module implementing GAN which will be trained using the Progressive growing
technique -> https://arxiv.org/abs/1710.10196
"""
import numpy as np
import torch as th
class Generator(th.nn.Module):
    """ Generator of the GAN network """

    def __init__(self, depth=7, latent_size=512, use_eql=True):
        """
        constructor for the Generator class
        :param depth: required depth of the Network
        :param latent_size: size of the latent manifold
        :param use_eql: whether to use equalized learning rate
        """
        from torch.nn import ModuleList, Upsample
        from pro_gan_pytorch.CustomLayers import GenGeneralConvBlock, GenInitialBlock

        super(Generator, self).__init__()

        # latent_size must be a power of 2 and large enough that halving it
        # per extra depth level never reaches zero channels.
        assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \
            "latent size not a power of 2"
        if depth >= 4:
            assert latent_size >= np.power(2, depth - 4), "latent size will diminish to zero"

        # state of the generator:
        self.use_eql = use_eql
        self.depth = depth
        self.latent_size = latent_size

        # register the modules required for the GAN
        self.initial_block = GenInitialBlock(self.latent_size, use_eql=self.use_eql)

        # create a module list of the other required general convolution blocks
        self.layers = ModuleList([])  # initialize to empty list

        # create the ToRGB layers for various outputs:
        # (1x1 conv mapping the feature maps at each resolution to 3 channels)
        if self.use_eql:
            from pro_gan_pytorch.CustomLayers import _equalized_conv2d
            self.toRGB = lambda in_channels: \
                _equalized_conv2d(in_channels, 3, (1, 1), bias=True)
        else:
            from torch.nn import Conv2d
            self.toRGB = lambda in_channels: Conv2d(in_channels, 3, (1, 1), bias=True)

        self.rgb_converters = ModuleList([self.toRGB(self.latent_size)])

        # create the remaining layers; the first three keep latent_size
        # channels, afterwards the channel count halves at every level.
        for i in range(self.depth - 1):
            if i <= 2:
                layer = GenGeneralConvBlock(self.latent_size,
                                            self.latent_size, use_eql=self.use_eql)
                rgb = self.toRGB(self.latent_size)
            else:
                layer = GenGeneralConvBlock(
                    int(self.latent_size // np.power(2, i - 3)),
                    int(self.latent_size // np.power(2, i - 2)),
                    use_eql=self.use_eql
                )
                rgb = self.toRGB(int(self.latent_size // np.power(2, i - 2)))
            self.layers.append(layer)
            self.rgb_converters.append(rgb)

        # register the temporary upsampler (used for the fade-in branch)
        self.temporaryUpsampler = Upsample(scale_factor=2)

    def forward(self, x, depth, alpha):
        """
        forward pass of the Generator
        :param x: input noise
        :param depth: current depth from where output is required
        :param alpha: value of alpha for fade-in effect (0 -> old resolution,
                      1 -> fully using the newest block)
        :return: y => output
        """
        assert depth < self.depth, "Requested output depth cannot be produced"

        y = self.initial_block(x)

        if depth > 0:
            for block in self.layers[:depth - 1]:
                y = block(y)

            # Progressive-growing fade-in: blend the upsampled output of the
            # previous resolution with the output of the newest block.
            residual = self.rgb_converters[depth - 1](self.temporaryUpsampler(y))
            straight = self.rgb_converters[depth](self.layers[depth - 1](y))

            out = (alpha * straight) + ((1 - alpha) * residual)

        else:
            out = self.rgb_converters[0](y)

        return out
class Discriminator(th.nn.Module):
    """ Discriminator of the GAN """

    def __init__(self, height=7, feature_size=512, use_eql=True):
        """
        constructor for the class
        :param height: total height of the discriminator (Must be equal to the Generator depth)
        :param feature_size: size of the deepest features extracted
                             (Must be equal to Generator latent_size)
        :param use_eql: whether to use equalized learning rate
        """
        from torch.nn import ModuleList, AvgPool2d
        from pro_gan_pytorch.CustomLayers import DisGeneralConvBlock, DisFinalBlock

        super(Discriminator, self).__init__()

        # feature_size must be a power of 2 (mirrors the Generator check).
        assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \
            "latent size not a power of 2"
        if height >= 4:
            assert feature_size >= np.power(2, height - 4), "feature size cannot be produced"

        # create state of the object
        self.use_eql = use_eql
        self.height = height
        self.feature_size = feature_size

        self.final_block = DisFinalBlock(self.feature_size, use_eql=self.use_eql)

        # create a module list of the other required general convolution blocks
        self.layers = ModuleList([])  # initialize to empty list

        # create the fromRGB layers for various inputs:
        # (1x1 conv lifting a 3-channel image to the feature width of a level)
        if self.use_eql:
            from pro_gan_pytorch.CustomLayers import _equalized_conv2d
            self.fromRGB = lambda out_channels: \
                _equalized_conv2d(3, out_channels, (1, 1), bias=True)
        else:
            from torch.nn import Conv2d
            self.fromRGB = lambda out_channels: Conv2d(3, out_channels, (1, 1), bias=True)

        self.rgb_to_features = ModuleList([self.fromRGB(self.feature_size)])

        # create the remaining layers (mirror image of the Generator stack)
        for i in range(self.height - 1):
            if i > 2:
                layer = DisGeneralConvBlock(
                    int(self.feature_size // np.power(2, i - 2)),
                    int(self.feature_size // np.power(2, i - 3)),
                    use_eql=self.use_eql
                )
                rgb = self.fromRGB(int(self.feature_size // np.power(2, i - 2)))
            else:
                layer = DisGeneralConvBlock(self.feature_size,
                                            self.feature_size, use_eql=self.use_eql)
                rgb = self.fromRGB(self.feature_size)
            self.layers.append(layer)
            self.rgb_to_features.append(rgb)

        # register the temporary downSampler (used for the fade-in branch)
        self.temporaryDownsampler = AvgPool2d(2)

    def forward(self, x, height, alpha):
        """
        forward pass of the discriminator
        :param x: input to the network
        :param height: current height of operation (Progressive GAN)
        :param alpha: current value of alpha for fade-in
        :return: out => raw prediction values (WGAN-GP)
        """
        assert height < self.height, "Requested output depth cannot be produced"

        if height > 0:
            # Fade-in: blend features from the downsampled image (previous
            # resolution path) with the newest high-resolution block.
            residual = self.rgb_to_features[height - 1](self.temporaryDownsampler(x))

            straight = self.layers[height - 1](
                self.rgb_to_features[height](x)
            )

            y = (alpha * straight) + ((1 - alpha) * residual)

            for block in reversed(self.layers[:height - 1]):
                y = block(y)
        else:
            y = self.rgb_to_features[0](x)

        out = self.final_block(y)

        return out
class ConditionalDiscriminator(th.nn.Module):
    """ Discriminator of the GAN (conditional variant: also receives an
    embedding / latent vector describing the condition). """

    def __init__(self, height=7, feature_size=512, embedding_size=4096,
                 compressed_latent_size=128, use_eql=True):
        """
        constructor for the class
        :param height: total height of the discriminator (Must be equal to the Generator depth)
        :param feature_size: size of the deepest features extracted
                             (Must be equal to Generator latent_size)
        :param embedding_size: size of the embedding for conditional discrimination
        :param compressed_latent_size: size of the compressed version
        :param use_eql: whether to use equalized learning rate
        """
        from torch.nn import ModuleList, AvgPool2d
        from pro_gan_pytorch.CustomLayers import DisGeneralConvBlock, ConDisFinalBlock

        super(ConditionalDiscriminator, self).__init__()

        assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \
            "latent size not a power of 2"
        if height >= 4:
            assert feature_size >= np.power(2, height - 4), "feature size cannot be produced"

        # create state of the object
        self.use_eql = use_eql
        self.height = height
        self.feature_size = feature_size
        self.embedding_size = embedding_size
        self.compressed_latent_size = compressed_latent_size

        # final block additionally consumes the (compressed) condition vector
        self.final_block = ConDisFinalBlock(self.feature_size, self.embedding_size,
                                            self.compressed_latent_size, use_eql=self.use_eql)

        # create a module list of the other required general convolution blocks
        self.layers = ModuleList([])  # initialize to empty list

        # create the fromRGB layers for various inputs:
        if self.use_eql:
            from pro_gan_pytorch.CustomLayers import _equalized_conv2d
            self.fromRGB = lambda out_channels: \
                _equalized_conv2d(3, out_channels, (1, 1), bias=True)
        else:
            from torch.nn import Conv2d
            self.fromRGB = lambda out_channels: Conv2d(3, out_channels, (1, 1), bias=True)

        self.rgb_to_features = ModuleList([self.fromRGB(self.feature_size)])

        # create the remaining layers (same layout as Discriminator)
        for i in range(self.height - 1):
            if i > 2:
                layer = DisGeneralConvBlock(
                    int(self.feature_size // np.power(2, i - 2)),
                    int(self.feature_size // np.power(2, i - 3)),
                    use_eql=self.use_eql
                )
                rgb = self.fromRGB(int(self.feature_size // np.power(2, i - 2)))
            else:
                layer = DisGeneralConvBlock(self.feature_size,
                                            self.feature_size, use_eql=self.use_eql)
                rgb = self.fromRGB(self.feature_size)
            self.layers.append(layer)
            self.rgb_to_features.append(rgb)

        # register the temporary downSampler
        self.temporaryDownsampler = AvgPool2d(2)

    def forward(self, x, latent_vector, height, alpha):
        """
        forward pass of the discriminator
        :param x: input to the network
        :param latent_vector: latent vector required for conditional discrimination
        :param height: current height of operation (Progressive GAN)
        :param alpha: current value of alpha for fade-in
        :return: out => raw prediction values
        """
        assert height < self.height, "Requested output depth cannot be produced"

        if height > 0:
            # Fade-in blending, identical to the unconditional Discriminator.
            residual = self.rgb_to_features[height - 1](self.temporaryDownsampler(x))

            straight = self.layers[height - 1](
                self.rgb_to_features[height](x)
            )

            y = (alpha * straight) + ((1 - alpha) * residual)

            for block in reversed(self.layers[:height - 1]):
                y = block(y)
        else:
            y = self.rgb_to_features[0](x)

        out = self.final_block(y, latent_vector)

        return out
class ProGAN:
""" Wrapper around the Generator and the Discriminator """
    def __init__(self, depth=7, latent_size=512, learning_rate=0.001, beta_1=0,
                 beta_2=0.99, eps=1e-8, drift=0.001, n_critic=1, use_eql=True,
                 loss="wgan-gp", use_ema=True, ema_decay=0.999,
                 device=th.device("cpu")):
        """
        constructor for the class
        :param depth: depth of the GAN (will be used for each generator and discriminator)
        :param latent_size: latent size of the manifold used by the GAN
        :param learning_rate: learning rate for Adam
        :param beta_1: beta_1 for Adam
        :param beta_2: beta_2 for Adam
        :param eps: epsilon for Adam
        :param n_critic: number of times to update discriminator
                         (Used only if loss is wgan or wgan-gp)
        :param drift: drift penalty for the
                      (Used only if loss is wgan or wgan-gp)
        :param use_eql: whether to use equalized learning rate
        :param loss: the loss function to be used
                     Can either be a string =>
                          ["wgan-gp", "wgan", "lsgan", "lsgan-with-sigmoid"]
                     Or an instance of GANLoss
        :param use_ema: boolean for whether to use exponential moving averages
        :param ema_decay: value of mu for ema
        :param device: device to run the GAN on (GPU / CPU)
        """
        from torch.optim import Adam
        from torch.nn import DataParallel

        # Create the Generator and the Discriminator
        self.gen = Generator(depth, latent_size, use_eql=use_eql).to(device)
        self.dis = Discriminator(depth, latent_size, use_eql=use_eql).to(device)

        # if code is to be run on GPU, we can use DataParallel:
        if device == th.device("cuda"):
            self.gen = DataParallel(self.gen)
            self.dis = DataParallel(self.dis)

        # state of the object
        self.latent_size = latent_size
        self.depth = depth
        self.use_ema = use_ema
        self.ema_decay = ema_decay
        self.n_critic = n_critic
        self.use_eql = use_eql
        self.device = device
        self.drift = drift

        # define the optimizers for the discriminator and generator
        self.gen_optim = Adam(self.gen.parameters(), lr=learning_rate,
                              betas=(beta_1, beta_2), eps=eps)
        self.dis_optim = Adam(self.dis.parameters(), lr=learning_rate,
                              betas=(beta_1, beta_2), eps=eps)

        # define the loss function used for training the GAN
        # (string names are resolved by __setup_loss; GANLoss passes through)
        self.loss = self.__setup_loss(loss)

        # setup the ema for the generator (shadow copies of trainable params)
        if self.use_ema:
            from pro_gan_pytorch.CustomLayers import EMA
            self.ema = EMA(self.ema_decay)
            self.__register_generator_to_ema()
def __register_generator_to_ema(self):
for name, param in self.gen.named_parameters():
if param.requires_grad:
self.ema.register(name, param.data)
def __apply_ema_on_generator(self):
for name, param in self.gen.named_parameters():
if param.requires_grad:
param.data = self.ema(name, param.data)
def __setup_loss(self, loss):
import pro_gan_pytorch.Losses as losses
if isinstance(loss, str):
loss = loss.lower() # lowercase the string
if loss == "wgan":
loss = losses.WGAN_GP(self.device, self.dis, self.drift, use_gp=False)
# note if you use just wgan, you will have to use weight clipping
# in order to prevent gradient exploding
elif loss == "wgan-gp":
loss = losses.WGAN_GP(self.device, self.dis, self.drift, use_gp=True)
elif loss == "lsgan":
loss = losses.LSGAN(self.device, self.dis)
elif loss == "lsgan-with-sigmoid":
loss = losses.LSGAN_SIGMOID(self.device, self.dis)
else:
raise ValueError("Unknown loss function requested")
elif not isinstance(loss, losses.GANLoss):
raise ValueError("loss is neither an instance of GANLoss nor a string")
return loss
    def optimize_discriminator(self, noise, real_batch, depth, alpha):
        """
        performs one step of weight update on discriminator using the batch of data

        Runs ``n_critic`` discriminator updates against the same real batch and
        freshly generated (detached) fake batches, and returns the mean loss.

        :param noise: input noise of sample generation
        :param real_batch: real samples batch
        :param depth: current depth of optimization
        :param alpha: current alpha for fade-in
        :return: current loss (Wasserstein loss), averaged over n_critic steps
        """
        from torch.nn import AvgPool2d
        from torch.nn.functional import upsample  # NOTE(review): deprecated upstream in favor of interpolate() -- confirm torch version before changing
        # downsample the real_batch for the given depth
        down_sample_factor = int(np.power(2, self.depth - depth - 1))
        # NOTE(review): max(..., 0) looks like it was meant to be max(..., 1)
        # (AvgPool2d(0) would be invalid), but the power term appears to always
        # be >= 1 here so the clamp never fires -- verify before changing.
        prior_downsample_factor = max(int(np.power(2, self.depth - depth)), 0)
        ds_real_samples = AvgPool2d(down_sample_factor)(real_batch)
        if depth > 0:
            # one level coarser than the current depth, upsampled back to size
            prior_ds_real_samples = upsample(AvgPool2d(prior_downsample_factor)(real_batch),
                                             scale_factor=2)
        else:
            # lowest resolution: no coarser level exists to fade from
            prior_ds_real_samples = ds_real_samples
        # real samples are a combination of ds_real_samples and prior_ds_real_samples
        # (progressive-growing fade-in blend, weighted by alpha)
        real_samples = (alpha * ds_real_samples) + ((1 - alpha) * prior_ds_real_samples)
        loss_val = 0
        for _ in range(self.n_critic):
            # generate a batch of samples; detach so generator gets no gradient
            fake_samples = self.gen(noise, depth, alpha).detach()
            loss = self.loss.dis_loss(real_samples, fake_samples, depth, alpha)
            # optimize discriminator
            self.dis_optim.zero_grad()
            loss.backward()
            self.dis_optim.step()
            loss_val += loss.item()
        return loss_val / self.n_critic
def optimize_generator(self, noise, depth, alpha):
"""
performs one step of weight update on generator for the given batch_size
:param noise: input random noise required for generating samples
:param depth: depth of the network at which optimization is done
:param alpha: value of alpha for fade-in effect
:return: current loss (Wasserstein estimate)
"""
# generate fake samples:
fake_samples = self.gen(noise, depth, alpha)
# TODO: Change this implementation for making it compatible for relativisticGAN
loss = self.loss.gen_loss(None, fake_samples, depth, alpha)
# optimize the generator
self.gen_optim.zero_grad()
loss.backward()
self.gen_optim.step()
# if use_ema is true, apply ema to the generator parameters
if self.use_ema:
self.__apply_ema_on_generator()
# return the loss value
return loss.item()
class ConditionalProGAN:
    """ Wrapper around the Generator and the Discriminator

    Text/embedding-conditional variant of the progressive GAN: the
    discriminator additionally receives a conditioning latent vector.
    """
    def __init__(self, embedding_size, depth=7, latent_size=512, compressed_latent_size=128,
                 learning_rate=0.001, beta_1=0, beta_2=0.99,
                 eps=1e-8, drift=0.001, n_critic=1, use_eql=True,
                 loss="wgan-gp", use_ema=True, ema_decay=0.999,
                 device=th.device("cpu")):
        """
        constructor for the class
        :param embedding_size: size of the encoded text embeddings
        :param depth: depth of the GAN (will be used for each generator and discriminator)
        :param latent_size: latent size of the manifold used by the GAN
        :param compressed_latent_size: size of the compressed latent vectors
        :param learning_rate: learning rate for Adam
        :param beta_1: beta_1 for Adam
        :param beta_2: beta_2 for Adam
        :param eps: epsilon for Adam
        :param n_critic: number of times to update discriminator
                         (Used only if loss is wgan or wgan-gp)
        :param drift: drift penalty weight
                      (Used only if loss is wgan or wgan-gp)
        :param use_eql: whether to use equalized learning rate
        :param loss: the loss function to be used
                     Can either be a string =>
                     ["wgan-gp", "wgan"]
                     Or an instance of ConditionalGANLoss
        :param use_ema: boolean for whether to use exponential moving averages
        :param ema_decay: value of mu for ema
        :param device: device to run the GAN on (GPU / CPU)
        """
        from torch.optim import Adam
        # Create the Generator and the Discriminator
        # (the discriminator is the conditional variant: it also consumes the
        # embedding, internally compressed to compressed_latent_size)
        self.gen = Generator(depth, latent_size, use_eql=use_eql).to(device)
        self.dis = ConditionalDiscriminator(depth, latent_size,
                                            embedding_size, compressed_latent_size,
                                            use_eql=use_eql).to(device)
        # state of the object
        self.latent_size = latent_size
        self.compressed_latent_size = compressed_latent_size
        self.depth = depth
        self.use_ema = use_ema
        self.ema_decay = ema_decay
        self.n_critic = n_critic
        self.use_eql = use_eql
        self.device = device
        self.drift = drift
        # define the optimizers for the discriminator and generator
        self.gen_optim = Adam(self.gen.parameters(), lr=learning_rate,
                              betas=(beta_1, beta_2), eps=eps)
        self.dis_optim = Adam(self.dis.parameters(), lr=learning_rate,
                              betas=(beta_1, beta_2), eps=eps)
        # define the loss function used for training the GAN
        # (must run after self.device / self.dis / self.drift are set)
        self.loss = self.__setup_loss(loss)
        # setup the ema for the generator
        if self.use_ema:
            from pro_gan_pytorch.CustomLayers import EMA
            self.ema = EMA(self.ema_decay)
            self.__register_generator_to_ema()
    def __register_generator_to_ema(self):
        # seed the EMA tracker with the generator's current trainable weights
        for name, param in self.gen.named_parameters():
            if param.requires_grad:
                self.ema.register(name, param.data)
    def __apply_ema_on_generator(self):
        # overwrite the generator's trainable weights with their EMA values
        for name, param in self.gen.named_parameters():
            if param.requires_grad:
                param.data = self.ema(name, param.data)
    def __setup_loss(self, loss):
        # Resolve *loss* (string name or ConditionalGANLoss instance) to an object.
        import pro_gan_pytorch.Losses as losses
        if isinstance(loss, str):
            loss = loss.lower()  # lowercase the string
            if loss == "wgan":
                loss = losses.CondWGAN_GP(self.device, self.dis, self.drift, use_gp=False)
                # note if you use just wgan, you will have to use weight clipping
                # in order to prevent gradient exploding
            elif loss == "wgan-gp":
                loss = losses.CondWGAN_GP(self.device, self.dis, self.drift, use_gp=True)
            else:
                raise ValueError("Unknown loss function requested")
        elif not isinstance(loss, losses.ConditionalGANLoss):
            raise ValueError("loss is neither an instance of GANLoss nor a string")
        return loss
    def optimize_discriminator(self, noise, real_batch, latent_vector, depth, alpha,
                               use_matching_aware=True):
        """
        performs one step of weight update on discriminator using the batch of data
        :param noise: input noise of sample generation
        :param real_batch: real samples batch
        :param latent_vector: (conditional latent vector)
        :param depth: current depth of optimization
        :param alpha: current alpha for fade-in
        :param use_matching_aware: whether to use matching aware discrimination
        :return: current loss (Wasserstein loss), averaged over n_critic steps
        """
        from torch.nn import AvgPool2d
        from torch.nn.functional import upsample  # NOTE(review): deprecated upstream in favor of interpolate()
        # downsample the real_batch for the given depth
        down_sample_factor = int(np.power(2, self.depth - depth - 1))
        # NOTE(review): max(..., 0) looks like it should be max(..., 1); the
        # clamp appears to never fire in practice -- verify before changing.
        prior_downsample_factor = max(int(np.power(2, self.depth - depth)), 0)
        ds_real_samples = AvgPool2d(down_sample_factor)(real_batch)
        if depth > 0:
            prior_ds_real_samples = upsample(AvgPool2d(prior_downsample_factor)(real_batch),
                                             scale_factor=2)
        else:
            prior_ds_real_samples = ds_real_samples
        # real samples are a combination of ds_real_samples and prior_ds_real_samples
        # (progressive-growing fade-in blend, weighted by alpha)
        real_samples = (alpha * ds_real_samples) + ((1 - alpha) * prior_ds_real_samples)
        loss_val = 0
        for _ in range(self.n_critic):
            # generate a batch of samples (detached: no generator gradient)
            fake_samples = self.gen(noise, depth, alpha).detach()
            loss = self.loss.dis_loss(real_samples, fake_samples,
                                      latent_vector, depth, alpha)
            if use_matching_aware:
                # calculate the matching aware distribution loss:
                # score real images against randomly shuffled (mismatched)
                # conditioning vectors and penalize high scores
                mis_match_text = latent_vector[np.random.permutation(latent_vector.shape[0]), :]
                m_a_d = self.dis(real_samples, mis_match_text, depth, alpha)
                loss = loss + th.mean(m_a_d)
            # optimize discriminator
            self.dis_optim.zero_grad()
            loss.backward()
            self.dis_optim.step()
            loss_val += loss.item()
        return loss_val / self.n_critic
    def optimize_generator(self, noise, latent_vector, depth, alpha):
        """
        performs one step of weight update on generator for the given batch_size
        :param noise: input random noise required for generating samples
        :param latent_vector: (conditional latent vector)
        :param depth: depth of the network at which optimization is done
        :param alpha: value of alpha for fade-in effect
        :return: current loss (Wasserstein estimate)
        """
        # generate fake samples:
        fake_samples = self.gen(noise, depth, alpha)
        # TODO: Change this implementation for making it compatible for relativisticGAN
        loss = self.loss.gen_loss(None, fake_samples, latent_vector, depth, alpha)
        # optimize the generator
        self.gen_optim.zero_grad()
        # retain_graph=True: unlike the unconditional variant above -- presumably
        # the conditional loss reuses part of the graph; confirm before removing
        loss.backward(retain_graph=True)
        self.gen_optim.step()
        # if use_ema is true, apply ema to the generator parameters
        if self.use_ema:
            self.__apply_ema_on_generator()
        # return the loss value
        return loss.item()
| [
"akanimax@gmail.com"
] | akanimax@gmail.com |
c0196487a0ae17ed2119a18bfefe0d8ec39cb793 | a956dea0d755b3387bb51c961aef12eab557fd38 | /FACERECOG/forms.py | a7df5618b7abd5dcd166e6a2cc03e6125086c371 | [] | no_license | AnandDadhich/AttendanceThroughFaceRecognitionUsingLiveCameraSurveillance | fd45b287fc59cf33bae101fd758506fc80a1a54e | 63f48b3d7d54ecf5ab66a248bc70a2359fe3e272 | refs/heads/master | 2023-01-30T13:04:30.094481 | 2020-12-13T14:51:01 | 2020-12-13T14:51:01 | 321,069,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | from django import forms
from .models import EmployeeInfo, ContactUs
class EmployeeForm(forms.ModelForm):
    """Form for creating/updating an EmployeeInfo record (id + name)."""
    # BUGFIX: the original pattern r'^[0-9]' only constrained the FIRST
    # character, so values like "1abc" validated even though the error
    # message promises an integer. Anchoring with `+$` requires the whole
    # value to be digits.
    id = forms.RegexField(regex=r'^[0-9]+$',
                          max_length=20,
                          label="ID",
                          required=True,
                          error_messages={"invalid": ("Should be Integer Value")},
                          widget=forms.TextInput(attrs={'id': "eid"}),
                          )
    # Letters and whitespace only, full-string match.
    name = forms.RegexField(regex=r'^[a-zA-Z\s]+$',
                            max_length=30,
                            label="Name",
                            required=True,
                            error_messages={"invalid": ("This value may contain only letters")},
                            widget=forms.TextInput(attrs={'id': "ename"}),
                            )

    class Meta:
        model = EmployeeInfo
        fields = ['id', 'name']
class ContactUsForm(forms.ModelForm):
    """ModelForm exposing every field of the ContactUs model."""
    class Meta:
        # render all model fields on the form
        model=ContactUs
        fields="__all__"
| [
"ananddadhich153@gmail.com"
] | ananddadhich153@gmail.com |
fd0c17fe135302bd499a36a77f1ad8fbb13b8358 | cae3d11651abe9887e151ee967e6ebbc4f9290db | /Source Code/nnpred.py | 9eb3eab63a8ba8316cb40e0280c1706ece4e757b | [] | no_license | traderbhai/Software-Reliability-Prediction-Using-Ensemble-Learning | 8de1aae65bc7b2bf3503615e5abbb6f4c2b4dd84 | b71396e3d26a5f13b40542fd64e84f7ceb57acce | refs/heads/master | 2023-04-02T20:55:38.651980 | 2021-04-11T06:54:18 | 2021-04-11T06:54:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,730 | py | # Importing the libraries
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import RMSprop
from rbflayer import RBFLayer, InitCentersRandom
import numpy as np
b = 0
# X is a list: logarithmically rescales its values into [0, 0.9].
def Scaling(X):
    """Map X onto [0, 0.9] via y = ln(1 + b*x), with b chosen so max(X) -> 0.9.

    Side effect: stores the scale factor in module-global ``b`` so that
    RevScale() can later invert the mapping.
    """
    global b
    b = (np.e**0.9 - 1) / max(X)
    # log1p(x) == log(1 + x) but stays accurate when b*y is tiny
    return [np.log1p(b * y) for y in X]
# Reverse Scaling Function: inverse of Scaling(), x = (e**v - 1) / b
def RevScale(value):
    """Undo the logarithmic scaling applied by Scaling().

    Relies on the module-global ``b`` that Scaling() set; calling it before
    Scaling() leaves b == 0 and is an error.
    """
    # expm1(v) == e**v - 1 but stays accurate for small v (matches the
    # log1p used on the forward path)
    return [np.expm1(v) / b for v in value]
# x: sequence of items, k: lag length
def Lagging(x, k):
    """Return every length-k sliding window of x as rows of a 2-D numpy array."""
    n = len(x)
    windows = [list(x[start:start + k]) for start in range(0, n - k + 1)]
    return np.array(windows)
# Normalized Root Mean Squared Error function
def NRMSE(YActual, YPredicted):
    """sqrt( sum((actual - predicted)^2) / sum(actual^2) ) over paired elements."""
    error_energy = 0
    signal_energy = 0
    for idx in range(len(YActual)):
        error_energy += (YActual[idx] - YPredicted[idx])**2
        signal_energy += YActual[idx]**2
    return np.sqrt(error_energy / signal_energy)
# Sum of Squares due to Error function
def SSE(YActual, YPredicted):
    """Sum over i of (YActual[i] - YPredicted[i])**2."""
    return sum((YActual[idx] - YPredicted[idx])**2
               for idx in range(len(YActual)))
# RE function: absolute relative error in percent
def RE(YActual, YPredicted):
    """Return |(YPredicted - YActual) / YActual| * 100 (element-wise for arrays)."""
    relative_deviation = (YPredicted - YActual) / YActual
    return np.abs(relative_deviation) * 100
# Builds the single input row used for the "next step" prediction.
def getRow(X2D, y2D):
    """Take the last row of X2D shifted left by one, append the last target,
    and return it with shape (1, k)."""
    last = len(X2D) - 1
    row = np.append(X2D[last][1:], y2D[last])
    return row.reshape(1, -1)
def ANN(X_train, X_test, y_train, y_test):
    """Train a small dense classifier and predict.

    Returns a tuple: (prediction for the synthetic "next" row built by
    getRow(), predictions for all of X_test).
    """
    n_features = len(X_train[0])
    net = Sequential()
    # two hidden ReLU layers of width n_features+1, sigmoid output
    net.add(Dense(input_dim=n_features, output_dim=n_features + 1, kernel_initializer='uniform', activation='relu'))
    net.add(Dense(output_dim=n_features + 1, kernel_initializer='uniform', activation='relu'))
    net.add(Dense(output_dim=1, kernel_initializer='uniform', activation='sigmoid'))
    net.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
    net.fit(X_train, y_train, batch_size = 10, epochs = 100, verbose=0)
    next_row = getRow(X_test, y_test)
    return net.predict(next_row), net.predict(X_test)
def RBFNN(X_train, X_test, y_train, y_test):
    """Train a radial-basis-function network regressor and predict.

    Returns a tuple: (prediction for the synthetic "next" row built by
    getRow(), predictions for all of X_test).
    """
    net = Sequential()
    # 10 RBF units, centers drawn from the training data
    centers = InitCentersRandom(X_train)
    net.add(RBFLayer(10, initializer=centers, betas=2.0, input_shape=(len(X_train[0]),)))
    net.add(Dense(1))
    net.compile(loss='mean_squared_error', optimizer=RMSprop())
    net.fit(X_train, y_train, batch_size=50, epochs=2000, verbose=0)
    next_row = getRow(X_test, y_test)
    return net.predict(next_row), net.predict(X_test)
"noreply@github.com"
] | noreply@github.com |
75ee3889c253cbc512df88f5e29584d472b6da47 | 8d66920fabc274a9bb1a9a9ce0e94c3f36a5d9fb | /cifar-10/draw_err.py | 3897a4106b512e75048553d533d552e26675d3a5 | [] | no_license | Matafight/dl_projects | 73686ba7b0908a57e25730e43c9eacbb3e4becec | ccffa5895429a5ab46119c53da5d437229331aca | refs/heads/master | 2021-01-01T04:28:09.008439 | 2020-01-05T07:38:42 | 2020-01-05T07:38:42 | 97,178,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | #_*_coding:utf-8_*_
import numpy as np
import matplotlib.pyplot as plt
import os
'''
Given an array of error series, plot how the error evolves over epochs.
Training hyper-parameters (e.g. learning_rate, weight_decay) are passed in as
a dict and become the saved file name. Training-error and validation-error
curves should be drawn in the same figure, which is then saved to disk.
err_arr should be a list whose elements are themselves lists.
'''
class ErrorPlot:
    """Plots one curve per error series and saves the figure to ./err_plots.

    The file name encodes the hyper-parameters passed as keyword arguments,
    e.g. ErrorPlot(errs, lr=0.1, wd=2) -> "lr-0.1_wd-2.png".
    """
    def __init__(self, err_arr, **kwargs):
        # err_arr: list of error series (one list of values per curve)
        self.err_arr = err_arr
        self.tar_dir = './err_plots'
        # makedirs(exist_ok=True) avoids the exists()/mkdir() race of the
        # original two-step check
        os.makedirs(self.tar_dir, exist_ok=True)
        # encode the hyper-parameters as "key-value" pairs joined by "_"
        self.plot_name = '_'.join('{}-{}'.format(key, value)
                                  for key, value in kwargs.items())
    def plot(self):
        """Draw every error curve over its epoch index and save the figure."""
        for err in self.err_arr:
            plt.plot(range(len(err)), err)
        plt.savefig(os.path.join(self.tar_dir, self.plot_name + '.png'))
        # plt.show()
if __name__=='__main__':
    # smoke test: one dummy error curve plus two fake hyper-parameters
    sample_errors = [[1, 2, 3, 4, 5]]
    hyper_params = {
        'a': 1,
        'b': 23
    }
    plotter = ErrorPlot(sample_errors, **hyper_params)
    plotter.plot()
| [
"guo_sc@foxmail.com"
] | guo_sc@foxmail.com |
521870642a3f641ed4a598bbca8c15723715c16e | a258ca222acc8249c8eee8ea990e4525126ae651 | /src/models/__init__.py | 09f96334b6a0651c5d4d41465bd8683c22361c4c | [
"MIT"
] | permissive | saeedranjbar12/mtlcfci | 25685aa0f1503d39cb47ab011fcb741b9cc7b752 | cc7a5d0a9b447df5e13d2a1f4caac93ddbfbd608 | refs/heads/master | 2020-05-20T18:37:55.610313 | 2019-05-15T03:13:13 | 2019-05-15T03:13:13 | 185,709,539 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | #import torchvision.models as models
#(8*16)
from src.models.segment import *
from src.models.recon import *
from src.models.depth import *
from src.models.resnet import *
#
def get_model(name, n_classes, version=None):
    """Instantiate the network registered under *name*.

    :param name: registry key (e.g. 'segment', 'resnet_L', ...)
    :param n_classes: number of output classes (ignored by the resnet variants)
    :param version: unused, kept for call-site compatibility
    """
    model_cls = _get_model_instance(name)
    if name in ('resnet', 'resnet_L'):
        # resnet variants take no constructor arguments
        return model_cls()
    if name in ('segment', 'segment_L',
                'reconstruct', 'reconstruct_L',
                'depth', 'depth_L'):
        # encoder/decoder variants share the same constructor signature
        return model_cls(n_classes=n_classes,
                         is_batchnorm=True,
                         in_channels=3,
                         is_deconv=True)
    # fallback: construct with class count only
    return model_cls(n_classes=n_classes)
def _get_model_instance(name):
    """Look up the model class registered under *name*.

    Prints a message and returns None when the name is unknown (the
    historical behaviour, kept so callers see the same failure mode).
    """
    registry = {
        'segment': segment,
        'resnet': resnet,
        'reconstruct': recon,
        'depth': depth,
        # Larger resolution 16*32
        'resnet_L': resnet_L,
        'segment_L': segment_L,
        'reconstruct_L': recon_L,
        'depth_L': depth_L,
    }
    try:
        return registry[name]
    except KeyError:
        # was a bare `except:`, which also swallowed unrelated errors such as
        # NameError; narrowed so only an unknown key is treated as "missing"
        print('Model {} not available'.format(name))
| [
"saeedr@ensc-mmc-09.engineering.sfu.ca"
] | saeedr@ensc-mmc-09.engineering.sfu.ca |
86a3a8b7517688b3d439381f7baf7469c0eb82a9 | 9f2a231557a9aabc181ed388faaf2f0b3b59c530 | /Testcode/spellCheck.py | 5be1f5b0fa649c1d809ee849a078538109829c13 | [] | no_license | abhashjain/DIC_Project | 7e379cd5ef99d1fc31d414985e1b04388b475fe0 | 329f8da2f61e95410292a3062c68ed06845ec6ac | refs/heads/master | 2020-04-25T14:49:58.508999 | 2018-12-11T04:36:09 | 2018-12-11T04:36:09 | 172,855,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | import os, time, re
startTime = time.time()
# Load the dictionary as a SET of words. The original read the whole file
# into one string, which made the later `word.lower() not in words` test a
# SUBSTRING match (e.g. "he" matched inside "the"), silently hiding
# misspellings; set membership is both correct and O(1). `with` also closes
# the file handles the original leaked. (The no-op os.getcwd() was dropped.)
with open("..\\src\\words.txt", "r") as wordFile:
    words = set(wordFile.read().split())
print("Words in dictionary:", len(words))
with open("..\\src\\guten.txt", "r", encoding="utf-8") as inputDoc:
    doc = inputDoc.read().split()
print("Words in file:", len(doc))
## Processing the input document
def is_number(x):
    """Return True when int(x) succeeds, i.e. x looks like an integer."""
    try:
        int(x)
    except (TypeError, ValueError):
        # non-numeric strings raise ValueError; None etc. raise TypeError
        return False
    return True
# Keep only "word-like" tokens: not a number, not an email, not a URL, and
# still longer than one character once stripped to alphanumerics.
_non_alnum = re.compile('[^A-Za-z0-9]+')  # compiled once, not per word
processedInput = []
for word in doc:
    if is_number(word) or "@" in word or "www." in word:
        continue
    # the original ran the substitution twice (test + append); do it once
    cleaned = _non_alnum.sub('', word)
    if len(cleaned) > 1:
        processedInput.append(cleaned)
# Every word whose lowercase form is not in the dictionary is "misspelled".
# (The dead counter `i` and the commented-out debug prints were removed.)
misspelledWords = [word for word in processedInput if word.lower() not in words]
print("Total misspelled words =", len(misspelledWords))
print("Total execution time = %s sec" % (time.time() - startTime))
# Persist the misspelled words, one per line.
with open("..//results//outputPython.txt", "w") as outFile:
    for word in misspelledWords:
        outFile.write(word + "\n")
print("Incorrect words written to outputPython.txt")
| [
"nobody@ncsu.edu"
] | nobody@ncsu.edu |
d7e6fb902bb4d82e45d61c4cff79935749eb6882 | 60f75884ced267a5f0f09a0b43f68e7d8c5c7a14 | /tester/test_handlers/test_page_handler.py | 25b3ed3218c1b9b75b71fae6e6b25697c3bb7901 | [
"MIT"
] | permissive | liusheng6982/TorCMS | b0fa1fe96a814c10dc7163b127672e1076d19e02 | cb5ee651ece0cff28eae1dcde9013edf28387073 | refs/heads/master | 2021-01-19T04:31:54.221405 | 2017-04-04T11:41:50 | 2017-04-04T11:41:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | # -*- coding:utf-8 -*-
from torcms.handlers.page_handler import PageHandler
from torcms.handlers.page_ajax_handler import PageAjaxHandler
def Test():
urls = [
("/label/(.*)", PageAjaxHandler, dict()),
("/label/(.*)", PageHandler, dict()),
]
assert urls
| [
"bukun@osgeo.cn"
] | bukun@osgeo.cn |
ff87449e16a1f7e668108b4e1bd2f3a38e5bf130 | c61e235a2a2edb9f7b62d979a0e8854c5d6ca685 | /Neflix/home.py | 4f9e8ad35d56ba7f239c27889dcaf1c0724d1fb3 | [] | no_license | RemiAderemi/Netflix-core | 4a9716ab903e92f25af67079997bbd5f26e57e29 | 48d7150b9e97ba54b6ff9cfd9b291036433baa2d | refs/heads/main | 2023-01-19T06:54:09.064170 | 2020-12-02T06:57:48 | 2020-12-02T06:57:48 | 317,776,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | #!C:\Python\python.exe
# -*- coding: utf-8 -*-
# (the commented-out stdout re-encoding experiments were removed)

# CGI contract: the content-type header must be emitted before any HTML.
print("Content-type: text/html\n\n")
import view
# assemble the page from the shared view fragments
page = view.head() + view.home() + view.footer()
print(page)
"RemiAderemi@users.noreply.github.com"
] | RemiAderemi@users.noreply.github.com |
388180bdf84d4905e11b81f06d86e9b2badd641e | a01b406b806c1f81c2d7c52e910212a60ddb7c29 | /droga/droga/spiders/drogaraia.py | 2c5cd386616c1bded63b29fee0984a1dc08d7769 | [] | no_license | jonatasfleck/spider | 468163653dad89624db399cd39732f7c410915c1 | 49c3be6dd18346085716707f027610d055392743 | refs/heads/main | 2023-07-25T17:49:58.355284 | 2021-09-04T22:11:38 | 2021-09-04T22:11:38 | 403,165,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | import scrapy
class DrogaraiaSpider(scrapy.Spider):
    """Scrapes product names and links from Droga Raia's generics listing."""
    name = 'drogaraia'
    # Fixed set of result pages (1..7). A "next page" rule could replace this
    # by requesting the following page's URL with parse() as the callback.
    start_urls = [f'https://www.drogaraia.com.br/medicamentos/remedios/genericos.html?p={i}' for i in range(1,8)]

    def parse(self, response, **kwargs):
        """Yield one {link, name} item per product container on the page."""
        # each product container exposes its link (@href) and name (@title);
        # yielding item-by-item keeps memory flat across many pages
        for product in response.xpath('//div[@class="container"]'):
            yield {
                'link' : product.xpath('./a/@href').get(),
                'name' : product.xpath('./a/@title').get()
            }
# The capture output was saved in 2 formats (JSON and XML) because names
# converted to JSON came back without accents.
# Adjustments made in settings.py:
# 1. USER_AGENT was set so the site identifies us as a genuine browser.
# 2. AUTOTHROTTLE was set to TRUE to add a delay to each request (it can be
#    commented out in settings for faster requests, at the risk of being
#    blocked by the scraped site).
# 3. ROBOTSTXT_OBEY was set to FALSE because it was blocking the page capture.
# A UI will be developed later to make configuring the capture of each
# specific page easier, with more capture parameters.
# Code developed by Jonatas Fleck to test page scraping.
| [
"noreply@github.com"
] | noreply@github.com |
dfbcb124abcd83eb7bbda2441d24fb4c31af8fa4 | b408856116e437406aa6981dd94cdb04a2fc962e | /ecgpointdetector.py | b5a5583e1241ab6865d98f1cce80da2aa600f73e | [] | no_license | huberste/ecgberry | d4b82eeefe505e4a72d84f98ff3df77dcfc843d5 | 0a9b15922147f54dedea4a45fc67fe72602cc19a | refs/heads/master | 2020-05-02T21:47:01.794520 | 2019-03-28T15:29:33 | 2019-03-28T15:29:33 | 178,231,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,982 | py | #!/usr/bin/env python3
""" Sources:
[Luz2016survey] Luz, Eduardo José da S., et al. "ECG-based heartbeat classification for arrhythmia detection: A survey." Computer methods and programs in biomedicine 127 (2016): 144-164.
[Li1995detection] Li, Cuiwei, Chongxun Zheng, and Changfeng Tai. "Detection of ECG characteristic points using wavelet transforms." IEEE Transactions on biomedical Engineering 42.1 (1995): 21-28.
"""
if __name__ == "__main__":
    # This module is a library; refuse to do anything when run directly.
    # (typo fix: "rund" -> "run")
    print("COMPUTER SAYS NO. (Don't run this module directly!)")
import math
import time
import numpy as np # numpy
import pywt # pywavelets: https://pywavelets.readthedocs.io
import wfdb # WFDB: https://wfdb.readthedocs.io
# --- Detector tuning constants -----------------------------------------------
# Amplitude thresholds per wavelet scale (A_4 for the coarsest scale 2^4,
# each finer scale uses half the value).
A_4 = 0.5 # TODO this is a magic value, seen empirically
A_3 = A_4 / 2.0
A_2 = A_3 / 2.0
A_1 = A_2 / 2.0
DURATION = 2 # [seconds] length of the internal signal/coefficient ring buffer
MM_DET_NEIGH_THRESH = 1.20 # Modulus Maximum Detection Neighborhood Threshold for R Peak detection 2) Step 2
NEIGHBORHOOD = 0.020 # [seconds] width for R-Peak detection algorithm
REDUNDANCY_NEIGHBORHOOD = 0.120 # [seconds] width for R-Peak detection algorithm
# TODO: find good ALPHAAP threshold...
ALPHAAP_THRESHOLD = 0.010 # [power] singularity-degree cutoff (alpha', see [Li1995detection])
INTERVAL_THRESHOLD = 0.120 # [seconds] max gap between opposite-sign modulus maxima
PT_THRESHOLD = 0.2 # [], * epsilon[3], empirically
BLANKING_PERIOD = 0.200 # [seconds] refractory period after a detected R peak
MAX_BACKTRACK_INTERVAL = 1.50 # [seconds]
## ECG characteristics on healthy people
P_WAVE = 0.110 # [seconds], +/- 0.020, from [Luz2016survey]
PQ_INTERVAL = 0.160 + 0.040 # [seconds], +/- 0.040, from [Luz2016survey]
QRS_WIDTH = 0.100 # [seconds], +/- 0.020, from [Luz2016survey]
QT_INTERVAL = 0.400 + 0.040 # [seconds], +/- 0.040, from [Luz2016survey]
def is_extremum(vals):
    """True when the middle of three samples has the (weakly) largest magnitude.

    Plateaus count as extrema: neighbors merely EQUAL in magnitude do not
    disqualify the center sample.
    """
    center = abs(vals[1])
    return abs(vals[0]) <= center and abs(vals[2]) <= center
def zero_crossing(x1, x2, y1, y2):
    """Return the x-intercept of the straight line through (x1, y1) and (x2, y2).

    Kept for reference ("Not needed in ecgpointdetector anymore").
    Error convention: a vertical line returns x1; any other degenerate case
    returns the sentinel -99.
    """
    if x1 == x2:
        # vertical line: report x1 (historical behaviour)
        print("[DEBUG] zero_crossing x1==x2... ecgpointfinder L51")
        return x1
    a = (y2 - y1) / (x2 - x1)  # slope
    # BUGFIX: a horizontal line (y1 == y2 != 0) gave slope 0 and the division
    # below raised ZeroDivisionError before the isnan/isinf guards could run;
    # fold it into the existing -99 error convention instead.
    if a == 0:
        print("[ERROR] zero_crossing horizontal line ecgpointfinder")
        return -99
    b = y1 - a * x1  # y-intercept
    result = -(b / a)
    if math.isnan(result):
        print("[ERROR] zero_crossing isnan ecgpointfinder L57")
        result = -99
    if math.isinf(result):
        print("[ERROR] zero_crossing isinf ecgpointfinder L60")
        result = -99
    return result
class ECGPointDetector:
""" Detects characteristic ECG points. See [Li1995detection]
"""
def __init__(self, sample_rate, max_bpm=200, wavelet="haar"):
""" Initializes an ECGPointDetector
"""
self.sample_rate = sample_rate
self.wavelet = wavelet
self.max_bpm = max_bpm
self.reinit()
def reinit(self):
""" Initializes an ECGPointDetector
"""
self.As = [A_1, A_2, A_3, A_4] # Thresholds, see [Li1995detection]
self.epsilons = [0.3 * A_1, 0.3 * A_2, 0.3 * A_3, 0.3 * A_4] # Thresholds, see [Li1995detection]
self.alphaap_threshold = ALPHAAP_THRESHOLD
self.blanking_samples = int (BLANKING_PERIOD * self.sample_rate)
self.last_rr_interval = MAX_BACKTRACK_INTERVAL # [seconds]
self.signal = [0] * (DURATION * self.sample_rate) # list for signal
self.coeffs = [[0] * (DURATION * self.sample_rate), [0] * (DURATION * self.sample_rate), [0] * (DURATION * self.sample_rate), [0] * (DURATION * self.sample_rate)]
self.head = len(self.signal) # where does the data begin right now?
self.window = int((60.0 / self.max_bpm) * self.sample_rate)
self.last_R_peak = -self.last_rr_interval
    def add_signal(self, signal):
        """ adds given points to a signal and returns (new) characteristic points

        Shifts the new samples into the internal ring buffers (raw signal and
        the four SWT detail scales), then reruns the detector over the region
        that could contain not-yet-reported beats.

        :param signal: iterable of new samples; must be shorter than the buffer
        :return: (ps, qs, rs, ss, ts) lists of detected point positions
        """
        # TODO insert length check of signal here
        points = len(signal)
        # stationary wavelet transform of ONLY the new chunk
        swt = pywt.swt(data=signal,
                       wavelet=self.wavelet,
                       level=4,
                       start_level=0,
                       axis=-1)
        levels = len(swt)
        # detail coefficients ordered finest (scale 2^1) -> coarsest (2^4)
        coeffs = [swt[levels-1][1],
                  swt[levels-2][1],
                  swt[levels-3][1],
                  swt[levels-4][1]]
        # shift signal (drop the oldest `points` samples, append the new ones)
        self.signal[:-points] = self.signal[points:]
        self.signal[-points:] = signal[:]
        # shift coeffs the same way on every scale
        for scale in range(4):
            self.coeffs[scale][:-points] = self.coeffs[scale][points:]
            self.coeffs[scale][-points:] = coeffs[scale][:]
        # set starting point for algorithm: indices move left by `points`
        self.last_R_peak -= points
        self.head = max(0, self.head - points)
        # NOTE(review): `points - len(signal)` is always 0 here -- presumably a
        # leftover from an earlier formulation; confirm before simplifying.
        start = max(self.head, min(self.last_R_peak + self.blanking_samples, points - len(signal)))
        # run algorithm
        ps, qs, rs, ss, ts = self.find_characteristic_points(self.coeffs,start_sample=start)
        return ps, qs, rs, ss, ts
def do_swt_and_find_characteristic_points(self, signal, sample_rate, max_bpm):
self.reinit()
swt = pywt.swt(data=signal,
wavelet=self.wavelet,
level=4,
start_level=0,
axis=-1)
levels = len(swt)
coeffs = [swt[levels-1][1],
swt[levels-2][1],
swt[levels-3][1],
swt[levels-4][1]]
ps, qs, rs, ss, ts = self.find_characteristic_points(coeffs=coeffs, start_sample=0)
return ps, qs, rs, ss, ts, coeffs
def find_characteristic_points(self, coeffs, start_sample=0):
""" R Peak detection algorithm from
"Detection of ECG Characteristic Points Using Eavelet Transforms" by
Cuiwei Li, Chongxun Zheng, and Changfeng Tai (1995)
Parameters:
coeffs: swt of the signal to be investigated
Returns:
ps, qs, rs, ss, ts
"""
## 1) selection of characteristic scales
## scale s=2^j, 0 < j < 4 (from paper)
## 2) Determination of Modulus Maximum Lines of R Waves:
## Modulus Maximum (MM): absolute values of all Maxima / Minima
## 2.1) find all the maxima larger than epsilon_4
num_samples = len(coeffs[0]) # should be equal to len(coeffs[0])
p_time_window = int(PQ_INTERVAL * self.sample_rate) # distance from P to Q in samples
t_time_window = int(QT_INTERVAL * self.sample_rate) # distance from Q to T in samples
start, end = start_sample, min(start_sample + self.window, num_samples) # normally we should get more than (window) samples, but you never know...
backtracked = False
last_backtrack_end = 0
n_ks = [[], [], [], []] # n_k^scale from the paper, i.e. the position of a MM in coeffs[scale]
r_peaks = [] # detected r_peaks
qrs_onsets = []
qrs_offsets = []
t_waves = [] # [(onset, high, offset), (onset, high, offset), ...]
p_waves = [] # [(onset, high, offset), (onset, high, offset), ...]
while start < num_samples: # window loop
n_ks_window = [[], [], [], []] # n_k^scale from the paper, i.e. the position of a MM in coeffs[scale]. Only for this window!
r_peaks_window = [] # in this window detected r_peaks
qrs_onsets_window = []
qrs_offsets_window = []
t_waves_window = []
p_waves_window = []
found_r_peak = False # we could just use len(r_peaks_found)
scale = 3 # begin with scale 2^4
for coeff_index in range(start, end): # find MMs for scale 3
epsilon = self.epsilons[scale] if not backtracked else self.epsilons[scale] * 0.5
if abs(coeffs[scale][coeff_index]) > epsilon: # greater than threshold
if coeff_index > 0 and coeff_index < num_samples - 1 and \
is_extremum(coeffs[scale][coeff_index-1:coeff_index+2]):
n_ks_window[scale].append(coeff_index)
## Now n_ks_window[3] is a list of all (local) extrema > threshold epsilons[3]
## 2.2) look at each position if there is a neighborhood maximum at j-1
neighborhood = int(NEIGHBORHOOD * self.sample_rate) # neighborhood in samples
for scale in range(3, 0, -1): # find corresponding MMs for lower scales
n_k_index, goal = 0, len(n_ks_window[scale]) # loop conditions
while n_k_index < goal: # for every modulus maximum in scale
n_k = n_ks_window[scale][n_k_index]
if n_k == 0: # DEBUG: should never end up here
print("[ERROR]", "This should not happen...")
n_ks_window[scale-1].append(0)
continue # skip this n_k, as it was invalidated earlier
locations = [] # locations of neighboring MMs in lower scale
epsilon = self.epsilons[scale] if not backtracked else self.epsilons[scale] * 0.5
## first add modulus maximum at same position (if MM)
if abs(coeffs[scale-1][n_k]) > epsilon: # above threshold
if is_extremum(coeffs[scale-1][n_k-1:n_k+2]):
if (coeffs[scale-1][n_k] > 0) == (coeffs[scale][n_k] > 0): # same signum - this step is *NOT* described in the paper!
locations.append(n_k)
## then add MMs of the neighborhood, with increasing distance
for i in range(1, neighborhood): # for "the neighborhood"
pos = n_k - i
if pos > 0:
if abs(coeffs[scale-1][pos]) > epsilon: # above threshold
if is_extremum(coeffs[scale-1][pos-1:pos+2]):
if (coeffs[scale-1][n_k-i] > 0) == (coeffs[scale][n_k] > 0): # same signum - this step is *NOT* described in the paper!
locations.append(pos)
pos = n_k + i
if pos < num_samples - 1:
if abs(coeffs[scale-1][pos]) > epsilon: # above threshold
if is_extremum(coeffs[scale-1][pos-1:pos+2]):
if (coeffs[scale-1][n_k+i] > 0) == (coeffs[scale][n_k] > 0): # same signum - this step is *NOT* described in the paper!
locations.append(pos)
toappend = 0
if len(locations) == 0: # no modulus maxima on lower scale were found
for i in range(scale, 4): # delete all higher scale n_ks_window
del n_ks_window[i][n_k_index]
goal -= 1
continue
elif len(locations) == 1: # exactly one modulus maximum
toappend = locations[0]
else: # more than one modulus maximum
## select largest one
vals = []
for location in locations:
vals.append(abs(coeffs[scale-1][location]))
maxindex = vals.index(max(vals))
## if largest one !> MM_DET_NEIGH_THRESH others: select nearest one (MM_DET_NEIGH_THRESH = 1.2 in paper)
for val in vals:
if val != vals[maxindex]:
if vals[maxindex] <= MM_DET_NEIGH_THRESH * val:
## select nearest value, conveniently first one in the array
maxindex = 0
break
toappend = locations[maxindex]
if toappend in n_ks_window[scale-1]: # value already in list
for i in range(scale, 4): # delete all higher scale n_ks_window
del n_ks_window[i][n_k_index]
goal -= 1
continue
else: # append value to n_ks_window
n_ks_window[scale-1].append(toappend)
n_k_index += 1
## eliminate MM lines where n_ks_window[0] == 0, i.e. no MM line at scale 0
i, goal = 0, len(n_ks_window[0]) # loop conditions
while i < goal: # clean MM lines where scale 0 is not a MM
if n_ks_window[0][i] == 0:
print("[ERROR] This should not happen")
for scale in range(4):
del n_ks_window[scale][i]
goal -= 1
continue
i += 1
## 3) Calculation of Singular Degree
        def a(j, n_k): # see paper
            """Absolute wavelet coefficient at scale j, clamped away from zero.

            The clamp (1e-5) keeps the log2 in alpha() finite when the
            coefficient is exactly zero.
            """
            result = abs(coeffs[j][n_k])
            if result == 0:
                result = 0.00001
            return result
        def alpha(j, n_k): # see paper
            """Local log2 slope of the coefficient magnitude between scales j and j+1."""
            try:
                return math.log2(a(j+1, n_k)) - math.log2(a(j, n_k))
            except ValueError as ve:
                # a() always returns a value > 0, so log2 should not raise here;
                # kept as a defensive fallback that degrades to a zero slope.
                print("[ERROR] exception...", ve)
                print("[DEBUG] a(j+1, n_k)", a(j+1, n_k), "a(j, n_k)", a(j, n_k))
                return 0
        def alphaap(n_k): # alphaap = alpha apostrohpe = \alpha', see paper
            """Singular degree estimate: mean of the slopes between scales 0-1 and 1-2."""
            return (alpha(0, n_k) + alpha(1, n_k)) / 2
## eliminate lines where alphaap < threshold.
## quote from the paper:
## "(...) if the \alpha' suddenly decreases or even becomes negative, the
## corresponding singularity point (...) will be eliminated"
i, goal = 0, len(n_ks_window[0]) # loop conditions
while i < goal: # delete MM line of all noise singularities
n_k = n_ks_window[0][i]
if alphaap(n_k) < self.alphaap_threshold: # Singularity probably is noise
for scale in range(4): # delete MM line on all scales
del n_ks_window[scale][i]
goal -= 1
continue
self.alphaap_threshold = 0.1 * alphaap(n_k) # "if the \alpha' suddenly decreases greatly (...) the corresponding singularity point must be noise"
i += 1
## 4) Elimination of Isolation and Redundant Modulus Maximum Lines:
## 4.1) "First, eliminiate isolation modulus maximum lines."
n_k_index, goal = 0, len(n_ks_window[0]) # loop conditions
while n_k_index < goal: # delete isolation MM lines
n_k = n_ks_window[0][n_k_index]
signum = (coeffs[0][n_k] > 0) # signum of this n_k
## find previous MM with different signum
previous_n_k_index = n_k_index - 1
previous_n_k = 0
while previous_n_k_index >= 0: # find previous MM with different signum
previous_n_k = n_ks_window[0][previous_n_k_index]
if (coeffs[0][previous_n_k] > 0) != signum: # different signums
break
previous_n_k_index -= 1 # continue loop
if previous_n_k_index >= 0: # compare with previous value
if n_k - previous_n_k < int(INTERVAL_THRESHOLD * self.sample_rate): # not isolation line
n_k_index += 1
continue
## find next MM with different signum
next_n_k_index = n_k_index + 1
next_n_k = 0
while next_n_k_index < goal: # find next MM with different signum
next_n_k = n_ks_window[0][next_n_k_index]
if (coeffs[0][next_n_k] > 0) != signum: # different signums
break
next_n_k_index += 1 # continue loop
if next_n_k_index < goal: # compare with next value
if next_n_k - n_k < int(INTERVAL_THRESHOLD * self.sample_rate): # not isolation line
n_k_index += 1
continue
## if we are here, this is an isolation line
for scale_index in range(4): # eliminate isolation MM line on all scales
del n_ks_window[scale_index][n_k_index]
goal -= 1
## 4.2) "Next, eliminate redundant modulus maximum lines."
n_k_index, goal = 0, len(n_ks_window[2]) # loop conditions
while n_k_index < goal: # remove redundant MM lines
n_k = n_ks_window[2][n_k_index]
signum = (coeffs[2][n_k] > 0)
neighborhood_mms = [] # (n_k, s_j(n), dist to n_k)
## gather earlier MMs in neighborhood
previous_n_k_index = n_k_index - 1 # loop condition
while previous_n_k_index >= 0: # gather previous MMs in neighborhood
previous_n_k = n_ks_window[2][previous_n_k_index]
if n_k - previous_n_k < int(REDUNDANCY_NEIGHBORHOOD * self.sample_rate): # previous_n_k is in the neighborhood of n_k
if (coeffs[2][previous_n_k] > 0) != signum: # other signum than n_k
neighborhood_mms.append((previous_n_k, coeffs[2][previous_n_k], n_k - previous_n_k))
previous_n_k_index -= 1 # loop continuation
else: # distance > neighborhood -> break
break
## gather later MMs
next_n_k_index = n_k_index + 1
while next_n_k_index < goal: # gather later MMs in neighborhood
next_n_k = n_ks_window[2][next_n_k_index]
if next_n_k - n_k < int(REDUNDANCY_NEIGHBORHOOD * self.sample_rate):
if (coeffs[2][next_n_k] > 0) != signum: # other signum than n_k
neighborhood_mms.append((next_n_k, coeffs[2][next_n_k], next_n_k - n_k))
next_n_k_index += 1 # loop continuation
else:
break # distance > neighborhood -> break
if len(neighborhood_mms) < 2: # only one neighbor --> no redundant neighbors to eliminate --> continue
n_k_index += 1 # loop continuation
continue
num_neighbors = len(neighborhood_mms) # loop condition
while num_neighbors > 1: # pairwise compare and eliminate neighbors
try:
neighbor1 = neighborhood_mms[0]
neighbor2 = neighborhood_mms[1]
delindex = 0
## "Rule 1: If A_1/L_1 > 1.2 A_2/L_2: Min2 is redundant."
if abs(neighbor1[1]) / abs(neighbor1[2]) > (abs(neighbor2[1]) / abs(neighbor2[2])) * 1.20:
delindex = n_ks_window[2].index(neighbor2[0])
del neighborhood_mms[1]
## "Rule 2: If A_2/L_2 > 1.2 A_1/L_1: Min1 is redundant."
elif abs(neighbor2[1]) / abs(neighbor2[2]) > (abs(neighbor1[1]) / abs(neighbor1[2])) * 1.20:
if neighbor1[0] in n_ks_window[2]:
delindex = n_ks_window[2].index(neighbor1[0])
else:
print("[ERROR] [DEBUG] damn 307", neighbor1[0], n_ks_window[2])
del neighborhood_mms[0]
else: # "Rule 3: Otherwise, "
if (neighbor1[0] < n_k) == (neighbor2[0] < n_k): # "both are on the same side of the positive maximum"
delindex = n_ks_window[2].index(neighbor2[0]) # "the minimum farther from the maximum is redundant"
del neighborhood_mms[1]
else: # "Min1 and Min2 are on different sides of the maximum"
if coeffs[2][n_k] > 0: # n_k is positive maximum
delindex = n_ks_window[2].index(neighbor2[0]) # "the minimum following the maximum is redundant"
else: # n_k is negative minimum
if neighbor1[0] in n_ks_window[2]:
delindex = n_ks_window[2].index(neighbor1[0]) # the maximum before the minimum is redundant
else:
print("[ERROR] [DEBUG] line 320", neighbor1[0], n_ks_window[2])
del neighborhood_mms[1]
for scale in range(4): # eliminate redundant MM line
del n_ks_window[scale][delindex]
num_neighbors -= 1 # inner loop stop condition
goal -= 1 # outer loop stop condition
except ValueError as ve:
print("[ERROR] [DEBUG] damn 327", ve)
## 5) Detection of the R Peak:
## "R peak can be located at a zero-crossing point of a positive maximum-negative minimum pair at scale 2^1."
n_k_index, goal = 0, len(n_ks_window[0])
while n_k_index < goal - 1: # find MM pairs
x1 = n_ks_window[0][n_k_index]
x2 = n_ks_window[0][n_k_index + 1]
## TODO / DEBUG this is not needed...
# find *real* minimum / maximum on coeffs:
# x1start = max(0, x1-2*neighborhood)
# x1end = min(num_samples, x1+2*neighborhood)
# if coeffs[0][x1] < 0:
# x1 = x1start + np.argmin(coeffs[0][x1start:x1end]).item()
# n_ks_window[0][n_k_index] = x1
# else:
# x1 = x1start + np.argmax(coeffs[0][x1start:x1end]).item()
# n_ks_window[0][n_k_index] = x1
# x2start = max(0, x2-2*neighborhood)
# x2end = min(num_samples, x2+2*neighborhood)
# if coeffs[0][x2] < 0:
# x2 = x2start + np.argmin(coeffs[0][x2start:x2end]).item()
# n_ks_window[0][n_k_index+1] = x2
# else:
# x2 = x2start + np.argmax(coeffs[0][x2start:x2end]).item()
# n_ks_window[0][n_k_index+1] = x2
y1 = coeffs[0][x1]
y2 = coeffs[0][x2]
# zero_point = int(zero_crossing(x1, x2, y1, y2)) + 2 # there seems to be some drift, thats why + 2
zero_point = int((x1+x2)/2)
if zero_point > 0 and zero_point < num_samples: # the values should be *inside* the numbers
if zero_point - self.last_R_peak > self.blanking_samples: # blanking
r_peaks_window.append(zero_point)
found_r_peak = True
self.last_R_peak = r_peaks_window[-1] # last R Peak
for scale in range(4): # update epsilon thresholds
max_abs = max(abs(coeffs[scale][n_ks_window[scale][-2]]), abs(coeffs[scale][n_ks_window[scale][-1]]))
if max_abs < 2.0 * self.As[scale]:
#A_before = As[scale]
self.As[scale] = 0.875 * self.As[scale] + 0.125 * max_abs
self.epsilons[scale] = 0.3 * self.As[scale]
## QRS onset and offset:
## For every R peak: Look for MM before and after the MM Line, track it to zero and this should be the On- and Offset
found_q, found_s = False, False
## look for MM before x1
while x1 > 0:
if (coeffs[1][x1] > 0) != (y1 > 0):
break
x1 -= 1
found_q = True
qrs_onsets_window.append(x1)
while x2 < num_samples:
if (coeffs[1][x2] > 0) != (y2 > 0):
break
x2 += 1
found_q = True
qrs_offsets_window.append(x2)
# for direction in [-1, +1]:
# distance = 0
# modmaxfound = False
# x, y = x1, y1
# if direction == +1:
# x, y = x2, y2
# while abs(distance) < 4*neighborhood:
# if modmaxfound and ((coeffs[0][x + distance] >= 0) == (y >= 0)): # same signum
# if direction == -1:
# found_q = True
# qrs_onsets_window.append(x + distance)
# else:
# found_s = True
# qrs_offsets_window.append(x + distance)
# break
# elif ((coeffs[0][x + distance] >= 0) != (y >= 0)) and is_extremum(coeffs[0][x+distance-1:x+distance+2]):# and (abs(coeffs[0][x + distance]) > 0.5 * epsilons[0]): # other signum
# modmaxfound = True
# distance += direction
# if x + distance < 1 or x+ distance >= num_samples-2:
# break
## T wave detection
scale = 3 # from [Luz2016survey]
offset = 7 * neighborhood # TODO make this a variable? Work with QT Interval?
t_start = max(0, self.last_R_peak + offset)
t_end = min(num_samples, self.last_R_peak + t_time_window)
if found_s:
t_start = max(0, qrs_offsets_window[-1] + offset)
t_end = min(num_samples, qrs_offsets_window[-1] + t_time_window)
if t_start < t_end:
negmin = t_start + np.argmin(coeffs[scale][t_start:t_end]).item()
posmax = t_start + np.argmax(coeffs[scale][t_start:t_end]).item()
x1, x2 = min(negmin, posmax), max(negmin, posmax)
y1 = coeffs[scale][x1]
y2 = coeffs[scale][x2]
threshold = PT_THRESHOLD * self.epsilons[scale] # Thresholding
if (abs(y1) > threshold) and (abs(y2) > threshold):
# zero_point = int(zero_crossing(x1, x2, y1, y2))
zero_point = int((x1 + x2) / 2)
while x1 > 0: # von x1 nach links zum nächsten MM oder 0
if (coeffs[scale][x1] > 0) != (y1 > 0):
break
x1 -= 1
while x2 < num_samples: # von x1 nach links zum nächsten MM oder 0
if (coeffs[scale][x2] > 0) != (y2 > 0):
break
x2 += 1
t_waves_window.append((x1, zero_point, x2))
## P wave detection
scale = 3 # from [Luz2016survey]
offset = 1 * neighborhood # TODO make this a variable?
p_start = max(0, self.last_R_peak - p_time_window)
p_end = min(num_samples, self.last_R_peak - offset)
if found_q:
p_start = max(0, qrs_onsets_window[-1] - p_time_window)
p_end = min(num_samples, qrs_onsets_window[-1] - offset)
if p_start < p_end:
negmin = p_start + np.argmin(coeffs[scale][p_start:p_end]).item()
posmax = p_start + np.argmax(coeffs[scale][p_start:p_end]).item()
x1, x2 = min(negmin, posmax), max(negmin, posmax)
y1 = coeffs[scale][x1]
y2 = coeffs[scale][x2]
threshold = PT_THRESHOLD * self.epsilons[scale] # Thresholding
if (abs(y1) > threshold) and (abs(y2) > threshold):
# zero_point = int(zero_crossing(x1, x2, y1, y2))
zero_point = int((x1 + x2) / 2)
while x1 > 0: # von x1 nach links zum nächsten MM oder 0
if (coeffs[scale][x1] > 0) != (y1 > 0):
break
x1 -= 1
while x2 < num_samples: # von x1 nach links zum nächsten MM oder 0
if (coeffs[scale][x2] > 0) != (y2 > 0):
break
x2 += 1
p_waves_window.append((x1, zero_point, x2))
else:
for scale in range(4): # delete blanked n_ks
del n_ks_window[scale][n_k_index] # yes, twice, we need to delete two values
del n_ks_window[scale][n_k_index] # yes, twice, we need to delete two values
goal -= 2
continue
n_k_index += 2 # loop continuation
## back in window loop
for i in range(4): # merge n_ks_window
for n_k in n_ks_window[i]:
if not n_k in n_ks[i]:
n_ks[i].append(n_k)
for peak in r_peaks_window: # merge R peaks
if not peak in r_peaks:
r_peaks.append(peak)
for onset in qrs_onsets_window:
if not onset in qrs_onsets:
qrs_onsets.append(onset)
for offset in qrs_offsets_window:
if not offset in qrs_offsets:
qrs_offsets.append(offset)
for p in p_waves_window: # merge P waves
if not p in p_waves:
p_waves.append(p)
for t in t_waves_window: # merge T waves
if not t in t_waves:
t_waves.append(t)
if found_r_peak: # found a new r_peak
start += self.window # window loop continuation
if len(r_peaks) > 1:
self.window = r_peaks[-1] - r_peaks[-2] # last RR interval
self.last_rr_interval = (r_peaks[-1] - r_peaks[-2]) / self.sample_rate
else:
self.window = max(self.window, r_peaks[-1]) # We have no idea about the last RR interval, so just take the window or distance from beginning of the signal
backtracked = False
else: # found no new R Peak...
if backtracked: # we already backtracked... there does *really* not seem to be a R peak in this window...
backtracked = False
last_backtrack_end = end
start += self.window
else: # we have not yet backtracked
if len(r_peaks) > 0: # if we have found any R peaks already
interval = self.last_rr_interval * self.sample_rate# in seconds
if end - r_peaks[-1] > int(1.5 * interval):
backtracked = True
# TODO probably the paper means something else in "backtracking", i.e. only scale3 comparision?
start = max(last_backtrack_end, r_peaks[-1] + self.blanking_samples)
else:
start += int(0.5 * self.window)
else: # we have not found a single R peak yet...
start += self.window
end = min(start + self.window, num_samples) # window loop continuation
## 6) (not in paper): find peak in signal
## TODO DEBUG THIS IS NOT NEEDED
# for peak_index in range(len(r_peaks)):
# i = r_peaks[peak_index]
# while abs(signal[i-1]) >= abs(signal[i]):
# i -= 1
# while abs(signal[i+1]) >= abs(signal[i]):
# i += 1
# r_peaks[peak_index] = i
return p_waves, qrs_onsets, r_peaks, qrs_offsets, t_waves
| [
"stefan.huber@stusta.de"
] | stefan.huber@stusta.de |
44d7b163937a1cc756b6f3918b58cb04e955dc93 | 04aacfdb9944e6d796671198835394e07db98ecf | /pythonz/commands/locate.py | 939aedb0faee04842dfa3a3a10a968e88396ce8c | [] | no_license | rmoorman/pythonz | ea86f302c70b67440c2829d4a0a9161d4a006ccc | 3d43172cae190284cf0b620aa28c0f794f770497 | refs/heads/master | 2021-01-12T19:51:39.057258 | 2014-10-16T07:20:06 | 2014-10-16T07:20:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py |
import os
from pythonz.commands import Command
from pythonz.define import PATH_PYTHONS
from pythonz.util import Package, is_installed
from pythonz.log import logger
class LocateCommand(Command):
    """Print the installation path of a pythonz-managed interpreter.

    Resolves the requested version/implementation to an installed package
    and logs the absolute path of its ``python`` binary.
    """

    name = "locate"
    usage = "%prog [options] VERSION"
    summary = "Locate the given version of python"

    def __init__(self):
        super(LocateCommand, self).__init__()
        # --type selects which Python implementation VERSION refers to.
        self.parser.add_option(
            "-t", "--type",
            dest="type",
            default="cpython",
            help="Type of Python version: cpython, stackless, pypy, pypy3 or jython."
        )

    def run_command(self, options, args):
        # Exactly one positional argument (the version) is required.
        if not args or len(args) > 1:
            self.parser.print_help()
            return
        package = Package(args[0], options.type)
        if not is_installed(package):
            logger.error("`%s` is not installed." % package.name)
            return
        # Report the interpreter binary inside the install prefix.
        logger.log(os.path.join(PATH_PYTHONS, package.name, 'bin', 'python'))


LocateCommand()
| [
"saghul@gmail.com"
] | saghul@gmail.com |
9119375210135d68a5d5056526b4f3f2dc6f5299 | 9f9cc09b5af8546377d28f7f7cbc02f2c471b20c | /tracker_app/admin.py | 9d89b57a4b78bb3da01da673720b9e7ebabe1246 | [] | no_license | WenlingDing/DJANGO | 207efa5e5c654d9296f5c66bb8f9bc558eb1a906 | 003a98a8b489368649471c8be0651c0e66a643d8 | refs/heads/master | 2021-06-19T03:43:15.363563 | 2019-06-03T08:01:14 | 2019-06-03T08:01:14 | 186,542,446 | 0 | 0 | null | 2021-06-10T21:27:44 | 2019-05-14T04:05:38 | Python | UTF-8 | Python | false | false | 228 | py | from django.contrib import admin
from .models import Feature, Issue, Status

# Register your models here.
# Expose the tracker models in the Django admin with the default
# ModelAdmin configuration.
for tracked_model in (Status, Issue, Feature):
    admin.site.register(tracked_model)
"dwlwendy18@gmail.com"
] | dwlwendy18@gmail.com |
205f72b2accf340094259c79b34ab663f053814d | c9e6cc2066f84a6e8dac9d236eb4f30c1b7e17a3 | /config/urls.py | 94d894fa1b4df8d0f4df1385bd5fcba349a3512c | [] | no_license | 5Dong-GA/pybo_mk2 | 1454484aaf79d4c5296d193dd70e82056ea0d9f4 | 7e010aab70570248aaefaf6082d01004e16df11a | refs/heads/master | 2023-08-19T19:29:52.311904 | 2021-11-01T07:14:39 | 2021-11-01T07:14:39 | 415,547,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | from django.contrib import admin
from django.urls import path, include
from pybo.views import base_views
# Project URLconf: admin site, the pybo board app, the common (account) app,
# the site index, and the social_django auth routes.  Order matters: the
# first matching pattern wins, so the '' index must precede the '' include.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('pybo/', include('pybo.urls')),
    path('common/', include('common.urls')),
    path('', base_views.index, name='index'), # path corresponding to '/'
    path('', include('social_django.urls', namespace='social')),
]
# Custom view used for 404 responses.
handler404 = 'common.views.page_not_found'
"gljhan123@gmail.com"
] | gljhan123@gmail.com |
6166eda0d8b5542780b07568f03f26945ebf8566 | 42699cce7bb4ccd616f4a96631527043164b931f | /Model.py | ef75fa4e4253ebfe5a4d244747b94e26d31519d9 | [] | no_license | Helgaodin/GraphQ | 2fcf80ee7570882f3ddc34ba389a698d240755a3 | 31f8e6fa5ef23c106c1dc39b16e1834011b4a37b | refs/heads/master | 2020-04-06T20:25:50.636313 | 2018-12-27T05:27:39 | 2018-12-27T05:27:39 | 157,772,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,993 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 6 21:41:48 2018
@author: Olga
"""
import random as rn
import networkx as nx
import numpy as np
import math
import copy
import matplotlib.pyplot as plt
# Model parameters.
q = 0.1      # threshold: rn.random() > q selects the "higher degree cut" branch in SwitchEdges
N = 1024     # number of nodes
p = 0.1      # edge probability of the initial Erdos-Renyi graph
t = 0        # step counter
Tmin=100     # number of rewiring steps to perform
G = nx.erdos_renyi_graph(N, p)
MatAdj = nx.to_numpy_matrix(G)# adjacency matrix (N x N); original mojibake comment said "adjacency matrix"
dura = np.asarray(MatAdj)
MatAdj = copy.deepcopy(dura)  # work on a plain ndarray copy of the matrix
fileName = 'result'+'.txt'
fileMatrixName = 'matrix'+'.txt'
k_all = nx.number_of_edges(G)
k_for_nodes = np.sum(MatAdj, axis=1)  # degree of each node
upper_one=k_for_nodes+1               # k+1 per node, used as sampling weights
consti = np.sum(upper_one)            # normalization constant for the weights
#k_dict=FillDictionary(k_for_nodes)
k_average = np.sum(k_for_nodes)/N
print(N/k_average)
# Build a lookup table from node degree to the nodes that have that degree.
def FillDictionary(k_for_nodes):
    """Group node indices by their degree.

    Parameters
    ----------
    k_for_nodes : sequence
        Degree of each node, indexed by node id.

    Returns
    -------
    dict
        Maps each degree value to the list of node ids having that degree,
        in ascending node-id order.
    """
    # Iterate over the sequence itself rather than the module-level global
    # ``N`` (len(k_for_nodes) == N in this script), so the function works for
    # inputs of any length.  The per-iteration debug prints and the fragile
    # truthiness test on p.get(...) from the original are removed:
    # setdefault handles both the "new key" and "existing key" cases.
    p = {}
    for i, k in enumerate(k_for_nodes):
        p.setdefault(k, []).append(i)
    return p
# One rewiring step: sample an edge, cut it, and re-link to a degree-weighted
# random node, keeping the dense adjacency matrix in sync with the graph.
def SwitchEdges(G, Adj, p):
    """Perform a single edge-rewiring step on graph G (adjacency Adj).

    Returns the updated ``(G, Adj, p)``.  NOTE(review): the incoming ``p``
    is ignored and immediately rebuilt from the current degrees, so the
    parameter is effectively dead — confirm before relying on it.
    """
    K = G.edges()
    noe = G.number_of_edges()
    # Degrees are recomputed from the module-level MatAdj, not the Adj argument.
    k_for_nodes = np.sum(MatAdj, axis=1)
    p=FillDictionary(k_for_nodes)
    random_edges = rn.randint(0,noe-1)
    A = K[random_edges][0]
    B = K[random_edges][1] # node indices: endpoints of the sampled edge
    q_random=rn.random()
    winner = -1
    # Degree-proportional sampling weights (module globals: upper_one = k+1,
    # consti = sum of upper_one).
    l_k=upper_one/consti
    if (q_random > q):
        #higher degree cut
        winner=min(k_for_nodes[A],k_for_nodes[B])
    else:
        #lower degree cut
        winner=max(k_for_nodes[A],k_for_nodes[B])
    # NOTE(review): ``winner`` holds a *degree value* here, yet below it is
    # used as a node index (has_edge, Adj[winner], add_edge).  This looks like
    # a bug — presumably the intent was the endpoint (A or B) having that
    # degree.  Confirm against the model specification.
    while True:
        # Draw a re-link target: first a degree (weighted by l_k), then a
        # uniformly random node among those that have that degree.
        answer = np.random.choice(k_for_nodes, 1, p=l_k)
        num=rn.randint(0,len(p[answer[0]])-1)
        nodes_link=p[answer[0]][num]
        # now link up (avoid self/parallel edges with the sampled endpoints)
        if ((A != nodes_link) and (B != nodes_link) and (G.has_edge(winner,nodes_link)==False)):
            break
    # Remove the old edge A-B and add the new one, mirroring both changes in
    # the symmetric adjacency matrix.
    Adj[A][B] = Adj[A][B]-1
    Adj[B][A] = Adj[A][B]
    G.remove_edge(A,B)
    Adj[winner][nodes_link] = Adj[winner][nodes_link]+1
    Adj[nodes_link][winner] = Adj[winner][nodes_link]
    G.add_edge(winner, nodes_link)
    return G, Adj, p
# Build the initial degree->nodes lookup, then run Tmin rewiring steps.
p=FillDictionary(k_for_nodes)
while(t<Tmin):
    # NOTE(review): SwitchEdges mutates MatAdj in place and returns it, so
    # MAdj is the same object as MatAdj and is otherwise unused here.
    G, MAdj, p = SwitchEdges(G, MatAdj, p)
    t=t+1
'''
MatAdj = copy.deepcopy(MAdj)
if (t%2000 == 0):
#print(t)
f = open(fileName, 'a')
text = str(t) + '\t' + str(Ntrip) +'\t'+ str(Nbw) + '\n'
f.write(text)
f.close()
f = open(fileMatrixName,'w')
for i in range (N):
for j in range (N):
f.write(str(MatAdj[i][j])+'\t')
f.write('\n')
f.close()
if(t%1000000==0):
fileMil = '2mu'+str(mu)+'step'+str(t)+'.txt'
f = open(fileMil,'w')
for i in range (N):
for j in range (N):
f.write(str(MatAdj[i][j])+'\t')
f.write('\n')
f.close()'''
| [
"noreply@github.com"
] | noreply@github.com |
529021e9253b54ea2760500cc89608996f682434 | 81d61f6e84873837cd3b437c0c24a4fceb38c755 | /Vec3.py | 6e2b6c5c2794c6690d092b0000fe728aeef6c77d | [] | no_license | A00885419/Emag_lab_4 | 2d13396418fd98dececa98d5dccf042ae0ed08bf | 7155ccecdabc3caef0a35754e00b8c7d4ce5110b | refs/heads/master | 2020-01-23T22:01:30.704804 | 2016-11-28T14:41:28 | 2016-11-28T14:41:28 | 74,711,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,924 | py | # Finite element analysis base for lab 4
import math
# general purpose 3d vector class based on my old 2d vector
class Vec3:
    """A simple 3-component float vector with operator-based helpers.

    Supports addition (+), subtraction (-), dot product / scalar scaling (*),
    scalar division (/), plus two non-standard overloads kept for backward
    compatibility with existing callers:

    * ``a // b`` — Euclidean distance between position vectors
    * ``a ^ b``  — unit vector pointing from ``a`` toward ``b``
    """

    def __init__(self, ix, iy, iz):
        # Components are always stored as floats.
        self.x = float(ix)
        self.y = float(iy)
        self.z = float(iz)

    # --- vector operations ---------------------------------------------

    def __add__(self, vect):
        """Component-wise vector addition."""
        return Vec3(self.x + vect.x, self.y + vect.y, self.z + vect.z)

    def __mul__(self, vect):
        """Dot product with another Vec3, or scaling by a plain number.

        Fix: the original fell off the end and silently returned ``None``
        for unsupported operand types; returning ``NotImplemented`` lets
        Python raise a proper TypeError instead.
        """
        if isinstance(vect, self.__class__):
            return float(self.x * vect.x + self.y * vect.y + self.z * vect.z)
        if isinstance(vect, (int, float)):
            return Vec3(self.x * vect, self.y * vect, self.z * vect)
        return NotImplemented

    # Backward-compatible generalization: allow ``2 * v`` as well as ``v * 2``.
    __rmul__ = __mul__

    def __sub__(self, vect):
        """Component-wise vector subtraction."""
        return self + vect * (-1)

    def __truediv__(self, scal):
        """Division by a scalar."""
        return self * (1 / scal)

    def __floordiv__(self, vect):
        """Distance between two position vectors (non-standard use of //)."""
        return math.sqrt((vect.x - self.x) ** 2
                         + (vect.y - self.y) ** 2
                         + (vect.z - self.z) ** 2)

    def __xor__(self, vect):
        """Unit vector from self toward vect (non-standard use of ^)."""
        return (vect - self) / (self // vect)

    def crossp(self, vect):
        """Cross product ``self x vect``."""
        return Vec3(self.y * vect.z - self.z * vect.y,
                    self.z * vect.x - self.x * vect.z,
                    self.x * vect.y - self.y * vect.x)

    # --- cylindrical coordinate accessors ------------------------------

    def rho(self):
        """Radial distance from the z axis."""
        return math.sqrt(self.x ** 2 + self.y ** 2)

    def phi(self):
        """Azimuthal angle in radians."""
        return math.atan2(self.y, self.x)

    # --- spherical coordinate accessors --------------------------------

    def r(self):
        """Magnitude (spherical radius)."""
        return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)

    def theta(self):
        """Polar angle in radians; raises ZeroDivisionError for the zero vector."""
        return math.acos(self.z / self.r())

    def __str__(self):
        return "[" + str(self.x) + ", " + str(self.y) + ", " + str(self.z) + "]"
| [
"peterlimail47@googlemail.com"
] | peterlimail47@googlemail.com |
66c30e0293b40b2dc060085c96b72090ca57f53a | ae8507d88da3660d756865dec2277d5b42c6eeda | /src/urls.py | 844cd6810ef0c538c2f72c9aaf0d99f9064d58f5 | [] | no_license | alexandrebini/welcometothedjango | 2d40d07ddd300a8a331adeac30c0fb6c1ca9f594 | e949449bee1caf09f5b275db690dce39000d517f | refs/heads/master | 2021-01-16T19:31:46.962963 | 2012-03-25T03:51:08 | 2012-03-25T03:51:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | from django.conf.urls.defaults import patterns, include, url
from core.views import homepage
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Old-style (Django <= 1.x) patterns() URLconf: homepage at the root, the
# subscriptions app under inscricao/, and the admin site.
urlpatterns = patterns('',
    (r'^$', homepage),
    # NOTE(review): unlike the other patterns this one is not anchored with
    # '^' — confirm whether r'^inscricao/' was intended.
    (r'inscricao/', include('subscriptions.urls', namespace='subscriptions')),
    # Examples:
    # url(r'^$', 'src.views.home', name='home'),
    # url(r'^src/', include('src.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
# Append the staticfiles URL patterns (development helper).
urlpatterns += staticfiles_urlpatterns()
"alexandrebini@gmail.com"
] | alexandrebini@gmail.com |
73a2a3d133057914d6e40ebbfbf584645ff54880 | 20a49e4bb2e4826a64ac337b21206afa83cb8e76 | /thehardway/ex14.py | b5b79da768442576f94958e317bc239296800866 | [] | no_license | DBeardionaire/learning-python | 6b8c668e123f5cf3626a2cc82e2957bb32a2db46 | bcc6d1dd603c2413033fd77268ef990a580d350c | refs/heads/master | 2020-05-29T08:48:55.346145 | 2015-07-22T20:34:06 | 2015-07-22T20:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | # Import 'argv' module from 'sys'.
from sys import argv
# Unpack variables 'script' and 'user_name' from 'argv' module.
script, user_name, what_time = argv
# Create 'ask' variable and assign it a string value.
ask = '> '
# Begin asking questions using variables. User response assigns to variable 'likes'.
print "Hi %s, I'm the %s script. It is %s" % (user_name, script, what_time)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
likes = raw_input(ask)
# Ask additional question. User input assigns to variable 'lives'.
print "Where do you live %s?" % user_name
lives = raw_input(ask)
# Ask additional question. User input assigns to variable 'computer'.
print "What kind of computer do you have?"
computer = raw_input(ask)
# Print summary using variables assigned by user input.
print """
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice.
""" % (likes, lives, computer) | [
"rand.seay@gmail.com"
] | rand.seay@gmail.com |
6f80e1959435946497fe0db550324d388644ee17 | 19f25a63f45e326c095ecc46e00527a0358b0200 | /help_dialogs/Reset_dialog.py | 2394921992625a4ca0b77ad043e57fd9c95d74ad | [] | no_license | vfurtula/Laserlab | 3b51f8d3c2db2b68803c5ece4aabd078948a3024 | e566febeb7b7006ba4f5f69ee2dffd9f6c89fc59 | refs/heads/master | 2022-10-16T03:57:04.860706 | 2020-06-08T09:09:29 | 2020-06-08T09:09:29 | 259,634,634 | 0 | 1 | null | 2020-05-06T10:38:17 | 2020-04-28T12:46:18 | Python | UTF-8 | Python | false | false | 4,712 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 09:06:01 2018
@author: Vedran Furtula
"""
import re, serial, time
from PyQt5.QtCore import QObject, QThreadPool, QTimer, QRunnable, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QFont, QFrame
from PyQt5.QtWidgets import (QDialog, QMessageBox, QGridLayout, QLabel, QLineEdit, QComboBox, QHBoxLayout, QVBoxLayout, QPushButton, QFileDialog, QWidget)
class WorkerSignals(QObject):
	"""Signals used by Reset_Thread to talk back to the GUI thread.

	QRunnable does not inherit QObject and cannot emit signals itself, so
	the worker owns an instance of this class and emits through it.
	"""
	# Create signals to be used
	about = pyqtSignal(object)
	critical = pyqtSignal(object)
	warning = pyqtSignal(object)
	info = pyqtSignal(object)
	statusbyte = pyqtSignal(object)
	finished = pyqtSignal()
class Reset_Thread(QRunnable):
	"""
	Worker that resets/homes the SMC100PP stepper off the GUI thread.

	Runs on a QThreadPool; progress and errors are reported through the
	signals in ``self.signals`` (WorkerSignals).

	:param argv: argv[0] is the instrument dict; key "SMC100" holds the
		stepper driver used via return_ts/go_home/abort.
	"""
	def __init__(self,*argv):
		super(Reset_Thread, self).__init__()
		# constants
		self.inst_list = argv[0]
		self.signals = WorkerSignals()
	@pyqtSlot()
	def run(self):
		# Entry point invoked by the thread pool.
		try:
			self.reset()
		except Exception as e:
			# Report any failure through the warning signal.
			self.signals.warning.emit(str(e))
		else:
			pass
		finally:
			# Always report the final status byte and signal completion.
			if self.inst_list.get("SMC100"):
				val = self.inst_list.get("SMC100").return_ts(1)
				self.signals.statusbyte.emit(val[-2:])
			self.signals.finished.emit() # Done
	def abort(self):
		# Stop an in-progress move (called from the dialog's STOP button).
		if self.inst_list.get("SMC100"):
			self.inst_list.get("SMC100").abort()
	def reset(self):
		if self.inst_list.get("SMC100"):
			val = self.inst_list.get("SMC100").return_ts(1)
			self.signals.statusbyte.emit(val[-2:])
			# "32"-"35" presumably are the controller's READY status codes —
			# if the axis is not already in one of them, home it.
			# TODO confirm against the Newport SMC100 programmer's manual.
			if val[-2:] not in ["32","33","34","35"]:
				time.sleep(1)
				self.signals.info.emit("go_home")
				self.inst_list.get("SMC100").go_home(1)
class Reset_dialog(QDialog):
	"""Dialog that resets/homes the SMC100PP stepper via a worker thread.

	``inst_list`` is the instrument dict handed on to Reset_Thread; the
	worker reports back through Qt signals handled by the slots below, so
	the GUI stays responsive during the move.
	"""
	def __init__(self, parent, inst_list):
		super().__init__(parent)
		# constants
		self.inst_list = inst_list
		self.setupUi()
	def setupUi(self):
		"""Create the start/stop buttons and the status-byte readout."""
		self.stopButton = QPushButton("STOP RESET",self)
		self.stopButton.setFixedHeight(35)
		self.stopButton.setFixedWidth(150)
		self.stopButton.setEnabled(False)
		self.startButton = QPushButton("Start reset",self)
		self.startButton.setFixedHeight(35)
		self.startButton.setFixedWidth(150)
		self.startButton.setEnabled(True)
		lbl0 = QLabel("Statusbyte returned from the SMC100PP:\t", self)
		self.lbl_st = QLabel("", self)
		self.lbl_st.setStyleSheet("color: blue")
		grid_0 = QHBoxLayout()
		grid_0.addWidget(self.startButton)
		grid_0.addWidget(self.stopButton)
		grid_1 = QHBoxLayout()
		grid_1.addWidget(lbl0)
		grid_1.addWidget(self.lbl_st)
		grid_2 = QVBoxLayout()
		grid_2.addLayout(grid_0)
		grid_2.addLayout(grid_1)
		# wire the buttons to their handlers
		self.startButton.clicked.connect(self.start)
		self.stopButton.clicked.connect(self.abort)
		self.threadpool = QThreadPool()
		self.setLayout(grid_2)
		self.setWindowTitle("Reset dialog for SMC100PP stepper")
		# keep the dialog tightly sized around its current contents
		grid_2.setSizeConstraint(grid_2.SetFixedSize)
	def abort(self):
		# Ask the running worker to stop the stepper.  self.worker exists only
		# after start(); the STOP button stays disabled until the worker
		# reports "go_home", which guards this call.
		self.worker.abort()
	def start(self):
		"""Confirm with the user, then launch the reset worker."""
		reply = QMessageBox.question(self, 'Message', "The stepper will RESET and MOVE to the home position. Remove all components from the stepper head!", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
		if reply == QMessageBox.Yes:
			self.worker = Reset_Thread(self.inst_list)
			self.worker.signals.finished.connect(self.finished)
			self.worker.signals.info.connect(self.info)
			self.worker.signals.statusbyte.connect(self.statusbyte)
			# Execute
			self.threadpool.start(self.worker)
			#################################################
			self.startButton.setEnabled(False)
			self.startButton.setText("..reseting..")
	def finished(self):
		# Worker done: restore the idle button/window state.
		self.startButton.setEnabled(True)
		self.startButton.setText("Start reset")
		self.stopButton.setEnabled(False)
		self.setWindowTitle("Reset dialog for SMC100PP stepper")
	def statusbyte(self,sb):
		# Show the two-character status byte reported by the worker.
		self.lbl_st.setText(sb)
	def info(self,mystr):
		# Progress notifications from the worker; only "go_home" enables STOP.
		if mystr=="go_home":
			self.setWindowTitle("homing the stepper, please wait!")
			self.stopButton.setEnabled(True)
		elif mystr=="reset":
			self.setWindowTitle("reseting the stepper, please wait!")
			self.stopButton.setEnabled(False)
		else:
			self.stopButton.setEnabled(False)
	def about(self,mystr):
		# Message-box helper slots (available for worker signals).
		QMessageBox.about(self, 'Message',mystr)
	def warning(self,mystr):
		QMessageBox.warning(self, 'Message',mystr)
	def critical(self,mystr):
		QMessageBox.critical(self, 'Message',mystr)
	def closeEvent(self,event):
		# Accept window close unconditionally.
		event.accept()
| [
"vfurtula@localhost.localdomain"
] | vfurtula@localhost.localdomain |
cf3ef703f036d44f6267fe31e5becc1cc56eddef | ebe67a95eeeda2db098f8515d7128d9623dfffb6 | /run_scripts/freq_rebin.py | ad3de78bc70f62cc30f2b443622efaeaff3cf089 | [] | no_license | tcv/hibiscus | c9ec6fdf122cb98cc2e27a08d20a5a2b91a5d6bf | ac7a065c8f3bb36a7254726e51806ffa4ca11b36 | refs/heads/master | 2020-06-04T02:27:50.116743 | 2015-09-28T21:25:47 | 2015-09-28T21:25:47 | 11,481,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,642 | py | """
Module to rebin a single day of data
"""
import matplotlib
matplotlib.use('Agg')
from numpy import *
import pylab
import scipy.interpolate as itp
import numpy.ma as ma
import scipy.optimize as opt
import os
import skrf as rf
import sys
import matplotlib.pyplot as plt
sys.path.append(os.path.abspath('/home/tcv/hibiscus'))
import file_funcs as ff
#Main directories for the input and output
indir = '/lustre/tcv/truncated_data/'
outdir = '/lustre/tcv/freq_rebinned_data/'
Kdir = '/lustre/tcv/rfi_check_data/'
directories = os.listdir(indir)
#Setting a single day for parallel computation
date_ind = sys.argv[1]
#Setting Rebinning scales
timescale = 32
freqscale = 32
# Running totals for the flagging statistics printed at the end.
total_mask = 0.
mid_mask = 0.
sub_mid_mask = 0.
sub_tot_mask = 0.
total_sum = 0.
total_sub_sum = 0.
if int(date_ind)<15:
    # Based upon the naming convention for the subdirectories in the raw data
    direct = 'June'+date_ind+'_day_to_night'
    print 'Directory being rebinned is:',direct
    directory = indir+direct+'/'
    new_directory = outdir+direct+'/'
    # prepping calibration data for use
    short_data = loadtxt(Kdir+'June_'+date_ind+'_fit_short.txt')
    short_full = loadtxt(Kdir+'June_'+date_ind+'_avg_short.txt')
    dirlist = os.listdir(directory)
    #Iterate for each file in the directory
    for fname in dirlist:
        # Data files have at least two '-' separators; skip '*-cal.dat' files.
        if len(fname.split('-'))>=3:
            if fname.split('-')[-1]!='cal.dat':
                filename = directory+fname
                #load data file
                time,form,sub_data,mask,freq,volt,temp = ff.loadsingle(filename)
                # Frequency axis: 40-130 MHz spread over the spectrum length.
                width = 90.0/len(sub_data)
                freq = arange(40,130.0,width)
                # Channel indices bracketing the 50-100 MHz band reported below.
                fmin = where(freq<=50.)[0][-1]
                fmax = where(freq<=100.)[0][-1]
                #basic freq flagging
                mask = ff.flagging(sub_data,freq,3.,freqscale)
                spike_mask = ff.spike_flag(sub_data,mask,freq,100.)
                short_mask = ff.cal_flag(short_full,short_data,spike_mask,freq,1.e-10)
                thresh_mask = ff.threshold_flag(sub_data,short_mask,freq,5.)
                # Statistics for the basic flagger alone.
                mid_mask = mid_mask+sum(mask)
                sub_mid_mask = sub_mid_mask+sum(mask[fmin:fmax])
#                for i in range(0,len(sub_data)):
#                    if spike_mask[i]==1.0:
#                        mask[i] = 1.0
#                total_mask = total_mask+sum(spike_mask)
                # Use the fully cascaded (spike+cal+threshold) mask from here on.
                mask = thresh_mask
                total_mask = total_mask+sum(mask)
                sub_tot_mask = sub_tot_mask+sum(mask[fmin:fmax])
                total_sum = total_sum +len(mask)
                total_sub_sum = total_sub_sum+len(mask[fmin:fmax])
                #freq rebinning
                new_data,new_mask,new_freq = ff.rebin(sub_data,mask,freq,freqscale)
                new_data = array(new_data)
                #Check for nan/inf (should be nulls)
                nandata = where(isnan(new_data))[0]
                for i in range(0,len(nandata)):
                    new_data[nandata[i]] = 0.0
                infdata = where(isinf(new_data))[0]
                for i in range(0,len(infdata)):
                    new_data[infdata[i]] = 0.0
                # Write the rebinned spectrum and its mask side by side.
                ff.writesingle(filename,new_directory,new_data,'')
                ff.writesingle(filename,new_directory,new_mask,'_mask')
#print 'Percent of Data Flagged:',100.*total_mask/total_sum
print 'Percent of Data Flagged with basic flagger: ',100.*mid_mask/total_sum
print 'Percent of Data Flagged with additional flaggers: ',100*total_mask/total_sum
print 'Percent of Data between 50-100 MHz with basic flagger: ',100.*sub_mid_mask/total_sub_sum
print 'Percent of Data between 50-100 MHz with add flagger: ',100.*sub_tot_mask/total_sub_sum
| [
"tabitha.voytek@gmail.com"
] | tabitha.voytek@gmail.com |
82af4577975f944ba39e44d7f9a294e05163755e | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/google/cloud/dialogflowcx_v3beta1/types/intent.py | ab7d83589724b82269ef2d692eaaf2e0c46698b6 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,442 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
# proto-plus module manifest: every message/enum declared in this module must
# be listed here so proto.module() can register it with the package.
# NOTE: this file appears machine-generated (googleapis-gen); prefer
# regenerating over hand-editing.
__protobuf__ = proto.module(
    package='google.cloud.dialogflow.cx.v3beta1',
    manifest={
        'IntentView',
        'Intent',
        'ListIntentsRequest',
        'ListIntentsResponse',
        'GetIntentRequest',
        'CreateIntentRequest',
        'UpdateIntentRequest',
        'DeleteIntentRequest',
    },
)
class IntentView(proto.Enum):
    r"""Represents the options for views of an intent.
    An intent can be a sizable object. Therefore, we provide a
    resource view that does not return training phrases in the
    response.
    """
    INTENT_VIEW_UNSPECIFIED = 0  # no view chosen by the caller
    INTENT_VIEW_PARTIAL = 1      # omits training phrases (see class docstring)
    INTENT_VIEW_FULL = 2         # full intent, training phrases included
# NOTE: generated proto-plus message. The ``number=`` values are the protobuf
# wire contract -- never renumber or reuse them.
class Intent(proto.Message):
    r"""An intent represents a user's intent to interact with a
    conversational agent.
    You can provide information for the Dialogflow API to use to
    match user input to an intent by adding training phrases (i.e.,
    examples of user input) to your intent.
    Attributes:
        name (str):
            The unique identifier of the intent. Required for the
            [Intents.UpdateIntent][google.cloud.dialogflow.cx.v3beta1.Intents.UpdateIntent]
            method.
            [Intents.CreateIntent][google.cloud.dialogflow.cx.v3beta1.Intents.CreateIntent]
            populates the name automatically. Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/intents/<Intent ID>``.
        display_name (str):
            Required. The human-readable name of the
            intent, unique within the agent.
        training_phrases (Sequence[google.cloud.dialogflowcx_v3beta1.types.Intent.TrainingPhrase]):
            The collection of training phrases the agent
            is trained on to identify the intent.
        parameters (Sequence[google.cloud.dialogflowcx_v3beta1.types.Intent.Parameter]):
            The collection of parameters associated with
            the intent.
        priority (int):
            The priority of this intent. Higher numbers represent higher
            priorities.
            - If the supplied value is unspecified or 0, the service
              translates the value to 500,000, which corresponds to the
              ``Normal`` priority in the console.
            - If the supplied value is negative, the intent is ignored
              in runtime detect intent requests.
        is_fallback (bool):
            Indicates whether this is a fallback intent.
            Currently only default fallback intent is
            allowed in the agent, which is added upon agent
            creation.
            Adding training phrases to fallback intent is
            useful in the case of requests that are
            mistakenly matched, since training phrases
            assigned to fallback intents act as negative
            examples that triggers no-match event.
        labels (Sequence[google.cloud.dialogflowcx_v3beta1.types.Intent.LabelsEntry]):
            The key/value metadata to label an intent. Labels can
            contain lowercase letters, digits and the symbols '-' and
            '_'. International characters are allowed, including letters
            from unicase alphabets. Keys must start with a letter. Keys
            and values can be no longer than 63 characters and no more
            than 128 bytes.
            Prefix "sys-" is reserved for Dialogflow defined labels.
            Currently allowed Dialogflow defined labels include:
            - sys-head
            - sys-contextual The above labels do not require value.
              "sys-head" means the intent is a head intent.
              "sys-contextual" means the intent is a contextual intent.
        description (str):
            Human readable description for better
            understanding an intent like its scope, content,
            result etc. Maximum character limit: 140
            characters.
    """
    class TrainingPhrase(proto.Message):
        r"""Represents an example that the agent is trained on to
        identify the intent.
        Attributes:
            id (str):
                Output only. The unique identifier of the
                training phrase.
            parts (Sequence[google.cloud.dialogflowcx_v3beta1.types.Intent.TrainingPhrase.Part]):
                Required. The ordered list of training phrase parts. The
                parts are concatenated in order to form the training phrase.
                Note: The API does not automatically annotate training
                phrases like the Dialogflow Console does.
                Note: Do not forget to include whitespace at part
                boundaries, so the training phrase is well formatted when
                the parts are concatenated.
                If the training phrase does not need to be annotated with
                parameters, you just need a single part with only the
                [Part.text][google.cloud.dialogflow.cx.v3beta1.Intent.TrainingPhrase.Part.text]
                field set.
                If you want to annotate the training phrase, you must create
                multiple parts, where the fields of each part are populated
                in one of two ways:
                - ``Part.text`` is set to a part of the phrase that has no
                  parameters.
                - ``Part.text`` is set to a part of the phrase that you
                  want to annotate, and the ``parameter_id`` field is set.
            repeat_count (int):
                Indicates how many times this example was
                added to the intent.
        """
        class Part(proto.Message):
            r"""Represents a part of a training phrase.
            Attributes:
                text (str):
                    Required. The text for this part.
                parameter_id (str):
                    The
                    [parameter][google.cloud.dialogflow.cx.v3beta1.Intent.Parameter]
                    used to annotate this part of the training phrase. This
                    field is required for annotated parts of the training
                    phrase.
            """
            text = proto.Field(proto.STRING, number=1)
            parameter_id = proto.Field(proto.STRING, number=2)
        id = proto.Field(proto.STRING, number=1)
        parts = proto.RepeatedField(proto.MESSAGE, number=2,
            message='Intent.TrainingPhrase.Part',
        )
        repeat_count = proto.Field(proto.INT32, number=3)
    class Parameter(proto.Message):
        r"""Represents an intent parameter.
        Attributes:
            id (str):
                Required. The unique identifier of the parameter. This field
                is used by [training
                phrases][google.cloud.dialogflow.cx.v3beta1.Intent.TrainingPhrase]
                to annotate their
                [parts][google.cloud.dialogflow.cx.v3beta1.Intent.TrainingPhrase.Part].
            entity_type (str):
                Required. The entity type of the parameter. Format:
                ``projects/-/locations/-/agents/-/entityTypes/<System Entity Type ID>``
                for system entity types (for example,
                ``projects/-/locations/-/agents/-/entityTypes/sys.date``),
                or
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/entityTypes/<Entity Type ID>``
                for developer entity types.
            is_list (bool):
                Indicates whether the parameter represents a
                list of values.
            redact (bool):
                Indicates whether the parameter content should be redacted
                in log. If redaction is enabled, the parameter content will
                be replaced by parameter name during logging. Note: the
                parameter content is subject to redaction if either
                parameter level redaction or [entity type level
                redaction][google.cloud.dialogflow.cx.v3beta1.EntityType.redact]
                is enabled.
        """
        id = proto.Field(proto.STRING, number=1)
        entity_type = proto.Field(proto.STRING, number=2)
        is_list = proto.Field(proto.BOOL, number=3)
        redact = proto.Field(proto.BOOL, number=4)
    name = proto.Field(proto.STRING, number=1)
    display_name = proto.Field(proto.STRING, number=2)
    training_phrases = proto.RepeatedField(proto.MESSAGE, number=3,
        message=TrainingPhrase,
    )
    parameters = proto.RepeatedField(proto.MESSAGE, number=4,
        message=Parameter,
    )
    priority = proto.Field(proto.INT32, number=5)
    is_fallback = proto.Field(proto.BOOL, number=6)
    labels = proto.MapField(proto.STRING, proto.STRING, number=7)
    description = proto.Field(proto.STRING, number=8)
# Generated message; field numbers are the wire contract (paging fields 3/4
# were declared before intent_view, number=5 -- do not "fix" the ordering).
class ListIntentsRequest(proto.Message):
    r"""The request message for
    [Intents.ListIntents][google.cloud.dialogflow.cx.v3beta1.Intents.ListIntents].
    Attributes:
        parent (str):
            Required. The agent to list all intents for. Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
        language_code (str):
            The language to list intents for. The following fields are
            language dependent:
            - ``Intent.training_phrases.parts.text``
            If not specified, the agent's default language is used.
            `Many
            languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
            are supported. Note: languages must be enabled in the agent
            before they can be used.
        intent_view (google.cloud.dialogflowcx_v3beta1.types.IntentView):
            The resource view to apply to the returned
            intent.
        page_size (int):
            The maximum number of items to return in a
            single page. By default 100 and at most 1000.
        page_token (str):
            The next_page_token value returned from a previous list
            request.
    """
    parent = proto.Field(proto.STRING, number=1)
    language_code = proto.Field(proto.STRING, number=2)
    intent_view = proto.Field(proto.ENUM, number=5,
        enum='IntentView',
    )
    page_size = proto.Field(proto.INT32, number=3)
    page_token = proto.Field(proto.STRING, number=4)
class ListIntentsResponse(proto.Message):
    r"""The response message for
    [Intents.ListIntents][google.cloud.dialogflow.cx.v3beta1.Intents.ListIntents].
    Attributes:
        intents (Sequence[google.cloud.dialogflowcx_v3beta1.types.Intent]):
            The list of intents. There will be a maximum number of items
            returned based on the page_size field in the request.
        next_page_token (str):
            Token to retrieve the next page of results,
            or empty if there are no more results in the
            list.
    """
    @property
    def raw_page(self):
        # Hook used by the client library's page iterator machinery.
        return self
    intents = proto.RepeatedField(proto.MESSAGE, number=1,
        message='Intent',
    )
    next_page_token = proto.Field(proto.STRING, number=2)
class GetIntentRequest(proto.Message):
    r"""The request message for
    [Intents.GetIntent][google.cloud.dialogflow.cx.v3beta1.Intents.GetIntent].
    Attributes:
        name (str):
            Required. The name of the intent. Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/intents/<Intent ID>``.
        language_code (str):
            The language to retrieve the intent for. The following
            fields are language dependent:
            - ``Intent.training_phrases.parts.text``
            If not specified, the agent's default language is used.
            `Many
            languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
            are supported. Note: languages must be enabled in the agent
            before they can be used.
    """
    name = proto.Field(proto.STRING, number=1)
    language_code = proto.Field(proto.STRING, number=2)
class CreateIntentRequest(proto.Message):
    r"""The request message for
    [Intents.CreateIntent][google.cloud.dialogflow.cx.v3beta1.Intents.CreateIntent].
    Attributes:
        parent (str):
            Required. The agent to create an intent for. Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
        intent (google.cloud.dialogflowcx_v3beta1.types.Intent):
            Required. The intent to create.
        language_code (str):
            The language of the following fields in ``intent``:
            - ``Intent.training_phrases.parts.text``
            If not specified, the agent's default language is used.
            `Many
            languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
            are supported. Note: languages must be enabled in the agent
            before they can be used.
    """
    parent = proto.Field(proto.STRING, number=1)
    intent = proto.Field(proto.MESSAGE, number=2,
        message='Intent',
    )
    language_code = proto.Field(proto.STRING, number=3)
class UpdateIntentRequest(proto.Message):
    r"""The request message for
    [Intents.UpdateIntent][google.cloud.dialogflow.cx.v3beta1.Intents.UpdateIntent].
    Attributes:
        intent (google.cloud.dialogflowcx_v3beta1.types.Intent):
            Required. The intent to update.
        language_code (str):
            The language of the following fields in ``intent``:
            - ``Intent.training_phrases.parts.text``
            If not specified, the agent's default language is used.
            `Many
            languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
            are supported. Note: languages must be enabled in the agent
            before they can be used.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            The mask to control which fields get updated.
            If the mask is not present, all fields will be
            updated.
    """
    intent = proto.Field(proto.MESSAGE, number=1,
        message='Intent',
    )
    language_code = proto.Field(proto.STRING, number=2)
    update_mask = proto.Field(proto.MESSAGE, number=3,
        message=field_mask.FieldMask,
    )
class DeleteIntentRequest(proto.Message):
    r"""The request message for
    [Intents.DeleteIntent][google.cloud.dialogflow.cx.v3beta1.Intents.DeleteIntent].
    Attributes:
        name (str):
            Required. The name of the intent to delete. Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/intents/<Intent ID>``.
    """
    name = proto.Field(proto.STRING, number=1)
# Public API of this module is exactly the proto manifest declared above.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
88cd1d6fe0ac5132e4fca4498c26a79a50729ebf | a2b372fc903509ffeb413470dd83ec17a8d5c104 | /pytorch-vqa/train.py | 3298d7eae26fa95a7d4202c8b32e7405dd7b955e | [] | no_license | jasonkrone/multitask_text_to_image_synthesis | 605d1652bd9a0e429345d90ba6c4d90b2bd6a29d | 3f2dc0eea48909b7c1f295ee1d91445d4c8a3272 | refs/heads/master | 2020-04-10T18:19:20.055553 | 2018-12-10T16:18:03 | 2018-12-10T16:18:03 | 161,200,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | import sys
import os.path
import math
import json
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from tqdm import tqdm
import config
import data
import model
import utils
def update_learning_rate(optimizer, iteration):
    """Exponentially decay the learning rate of every parameter group.

    The rate halves every ``config.lr_halflife`` iterations, starting
    from ``config.initial_lr`` at iteration 0.
    """
    decayed_lr = config.initial_lr * 0.5 ** (float(iteration) / config.lr_halflife)
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
# Global batch counter, advanced once per training step by run(), so the
# learning-rate decay schedule is continuous across epochs.
total_iterations = 0
def run(net, loader, optimizer, tracker, train=False, prefix='', epoch=0):
    """Run one epoch over ``loader``.

    In training mode the optimizer is stepped per batch and nothing is
    returned; in evaluation mode the per-sample answers, accuracies and
    dataset indices are collected and returned as three lists of tensors.

    Fixes vs. the original:
    - ``tensor.cuda(async=True)`` is a SyntaxError on Python >= 3.7
      (``async`` became a keyword); ``non_blocking=True`` is the modern
      spelling of the same overlapped host->device copy.
    - The removed ``volatile`` Variable kwarg was dropped (it was an
      eval-time memory optimization only and does not change results).
    - ``nn.LogSoftmax`` is given an explicit ``dim=1`` (the old implicit
      default for 2-D input), silencing the deprecation warning.
    """
    global total_iterations
    if train:
        net.train()
        tracker_class, tracker_params = tracker.MovingMeanMonitor, {'momentum': 0.99}
    else:
        net.eval()
        tracker_class, tracker_params = tracker.MeanMonitor, {}
    answ = []
    idxs = []
    accs = []
    tq = tqdm(loader, desc='{} E{:03d}'.format(prefix, epoch), ncols=0)
    loss_tracker = tracker.track('{}_loss'.format(prefix), tracker_class(**tracker_params))
    acc_tracker = tracker.track('{}_acc'.format(prefix), tracker_class(**tracker_params))
    log_softmax = nn.LogSoftmax(dim=1).cuda()
    fmt = '{:.4f}'.format  # hoisted: loop-invariant
    for v, q, a, idx, q_len in tq:
        var_params = {
            'requires_grad': False,
        }
        v = Variable(v.cuda(non_blocking=True), **var_params)
        q = Variable(q.cuda(non_blocking=True), **var_params)
        a = Variable(a.cuda(non_blocking=True), **var_params)
        q_len = Variable(q_len.cuda(non_blocking=True), **var_params)
        out = net(v, q, q_len)
        nll = -log_softmax(out)
        # Soft target: answers carry up to 10 annotator votes each.
        loss = (nll * a / 10).sum(dim=1).mean()
        acc = utils.batch_accuracy(out.data, a.data).cpu()
        if train:
            update_learning_rate(optimizer, total_iterations)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_iterations += 1
        else:
            # store information about evaluation of this minibatch
            _, answer = out.data.cpu().max(dim=1)
            answ.append(answer.view(-1))
            accs.append(acc.view(-1))
            idxs.append(idx.view(-1).clone())
        loss_tracker.append(loss.data.item())
        acc_tracker.append(acc.mean())
        tq.set_postfix(loss=fmt(loss_tracker.mean.value), acc=fmt(acc_tracker.mean.value))
    if not train:
        answ = list(torch.cat(answ, dim=0))
        accs = list(torch.cat(accs, dim=0))
        idxs = list(torch.cat(idxs, dim=0))
        return answ, accs, idxs
def main():
    """Train the VQA model and checkpoint weights plus eval results each epoch."""
    # The run name comes from the command line, falling back to a timestamp.
    if len(sys.argv) > 1:
        run_name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        run_name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    checkpoint_path = os.path.join('logs', '{}.pth'.format(run_name))
    print('will save to {}'.format(checkpoint_path))
    cudnn.benchmark = True
    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)
    net = nn.DataParallel(model.Net(train_loader.dataset.num_tokens)).cuda()
    trainable_params = [p for p in net.parameters() if p.requires_grad]
    optimizer = optim.Adam(trainable_params)
    tracker = utils.Tracker()
    # Snapshot of the (module-level) config so the checkpoint is reproducible.
    config_snapshot = {
        key: value
        for key, value in vars(config).items()
        if not key.startswith('__')
    }
    for epoch in range(config.epochs):
        run(net, train_loader, optimizer, tracker, train=True, prefix='train', epoch=epoch)
        eval_results = run(net, val_loader, optimizer, tracker, train=False, prefix='val', epoch=epoch)
        checkpoint = {
            'name': run_name,
            'tracker': tracker.to_dict(),
            'config': config_snapshot,
            'weights': net.state_dict(),
            'eval': {
                'answers': eval_results[0],
                'accuracies': eval_results[1],
                'idx': eval_results[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(checkpoint, checkpoint_path)
# Allow the module to be imported for its helpers without starting training.
if __name__ == '__main__':
    main()
| [
"jasonkrone@me.com"
] | jasonkrone@me.com |
2d5cbd0ca506b979956c7608f7463c2e6627cfdb | b5e25da52997ee9ca1aa445b0b6c04deabd6cc58 | /models/models.py | 5c26c1f5d0522d3099b02de70540363acd321c35 | [] | no_license | mom988/account_move_line_field_extend | b8d01d3b63c227b31346f37c0499a9167eacde8d | 229723e9584233a08923ee4b19359c6680dec716 | refs/heads/master | 2023-03-22T10:49:11.004381 | 2021-03-05T02:49:40 | 2021-03-05T02:49:40 | 344,652,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
# class rsh_account(models.Model):
# _name = 'rsh_account.rsh_account'
# _description = 'rsh_account.rsh_account'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# for record in self:
# record.value2 = float(record.value) / 100
class AccountMoveLine(models.Model):
    """Extend journal item lines with the product's category.

    The new field is related (mirrors product_id.categ_id) and stored,
    so it can be used in stored-field contexts such as grouping.
    """
    _inherit = 'account.move.line'
    # Read-only, stored related field following the line's product to its category.
    product_categ = fields.Many2one('product.category', related='product_id.categ_id', readonly=True, store=True, string='Category', help="Product Category")
| [
"263737@qq.com"
] | 263737@qq.com |
14da85392dbd40646b05f92897a8547d3296d30b | 61f7cfe81333fab839f4b040e6a93e7f3bf534c9 | /src/arch/arm/fastmodel/arm_fast_model.py | d666b7febfaeb866361a3f53efcc6bf70a083e12 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] | permissive | Linestro/gem5_Y | f49e01835a5a4bb0bd77a47f63aed0f9f3304495 | 01fe12d28300948e4d93942d98a712ef84024675 | refs/heads/master | 2022-12-09T22:15:10.902028 | 2019-11-11T21:22:50 | 2019-11-11T21:22:50 | 211,427,864 | 0 | 1 | BSD-3-Clause | 2022-11-29T22:43:07 | 2019-09-28T01:42:11 | C++ | UTF-8 | Python | false | false | 4,827 | py | # Copyright 2019 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
import os
from m5.defines import buildEnv
import _m5.arm_fast_model
def set_armlmd_license_file(force=False):
    """Export the ARMLMD_LICENSE_FILE environment variable.

    The value is the one gem5 was built with (taken from buildEnv). Unless
    ``force`` is True, an existing value already present in the environment
    is left untouched.
    """
    var = 'ARMLMD_LICENSE_FILE'
    built_with = buildEnv[var]
    already_set = var in os.environ
    if force or not already_set:
        os.environ[var] = built_with
# These methods wrap much of the SystemC Export API described in section
# 7.6 of the Fast Models User Guide.
def scx_initialize(id):
    """Initialize the Fast Models simulation engine.

    The working directory is temporarily switched to the directory holding
    the simulation engine library so the fast model code can find it (it
    looks in the binary directory and the CWD); this avoids copying the
    library around.

    Fix vs. the original: the previous CWD is now restored in a ``finally``
    block, so an exception raised by the native call no longer leaves the
    process stranded in the library directory.
    """
    cwd = os.getcwd()
    os.chdir(os.path.join(buildEnv['PVLIB_HOME'], 'lib',
                          buildEnv['PVLIB_FLAVOR']))
    try:
        # Actually run scx_initialize.
        _m5.arm_fast_model.scx_initialize(id)
    finally:
        # Restore the previous working directory even on failure.
        os.chdir(cwd)
# Thin 1:1 Python wrappers over the native SystemC Export (scx) entry points
# exposed by _m5.arm_fast_model; see the Fast Models User Guide, section 7.6.
def scx_load_application(instance, application):
    # Load an application image into a single named instance.
    _m5.arm_fast_model.scx_load_application(instance, application)
def scx_load_application_all(application):
    # Same as above, but applied to all instances.
    _m5.arm_fast_model.scx_load_application_all(application)
def scx_load_data(instance, data, address):
    # Load raw data at the given address of a single named instance.
    _m5.arm_fast_model.scx_load_data(instance, data, address)
def scx_load_data_all(data, address):
    # Same as above, but applied to all instances.
    _m5.arm_fast_model.scx_load_data_all(data, address)
def scx_set_parameter(name, value):
    # Set a named simulation parameter.
    _m5.arm_fast_model.scx_set_parameter(name, value)
def scx_get_parameter(name):
    # NOTE(review): this mirrors a C++ out-parameter API 1:1, but Python
    # strings are immutable -- unless the _m5 binding replaces the object
    # in place (unusual), this will always return "". Verify against the
    # pybind wrapper for scx_get_parameter.
    value = ""
    _m5.arm_fast_model.scx_get_parameter(name, value)
    return value
def scx_get_parameter_list(self=None):
    """Return the list of simulation parameter names from the native API.

    Fix vs. the original: this is a module-level function, yet it declared a
    required ``self`` parameter (an apparent copy/paste slip), forcing every
    caller to pass a dummy argument. ``self`` is kept as an optional, ignored
    argument for backward compatibility with any such caller.
    """
    return _m5.arm_fast_model.scx_get_parameter_list()
# More 1:1 wrappers over the native scx API (Fast Models User Guide, 7.6).
def scx_set_cpi_file(cpi_file_path):
    # Point the engine at a CPI (cycles-per-instruction) file.
    _m5.arm_fast_model.scx_set_cpi_file(cpi_file_path)
def scx_cpulimit(t):
    # Forward the host-CPU limit setting to the native API.
    _m5.arm_fast_model.scx_cpulimit(t)
def scx_timelimit(t):
    # Forward the time limit setting to the native API.
    _m5.arm_fast_model.scx_timelimit(t)
def scx_simlimit(t):
    # Forward the simulated-time limit setting to the native API.
    _m5.arm_fast_model.scx_simlimit(t)
def scx_parse_and_configure(
        self, argc, argv, trailer=None, sig_handler=True):
    # NOTE(review): 'self' looks like a stray first parameter on a
    # module-level function (it is never used and never forwarded); callers
    # must currently pass a dummy value. Removing it would change the
    # positional interface, so it is only flagged here, not fixed.
    _m5.arm_fast_model.scx_parse_and_configure(
        argc, argv, trailer, sig_handler)
# Remaining 1:1 wrappers over the native scx API (Fast Models User Guide, 7.6).
def scx_start_cadi_server(start=True, run=True, debug=False):
    _m5.arm_fast_model.scx_start_cadi_server(start, run, debug)
def scx_enable_cadi_log(log=True):
    _m5.arm_fast_model.scx_enable_cadi_log(log)
def scx_prefix_appli_output(prefix=True):
    _m5.arm_fast_model.scx_prefix_appli_output(prefix)
def scx_print_port_number(print_=True):
    # Trailing underscore avoids shadowing the builtin 'print'.
    _m5.arm_fast_model.scx_print_port_number(print_)
def scx_print_statistics(print_=True):
    _m5.arm_fast_model.scx_print_statistics(print_)
def scx_load_plugin(file_):
    _m5.arm_fast_model.scx_load_plugin(file_)
def scx_sync(sync_time):
    _m5.arm_fast_model.scx_sync(sync_time)
def scx_set_min_sync_latency(latency):
    _m5.arm_fast_model.scx_set_min_sync_latency(latency)
def scx_get_min_sync_latency(arg=None):
    # The native call is overloaded (with/without an argument). NOTE: this is
    # a truthiness test, so a falsy arg (0, '') falls back to the no-arg form.
    if arg:
        return _m5.arm_fast_model.scx_get_min_sync_latency(arg)
    else:
        return _m5.arm_fast_model.scx_get_min_sync_latency()
"yehaojie@umich.edu"
] | yehaojie@umich.edu |
2fda8681fcb9a539894d1abbdd84a4370d179703 | ac098a873daa972878ba9d63a92737214b807811 | /RosNode.py | f2f86b9c26da49eff2c0708925ecdb3417056b71 | [] | no_license | hudongrui/2018SU_Frank_Zihan | a7490240b640255add05961f72cf4735ee24d45c | 220a5d935a2415a02188092136841830759a6556 | refs/heads/master | 2020-03-21T20:47:48.359795 | 2019-09-27T15:50:24 | 2019-09-27T15:50:24 | 139,028,651 | 2 | 1 | null | 2019-09-27T15:50:25 | 2018-06-28T14:22:33 | Python | UTF-8 | Python | false | false | 300 | py | import rospy
from std_msgs.msg import Int8 as int
rospy.init_node("blah")
def publisher():
pub = rospy.Publisher('samplePublisher', int)
rate = rospy.Rate(10.0)
while not rospy.is_shutdown():
# print '.'
pub.publish()
rate.sleep()
break
| [
"hudongrui_01@outlook.com"
] | hudongrui_01@outlook.com |
ca45f84ad6024b57da8fc2431a5dd592c2bd062b | a6c712cce838b903d221e5d9fba85b531d767a2d | /model_selection/nlp/w2v/doc2vec.py | ec24852dc4933907f7d57f6bccb3185773228734 | [] | no_license | austinkk/competition_lib | 878c3e5f63a399931e8e313e212f8750c850bbe0 | 79a816947240872d896b13f7fe703df784bf9144 | refs/heads/master | 2021-08-09T01:52:10.991600 | 2020-04-10T07:14:03 | 2020-04-10T07:14:03 | 151,812,178 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,794 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import zipfile
# from matplotlib import pylab as plt
from sklearn.manifold import TSNE
import pickle
import gc
class CBOWConfig(object):
    """Hyper-parameters for CBOW word/document embedding training.

    All values are class attributes, so expressions (valid_examples, the
    output filenames) are evaluated once, at class-definition time.
    """
    filename = './data/en_corpus'  # input corpus: one document per line, space-separated tokens
    vocabulary_size = 50000        # most frequent words kept; the rest map to 'UNK'
    batch_size = 128
    embedding_size = 128 # Dimension of the embedding vector.
    half_window_size = 1 # How many words to consider left and right.
    # We pick a random validation set to sample nearest neighbors. here we limit the
    # validation samples to the words that have a low numeric ID, which by
    # construction are also the most frequent.
    valid_size = 16 # Random set of words to evaluate similarity on.
    valid_window = 100 # Only pick dev samples in the head of the distribution.
    valid_examples = np.array(random.sample(range(valid_window), valid_size))
    num_sampled = 64 # Number of negative examples to sample.
    num_steps = 100001  # total training iterations
    get_w2v = False     # when True, training also exports the word vectors
    # Output file names encode the model type and key hyper-parameters.
    w2v_filename = './tf_%s_size-%s_windows-%s_vocabulary-%s.w2v' % ('CBOW', embedding_size, half_window_size * 2 + 1, vocabulary_size)
    doc_filename = './tf_%s_size-%s_windows-%s_vocabulary-%s.doc2v' % ('CBOW', embedding_size, half_window_size * 2 + 1, vocabulary_size)
class CBOW_withdoc(object):
"""CBOW"""
    def __init__(self, config):
        """Store the config and reset the corpus cursor.

        data_index is the position in the flattened corpus from which
        generate_batch() draws its next training window.
        """
        self.data_index = 0
        self.config = config
def read_data(self, filename, is_zipfile = False):
"""Extract the first file enclosed in a zip file as a list of words. warning: cannot remove ',.'"""
data = []
doc_data = []
if is_zipfile:
with zipfile.ZipFile(self.config.filename) as f:
pass
#data = tf.compat.as_str(f.read(f.namelist()[0])).split()
else:
cnt = 0
with open(self.config.filename) as f:
#data = tf.compat.as_str(f.read()).split()
for line in f:
line = line.strip()
words = line.split(' ')
tmp = [cnt] * len(words)
doc_data.extend(tmp)
data.extend(words)
cnt = cnt + 1
return data, doc_data
def build_dataset(self, words):
count = [['UNK', -1]]
tongji = collections.Counter(words)
print ('Word types %d' % len(tongji))
print ('Vocabulary size %d' % self.config.vocabulary_size)
count.extend(tongji.most_common(self.config.vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count = unk_count + 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
    def generate_batch(self, data, doc_data, batch_size, half_window_size):
        """Produce one CBOW minibatch from the id-encoded corpus.

        For each example, the label is the centre word of a
        (2*half_window_size + 1)-wide window, ``batch`` holds the context
        words (centre excluded), and ``doc_id`` the centre word's document.
        The cursor ``self.data_index`` advances by one per example and wraps
        around the corpus, so consecutive calls stream overlapping windows.
        """
        batch = np.ndarray(shape=(batch_size, 2*half_window_size), dtype=np.int32)
        doc_id = np.ndarray(shape=(batch_size,), dtype = np.int32)
        labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
        len_data = len(data)
        for i in range(batch_size):
            index = self.data_index
            # Centre of the window (modulo wrap-around at the corpus end).
            labels[i] = data[(index+half_window_size)%len_data]
            doc_id[i] = doc_data[(index+half_window_size)%len_data]
            for k in range(2*half_window_size+1):
                if k != half_window_size:
                    # Pack context words contiguously, skipping the centre slot.
                    t = (k if k < half_window_size else k-1)
                    batch[i, t] = data[(index+k)%len_data]
            self.data_index = (self.data_index + 1) % len_data
        return batch, doc_id, labels
    def cbow(self):
        """Build the TF1 graph for CBOW training with document embeddings.

        Requires ``self.len_docs`` to be set (done in train()). The model
        averages the context-word embeddings, averages that with the
        document embedding, and trains against a sampled-softmax loss.
        Normalised word/doc embedding tensors are exposed as attributes
        for later export. NOTE: uses the TF1 graph/placeholder API.
        """
        self.graph = tf.Graph()
        with self.graph.as_default():
            # Placeholders: context word ids, document ids, centre-word labels.
            self.tf_train_dataset = tf.placeholder(tf.int32, shape=(self.config.batch_size, 2*self.config.half_window_size))
            self.tf_doc_dataset = tf.placeholder(tf.int32, shape =[self.config.batch_size])
            self.tf_train_labels = tf.placeholder(tf.int32, shape=(self.config.batch_size, 1))
            tf_valid_dataset = tf.constant(self.config.valid_examples, dtype=tf.int32)
            # Trainable tables: word embeddings, per-document embeddings,
            # and the (weight, bias) pair of the sampled-softmax output layer.
            embeddings = tf.Variable(tf.random_uniform(shape=(self.config.vocabulary_size, self.config.embedding_size), minval=-1.0, maxval=1.0))
            doc_embeddings = tf.Variable(tf.random_uniform([self.len_docs, self.config.embedding_size],-1.0,1.0))
            softmax_weights = tf.Variable(tf.truncated_normal(shape=(self.config.vocabulary_size, self.config.embedding_size), stddev=1.0 / math.sqrt(self.config.embedding_size)))
            softmax_biases = tf.constant(np.zeros(shape=(self.config.vocabulary_size), dtype=np.float32))
            embed = tf.nn.embedding_lookup(embeddings, self.tf_train_dataset)
            docs_embed = tf.nn.embedding_lookup(doc_embeddings, self.tf_doc_dataset)
            # CBOW input: mean of the context embeddings, then averaged with
            # the document embedding.
            inputs = tf.reduce_mean(embed, 1)
            inputs = (inputs + docs_embed) / 2
            self.loss = tf.reduce_mean(
                tf.nn.sampled_softmax_loss(
                    softmax_weights, softmax_biases, self.tf_train_labels, inputs,self.config.num_sampled, self.config.vocabulary_size
                )
            )
            self.optimizer = tf.train.AdagradOptimizer(1.0).minimize(self.loss)
            # Similarity of the fixed validation words against the output layer,
            # used for the nearest-neighbour progress printout during training.
            valid_embed = tf.nn.embedding_lookup(embeddings, tf_valid_dataset)
            self.similarity = tf.matmul(valid_embed, tf.transpose(softmax_weights)) + softmax_biases
            # L2-normalised export tensors: input embeddings, the average of
            # input and output embeddings (re-normalised), and doc embeddings.
            norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
            self.normalized_embeddings = embeddings / norm
            norm_ = tf.sqrt(tf.reduce_sum(tf.square(softmax_weights), 1, keep_dims=True))
            normalized_softmax_weights = softmax_weights / norm_
            norm_ = tf.sqrt(tf.reduce_sum(tf.square(normalized_softmax_weights+self.normalized_embeddings), 1, keep_dims=True))
            self.normalized_embeddings_2 = (normalized_softmax_weights+self.normalized_embeddings) / 2.0 / norm_
            doc_norm = tf.sqrt(tf.reduce_sum(tf.square(doc_embeddings), 1, keep_dims=True))
            self.normalized_doc_embeddings = doc_embeddings / doc_norm
def train(self, get_doc_vec = False):
    """
    Train the CBOW/doc2vec model end-to-end: read the corpus, build the
    vocabulary, sanity-check the batch generator, build the TensorFlow
    graph (self.cbow()) and run SGD, periodically printing the loss and
    the nearest neighbours of a validation word set; optionally dump the
    learned word and document vectors to disk.

    NOTE(review): the source had its indentation stripped; the nesting
    below (what sits inside the session, what is guarded by
    self.config.get_w2v) is a reconstruction — confirm against the
    original file.

    :param get_doc_vec: NOTE(review): appears unused — the vector dumps
        below are gated on self.config.get_w2v instead; confirm whether
        this flag was meant to gate the doc-vector write.
    """
    words, doc_words = self.read_data(self.config.filename)
    # number of documents = largest doc id + 1
    # (assumes doc ids are 0-based and dense — TODO confirm)
    self.len_docs = max(doc_words) + 1
    print ('Data size %d' % len(words))
    data, count, dictionary, reverse_dictionary = self.build_dataset(words)
    print ('Most common words (+UNK)', count[:5])
    print ('Sample data', data[:10])
    del words  # Hint to reduce memory.
    gc.collect()
    # Smoke-test the batch generator at two window sizes and print a
    # small human-readable sample of (context, doc id, target) triples.
    for half_window_size in [1, 2]:
        # NOTE(review): unused here; generate_batch presumably keeps its
        # own cursor — verify.
        data_index = 0
        batch, doc_id, labels = self.generate_batch(data, doc_words, 8, half_window_size)
        print ('\nwith half_window_size = %d:' % (half_window_size))
        print ('    batch:', [[reverse_dictionary[b] for b in bi] for bi in batch])
        print ('    doc_id:', [item for item in doc_id])
        print ('    labels:', [reverse_dictionary[li] for li in labels.reshape(8)])
    # Build the TF computation graph on self.graph.
    self.cbow()
    with tf.Session(graph=self.graph) as session:
        # tf.initialize_all_variables was renamed in TF 0.12+;
        # branch on the minor version for compatibility.
        if int(tf.VERSION.split('.')[1]) > 11:
            tf.global_variables_initializer().run()
        else:
            tf.initialize_all_variables().run()
        print ('Initialized')
        average_loss = 0.0
        for step in range(self.config.num_steps):
            train_batch, doc_id, train_labels = self.generate_batch(data, doc_words, self.config.batch_size, self.config.half_window_size)
            feed_dict = {self.tf_train_dataset: train_batch, self.tf_doc_dataset: doc_id, self.tf_train_labels: train_labels}
            l, _ = session.run([self.loss, self.optimizer], feed_dict=feed_dict)
            average_loss += l
            # Report the running mean loss every 2000 steps.
            if step % 2000 == 0:
                if step > 0:
                    average_loss /= 2000.0
                print ('Average loss at step %d: %f' % (step, average_loss))
                average_loss = 0
            # Every 10000 steps print nearest neighbours of the
            # validation words as a qualitative progress check.
            if step % 10000 == 0:
                sim = self.similarity.eval()
                for i in range(self.config.valid_size):
                    valid_word = reverse_dictionary[self.config.valid_examples[i]]
                    top_k = 8 # number of nearest neighbors
                    nearest = (-sim[i, :]).argsort()[1:top_k+1] # let alone itself, so begin with 1
                    log = 'Nearest to %s:' % valid_word
                    for k in range(top_k):
                        close_word = reverse_dictionary[nearest[k]]
                        log = '%s %s,' % (log, close_word)
                    print(log)
        # Evaluate the final embeddings while the session is still open.
        if self.config.get_w2v:
            final_embeddings = self.normalized_embeddings.eval()
            final_embeddings_2 = self.normalized_embeddings_2.eval() # this is better
        final_doc_embeddings = self.normalized_doc_embeddings.eval()
    # Dump vectors in word2vec text format: header line "count dim",
    # then one "<token> <v0> <v1> ..." line per vector.
    if self.config.get_w2v:
        with open(self.config.w2v_filename, 'w') as f:
            f.write('%s %s\n' % (self.config.vocabulary_size, self.config.embedding_size))
            for i in range(self.config.vocabulary_size):
                l = [str(item) for item in list(final_embeddings_2[i])]
                f.write('%s %s\n' % (reverse_dictionary[i], ' '.join(l)))
    with open(self.config.doc_filename, 'w') as f:
        f.write('%s %s\n' % (self.len_docs, self.config.embedding_size))
        for i in range(self.len_docs):
            l = [str(item) for item in list(final_doc_embeddings[i])]
            f.write('%s %s\n' % (i, ' '.join(l)))
    # Disabled t-SNE visualisation kept for reference (string statement,
    # never executed).
    """
    num_points = 400
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
    two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])
    two_d_embeddings_2 = tsne.fit_transform(final_embeddings_2[1:num_points+1, :])
    with open('2d_embedding_cbow.pkl', 'wb') as f:
        pickle.dump([two_d_embeddings, two_d_embeddings_2, reverse_dictionary], f)
    """
if __name__ == '__main__':
    # Script entry point: build a default configuration, construct the
    # doc-aware CBOW model and train it.
    config = CBOWConfig()
    cbow_model = CBOW_withdoc(config)
    # True is forwarded as get_doc_vec; NOTE(review): train() appears to
    # gate the vector dumps on config.get_w2v rather than this flag.
    cbow_model.train(True)
| [
"austinkk@sina.cn"
] | austinkk@sina.cn |
efe1227c94da541154a41caa2ddbf5eddd02211b | 6371acdb640e62e4e6addac2ba1aa70002a8c1b1 | /Algorithms/pySINDy/pySINDy/sindybase.py | b8faf881ff1a71bfc9d1b60c2251129f08263c46 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | M-Vause/SEED | 263307152ebac1e4f49cd81dcd5207ecbdf51139 | cda94a02a5ef47a1e9a885d330eef2821301ebed | refs/heads/master | 2022-12-13T20:11:58.893994 | 2020-04-27T16:10:09 | 2020-04-27T16:10:09 | 252,790,026 | 3 | 3 | MIT | 2022-12-08T01:52:05 | 2020-04-03T16:55:10 | Jupyter Notebook | UTF-8 | Python | false | false | 18,773 | py | """
Base Module for SINDy: 'fit' method must be implemented in inherited classes
"""
import numpy as np
import matplotlib.pyplot as plt
class SINDyBase(object):
    """
    Sparse Identification of Nonlinear Dynamics (SINDy) base class.

    Provides the shared numerical machinery used by concrete SINDy
    variants: derivative estimation (finite differences and local
    Chebyshev polynomial interpolation), polynomial feature expansion,
    sequentially-thresholded least squares for sparse regression, and a
    simple coefficient plot.  Subclasses must implement ``fit``.
    """
    def __init__(self, name='SINDy model'):
        self.name = name
        self._coef = None  # coefficient matrix, populated by a subclass fit()
        self._desp = None  # textual description of each candidate feature

    @property
    def coefficients(self):
        """
        :return: the fitted coefficients of the model (None before fitting)
        """
        return self._coef

    @property
    def descriptions(self):
        """
        :return: descriptions of the candidate features used in the fit
        """
        return self._desp

    @property
    def plot_coefficients(self):
        """
        Plot the fitted coefficients against their descriptions.

        NOTE: exposed as a property purely for the terse call site
        ``model.plot_coefficients``; it draws a matplotlib figure as a
        side effect and returns None.
        """
        SINDyBase.plot(self._coef.T, self._desp)

    def fit(self, data):
        """
        Abstract method to fit the snapshot matrix, it has to be
        implemented in subclasses

        :param data: the snapshot matrix
        :return: None
        """
        raise NotImplementedError('Subclass must implement abstract method {}.fit'.format(
            self.__class__.__name__))

    @staticmethod
    def finite_difference(data, _dx, order=1, dim=0):
        """
        Take derivatives using a 2nd-order finite difference scheme:
        centered stencils in the interior, one-sided stencils of the same
        accuracy at the boundaries.

        :param data: a tensor to be differentiated
        :param _dx: grid spacing, assumed uniform
        :param order: order of the derivative to be applied (positive int)
        :param dim: the dimension along which to differentiate
        :return: a tensor of the same (squeezed) shape holding the derivative
        """
        data = np.squeeze(data)
        if dim >= data.ndim:
            raise ValueError('The selected dim should be less than #of dimensions of data!')
        data_shape = data.shape
        _n = data_shape[dim]
        idxs = [slice(None)] * len(data_shape)
        data_dx = np.zeros(data_shape)
        # NOTE: indexing uses tuple(idxs) — indexing with a list of
        # slices was deprecated in NumPy 1.15 and errors in current NumPy.
        if order == 1:
            for i in np.arange(1, _n - 1):
                idxs[dim] = i
                # centered: (f[i+1] - f[i-1]) / (2 dx)
                data_dx[tuple(idxs)] = (np.take(data, i + 1, dim) -
                                        np.take(data, i - 1, dim)) / (2 * _dx)
            idxs[dim] = 0
            data_dx[tuple(idxs)] = (-3.0 / 2 * np.take(data, 0, dim) +
                                    2 * np.take(data, 1, dim) -
                                    np.take(data, 2, dim) / 2) / _dx
            idxs[dim] = _n - 1
            data_dx[tuple(idxs)] = (3.0 / 2 * np.take(data, _n - 1, dim) -
                                    2 * np.take(data, _n - 2, dim) +
                                    np.take(data, _n - 3, dim) / 2) / _dx
        elif order == 2:
            for i in np.arange(1, _n - 1):
                idxs[dim] = i
                data_dx[tuple(idxs)] = (np.take(data, i + 1, dim) -
                                        2 * np.take(data, i, dim) +
                                        np.take(data, i - 1, dim)) / _dx ** 2
            idxs[dim] = 0
            data_dx[tuple(idxs)] = (2 * np.take(data, 0, dim) - 5 * np.take(data, 1, dim) +
                                    4 * np.take(data, 2, dim) -
                                    np.take(data, 3, dim)) / _dx ** 2
            idxs[dim] = _n - 1
            data_dx[tuple(idxs)] = (2 * np.take(data, _n - 1, dim) -
                                    5 * np.take(data, _n - 2, dim) +
                                    4 * np.take(data, _n - 3, dim) -
                                    np.take(data, _n - 4, dim)) / _dx ** 2
        elif order == 3:
            for i in np.arange(2, _n - 2):
                idxs[dim] = i
                data_dx[tuple(idxs)] = (np.take(data, i + 2, dim) / 2 -
                                        np.take(data, i + 1, dim) +
                                        np.take(data, i - 1, dim) -
                                        np.take(data, i - 2, dim) / 2) / _dx ** 3
            # two one-sided points at each boundary (5-point stencils)
            idxs[dim] = 0
            data_dx[tuple(idxs)] = (-2.5 * np.take(data, 0, dim) + 9 * np.take(data, 1, dim) -
                                    12 * np.take(data, 2, dim) + 7 * np.take(data, 3, dim) -
                                    1.5 * np.take(data, 4, dim)) / _dx ** 3
            idxs[dim] = 1
            data_dx[tuple(idxs)] = (-2.5 * np.take(data, 1, dim) + 9 * np.take(data, 2, dim) -
                                    12 * np.take(data, 3, dim) + 7 * np.take(data, 4, dim) -
                                    1.5 * np.take(data, 5, dim)) / _dx ** 3
            idxs[dim] = _n - 1
            data_dx[tuple(idxs)] = (2.5 * np.take(data, _n - 1, dim) -
                                    9 * np.take(data, _n - 2, dim) +
                                    12 * np.take(data, _n - 3, dim) -
                                    7 * np.take(data, _n - 4, dim) +
                                    1.5 * np.take(data, _n - 5, dim)) / _dx ** 3
            idxs[dim] = _n - 2
            data_dx[tuple(idxs)] = (2.5 * np.take(data, _n - 2, dim) -
                                    9 * np.take(data, _n - 3, dim) +
                                    12 * np.take(data, _n - 4, dim) -
                                    7 * np.take(data, _n - 5, dim) +
                                    1.5 * np.take(data, _n - 6, dim)) / _dx ** 3
        elif order > 3:
            # higher orders: compose a 3rd derivative with the remainder
            return SINDyBase.finite_difference(SINDyBase.finite_difference(data, _dx, 3, dim),
                                               _dx, order - 3, dim)
        else:
            raise ValueError('order of the derivative should be a positive integer!')
        return data_dx

    @staticmethod
    def pointwise_polynomial_difference(data, xgrid, order=1, degree=2, index=None):
        """
        Differentiate at a single point by fitting a Chebyshev polynomial
        to nearby samples and differentiating the fit.

        :param data: a 1D flattened vector of nearby function values
        :param xgrid: grid locations matching data
        :param order: an int, or a list of ints, derivative orders to take
        :param degree: degree of the polynomial to fit
        :param index: index (into data/xgrid) at which the derivatives are
            evaluated; defaults to the middle sample
        :return: 1D numpy array with one entry per requested order
        """
        if isinstance(order, int):
            order = [order]
        data = data.flatten()
        _n = len(data)
        if index is None:
            index = int((_n - 1) / 2)
        # Fit to a Chebyshev polynomial (numerically stable LS fit)
        poly = np.polynomial.chebyshev.Chebyshev.fit(xgrid, data, degree)
        return np.array([poly.deriv(m=order[i])(xgrid[index]) for i in np.arange(len(order))])

    @staticmethod
    def polynomial_difference(data, xgrid, order=1, dim=0, degree=2):
        """
        Take derivatives using local Chebyshev polynomial interpolation.

        A window of 2*degree samples around each interior point is fitted
        and differentiated, so the output is trimmed by `degree` samples
        at each end of `dim` (length n - 2*degree along that axis).

        :param data: a tensor to be differentiated
        :param xgrid: grid locations along `dim` (length must match)
        :param order: an integer, or a list of orders of the derivative
        :param dim: the dimension along which to differentiate
        :param degree: degree of polynomials used for interpolation
        :return: a single tensor when `order` is an int (or length-1
            list), otherwise a list of tensors, one per requested order
        """
        data = np.squeeze(data)
        if dim >= data.ndim:
            raise ValueError('The selected dim should be less than #of dimensions of data!')
        if dim < 0:
            dim = data.ndim + dim
        if isinstance(order, int):
            order = [order]
        data_shape = data.shape
        _n = data_shape[dim]
        idxs = [slice(None)] * len(data_shape)
        new_data_shape = list(data_shape)
        data_slice_shape = list(data_shape)
        new_data_shape[dim] -= 2 * degree
        data_slice_shape[dim] = 1
        # One INDEPENDENT output array per derivative order.  The former
        # [np.zeros(...)]*len(order) aliased a single array, so every
        # returned derivative silently equalled the last order computed.
        data_dx = [np.zeros(tuple(new_data_shape)) for _ in order]
        if _n != len(xgrid):
            raise ValueError('Grids information does not match with the data!')
        for j in np.arange(degree, _n - degree):
            pts = np.arange(j - degree, j + degree)
            idxs[dim] = slice(j - degree, j + degree)
            # move the differentiation axis to the front, flatten the rest
            pos = (dim, ) + tuple(np.arange(0, dim)) + tuple(np.arange(dim + 1, data.ndim))
            batch_data = np.transpose(data[tuple(idxs)], pos).reshape((2 * degree, -1))
            data_dx_tmp = np.zeros((1, batch_data.shape[1], len(order)))
            for k in np.arange(batch_data.shape[1]):
                deriv = SINDyBase.pointwise_polynomial_difference(batch_data[:, k].flatten(),
                                                                  xgrid[pts], order=order,
                                                                  degree=degree)
                data_dx_tmp[0, k, :] = deriv
            for i in np.arange(len(order)):
                idxs[dim] = j - degree
                data_dx[i][tuple(idxs)] = np.squeeze(
                    data_dx_tmp[..., i].reshape(tuple(data_slice_shape)))
        if len(order) == 1:
            return data_dx[0]
        return data_dx

    @staticmethod
    def get_poly_exponents(nfeat, degree=1):
        """
        Generate all exponent tuples for nfeat variables whose total
        degree is at most `degree`.

        :param nfeat: number of original features
        :param degree: maximum degree of the polynomials
        :return: a generator of exponent tuples of length nfeat
        """
        if nfeat == 0:
            yield ()
        else:
            for _x in np.arange(degree + 1):
                for _t in SINDyBase.get_poly_exponents(nfeat - 1, degree):
                    if sum(_t) + _x <= degree:
                        yield _t + (_x,)

    @staticmethod
    def get_ordered_poly_exponents(nfeat, degree=1, remove_zero_order=False):
        """
        :param nfeat: number of original features
        :param degree: maximum degree of the polynomials
        :param remove_zero_order: boolean value, indicate whether to remove the zero order term
        :return: a 2D array of exponents ordered by total degree
        """
        exponents = np.array(list(SINDyBase.get_poly_exponents(nfeat, degree)))
        all_exponents = exponents[np.argsort(np.sum(exponents, axis=1))]
        if remove_zero_order:
            # the zero-order (all-zero) row sorts first; drop it
            return all_exponents[1:, :]
        return all_exponents

    @staticmethod
    def polynomial_expansion(data, degree=1, remove_zero_order=False, var_names=None):
        """
        Expand features into all monomials up to `degree`.

        :param data: a 2D numpy array of original features stored in each column
        :param degree: degree of polynomials of features to be expanded
        :param remove_zero_order: boolean value, indicate whether to remove the zero order term
        :param var_names: variable names, default as None
        :return: a 2D array of expanded features (one monomial per
            column) and the corresponding list of descriptions
        """
        if len(data.shape) == 1:
            data = data[:, np.newaxis]
        if len(data.shape) > 2:
            raise ValueError("The input array is not 2D!")
        # extended features: one column per exponent tuple
        nfeat = data.shape[-1]
        exponents = SINDyBase.get_ordered_poly_exponents(nfeat, degree, remove_zero_order)
        result = np.array([np.prod([data[:, k] ** e[k] for k in np.arange(nfeat)],
                                   axis=0) for e in exponents]).T
        # descriptions of each extended feature
        desp = SINDyBase.exponent_to_description(exponents, 'sup', remove_zero_order,
                                                 var_names=var_names)
        return result, desp

    @staticmethod
    def threshold_ls(mtx, _b, cut_off=1e-3, max_iter=10, normalize=0):
        """
        Find sparse coefficients via sequentially-thresholded least
        squares: solve the LS problem, zero every coefficient with
        magnitude <= cut_off, refit on the surviving columns, repeat.

        :param mtx: the training theta matrix of shape (M, N)
        :param _b: a vector or an array of shape (M,) or (M, K)
        :param cut_off: the threshold cutoff value (applied to the
            normalized coefficients when normalize != 0)
        :param max_iter: number of threshold/refit iterations
        :param normalize: ord of the column norm used to scale mtx and _b
            before fitting; 0 (default) disables normalization
        :return: coefficient array of shape (N, K)
        """
        if len(_b.shape) == 1:
            _b = _b[:, np.newaxis]
        dim = _b.shape[-1]
        # normalize each column of mtx (and _b) to unit `normalize`-norm
        if normalize != 0:
            w_col_norms = np.linalg.norm(mtx, ord=normalize, axis=0)
            b_col_norms = np.linalg.norm(_b, ord=normalize, axis=0)
            mtx = mtx / w_col_norms[np.newaxis, :]
            _b = _b / b_col_norms[np.newaxis, :]
        _w = np.linalg.lstsq(mtx, _b, rcond=None)[0]
        for _ in np.arange(max_iter):
            small_inds = np.abs(_w) <= cut_off
            _w[small_inds] = 0
            if np.all(np.sum(np.abs(_w), axis=0)):
                # every target column still has an active term: refit it
                # on the surviving columns only
                for ind in np.arange(dim):
                    big_inds = ~small_inds[:, ind]
                    _w[big_inds, ind] = np.linalg.lstsq(mtx[:, big_inds], _b[:, ind],
                                                        rcond=None)[0]
            else:
                break
        if normalize != 0:
            # Undo the scaling.  (mtx/wc) @ w_n = b/bc  =>  w = w_n / wc * bc.
            # (The previous code multiplied by wc and divided by bc,
            # returning wrong coefficients whenever normalize != 0.)
            _w = _w / w_col_norms[:, np.newaxis]
            _w = _w * b_col_norms[np.newaxis, :]
        return _w

    @staticmethod
    def sparsify_dynamics(mtx, _b, init_tol, max_iter=25, thresh_iter=10,
                          l0_penalty=None, split=0.8, normalize=0):
        """
        Search for the thresholding tolerance that minimizes a validation
        loss (residual + l0 penalty on nonzero coefficients).

        :param mtx: the theta matrix of shape (M, N)
        :param _b: a vector or an array of shape (M,) or (M, K)
        :param init_tol: maximum tolerance (cut_off value)
        :param max_iter: maximum iteration of the outer loop
        :param thresh_iter: maximum iteration for threshold least squares
        :param l0_penalty: penalty factor for nonzero coefficients
        :param split: proportion of the training set
        :param normalize: normalization methods, default as 0 (no normalization)
        :return: the best coefficients of fit and the tolerance achieving them
        """
        if mtx.ndim != 2:
            raise ValueError('mtx is not a 2D numpy array!')
        if _b.ndim == 1:
            _b = _b[:, np.newaxis]
        elif _b.ndim > 2:
            raise ValueError('b is not a 1D/2D numpy array!')
        # split the data (fixed seed for reproducible train/test split)
        np.random.seed(12345)
        _n = mtx.shape[0]
        train = np.random.choice(_n, int(_n * split), replace=False)
        test = [x for x in np.arange(_n) if x not in train]
        train_mtx = mtx[train, :]
        test_mtx = mtx[test, :]
        train_b = _b[train, :]
        test_b = _b[test, :]
        # set up initial tolerance, l0 penalty, best error, etc.
        if l0_penalty is None:
            # l0_penalty = 0.001*np.linalg.cond(mtx)
            l0_penalty = np.linalg.norm(test_b) / len(test)
        tol = d_tol = float(init_tol)
        # baseline: no sparsity constraints
        w_best = np.linalg.lstsq(train_mtx, train_b, rcond=None)[0]
        err_best = np.linalg.norm(test_b - test_mtx.dot(w_best), 2) + \
                   l0_penalty * np.count_nonzero(w_best)
        tol_best = 0.
        imp_flag = True
        for i in np.arange(max_iter):
            _w = SINDyBase.threshold_ls(train_mtx, train_b, tol, thresh_iter, normalize)
            err = np.linalg.norm(test_b - test_mtx.dot(_w), 2) + l0_penalty * np.count_nonzero(_w)
            if err < err_best:
                # improvement: keep pushing the tolerance up
                err_best = err
                w_best = _w
                tol_best = tol
                tol += d_tol
                imp_flag = False
            else:
                # no improvement: back off and shrink the step
                tol = max([0, tol - 2 * d_tol])
                d_tol = 2 * d_tol / (max_iter - i)
                tol = tol + d_tol
        if imp_flag:
            print('cutoff value maybe too small/large to threshold ....')
        return w_best, tol_best

    @staticmethod
    def exponent_to_description(exponents, typ='sup', remove_zero_order=False, as_dict=False,
                                var_names=None):
        """
        Render exponent tuples as human-readable feature descriptions.

        :param exponents: a 2D numpy array of exponents
        :param typ: 'sup' (powers, e.g. 'u0^{2}') or 'sub' (derivative
            subscripts, e.g. 'u_{xy}')
        :param remove_zero_order: boolean value, indicate whether to remove the zero order term
        :param as_dict: whether to return a dict mapping description ->
            exponent list instead of a plain list
        :param var_names: list of names (typ='sup') or a single name
            string (typ='sub'); defaults to 'u0', 'u1', ... / 'u'
        :return: a list or a dict (depends on 'as_dict') of descriptions
        """
        if not isinstance(exponents, np.ndarray) or exponents.ndim != 2:
            raise ValueError("exponents must be a 2D numpy array!")
        desp = []
        desp_dict = {}
        _m, _n = exponents.shape
        if typ == 'sup':
            if var_names is not None:
                assert isinstance(var_names, list), "var_names must be a list of strings when " \
                                                    "typ =='sup'!"
                assert len(var_names) == _n, "length of var_names doesn't match with exponents!"
            else:
                var_names = ['u%d' % i for i in np.arange(_n)]
            for i in np.arange(_m):
                if np.any(exponents[i, :]):
                    # exist nonzero element
                    key = ''
                    for j in np.arange(_n):
                        if exponents[i, j] == 1:
                            key += var_names[j]
                        elif exponents[i, j]:
                            key += (var_names[j] + '^{%d}' % exponents[i, j])
                    desp.append(key)
                    desp_dict[key] = exponents[i, :].tolist()
                elif not remove_zero_order:
                    key = '1'
                    desp.append(key)
                    desp_dict[key] = exponents[i, :].tolist()
        elif typ == 'sub':
            # name of each dimension
            # (with xyz coordinates as default except for higher dimensional cases)
            if var_names is not None:
                assert isinstance(var_names, str), "var_names must be of type str when " \
                                                   "typ == 'sub'!"
            else:
                var_names = 'u'
            if _n == 1:
                dim_strs = ['x']
            elif _n == 2:
                dim_strs = ['x', 'y']
            elif _n == 3:
                dim_strs = ['x', 'y', 'z']
            else:
                dim_strs = ['x%d' % i for i in np.arange(_n)]
            for i in np.arange(_m):
                if np.any(exponents[i, :]):
                    # exist nonzero element
                    key = (var_names + '_{')
                    for j in np.arange(_n):
                        key += dim_strs[j] * exponents[i, j]
                    key += '}'
                    desp.append(key)
                    desp_dict[key] = exponents[i, :].tolist()
                elif not remove_zero_order:
                    # zero-order term: the bare variable itself.  Use the
                    # caller-supplied name (was hard-coded 'u', which
                    # ignored var_names).
                    key = var_names
                    desp.append(key)
                    desp_dict[key] = exponents[i, :].tolist()
        else:
            raise ValueError("type argument should be either 'sub' or 'sup'!")
        # which type of description to return
        if as_dict:
            return desp_dict
        return desp

    @staticmethod
    def plot(coe, desp):
        """
        Bar-plot each row of the coefficient matrix against the feature
        descriptions, dropping all-zero columns.

        :param coe: coefficients to be plotted, shape (num_targets, num_features)
        :param desp: descriptions of each feature column
        :return: None (draws a matplotlib figure as a side effect)
        """
        idx = np.ones((coe.shape), dtype=bool)
        _mm, _nn = coe.shape
        # mark all-zero columns for removal
        for i in range(_nn):
            vec = coe[:, i]
            if np.all(vec == 0):
                idx[:, i] = 0
        _coe = coe[idx].reshape(_mm, -1)
        _desp = []
        for i in range(_nn):
            if idx[0, i] == 1:
                _desp.append(desp[i])
        _m, _n = _coe.shape
        width = 1 / 1.5
        plt.figure(num=None, figsize=(40, 5), dpi=80, facecolor='w', edgecolor='k')
        for i in range(_m):
            plt.subplot(_m, _m, _m * i + 1)
            plt.bar(range(_n), _coe[i], width)
            plt.ylabel('value')
            plt.xticks(range(_n), _desp)
| [
"58262117+M-Vause@users.noreply.github.com"
] | 58262117+M-Vause@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.