index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
998,700 | b4954fb541d37afec720c46c6379e47dd576a595 | # Copyright (c) 2014 Metaswitch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import logging
import zmq
import time
from threading import Thread, Lock
import json
log = logging.getLogger(__name__)
# TCP ports Felix connects to: 9905 serves ACL state queries (ROUTER side),
# 9906 publishes ACL updates and heartbeats (PUB side).
MANAGER_ACLGET_PORT = "9905"
MANAGER_ACLPUB_PORT = "9906"
class ACLPublisher(object):
    """
    Implements the Calico ACL API.

    Responsible for transmitting ACL information from an ACL Store to Felixes.
    The ACL Publisher owns the ZeroMQ sockets that transport the API:

    - a ROUTER socket on MANAGER_ACLGET_PORT answering Felix GETACLSTATE
      queries, and
    - a PUB socket on MANAGER_ACLPUB_PORT pushing ACL updates and heartbeats
      to subscribed Felixes.
    """
    def __init__(self, context, acl_store, local_address):
        """
        :param context: ZeroMQ context used to create both sockets.
        :param acl_store: store queried for per-endpoint ACL state; it calls
            back into publish_endpoint_acls() asynchronously.
        :param local_address: IP or hostname to bind both sockets on.
        """
        log.debug("Creating ACL Publisher")
        self.acl_store = acl_store

        # Create REP socket, used to receive ACL state requests from Felix.
        log.debug("Creating Publisher REP socket")
        self.router_socket = context.socket(zmq.ROUTER)
        self.router_socket.bind("tcp://%s:%s" %
                                (local_address, MANAGER_ACLGET_PORT))

        # Create PUB socket, used to publish ACL updates to Felix.
        log.debug("Creating Publisher PUB socket")
        self.pub_socket = context.socket(zmq.PUB)
        self.pub_socket.bind("tcp://%s:%s" %
                             (local_address, MANAGER_ACLPUB_PORT))

        # Lock serializing writes to the PUB socket, which is shared between
        # the heartbeat thread and callers of publish_endpoint_acls().
        self.pub_lock = Lock()

        # Start publish heartbeat worker thread.
        log.debug("Starting ACL heartbeat sending loop")
        self.heartbeat_thread = Thread(target=self.heartbeat_thread_func)
        self.heartbeat_thread.start()

        # Start query worker thread.
        log.debug("Starting Publisher query receive loop")
        self.query_thread = Thread(target=self.query_thread_func)
        self.query_thread.start()

    def publish_endpoint_acls(self, endpoint_uuid, acls):
        """Publish a set of ACL rules for an endpoint.

        This method is thread-safe.
        """
        log.info("Publishing ACL Update %s for %s" % (acls, endpoint_uuid))
        update = {"type": "ACLUPDATE",
                  "issued": time.time() * 1000,
                  "acls": acls}
        # Hold the lock via a context manager so it is released even if the
        # send raises; a bare acquire()/release() pair would leave the lock
        # held forever on error and deadlock the heartbeat thread.
        with self.pub_lock:
            self.pub_socket.send_multipart(
                [endpoint_uuid.encode("utf-8"),
                 json.dumps(update).encode("utf-8")])

    def query_thread_func(self):
        """Query receive loop.

        Monitors the ROUTER socket for incoming queries and passes them on to
        the ACL Store, which will asynchronously respond by calling back into
        publish_endpoint_acls() to send the update containing the ACL state.
        """
        while True:
            # Receive and parse the query message.
            # ROUTER frames: [peer identity, empty delimiter, payload].
            message = self.router_socket.recv_multipart()
            assert (len(message) == 3)
            assert not message[1]
            query = json.loads(message[2].decode('utf-8'))
            peer = message[0]
            assert ("type" in query)
            log.info(
                "ACL Manager received packet %s from %s",
                query, binascii.hexlify(peer)
            )
            if query["type"] == "GETACLSTATE":
                endpoint = query["endpoint_id"]
                log.info("Received query message %s from Felix" % message)
                self.acl_store.query_endpoint_rules(endpoint)
                query["rc"] = "SUCCESS"
                query["message"] = ""
            else:
                # Received unexpected message.  Log and return it.
                log.warning("Received query %s of unknown type" % query)
                query["rc"] = "FAILURE"
                query["message"] = "Unknown message type: expected GETACLSTATE"
            log.debug("Sending response message: %s, %s" %
                      (peer, json.dumps(query).encode("utf-8")))
            # Delimiter frame must be bytes: on Python 3 a str "" here would
            # raise a TypeError from send_multipart (b"" == "" on Python 2).
            self.router_socket.send_multipart(
                (peer,
                 b"",
                 json.dumps(query).encode("utf-8"))
            )

    def heartbeat_thread_func(self):
        """ACL update socket heartbeat publishing loop.

        Sends a heartbeat to the aclheartbeat subscription on the PUB socket
        every 30 seconds.
        """
        while True:
            heartbeat = json.dumps({"type": "HEARTBEAT",
                                    "issued": time.time() * 1000})
            log.info("Sending ACL heartbeat %s" % heartbeat)
            # Same locking rationale as publish_endpoint_acls(); the topic
            # frame is an explicit bytes literal for Python 3 compatibility.
            with self.pub_lock:
                self.pub_socket.send_multipart([b"aclheartbeat",
                                                heartbeat.encode("utf-8")])
            log.debug("Sent ACL heartbeat")
            # In a perfect world this should subtract the time spent waiting
            # for the lock and sending the packet.  For the moment, in normal
            # operation this will suffice.
            time.sleep(30)
|
998,701 | 0ae88923b501abbc5401b439f26914a702b34f6f | import argparse
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
def parse_arguments():
    """Split the CLI into test-specific options and leftover unittest argv.

    Returns (parsed test options, argv list to hand to unittest).
    """
    cli = argparse.ArgumentParser(description='Test gRPC server/client')
    cli.add_argument('--ip', default='localhost', help='server and client IP')
    cli.add_argument('-p', '--port', default='50051', help='server and client port')
    cli.add_argument('-b', '--bin', required=True, help='Build artifacts directory')
    cli.add_argument('-s', '--size', required=True, help='Size of the file to transfer in bytes')
    known, remainder = cli.parse_known_args()
    # Keep argv[0] so unittest sees a conventional program name.
    return known, sys.argv[:1] + remainder
class ClientTestCase(unittest.TestCase):
    """End-to-end tests for the gRPC file-transfer server/client pair.

    Builds a scratch environment under the system temp directory, copies the
    server and client binaries from the build artifacts directory into it,
    and runs the server as a subprocess around each test.
    """
    def __init__(self, testname, args):
        """
        :param testname: name of the test method to run.
        :param args: parsed CLI options (ip, port, bin, size).
        """
        super(ClientTestCase, self).__init__(testname)
        self.args = args
        self.wd = os.getcwd()
        # Transfer chunk size: 1024 * 30 bytes is 30 KiB (the original
        # comment said "30MB", which does not match the value).
        self.max_block_size = 1024 * 30
        self.__create_test_environment()
    def setUp(self):
        # A fresh server process per test keeps tests independent.
        self.__run_server()
    def tearDown(self):
        self.__terminate_server()
    def test_invalid_request(self):
        """
        The client requests a file that the server does not own.
        Check that the file is not received.
        """
        print("Testing invalid request...")
        # Random-looking name that was never created on the server side.
        invalid_filename = os.path.join(
            os.path.dirname(self.client_path), "7xEvjAeobu")
        # The client writes received files into its own directory.
        os.chdir(os.path.dirname(self.client_path))
        subprocess.call([self.client_path,
                         "{}:{}".format(self.args.ip, self.args.port),
                         invalid_filename])
        self.assertFalse(os.path.isfile(invalid_filename))
    def test_valid_request(self):
        """
        The client requests a file that the server does own.
        Check that the file is received and the content is the same.
        """
        print("Testing valid request...")
        # create a file with random data in the server folder
        self.__create_test_file()
        valid_path = os.path.join(os.path.dirname(self.client_path),
                                  os.path.basename(self.test_file))
        os.chdir(os.path.dirname(self.client_path))
        subprocess.call([self.client_path,
                         "{}:{}".format(self.args.ip, self.args.port),
                         os.path.basename(valid_path)])
        self.assertTrue(os.path.isfile(valid_path))
        self.__compare_files(valid_path, self.test_file)
        os.remove(valid_path)
    def __run_server(self):
        """
        Runs the server in a separate process.
        """
        # Serve from the server's own directory so relative file lookups work.
        os.chdir(os.path.dirname(self.server_path))
        self.server_process = subprocess.Popen([self.server_path,
                                                "{}:{}".format(self.args.ip, self.args.port)])
    def __terminate_server(self):
        # terminate() then wait() avoids leaving a zombie between tests.
        self.server_process.terminate()
        self.server_process.wait()
    def __create_test_environment(self):
        """
        Creates the directories for the test environment and copy from the build
        artifacts directory server and client applications.
        """
        os.chdir(self.wd)
        temp_dir = tempfile.gettempdir()
        self.test_root = os.path.join(temp_dir, "test-grpc")
        print("Creating testing environment in {}".format(self.test_root))
        if os.path.exists(self.test_root):
            # delete any previous environment
            shutil.rmtree(self.test_root)
        # create root directory
        os.makedirs(self.test_root)
        def copy_app(name):
            # One sub-directory per binary: grpc-server/ and grpc-client/.
            app_root = os.path.join(self.test_root, name)
            os.makedirs(app_root)
            filename = "grpc-{}".format(name)
            src = os.path.join(self.args.bin, filename)
            dst = os.path.join(app_root, filename)
            shutil.copy(src, dst)
            return dst
        # copy client and server into the new test environment
        self.server_path = copy_app("server")
        self.client_path = copy_app("client")
    def __create_test_file(self):
        """
        Create a file with random data in the server folder.
        """
        self.test_file = os.path.join(os.path.dirname(self.server_path), "data")
        # Written in max_block_size chunks so args.size can exceed memory.
        with open(self.test_file, "ab+") as f:
            n_blocks = int(self.args.size) // self.max_block_size
            for i in range(n_blocks):
                f.write(bytearray(os.urandom(self.max_block_size)))
            remaining = int(self.args.size) % self.max_block_size
            if remaining > 0:
                f.write(bytearray(os.urandom(remaining)))
        self.assertEqual(int(self.args.size), os.path.getsize(self.test_file))
    def __compare_files(self, filename1, filename2):
        """
        Compare two files content, account for files that cannot fit into memory.
        """
        self.assertTrue(os.path.isfile(filename1))
        self.assertTrue(os.path.isfile(filename2))
        self.assertEqual(os.path.getsize(filename1), os.path.getsize(filename2))
        # Chunked comparison mirrors __create_test_file's write pattern.
        with open(filename1, "rb") as f1:
            with open(filename2, "rb") as f2:
                n_blocks = int(self.args.size) // self.max_block_size
                for i in range(n_blocks):
                    self.assertEqual(f1.read(self.max_block_size),
                                     f2.read(self.max_block_size))
                remaining = int(self.args.size) % self.max_block_size
                if remaining > 0:
                    self.assertEqual(f1.read(remaining),
                                     f2.read(remaining))
def main():
    """Collect every ClientTestCase test, run the suite, exit 1 on failure."""
    test_args, suite_args = parse_arguments()
    # run unit tests
    loader = unittest.TestLoader()
    suite = unittest.TestSuite(
        ClientTestCase(name, test_args)
        for name in loader.getTestCaseNames(ClientTestCase)
    )
    outcome = unittest.TextTestRunner().run(suite)
    sys.exit(0 if outcome.wasSuccessful() else 1)
if __name__ == "__main__":
    main()
|
998,702 | 5fd6ce9d05c363c546d944f7f859efce62e6e530 |
from multiprocessing.context import Process
import RobotActions
import numpy as np
import cv2
# from keras.preprocessing import image
import time
import multiprocessing as mp
import RPi.GPIO as GPIO
from enum import Enum
import picar
import time
from RobotActions import NeuralNetWorkRead
from keras.models import load_model
# from tensorflow import keras
from tensorflow.keras.applications.mobilenet import preprocess_input
# from tensorflow.keras import layers
# Ultrasonic sensor pins (BCM numbering — presumably an HC-SR04-style
# trigger/echo pair; confirm against the wiring).
GPIO_TRIGGER = 16
GPIO_ECHO = 20
def distance():
    """Measure distance in centimetres with the ultrasonic sensor.

    Pulses the trigger pin, times the echo pulse, and converts the elapsed
    time using the speed of sound (34300 cm/s, halved for the round trip).
    Each wait loop bails out after ~3 s so a missing echo cannot hang the
    caller; the value returned after a timeout is not meaningful.
    """
    GPIO.output(GPIO_TRIGGER, True)
    # set Trigger after 0.01ms to LOW
    time.sleep(0.00005)
    GPIO.output(GPIO_TRIGGER, False)
    StartTime = time.time()
    StopTime = time.time()
    # save StartTime: wait for the echo line to go high (with ~3 s timeout).
    while GPIO.input(GPIO_ECHO) == 0:
        StartTime = time.time()
        if(StartTime-StopTime > 3):
            break
    # save time of arrival: wait for the echo line to drop (with ~3 s timeout).
    while GPIO.input(GPIO_ECHO) == 1:
        StopTime = time.time()
        if(StopTime - StartTime > 3):
            break
    TimeElapsed = StopTime - StartTime
    # Local name shadows the function itself; harmless within this scope.
    distance = (TimeElapsed * 34300) / 2
    GPIO.output(GPIO_TRIGGER, False)
    return distance
def RobotProccess(m):
    """Drive the robot state machine from the shared manager dict.

    :param m: multiprocessing manager dict with 'Start', 'NetworkDone',
        'NeuralNetworkState' and 'distance' entries; this process sets
        'Start' once the robot controller is initialised.
    """
    RobotController = RobotActions.RobotRunner(LogInfo = False,InitCar = False)
    RobotController.RunState()
    print("InitDone")
    # Signal the sensor/network processes that the robot is ready...
    m['Start'] = 1
    # ...then busy-wait until the network process has loaded its model.
    while (m['NetworkDone'] == 0):
        continue
    while True:
        time.sleep(0.1)
        NNState = m['NeuralNetworkState']
        distance = m['distance']
        # NOTE(review): hard-coded even though m['confidence'] exists — confirm.
        confidence = 1
        try:
            RobotController.Update(NNState, distance, confidence, None)
            RobotController.UpdateState()
            RobotController.RunState()
        except Exception:
            # Was a bare 'except:', which also swallowed KeyboardInterrupt and
            # SystemExit, making the process impossible to stop cleanly.
            print("Robot Error")
def DistanceProccess(m):
    """Periodically read the ultrasonic sensor into m['distance'].

    :param m: multiprocessing manager dict; waits on 'Start' before looping.
    """
    # Busy-wait until the robot process reports it is initialised.
    while(m['Start'] == 0):
        continue
    while True:
        time.sleep(0.25)
        try:
            readDistance = distance()
            m['distance'] = readDistance
        except Exception:
            # Was a bare 'except:', which also caught KeyboardInterrupt and
            # prevented clean shutdown; only swallow genuine sensor errors.
            print("Distance Error")
def GetStateByTime(timeDif):
    """Map an elapsed-time value (seconds) to a scripted robot state.

    <=1s Stop, <=3s Left, <=12s Up, <=20s Right, beyond that Stop again.
    """
    # (upper bound, state) pairs, scanned in ascending order.
    schedule = (
        (1, NeuralNetWorkRead.Stop),
        (3, NeuralNetWorkRead.Left),
        (12, NeuralNetWorkRead.Up),
        (20, NeuralNetWorkRead.Right),
    )
    for limit, state in schedule:
        if timeDif <= limit:
            return state
    return NeuralNetWorkRead.Stop
def GetNeuralNetworkResponseProccess(m):
    """Classify camera frames with the MobileNet sign model and publish the
    predicted state into the shared dict.

    Waits for m['Start'], loads the Keras model (slow), sets
    m['NetworkDone'] = 1 so the robot process can proceed, then loops:
    grab a frame, resize/convert it, run inference, and write the winning
    NeuralNetWorkRead state to m['NeuralNetworkState'] until 'q' is pressed
    in the preview window.
    """
    # Busy-wait until the robot process has initialised.
    while(m['Start'] == 0):
        continue
    print('network starts')
    image_size = 100
    my_model = load_model("SignalsMobileModel.h5")
    # allWeights = np.load('SignalsAllWeights.npy',allow_pickle=True)
    # i=0
    # for l in my_model.layers:
    #     weightsArray = []
    #     weights = l.get_weights()
    #     for subLayer in weights:
    #         weightsArray.append(allWeights[i])
    #         i+=1
    #     if(len(weightsArray)>0):
    #         l.set_weights(weightsArray)
    # Model output index -> robot state.
    # NOTE(review): Labels below reads ['stop','right','left','up'], which
    # disagrees with this mapping's order; Labels is unused, but confirm the
    # mapping against the training label order.
    IndexToState = {
        0 : RobotActions.NeuralNetWorkRead.Left,
        1 : RobotActions.NeuralNetWorkRead.Right,
        2 : RobotActions.NeuralNetWorkRead.Stop,
        3 : RobotActions.NeuralNetWorkRead.Up}
    Labels = ['stop','right','left','up']
    m['NetworkDone'] = 1
    print('network done')
    cap = cv2.VideoCapture(0)
    # Buffer size 0: always classify the freshest frame, not a queued one.
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 0)
    while True:
        time.sleep(0.5)
        bgr_image = cap.read()[1]
        resized_image = cv2.resize(bgr_image,(image_size,image_size))
        rgb_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB)
        # Add the batch dimension expected by Keras: (1, H, W, 3).
        proccesedImage = np.expand_dims(rgb_image,axis=0)
        # image_array = image.img_to_array(rgb_image)
        # img_array_expanded_dims = np.expand_dims(image_array, axis=0)
        proccesedImage = preprocess_input(proccesedImage)
        predictions = my_model.predict(proccesedImage)
        cv2.imshow("Threshold lower image", resized_image)
        l = cv2.waitKey(5) & 0XFF
        if(l == ord('q')):
            break
        maxInd = np.argmax(predictions)
        NNState = IndexToState[maxInd]
        m['NeuralNetworkState'] = NNState
if __name__ == '__main__':
    print("starting")
    # Configure the ultrasonic sensor pins (BCM numbering).
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(GPIO_TRIGGER,GPIO.OUT)
    GPIO.setup(GPIO_ECHO,GPIO.IN)
    # Manager dict shared by the three worker processes below.
    m = mp.Manager().dict()
    t = mp.Value('d',0)  # NOTE(review): appears unused — confirm before removing.
    m['NeuralNetworkState'] = NeuralNetWorkRead.Stop
    m['distance'] = 0
    m['confidence'] = 1
    m['Start'] = 0        # set by RobotProccess once the robot is initialised
    m['NetworkDone'] = 0  # set by the network process once the model is loaded
    picar.setup()
    p1 = Process(target=RobotProccess,args=(m,))
    p2 = Process(target=DistanceProccess,args=(m,))
    p3 = Process(target=GetNeuralNetworkResponseProccess,args=(m,))
    p1.start()
    p2.start()
    p3.start()
    # All three loops run forever, so these joins normally never return.
    p1.join()
    p2.join()
    p3.join()
|
998,703 | e2e555e8762a7f8f903f5fdb75369ca9f124ac0a | from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
# Create your models here.
class Carrera(models.Model):
    """Degree programme ("carrera") a student profile can belong to."""
    # Free-text programme name; optional at the form level.
    carrera = models.CharField(max_length=60, blank=True)
    def __str__(self):
        return self.carrera
class Semestre(models.Model):
    """Semester label (two characters, e.g. a number) for a student profile."""
    semestre = models.CharField(max_length=2, blank=True)
    def __str__(self):
        return self.semestre
def upload_location(instance, filename):
    """Build the avatar upload path: profiles/<user id>/<original filename>."""
    return 'profiles/{}/{}'.format(instance.user.id, filename)
class Profile(models.Model):
    """Per-user extra data, linked one-to-one to AUTH_USER_MODEL."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='profile')
    bio = models.TextField(max_length=500, blank=True)
    # Stored under profiles/<user id>/<filename> (see upload_location above).
    avatar = models.ImageField(upload_to=upload_location, blank=True)
    birth_date = models.DateField(null=True, blank=True)
    # on_delete is mandatory from Django 2.0 on (omitting it raises a
    # TypeError at import time); CASCADE reproduces the implicit default of
    # Django < 2.0, so behavior is unchanged on older versions.
    carrera = models.ForeignKey(Carrera, verbose_name='Carrera', blank=True, null=True, on_delete=models.CASCADE)
    semestre = models.ForeignKey(Semestre, verbose_name='Semestre', blank=True, null=True, on_delete=models.CASCADE)
    def __str__(self):
        return '%s' % self.user.email
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_user_profile(sender, instance, created, **kwargs):
    """Create a blank Profile the first time a user row is saved."""
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related Profile whenever the user row is saved."""
    instance.profile.save()
|
998,704 | 9d8b4888022d5d5e4fa7f63eb57d78c8f4a5111c | #Author: Avery Cordle
def decimal_to_binary():
    """Read a non-negative decimal integer from stdin and print it in binary.

    Repeated division by 2 builds the digit string most-significant first.
    The original version printed an empty string for input 0 and wrapped the
    digit choice in a pointless one-iteration loop with a redundant condition.
    """
    num = int(input("Enter decimal number: "))
    answerChain = ""
    while num >= 1:
        # divmod yields (quotient, remainder) in one step; the remainder is
        # the next binary digit, prepended so digits come out in order.
        num, remainder = divmod(num, 2)
        answerChain = str(remainder) + answerChain
    # Edge case: the loop never runs for 0, which should still print "0".
    if not answerChain and num == 0:
        answerChain = "0"
    print(f"{answerChain}")
def binary_to_decimal():
    """Read a binary number from stdin and print its decimal value.

    The input is typed as a run of 0/1 digits and parsed as an ordinary
    integer; each pass peels off the lowest digit and weights it by the
    matching power of two.
    """
    digits = int(input("Enter binary number: "))
    total = 0
    place = 0
    while digits >= 1:
        bit = digits % 2
        total += bit * (2 ** place)
        digits //= 10
        place += 1
    print(total)
print("Calculator")
# Simple REPL: dispatch on the chosen conversion until the user quits.
while True:
    userInput = input("Convert from Binary to Decimal (BtD) or from Decimal to Binary (DtB), or (Q)uit: ").lower().strip()
    if userInput == "btd":
        binary_to_decimal()
    elif userInput == "dtb":
        decimal_to_binary()
    elif userInput == "q":
        break
    else:
        print("Invalid Command")
print("Goodbye")
998,705 | a5c734d6c83c601ead899b51c8a1b95a8bad21f1 | import requests
from bs4 import BeautifulSoup
# NIST WebBook name-search endpoint; {} is replaced by the compound name.
nist_url = "https://webbook.nist.gov/cgi/cbook.cgi?Name={}"
def get_info(name):
    """Scrape formula and molecular weight for *name* from the NIST WebBook.

    Returns a dict with 'formula' and 'weight' string entries.
    NOTE(review): screen-scraping — brittle against NIST markup changes, and
    assumes the name search resolves directly to a single compound page
    containing "Formula" and "Molecular weight" links; verify for new inputs.
    """
    resp = requests.get(nist_url.format(name))
    resp_soup = BeautifulSoup(resp.text, 'html.parser')
    info = {}
    formula_a_text = "Formula"
    # The <a>Formula</a> link sits inside text like "Formula: C6H12O6";
    # slice just past the colon (strip() removes the leftover space).
    formula_text = str(resp_soup.find('a', text=formula_a_text).parent.parent.text)
    info['formula'] = formula_text[formula_text.find(": ")+1:].strip()
    weight_a_text = "Molecular weight"
    # The weight value is the text node right after the link's parent element.
    info['weight'] = resp_soup.find('a', text=weight_a_text).parent.next_sibling.strip()
    return info
# Look up a handful of compounds and print one aligned row per compound.
names = ["glucose", "ethanol", "caffeine", "octane", "cyclohexane"]
for name in names:
    res = get_info(name)
    print(f"{name:14} - ", end='')
    for k, v in res.items():
        print(f"{v:>12} ", end='')
    print()
998,706 | 37f77c42e2116ef13678d1fadf72ea04aedb16e8 | from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from tox.config.cli.parse import get_options
from tox.config.loader.api import Override
if TYPE_CHECKING:
from tox.pytest import CaptureFixture
@pytest.mark.parametrize("flag", ["-x", "--override"])
def test_override_incorrect(flag: str, capsys: CaptureFixture) -> None:
    """An override value without an '=' separator aborts CLI parsing."""
    with pytest.raises(SystemExit):
        get_options(flag, "magic")
    stdout, stderr = capsys.readouterr()
    assert not stdout
    assert "override magic has no = sign in it" in stderr
@pytest.mark.parametrize("flag", ["-x", "--override"])
def test_override_add(flag: str) -> None:
    """A plain key=value override is recorded once with append disabled."""
    parsed, _ns, _pos, _cfg, _log = get_options(flag, "magic=true")
    assert len(parsed.override) == 1
    entry = parsed.override[0]
    assert entry.key == "magic"
    assert entry.value == "true"
    assert not entry.namespace
    assert entry.append is False
@pytest.mark.parametrize("flag", ["-x", "--override"])
def test_override_append(flag: str) -> None:
    """The += form of an override is parsed with append enabled."""
    parsed, _ns, _pos, _cfg, _log = get_options(flag, "magic+=true")
    assert len(parsed.override) == 1
    entry = parsed.override[0]
    assert entry.key == "magic"
    assert entry.value == "true"
    assert not entry.namespace
    assert entry.append is True
def test_override_equals() -> None:
    """Overrides built from identical specs compare equal."""
    assert Override("a=b") == Override("a=b")
def test_override_not_equals() -> None:
    """Overrides with different keys/values compare unequal."""
    assert Override("a=b") != Override("c=d")
def test_override_not_equals_different_type() -> None:
    """Comparing against a non-Override value is unequal, not an error."""
    assert Override("a=b") != 1
def test_override_repr() -> None:
    """repr() reproduces the original 'namespace.key=value' spec."""
    assert repr(Override("b.a=c")) == "Override('b.a=c')"
|
998,707 | 473be0bda91eae497fd6a3dbf01438c2569bcee7 | from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf import settings
from gazjango.articles.feeds import MainFeed, LatestStoriesFeed, SectionFeed
from gazjango.articles.feeds import SectionLatestFeed, TameFeed
from gazjango.announcements.feeds import AnnouncementsFeed, EventsFeed, NeedsApprovalFeed
from gazjango.accounts.forms import RegistrationFormWithProfile
from gazjango.jobs.feeds import JobsFeed
from gazjango.misc.url_helpers import reps
admin.autodiscover()
# Admin site, admin docs, and the JS i18n catalog for django-registration.
# (Old-style patterns()/string-view syntax throughout — pre-Django-1.10 API.)
urlpatterns = patterns('',
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    (r'^admin/', include(admin.site.urls)),
    (r'^jsi18n/$', 'django.views.i18n.javascript_catalog', {
        'packages': ('registration',),
    }),
)
# Feed classes exposed under /feeds/<slug>(.xml|.rss|/).
feeds = {
    'main': MainFeed,
    'latest': LatestStoriesFeed,
    'section': SectionFeed,
    'section-latest': SectionLatestFeed,
    'jobs': JobsFeed,
    'dashboard': MainFeed,
    'faculty-dashboard': TameFeed,
    'announcements': AnnouncementsFeed,
    'events': EventsFeed,
    'secret-lol_approval': NeedsApprovalFeed,
}
urlpatterns += patterns('',
    (r'^feeds/(?P<url>.*)(?:\.xml|\.rss|/)$', 'django.contrib.syndication.views.feed',
     {'feed_dict': feeds}),
)
urlpatterns += patterns('facebook_connect.views',
    (r'^xd_receiver\.html$', 'xd_receiver'),
)
# Article pages; 'reps' supplies the named regex fragments (ymds, num, ...).
urlpatterns += patterns('articles.views',
    (r'^$', 'homepage'),
    # (r'^$', 'april_fools'),
    (r'^search/$', 'search', {}, 'search'),
    (r'^%(ymds)s/$' % reps, 'article', {}, 'article'),
    (r'^%(ymds)s/%(num)s/$' % reps, 'article', {}, 'photospread'),
    (r'^%(ymds)s/print/$' % reps, 'article', {'print_view': True}, 'print'),
    (r'^%(ymds)s/email/$' % reps, 'email_article', {}, 'email'),
    (r'^staff/new/$', 'concept_save_page'),
    (r'^staff/$', 'staff'),
    (r'^aprilfools/$', 'april_fools'),
    (r'^staff/mail/$', 'staff_mail'),
)
# Per-article comment posting, listing, voting and moderation endpoints.
urlpatterns += patterns('comments.views',
    (r'^comments/$', 'comment_page'),
    (r'^%(ymds)s/comment/$' % reps, 'post_comment'),
    (r'^%(ymds)s/comment/captcha/$' % reps, 'show_captcha'),
    (r'^%(ymds)s/comments/(%(num)s/)?$' % reps, 'comments_for_article'),
    (r'^%(ymds)s/show-comment/%(num)s/$' % reps, 'get_comment_text'),
    (r'^%(ymds)s/vote-comment/%(num)s/(?P<val>up|down|clear)/$' % reps, 'vote_on_comment'),
    (r'^%(ymds)s/approve-comment/%(num)s/(?:%(val-b)s/)?$' % reps, 'approve_comment'),
)
# Announcements and poster submission; year/month/day variants all resolve to
# the same 'announcement' view.
urlpatterns += patterns('announcements.views',
    (r'^around/$', 'around_swarthmore'),
    (r'^announcements/new/$', 'submit_announcement', {}, 'submit-announcement'),
    (r'^announcements/new/success/$', 'announcement_success', {}, 'announcement-success'),
    (r'^posters/new/$', 'submit_poster', {}, 'submit-poster'),
    (r'^posters/new/success/$', 'poster_success', {}, 'poster-success'),
    (r'^announcements/%(year)s/%(slug)s/$' % reps, 'announcement', {}, 'announcement'),
    (r'^announcements/%(year)s/%(month)s/%(slug)s/$' % reps, 'announcement'),
    (r'^announcements/%(year)s/%(month)s/%(day)s/%(slug)s/$' % reps, 'announcement'),
    (r'announcements/$', 'list_announcements', {}, 'announcements')
    # (r'^announcements/$', 'list_announcements', {'order': 'descending', 'kind': 'c'}, 'announcements'),
    #
    # (r'^announcements/%(year)s/$' % reps, 'list_announcements', {'kind': 'c'}),
    # (r'^announcements/%(year)s/%(month)s/$' % reps, 'list_announcements', {'kind': 'c'}),
    # (r'^announcements/%(year)s/%(month)s/%(day)s/$' % reps, 'list_announcements', {'kind': 'c'}),
    #
    # (r'^announcements/%(kind)s/$' % reps, 'list_announcements'),
    # (r'^announcements/%(kind)s/%(year)s/$' % reps, 'list_announcements'),
    # (r'^announcements/%(kind)s/%(year)s/%(month)s/$' % reps, 'list_announcements'),
    # (r'^announcements/%(kind)s/%(year)s/%(month)s/%(day)s/$' % reps, 'list_announcements'),
)
# Issue/RSD pages; each view also has a 'plain' (unstyled) variant.
urlpatterns += patterns('issues.views',
    (r'^issue/$', 'latest_issue'),
    (r'^issue/plain/$', 'latest_issue', {'plain': True}),
    (r'^issue/%(year)s/%(month)s/%(day)s/$' % reps, 'issue_for_date', {}, 'issue'),
    (r'^issue/%(year)s/%(month)s/%(day)s/plain/$' % reps, 'issue_for_date', {'plain': True}),
    (r'^issue/preview/$', 'preview_issue'),
    (r'^issue/preview/plain/$', 'preview_issue', {'plain': True}),
    (r'^rsd/$', 'show_rsd', {}, 'rsd-announce-today'),
    (r'^rsd/plain/$', 'show_rsd', {'plain': True}),
    (r'^rsd/%(year)s/%(month)s/%(day)s/$' % reps, 'show_rsd', {}, 'rsd-announce'),
    (r'^rsd/%(year)s/%(month)s/%(day)s/plain/$' % reps, 'show_rsd', {'plain': True}),
    (r'^rsd-events/$', 'show_events', {}, 'rsd-events-today'),
    (r'^rsd-events/plain/$', 'show_events', {'plain': True}),
    (r'^rsd-events/%(year)s/%(month)s/%(day)s/$' % reps, 'show_events', {}, 'rsd-events'),
    (r'^rsd-events/%(year)s/%(month)s/%(day)s/plain/$' % reps, 'show_events', {'plain': True}),
    (r'^rsd-full/$', 'show_combined', {}, 'rsd-today'),
    (r'^rsd-full/plain/$', 'show_combined', {'plain': True}),
    (r'^rsd-full/%(year)s/%(month)s/%(day)s/$' % reps, 'show_combined', {}, 'rsd'),
    (r'^rsd-full/%(year)s/%(month)s/%(day)s/plain/$' % reps, 'show_combined', {'plain': True}),
    (r'^menu/$', 'menu_partial'),
    (r'^events/$', 'events_partial')
)
urlpatterns += patterns('polls.views',
    (r'^polls/%(year)s/%(slug)s/results/$' % reps, 'poll_results', {}, 'poll-results'),
    (r'^polls/%(year)s/%(slug)s/submit/$' % reps, 'submit_poll', {}, 'submit-poll'),
)
# Sub-apps mounted via include(), plus jobs/media/auth/account views.
urlpatterns += patterns('',
    (r'^books/', include('books.urls')),
)
urlpatterns += patterns('reviews.views',
    (r'^reviews/$', 'reviews', {}, 'reviews'),
    (r'^reviews/new/$', 'submit_review'),
    (r'^reviews/%(slug)s/$' % reps, 'establishment', {}, 'establishment'),
)
urlpatterns += patterns('',
    (r'^screw/', include('screw.urls')),
    (r'^housing/', include('housing.urls')),
)
urlpatterns += patterns('jobs.views',
    (r'^jobs/$', 'list_jobs', {}, 'job_list'),
    (r'^jobs/new/$', 'submit_job', {}, 'submit-job'),
    (r'^jobs/new/success/$', 'job_success'),
    (r'^jobs/%(slug)s/$' % reps, 'job_details')
)
urlpatterns += patterns('media.views',
    (r'^files/%(bucket)s/$' % reps, 'bucket'),
    (r'^files/%(bucket)s/%(slug)s/$' % reps, 'file')
)
# Stock auth views (login + password reset flow).
urlpatterns += patterns('django.contrib.auth.views',
    (r'^accounts/login/$', 'login', {}, 'login'),
    (r'^accounts/reset-password/$', 'password_reset', {}, 'password-reset'),
    (r'^accounts/reset-password/sent/$', 'password_reset_done'),
    (r'^accounts/reset-password/%(uid)s-%(token)s/$' % reps, 'password_reset_confirm', {}, 'password-reset-confirm'),
    (r'^accounts/reset-password/complete/$', 'password_reset_complete'),
)
urlpatterns += patterns('',
    (r'^accounts/logout/$', 'accounts.views.logout', {'next_page': '/'}, 'logout'),
    (r'^accounts/manage/$', 'accounts.views.manage', {}, 'manage-user'),
    (r'^accounts/manage/racy/(on|off)/$', 'accounts.views.racy_switch'),
    (r'^accounts/manage/subscribe/$', 'accounts.views.subscribe'),
    (r'^accounts/manage/unsubscribe/$', 'accounts.views.unsubscribe'),
    (r'^accounts/', include('accounts.registration_urls')),
    (r'^users/%(name)s/$' % reps, 'accounts.views.user_details', {}, 'user-details'),
    (r'^author/%(name)s/$' % reps, 'accounts.views.user_details')
)
# AJAX/data endpoints.
urlpatterns += patterns('',
    (r'^data/authors/$', 'accounts.views.author_completions', {}, 'author-completions'),
    (r'^data/usernames/$', 'accounts.views.username_for_name', {}, 'get-or-make-username'),
    # (r'^data/subsections/%(section)s/$' % reps, 'articles.views.list_subsections', {}, 'list-subsections')
    (r'^data/train-stations\.json$', 'reviews.views.list_trains'),
)
# Serve static assets directly only in development.
if settings.DEBUG:
    path = settings.BASE + '/static'
    urlpatterns += patterns('django.views.static',
        (r'^static/v2/(?P<path>.*)$', 'serve', {'document_root': path + '/v2'}),
        (r'^static/css/(?P<path>.*)$', 'serve', {'document_root': path + '/css'}),
        (r'^static/js/(?P<path>.*)$', 'serve', {'document_root': path + '/js'}),
        (r'^static/images/(?P<path>.*)$', 'serve', {'document_root': path + '/images'}),
        (r'^static/uploads/(?P<path>.*)$', 'serve', {'document_root': path + '/../uploads'}),
        (r'^static/admin/(?P<path>.*)$', 'serve', {'document_root': settings.ADMIN_MEDIA_PATH})
    )
# urlpatterns += patterns('athletics.views',
#     (r'^athletics/$', 'athletics'),
#     (r'^athletics/%(slug)s/$' % reps, 'team',{},'athletics_team')
# )
# section match should be last, to avoid shadowing others
urlpatterns += patterns('articles.views',
    (r'^archives/$', 'archives', {}, 'archives'),
    (r'^(?:archives/)?%(year)s/$' % reps, 'archives'),
    (r'^(?:archives/)?%(year)s/%(month)s/$' % reps, 'archives'),
    (r'^(?:archives/)?%(year)s/%(month)s/%(day)s/$' % reps, 'archives'),
    (r'^archives/%(section)s/$' % reps, 'archives'),
    (r'^archives/%(section)s/%(subsection)s/$' % reps, 'archives'),
    (r'^(?:archives/)?%(section)s/(?:%(subsection)s/)?%(year)s/$' % reps, 'archives'),
    (r'^(?:archives/)?%(section)s/(?:%(subsection)s/)?%(year)s/%(month)s/$' % reps, 'archives'),
    (r'^(?:archives/)?%(section)s/(?:%(subsection)s/)?%(year)s/%(month)s/%(day)s/$' % reps, 'archives'),
    (r'^%(section)s/$' % reps, 'section', {}, 'section'),
    (r'^%(section)s/%(subsection)s/$' % reps, 'subsection', {}, 'subsection'),
)
|
998,708 | 64c981b50f2b48c9abb13fe8a846a5e7a8aa713d | from fastapi import Depends
from fastapi.security import OAuth2PasswordBearer
from src.core.settings import settings
from src.utils.security import VerifyAccessToken
# Extracts the bearer token from the Authorization header; tokenUrl points at
# the versioned login endpoint (used by the interactive API docs).
oauth2_bearer = OAuth2PasswordBearer(
    tokenUrl=f"{settings.API_V1_STR}/users/login"
)
def authorization(access_token: str = Depends(oauth2_bearer)):
    """FastAPI dependency: verify the bearer token and return the user id."""
    return VerifyAccessToken(access_token=access_token).verify()
|
998,709 | 7699dfe44bb98e8402ca2fe3b5815f96db7d7608 | name = input("Enter file:")
# Fall back to the course's sample mailbox when no filename is typed.
if len(name) < 1:
    name = "mbox-short.txt"
handle = open(name)

# Tally "From " header lines by the hour field of their timestamp.
hour_counts = dict()
for line in handle:
    if not line.startswith('From '):
        continue
    # e.g. "From addr Sat Jan  5 09:14:16 2008" -> words[5] is HH:MM:SS
    hour = line.split()[5].split(':')[0]
    hour_counts[hour] = hour_counts.get(hour, 0) + 1

# Print one "<hour> <count>" line per hour, in ascending hour order.
for key, val in sorted(hour_counts.items()):
    print(key, val)
998,710 | 2fbe30b3918e66862fc24fcd27f638e584a3978b | import json
# Region id -> terrain type for the 23-region, 2-player map.
terrains = {
    "sea": [1, 18, 9],
    "field": [3, 15, 19, 21],
    "mountain": [2, 7, 10, 20],
    "swamp": [4, 6, 8, 13],
    "plain": [5, 12, 16, 22],
    "forest": [11, 14, 17, 23],
}

# Special markers placed on some regions.
markers = {
    "magic": [4, 11, 15, 19],
    "mine": [2, 14, 10, 6],
    "cave": [2, 5, 13, 22],
}

# Regions defended by indigenous forces.
indigens = [3, 4, 5, 11, 13, 16, 17, 19, 22]

# Filled in by the build step below.
nodes = {}

# Undirected adjacency between regions.
links = [
    (1, 15), (1, 2),
    (2, 15), (2, 16), (2, 3),
    (3, 16), (3, 17), (3, 5), (3, 4),
    (4, 5),
    (5, 17), (5, 6),
    (6, 17), (6, 19), (6, 7),
    (7, 19), (7, 8),
    (8, 19), (8, 22), (8, 23), (8, 9),
    (9, 23), (9, 10),
    (10, 23), (10, 22), (10, 11),
    (11, 21), (11, 12),
    (12, 21), (12, 13),
    (13, 21), (13, 20), (13, 14),
    (14, 20), (14, 18), (14, 16), (14, 15),
    (15, 16),
    (16, 17), (16, 18),
    (17, 18), (17, 19),
    (18, 19), (18, 20),
    (19, 20), (19, 22),
    (20, 21), (20, 22),
    (21, 22),
    (22, 23),
]

# Map-edge regions: 1..15 plus 23.
borders = list(range(1, 16))
borders.append(23)
if __name__ == "__main__":
    # Terrain assignment; mountains carry an intrinsic defense bonus.
    for terrain, region_ids in terrains.items():
        for region in region_ids:
            nodes[str(region)] = {"terrain": terrain}
            if terrain == "mountain":
                nodes[str(region)]["defense"] = {"mountain": 1}
    # Attach markers, accumulating when a region has several.
    for marker, region_ids in markers.items():
        print(marker)
        for region in region_ids:
            print(" {}".format(region))
            nodes[str(region)].setdefault("markers", []).append(marker)
    # Indigenous defenders stack with any existing defense entry.
    for region in indigens:
        nodes[str(region)].setdefault("defense", {})["indigen"] = 1
    # Record each link on both endpoints (adjacency is symmetric).
    for a, b in links:
        nodes[str(a)].setdefault("links", []).append(str(b))
        nodes[str(b)].setdefault("links", []).append(str(a))
    # Flag border regions.
    print(borders)
    for region in borders:
        nodes[str(region)]["border"] = True
    print(len(nodes))
    print(nodes)
    with open('maps/2players.json', 'w') as file:
        json.dump(nodes, file, indent=4)
998,711 | 53b70007c3ad041fb230bdbeef8a57f260f2950e | import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
# Load the pre-aggregated (gender, suicide-count) pairs emitted by Hadoop
# (headerless CSV: column 0 = gender string, column 1 = count).
data = pd.read_csv("part-r-00000.csv",header=None)
lbmake =LabelEncoder()
# Encode the gender strings as small integers (shifted to start at 1) so
# KMeans can use gender as a numeric feature.
data["numerical"]=lbmake.fit_transform(data[0])+1
print(data)
kmeans=KMeans(n_clusters=2)
x=data[[1,"numerical"]]
kmeans.fit(x)
# Cluster 0 -> Green, cluster 1 -> Red.
colormap =np.array(["Green","Red"])
print(kmeans.labels_)
z=plt.scatter(data[0],data[1],c=colormap[kmeans.labels_])
plt.xlabel("Gender")
plt.ylabel("Number of suicides")
plt.title("Clustering by the gender")
print(kmeans.labels_)
plt.yticks(data[1])
plt.show()
998,712 | 37f0ac027d7c79209387f2ab845f37c8897ac7b4 | #%%
#%%
# Shift-by-two substitution cipher (Python Challenge puzzle). The two extra
# letters at the end make the lookup wrap: 'y' -> 'a', 'z' -> 'b'.
alpha = list("abcdefghijklmnopqrstuvwxyzab")
encrytped = list("g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.")
# encrytped = list("http://www.pythonchallenge.com/pc/def/map.html")
decrypted = []

def convert2(ch):
    """Shift a lowercase letter forward by two places; pass anything else through."""
    if ch not in alpha:
        return ch
    return alpha[alpha.index(ch) + 2]

# Sanity check on just the first character ('g' -> 'i').
decrypted = convert2(encrytped[0])
print(''.join(decrypted))
# %%
# chr-arithmetic variant kept for reference (note: % 25 is off for 'y'/'z').
convert = lambda x: chr((ord(x) + 2 - 97) % 25 + 97)
result = map(convert2, encrytped)
print("".join(list(result)))
# %%
# %%
998,713 | db4d7def4b9400b43c9920dfe137ff23ed78a7c6 |
# Template fragment: the common import block emitted at the top of every
# generated mtuq example/test script.
Imports="""
import os
import numpy as np
from mtuq import read, open_db, download_greens_tensors
from mtuq.event import Origin
from mtuq.graphics import plot_data_greens2, plot_beachball, plot_misfit_dc
from mtuq.grid import DoubleCoupleGridRegular
from mtuq.grid_search import grid_search
from mtuq.misfit import Misfit
from mtuq.process_data import ProcessData
from mtuq.util import fullpath, merge_dicts, save_json
from mtuq.util.cap import parse_station_codes, Trapezoid
"""
Docstring_DetailedAnalysis="""
if __name__=='__main__':
#
# Performs detailed analysis involving
#
# - grid search over all moment tensor parameters, including magnitude
# - separate body wave, Rayleigh wave and Love wave data categories
# - data variance estimation and likelihood analysis
#
#
# Generates figures of
#
# - maximum likelihood surfaces
# - marginal likelihood surfaces
# - data misfit surfaces
# - "variance reduction" surfaces
# - geographic variation of time shifts
# - geographic variation of amplitude ratios
#
#
# USAGE
# mpirun -n <NPROC> python DetailedAnalysis.py
#
#
# This is the most complicated example. For simpler ones, see
# SerialGridSearch.DoubleCouple.py or GridSearch.FullMomentTensor.py
#
# For ideas on applying this type of analysis to entire sets of events,
# see github.com/rmodrak/mtbench
#
"""
Docstring_GridSearch_DoubleCouple="""
if __name__=='__main__':
#
# Carries out grid search over 64,000 double couple moment tensors
#
# USAGE
# mpirun -n <NPROC> python GridSearch.DoubleCouple.py
#
# For a simpler example, see SerialGridSearch.DoubleCouple.py,
# which runs the same inversion in serial
#
"""
Docstring_GridSearch_DoubleCoupleMagnitudeDepth="""
if __name__=='__main__':
#
# Carries out grid search over source orientation, magnitude, and depth
#
# USAGE
# mpirun -n <NPROC> python GridSearch.DoubleCouple+Magnitude+Depth.py
#
# For simpler examples, see SerialGridSearch.DoubleCouple.py or
# GridSearch.FullMomentTensor.py
#
"""
Docstring_GridSearch_DoubleCoupleMagnitudeHypocenter="""
if __name__=='__main__':
#
# Carries out grid search over source orientation, magnitude, and hypocenter
#
# USAGE
# mpirun -n <NPROC> python GridSearch.DoubleCouple+Magnitude+Hypocenter.py
#
#
# 1D Green's functions will be downloaded from a remote server, which can
# take a very long time. Any subsequent runs will generally be much faster.
# A local Green's function database can be even faster still (see online
# documentation for more information).
#
# More meaningful results could be obtained using 3D Green's functions and
# a phase misfit function, but 3D databases are too large for remote
# hosting.
#
# If you are just trying things out for the first time, consider running
    # one of the other examples instead. Because they require fewer Green's
# functions, all the other examples have faster and more consistent
# runtimes.
#
"""
Docstring_GridSearch_FullMomentTensor="""
if __name__=='__main__':
#
# Carries out grid search over all moment tensor parameters
#
# USAGE
# mpirun -n <NPROC> python GridSearch.FullMomentTensor.py
#
"""
Docstring_SerialGridSearch_DoubleCouple="""
if __name__=='__main__':
#
# Carries out grid search over 64,000 double couple moment tensors
#
# USAGE
# python SerialGridSearch.DoubleCouple.py
#
# A typical runtime is about 60 seconds. For faster results try
# GridSearch.DoubleCouple.py, which runs the same inversion in parallel
#
"""
Docstring_WaveformsPolarities="""
if __name__=='__main__':
#
# Joint waveform and polarity grid search over all moment tensor parameters
#
# USAGE
# mpirun -n <NPROC> python Waveforms+Polarities.py
#
# For a simpler example, see SerialGridSearch.DoubleCouple.py
#
"""
Docstring_TestGridSearch_DoubleCouple="""
if __name__=='__main__':
#
# Grid search integration test
#
# This script is similar to examples/SerialGridSearch.DoubleCouple.py,
# except here we use a coarser grid, and at the end we assert that the test
# result equals the expected result
#
# The compare against CAP/FK:
#
# cap.pl -H0.02 -P1/15/60 -p1 -S2/10/0 -T15/150 -D1/1/0.5 -C0.1/0.333/0.025/0.0625 -Y1 -Zweight_test.dat -Mscak_34 -m4.5 -I1/1/10/10/10 -R0/0/0/0/0/360/0/90/-180/180 20090407201255351
#
# Note however that CAP uses a different method for defining regular grids
#
"""
Docstring_TestGridSearch_DoubleCoupleMagnitudeDepth="""
if __name__=='__main__':
#
# Grid search integration test
#
# This script is similar to examples/SerialGridSearch.DoubleCouple.py,
    # except here we include magnitude and depth and use a coarser grid
#
"""
Docstring_TestGraphics="""
if __name__=='__main__':
#
# Tests data, synthetics and beachball plotting utilities
#
# Note that in the figures created by this script, the data and synthetics
    # are not expected to fit especially well; currently, the only requirement
# is that the script runs without errors
#
import matplotlib
matplotlib.use('Agg', warn=False, force=True)
import matplotlib
"""
Docstring_TestMisfit="""
if __name__=='__main__':
#
# Checks the correctness of the fast (optimized) misfit function
# implementations against a simple pure Python implementation.
# These implementations correspond to:
#
# optimization_level=0: simple pure Python
# optimization_level=1: fast pure Python
# optimization_level=2: fast Python/C
#
# In running the test in our environment, we observe that the two pure
# Python implementations agree almost exactly. On the other hand, the
# pure Python and Python/C results differ by as much as 0.1 percent,
# presumably as a result of differences in the way that floating-point
# error accumulates in the sum over residuals. Further work is required to
# understand this better
#
# Possibly relevant is the fact that C extensions are compiled with
# `-Ofast` flag, as specified in `setup.py`.
#
# Note that the `optimization_level` keyword argument does not correspond
# at all to C compiler optimization flags. For example, the NumPy binaries
# called by the simple pure Python misfit function are probably compiled
# using a nonzero optimization level?
#
"""
# Template fragment: header docstring and __all__ export list for the example
# data gallery module.  The export list previously contained 'process_bw'
# twice with a missing comma between them (implicit string concatenation
# produced a single bogus name) and listed 'misfit_bw' twice; it now names
# the bw/sw pairs the gallery actually creates.
Docstring_Gallery="""
if True:
    #
    # Creates example data structures
    #
    # Rather than being executed as a script, this code is designed to be
    # imported. After importing this module, users can access the example data
    # and functions listed in __all__
    #
    # Note that some I/O and data processing are involved in creating the
    # example data, so importing this module may take significantly longer than
    # other modules
    #
    __all__ = [
        'process_bw',
        'process_sw',
        'misfit_bw',
        'misfit_sw',
        'data_bw',
        'data_sw',
        'greens_bw',
        'greens_sw',
        'stations',
        'origin',
        ]
"""
Docstring_BenchmarkCAP="""
if __name__=='__main__':
#
# Given seven "fundamental" moment tensors, generates MTUQ synthetics and
# compares with corresponding CAP/FK synthetics
#
# Before running this script, it is necessary to unpack the CAP/FK
# synthetics using data/tests/unpack.bash
#
# This script is similar to examples/SerialGridSearch.DoubleCouple.py,
# except here we consider only seven grid points rather than an entire
# grid, and here the final plots are a comparison of MTUQ and CAP/FK
# synthetics rather than a comparison of data and synthetics
#
# Because of the idiosyncratic way CAP implements source-time function
# convolution, it's not expected that CAP and MTUQ synthetics will match
# exactly. CAP's "conv" function results in systematic magnitude-
# dependent shifts between origin times and arrival times. We deal with
# this by applying magnitude-dependent time-shifts to MTUQ synthetics
# (which normally lack such shifts) at the end of the benchmark. Even with
# this correction, the match will not be exact because CAP applies the
# shifts before tapering and MTUQ after tapering. The resulting mismatch
# will usually be apparent in body-wave windows, but not in surface-wave
# windows
#
# Note that CAP works with dyne,cm and MTUQ works with N,m, so to make
# comparisons we convert CAP output from the former to the latter
#
# The CAP/FK synthetics used in the comparison were generated by
# uafseismo/capuaf:46dd46bdc06e1336c3c4ccf4f99368fe99019c88
# using the following commands
#
# source #0 (explosion):
# cap.pl -H0.02 -P1/15/60 -p1 -S2/10/0 -T15/150 -D1/1/0.5 -C0.1/0.333/0.025/0.0625 -Y1 -Zweight_test.dat -Mscak_34 -m4.5 -I1 -R0/1.178/90/45/90 20090407201255351
#
# source #1 (on-diagonal)
# cap.pl -H0.02 -P1/15/60 -p1 -S2/10/0 -T15/150 -D1/1/0.5 -C0.1/0.333/0.025/0.0625 -Y1 -Zweight_test.dat -Mscak_34 -m4.5 -I1 -R-0.333/0.972/90/45/90 20090407201255351
#
# source #2 (on-diagonal)
# cap.pl -H0.02 -P1/15/60 -p1 -S2/10/0 -T15/150 -D1/1/0.5 -C0.1/0.333/0.025/0.0625 -Y1 -Zweight_test.dat -Mscak_34 -m4.5 -I1 -R-0.333/0.972/45/90/180 20090407201255351
#
# source #3 (on-diagonal):
# cap.pl -H0.02 -P1/15/60 -p1 -S2/10/0 -T15/150 -D1/1/0.5 -C0.1/0.333/0.025/0.0625 -Y1 -Zweight_test.dat -Mscak_34 -m4.5 -I1 -R-0.333/0.972/45/90/0 20090407201255351
#
# source #4 (off-diagonal):
# cap.pl -H0.02 -P1/15/60 -p1 -S2/10/0 -T15/150 -D1/1/0.5 -C0.1/0.333/0.025/0.0625 -Y1 -Zweight_test.dat -Mscak_34 -m4.5 -I1 -R0/0/90/90/90 20090407201255351
#
# source #5 (off-diagonal):
# cap.pl -H0.02 -P1/15/60 -p1 -S2/10/0 -T15/150 -D1/1/0.5 -C0.1/0.333/0.025/0.0625 -Y1 -Zweight_test.dat -Mscak_34 -m4.5 -I1 -R0/0/90/0/0 20090407201255351
#
# source #6 (off-diagonal):
# cap.pl -H0.02 -P1/15/60 -p1 -S2/10/0 -T15/150 -D1/1/0.5 -C0.1/0.333/0.025/0.0625 -Y1 -Zweight_test.dat -Mscak_34 -m4.5 -I1 -R0/0/0/90/180 20090407201255351
#
"""
ArgparseDefinitions="""
# by default, the script runs with figure generation and error checking
# turned on
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--no_checks', action='store_true')
parser.add_argument('--no_figures', action='store_true')
args = parser.parse_args()
run_checks = (not args.no_checks)
run_figures = (not args.no_figures)
"""
Paths_BenchmarkCAP="""
from mtuq.util.cap import\\
get_synthetics_cap, get_synthetics_mtuq,\\
get_data_cap, compare_cap_mtuq
# the following directories correspond to the moment tensors in the list
# "sources" below
paths = []
paths += [fullpath('data/tests/benchmark_cap/20090407201255351/0')]
paths += [fullpath('data/tests/benchmark_cap/20090407201255351/1')]
paths += [fullpath('data/tests/benchmark_cap/20090407201255351/2')]
paths += [fullpath('data/tests/benchmark_cap/20090407201255351/3')]
paths += [fullpath('data/tests/benchmark_cap/20090407201255351/4')]
paths += [fullpath('data/tests/benchmark_cap/20090407201255351/5')]
paths += [fullpath('data/tests/benchmark_cap/20090407201255351/6')]
"""
PathsComments="""
#
# We will investigate the source process of an Mw~4 earthquake using data
# from a regional seismic array
#
"""
Paths_Syngine="""
path_data= fullpath('data/examples/20090407201255351/*.[zrt]')
path_weights= fullpath('data/examples/20090407201255351/weights.dat')
event_id= '20090407201255351'
model= 'ak135'
"""
Paths_AxiSEM="""
path_greens= '/home/rmodrak/data/ak135f_scak-2s'
path_data= fullpath('data/examples/20090407201255351/*.[zrt]')
path_weights= fullpath('data/examples/20090407201255351/weights.dat')
event_id= '20090407201255351'
model= 'ak135f_scak-2s'
"""
Paths_FK="""
path_greens= fullpath('data/tests/benchmark_cap/greens/scak')
path_data= fullpath('data/examples/20090407201255351/*.[zrt]')
path_weights= fullpath('data/examples/20090407201255351/weights.dat')
event_id= '20090407201255351'
model= 'scak'
"""
Paths_SPECFEM3D_SGT="""
path_greens = fullpath('data/examples/SPECFEM3D_SGT/greens/socal3D')
path_data = fullpath('data/examples/SPECFEM3D_SGT/data/*.[zrt]')
path_weights= fullpath('data/examples/SPECFEM3D_SGT/weights.dat')
event_id = 'evt11056825'
model = 'socal3D'
taup_model = 'ak135'
"""
DataProcessingComments="""
#
# Body and surface wave measurements will be made separately
#
"""
DataProcessingDefinitions="""
process_bw = ProcessData(
filter_type='Bandpass',
freq_min= 0.1,
freq_max= 0.333,
pick_type='taup',
taup_model=model,
window_type='body_wave',
window_length=15.,
capuaf_file=path_weights,
)
process_sw = ProcessData(
filter_type='Bandpass',
freq_min=0.025,
freq_max=0.0625,
pick_type='taup',
taup_model=model,
window_type='surface_wave',
window_length=150.,
capuaf_file=path_weights,
)
"""
MisfitComments="""
#
# For our objective function, we will use a sum of body and surface wave
# contributions
#
"""
MisfitDefinitions="""
misfit_bw = Misfit(
norm='L2',
time_shift_min=-2.,
time_shift_max=+2.,
time_shift_groups=['ZR'],
)
misfit_sw = Misfit(
norm='L2',
time_shift_min=-10.,
time_shift_max=+10.,
time_shift_groups=['ZR','T'],
)
"""
WaveformsPolaritiesMisfit="""
#
# We will jointly evaluate waveform differences and polarities
#
misfit_bw = WaveformMisfit(
norm='L2',
time_shift_min=-2.,
time_shift_max=+2.,
time_shift_groups=['ZR'],
)
misfit_sw = WaveformMisfit(
norm='L2',
time_shift_min=-10.,
time_shift_max=+10.,
time_shift_groups=['ZR','T'],
)
polarity_misfit = PolarityMisfit(
taup_model=model)
#
# Observed polarities can be attached to the data or passed through a
    # user-supplied dictionary or list in which +1 corresponds to positive
    # first motion, -1 to negative first motion, and 0 to indeterminate or
# unpicked
#
polarities = np.array([-1, -1, -1, 1, 1, 0, 1, 1, -1, 1, 1, 1, 0, 1, 1, 1, -1, 1, 1, 0])
"""
WeightsComments="""
#
# User-supplied weights control how much each station contributes to the
# objective function
#
"""
WeightsDefinitions="""
station_id_list = parse_station_codes(path_weights)
"""
OriginComments="""
#
# Origin time and location will be fixed. For an example in which they
# vary, see examples/GridSearch.DoubleCouple+Magnitude+Depth.py
#
# See also Dataset.get_origins(), which attempts to create Origin objects
# from waveform metadata
#
"""
OriginDefinitions="""
origin = Origin({
'time': '2009-04-07T20:12:55.000000Z',
'latitude': 61.454200744628906,
'longitude': -149.7427978515625,
'depth_in_m': 33033.599853515625,
})
"""
OriginDefinitions_SPECFEM3D_SGT="""
origin = Origin({
'time': '2019-07-04T18:39:44.0000Z',
'latitude': 35.601333,
'longitude': -117.597,
'depth_in_m': 2810.0,
'id': 'evt11056825'
})
"""
OriginsComments="""
#
# We will search over a range of locations about the catalog origin
#
"""
Origins_Depth="""
catalog_origin = Origin({
'time': '2009-04-07T20:12:55.000000Z',
'latitude': 61.454200744628906,
'longitude': -149.7427978515625,
'depth_in_m': 33033.599853515625,
})
depths = np.array(
# depth in meters
[25000., 30000., 35000., 40000.,
45000., 50000., 55000., 60000.])
origins = []
for depth in depths:
origins += [catalog_origin.copy()]
setattr(origins[-1], 'depth_in_m', depth)
"""
Origins_Hypocenter="""
catalog_origin = Origin({
'time': '2009-04-07T20:12:55.000000Z',
'latitude': 61.454200744628906,
'longitude': -149.7427978515625,
'depth_in_m': 33033.599853515625,
})
from mtuq.util.math import lat_lon_tuples
tuples = lat_lon_tuples(
center_lat=catalog_origin.latitude,
center_lon=catalog_origin.longitude,
spacing_in_m=1000.,
npts_per_edge=4,
)
origins = []
for lat, lon in tuples:
origins += [catalog_origin.copy()]
setattr(origins[-1], 'latitude', lat)
setattr(origins[-1], 'longitude', lon)
# use best depth from DC+Depth search
setattr(origins[-1], 'depth_in_m', 45000.)
"""
MisfitDefinitions_DetailedExample="""
misfit_bw = Misfit(
norm='L2',
time_shift_min=-2.,
time_shift_max=+2.,
time_shift_groups=['ZR'],
)
misfit_rayleigh = Misfit(
norm='L2',
time_shift_min=-10.,
time_shift_max=+10.,
time_shift_groups=['ZR'],
)
misfit_love = Misfit(
norm='L2',
time_shift_min=-10.,
time_shift_max=+10.,
time_shift_groups=['T'],
)
"""
Grid_DoubleCouple="""
#
# Next, we specify the moment tensor grid and source-time function
#
grid = DoubleCoupleGridRegular(
npts_per_axis=40,
magnitudes=[4.5])
wavelet = Trapezoid(
magnitude=4.5)
"""
Grid_DoubleCoupleMagnitude="""
#
# Next, we specify the moment tensor grid and source-time function
#
magnitudes = np.array(
# moment magnitude (Mw)
[4.3, 4.4, 4.5,
4.6, 4.7, 4.8])
grid = DoubleCoupleGridRegular(
npts_per_axis=20,
magnitudes=magnitudes)
wavelet = Trapezoid(
magnitude=4.5)
"""
Grid_FullMomentTensor="""
#
# Next, we specify the moment tensor grid and source-time function
#
grid = FullMomentTensorGridSemiregular(
npts_per_axis=10,
magnitudes=[4.4, 4.5, 4.6, 4.7])
wavelet = Trapezoid(
magnitude=4.5)
"""
Grid_TestDoubleCoupleMagnitudeDepth="""
#
# Next, we specify the moment tensor grid and source-time function
#
grid = DoubleCoupleGridRegular(
npts_per_axis=5,
magnitudes=[4.4, 4.5, 4.6, 4.7])
wavelet = Trapezoid(
magnitude=4.5)
"""+OriginDefinitions+"""
depths = np.array(
# depth in meters
[34000])
origins = []
for depth in depths:
origin.depth = depth
origins += [origin.copy()]
"""
Grid_TestGraphics="""
mt = np.sqrt(1./3.)*np.array([1., 1., 1., 0., 0., 0.]) # explosion
mt *= 1.e16
wavelet = Trapezoid(
magnitude=4.5)
"""
Grid_BenchmarkCAP="""
#
# Next we specify the source parameter grid
#
magnitude = 4.5
moment = 10.**(1.5*magnitude + 9.1) # units: N-m
sources = []
for array in [
# Mrr, Mtt, Mpp, Mrt, Mrp, Mtp
np.sqrt(1./3.)*np.array([1., 1., 1., 0., 0., 0.]), # explosion
np.array([1., 0., 0., 0., 0., 0.]), # source 1 (on-diagonal)
np.array([0., 1., 0., 0., 0., 0.]), # source 2 (on-diagonal)
np.array([0., 0., 1., 0., 0., 0.]), # source 3 (on-diagonal)
np.sqrt(1./2.)*np.array([0., 0., 0., 1., 0., 0.]), # source 4 (off-diagonal)
np.sqrt(1./2.)*np.array([0., 0., 0., 0., 1., 0.]), # source 5 (off-diagonal)
np.sqrt(1./2.)*np.array([0., 0., 0., 0., 0., 1.]), # source 6 (off-diagonal)
]:
sources += [MomentTensor(np.sqrt(2)*moment*array)]
wavelet = Trapezoid(
magnitude=magnitude)
"""
Main_GridSearch="""
from mpi4py import MPI
comm = MPI.COMM_WORLD
#
# The main I/O work starts now
#
if comm.rank==0:
print('Reading data...\\n')
data = read(path_data, format='sac',
event_id=event_id,
station_id_list=station_id_list,
tags=['units:cm', 'type:velocity'])
data.sort_by_distance()
stations = data.get_stations()
print('Processing data...\\n')
data_bw = data.map(process_bw)
data_sw = data.map(process_sw)
print('Reading Greens functions...\\n')
greens = download_greens_tensors(stations, origin, model)
print('Processing Greens functions...\\n')
greens.convolve(wavelet)
greens_bw = greens.map(process_bw)
greens_sw = greens.map(process_sw)
else:
stations = None
data_bw = None
data_sw = None
greens_bw = None
greens_sw = None
stations = comm.bcast(stations, root=0)
data_bw = comm.bcast(data_bw, root=0)
data_sw = comm.bcast(data_sw, root=0)
greens_bw = comm.bcast(greens_bw, root=0)
greens_sw = comm.bcast(greens_sw, root=0)
#
# The main computational work starts now
#
if comm.rank==0:
print('Evaluating body wave misfit...\\n')
results_bw = grid_search(
data_bw, greens_bw, misfit_bw, origin, grid)
if comm.rank==0:
print('Evaluating surface wave misfit...\\n')
results_sw = grid_search(
data_sw, greens_sw, misfit_sw, origin, grid)
"""
Main1_SerialGridSearch_DoubleCouple="""
#
# The main I/O work starts now
#
print('Reading data...\\n')
data = read(path_data, format='sac',
event_id=event_id,
station_id_list=station_id_list,
tags=['units:cm', 'type:velocity'])
data.sort_by_distance()
stations = data.get_stations()
print('Processing data...\\n')
data_bw = data.map(process_bw)
data_sw = data.map(process_sw)
print('Reading Greens functions...\\n')
greens = download_greens_tensors(stations, origin, model)
print('Processing Greens functions...\\n')
greens.convolve(wavelet)
greens_bw = greens.map(process_bw)
greens_sw = greens.map(process_sw)
"""
Main2_SerialGridSearch_DoubleCouple="""
#
# The main computational work starts now
#
print('Evaluating body wave misfit...\\n')
results_bw = grid_search(data_bw, greens_bw, misfit_bw, origin, grid)
print('Evaluating surface wave misfit...\\n')
results_sw = grid_search(data_sw, greens_sw, misfit_sw, origin, grid)
"""
Main_TestGridSearch_DoubleCoupleMagnitudeDepth="""
#
# The main I/O work starts now
#
print('Reading data...\\n')
data = read(path_data, format='sac',
event_id=event_id,
station_id_list=station_id_list,
tags=['units:cm', 'type:velocity'])
data.sort_by_distance()
stations = data.get_stations()
print('Processing data...\\n')
data_bw = data.map(process_bw)
data_sw = data.map(process_sw)
print('Reading Greens functions...\\n')
db = open_db(path_greens, format='FK', model=model)
greens = db.get_greens_tensors(stations, origins)
print('Processing Greens functions...\\n')
greens.convolve(wavelet)
greens_bw = greens.map(process_bw)
greens_sw = greens.map(process_sw)
#
# The main computational work starts now
#
print('Evaluating body wave misfit...\\n')
results_bw = grid_search(
data_bw, greens_bw, misfit_bw, origins, grid, 0)
print('Evaluating surface wave misfit...\\n')
results_sw = grid_search(
data_sw, greens_sw, misfit_sw, origins, grid, 0)
"""
Main_TestGraphics="""
print('Reading data...\\n')
data = read(path_data, format='sac',
event_id=event_id,
station_id_list=station_id_list,
tags=['units:cm', 'type:velocity'])
data.sort_by_distance()
stations = data.get_stations()
print('Processing data...\\n')
data_bw = data.map(process_bw)
data_sw = data.map(process_sw)
print('Reading Greens functions...\\n')
db = open_db(path_greens, format='FK', model=model)
greens = db.get_greens_tensors(stations, origin)
print('Processing Greens functions...\\n')
greens.convolve(wavelet)
greens_bw = greens.map(process_bw)
greens_sw = greens.map(process_sw)
#
# Generate figures
#
print('Figure 1 of 3\\n')
plot_data_greens2('graphics_test_1.png',
data_bw, data_sw, greens_bw, greens_sw,
process_bw, process_sw, misfit_bw, misfit_sw,
stations, origin, mt, header=False)
print('Figure 2 of 3\\n')
plot_data_greens2('graphics_test_2.png',
data_bw, data_sw, greens_bw, greens_sw,
process_bw, process_sw, misfit_bw, misfit_sw,
stations, origin, mt, header=False)
print('Figure 3 of 3\\n')
plot_beachball('graphics_test_3.png',
mt, None, None)
print('\\nFinished\\n')
"""
Main_TestMisfit="""
#
# The main computational work starts now
#
print('Evaluating body wave misfit...\\n')
results_0 = misfit_bw(
data_bw, greens_bw, grid, optimization_level=0)
results_1 = misfit_bw(
data_bw, greens_bw, grid, optimization_level=1)
results_2 = misfit_bw(
data_bw, greens_bw, grid, optimization_level=2)
print(' optimization level: 0\\n',
' argmin: %d\\n' % results_0.argmin(),
' min: %e\\n\\n' % results_0.min())
print(' optimization level: 1\\n',
' argmin: %d\\n' % results_1.argmin(),
' min: %e\\n\\n' % results_1.min())
print(' optimization level: 2\\n',
' argmin: %d\\n' % results_2.argmin(),
' min: %e\\n\\n' % results_2.min())
print('')
assert results_0.argmin()==results_1.argmin()==results_2.argmin()
print('Evaluating surface wave misfit...\\n')
results_0 = misfit_sw(
data_sw, greens_sw, grid, optimization_level=0)
results_1 = misfit_sw(
data_sw, greens_sw, grid, optimization_level=1)
results_2 = misfit_sw(
data_sw, greens_sw, grid, optimization_level=2)
print(' optimization level: 0\\n',
' argmin: %d\\n' % results_0.argmin(),
' min: %e\\n\\n' % results_0.min())
print(' optimization level: 1\\n',
' argmin: %d\\n' % results_1.argmin(),
' min: %e\\n\\n' % results_1.min())
print(' optimization level: 2\\n',
' argmin: %d\\n' % results_2.argmin(),
' min: %e\\n\\n' % results_2.min())
assert results_0.argmin()==results_1.argmin()==results_2.argmin()
"""
WrapUp_DetailedAnalysis="""
if comm.rank==0:
results_sum = results_bw + results_rayleigh + results_love
#
# Data variance estimation and likelihood analysis
#
# use minimum misfit as initial guess for maximum likelihood
idx = results_sum.source_idxmin()
best_mt = grid.get(idx)
lune_dict = grid.get_dict(idx)
mt_dict = best_mt.as_dict()
print('Data variance estimation...\\n')
sigma_bw = estimate_sigma(data_bw, greens_bw,
best_mt, misfit_bw.norm, ['Z', 'R'],
misfit_bw.time_shift_min, misfit_bw.time_shift_max)
sigma_rayleigh = estimate_sigma(data_sw, greens_sw,
best_mt, misfit_rayleigh.norm, ['Z', 'R'],
misfit_rayleigh.time_shift_min, misfit_rayleigh.time_shift_max)
sigma_love = estimate_sigma(data_sw, greens_sw,
best_mt, misfit_love.norm, ['T'],
misfit_love.time_shift_min, misfit_love.time_shift_max)
stats = {'sigma_bw': sigma_bw,
'sigma_rayleigh': sigma_rayleigh,
'sigma_love': sigma_love}
print(' Body wave variance: %.3e' %
sigma_bw**2)
print(' Rayleigh variance: %.3e' %
sigma_rayleigh**2)
print(' Love variance: %.3e' %
sigma_love**2)
print()
norm_bw = calculate_norm_data(data_bw,
misfit_bw.norm, ['Z', 'R'])
norm_rayleigh = calculate_norm_data(data_sw,
misfit_rayleigh.norm, ['Z', 'R'])
norm_love = calculate_norm_data(data_sw,
misfit_love.norm, ['T'])
norms = {misfit_bw.norm+'_bw': norm_bw,
misfit_rayleigh.norm+'_rayleigh': norm_rayleigh,
misfit_love.norm+'_love': norm_love}
print('Likelihood analysis...\\n')
likelihoods, mle_lune, marginal_vw = likelihood_analysis(
(results_bw, sigma_bw**2),
(results_rayleigh, sigma_rayleigh**2),
(results_love, sigma_love**2))
# maximum likelihood vw surface
likelihoods_vw = _product_vw(
_likelihoods_vw_regular(results_bw, sigma_bw**2),
_likelihoods_vw_regular(results_rayleigh, sigma_rayleigh**2),
_likelihoods_vw_regular(results_love, sigma_love**2))
# TODO - marginalize over the joint likelihood distribution instead
marginals_vw = _product_vw(
_marginals_vw_regular(results_bw, sigma_bw**2),
_marginals_vw_regular(results_rayleigh, sigma_rayleigh**2),
_marginals_vw_regular(results_love, sigma_love**2))
#
# Generate figures and save results
#
# only generate components present in the data
components_bw = data_bw.get_components()
components_sw = data_sw.get_components()
# synthetics corresponding to minimum misfit
synthetics_bw = greens_bw.get_synthetics(
best_mt, components_bw, mode='map')
synthetics_sw = greens_sw.get_synthetics(
best_mt, components_sw, mode='map')
# time shifts and other attributes corresponding to minimum misfit
list_bw = misfit_bw.collect_attributes(
data_bw, greens_bw, best_mt)
list_rayleigh = misfit_rayleigh.collect_attributes(
data_sw, greens_sw, best_mt)
list_love = misfit_love.collect_attributes(
data_sw, greens_sw, best_mt)
list_sw = [{**list_rayleigh[_i], **list_love[_i]}
for _i in range(len(stations))]
dict_bw = {station.id: list_bw[_i]
for _i,station in enumerate(stations)}
dict_rayleigh = {station.id: list_rayleigh[_i]
for _i,station in enumerate(stations)}
dict_love = {station.id: list_love[_i]
for _i,station in enumerate(stations)}
dict_sw = {station.id: list_sw[_i]
for _i,station in enumerate(stations)}
print('Plotting observed and synthetic waveforms...\\n')
plot_beachball(event_id+'FMT_beachball.png',
best_mt, stations, origin)
plot_data_greens2(event_id+'FMT_waveforms.png',
data_bw, data_sw, greens_bw, greens_sw, process_bw, process_sw,
misfit_bw, misfit_rayleigh, stations, origin, best_mt, lune_dict)
print('Plotting misfit surfaces...\\n')
os.makedirs(event_id+'FMT_misfit', exist_ok=True)
plot_misfit_lune(event_id+'FMT_misfit/bw.png', results_bw,
title='Body waves')
plot_misfit_lune(event_id+'FMT_misfit/rayleigh.png', results_rayleigh,
title='Rayleigh waves')
plot_misfit_lune(event_id+'FMT_misfit/love.png', results_love,
title='Love waves')
print()
print('Plotting maximum likelihood surfaces...\\n')
os.makedirs(event_id+'FMT_likelihood', exist_ok=True)
plot_likelihood_lune(event_id+'FMT_likelihood/bw.png',
results_bw, var=sigma_bw**2,
title='Body waves')
plot_likelihood_lune(event_id+'FMT_likelihood/rayleigh.png',
results_rayleigh, var=sigma_rayleigh**2,
title='Rayleigh waves')
plot_likelihood_lune(event_id+'FMT_likelihood/love.png',
results_love, var=sigma_love**2,
title='Love waves')
_plot_lune(event_id+'FMT_likelihood/all.png',
likelihoods_vw, colormap='hot_r',
title='All data categories')
print()
print('Plotting marginal likelihood surfaces...\\n')
os.makedirs(event_id+'FMT_marginal', exist_ok=True)
plot_marginal_vw(event_id+'FMT_marginal/bw.png',
results_bw, var=sigma_bw**2,
title='Body waves')
plot_marginal_vw(event_id+'FMT_marginal/rayleigh.png',
results_rayleigh, var=sigma_rayleigh**2,
title='Rayleigh waves')
plot_marginal_vw(event_id+'FMT_marginal/love.png',
results_love, var=sigma_love**2,
title='Love waves')
_plot_vw(event_id+'FMT_marginal/all.png',
marginals_vw, colormap='hot_r',
title='All data categories')
print()
print('Plotting variance reduction surfaces...\\n')
os.makedirs(event_id+'FMT_variance_reduction', exist_ok=True)
plot_variance_reduction_lune(event_id+'FMT_variance_reduction/bw.png',
results_bw, norm_bw, title='Body waves',
colorbar_label='Variance reduction (percent)')
plot_variance_reduction_lune(event_id+'FMT_variance_reduction/rayleigh.png',
results_rayleigh, norm_rayleigh, title='Rayleigh waves',
colorbar_label='Variance reduction (percent)')
plot_variance_reduction_lune(event_id+'FMT_variance_reduction/love.png',
results_love, norm_love, title='Love waves',
colorbar_label='Variance reduction (percent)')
print()
print('Plotting tradeoffs...\\n')
os.makedirs(event_id+'FMT_tradeoffs', exist_ok=True)
plot_misfit_lune(event_id+'FMT_tradeoffs/orientation.png',
results_sum, show_tradeoffs=True, title='Orientation tradeoffs')
plot_magnitude_tradeoffs_lune(event_id+'FMT_tradeoffs/magnitude.png',
results_sum, title='Magnitude tradeoffs')
print()
print('Plotting time shift geographic variation...\\n')
plot_time_shifts(event_id+'FMT_time_shifts/bw',
list_bw, stations, origin)
plot_time_shifts(event_id+'FMT_time_shifts/sw',
list_sw, stations, origin)
print('Plotting amplitude ratio geographic variation...\\n')
plot_amplitude_ratios(event_id+'FMT_amplitude_ratios/bw',
list_bw, stations, origin)
plot_amplitude_ratios(event_id+'FMT_amplitude_ratios/sw',
list_sw, stations, origin)
print('\\nSaving results...\\n')
# save best-fitting source
os.makedirs(event_id+'FMT_solutions', exist_ok=True)
save_json(event_id+'FMT_solutions/marginal_likelihood.json', marginal_vw)
save_json(event_id+'FMT_solutions/maximum_likelihood.json', mle_lune)
merged_dict = merge_dicts(lune_dict, mt_dict, origin,
{'M0': best_mt.moment(), 'Mw': best_mt.magnitude()})
save_json(event_id+'FMT_solutions/minimum_misfit.json', merged_dict)
os.makedirs(event_id+'FMT_stats', exist_ok=True)
save_json(event_id+'FMT_stats/data_variance.json', stats)
save_json(event_id+'FMT_stats/data_norm.json', norms)
# save stations and origins
stations_dict = {station.id: station
for _i,station in enumerate(stations)}
save_json(event_id+'FMT_stations.json', stations_dict)
save_json(event_id+'FMT_origins.json', {0: origin})
# save time shifts and other attributes
os.makedirs(event_id+'FMT_attrs', exist_ok=True)
save_json(event_id+'FMT_attrs/bw.json', dict_bw)
save_json(event_id+'FMT_attrs/sw.json', dict_sw)
# save processed waveforms as binary files
os.makedirs(event_id+'FMT_waveforms', exist_ok=True)
data_bw.write(event_id+'FMT_waveforms/dat_bw.p')
data_sw.write(event_id+'FMT_waveforms/dat_sw.p')
synthetics_bw.write(event_id+'FMT_waveforms/syn_bw.p')
synthetics_sw.write(event_id+'FMT_waveforms/syn_sw.p')
# save misfit surfaces as netCDF files
results_bw.save(event_id+'FMT_misfit/bw.nc')
results_rayleigh.save(event_id+'FMT_misfit/rayleigh.nc')
results_love.save(event_id+'FMT_misfit/love.nc')
print('\\nFinished\\n')
"""
WrapUp_GridSearch="""
if comm.rank==0:
results = results_bw + results_sw
#
# Collect information about best-fitting source
#
# index of best-fitting moment tensor
idx = results.source_idxmin()
# MomentTensor object
best_mt = grid.get(idx)
# dictionary of lune parameters
lune_dict = grid.get_dict(idx)
# dictionary of Mij parameters
mt_dict = best_mt.as_dict()
merged_dict = merge_dicts(
mt_dict, lune_dict, {'M0': best_mt.moment()},
{'Mw': best_mt.magnitude()}, origin)
#
# Generate figures and save results
#
print('Generating figures...\\n')
plot_data_greens2(event_id+'DC_waveforms.png',
data_bw, data_sw, greens_bw, greens_sw, process_bw, process_sw,
misfit_bw, misfit_sw, stations, origin, best_mt, lune_dict)
plot_beachball(event_id+'DC_beachball.png',
best_mt, stations, origin)
plot_misfit_dc(event_id+'DC_misfit.png', results)
print('Saving results...\\n')
# save best-fitting source
save_json(event_id+'DC_solution.json', merged_dict)
# save misfit surface
results.save(event_id+'DC_misfit.nc')
print('\\nFinished\\n')
"""
WrapUp_GridSearch_DoubleCoupleMagnitudeDepth="""
if comm.rank==0:
results = results_bw + results_sw
#
# Collect information about best-fitting source
#
origin_idx = results.origin_idxmin()
best_origin = origins[origin_idx]
source_idx = results.source_idxmin()
best_mt = grid.get(source_idx)
# dictionary of lune parameters
lune_dict = grid.get_dict(source_idx)
# dictionary of Mij parameters
mt_dict = best_mt.as_dict()
merged_dict = merge_dicts(
mt_dict, lune_dict, {'M0': best_mt.moment()},
{'Mw': best_mt.magnitude()}, best_origin)
#
# Generate figures and save results
#
print('Generating figures...\\n')
plot_data_greens2(event_id+'DC+Z_waveforms.png',
data_bw, data_sw, greens_bw, greens_sw, process_bw, process_sw,
misfit_bw, misfit_sw, stations, best_origin, best_mt, lune_dict)
plot_misfit_depth(event_id+'DC+Z_misfit_depth.png', results, origins,
title=event_id)
plot_misfit_depth(event_id+'DC+Z_misfit_depth_tradeoffs.png', results, origins,
show_tradeoffs=True, show_magnitudes=True, title=event_id)
print('Saving results...\\n')
# save best-fitting source
save_json(event_id+'DC+Z_solution.json', merged_dict)
# save origins
origins_dict = {_i: origin
for _i,origin in enumerate(origins)}
save_json(event_id+'DC+Z_origins.json', origins_dict)
# save misfit surface
results.save(event_id+'DC+Z_misfit.nc')
print('\\nFinished\\n')
"""
WrapUp_SerialGridSearch_DoubleCouple="""
results = results_bw + results_sw
#
# Collect information about best-fitting source
#
# index of best-fitting moment tensor
idx = results.source_idxmin()
# MomentTensor object
best_mt = grid.get(idx)
# dictionary of lune parameters
lune_dict = grid.get_dict(idx)
# dictionary of Mij parameters
mt_dict = best_mt.as_dict()
merged_dict = merge_dicts(
mt_dict, lune_dict, {'M0': best_mt.moment()},
{'Mw': best_mt.magnitude()}, origin)
#
# Generate figures and save results
#
print('Generating figures...\\n')
plot_data_greens2(event_id+'DC_waveforms.png',
data_bw, data_sw, greens_bw, greens_sw, process_bw, process_sw,
misfit_bw, misfit_sw, stations, origin, best_mt, lune_dict)
plot_beachball(event_id+'DC_beachball.png',
best_mt, stations, origin)
plot_misfit_dc(event_id+'DC_misfit.png', results)
print('Saving results...\\n')
# save best-fitting source
save_json(event_id+'DC_solution.json', merged_dict)
# save misfit surface
results.save(event_id+'DC_misfit.nc')
print('\\nFinished\\n')
"""
WrapUp_WaveformsPolarities="""
if comm.rank==0:
print('Evaluating polarity misfit...\\n')
results_polarity = grid_search(
polarities, greens_bw, polarity_misfit, origin, grid)
if comm.rank==0:
results = results_bw + results_sw
# `grid` index corresponding to minimum misfit
idx = results.source_idxmin()
best_mt = grid.get(idx)
lune_dict = grid.get_dict(idx)
mt_dict = best_mt.as_dict()
#
# Generate figures and save results
#
print('Generating figures...\\n')
plot_data_greens2(event_id+'FMT_waveforms.png',
data_bw, data_sw, greens_bw, greens_sw, process_bw, process_sw,
misfit_bw, misfit_sw, stations, origin, best_mt, lune_dict)
plot_beachball(event_id+'FMT_beachball.png',
best_mt, stations, origin)
plot_misfit_lune(event_id+'FMT_misfit.png', results,
title='Waveform Misfit')
# generate polarity figures
plot_misfit_lune(event_id+'FMT_misfit_polarity.png', results_polarity,
show_best=False, title='Polarity Misfit')
# predicted polarities
predicted = polarity_misfit.get_predicted(greens, best_mt)
# station attributes
attrs = polarity_misfit.collect_attributes(polarities, greens)
plot_polarities(event_id+'FMT_beachball_polarity.png',
polarities, predicted, attrs, origin, best_mt)
print('\\nFinished\\n')
"""
WrapUp_TestGridSearch_DoubleCouple="""
results = results_bw + results_sw
# source corresponding to minimum misfit
idx = results.source_idxmin()
best_mt = grid.get(idx)
lune_dict = grid.get_dict(idx)
if run_figures:
plot_data_greens2(event_id+'DC_waveforms.png',
data_bw, data_sw, greens_bw, greens_sw, process_bw, process_sw,
misfit_bw, misfit_sw, stations, origin, best_mt, lune_dict)
plot_beachball(event_id+'DC_beachball.png',
best_mt, None, None)
if run_checks:
def isclose(a, b, atol=1.e6, rtol=1.e-6):
# the default absolute tolerance (1.e6) is several orders of
# magnitude less than the moment of an Mw=0 event
for _a, _b, _bool in zip(
a, b, np.isclose(a, b, atol=atol, rtol=rtol)):
print('%s: %.e <= %.1e + %.1e * %.1e' %\\
('passed' if _bool else 'failed', abs(_a-_b), atol, rtol, abs(_b)))
print('')
return np.all(
np.isclose(a, b, atol=atol, rtol=rtol))
if not isclose(best_mt.as_vector(),
np.array([
-6.731618e+15,
8.398708e+14,
5.891747e+15,
-1.318056e+15,
7.911756e+14,
2.718294e+15,
])
):
raise Exception(
"Grid search result differs from previous mtuq result")
print('SUCCESS\\n')
"""
WrapUp_TestGridSearch_DoubleCoupleMagnitudeDepth="""
results = results_bw + results_sw
idx = results.source_idxmin()
best_mt = grid.get(idx)
if run_figures:
filename = event_id+'_misfit_vs_depth.png'
#misfit_vs_depth(filename, best_misfit)
if run_checks:
pass
print('SUCCESS\\n')
"""
Main_BenchmarkCAP="""
#
# The benchmark starts now
#
print('Reading data...\\n')
data = read(path_data, format='sac',
event_id=event_id,
tags=['units:cm', 'type:velocity'])
data.sort_by_distance()
stations = data.get_stations()
origin = data.get_origins()[0]
print('Processing data...\\n')
data_bw = data.map(process_bw)
data_sw = data.map(process_sw)
print('Reading Greens functions...\\n')
db = open_db(path_greens, format='FK', model=model)
greens = db.get_greens_tensors(stations, origin)
print('Processing Greens functions...\\n')
greens.convolve(wavelet)
greens_bw = greens.map(process_bw)
greens_sw = greens.map(process_sw)
depth = int(origin.depth_in_m/1000.)+1
name = '_'.join([model, str(depth), event_id])
print('Comparing waveforms...')
for _i, mt in enumerate(sources):
print(' %d of %d' % (_i+1, len(sources)))
cap_bw, cap_sw = get_synthetics_cap(
data_bw, data_sw, paths[_i], name)
mtuq_bw, mtuq_sw = get_synthetics_mtuq(
data_bw, data_sw, greens_bw, greens_sw, mt)
if run_figures:
plot_waveforms2('cap_vs_mtuq_'+str(_i)+'.png',
cap_bw, cap_sw, mtuq_bw, mtuq_sw,
stations, origin, trace_labels=False)
if run_checks:
compare_cap_mtuq(
cap_bw, cap_sw, mtuq_bw, mtuq_sw)
if run_figures:
# "bonus" figure comparing how CAP processes observed data with how
# MTUQ processes observed data
mtuq_sw, mtuq_bw = data_bw, data_sw
cap_sw, cap_bw = get_data_cap(
data_bw, data_sw, paths[0], name)
plot_waveforms2('cap_vs_mtuq_data.png',
cap_bw, cap_sw, mtuq_bw, mtuq_sw,
stations, origin, trace_labels=False, normalize=False)
print('\\nSUCCESS\\n')
"""
if __name__=='__main__':
    # Regenerate the example scripts, test scripts and gallery module from
    # the templates defined above.  Run from anywhere: basepath() anchors the
    # working directory at the repository root.
    import os
    from mtuq.util import basepath, replace
    os.chdir(basepath())
    # examples/DetailedAnalysis.py: full moment tensor example with extra
    # likelihood / variance-reduction figure machinery, plus a separate Love
    # wave misfit evaluation appended between the grid search and wrap-up.
    with open('examples/DetailedAnalysis.py', 'w') as file:
        file.write("#!/usr/bin/env python\n")
        file.write(
            replace(
                Imports,
                'DoubleCoupleGridRegular',
                'FullMomentTensorGridSemiregular',
                'plot_misfit_dc',
                (
                'plot_misfit_lune,\\\n'+
                ' plot_likelihood_lune, plot_marginal_vw,\\\n'+
                ' plot_variance_reduction_lune, plot_magnitude_tradeoffs_lune,\\\n'+
                ' plot_time_shifts, plot_amplitude_ratios,\\\n'+
                ' likelihood_analysis, _likelihoods_vw_regular, _marginals_vw_regular,\\\n'+
                ' _plot_lune, _plot_vw, _product_vw\n'+
                'from mtuq.graphics.uq.vw import _variance_reduction_vw_regular'
                ),
                'from mtuq.misfit import Misfit',
                'from mtuq.misfit.waveform import Misfit, estimate_sigma, calculate_norm_data'
                ))
        file.write(Docstring_DetailedAnalysis)
        file.write(Paths_Syngine)
        file.write(DataProcessingComments)
        file.write(DataProcessingDefinitions)
        file.write(MisfitComments)
        file.write(MisfitDefinitions_DetailedExample)
        file.write(WeightsComments)
        file.write(WeightsDefinitions)
        file.write(
            replace(
                Grid_FullMomentTensor,
                'npts_per_axis=10',
                'npts_per_axis=12',
                ))
        file.write(OriginComments)
        file.write(OriginDefinitions)
        file.write(
            replace(
                Main_GridSearch,
                'surface wave',
                'Rayleigh wave',
                'results_sw',
                'results_rayleigh',
                'misfit_sw',
                'misfit_rayleigh',
                ))
        file.write("""
    if comm.rank==0:
        print('Evaluating Love wave misfit...\\n')

        results_love = grid_search(
            data_sw, greens_sw, misfit_love, origin, grid)\n"""
            )
        file.write(WrapUp_DetailedAnalysis)
    # examples/GridSearch.DoubleCouple.py: assembled from the templates
    # unchanged.
    with open('examples/GridSearch.DoubleCouple.py', 'w') as file:
        file.write("#!/usr/bin/env python\n")
        file.write(Imports)
        file.write(Docstring_GridSearch_DoubleCouple)
        file.write(PathsComments)
        file.write(Paths_Syngine)
        file.write(DataProcessingComments)
        file.write(DataProcessingDefinitions)
        file.write(MisfitComments)
        file.write(MisfitDefinitions)
        file.write(WeightsComments)
        file.write(WeightsDefinitions)
        file.write(Grid_DoubleCouple)
        file.write(OriginComments)
        file.write(OriginDefinitions)
        file.write(Main_GridSearch)
        file.write(WrapUp_GridSearch)
    # examples/GridSearch.DoubleCouple+Magnitude+Depth.py: like the basic
    # example but searching over multiple origins (depths).
    with open('examples/GridSearch.DoubleCouple+Magnitude+Depth.py', 'w') as file:
        file.write("#!/usr/bin/env python\n")
        file.write(
            replace(
                Imports,
                'plot_beachball',
                'plot_misfit_depth',
                ))
        file.write(Docstring_GridSearch_DoubleCoupleMagnitudeDepth)
        file.write(PathsComments)
        file.write(Paths_Syngine)
        file.write(DataProcessingComments)
        file.write(DataProcessingDefinitions)
        file.write(MisfitComments)
        file.write(MisfitDefinitions)
        file.write(WeightsComments)
        file.write(WeightsDefinitions)
        file.write(OriginsComments)
        file.write(Origins_Depth)
        file.write(Grid_DoubleCoupleMagnitude)
        file.write(
            replace(
                Main_GridSearch,
                'origin',
                'origins',
                ))
        file.write(WrapUp_GridSearch_DoubleCoupleMagnitudeDepth)
    # examples/GridSearch.DoubleCouple+Magnitude+Hypocenter.py: hypocenter
    # variant; note the regex patterns passed to replace() (escaped parens).
    with open('examples/GridSearch.DoubleCouple+Magnitude+Hypocenter.py', 'w') as file:
        file.write("#!/usr/bin/env python\n")
        file.write(
            replace(
                Imports,
                'plot_beachball',
                'plot_misfit_latlon',
                ))
        file.write(Docstring_GridSearch_DoubleCoupleMagnitudeHypocenter)
        file.write(PathsComments)
        file.write(Paths_Syngine)
        file.write(DataProcessingComments)
        file.write(DataProcessingDefinitions)
        file.write(MisfitComments)
        file.write(MisfitDefinitions)
        file.write(WeightsComments)
        file.write(WeightsDefinitions)
        file.write(OriginsComments)
        file.write(Origins_Hypocenter)
        file.write(Grid_DoubleCoupleMagnitude)
        file.write(
            replace(
                Main_GridSearch,
                'origin',
                'origins',
                'Reading Greens functions...\\\\n',
                (
                'Reading Greens functions...\\\\n\\\\n'+
                ' Downloads can sometimes take as long as a few hours!\\\\n'
                ),
                'download_greens_tensors\(stations, origin, model\)',
                'download_greens_tensors(stations, origin, model, verbose=True)',
                ))
        file.write(
            replace(
                WrapUp_GridSearch_DoubleCoupleMagnitudeDepth,
                'DC\+Z',
                'DC+XY',
                'misfit_depth',
                'misfit_latlon',
                "title=event_id",
                "title=event_id, colorbar_label='L2 misfit'",
                'show_magnitudes=True, ',
                '',
                ))
    # examples/GridSearch.FullMomentTensor.py: double-couple templates
    # retargeted at the full moment tensor grid and lune plots.
    with open('examples/GridSearch.FullMomentTensor.py', 'w') as file:
        file.write("#!/usr/bin/env python\n")
        file.write(
            replace(
                Imports,
                'DoubleCoupleGridRegular',
                'FullMomentTensorGridSemiregular',
                'plot_misfit_dc',
                'plot_misfit_lune',
                ))
        file.write(Docstring_GridSearch_FullMomentTensor)
        file.write(Paths_Syngine)
        file.write(DataProcessingComments)
        file.write(DataProcessingDefinitions)
        file.write(MisfitComments)
        file.write(MisfitDefinitions)
        file.write(WeightsComments)
        file.write(WeightsDefinitions)
        file.write(Grid_FullMomentTensor)
        file.write(OriginComments)
        file.write(OriginDefinitions)
        file.write(Main_GridSearch)
        file.write(
            replace(
                WrapUp_GridSearch,
                'DC',
                'FMT',
                'plot_misfit_dc',
                'plot_misfit_lune',
                ))
    # examples/SerialGridSearch.DoubleCouple.py: serial (non-MPI) variant
    # built from the Main1/Main2 serial templates.
    with open('examples/SerialGridSearch.DoubleCouple.py', 'w') as file:
        file.write("#!/usr/bin/env python\n")
        file.write(Imports)
        file.write(Docstring_SerialGridSearch_DoubleCouple)
        file.write(PathsComments)
        file.write(Paths_Syngine)
        file.write(DataProcessingComments)
        file.write(DataProcessingDefinitions)
        file.write(MisfitComments)
        file.write(MisfitDefinitions)
        file.write(WeightsComments)
        file.write(WeightsDefinitions)
        file.write(Grid_DoubleCouple)
        file.write(OriginComments)
        file.write(OriginDefinitions)
        file.write(Main1_SerialGridSearch_DoubleCouple)
        file.write(Main2_SerialGridSearch_DoubleCouple)
        file.write(WrapUp_SerialGridSearch_DoubleCouple)
    # examples/Waveforms+Polarities.py: joint waveform + polarity example;
    # swaps in the dedicated misfit imports and wrap-up template.
    with open('examples/Waveforms+Polarities.py', 'w') as file:
        file.write("#!/usr/bin/env python\n")
        file.write(
            replace(
                Imports,
                'DoubleCoupleGridRegular',
                'FullMomentTensorGridSemiregular',
                'plot_misfit_dc',
                'plot_misfit_lune',
                'plot_beachball',
                'plot_beachball, plot_polarities',
                'from mtuq.misfit import Misfit',
                'from mtuq.misfit import WaveformMisfit, PolarityMisfit',
                ))
        file.write(Docstring_WaveformsPolarities)
        file.write(Paths_Syngine)
        file.write(DataProcessingComments)
        file.write(DataProcessingDefinitions)
        file.write(WaveformsPolaritiesMisfit)
        file.write(WeightsComments)
        file.write(WeightsDefinitions)
        file.write(Grid_FullMomentTensor)
        file.write(OriginComments)
        file.write(OriginDefinitions)
        file.write(Main_GridSearch)
        file.write(WrapUp_WaveformsPolarities)
#with open('tests/test_SPECFEM3D_SGT.py', 'w') as file:
# file.write("#!/usr/bin/env python\n")
# file.write(
# replace(
# Imports,
# 'DoubleCoupleGridRegular',
# 'FullMomentTensorGridSemiregular',
# 'plot_misfit_dc',
# 'plot_misfit_lune',
# ))
# file.write(Docstring_GridSearch_FullMomentTensor)
# file.write(Paths_SPECFEM3D_SGT)
# file.write(DataProcessingComments)
# file.write(
# replace(
# DataProcessingDefinitions,
# 'taup_model=model',
# 'taup_model=taup_model',
# ))
# file.write(MisfitComments)
# file.write(MisfitDefinitions)
# file.write(WeightsComments)
# file.write(WeightsDefinitions)
# file.write(Grid_FullMomentTensor)
# file.write(OriginComments)
# file.write(OriginDefinitions_SPECFEM3D_SGT)
# file.write(
# replace(
# Main_GridSearch,
# 'greens = download_greens_tensors\(stations, origin, model\)',
# 'db = open_db(path_greens, format=\'SPECFEM3D_SGT\', model=model)\n '
# +'greens = db.get_greens_tensors(stations, origin)',
# ))
# file.write(
# replace(
# WrapUp_GridSearch,
# 'DC',
# 'FMT',
# 'plot_misfit_dc',
# 'plot_misfit_lune',
# ))
    # tests/test_grid_search_mt.py: serial double-couple test against the
    # local FK database (regex substitutions retarget the data processing).
    with open('tests/test_grid_search_mt.py', 'w') as file:
        file.write(Imports)
        file.write(Docstring_TestGridSearch_DoubleCouple)
        file.write(ArgparseDefinitions)
        file.write(Paths_FK)
        file.write(
            replace(
                DataProcessingDefinitions,
                'pick_type=.*',
                "pick_type='FK_metadata',",
                'taup_model=.*,',
                'FK_database=path_greens,',
                ))
        file.write(MisfitDefinitions)
        file.write(
            replace(
                Grid_DoubleCouple,
                'npts.*,',
                'npts_per_axis=5,',
                ))
        file.write(WeightsDefinitions)
        file.write(OriginDefinitions)
        file.write(
            replace(
                Main1_SerialGridSearch_DoubleCouple,
                'greens = download_greens_tensors\(stations, origin, model\)',
                'db = open_db(path_greens, format=\'FK\', model=model)\n '
                +'greens = db.get_greens_tensors(stations, origin)',
                ))
        file.write(
            replace(
                Main2_SerialGridSearch_DoubleCouple,
                'origin, grid',
                'origin, grid, 0',
                ))
        file.write(WrapUp_TestGridSearch_DoubleCouple)
    # tests/test_grid_search_mt_depth.py: magnitude+depth grid-search test,
    # also driven from the local FK database.
    with open('tests/test_grid_search_mt_depth.py', 'w') as file:
        file.write(
            replace(
                Imports,
                'plot_beachball',
                'plot_misfit_depth',
                ))
        file.write(Docstring_TestGridSearch_DoubleCoupleMagnitudeDepth)
        file.write(ArgparseDefinitions)
        file.write(Paths_FK)
        file.write(
            replace(
                DataProcessingDefinitions,
                'pick_type=.*',
                "pick_type='FK_metadata',",
                'taup_model=.*,',
                'FK_database=path_greens,',
                ))
        file.write(MisfitDefinitions)
        file.write(WeightsDefinitions)
        file.write(Grid_TestDoubleCoupleMagnitudeDepth)
        file.write(Main_TestGridSearch_DoubleCoupleMagnitudeDepth)
        file.write(WrapUp_TestGridSearch_DoubleCoupleMagnitudeDepth)
    # tests/test_misfit.py: misfit unit test.  Note: replace() is called on
    # Imports with no substitution pairs, which writes Imports unchanged --
    # presumably intentional, kept for symmetry with the other writers.
    with open('tests/test_misfit.py', 'w') as file:
        file.write(
            replace(
                Imports,
                ))
        file.write(Docstring_TestMisfit)
        file.write(Paths_FK)
        file.write(
            replace(
                DataProcessingDefinitions,
                'pick_type=.*',
                "pick_type='FK_metadata',",
                'taup_model=.*,',
                'FK_database=path_greens,',
                ))
        file.write(MisfitDefinitions)
        file.write(WeightsComments)
        file.write(WeightsDefinitions)
        file.write(
            replace(
                Grid_DoubleCouple,
                'npts.*,',
                'npts_per_axis=5,',
                ))
        file.write(OriginDefinitions)
        file.write(
            replace(
                Main1_SerialGridSearch_DoubleCouple,
                'greens = download_greens_tensors\(stations, origin, model\)',
                'db = open_db(path_greens, format=\'FK\', model=model)\n '
                +'greens = db.get_greens_tensors(stations, origin)',
                ))
        file.write(Main_TestMisfit)
    # tests/benchmark_cap_vs_mtuq.py: CAP comparison benchmark; disables
    # time shifts and points the weights file at the benchmark data set.
    with open('tests/benchmark_cap_vs_mtuq.py', 'w') as file:
        file.write(
            replace(
                Imports,
                'Origin',
                'MomentTensor',
                'syngine',
                'fk',
                'plot_data_greens2',
                'plot_waveforms2',
                ))
        file.write(Docstring_BenchmarkCAP)
        file.write(ArgparseDefinitions)
        file.write(Paths_BenchmarkCAP)
        file.write(
            replace(
                Paths_FK,
                'data/examples/20090407201255351/weights.dat',
                'data/tests/benchmark_cap/20090407201255351/weights.dat',
                ))
        file.write(
            replace(
                DataProcessingDefinitions,
                'pick_type=.*',
                "pick_type='FK_metadata',",
                'taup_model=.*,',
                'FK_database=path_greens,',
                ))
        file.write(
            replace(
                MisfitDefinitions,
                'time_shift_max=.*',
                'time_shift_max=0.,',
                ))
        file.write(Grid_BenchmarkCAP)
        file.write(Main_BenchmarkCAP)
    # tests/test_graphics.py: graphics smoke test against the FK database.
    with open('tests/test_graphics.py', 'w') as file:
        file.write(Imports)
        file.write(Docstring_TestGraphics)
        file.write(Paths_FK)
        file.write(
            replace(
                DataProcessingDefinitions,
                'pick_type=.*',
                "pick_type='FK_metadata',",
                'taup_model=.*,',
                'FK_database=path_greens,',
                ))
        file.write(MisfitDefinitions)
        file.write(Grid_TestGraphics)
        file.write(Main_TestGraphics)
    # mtuq/util/gallery.py: quiet gallery module (all print statements are
    # stripped from the serial main template).
    with open('mtuq/util/gallery.py', 'w') as file:
        file.write(Imports)
        file.write(Docstring_Gallery)
        file.write(Paths_Syngine)
        file.write(DataProcessingDefinitions)
        file.write(MisfitDefinitions)
        file.write(
            replace(
                Grid_DoubleCouple,
                'npts.*',
                'npts_per_axis=10,',
                ))
        file.write(
            replace(
                Main1_SerialGridSearch_DoubleCouple,
                'print.*',
                '',
                ))
|
998,714 | fda9c4d612e793968dc58693409b0c282053d362 | #------------------------------------------------------#
# Import librairies
#------------------------------------------------------#
import datetime
import hashlib
import os
import time
import urllib
import io
import cv2 as cv
import numpy as np
import pafy
import pandas as pd
import streamlit as st
import wget
import argparse
import youtube_dl
from imutils.video import FPS, FileVideoStream, WebcamVideoStream
from PIL import Image
import libraries.plugins as plugins
# Drawing constants used by the detection overlays.
# NOTE(review): OpenCV drawing calls interpret tuples as BGR, but these names
# read as RGB -- e.g. colorRed=(255, 0, 0) renders blue in cv.* calls.
# Confirm which convention the callers expect before renaming.
colorWhite = (255, 255, 255)
colorBlack = (0, 0, 0)
colorRed = (255, 0, 0)
colorGreen = (0, 255, 0)
colorBlue = (0, 0, 255)
fontFace = cv.FONT_HERSHEY_SIMPLEX
thickText = 1
#------------------------------------------------------#
# Classes definition
#------------------------------------------------------#
class GUI():
    """
    Manage the user interface of the website: sidebar controls for the
    selected application and the front-page description.
    """

    def __init__(self):
        # Applications selectable from the sidebar.
        self.list_of_apps = [
            'Empty',
            'Fire Detection from Video',
            'Fire Detection']
        # Accumulates every setting chosen in the sidebar.
        self.guiParam = {}

    # ----------------------------------------------------------------

    def getGuiParameters(self):
        """Render the sidebar and front page, then return the collected
        parameters as a dict."""
        self.common_config()
        self.appDescription()
        return self.guiParam

    # ----------------------------------------------------------------

    def common_config(self, title='Fire Detection Using Deep Learning '):  # (Beta version :golf:)
        """
        User Interface Management: Sidebar
        """
        st.image("./media/coeai.png", width=120)
        st.title(title)
        st.sidebar.markdown("### :bulb: Settings")

        # Get the application type from the GUI
        self.appType = 'Image Applications'

        self.dataSource = st.sidebar.radio(
            'Please select the source for Fire Detection ', ['Video: Upload', 'Image: Upload'])

        # Get the application from the GUI
        self.selectedApp = st.sidebar.selectbox(
            'Chose an AI Application', self.list_of_apps)

        # BUG FIX: was `self.selectedApp is 'Empty'` -- `is` compares object
        # identity, not string equality, so the warning depended on string
        # interning.  Use `==` for value comparison.
        if self.selectedApp == 'Empty':
            st.sidebar.warning('Select an application from the list')

        # Update the dictionary
        self.guiParam.update(
            dict(selectedApp=self.selectedApp,
                 appType=self.appType,
                 dataSource=self.dataSource,
                 ))

    # -------------------------------------------------------------------------

    def appDescription(self):
        """Render the front-page header/description for the current app and,
        for 'Fire Detection', its extra sidebar controls."""
        st.header(' :computer: Application: {}'.format(self.selectedApp))

        if self.selectedApp == 'Fire Detection':
            st.info(
                'This application performs fire detection on Images using Deep Learning models. ')
            self.sidebarFireDetection()

        elif self.selectedApp == 'Fire Detection from Video':
            st.info(
                'This application performs fire detection on Video Sequences using Advanced Deep Learning models. ')

        else:
            st.info(
                'To start using Fire Detection Application, you must first select an Application from the sidebar menu other than Empty. \n Below is a visual demo of the working model.')
            # FIX: close the demo file instead of leaking the handle.
            with open('firedet_demo.webm', 'rb') as video_file:
                video_bytes = video_file.read()
            st.video(video_bytes)

    # --------------------------------------------------------------------------

    def sidebarEmpty(self):
        """Placeholder: the 'Empty' app adds no sidebar controls."""
        pass

    # --------------------------------------------------------------------------

    def sidebarFireDetection(self):
        """Sidebar controls (model choice and thresholds) for fire detection."""
        # st.sidebar.markdown("### :arrow_right: Model")
        #------------------------------------------------------#
        model = st.sidebar.selectbox(
            label='Select the model',
            options=['Darknet-YOLOv3-tiny'])

        # st.sidebar.markdown("### :arrow_right: Model Parameters")
        #------------------------------------------------------#
        confThresh = st.sidebar.slider(
            'Confidence', value=0.5, min_value=0.0, max_value=1.0)
        nmsThresh = st.sidebar.slider(
            'Non-maximum suppression', value=0.30, min_value=0.0, max_value=1.00, step=0.05)

        self.guiParam.update(dict(confThresh=confThresh,
                                  nmsThresh=nmsThresh,
                                  model=model))
# ------------------------------------------------------------------
# ------------------------------------------------------------------
class AppManager:
    """
    Master class: builds the concrete detection plugin selected in the GUI
    and exposes a uniform `process` entry point for frames.
    """

    def __init__(self, guiParam):
        # guiParam: dict produced by GUI.getGuiParameters().
        self.guiParam = guiParam
        self.selectedApp = guiParam['selectedApp']
        self.model = guiParam['model']
        self.objApp = self.setupApp()

    # -----------------------------------------------------

    def setupApp(self):
        """
        Instantiate the detector plugin matching `selectedApp`.

        Raises for any selection without a configured plugin.
        NOTE(review): 'Fire Detection from Video' falls through to the
        raise branch -- confirm that path is handled elsewhere.
        """
        if self.selectedApp == 'Fire Detection':

            @st.cache(allow_output_mutation=True)
            def getClasses(classesFile):
                """
                Load names of classes (one label per line).
                """
                classes = None
                with open(classesFile, 'rt') as f:
                    classes = f.read().rstrip('\n').split('\n')
                return classes

            labels = 'models/DarkNet/fire_detection/classes.names'

            # YOLOv3-tiny configuration; thresholds come from the sidebar
            # sliders, one colorBlue entry per class for box drawing.
            self.paramYoloTinyFire = dict(labels=labels,
                modelCfg='models/DarkNet/fire_detection/yolov3-custom.cfg',
                modelWeights="models/DarkNet/fire_detection/yolov3-custom_10000.weights",
                confThresh=self.guiParam['confThresh'],
                nmsThresh=self.guiParam['nmsThresh'],
                colors=np.tile(colorBlue, (len(getClasses(labels)), 1)).tolist())

            self.objApp = plugins.Object_Detection_YOLO(self.paramYoloTinyFire)

        # -----------------------------------------------------

        else:
            raise Exception(
                '[Error] Please select one of the listed application')

        return self.objApp

    # -----------------------------------------------------
    # -----------------------------------------------------

    def process(self, frame, motion_state):
        """
        Run the configured detector on one frame.

        Returns a tuple: (bboxed_frame, output).
        """
        bboxed_frame, output = self.objApp.run(frame, motion_state)

        return bboxed_frame, output
# ------------------------------------------------------------------
# ------------------------------------------------------------------
class DataManager:
    """
    Load the input chosen in the GUI: decode an uploaded image, or run the
    YOLO fire detector over an uploaded video inside the Streamlit page.
    """

    def __init__(self, guiParam):
        # guiParam: dict produced by GUI.getGuiParameters().
        self.guiParam = guiParam
        self.image = None
        self.data = None

    #################################################################
    #################################################################

    def load_image_source(self):
        """
        Dispatch on guiParam["dataSource"].

        'Image: Upload' returns the decoded image (None until a file is
        uploaded).  'Video: Upload' renders detection results directly into
        the page and returns None.
        """
        if self.guiParam["dataSource"] == 'Image: Upload':

            @st.cache(allow_output_mutation=True)
            def load_image_from_upload(file):
                # Decode the uploaded bytes into a BGR OpenCV image.
                # NOTE(review): np.fromstring is deprecated; np.frombuffer
                # is the modern equivalent.
                tmp = np.fromstring(file.read(), np.uint8)
                return cv.imdecode(tmp, 1)

            file_path = st.file_uploader(
                'Upload an image', type=['png', 'jpg'])

            if file_path is not None:
                self.image = load_image_from_upload(file_path)

            #--------------------------------------------#
            #--------------------------------------------#

            return self.image

        elif self.guiParam["dataSource"] == 'Video: Upload':

            # NOTE(review): argparse re-parses sys.argv on every call; under
            # Streamlit this only works while no unknown CLI flags are
            # present -- plain constants would be safer.
            parser = argparse.ArgumentParser(add_help=False)
            parser.add_argument("--image", default='custom/traintest/17.jpg', help="image for prediction")
            parser.add_argument("--config", default='models/DarkNet/fire_detection/yolov3-custom.cfg', help="YOLO config path")
            parser.add_argument("--weights", default='models/DarkNet/fire_detection/yolov3-custom_10000.weights', help="YOLO weights path")
            parser.add_argument("--names", default='models/DarkNet/fire_detection/classes.names', help="class names path")
            args = parser.parse_args()

            # Get names of output layers, output for YOLOv3 is ['yolo_16', 'yolo_23']
            def getOutputsNames(net):
                layersNames = net.getLayerNames()
                return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]

            # Draw a rectangle surrounding the object and its class name
            def draw_pred(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
                label = str(classes[class_id])
                color = COLORS[class_id]
                cv.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2)
                cv.putText(img, label, (x-10,y-10), cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

            # Load names classes
            classes = None
            with open(args.names, 'r') as f:
                classes = [line.strip() for line in f.readlines()]

            # Generate color for each class randomly
            COLORS = np.random.uniform(0, 255, size=(len(classes), 3))

            # Define network from configuration file and load the weights from the given weights file
            net = cv.dnn.readNet(args.weights, args.config)

            uploaded_file = st.file_uploader("Choose a video...", type=["mp4"])
            temporary_location = False

            if uploaded_file is not None:
                # Persist the upload to disk so cv.VideoCapture can open it.
                g = io.BytesIO(uploaded_file.read())  ## BytesIO Object
                temporary_location = "testout_simple.mp4"
                with open(temporary_location, 'wb') as out:  ## Open temporary file as bytes
                    out.write(g.read())  ## Read bytes into file
                # close file
                out.close()

            def load_video(temporary_location):
                """
                Open the saved upload as an OpenCV capture.
                """
                cap = cv.VideoCapture(str(temporary_location))
                return cap

            cap = load_video(temporary_location)

            if st.button('Process Video'):
                if temporary_location:
                    # Cosmetic progress bar shown before processing starts.
                    my_bar = st.progress(0)
                    for percent_complete in range(100):
                        time.sleep(0.01)
                        my_bar.progress(percent_complete + 1)
                image_placeholder = st.empty()
                while cv.waitKey(1) < 0:
                    try:
                        hasframe, image = cap.read()
                        image = cv.resize(image, (608, 608))

                        blob = cv.dnn.blobFromImage(image, 1.0/255.0, (608,608), [0,0,0], True, crop=False)
                        Width = image.shape[1]
                        Height = image.shape[0]
                        net.setInput(blob)
                        outs = net.forward(getOutputsNames(net))

                        class_ids = []
                        confidences = []
                        boxes = []
                        conf_threshold = 0.5
                        nms_threshold = 0.4
                        #print(len(outs))

                        # In case of tiny YOLOv3 we have 2 output(outs) from 2 different scales [3 bounding box per each scale]
                        # For normal normal YOLOv3 we have 3 output(outs) from 3 different scales [3 bounding box per each scale]
                        # For tiny YOLOv3, the first output will be 507x6 = 13x13x18
                        # 18=3*(4+1+1) 4 boundingbox offsets, 1 objectness prediction, and 1 class score.
                        # and the second output will be = 2028x6=26x26x18 (18=3*6)
                        for out in outs:
                            #print(out.shape)
                            for detection in out:
                                # each detection has the form like this [center_x center_y width height obj_score class_1_score class_2_score ..]
                                scores = detection[5:]  # classes scores starts from index 5
                                class_id = np.argmax(scores)
                                confidence = scores[class_id]
                                if confidence > 0.5:
                                    center_x = int(detection[0] * Width)
                                    center_y = int(detection[1] * Height)
                                    w = int(detection[2] * Width)
                                    h = int(detection[3] * Height)
                                    x = center_x - w / 2
                                    y = center_y - h / 2
                                    class_ids.append(class_id)
                                    confidences.append(float(confidence))
                                    boxes.append([x, y, w, h])

                        # apply non-maximum suppression algorithm on the bounding boxes
                        indices = cv.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
                        for i in indices:
                            i = i[0]
                            box = boxes[i]
                            x = box[0]
                            y = box[1]
                            w = box[2]
                            h = box[3]
                            draw_pred(image, class_ids[i], confidences[i], round(x), round(y), round(x+w), round(y+h))

                        # Put efficiency information.
                        t, _ = net.getPerfProfile()
                        label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
                        cv.putText(image, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))

                        image_placeholder.image(image, channels="BGR", use_column_width=True)

                    # NOTE(review): this bare except is relied on to end the
                    # loop when cap.read() yields no frame, but it also hides
                    # genuine errors (e.g. a corrupt frame) -- confirm before
                    # narrowing the exception type.
                    except:
                        image_placeholder = st.empty()
                        break

            #--------------------------------------------#
            #--------------------------------------------#

        else:
            raise ValueError("Please select one source from the list")

    def load_image_or_video(self):
        """
        Handle the data input from the user parameters
        """
        if self.guiParam['appType'] == 'Image Applications':
            self.data = self.load_image_source()
        else:
            raise ValueError(
                '[Error] Please select of the two Application pipelines')

        return self.data
998,715 | 081b2d3a1c2a141b0699f438e33e0b09cfe3ce7c | from django.db import models
class Servicio(models.Model):
    # A service offering: name, short description and an optional image.
    id_servicio = models.AutoField(primary_key=True)
    nombre = models.CharField(max_length=25)
    descripcion = models.CharField(max_length=50)
    imagen = models.ImageField(upload_to="images", blank=True, null=True)

    # BUG FIX: the original `def getAll():` took no `self` and had no
    # decorator, so it worked only when invoked on the class and raised
    # TypeError on an instance.  @staticmethod keeps `Servicio.getAll()`
    # working and makes instance calls safe too.
    @staticmethod
    def getAll():
        """Return all Servicio rows ordered by primary key."""
        servicios = Servicio.objects.all().order_by('id_servicio')
        return servicios
|
998,716 | f00021617958023dd02b55cba610b2f428a6c58e | from sklearn import svm
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
from sklearn.pipeline import Pipeline
import copy
import os
from math import log
# Train a linear SVM on precomputed HOG feature vectors and persist it.
# make my classifier
svm_clf = svm.LinearSVC(max_iter=10000, class_weight='balanced')

# get preprocessed data(hog_vectors) for train
with open('pos_train.pkl', 'rb') as f:
    pos_data = pickle.load(f)
with open('neg_train.pkl', 'rb') as f:
    neg_data = pickle.load(f)
print(len(neg_data))

# Positives labelled 1, negatives 0 (order matches the concatenated data).
data = np.array(pos_data + neg_data)
labels = np.concatenate((np.full((len(pos_data)), 1), np.full((len(neg_data)), 0)))
print(len(data))

# train classifier with data(hog vectors)
svm_clf.fit(data, labels)

# train acc
print("Train Accuracy:", svm_clf.score(data, labels))
# Largest decision score among negatives -- useful when tuning a detection
# threshold for the sliding-window detector.
print(np.max(svm_clf.decision_function(neg_data)))

# save svm classifier
with open('svm_clf.pkl', 'wb') as f:
    pickle.dump(svm_clf, f)
998,717 | b5d8cd666a06e4c510f3c846160a713812ec6b85 | """ Implementation of graph convolution operation, modified slightly from the implementation used in
Protein Interface Prediction using Graph Convolutional Networks
https://github.com/fouticus/pipgcn
available under the MIT License, Copyright 2020 Alex Fout """
import numpy as np
import tensorflow as tf
def node_average_gc(inputs, adj_mtx, activation, filters=None, trainable=True):
    """
    Node-average graph convolution (TF1 graph-mode).

    Each vertex's output is activation(Wc*v + mean_over_neighbors(Wn*u) + b),
    where the neighbor mean divides by the vertex degree (clamped to >= 1 so
    isolated vertices do not divide by zero).

    :param inputs: vertex features, shape (batch_size, number_of_vertices, encoding_len)
    :param adj_mtx: adjacency matrix shared across the batch, shape (V, V)
    :param activation: elementwise activation callable
    :param filters: number of output channels
    :param trainable: whether the created variables are trainable
    :return: tensor of shape (batch_size, number_of_vertices, filters)
    """
    # node_average_gc_dist_thresh
    vertices = inputs  # shape: (batch_size, number_of_vertices, encoding_len)
    v_shape = vertices.get_shape()

    # create new weights # (v_dims, filters)
    center_weights = tf.Variable(initializer("he", (v_shape[-1].value, filters)), name="Wc", trainable=trainable)
    neighbor_weights = tf.Variable(initializer("he", (v_shape[-1].value, filters)), name="Wn", trainable=trainable)
    bias = tf.Variable(initializer("zero", (filters,)), name="b", trainable=trainable)

    # center signals are simply the center node value times the weight
    # shape: (batch_size, number_of_vertices, num_filters)
    center_signals = tf.reshape(tf.matmul(tf.reshape(vertices, (-1, v_shape[-1])),
                                          center_weights),
                                (-1, v_shape[1], filters))

    # apply neighbor weight to each neighbor
    # shape: (batch_size, number_of_vertices, num_filters)
    neighbor_signals_sep = tf.reshape(tf.matmul(tf.reshape(vertices, (-1, v_shape[-1])), neighbor_weights),
                                      (-1, v_shape[1], filters))

    # compute full neighbor signals: adjacency-weighted sum divided by the
    # per-vertex degree (max'd with 1 to protect isolated vertices)
    neighbor_signals = tf.divide(tf.matmul(tf.tile(adj_mtx[None], (tf.shape(vertices)[0], 1, 1)),
                                           neighbor_signals_sep),
                                 tf.reshape(tf.maximum(tf.constant(1, dtype=tf.float32),
                                                       tf.reduce_sum(adj_mtx, axis=1)), (-1, 1)))

    # final output signal
    output_signal = activation(center_signals + neighbor_signals + bias)
    return output_signal
def initializer(init, shape):
    """Build an initial-value tensor for a variable of the given shape.

    'zero' yields zeros; 'he' yields a uniform sample from
    U(-1/sqrt(fan_in), 1/sqrt(fan_in)) with fan_in = prod(shape).
    Any other `init` string yields None (original behavior preserved).
    """
    if init == "zero":
        return tf.zeros(shape)
    if init == "he":
        fan_in = np.prod(shape)
        bound = 1 / np.sqrt(fan_in)
        return tf.random_uniform(shape, minval=-bound, maxval=bound, dtype=tf.float32)
def main():
    """Entry-point placeholder; this module is used as a library."""
    return None


if __name__ == "__main__":
    main()
|
998,718 | 2f899c020dbbbc318d11355081eeb851932cd43e | import datetime
from src.mining import collector, matcher
from src.utility import file_management
from src.utility import helpers
# Get projects to collect PR data for
projects = file_management.get_projects_to_mine()

# Get token for GitHub API
token = file_management.get_token()

# Get the GraphQL parameters, containing search parameters and description
graphql_parameters = file_management.get_graphql_parameters()

# Standard query to collect PR's
standard_query = "is:pr is:closed sort:created-asc"

for project in projects:
    # Per-project mining configuration.
    owner = project.get("owner")
    repo = project.get("repo")
    start_date = project.get("startDate")
    bot_call_string = project.get("botCallString")
    # Restrict the bot query to PRs where the bot was invoked in comments.
    bot_query = f"{bot_call_string} in:comments"
    always = project.get("always")
    non_bot_start_date = project.get("nonBotStartDate")

    print("Mining PR's from project {owner}/{repo}".format(owner=owner, repo=repo))

    all_prs = 0
    bot_prs = 0

    # "always" projects: the bot touches every PR, so the full PR set is
    # collected over a separate (non-bot) date range from the bot PRs.
    if always:
        all_prs = collector.collect_and_enrich(owner, repo, standard_query, non_bot_start_date,
                                               helpers.get_graphql_attributes(graphql_parameters), bot_call_string,
                                               "allPRs", token, start_date)
        bot_prs = collector.collect_and_enrich(owner, repo, standard_query, start_date,
                                               helpers.get_graphql_attributes(graphql_parameters), bot_call_string,
                                               "botPRs", token)
    else:
        bot_prs = collector.collect_and_enrich(owner, repo, standard_query + " " + bot_query, start_date,
                                               helpers.get_graphql_attributes(graphql_parameters), bot_call_string,
                                               "botPRs", token)
        all_prs = collector.collect_and_enrich(owner, repo, standard_query, start_date,
                                               helpers.get_graphql_attributes(graphql_parameters), bot_call_string,
                                               "allPRs", token)

    # Create non bot PR's.
    matcher.create_non_bot_prs(owner, repo)
    data = file_management.get_all_mined_prs(owner, repo)
    matcher.do_matchings(owner, repo, data.get("bot_prs"), data.get("non_bot_prs"))

# Now we have the following:
# - The PR's where the bot contributes
# - All the PR's
# - bot PR's one-to-one matched to
# - similar PR's
# - performance labeled bot PR's
# - performance labeled all PR's
|
def addition(a, b):
    """Return the sum of a and b."""
    return a + b
def soustraction(a, b):
    """Return the difference a - b."""
    return a - b
|
998,720 | 7c576c75b7274c5b5414fc5cec412d1df77dd9e1 | #!/usr/bin/env python3
import sys
from os.path import abspath
from os.path import dirname

# BUG FIX: the path entry must be registered *before* `from app import app`;
# in the original the insert came after the import, so the import only
# succeeded when the process happened to start inside this directory.
sys.path.insert(0, abspath(dirname(__file__)))

from app import app

# WSGI servers (gunicorn, mod_wsgi, ...) look for this callable.
application = app
"""
建立一个软连接
ln -s /var/www/movie/movie.conf /etc/supervisor/conf.d/movie.conf
ln -s /var/www/movie/movie.nginx /etc/nginx/sites-enabled/movie
➜ ~ cat /etc/supervisor/conf.d/movie.conf
[program:movie]
command=/usr/local/bin/gunicorn wsgi -c gunicorn.config.py
directory=/var/www/movie
autostart=true
autorestart=true
/usr/local/bin/gunicorn wsgi
--bind 0.0.0.0:2002
--pid /tmp/飙泪og.pid
""" |
998,721 | be4da861ade371bd5e61137dcd70405f9bac9f89 | from django.test import TestCase
# Create your tests here.
|
998,722 | 165c7e04db32aa7e473d99b80d4b900df56c9ea2 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib import request
import json
# with request.urlopen('https://api.github.com/') as f:
# print(f.status, f.reason)
# for k, v in f.getheaders():
# print(k, ':', v)
# req = request.Request('https://api.github.com/')
# req.add_header('User-Agent',
# 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
# with request.urlopen(req) as f:
# print(f.status, f.reason)
# for k, v in f.getheaders():
# print(k, ':', v)
# print(f.read().decode('utf-8'))
def fetch_data(url):
    """Fetch *url* and parse the response body as UTF-8 encoded JSON."""
    with request.urlopen(url) as resp:
        raw = resp.read()
    return json.loads(raw.decode('utf-8'))
# Smoke test (was "# 测试"): fetch weather JSON and verify the city field.
# BUG FIX: removed the stray `print(js)` line -- `js` was never defined, so
# the script crashed with NameError before reaching the test.
URL = 'https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20where%20woeid%20%3D%202151330&format=json'
data = fetch_data(URL)
print(data)
assert data['query']['results']['channel']['location']['city'] == 'Beijing'
print('ok')
|
998,723 | 7bf60140e3323539924c0916fb6d26b337ccb393 | import rasterio
from scipy import ndimage as ndi
from scipy.ndimage import gaussian_filter
from scipy import misc
import matplotlib.pyplot as plt
import numpy as np
from osgeo import gdal
import pygeoprocessing
class GeoTiff:
    """
    Object representing data from a GeoTIFF image.
    """

    def __init__(self):
        """
        Object constructor method.
        """
        self.bands = {}        # label -> raw band array
        self.gauss_bands = {}  # label -> Gaussian-smoothed band
        self.sharp_bands = {}  # label -> band minus smoothed band (edges)
        self.num_bands = None  # number of bands ingested
        self.labels = None     # band labels, e.g. ["b", "g", "r", "n"]
        self.ndvi = None       # NDVI array once calculated

    def ingest_data(self, input_file, num_bands, labels):
        """
        Push GeoTIFF data into the object.

        :param input_file: str, of input GeoTiff address
        :param num_bands: int, number of bands in the input GeoTIFF
        :param labels: list of strings, where items are band labels
        """
        self.labels = labels
        self.num_bands = num_bands
        with rasterio.open(input_file, "r") as dataset:
            # rasterio band indices are 1-based.
            for i in range(1, self.num_bands + 1):
                band = dataset.read(i)
                self.bands[self.labels[i - 1]] = band

    def calculate_ndvi(self):
        """
        Calculate NDVI scores, (NIR - red) / (NIR + red), from the "n" and
        "r" bands.

        Pixels where NIR + red == 0 produce nan/inf instead of raising.
        BUG FIX: the original relied on callers globally disabling numpy
        divide warnings via np.seterr (see the commented driver code);
        suppress them locally so the method is self-contained.
        """
        nir = self.bands["n"].astype(float)
        red = self.bands["r"].astype(float)
        with np.errstate(divide='ignore', invalid='ignore'):
            self.ndvi = (nir - red) / (nir + red)

    def gaussify_bands(self, sigma):
        """
        Introduce normal smoothing to pixels.

        :param sigma: standard deviation input for the gaussian distribution
        """
        for key, band in self.bands.items():
            self.gauss_bands[key] = gaussian_filter(input=band, sigma=sigma)

    def sharpen_bands(self):
        """
        Produce a matrix with edge detection (band minus its smoothed
        version).  Requires gaussify_bands() to have been called first.
        """
        for label in self.labels:
            self.sharp_bands[label] = self.bands[label] - self.gauss_bands[
                label]

    def draw_matrix(self, destination, version, band):
        """
        Take a numpy array representation and output a visual .jpg.

        :param destination: str, where to print out the matrix
        :param version: str, which matrix, e.g. "band", "gauss", "sharp", "ndvi"
        :param band: str, band choice, e.g. "b", "g", "r", "n", "ndvi"
        """
        if version == "band":
            matrix = self.bands[band]
        elif version == "gauss":
            matrix = self.gauss_bands[band]
        elif version == "sharp":
            matrix = self.sharp_bands[band]
        else:
            # Any other version string falls back to the NDVI matrix.
            matrix = self.ndvi
        # NOTE(review): repeated calls reuse the current pyplot figure, so
        # colorbars accumulate; consider plt.figure()/plt.close() around this
        # if multiple plots are drawn in one session.
        plt.imshow(matrix)
        plt.colorbar()
        plt.savefig(destination)
# aleppo_full = GeoTiff()
# aleppo_full.ingest_data(input_file="data/experimental/aleppo_full_order/20180206_073917_0f42_3B_AnalyticMS.tif",
# num_bands=4, labels=["b", "g", "r", "n"])
# # Allow division by zero
# np.seterr(divide='ignore', invalid='ignore')
# aleppo_full.calculate_ndvi()
# aleppo_full.gaussify_bands(sigma=10)
# # aleppo_full.sharpen_bands()
# print(aleppo_full.gauss_bands)
# ALEPPO_APRIL = "data/experimental/aleppo_apr_02/merged_aleppo_20180402.tif"
# ALEPPO_FEB = "data/experimental/aleppo_feb_06/aleppo_0206_merged.tif"
# ALEPPO_MAY = "data/experimental/aleppo_may_03/aleppo_mergedmat32018.tif"
# Changes to geoprocessing.py
# 1. "range" used to be "xrange"
# 2. Line 8 was "import exceptions"
# 3. Line 30 was "fimport geoprocessing_core"
# 4. Line 32 was nothing -> "from functools import reduce"
|
998,724 | d40a79d3a345c384d0074c0dff52f90d3ceaa693 | import numpy as np
import time
def std(m):
    """Min-max normalise *m* into [0, 1].

    :param m: 1-D sequence of numbers
    :return: numpy array n with n[i] = (m[i] - min(m)) / (max(m) - min(m))
    """
    normalised = np.zeros(len(m))
    top, bottom = max(m), min(m)
    span = top - bottom  # loop-invariant data range
    for idx, value in enumerate(m):
        normalised[idx] = (value - bottom) / span
        # Sanity check: a correct min-max scaling can never leave [0, 1].
        if normalised[idx] > 1 or normalised[idx] < 0:
            print("NO", value)
    return normalised
def poisson_noise(image):
    """Apply Poisson (shot) noise to a 1-D signal.

    The signal is first min-max normalised via ``std`` and each sample is
    then replaced by a Poisson draw with mean ``scale * sample``.

    :param image: 1-D sequence of numbers
    :return: numpy array of noisy counts
    """
    noisy = std(image)
    scale = 500  # controls the count level; higher counts -> less relative noise
    for idx in range(len(noisy)):
        # The scale must go inside the Poisson mean: multiplying the draw
        # afterwards would shrink the relative noise instead of the signal.
        noisy[idx] = np.random.poisson(scale * noisy[idx])
    return noisy
def bias(image, dot, mode=0):
    """Localisation bias of a reconstructed point source.

    :param image: reconstructed image, shape (181, 360)
    :param dot: (theta, phi) position of the true point source
    :param mode: if 1, centre the 50-pixel window on the brightest pixel
        instead of on ``dot``
    :return: (theta_bias, phi_bias) -- intensity centroid minus the true
        position; only suitable for moderate angles
    """
    centre_row, centre_col = dot[0], dot[1]
    if mode == 1:
        centre_row, centre_col = np.unravel_index(np.argmax(image), image.shape)
    weighted_row = weighted_col = 0
    total_row = total_col = 0
    for row in range(181):
        for col in range(360):
            pixel = image[row, col]
            # Only pixels within +/-50 of the centre contribute, keeping
            # the centroid local to the source.
            if abs(row - centre_row) < 50:
                weighted_row += row * pixel
                total_row += pixel
            if abs(col - centre_col) < 50:
                weighted_col += col * pixel
                total_col += pixel
    # The bias is always measured against the *true* position in ``dot``,
    # even in mode 1 where the window centre may differ.
    return weighted_row / total_row - dot[0], weighted_col / total_col - dot[1]
def fwhm(image, dot):
    """Full width at half maximum of a reconstructed point source.

    Scans outwards from ``dot`` along both axes and records how far one
    must move, in each direction, before the intensity drops below half
    of the peak value at ``dot``.

    :param image: reconstructed image, shape (181, 360)
    :param dot: (theta, phi) position of the point source
    :return: (theta_fwhm, phi_fwhm), calibrated so the width of an
        un-blurred source is 0 (hence the ``- 2``)
    """
    row, col = dot[0], dot[1]
    found = np.zeros((2, 1))      # [minus-direction hit, plus-direction hit]
    row_width = np.zeros((2, 1))
    col_width = np.zeros((2, 1))
    half = image[row][col] / 2    # half-maximum threshold (image is read-only)
    for step in range(90):
        if image[row - step][col] < half and found[0] == 0:
            row_width[0] = step
            found[0] = 1
        if image[row + step][col] < half and found[1] == 0:
            row_width[1] = step
            found[1] = 1
        if found[0] == 1 and found[1] == 1:
            found = np.array([0, 0])  # reset state for the phi scan
            break
    # NOTE: if the theta scan never breaks, ``found`` is NOT reset and the
    # phi scan inherits its state -- behaviour preserved from the original.
    for step in range(180):
        if image[row, col - step] < half and found[0] == 0:
            col_width[0] = step
            found[0] = 1
        if image[row, col + step] < half and found[1] == 0:
            col_width[1] = step
            found[1] = 1
        if found[0] == 1 and found[1] == 1:
            break
    # Sum both half-widths per axis; -2 calibrates the ideal source to 0.
    return sum(sum(row_width)) - 2, sum(sum(col_width)) - 2
def test_reverse(sm,m_1,x,y):
    '''
    Reconstruct a single point source and score the reconstruction.

    sm  -- system matrix
    m_1 -- pseudo-inverse used for the reconstruction
    x,y -- (1-based) coordinates of the point source to simulate

    Returns a 6-tuple: summed FWHM without noise, summed FWHM with Poisson
    noise, then the (theta, phi) bias without and with noise.
    '''
    #time_start = time.time()
    ## Build the test case: one lit pixel in a flattened 181x360 sky map.
    test_points = np.zeros((181,360))
    test_points = test_points.reshape((1,181*360)).transpose()
    test_points[(y-1)*181+x-1] = 1# MATLAB and Python reshape in different (column- vs row-major) order
    ##
    ## Project into detector space; also make a Poisson-noised copy.
    test_points_p = np.dot(sm,test_points)
    test_points_pn = poisson_noise(test_points_p)
    ##
    ## Reconstruct both the clean and the noisy measurement.
    test_points_f,test_points_fn = np.dot(m_1,test_points_p).reshape((360,181)).transpose(),np.dot(m_1,test_points_pn).reshape((360,181)).transpose()
    ##
    ## Score both reconstructions: localisation bias and resolution (FWHM).
    bias_f,bias_fn = bias(test_points_f,(x-1,y-1),mode = 0),bias(test_points_fn,(x-1,y-1),mode = 0)
    fw_f,fw_fn = fwhm(test_points_f,(x-1,y-1)),fwhm(test_points_fn,(x-1,y-1))
    #time_end = time.time()
    #print('time cost : %.5f sec' %(time_end - time_start))
    return sum(fw_f),sum(fw_fn),bias_f[0],bias_f[1],bias_fn[0],bias_fn[1]
##
if __name__ == "__main__":
    # Sweep a grid of source positions and 12 pseudo-inverse variants,
    # scoring each reconstruction, then save all rows for later analysis.
    sm = np.load("sm.npy")
    m_1 = np.load("important_m_1.npy")
    result = []
    #fw_f,fw_fn = test_reverse(sm,m_1[0],30,30)# one call takes about 0.07 s
    #print(fw_f,fw_fn)
    #print(m_1[2])
    test = 0
    for x in range(30,151,10):
        for y in range(30,331,10):
            for i in range(12):
                print(x,y,i)
                fw_f,fw_fn,bias_f0,bias_f1,bias_fn0,bias_fn1 = test_reverse(sm,m_1[i],x,y)
                # Row layout: position, variant index, clean/noisy FWHM,
                # then clean and noisy (theta, phi) bias.
                result.append([x,y,i,fw_f,fw_fn,bias_f0,bias_f1,bias_fn0,bias_fn1])
    np.save("res_res_mode0.npy",result)
998,725 | ac52644ec4536708918d5ec601be2d921b5a0725 | """
1) replace zeros with half min detection limit
2) calculate Z-scores - in array of same format as compiled_mammals
a) Within each metal calculate mean
b) within each metal caclulate SD
c) for each value calculate (value - mean)/SD
3) Generate CSVs with the following outputs for each location for each species
a) column 1: 'Variable' - which metal, listed 3 times
b) column 2: 'Total" - Average Z-score for that location
c) column 3: 'Sex' - Female, Male, Unknown, for each metal
d) column 4: 'Sex value' - average of measured values for the given sex at that location
e) column 5: 'Count' - number of measurments for each of sex and metal at that location
f) column 6: 'Reference' - this will be the same within each species for each location - imported from another CSV
(row,column) = [i,j]
"""
import numpy as np
import pandas as pd
"""
compiled_mammals = pd.read_csv("/Users/robertgutgesell/Documents/Alison Holloway/KT Project/DataScience/compiled_mammals.csv")
mammals=np.asarray(compiled_mammals)
mammals_metalonly = mammals[:,9:] #taking metals only, ignoring first 8 columns
MDLs = pd.read_csv("/Users/robertgutgesell/Documents/Alison Holloway/KT Project/DataScience/MDLs_MO.csv")
MDLs_array=np.asarray(MDLs)
n_iter_col_len = len(mammals_metalonly[1,:])
n_iter_col = np.arange(n_iter_col_len)
n_iter_row_len = len(mammals_metalonly[:,1])
n_iter_row = np.arange(n_iter_row_len)
for j in n_iter_col:
#print(n_iter_col)
for i in n_iter_row:
#print(n_iter_row)
if mammals_metalonly[i,j] == 0:
# print(mammals[i,j])
mammals_metalonly[i,j] = MDLs_array[j,1]/2 #jth element in mmo = ith element of MDLs array
len(MDLs_array[:,1]) == len(mammals_metalonly[1,:]) #logic check to see if above comment true. It is.
#remove nan, i.e. every other row,
mammals_metalonly_nonan= np.delete(mammals_metalonly, list(range(0, mammals_metalonly.shape[0], 2)), axis=0)
np.savetxt("mmo_MDLs.csv", mammals_metalonly_nonan, fmt = "%s", delimiter=",")
"""
"""
Calculating Z-scores
Make a new array with z-scores in place of measured values
Z = (x-u)/s
where x = measured value, u = mean of that metal, s = standard deviation of that metal
"""
colen = len(inarray[1,:])
colen_iter = np.arange(colen)
rolen = len(inarray[:,1])
rolen_iter = np.arange(rolen)
#mammals_metalonly_nonan[np.isnan(mammals_metalonly_nonan)] = 0
"""
for i in rolen_iter:
for j in colen_iter:
try:
if type(inarray[i,j]) == str :
inarray[i,j] = float(inarray[i,j])
except ValueError:
print(i,j)
inarray[201,0] = 0
for i in rolen_iter:
for j in colen_iter:
if type(mammals_metalonly_nonan[i,j]) == str :
mammals_metalonly_nonan[i,j] = float(mammals_metalonly_nonan[i,j])
for i in rolen_iter:
for j in colen_iter:
mammals_metalonly_nonan[i,j] = np.nan_to_num(mammals_metalonly_nonan[i,j], nan=0.0)
"""
# Column-wise z-scores: z = (x - column mean) / column standard deviation.
# NOTE(review): ``inarray`` is never defined in this file -- presumably it
# should be ``mammals_metalonly_nonan`` from the commented-out block above;
# as written this raises NameError.  Confirm before running.
zscore_array = np.zeros((rolen,colen))
for j in colen_iter:
    u = np.mean(inarray[:,j])
    s = np.std(inarray[:,j])
    for i in rolen_iter:
        x = inarray[i,j]
        z = (x - u)/s
        zscore_array[i,j] = z
"""
PPA_array = np.zeros((rolen,colen))
for j in colen_iter:
u = np.mean(mammals_metalonly_nonan[:,j])
for i in rolen_iter:
x = mammals_metalonly_nonan[i,j]
p = (x / u) * 100
PPA_array[i,j] = p
"""
"""
Outputting CSVs
"""
"""
mammals_nonan = np.delete(mammals, list(range(0, mammals.shape[0], 2)), axis=0)
np.savetxt("mam_nonan.csv", mammals_nonan, fmt = "%s", delimiter=",")
spe_sex_loc = np.asarray(mammals_nonan[:,2:5])
z_master_metals = np.concatenate([spe_sex_loc, zscore_array], axis=1)
ppa_master_metals = np.concatenate([spe_sex_loc, PPA_array], axis=1)
loc = z_master_metals[:,2]
spec = z_master_metals[:,0]
uniq_loc = list(dict.fromkeys(loc))
uniq_spec = list(dict.fromkeys(spec))
loc_len = len(uniq_loc)
loc_iter = np.arange(loc_len)
spec_len = len(uniq_spec)
spec_iter = np.arange(spec_len)
col = len(z_master_metals[1,:])
col_iter = np.arange(col)
rol = len(z_master_metals[:,1])
rol_iter = np.arange(rol)
A = np.random.randint(0, 10, size=345).reshape(69, 5)
col_names = ['Total','Sex',' Sex Value','Count','Reference']
row_names = ['Hg','Hg','Hg','Li','Li','Li','V','V','V','Mn','Mn','Mn','Fe','Fe','Fe','Co','Co','Co','Ni','Ni','Ni','Cu','Cu','Cu','Zn','Zn','Zn','Ga','Ga','Ga','As','As','As','Se','Se','Se','Rb','Rb','Rb','Sr','Sr','Sr','Mo','Mo','Mo','Ag','Ag','Ag','Cd','Cd','Cd','Sn','Sn','Sn','Sb','Sb','Sb','Ba','Ba','Ba','Tl','Tl','Tl','Pb','Pb','Pb','Bi','Bi','Bi']
df = pd.DataFrame(A, index=row_names, columns=col_names)
count = 0
sizecalc = []
for i in loc_iter:
if uniq_loc[i] == 'FleetCreek/JackpineRidg':
uniq_loc[i] = 'FleetCreekJackpineRidg'
#determining how many times locations come up
for i in uniq_loc:
for j in rol_iter:
if i == z_master_metals[j,2]:
count = count+1
sizecalc.append(count)
count=0
#create arrays for each location that are the appropriate dimensions
for j in loc_iter:
i = uniq_loc[j]
indic = sizecalc[j]
loc[i] = pd.DataFrame(np.nan, index=[i], columns=[j])
#exec('loc%s= pd.DataFrame() %(i)') #try again with pandas
#assigning rows from z_master_metals to loc%s :
#try again with dataframe
for j in loc_iter:
x = uniq_loc[j]
for i in rol_iter:
if x == z_master_metals[i,2]:
#print(z_master_metals[i,2])
for l in range(0,sizecalc[j]):
exec('loc%s[l,:] = z_master_metals[i,:]' %(x))
"""
|
998,726 | 5741d9bb38b4354e5d283d62e1569753b5b2a6d2 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-29 13:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional free-text ``comment`` field to the Article model."""

    dependencies = [
        ('mysite', '0008_auto_20170529_1539'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='comment',
            # blank/null/default=None: the field is entirely optional.
            field=models.TextField(blank=True, default=None, null=True, verbose_name='Comments'),
        ),
    ]
|
998,727 | 982a9490b64f864ccc1d725efe5f702137815dc3 | import time
from model.Chance import Chance
from model.InviteCode import InviteCode
def getInvitationCode(db):
    """Return the code string of the first active, unexpired invite.

    Active means status == 1 and endtime still in the future.

    :param db: unused here; kept for interface symmetry with the callers
    """
    print(int(time.time()))  # NOTE(review): leftover debug output -- consider removing
    code=InviteCode.query.filter(InviteCode.endtime>int(time.time()),InviteCode.status==1).first()
    # NOTE(review): if no matching invite exists, ``code`` is None and the
    # attribute access below raises AttributeError -- confirm callers expect that.
    return code.code
def hasChance(openid):
    """Return True when *openid* has not used a chance yet."""
    return Chance.query.filter_by(openid=openid).count() <= 0
def addOpenid(openid, db):
    """Persist a Chance record marking *openid* as having used its chance."""
    chance = Chance(openid)
    db.session.add(chance)
    db.session.commit()
|
998,728 | e436e4198e7505bc12929f493b5bcb4aba40fd33 | N=int(input())
S=list()
for i in range(N):
S.append(input())
M=int(input())
T=list()
for i in range(M):
T.append(input())
ans=0
for i in S:
v=S.count(i)-T.count(i)
if v>ans:
ans=v
print(ans) |
998,729 | 7ab55a4e5913a79987879025e21ef6d084e552de | import os
import shutil
import tempfile
try:
from toil.lib.docker import apiDockerCall
except ImportError:
apiDockerCall = None
import subprocess
maxcluster_path = os.path.dirname(os.path.dirname(subprocess.check_output(["which", "maxcluster"])))
def run_maxcluster(*args, **kwds):
    """Run the maxcluster structure-clustering tool, via Docker or locally.

    Positional *args become bare flags ("-x"); keyword arguments become
    "-key value" pairs.  Control keywords (popped before building the
    command line): work_dir (defaults to the current directory), docker
    (use Toil's apiDockerCall when available), job (Toil job, required
    for the Docker path), file_list (alias for maxcluster's "-l" option),
    log (True for a fresh temp log file, or an explicit path string).

    Returns the log-file path when one was produced, otherwise the
    captured output of the run.

    NOTE(review): this is Python 2 code (``dict.iteritems``); it will not
    run unmodified on Python 3.
    """
    work_dir = kwds.pop("work_dir", None)
    docker = kwds.pop("docker", True)
    job = kwds.pop("job", None)
    if work_dir is None:
        work_dir = os.getcwd()
    # "file_list" is a friendlier alias for maxcluster's "-l" option.
    if "file_list" in kwds and not "l" in kwds:
        kwds["l"] = kwds.pop("file_list")
    else:
        kwds.pop("file_list", None)
    log = kwds.get("log", False)
    if log and not isinstance(log, str):
        # log=True (any non-path truthy value): allocate a temp log file.
        f = tempfile.NamedTemporaryFile(dir=work_dir, suffix=".log", delete=False)
        f.close()
        kwds["log"] = f.name
    # Keywords whose values are file paths; in_file_kwds are the inputs
    # that must be visible inside the Docker work directory.
    file_kwds = ["log", "e", "p", "l", "R", "Rl", "Ru", "F", "M"]
    in_file_kwds = ["e", "p", "l", "F", "M"]
    parameters = ["-"+a for a in args]
    for k, v in kwds.iteritems():
        if k not in file_kwds:
            parameters += ["-{}".format(k), str(v)]
    # NOTE(review): job may be None here, in which case this log call
    # fails -- confirm all callers pass a job.
    job.log("ORIG PARAMS: {}".format(parameters))
    file_parameters = {k:v for k, v in kwds.iteritems() if k in file_kwds}
    if docker and apiDockerCall is not None and job is not None:
        # Docker path: copy input files into work_dir (mounted as /data)
        # and refer to every file parameter by basename in the container.
        for k,f in file_parameters.iteritems():
            if k in in_file_kwds and not os.path.abspath(os.path.dirname(f)) == os.path.abspath(work_dir):
                shutil.copy(f, work_dir)
            job.log("BASENAMING: {}".format(os.path.basename(f)))
            parameters += ["-{}".format(k), os.path.basename(f)]
        oldcwd = os.getcwd()
        os.chdir(work_dir)
        try:
            out = apiDockerCall(job,
                          'edraizen/maxcluster:latest',
                          working_dir="/data",
                          volumes={work_dir:{"bind":"/data", "mode":"rw"}},
                          parameters=parameters
                          )
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            job.log("FILE LIST IS [{}]".format(open(file_parameters["l"]).read()))
            raise
        #return run_scwrl(pdb_file, output_prefix=output_prefix, framefilename=framefilename,
        #    sequencefilename=sequencefilename, paramfilename=paramfilename, in_cystal=in_cystal,
        #    remove_hydrogens=remove_hydrogens, remove_h_n_term=remove_h_n_term, work_dir=work_dir, docker=False)
        os.chdir(oldcwd)
    else:
        # Local path: pass file arguments through unchanged.
        file_args = []
        for k,f in file_parameters.iteritems():
            parameters += ["-{}".format(k), f]
        args = [maxcluster_path]+file_args+parameters
        try:
            out = subprocess.check_output(args)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            raise
            #raise RuntimeError("APBS failed becuase it was not found in path: {}".format(e))
    if "log" in kwds and os.path.isfile(kwds["log"]):
        return kwds["log"]
    return out
def get_centroid(log_file, work_dir=None, docker=True, job=None):
    """Parse a maxcluster log and return the PDB file of the largest centroid.

    Scans the "Centroids" table in *log_file*; in each data row the
    third-from-last field is the cluster size and the last field is the
    PDB path.

    :param log_file: path to a maxcluster log file
    :param work_dir: unused; kept for interface compatibility
    :param docker: unused; kept for interface compatibility
    :param job: optional Toil job, used only for debug logging
    :return: PDB path of the largest centroid, or None if the log
        contains no centroid table
    """
    parse_centroids = False
    best_centroid_size = None
    best_centroid_file = None
    with open(log_file) as log:
        for line in log:
            # BUGFIX: job defaults to None, so logging must be guarded --
            # the original called job.log unconditionally and crashed.
            if job is not None:
                job.log("LINE: {}".format(line.rstrip()))
            if not parse_centroids and line.startswith("INFO : Centroids"):
                parse_centroids = True
                # Skip the two header/separator lines after the title.
                next(log)
                next(log)
            elif parse_centroids and line.startswith("INFO : ="):
                break
            elif parse_centroids:
                if job is not None:
                    job.log("PARSING LINE: {}".format(line.rstrip()))
                fields = line.rstrip().split()
                size, pdb_file = fields[-3], fields[-1]
                if best_centroid_size is None or int(size) > best_centroid_size:
                    best_centroid_size = int(size)
                    best_centroid_file = pdb_file
    return best_centroid_file
def get_hierarchical_tree(log_file):
    """Parse the "Hierarchical Tree" section of a maxcluster log.

    Builds a directed graph whose internal (negative-numbered) nodes are
    cluster merges and whose leaf (positive-numbered) nodes carry a
    ``pdb`` attribute holding the structure file name.

    :param log_file: path to a maxcluster log file
    :return: networkx.DiGraph of the clustering hierarchy
    """
    import networkx as nx
    nx_tree = nx.DiGraph()
    parse_tree = False
    with open(log_file) as log:
        # BUGFIX: dropped the per-line job.log call -- ``job`` was never
        # defined in this function and raised NameError on the first line.
        for line in log:
            if not parse_tree and line.startswith("INFO : Hierarchical Tree"):
                parse_tree = True
                # Skip the two header/separator lines after the title.
                next(log)
                next(log)
            elif parse_tree and line.startswith("INFO : ="):
                break
            elif parse_tree:
                _, node, info = line.rstrip().split(":")
                node = int(node.strip())
                nx_tree.add_node(node)
                fields = [f.strip() for f in info.split()]
                # BUGFIX: the original unpacked into "item2, item2",
                # leaving item1 undefined and crashing below.
                item1, item2 = map(int, fields[:2])
                distance = float(fields[2])  # merge distance; parsed but currently unused
                if item1 > 0 and item2 > 0:
                    # Both children are leaves -> both carry PDB names.
                    pdb1, pdb2 = fields[3:5]
                    nx_tree.add_node(item1, pdb=pdb1)
                    nx_tree.add_node(item2, pdb=pdb2)
                elif item1 > 0 and item2 < 0:
                    # Node 2 (a merge) is already in the graph.
                    pdb1 = fields[3]
                    nx_tree.add_node(item1, pdb=pdb1)
                elif item1 < 0 and item2 > 0:
                    # Node 1 (a merge) is already in the graph.
                    pdb2 = fields[3]
                    nx_tree.add_node(item2, pdb=pdb2)
                nx_tree.add_edge(node, item1)
                nx_tree.add_edge(node, item2)
    # BUGFIX: the original built the tree but never returned it.
    return nx_tree
|
998,730 | ddf5fb820ff730d2fdf9e87281f1105a6b2ddcd3 | import pytest
from module_one.tests.conftest import matrix_string
from module_one._03_strmatrix import StrMatrix, StringParseError
def test_matrix_parser_works_as_expected(matrix_string):
    """_parse turns the fixture string into a list of int rows."""
    result = StrMatrix._parse(StrMatrix, matrix_string)
    assert result == [[1,2,3], [4,5,6], [7,8,9]]
def test_matrix_parser_raises_stringparseerror_if_row_length_uneven(matrix_string):
    """Truncating the fixture mid-row must raise StringParseError."""
    test = matrix_string[:9]
    with pytest.raises(StringParseError):
        StrMatrix._parse(StrMatrix, test)
def test_matrix_parser_raises_stringparseerror_cannot_be_coerced_to_int(matrix_string):
    """Appending a row of floats (not int-coercible) must raise."""
    test = matrix_string + "\n42.3 41.6 34.4"
    with pytest.raises(StringParseError):
        StrMatrix._parse(StrMatrix, test)
def test_m_property_returns_deep_copy(mocked_strmatrix):
    """The .m property must return an equal copy, not the backing list."""
    assert mocked_strmatrix.m is not mocked_strmatrix._m
    assert mocked_strmatrix.m == mocked_strmatrix._m
def test_matrix_row(mocked_strmatrix):
    """row(i) returns rows; negative indices wrap; out-of-range raises."""
    assert mocked_strmatrix.row(0) == [1,2,3]
    assert mocked_strmatrix.row(1) == [4,5,6]
    assert mocked_strmatrix.row(2) == [7,8,9]
    assert mocked_strmatrix.row(-1) == [7,8,9]
    with pytest.raises(IndexError):
        mocked_strmatrix.row(42)
def test_matrix_column(mocked_strmatrix):
    """column(j) returns columns; negative indices wrap; out-of-range raises."""
    assert mocked_strmatrix.column(0) == [1,4,7]
    assert mocked_strmatrix.column(1) == [2,5,8]
    assert mocked_strmatrix.column(2) == [3,6,9]
    assert mocked_strmatrix.column(-1) == [3,6,9]
    with pytest.raises(IndexError):
        mocked_strmatrix.column(100)
def test_matrix_fetch(mocked_strmatrix):
    """fetch(i, j) returns single cells, supporting negative indices."""
    assert mocked_strmatrix.fetch(0,0) == 1
    assert mocked_strmatrix.fetch(1,1) == 5
    assert mocked_strmatrix.fetch(2,2) == 9
    assert mocked_strmatrix.fetch(1,2) == 6
    assert mocked_strmatrix.fetch(-1,-2) == 8
    with pytest.raises(IndexError):
        mocked_strmatrix.fetch(42,42)
def test_matrix_transpose(mocked_strmatrix, mocked_rect_strmatrix):
    """transpose() swaps rows/columns for square and rectangular matrices."""
    assert mocked_strmatrix.transpose() == [(1, 4, 7), (2, 5, 8), (3, 6, 9)]
    assert mocked_rect_strmatrix.transpose() == [(1, 4, 7), (2, 5, 8), (3, 6, 9),
                                                 (5, 7, 10), (6, 8, 11)]
|
998,731 | 26e716c2e3a204ea598d2b6bc430251e4e985881 | from scapy.all import *
from scapy.fields import *
from scapy.layers.inet import IP,TCP
from CPPM import *
import re as regex
class Service():
    """TCP transport helpers for sending and receiving CPPM packets.

    Wraps packet construction, optional ESP encryption/decryption and
    plain-socket send/receive around the custom CPPM scapy layer.
    """

    def __init__(self, tcp_ip=None, tcp_dport = 5005, buffer_size = 1024):
        """Store the listen address, destination port and receive buffer size."""
        self.TCP_IP = tcp_ip
        self.TCP_DPORT = tcp_dport
        self.BUFFER_SIZE = buffer_size

    def encryptPacket(self, packet, key):
        """Encrypt *packet* with an AES-CBC ESP security association."""
        sa = SecurityAssociation(ESP, spi=0xdeadbeef, crypt_algo='AES-CBC', crypt_key=str(key).encode())
        return sa.encrypt(packet)

    def decryptPacket(self, packet, key):
        """Decrypt an ESP *packet* with an AES-CBC security association."""
        sa = SecurityAssociation(ESP, spi=0xdeadbeef, crypt_algo='AES-CBC', crypt_key=str(key).encode())
        return sa.decrypt(packet)

    def getKey(self, key_string):
        """Extract the key body from a "PublicKey(<key>, ...)" repr string."""
        # BUGFIX: this module imports the stdlib module as ``regex``
        # ("import re as regex"); the original called ``re.search``, which
        # only resolves if scapy's star import happens to leak ``re``.
        # Use the explicit alias and a raw pattern string.
        res = regex.search(r'PublicKey\((.+?)\,', key_string)
        return res.group(1)

    def createPacket(self, payload, ver, dst_ip, port, shake):
        """Build an IP/TCP/CPPM packet carrying *payload*."""
        packet_to_send = IP(dst=dst_ip)
        packet_to_send /= TCP(dport=port)
        packet_to_send /= CPPM(message=payload, messageLength=len(payload), version=ver, handshake=shake)
        # Rebuild from raw bytes so length/checksum fields get filled in.
        packet_to_send = IP(raw(packet_to_send))
        return packet_to_send

    def sendPacket(self, packet, ip, port):
        """Send *packet* over a TCP stream to (ip, port); best effort."""
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((ip, int(port)))
            socketsr1 = StreamSocket(s, CPPM)
            ans = socketsr1.sr1(packet, timeout=2, verbose=False)
            s.close()
        except Exception as client_error:
            print('Error: {}'.format(client_error))

    def packetToBytes(self, packet):
        """Serialise a scapy packet to raw bytes."""
        return raw(packet)

    def bytesToPacket(self, packet):
        """Deserialise raw bytes into a CPPM packet."""
        return CPPM(packet)

    def receivePacket(self):
        """Accept TCP connections forever, displaying each CPPM packet."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((self.TCP_IP, self.TCP_DPORT))
        s.listen(1)
        s.settimeout(100)
        while True:
            conn, addr = s.accept()
            print('Connection address: {}'.format(addr))
            try:
                data = conn.recv(self.BUFFER_SIZE)
                if data:
                    packet = IP(data)
                    received_packet = CPPM(packet.getlayer(Raw).load)
                    received_packet.show()
                    #return received_packet
                else:
                    pass
            except Exception as server_error:
                #print(server_error)
                print('Error: {}'.format(server_error))
            conn.close()
|
998,732 | a07f4d559078c15c36b25fd466689f34703205ea | from flask import jsonify, Blueprint, abort
from flask_restful import Resource, Api, reqparse, fields, marshal, marshal_with
import models
message_fields = {
'id' : fields.Integer,
'content' : fields.String,
'published_at' : fields.String,
}
def get_or_abort(id):
    """Return the Message with *id*, or abort the request with a 404."""
    try:
        return models.Message.get_by_id(id)
    except models.Message.DoesNotExist:
        abort(404)
class MessageList(Resource):
    """REST resource for the message collection: list (GET) and create (POST)."""

    def __init__(self):
        # Both fields are required and may arrive as form data or JSON.
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument(
            'content',
            required = True,
            help = 'konten wajib ada',
            location = ['form', 'json']
        )
        self.reqparse.add_argument(
            'published_at',
            required = True,
            help = 'published_at/waktunya wajib ada',
            location = ['form', 'json']
        )
        super().__init__()

    def get(self):
        """Return every message, serialised with message_fields."""
        messages = [marshal(message, message_fields)
                    for message in models.Message.select()]
        return jsonify({'messages' : messages})

    def post(self):
        """Create a message from the parsed request arguments and return it."""
        args = self.reqparse.parse_args()
        message = models.Message.create(**args)
        return marshal(message, message_fields)
class Message(Resource):
    """REST resource for a single message, looked up by integer id."""

    @marshal_with(message_fields)
    def get(self, id):
        """Return one message, or respond 404 via get_or_abort."""
        return get_or_abort(id)
messages_api = Blueprint('resources.messages', __name__)
api = Api(messages_api)
api.add_resource(MessageList, '/messages', endpoint='messages')
api.add_resource(Message, '/message/<int:id>', endpoint='message')
|
998,733 | 99b081f50c54d9fd480db6fe0816f5fbfe1cc8b8 | from django.db import models
class Coordinate(models.Model):
    """A stored coordinate identified by a free-form code string."""

    # Free-form code string identifying the coordinate.
    code = models.CharField(max_length=150)

    def __str__(self):
        return self.code
class Profiles(models.Model):
    """Unmanaged mapping onto the existing ``profiles_country`` table."""

    geocode=models.CharField(max_length=200)
    country=models.CharField(max_length=500)
    city=models.CharField(max_length=500)

    class Meta:
        # managed=False: Django will not create or migrate this table.
        managed=False
        db_table='profiles_country'

    def __str__(self):
        return '{}'.format(self.geocode)
# Create your models here.
|
998,734 | 6058a414d6e9804068ddbe87fdac644eb13c6e8e | #!/usr/bin/env python
# coding: utf-8
# In[2]:
from tkinter import *
import tkinter.messagebox as Messagebox
import mysql.connector
# In[15]:
root=Tk()
root.title('LOGIN AND REGISTRATION')
text=Label(root,text='LOGIN AND REGISTRATION',font='verdana 20 bold')
text.grid(row=0,column=0)
def registration():
    """Open the registration window and wire up its insert/exit buttons.

    Builds a small Tk form (name, email, password, confirmation) backed by
    the ``register`` table of the blockchain_managesystem MySQL database.
    """
    window = Tk()
    window.title('register')
    con = mysql.connector.connect(host='localhost', user='root', passwd='apsjrc', database='blockchain_managesystem')
    cursor = con.cursor()
    text = Label(window, text='registration', font='verdana 20 bold')
    text.grid(row=0, column=0)
    name = Label(window, text='name')
    name.grid(row=1, column=0)
    email = Label(window, text='email')
    email.grid(row=2, column=0)
    password = Label(window, text='password')
    password.grid(row=3, column=0)
    re_password = Label(window, text='re_password')
    re_password.grid(row=4, column=0)
    e1 = Entry(window, width=18)
    e1.grid(row=1, column=1)
    e2 = Entry(window, width=18)
    e2.grid(row=2, column=1)
    e3 = Entry(window, width=18)
    e3.grid(row=3, column=1)
    e3.config(show='*')  # mask the password field
    e4 = Entry(window, width=18)
    e4.grid(row=4, column=1)
    e4.config(show='*')

    def clear():
        """Blank out all four entry fields."""
        e1.delete(first=0, last=100)
        e2.delete(first=0, last=100)
        e3.delete(first=0, last=100)
        e4.delete(first=0, last=100)

    def error():
        Messagebox.showerror(title='error', message='password not same')

    def insert():
        """Validate the form and store a new account row."""
        # BUGFIX: check that the passwords match BEFORE executing the
        # INSERT -- the original ran the statement first and only skipped
        # the commit on mismatch, leaving a pending row on the connection.
        if e3.get() == e4.get():
            # Parameterized query: values are bound, not interpolated.
            insert_sql = ('insert into register (name,email,password,re_password) values(%s,%s,%s,%s)')
            values = [e1.get(), e2.get(), e3.get(), e4.get()]
            cursor.execute(insert_sql, values)
            con.commit()
            clear()
            Messagebox.showinfo(title='done', message='account created')
        else:
            error()

    register = Button(window, text='register', fg='green', command=insert)
    register.grid(row=5, column=0)
    exit_button = Button(window, text='exit', command=window.destroy)
    exit_button.grid(row=6, column=0)
def login():
window=Tk()
window.title('LOGIN')
text=Label(window,text='LOGIN',font='verdana 20 bold')
text.grid(row=0,column=0)
email = Label(window,text='email')
email.grid(row=1,column=0)
password = Label(window,text='password')
password.grid(row=2,column=0)
e1=Entry(window,width=18)
e1.grid(row=1,column=1)
e2=Entry(window,width=18)
e2.grid(row=2,column=1)
def clear():
e1.delete(first=0,last=100)
e2.delete(first=0,last=100)
def error():
Messagebox.showerror(title='error',message='username and password is incorrect')
def login():
mail=e1.get()
pas=e2.get()
if mail=="" and pas=="":
Messagebox.showinfo('insert data','please insert email and password')
else:
con= mysql.connector.connect(host='localhost',user='root',passwd='apsjrc',database='blockchain_managesystem')
cursor=con.cursor()
cursor.execute("select email,password from register" )
rows=cursor.fetchall()
user_d = []
pass_d = []
for row in rows:
user_d.append(row[0])
pass_d.append(row[1])
for row in rows:
if e1.get() in user_d and e2.get() in pass_d :
login = True
Messagebox.showinfo(title='done',message='login successful')
root= Tk()
root.geometry("600x300")
root.title("blockchain")
bid = Label(root,text='enter coin initail')
bid.place(x=20,y=30)
typ = Label(root,text='enter coin name')
typ.place(x=20,y=60)
own = Label(root,text='enter owner name')
own.place(x=20,y=90)
use= Label(root,text='enter the uses')
use.place(x=20,y=120)
e_id=Entry(root,width=20)
e_id.place(x=150,y=30)
e_typ=Entry(root,width=20)
e_typ.place(x=150,y=60)
e_own=Entry(root,width=20)
e_own.place(x=150,y=90)
e_use=Entry(root,width=20)
e_use.place(x=150,y=120)
def insert():
bid=e_id.get()
typ= e_typ.get()
own=e_own.get()
use=e_use.get()
if(bid=="" or typ==""or own==""or use==""):
Messagebox.showinfo("insert Status","all fields are required")
else:
con= mysql.connector.connect(host='localhost',user='root',passwd='apsjrc',database='blockchain_managesystem')
cursor=con.cursor()
query="insert into blockchain values('%s','%s','%s','%s')" % (bid,typ,own,use)
cursor.execute(query)
cursor.execute("commit");
e_id.delete(0,'end')
e_typ.delete(0,'end')
e_own.delete(0,'end')
e_use.delete(0,'end')
Messagebox.showinfo("insert status","inserted successfully");
con.close();
def delete():
if(e_id.get()==""):
MessageBox.showinfo("delete status","bid is compulsory for delete")
else:
con= mysql.connector.connect(host='localhost',user='root',passwd='apsjrc',database='blockchain_managesystem')
cursor=con.cursor()
cursor.execute("delete from blockchain where BCID='" + e_id.get()+ "'")
cursor.execute("commit");
e_id.delete(0,'end')
e_typ.delete(0,'end')
e_own.delete(0,'end')
e_use.delete(0,'end')
Messagebox.showinfo("delete status","deleted successfully");
con.close();
def engine():
root= Tk()
root.geometry("600x300")
root.title("engine")
bid = Label(root,text='enter coin initial ')
bid.place(x=20,y=30)
nodes = Label(root,text='enter node name')
nodes.place(x=20,y=60)
e_bid=Entry(root,width=20)
e_bid.place(x=150,y=30)
e_nodes=Entry(root,width=20)
e_nodes.place(x=150,y=60)
def insert():
bid=e_bid.get()
nodes= e_nodes.get()
if(bid=="" or nodes==""):
Messagebox.showinfo("insert Status","all fields are required")
else:
con= mysql.connector.connect(host='localhost',user='root',passwd='apsjrc',database='blockchain_managesystem')
cursor=con.cursor()
query="insert into engine values('%s','%s')" % (bid,nodes)
cursor.execute(query)
cursor.execute("commit");
e_bid.delete(0,'end')
e_nodes.delete(0,'end')
Messagebox.showinfo("insert status","inserted successfully");
con.close();
def delete():
if(e_bid.get()==""):
MessageBox.showinfo("delete status","bid is compulsory for delete")
else:
con= mysql.connector.connect(host='localhost',user='root',passwd='apsjrc',database='blockchain_managesystem')
cursor=con.cursor()
cursor.execute("delete from engine where BCEID='" + e_bid.get()+ "'")
cursor.execute("commit");
e_bid.delete(0,'end')
e_nodes.delete(0,'end')
Messagebox.showinfo("delete status","deleted successfully");
con.close();
insert=Button(root,text="insert",command=insert)
insert.place(x=20,y=140)
delete=Button(root,text="delete",command=delete)
delete.place(x=70,y=140)
def design():
root= Tk()
root.geometry("600x300")
root.title("design")
node = Label(root,text='enter node name')
node.place(x=20,y=30)
bp = Label(root,text='enter blockchain protocol name')
bp.place(x=20,y=60)
mp = Label(root,text='enter message protocol name')
mp.place(x=20,y=90)
e_node=Entry(root,width=20)
e_node.place(x=150,y=30)
e_bp=Entry(root,width=20)
e_bp.place(x=150,y=60)
e_mp=Entry(root,width=20)
e_mp.place(x=150,y=90)
def insert():
node=e_node.get()
bp= e_bp.get()
mp=e_mp.get()
if(node=="" or bp==""or mp==""):
Messagebox.showinfo("insert Status","all fields are required")
else:
con= mysql.connector.connect(host='localhost',user='root',passwd='apsjrc',database='blockchain_managesystem')
cursor=con.cursor()
query="insert into design values('%s','%s','%s')" % (node,bp,mp)
cursor.execute(query)
cursor.execute("commit");
e_node.delete(0,'end')
e_bp.delete(0,'end')
e_mp.delete(0,'end')
Messagebox.showinfo("insert status","inserted successfully");
con.close();
def delete():
if(e_node.get()==""):
MessageBox.showinfo("delete status","node is compulsory for delete")
else:
con= mysql.connector.connect(host='localhost',user='root',passwd='apsjrc',database='blockchain_managesystem')
cursor=con.cursor()
cursor.execute("delete from design where node='" + e_node.get()+ "'")
cursor.execute("commit");
e_node.delete(0,'end')
e_bp.delete(0,'end')
e_mp.delete(0,'end')
Messagebox.showinfo("delete status","deleted successfully");
con.close();
insert=Button(root,text="insert",command=insert)
insert.place(x=20,y=140)
delete=Button(root,text="delete",command=delete)
delete.place(x=70,y=140)
def consensus():
root= Tk()
root.geometry("600x300")
root.title("consensus")
node = Label(root,text='enter node name')
node.place(x=20,y=30)
fail = Label(root,text='enter failures')
fail.place(x=20,y=60)
sys = Label(root,text='enter the system')
sys.place(x=20,y=90)
con_algo= Label(root,text='enter the consensus algo')
con_algo.place(x=20,y=120)
e_node=Entry(root,width=20)
e_node.place(x=150,y=30)
e_fail=Entry(root,width=20)
e_fail.place(x=150,y=60)
e_sys=Entry(root,width=20)
e_sys.place(x=150,y=90)
e_con_algo=Entry(root,width=20)
e_con_algo.place(x=150,y=120)
def insert():
node=e_node.get()
fail= e_fail.get()
sys=e_sys.get()
con_algo=e_con_algo.get()
if(node=="" or fail==""or sys==""or con_algo==""):
Messagebox.showinfo("insert Status","all fields are required")
else:
con= mysql.connector.connect(host='localhost',user='root',passwd='apsjrc',database='blockchain_managesystem')
cursor=con.cursor()
query="insert into consensus values('%s','%s','%s','%s')" % (node,fail,sys,con_algo)
cursor.execute(query)
cursor.execute("commit");
e_node.delete(0,'end')
e_fail.delete(0,'end')
e_sys.delete(0,'end')
e_con_algo.delete(0,'end')
Messagebox.showinfo("insert status","inserted successfully");
con.close();
def delete():
if(e_node.get()==""):
MessageBox.showinfo("delete status","node is compulsory for delete")
else:
con= mysql.connector.connect(host='localhost',user='root',passwd='apsjrc',database='blockchain_managesystem')
cursor=con.cursor()
cursor.execute("delete from consensus where node='" + e_node.get()+ "'")
cursor.execute("commit");
e_node.delete(0,'end')
e_fail.delete(0,'end')
e_sys.delete(0,'end')
e_con_algo.delete(0,'end')
Messagebox.showinfo("delete status","deleted successfully");
con.close();
insert=Button(root,text="insert",command=insert)
insert.place(x=20,y=140)
delete=Button(root,text="delete",command=delete)
delete.place(x=70,y=140)
def fault():
    """Open the fault-tolerance-system window with insert/delete forms."""
    root = Tk()
    root.geometry("600x300")
    root.title("fault_tolerance_system")
    node = Label(root, text='enter node name')
    node.place(x=20, y=30)
    factor = Label(root, text='enter factor')
    factor.place(x=20, y=60)
    cat = Label(root, text='enter category')
    cat.place(x=20, y=90)
    red = Label(root, text='enter the redundancy')
    red.place(x=20, y=120)
    e_node = Entry(root, width=20)
    e_node.place(x=150, y=30)
    e_factor = Entry(root, width=20)
    e_factor.place(x=150, y=60)
    e_cat = Entry(root, width=20)
    e_cat.place(x=150, y=90)
    e_red = Entry(root, width=20)
    e_red.place(x=150, y=120)

    def insert():
        """Validate the form and insert one row into fault_tolerance_system."""
        node = e_node.get()
        factor = e_factor.get()
        cat = e_cat.get()
        red = e_red.get()
        if node == "" or factor == "" or cat == "" or red == "":
            Messagebox.showinfo("insert Status", "all fields are required")
        else:
            con = mysql.connector.connect(host='localhost', user='root', passwd='apsjrc',
                                          database='blockchain_managesystem')
            cursor = con.cursor()
            # SECURITY FIX: parameterized query — the old %-formatted SQL
            # string allowed SQL injection through the form fields.
            query = "insert into fault_tolerance_system values(%s,%s,%s,%s)"
            cursor.execute(query, (node, factor, cat, red))
            con.commit()  # equivalent to the old cursor.execute("commit")
            e_node.delete(0, 'end')
            e_factor.delete(0, 'end')
            e_cat.delete(0, 'end')
            e_red.delete(0, 'end')
            Messagebox.showinfo("insert status", "inserted successfully")
            con.close()

    def delete():
        """Delete the fault_tolerance_system row matching the node field."""
        if e_node.get() == "":
            # BUG FIX: was 'MessageBox' — a NameError; the file uses 'Messagebox'.
            Messagebox.showinfo("delete status", "node is compulsory for delete")
        else:
            con = mysql.connector.connect(host='localhost', user='root', passwd='apsjrc',
                                          database='blockchain_managesystem')
            cursor = con.cursor()
            # SECURITY FIX: parameterized instead of concatenated SQL.
            cursor.execute("delete from fault_tolerance_system where node=%s", (e_node.get(),))
            con.commit()
            e_node.delete(0, 'end')
            e_factor.delete(0, 'end')
            e_cat.delete(0, 'end')
            e_red.delete(0, 'end')
            Messagebox.showinfo("delete status", "deleted successfully")
            con.close()

    insert = Button(root, text="insert", command=insert)
    insert.place(x=20, y=140)
    delete = Button(root, text="delete", command=delete)
    delete.place(x=70, y=140)
insert=Button(root,text="insert",command=insert)
insert.place(x=20,y=140)
delete=Button(root,text="delete",command=delete)
delete.place(x=70,y=140)
new=Button(root,text="engine button",command=engine)
new.place(x=120,y=140)
design=Button(root,text="design",command=design)
design.place(x=210,y=140)
con=Button(root,text="consensus",command=consensus)
con.place(x=300,y=140)
fault=Button(root,text="fault",command=fault)
fault.place(x=390,y=140)
clear()
break
else:
login = False
Messagebox.showinfo(title='error',message='login was unsuccessful')
clear()
break
cursor.close();
con.close();
login=Button(window,text='login',fg='green',command=login)
login.grid(row=5,column=0)
exit=Button(window,text='exit',command=window.destroy)
exit.grid(row=6,column=0)
login=Button(root,text='login',command=login)
login.grid(row=7,column=0)
regis=Button(root,text='regis',command=registration)
regis.grid(row=8,column=0)
exit=Button(root,text='exit',command=root.destroy)
exit.grid(row=9,column=0)
# In[16]:
root.mainloop()
# In[ ]:
# In[ ]:
# In[ ]:
|
998,735 | 6df7e8330ee6d95af6db850bd97d475bde3bd2a7 | from PyQt4.QtCore import *
from PyQt4.QtGui import *
COL_NAME = 0
COL_PROBABILITY = 1
COL_SREL_PROBABILITY = 2
COL_OBS_PROBABILITY = 3
COL_INDEX = 4
COL_LOCATION = 5
class Entry:
    """One table row: an object geometry plus the probabilities computed
    for it under the currently selected start/end viewpoints.

    iSloc/iEloc are viewpoint indices; iSlocTopo/iElocTopo are their
    topological counterparts (looked up through m4du.vp_i_to_topo_i).
    """

    def __init__(self, m4du, i, L_mat, SR_mat, num_topologies, iSloc, iEloc):
        """
        :param m4du: model object providing vp_i_to_topo_i, obj_geometries, clusters
        :param i: index of this entry's object geometry
        :param L_mat: per-object probability vector
        :param SR_mat: spatial-relation matrix indexed by
            (start_topo * num_topologies + end_topo), then object index
        :param num_topologies: row stride used to index SR_mat
        :param iSloc: start viewpoint index, or None
        :param iEloc: end viewpoint index, or None
        """
        self.L_mat = L_mat
        self.SR_mat = SR_mat
        self.i = i
        self.m4du = m4du
        self.iSloc = None
        self.iEloc = None
        self.setEndPoints(iSloc, iEloc)
        self.num_topologies = num_topologies

    def setEndPoints(self, iSloc=None, iEloc=None):
        """Update the endpoints; a None argument leaves that endpoint unchanged."""
        # PEP 8 fix: identity comparison with None ('is not None'), not '!='.
        if iSloc is not None:
            self.iSloc = iSloc
            self.iSlocTopo = self.m4du.vp_i_to_topo_i[iSloc]
        if iEloc is not None:
            self.iEloc = iEloc
            self.iElocTopo = self.m4du.vp_i_to_topo_i[iEloc]

    @property
    def geometry_obj(self):
        """The geometry object this row describes."""
        return self.m4du.obj_geometries[self.i]

    @property
    def name(self):
        """Display tag of the geometry."""
        return self.geometry_obj.tag

    @property
    def location(self):
        """(x, y) map coordinates of the geometry's centroid."""
        return self.m4du.clusters.get_map().to_xy(self.geometry_obj.centroid())

    @property
    def has_viewpoints(self):
        """True once both endpoints have been set."""
        return self.iSloc is not None and self.iEloc is not None

    @property
    def l_prob(self):
        """Language/location probability for this object."""
        return self.L_mat[self.i]

    @property
    def o_mat_prob(self):
        # NOTE(review): reads L_mat at the END-viewpoint index despite the
        # 'o_mat' name — confirm this is intentional. Returns None (implicitly)
        # when endpoints are unset, matching the original behavior.
        if self.has_viewpoints:
            return self.L_mat[self.iEloc]

    @property
    def o_prob(self):
        """Combined probability l_prob * sr_prob, or None without endpoints."""
        if self.has_viewpoints:
            return self.l_prob * self.sr_prob
        else:
            return None

    @property
    def sr_prob(self):
        """Spatial-relation probability for the current endpoint pair,
        or None without endpoints."""
        if self.has_viewpoints:
            return self.SR_mat[self.iSlocTopo*self.num_topologies +
                               self.iElocTopo][self.i]
        else:
            return None
class Model(QAbstractTableModel):
    """Qt table model listing one Entry per object geometry, with its
    language / spatial-relation / combined probabilities as columns
    (COL_* constants at module scope define the layout)."""
    def __init__(self, view, m4du):
        QAbstractTableModel.__init__(self)
        self.m4du = m4du
        # Column header text for COL_PROBABILITY; populated by setData().
        self.tagName = None
        self._data = []
        self.view = view
        # Attach this model to the view it drives (replaces any prior model).
        self.view.setModel(self)
    def setData(self, m4du, tagName, L_mat, SR_mat, num_topologies, iSloc, iEloc):
        """Rebuild the row list from fresh probability matrices.

        NOTE(review): this shadows QAbstractTableModel.setData (which has a
        different signature) — presumably intentional; confirm no Qt code
        path calls the base version on this model.
        """
        self.tagName = tagName
        self._data = []
        for i in range(len(L_mat)):
            self._data.append(Entry(m4du, i, L_mat, SR_mat, num_topologies, iSloc, iEloc))
        # reset() is PyQt4 API: tells attached views the whole model changed.
        self.reset()
    def columnCount(self, parent=None):
        # Fixed column set: name, probability, srel, obs, index, location.
        return 6
    def rowCount(self, parent=None):
        return len(self._data)
    def selectSloc(self, idx):
        """Set the start viewpoint on every row and refresh the view."""
        for e in self._data:
            e.setEndPoints(iSloc=idx)
        self.reset()
    def selectEloc(self, idx):
        """Set the end viewpoint on every row and refresh the view."""
        for e in self._data:
            e.setEndPoints(iEloc=idx)
        self.reset()
    def get(self, viewIdx):
        # Rows are addressed by their current (possibly sorted) position.
        return self._data[viewIdx]
    def selectedData(self):
        """Entry behind the view's current selection."""
        return self.get(self.view.currentIndex().row())
    def data(self, idx, role=Qt.DisplayRole):
        """Qt display-role accessor mapping a cell to its Entry field."""
        col = idx.column()
        e = self.get(idx.row())
        if role != Qt.DisplayRole:
            return QVariant()
        if col == COL_NAME:
            return QVariant(e.name)
        elif col == COL_LOCATION:
            return QVariant("%.2f, %.2f" % tuple(e.location))
        elif col == COL_PROBABILITY:
            return QVariant("%e" % e.l_prob)
        elif col == COL_SREL_PROBABILITY:
            # Blank cell until both viewpoints are chosen.
            if e.has_viewpoints:
                return QVariant("%e" % e.sr_prob)
            else:
                return QVariant("")
        elif col == COL_OBS_PROBABILITY:
            if e.has_viewpoints:
                return QVariant("%e" % e.o_prob)
            else:
                return QVariant("")
        elif col == COL_INDEX:
            return QVariant("%d" % e.i)
        else:
            raise ValueError("Bad id: %s" % col)
    def headerData(self, section, orientation, role):
        """Column titles for the horizontal header; empty variant elsewhere."""
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            if section == COL_NAME:
                return QVariant("Name")
            elif section == COL_LOCATION:
                return QVariant("Location")
            elif section == COL_PROBABILITY:
                return QVariant("P[%s]" % self.tagName)
            elif section == COL_OBS_PROBABILITY:
                return QVariant("p_obs")
            elif section == COL_SREL_PROBABILITY:
                return QVariant("p_srel")
            elif section == COL_INDEX:
                return QVariant("srel_mat index")
            else:
                raise ValueError("Bad id: %s" % section)
        else:
            return QVariant()
    def sort(self, col, order):
        """Sort rows in place by the given column, then refresh.

        NOTE(review): sorting the p_obs/p_srel columns compares None values
        when viewpoints are unset — works on Python 2, TypeError on Python 3.
        Unknown columns (e.g. COL_LOCATION) leave the order unchanged.
        """
        if col == COL_PROBABILITY:
            self._data.sort(key=lambda e: e.l_prob)
        elif col == COL_OBS_PROBABILITY:
            self._data.sort(key=lambda e: e.o_prob)
        elif col == COL_SREL_PROBABILITY:
            self._data.sort(key=lambda e: e.sr_prob)
        elif col == COL_INDEX:
            self._data.sort(key=lambda e: e.i)
        elif col == COL_NAME:
            self._data.sort(key=lambda e: e.name)
        if order == Qt.DescendingOrder:
            self._data.reverse()
        self.reset()
|
998,736 | bbf540b77be0b844b6b09c1631ba65d3a389b9b6 | from __future__ import print_function
import datetime
import os.path
import argparse
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import requests, json
from google.oauth2.credentials import Credentials
from datetime import datetime, timedelta
from difflib import SequenceMatcher
SCOPES = ['https://www.googleapis.com/auth/calendar']
class CalendarNotFoundException(Exception):
    """Raised when no Google calendar matches the requested name closely enough."""
    pass
class CalendarChecker:
    """
    UniBo calendar checking tool.

    Authenticates against the Google Calendar API, locates the destination
    calendar by fuzzy name match, and inserts UniBo timetable events into it.
    """
    def __init__(self):
        """
        Inits the script: parses args, loads/refreshes credentials, builds the
        API client and resolves the destination calendar.

        Raises:
            CalendarNotFoundException: if no calendar matches the given name.
        """
        # If modifying these scopes, delete the file token.json.
        self.parse_args()
        self.creds = None
        # The file token.json stores the user's access and refresh tokens, and is
        # created automatically when the authorization flow completes for the first
        # time.
        if os.path.exists('token.json'):
            self.creds = Credentials.from_authorized_user_file('token.json', SCOPES)
        # If there are no (valid) credentials available, let the user log in.
        if not self.creds or not self.creds.valid:
            if self.creds and self.creds.expired and self.creds.refresh_token:
                self.creds.refresh(Request())
            else:
                # BUG FIX: argparse returns a Namespace — use attribute access,
                # not subscripting (self.args["credentials"] raised TypeError).
                flow = InstalledAppFlow.from_client_secrets_file(
                    self.args.credentials, SCOPES)
                self.creds = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open('token.json', 'w') as token:
                token.write(self.creds.to_json())
        self.service = build('calendar', 'v3', credentials=self.creds)
        # BUG FIX: removed the call to self.get_calendars() — no such method
        # exists anywhere in this file (it raised AttributeError);
        # choose_calendar() fetches the calendar list itself.
        self.choose_calendar()

    def parse_args(self):
        """Parse CLI arguments and derive the insertion date window.

        Defaults: start = today, end = start + 5 days.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', '--calendar', dest='calendar_name', type=str,
                            help="Destination calendar name", required=True)
        parser.add_argument('-t', '--credentials', dest='credentials', type=str,
                            help="Google credentials file", required=True)
        # BUG FIX: '%' must be doubled in argparse help strings, otherwise
        # rendering --help raises a formatting error.
        parser.add_argument('-s', '--start', dest='start', type=str,
                            help="Starting insert date, %%d-%%m-%%Y", required=False)
        parser.add_argument('-e', '--end', dest='end', type=str,
                            help="Ending insert date, %%d-%%m-%%Y", required=False)
        self.args = parser.parse_args()
        # BUG FIX: 'datetime' is the CLASS imported via
        # 'from datetime import datetime', so it is datetime.strptime —
        # 'datetime.datetime.strptime' raised AttributeError. Also use
        # Namespace attribute access instead of subscripting.
        if self.args.start:
            self.fromDate = datetime.strptime(self.args.start, "%d-%m-%Y")
        else:
            self.fromDate = datetime.today()
        if self.args.end:
            self.toDate = datetime.strptime(self.args.end, "%d-%m-%Y")
        else:
            self.toDate = self.fromDate + timedelta(days=5)  # By default, we'll use a timedelta of 5 days

    def insert_events(self, url, notExams=None):
        """
        Inserts the events in the calendar.
        :param url: url of the UniBo calendar JSON feed
        :param notExams: course codes (cod_modulo) to skip in calendar adding
        :return:
        """
        # Avoid a mutable default argument; behavior is unchanged for callers.
        if notExams is None:
            notExams = []
        response = json.loads(requests.get(url).text)
        n = len(response)
        i = 0
        # Skip events that start before the requested window. Bounds checks
        # added: the original indexed past the end of the feed (IndexError).
        while i < n and datetime.strptime(response[i]['start'], "%Y-%m-%dT%H:%M:%S") < self.fromDate:
            i += 1
        while i < n:
            jEvent = response[i]
            eventDate = datetime.strptime(jEvent['start'], "%Y-%m-%dT%H:%M:%S")
            if eventDate > self.toDate:  # window is inclusive of toDate
                break
            if jEvent['cod_modulo'] not in notExams:
                location = ''
                desc = ''
                if len(jEvent['aule']) > 0:
                    location = jEvent['aule'][0]['des_indirizzo'].replace(' -', ',')
                    for a in jEvent['aule']:
                        desc += a['des_risorsa'] + ', ' + a['des_piano'] + ' - ' + a['des_ubicazione'] + '\n'
                desc += 'Professor: ' + jEvent['docente']
                if type(jEvent['teams']) is str:
                    desc += '\nTeams: ' + jEvent['teams'] + '\n'
                event = {
                    'summary': jEvent['cod_modulo'] + ' - ' + jEvent['title'],
                    'location': location,
                    'description': desc,
                    'start': {
                        'dateTime': jEvent['start'],
                        'timeZone': 'Europe/Rome',
                    },
                    'end': {
                        'dateTime': jEvent['end'],
                        'timeZone': 'Europe/Rome',
                    },
                    'recurrence': [
                        # 'RRULE:FREQ=DAILY;COUNT=2'
                    ],
                    'attendees': [
                        # {'email': 'lpage@example.com'},
                        # {'email': 'sbrin@example.com'},
                    ],
                    'reminders': {
                        'useDefault': False,
                        'overrides': [
                            {'method': 'popup', 'minutes': 60},
                        ],
                    },
                }
                # if you want to add it to your primary calendar just use calendarId="primary"
                event = self.service.events().insert(calendarId=self.chosen_calendar,
                                                     body=event).execute()
                print('Event created succesfully : %s' % (event.get('htmlLink')))
            i += 1

    def choose_calendar(self):
        """
        Checks the found calendars for match of the given calendar name.
        :raises CalendarNotFoundException: if nothing is similar enough
        """
        page_token = None
        self.calendar_list = self.service.calendarList().list(pageToken=page_token).execute()
        for calendar_list_entry in self.calendar_list['items']:
            # Fuzzy match so minor naming differences still resolve.
            if similar(calendar_list_entry['summary'], self.args.calendar_name) > 0.8:
                self.chosen_calendar = calendar_list_entry['id']
                return
        raise CalendarNotFoundException("No calendar with the provided name was found")
def similar(a, b):
    """
    Compute a similarity score between two strings.
    :param a: First string
    :param b: Second string
    :return: Similarity ratio between 0 and 1
    """
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
# Script entry point: authenticate, resolve the calendar, then import the
# AI master's timetable feed. NOTE(review): this also runs on import —
# consider wrapping in an `if __name__ == "__main__":` guard.
inserter = CalendarChecker()
inserter.insert_events("https://corsi.unibo.it/2cycle/artificial-intelligence/timetable/@@orario_reale_json?")
|
998,737 | 384fcf11682ba2ff913b193520d4d16cf40735bc | from menu import Menu
from coffee_maker import CoffeeMaker
from money_machine import MoneyMachine
# Wire up the three collaborating components: the drink menu, the machine
# that tracks resources and brews, and the cash handler.
menu = Menu()
coffee_maker = CoffeeMaker()
money_machine = MoneyMachine()
# Main serving loop: keep taking orders until the machine is switched off.
is_coffee_machine_on = True
while is_coffee_machine_on:
    user_choice = input(f"What would you like? ({menu.get_items()}): ").lower()
    if user_choice == "off":
        # Maintenance command: shut the machine down.
        is_coffee_machine_on = False
        continue
    if user_choice == "report":
        # Maintenance command: dump resource and cash levels.
        coffee_maker.report()
        money_machine.report()
        continue
    # Anything else is treated as a drink order; silently ignore unknown
    # drinks, insufficient resources, or failed payment.
    ordered_drink = menu.find_drink(user_choice)
    if ordered_drink is None:
        continue
    if coffee_maker.is_resource_sufficient(ordered_drink) and money_machine.make_payment(ordered_drink.cost):
        coffee_maker.make_coffee(ordered_drink)
|
998,738 | 4cb1b661b7a8010dd69378b6e73716852bc4355a | ######################################################################################################################################################################
#REQUIREMENTS for the model:
# Should be able to distinguish safe and unsafe behaviour given the requirements:
# - Look at the number of RSSI pings within ____ time - all data is for 1 min
# - Look at what the phone is placed in
# - Make a approx log graph for how fast covid is contracted - use to figure out the safe distance
######################################################################################################################################################################
#Import packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
import os
os.chdir("combined_csv")
######################################################################################################################################################################
#Beginning configuration for the ML model
#In this model, you input where your phone was, and based on the id where the other person's phone was as well and it chooses one of the following linear regression models
######################################################################################################################################################################
#This is the function for the distance vs time for covid.
def f(x): #This takes in x as the distance and outputs the time limit
return 15.9325 - (14.68626/2**(x/1.342586)) #For this, I plotted a few points: (0, 1), (1, 8), (2, 10), (11, 16) and fitted the curve using an online calculator
#Create Dataframes
model_list = ['none', 'one_purse', 'both_purse', 'one_hand', 'both_hand', 'one_cloth', 'both_cloth'] #This is mainly just for the function at the end and to remember the diff scenarios
#List of all csv files used
all_list = ["both_hand_list.csv", "trans_thin_list.csv", "rec_hand_list.csv", "rec_purse_list.csv", "none_list.csv", "trans_purse_list.csv", "both_purse_list.csv", "rec_thin_list.csv", "trans_hand_list.csv", "both_thin_list.csv"]
#Create function to read each csv file
def read_d(x):
    # Load one measurement CSV, treating the first row as the header.
    return pd.read_csv(x, header=0)
#Based on results from data_analysis.py, there is no sig diff between trans and rec data. This is why the two files are combined here.
none = read_d("none_list.csv")
one_purse = pd.concat([read_d("rec_purse_list.csv"), read_d("trans_purse_list.csv")])
both_purse = read_d("both_purse_list.csv")
one_hand = pd.concat([read_d("rec_hand_list.csv"), read_d("trans_hand_list.csv")])
both_hand = read_d("both_hand_list.csv")
one_cloth = pd.concat([read_d("rec_thin_list.csv"), read_d("trans_thin_list.csv")])
both_cloth = read_d("both_thin_list.csv")
#This calculates the total number of RSSI values in 1 min for each scenario (used in function at the end)
# NOTE(review): the divisor 11 presumably reflects 11 minutes of recording
# per scenario — confirm against the data-collection protocol.
none_1min = none.shape[0]/11
one_purse_1min = one_purse.shape[0]/11
both_purse_1min = both_purse.shape[0]/11
one_hand_1min = one_hand.shape[0]/11
both_hand_1min = both_hand.shape[0]/11
one_cloth_1min = one_cloth.shape[0]/11
both_cloth_1min = both_cloth.shape[0]/11
#This is a list of all the times above, used by index in function at end
#(index order matches model_list)
min_values_list = [none_1min, one_purse_1min, both_purse_1min, one_hand_1min, both_hand_1min, one_cloth_1min, both_cloth_1min]
######################################################################################################################################################################
#Separate data and create model
######################################################################################################################################################################
def split_d(data):
    """Split one scenario dataframe into train/test arrays.

    X holds the RSSI readings, y the measured distances; both are column
    vectors. The split is a fixed 80/20 with random_state=0 so runs are
    reproducible.

    Returns (X_train, X_test, y_train, y_test).
    """
    rssi = data["RSSI"].values.reshape(-1, 1)
    dist = data["DISTANCE"].values.reshape(-1, 1)
    X_tr, X_te, y_tr, y_te = train_test_split(rssi, dist, test_size=0.2, random_state=0)
    return X_tr, X_te, y_tr, y_te
#Split data for each scenario
#(one independent 80/20 train/test split per phone-placement scenario)
X_train_none, X_test_none, y_train_none, y_test_none = split_d(none)
X_train_one_purse, X_test_one_purse, y_train_one_purse, y_test_one_purse = split_d(one_purse)
X_train_both_purse, X_test_both_purse, y_train_both_purse, y_test_both_purse = split_d(both_purse)
X_train_one_hand, X_test_one_hand, y_train_one_hand, y_test_one_hand = split_d(one_hand)
X_train_both_hand, X_test_both_hand, y_train_both_hand, y_test_both_hand = split_d(both_hand)
X_train_one_cloth, X_test_one_cloth, y_train_one_cloth, y_test_one_cloth = split_d(one_cloth)
X_train_both_cloth, X_test_both_cloth, y_train_both_cloth, y_test_both_cloth = split_d(both_cloth)
#Train the models on linear regressor.
#BUG FIX: the original code created ONE LinearRegression instance and called
#.fit() on it seven times. Since fit() returns the estimator itself, every
#name (none_r, one_purse_r, ...) aliased the SAME object, holding only the
#last fit (both_cloth). Each scenario now gets its own estimator.
def _fit_linear(X_train, y_train):
    #Fit and return a fresh LinearRegression for one scenario.
    return LinearRegression().fit(X_train, y_train)
none_r = _fit_linear(X_train_none, y_train_none)
one_purse_r = _fit_linear(X_train_one_purse, y_train_one_purse)
both_purse_r = _fit_linear(X_train_both_purse, y_train_both_purse)
one_hand_r = _fit_linear(X_train_one_hand, y_train_one_hand)
both_hand_r = _fit_linear(X_train_both_hand, y_train_both_hand)
one_cloth_r = _fit_linear(X_train_one_cloth, y_train_one_cloth)
both_cloth_r = _fit_linear(X_train_both_cloth, y_train_both_cloth)
#List of all regression models (index order matches model_list)
regressor_list = [none_r, one_purse_r, both_purse_r, one_hand_r, both_hand_r, one_cloth_r, both_cloth_r]
######################################################################################################################################################################
#Test the accurracy of the model
######################################################################################################################################################################
#Predict test data for each of the scenarios
# NOTE(review): in the original fitting code all *_r names alias one shared
# LinearRegression (each fit() overwrote the last), so every prediction below
# actually used the both_cloth model — verify the fitting section.
y_pred_none = none_r.predict(X_test_none)
y_pred_one_purse = one_purse_r.predict(X_test_one_purse)
y_pred_both_purse = both_purse_r.predict(X_test_both_purse)
y_pred_one_hand = one_hand_r.predict(X_test_one_hand)
y_pred_both_hand = both_hand_r.predict(X_test_both_hand)
y_pred_one_cloth = one_cloth_r.predict(X_test_one_cloth)
y_pred_both_cloth = both_cloth_r.predict(X_test_both_cloth)
#Function to find error for each scenario
def fit_tester(x, a, b):
    """Print the error metrics for one scenario's predictions.

    x -- scenario label to print
    a -- true test-set distances
    b -- predicted distances
    """
    print('\n')
    print(x)
    mae = metrics.mean_absolute_error(a, b)
    mse = metrics.mean_squared_error(a, b)
    print('Mean Absolute Error:', mae)
    print('Mean Squared Error:', mse)
    print('Root Mean Squared Error:', np.sqrt(mse))
# Recorded error metrics for each scenario (values noted at time of writing):
fit_tester('none', y_test_none, y_pred_none) #MSE = 1.7929 RMSE = 1.3389
fit_tester('one_purse', y_test_one_purse, y_pred_one_purse) #MSE = 1.15758 RSME = 1.07591
fit_tester('both_purse', y_test_both_purse, y_pred_both_purse) #MSE = 1.3590 RSME = 1.16576
fit_tester('one_hand', y_test_one_hand, y_pred_one_hand) #MSE = 1.99763 RSME = 1.413377
fit_tester('both_hand', y_test_both_hand, y_pred_both_hand) #MSE = 7.633657 RSME = 2.76092
fit_tester('one_cloth', y_test_one_cloth, y_pred_one_cloth) #MSE = 0.64123 RSME = 0.80077
fit_tester('both_cloth', y_test_both_cloth, y_pred_both_cloth) #MSE = 0.860338 RSME = 0.927544
######################################################################################################################################################################
#Create algorithm that mimics a simplified version of the overall process
######################################################################################################################################################################
def predict_distance(condition_phones, array_RSSI):
    """Classify a contact as safe or unsafe for the given phone scenario.

    condition_phones -- one of the scenario names in model_list
    array_RSSI       -- column vector of RSSI readings for the contact

    Prints and returns "Safe contact" / "Unsafe contact".
    """
    scenario = model_list.index(condition_phones)              # shared index into all the per-scenario lists
    minutes_contacted = array_RSSI.shape[0] / min_values_list[scenario]  # contact duration in minutes
    predictions = regressor_list[scenario].predict(array_RSSI)  # per-sample distance estimates (meters)
    mean_distance = np.mean(predictions)                        # collapse to a single distance
    time_limit = f(mean_distance)                               # safe-exposure limit for that distance
    if minutes_contacted > time_limit:
        print("Unsafe")
        return "Unsafe contact"
    print("Safe")
    return "Safe contact"
#Example:
# Demo invocation: four RSSI samples in the 'none' (no-obstruction) scenario.
predict_distance('none', np.array([[-40], [-42], [-47], [-50]]))
|
998,739 | 2c8cae0c7e86b0fb61ef8bf9f8337ef12304c69a | a=100
print("Hello World")
print("It's Leonard")
print("branch bug1") |
998,740 | 4e708c51e38764f23b9cd20f659752998906d33c | #!/usr/bin/env python
# coding=utf-8
""" inject.py:
Created by b1ueshad0w on 26/12/2016.
"""
import os
import time
import logging
from lib.common import TempDir, extract_app_from_ipa, get_app_executable_path, safe_check_call, PackageType, app2ipa
import shutil
import pkg_resources
import settings
logger = logging.getLogger(__name__ if __name__ != '__main__' else os.path.splitext(os.path.basename(__file__))[0])
logger.setLevel(logging.DEBUG)
INJECTOR_PATH = pkg_resources.resource_filename('tools', 'insert_dylib')
def _inject(bundle_path, dylib_path, injector_path=INJECTOR_PATH, inject_subpath=None):
    """ Inject a dylib into a bundle, or bundle's bundle (e.g. MyApp/Frameworks/AFNetwork.framework
    Cautious: This method will modify app's content.
    :param bundle_path: the origin bundle
    :param dylib_path: filepath of dylib
    :param injector_path: filepath of injector
    :param inject_subpath: Component of the bundle to be injected. If set to None, we will inject bundle itself
    :return: Bool indicating injection success or not
    """
    # Resolve the actual bundle to patch: the app itself, or an embedded
    # component when inject_subpath is given.
    injectee = os.path.join(bundle_path, inject_subpath) if inject_subpath else bundle_path
    if inject_subpath:
        logger.debug('Injecting bundle\'s component: %s %s' % (bundle_path, inject_subpath))
    else:
        logger.debug('Injecting bundle: %s' % (bundle_path,))
    # Preconditions: dylib, target bundle and injector binary must all exist.
    if not os.path.isfile(dylib_path):
        logger.error('Dylib not exist: %s' % dylib_path)
        return False
    if not os.path.isdir(injectee):
        logger.error('Bundle to inject not exist: %s' % injectee)
        return False
    if not os.path.isfile(injector_path):
        logger.error('Injector not exist: %s' % injector_path)
        return False
    executable_path = get_app_executable_path(injectee)
    # shutil.copy(dylib_path, app_path)
    # fixed_dylib_path = '@executable_path/%s' % (os.path.basename(dylib_path))
    # Copy the dylib into the bundle's Frameworks directory (created on demand).
    frameworks_path = os.path.join(injectee, 'Frameworks')
    if not os.path.isdir(frameworks_path):
        os.mkdir(frameworks_path)
    shutil.copy(dylib_path, frameworks_path)
    # Install-name path the injected LC_LOAD_DYLIB command will reference.
    if not inject_subpath:
        fixed_dylib_path = '@executable_path/Frameworks/%s' % (os.path.basename(dylib_path))
    else:
        fixed_dylib_path = os.path.join('@executable_path', inject_subpath, 'Frameworks/%s' % os.path.basename(dylib_path))
    # if creat_flag:
    #     fixed_dylib_path = '@executable_path/Frameworks/%s' % (os.path.basename(dylib_path))
    # else:
    #     fixed_dylib_path = '@rpath/%s' % (os.path.basename(dylib_path))
    logger.debug('Fixed dylib path: %s' % fixed_dylib_path)
    # insert_dylib usage: <injector> <dylib path> <input binary> <output binary>;
    # passing the executable twice rewrites it in place.
    # NOTE(review): paths are not shell-quoted — spaces in any path would break
    # this command; confirm safe_check_call's splitting behavior.
    inject_cmd = '%s %s %s %s' % (injector_path, fixed_dylib_path, executable_path, executable_path)
    if not safe_check_call(inject_cmd):
        return False
    logger.debug('Done.')
    return True
def _inject_framework(bundle_path, framework_to_inject, injector_path=INJECTOR_PATH, inject_subpath=None):
    """ Inject a framework into a bundle, or bundle's bundle (e.g. MyApp/Frameworks/AFNetwork.framework)
    Cautious: This method will modify app's content.
    :param bundle_path: the origin bundle to be injected
    :param framework_to_inject: path of the framework to be injected into the bundle
    :param injector_path: filepath of injector
    :param inject_subpath: Component of the bundle to be injected. If set to None, we will inject bundle itself
    :return: Bool indicating injection success or not
    """
    # Resolve the actual bundle to patch (the app or one of its sub-bundles).
    injectee = os.path.join(bundle_path, inject_subpath) if inject_subpath else bundle_path
    if inject_subpath:
        logger.debug('Injecting bundle\'s component: %s %s' % (bundle_path, inject_subpath))
    else:
        logger.debug('Injecting bundle: %s' % (bundle_path,))
    exec_path_for_inject = get_app_executable_path(framework_to_inject)
    # Preconditions: framework executable, target bundle and injector exist.
    if not os.path.isfile(exec_path_for_inject):
        logger.error('Executable for injection not exist: %s' % exec_path_for_inject)
        return False
    if not os.path.isdir(injectee):
        logger.error('Bundle to inject not exist: %s' % injectee)
        return False
    if not os.path.isfile(injector_path):
        logger.error('Injector not exist: %s' % injector_path)
        return False
    executable_path = get_app_executable_path(injectee)
    # Copy the whole framework into the target's Frameworks directory,
    # replacing any previous copy of the same name.
    host_frameworks_path = os.path.join(injectee, 'Frameworks')
    if not os.path.isdir(host_frameworks_path):
        os.mkdir(host_frameworks_path)
    framework_name = os.path.basename(framework_to_inject)
    dest = os.path.join(host_frameworks_path, framework_name)
    if os.path.exists(dest):
        logger.warning('Framework for injection already exist, will overwrite.')
        shutil.rmtree(dest)
    shutil.copytree(framework_to_inject, dest)
    # Load-command path for the injected framework's executable.
    if not inject_subpath:
        fixed_dylib_path = '@executable_path/Frameworks/%s/%s' % (framework_name, os.path.basename(exec_path_for_inject))
    else:
        # NOTE(review): unlike the branch above, this path omits the framework
        # directory component ('Frameworks/<name>.framework/...') — confirm
        # it resolves correctly for sub-bundle injection.
        fixed_dylib_path = os.path.join('@executable_path', inject_subpath, 'Frameworks/%s' % os.path.basename(exec_path_for_inject))
    logger.debug('Fixed dylib path: %s' % fixed_dylib_path)
    # In-place rewrite: input and output executables are the same file.
    inject_cmd = '%s %s %s %s' % (injector_path, fixed_dylib_path, executable_path, executable_path)
    if not safe_check_call(inject_cmd):
        return False
    logger.debug('Done.')
    return True
def _re_codesign_framework(framework_path, signing_identity):
    """Re-codesign a framework bundle, depth-first over nested frameworks.

    Nested frameworks under <framework>/Frameworks are signed before the
    outer bundle so the outer signature seals the already-valid inner ones.

    :param framework_path: path to the .framework directory
    :param signing_identity: codesign identity string
    :return: True on success, False if any codesign invocation fails,
        None if framework_path does not exist (preserved for compatibility
        with callers that treat a missing path as a no-op).
    """
    if not os.path.exists(framework_path):
        return
    sub_framework_dir = os.path.join(framework_path, 'Frameworks')
    if os.path.exists(sub_framework_dir):
        for sub_framework in os.listdir(sub_framework_dir):
            if not sub_framework.endswith('.framework'):
                continue
            sub_framework_path = os.path.join(sub_framework_dir, sub_framework)
            # BUG FIX: propagate nested failures — the original discarded
            # this return value, so a failed inner signature went unnoticed.
            if _re_codesign_framework(sub_framework_path, signing_identity) is False:
                return False
    _cmd = '/usr/bin/codesign -f -s "%s" %s' % (signing_identity, framework_path)
    if not safe_check_call(_cmd):
        return False
    # BUG FIX: explicit success value. The original fell off the end and
    # returned None, inconsistent with the False failure path.
    return True
def _re_codesign(app_path, signing_identity, provision_path=None):
    """ This method will modify app's content.
    Now support all kinds of bundle (app, framework, dylib) except IPA

    :param app_path: bundle to re-sign in place (.app, .framework or .dylib)
    :param signing_identity: codesign identity string
    :param provision_path: optional provisioning profile to embed (apps only)
    :return: True on success, False if any external command fails
    """
    bundle_type = PackageType.get_type(app_path)
    logger.debug('Re-codesigning %s...' % (bundle_type,))
    # Frameworks and dylibs only need a single plain codesign pass.
    if bundle_type == PackageType.framework or bundle_type == PackageType.dylib:
        _cmd = '/usr/bin/codesign -f -s "%s" %s' % (signing_identity, app_path)
        if not safe_check_call(_cmd):
            return False
        return True
    # Apps: strip the old signature artifacts first.
    code_signature_folder = os.path.join(app_path, '_CodeSignature')
    if os.path.isdir(code_signature_folder):
        shutil.rmtree(code_signature_folder)
    code_signature_file = os.path.join(app_path, 'CodeResources')
    if os.path.isfile(code_signature_file):
        os.remove(code_signature_file)
    app_provision_path = os.path.join(app_path, 'embedded.mobileprovision')
    if provision_path:
        shutil.copy(provision_path, app_provision_path)
    # Extract the entitlements from the embedded provisioning profile into a
    # throwaway plist (timestamped name to avoid collisions between runs).
    entitlement_plist_path = os.path.join('/tmp', 'entitlements%s.plist' % int(time.time()))
    if os.path.isfile(entitlement_plist_path):
        os.remove(entitlement_plist_path)
    # NOTE(review): the '<<<' here-string and '$(...)' are bash-isms — this
    # only works if safe_check_call executes via bash, not plain /bin/sh.
    _cmd = '/usr/libexec/PlistBuddy -x -c "print :Entitlements " /dev/stdin <<< ' \
           '$(security cms -D -i %s) > %s' % (app_provision_path, entitlement_plist_path)
    if not safe_check_call(_cmd):
        return False
    # Enable debugger attachment on the re-signed build.
    _cmd = "/usr/libexec/PlistBuddy -c 'Set :get-task-allow true' %s" % entitlement_plist_path
    if not safe_check_call(_cmd):
        return False
    # Sign embedded frameworks before the outer app bundle.
    # NOTE(review): per-framework failures are not propagated here.
    frameworks_path = os.path.join(app_path, 'Frameworks')
    if os.path.isdir(frameworks_path):
        # _cmd = '/usr/bin/codesign -f -s "%s" %s/*' % (signing_identity, frameworks_path)
        # if not safe_check_call(_cmd):
        #     return False
        for framework in os.listdir(frameworks_path):
            framework_path = os.path.join(frameworks_path, framework)
            _re_codesign_framework(framework_path, signing_identity)
    # Legacy apps ship ResourceRules.plist; pass it through when present.
    rule_file = os.path.join(app_path, 'ResourceRules.plist')
    if os.path.isfile(rule_file):
        _cmd = '/usr/bin/codesign -f -s "%s" ' \
               '--resource-rules %s ' \
               '--entitlements %s %s' % (signing_identity, rule_file, entitlement_plist_path, app_path)
    else:
        _cmd = '/usr/bin/codesign -f -s "%s" ' \
               '--no-strict --entitlements %s %s' % (signing_identity, entitlement_plist_path, app_path)
    if not safe_check_call(_cmd):
        return False
    # Clean up the temporary entitlements plist.
    if os.path.isfile(entitlement_plist_path):
        os.remove(entitlement_plist_path)
    logger.debug('Done.')
    return True
def inject(app_or_ipa, dylib_or_framework, output_path, injector_path=INJECTOR_PATH, inject_subpath=None):
    """Inject a dylib or framework into an app/ipa and write the result.

    :param app_or_ipa: source .app directory or .ipa archive
    :param dylib_or_framework: payload to inject (.dylib file or .framework dir)
    :param output_path: destination; a '.ipa' suffix triggers repackaging,
        otherwise the patched .app is moved there
    :param injector_path: insert_dylib binary to use
    :param inject_subpath: optional sub-bundle of the app to inject instead
    :return: True on success; None or False on failure
    """
    file_name = os.path.basename(app_or_ipa)
    # file_name_without_extension = os.path.splitext(file_name)[0]
    # output_file_name = file_name.replace(file_name_without_extension, file_name_without_extension + '_injected')
    # output_path = os.path.join(to_dir, output_file_name)
    package_type = PackageType.get_type(app_or_ipa)
    if not package_type:
        logger.error('Unknown filetype to inject: %s' % app_or_ipa)
        return
    # Clear any stale output so the move/repackage below cannot collide.
    if os.path.isdir(output_path):
        shutil.rmtree(output_path)
    if os.path.isfile(output_path):
        os.remove(output_path)
    with TempDir() as temp_dir:
        # Work on a scratch copy; never mutate the caller's bundle.
        if package_type == PackageType.app:
            new_app_path = os.path.join(temp_dir, file_name)
            shutil.copytree(app_or_ipa, new_app_path)
        else:
            new_app_path = extract_app_from_ipa(app_or_ipa, temp_dir)
        # Dylibs and frameworks require different copy/injection steps.
        inject_method = _inject if PackageType.get_type(dylib_or_framework) == PackageType.dylib else _inject_framework
        if not inject_method(new_app_path, dylib_or_framework, injector_path, inject_subpath=inject_subpath):
            logger.error('Injection failed.')
            return
        # Repackage as .ipa or move the patched .app into place.
        if output_path.endswith('.ipa'):
            if not app2ipa(new_app_path, output_path):
                return False
        else:
            shutil.move(new_app_path, output_path)
    return True
def re_codesign(app_or_ipa, signing_identity, output_path, provision_path=None):
    """
    Re-codesign an app/ipa (or a bare dylib/framework) and write the result.

    (Docstring corrected: the previous version documented 'to_dir' and
    'output_ipa' parameters that do not exist in this signature.)

    :param app_or_ipa: filepath of app, ipa, dylib or framework
    :param signing_identity: code signing identity (e.g. iPhone Developer: XXX (XXXXX) )
    :param output_path: destination path; a '.ipa' suffix triggers repackaging
    :param provision_path: filepath of mobile provisioning profile
    :return: True on success; None or False on failure
    """
    file_name = os.path.basename(app_or_ipa)
    # file_name_without_extension = os.path.splitext(file_name)[0]
    # output_file_name = file_name.replace(file_name_without_extension, file_name_without_extension + '_resigned')
    # output_path = os.path.join(to_dir, output_file_name)
    package_type = PackageType.get_type(app_or_ipa)
    if not package_type:
        logger.error('Unknown filetype to re-codesign: %s' % app_or_ipa)
        return
    with TempDir() as temp_dir:
        # Apps/ipas are copied/extracted into scratch space; dylibs and
        # frameworks are copied straight to the output and signed in place.
        if package_type == PackageType.app:
            new_app_path = os.path.join(temp_dir, file_name)
            shutil.copytree(app_or_ipa, new_app_path)
        elif package_type == PackageType.ipa:
            new_app_path = extract_app_from_ipa(app_or_ipa, temp_dir)
        elif package_type == PackageType.dylib or package_type == PackageType.framework:
            shutil.copy(app_or_ipa, output_path)
            new_app_path = output_path
        if not _re_codesign(new_app_path, signing_identity, provision_path=provision_path):
            logger.error('Re-codesigning failed.')
            return
        if output_path.endswith('.ipa'):
            if not app2ipa(new_app_path, output_path):
                return False
        else:
            # NOTE(review): in the dylib/framework branch above,
            # new_app_path == output_path, so this moves a path onto
            # itself — verify shutil.move tolerates that here.
            shutil.move(new_app_path, output_path)
    return True
def inject_and_recodesign(app_or_ipa, dylib_or_framework, output_path, provision_path=None, signing_identity=None,
                          injector_path=INJECTOR_PATH, inject_subpath=None):
    """
    Inject a dylib/framework into an app (or IPA) and optionally re-codesign it.

    :param app_or_ipa: filepath of the .app / .ipa / .framework to modify
    :param dylib_or_framework: filepath of the dylib or framework to inject
    :param output_path: destination path; an '.ipa' suffix makes the result be packed as an IPA
    :param provision_path: filepath of mobile provisioning profile (optional)
    :param signing_identity: code signing identity (optional)
    :param injector_path: path of the injection helper binary
    :param inject_subpath: optional sub-path inside the bundle to inject into
    :return: True on success, False if packing to IPA fails, None on any other failure

    Re-codesigning only happens when BOTH provision_path and signing_identity
    are supplied; otherwise the injected bundle is emitted unsigned.
    """
    file_name = os.path.basename(app_or_ipa)
    package_type = PackageType.get_type(app_or_ipa)
    if not package_type:
        logger.error('Unknown filetype to process: %s' % app_or_ipa)
        return
    # Clear a pre-existing output (file or directory) before writing.
    if os.path.exists(output_path):
        shutil.rmtree(output_path) if os.path.isdir(output_path) else os.remove(output_path)
    with TempDir() as temp_dir:
        if package_type == PackageType.app or package_type == PackageType.framework:
            # Work on a copy so the original bundle is never modified.
            new_app_path = os.path.join(temp_dir, file_name)
            shutil.copytree(app_or_ipa, new_app_path)
        else:
            new_app_path = extract_app_from_ipa(app_or_ipa, temp_dir)
        # Pick the injector matching the payload type (dylib vs framework).
        inject_method = _inject if PackageType.get_type(dylib_or_framework) == PackageType.dylib else _inject_framework
        if not inject_method(new_app_path, dylib_or_framework, injector_path, inject_subpath=inject_subpath):
            logger.error('Injection failed.')
            return
        if provision_path and signing_identity:
            if not _re_codesign(new_app_path, signing_identity, provision_path=provision_path):
                logger.error('Re-codesigning failed.')
                return
        if output_path.endswith('.ipa'):
            if not app2ipa(new_app_path, output_path):
                return False
        else:
            shutil.move(new_app_path, output_path)
        return True
def recodesign_framework_recursively(framework_path, signing_identity, output_file_path=None):
    """
    Codesign a framework, signing any nested Frameworks/*.framework bundles first.

    :param framework_path: filepath of the framework (or dylib) to sign
    :param signing_identity: identity passed to /usr/bin/codesign -s
    :param output_file_path: optional copy destination; when given, the copy is
        signed and the original input is left untouched
    :return: True on success, False if any codesign invocation fails
    """
    input_path = framework_path
    if output_file_path:
        # BUG FIX: a .framework bundle is a directory and shutil.copy() raises
        # on directories, so choose the right copier for the input.
        if os.path.isdir(framework_path):
            shutil.copytree(framework_path, output_file_path)
        else:
            shutil.copy(framework_path, output_file_path)
        input_path = output_file_path
    frameworks_dir = os.path.join(input_path, 'Frameworks')
    if os.path.isdir(frameworks_dir):
        # Nested frameworks must be signed (in place) before their parent.
        for framework in os.listdir(frameworks_dir):
            if not framework.endswith('.framework'):
                continue
            if not recodesign_framework_recursively(os.path.join(frameworks_dir, framework), signing_identity):
                return False
    # BUG FIX: quote the target path so paths containing spaces do not break
    # the shell command.
    _cmd = '/usr/bin/codesign -f -s "%s" "%s"' % (signing_identity, input_path)
    if not safe_check_call(_cmd):
        return False
    return True
def set_start_arguments():
    """Parse command-line options and run the inject + re-codesign pipeline."""
    import argparse
    parser = argparse.ArgumentParser()
    # (short flag, long flag, dest, required, help text)
    option_specs = [
        ('-a', '--app', 'app', True, 'filepath of .app or .ipa'),
        ('-d', '--dylib', 'dylib', True, 'filepath of dylib'),
        ('-o', '--output', 'output', True, 'filepath of output'),
        ('-p', '--provision', 'provision', False, 'filepath of mobile provisioning profile'),
        ('-c', '--code_sign', 'code_sign', False, 'code signing identity'),
    ]
    for short_flag, long_flag, dest, required, help_text in option_specs:
        parser.add_argument(short_flag, long_flag, dest=dest, required=required, help=help_text)
    args = parser.parse_args()
    inject_and_recodesign(args.app, args.dylib, args.output, provision_path=args.provision,
                          signing_identity=args.code_sign)
def find_build_app_by_name(name, arch='iphoneos', scheme='Debug', exec_type='app'):
    """
    Locate a built product inside Xcode's DerivedData directory.

    :param name: product name; its DerivedData folder starts with '<name>-'
    :param arch: build destination suffix, e.g. 'iphoneos'
    :param scheme: build configuration, e.g. 'Debug'
    :param exec_type: product extension, e.g. 'app'
    :return: absolute path of the built product
    :raises AssertionError: when zero or multiple matches exist, or the product is missing
    """
    derived_path = f'/Users/{settings.USER}/Library/Developer/Xcode/DerivedData/'
    print(f'derived_path: {derived_path}')
    names = os.listdir(derived_path)
    match_name = list(filter(lambda a: a.startswith(f'{name}-'), names))
    assert match_name, f'no matched product {name}'
    assert len(match_name) == 1, f'more than one matches for product {name}'
    # BUG FIX: the trailing path component was a plain string, so the
    # {scheme}/{arch}/{name}/{exec_type} placeholders were never interpolated;
    # it must be an f-string.
    app_path = os.path.join(derived_path, match_name[0], f'Build/Products/{scheme}-{arch}/{name}.{exec_type}')
    assert os.path.exists(app_path), f'not found target at: {app_path}'
    return app_path
def test_device():
    """Smoke test: inject a dylib into a local .app, re-sign it, and install it on a device."""
    # app = f'/Users/{settings.USER}/Library/Developer/Xcode/DerivedData/MboxWorkSpace-cujajvpdcgbgyzccljoqdpcreodd/Build/Products/VideoFusionInhouseDebug-iphoneos/VideoFusionInhouse.app'
    app = f'/Users/{settings.USER}/Downloads/VideoFusionInhouse.app'
    # dylib = f'/Users/gogle/Library/Developer/Xcode/DerivedData/MilkyWay-goqhobdqcwzjttfuumyxvwihqdiq/Build/Products/Debug-iphoneos/MilkyWay.dylib'
    dylib = f'/Users/gogle/Library/Developer/Xcode/DerivedData/ByteInsight-aonolohymyukylecvuvysmllfuaa/Build/Products/Debug-iphoneos/ByteInsight.dylib'
    output = '/tmp/output.app'
    provision = f'/Users/{settings.USER}/Library/MobileDevice/Provisioning Profiles/{settings.PROVISIONING_NAME}.mobileprovision'
    sign = settings.SIGNING_IDENTITY
    # Positional arguments map to provision_path and signing_identity respectively.
    ret = inject_and_recodesign(app, dylib, output, provision, sign)
    print(ret)
    # Requires libimobiledevice's ideviceinstaller and a connected device.
    cmd = f'ideviceinstaller -i {output}'
    assert os.system(cmd) == 0, 'failed to install app'
if __name__ == '__main__':
    # Runs the hard-coded device smoke test by default; switch to
    # set_start_arguments() for CLI-driven usage.
    # set_start_arguments()
    test_device()
    # path = find_build_app_by_name('VideoFusion')
    # print(path)
|
998,741 | 8ea12f6cef7b9ad15d533d04af5f7b9e988cd03b | import pygame
import numpy as np
from warcaby_kolory import kolory
win_szer, win_dl = 1200, 800
szach_szer, szach_dl = 800, 800
win = pygame.display.set_mode((win_szer,win_dl))
wiersze = kolumny = 8
pole_rozmiar = szach_szer//wiersze
class Szachownica():
    """Checkers board: holds the 8x8 piece layout and draws it with pygame."""

    def __init__(self):
        # 8x8 grid: 0 = empty, 1 = white piece, 2 = red piece.
        self.szachownica_uklad = np.zeros((8,8))
        self.liczba_czerwonych = 12
        self.liczba_bialych = 12
        self.kolejka = kolory["czerwony"]

    def draw(self, win):
        """Paint the empty checkerboard (dark squares over a black background)."""
        win.fill(kolory["czarny"], (0, 0, szach_dl, szach_szer))
        for col in range(wiersze):
            for row in range(col % 2, wiersze, 2):
                square = (pole_rozmiar * col, pole_rozmiar * row, pole_rozmiar, pole_rozmiar)
                pygame.draw.rect(win, kolory["bordowy"], square)

    def rozstawienie(self, win):
        """Place the starting pieces: 1s in rows 0-2 and 2s in rows 5-7."""
        for row in range(wiersze):
            for col in range(kolumny):
                # Pieces only occupy squares of the playable colour.
                if col % 2 != (row + 1) % 2:
                    continue
                if row < 3:
                    self.szachownica_uklad[row, col] = 1
                elif row > 4:
                    self.szachownica_uklad[row, col] = 2

    def rozstawienie_rysuj(self, win):
        """Draw the board and every piece currently on it."""
        self.draw(win)
        for row in range(wiersze):
            for col in range(kolumny):
                cell = self.szachownica_uklad[row, col]
                if cell == 1:
                    Krazek(row, col, kolory["biały"]).draw(win)
                elif cell == 2:
                    Krazek(row, col, kolory["czerwony"]).draw(win)

    def ruch_krazek(self, krazek, wiersz, kolumna):
        """Move a piece to (wiersz, kolumna) by swapping the two board cells."""
        src = (krazek.wiersz, krazek.kolumna)
        dst = (wiersz, kolumna)
        self.szachownica_uklad[src], self.szachownica_uklad[dst] = \
            self.szachownica_uklad[dst], self.szachownica_uklad[src]
        krazek.ruch(wiersz, kolumna)

    def get_krazek(self, wiersz, kolumna):
        """Return a Krazek for the piece at (wiersz, kolumna), or None if empty."""
        cell = self.szachownica_uklad[wiersz, kolumna]
        if cell == 1:
            return Krazek(wiersz, kolumna, kolory["biały"])
        if cell == 2:
            return Krazek(wiersz, kolumna, kolory["czerwony"])
class Krazek():
    """A single checkers piece: board coordinates plus a derived pixel position."""

    def __init__(self, wiersz, kolumna, kolor):
        self.kolor = kolor
        self.wiersz = wiersz
        self.kolumna = kolumna
        self.x = 0
        self.y = 0
        self.pozycja()

    def pozycja(self):
        """Recompute the pixel centre (x, y) from the board coordinates."""
        half_square = pole_rozmiar // 2
        self.x = half_square + self.kolumna * pole_rozmiar
        self.y = half_square + self.wiersz * pole_rozmiar

    def draw(self, win):
        """Render the piece as a filled circle on *win*."""
        pygame.draw.circle(win, self.kolor, (self.x, self.y), pole_rozmiar / 3)

    def ruch(self, wiersz, kolumna):
        """Update the board coordinates and refresh the pixel position."""
        self.wiersz = wiersz
        self.kolumna = kolumna
        self.pozycja()

    def __repr__(self):
        return str((self.wiersz, self.kolumna, self.kolor))
|
998,742 | 67829fe53ea74f05f8d8f0534f01fa189e760c06 | from collections import defaultdict
from Crazy import isprime
from eulertools import primes, big_primes
def g(n):
    """
    Predicate: True when n is prime, or when n == 16*m or n == 4*m with m == 1
    or m prime.

    The multiple-of-16 case must be checked before the multiple-of-4 case
    because the relevant cofactor there is n // 16, not n // 4.
    """
    if n % 16 == 0:
        m = n // 16
    elif n % 4 == 0:
        m = n // 4
    else:
        return isprime(n)
    # Collapses the original `if ...: return True / else: return False`
    # duplication in both branches into a single boolean expression.
    return m == 1 or isprime(m)
def f(n):
    """
    Count values m in (0, n) that occur exactly once as m = y * (4*a - y)
    over the enumerated (a, y) pairs and that also satisfy the predicate g.

    NOTE(review): the intended number-theoretic meaning is inferred from the
    loop algebra below — confirm against the original problem statement.
    """
    # Re[m] counts how many times value m is produced by the enumeration.
    Re = defaultdict(int)
    for a in range(1, (n + 1) // 4 + 2):
        # Progress indicator for long runs.
        if a % 1000 == 0:
            print(a)
        # y = 2*a gives the maximum product 4*a*a; counted separately here.
        if 4 * a * a < n:
            Re[4 * a * a] += 1
        y = 3 * a
        if 3 * a * a >= n:
            # Jump ahead toward the first y whose product drops below n.
            y = 2 * a + int((4 * a * a - n) ** 0.5)
        # Advance until the product y*(4a - y) actually falls below n.
        while (4 * a - y) * y >= n:
            y += 1
        while y < 4 * a:
            m = (4 * a - y) * y
            if 0 < m < n:
                Re[m] += 1
            y += 1
    # Keep only values produced exactly once...
    T = [v for v in Re if Re[v] == 1]
    ## print(T)
    # ...then filter by the primality-based predicate g.
    T = list(filter(g, T))
    ## print(T)
    return len(T)
def f2():
    """
    Segmented-sieve count up to N = 5*10**7, accumulating per-prime
    contributions: two for every prime below the first segment boundary, one
    for primes whose 16x (resp. 4x) multiple stays below N, and one extra for
    primes congruent to 3 mod 4.

    NOTE(review): the exact combinatorial meaning of the `* 2` and the 4x/16x
    windows is inferred from the code — confirm against the problem statement.
    """
    N = 5 * 10 ** 7
    # Base primes up to 7100 seed the segmented sieve (7100**2 > N).
    l = list(primes(7100))
    step = 500000
    re = 0
    # Small primes: every one counts twice (its 4x and 16x multiples fit).
    re += len(l) * 2
    re += len([i for i in l if i % 4 == 3])
    begin = 7100
    while begin + step < N:
        # Progress indicator for long runs.
        print(begin)
        b = list(big_primes(begin, begin + step, l))
        # 16x window: whole segment fits, partially fits, or not at all.
        if (begin + step) * 16 < N:
            re += len(b)
        elif begin * 16 < N:
            re += len([i for i in b if i * 16 < N])
        # 4x window, same three cases.
        if (begin + step) * 4 < N:
            re += len(b)
        elif begin * 4 < N:
            re += len([i for i in b if i * 4 < N])
        re += len([i for i in b if i % 4 == 3])
        begin += step
    # Final partial segment up to N.
    b = list(big_primes(begin, N, l))
    re += len([i for i in b if i % 4 == 3])
    return re
print(f2())
|
998,743 | c2f2e76b021033c61eb0c8652a40330a29498915 |
from singleton import Singleton
from util import group_into
from itertools import takewhile
class Symbol:
    """
    A crude symbol class designed simply to distinguish between symbols and straight strings.
    Following the Common Lisp precedent, symbols are always converted to uppercase when
    constructed.
    """

    def __init__(self, string):
        # Normalize to uppercase (Common Lisp convention).
        self.string = string.upper()

    def __str__(self):
        return self.string

    def __repr__(self):
        return "Symbol(" + repr(self.string) + ")"

    def __eq__(self, other):
        # BUG FIX: comparing against a non-Symbol (plain string, int, None, ...)
        # used to raise AttributeError; returning NotImplemented lets Python
        # fall back to its default handling instead.
        if not isinstance(other, Symbol):
            return NotImplemented
        return self.string == other.string

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Defining __eq__ alone made Symbol unhashable; hash by the normalized
        # string so equal symbols hash equally (usable in sets/dict keys).
        return hash(self.string)
class TokenizeError(Exception):
    """
    Raised for problems found while tokenizing or executing commands.

    Invalid or syntactically malformed commands should be reported through this
    exception (or a subclass of it). Genuine programming mistakes in the code
    itself should keep using the ordinary built-in Python exceptions instead.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class Separator(metaclass=Singleton):
    """
    Singleton marker for a statement separator in the token stream.

    Both semicolons and newlines produce this marker; the two are deliberately
    treated as interchangeable, so no distinction between them survives
    tokenization.
    """

    def __repr__(self):
        return "Separator()"

    # str() and repr() intentionally render identically.
    __str__ = __repr__
def token_assert(obj, type_):
    """
    Assert that *obj* is an instance of *type_*.

    Does nothing on success; raises TokenizeError describing the mismatch
    otherwise.
    """
    if isinstance(obj, type_):
        return
    raise TokenizeError("Tokenizer Error: Expected {}, got {}".format(type_, type(obj)))
def is_wildcard(obj):
    """
    True when *obj* is the wildcard symbol: a Symbol whose string value is
    exactly the single character '*'.
    """
    if not isinstance(obj, Symbol):
        return False
    return obj == Symbol('*')
def is_symbol(obj):
    """
    Returns whether or not the object is an instance of the Symbol class.
    is_symbol(obj) is exactly equivalent to isinstance(obj, Symbol).
    """
    return isinstance(obj, Symbol)
def is_simple_symbol(obj):
    """True when *obj* is a Symbol other than the '*' wildcard."""
    if is_wildcard(obj):
        return False
    return is_symbol(obj)
def tokenize(string):
    """
    Converts a string into a list of substrings, broken by the token separation rules. This function
    does not examine the individual substrings, nor does it make Symbol instances; it merely returns
    a list of appropriately separated substrings. The string is separated at any point containing at
    least one space character, except inside of strings delimited by square brackets []. In square
    brackets, no tokenization is performed, but escaping of backslash sequences is performed instead.
    An opening bracket always begins a new token, and a newline or semicolon always acts as an
    independent token, except in strings.
    """
    # Tokenizer states (fixed: the old comment listed 0/1/2, but the code
    # below actually uses 1/2/3):
    #   1 - Standard state; parsing a normal token
    #   2 - String state; inside a [...] string literal
    #   3 - Backslash state; just consumed a backslash inside a string
    tokens = []
    token = ""
    state = 1

    def push():
        # Flush the accumulated token (if non-empty) into the token list.
        nonlocal token
        if token != "":
            tokens.append(token)
        token = ""
        return True

    def append(c):
        nonlocal token
        token += c
        return True

    def goto(n):
        nonlocal state
        state = n
        return True

    # Dispatch table keyed on (state, character); the '' key is each state's
    # fallback for "any other character". Handlers return True so they can be
    # chained with `and`.
    handlers = {
        (1, ' ' ): lambda c: push(),
        (1, '[' ): lambda c: push() and append('[') and goto(2),
        (1, ';' ): lambda c: push() and append(';') and push(),
        (1, '\n'): lambda c: push() and append(';') and push(),
        (1, '' ): lambda c: append(c),
        (2, ']' ): lambda c: append(']') and push() and goto(1),
        (2, '\\'): lambda c: goto(3),
        (2, '' ): lambda c: append(c),
        (3, '' ): lambda c: append(c) and goto(2),
    }
    for char in string:
        handler = handlers.get((state, char), None) or handlers[(state, '')]
        handler(char)
    push()
    if state != 1:
        if state == 2 or state == 3:
            raise TokenizeError("Tokenizer Error: Unclosed string literal")
        else:
            # If this case occurs, we forgot to account for a new state here. If that
            # happens, fix it!
            raise TokenizeError("Tokenizer Error: Parse ended in state {}, not 1".format(state))
    return tokens
def scan(tokens):
    """
    Convert raw token strings (as produced by tokenize()) into runtime values.

    Yields, per token: Separator() for ';'; the inner text for []-delimited
    strings; an int for all-digit tokens; True/False for the case-insensitive
    words YES and NO; and a Symbol for everything else.
    """
    for raw in tokens:
        if raw == ';':
            # Separators are parsed as simple semicolons.
            yield Separator()
            continue
        if raw[0] == '[' and raw[-1] == ']':
            # Strings arrive still wrapped in their brackets.
            yield raw[1:-1]
            continue
        if raw.isdigit():  # TODO This allows some Unicode characters we don't want allowed here
            # No support for negative integers yet.
            yield int(raw)
            continue
        word = raw.upper()
        if word == 'YES':
            yield True
        elif word == 'NO':
            yield False
        else:
            yield Symbol(raw)
|
998,744 | a30794ff2038105b875d7bb34bbe23a1dfcdda3d | #!/usr/bin/env python3
"""Module containing the MakeNdx class and the command line interface."""
import os
import argparse
from pathlib import Path
from biobb_common.generic.biobb_object import BiobbObject
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_md.gromacs.common import get_gromacs_version
from biobb_md.gromacs.common import GromacsVersionError
class MakeNdx(BiobbObject):
    """
    | biobb_md MakeNdx
    | Wrapper of the `GROMACS make_ndx <http://manual.gromacs.org/current/onlinehelp/gmx-make_ndx.html>`_ module.
    | The GROMACS make_ndx module, generates an index file using the atoms of the selection.

    Args:
        input_structure_path (str): Path to the input GRO/PDB/TPR file. File type: input. `Sample file <https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/make_ndx.tpr>`_. Accepted formats: gro (edam:format_2033), pdb (edam:format_1476), tpr (edam:format_2333).
        output_ndx_path (str): Path to the output index NDX file. File type: output. `Sample file <https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_make_ndx.ndx>`_. Accepted formats: ndx (edam:format_2033).
        input_ndx_path (str) (Optional): Path to the input index NDX file. File type: input. Accepted formats: ndx (edam:format_2033).
        properties (dict - Python dictionary object containing the tool parameters, not input/output files):
            * **selection** (*str*) - ("a CA C N O") Heavy atoms. Atom selection string.
            * **gmx_lib** (*str*) - (None) Path set GROMACS GMXLIB environment variable.
            * **gmx_path** (*str*) - ("gmx") Path to the GROMACS executable binary.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
            * **container_path** (*str*) - (None) Path to the binary executable of your container.
            * **container_image** (*str*) - ("gromacs/gromacs:latest") Container Image identifier.
            * **container_volume_path** (*str*) - ("/data") Path to an internal directory in the container.
            * **container_working_dir** (*str*) - (None) Path to the internal CWD in the container.
            * **container_user_id** (*str*) - (None) User number id to be mapped inside the container.
            * **container_shell_path** (*str*) - ("/bin/bash") Path to the binary executable of the container shell.

    Examples:
        This is a use example of how to use the building block from Python::

            from biobb_md.gromacs.make_ndx import make_ndx
            prop = { 'selection': 'a CA C N O' }
            make_ndx(input_structure_path='/path/to/myStructure.gro',
                     output_ndx_path='/path/to/newIndex.ndx',
                     properties=prop)

    Info:
        * wrapped_software:
            * name: GROMACS MakeNdx
            * version: >5.1
            * license: LGPL 2.1
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl
    """

    def __init__(self, input_structure_path: str, output_ndx_path: str, input_ndx_path: str = None,
                 properties: dict = None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
        super().__init__(properties)

        # Input/Output files
        self.io_dict = {
            "in": {"input_structure_path": input_structure_path, "input_ndx_path": input_ndx_path},
            "out": {"output_ndx_path": output_ndx_path}
        }

        # Properties specific for BB
        self.selection = properties.get('selection', "a CA C N O")

        # Properties common in all GROMACS BB
        self.gmx_lib = properties.get('gmx_lib', None)
        self.gmx_path = properties.get('gmx_path', 'gmx')
        self.gmx_nobackup = properties.get('gmx_nobackup', True)
        self.gmx_nocopyright = properties.get('gmx_nocopyright', True)
        # Flags are appended to the binary path string; the whole string is
        # later used as the command word.
        if self.gmx_nobackup:
            self.gmx_path += ' -nobackup'
        if self.gmx_nocopyright:
            self.gmx_path += ' -nocopyright'
        # self.container_path is set by the BiobbObject parent constructor;
        # only query the local GROMACS version when not running containerized.
        if not self.container_path:
            self.gmx_version = get_gromacs_version(self.gmx_path)

        # Check the properties
        fu.check_properties(self, properties)

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`MakeNdx <gromacs.make_ndx.MakeNdx>` object."""

        # Setup Biobb
        if self.check_restart(): return 0
        self.stage_files()

        # Create command line: pipe the selection (plus 'q' to quit) into the
        # interactive make_ndx prompt.
        self.cmd = ['echo', '-e', '\'' + self.selection + '\\nq' + '\'', '|',
                    self.gmx_path, 'make_ndx',
                    '-f', self.stage_io_dict["in"]["input_structure_path"],
                    '-o', self.stage_io_dict["out"]["output_ndx_path"]
                    ]
        # Pass an existing input index file through, if one was provided.
        if self.stage_io_dict["in"].get("input_ndx_path")\
                and Path(self.stage_io_dict["in"].get("input_ndx_path")).exists():
            self.cmd.append('-n')
            self.cmd.append(self.stage_io_dict["in"].get("input_ndx_path"))

        if self.gmx_lib:
            self.environment = os.environ.copy()
            self.environment['GMXLIB'] = self.gmx_lib

        # Check GROMACS version
        # NOTE(review): the version appears to be encoded as an int (512 for
        # 5.1.2) by get_gromacs_version — confirm in biobb_md.gromacs.common.
        if not self.container_path:
            if self.gmx_version < 512:
                raise GromacsVersionError("Gromacs version should be 5.1.2 or newer %d detected" % self.gmx_version)
            fu.log("GROMACS %s %d version detected" % (self.__class__.__name__, self.gmx_version), self.out_log)

        # create_cmd_line and execute_command
        self.run_biobb()

        # Retrieve results
        self.copy_to_host()

        # Remove temporal files
        self.tmp_files.append(self.stage_io_dict.get("unique_dir"))
        self.remove_tmp_files()

        return self.return_code
def make_ndx(input_structure_path: str, output_ndx_path: str,
             input_ndx_path: str = None, properties: dict = None, **kwargs) -> int:
    """Build a :class:`MakeNdx <gromacs.make_ndx.MakeNdx>` instance and run its
    :meth:`launch() <gromacs.make_ndx.MakeNdx.launch>` method, returning the exit code."""
    block = MakeNdx(input_structure_path=input_structure_path,
                    output_ndx_path=output_ndx_path,
                    input_ndx_path=input_ndx_path,
                    properties=properties, **kwargs)
    return block.launch()
def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(description="Wrapper for the GROMACS make_ndx module.",
                                     formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('-c', '--config', required=False, help="This file can be a YAML file, JSON file or JSON string")

    # Arguments specific to this building block.
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('--input_structure_path', required=True)
    required_args.add_argument('--output_ndx_path', required=True)
    parser.add_argument('--input_ndx_path', required=False)

    args = parser.parse_args()
    # argparse defaults --config to None; `or None` also maps an empty string to None.
    properties = settings.ConfReader(config=args.config or None).get_prop_dic()

    # Delegate to the module-level convenience wrapper.
    make_ndx(input_structure_path=args.input_structure_path,
             output_ndx_path=args.output_ndx_path,
             input_ndx_path=args.input_ndx_path,
             properties=properties)
# Allow running this module directly as a command line tool.
if __name__ == '__main__':
    main()
|
998,745 | e8f550c0204981613901fb12751e98513978bd9a | from django.urls import path
from . import views
# /user_center/
urlpatterns = [
    # User profile / landing page.
    path('', views.user_info, name='user_info'),
    # Form for composing a new article.
    path('new_article/', views.user_write_article, name='write_article'),
    # NOTE(review): no trailing slash here, unlike the other routes — with
    # Django's APPEND_SLASH, '/user_center/article_commit/' will not match this
    # pattern; confirm the slash was omitted intentionally.
    path('article_commit', views.article_commit, name='article_commit'),
]
|
998,746 | a8338e29cf5b2d517ac87845994efa672691fc9d | import gui
class titleScreen:
    """Title screen: animates the game title upward and shows a Start button."""

    def __init__(self):
        # Vertical offset of the title text; grows 2 px per frame up to 50.
        self.textAnimationPosition = 0
        # NOTE(review): not read anywhere in this class — confirm before removing.
        self.buttonOpacity = 0

    def drawTitleScreen(self, screen, font, inGame):
        """Render one frame of the title screen onto *screen*."""
        if self.textAnimationPosition != 50:
            self.textAnimationPosition += 2
        title_surface = font.render('Commercium', True, (255, 255, 255))
        title_rect = title_surface.get_rect()
        title_rect.center = (640 / 2, (480 / 2) - self.textAnimationPosition)
        screen.blit(title_surface, title_rect)
        start_clicked = gui.button(screen, "Start", (640 / 2, (480 / 2) + 50))
        if start_clicked:
            inGame.setInGame(True)
998,747 | 2f55a046ee15d11b05654c2006dde23de5244bcc | import os
import subprocess
import pandas as pd
vcftools="/Users/bowcock_lab/Desktop/Analysis_Softwares/vcftools-master/src/cpp/vcftools"
# NOTE(review): this try/except only wraps the function *definitions* below, so
# it cannot catch errors raised when the functions are later called — confirm
# whether it was meant to wrap the call sites instead.
try:
    def _hwe_cal_(main_dir, file_extension):
        """
        Run `vcftools --hardy` on every file in main_dir ending with
        file_extension (".vcf" or ".vcf.gz"), writing vcftools' stdout to a
        per-file log. Any other extension is silently ignored.
        """
        # Plain and gzipped VCFs differ only in the vcftools input flag;
        # this collapses the two duplicated branches of the original code.
        input_flags = {".vcf.gz": "--gzvcf", ".vcf": "--vcf"}
        input_flag = input_flags.get(file_extension)
        if input_flag is None:
            return
        for f in os.listdir(main_dir):
            if not f.endswith(file_extension):
                continue
            hwe_input = main_dir + "/" + f
            hwe_test_file = hwe_input.replace(".vcf", "_HWE")
            # NOTE(review): hwe_test_file never contains ".hwe" at this point,
            # so the replace below is a no-op and the log path equals
            # hwe_test_file — confirm the intended log file naming.
            logfile = hwe_test_file.replace(".hwe", "_logfile.txt")
            hwe_cmd = [vcftools, input_flag, hwe_input, "--hardy", "--out", hwe_test_file]
            # Close the log handle deterministically once vcftools exits
            # (the original leaked the open file object).
            with open(logfile, 'w') as log_handle:
                hwe_process = subprocess.Popen(hwe_cmd, stdout=log_handle)
                hwe_process.wait()

    def _hwe_vcf_merge_gnomAD_exome(main_dir):
        """
        Merge the last *_VTT.txt variant table found in main_dir with the last
        .hwe results file, recompute the allele frequency from genotype counts,
        derive an odds ratio against the gnomAD exome NFE frequency, and write
        the merged table to *_VTT_final_Merge.xlsx.
        """
        file = os.listdir(main_dir)
        for f in file:
            if f.endswith("_VTT.txt"):
                vtt_file = main_dir + "/" + f
            if f.endswith(".hwe"):
                hwe_file = main_dir + "/" + f
        vtt_df1 = pd.read_csv(vtt_file, sep='\t', low_memory=False, keep_default_na=False)
        hwe_df2 = pd.read_csv(hwe_file, sep='\t', low_memory=False, keep_default_na=False)
        # Keep only POS plus the genotype-count column from the HWE table.
        hwe_df2.drop(hwe_df2.columns[[0, 2, 3, 5, 6, 7]], axis=1, inplace=True)
        merge_file = (pd.merge(vtt_df1, hwe_df2, on='POS', how='left'))
        merge_file['AF'] = pd.to_numeric(merge_file.AF, errors='coerce')
        # This statement should be used for the original Biome file that was provided in rg_bowcoa01 directory
        #merge_file['gnomAD_genome_NFE'] = pd.to_numeric(merge_file.gnomAD_genome_NFE, errors='coerce')
        # This statement should be used when the vcf files have been annotated with new annovar exome database
        merge_file['gnomAD_exome_AF_nfe'] = pd.to_numeric(merge_file.gnomAD_exome_AF_nfe, errors='coerce')
        #merge_file['OR'] = merge_file['AF'] / merge_file['gnomAD_exome_AF_nfe']
        # Allele frequency recomputed from genotype counts: (2*homalt + het) / (2*called).
        merge_file['Re-Calculated_AF'] = (2 * merge_file['HOM-VAR'] + 1 * merge_file['HET']) / (2 * merge_file['NCALLED'])
        merge_file['OR'] = merge_file['Re-Calculated_AF'] / merge_file['gnomAD_exome_AF_nfe']
        merge_output = vtt_file.replace(".txt", "_final_Merge.xlsx")
        merge_file.to_excel(merge_output)

    def _hwe_vcf_merge_gnomAD_genome(main_dir):
        """
        Same as _hwe_vcf_merge_gnomAD_exome but derives the odds ratio against
        the gnomAD *genome* NFE frequency column instead of the exome one.
        """
        file = os.listdir(main_dir)
        for f in file:
            if f.endswith("_VTT.txt"):
                vtt_file = main_dir + "/" + f
            if f.endswith(".hwe"):
                hwe_file = main_dir + "/" + f
        vtt_df1 = pd.read_csv(vtt_file, sep='\t', low_memory=True, keep_default_na=False)
        hwe_df2 = pd.read_csv(hwe_file, sep='\t', low_memory=True, keep_default_na=False)
        hwe_df2.drop(hwe_df2.columns[[0, 2, 3, 5, 6, 7]], axis=1, inplace=True)
        merge_file = (pd.merge(vtt_df1, hwe_df2, on='POS', how='left'))
        merge_file['AF'] = pd.to_numeric(merge_file.AF, errors='coerce')
        # This statement should be used for the original Biome file that was provided in rg_bowcoa01 directory
        #merge_file['gnomAD_genome_NFE'] = pd.to_numeric(merge_file.gnomAD_genome_NFE, errors='coerce')
        # This statement should be used when the vcf files have been annotated with new annovar exome database
        merge_file['gnomAD_genome_NFE'] = pd.to_numeric(merge_file.gnomAD_genome_NFE, errors='coerce')
        merge_file['Re-calculated_AF'] = (2 * merge_file['HOM-VAR'] + 1 * merge_file['HET']) / (2 * merge_file['NCALLED'])
        merge_file['OR'] = merge_file['Re-calculated_AF'] / merge_file['gnomAD_genome_NFE']
        #merge_file['Re-calculated_AF'] = (2 * merge_file['HOM-VAR'] + 1 * merge_file['HET']) / (2 * merge_file['NCALLED'])
        merge_output = vtt_file.replace(".txt", "_final_Merge.xlsx")
        merge_file.to_excel(merge_output)
except Exception as e:
    # BUG FIX: the original message called .format(e) on a string without a "{}"
    # placeholder, silently discarding the exception text (and misspelled the
    # method name as "hew_cal").
    print("An error has occurred while running the hwe_cal method. The error is: {}".format(e))
998,748 | ded67b9873da0708012c9687b26e82aad56e6278 | import matplotlib.pyplot as plt
import wfdb
from wfdb import processing
import numpy as np
# Detect peaks with the gqrs algorithm, then correct the detected peak positions.
def peaks_hr(sig, peak_inds, fs, title, figsize=(20,10), saveto=None):
    """
    Plot the signal with its detected peaks and the derived heart rate.

    :param sig: signal array (first axis is samples)
    :param peak_inds: indices of detected QRS peaks
    :param fs: sampling frequency
    :param title: plot title
    :param figsize: matplotlib figure size
    :param saveto: optional path; when given, the figure is also saved at 600 dpi
    """
    # Compute the instantaneous heart rate series from the peak indices.
    hrs=processing.compute_hr(sig_len=sig.shape[0], qrs_inds=peak_inds, fs=fs)
    N=sig.shape[0]
    fig, ax_left=plt.subplots(figsize=figsize)
    ax_right=ax_left.twinx()
    ax_left.plot(sig, color='#3979f0', label='Signal')
    # Mark the detected peaks.
    # NOTE(review): both the 'rx' format string and marker=/color= keyword
    # arguments are given; the keyword arguments take precedence — confirm the
    # intended marker style.
    ax_left.plot(peak_inds, sig[peak_inds], 'rx', marker='x', color='#8b0000', label='Peak', markersize=12)
    # Heart rate goes on the secondary (right-hand) y-axis.
    ax_right.plot(np.arange(N), hrs, label='Heart rate', color='m', linewidth=2)
    ax_left.set_title(title)
    ax_left.set_xlabel('Time (ms)')
    ax_left.set_ylabel('ECG (mV)', color='#3979f0')
    ax_right.set_ylabel('Heart rate (bpm)', color='m')
    # Match each axis' tick colour to its line colour.
    ax_left.tick_params('y', colors='#3979f0')
    ax_right.tick_params('y', colors='m')
    if saveto is not None:
        plt.savefig(saveto, dpi=600)
    plt.show()
# Load the ECG signal (record '101', first 10000 samples of channel 0, from the
# remote 'mitdb/' PhysioBank directory).
record=wfdb.rdrecord('101', sampfrom=0, sampto=10000, channels=[0], pb_dir='mitdb/')
# Locate the QRS complexes with the gqrs detection algorithm.
qrs_inds=processing.gqrs_detect(sig=record.p_signal[:, 0], fs=record.fs)
# Plot the raw detections.
# NOTE(review): record '101' is loaded but the plot titles say "record 100" — confirm.
peaks_hr(sig=record.p_signal, peak_inds=qrs_inds, fs=record.fs, title='GQRS peak detection on record 100')
# Correct the peaks by snapping them to local maxima.
# NOTE(review): min_bpm is never used below — confirm before removing.
min_bpm=20
max_bpm=230
# Use the highest plausible bpm to bound the search radius (samples per beat).
search_radius=int(record.fs*60/max_bpm)
corrected_peak_inds=processing.correct_peaks(record.p_signal[:, 0], peak_inds=qrs_inds, search_radius=search_radius, smooth_window_size=150)
# Report the raw and corrected peak positions.
print('Uncorrected gqrs detected peak indeices:',sorted(qrs_inds))
print('Corrected gqrs detected peak indices:', sorted(corrected_peak_inds))
peaks_hr(sig=record.p_signal, peak_inds=sorted(corrected_peak_inds), fs=record.fs, title='Corrected GQRS peak detection on record 100')
|
998,749 | cf7f53e5254a9636c09a5dfb7367c393aa5ba56c | import logging
import os
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.contrib.auth.signals import user_logged_in
from django.core.cache import cache
from django.db.models.signals import post_save, pre_save
from django.dispatch.dispatcher import receiver
from eventkit_cloud.jobs.helpers import get_provider_image_dir, get_provider_thumbnail_name
from eventkit_cloud.jobs.models import (
DataProvider,
Job,
JobPermission,
JobPermissionLevel,
MapImageSnapshot,
Region,
RegionalPolicy,
)
from eventkit_cloud.utils.helpers import make_dirs
from eventkit_cloud.utils.image_snapshot import save_thumbnail
from eventkit_cloud.utils.mapproxy import clear_mapproxy_config_cache, get_mapproxy_config_template
logger = logging.getLogger(__name__)
@receiver(post_save, sender=User)
def user_post_save(sender, instance, created, **kwargs):
    """
    Post-save hook for User objects.

    Newly created users are added to the DefaultExportExtentGroup; updates to
    existing users are ignored.
    """
    if not created:
        return
    default_group = Group.objects.get(name="DefaultExportExtentGroup")
    instance.groups.add(default_group)
@receiver(post_save, sender=Job)
def job_post_save(sender, instance, created, **kwargs):
    """
    Post-save hook for Job objects.

    When a Job is first created, grant its owning user ADMIN permission on it.
    """
    if created:
        # Manager.create() already persists the new JobPermission; the original
        # follow-up jp.save() issued a redundant second database write.
        JobPermission.objects.create(
            job=instance,
            content_object=instance.user,
            permission=JobPermissionLevel.ADMIN.value,
        )
# @receiver(pre_delete, sender=MapImageSnapshot)
# def mapimagesnapshot_delete(sender, instance, *args, **kwargs):
# """
# Delete associated file when deleting a MapImageSnapshot.
# """
# delete_from_s3(download_url=instance.download_url)
@receiver(pre_save, sender=DataProvider)
def provider_pre_save(sender, instance: DataProvider, **kwargs):
    """
    This method is executed whenever a DataProvider is created or updated.

    Regenerates the provider's thumbnail from preview_url when the provider is
    new, the preview_url has changed, or no thumbnail exists yet. Thumbnail
    errors are logged but never block saving the provider.
    """
    if instance.preview_url:
        try:
            # First check to see if this DataProvider should update the thumbnail
            # This should only be needed if it is a new entry, or the preview_url has changed,
            is_thumbnail_fresh = True
            try:
                provider = sender.objects.get(uid=instance.uid)
            except sender.DoesNotExist:
                # Brand new provider: no thumbnail can exist yet.
                is_thumbnail_fresh = False
            else:
                # The last preview url doesn't match the current or we still don't have a thumbnail.
                if instance.preview_url != provider.preview_url or instance.thumbnail is None:
                    is_thumbnail_fresh = False
            if not is_thumbnail_fresh:
                provider_image_dir = get_provider_image_dir(instance.uid)
                make_dirs(provider_image_dir)
                # Return a file system path to the image.
                filepath = save_thumbnail(
                    instance.preview_url,
                    os.path.join(provider_image_dir, f"{get_provider_thumbnail_name(instance.slug)}.jpg"),
                )
                # Drop the stale snapshot before attaching the new one.
                if instance.thumbnail:
                    instance.thumbnail.delete()
                instance.thumbnail = MapImageSnapshot.objects.create(file=str(filepath))
                # NOTE(review): instance.save() inside a pre_save handler
                # re-triggers this handler — confirm it cannot recurse or
                # double-save.
                instance.save()
        except Exception as e:
            # Catch exceptions broadly and log them, we do not want to prevent saving provider's if
            # a thumbnail creation error occurs.
            logger.error(f"Could not save thumbnail for DataProvider: {instance.slug}")
            logger.exception(e)
@receiver(post_save, sender=Region)
def region_post_save(sender, instance, **kwargs):
    """Invalidate the cached mapproxy configs whenever a Region is saved."""
    clear_mapproxy_config_cache()
@receiver(post_save, sender=RegionalPolicy)
def regional_policy_post_save(sender, instance, **kwargs):
    """Invalidate the cached mapproxy configs whenever a RegionalPolicy is saved."""
    clear_mapproxy_config_cache()
@receiver(user_logged_in)
def clear_user_mapproxy_config(sender, user, request, **kwargs):
    """On login, delete the user's cached mapproxy config template for every provider."""
    # NOTE(review): this clears the cache only when
    # REGIONAL_JUSTIFICATION_TIMEOUT_DAYS is falsy/unset — confirm the
    # condition is not inverted.
    if not settings.REGIONAL_JUSTIFICATION_TIMEOUT_DAYS:
        for provider in DataProvider.objects.all():
            cache.delete(get_mapproxy_config_template(provider.slug, user=user))
|
998,750 | 7667335d86ae823781a2a99da9ed085424d626bf | import sys
# input file
# Redirect stdin so input() reads from input.txt (competitive-programming style).
f = open('input.txt', 'r')
sys.stdin = f
l = int(input())  # length of the segment [0, l]
n = int(input())  # declared count of positions (the list itself is on the next line)
# Convert each position once, instead of calling int(i) repeatedly inside the loop.
positions = [int(tok) for tok in input().split()]
a_min = 0
a_max = 0
for pos in positions:
    # a_min: largest distance-to-the-nearer-end over all positions;
    # a_max: largest distance-to-the-farther-end over all positions.
    a_min = max(a_min, min(pos, l - pos))
    a_max = max(a_max, max(pos, l - pos))
print(a_min)
print(a_max)
|
998,751 | 0e1287ad650e7da70c8e4fe78b3ab4539f2ff5b4 | import numpy as np
import sys
import pdb
from MapReader import MapReader
from MotionModel import MotionModel
from SensorModel import SensorModel
from Resampling import Resampling
from matplotlib import pyplot as plt
from matplotlib import figure as fig
import time
def visualize_map(occupancy_map):
    """Open an interactive matplotlib window showing the occupancy grid (800x800 view)."""
    fig = plt.figure()
    # plt.switch_backend('TkAgg')
    mng = plt.get_current_fig_manager(); # mng.resize(*mng.window.maxsize())
    # Interactive mode so later scatter calls update the same window.
    plt.ion(); plt.imshow(occupancy_map, cmap='Greys'); plt.axis([0, 800, 0, 800]);
def visualize_timestep(X_bar, tstep):
    """Scatter the particle set for one timestep on the current map figure, then erase it."""
    # Particle x/y are divided by 10 to land on map pixels — presumably the map
    # resolution is 10 (cm per cell); TODO confirm against MapReader.
    x_locs = X_bar[:,0]/10.0
    y_locs = X_bar[:,1]/10.0
    scat = plt.scatter(x_locs, y_locs, c='r', marker='o')
    plt.pause(0.00001)
    # Remove the scatter so the next timestep draws on a clean map.
    scat.remove()
def init_particles_random(num_particles, occupancy_map):
    """
    Draw a uniformly random particle set in the world frame.

    Returns a (num_particles, 4) array whose rows are [x, y, theta, weight],
    with x in [3000, 7000], y in [0, 7000], theta in [-3.14, 3.14] and uniform
    weights summing to one. (occupancy_map is not consulted here.)
    """
    # Keep the original draw order (y, x, theta) so seeded runs reproduce.
    y0_vals = np.random.uniform(0, 7000, (num_particles, 1))
    x0_vals = np.random.uniform(3000, 7000, (num_particles, 1))
    theta0_vals = np.random.uniform(-3.14, 3.14, (num_particles, 1))

    # Equal initial weights normalized to sum to one.
    uniform_weights = np.ones((num_particles, 1), dtype=np.float64) / num_particles

    return np.hstack((x0_vals, y0_vals, theta0_vals, uniform_weights))
def init_particles_freespace(num_particles, occupancy_map):
    """
    Initialize [x, y, theta] positions in world_frame for all particles,
    sampling positions only from free cells of the occupancy map.

    BUG FIX: the original stub returned the undefined name X_bar_init and
    raised NameError when called.

    Returns a (num_particles, 4) array of [x, y, theta, weight] rows with
    uniform weights summing to one.
    """
    # Free space is taken to be cells whose occupancy value is exactly 0.
    # NOTE(review): confirm the free-cell encoding against MapReader.
    free_rows, free_cols = np.where(occupancy_map == 0)
    picks = np.random.randint(0, len(free_rows), num_particles)
    # Map resolution assumed to be 10 (world units per cell), matching the /10.0
    # scaling used by visualize_timestep — TODO confirm.
    x0_vals = (free_cols[picks] * 10.0).reshape(num_particles, 1)
    y0_vals = (free_rows[picks] * 10.0).reshape(num_particles, 1)
    theta0_vals = np.random.uniform(-3.14, 3.14, (num_particles, 1))

    # Uniform initial weights.
    w0_vals = np.ones((num_particles, 1), dtype=np.float64) / num_particles

    X_bar_init = np.hstack((x0_vals, y0_vals, theta0_vals, w0_vals))
    return X_bar_init
def main():
    """
    Particle-filter main loop over a robot log.

    Reads odometry ("O") and laser ("L") records, propagates each particle
    through the motion model, weights particles with the sensor model on laser
    records, and finally plots the accumulated trajectory over the map.
    """
    src_path_map = '../data/map/wean.dat'
    src_path_log = '../data/log/robotdata1.log'

    map_obj = MapReader(src_path_map)
    occupancy_map = map_obj.get_map()
    logfile = open(src_path_log, 'r')

    motion_model = MotionModel()
    sensor_model = SensorModel(occupancy_map)
    # NOTE(review): resampler is constructed but never used below — confirm.
    resampler = Resampling()

    num_particles = 1
    #X_bar = np.transpose(np.array([5.75300000e+03, 1.71200000e+03, -4.57011189e-01]))
    X_bar = init_particles_random(num_particles, occupancy_map)
    print(X_bar.shape)
    vis_flag = 1

    first_time_idx = True
    x_est_odom = []
    y_est_odom = []
    for time, line in enumerate(logfile):
        meas_type = line[0] # L : laser scan measurement, O : odometry measurement
        # NOTE(review): np.fromstring on text is deprecated in newer NumPy in
        # favour of np.fromstring's replacement np.fromiter/np.fromstring(sep=...) behaviour — confirm the NumPy version in use.
        meas_vals = np.fromstring(line[2:], dtype=np.float64, sep=' ') # convert measurement values from string to double
        odometry_robot = meas_vals[0:3] # odometry reading [x, y, theta] in odometry frame
        time_stamp = meas_vals[-1]
        if (meas_type == "L"):
            odometry_laser = meas_vals[3:6] # [x, y, theta] coordinates of laser in odometry frame
            ranges = meas_vals[6:-1] # 180 range measurement values from single laser scan
        if (first_time_idx):
            # First record only seeds u_t0; no particle update yet.
            u_t0 = odometry_robot
            first_time_idx = False
            continue
        X_bar_new = np.zeros( (num_particles,4), dtype=np.float64)
        u_t1 = odometry_robot
        flag=0
        for m in range(0, num_particles):
            #print("First",X_bar.shape)
            # NOTE(review): this reads X_bar[0] instead of X_bar[m]; with
            # num_particles > 1 every particle would be propagated from
            # particle 0 — confirm before scaling up the particle count.
            x_t0 = X_bar[0][0:3]
            #print(x_t0.shape)
            x_t1 = motion_model.update(u_t0, u_t1, x_t0)
            print("1---------",x_t1)
            #input()
            if (meas_type == "L"):
                # Laser record: weight the propagated particle by the sensor model.
                z_t = ranges
                #w_t=1
                w_t = sensor_model.beam_range_finder_model(z_t, x_t1)
                # flag=1;
                # break
                # w_t = 1/num_particles
                print("2-----------------",X_bar_new)
                X_bar_new[m,:] = np.hstack((x_t1, w_t))
            else:
                # Odometry-only record: carry the previous weight forward.
                X_bar_new[m,:] = np.hstack((x_t1, X_bar[m,3]))
                print("3-------------------",X_bar_new)
                # NOTE(review): this duplicates the assignment above — confirm
                # it is not a leftover from debugging.
                X_bar_new[m,:] = np.hstack((x_t1, X_bar[m,3]))
            #print("Second",x_t1.shape)
            #print("Threee",X_bar_new.shape)
        #if(flag==1):
            #break
        print("4-------------------------",X_bar_new)
        X_bar = X_bar_new
        print("5--------------------------",X_bar)
        u_t0 = u_t1
        # Track particle 0's trajectory for the final plot.
        x_est_odom.append(X_bar[0][0])
        y_est_odom.append(X_bar[0][1])
        plt.imshow(occupancy_map, cmap='Greys')
        #plt.show()
        #plt.subplot(1,2,1)
        plt.draw()
    # After the log is exhausted, draw the accumulated trajectory.
    plt.subplot(1,2,1)
    plt.plot(x_est_odom, y_est_odom)
    plt.show()
|
998,752 | dbe7110b0573d183fd66ae16f867978a35e7daae | # pylint: disable=consider-using-with
import getpass
import grp
import logging
import os
import subprocess
from contextlib import contextmanager
import sys
from retry import retry
from skipper import utils
def get_default_net():
    """Pick the default container network driver for this platform.

    'host' networking only works on Linux hosts; Docker Desktop for Mac /
    Windows (and Docker EE for Windows Server) do not support it, so those
    platforms fall back to the 'bridge' driver.
    """
    if sys.platform in ('darwin', 'win32'):
        return 'bridge'
    return 'host'
# pylint: disable=too-many-arguments
def run(command, fqdn_image=None, environment=None, interactive=False, name=None, net=None, publish=(), volumes=None,
        workdir=None, use_cache=False, workspace=None, env_file=(), stdout_to_stderr=False):
    """Run `command`: directly on the host, or nested in `fqdn_image` if given.

    Returns the exit code of the spawned process.
    """
    net = net or get_default_net()
    if fqdn_image is None:
        return _run(command, stdout_to_stderr=stdout_to_stderr)
    return _run_nested(fqdn_image, environment, command, interactive, name, net, publish, volumes,
                       workdir, use_cache, workspace, env_file)
def _run(cmd_args, stdout_to_stderr=False):
    """Execute the container runtime with `cmd_args`; return its exit code.

    With stdout_to_stderr=True the child's stdout is redirected to our stderr.
    """
    logger = logging.getLogger('skipper')
    cmd = [utils.get_runtime_command()] + list(cmd_args)
    logger.debug(' '.join(cmd))
    popen_kwargs = {'stdout': sys.stderr} if stdout_to_stderr else {}
    proc = subprocess.Popen(cmd, **popen_kwargs)
    proc.wait()
    return proc.returncode
# pylint: disable=too-many-locals
# pylint: disable=too-many-arguments
def _run_nested(fqdn_image, environment, command, interactive, name, net, publish, volumes, workdir, use_cache, workspace, env_file):
    """Assemble the full `<runtime> run ...` argument list to run `command`
    inside `fqdn_image`, then execute it (ensuring the network exists).

    Returns the container process exit code.
    """
    cwd = os.getcwd()
    if workspace is None:
        # Default workspace: parent directory of the current project.
        workspace = os.path.dirname(cwd)
    homedir = os.path.expanduser('~')
    cmd = ['run']
    if interactive:
        cmd += ['-i']
        cmd += ['-e', 'SKIPPER_INTERACTIVE=True']
    if name:
        cmd += ['--name', name]
    cmd += ['-t']
    # KEEP_CONTAINERS in the caller's env keeps the container for debugging;
    # otherwise it is removed on exit.
    if os.environ.get("KEEP_CONTAINERS", False):
        cmd += ['-e', 'KEEP_CONTAINERS=True']
    else:
        cmd += ['--rm']
    for cmd_limit in utils.SKIPPER_ULIMIT:
        cmd += cmd_limit
    cmd += ['--privileged']
    cmd = handle_networking(cmd, publish, net)
    for _file in env_file:
        cmd += ['--env-file', _file]
    environment = environment or []
    for env in environment:
        cmd += ['-e', env]
    # Propagate the invoking user's identity into the container.
    user = getpass.getuser()
    user_id = os.getuid()
    cmd += ['-e', f'SKIPPER_USERNAME={user}']
    cmd += ['-e', f'SKIPPER_UID={user_id}']
    cmd += ['-e', f'HOME={homedir}']
    cmd += ['-e', f'CONTAINER_RUNTIME_COMMAND={utils.get_runtime_command()}']
    if utils.get_runtime_command() == "docker":
        try:
            docker_gid = grp.getgrnam('docker').gr_gid
            cmd += ['-e', f'SKIPPER_DOCKER_GID={docker_gid}']
        except KeyError:
            # No 'docker' group on this host — skip the gid propagation.
            pass
    if utils.get_runtime_command() == "podman":
        cmd += ['--group-add', 'keep-groups']
    if use_cache:
        cmd += ['-e', 'SKIPPER_USE_CACHE_IMAGE=True']
    cmd = handle_volumes_bind_mount(cmd, homedir, volumes, workspace)
    cmd = handle_workdir(cmd, cwd, workdir)
    cmd += ['--entrypoint', '/opt/skipper/skipper-entrypoint.sh']
    cmd += [fqdn_image]
    # The whole user command is passed as one argument to the entrypoint.
    cmd += [' '.join(command)]
    with _network(net):
        ret = _run(cmd)
    return ret
def handle_workdir(cmd, cwd, workdir):
    """Append the container working-directory flag: `workdir` if set, else `cwd`."""
    cmd += ['-w', workdir if workdir else cwd]
    return cmd
def handle_networking(cmd, publish, net):
    """Append port-publish (`-p`) and network (`--net`) flags to the command."""
    for mapping in publish or ():
        cmd += ['-p', mapping]
    if net is not None:
        cmd += ['--net', net]
    return cmd
def handle_volumes_bind_mount(docker_cmd, homedir, volumes, workspace):
    """Append `-v` bind-mount flags for the user-requested volumes plus the
    standard skipper mounts (.netrc, .gitconfig, docker config, workspace,
    entrypoint script, docker socket, /var/lib/osmosis where present).

    Raises ValueError for a volume entry without a ':'.  Returns the extended
    command list.
    """
    volumes = volumes or []
    volumes.extend([f'{homedir}/.netrc:{homedir}/.netrc:ro',
                    f'{homedir}/.gitconfig:{homedir}/.gitconfig:ro',
                    f'{homedir}/.docker/config.json:{homedir}/.docker/config.json:ro'])
    # required for docker login (certificates)
    if os.path.exists('/etc/docker'):
        volumes.append('/etc/docker:/etc/docker:ro')
    if utils.get_runtime_command() == utils.PODMAN:
        # Podman: mount socket/osmosis only if they exist on the host.
        volumes.extend([
            f'{workspace}:{workspace}:rw',
            f'{utils.get_extra_file("skipper-entrypoint.sh")}:/opt/skipper/skipper-entrypoint.sh:rw',
        ])
        if os.path.exists('/var/run/docker.sock'):
            volumes.append('/var/run/docker.sock:/var/run/docker.sock:rw')
        if os.path.exists('/var/lib/osmosis'):
            volumes.append('/var/lib/osmosis:/var/lib/osmosis:rw')
    else:
        volumes.extend([
            f'{workspace}:{workspace}:rw',
            '/var/run/docker.sock:/var/run/docker.sock:rw',
            f'{utils.get_extra_file("skipper-entrypoint.sh")}:/opt/skipper/skipper-entrypoint.sh',
        ])
        # Will fail on Mac
        if os.path.exists('/var/lib/osmosis'):
            volumes.append('/var/lib/osmosis:/var/lib/osmosis:rw')
    for volume in volumes:
        if ":" not in volume:
            raise ValueError(f"Volume entry is badly-formatted - {volume}")
        # For OSX, map anything in /var/lib or /etc to /private
        if sys.platform == 'darwin':
            if volume.startswith('/etc/') or volume.startswith('/var/lib/'):
                volume = '/private' + volume
        # if part of host directory is empty, skipping this mount
        host_path = volume.split(":")[0]
        if not host_path:
            continue
        if not create_vol_localpath_if_needed(host_path.strip()) and utils.get_runtime_command() == utils.PODMAN:
            logging.warning("Mount source %s doesn't exist and it couldn't be created by skipper, "
                            "this will cause Podman to fail, ignoring volume mount %s to prevent "
                            "podman failure", host_path, volume)
        else:
            docker_cmd += ['-v', volume]
    return docker_cmd
def create_vol_localpath_if_needed(host_path):
    """Ensure the host side of a volume mount exists; return False only when
    it is missing and could not be created.

    Special cases: a missing ~/.gitconfig is created as an empty file and a
    missing docker/config.json as a file containing '{}' — skipper needs
    those as files, not directories.  Any other missing path is created as a
    directory with the invoking user's permissions (docker would otherwise
    create it root-owned, leaving the container unable to write to it).
    """
    if not os.path.exists(host_path):
        if ".gitconfig" in host_path:
            utils.create_path_and_add_data(full_path=host_path, data="", is_file=True)
        elif "docker/config.json" in host_path:
            utils.create_path_and_add_data(full_path=host_path, data="{}", is_file=True)
        else:
            try:
                os.makedirs(host_path)
            except OSError:
                # No permission to create it — let docker do so (root-owned).
                return False
    return True
@contextmanager
def _network(net):
    """Ensure network `net` exists for the duration of the with-block.

    Non-docker runtimes and pre-existing networks are left untouched;
    otherwise the network is created on entry and destroyed on exit.
    """
    needs_setup = utils.get_runtime_command() == "docker" and not _network_exists(net)
    if not needs_setup:
        yield
        return
    _create_network(net)
    yield
    _destroy_network(net)
def _create_network(net):
    """Create the container network named `net` via the runtime CLI."""
    logging.debug("Creating network %s", net)
    utils.run_container_command(['network', 'create', net])
# Retried because removal can transiently fail while containers detach.
@retry(delay=0.1, tries=10)
def _destroy_network(net):
    """Remove the container network named `net` via the runtime CLI."""
    logging.debug("Deleting network %s", net)
    utils.run_container_command(['network', 'rm', net])
def _network_exists(net):
    """Return True if a network named `net` shows up in `network ls` output."""
    listing = utils.run_container_command(['network', 'ls', '-f', f'NAME={net}'])
    return net in listing
|
998,753 | 7d7710fbe04764ff2a88c3f796becb9ec137bf50 | #!/usr/bin/env python
import socket,traceback
# UDP responder (Python 2): waits for datagrams on port 12345 and replies
# 'ok' to each sender until interrupted with Ctrl-C.
host='0.0.0.0'
port = 12345
if __name__ == '__main__':
    ret = True
    # Datagram socket; allow quick rebinding and broadcast senders.
    s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
    s.setsockopt(socket.SOL_SOCKET,socket.SO_BROADCAST,1)
    s.bind((host,port))
    try:
        while ret:
            print "Listen on the port 12345......"
            # NOTE(review): 'sock' is actually the received payload, not a
            # socket object — recvfrom returns (data, address).
            sock, addr = s.recvfrom(1024)
            print "Receive data (%s) from %s" % (sock, addr)
            s.sendto('ok', addr)
    except KeyboardInterrupt:
        # Ctrl-C stops the loop; the socket is closed in finally either way.
        ret = False
    finally:
        s.close()
|
998,754 | f30596fc7ec4f11eaa0f6cc790dc6b4b48529ee3 | import ConfigParser, os, sys
# Rewrite buildout.cfg in place, setting [django] settings to the value
# passed as the first command-line argument (Python 2 / ConfigParser).
config = ConfigParser.ConfigParser()
config.read(['buildout.cfg'])
config.set('django', 'settings', sys.argv[1])
with open('buildout.cfg', 'wb') as of:
    config.write(of)
|
998,755 | f2a6b5d20b935da935ad95c54258e5e0e8a311d1 | #!/usr/bin/env python
import sys,os,re
import copy
import fnmatch
from myfnmatch import re_parts, interleave #, format2re
from myformatter import fmt,fstring,template_replace,template_replace_all,template_fields,format2re
import numpy as np
from glob import glob
from mds import Metadata
from mslice import MSlice
import oj.num
debug = False
#def template2re(tmpl):
# """ given a template with both {...} and glob wildcards,
# generates a regexp that will match a result of glob and return
# a group for each {...}
# """
# parts = re.split(r'\{[^\}]*\}', tmpl)
# # turn into regexps without trailing '$'
# regexps = [ fnmatch.translate(s)[:-1] for s in parts ]
# regexp = re.compile(r'(.*)'.join(regexps)) + '$'
# return regexp
class VariableData(object):
    """Backing store for one on-disk MDS variable (Python 2).

    Resolves the filename template `tmpl` — which may contain an `{it}`
    iteration field and per-tile `.XXX.YYY` suffixes — into a list of data
    files, reads global shape/dtype information from `.meta` files (or from
    explicit `dtype`/`shape`/`tshape` arguments), and builds the tile index
    used by Variable to read slices lazily.
    """
    def __init__(self, tmpl, its=None, astype=None, fill=0, dtype=None, shape=None, tshape=None, ncs=None, dt=0, mmap=True, debug=False, quiet=False, nblank=0):
        """Resolve files for `tmpl` and record layout metadata.

        Raises IOError when no matching data/meta files are found.
        """
        self.usemmap = mmap
        # Does the template contain an {it...} iteration field?
        self.hasits = re.search(r'\{it[:\}]', tmpl) is not None
        if self.hasits and its is None:
            # Discover available iterations by globbing with {it} wildcarded.
            parts = re.split(r'\{it[^\}]*\}', tmpl)
            base = '*'.join(parts)
            for suf in ['.001.001.data', '.data', '.001.001.data.gz', '.data.gz']:
                print base + suf
                datafiles = glob(base + suf)
                if len(datafiles) != 0:
                    break
            else:
                sys.stderr.write('file not found: ' + base + suf + '\n')
                raise IOError
            its = []
            parts[-1] += suf
            # turn into regexps without trailing '$'
            regexps = [ re.sub(r'(\\Z\(\?ms\))?\$?$','',fnmatch.translate(s)) for s in parts ]
            try:
                regexp = re.compile(r'(.*)'.join(regexps) + '$')
            except:
                print 're:' + r'(.*)'.join(regexps) + '$'
                raise
            for datafile in datafiles:
                m = regexp.match(datafile)
                if m:
                    it = m.group(1)
                    its.append(it)
                else:
                    print 'file does not match:', datafile, r', (.*)'.join(regexps) + '$'
            its.sort()
            self.filedicts = np.array([ {'it':it} for it in its ])
            # choose first for tile search
            base = tmpl.format(it=its[0])
        else:
            self.filedicts = np.array([{}])
            base = tmpl
        self.its = its
        if debug: print 'its:',its
        if dtype is not None and shape is not None and tshape is None:
            # global files
            self.tiled = False
            self.metadata = None
            self.shape = shape
            self.nrec = 1
            self.tdtype = np.dtype(dtype)
            self.tny,self.tnx = shape[-2:]
        elif dtype is not None and tshape is not None and shape is None:
            # cubed sphere
            self.tiled = True
            self.metadata = None
            self.nrec = 1
            self.tdtype = np.dtype(dtype)
            self.tny,self.tnx = tshape[-2:]
            if glob(base + '.001.001.data'):
                suf = ''
            else:
                suf = '.gz'
            globpatt = base + '.*.*.data' + suf
            tmplre,tmplparts = re_parts(tmpl + '.*.*.data' + suf)
            basere,baseparts = re_parts(base + '.*.*.data' + suf)
            datafiles = glob(globpatt)
            datafiles.sort()
            self.ntile = len(datafiles)
            # Global grid inferred from tile count, assuming 6 cube faces.
            ny = int(np.sqrt(self.tnx*self.tny*self.ntile/6))
            nx = 6*ny
            self.shape = tshape[:-2] + (ny,nx)
            self.ntx = nx/self.tnx
            self.nty = ny/self.tny
            self.files = []
            # blank tiles have index -1
            self.itile = np.zeros((self.nty,self.ntx), int) - 1
            for itile,datafile in enumerate(datafiles):
                m = basere.match(datafile)
                g = m.groups()
                datatmpl = interleave(g,tmplparts)
                # in case some files are not gzipped
                datatmpl = re.sub(r'\.gz$','',datatmpl)
                self.files.append(datatmpl)
                _,it = divmod(itile,self.ntx/6)
                f,jt = divmod(_,self.nty)
                it += self.ntx/6*f
                self.itile[jt,it] = itile
        else:
            # metafiles
            globpatt = base + '.001.001.meta'
            if debug: print 'looking for metafiles:', globpatt
            metafiles = glob(globpatt)
            if len(metafiles) > 0:
                self.tiled = True
            else:
                globpatt = base + '.meta'
                if debug: print 'looking for metafiles:', globpatt
                metafiles = glob(base + '.meta')
                self.tiled = False
                if len(metafiles) == 0:
                    sys.stderr.write('File not found: ' + base + '.meta\n')
                    raise IOError
            # get global info from first metafile
            self.metadata = Metadata.read(metafiles[0])
            self.shape = tuple(self.metadata.dims)
            self.nrec = self.metadata.nrecords
            self.tdtype = np.dtype(self.metadata.dtype)
            self.tnx = self.metadata.ends[-1] - self.metadata.starts[-1]
            self.tny = self.metadata.ends[-2] - self.metadata.starts[-2]
            self.metadata.makeglobal()
            if self.hasits:
                # Per-iteration time info: convert metadata fields to lists.
                if 'timeStepNumber' in self.metadata:
                    it = int(self.metadata['timeStepNumber'])
                    self.timeStepNumber = map(int,self.its)
                    self.metadata['timeStepNumber'] = []
                if 'timeInterval' in self.metadata:
                    ts = self.metadata['timeInterval'].split()
                    dt = float(ts[-1])/it
                    if len(ts) == 1:
                        ival = [ [dt*int(it)] for it in self.its ]
                    else:
                        period = float(ts[1]) - float(ts[0])
                        ival = [ [dt*int(it)-period, dt*int(it)] for it in self.its ]
                    self.timeInterval = ival
                    self.metadata['timeInterval'] = []
        # Prepend record and iteration axes to the logical shape.
        if self.nrec > 1:
            self.shape = (self.nrec,) + self.shape
        self.tshape = self.shape[:-2] + (self.tny, self.tnx)
        if self.hasits:
            self.shape = (len(its),) + self.shape
        self.ndim = len(self.shape)
        self.ndimtile = len(self.tshape)
        self.ndimfiles = self.ndim - self.ndimtile
        # dependend stuff
        self.nx = self.shape[-1]
        self.ny = self.shape[-2]
        self.ntx = self.nx // self.tnx
        self.nty = self.ny // self.tny
        if self.tiled:
            if tshape is None:
                # read other metafiles to get tile locations
                globpatt = base + '.*.*.meta'
                tmplre,tmplparts = re_parts(tmpl + '.*.*.meta')
                basere,baseparts = re_parts(base + '.*.*.meta')
                metafiles = glob(globpatt)
                metafiles.sort()
                self.ntile = len(metafiles)
                self.files = []
                # blank tiles have index -1
                self.itile = np.zeros((self.nty,self.ntx), int) - 1
                for itile,metafile in enumerate(metafiles):
                    m = basere.match(metafile)
                    g = m.groups()
                    datatmpl = interleave(g,tmplparts)[:-5] + '.data'
                    self.files.append(datatmpl)
                    meta = Metadata.read(metafile)
                    it = meta.starts[-1] // self.tnx
                    jt = meta.starts[-2] // self.tny
                    self.itile[jt,it] = itile
        else:
            self.ntile = 1
            self.files = [ tmpl + '.data' ]
            self.itile = np.array([[0]],int)
        nmissing = (self.itile < 0).sum()
        if nmissing != nblank and not quiet:
            sys.stderr.write('WARNING: {} tiles missing (expected {}).\n'.format(nmissing, nblank))
        # tile boundaries along axes
        self.i0s = range(0, self.nx, self.tnx)
        self.ies = range(self.tnx, self.nx+1, self.tnx)
        self.j0s = range(0, self.ny, self.tny)
        self.jes = range(self.tny, self.ny+1, self.tny)
        if astype is not None:
            self.dtype = np.dtype(astype)
        else:
            self.dtype = self.tdtype
        self.itemsize = self.dtype.itemsize
        self.fill = self.dtype.type(fill)
        self.dt = dt
        if self.its is not None and dt > 0:
            self.times = [ int(re.sub(r'^0*','',s))*dt for s in self.its ]
    def savetiledata(self, fname):
        """Save tile indices and tile boundaries to a text file via np.savetxt.

        For tiled data the .XXX.YYY tile indices are parsed out of the stored
        file names (fixed character positions).
        """
        if self.tiled:
            it = [ int(f[-12:-9]) for f in self.files ]
            jt = [ int(f[ -8:-5]) for f in self.files ]
        else:
            it = [ 0 ]
            jt = [ 0 ]
        np.savetxt(fname, np.array([it,jt,self.i0s,self.ies,self.j0s,self.jes]))
def unknowndimvals(unknowntmpl, sufs=['']):
    """Find the values taken by each {field} of a filename template.

    Replaces every field with '*', globs (trying each suffix in `sufs` until
    one matches), then matches the found names back against the template to
    collect per-field value sets.  Returns {field: sorted value list}.
    Raises IOError when no suffix yields matches.
    """
    globpatt = template_replace_all(unknowntmpl, '*')
    for suf in sufs:
        print "Trying", globpatt + suf
        files = glob(globpatt + suf)
        if len(files):
            break
    else:
        raise IOError(globpatt + suf)
    vals = dict((k,set()) for k in template_fields(unknowntmpl))
    # `suf` deliberately leaks out of the loop: it is the suffix that matched.
    regexp,parts,keys = format2re(unknowntmpl + suf)
    for name in files:
        m = re.match(regexp,name)
        if m:
            g = m.groups()
            for k,v in zip(keys,g):
                vals[k].add(v)
        else:
            print name,'does not match',regexp
    return dict((k,sorted(list(s))) for k,s in vals.items())
def globdimvals(tmpl, valuesdict, sufs=['.001.001.meta','.meta'], fast=1):
    """Determine the values of all {field}s in `tmpl`, reusing any values
    already supplied in `valuesdict`.

    fast>=1 resolves fields directory-by-directory (truncating the template
    at '/' boundaries); fast>=2 additionally resolves leading '*' wildcards
    by picking the first matching directory.  Returns (dimvals, fields) where
    fields is the sorted list of field names.
    """
#    # remove formats: {xx:yy} -> {xx}
#    tmpl = re.sub(r'{([^:}]*)(:[^}]*)?}', r'{\1}', tmpl)
    superfast = fast >= 2
    fieldsinorder = template_fields(tmpl)
    fields = sorted(list(set(fieldsinorder)))
    # just pick actual fields
    known = dict((k,v) for k,v in valuesdict.items() if k in fields)
    # first known value for each field
    first = dict((k,v[0]) for k,v in known.items())
    unknown = set(fields) - set(known)
    if unknown:
        if fast:
            dimvals = {}
            while True:
                # Substitute the fields resolved so far, stop when none left.
                tmpl = template_replace(tmpl, first)
                if 0 == len(template_fields(tmpl)):
                    break
                if superfast:
                    while True:
                        t = fmt.parse(tmpl).next()[0]
                        if re.search(r'\*.*/',t):
                            trunc = re.sub(r'^([^\*]*)\*([^/]*)/(.*)$',r'\1*\2/',t)
                            print trunc
                            dirs = glob(trunc)
                            # NOTE(review): dirs[0] is used as a re.sub
                            # replacement string; backslashes in the path
                            # would be misinterpreted.
                            tmpl = re.sub(r'^([^\*]*)\*([^/]*)/',dirs[0],tmpl)
                        else:
                            break
#                s = ''
                trunc = ''
                # replace fields until hitting a '/', cut rest
                for t,n,f,c in fmt.parse(tmpl):
                    if '/' in t and trunc != '':
                        pre,post = t.split('/',1)
#                        s = s + pre + '/'
                        trunc = trunc + pre + '/'
                        break
#                    s = s + t + '*'
                    trunc = trunc + fstring(t,n,f,c)
                if trunc.endswith('/'):
                    # Resolved a directory level: glob it and continue deeper.
                    dvs = unknowndimvals(trunc)
                    dimvals.update(dvs)
                    first = dict((k,v[0]) for k,v in dvs.items())
                else:
                    # Reached the filename level: resolve with suffixes, done.
                    dvs = unknowndimvals(trunc,sufs)
                    dimvals.update(dvs)
                    break
        else:
            # NOTE(review): `trunc` is never assigned on this (fast=0) path,
            # so this raises NameError — presumably `tmpl` was intended.
            dimvals = unknowndimvals(trunc,sufs)
    else:
        dimvals = {}
    dimvals.update(known)
    return dimvals,fields
def findvarfiles(tmpl, **kwargs):
    """Resolve numbered {d0},{d1},... and {v0},{v1},... fields of `tmpl` by
    globbing for metafiles; return (dimvals, dimvalslist, filedims, filedicts).

    NOTE(review): near-duplicate of findfiles() below plus the v*-field scan;
    the collected vardimnames are only used to seed dimvals/first — the
    returned dimvalslist/filedims/filedicts cover the d*-dims only.
    """
    i = 0
    dimnames = []
    while re.search(r'{d%d[}:]' % i, tmpl):
        dimnames.append('d%d' % i)
        i += 1
    ndims = i
    i = 0
    vardimnames = []
    while re.search(r'{v%d[}:]' % i, tmpl):
        vardimnames.append('v%d' % i)
        i += 1
    nvardims = i
    # Known values come from kwargs; unknown fields glob with '*'.
    dimvals = dict( (k,kwargs.get(k, [])) for k in dimnames+vardimnames )
    first = dict( (k,kwargs.get(k, ['*'])[0]) for k in dimnames+vardimnames )
    replfirst = dict( (k,kwargs.get(k, ['{'+k+'}'])[0]) for k in dimnames+vardimnames )
    base = tmpl.format(**first)
    if '*' in first.values():
        suf = '.001.001.meta'
        metafiles = glob(base + suf)
        if len(metafiles) == 0:
            suf = '.meta'
            metafiles = glob(base + suf)
            if len(metafiles) == 0:
                raise IOError(base + suf)
        regexp,parts,keys = format2re(tmpl.format(**replfirst) + suf)
        for metafile in metafiles:
            g = re.match(regexp,metafile).groups()
            for k,v in zip(keys,g):
                if v not in dimvals[k]:
                    dimvals[k].append(v)
        for k in keys:
            dimvals[k].sort()
    # have: dimnames, vardimnames, dimvals (both), ndims, nvardims
    dimvalslist = [ dimvals[d] for d in dimnames ]
    filedims = [ len(dimvals[d]) for d in dimnames ]
    inds = np.indices(filedims)
    def tmp(*ii):
        return dict((n,dimvals[n][i]) for n,i in zip(dimnames,ii))
    vtmp = np.frompyfunc(tmp, ndims, 1)
    filedicts = vtmp(*inds)
    return dimvals,dimvalslist,filedims,filedicts
def findfiles(tmpl, **kwargs):
    """Resolve numbered {d0},{d1},... fields of `tmpl` by globbing metafiles.

    Fields whose values are given in kwargs are kept; the rest are globbed
    with '*' and collected from the matching '.001.001.meta' (or '.meta')
    files.  Returns (dimvals, dimvalslist, filedims, filedicts) where
    filedicts is an ndims-dimensional object array of {field: value} dicts.
    Raises IOError when no metafiles match.
    """
    i = 0
    dimnames = []
    while re.search(r'{d%d[}:]' % i, tmpl):
        dimnames.append('d%d' % i)
        i += 1
    ndims = i
    dimvals = dict( (k,kwargs.get(k, [])) for k in dimnames )
    first = dict( (k,kwargs.get(k, ['*'])[0]) for k in dimnames )
    replfirst = dict( (k,kwargs.get(k, ['{'+k+'}'])[0]) for k in dimnames )
    base = tmpl.format(**first)
    if '*' in first.values():
        suf = '.001.001.meta'
        metafiles = glob(base + suf)
        if len(metafiles) == 0:
            suf = '.meta'
            metafiles = glob(base + suf)
            if len(metafiles) == 0:
                raise IOError(base + suf)
        regexp,parts,keys = format2re(tmpl.format(**replfirst) + suf)
        for metafile in metafiles:
            g = re.match(regexp,metafile).groups()
            for k,v in zip(keys,g):
                if v not in dimvals[k]:
                    dimvals[k].append(v)
        for k in keys:
            dimvals[k].sort()
    dimvalslist = [ dimvals[d] for d in dimnames ]
    filedims = [ len(dimvals[d]) for d in dimnames ]
    inds = np.indices(filedims)
    def tmp(*ii):
        return dict((n,dimvals[n][i]) for n,i in zip(dimnames,ii))
    vtmp = np.frompyfunc(tmp, ndims, 1)
    filedicts = vtmp(*inds)
    return dimvals,dimvalslist,filedims,filedicts
class VariableDataMulti(VariableData):
    """VariableData variant whose template may contain arbitrary named
    {field}s (resolved via globdimvals); the resolved fields become extra
    leading axes of the logical shape.
    """
    def __init__(self, tmpl, astype=None, fill=0, dtype=None, shape=None, tshape=None, fast=1, ncs=None, mmap=True, **kwargs):
        """Resolve template fields, locate data/meta files and record layout.

        `tmpl` may also be an explicit list of data files (cubed-sphere case).
        kwargs supply known values for template fields.
        """
#        dimvals,self.dimvals,self.filedims,self.filedicts = findfiles(tmpl,**kwargs)
        if tshape is None:
            sufs = ['.001.001.meta','.meta']
        else:
            sufs = ['.001.001.data','.data','.001.001.data.gz','.data.gz']
        if type(tmpl) == type([]):
            # Explicit file list: derive the template from the first entry.
            files = tmpl
            tmpl = re.sub(r'\.001\.001\.data$','',tmpl[0])
        else:
            files = None
        self.usemmap = mmap
        dimvals,dimnames = globdimvals(tmpl,kwargs,sufs,fast=fast)
        self.dimvals = [ dimvals[d] for d in dimnames ]
        self.filedims = [ len(v) for v in self.dimvals ]
        # One {field: value} dict per file, laid out on the field axes.
        self.filedicts = np.empty(self.filedims, object)
        inds = zip(*[x.flat for x in np.indices(self.filedims)])
        for ifile,ind in enumerate(inds):
            self.filedicts.flat[ifile] = dict((n,dimvals[n][i]) for n,i in zip(dimnames,ind))
        first = dict( (k,v[0]) for k,v in dimvals.items() )
        base = tmpl.format(**first)
        if dtype is not None and shape is not None and tshape is None:
            # global files
            self.tiled = False
            self.metadata = None
            self.shape = shape
            self.nrec = 1
            self.tdtype = np.dtype(dtype)
            self.tny,self.tnx = shape[-2:]
            tmpl = re.sub(r'\.data$','',tmpl)
        elif tshape is not None:
            # cubed sphere
            self.tiled = True
            self.metadata = None
            self.nrec = 1
            self.tdtype = np.dtype(dtype)
            self.tny,self.tnx = tshape[-2:]
            if files is not None:
                self.files = files
                self.ntile = len(files)
                # Global grid inferred from tile count, assuming 6 cube faces.
                ny = int(np.sqrt(self.tnx*self.tny*self.ntile/6))
                nx = 6*ny
                self.shape = tshape[:-2] + (ny,nx)
                self.ntx = nx/self.tnx
                self.nty = ny/self.tny
                # blank tiles have index -1
                self.itile = np.zeros((self.nty,self.ntx), int) - 1
                for itile in range(self.ntile):
                    _,it = divmod(itile,self.ntx/6)
                    f,jt = divmod(_,self.nty)
                    it += self.ntx/6*f
                    self.itile[jt,it] = itile
            else:
                if glob(base + '.001.001.data'):
                    suf = ''
                else:
                    suf = '.gz'
                globpatt = base + '.*.*.data' + suf
                tmplre,tmplparts = re_parts(tmpl + '.*.*.data' + suf)
                basere,baseparts = re_parts(base + '.*.*.data' + suf)
                print globpatt
                datafiles = glob(globpatt)
                print "Found",len(datafiles),'for'
                datafiles.sort()
                self.ntile = len(datafiles)
                ny = int(np.sqrt(self.tnx*self.tny*self.ntile/6))
                nx = 6*ny
                self.shape = tshape[:-2] + (ny,nx)
                self.ntx = nx/self.tnx
                self.nty = ny/self.tny
                self.files = []
                # blank tiles have index -1
                self.itile = np.zeros((self.nty,self.ntx), int) - 1
                for itile,datafile in enumerate(datafiles):
                    m = basere.match(datafile)
                    g = m.groups()
                    datatmpl = interleave(g,tmplparts)
                    # in case some files are not gzipped
                    datatmpl = re.sub(r'\.gz$','',datatmpl)
                    self.files.append(datatmpl)
                    _,it = divmod(itile,self.ntx/6)
                    f,jt = divmod(_,self.nty)
                    it += self.ntx/6*f
                    self.itile[jt,it] = itile
        else:
            metafiles = glob(base + '.001.001.meta')
            if len(metafiles) > 0:
                self.tiled = True
            else:
                metafiles = glob(base + '.meta')
                self.tiled = False
                if len(metafiles) == 0:
                    raise IOError('File not found: {}.meta'.format(base))
            # get global info from first metafile
            self.metadata = Metadata.read(metafiles[0])
            self.shape = tuple(self.metadata.dims)
            self.nrec = self.metadata.nrecords
            self.tdtype = np.dtype(self.metadata.dtype)
            self.tnx = self.metadata.ends[-1] - self.metadata.starts[-1]
            self.tny = self.metadata.ends[-2] - self.metadata.starts[-2]
            self.metadata.makeglobal()
        if self.nrec > 1:
            self.shape = (self.nrec,) + self.shape
        self.tshape = self.shape[:-2] + (self.tny, self.tnx)
        # Resolved template fields become leading axes of the logical shape.
        self.shape = tuple(self.filedims) + self.shape
        self.ndim = len(self.shape)
        self.ndimtile = len(self.tshape)
        self.ndimfiles = self.ndim - self.ndimtile
        # dependend stuff
        self.nx = self.shape[-1]
        self.ny = self.shape[-2]
        self.ntx = self.nx // self.tnx
        self.nty = self.ny // self.tny
        if self.tiled:
            if tshape is None:
                # read other metafiles to get tile locations
                globpatt = base + '.*.*.meta'
                tmplre,tmplparts = re_parts(tmpl + '.*.*.meta')
                basere,baseparts = re_parts(base + '.*.*.meta')
                metafiles = glob(globpatt)
                metafiles.sort()
                self.ntile = len(metafiles)
                self.files = []
                # blank tiles have index -1
                self.itile = np.zeros((self.nty,self.ntx), int) - 1
                for itile,metafile in enumerate(metafiles):
                    m = basere.match(metafile)
                    g = m.groups()
                    datatmpl = interleave(g,tmplparts)[:-5] + '.data'
                    self.files.append(datatmpl)
                    meta = Metadata.read(metafile)
                    it = meta.starts[-1] // self.tnx
                    jt = meta.starts[-2] // self.tny
                    self.itile[jt,it] = itile
        else:
            self.ntile = 1
            self.files = [ tmpl + '.data' ]
            self.itile = np.array([[0]],int)
        # tile boundaries along axes
        self.i0s = range(0, self.nx, self.tnx)
        self.ies = range(self.tnx, self.nx+1, self.tnx)
        self.j0s = range(0, self.ny, self.tny)
        self.jes = range(self.tny, self.ny+1, self.tny)
        if astype is not None:
            self.dtype = np.dtype(astype)
        else:
            self.dtype = self.tdtype
        self.itemsize = self.dtype.itemsize
        self.fill = self.dtype.type(fill)
def calc(s, tnx, i0s, ies):
    """Map a global slice onto per-tile slices.

    Given a slice `s` into a global axis partitioned into tiles of width
    `tnx` (tile t spanning [i0s[t], ies[t])), return parallel lists
    (tiles, srcslices, tgtslices) such that

        res[tgtslice] = tile[srcslice]

    reproduces res = global[s] for tiled files.
    """
    first_tile = s.start // tnx       # round down
    stop_tile = -(-s.stop // tnx)     # round up (ceiling division)
    tiles, srcslices, tgtslices = [], [], []
    for t in range(first_tile, stop_tile):
        # Range of output indices (positions within the sliced result)
        # that land on tile t.
        out0 = max(0, -((s.start - i0s[t]) // s.step))
        oute = -((s.start - min(s.stop, ies[t])) // s.step)
        if oute <= out0:
            continue
        tiles.append(t)
        # Corresponding index range within the tile itself.
        lo = s.start + out0 * s.step - i0s[t]
        hi = s.start + oute * s.step - i0s[t]
        srcslices.append(slice(lo, hi, s.step))
        tgtslices.append(slice(out0, oute))
    return tiles, srcslices, tgtslices
class Variable(object):
""" v = Variable.mds(tmpl, fill=0)
tiled memory-mapped variable object
v[...] slices without reading, but reads upon conversion to array
v() reads
tmpl may contain '*' as in 'res_*/ETAN/day.0000000000'
"""
def __init__(self, data, slice, template=''):
if not isinstance(data, VariableData):
raise ValueError
self.data = data
self.slice = slice
self.template = template
@classmethod
def mds(cls, tmpl, its=None, astype=None, fill=0, dt=0, debug=False, nblank=0):
data = VariableData(tmpl, its=its, fill=fill, astype=astype, dt=dt, debug=debug, nblank=nblank)
shape = data.shape
mslice = MSlice(shape)
return cls(data,mslice)
@classmethod
def data(cls, tmpl, dtype, shape, its=None, astype=None, fill=0, dt=0):
data = VariableData(tmpl, its=its, fill=fill, astype=astype, dtype=dtype, shape=shape, dt=dt)
shape = data.shape
mslice = MSlice(shape)
return cls(data,mslice)
@classmethod
def cs(cls, tmpl, dtype, tshape, its=None, astype=None, fill=0, dt=0):
data = VariableData(tmpl, its=its, fill=fill, astype=astype, dtype=dtype, tshape=tshape, dt=dt)
shape = data.shape
mslice = MSlice(shape)
return cls(data,mslice)
@classmethod
def mcs(cls, tmpl, dtype, tshape, astype=None, fill=0, fast=1, ncs=None, **kwargs):
data = VariableDataMulti(tmpl, astype=astype, fill=fill, dtype=dtype, tshape=tshape, fast=fast, ncs=ncs, **kwargs)
shape = data.shape
mslice = MSlice(shape)
return cls(data,mslice,tmpl)
@classmethod
def mmds(cls, tmpl, astype=None, fill=0, fast=1, **kwargs):
""" tmpl = '.../*/{d0}/{d1}' without .001.001.data
astype :: convertible to dtype
fill :: not used
kwargs :: values for d0, ... (strings)
"""
data = VariableDataMulti(tmpl, astype=astype, fill=fill, fast=fast, **kwargs)
shape = data.shape
mslice = MSlice(shape)
return cls(data,mslice)
@classmethod
def mdata(cls, tmpl, dtype, shape, astype=None, fill=0, fast=1, **kwargs):
data = VariableDataMulti(tmpl, astype=astype, fill=fill, dtype=dtype, shape=shape, fast=fast, **kwargs)
shape = data.shape
mslice = MSlice(shape)
return cls(data,mslice,tmpl)
def __getitem__(self,indices):
# slice
if not type(indices) == type(()):
indices = (indices,)
if hasattr(self.data,'dimvals'):
inds = ()
myindices = indices
if Ellipsis in indices:
i = indices.index(Ellipsis)
myindices = indices[:i] + (len(self.slice.shape)-len(indices)+1)*np.s_[:,] + indices[i+1:]
for ii,i in enumerate(myindices):
iact = self.slice.active[ii]
s = self.slice.s[iact]
# if iact < len(self.data.dimvals) and i in self.data.dimvals[iact]:
if type(i) == type(''):
inds = inds + (self.data.dimvals[iact][s].index(i),)
else:
inds = inds + (i,)
indices = inds
slice = self.slice[indices]
var = Variable(self.data, slice)
if len(slice.active) == 0:
return var()
else:
return var
def __call__(self,*indices):
if len(indices):
if hasattr(self.data,'dimvals'):
inds = ()
for ii,i in enumerate(indices):
iact = self.slice.active[ii]
s = self.slice.s[iact]
# if iact < len(self.data.dimvals) and i in self.data.dimvals[iact]:
if iact < len(self.data.dimvals):
inds = inds + (self.data.dimvals[iact][s].index(i),)
else:
inds = inds + (i,)
indices = inds
return self[indices]
# read data from files according to current slice
data = self.data
mslice = self.slice
shape = mslice.shape
augshape = mslice.augshape
shfiles = augshape[:data.ndimfiles]
shtile = augshape[data.ndimfiles:]
augshape = (np.prod(shfiles),) + shtile
indfiles = mslice.s[:data.ndimfiles]
indperp = mslice.s[data.ndimfiles:-2]
indy,indx = mslice.s[-2:]
filedicts = data.filedicts[indfiles].flat
# print indfiles, filedicts[0]
if mslice.lens[-1] == 1:
# quicker way for single index
it,ti0 = divmod(indx.start, data.tnx)
itxs = [it]
islices = [ti0]
iislices = [0]
else:
itxs,islices,iislices = calc(indx, data.tnx, data.i0s, data.ies)
if mslice.lens[-2] == 1:
# quicker way for single index
jt,tj0 = divmod(indy.start, data.tny)
jtys = [jt]
jslices = [tj0]
jjslices = [0]
else:
jtys,jslices,jjslices = calc(indy, data.tny, data.j0s, data.jes)
if len(shape) == 0:
# quicker for single element
itile = data.itile[jt,it]
if itile >= 0:
dict = filedicts[0]
file = data.files[itile].format(**dict)
if debug: print file
if not file.endswith('.gz') and os.path.exists(file):
mm = np.memmap(file, data.tdtype, 'r', shape=data.tshape)
else:
# can handle .gz
mm = oj.num.myfromfile(file, data.tdtype, data.tshape)
ind = tuple(s.start for s in indperp) + (tj0,ti0)
return data.dtype.type(mm[ind])
else:
return data.fill
else:
# print shape,augshape
res = np.empty(augshape, data.dtype)
res[:] = data.fill
for iifile,dict in enumerate(filedicts):
for jty,jslice,jjslice in zip(jtys,jslices,jjslices):
for itx,islice,iislice in zip(itxs,islices,iislices):
itile = data.itile[jty,itx]
if itile >= 0:
file = data.files[itile].format(**dict)
if debug: print file
if data.usemmap and not file.endswith('.gz') and os.path.exists(file):
mm = np.memmap(file, data.tdtype, 'r', shape=data.tshape)
else:
# can handle .gz
mm = oj.num.myfromfile(file, data.tdtype, data.tshape)
ind = indperp + (jslice,islice)
tmp = mm[ind]
# print res[(iifile,Ellipsis,jjslice,iislice)].shape, tmp.shape
res[iifile,...,jjslice,iislice] = tmp
# print res.shape, shape
return res.reshape(shape)
def __array__(self,*args):
return self().__array__(*args)
def __str__(self):
return 'Variable' + str(self.data.shape) + '[' + str(self.slice) + '] ' + self.template
def __repr__(self):
return 'Variable' + str(self.slice.shape)
def __getattr__(self,attr):
return getattr(self.data,attr)
def __dir__(self):
res = dir(self.__class__) + self.__dict__.keys() + dir(self.data)
res = list(set(res))
res.sort()
return res
def writemeta(self,filename,**override):
# read data from files according to current slice
data = self.data
mslice = self.slice
shape = mslice.shape
m = copy.deepcopy(self.metadata)
indfiles = mslice.s[:data.ndimfiles]
ndim = self.data.ndim
ndimtile = len(m['dimList'])
active = [ i for i in mslice.active if i >= ndim-ndimtile ]
tileshape = shape[-len(active):]
# tileshape = mslice.augshape[-ndimtile:]
if self.nrec != 1:
nrec = mslice.augshape[-ndimtile-1]
if nrec != data.nrec:
if 'nFlds' in m:
del m['nFlds']
if 'fldList' in m and len(m['fldList']) > 1:
del m['fldList']
m['nrecords'] = '%5d'%(mslice.augshape[-ndimtile-1])
m['dimList'] = [ ['%5d'%i for i in [n,1,n]] for n in reversed(tileshape) ]
m['nDims'] = '%3d'%len(tileshape)
filedicts = data.filedicts[indfiles].flat
if len(filedicts) != 1:
print 'filedicts:', list(filedicts)
raise AssertionError('len(filedicts) != 1')
if self.data.hasits:
iit = indfiles[-1]
try:
m['timeStepNumber'] = '%10d'%data.timeStepNumber[iit][0]
except AttributeError:
pass
try:
m['timeInterval'] = ' '.join('%19.12E'%f for f in data.timeInterval[iit][0])
except AttributeError:
pass
for k,v in override.items():
m[k] = v
m.write(filename)
    @property
    def shape(self):
        """Shape of the current sliced view."""
        return self.slice.shape
    @property
    def size(self):
        """Total number of elements in the sliced view."""
        return np.prod(self.shape)
    @property
    def itemsize(self):
        """Bytes per element, taken from the underlying data object."""
        return self.data.itemsize
    @property
    def nbytes(self):
        """Total size in bytes of the sliced view."""
        return self.itemsize*self.size
    @property
    def metadata(self):
        """Metadata object of the underlying data (shared, not copied)."""
        return self.data.metadata
# def dimvals(self,d):
# d = self.slice.active[d]
# return self.data.dimvals[d][self.slice.s[d]]
@property
def dimvals(self):
return [ self.data.dimvals[d][self.slice.s[d]] for d in self.slice.active if d < len(self.data.dimvals) ]
@property
def variables(self):
return [np.array(_) for _ in self.dimvals]
@property
def fields(self):
return self.dimvals[0]
def dimnums(self,d):
l = self.dimvals[d]
return [ int(v) for v in l ]
# try:
# return [ int(re.sub(r'^0*','',v)) for v in l ]
# except (TypeError,ValueError):
# return int(re.sub(r'^0*','',l))
def todict(self):
return dict((k, self[k]) for k in self.dimvals[0])
# for multi-variable
def globvardimvals(tmpl, valuesdict,sufs=['.001.001.meta', '.meta']):
    """
    given a glob template like "base/{v0}/res_*/{v1}/{d0}_day.{d1}"
    go looking for files that match this template and find values for v0,
    ... and d0, ... and return a dictionary of dictionaries

    (v0,v1) -> {'d0':['a','b','c'], 'd1':[0,1,2], ...}

    values provided in valuesdict are substituted before searching for files.
    For given v0,v1,... all combinations of d0,d1,... are expected to be
    present. v0,v1,... may take only selected combinations.

    Returns (vardims, fixdims, dimvals) where vardims/fixdims are the sorted
    'v*'/non-'v*' field names and dimvals maps each variable-value tuple to
    its dict of dimension values.

    NOTE(review): the mutable default `sufs` is never mutated here, so the
    shared-default pitfall is latent only.
    """
    # remove formats: {xx:yy} -> {xx}
    tmpl = re.sub(r'{([^:}]*)(:[^}]*)?}', r'{\1}', tmpl)
    # all placeholder names occurring in the template
    fields = list(set(re.findall(r'{([^}]*)}', tmpl)))
    # placeholders starting with 'v' are "variable" dimensions
    vardims = [k for k in fields if k.startswith('v')]
    vardims.sort()
    knownvars = dict((k,v) for k,v in valuesdict.items() if k in vardims)
    knownvardims = [ k for k in vardims if k in knownvars ]
    knownvarvals = [ knownvars[k] for k in knownvardims ]
    knownvarlens = [ len(v) for v in knownvarvals ]
    unknownvardims = [ k for k in vardims if not k in knownvars ]
    fixdims = [k for k in fields if not k.startswith('v')]
    fixdims.sort()
    # just pick actual fields
    known = dict((k,v) for k,v in valuesdict.items() if k in fields)
    knowndims = dict((k,v) for k,v in known.items() if k not in vardims)
    # first known value for each field
    firstdims = dict((k,v[0]) for k,v in knowndims.items())
    if 'vars' in valuesdict:
        # list of variable value tuples
        # must be all variables; will ignore other v0=... settings
        varvals = valuesdict['vars']
    else:
        # cartesian product of all provided variable values
        knownvarindices = np.indices(knownvarlens)
        varvals = []
        for vi in zip(*[x.flat for x in knownvarindices]):
            varval = tuple(v[i] for v,i in zip(knownvarvals,vi))
            varvals.append(varval)
    dimvals = {}
    unknown = set(fields) - set(known)
    if unknown:
        # substitute known values, keep unknown placeholders as '{name}'
        replaceknown = dict((k,'{'+k+'}') for k in fields)
        for k,v in firstdims.items():
            replaceknown[k] = v
        for knownvarval in varvals:
            vars = dict(zip(knownvardims, knownvarval))
            replaceknown.update(vars)
            unknowntmpl = tmpl.format(**replaceknown)
            # remaining placeholders become glob wildcards
            globpatt = re.sub(r'{[^}]*}', '*', unknowntmpl)
            # try each suffix until one yields matches (for-else: none did)
            for suf in sufs:
                metafiles = glob(globpatt + suf)
                if len(metafiles):
                    break
            else:
                raise IOError(globpatt + suf)
            unknowndims = [k for k in unknown if not k.startswith('v')]
            # regexp extracts the unknown placeholder values from file names
            regexp,parts,keys = format2re(unknowntmpl + suf)
            vals = {}
            for metafile in metafiles:
                g = re.match(regexp,metafile).groups()
                d = dict(zip(keys,g))
                varval = tuple(d[k] for k in unknownvardims)
                if varval not in vals:
                    vals[varval] = dict((k,set()) for k in unknowndims)
                for k,v in zip(keys,g):
                    if not k.startswith('v'):
                        vals[varval][k].add(v)
            # merge discovered variable values with the known ones
            for unknownvarvals,vs in vals.items():
                unknownvars = dict(zip(unknownvardims,unknownvarvals))
                vars.update(unknownvars)
                varval = tuple(vars[k] for k in vardims)
                dimvals[varval] = dict((k,sorted(list(s))) for k,s in vs.items())
                dimvals[varval].update(knowndims)
    else:
        # everything known: same dimension dict for every variable tuple
        dimvals = dict.fromkeys(varvals, knowndims)
    # res: (v0,v1) -> {'d0':['a','b','c'], 'd1':[0,1,2], ...}
    return vardims,fixdims,dimvals
class MultiVariable(object):
    """A mapping from tuples of variable values ('v0', 'v1', ...) to
    Variable objects, indexable like a nested variable.

    vars -- dict mapping value tuples of length `vdim` to Variable objects
    vdim -- number of leading "variable" dimensions consumed by the keys
    """
    def __init__(self, vars=None, vdim=()):
        # BUG FIX: the default used to be a shared mutable list ([]);
        # use a None sentinel and a fresh empty dict instead.
        if vars is None:
            vars = {}
        self.vars = vars
        self.vdim = vdim

    @classmethod
    def mmds(cls, tmpl, **kwargs):
        """Build a MultiVariable from .meta file sets matching *tmpl*."""
        varnames, dimnames, dimvals = globvardimvals(tmpl, kwargs)
        vars = {}
        for vs, ds in dimvals.items():
            vardict = dict((k, v) for k, v in zip(varnames, vs))
            vartmpl = template_replace(tmpl, vardict)
            vars[vs] = Variable.mmds(vartmpl, **kwargs)
        # BUG FIX: vdim is the number of variable dimensions; `len(vs)`
        # raised a NameError when dimvals was empty.
        return cls(vars, len(varnames))

    @classmethod
    def mcs(cls, tmpl, dtype, tshape, **kwargs):
        """Build from raw .data files; *tshape* may be a dict mapping
        variable tuples to per-variable tile shapes."""
        try:
            kwargs['vars'] = tshape.keys()
        except AttributeError:
            tshapes = None
        else:
            tshapes = tshape
        print(tmpl)
        print(kwargs)
        varnames, dimnames, dimvals = globvardimvals(tmpl, kwargs, ['.001.001.data', '.data', '.001.001.data.gz', '.data.gz'])
        print(dimvals)
        vars = {}
        for vs, ds in dimvals.items():
            vardict = dict((k, v) for k, v in zip(varnames, vs))
            vartmpl = template_replace(tmpl, vardict)
            if tshapes:
                tshape = tshapes[vs]
            vars[vs] = Variable.mcs(vartmpl, dtype, tshape, **kwargs)
        # BUG FIX: see mmds; len(varnames) is defined even with no matches.
        return cls(vars, len(varnames))

    def __getitem__(self, i):
        """Index by (part of) a variable-value tuple.

        Fewer indices than vdim -> a MultiVariable over the remaining
        variable dimensions; more indices -> the extra ones are passed
        through to the selected entry.
        """
        if not isinstance(i, tuple):
            i = (i,)
        n = len(i)
        if n < self.vdim:
            # partial key: keep matching entries, drop the consumed part
            vars = dict((k[n:], v) for k, v in self.vars.items() if k[:n] == i)
            vdim = self.vdim - n
            return MultiVariable(vars, vdim)
        if len(i) > self.vdim:
            return self.vars[i[:self.vdim]][i[self.vdim:]]
        else:
            return self.vars[i]
|
998,756 | 4127e206e9a8e8c8465ed97a3d3140cb4417d11a | file_name=input('Enter the file name:')
# Default to the course sample file when no name is entered.
if len(file_name) < 1:
    handle = open('mbox-short.txt')
else:
    handle = open(file_name)

# Count messages per hour of day.  Mbox separator lines look like
# "From stephen.marquard@uct.ac.za Sat Jan  5 09:14:16 2008":
# words[5] is the time and its first two characters the hour.
horas = dict()
for line in handle:
    line = line.rstrip()
    # BUG FIX: match only the mbox separator line "From " -- a bare
    # startswith('From') also matches "From:" header lines.
    if line.startswith('From '):
        words = line.split()
        if len(words) > 3:
            hour = words[5][:2]
            horas[hour] = horas.get(hour, 0) + 1

# Print the hours in ascending order with their counts.
for a, b in sorted(horas.items()):
    print(a, b)
|
998,757 | 8e024d3e20aed1dde0c82ba5c0003a8b3c98f337 | #https://programmers.co.kr/learn/courses/30/lessons/42842
def solution(brown, yellow):
    """Return [width, height] (width >= height) of a carpet whose border has
    `brown` cells and whose interior has `yellow` cells.

    width * height equals the total cell count and width + height equals
    (brown + 4) / 2, so we scan divisors of the total for the matching pair.
    """
    total = brown + yellow              # total cells = width * height
    half_perimeter = (brown + 4) // 2   # width + height
    answer = []
    for candidate in range(1, (brown + 1) // 2):
        if total % candidate != 0:
            continue
        width = max(total // candidate, candidate)
        height = min(total // candidate, candidate)
        if width + height == half_perimeter:
            answer = [width, height]
            break
    return answer
|
998,758 | cff25f62e89799f6971b667bc71f371eaf7f0d4e | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 07:02:29 2018
@author: Abhishek
"""
import pandas as pd
from mlxtend.plotting import plot_learning_curves
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
import numpy as np
# Load the project data set and select the energy-usage features.
dataset = pd.read_table(r'C:\Users\Abhishek\Desktop\Machine Learning\Project\Jathar_project_final.txt')
feature_names = ['Elec_Facility',
                 'Elec_Fans',
                 'Elec_Cooling',
                 'Elec_Heating',
                 'Elec_InteriorLights',
                 'Elec_InteriorEquipment',
                 'Gas_Facility',
                 'Gas_Heating',
                 'Gas_InteriorEquipment',
                 'Gas_Water Heater']
X = dataset[feature_names].astype(float)
y = dataset['Site']

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Scale features to [0, 1]; fit on the training split only to avoid leakage.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)


def _evaluate_classifier(model, name):
    """Fit *model*, print train/test accuracy and show its learning curve.

    Replaces four copy-pasted evaluation sections with one helper; the
    printed strings are identical to the original script's output.
    """
    model.fit(X_train, y_train)
    print('Accuracy of {} classifier on training set: {:.2f}'
          .format(name, model.score(X_train, y_train)))
    print('Accuracy of {} classifier on test set: {:.2f}'
          .format(name, model.score(X_test, y_test)))
    plot_learning_curves(X_train, y_train, X_test, y_test, model)
    plt.show()


# Logistic Regression
from sklearn.linear_model import LogisticRegression
_evaluate_classifier(LogisticRegression(), 'Logistic regression')

# Naive Bayes
from sklearn.naive_bayes import GaussianNB
_evaluate_classifier(GaussianNB(), 'GNB')

# SVM
from sklearn.svm import SVC
_evaluate_classifier(SVC(), 'SVM')

# Neural Network
from sklearn.neural_network import MLPClassifier
_evaluate_classifier(MLPClassifier(solver='lbfgs', alpha=1e-5,
                                   hidden_layer_sizes=(12, 25), random_state=1),
                     'Neural Network')
|
998,759 | b856320b332f37f2ea38accd0578b6a324536b2c | # -*- coding: utf-8 -*-
import scrapy
from DoubanMovieComment.items import DoubanmoviecommentItem
import re
class DoubanmovieSpider(scrapy.Spider):
    """Spider for the short comments of "Ne Zha: Birth of the Demon Child"
    on movie.douban.com."""
    name = 'doubanMovie'
    allowed_domains = ['douban.com']  # restrict crawling to douban.com
    start_urls = ['https://movie.douban.com/subject/26794435/comments?start=0&limit=20&sort=new_score']  # first comments page

    def start_requests(self):
        """Issue the first request with a logged-in session's cookies."""
        cookies_str='bid=7SyqInA0X2A; ll="118282"; __yadk_uid=iz24NDKxQy3uVEHYxFzK78pyLCqdwoZv; _vwo_uuid_v2=DB0CDD326BE76C1F029755619E5599D90|d201258ba073dcab2503b1a47b905be5; _ga=GA1.2.643835604.1560739348; viewed="26285268"; gr_user_id=3a9c1693-de26-4318-b6ec-3336c7a3d734; __utmz=30149280.1565711095.6.5.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1565711095.3.3.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; ct=y; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1565761654%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DUpNwWmAUnqvaX-whr4RHcZ1XGYxOr5mWdSNZgT1TGjR70tQsGSSxKYUbaxrHbiM2%26wd%3D%26eqid%3Dce0d6bae000172c6000000065d52daf0%22%5D; _pk_ses.100001.4cf6=*; __utma=30149280.643835604.1560739348.1565754243.1565761654.10; __utma=223695111.823285827.1560739348.1565754244.1565761680.7; __utmb=223695111.0.10.1565761680; ap_v=0,6.0; dbcl2="201918819:6w5YXCY3+xM"; push_noty_num=0; push_doumail_num=0; __utmv=30149280.20191; __utmb=30149280.11.9.1565762040232; _pk_id.100001.4cf6=e83bc1fa94a43629.1560739348.7.1565762158.1565754243.'
        # BUG FIX: cookie values can themselves contain '=' (e.g. the
        # __utmz entries) and names carry a leading space after '; ', so a
        # naive split("=") truncated values and polluted names.  Split on
        # the FIRST '=' only and strip each name.
        cookies_dict = {}
        for pair in cookies_str.split(";"):
            cookie_name, _, cookie_value = pair.strip().partition("=")
            cookies_dict[cookie_name] = cookie_value
        yield scrapy.Request(
            self.start_urls[0],
            callback=self.parse,
            cookies=cookies_dict
        )

    def parse(self, response):
        """Extract every comment on the page, then follow the next-page link."""
        print("开始爬取")
        # all comment nodes on the page
        comment_list=response.xpath("//div[@id='comments']/div[@class='comment-item']")
        item=DoubanmoviecommentItem()
        for comment in comment_list:
            # commenter name
            item["user"]=comment.xpath("./div[@class='comment']//span[@class='comment-info']/a/text()").extract_first()
            # comment timestamp
            item["date"]=comment.xpath("./div[@class='comment']/h3/span[@class='comment-info']/span[@class='comment-time ']/text()").extract_first()
            # rating level (the @title of the rating span)
            item["level"]=comment.xpath("./div[@class='comment']/h3/span[@class='comment-info']/span[contains(@class,'rating')]/@title").extract_first()
            # number of upvotes
            item["votes"]=comment.xpath("./div[@class='comment']/h3/span[@class='comment-vote']/span[@class='votes']/text()").extract_first()
            # comment body text
            item["content"]=comment.xpath("./div[@class='comment']/p/span[@class='short']/text()").extract_first()
            # hand the item to the pipeline
            print(item)
            yield item
        # follow the relative next-page link, if any
        next_url=response.xpath("//div[@id='paginator']/a[@class='next']/@href").extract_first()
        if next_url is not None:
            next_url="https://movie.douban.com/subject/26794435/comments"+next_url
            print("下一页的 url============> %s"%next_url)
            yield scrapy.Request(
                next_url,
                callback=self.parse
            )
|
998,760 | 02ca0bd1dab278230efee1d402618c35b1780996 | # import random
# from random import randint
#
# print("Відгадайте число від 0 - 100 використовуючи підказки")
# unknown = int(input("Ведіть число для відгадування: "))
# a = random.randint(0, 100)
#
# count = 0
#
#
# while a != unknown:
#
# count += 1
# if unknown > a:
# print("Введене число більше за загадане")
#
#
# elif unknown < a:
# print("Введене число менше за загадане")
# unknown = int(input("Ведіть число для відгадування: "))
#
# else:
# print(("Вітаємо ви відгадали число!" "\n" f" Спроб використано: {count}"))
import re
# Password policy: 6-16 characters, at least one lowercase and one uppercase
# Latin letter, at least one digit, and one of the special characters $ # @.
print("Пароль повинен містити літери латинського алфавіту"
      "нижнього та верхнього регістрів,як мінімум одну цифру"
      "та спец. символи як $#@")
password = input("Enter a password: ")
has_lower = re.findall(r"[a-z]", password)
has_upper = re.findall(r"[A-Z]", password)
has_digit = re.findall(r"\d", password)
# BUG FIX: the original pattern r"$|#|@" treated "$" as the end-of-string
# anchor, which matches every password, so the special-character check
# always passed.  A character class matches the literal symbols instead.
has_special = re.findall(r"[$#@]", password)
if 6 <= len(password) <= 16 and has_lower and has_upper and has_digit and has_special:
    print("Ваш пароль прийнято")
else:
    print("Введено якесь невірне значення")
|
998,761 | 8923be1ce04816eb0df949624b0a15e9f4dbfa3f | import pygame, math
from pygame.locals import *
# Here everything related to the game aspect happens
clock = pygame.time.Clock()
timer = pygame.time.get_ticks  # millisecond clock (pygame ticks)
timeout = 2000 # milliseconds the rocket must stay on the pad to count as landed
deadline = timer() + timeout  # rolling deadline for the landing check
landed = False   # True once the rocket stayed down for `timeout` ms
boom = False     # True after a crash has been detected
out_map = False  # True when the rocket left the playable area
launched = False # True once rocket_pos[1] first reaches 100 (see logic())
def logic(ground, rocket_pos, lz_offset, space, angle, ceiling, lz_size, isle_number):
    """Per-frame game state update: launch, landing, out-of-map and crash
    detection.

    Returns (landed, boom, out_map, launched, time, above).

    NOTE(review): the globals `time` and `above` are first created inside
    this function; `time += 1/60` assumes a 60 FPS frame rate and Python 3
    true division -- confirm both against the main loop.
    """
    global timeout, deadline, landed, out_map, launched, time, above
    now = timer()
    # launch detection
    if rocket_pos[1] >= 100:
        launched = True
    # accumulate flight time while airborne; reset before launch
    if launched and not landed:
        time += 1/60
    elif not launched:
        time = 0
    # check if above island
    if lz_offset <= rocket_pos[0] <= lz_offset + lz_size[0] * isle_number:
        above = True
    else:
        above = False
    # Landing detection (must stay landed for 2 seconds):
    # while off the pad or tilted, the deadline keeps being pushed forward;
    # once on the pad it stays fixed, so `now >= deadline` means the rocket
    # has been down for `timeout` ms.
    if math.radians(10) >= angle >= math.radians(-10):
        if rocket_pos[1] <= 100 and lz_offset <= rocket_pos[0] <= lz_offset + lz_size[0] * isle_number:
            if now >= deadline:
                landed = True
        else:
            deadline = now + timeout
            landed = False
    else:
        deadline = now + timeout
        landed = False
    # remaining countdown in seconds (3 as a sentinel when already elapsed);
    # NOTE(review): landed_timer is computed but never used or returned
    if deadline - now >= 0:
        landed_timer = (deadline - now)/1000
    else:
        landed_timer = 3
    # out of map detection (the 1000 ms delay skips the spawn frames)
    if now >= 1000 and rocket_pos[0] <= 0 or rocket_pos[0] >= ground.w or rocket_pos[1] >= ceiling:
        out_map = True
    else:
        out_map = False
    # collision detection: installed as the pymunk default collision handler
    def coll_begin(arbiter, space, data):
        global boom
        # shape ids 2 and 0 are project-assigned -- presumably rocket body
        # and ground; confirm where the shapes are created
        if arbiter.shapes[0].id == 2 and arbiter.shapes[1].id == 0:
            # crash when hitting too fast or too tilted ...
            if ground.body._get_velocity()[1]/100 >= 20 or not math.radians(25) >= angle >= math.radians(-25):
                boom = True
            # ... or when touching down outside the landing zone
            elif not (lz_offset <= rocket_pos[0] <= lz_offset + lz_size[0] * isle_number) and (ground.w - 12000) >= rocket_pos[0]:
                boom = True
        return True
    handler = space.add_default_collision_handler()
    handler.begin = coll_begin
    return landed, boom, out_map, launched, time, above
def restart(rocket_start_pos, ground, rocket, joint1, joint2, rocket_fuel_mass_init, rocket_fuel_mass, landed, gear, last_time, best_time):
    """Reset the simulation when the player presses R.

    Restores the rocket/ground bodies to their initial state, refuels,
    retracts the gear and reloads the last/best times from scores.dat.
    Returns (fuel_mass, landed, gear, launched, last_time, best_time) --
    either the reset values (R pressed) or the ones passed in.
    """
    global launched, boom
    # what happens when game is restarted
    keys = pygame.key.get_pressed()
    if keys[K_r]:
        gear = False
        launched = False
        boom = False
        # the world scrolls around the rocket: reposition the ground body
        ground.body.position = (-rocket_start_pos, -50)
        ground.body.velocity = (0, 0)
        rocket.body.angle = 0
        rocket.body.angular_velocity = 0
        # reset the landing-leg joints to their rest angles
        joint1.rotary._set_rest_angle(-math.pi/4 * 3)
        joint2.rotary._set_rest_angle(0)
        # scores.dat: line 0 = last time, line 1 = best time
        with open('scores.dat', 'r') as file:
            lines = file.readlines()
            last_time = lines[0]
            best_time = lines[1]
        return rocket_fuel_mass_init, False, gear, launched, last_time, best_time
    return rocket_fuel_mass, landed, gear, launched, last_time, best_time
def score():
    """Persist the current flight time to scores.dat after a landing.

    Line 0 holds the last time, line 1 the best (lowest) time; both are
    module globals `landed` and `time` set by logic().
    """
    # Score (time) management
    if landed:
        with open('scores.dat', 'r') as file:
            lines = file.readlines()
        lines[0] = str(time) +'\n'
        if float(lines[0]) < float(lines[1]):
            # NOTE(review): the new best time is written without a trailing
            # newline -- confirm scores.dat always has exactly two lines
            lines[1] = str(time)
        with open('scores.dat', 'w') as file:
            file.writelines(lines)
|
998,762 | 9830cd2a2359b497cdacf0b88354958d5a1bbc58 | import os
from utils import max_rewards
PATH = 'out/MineRLObtainDiamondVectorObf-v0/100000'
def main():
    """For every file under PATH, collect the set of actions it contains
    and print max_rewards(actions) followed by the file name."""
    files = os.listdir(PATH)
    files.sort()
    for file in files:
        d = {}
        with open(os.path.join(PATH, file)) as txt:
            txt.readline()  # skip the header line
            for line in txt.readlines():
                # parse the action list after the first comma, dropping the
                # trailing newline.
                # SECURITY NOTE(review): eval() executes arbitrary code from
                # the file -- safe only for trusted local data; consider
                # ast.literal_eval instead.
                action = eval(line[line.index(',') + 1:-1])
                for act in action:
                    d[act] = 1
        print(f'{max_rewards(d)}\t{file}')


if __name__ == "__main__":
    main()
|
998,763 | 13e9cd5cacbb08434059f7b0db2954bfcf518ea1 | #!/usr/bin/env python
"""
mapper that processes lines passed to it in stdin to produce a k-v pair for each word in the form of (upper,1) if the word is upper case and (lower,1) otherwise.
INPUT:
Lines of text passed to stdin, where individual words are separated by spaces.
Example:
The quick Brown fox jumped over the lazy dog
OUTPUT:
The case of each word encountered in the text, followed by a tab and int(1), printed to stdout.
Example:
upper 1
lower 1
upper 1
etc..
"""
import re
import sys
# read from standard input
for line in sys.stdin:
    text = line.strip()
    # tokenize into purely alphabetic words
    for word in re.findall(r'[a-zA-Z]+', text):
        # classify by the case of the leading character and emit (case, 1)
        case = "lower" if word[0] == word[0].lower() else "upper"
        print('%s\t%s' % (case, 1))
998,764 | 94b0105e535a8089f1d16663cef0bc822c33b1ef | # Generated by Django 2.0.6 on 2019-05-30 22:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: change the eventFlyer field of
    # UpcomingEventsEntryModel to a FileField uploading into "images".

    dependencies = [
        ('passionApp', '0004_auto_20190530_2218'),
    ]

    operations = [
        migrations.AlterField(
            model_name='upcomingeventsentrymodel',
            name='eventFlyer',
            field=models.FileField(upload_to='images'),
        ),
    ]
|
998,765 | a56ffcd5216211f194f6bc2efce8101347ad704f | import logging
"""
DEBUG
INFO
WARNING
ERROR
CRITICAL
"""
def log_info(message, filename):
    """Append *message* to *filename* as an INFO-level log record.

    BUG FIX: the original called logging.basicConfig() on every invocation,
    but basicConfig configures the root logger only once per process, so any
    later call with a different *filename* was silently ignored and all
    messages went to the first file.  Using one logger (with one
    FileHandler) per file name fixes that while keeping the record format.
    """
    logger = logging.getLogger(filename)
    if not logger.handlers:
        handler = logging.FileHandler(filename)  # appends by default
        handler.setFormatter(logging.Formatter(
            fmt='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
            datefmt='%H:%M:%S',
        ))
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
    logger.info(message)
|
998,766 | 7789f77741d3db1b8d533c7aeb172da3c78abba6 | import streamlit as st
import cv2
import numpy as np
from PIL import Image
import imutils
# Page header and the uploader widget for the input photograph.
st.title('Application')
st.write("Number Plate Recognition App")
file = st.file_uploader("Please upload an image file", type=["jpg", "png"])
def number_Plate_Recognition(image):
    """Locate a rectangular number plate in *image* and display the
    annotated image plus the cropped plate via streamlit.

    Pipeline: resize -> grayscale -> bilateral noise filter -> Canny edges
    -> contour search for the largest 4-corner polygon.
    """
    # Convert PIL image into a numpy array and normalise the width
    img_array = np.array(image)
    img = imutils.resize(img_array, width=800)
    # RGB to gray scale conversion
    gray_scale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # iterative bilateral filter: removes noise while keeping edges sharp
    filtered = cv2.bilateralFilter(gray_scale, 12, 20, 20)
    # find edges of the gray scale image
    edged = cv2.Canny(filtered, 60, 180)
    # find contours based on edges
    (contours, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # keep only the 30 largest contours by area
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:30]
    NumberPlateCnt = None  # best plate contour found so far
    new_img = None         # cropped plate region
    # take the largest contour that approximates to a 4-corner polygon
    for c in contours:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.018 * peri, True)
        if len(approx) == 4:
            NumberPlateCnt = approx
            x, y, w, h = cv2.boundingRect(c)  # plate bounding box
            new_img = img[y:y+h, x:x+w]
            break
    # BUG FIX: the original crashed (drawContours on None / undefined
    # new_img) when no 4-corner contour existed; report instead.
    if NumberPlateCnt is None:
        st.text("No number plate contour was detected in this image")
        return
    # draw the selected contour on the original image
    cv2.drawContours(img, [NumberPlateCnt], -1, (0, 255, 0), 2)
    # display the annotated image and the cropped plate
    st.image(img, caption='Final Image With Number Plate Detected')
    st.image(new_img, caption='cropped')
# Main flow: prompt until a file is supplied, then run plate detection.
if file is None:
    st.text("Please upload an image file")
else:
    #read the image file from file_uploader
    image = Image.open(file)
    #Display uploaded Image
    st.image(image,caption='Original Image')
    number_Plate_Recognition(image)
|
998,767 | 6481b90515bab83349e16dddc3bf93802545d390 | # Ler um número pelo teclado e informar o fatorial do número lido.
def fatorial(n):
    """Return the pair (n, n!) computed iteratively.

    For n <= 0 the loop body never runs, so the factorial part is 1,
    matching the original while-loop behaviour.
    """
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return n, result
# Demo: show 5! using the (value, factorial) pair returned by fatorial().
print("O fatorial de %d é %d." % fatorial(5))
|
998,768 | 920c1f573fff85c5d5d55d5b0ea5c9f121a2f956 | # глобална променлива
# Module-level ("global") variable; show() rebinds it to demonstrate `global`.
port = 3306
def show():
    """Rebind the module-level `port` to 22 and print the new value."""
    global port
    port = 22
    print('show port = {}'.format(port))
if __name__ == '__main__':
    # Demonstrate that show() mutates the module-level `port` variable.
    print(f'before port = {port}')
    show()
    print(f'after port = {port}')
    print('---')
|
998,769 | 0c58fd36cfae88ed6c2371ce6e9b4d8558abd67f | import os
import json
import time
import copy
import numpy as np
from baseGenerator import BaseGenerator
from ..helper.terminalHelper import find_terminal_view
from ..common.commonUtil import CommonUtil
from ..common.imageUtil import generate_crop_data
from ..common.imageUtil import crop_images
from ..common.imageUtil import convert_to_dct
from ..common.imageUtil import compare_with_sample_image_multi_process
from ..common.imageUtil import CropRegion
from ..common.visualmetricsWrapper import find_image_viewport
from ..common.logConfig import get_logger
logger = get_logger(__name__)
class InputLatencyAnimationDctGenerator(BaseGenerator):
    """Generator that measures input latency from recorded video frames
    using DCT-based image comparison.

    The run time spans from the 'start' event (searched in the terminal
    region of the frame) to the 'end' event (searched in the browser
    viewport), located by a backward search through the captured frames.
    """

    # Event search spec: for each event, which cropped region to search in,
    # what fraction of it to use, and whether to shift the matched frame.
    # NOTE(review): presumably exposed to the methods below as
    # `self.visual_event_points` by BaseGenerator -- confirm.
    BROWSER_VISUAL_EVENT_POINTS = {
        'backward_search': [{'event': 'end', 'search_target': CropRegion.VIEWPORT, 'fraction': CropRegion.SKIP_STATUS_BAR_FRACTION, 'shift_result': True},
                            {'event': 'start', 'search_target': CropRegion.TERMINAL, 'fraction': CropRegion.FULL_REGION_FRACTION, 'shift_result': True}]
    }

    def generate_sample_result(self, input_generator_name, input_sample_dict, input_sample_index):
        """Build the sample-comparison data for one sample image: DCT of the
        full frame, viewport/terminal crop regions, cropped images and (for
        sample index 1) the per-event DCT tags."""
        current_sample_data = copy.deepcopy(input_sample_dict)
        input_sample_data = current_sample_data[input_sample_index]
        sample_dct_obj = convert_to_dct(input_sample_data['fp'])
        return_result = self.generate_sample_result_template(input_generator_name=input_generator_name, sample_dct_obj=sample_dct_obj)
        # crop sample data area
        # generate viewport crop area
        if CropRegion.VIEWPORT in input_sample_data:
            # if already generated the data, reuse it.
            return_result[input_generator_name]['crop_data'][CropRegion.VIEWPORT] = input_sample_data[CropRegion.VIEWPORT]
        else:
            viewport_value = find_image_viewport(input_sample_data['fp'])
            return_result[input_generator_name]['crop_data'][CropRegion.VIEWPORT] = viewport_value
            return_result[CropRegion.VIEWPORT] = viewport_value
        # generate terminal crop area
        if CropRegion.TERMINAL in input_sample_data:
            # if already generated the data, reuse it.
            return_result[input_generator_name]['crop_data'][CropRegion.TERMINAL] = input_sample_data[CropRegion.TERMINAL]
        else:
            # TODO: we should replace the VIEWPORT location by BROWSER location in the future.
            # (Currently Mike implement the Win and Mac, no Linux)
            terminal_value = find_terminal_view(
                input_sample_data['fp'],
                return_result[input_generator_name]['crop_data'][CropRegion.VIEWPORT])
            return_result[input_generator_name]['crop_data'][CropRegion.TERMINAL] = terminal_value
            return_result[CropRegion.TERMINAL] = terminal_value
        # generate crop data (reuse crop regions of sample 1 when available)
        if input_generator_name not in input_sample_dict[1]:
            crop_data_dict = generate_crop_data([input_sample_data], return_result[input_generator_name]['crop_data'])
        else:
            crop_data_dict = generate_crop_data([input_sample_data], input_sample_dict[1][input_generator_name]['crop_data'])
        # crop images
        crop_images(crop_data_dict)
        return_result[input_generator_name]['event_tags'] = {}
        if input_sample_index == 1:
            # tag event to sample: store the DCT of each event's search
            # region so runtime frames can be matched against it
            for event_obj in self.visual_event_points['backward_search']:
                if event_obj['search_target'] == CropRegion.ORIGINAL:
                    return_result[input_generator_name]['event_tags'][event_obj['event']] = sample_dct_obj
                else:
                    search_target_fp = crop_data_dict[event_obj['search_target']]['fp_list'][0]['output_fp']
                    return_result[input_generator_name]['event_tags'][event_obj['event']] = convert_to_dct(search_target_fp, event_obj['fraction'])
        return return_result

    def crop_images_based_on_samplefiles(self, input_data):
        """Crop every converted frame to the regions recorded for sample 1
        and return the converter result augmented with the cropped paths."""
        # compare source image list
        input_image_list = copy.deepcopy(input_data['converter_result'])
        # generate image list
        image_list = []
        for image_fn in input_data['converter_result']:
            image_list.append(input_data['converter_result'][image_fn])
        # generate crop data for all images
        crop_data_dict = generate_crop_data(image_list,
                                            input_data['sample_result'][1][self.__class__.__name__]['crop_data'])
        # crop images
        start_time = time.time()
        crop_images(crop_data_dict)
        last_end = time.time()
        elapsed_time = last_end - start_time
        logger.debug("Crop Image Time Elapsed: [%s]" % elapsed_time)
        # merge crop data and convert data: map cropped output paths back
        # onto the per-frame entries, keyed by input file name
        for crop_target_name in crop_data_dict:
            for crop_img_obj in crop_data_dict[crop_target_name]['fp_list']:
                image_fn = os.path.basename(crop_img_obj['input_fp'])
                input_image_list[image_fn][crop_target_name] = crop_img_obj['output_fp']
        # merge crop data and convert data
        for image_fn in input_image_list:
            input_image_list[image_fn][CropRegion.ORIGINAL] = input_image_list[image_fn]['fp']
        return input_image_list

    def generate_result(self, input_data):
        """Compare all cropped frames against the sample event tags and
        derive the input-latency run time.

        @param input_data: dict with 'converter_result', 'sample_result'
            and 'exec_timestamp_list' entries produced by earlier stages.
        @return: self.compare_result, containing at least
            'merged_crop_image_list' and 'running_time_result', plus
            'run_time'/'event_time_dict' when the comparison succeeded.
        """
        self.compare_result = {}
        input_image_list = self.crop_images_based_on_samplefiles(input_data)
        self.compare_result['merged_crop_image_list'] = input_image_list
        compare_setting = {
            'default_fps': self.index_config['video-recording-fps'],
            'event_points': self.visual_event_points,
            'generator_name': self.__class__.__name__,
            'exec_timestamp_list': input_data['exec_timestamp_list'],
            'threshold': self.index_config.get('compare-threshold', 0.0003),
            'search_margin': self.index_config.get('search-margin', 10)}
        self.compare_result['running_time_result'] = compare_with_sample_image_multi_process(
            input_data['sample_result'],
            input_image_list,
            compare_setting)
        if self.compare_result.get('running_time_result', None):
            # Calculate the Input Latency running time by customized method
            run_time, event_time_dict = self.calculate_runtime_base_on_event(
                self.compare_result['running_time_result'],
                self.index_config['video-recording-fps'])
            self.compare_result.update({'run_time': run_time, 'event_time_dict': event_time_dict})
        return self.compare_result

    def output_case_result(self, suite_upload_dp):
        """Merge this run's result into the history json, optionally export
        the result video, record status counters and clean up the frames."""
        if self.compare_result.get('run_time', None):
            self.record_runtime_current_status(self.compare_result['run_time'])
            history_result_data = CommonUtil.load_json_file(self.env.DEFAULT_TEST_RESULT)
            event_time_dict = self.compare_result.get('event_time_dict', {})
            run_time_dict = {'run_time': self.compare_result['run_time'],
                             'folder': self.env.output_name,
                             'event_time': event_time_dict}
            # init result dict if not exist
            init_result_dict = self.init_result_dict_variable(
                ['total_run_no', 'total_time', 'error_no', 'min_time', 'max_time', 'avg_time', 'std_dev',
                 'med_time'], ['time_list', 'outlier', 'detail'])
            update_result = history_result_data.get(self.env.test_name, init_result_dict)
            # based on current result add the data to different field
            _, _, update_result = self.generate_update_result_for_runtime(update_result, self.compare_result, run_time_dict)
            history_result_data[self.env.test_name] = update_result
            # write fps to history_result_data
            history_result_data['video-recording-fps'] = self.index_config['video-recording-fps']
            # output upload video
            if self.exec_config['output-result-video-file']:
                start_time = time.time()
                upload_result_video_fp = self.output_runtime_result_video(self.compare_result['running_time_result'], suite_upload_dp)
                current_time = time.time()
                elapsed_time = current_time - start_time
                logger.debug("Generate Video Elapsed: [%s]" % elapsed_time)
                history_result_data[self.env.test_name]['upload_video_fp'] = upload_result_video_fp
            # dump to json file
            # NOTE(review): json.dump into a "wb" file handle only works on
            # Python 2 -- confirm the project's Python version
            with open(self.env.DEFAULT_TEST_RESULT, "wb") as fh:
                json.dump(history_result_data, fh, indent=2)
            self.status_recorder.record_current_status({self.status_recorder.STATUS_TIME_LIST_COUNTER: str(len(history_result_data[self.env.test_name]['time_list']))})
        else:
            self.status_recorder.record_current_status({self.status_recorder.STATUS_IMG_COMPARE_RESULT: self.status_recorder.ERROR_COMPARE_RESULT_IS_NONE})
        self.clean_output_images(self.compare_result['running_time_result'], self.env.img_output_dp)

    @classmethod
    def calculate_runtime_base_on_event(cls, input_running_time_result, fps=90):
        """
        This customized method base on `baseGenerator.runtime_calculation_event_point_base`.
        However, when start and end at the same time, it will return the mid time between 0~1 frame, not 0 ms.
        For example, if FPS is 90, the running time of 1 frame is 11.11111 ms.
        When start and end at the same time, it will return 5.55555 ms ((1000 ms / 90 FPS) / 2).
        @param input_running_time_result: the running_time_result after do comparison.
            ex:
            [
                {'event': 'start', 'file': 'foo/bar/9487.bmp', 'time_seq': 5487.9487},
                {'event': 'end', 'file': 'foo/bar/9527.bmp', 'time_seq': 5566.5566}, ...
            ]
        @param fps: the current FPS. Default=90.
        @return: (running time, the dict of all events' time sequence).
        """
        run_time = -1
        event_time_dict = dict()
        start_event = cls.get_event_data_in_result_list(input_running_time_result,
                                                        cls.EVENT_START)
        end_event = cls.get_event_data_in_result_list(input_running_time_result,
                                                      cls.EVENT_END)
        if start_event and end_event:
            run_time = end_event.get('time_seq') - start_event.get('time_seq')
            event_time_dict[cls.EVENT_START] = 0
            event_time_dict[cls.EVENT_END] = run_time
            # when start and end at the same time, it will return the mid time between 0~1 frame, not 0 ms.
            if run_time == 0:
                run_time = 1000.0 / fps / 2
        if run_time > 0:
            # record custom (non start/end) events relative to the start
            for custom_event in input_running_time_result:
                custom_event_name = custom_event.get('event')
                if custom_event_name != cls.EVENT_START \
                        and custom_event_name != cls.EVENT_END:
                    event_time_dict[custom_event_name] = np.absolute(
                        custom_event.get('time_seq') - start_event.get('time_seq'))
        return run_time, event_time_dict
|
998,770 | 2b7a4f3a99319dc5f04997a7775e9abb45da9515 | import torch
import torch.nn as nn
from torch.distributions import constraints
import torch.nn.functional as F
from pyro.distributions.torch_transform import TransformModule
from pyro.distributions.util import copy_docs_from
@copy_docs_from(TransformModule)
class BatchNormTransform(TransformModule):
"""
A type of batch normalization that can be used to stabilize training in normalizing flows. The inverse operation
is defined as
:math:`x = (y - \\hat{\\mu}) \\oslash \\sqrt{\\hat{\\sigma^2}} \\otimes \\gamma + \\beta`
that is, the standard batch norm equation, where :math:`x` is the input, :math:`y` is the output,
:math:`\\gamma,\\beta` are learnable parameters, and :math:`\\hat{\\mu}`/:math:`\\hat{\\sigma^2}` are smoothed
running averages of the sample mean and variance, respectively. The constraint :math:`\\gamma>0` is enforced to
ease calculation of the log-det-Jacobian term.
This is an element-wise transform, and when applied to a vector, learns two parameters (:math:`\\gamma,\\beta`)
for each dimension of the input.
When the module is set to training mode, the moving averages of the sample mean and variance are updated every time
the inverse operator is called, e.g., when a normalizing flow scores a minibatch with the `log_prob` method.
Also, when the module is set to training mode, the sample mean and variance on the current minibatch are used in
place of the smoothed averages, :math:`\\hat{\\mu}` and :math:`\\hat{\\sigma^2}`, for the inverse operator. For this
reason it is not the case that :math:`x=g(g^{-1}(x))` during training, i.e., that the inverse operation is the
inverse of the forward one.
Example usage:
>>> from pyro.nn import AutoRegressiveNN
>>> from pyro.distributions import InverseAutoregressiveFlow
>>> base_dist = dist.Normal(torch.zeros(10), torch.ones(10))
>>> iafs = [InverseAutoregressiveFlow(AutoRegressiveNN(10, [40])) for _ in range(2)]
>>> bn = BatchNormTransform(10)
>>> flow_dist = dist.TransformedDistribution(base_dist, [iafs[0], bn, iafs[1]])
>>> flow_dist.sample() # doctest: +SKIP
tensor([-0.4071, -0.5030, 0.7924, -0.2366, -0.2387, -0.1417, 0.0868,
0.1389, -0.4629, 0.0986])
:param input_dim: the dimension of the input
:type input_dim: int
:param momentum: momentum parameter for updating moving averages
:type momentum: float
:param epsilon: small number to add to variances to ensure numerical stability
:type epsilon: float
References:
[1] Sergey Ioffe and Christian Szegedy. Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift. In International Conference on Machine Learning, 2015. https://arxiv.org/abs/1502.03167
[2] Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density Estimation using Real NVP.
In International Conference on Learning Representations, 2017. https://arxiv.org/abs/1605.08803
[3] George Papamakarios, Theo Pavlakou, and Iain Murray. Masked Autoregressive Flow for Density Estimation.
In Neural Information Processing Systems, 2017. https://arxiv.org/abs/1705.07057
"""
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 0
    def __init__(self, input_dim, momentum=0.1, epsilon=1e-5):
        """
        :param input_dim: the dimension of the input
        :type input_dim: int
        :param momentum: momentum parameter for updating moving averages
        :type momentum: float
        :param epsilon: small number added to variances for numerical stability
        :type epsilon: float
        """
        super(BatchNormTransform, self).__init__()
        self.input_dim = input_dim
        # Unconstrained scale parameter; it is mapped to a strictly positive
        # value by the `constrained_gamma` property before use.
        self.gamma = nn.Parameter(torch.zeros(input_dim))
        # Shift parameter.
        self.beta = nn.Parameter(torch.zeros(input_dim))
        self.momentum = momentum
        self.epsilon = epsilon
        # Running batch statistics are registered as buffers (not parameters):
        # saved/restored with state_dict but excluded from gradient updates.
        self.register_buffer('moving_mean', torch.zeros(input_dim))
        self.register_buffer('moving_variance', torch.ones(input_dim))
@property
def constrained_gamma(self):
return F.relu(self.gamma) + 1e-6
def _call(self, x):
"""
:param x: the input into the bijection
:type x: torch.Tensor
Invokes the bijection x=>y; in the prototypical context of a TransformedDistribution `x` is a
sample from the base distribution (or the output of a previous flow)
"""
# Enforcing the constraint that gamma is positive
return (x - self.beta) / self.constrained_gamma * \
torch.sqrt(self.moving_variance + self.epsilon) + self.moving_mean
def _inverse(self, y):
"""
:param y: the output of the bijection
:type y: torch.Tensor
Inverts y => x.
"""
# During training, keep smoothed average of sample mean and variance
if self.training:
mean, var = y.mean(0), y.var(0)
# NOTE: The momentum variable agrees with the definition in e.g. `torch.nn.BatchNorm1d`
self.moving_mean.mul_(1 - self.momentum).add_(mean * self.momentum)
self.moving_variance.mul_(1 - self.momentum).add_(var * self.momentum)
# During test time, use smoothed averages rather than the sample ones
else:
mean, var = self.moving_mean, self.moving_variance
return (y - mean) * self.constrained_gamma / torch.sqrt(var + self.epsilon) + self.beta
def log_abs_det_jacobian(self, x, y):
"""
Calculates the elementwise determinant of the log jacobian, dx/dy
"""
if self.training:
var = torch.var(y, dim=0, keepdim=True)
else:
# NOTE: You wouldn't typically run this function in eval mode, but included for gradient tests
var = self.moving_variance
return (-self.constrained_gamma.log() + 0.5 * torch.log(var + self.epsilon))
|
998,771 | 94ab8de17189cd8e39b8e30923371c9f8c2c912a | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class VAE_trainer(nn.Module):
    """Training harness for a VAE.

    ``model`` must return ``(recon_x, (z, z_mu, z_logvar))`` when called on a
    batch, and ``objective`` must accept ``(recon_x, x, z_mu, z_logvar)``.
    NOTE(review): ``self.cuda = cuda`` shadows ``nn.Module.cuda`` on the
    instance; kept for interface compatibility.
    """

    def __init__(self, model, objective, optimizer, cuda):
        super(VAE_trainer, self).__init__()
        self.model = model
        self.objective = objective
        self.optimizer = optimizer
        self.cuda = cuda
        if cuda:
            self.model = self.model.cuda()

    def _loss(self, x, y=None):
        """Mean VAE objective on minibatch ``x`` (``y`` is accepted but unused)."""
        x = Variable(x)
        if self.cuda:
            x = x.cuda()
        recon_x, (z, z_mu, z_logvar) = self.model(x)
        loss = self.objective(recon_x, x, z_mu, z_logvar)
        return torch.mean(loss)

    def train(self, labeled, unlabeled, n_epochs):
        """Optimize on the ``unlabeled`` loader for ``n_epochs`` epochs.

        ``labeled`` is part of the signature but unused here.  Progress is
        printed every 10 epochs.
        """
        for epoch in range(n_epochs):
            for unlabeled_image, _ in unlabeled:
                loss = self._loss(unlabeled_image)
                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()
            if (epoch + 1) % 10 == 0:
                # Fix: `loss.data[0]` raises on 0-dim tensors in PyTorch >= 0.4;
                # `loss.item()` is the supported scalar accessor.
                print("Epoch: {}, loss:{:.3f}".format(epoch + 1, loss.item()))
class Classifier_trainer(nn.Module):
    """Trains ``classifier`` on latent codes produced by a (frozen) encoder.

    ``model`` must return ``(recon_x, (z, z_mu, z_logvar))``; only ``z`` is
    consumed here.
    """

    def __init__(self, model, classifier, cuda):
        super(Classifier_trainer, self).__init__()
        self.model = model
        self.classifier = classifier
        self.cuda = cuda
        self.optimizer = torch.optim.Adam(self.classifier.parameters(), lr = 1e-3)
        if self.cuda:
            self.model = self.model.cuda()
            self.classifier = self.classifier.cuda()

    def _calculate_z(self, x):
        """Encode a batch ``x`` to its latent codes ``z``."""
        _, (z, _, _) = self.model(x)
        return z

    def _calculate_logits(self, z):
        """Class logits for latent codes ``z``."""
        logits = self.classifier(z)
        return logits

    def train(self, train_loader, validation_loader, n_epochs):
        """Cross-entropy training; every 10 epochs prints validation accuracy."""
        for epoch in range(n_epochs):
            for trn_x, trn_y in train_loader:
                trn_x, trn_y = Variable(trn_x), Variable(trn_y)
                if self.cuda:
                    trn_x, trn_y = trn_x.cuda(), trn_y.cuda()
                logits = self._calculate_logits(self._calculate_z(trn_x))
                loss = F.cross_entropy(logits, trn_y)
                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()
            if (epoch + 1) % 10 == 0:
                accuracy = []
                for val_x, val_y in validation_loader:
                    val_x = Variable(val_x)
                    if self.cuda:
                        val_x = val_x.cuda()
                        val_y = val_y.cuda()
                    logits = self._calculate_logits(self._calculate_z(val_x))
                    _, val_y_pred = torch.max(logits, 1)
                    # Fix: collect plain floats via .item() so np.mean receives
                    # numbers rather than 0-dim tensors.
                    accuracy += [torch.mean((val_y_pred.data == val_y).float()).item()]
                # Fix: `loss.data[0]` fails on 0-dim tensors in modern PyTorch.
                print("Epoch: {0:} loss: {1:.3f}, accuracy: {2:.3f}".format(epoch + 1, loss.item(), np.mean(accuracy)))
class Node:
    """A binary-tree node holding ``data`` and optional left/right children."""

    def __init__(self, data):
        self.data = data
        # Fix: the original assigned to an undefined global `root`; children
        # must be initialised on the instance itself.
        self.left = None
        self.right = None

    def __repr__(self):
        return "Node(%r)" % (self.data,)


def conversion(llist):
    """Pick the middle element of ``llist`` as a BST root.

    Returns ``(root, remaining)`` where ``remaining`` is ``llist`` with the
    chosen element removed (the list is modified in place).
    Fix: originally a method but invoked as a bare function by the demo code.
    """
    rootInd = int(len(llist) / 2)
    root = Node(llist[rootInd])
    llist.pop(rootInd)
    return (root, llist)


def construction(root, llist):
    """Insert every value of ``llist`` into the BST rooted at ``root``.

    Fix: the original compared raw values against the Node object itself and
    recursed on ``None`` children; values are now compared against
    ``node.data`` and attached at the first free slot.  Returns ``root``.
    """
    for x in llist:
        node = root
        while True:
            if x < node.data:
                if node.left is None:
                    node.left = Node(x)
                    break
                node = node.left
            elif x > node.data:
                if node.right is None:
                    node.right = Node(x)
                    break
                node = node.right
            else:
                break  # duplicates are ignored
    return root


if __name__ == "__main__":
    # Demo kept from the original script, now guarded so importing this
    # module has no side effects.
    root, rest = conversion([1, 2, 3, 4, 5, 6])
    construction(root, rest)
    print((root, rest))
|
def count_squares(grid):
    """Count the all-'1' sub-squares of a square character grid.

    Returns a list ``sizes`` where ``sizes[k]`` is the number of k-by-k
    sub-squares containing no '0' character (index 0 is unused).
    """
    n = len(grid)
    sizes = [0] * (n + 1)
    for size in range(1, n + 1):
        for r in range(n - size + 1):
            for c in range(n - size + 1):
                # A square is counted only if every one of its rows is clean.
                if all("0" not in grid[r + i][c:c + size] for i in range(size)):
                    sizes[size] += 1
    return sizes


def main():
    """Read n and n grid rows from stdin, then report square counts."""
    n = int(input())
    grid = [input() for _ in range(n)]
    sizes = count_squares(grid)
    print(f"total: {sum(sizes)}")
    for i in range(n + 1):
        if sizes[i]:
            print(f"size[{i}]: {sizes[i]}")


if __name__ == "__main__":
    # Fix: guarded entry point so the logic is importable/testable without
    # blocking on stdin.
    main()
|
# Fix: the module was imported under the alias `pyplot` but used as `plt`,
# which raised NameError on the first plotting call.
import matplotlib.pyplot as plt
import numpy as np

# 100 evenly spaced sample points on [0, 20].
x = np.linspace(0, 20, 100)
plt.plot(x, np.sin(x))
# Fix: `plt.show` without parentheses never calls the function, so no window
# was ever displayed.
plt.show()
|
998,775 | b6fee951a3167df66d76142ec58fdd09289de1c5 |
from django.contrib.auth.models import User
from userdb.models import Team, Region
from openstack.client import OpenstackClient, get_admin_credentials
from openstack.models import Tenant
from scripts.set_quotas import set_quota
def setup_network(client, region, tenant_id):
    """Create the tenant's private network, router and subnet via Neutron.

    Builds a private network, a router gatewayed to the region's public
    network, and a 192.168.0.0/24 DHCP subnet attached to that router.
    Returns the id of the new private network.

    NOTE(review): these Neutron calls are order-dependent and there is no
    rollback if a later step fails — confirm that partial failures are
    acceptable for the caller.
    """
    neutron = client.get_neutron()

    # Private tenant network.
    network = {'tenant_id' : tenant_id,
               'name' : 'bryn:tenant-private',
               'admin_state_up' : True}
    n = neutron.create_network({'network':network})

    # Tenant router, uplinked to the region's public network.
    router = {"tenant_id" : tenant_id,
              "name" : "bryn:tenant-router",
              "admin_state_up" : True}
    r = neutron.create_router({'router':router})
    public_network = region.regionsettings.public_network_id
    neutron.add_gateway_router(r['router']['id'], {"network_id" : public_network})

    # add subnet
    subnet = {"name": "tenant1-192.168.0.0/24",
              "enable_dhcp": True,
              "network_id": n['network']['id'],
              "tenant_id": tenant_id,
              # .1 is the gateway; DHCP hands out .50-.200.
              "allocation_pools": [{"start": "192.168.0.50", "end": "192.168.0.200"}],
              "gateway_ip": "192.168.0.1",
              "ip_version": 4,
              "cidr": "192.168.0.0/24"}
    # add name servers
    s = neutron.create_subnet({'subnet' : subnet})

    # router-interface-add
    neutron.add_interface_router(r['router']['id'], {'subnet_id' : s['subnet']['id']})

    return n['network']['id']
def setup_tenant(team, region):
    """Provision an OpenStack tenant for ``team`` in ``region``.

    Creates the Keystone tenant and user (with a random password), opens
    SSH/HTTP/HTTPS in the default security group, optionally sets up the
    private network, applies quotas, and persists the Tenant/Team records.

    NOTE(review): sequential side-effecting API calls with no rollback on
    partial failure.
    """
    # Admin-scoped client for tenant/user creation.
    client = OpenstackClient(region.name, **get_admin_credentials(region.name))
    nova = client.get_nova()
    keystone = client.get_keystone()

    tenant = Tenant(team=team, region=region)
    tenant_name = tenant.get_tenant_name()
    tenant_description = tenant.get_tenant_description()

    openstack_tenant = keystone.tenants.create(
        tenant_name=tenant_name,
        description=tenant_description,
        enabled=True)

    username = tenant.get_auth_username()
    password = User.objects.make_random_password(length=16)
    # NOTE(review): `user` is never read afterwards; kept for the side effect.
    user = keystone.users.create(
        name=username,
        password=password,
        tenant_id=openstack_tenant.id)

    tenant.created_tenant_id = openstack_tenant.id
    tenant.auth_password = password

    ## flip to user tenant
    client = OpenstackClient(region.name,
                             username=tenant.get_auth_username(),
                             password=tenant.auth_password,
                             project_name=tenant.get_tenant_name())
    nova = client.get_nova()

    # NOTE(review): `security_group_name` is only used by the commented-out
    # creation call below; the pre-existing "default" group is used instead.
    security_group_name = "bryn:default"
    # group = nova.security_groups.create(
    #     security_group_name,
    #     'Automatic security group for %s' % (tenant_name)
    # )
    group = nova.security_groups.find(name="default")

    # Open SSH (22), HTTP (80) and HTTPS (443) to the tenant's instances.
    nova.security_group_rules.create(group.id, ip_protocol="tcp",
                                     from_port=22, to_port=22)
    nova.security_group_rules.create(group.id, ip_protocol="tcp",
                                     from_port=80, to_port=80)
    nova.security_group_rules.create(group.id, ip_protocol="tcp",
                                     from_port=443, to_port=443)

    if region.regionsettings.requires_network_setup:
        tenant.created_network_id = setup_network(client, region, tenant.created_tenant_id)

    set_quota(tenant)
    tenant.save()

    team.tenants_available = True
    team.save()

#openstack quota set --cores 128 --ram 650000 --gigabytes 10000 --snapshots 100 6a0797bfd90d4aba820c427d4e8a60d9
def run():
    """Script entry point: provision a tenant for team #1 in the 'bham' region."""
    team = Team.objects.get(pk=1)
    region = Region.objects.get(name='bham')
    setup_tenant(team, region)
|
998,776 | 7264fad5004a7cec61bfb35b6c2c386b3577247a | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 19 20:57:43 2019
..
@author: martin
"""
import fileinput
def GetSubKeys(key):
    """Derive the two 8-bit S-DES round keys from a 10-bit key string."""
    # P10 permutation, then split into two 5-bit halves.
    permuted_key = permut([2,4,1,6,3,9,0,8,7,5], key)
    half_1 = permuted_key[:5]
    half_2 = permuted_key[5:]

    # One circular left shift of each half, then P8 -> subkey 1.
    half_1 = permut([1,2,3,4,0], half_1)
    half_2 = permut([1,2,3,4,0], half_2)
    subkey_1 = permut([5,2,6,3,7,4,9,8], half_1 + half_2)

    # Two further left shifts of each half, then P8 -> subkey 2.
    half_1 = permut([2,3,4,0,1], half_1)
    half_2 = permut([2,3,4,0,1], half_2)
    subkey_2 = permut([5,2,6,3,7,4,9,8], half_1 + half_2)

    return subkey_1, subkey_2


def permut(new_positions, original_bits):
    """Return the permutation of `original_bits` given by an index list."""
    return ''.join(str(original_bits[i]) for i in new_positions)


def Int_str(str1, str2):
    """XOR two binary strings, returning the result as an int."""
    return int(str1, base=2) ^ int(str2, base=2)


def feistel(permutedText, subkey, s_box0, s_box1):
    """One S-DES Feistel round on an 8-bit block.

    The right half is expanded (E/P), XORed with the round key, pushed through
    the two S-boxes and P4, then XORed into the left half.  Returns
    (new_left, right); the caller swaps the halves between rounds.
    """
    left = permutedText[:4]
    right = permutedText[4:]

    # E/P expansion of the right half, mixed with the round key.
    expanded = permut([3,0,1,2,1,2,3,0], right)
    mixed = format(Int_str(expanded, subkey), '08b')

    # S-box lookups: bits (0,3) of each nibble select the row, bits (1,2) the column.
    row0 = int(permut([0,3], mixed[:4]), base=2)
    col0 = int(permut([1,2], mixed[:4]), base=2)
    s0 = s_box0[row0][col0]
    row1 = int(permut([0,3], mixed[4:]), base=2)
    col1 = int(permut([1,2], mixed[4:]), base=2)
    s1 = s_box1[row1][col1]

    # P4 permutation of the S-box outputs, folded into the left half.
    p4 = permut([1,3,2,0], format(s0, '02b') + format(s1, '02b'))
    new_left = format(Int_str(left, p4), '04b')
    return new_left, right
def main():
    """Read mode ('E' or 'D'), a 10-bit key and an 8-bit block from the input
    files / stdin, run the two-round S-DES network, and print the result."""
    # Fix: fileinput.FileInput is an iterator, not a sequence; indexing it
    # (file_input[0], ...) relied on __getitem__ support that was deprecated
    # and later removed from the standard library.
    lines = [line.replace('\n', '') for line in fileinput.input()]
    mode, key, Text = lines[0], lines[1], lines[2]

    s_box0 = [[1,0,3,2],[3,2,1,0],[0,2,1,3],[3,1,3,2]]
    s_box1 = [[0,1,2,3],[2,0,1,3],[3,0,1,0],[2,1,0,3]]

    # Decryption applies the same round keys in reverse order.
    if (mode == 'E'):
        SubKey1, SubKey2 = GetSubKeys(key)
    else:
        SubKey2, SubKey1 = GetSubKeys(key)

    # IP, two Feistel rounds with a half-swap in between, then IP^-1.
    permutedText = permut([1,5,2,0,3,7,4,6], Text)
    left, right = feistel(permutedText, SubKey1, s_box0, s_box1)
    left, right = feistel(right + left, SubKey2, s_box0, s_box1)
    inverse = permut([3,0,2,4,6,1,7,5], left + right)
    print(inverse)


if __name__ == "__main__":
    main()
|
998,777 | a3c5fd07eb0cf03340229ba81b1a8bd2eb93cc8f | import asyncio
import logging
from six import string_types
from typing import (
List,
Dict,
Optional,
Any,
)
from hummingbot.core.utils.wallet_setup import (
create_and_save_wallet,
import_and_save_wallet,
list_wallets,
unlock_wallet
)
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.in_memory_config_map import in_memory_config_map
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.client.config.config_helpers import (
get_strategy_config_map,
write_config_to_yml,
load_required_configs,
parse_cvar_value,
copy_strategy_template,
)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from hummingbot.client.hummingbot_application import HummingbotApplication
class ConfigCommand:
    """Mixin implementing the interactive `config` command.

    Expected to be mixed into HummingbotApplication (see the inline
    `# type: HummingbotApplication` self annotations); relies on its
    `app`, `strategy`, `ev_loop`, `_notify`, etc.
    """

    def config(self,  # type: HummingbotApplication
               key: str = None,
               key_list: Optional[List[str]] = None):
        """Entry point: reconfigure everything, one key, or a list of keys."""
        self.app.clear_input()
        # If a strategy is running (or config is already complete and no
        # specific key was requested), go through the reset flow instead.
        if self.strategy or (self.config_complete and key is None):
            asyncio.ensure_future(self.reset_config_loop(key))
            return
        if key is not None and key not in load_required_configs().keys():
            self._notify("Invalid config variable %s" % (key,))
            return
        if key is not None:
            keys = [key]
        elif key_list is not None:
            keys = key_list
        else:
            keys = self._get_empty_configs()
        asyncio.ensure_future(self._config_loop(keys), loop=self.ev_loop)

    @property
    def config_complete(self,  # type: HummingbotApplication
                        ):
        """True when every required (non-filtered) config variable has a value."""
        config_map = load_required_configs()
        keys = self.key_filter(self._get_empty_configs())
        for key in keys:
            cvar = config_map.get(key)
            if cvar.value is None and cvar.required:
                return False
        return True

    @staticmethod
    def _get_empty_configs() -> List[str]:
        """Keys of all required config variables that are still unset."""
        config_map = load_required_configs()
        return [key for key, config in config_map.items() if config.value is None]

    async def reset_config_loop(self,  # type: HummingbotApplication
                                key: str = None):
        """Confirm with the user, optionally stop the strategy, then reconfigure."""
        strategy = in_memory_config_map.get("strategy").value
        strategy_cm = get_strategy_config_map(strategy)

        self.placeholder_mode = True
        self.app.toggle_hide_input()

        if self.strategy:
            choice = await self.app.prompt(prompt=f"Would you like to stop running the {strategy} strategy "
                                                  f"and reconfigure the bot? (y/n) >>> ")
        else:
            choice = await self.app.prompt(prompt=f"Would you like to reconfigure the bot? (y/n) >>> ")

        self.app.change_prompt(prompt=">>> ")
        self.app.toggle_hide_input()
        self.placeholder_mode = False

        if choice.lower() in {"y", "yes"}:
            if self.strategy:
                await self.stop_loop()
            if key is None:
                # Clear original strategy config map
                if strategy_cm:
                    for k in strategy_cm:
                        strategy_cm[k].value = None
                in_memory_config_map.get("strategy").value = None
                in_memory_config_map.get("strategy_file_path").value = None
            self.clear_application_warning()
            self.config(key)
        else:
            self._notify("Aborted.")

    async def _create_or_import_wallet(self,  # type: HummingbotApplication
                                       ):
        """Prompt the user to import or create an Ethereum wallet.

        Recurses on bad input / failed import; returns the wallet address.
        """
        choice = await self.app.prompt(prompt=global_config_map.get("wallet").prompt)
        if choice == "import":
            private_key = await self.app.prompt(prompt="Your wallet private key >>> ", is_password=True)
            password = await self.app.prompt(prompt="A password to protect your wallet key >>> ", is_password=True)
            try:
                self.acct = import_and_save_wallet(password, private_key)
                self._notify("Wallet %s imported into hummingbot" % (self.acct.address,))
            except Exception as e:
                self._notify(f"Failed to import wallet key: {e}")
                result = await self._create_or_import_wallet()
                return result
        elif choice == "create":
            password = await self.app.prompt(prompt="A password to protect your wallet key >>> ", is_password=True)
            self.acct = create_and_save_wallet(password)
            self._notify("New wallet %s created" % (self.acct.address,))
        else:
            self._notify('Invalid choice. Please enter "create" or "import".')
            result = await self._create_or_import_wallet()
            return result
        return self.acct.address

    async def _unlock_wallet(self,  # type: HummingbotApplication
                             ):
        """Unlock a previously saved wallet (or fall back to create/import).

        Returns the unlocked wallet's address.
        """
        choice = await self.app.prompt(prompt="Would you like to unlock your previously saved wallet? (y/n) >>> ")
        if choice.lower() in {"y", "yes"}:
            wallets = list_wallets()
            self._notify("Existing wallets:")
            self.list(obj="wallets")
            if len(wallets) == 1:
                public_key = wallets[0]
            else:
                public_key = await self.app.prompt(prompt="Which wallet would you like to import ? >>> ")
            password = await self.app.prompt(prompt="Enter your password >>> ", is_password=True)
            try:
                acct = unlock_wallet(public_key=public_key, password=password)
                self._notify("Wallet %s unlocked" % (acct.address,))
                self.acct = acct
                return self.acct.address
            except Exception as e:
                # Retry on any unlock failure (wrong password, missing file, ...).
                self._notify("Cannot unlock wallet. Please try again.")
                result = await self._unlock_wallet()
                return result
        else:
            value = await self._create_or_import_wallet()
            return value

    async def _import_or_create_strategy_config(self,  # type: HummingbotApplication
                                                ):
        """Prompt for an existing strategy config file or create a new one.

        Recurses until a path that passes the ConfigVar validator is obtained.
        """
        current_strategy: str = in_memory_config_map.get("strategy").value
        strategy_file_path_cv: ConfigVar = in_memory_config_map.get("strategy_file_path")
        choice = await self.app.prompt(prompt="Import previous configs or create a new config file? "
                                              "(import/create) >>> ")
        if choice == "import":
            strategy_path = await self.app.prompt(strategy_file_path_cv.prompt)
            # NOTE(review): self-assignment kept verbatim; it has no effect.
            strategy_path = strategy_path
            self._notify(f"Loading previously saved config file from {strategy_path}...")
        elif choice == "create":
            strategy_path = await copy_strategy_template(current_strategy)
            self._notify(f"new config file at {strategy_path} created.")
        else:
            self._notify('Invalid choice. Please enter "create" or "import".')
            strategy_path = await self._import_or_create_strategy_config()

        # Validate response
        if not strategy_file_path_cv.validate(strategy_path):
            self._notify(f"Invalid path {strategy_path}. Please enter \"create\" or \"import\".")
            strategy_path = await self._import_or_create_strategy_config()
        return strategy_path

    async def config_single_variable(self,  # type: HummingbotApplication
                                     cvar: ConfigVar,
                                     is_single_key: bool = False) -> Any:
        """Prompt for (and validate) the value of one config variable.

        Special-cases the strategy file path and wallet keys; falls back to
        the variable's default when the answer is empty/None.
        """
        if cvar.required or is_single_key:
            if cvar.key == "strategy_file_path":
                val = await self._import_or_create_strategy_config()
            elif cvar.key == "wallet":
                wallets = list_wallets()
                if len(wallets) > 0:
                    val = await self._unlock_wallet()
                else:
                    val = await self._create_or_import_wallet()
                logging.getLogger("hummingbot.public_eth_address").info(val)
            else:
                val = await self.app.prompt(prompt=cvar.prompt, is_password=cvar.is_secure)
            if not cvar.validate(val):
                self._notify("%s is not a valid %s value" % (val, cvar.key))
                val = await self.config_single_variable(cvar)
        else:
            val = cvar.value
        if val is None or (isinstance(val, string_types) and len(val) == 0):
            val = cvar.default
        return val

    def key_filter(self, keys: List[str]):
        """Drop keys that are irrelevant in paper-trading mode.

        On any error the input list is returned unfiltered.
        """
        try:
            exclude_keys = set()
            if global_config_map.get("paper_trade_enabled").value:
                # No real exchange/wallet credentials are needed for paper trading.
                exclude_keys.update(["wallet",
                                     "coinbase_pro_api_key",
                                     "coinbase_pro_secret_key",
                                     "coinbase_pro_passphrase",
                                     "binance_api_key",
                                     "binance_api_secret",
                                     "huobi_api_key",
                                     "huobi_secret_key",
                                     "idex_api_key",
                                     "ethereum_rpc_url"
                                     ])
            return [k for k in keys if k not in exclude_keys]
        except Exception as err:
            # NOTE(review): `err` is unused; the traceback is logged via exc_info.
            self.logger().error("Error filtering config keys.", exc_info=True)
            return keys

    async def _inner_config_loop(self, _keys: List[str], single_key: bool):
        """Prompt for each key in turn, then recurse until config is complete."""
        keys = self.key_filter(_keys)
        for key in keys:
            current_strategy: str = in_memory_config_map.get("strategy").value
            strategy_cm: Dict[str, ConfigVar] = get_strategy_config_map(current_strategy)
            # Resolve the ConfigVar: in-memory map first, then global, then strategy.
            if key in in_memory_config_map:
                cv: ConfigVar = in_memory_config_map.get(key)
            elif key in global_config_map:
                cv: ConfigVar = global_config_map.get(key)
            else:
                cv: ConfigVar = strategy_cm.get(key)
            value = await self.config_single_variable(cv, is_single_key=single_key)
            cv.value = parse_cvar_value(cv, value)
            if single_key:
                self._notify(f"\nNew config saved:\n{key}: {str(value)}")
        if not self.config_complete:
            await self._inner_config_loop(self._get_empty_configs(), single_key)

    async def _config_loop(self,  # type: HummingbotApplication
                           keys: List[str] = []):
        """Drive the whole interactive configuration flow and persist the result."""
        self._notify("Please follow the prompt to complete configurations: ")
        self.placeholder_mode = True
        self.app.toggle_hide_input()
        single_key = len(keys) == 1
        try:
            await self._inner_config_loop(keys, single_key)
            await write_config_to_yml()
            if not single_key:
                self._notify("\nConfig process complete. Enter \"start\" to start market making.")
                self.app.set_text("start")
        except asyncio.TimeoutError:
            self.logger().error("Prompt timeout")
        except Exception as err:
            self.logger().error("Unknown error while writing config. %s" % (err,), exc_info=True)
        finally:
            # Always restore normal prompt/input state.
            self.app.toggle_hide_input()
            self.placeholder_mode = False
            self.app.change_prompt(prompt=">>> ")
|
998,778 | 29f3365ae705cc6eb09ae1be0b7bdb9ffa7e4d6f | import pickle
import sys
import csv
from infoboxes_list import *
from dump import *
def get_coarse_type(f_type):
    """Map a fine-grained infobox type to 'Person', 'Location' or
    'Organization'; returns None when `f_type` is None or unrecognised."""
    if f_type is None:
        return None
    if f_type in Person_infoboxes:
        return 'Person'
    if f_type in Location_infoboxes:
        return 'Location'
    if f_type in Organization_infoboxes:
        return 'Organization'
    # Miscellaneous types were deliberately disabled in the original:
    #elif f_type in Miscllaneous_infoboxes:
    #return 'Misc'
    return None
def get_fine_type(infobox):
    """Return the fine-grained type from an infobox dict.

    'musical artist' pages with a 'group_or_band' background are specialised
    to 'musical group or band'.  Any lookup failure (e.g. a missing key)
    yields None, matching the original broad-except contract.
    """
    try:
        fine = infobox['Infobox']
        if fine == 'musical artist' and infobox['background'] == 'group_or_band':
            return 'musical group or band'
        return fine
    except Exception:
        return None
# Global tallies: pages per fine type and per coarse type.
coarse_count = dict()
fine_count = dict()

if __name__ == '__main__':
    # Usage: <script> <infobox-dictionary.pickle>
    # Builds entity_types.pickle plus CSV frequency tables for the types.
    entitype_dict = dict()
    infoboxDictionary = sys.argv[1]
    infoboxes = pickle.load(open(infoboxDictionary, "rb"))
    for key, val in infoboxes.items():
        entitype_dict[key] = dict()
        entitype_dict[key]['id'] = val['id']
        fine = get_fine_type(val['infobox'])
        entitype_dict[key]['fine'] = fine
        coarse = get_coarse_type(fine)
        entitype_dict[key]['coarse'] = coarse
        # Only pages with a recognised coarse type contribute to the tallies.
        if coarse != None:
            #print(key, fine, coarse)
            if fine not in fine_count:
                fine_count[fine] = 1
            else:
                fine_count[fine] += 1
            if coarse not in coarse_count:
                coarse_count[coarse] = 1
            else:
                coarse_count[coarse] += 1
    dump_dictionary('entity_types.pickle', entitype_dict)
    dump_dictionary_csv("fineTypeStats.csv", fine_count)
    dump_dictionary_csv("coarseTypeStats.csv", coarse_count)
|
998,779 | 26ed51f2a9457314260eca479d984b368135a2bf | import os
import pandas
from tqdm import tqdm
from source1.classes.Tool import Tool
# Directory of previously downloaded Box Office Mojo result pages.
path = "F:\\projects\\python projects\\moiveinfo\\html_source"
# Spreadsheet column headers (rank, title, worldwide gross in USD, release year).
header = ["排名", "电影名称", "总票房(美元)", "上映时间"]
def main():
    """Parse every saved HTML page under `path` and export the box-office
    table (rank, title, worldwide gross, year) to moive.xls."""
    rows = []
    for filename in tqdm(os.listdir(path)):
        with open(path + "\\" + filename, "r", encoding="utf-8") as f:
            html = f.read()
        tbody = Tool.tbody_pattern.findall(html)[0]
        for tr in Tool.tr_pattern.findall(tbody):
            tds = Tool.td_pattern.findall(tr)
            rank = int(tds[0].replace(",", ""))
            title = Tool.title_pattern.findall(tds[1])[0]
            gross = tds[2].lstrip("$").replace(",", "")
            year = Tool.year_pattern.findall(tds[7])[0]
            rows.append([rank, title, gross, year])
    pandas.DataFrame(rows).to_excel("moive.xls", index=False, header=header)


if __name__ == '__main__':
    main()
|
998,780 | 325fdd4c3f9c726d37c55ef3714f52bdd03ebda0 | import time
import pytest
from h2_conf import HttpdConf
class TestEncoding:
    """httpd URL path encoding/normalization tests over HTTP/2."""

    # Running count of AH10244 ("invalid URI path") errors expected in the
    # server log; incremented by test_203_04 and checked on fixture teardown.
    EXP_AH10244_ERRS = 0

    @pytest.fixture(autouse=True, scope='class')
    def _class_scope(self, env):
        """Install the test vhosts, restart httpd, and on teardown assert the
        error log contains exactly the expected number of errors/warnings."""
        extras = {
            'base': f"""
            <Directory "{env.gen_dir}">
                AllowOverride None
                Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch
                Require all granted
            </Directory>
            """,
        }
        conf = HttpdConf(env)
        conf.add_vhost_test1(extras=extras)
        # test2 vhost allows encoded slashes (%2f) in paths.
        conf.add_vhost_test2(extras={
            f"test2.{env.http_tld}": "AllowEncodedSlashes on",
        })
        conf.add_vhost_cgi(extras={
            f"cgi.{env.http_tld}": f"ScriptAlias /cgi-bin/ {env.gen_dir}",
        })
        conf.install()
        assert env.apache_restart() == 0
        yield
        errors, warnings = env.apache_errors_and_warnings()
        assert (len(errors), len(warnings)) == (TestEncoding.EXP_AH10244_ERRS, 0),\
            f"apache logged {len(errors)} errors and {len(warnings)} warnings: \n"\
            "{0}\n{1}\n".format("\n".join(errors), "\n".join(warnings))
        env.apache_error_log_clear()

    # check handling of url encodings that are accepted
    @pytest.mark.parametrize("path", [
        "/006/006.css",
        "/%30%30%36/%30%30%36.css",
        "/nothing/../006/006.css",
        "/nothing/./../006/006.css",
        "/nothing/%2e%2e/006/006.css",
        "/nothing/%2e/%2e%2e/006/006.css",
        "/nothing/%2e/%2e%2e/006/006%2ecss",
    ])
    def test_203_01(self, env, path):
        url = env.mkurl("https", "test1", path)
        r = env.curl_get(url)
        assert r.response["status"] == 200

    # check handling of / normalization
    @pytest.mark.parametrize("path", [
        "/006//006.css",
        "/006//////////006.css",
        "/006////.//////006.css",
        "/006////%2e//////006.css",
        "/006////%2e//////006%2ecss",
        "/006/../006/006.css",
        "/006/%2e%2e/006/006.css",
    ])
    def test_203_03(self, env, path):
        url = env.mkurl("https", "test1", path)
        r = env.curl_get(url)
        assert r.response["status"] == 200

    # check path traversals
    @pytest.mark.parametrize(["path", "status"], [
        ["/../echo.py", 400],
        ["/nothing/../../echo.py", 400],
        ["/cgi-bin/../../echo.py", 400],
        ["/nothing/%2e%2e/%2e%2e/echo.py", 400],
        ["/cgi-bin/%2e%2e/%2e%2e/echo.py", 400],
        ["/nothing/%%32%65%%32%65/echo.py", 400],
        ["/cgi-bin/%%32%65%%32%65/echo.py", 400],
        ["/nothing/%%32%65%%32%65/%%32%65%%32%65/h2_env.py", 400],
        ["/cgi-bin/%%32%65%%32%65/%%32%65%%32%65/h2_env.py", 400],
        ["/nothing/%25%32%65%25%32%65/echo.py", 404],
        ["/cgi-bin/%25%32%65%25%32%65/echo.py", 404],
        ["/nothing/%25%32%65%25%32%65/%25%32%65%25%32%65/h2_env.py", 404],
        ["/cgi-bin/%25%32%65%25%32%65/%25%32%65%25%32%65/h2_env.py", 404],
    ])
    def test_203_04(self, env, path, status):
        url = env.mkurl("https", "cgi", path)
        r = env.curl_get(url)
        assert r.response["status"] == status
        if status == 400:
            TestEncoding.EXP_AH10244_ERRS += 1
            # the log will have a core:err about invalid URI path

    # check handling of %2f url encodings that are not decoded by default
    @pytest.mark.parametrize(["host", "path", "status"], [
        ["test1", "/006%2f006.css", 404],
        ["test2", "/006%2f006.css", 200],
        ["test2", "/x%252f.test", 200],
        ["test2", "/10%25abnormal.txt", 200],
    ])
    def test_203_20(self, env, host, path, status):
        url = env.mkurl("https", host, path)
        r = env.curl_get(url)
        assert r.response["status"] == status
|
998,781 | 74b4570fbed9d547cd9750df8beaa843c92dc8ab | '''
Copyright (C) 2022 CG Cookie
http://cgcookie.com
hello@cgcookie.com
Created by Jonathan Denning, Jonathan Williamson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import socket
import sys
'''
Note: this is a work in progress only!
'''
# https://pythonspot.com/building-an-irc-bot/
class IRC:
    """Minimal IRC client wrapper around a single TCP socket."""

    def __init__(self):
        self.done = False
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def __del__(self):
        self.close()

    def send_text(self, text):
        """Send a raw protocol line, appending the trailing newline if absent."""
        if not text.endswith('\n'):
            text += '\n'
        self.socket.send(bytes(text, encoding='utf-8'))

    def send(self, chan, msg):
        """Send a PRIVMSG to channel `chan`."""
        line = "PRIVMSG " + chan + " :" + msg + "\n"
        self.socket.send(bytes(line, encoding='utf-8'))

    def connect(self, server, channel, nickname):
        """Connect to `server` (port 6667), register as `nickname`, join `channel`."""
        print("connecting to:", server)
        self.socket.connect((server, 6667))
        # USER / NICK registration, then JOIN.
        user_line = "USER " + nickname + " " + nickname + " " + nickname + " :This is a fun bot!\n"
        self.socket.send(bytes(user_line, encoding='utf-8'))
        self.socket.send(bytes("NICK " + nickname + "\n", encoding='utf-8'))
        self.socket.send(bytes("JOIN " + channel + "\n", encoding='utf-8'))

    def get_text(self, blocking=True):
        """Receive up to 4096 bytes of decoded text; None on a socket error."""
        self.socket.setblocking(blocking)
        try:
            return str(self.socket.recv(4096), encoding='utf-8')
        except socket.error:
            return None

    def close(self):
        """Close the socket once; subsequent calls are no-ops."""
        if self.done:
            return
        self.socket.close()
        self.done = True
if __name__ == '__main__':
    channel = "#retopoflow"
    server = "irc.freenode.net"
    nickname = "rftester"

    irc = IRC()
    irc.connect(server, channel, nickname)
    while True:
        text = irc.get_text()
        # Fix: get_text() returns None on a socket error (and '' on a closed
        # connection); the original then crashed on `"PRIVMSG" in text`.
        if not text:
            continue
        print(text)
        if "PRIVMSG" in text and channel in text and "hello" in text:
            irc.send(channel, "Hello!")
998,782 | bab1e403d63f5ad2571f198991b6571bb2f10266 | # -*- coding: utf-8 -*-
'''
Created on 2018年4月8日
@author: zwp
'''
import numpy as np;
def normal(X):
    """Row-wise min-max normalisation to [0, 1].

    A 1-D input is treated as a single row and returned flattened.
    """
    rank = np.ndim(X)
    if rank == 1:
        X = np.reshape(X, [-1, X.shape[0]])
    row_max = np.reshape(np.max(X, axis=1), [-1, 1])
    row_min = np.reshape(np.min(X, axis=1), [-1, 1])
    X = (X - row_min) / (row_max - row_min)
    if rank == 1:
        X = X.flatten()
    return X
def dist(X, x):
    """Euclidean distance between each row of the training set X
    ([batch, feature_x]) and the test item x ([feature_x]).

    Returns a [batch] vector of distances.
    """
    diff = X - x
    return np.sqrt((diff ** 2).sum(axis=1))
# --- ad-hoc smoke test / demo code (runs at import time) ---
a = np.array([1,2,3]);
b = np.reshape(np.random.normal(size=9),[3,3]);
# NOTE(review): `b` is immediately overwritten below, so the random draw
# above is dead code.
b = np.reshape(np.arange(9),[3,3]);
print(np.max(b,axis=1));
print(np.ndim(a),np.ndim(b))
print(b);
print(normal(a));
d = dist(b,a);
print(d);
# NOTE(review): `dso` is computed but never used.
dso = np.argsort(d);
print(np.where(a==1));

if __name__ == '__main__':
    pass
# NOTE: Python 2 script (print statement syntax below).
import pandas as pd
# df = pd.read_csv("avg_county.csv", header = 0)
import numpy as np
# print df.head(5)
# df_co = df[df.Pollutant == "SO2"]
# # df_co.to_scv("avg_county_CO.csv", sep='\t', encoding='utf-8')
# # with open("avg_county_CO.csv", 'a') as f:
# # df_co.to_csv(f)
# with open("avg_county_SO2.csv", 'a') as f:
# df_co.to_csv(f)

# Extract season-1 SO2 rows from the per-county seasonal averages and
# append them to SO2_season1.csv.
df = pd.read_csv("season_county.csv", header = 0)
print df.head(5)
df1 = df[df.Season == 1]
df1 = df1[df1.Pollutant == "SO2"]
with open("SO2_season1.csv", 'a') as f:
    df1.to_csv(f)
|
998,784 | 15bcd17b5d3796b85c72cd12864e0ef6773c55f5 | #!/usr/bin/env python
# TODO: Free energy of external confinement for poseBPMFs
import os
import cPickle as pickle
import gzip
import copy
from AlGDock.IO import load_pkl_gz
from AlGDock.IO import write_pkl_gz
from AlGDock.logger import NullDevice
import sys
import time
import numpy as np
from collections import OrderedDict
from AlGDock import dictionary_tools
import MMTK
import MMTK.Units
from MMTK.ParticleProperties import Configuration
from MMTK.ForceFields import ForceField
import Scientific
try:
from Scientific._vector import Vector
except:
from Scientific.Geometry.VectorModule import Vector
import pymbar.timeseries
import multiprocessing
from multiprocessing import Process
# For profiling. Unnecessary for normal execution.
# from memory_profiler import profile
#############
# Constants #
#############

# Gas constant expressed in MMTK units (kJ/mol/K); used for k_B*T energies.
R = 8.3144621 * MMTK.Units.J / MMTK.Units.mol / MMTK.Units.K

# Energy terms that can be scaled during alchemical annealing.
scalables = ['OBC', 'sLJr', 'sELE', 'LJr', 'LJa', 'ELE']

# In APBS, minimum ratio of PB grid length to maximum dimension of solute
LFILLRATIO = 4.0  # For the ligand
RFILLRATIO = 2.0  # For the receptor/complex

DEBUG = False
def HMStime(s):
    """
    Given the time in seconds, an appropriately formatted string.
    """
    if s < 60.:
        return '%.2f s' % s
    minutes = int(s / 60 % 60)
    if s < 3600.:
        return '%d:%.2f' % (minutes, s % 60)
    return '%d:%d:%.2f' % (int(s / 3600), minutes, s % 60)
##############
# Main Class #
##############
class BPMF:
    def __init__(self, **kwargs):
        """Parses the input arguments and runs the requested calculation"""

        # mod_path = os.path.join(os.path.dirname(a.__file__), 'BindingPMF.py')
        # print """###########
        # # AlGDock #
        # ###########
        # Molecular docking with adaptively scaled alchemical interaction grids
        #
        # in {0}
        # last modified {1}
        # """.format(mod_path, time.ctime(os.path.getmtime(mod_path)))

        from AlGDock.argument_parser import SimulationArguments
        self.args = SimulationArguments(**kwargs)

        from AlGDock.simulation_data import SimulationData
        self.data = {}
        # Separate bookkeeping for the two calculation phases:
        # 'BC' (ligand-only) and 'CD' (complex/docked).
        self.data['BC'] = SimulationData(self.args.dir['BC'], 'BC', \
            self.args.params['CD']['pose'])
        self.data['CD'] = SimulationData(self.args.dir['CD'], 'CD', \
            self.args.params['CD']['pose'])

        # Default missing optional kwargs before constructing the logger.
        if not 'max_time' in kwargs.keys():
            kwargs['max_time'] = None
        if not 'run_type' in kwargs.keys():
            kwargs['run_type'] = None

        from AlGDock.logger import Logger
        self.log = Logger(self.args, \
            max_time=kwargs['max_time'], run_type=kwargs['run_type'])

        self.T_HIGH = self.args.params['BC']['T_HIGH']
        self.T_TARGET = self.args.params['BC']['T_TARGET']

        self._setup()

        # NOTE: Python 2 print statements below.
        print '\n*** Simulation parameters and constants ***'
        for p in ['BC', 'CD']:
            print '\nfor %s:' % p
            print dictionary_tools.dict_view(self.args.params[p])[:-1]

        self.run(kwargs['run_type'])
  def _setup(self):
    """Creates an MMTK InfiniteUniverse and adds the ligand

    Builds the ligand-only and receptor+ligand topologies, sets up the
    symmetry-corrected RMSD evaluator, selects or restores the starting
    pose, optionally measures the binding site, reads reference ligand
    and receptor coordinates, and loads any previously saved progress.
    Raises an Exception if a requested pose index does not exist or if
    binding site parameters cannot be determined.
    """
    from AlGDock.topology import Topology
    self.top = Topology(self.args)
    self.top_RL = Topology(self.args, includeReceptor=True)
    # Initialize rmsd calculation function
    from AlGDock.RMSD import hRMSD
    self.get_rmsds = hRMSD(self.args.FNs['prmtop']['L'], \
      self.top.inv_prmtop_atom_order_L)
    # Obtain reference pose
    if self.data['CD'].pose > -1:
      if ('starting_poses' in self.data['CD'].confs.keys()) and \
         (self.data['CD'].confs['starting_poses'] is not None):
        # Reuse the pose stored by a previous run
        starting_pose = np.copy(self.data['CD'].confs['starting_poses'][0])
      else:
        (confs, Es) = self._get_confs_to_rescore(site=False, \
          minimize=False, sort=False)
        if self.args.params['CD']['pose'] < len(confs):
          starting_pose = np.copy(confs[self.args.params['CD']['pose']])
          self.data['CD'].confs['starting_poses'] = [np.copy(starting_pose)]
        else:
          # Requested pose index is out of range; store an infinite
          # binding PMF so downstream analysis skips this pose, then abort
          self._clear('CD')
          self._store_infinite_f_RL()
          raise Exception('Pose index greater than number of poses')
    else:
      starting_pose = None
    from AlGDock.system import System
    self.system = System(self.args,
                         self.log,
                         self.top,
                         self.top_RL,
                         starting_pose=starting_pose)
    # Measure the binding site
    if (self.args.params['CD']['site'] == 'Measure'):
      self.args.params['CD']['site'] = 'Sphere'
      if self.args.params['CD']['site_measured'] is not None:
        # A previous measurement is available; reuse it
        (self.args.params['CD']['site_max_R'],self.args.params['CD']['site_center']) = \
          self.args.params['CD']['site_measured']
      else:
        print '\n*** Measuring the binding site ***'
        self.system.setParams(
          self.system.paramsFromAlpha(1.0, 'CD', site=False))
        (confs, Es) = self._get_confs_to_rescore(site=False, minimize=True)
        if len(confs) > 0:
          # Use the center of mass for configurations
          # within 20 RT of the lowest energy
          cutoffE = Es['total'][-1] + 20 * (R * self.T)
          coms = []
          # confs/Es are sorted with the lowest energy last, so iterate
          # in reverse and stop at the first conformation above the cutoff
          for (conf, E) in reversed(zip(confs, Es['total'])):
            if E <= cutoffE:
              self.top.universe.setConfiguration(
                Configuration(self.top.universe, conf))
              coms.append(np.array(self.top.universe.centerOfMass()))
            else:
              break
          print '  %d configurations fit in the binding site' % len(coms)
          coms = np.array(coms)
          center = (np.min(coms, 0) + np.max(coms, 0)) / 2
          # Sphere radius: max COM distance, rounded up to 0.1 nm,
          # with a floor of 0.6 nm
          max_R = max(
            np.ceil(np.max(np.sqrt(np.sum(
              (coms - center)**2, 1))) * 10.) / 10., 0.6)
          self.args.params['CD']['site_max_R'] = max_R
          self.args.params['CD']['site_center'] = center
          self.top.universe.setConfiguration(
            Configuration(self.top.universe, confs[-1]))
        if ((self.args.params['CD']['site_max_R'] is None) or \
            (self.args.params['CD']['site_center'] is None)):
          raise Exception('No binding site parameters!')
        else:
          self.args.params['CD']['site_measured'] = \
            (self.args.params['CD']['site_max_R'], \
             self.args.params['CD']['site_center'])
    # Read the reference ligand and receptor coordinates
    import AlGDock.IO
    IO_crd = AlGDock.IO.crd()
    # multiplier=0.1 converts Angstroms to nanometers
    if self.args.FNs['inpcrd']['R'] is not None:
      if os.path.isfile(self.args.FNs['inpcrd']['L']):
        lig_crd = IO_crd.read(self.args.FNs['inpcrd']['L'], multiplier=0.1)
      self.data['CD'].confs['receptor'] = IO_crd.read(\
        self.args.FNs['inpcrd']['R'], multiplier=0.1)
    elif self.args.FNs['inpcrd']['RL'] is not None:
      # Split the complex coordinates into ligand and receptor pieces;
      # the ligand occupies a contiguous atom range in the complex
      complex_crd = IO_crd.read(self.args.FNs['inpcrd']['RL'], multiplier=0.1)
      lig_crd = complex_crd[self.top_RL.L_first_atom:self.top_RL.L_first_atom + \
        self.top.universe.numberOfAtoms(),:]
      self.data['CD'].confs['receptor'] = np.vstack(\
        (complex_crd[:self.top_RL.L_first_atom,:],\
         complex_crd[self.top_RL.L_first_atom + self.top.universe.numberOfAtoms():,:]))
    elif self.args.FNs['inpcrd']['L'] is not None:
      self.data['CD'].confs['receptor'] = None
      if os.path.isfile(self.args.FNs['inpcrd']['L']):
        lig_crd = IO_crd.read(self.args.FNs['inpcrd']['L'], multiplier=0.1)
    else:
      lig_crd = None
    if lig_crd is not None:
      # Reorder atoms from prmtop order to MMTK order
      self.data['CD'].confs['ligand'] = lig_crd[self.top.
                                                inv_prmtop_atom_order_L, :]
      self.top.universe.setConfiguration(\
        Configuration(self.top.universe,self.data['CD'].confs['ligand']))
      if self.top_RL.universe is not None:
        self.top_RL.universe.setConfiguration(\
          Configuration(self.top_RL.universe, \
          np.vstack((self.data['CD'].confs['receptor'],self.data['CD'].confs['ligand']))))
    if self.args.params['CD']['rmsd'] is not False:
      if self.args.params['CD']['rmsd'] is True:
        # True means "use the reference ligand coordinates read above"
        if lig_crd is not None:
          rmsd_crd = lig_crd[self.top.inv_prmtop_atom_order_L, :]
        else:
          raise Exception('Reference structure for rmsd calculations unknown')
      else:
        # Otherwise 'rmsd' is a file name of a reference structure
        rmsd_crd = IO_crd.read(self.args.params['CD']['rmsd'], \
          natoms=self.top.universe.numberOfAtoms(), multiplier=0.1)
        rmsd_crd = rmsd_crd[self.top.inv_prmtop_atom_order_L, :]
      self.data['CD'].confs['rmsd'] = rmsd_crd
      self.get_rmsds.set_ref_configuration(self.data['CD'].confs['rmsd'])
    # If configurations are being rescored, start with a docked structure
    (confs, Es) = self._get_confs_to_rescore(site=False, minimize=False)
    if len(confs) > 0:
      self.top.universe.setConfiguration(
        Configuration(self.top.universe, confs[-1]))
    from AlGDock.simulation_iterator import SimulationIterator
    self.iterator = SimulationIterator(self.args, self.top, self.system)
    # Load progress
    from AlGDock.postprocessing import Postprocessing
    Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run(readOnly=True)
    self.calc_f_L(readOnly=True)
    self.calc_f_RL(readOnly=True)
    if self.args.random_seed > 0:
      np.random.seed(self.args.random_seed)
  def run(self, run_type):
    """Dispatches to the simulation or analysis stage named by run_type.

    run_type may be None (only the elapsed-time bookkeeping is skipped)
    or one of the string commands handled below. Unrecognized strings
    fall through without error. The 'timed*' variants stop each stage
    early when the time budget is exhausted and only proceed to the next
    stage when the previous one reports completion.
    """
    from AlGDock.postprocessing import Postprocessing
    self.log.recordStart('run')
    self.log.run_type = run_type
    if run_type=='configuration_energies' or \
       run_type=='minimized_configuration_energies':
      # Energies (optionally after minimization) for externally supplied poses
      self.configuration_energies(\
        minimize = (run_type=='minimized_configuration_energies'), \
        max_confs = 50)
    elif run_type == 'store_params':
      self.save('BC', keys=['progress'])
      self.save('CD', keys=['progress'])
    elif run_type == 'initial_BC':
      self.initial_BC()
    elif run_type == 'BC':  # Sample the BC process
      self.sim_process('BC')
      Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run([('BC', -1, -1, 'L')])
      self.calc_f_L()
    elif run_type == 'initial_CD':
      self.initial_CD()
    elif run_type == 'CD':  # Sample the CD process
      self.sim_process('CD')
      Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run()
      self.calc_f_RL()
      # self.targeted_FEP()
    elif run_type == 'timed':  # Timed replica exchange sampling
      # Each stage gates the next: postprocessing and free energy
      # calculations only run once sampling finishes within the budget
      BC_complete = self.sim_process('BC')
      if BC_complete:
        pp_complete = Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run([('BC', -1, -1, 'L')])
        if pp_complete:
          self.calc_f_L()
          CD_complete = self.sim_process('CD')
          if CD_complete:
            pp_complete = Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run()
            if pp_complete:
              self.calc_f_RL()
              # self.targeted_FEP()
    elif run_type == 'timed_BC':  # Timed BC only
      BC_complete = self.sim_process('BC')
      if BC_complete:
        pp_complete = Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run([('BC', -1, -1, 'L')])
        if pp_complete:
          self.calc_f_L()
    elif run_type == 'timed_CD':  # Timed CD only
      CD_complete = self.sim_process('CD')
      if CD_complete:
        pp_complete = Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run()
        if pp_complete:
          self.calc_f_RL()
          # self.targeted_FEP()
    elif run_type == 'postprocess':  # Postprocessing
      Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run()
    elif run_type == 'redo_postprocess':
      Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run(redo_CD=True)
    elif run_type == 'redo_pose_prediction':
      self.calc_f_RL(readOnly=True)
      # Predict native pose
      if self.args.params['CD']['pose'] == -1:
        (self.stats_RL['pose_inds'], self.stats_RL['scores']) = \
          self._get_pose_prediction()
        f_RL_FN = os.path.join(self.args.dir['CD'], 'f_RL.pkl.gz')
        self.log.tee(
          write_pkl_gz(f_RL_FN, (self.f_L, self.stats_RL, self.f_RL, self.B)))
      # self.targeted_FEP()
    elif (run_type == 'free_energies') or (run_type == 'redo_free_energies'):
      self.calc_f_L(redo=(run_type == 'redo_free_energies'))
      self.calc_f_RL(redo=(run_type == 'redo_free_energies'))
      # self.targeted_FEP()
    elif run_type == 'all':
      self.sim_process('BC')
      Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run([('BC', -1, -1, 'L')])
      self.calc_f_L()
      self.sim_process('CD')
      Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run()
      self.calc_f_RL()
      # self.targeted_FEP()
    elif run_type == 'render_docked':
      # For 4 figures
      # 1002*4/600. = 6.68 in at 600 dpi
      #  996*4/600. = 6.64 in at 600 dpi
      view_args = {'axes_off':True, 'size':[996,996], 'scale_by':0.80, \
                   'render':'TachyonInternal'}
      if hasattr(self, '_view_args_rotate_matrix'):
        view_args['rotate_matrix'] = getattr(self, '_view_args_rotate_matrix')
      self.show_samples(prefix='docked', \
        show_ref_ligand=True, show_starting_pose=True, \
        show_receptor=True, save_image=True, execute=True, quit=True, \
        view_args=view_args)
      if self.args.params['CD']['pose'] == -1:
        (self.stats_RL['pose_inds'], self.stats_RL['scores']) = \
          self._get_pose_prediction()
        self.show_pose_prediction(score='grid_fe_u',
          show_ref_ligand=True, show_starting_pose=False, \
          show_receptor=True, save_image=True, execute=True, quit=True, \
          view_args=view_args)
        self.show_pose_prediction(score='OpenMM_OBC2_fe_u',
          show_ref_ligand=True, show_starting_pose=False, \
          show_receptor=True, save_image=True, execute=True, quit=True, \
          view_args=view_args)
    elif run_type == 'render_intermediates':
      view_args = {'axes_off':True, 'size':[996,996], 'scale_by':0.80, \
                   'render':'TachyonInternal'}
      if hasattr(self, '_view_args_rotate_matrix'):
        view_args['rotate_matrix'] = getattr(self, '_view_args_rotate_matrix')
      # self.render_intermediates(\
      #   movie_name=os.path.join(self.args.dir['CD'],'CD-intermediates.gif'), \
      #   view_args=view_args)
      self.render_intermediates(nframes=8, view_args=view_args)
    elif run_type == 'clear_intermediates':
      for process in ['BC', 'CD']:
        print 'Clearing intermediates for ' + process
        # Keep the first and last states; empty everything in between
        for state_ind in range(1,
            len(self.data[process].confs['samples']) - 1):
          for cycle_ind in range(
              len(self.data[process].confs['samples'][state_ind])):
            self.data[process].confs['samples'][state_ind][cycle_ind] = []
        self.save(process)
    if run_type is not None:
      print "\nElapsed time for execution of %s: %s" % (
        run_type, HMStime(self.log.timeSince('run')))
###########
# BC #
###########
def initial_BC(self):
"""
Warms the ligand from self.T_TARGET to self.T_HIGH
Intermediate thermodynamic states are chosen such that
thermodynamic length intervals are approximately constant.
Configurations from each state are subsampled to seed the next simulation.
"""
if (len(self.data['BC'].protocol) >
0) and (self.data['BC'].protocol[-1]['crossed']):
return # Initial BC is already complete
self.log.recordStart('BC')
from AlGDock.ligand_preparation import LigandPreparation
seeds = LigandPreparation(self.args, self.log, self.top, self.system,
self._get_confs_to_rescore, self.iterator,
self.data).run('BC')
from AlGDock.initialization import Initialization
Initialization(self.args, self.log, self.top, self.system,
self.iterator, self.data, self.save, self._u_kln).run('BC', seeds)
return True
  def calc_f_L(self, readOnly=False, do_solvation=True, redo=False):
    """
    Calculates ligand-specific free energies:
    1. reduced free energy of BC the ligand
       from self.T_HIGH to self.T_TARGET
    2. solvation free energy of the ligand using single-step
       free energy perturbation
    redo does not do anything now; it is an option for debugging

    readOnly -- load previous results from disk and return immediately
    do_solvation -- also compute phase solvation free energies; when
      False, only the BC MBAR free energies are updated
    Returns True on success, False if postprocessing is incomplete,
    and None when there is nothing to compute.
    """
    # Initialize variables as empty lists or by loading data
    f_L_FN = os.path.join(self.args.dir['BC'], 'f_L.pkl.gz')
    dat = load_pkl_gz(f_L_FN)
    if dat is not None:
      (self.stats_L, self.f_L) = dat
    else:
      self.stats_L = dict(\
        [(item,[]) for item in ['equilibrated_cycle','mean_acc']])
      self.stats_L['protocol'] = self.data['BC'].protocol
      self.f_L = dict([(key,[]) for key in ['BC_MBAR'] + \
        [phase+'_solv' for phase in self.args.params['BC']['phases']]])
    if readOnly or self.data['BC'].protocol == []:
      return

    K = len(self.data['BC'].protocol)

    # Make sure all the energies are available
    for c in range(self.data['BC'].cycle):
      if len(self.data['BC'].Es[-1][c].keys()) == 0:
        self.log.tee("  skipping the BC free energy calculation")
        return

    start_string = "\n>>> Ligand free energy calculations, starting at " + \
      time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "\n"
    self.log.recordStart('free energy')

    # Store stats_L internal energies
    # u_K_sampled: reduced energies of samples in the final (target) state
    self.stats_L['u_K_sampled'] = \
      [self._u_kln([self.data['BC'].Es[-1][c]],[self.data['BC'].protocol[-1]]) \
        for c in range(self.data['BC'].cycle)]
    # u_KK: sum of reduced energies of each state's samples in its own state
    self.stats_L['u_KK'] = \
      [np.sum([self._u_kln([self.data['BC'].Es[k][c]],[self.data['BC'].protocol[k]]) \
        for k in range(len(self.data['BC'].protocol))],0) \
          for c in range(self.data['BC'].cycle)]

    self.stats_L['equilibrated_cycle'] = self._get_equilibrated_cycle('BC')

    # Calculate BC free energies that have not already been calculated,
    # in units of RT
    updated = False
    for c in range(len(self.f_L['BC_MBAR']), self.data['BC'].cycle):
      if not updated:
        # Acquire the lock (and announce) only once per invocation
        self.log.set_lock('BC')
        if do_solvation:
          self.log.tee(start_string)
        updated = True

      fromCycle = self.stats_L['equilibrated_cycle'][c]
      toCycle = c + 1

      # BC free energy
      BC_Es = []
      for BC_Es_state in self.data['BC'].Es:
        BC_Es.append(BC_Es_state[fromCycle:toCycle])
      (u_kln, N_k) = self._u_kln(BC_Es, self.data['BC'].protocol)
      MBAR = self.run_MBAR(u_kln, N_k)[0]
      self.f_L['BC_MBAR'].append(MBAR)

      # Average acceptance probabilities
      # Metropolis acceptance for swaps between adjacent states k and k+1
      BC_mean_acc = np.zeros(K - 1)
      for k in range(0, K - 1):
        (u_kln, N_k) = self._u_kln(BC_Es[k:k + 2],
                                   self.data['BC'].protocol[k:k + 2])
        N = min(N_k)
        acc = np.exp(-u_kln[0, 1, :N] - u_kln[1, 0, :N] + u_kln[0, 0, :N] +
                     u_kln[1, 1, :N])
        BC_mean_acc[k] = np.mean(np.minimum(acc, np.ones(acc.shape)))
      self.stats_L['mean_acc'].append(BC_mean_acc)

      self.log.tee("  calculated BC free energy of %.2f RT "%(\
        self.f_L['BC_MBAR'][-1][-1])+\
        "using cycles %d to %d"%(fromCycle, c))

    if not do_solvation:
      if updated:
        # Timed runs defer the write until sampling completes
        if not self.log.run_type.startswith('timed'):
          write_pkl_gz(f_L_FN, (self.stats_L, self.f_L))
        self.log.clear_lock('BC')
      return True

    # Make sure postprocessing is complete
    from AlGDock.postprocessing import Postprocessing
    pp_complete = Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run([('BC', -1, -1, 'L')])
    if not pp_complete:
      return False

    # Store stats_L internal energies
    for phase in self.args.params['BC']['phases']:
      self.stats_L['u_K_'+phase] = \
        [self.data['BC'].Es[-1][c]['L'+phase][:,-1]/(R*self.T_TARGET) \
          for c in range(self.data['BC'].cycle)]

    # Calculate solvation free energies that have not already been calculated,
    # in units of RT
    for phase in self.args.params['BC']['phases']:
      if not phase + '_solv' in self.f_L:
        self.f_L[phase + '_solv'] = []
      if not 'mean_' + phase in self.f_L:
        self.f_L['mean_' + phase] = []

      for c in range(len(self.f_L[phase + '_solv']), self.data['BC'].cycle):
        if not updated:
          self.log.set_lock('BC')
          self.log.tee(start_string)
          updated = True

        fromCycle = self.stats_L['equilibrated_cycle'][c]
        toCycle = c + 1

        if not ('L' + phase) in self.data['BC'].Es[-1][c].keys():
          raise Exception('L%s energies not found in cycle %d' % (phase, c))

        # Arbitrarily, solvation is the
        # 'forward' direction and desolvation the 'reverse'
        # Single-step FEP: reweight sampled-state snapshots into the phase
        u_L = np.concatenate([self.data['BC'].Es[-1][n]['L'+phase] \
          for n in range(fromCycle,toCycle)])/(R*self.T_TARGET)
        u_sampled = np.concatenate(\
          [self._u_kln([self.data['BC'].Es[-1][c]],[self.data['BC'].protocol[-1]]) \
            for c in range(fromCycle,toCycle)])
        # min_du_F is subtracted before exponentiation for numerical
        # stability and added back in the log
        du_F = (u_L[:, -1] - u_sampled)
        min_du_F = min(du_F)
        w_L = np.exp(-du_F + min_du_F)
        f_L_solv = -np.log(np.mean(w_L)) + min_du_F
        mean_u_phase = np.sum(u_L[:, -1] * w_L) / np.sum(w_L)

        self.f_L[phase + '_solv'].append(f_L_solv)
        self.f_L['mean_' + phase].append(mean_u_phase)
        self.log.tee("  calculated " + phase + " solvation free energy of " + \
          "%.5g RT "%(f_L_solv) + \
          "using cycles %d to %d"%(fromCycle, toCycle-1))

    if updated:
      self.log.tee(write_pkl_gz(f_L_FN, (self.stats_L, self.f_L)))
      self.log.tee("\nElapsed time for free energy calculation: " + \
        HMStime(self.log.timeSince('free energy')))
      self.log.clear_lock('BC')
    return True
###########
# Docking #
###########
def initial_CD(self, randomOnly=False):
"""
Docks the ligand into the receptor
Intermediate thermodynamic states are chosen such that
thermodynamic length intervals are approximately constant.
Configurations from each state are subsampled to seed the next simulation.
"""
if (len(self.data['CD'].protocol) >
0) and (self.data['CD'].protocol[-1]['crossed']):
return # Initial CD already complete
from AlGDock.ligand_preparation import LigandPreparation
seeds = LigandPreparation(self.args, self.log, self.top, self.system,
self._get_confs_to_rescore, self.iterator,
self.data).run('CD')
from AlGDock.initialization import Initialization
Initialization(self.args, self.log, self.top, self.system,
self.iterator, self.data, self.save, self._u_kln).run('CD', seeds)
return True
  def calc_f_RL(self, readOnly=False, do_solvation=True, redo=False):
    """
    Calculates the binding potential of mean force
    redo recalculates f_RL and B except grid_MBAR

    readOnly -- load previous results from disk and return immediately
    do_solvation -- also compute phase solvation free energies and BPMF
      estimates; when False, only the grid MBAR free energy for the
      current cycle is updated
    Returns True on success, False if postprocessing is incomplete,
    and None when prerequisites are missing.
    """
    if self.data['CD'].protocol == []:
      return  # Initial CD is incomplete

    # Initialize variables as empty lists or by loading data
    # Pose-restrained calculations get a per-pose results file
    if self.args.params['CD']['pose'] == -1:
      f_RL_FN = os.path.join(self.args.dir['CD'], 'f_RL.pkl.gz')
    else:
      f_RL_FN = os.path.join(self.args.dir['CD'], \
        'f_RL_pose%03d.pkl.gz'%self.args.params['CD']['pose'])

    dat = load_pkl_gz(f_RL_FN)
    if (dat is not None):
      (self.f_L, self.stats_RL, self.f_RL, self.B) = dat
    else:
      self._clear_f_RL()
    if readOnly:
      return True

    if redo:
      # Drop everything except the (expensive) grid MBAR results
      for key in self.f_RL.keys():
        if key != 'grid_MBAR':
          self.f_RL[key] = []
      self.B = {'MMTK_MBAR': []}
      for phase in self.args.params['CD']['phases']:
        for method in ['min_Psi', 'mean_Psi', 'EXP', 'MBAR']:
          self.B[phase + '_' + method] = []

    # Make sure all the energies are available
    for c in range(self.data['CD'].cycle):
      if len(self.data['CD'].Es[-1][c].keys()) == 0:
        self.log.tee("  skipping the binding PMF calculation")
        return
    if not hasattr(self, 'f_L'):
      self.log.tee("  skipping the binding PMF calculation")
      return

    start_string = "\n>>> Complex free energy calculations, starting at " + \
      time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "\n"
    self.log.recordStart('BPMF')

    updated = False

    def set_updated_to_True(updated, start_string, quiet=False):
      # Acquire the lock (and announce) on the first update only
      if (updated is False):
        self.log.set_lock('CD')
        if not quiet:
          self.log.tee(start_string)
      return True

    K = len(self.data['CD'].protocol)

    # Store stats_RL
    # Internal energies
    # u_K_sampled: reduced energies of samples in the final (target) state
    self.stats_RL['u_K_sampled'] = \
      [self._u_kln([self.data['CD'].Es[-1][c]],[self.data['CD'].protocol[-1]]) \
        for c in range(self.data['CD'].cycle)]
    # u_KK: sum of reduced energies of each state's samples in its own state
    self.stats_RL['u_KK'] = \
      [np.sum([self._u_kln([self.data['CD'].Es[k][c]],[self.data['CD'].protocol[k]]) \
        for k in range(len(self.data['CD'].protocol))],0) \
          for c in range(self.data['CD'].cycle)]

    # Interaction energies
    # Psi_grid: grid-based receptor-ligand interaction energy in RT
    for c in range(len(self.stats_RL['Psi_grid']), self.data['CD'].cycle):
      self.stats_RL['Psi_grid'].append(
          (self.data['CD'].Es[-1][c]['LJr'] + \
           self.data['CD'].Es[-1][c]['LJa'] + \
           self.data['CD'].Es[-1][c]['ELE'])/(R*self.T_TARGET))
      updated = set_updated_to_True(updated,
                                    start_string,
                                    quiet=not do_solvation)

    # Estimate cycle at which simulation has equilibrated
    eqc_o = self.stats_RL['equilibrated_cycle']
    self.stats_RL['equilibrated_cycle'] = self._get_equilibrated_cycle('CD')
    if self.stats_RL['equilibrated_cycle'] != eqc_o:
      updated = set_updated_to_True(updated,
                                    start_string,
                                    quiet=not do_solvation)

    # Store rmsd values
    if (self.args.params['CD']['rmsd'] is not False):
      k = len(self.data['CD'].protocol) - 1
      # Compute any missing per-cycle rmsds for the final state
      for c in range(self.data['CD'].cycle):
        if not 'rmsd' in self.data['CD'].Es[k][c].keys():
          confs = [conf for conf in self.data['CD'].confs['samples'][k][c]]
          self.data['CD'].Es[k][c]['rmsd'] = self.get_rmsds(confs)
      self.stats_RL['rmsd'] = [(np.hstack([self.data['CD'].Es[k][c]['rmsd']
        if 'rmsd' in self.data['CD'].Es[k][c].keys() else [] \
          for c in range(self.stats_RL['equilibrated_cycle'][-1], \
                         self.data['CD'].cycle)])) \
            for k in range(len(self.data['CD'].protocol))]

    # Calculate CD free energies that have not already been calculated
    while len(self.f_RL['grid_MBAR']) < self.data['CD'].cycle:
      self.f_RL['grid_MBAR'].append([])
    while len(self.stats_RL['mean_acc']) < self.data['CD'].cycle:
      self.stats_RL['mean_acc'].append([])

    for c in range(self.data['CD'].cycle):
      # If solvation free energies are not being calculated,
      # only calculate the grid free energy for the current cycle
      if (not do_solvation) and c < (self.data['CD'].cycle - 1):
        continue
      if self.f_RL['grid_MBAR'][c] != []:
        continue

      fromCycle = self.stats_RL['equilibrated_cycle'][c]
      extractCycles = range(fromCycle, c + 1)

      # Extract relevant energies
      CD_Es = [Es[fromCycle:c+1] \
        for Es in self.data['CD'].Es]

      # Use MBAR for the grid scaling free energy estimate
      (u_kln, N_k) = self._u_kln(CD_Es, self.data['CD'].protocol)
      MBAR = self.run_MBAR(u_kln, N_k)[0]
      self.f_RL['grid_MBAR'][c] = MBAR
      updated = set_updated_to_True(updated,
                                    start_string,
                                    quiet=not do_solvation)

      self.log.tee("  calculated grid scaling free energy of %.2f RT "%(\
        self.f_RL['grid_MBAR'][c][-1])+\
        "using cycles %d to %d"%(fromCycle, c))

      # Average acceptance probabilities
      # Metropolis acceptance for swaps between adjacent states k and k+1
      mean_acc = np.zeros(K - 1)
      for k in range(0, K - 1):
        (u_kln, N_k) = self._u_kln(CD_Es[k:k + 2],
                                   self.data['CD'].protocol[k:k + 2])
        N = min(N_k)
        acc = np.exp(-u_kln[0, 1, :N] - u_kln[1, 0, :N] + u_kln[0, 0, :N] +
                     u_kln[1, 1, :N])
        mean_acc[k] = np.mean(np.minimum(acc, np.ones(acc.shape)))
      self.stats_RL['mean_acc'][c] = mean_acc

    if not do_solvation:
      if updated:
        # Timed runs defer the write until sampling completes
        if not self.log.run_type.startswith('timed'):
          self.log.tee(write_pkl_gz(f_RL_FN, \
            (self.f_L, self.stats_RL, self.f_RL, self.B)))
        self.log.clear_lock('CD')
      return True

    # Make sure postprocessing is complete
    from AlGDock.postprocessing import Postprocessing
    pp_complete = Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run()
    if not pp_complete:
      return False
    self.calc_f_L()

    # Make sure all the phase energies are available
    for c in range(self.data['CD'].cycle):
      for phase in self.args.params['CD']['phases']:
        for prefix in ['L', 'RL']:
          if not prefix + phase in self.data['CD'].Es[-1][c].keys():
            self.log.tee("  postprocessed energies for %s unavailable" % phase)
            return

    # Store stats_RL internal energies for phases
    for phase in self.args.params['CD']['phases']:
      self.stats_RL['u_K_'+phase] = \
        [self.data['CD'].Es[-1][c]['RL'+phase][:,-1]/(R*self.T_TARGET) \
          for c in range(self.data['CD'].cycle)]

    # Interaction energies
    # Psi_<phase> = (complex - ligand - receptor) energies in RT
    for phase in self.args.params['CD']['phases']:
      if (not 'Psi_' + phase in self.stats_RL):
        self.stats_RL['Psi_' + phase] = []
      for c in range(len(self.stats_RL['Psi_' + phase]),
                     self.data['CD'].cycle):
        self.stats_RL['Psi_'+phase].append(
            (self.data['CD'].Es[-1][c]['RL'+phase][:,-1] - \
             self.data['CD'].Es[-1][c]['L'+phase][:,-1] - \
             self.args.original_Es[0][0]['R'+phase][:,-1])/(R*self.T_TARGET))

    # Predict native pose
    if self.args.params['CD']['pose'] == -1:
      (self.stats_RL['pose_inds'], self.stats_RL['scores']) = \
        self._get_pose_prediction()

    # BPMF assuming receptor and complex solvation cancel
    self.B['MMTK_MBAR'] = [-self.f_L['BC_MBAR'][-1][-1] + \
      self.f_RL['grid_MBAR'][c][-1] for c in range(len(self.f_RL['grid_MBAR']))]

    # BPMFs
    for phase in self.args.params['CD']['phases']:
      for key in [phase + '_solv']:
        if not key in self.f_RL:
          self.f_RL[key] = []
      for method in ['min_Psi', 'mean_Psi', 'EXP', 'MBAR']:
        if not phase + '_' + method in self.B:
          self.B[phase + '_' + method] = []

      # Receptor solvation
      f_R_solv = self.args.original_Es[0][0]['R' + phase][:, -1] / (
        R * self.T_TARGET)

      for c in range(len(self.B[phase + '_MBAR']), self.data['CD'].cycle):
        updated = set_updated_to_True(updated, start_string)
        extractCycles = range(self.stats_RL['equilibrated_cycle'][c], c + 1)

        # From the full grid to the fully bound complex in phase
        # Single-step FEP: reweight sampled-state snapshots into the phase
        u_RL = np.concatenate([\
          self.data['CD'].Es[-1][c]['RL'+phase][:,-1]/(R*self.T_TARGET) \
          for c in extractCycles])
        u_sampled = np.concatenate([\
          self.stats_RL['u_K_sampled'][c] for c in extractCycles])

        # min_du is subtracted before exponentiation for numerical
        # stability and added back in the log
        du = u_RL - u_sampled
        min_du = min(du)
        weights = np.exp(-du + min_du)

        # Filter outliers
        # Only applied for pose-restrained calculations
        if self.args.params['CD']['pose'] > -1:
          toKeep = du > (np.mean(du) - 3 * np.std(du))
          du = du[toKeep]
          weights[~toKeep] = 0.

        weights = weights / sum(weights)

        # Exponential average
        f_RL_solv = -np.log(np.exp(-du + min_du).mean()) + min_du - f_R_solv

        # Interaction energies
        Psi = np.concatenate([self.stats_RL['Psi_'+phase][c] \
          for c in extractCycles])
        min_Psi = min(Psi)
        max_Psi = max(Psi)

        # Complex solvation
        self.f_RL[phase + '_solv'].append(f_RL_solv)

        # Various BPMF estimates
        self.B[phase + '_min_Psi'].append(min_Psi)
        self.B[phase + '_mean_Psi'].append(np.sum(weights * Psi))
        self.B[phase+'_EXP'].append(\
          np.log(sum(weights*np.exp(Psi-max_Psi))) + max_Psi)
        self.B[phase+'_MBAR'].append(\
          - self.f_L[phase+'_solv'][-1] - self.f_L['BC_MBAR'][-1][-1] \
          + self.f_RL['grid_MBAR'][-1][-1] + f_RL_solv)

        self.log.tee("  calculated %s binding PMF of %.5g RT with cycles %d to %d"%(\
          phase, self.B[phase+'_MBAR'][-1], \
          self.stats_RL['equilibrated_cycle'][c], c))

    if updated:
      self.log.tee(
        write_pkl_gz(f_RL_FN, (self.f_L, self.stats_RL, self.f_RL, self.B)))
      self.log.tee("\nElapsed time for binding PMF estimation: " + \
        HMStime(self.log.timeSince('BPMF')))
      self.log.clear_lock('CD')
def _store_infinite_f_RL(self):
if self.args.params['CD']['pose'] == -1:
f_RL_FN = os.path.join(self.args.dir['CD'], 'f_RL.pkl.gz')
else:
f_RL_FN = os.path.join(self.args.dir['CD'],\
'f_RL_pose%03d.pkl.gz'%self.args.params['CD']['pose'])
self.log.tee(write_pkl_gz(f_RL_FN, (self.f_L, [], np.inf, np.inf)))
  def _get_equilibrated_cycle(self, process):
    """Estimates, for each cycle, the cycle at which sampling equilibrated.

    process -- 'BC' or 'CD'
    Returns a list with one entry per completed cycle; entry c is the
    first cycle considered equilibrated when analyzing cycles up to c.
    The estimate maximizes the number of statistically independent
    samples, computed from the integrated autocorrelation time of the
    'mean_energies' time series (pymbar.timeseries).
    """
    # Get previous results, if any
    if process == 'BC':
      if hasattr(self,'stats_L') and \
          ('equilibrated_cycle' in self.stats_L.keys()) and \
          self.stats_L['equilibrated_cycle']!=[]:
        equilibrated_cycle = self.stats_L['equilibrated_cycle']
      else:
        equilibrated_cycle = [0]
    elif process == 'CD':
      if hasattr(self,'stats_RL') and \
          ('equilibrated_cycle' in self.stats_RL.keys()) and \
          self.stats_RL['equilibrated_cycle']!=[]:
        equilibrated_cycle = self.stats_RL['equilibrated_cycle']
      else:
        equilibrated_cycle = [0]

    # Estimate equilibrated cycle
    # Only cycles not covered by previous results are (re)estimated
    for last_c in range(len(equilibrated_cycle), \
        self.data[process].cycle):
      # correlation_times[s] is the autocorrelation time when discarding
      # cycles before s; the leading np.inf entry corresponds to s=0,
      # effectively forcing at least the first cycle to be discarded
      correlation_times = [np.inf] + [\
        pymbar.timeseries.integratedAutocorrelationTime(\
          np.concatenate([self.data[process].Es[0][c]['mean_energies'] \
            for c in range(start_c,len(self.data[process].Es[0])) \
            if 'mean_energies' in self.data[process].Es[0][c].keys()])) \
               for start_c in range(1,last_c)]
      # Statistical inefficiency g = 2*tau + 1
      g = 2 * np.array(correlation_times) + 1
      # nsamples_tot[s] is the total number of samples from cycle s onward
      nsamples_tot = [n for n in reversed(np.cumsum([len(self.data[process].Es[0][c]['MM']) \
        for c in reversed(range(last_c))]))]
      # Effective number of independent samples for each candidate start
      nsamples_ind = nsamples_tot / g
      equilibrated_cycle_last_c = max(np.argmax(nsamples_ind), 1)
      equilibrated_cycle.append(equilibrated_cycle_last_c)

    return equilibrated_cycle
def _get_rmsd_matrix(self):
process = 'CD'
equilibrated_cycle = self.stats_RL['equilibrated_cycle'][-1]
# Gather snapshots
for k in range(equilibrated_cycle, self.data[process].cycle):
if not isinstance(self.data[process].confs['samples'][-1][k], list):
self.data[process].confs['samples'][-1][k] = [
self.data[process].confs['samples'][-1][k]
]
import itertools
confs = np.array([conf for conf in itertools.chain.from_iterable(\
[self.data[process].confs['samples'][-1][c] \
for c in range(equilibrated_cycle,self.data[process].cycle)])])
cum_Nk = np.cumsum([0] + [len(self.data['CD'].confs['samples'][-1][c]) \
for c in range(self.data['CD'].cycle)])
nsamples = cum_Nk[-1]
# Obtain a full rmsd matrix
# TODO: Check this
if ('rmsd_matrix' in self.stats_RL.keys()) and \
(len(self.stats_RL['rmsd_matrix'])==(nsamples*(nsamples-1)/2)):
rmsd_matrix = stats_RL['rmsd_matrix']
else:
# Create a new matrix
rmsd_matrix = []
for c in range(len(confs)):
rmsd_matrix.extend(self.get_rmsds(confs[c + 1:], confs[c]))
rmsd_matrix = np.clip(rmsd_matrix, 0., None)
self.stats_RL['rmsd_matrix'] = rmsd_matrix
# TODO: Write code to extend previous matrix
# Extend a previous matrix
# rmsd_matrix = self.stats_RL['rmsd_matrix']
# from scipy.spatial.distance import squareform
# rmsd_matrix_sq = squareform(rmsd_matrix)
#
# for c in range(len(confs)):
# rmsd_matrix.extend(self.get_rmsds(confs[c+1:], confs[c]))
# rmsd_matrix = np.clip(rmsd_matrix, 0., None)
# self.stats_RL['rmsd_matrix'] = rmsd_matrix
return rmsd_matrix
def _cluster_samples(self, rmsd_matrix):
# Clustering
import scipy.cluster
Z = scipy.cluster.hierarchy.linkage(rmsd_matrix, method='complete')
assignments = np.array(\
scipy.cluster.hierarchy.fcluster(Z, 0.1, criterion='distance'))
# Reindexes the assignments in order of appearance
new_index = 0
mapping_to_new_index = {}
for assignment in assignments:
if not assignment in mapping_to_new_index.keys():
mapping_to_new_index[assignment] = new_index
new_index += 1
assignments = [mapping_to_new_index[a] for a in assignments]
return assignments
  def _get_pose_prediction(self, representative='medoid'):
    """Clusters equilibrated CD samples and scores the clusters.

    representative -- 'medoid' selects each cluster's medoid by rmsd;
      otherwise, a phase name selects the member with the lowest
      interaction energy in that phase.
    Returns (pose_inds, scores): pose_inds is a list of (cycle, snapshot)
    pairs, one per cluster; scores maps score names (free energy, minimum
    and mean of total and interaction energies per phase, and optionally
    'rmsd') to numpy arrays over clusters.
    """
    process = 'CD'
    equilibrated_cycle = self.stats_RL['equilibrated_cycle'][-1]
    stats = self.stats_RL

    rmsd_matrix = self._get_rmsd_matrix()
    assignments = self._cluster_samples(rmsd_matrix)

    # cum_Nk[c] is the number of samples before relative cycle c,
    # used to map a flat sample index back to (cycle, snapshot)
    cum_Nk = np.cumsum([0] + [len(self.data[process].confs['samples'][-1][c]) \
      for c in range(equilibrated_cycle,self.data[process].cycle)])

    def linear_index_to_pair(ind):
      # Convert a flat index over gathered samples into an absolute
      # (cycle, within-cycle index) pair
      cycle = list(ind < cum_Nk).index(True) - 1
      n = ind - cum_Nk[cycle]
      return (cycle + equilibrated_cycle, n)

    # Select a representative of each cluster
    pose_inds = []
    scores = {}

    if representative == 'medoid':
      # based on the medoid
      from scipy.spatial.distance import squareform
      rmsd_matrix_sq = squareform(rmsd_matrix)

      # NOTE: the inner `n` deliberately shadows the loop variable; it is
      # only used to build the (cycle, n) pair appended below
      for n in range(max(assignments) + 1):
        inds = [i for i in range(len(assignments)) if assignments[i] == n]
        rmsd_matrix_n = rmsd_matrix_sq[inds][:, inds]
        (cycle,
         n) = linear_index_to_pair(inds[np.argmin(np.mean(rmsd_matrix_n, 0))])
        pose_inds.append((cycle, n))
    else:
      if 'Psi_' + representative in stats.keys():
        # based on the lowest interaction energy in specified phase
        phase = representative
        Psi_n = np.concatenate([stats['Psi_'+phase][c] \
          for c in range(equilibrated_cycle,self.data[process].cycle)])
        for n in range(max(assignments) + 1):
          inds = [i for i in range(len(assignments)) if assignments[i] == n]
          (cycle, n) = linear_index_to_pair(inds[np.argmin(Psi_n[inds])])
          pose_inds.append((cycle, n))

    # If relevant, store the rmsd of the representatives
    if self.args.params['CD']['rmsd']:
      scores['rmsd'] = []
      for (cycle, n) in pose_inds:
        scores['rmsd'].append(self.data['CD'].Es[-1][cycle]['rmsd'][n])

    # Score clusters based on total energy
    # uo are sampled-state energies; un are reweighted into each phase
    uo = np.concatenate([stats['u_K_sampled'][c] \
      for c in range(equilibrated_cycle,self.data[process].cycle)])

    for phase in (['grid'] + self.args.params[process]['phases']):
      if phase != 'grid':
        un = np.concatenate([stats['u_K_'+phase][c] \
          for c in range(equilibrated_cycle,self.data[process].cycle)])
        # min_du is subtracted before exponentiation for numerical stability
        du = un - uo
        min_du = min(du)
        weights = np.exp(-du + min_du)
      else:
        un = uo
        weights = np.ones(len(assignments))

      cluster_counts = np.histogram(assignments, \
        bins=np.arange(len(set(assignments))+1)-0.5,
        weights=weights)[0]

      # by free energy
      # Free energies are shifted so the lowest cluster is zero
      cluster_fe = -np.log(cluster_counts)
      cluster_fe -= np.min(cluster_fe)
      scores[phase + '_fe_u'] = cluster_fe

      # by minimum and mean energy
      scores[phase + '_min_u'] = []
      scores[phase + '_mean_u'] = []
      for n in range(max(assignments) + 1):
        un_n = [un[i] for i in range(len(assignments)) if assignments[i] == n]
        scores[phase + '_min_u'].append(np.min(un_n))
        scores[phase + '_mean_u'].append(np.mean(un_n))

    if process == 'CD':
      # Score clusters based on interaction energy
      Psi_o = np.concatenate([stats['Psi_grid'][c] \
        for c in range(equilibrated_cycle,self.data[process].cycle)])

      for phase in (['grid'] + self.args.params[process]['phases']):
        if phase != 'grid':
          Psi_n = np.concatenate([stats['Psi_'+phase][c] \
            for c in range(equilibrated_cycle,self.data[process].cycle)])
          dPsi = Psi_n - Psi_o
          min_dPsi = min(dPsi)
          weights = np.exp(-dPsi + min_dPsi)
        else:
          Psi_n = Psi_o
          weights = np.ones(len(assignments))

        cluster_counts = np.histogram(assignments, \
          bins=np.arange(len(set(assignments))+1)-0.5,
          weights=weights)[0]

        # by free energy
        cluster_fe = -np.log(cluster_counts)
        cluster_fe -= np.min(cluster_fe)
        scores[phase + '_fe_Psi'] = cluster_fe

        # by minimum and mean energy
        scores[phase + '_min_Psi'] = []
        scores[phase + '_mean_Psi'] = []
        for n in range(max(assignments) + 1):
          Psi_n_n = [
            Psi_n[i] for i in range(len(assignments)) if assignments[i] == n
          ]
          scores[phase + '_min_Psi'].append(np.min(Psi_n_n))
          scores[phase + '_mean_Psi'].append(np.mean(Psi_n_n))

    for key in scores.keys():
      scores[key] = np.array(scores[key])

    return (pose_inds, scores)
  def configuration_energies(self, minimize=False, max_confs=None):
    """Calculates energies for configurations from self.args.FNs['score'].

    Energies are evaluated with the fully interacting (alpha=1.0) force field.
    Results are cached in a gzipped pickle (energyFN) so repeated calls only
    compute missing energy terms; the cache is merged and re-written as each
    expensive phase completes.

    Parameters
    ----------
    minimize : bool
      If True, configurations are minimized before evaluation and the cache
      file name is prefixed with 'min_'.
    max_confs : int or None
      If not None, only the first max_confs configurations are kept for the
      implicit-solvent (postprocessing) energies.

    Returns
    -------
    (confs, Es) : tuple
      confs is a list of configurations; Es maps energy-term names to
      numpy arrays with one entry per configuration.
    """
    # Determine the name of the file
    prefix = 'xtal' if self.args.FNs['score']=='default' else \
      os.path.basename(self.args.FNs['score']).split('.')[0]
    if minimize:
      prefix = 'min_' + prefix
    energyFN = os.path.join(self.args.dir['CD'], prefix + '.pkl.gz')
    # Set the force field to fully interacting
    params_full = self.system.paramsFromAlpha(1.0, 'CD')
    self.system.setParams(params_full)
    # Load the configurations (cached if a previous run stored them)
    if os.path.isfile(energyFN):
      (confs, Es) = load_pkl_gz(energyFN)
    else:
      (confs, Es) = self._get_confs_to_rescore(site=False, \
        minimize=minimize, sort=False)
    self.log.set_lock('CD')
    self.log.tee("\n>>> Calculating energies for %d configurations, "%len(confs) + \
      "starting at " + \
      time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "\n")
    self.log.recordStart('configuration_energies')
    updated = False
    # Calculate MM and OBC energies
    if not 'MM' in Es.keys():
      Es = self.system.energyTerms(confs, Es)
      # Temporarily force full solvation so the OBC term is evaluated,
      # then restore the user's solvation setting
      solvation_o = self.args.params['CD']['solvation']
      self.args.params['CD']['solvation'] = 'Full'
      if self.system.isForce('OBC'):
        del self._forceFields['OBC']
      self.system.clear_evaluators()
      self.system.setParams(params_full)
      Es = self.system.energyTerms(confs, Es)
      self.args.params['CD']['solvation'] = solvation_o
      updated = True
    # Direct electrostatic energy (only if the grid file exists)
    FN = os.path.join(os.path.dirname(self.args.FNs['grids']['ELE']),
                      'direct_ele.nc')
    if not 'direct_ELE' in Es.keys() and os.path.isfile(FN):
      key = 'direct_ELE'
      Es[key] = np.zeros(len(confs))
      from AlGDock.ForceFields.Grid.Interpolation import InterpolationForceField
      FF = InterpolationForceField(FN, \
        scaling_property='scaling_factor_electrostatic')
      self.top.universe.setForceField(FF)
      for c in range(len(confs)):
        self.top.universe.setConfiguration(
          Configuration(self.top.universe, confs[c]))
        Es[key][c] = self.top.universe.energy()
      updated = True
    # Calculate symmetry-corrected RMSD
    if not 'rmsd' in Es.keys() and (self.args.params['CD']['rmsd'] is
                                    not False):
      Es['rmsd'] = self.get_rmsds(confs)
      updated = True
    if updated:
      self.log.tee("\nElapsed time for ligand MM, OBC, and grid energies: " + \
        HMStime(self.log.timeSince('configuration_energies')), \
        process='CD')
    self.log.clear_lock('CD')
    # Reduce the number of conformations for the expensive phases below
    if max_confs is not None:
      confs = confs[:max_confs]
    # Implicit solvent energies
    self.data['CD'].confs['starting_poses'] = None
    from AlGDock.postprocessing import Postprocessing
    pp_complete = Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run([('original', 0, 0, 'R')])
    # Receptor-only energies are constants taken from the parameters
    for phase in self.args.params['CD']['phases']:
      if not 'R' + phase in Es.keys():
        Es['R' + phase] = self.args.params['CD']['receptor_' + phase]
    toClear = []
    for phase in self.args.params['CD']['phases']:
      for moiety in ['L', 'RL']:
        if not moiety + phase in Es.keys():
          outputname = os.path.join(self.args.dir['CD'],
                                    '%s.%s%s' % (prefix, moiety, phase))
          # Each external program expects a different trajectory format
          if phase.startswith('NAMD'):
            traj_FN = os.path.join(self.args.dir['CD'],
                                   '%s.%s.dcd' % (prefix, moiety))
            self._write_traj(traj_FN, confs, moiety)
          elif phase.startswith('sander'):
            traj_FN = os.path.join(self.args.dir['CD'],
                                   '%s.%s.mdcrd' % (prefix, moiety))
            self._write_traj(traj_FN, confs, moiety)
          elif phase.startswith('gbnsr6'):
            traj_FN = os.path.join(self.args.dir['CD'], \
              '%s.%s%s'%(prefix,moiety,phase),'in.crd')
          elif phase.startswith('OpenMM'):
            traj_FN = None
          elif phase in ['APBS_PBSA']:
            traj_FN = os.path.join(self.args.dir['CD'],
                                   '%s.%s.pqr' % (prefix, moiety))
          else:
            raise Exception('Unknown phase!')
          if not traj_FN in toClear:
            toClear.append(traj_FN)
          # Dispatch to the matching _<program>_Energy method
          for program in ['NAMD', 'sander', 'gbnsr6', 'OpenMM', 'APBS']:
            if phase.startswith(program):
              # TODO: Mechanism to do partial calculation
              Es[moiety+phase] = getattr(self,'_%s_Energy'%program)(confs, \
                moiety, phase, traj_FN, outputname, debug=DEBUG)
              updated = True
              # Get any data added since the calculation started
              if os.path.isfile(energyFN):
                (confs_o, Es_o) = load_pkl_gz(energyFN)
                for key in Es_o.keys():
                  if key not in Es.keys():
                    Es[key] = Es_o[key]
              # Store the data
              self.log.tee(write_pkl_gz(energyFN, (confs, Es)))
              break
    # Remove temporary trajectory files
    for FN in toClear:
      if (FN is not None) and os.path.isfile(FN):
        os.remove(FN)
    for key in Es.keys():
      Es[key] = np.array(Es[key])
    self._combine_MM_and_solvent(Es)
    if updated:
      self.log.set_lock('CD')
      self.log.tee("\nElapsed time for energies: " + \
        HMStime(self.log.timeSince('configuration_energies')), \
        process='CD')
      self.log.clear_lock('CD')
    # Get any data added since the calculation started
    if os.path.isfile(energyFN):
      (confs_o, Es_o) = load_pkl_gz(energyFN)
      for key in Es_o.keys():
        if key not in Es.keys():
          Es[key] = Es_o[key]
    # Store the data
    self.log.tee(write_pkl_gz(energyFN, (confs, Es)))
    return (confs, Es)
######################
# Internal Functions #
######################
  def sim_process(self, process):
    """
    Simulate and analyze a BC or CD process.

    As necessary, first conduct an initial BC or CD
    and then run a desired number of replica exchange cycles.

    Parameters
    ----------
    process : str
      'BC' or 'CD'.

    Returns
    -------
    bool
      True if the process has completed; False if it ran out of allotted
      time (callers may resume later).
    """
    # Run the initial protocol if it has not yet crossed to the final state
    if (self.data[process].protocol==[]) or \
       (not self.data[process].protocol[-1]['crossed']):
      time_left = getattr(self, 'initial_' + process)()
      if not time_left:
        return False
    # Main loop for replica exchange
    if (self.args.params[process]['repX_cycles'] is not None) and \
       ((self.data[process].cycle < \
         self.args.params[process]['repX_cycles'])):
      # Load configurations to score from another program
      if (process=='CD') and (self.data['CD'].cycle==1) and \
         (self.args.params['CD']['pose'] == -1) and \
         (self.args.FNs['score'] is not None) and \
         (self.args.FNs['score']!='default'):
        self.log.set_lock('CD')
        self.log.tee("\n>>> Reinitializing replica exchange configurations")
        self.system.setParams(self.system.paramsFromAlpha(1.0, 'CD'))
        confs = self._get_confs_to_rescore(\
          nconfs=len(self.data['CD'].protocol), site=True, minimize=True)[0]
        self.log.clear_lock('CD')
        if len(confs) > 0:
          self.data['CD'].confs['replicas'] = confs
      self.log.tee("\n>>> Replica exchange for {0}, starting at {1}\n".format(\
        process, time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())), \
        process=process)
      self.log.recordStart(process + '_repX_start')
      start_cycle = self.data[process].cycle
      cycle_times = []
      while (self.data[process].cycle <
             self.args.params[process]['repX_cycles']):
        from AlGDock.replica_exchange import ReplicaExchange
        ReplicaExchange(self.args, self.log, self.top, self.system,
                        self.iterator, self.data, self.save, self._u_kln).run(process)
        self.SIRS(process)
        cycle_times.append(self.log.timeSince('repX cycle'))
        if process == 'CD':
          self._insert_CD_state_between_low_acc()
        # Stop early if the time budget would be exceeded by another cycle
        if not self.log.isTimeForTask(cycle_times):
          return False
      self.log.tee("Elapsed time for %d cycles of replica exchange: %s"%(\
        (self.data[process].cycle - start_cycle), \
        HMStime(self.log.timeSince(process+'_repX_start'))), \
        process=process)
    # If there are insufficient configurations,
    # do additional replica exchange on the BC process
    # NOTE(review): cycle_times is only defined if the repX loop above ran;
    # if this branch is reached without it, the append below would raise a
    # NameError -- confirm the intended call sequence guarantees this.
    if (process == 'BC'):
      E_MM = []
      for k in range(len(self.data['BC'].Es[0])):
        E_MM += list(self.data['BC'].Es[0][k]['MM'])
      while len(E_MM) < self.args.params['CD']['seeds_per_state']:
        self.log.tee(
          "More samples from high temperature ligand simulation needed",
          process='BC')
        from AlGDock.replica_exchange import ReplicaExchange
        ReplicaExchange(self.args, self.log, self.top, self.system,
                        self.iterator, self.data, self.save, self._u_kln).run('BC')
        self.SIRS(process)
        cycle_times.append(self.log.timeSince('repX cycle'))
        if not self.log.isTimeForTask(cycle_times):
          return False
        E_MM = []
        for k in range(len(self.data['BC'].Es[0])):
          E_MM += list(self.data['BC'].Es[0][k]['MM'])
    # Clear evaluators to save memory
    self.system.clear_evaluators()
    return True  # The process has completed
  def SIRS(self, process):
    """Sampling importance resampling of replica exchange configurations.

    If enabled by the 'sampling_importance_resampling' parameter, replaces
    self.data[process].confs['replicas'] with snapshots drawn from the last
    replica exchange cycle according to their MBAR weights at each state.
    """
    # The code below is only for sampling importance resampling
    if not self.args.params[process]['sampling_importance_resampling']:
      return
    # Calculate appropriate free energy
    if process == 'BC':
      self.calc_f_L(do_solvation=False)
      f_k = self.f_L['BC_MBAR'][-1]
    elif process == 'CD':
      self.calc_f_RL(do_solvation=False)
      f_k = self.f_RL['grid_MBAR'][-1]
    # Get weights for sampling importance resampling
    # MBAR weights for replica exchange configurations
    protocol = self.data[process].protocol
    Es_repX = [[copy.deepcopy(self.data[process].Es[k][-1])] for k in range(len(protocol))]
    (u_kln, N_k) = self._u_kln(Es_repX, protocol)
    # This is a more direct way to get the weights
    from pymbar.utils import kln_to_kn
    u_kn = kln_to_kn(u_kln, N_k=N_k)
    from pymbar.utils import logsumexp
    # Standard MBAR weight expression, evaluated in log space for stability
    log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
    logW = f_k - u_kn.T - log_denominator_n[:, np.newaxis]
    W_nl = np.exp(logW)
    # Normalize the weights within each state
    for k in range(len(protocol)):
      W_nl[:, k] = W_nl[:, k] / np.sum(W_nl[:, k])
    # This is for conversion to 2 indicies: state and snapshot
    cum_N_state = np.cumsum([0] + list(N_k))
    def linear_index_to_snapshot_index(ind):
      # Maps a flat sample index to (state, within-state) indices
      state_index = list(ind < cum_N_state).index(True) - 1
      nis_index = ind - cum_N_state[state_index]
      return (state_index, nis_index)
    # Selects new replica exchange snapshots
    confs_repX = self.data[process].confs['last_repX']
    self.data[process].confs['replicas'] = []
    for k in range(len(protocol)):
      (s,n) = linear_index_to_snapshot_index(\
        np.random.choice(range(W_nl.shape[0]), size = 1, p = W_nl[:,k])[0])
      self.data[process].confs['replicas'].append(np.copy(confs_repX[s][n]))
  def _insert_CD_state(self, alpha, clear=True):
    """
    Inserts a new thermodynamic state into the CD protocol.

    Samples for previous cycles are added by sampling importance resampling.
    Clears grid_MBAR (via _clear_f_RL) unless clear is False.

    Parameters
    ----------
    alpha : float
      Coupling parameter of the new state; it is inserted between the
      neighboring protocol states that bracket this value.
    clear : bool
      If True, clear cached free energy estimates afterwards.
    """
    # Defines a new thermodynamic state based on the neighboring state
    neighbor_ind = [alpha < p['alpha']
                    for p in self.data['CD'].protocol].index(True) - 1
    params_n = self.system.paramsFromAlpha(
      alpha, params_o=self.data['CD'].protocol[neighbor_ind])
    # For sampling importance resampling,
    # prepare an augmented matrix for pymbar calculations
    # with a new thermodynamic state
    (u_kln_s, N_k) = self._u_kln(self.data['CD'].Es, self.data['CD'].protocol)
    (K, L, N) = u_kln_s.shape
    u_kln_n = self._u_kln(self.data['CD'].Es, [params_n])[0]
    L += 1
    N_k = np.append(N_k, [0])  # the new state has no samples yet
    u_kln = np.zeros([K, L, N])
    u_kln[:, :-1, :] = u_kln_s
    for k in range(K):
      u_kln[k, -1, :] = u_kln_n[k, 0, :]
    # Determine SIR weights: MBAR weights of existing samples at the new state
    weights = self.run_MBAR(u_kln, N_k, augmented=True)[1][:, -1]
    weights = weights / sum(weights)
    # Resampling
    # Convert linear indices to 3 indicies: state, cycle, and snapshot
    cum_N_state = np.cumsum([0] + list(N_k))
    cum_N_cycle = [np.cumsum([0] + [self.data['CD'].Es[k][c]['MM'].shape[0] \
      for c in range(len(self.data['CD'].Es[k]))]) for k in range(len(self.data['CD'].Es))]
    def linear_index_to_snapshot_index(ind):
      state_index = list(ind < cum_N_state).index(True) - 1
      nis_index = ind - cum_N_state[state_index]
      cycle_index = list(nis_index < cum_N_cycle[state_index]).index(True) - 1
      nic_index = nis_index - cum_N_cycle[state_index][cycle_index]
      return (state_index, cycle_index, nic_index)
    def snapshot_index_to_linear_index(state_index, cycle_index, nic_index):
      return cum_N_state[state_index] + cum_N_cycle[state_index][
        cycle_index] + nic_index
    # Terms to copy
    if self.args.params['CD']['pose'] > -1:
      # Pose BPMF
      terms = ['MM',\
        'k_angular_ext','k_spatial_ext','k_angular_int'] + scalables
    else:
      # BPMF
      terms = ['MM', 'site'] + scalables
    CD_Es_s = []
    confs_s = []
    for c in range(len(self.data['CD'].Es[0])):
      CD_Es_c = dict([(term, []) for term in terms])
      confs_c = []
      for n_in_c in range(len(self.data['CD'].Es[-1][c]['MM'])):
        if (cum_N_cycle[-1][c] == 0):
          (snapshot_s,snapshot_c,snapshot_n) = linear_index_to_snapshot_index(\
            np.random.choice(range(len(weights)), size = 1, p = weights)[0])
        else:
          # Only accept snapshots from cycle c or earlier
          snapshot_c = np.inf
          while (snapshot_c > c):
            (snapshot_s,snapshot_c,snapshot_n) = linear_index_to_snapshot_index(\
              np.random.choice(range(len(weights)), size = 1, p = weights)[0])
        for term in terms:
          CD_Es_c[term].append(\
            np.copy(self.data['CD'].Es[snapshot_s][snapshot_c][term][snapshot_n]))
        if self.args.params['CD']['keep_intermediate']:
          # Has not been tested:
          confs_c.append(\
            np.copy(self.data['CD'].confs['samples'][snapshot_s][snapshot_c]))
      for term in terms:
        CD_Es_c[term] = np.array(CD_Es_c[term])
      CD_Es_s.append(CD_Es_c)
      confs_s.append(confs_c)
    # Insert resampled values
    self.data['CD'].protocol.insert(neighbor_ind + 1, params_n)
    self.data['CD'].Es.insert(neighbor_ind + 1, CD_Es_s)
    self.data['CD'].confs['samples'].insert(neighbor_ind + 1, confs_s)
    self.data['CD'].confs['replicas'].insert(neighbor_ind+1, \
      np.copy(self.data['CD'].confs['replicas'][neighbor_ind]))
    if clear:
      self._clear_f_RL()
  def _insert_CD_state_between_low_acc(self):
    """Inserts states between protocol neighbors with low acceptance.

    Repeatedly bisects (in alpha) any pair of adjacent CD states whose mean
    replica exchange acceptance probability is below 0.4, until all pairs
    are above the threshold. Saves CD data if anything was inserted.
    """
    # Insert thermodynamic states between those with low acceptance probabilities
    eq_c = self._get_equilibrated_cycle('CD')[-1]
    def calc_mean_acc(k):
      # Mean Metropolis acceptance probability between states k and k+1,
      # using only post-equilibration cycles
      CD_Es = [Es[eq_c:self.data['CD'].cycle] for Es in self.data['CD'].Es]
      (u_kln,N_k) = self._u_kln(CD_Es[k:k+2],\
                                self.data['CD'].protocol[k:k+2])
      N = min(N_k)
      acc = np.exp(-u_kln[0, 1, :N] - u_kln[1, 0, :N] + u_kln[0, 0, :N] +
                   u_kln[1, 1, :N])
      return np.mean(np.minimum(acc, np.ones(acc.shape)))
    updated = False
    k = 0
    while k < len(self.data['CD'].protocol) - 1:
      mean_acc = calc_mean_acc(k)
      # print k, self.data['CD'].protocol[k]['alpha'], self.data['CD'].protocol[k+1]['alpha'], mean_acc
      while mean_acc < 0.4:
        if not updated:
          updated = True
          self.log.set_lock('CD')
        alpha_k = self.data['CD'].protocol[k]['alpha']
        alpha_kp = self.data['CD'].protocol[k + 1]['alpha']
        # Bisect the interval in alpha
        alpha_n = (alpha_k + alpha_kp) / 2.
        report = ' inserted state'
        report += ' between %.5g and %.5g at %.5g\n' % (alpha_k, alpha_kp, alpha_n)
        report += ' to improve acceptance rate from %.5g ' % mean_acc
        self._insert_CD_state(alpha_n, clear=False)
        mean_acc = calc_mean_acc(k)
        report += 'to %.5g' % mean_acc
        # print k, self.data['CD'].protocol[k]['alpha'], self.data['CD'].protocol[k+1]['alpha'], mean_acc
        self.log.tee(report)
      k += 1
    if updated:
      self._clear_f_RL()
      self.save('CD')
      self.log.tee("")
      self.log.clear_lock('CD')
  def _get_confs_to_rescore(self,
                            nconfs=None,
                            site=False,
                            minimize=True,
                            sort=True):
    """Returns configurations to rescore and their corresponding energies

    Parameters
    ----------
    nconfs : int or None
      Number of configurations to keep. If it is smaller than the number
      of unique configurations, then the lowest energy configurations will
      be kept. If it is larger, then the lowest energy configuration will be
      duplicated. If it is None, then all unique configurations will be kept.
    site : bool
      If True, configurations that are outside of the binding site
      will be discarded.
    minimize : bool
      If True, the configurations will be minimized
    sort : bool
      If True, configurations and energies will be sorted by DECREASING energy.

    Returns
    -------
    confs : list of np.array
      Configurations
    energies : dict
      Energies of the configurations, including a 'total' entry
    """
    # Get configurations
    count = {'xtal': 0, 'dock6': 0, 'initial_CD': 0, 'duplicated': 0}
    # based on the score option; each branch fills confs and (optionally) Es
    if self.args.FNs['score'] == 'default':
      # Use the crystallographic ligand pose
      confs = [np.copy(self.data['CD'].confs['ligand'])]
      count['xtal'] = 1
      Es = {}
      if nconfs is None:
        nconfs = 1
    elif (self.args.FNs['score'] is None) or (not os.path.isfile(
        self.args.FNs['score'])):
      confs = []
      Es = {}
    elif self.args.FNs['score'].endswith('.mol2') or \
         self.args.FNs['score'].endswith('.mol2.gz'):
      import AlGDock.IO
      IO_dock6_mol2 = AlGDock.IO.dock6_mol2()
      (confs, Es) = IO_dock6_mol2.read(self.args.FNs['score'], \
        reorder=self.top.inv_prmtop_atom_order_L,
        multiplier=0.1)  # to convert Angstroms to nanometers
      count['dock6'] = len(confs)
    elif self.args.FNs['score'].endswith('.mdcrd'):
      import AlGDock.IO
      IO_crd = AlGDock.IO.crd()
      lig_crds = IO_crd.read(self.args.FNs['score'], \
        multiplier=0.1)  # to convert Angstroms to nanometers
      confs = np.array_split(
        lig_crds, lig_crds.shape[0] / self.top.universe.numberOfAtoms())
      confs = [conf[self.top.inv_prmtop_atom_order_L, :] for conf in confs]
      Es = {}
    elif self.args.FNs['score'].endswith('.nc'):
      from netCDF4 import Dataset
      dock6_nc = Dataset(self.args.FNs['score'], 'r')
      confs = [
        dock6_nc.variables['confs'][n][self.top.inv_prmtop_atom_order_L, :]
        for n in range(dock6_nc.variables['confs'].shape[0])
      ]
      Es = dict([(key, dock6_nc.variables[key][:])
                 for key in dock6_nc.variables.keys() if key != 'confs'])
      dock6_nc.close()
      count['dock6'] = len(confs)
    elif self.args.FNs['score'].endswith('.pkl.gz'):
      F = gzip.open(self.args.FNs['score'], 'r')
      confs = pickle.load(F)
      F.close()
      if not isinstance(confs, list):
        confs = [confs]
      Es = {}
    else:
      raise Exception('Input configuration format not recognized')
    # based on the seeds
    # TODO: Use CD seeds for BC
    if (self.data['CD'].confs['seeds'] is not None) and \
       (self.args.params['CD']['pose']==-1):
      confs = confs + self.data['CD'].confs['seeds']
      Es = {}
      count['initial_CD'] = len(self.data['CD'].confs['seeds'])
    if len(confs) == 0:
      return ([], {})
    if site:
      # Filters out configurations not in the binding site
      confs_in_site = []
      Es_in_site = dict([(label, []) for label in Es.keys()])
      # Temporarily swap in the site-only evaluator, restoring the old one after
      old_eval = None
      if (None, None, None) in self.top.universe._evaluator.keys():
        old_eval = self.top.universe._evaluator[(None, None, None)]
      self.system.setParams({'site': True, 'T': self.T_TARGET})
      for n in range(len(confs)):
        self.top.universe.setConfiguration(
          Configuration(self.top.universe, confs[n]))
        # Configurations inside the site have (near-)zero site energy
        if self.top.universe.energy() < 1.:
          confs_in_site.append(confs[n])
          for label in Es.keys():
            Es_in_site[label].append(Es[label][n])
      if old_eval is not None:
        self.top.universe._evaluator[(None, None, None)] = old_eval
      confs = confs_in_site
      Es = Es_in_site
    try:
      self.top.universe.energy()
    except ValueError:
      # The universe cannot be evaluated; return configurations without energies
      return (confs, {})
    if minimize:
      # Minimization replaces any previously read energies
      Es = {}
      (confs, energies) = self._checkedMinimizer(confs)
    else:
      # Evaluate energies
      energies = []
      for conf in confs:
        self.top.universe.setConfiguration(
          Configuration(self.top.universe, conf))
        energies.append(self.top.universe.energy())
    if sort and len(confs) > 0:
      # Sort configurations by DECREASING energy
      energies, confs = (list(l) for l in zip(*sorted(zip(energies, confs), \
        key=lambda p:p[0], reverse=True)))
    # Shrink or extend configuration and energy array
    if nconfs is not None:
      # Keep the last (lowest-energy, given the sort above) nconfs entries
      confs = confs[-nconfs:]
      energies = energies[-nconfs:]
      while len(confs) < nconfs:
        # Duplicate the lowest-energy configuration to pad the list
        confs.append(confs[-1])
        energies.append(energies[-1])
        count['duplicated'] += 1
      count['nconfs'] = nconfs
    else:
      count['nconfs'] = len(confs)
    count['minimized'] = {True: ' minimized', False: ''}[minimize]
    Es['total'] = np.array(energies)
    self.log.tee(
      "  keeping {nconfs}{minimized} configurations out of\n  {xtal} from xtal, {dock6} from dock6, {initial_CD} from initial CD, and {duplicated} duplicated"
      .format(**count))
    return (confs, Es)
  def _checkedMinimizer(self, confs):
    """Minimizes configurations while checking for crashes and overflows

    Steepest descent is run in short bursts; a configuration is reverted to
    its last good geometry and minimization stops when the energy becomes
    NaN, converges (change < 0.05), or jumps suspiciously (change > 1000).

    Parameters
    ----------
    confs : list of np.array
      Configurations to minimize

    Returns
    -------
    confs : list of np.array
      Minimized configurations (NaN-energy configurations are dropped)
    energies : list of float
      Energies of the minimized configurations
    """
    from MMTK.Minimization import SteepestDescentMinimizer  # @UnresolvedImport
    minimizer = SteepestDescentMinimizer(self.top.universe)
    original_stderr = sys.stderr
    sys.stderr = NullDevice()  # Suppresses warnings for minimization
    minimized_confs = []
    minimized_energies = []
    self.log.recordStart('minimization')
    for conf in confs:
      self.top.universe.setConfiguration(
        Configuration(self.top.universe, conf))
      x_o = np.copy(self.top.universe.configuration().array)
      e_o = self.top.universe.energy()
      # Up to 50 bursts of 25 steepest-descent steps each
      for rep in range(50):
        minimizer(steps=25)
        x_n = np.copy(self.top.universe.configuration().array)
        e_n = self.top.universe.energy()
        diff = abs(e_o - e_n)
        if np.isnan(e_n) or diff < 0.05 or diff > 1000.:
          # Revert to the last accepted configuration and stop
          self.top.universe.setConfiguration(
            Configuration(self.top.universe, x_o))
          break
        else:
          x_o = x_n
          e_o = e_n
      if not np.isnan(e_o):
        minimized_confs.append(x_o)
        minimized_energies.append(e_o)
    sys.stderr = original_stderr  # Restores error reporting
    confs = minimized_confs
    energies = minimized_energies
    self.log.tee("  minimized %d configurations in "%len(confs) + \
      HMStime(self.log.timeSince('minimization')) + \
      "\n  the first %d energies are:\n  "%min(len(confs),10) + \
      ', '.join(['%.2f'%e for e in energies[:10]]))
    return confs, energies
def run_MBAR(self, u_kln, N_k, augmented=False):
"""
Estimates the free energy of a transition using BAR and MBAR
"""
import pymbar
K = len(N_k) - 1 if augmented else len(N_k)
f_k_FEPF = np.zeros(K)
f_k_BAR = np.zeros(K)
W_nl = None
for k in range(K - 1):
w_F = u_kln[k, k + 1, :N_k[k]] - u_kln[k, k, :N_k[k]]
min_w_F = min(w_F)
w_R = u_kln[k + 1, k, :N_k[k + 1]] - u_kln[k + 1, k + 1, :N_k[k + 1]]
min_w_R = min(w_R)
f_k_FEPF[k + 1] = -np.log(np.mean(np.exp(-w_F + min_w_F))) + min_w_F
try:
f_k_BAR[k+1] = pymbar.BAR(w_F, w_R, \
relative_tolerance=1.0E-5, \
verbose=False, \
compute_uncertainty=False)
except:
f_k_BAR[k + 1] = f_k_FEPF[k + 1]
print 'Error with BAR. Using FEP.'
f_k_FEPF = np.cumsum(f_k_FEPF)
f_k_BAR = np.cumsum(f_k_BAR)
try:
if augmented:
f_k_BAR = np.append(f_k_BAR, [0])
f_k_pyMBAR = pymbar.MBAR(u_kln, N_k, \
relative_tolerance=1.0E-5, \
verbose = False, \
initial_f_k = f_k_BAR, \
maximum_iterations = 20)
f_k_MBAR = f_k_pyMBAR.f_k
W_nl = f_k_pyMBAR.getWeights()
except:
print N_k, f_k_BAR
f_k_MBAR = f_k_BAR
print 'Error with MBAR. Using BAR.'
if np.isnan(f_k_MBAR).any():
f_k_MBAR = f_k_BAR
print 'Error with MBAR. Using BAR.'
return (f_k_MBAR, W_nl)
def _u_kln(self, eTs, protocol, noBeta=False):
"""
Computes a reduced potential energy matrix. k is the sampled state. l is the state for which energies are evaluated.
Input:
eT is a
-dictionary (of mapped energy terms) of numpy arrays (over states)
-list (over states) of dictionaries (of mapped energy terms) of numpy arrays (over configurations), or a
-list (over states) of lists (over cycles) of dictionaries (of mapped energy terms) of numpy arrays (over configurations)
protocol is a list of thermodynamic states
noBeta means that the energy will not be divided by RT
Output: u_kln or (u_kln, N_k)
u_kln is the matrix (as a numpy array)
N_k is an array of sample sizes
"""
L = len(protocol)
addMM = ('MM' in protocol[0].keys()) and (protocol[0]['MM'])
addSite = ('site' in protocol[0].keys()) and (protocol[0]['site'])
probe_keys = ['MM','k_angular_ext','k_spatial_ext','k_angular_int'] + \
scalables
probe_key = [key for key in protocol[0].keys() if key in probe_keys][0]
if isinstance(eTs, dict):
# There is one configuration per state
K = len(eTs[probe_key])
N_k = np.ones(K, dtype=int)
u_kln = []
E_base = np.zeros(K)
if addMM:
E_base += eTs['MM']
if addSite:
E_base += eTs['site']
for l in range(L):
E = 1. * E_base
for scalable in scalables:
if scalable in protocol[l].keys():
E += protocol[l][scalable] * eTs[scalable]
for key in ['k_angular_ext', 'k_spatial_ext', 'k_angular_int']:
if key in protocol[l].keys():
E += protocol[l][key] * eTs[key]
if noBeta:
u_kln.append(E)
else:
u_kln.append(E / (R * protocol[l]['T']))
elif isinstance(eTs[0], dict):
K = len(eTs)
N_k = np.array([len(eTs[k][probe_key]) for k in range(K)])
u_kln = np.zeros([K, L, N_k.max()], np.float)
for k in range(K):
E_base = 0.0
if addMM:
E_base += eTs[k]['MM']
if addSite:
E_base += eTs[k]['site']
for l in range(L):
E = 1. * E_base
for scalable in scalables:
if scalable in protocol[l].keys():
E += protocol[l][scalable] * eTs[k][scalable]
for key in ['k_angular_ext', 'k_spatial_ext', 'k_angular_int']:
if key in protocol[l].keys():
E += protocol[l][key] * eTs[k][key]
if noBeta:
u_kln[k, l, :N_k[k]] = E
else:
u_kln[k, l, :N_k[k]] = E / (R * protocol[l]['T'])
elif isinstance(eTs[0], list):
K = len(eTs)
N_k = np.zeros(K, dtype=int)
for k in range(K):
for c in range(len(eTs[k])):
N_k[k] += len(eTs[k][c][probe_key])
u_kln = np.zeros([K, L, N_k.max()], np.float)
for k in range(K):
E_base = 0.0
C = len(eTs[k])
if addMM:
E_base += np.concatenate([eTs[k][c]['MM'] for c in range(C)])
if addSite:
E_base += np.concatenate([eTs[k][c]['site'] for c in range(C)])
for l in range(L):
E = 1. * E_base
for scalable in scalables:
if scalable in protocol[l].keys():
E += protocol[l][scalable]*np.concatenate([eTs[k][c][scalable] \
for c in range(C)])
for key in ['k_angular_ext', 'k_spatial_ext', 'k_angular_int']:
if key in protocol[l].keys():
E += protocol[l][key]*np.concatenate([eTs[k][c][key] \
for c in range(C)])
if noBeta:
u_kln[k, l, :N_k[k]] = E
else:
u_kln[k, l, :N_k[k]] = E / (R * protocol[l]['T'])
if (K == 1) and (L == 1):
return u_kln.ravel()
else:
return (u_kln, N_k)
def _clear_f_RL(self):
# stats_RL will include internal energies, interaction energies,
# the cycle by which the bound state is equilibrated,
# the mean acceptance probability between replica exchange neighbors,
# and the rmsd, if applicable
phase_f_RL_keys = \
[phase+'_solv' for phase in self.args.params['CD']['phases']]
# Initialize variables as empty lists
stats_RL = [('u_K_'+FF,[]) \
for FF in ['ligand','sampled']+self.args.params['CD']['phases']]
stats_RL += [('Psi_'+FF,[]) \
for FF in ['grid']+self.args.params['CD']['phases']]
stats_RL += [(item,[]) \
for item in ['equilibrated_cycle','cum_Nclusters','mean_acc','rmsd']]
self.stats_RL = dict(stats_RL)
self.stats_RL['protocol'] = self.data['CD'].protocol
# Free energy components
self.f_RL = dict([(key,[]) \
for key in ['grid_MBAR'] + phase_f_RL_keys])
# Binding PMF estimates
self.B = {'MMTK_MBAR': []}
for phase in self.args.params['CD']['phases']:
for method in ['min_Psi', 'mean_Psi', 'EXP', 'MBAR']:
self.B[phase + '_' + method] = []
# Store empty list
if self.args.params['CD']['pose'] == -1:
f_RL_FN = os.path.join(self.args.dir['CD'], 'f_RL.pkl.gz')
else:
f_RL_FN = os.path.join(self.args.dir['CD'], \
'f_RL_pose%03d.pkl.gz'%self.args.params['CD']['pose'])
if hasattr(self, 'run_type') and (not self.log.run_type.startswith('timed')):
self.log.tee(
write_pkl_gz(f_RL_FN, (self.f_L, self.stats_RL, self.f_RL, self.B)))
def save(self, p, keys=['progress', 'data']):
"""Saves results
Parameters
----------
p : str
The process, either 'BC' or 'CD'
keys : list of str
Save the progress, the data, or both
"""
if 'progress' in keys:
self.log.tee(self.args.save_pkl_gz(p, self.data[p]))
if 'data' in keys:
self.log.tee(self.data[p].save_pkl_gz())
  def __del__(self):
    # Destructor: best-effort removal of the temporary files accumulated in
    # self.args.toClear, skipped entirely when the module-level DEBUG flag
    # is set (so intermediates can be inspected after a debug run).
    if (not DEBUG) and len(self.args.toClear) > 0:
      print "\n>>> Clearing files"
      for FN in self.args.toClear:
        if os.path.isfile(FN):
          os.remove(FN)
          print ' removed ' + os.path.relpath(FN, self.args.dir['start'])
if __name__ == '__main__':
  import argparse
  parser = argparse.ArgumentParser(
    description=
    'Molecular docking with adaptively scaled alchemical interaction grids')
  # `arguments` is a module-level dict mapping option names to argparse
  # keyword-argument dicts
  for key in arguments.keys():
    parser.add_argument('--' + key, **arguments[key])
  args = parser.parse_args()
  if args.run_type in ['render_docked', 'render_intermediates']:
    from AlGDock.BindingPMF_plots import BPMF_plots
    self = BPMF_plots(**vars(args))
  else:
    # NOTE(review): binding the instance to the name `self` at module scope
    # appears deliberate (keeps method bodies pasteable into an interactive
    # session) -- confirm before renaming.
    self = BPMF(**vars(args))
|
998,785 | d7b4ef489c689641527cf0de695799c143050d6a | from datetime import datetime
from blocku import Blocku
import players
def main():
    """Play Blocku games back-to-back until enough states are collected."""
    target_states = 500000
    collected = 0
    # Keep playing while the collected state count has not passed the target;
    # at least one game is always played.
    while collected <= target_states:
        match = Blocku(players.SmartPlayer(5))
        began = datetime.now()
        outcome = match.run()
        collected += outcome["moves"]
        print("Final score = {}".format(outcome["score"]))
        print("Total moves = {}".format(outcome["moves"]))
        print("Start time: {} \nEnd time: {}".format(began, datetime.now()))
        print("Elapsed time: {}".format(datetime.now() - began))


if __name__ == '__main__':
    main()
|
998,786 | 9d79d039f47ffb248b8e5e57aa2a2af637a9b476 | # 914000210
# Map script for 914000210.
# NOTE(review): `sm` is presumably the server-provided script manager
# injected into this script's namespace -- confirm against the host API.
sm.killMobs()
# Re-grant skill 20000016; removing it first makes the grant idempotent
sm.removeSkill(20000016)
sm.giveSkill(20000016)
# Warp the player to map 914000220, spawn point 1
sm.warp(914000220, 1)
998,787 | 74ba1929ca8dc8c78f38b2bc90ce47e9d64506ed | # -*- coding: utf-8 -*-
# Author: eNovance developers <dev@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo.config import cfg
from stevedore import driver
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MINCER_PROVIDERS_NS = 'mincer.providers'
def get(environment):
    """Load the provider."""
    # Keyword arguments forwarded to the provider's constructor by Stevedore
    invoke_kwds = {
        'params': environment.provider_params(),
        'args': CONF,
    }
    manager = driver.DriverManager(
        namespace=MINCER_PROVIDERS_NS,
        name=environment.provider(),
        invoke_on_load=True,
        on_load_failure_callback=report_error,
        invoke_kwds=invoke_kwds,
    )
    return manager.driver
def report_error(manager, entrypoint, exception):
    """Log an error and raise an exception

    This method is called by Stevedore through the
    on_load_failure_callback callback.

    :param manager: None, unused
    :type manager: None
    :param entrypoint: the entrypoint
    :type entrypoint: str
    :param exception: the raised exception
    :type exception: exception
    :returns: None
    :rtype: None
    """
    LOG.error("Error while loading provider %s", entrypoint)
    raise exception
|
998,788 | 33ad4402512dee51654a51e13e2e6df120b989fa | import shared_buffer
from shared_buffer import *
import sys
import os
import threading
import logger
from logger import Logger
from defines import *
import socket
import Queue
from datetime import datetime
class basicNetworkServiceLayer(threading.Thread) :
    """UDP listener thread for one simulated host.

    Binds to the host's (IP, port) from hostID_To_IP and forwards every
    received packet to the attached attack layer. The thread is stopped
    cooperatively by queueing CMD_QUIT via cancelThread().
    """
    def __init__(self,hostID,logFile,hostID_To_IP) :
        threading.Thread.__init__(self)
        # Lock + list form a tiny thread-safe command mailbox (e.g. CMD_QUIT)
        self.threadCmdLock = threading.Lock()
        self.threadCmdQueue = []
        self.hostID = hostID
        self.IPMap = hostID_To_IP
        # This host's own address, used to bind the UDP socket in run()
        self.hostIP,self.listenPort = self.IPMap[self.hostID]
        self.log = logger.Logger(logFile,"Host " + str(hostID) + " Network Layer Thread")
        # ID translation tables; presumably populated elsewhere -- TODO confirm
        self.hostIDtoPowerSimID = None
        self.powerSimIDtohostID = None
        self.attackLayer = None
    def setAttackLayer(self,attackLayer):
        # Must be called before packets arrive; run() forwards to this layer
        self.attackLayer = attackLayer
    def getAttackLayer(self):
        return self.attackLayer
    def getcurrCmd(self) :
        # Pop the newest queued command, or None if the queue is empty
        self.threadCmdLock.acquire()
        try :
            currCmd = self.threadCmdQueue.pop()
        except:
            currCmd = None
        self.threadCmdLock.release()
        return currCmd
    def cancelThread(self):
        # Ask the run() loop to exit at its next iteration
        self.threadCmdLock.acquire()
        self.threadCmdQueue.append(CMD_QUIT)
        self.threadCmdLock.release()
    def onRxPktFromNetwork(self,pkt):
        # Hand the packet to the attack layer on its own thread, keyed by the
        # power-simulation ID embedded in the packet
        self.attackLayer.runOnThread(self.attackLayer.onRxPktFromNetworkLayer,extractPowerSimIdFromPkt(pkt),pkt)
    def run(self):
        self.log.info("Started listening on IP: " + self.hostIP + " PORT: " + str(self.listenPort) + " at " + str(datetime.now()))
        #os.system("taskset -cp " + str(os.getpid()))
        sys.stdout.flush()
        #assert(self.attackLayer != None)
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
        # The timeout lets the loop periodically poll the command queue
        sock.settimeout(SOCKET_TIMEOUT)
        sock.bind((self.hostIP, self.listenPort))
        while True :
            currCmd = self.getcurrCmd()
            if currCmd != None and currCmd == CMD_QUIT :
                self.log.info("Stopping at " + str(datetime.now()) )
                sys.stdout.flush()
                sock.close()
                break
            try:
                data, addr = sock.recvfrom(MAXPKTSIZE)
            except socket.timeout:
                # No packet this interval; loop back to check for CMD_QUIT
                data = None
            if data != None :
                self.log.info("%s RECV_FROM=%s:%s PKT=%s"%(datetime.now(), str(addr[0]), str(addr[1]), str(data)))
                # self.log.info("<RECV> TO: " + str(self.hostID) + " FROM: " + str(addr) + " PKT: " + str(data))
                self.onRxPktFromNetwork(str(data))
|
998,789 | 047140339341fa4c5eecab8dc6d8202117d272aa | from decimal import *
from django.contrib.auth.models import User
from django.shortcuts import render
from .forms import CsvModelForm
from .models import Csv
from cadastros.models import Nota, Cotacao, Proventos
import csv
# Create your views here.
def upload_files_view(request):
    """Upload a CSV file and import brokerage-note rows into Nota.

    Expects a semicolon-delimited CSV whose first row is a header.
    NOTE(review): Csv.objects.get(activated=False) assumes exactly one
    unprocessed Csv row exists; it raises DoesNotExist/MultipleObjectsReturned
    otherwise -- confirm the upload workflow guarantees this.
    """
    form = CsvModelForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form.save()
        # Fresh, empty form for re-rendering after a successful upload
        form = CsvModelForm()
        # The just-uploaded file is the one not yet marked as processed
        obj = Csv.objects.get(activated=False)
        with open(obj.file_name.path, 'r') as f:
            reader = csv.reader(f)
            for i, row in enumerate(reader):
                if i==0:
                    # Skip the header row
                    pass
                else:
                    # Re-join the parsed row, turn ';' into whitespace, split.
                    # NOTE(review): this breaks if any field contains spaces.
                    row = "".join(row)
                    row = row.replace(";"," ")
                    row = row.split()
                    ativo = row[1].upper()
                    Nota.objects.create(
                        ativo = ativo,
                        quantidade = int(row[2]),
                        preco = Decimal(row[3]),
                        data = row[4],
                        tipo = row[5],
                        identificador = row[6],
                        corretagem= Decimal(row[7]),
                        emolumentos = Decimal(row[8]),
                        tx_liquida_CBLC = Decimal(row[9]),
                    )
        # Mark the file as processed so it is not imported twice
        obj.activated = True
        obj.save()
    return render(request,'upload/upload.html',{'form':form})
# Create your views cotacão
def upload_files_view_cotacao(request):
    """Upload a CSV of quotes and import its rows into Cotacao.

    Rows are semicolon-delimited; the first row is a header and is
    skipped. After the import the Csv record is marked activated.
    """
    form_cotacao = CsvModelForm(request.POST or None, request.FILES or None)
    if form_cotacao.is_valid():
        form_cotacao.save()
        form_cotacao = CsvModelForm()
    # NOTE(review): .get(activated=False) raises if zero or multiple
    # unprocessed Csv rows exist -- confirm uploads happen one at a time.
    obj = Csv.objects.get(activated=False)
    with open(obj.file_name.path, 'r') as f:
        reader = csv.reader(f)
        for i, row in enumerate(reader):
            if i == 0:
                continue  # skip header row
            row = "".join(row).replace(";", " ").split()
            Cotacao.objects.create(
                acao=row[1].upper(),
                ativo=row[2].upper(),
                fechamento_ajustado=row[3],
            )
    obj.activated = True
    obj.save()
    return render(request, 'upload/upload-cotacao.html', {'form_cotacao': form_cotacao})
# Create your views proventos
def upload_files_view_proventos(request):
    """Upload a CSV of dividends and import its rows into Proventos.

    Rows are semicolon-delimited; the first row is a header and is
    skipped. Each created Proventos row is owned by the requesting user.
    """
    form_provento = CsvModelForm(request.POST or None, request.FILES or None)
    # (removed) debug print of every username -- leaked user data to logs.
    if form_provento.is_valid():
        form_provento.save()
        form_provento = CsvModelForm()
    # NOTE(review): .get(activated=False) raises if zero or multiple
    # unprocessed Csv rows exist -- confirm uploads happen one at a time.
    obj = Csv.objects.get(activated=False)
    with open(obj.file_name.path, 'r') as f:
        reader = csv.reader(f)
        for i, row in enumerate(reader):
            if i == 0:
                continue  # skip header row
            row = "".join(row).replace(";", " ").split()
            Proventos.objects.create(
                ativo=row[1].upper(),
                tipo_provento=row[2].upper(),
                data=row[3],
                valor=row[4],
                user=request.user,
            )
    obj.activated = True
    obj.save()
    return render(request, 'upload/upload-provento.html', {'form_provento': form_provento})
998,790 | 3af1f8b8bf0fa1444ee4fe7e5114417cd9bd4771 | import random
STATUS_ALIVE = 'alive'
STATUS_DEAD = 'dead'


class Dragon:
    """A dragon with hit points, a 2-D position and a life status."""

    def __init__(self, name, position_x=0, position_y=0, texture='dragon.png'):
        self.name = name
        self.hit_points = random.randint(50, 100)  # rolled once at spawn
        self.position_x = position_x
        self.position_y = position_y
        self.texture = texture
        self.status = STATUS_ALIVE

    def move(self, left=0, down=0, up=0, right=0):
        """Shift the dragon relative to its current position."""
        self.position_x = self.position_x + right - left
        self.position_y = self.position_y + down - up

    def set_position(self, position_x, position_y):
        """Place the dragon at absolute coordinates."""
        self.position_x = position_x
        self.position_y = position_y

    def get_position(self):
        """Return the current (x, y) coordinates as a tuple."""
        return (self.position_x, self.position_y)

    def take_damage(self, damage=0):
        """Subtract damage from hit points; at 0 HP or below the dragon dies."""
        if self.status == STATUS_DEAD:
            return None  # dead dragons ignore further hits
        self.hit_points -= damage
        print(f'Dragon damage taken: {damage}, HP left: {self.hit_points}')
        if self.hit_points <= 0:
            self._make_dead()

    def _make_dead(self):
        """Switch to the dead state, swap the texture and drop loot."""
        print('Dragon is dead')
        self.status = STATUS_DEAD
        self.texture = 'dragon-dead.png'
        self._drop_item()

    def _drop_item(self):
        """Drop a random amount of gold at the death position."""
        gold = random.randint(1, 100)
        print(f'Dropped {gold} gold at position {self.get_position()}')
# Demo: walk a dragon around, then damage it until it dies.
if __name__ == '__main__':
    wawelski = Dragon(name='Wawelski', position_x=0, position_y=0)
    wawelski.move(left=10, down=20)
    wawelski.move(right=15, up=5)
    wawelski.take_damage(10)
    wawelski.take_damage(50)
    wawelski.take_damage(35)
    wawelski.take_damage(20)
|
998,791 | 694d491f11505a5715698aa5cd88dbfef8ea3760 | import argparse
import os
import cv2
import json
import natsort
import time
import logging
from ticket_scan.scanner import slicer
from ticket_scan.scanner.ocr import extract_text_from_image
from ticket_scan.scanner.helpers import setup_logging
DEFAULT_OEM = 1
DEFAULT_PSM = 7
DEFAULT_SIDE_MARGIN = 5
logger = logging.getLogger(__name__)
def get_sorted_file_list_for_path(path, prefix=""):
    """Return the naturally-sorted entries of *path*.

    When *prefix* is non-empty, only entries starting with it are kept.
    """
    entries = os.listdir(path)
    if prefix:
        entries = [name for name in entries if name.startswith(prefix)]
    return natsort.natsorted(entries)
def extract_text_lines_from_image(path=None,
                                  image=None,
                                  oem=DEFAULT_OEM,
                                  psm=DEFAULT_PSM,
                                  side_margin=DEFAULT_SIDE_MARGIN,
                                  *args, **kwargs
                                  ):
    """Run OCR over pre-sliced ticket images.

    Two independent modes:
      * path:  OCR every "cropped*" file in the directory and dump the
        results to two JSON files inside that directory.
      * image: slice the given image first, OCR each slice, and dump the
        results next to the image.

    Returns a dict mapping file name (path mode) or slice index (image
    mode) to the recognised text.
    """
    text_recognition_dict = {}
    if path is not None:
        file_list = get_sorted_file_list_for_path(path, prefix="cropped")
        for file in file_list:
            filepath = os.path.join(path, file)
            print(filepath)
            # BUGFIX: the original rebound the `image` parameter here,
            # so after the path branch the `if image is not None` branch
            # below could run on an ndarray instead of a file path.
            image_data = cv2.imread(filepath)
            orig = image_data.copy()
            text_recognised = extract_text_from_image(img=orig,
                                                      oem=oem,
                                                      psm=psm,
                                                      lang="spa",
                                                      side_margin=side_margin)
            text_recognition_dict[file] = text_recognised
        # Context managers close both result files (the original leaked
        # the first handle by re-binding `f` before closing it).
        with open(os.path.join(path, "text_recognition_" +
                               str(oem) + "_" + str(psm) + ".json"), "w") as f:
            json.dump(text_recognition_dict, f, ensure_ascii=False, indent=2)
        with open(os.path.join(path, "result.json"), "w") as f:
            json.dump(text_recognition_dict, f, ensure_ascii=False, indent=2)
    if image is not None:
        start = time.time()
        slices = slicer.slice(image, *args, **kwargs)
        end = time.time()
        logger.info(f"sliced image in {str(end - start)}s")
        start = time.time()
        # `slice_img` instead of `slice`: avoid shadowing the builtin.
        for idx, slice_img in enumerate(slices):
            text_recognised = extract_text_from_image(img=slice_img,
                                                      oem=oem,
                                                      psm=psm,
                                                      lang="spa",
                                                      side_margin=side_margin)
            text_recognition_dict[idx] = text_recognised
        end = time.time()
        logger.info(f"read slices in {str(end - start)}s")
        image_path = os.path.dirname(image)
        result_path = os.path.join(image_path, 'text_recognition_result.json')
        with open(result_path, "w") as f:
            json.dump(text_recognition_dict, f, ensure_ascii=False, indent=2)
    return text_recognition_dict
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
# Input selection: either a directory of pre-cropped images or one image.
ap.add_argument("-p", "--path",
                default=None,
                type=str,
                help="Path to folder with images to be scanned")
ap.add_argument("-i", "--image",
                default=None,
                type=str,
                help="Path to the image to be scanned")
ap.add_argument("-S", "--save-cropped",
                action="store_true",
                default=False,
                help="Save cropped images")
ap.add_argument("-I", "--interactive",
                action="store_true",
                default=False,
                help="Slice interactively showing image to modify variables with [ ` . + - ] characters")
ap.add_argument("-o", "--output-path",
                default='',
                type=str,
                help="Path for the results to be saved")
ap.add_argument("-m", "--side-margin",
                default=DEFAULT_SIDE_MARGIN,
                type=int,
                help="Side margin to crop from image (default " + str(DEFAULT_SIDE_MARGIN) + ")")
# Tesseract engine selection; adjacent string literals concatenate into
# one long help text.
ap.add_argument("-oem", "--oem", type=int, default=DEFAULT_OEM,
                help="controls the type of algorithm used by Tesseract."
                "OCR Engine modes:"
                "0 Legacy engine only."
                "1 Neural nets LSTM engine only."
                "2 Legacy + LSTM engines."
                "3 Default, based on what is available."
                "(default " + str(DEFAULT_OEM) + ")"
                )
# Tesseract page segmentation mode.
ap.add_argument("-psm", "--psm", type=int, default=DEFAULT_PSM,
                help="controls the automatic Page Segmentation Mode used by Tesseract"
                "Page segmentation modes:"
                "0 Orientation and script detection (OSD) only."
                "1 Automatic page segmentation with OSD."
                "2 Automatic page segmentation, but no OSD, or OCR."
                "3 Fully automatic page segmentation, but no OSD. (Default)"
                "4 Assume a single column of text of variable sizes."
                "5 Assume a single uniform block of vertically aligned text."
                "6 Assume a single uniform block of text."
                "7 Treat the image as a single text line."
                "8 Treat the image as a single word."
                "9 Treat the image as a single word in a circle."
                "10 Treat the image as a single character."
                "11 Sparse text. Find as much text as possible in no particular order."
                "12 Sparse text with OSD."
                "13 Raw line. Treat the image as a single text line,"
                " bypassing hacks that are Tesseract-specific."
                "(default " + str(DEFAULT_PSM) + ")"
                )
# Verbosity flags share the same dest; -vv wins when both are given last.
ap.add_argument(
    '-v',
    '--verbose',
    dest="loglevel",
    help="set loglevel to INFO",
    action='store_const',
    const=logging.INFO)
ap.add_argument(
    '-vv',
    '--very-verbose',
    dest="loglevel",
    help="set loglevel to DEBUG",
    action='store_const',
    const=logging.DEBUG)
if __name__ == "__main__":
    args = vars(ap.parse_args())
    setup_logging(args["loglevel"])
    # Validate inputs early: --path must be a directory, --image a file.
    assert args["path"] is None or os.path.isdir(args["path"])
    assert args["image"] is None or os.path.isfile(args["image"])
    extract_text_lines_from_image(path=args["path"],
                                  image=args["image"],
                                  oem=args["oem"],
                                  psm=args["psm"],
                                  side_margin=args["side_margin"],
                                  save_cropped=args["save_cropped"],
                                  output_path=args["output_path"],
                                  interactive=args["interactive"]
                                  )
|
998,792 | f011a60576493dcd5e914ad8fdd2b7867dca93a6 | import os
T = 100
print T
for k in xrange(T):
n = 20
print n
foo = int(os.urandom(4).encode('hex'), 16)
row = ''
for i in xrange(n):
if len(row)>0:
row += ' '
if (foo&(1<<i))!=0:
row += '1'
else:
row += '0'
print row
|
998,793 | 8bc207db9d6e7ac1b78a5c313c3026f09f793d05 | import tweepy
import json
import csv
class JSONEncoder(json.JSONEncoder):
    """Allows ObjectId to be JSON encoded."""

    def default(self, o):
        # NOTE(review): ObjectId is not imported anywhere in this module,
        # so this isinstance check raises NameError as written; presumably
        # `from bson import ObjectId` is expected -- TODO confirm.
        if isinstance(o, ObjectId):
            return str(o)
        # Fall back to the default encoder for everything else.
        return json.JSONEncoder.default(self, o)
def read_twitter_json(f):
    """Read a line-delimited Twitter JSON file.

    Input
        f: path to json file
    Output
        list of dictionaries, one per line of the file
    """
    with open(f) as json_file:
        return [json.loads(line) for line in json_file]
def twitter_auth(k, s):
    """Create an App Only authenticated tweepy API.

    App-only auth allows better rate limits for searching.
    Inputs
        k: API key
        s: API key_secret
    Output
        api: authenticated API interface, or None when creation failed
    """
    auth = tweepy.AppAuthHandler(k, s)
    api = tweepy.API(
        auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True
    )
    return api if api else None
def twitter_search(a, q, x=10000000, t=100, s=None, m=-1):
    """Produce Twitter Search.

    Pages backwards through search results via max_id until either `x`
    tweets are collected or the API returns an empty page.

    Inputs
        a: Authenticated API interface
        q: search query
        x: maximum no of returned tweets (default: 10e8)
        t: tweets per query (default: 100 (api max)) (api: count)
        s: get tweets since id s (default: None) (api: since_id)
        m: last processed id (default: -1) next unprocessed tweet is
           m - 1 or smaller (api: max_id)
    Output
        tweets: Tuple (No of search results, List of tweets) on success,
        or (-1, "error:...") when the API raises TweepError
    """
    tweets = list()
    tweet_count = 0
    while tweet_count < x:
        try:
            # Four call shapes depending on whether max_id and/or
            # since_id should be supplied to the API.
            if (m <= 0):
                if (not s):
                    new_tweets = a.search(q = q, count = t)
                else:
                    new_tweets = a.search(q = q, count = t, since_id = s)
            else:
                if (not s):
                    new_tweets = a.search(q = q, count = t, max_id = (m - 1))
                else:
                    new_tweets = a.search(q = q, count = t, max_id = (m - 1), since_id = s)
            if not new_tweets:
                break  # no more results available
            for tweet in new_tweets:
                tweets.append(tweet)
            tweet_count += len(new_tweets)
            # Continue paging from the oldest tweet seen so far.
            m = new_tweets[-1].id
        except tweepy.TweepError as e:
            # Error contract: callers detect failure via the -1 count.
            error = (-1, "error:" + str(e))
            return error
    search_results = (tweet_count, tweets)
    return search_results
def find_latest_id(d, s):
    """Find the newest tweet id recorded for a given search.

    Input
        d: list of tweet dictionaries
        s: search_id of search
    Output
        m: id of latest tweet, or None when the search has no tweets
    """
    matching_ids = [tweet['id'] for tweet in d if tweet['search_id'] == s]
    return max(matching_ids) if matching_ids else None
def write_csv(d, f):
    """Write csv file back to disk.

    Input
        d: tuple containing (header, data)
        f: filename for csv file.
    Output
        File written to disk; returns True
    """
    with open(f, 'w') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(d[0])
        for row in d[1]:
            row_encode = list()
            for x in row:
                # NOTE(review): `unicode` is the Python 2 builtin -- this
                # function is Python-2-only as written.
                if type(x) == unicode:
                    row_encode.append(x.encode('utf8'))
                else:
                    row_encode.append(x)
            writer.writerow(row_encode)
    return True
def md_5_hash(i):
    """MD5 Hash values.

    Input
        i: string to be hashed
    Output
        h: hex digest string of the MD5 hash
    """
    # BUGFIX: hashlib was never imported at module level, so this
    # function raised NameError; import it locally here.
    import hashlib
    h = hashlib.md5(i.encode('utf-8')).hexdigest()
    return h
def extract_first_name(s):
    """Extract first name from string.

    Returns the first whitespace-separated token of 2 or more
    characters, title-cased.
    Input
        s: string containing name
    Output
        name: first name, or None when no token is longer than 1 char
    """
    # BUGFIX: the original called re.sub() but `re` was never imported
    # (NameError). str.split() with no argument already collapses any
    # run of whitespace, so the regex pass is unnecessary.
    for name in s.split():
        if len(name) > 1:
            return name.title()
    return None
def unicode_decode(text):
    """Round-trip *text* through UTF-8.

    Input
        text: text for conversion
    Output
        the decoded string, or the raw UTF-8 bytes when decoding fails
    """
    encoded = text.encode('utf-8')
    try:
        return encoded.decode()
    except UnicodeDecodeError:
        return encoded
def extract_value(k, d, f=''):
    """Extract value from dictionary if it exists.

    Optionally applies a function to the value once extracted.
    Inputs
        k: key from key/value pair in dictionary
        d: dictionary to extract from
        f: (optional) function to apply to the value
    Output
        v: the (possibly transformed) value, or the empty string when
           the key is missing; str values pass through unicode_decode()
    """
    if k not in d:
        return unicode_decode('')
    # The empty string doubles as the "no function supplied" sentinel.
    value = f(d[k]) if f != '' else d[k]
    if type(value) == str:
        return unicode_decode(value)
    return value
def extract_dict(d, f):
    """Extract value from a chain of nested dictionaries.

    Inputs
        d: top-level dictionary
        f: list of keys leading to the required value,
           e.g. ['reactions', 'summary', 'total_count']
    Output
        the value at the end of the chain (missing final keys yield '')
    """
    # Iterative walk instead of the recursive descent: follow all but
    # the last key, then let extract_value handle the final lookup.
    for key in f[:-1]:
        d = d[key]
    return extract_value(f[-1], d)
def create_csv(d, f):
    """Create a flattened csv from a python dictionary.

    Inputs
        d: dictionary of JSON object
        f: list of fields for csv file
           (use dots to extract from deeper within the dictionary)
    Outputs
        csv: tuple of (list of headers, list of data rows)
    """
    csv_data = list()
    # NOTE(review): `unicode` is the Python 2 builtin -- this function is
    # Python-2-only as written.
    csv_head = [unicode(x) for x in f]
    for row in d:
        row_data = list()
        for field in f:
            # Dotted field names address nested dictionaries.
            fields = field.split('.')
            row_data.append(extract_dict(row, fields))
        csv_data.append(row_data)
    csv = (csv_head, csv_data)
    return csv
|
class Test002:
    """Trivial sample test class that prints a marker line."""

    def test001(self):
        """Print the marker for test002."""
        print('\n--test002')
998,795 | 057fbdb9557545be10d1cf37881a839399942e86 | import settings
import equations
import time
import tkinter as tk
from threading import Thread
INTERVAL = 1
class MainPanel:
    """Tk GUI: a main window with a button that opens a profit monitor."""

    def __init__(self):
        pass

    def start(self):
        """Build the main window and enter the Tk event loop (blocks)."""
        self.root = tk.Tk()
        self.root.geometry('1000x1000')
        self.root.title('Penny')
        # NOTE(review): .pack() returns None, so add_button is always
        # None -- harmless, but the widget reference is lost.
        self.add_button = tk.Button(self.root, text="Monitor", font=(
            None, 12), command=self.add_monitor).pack(pady=40)
        self.root.mainloop()

    def add_monitor(self):
        """Open the monitor window and refresh profits every INTERVAL s."""
        # NOTE(review): a second tk.Tk() root plus the infinite loop below
        # run inside a button callback, which never returns control to the
        # main event loop. tk.Toplevel + root.after(INTERVAL*1000, ...) is
        # the usual pattern -- behavior left unchanged here.
        self.monitor = tk.Tk()
        self.monitor.title('Monitor')
        self.monitor.geometry('1000x1000')
        # profit/label hold two StringVar/Label maps per coin (two rows).
        self.profit = [{}, {}]
        self.label = [{}, {}]
        for i in settings.coins:
            self.profit[0][i] = tk.StringVar()
            self.label[0][i] = tk.Label(
                self.monitor, textvariable=self.profit[0][i]).pack()
            self.profit[1][i] = tk.StringVar()
            self.label[1][i] = tk.Label(
                self.monitor, textvariable=self.profit[1][i]).pack()
        while True:
            self.update_data()
            time.sleep(INTERVAL)

    def update_data(self):
        """Pull fresh data from settings and push it into the labels."""
        settings.refreshData()
        for i in settings.coins:
            self.profit[0][i].set(settings.profit[0][i])
            self.profit[1][i].set(settings.profit[1][i])
# Script entry: initialise shared settings, then start the GUI.
settings.init()
gui = MainPanel()
gui.start()
|
998,796 | c1d59ef99bc57a73b89ac7d0771b9293f86417e7 | # Generated by Django 3.1 on 2020-12-03 09:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make the myteam roster columns CharField(80, null)."""

    dependencies = [
        ('game', '0004_myteam'),
    ]

    operations = [
        migrations.AlterField(
            model_name='myteam',
            name='D',
            field=models.CharField(db_column='column5', max_length=80, null=True),
        ),
        migrations.AlterField(
            model_name='myteam',
            name='K',
            field=models.CharField(db_column='column6', max_length=80, null=True),
        ),
        migrations.AlterField(
            model_name='myteam',
            name='QB',
            field=models.CharField(db_column='column1', max_length=80, null=True),
        ),
        migrations.AlterField(
            model_name='myteam',
            name='RB1',
            field=models.CharField(db_column='column2', max_length=80, null=True),
        ),
        migrations.AlterField(
            model_name='myteam',
            name='TE',
            field=models.CharField(db_column='column4', max_length=80, null=True),
        ),
        migrations.AlterField(
            model_name='myteam',
            name='WR',
            field=models.CharField(db_column='column3', max_length=80, null=True),
        ),
    ]
|
998,797 | 8128a1c2fc2b531248b95b8f30367798787df8a0 | '''
Created on Jun 25, 2015
@author: Edmundo Cossio
'''
from selenium.webdriver.support import expected_conditions as EC
from robot.libraries.BuiltIn import BuiltIn
from selenium.webdriver.common.by import By
from Singleton.DriverManager import DriverManager
from Singleton.GlobalVar import HomePageURL
class CreateAccount:
    """Page object for the "Create an Account" registration form."""

    __driver = None
    __wait = None
    # Element locators.
    __register_button = (By.CSS_SELECTOR, "div.buttons-set > button.button")
    __create_account_form = (By.ID, "form-validate")
    __validate_hello = (By.CSS_SELECTOR, "p.hello > strong")
    __firstname_text = (By.ID, "firstname")
    __lastname_text = (By.ID, "lastname")
    __email_address_text = (By.ID, "email_address")
    __password_text = (By.ID, "password")
    __confirmation_text = (By.ID, "confirmation")

    def __init__(self):
        # Shared driver/wait obtained from the singleton DriverManager.
        self.__driver = DriverManager().get_instance().get_driver()
        #self.open_browser(HomePageURL)
        self.__wait = DriverManager().get_instance().get_wait()

    def create_new_account(self, firstName, lastName, emailAddress, passw, confirmPassw):
        """Fill in the registration form fields (does not submit)."""
        self.__wait.until(EC.visibility_of_element_located(self.__create_account_form), "Error,")
        #self.__wait.until(EC.presence_of_all_elements_located(self.__create_account_form), "Error,")
        self.__driver.find_element(*self.__firstname_text).send_keys(firstName)
        self.__driver.find_element(*self.__lastname_text).send_keys(lastName)
        self.__driver.find_element(*self.__email_address_text).send_keys(emailAddress)
        self.__driver.find_element(*self.__password_text).send_keys(passw)
        self.__driver.find_element(*self.__confirmation_text).send_keys(confirmPassw)

    def click_register(self):
        """Submit the registration form."""
        self.__driver.find_element(*self.__register_button).click()

    def validate_if_the_My_Dashboard_page_is_displayed(self, firstname, lastname):
        """Assert the post-registration greeting matches the given name."""
        self.__wait.until(EC.visibility_of_element_located(self.__validate_hello), "Error,")
        actual_result = self.__driver.find_element(*self.__validate_hello).text
        expected_result = "Hello, "+firstname+" "+lastname+"!"
        # NOTE(review): the failure message mentions "Invalid login or
        # password" -- looks copy-pasted from a login page object; confirm.
        BuiltIn().should_be_equal(actual_result, expected_result, 'Error, the message is not displayed: ' + "Invalid login or password.")
998,798 | 9bd3b778974b7b399bb262609fa7340d0002610f | # pylint: disable=unused-variable
from dataclasses import dataclass, is_dataclass
from datafiles import decorators
def describe_datafile():
    """pytest-describe group covering the @datafile decorator."""

    def it_turns_normal_class_into_dataclass(expect):
        class Normal:
            pass

        cls = decorators.datafile("<pattern>")(Normal)

        expect(is_dataclass(cls)).is_(True)

    def it_can_reuse_existing_dataclass(expect):
        @dataclass
        class Existing:
            pass

        cls = decorators.datafile("")(Existing)

        # Reuse means the very same class object is returned.
        expect(id(cls)) == id(Existing)

    def it_maps_to_dataclass_without_parentheses(expect):
        class Sample:
            pass

        # Decorator applied bare: the class itself is the first argument.
        cls = decorators.datafile(Sample)

        expect(is_dataclass(cls)).is_(True)

    def it_forwards_arguments_dataclass_decorator(expect):
        class Sample:
            pass

        cls = decorators.datafile(order=True)(Sample)

        expect(is_dataclass(cls)).is_(True)
def describe_sync():
    """pytest-describe group covering decorators.sync()."""

    def it_turns_dataclass_instance_into_model_instance(expect):
        @dataclass
        class Existing:
            count: int = 42

        instance = Existing()

        decorators.sync(instance, "tmp/example.yml", defaults=True)

        # sync() attaches a .datafile attribute exposing the mapped data.
        expect(instance.datafile.data) == {"count": 42}  # type: ignore
|
998,799 | ebdf5dbd0cbbadf5028929d1abe289a3af691820 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration for the TaskAndFlow app.

    Adjusts verbose names and choice lists on several models; the byte
    strings are UTF-8 encoded non-ASCII labels as emitted by
    makemigrations under Python 2 -- left byte-identical.
    """

    dependencies = [
        ('TaskAndFlow', '0018_flowtemplatestep_sequence'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='eventstepoperation',
            options={'verbose_name': '\u4e8b\u4ef6\u6b65\u9aa4\u64cd\u4f5c', 'verbose_name_plural': '\u4e8b\u4ef6\u6b65\u9aa4\u64cd\u4f5c'},
        ),
        migrations.AlterField(
            model_name='directory',
            name='createtime',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe5\x88\x9b\xe5\xbb\xba\xe6\x97\xb6\xe9\x97\xb4'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='doc2relate',
            name='createtime',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe5\x85\xb3\xe8\x81\x94\xe6\x97\xb6\xe9\x97\xb4'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='doc2relate',
            name='relatetype',
            field=models.CharField(blank=True, max_length=60, null=True, verbose_name=b'\xe5\x85\xb3\xe8\x81\x94\xe5\x85\x83\xe7\xb4\xa0\xe7\xb1\xbb\xe5\x9e\x8b', choices=[(b'\xe6\x9e\x84\xe4\xbb\xb6', b'\xe6\x9e\x84\xe4\xbb\xb6'), (b'\xe4\xbb\xbb\xe5\x8a\xa1', b'\xe4\xbb\xbb\xe5\x8a\xa1'), (b'\xe4\xba\x8b\xe4\xbb\xb6\xe6\xad\xa5\xe9\xaa\xa4\xe6\x93\x8d\xe4\xbd\x9c', b'\xe4\xba\x8b\xe4\xbb\xb6\xe6\xad\xa5\xe9\xaa\xa4\xe6\x93\x8d\xe4\xbd\x9c'), (b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0', b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0'), (b'\xe6\xb5\x81\xe7\xa8\x8b\xe6\xad\xa5\xe9\xaa\xa4', b'\xe6\xb5\x81\xe7\xa8\x8b\xe6\xad\xa5\xe9\xaa\xa4'), (b'\xe9\x87\x8d\xe5\xa4\xa7\xe5\x8d\xb1\xe9\x99\xa9\xe6\xba\x90\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe9\x87\x8d\xe5\xa4\xa7\xe5\x8d\xb1\xe9\x99\xa9\xe6\xba\x90\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe6\x9e\x84\xe4\xbb\xb6\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe6\x9e\x84\xe4\xbb\xb6\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe4\xbb\xbb\xe5\x8a\xa1\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe4\xbb\xbb\xe5\x8a\xa1\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95')]),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='document',
            name='createtime',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe5\x88\x9b\xe5\xbb\xba\xe6\x97\xb6\xe9\x97\xb4'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='eventstep',
            name='starttime',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe5\xbc\x80\xe5\xa7\x8b\xe6\x97\xb6\xe9\x97\xb4'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='eventstepoperation',
            name='oprtime',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe6\x93\x8d\xe4\xbd\x9c\xe6\x97\xb6\xe9\x97\xb4'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='hazard',
            name='relatetype',
            field=models.CharField(blank=True, max_length=60, null=True, verbose_name=b'\xe5\x85\xb3\xe8\x81\x94\xe5\x85\x83\xe7\xb4\xa0\xe7\xb1\xbb\xe5\x9e\x8b', choices=[(b'\xe6\x9e\x84\xe4\xbb\xb6', b'\xe6\x9e\x84\xe4\xbb\xb6'), (b'\xe4\xbb\xbb\xe5\x8a\xa1', b'\xe4\xbb\xbb\xe5\x8a\xa1'), (b'\xe4\xba\x8b\xe4\xbb\xb6\xe6\xad\xa5\xe9\xaa\xa4\xe6\x93\x8d\xe4\xbd\x9c', b'\xe4\xba\x8b\xe4\xbb\xb6\xe6\xad\xa5\xe9\xaa\xa4\xe6\x93\x8d\xe4\xbd\x9c'), (b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0', b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0'), (b'\xe6\xb5\x81\xe7\xa8\x8b\xe6\xad\xa5\xe9\xaa\xa4', b'\xe6\xb5\x81\xe7\xa8\x8b\xe6\xad\xa5\xe9\xaa\xa4'), (b'\xe9\x87\x8d\xe5\xa4\xa7\xe5\x8d\xb1\xe9\x99\xa9\xe6\xba\x90\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe9\x87\x8d\xe5\xa4\xa7\xe5\x8d\xb1\xe9\x99\xa9\xe6\xba\x90\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe6\x9e\x84\xe4\xbb\xb6\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe6\x9e\x84\xe4\xbb\xb6\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe4\xbb\xbb\xe5\x8a\xa1\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe4\xbb\xbb\xe5\x8a\xa1\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95')]),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='message',
            name='relatetype',
            field=models.CharField(blank=True, max_length=60, null=True, verbose_name=b'\xe5\x85\xb3\xe8\x81\x94\xe5\x85\x83\xe7\xb4\xa0\xe7\xb1\xbb\xe5\x9e\x8b', choices=[(b'\xe6\x9e\x84\xe4\xbb\xb6', b'\xe6\x9e\x84\xe4\xbb\xb6'), (b'\xe4\xbb\xbb\xe5\x8a\xa1', b'\xe4\xbb\xbb\xe5\x8a\xa1'), (b'\xe4\xba\x8b\xe4\xbb\xb6\xe6\xad\xa5\xe9\xaa\xa4\xe6\x93\x8d\xe4\xbd\x9c', b'\xe4\xba\x8b\xe4\xbb\xb6\xe6\xad\xa5\xe9\xaa\xa4\xe6\x93\x8d\xe4\xbd\x9c'), (b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0', b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0'), (b'\xe6\xb5\x81\xe7\xa8\x8b\xe6\xad\xa5\xe9\xaa\xa4', b'\xe6\xb5\x81\xe7\xa8\x8b\xe6\xad\xa5\xe9\xaa\xa4'), (b'\xe9\x87\x8d\xe5\xa4\xa7\xe5\x8d\xb1\xe9\x99\xa9\xe6\xba\x90\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe9\x87\x8d\xe5\xa4\xa7\xe5\x8d\xb1\xe9\x99\xa9\xe6\xba\x90\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe6\x9e\x84\xe4\xbb\xb6\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe6\x9e\x84\xe4\xbb\xb6\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe4\xbb\xbb\xe5\x8a\xa1\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe4\xbb\xbb\xe5\x8a\xa1\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95')]),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='projectevent',
            name='relatetype',
            field=models.CharField(max_length=60, verbose_name=b'\xe5\x85\xb3\xe8\x81\x94\xe5\x85\x83\xe7\xb4\xa0\xe7\xb1\xbb\xe5\x9e\x8b', choices=[(b'\xe6\x9e\x84\xe4\xbb\xb6', b'\xe6\x9e\x84\xe4\xbb\xb6'), (b'\xe4\xbb\xbb\xe5\x8a\xa1', b'\xe4\xbb\xbb\xe5\x8a\xa1'), (b'\xe4\xba\x8b\xe4\xbb\xb6\xe6\xad\xa5\xe9\xaa\xa4\xe6\x93\x8d\xe4\xbd\x9c', b'\xe4\xba\x8b\xe4\xbb\xb6\xe6\xad\xa5\xe9\xaa\xa4\xe6\x93\x8d\xe4\xbd\x9c'), (b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0', b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0'), (b'\xe6\xb5\x81\xe7\xa8\x8b\xe6\xad\xa5\xe9\xaa\xa4', b'\xe6\xb5\x81\xe7\xa8\x8b\xe6\xad\xa5\xe9\xaa\xa4'), (b'\xe9\x87\x8d\xe5\xa4\xa7\xe5\x8d\xb1\xe9\x99\xa9\xe6\xba\x90\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe9\x87\x8d\xe5\xa4\xa7\xe5\x8d\xb1\xe9\x99\xa9\xe6\xba\x90\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe6\x96\xbd\xe5\xb7\xa5\xe6\x9c\xba\xe6\xa2\xb0\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe6\x9e\x84\xe4\xbb\xb6\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe6\x9e\x84\xe4\xbb\xb6\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95'), (b'\xe4\xbb\xbb\xe5\x8a\xa1\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', b'\xe4\xbb\xbb\xe5\x8a\xa1\xe7\x8a\xb6\xe6\x80\x81\xe4\xbf\xae\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95')]),
            preserve_default=True,
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.