max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
src/b3get/utils.py | psteinb/b3get | 20 | 6614751 | <reponame>psteinb/b3get<gh_stars>10-100
from __future__ import print_function, with_statement
import tempfile
import os
import re
import requests
import tqdm
import math
import numpy as np
import zipfile
def tmp_location():
    """Return a working directory beneath the system temp folder.

    Reuses the first existing directory whose name ends in '-b3get';
    otherwise a fresh one is created via mkdtemp and returned.
    """
    base = tempfile.gettempdir()
    for dirpath, _dirnames, _filenames in os.walk(base):
        if dirpath.endswith('-b3get'):
            return dirpath
    return tempfile.mkdtemp(suffix='-b3get')
def filter_files(alist, rex):
    """Return the items of <alist> that match the regular expression <rex>.

    If <rex> is not a non-empty string, <alist> is returned unchanged.
    """
    if not (isinstance(rex, str) and rex):
        return alist
    pattern = re.compile(rex)
    return [entry for entry in alist if pattern.search(entry)]
def size_of_content(url):
    """Return the number of bytes advertised in the content-length header of <url>.

    Returns 0 if the request times out, the URL does not exist, or the
    server does not report a content length.
    """
    try:
        r = requests.head(url, timeout=2)
    except requests.exceptions.Timeout as texc:
        print('timed out on', url, texc)
        return 0
    if not r.ok:
        print('E url {} does not exist'.format(url))
        return 0
    # BUG FIX: the header may be absent (e.g. chunked responses);
    # previously int(None) raised TypeError — fall back to 0 instead
    return int(r.headers.get('content-length') or 0)
def serial_download_file(url, dstfolder, chunk_bytes=1024*1024, npos=None):
    """Download the file at <url> into the folder <dstfolder>.

    Skips the transfer when the destination file already exists with the
    expected size.  Progress is shown with tqdm; <npos> selects the
    progress-bar row when several downloads run side by side.
    Returns the full path of the downloaded file, or "" when
    <dstfolder> does not exist.
    """
    if not os.path.exists(dstfolder):
        print('E destination path {} does not exist'.format(dstfolder))
        return ""
    r = requests.get(url, stream=True, timeout=2)
    assert r.ok, "unable to access URL: {}".format(url)
    _, fname = os.path.split(url)
    dstf = os.path.join(dstfolder, fname)
    # BUG FIX: the header may be absent; int(None) used to raise TypeError
    total_length = int(r.headers.get('content-length') or 0)
    if os.path.isfile(dstf) and os.stat(dstf).st_size == total_length:  # nothing to download
        return dstf
    with open(dstf, 'wb') as fo:
        if total_length == 0:  # no content length header: single write
            fo.write(r.content)
        else:
            # BUG FIX: 'if not npos' treated position 0 like None;
            # test identity instead so row 0 is honoured
            if npos is None:
                pbar = tqdm.tqdm(total=total_length, unit='B', unit_scale=True)
            else:
                pbar = tqdm.tqdm(total=total_length, unit='B', unit_scale=True, position=npos)
            try:
                for data in r.iter_content(chunk_size=chunk_bytes):
                    fo.write(data)
                    pbar.update(len(data))
            finally:
                # BUG FIX: the bar was never closed, leaking its display slot
                pbar.close()
    return dstf
def wrap_serial_download_file(args):
    """Unpack the tuple <args> into serial_download_file.

    Useful with APIs that pass a single argument object per call.
    """
    return serial_download_file(*args)
def chunk_npz(ndalist, basename, max_megabytes=1):
    """Store the numpy arrays in <ndalist> compressed under <basename>*.npz.

    If the combined payload exceeds <max_megabytes>, the arrays are split
    over several numbered .npz files.  Returns the list of file names
    written (empty for empty input).
    """
    value = []
    if not ndalist:
        return value
    total_bytes = sum(item.nbytes for item in ndalist)
    total_mb = total_bytes / (1024. * 1024.)
    if total_mb > max_megabytes:
        nchunks = int(math.ceil(total_mb / max_megabytes))
        nitems = int(math.ceil(len(ndalist) / nchunks))
        ndigits = len(str(nchunks))
        cnt = 0
        for i in range(nchunks):
            if cnt >= len(ndalist):
                break
            # BUG FIX: the final chunk previously used -1 as the slice end,
            # which silently dropped the last array; clamp to the list
            # length instead
            end = min(cnt + nitems, len(ndalist))
            dst = basename + (('{0:0' + str(ndigits) + '}.npz').format(i))
            np.savez_compressed(dst, *ndalist[cnt:end])
            cnt = end
            value.append(dst)
    else:
        dst = basename + '.npz'
        np.savez_compressed(dst, *ndalist)
        value.append(dst)
    return value
def unzip_to(azipfile, basedir, force=False):
    """Extract <azipfile> into <basedir> and return the extracted paths.

    Members already present in <basedir> with the expected size are
    skipped; if <force> is True every member is extracted again.
    """
    value = []
    # close the archive deterministically instead of leaking the handle
    with zipfile.ZipFile(azipfile, 'r') as zf:
        for info in zf.infolist():
            exp_path = os.path.join(basedir, info.filename)
            up_to_date = (os.path.isfile(exp_path)
                          and os.stat(exp_path).st_size == info.file_size)
            # BUG FIX: <force> was documented but previously ignored
            if force or not up_to_date:
                zf.extract(info.filename, basedir)
                value.append(exp_path)
    return value
def wrap_unzip_to(args):
    """Unpack the tuple <args> into unzip_to.

    Useful with APIs that pass a single argument object per call.
    """
    return unzip_to(*args)
| from __future__ import print_function, with_statement
import tempfile
import os
import re
import requests
import tqdm
import math
import numpy as np
import zipfile
def tmp_location():
""" return a folder under /tmp or similar,
If something exists that matches the name '.*-b3get', use this.
If nothing is found, a new folder under /tmp is created and returned
"""
tmp = tempfile.gettempdir()
folders = [subdir[0] for subdir in os.walk(tmp) if subdir[0].endswith('-b3get')]
if len(folders) > 0:
return folders[0]
else:
return tempfile.mkdtemp(suffix='-b3get')
def filter_files(alist, rex):
""" given a list (of strings), filter out items that match the regular express rex """
if not isinstance(rex, str) or len(rex) == 0:
return alist
compiled = re.compile(rex)
srcs = [item for item in alist if compiled.search(item)]
return srcs
def size_of_content(url):
""" given an URL, return the number of bytes stored in the header attribute content-length """
try:
r = requests.head(url, timeout=2)
except requests.exceptions.Timeout as texc:
print('timed out on', url, texc)
return 0
except Exception as ex:
raise ex
value = 0
if not r.ok:
print('E url {} does not exist'.format(url))
return value
value = int(r.headers.get('content-length'))
return value
def serial_download_file(url, dstfolder, chunk_bytes=1024*1024, npos=None):
""" download file from <url> into folder <dstfolder>
returns the full path of the successfully downloaded file
"""
if not os.path.exists(dstfolder):
print('E destination path {} does not exist'.format(dstfolder))
return ""
r = requests.get(url, stream=True, timeout=2)
assert r.ok, "unable to access URL: {}".format(url)
_, fname = os.path.split(url)
dstf = os.path.join(dstfolder, fname)
total_length = int(r.headers.get('content-length'))
if os.path.isfile(dstf) and os.stat(dstf).st_size == total_length: # nothing to download
return dstf
with open(dstf, 'wb') as fo:
if total_length == 0: # no content length header
fo.write(r.content)
else:
total_length = int(total_length)
nbytes = 0
pbar = None
if not npos:
pbar = tqdm.tqdm(total=total_length, unit='B', unit_scale=True)
else:
pbar = tqdm.tqdm(total=total_length, unit='B', unit_scale=True, position=npos)
for data in r.iter_content(chunk_size=chunk_bytes):
fo.write(data)
pbar.update(len(data))
nbytes += len(data)
return dstf
def wrap_serial_download_file(args):
""" wrap serial_download to unpack args """
return serial_download_file(*args)
def chunk_npz(ndalist, basename, max_megabytes=1):
""" given a list of numpy.ndarrays <ndalist>, store them compressed inside <basename>
if the storage volume of ndalist exceeds max_megabytes, chunk the data
"""
value = []
if not ndalist:
return value
total_bytes = sum([item.nbytes for item in ndalist])
total_mb = total_bytes/(1024.*1024.)
if total_mb > max_megabytes:
nchunks = int(math.ceil(total_mb/max_megabytes))
nitems = int(math.ceil(len(ndalist)/nchunks))
ndigits = len(str(nchunks))
cnt = 0
for i in range(int(math.ceil(nchunks))):
if cnt >= len(ndalist):
break
end = -1 if cnt+nitems > len(ndalist) else cnt+nitems
dst = basename+(('{0:0'+str(ndigits)+'}.npz').format(i))
np.savez_compressed(dst,
*ndalist[cnt:end])
cnt += nitems
value.append(dst)
else:
dst = basename+'.npz'
np.savez_compressed(dst,
*ndalist)
value.append(dst)
return value
def unzip_to(azipfile, basedir, force=False):
""" unzip file <zipfile> into <basedir>
If the full content of <zipfile> is already found inside <basedir>, do nothing.
If <force> is True, always unzip"""
value = []
zf = zipfile.ZipFile(azipfile, 'r')
content = zf.infolist()
if not content:
return value
for info in content:
xsize = info.file_size
xname = info.filename
exp_path = os.path.join(basedir, xname)
if not os.path.isfile(exp_path) or not os.stat(exp_path).st_size == xsize:
zf.extract(xname, basedir)
value.append(exp_path)
return value
def wrap_unzip_to(args):
""" wrapper around unzip_to that unpacks the arguments """
return unzip_to(*args) | en | 0.762034 | return a folder under /tmp or similar, If something exists that matches the name '.*-b3get', use this. If nothing is found, a new folder under /tmp is created and returned given a list (of strings), filter out items that match the regular express rex given an URL, return the number of bytes stored in the header attribute content-length download file from <url> into folder <dstfolder> returns the full path of the successfully downloaded file # nothing to download # no content length header wrap serial_download to unpack args given a list of numpy.ndarrays <ndalist>, store them compressed inside <basename> if the storage volume of ndalist exceeds max_megabytes, chunk the data unzip file <zipfile> into <basedir> If the full content of <zipfile> is already found inside <basedir>, do nothing. If <force> is True, always unzip wrapper around unzip_to that unpacks the arguments | 3.116123 | 3 |
lammps-master/tools/i-pi/ipi/utils/prng.py | rajkubp020/helloword | 0 | 6614752 | <filename>lammps-master/tools/i-pi/ipi/utils/prng.py
"""Contains the classes used to generate pseudo-random numbers.
Copyright (C) 2013, <NAME> and <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http.//www.gnu.org/licenses/>.
Allows the user to specify a seed for the random number generator.
These are used in initialising the velocities and in stochastic thermostats.
The state of the random number generator is kept track of, so that the if the
simulation is restarted from a checkpoint, we will see the same dynamics as if
it had not been stopped.
Classes:
Random: An interface between the numpy.random module and the user.
"""
__all__ = ['Random']
import numpy as np
import math
class Random(object):
    """Interface to the standard numpy pseudo-random number generator.

    Initialises the generator from a seed at the beginning of the
    simulation and keeps track of its state so that it can be written to
    checkpoint files and restored on restart, reproducing the same
    dynamics.

    Attributes:
       rng: The numpy RandomState used for all draws.
       seed: The integer seed the generator was started from.
       state: A tuple of five objects giving the current generator state:
          the generator name ('MT19937'), an array of 624 integers, the
          current read position in that array, whether a Gaussian draw is
          cached, and that cached (or last returned) Gaussian number.
    """

    def __init__(self, seed=12345, state=None):
        """Initialises Random.

        Args:
           seed: An optional integer used to seed the generator.
           state: An optional state tuple to restore the generator from;
              when given it takes precedence over the plain seed.
        """
        # BUG FIX: use the public RandomState constructor instead of the
        # private np.random.mtrand path, and seed only once (the previous
        # code seeded in the constructor and then again via rng.seed)
        self.rng = np.random.RandomState(seed=seed)
        self.seed = seed
        if state is not None:
            self.state = state

    def get_state(self):
        """Interface to the standard get_state() function."""
        return self.rng.get_state()

    def set_state(self, value):
        """Interface to the standard set_state() function.

        Should only be used with states generated from another similar
        random number generator, such as one from a previous run.
        """
        return self.rng.set_state(value)

    state = property(get_state, set_state)

    @property
    def u(self):
        """A pseudo-random number drawn uniformly from [0, 1)."""
        return self.rng.random_sample()

    @property
    def g(self):
        """A pseudo-random number drawn from a standard normal distribution."""
        return self.rng.standard_normal()

    def gamma(self, k, theta=1.0):
        """Interface to the standard gamma() function.

        Args:
           k: Shape parameter for the gamma distribution.
           theta: Scale parameter of the distribution (the mean is k*theta,
              not theta as the previous docstring claimed).

        Returns:
           A random number from a gamma distribution with shape k and
           scale theta.
        """
        return self.rng.gamma(k, theta)

    def gvec(self, shape):
        """Return an array of the given shape filled with standard-normal draws.

        Args:
           shape: The shape of the array to be returned.
        """
        return self.rng.standard_normal(shape)
| <filename>lammps-master/tools/i-pi/ipi/utils/prng.py
"""Contains the classes used to generate pseudo-random numbers.
Copyright (C) 2013, <NAME> and <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http.//www.gnu.org/licenses/>.
Allows the user to specify a seed for the random number generator.
These are used in initialising the velocities and in stochastic thermostats.
The state of the random number generator is kept track of, so that the if the
simulation is restarted from a checkpoint, we will see the same dynamics as if
it had not been stopped.
Classes:
Random: An interface between the numpy.random module and the user.
"""
__all__ = ['Random']
import numpy as np
import math
class Random(object):
"""Class to interface with the standard pseudo-random number generator.
Initialises the standard numpy pseudo-random number generator from a seed
at the beginning of the simulation, and keeps track of the state so that
it can be output to the checkpoint files throughout the simulation.
Attributes:
rng: The random number generator to be used.
seed: The seed number to start the generator.
state: A tuple of five objects giving the current state of the random
number generator. The first is the type of random number generator,
here 'MT19937', the second is an array of 624 integers, the third
is the current position in the array that is being read from, the
fourth gives whether it has a gaussian random number stored, and
the fifth is this stored Gaussian random number, or else the last
Gaussian random number returned.
"""
def __init__(self, seed=12345, state=None):
"""Initialises Random.
Args:
seed: An optional seed giving an integer to initialise the state with.
state: An optional state tuple to initialise the state with.
"""
self.rng = np.random.mtrand.RandomState(seed=seed)
self.seed = seed
if state is None:
self.rng.seed(seed)
else:
self.state = state
def get_state(self):
"""Interface to the standard get_state() function."""
return self.rng.get_state()
def set_state(self, value):
"""Interface to the standard set_state() function.
Should only be used with states generated from another similar random
number generator, such as one from a previous run.
"""
return self.rng.set_state(value)
state=property(get_state, set_state)
@property
def u(self):
"""Interface to the standard random_sample() function.
Returns:
A pseudo-random number from a uniform distribution from 0-1.
"""
return self.rng.random_sample()
@property
def g(self):
"""Interface to the standard standard_normal() function.
Returns:
A pseudo-random number from a normal Gaussian distribution.
"""
return self.rng.standard_normal()
def gamma(self, k, theta=1.0):
"""Interface to the standard gamma() function.
Args:
k: Shape parameter for the gamma distribution.
theta: Mean of the distribution.
Returns:
A random number from a gamma distribution with a shape k and a
mean value theta.
"""
return self.rng.gamma(k,theta)
def gvec(self, shape):
"""Interface to the standard_normal array function.
Args:
shape: The shape of the array to be returned.
Returns:
An array with the required shape where each element is taken from
a normal Gaussian distribution.
"""
return self.rng.standard_normal(shape)
| en | 0.820415 | Contains the classes used to generate pseudo-random numbers. Copyright (C) 2013, <NAME> and <NAME> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http.//www.gnu.org/licenses/>. Allows the user to specify a seed for the random number generator. These are used in initialising the velocities and in stochastic thermostats. The state of the random number generator is kept track of, so that the if the simulation is restarted from a checkpoint, we will see the same dynamics as if it had not been stopped. Classes: Random: An interface between the numpy.random module and the user. Class to interface with the standard pseudo-random number generator. Initialises the standard numpy pseudo-random number generator from a seed at the beginning of the simulation, and keeps track of the state so that it can be output to the checkpoint files throughout the simulation. Attributes: rng: The random number generator to be used. seed: The seed number to start the generator. state: A tuple of five objects giving the current state of the random number generator. The first is the type of random number generator, here 'MT19937', the second is an array of 624 integers, the third is the current position in the array that is being read from, the fourth gives whether it has a gaussian random number stored, and the fifth is this stored Gaussian random number, or else the last Gaussian random number returned. Initialises Random. 
Args: seed: An optional seed giving an integer to initialise the state with. state: An optional state tuple to initialise the state with. Interface to the standard get_state() function. Interface to the standard set_state() function. Should only be used with states generated from another similar random number generator, such as one from a previous run. Interface to the standard random_sample() function. Returns: A pseudo-random number from a uniform distribution from 0-1. Interface to the standard standard_normal() function. Returns: A pseudo-random number from a normal Gaussian distribution. Interface to the standard gamma() function. Args: k: Shape parameter for the gamma distribution. theta: Mean of the distribution. Returns: A random number from a gamma distribution with a shape k and a mean value theta. Interface to the standard_normal array function. Args: shape: The shape of the array to be returned. Returns: An array with the required shape where each element is taken from a normal Gaussian distribution. | 3.104792 | 3 |
mkbot/object.py | EvelynSubarrow/mkbot | 0 | 6614753 | import datetime
class NoteUser:
    """Lightweight view of a user as embedded in a note payload."""

    def __init__(self, _state, json: dict):
        self.id = json['id']
        self.name = json['name']
        self.username = json['username']
        self.host = json['host']
        self.avatar_url = json['avatarUrl']
        self.avatar_blurhash = json['avatarBlurhash']
        self.avatar_color = json['avatarColor']
        instance_payload = json.get('instance')
        self.instance = Instance(_state, instance_payload) if instance_payload else None
        self.emojis = [Emoji(_state, entry) for entry in json['emojis']]
        self.online_status = json['onlineStatus']
        # a user is remote when they live on a different host than the client
        self.remote = _state.host != self.host
class ClientUser:
    """The authenticated account's own profile, built from its API payload.

    Extends the basic user fields with moderation flags, profile details
    and the account's pinned notes.
    """

    def __init__(self, _state, json: dict):
        self.id = json['id']
        self.name = json['name']
        self.username = json['username']
        self.host = json['host']
        self.avatar_url = json['avatarUrl']
        self.avatar_blurhash = json['avatarBlurhash']
        self.avatar_color = json['avatarColor']
        # moderation / account-type flags
        self.admin = json['isAdmin']
        self.moderator = json['isModerator']
        self.bot = json['isBot']
        self.emojis = [Emoji(_state, x) for x in json['emojis']]
        self.online_status = json['onlineStatus']
        # NOTE(review): unlike the other classes here this indexes the keys
        # directly rather than using .get(); a payload lacking 'url' raises
        # KeyError — confirm against the API schema
        self.url = json['url'] or json['uri']
        # timestamps arrive as ISO-8601 with a trailing 'Z' that
        # fromisoformat rejects, hence the [:-1]
        self.created_at = datetime.datetime.fromisoformat(json['createdAt'][:-1])
        self.updated_at = datetime.datetime.fromisoformat(json['updatedAt'][:-1])
        self.banner_url = json['bannerUrl']
        self.banner_blurhash = json['bannerBlurhash']
        self.banner_color = json['bannerColor']
        self.locked = json['isLocked']
        self.suspended = json['isSuspended']
        self.silenced = json['isSilenced']
        self.description = json['description']
        self.location = json['location']
        # birthday is an optional 'YYYY-MM-DD' string; stays None when
        # absent or malformed
        self.birthday = None
        if json['birthday']:
            try:
                self.birthday = datetime.datetime.strptime(json['birthday'], '%Y-%m-%d')
            except ValueError:
                pass
        self.lang = json['lang']
        self.fields = [ProfileField(_state, x) for x in json['fields']]
        self.followers_count = json['followersCount']
        self.following_count = json['followingCount']
        self.notes_count = json['notesCount']
        self.pinned_note_ids = json['pinnedNoteIds']
        self.pinned_notes = [Note(_state, x) for x in json['pinnedNotes']]
class ProfileField:
    """A single name/value entry from a user's profile field table."""

    def __init__(self, _state, json: dict):
        self.name = json['name']
        self.value = json['value']
class Instance:
    """Metadata about the server instance a remote user belongs to."""

    def __init__(self, _state, json: dict):
        self.name = json['name']
        self.software_name = json['softwareName']
        self.software_version = json['softwareVersion']
        self.icon_url = json['iconUrl']
        self.favicon_url = json['faviconUrl']
        self.theme_color = json['themeColor']
class Emoji:
    """A custom emoji: its short name and the image URL."""

    def __init__(self, _state, json: dict):
        self.name = json['name']
        self.url = json['url']
class File:
    """A drive file attached to a note, built from its API payload."""

    def __init__(self, _state, json: dict):
        self.id = json['id']
        # timestamps arrive as ISO-8601 with a trailing 'Z'; strip it for
        # fromisoformat
        self.created_at = datetime.datetime.fromisoformat(json['createdAt'][:-1])
        self.name = json['name']
        # remote payloads may expose 'uri' instead of 'url'
        self.url = json.get('url') or json.get('uri')
        self.thumbnail_url = json['thumbnailUrl']
        self.size = json['size']
        self.type = json['type']
        self.comment = json['comment']
        self.is_sensitive = json['isSensitive']
        self.blurhash = json['blurhash']
        self.properties = FileProperties(_state, json['properties'])
        self.folder_id = json['folderId']
        self.folder = json['folder']
        self.user_id = json['userId']
        # the uploader record is optional in the payload
        self.user = None
        if json['user']:
            self.user = NoteUser(_state, json['user'])
class FileProperties:
    """Optional media dimensions reported for a drive file."""

    def __init__(self, _state, json: dict):
        self.width = json.get('width')
        self.height = json.get('height')
class Note:
    """A note (post) fetched from the API, plus the actions on it."""

    def fromAPIResult(self, _state, json: dict):
        """Build a Note from a notes/create response, which wraps the
        note payload in a 'createdNote' key."""
        return Note(_state, json['createdNote'])

    def __init__(self, _state, json: dict):
        self.id = json.get('id', '')
        # ISO-8601 timestamp with a trailing 'Z' that fromisoformat rejects
        self.created_at = datetime.datetime.fromisoformat(json['createdAt'][:-1])
        self.author = NoteUser(_state, json['user'])
        self.text = json.get('text')
        self.cw = json.get('cw')  # content warning, if any
        self.visibility = json['visibility']
        self.renote_count = json['renoteCount']
        self.replies_count = json['repliesCount']
        self.reactions = json['reactions']
        self.emojis = [Emoji(_state, x) for x in json['emojis']]
        self.files = [File(_state, x) for x in json['files']]
        self.file_ids = json['fileIds']
        self.reply_id = json['replyId']
        self.renote_id = json['renoteId']
        self.mentions = json.get('mentions') or []
        # remote payloads may expose 'uri' instead of 'url'
        self.url = json.get('url') or json.get('uri')
        self._state = _state

    def _ensure_author(self):
        """Raise PermissionError unless the client authored this note."""
        if self._state.user.id != self.author.id:
            raise PermissionError('You are not the author of this note.')

    async def delete(self):
        """Delete this note; only its author may do so."""
        self._ensure_author()
        self._state.api.notes_delete(self.id)

    async def pin(self):
        """Pin this note to the profile; only its author may do so."""
        self._ensure_author()
        self._state.api.i_pin(self.id)

    async def unpin(self):
        """Unpin this note from the profile; only its author may do so."""
        self._ensure_author()
        self._state.api.i_unpin(self.id)

    async def reply(self, *args, **kwargs):
        """Create a reply to this note and return the new Note."""
        d = self._state.api.notes_create(reply_id=self.id, *args, **kwargs)
        return Note(self._state, d)

    async def renote(self):
        """Renote (boost) this note and return the new Note.

        BUG FIX: the created note was previously discarded; it is now
        returned, consistent with reply().
        """
        d = self._state.api.notes_create(renote_id=self.id)
        return Note(self._state, d)
| import datetime
class NoteUser:
def __init__(self, _state, json: dict):
self.id = json['id']
self.name = json['name']
self.username = json['username']
self.host = json['host']
self.avatar_url = json['avatarUrl']
self.avatar_blurhash = json['avatarBlurhash']
self.avatar_color = json['avatarColor']
if json.get('instance'):
self.instance = Instance(_state, json['instance'])
else:
self.instance = None
self.emojis = [Emoji(_state, x) for x in json['emojis']]
self.online_status = json['onlineStatus']
self.remote = (_state.host != self.host)
class ClientUser:
def __init__(self, _state, json: dict):
self.id = json['id']
self.name = json['name']
self.username = json['username']
self.host = json['host']
self.avatar_url = json['avatarUrl']
self.avatar_blurhash = json['avatarBlurhash']
self.avatar_color = json['avatarColor']
self.admin = json['isAdmin']
self.moderator = json['isModerator']
self.bot = json['isBot']
self.emojis = [Emoji(_state, x) for x in json['emojis']]
self.online_status = json['onlineStatus']
self.url = json['url'] or json['uri']
self.created_at = datetime.datetime.fromisoformat(json['createdAt'][:-1])
self.updated_at = datetime.datetime.fromisoformat(json['updatedAt'][:-1])
self.banner_url = json['bannerUrl']
self.banner_blurhash = json['bannerBlurhash']
self.banner_color = json['bannerColor']
self.locked = json['isLocked']
self.suspended = json['isSuspended']
self.silenced = json['isSilenced']
self.description = json['description']
self.location = json['location']
self.birthday = None
if json['birthday']:
try:
self.birthday = datetime.datetime.strptime(json['birthday'], '%Y-%m-%d')
except ValueError:
pass
self.lang = json['lang']
self.fields = [ProfileField(_state, x) for x in json['fields']]
self.followers_count = json['followersCount']
self.following_count = json['followingCount']
self.notes_count = json['notesCount']
self.pinned_note_ids = json['pinnedNoteIds']
self.pinned_notes = [Note(_state, x) for x in json['pinnedNotes']]
class ProfileField:
def __init__(self, _state, json: dict):
self.name = json['name']
self.value = json['value']
class Instance:
def __init__(self, _state, json: dict):
self.name = json['name']
self.software_name = json['softwareName']
self.software_version = json['softwareVersion']
self.icon_url = json['iconUrl']
self.favicon_url = json['faviconUrl']
self.theme_color = json['themeColor']
class Emoji:
def __init__(self, _state, json: dict):
self.name = json['name']
self.url = json['url']
class File:
def __init__(self, _state, json: dict):
self.id = json['id']
self.created_at = datetime.datetime.fromisoformat(json['createdAt'][:-1])
self.name = json['name']
self.url = json.get('url') or json.get('uri')
self.thumbnail_url = json['thumbnailUrl']
self.size = json['size']
self.type = json['type']
self.comment = json['comment']
self.is_sensitive = json['isSensitive']
self.blurhash = json['blurhash']
self.properties = FileProperties(_state, json['properties'])
self.folder_id = json['folderId']
self.folder = json['folder']
self.user_id = json['userId']
self.user = None
if json['user']:
self.user = NoteUser(_state, json['user'])
class FileProperties:
def __init__(self, _state, json: dict):
self.width = json.get('width')
self.height = json.get('height')
class Note:
def fromAPIResult(self, _state, json: dict):
return Note(_state, json['createdNote'])
def __init__(self, _state, json: dict):
self.id = json.get('id', '')
self.created_at = None
self.created_at = datetime.datetime.fromisoformat(json['createdAt'][:-1])
self.author = NoteUser(_state, json['user'])
self.text = json.get('text')
self.cw = json.get('cw')
self.visibility = json['visibility']
self.renote_count = json['renoteCount']
self.replies_count = json['repliesCount']
self.reactions = json['reactions']
self.emojis = [Emoji(_state, x) for x in json['emojis']]
self.files = [File(_state, x) for x in json['files']]
self.file_ids = json['fileIds']
self.reply_id = json['replyId']
self.renote_id = json['renoteId']
self.mentions = json.get('mentions') or []
self.url = json.get('url') or json.get('uri')
self._state = _state
async def delete(self):
if self._state.user.id != self.author.id:
raise PermissionError('You are not the author of this note.')
self._state.api.notes_delete(self.id)
async def pin(self):
if self._state.user.id != self.author.id:
raise PermissionError('You are not the author of this note.')
self._state.api.i_pin(self.id)
async def unpin(self):
if self._state.user.id != self.author.id:
raise PermissionError('You are not the author of this note.')
self._state.api.i_unpin(self.id)
async def reply(self, *args, **kwargs):
d = self._state.api.notes_create(reply_id=self.id, *args, **kwargs)
return Note(self._state, d)
async def renote(self):
d = self._state.api.notes_create(renote_id=self.id)
| none | 1 | 2.507594 | 3 | |
tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial2_Solution_187031a9.py | liuxiaomiao123/NeuroMathAcademy | 2 | 6614754 | <gh_stars>1-10
pars = default_pars()
x = np.arange(0,10,.1)
with plt.xkcd():
fig1 = plt.figure(figsize=(8, 5.5))
plt.plot(x,F(x,pars['a_E'],pars['theta_E']), 'b', label='E population')
plt.plot(x,F(x,pars['a_I'],pars['theta_I']), 'r', label='I population')
plt.legend(loc='lower right')
plt.xlabel('x (a.u.)')
plt.ylabel('F(x)')
plt.show() | pars = default_pars()
x = np.arange(0,10,.1)
with plt.xkcd():
fig1 = plt.figure(figsize=(8, 5.5))
plt.plot(x,F(x,pars['a_E'],pars['theta_E']), 'b', label='E population')
plt.plot(x,F(x,pars['a_I'],pars['theta_I']), 'r', label='I population')
plt.legend(loc='lower right')
plt.xlabel('x (a.u.)')
plt.ylabel('F(x)')
plt.show() | none | 1 | 2.581217 | 3 | |
test_remove_popular.py | JB-Tellez/dsa-practice | 0 | 6614755 | import pytest
from remove_popular import solution
def test_solution_simple():
    """A single-element input with k=0 yields an empty result."""
    assert solution([1], 0) == []
def test_solution():
    """Mixed-frequency input with k=3."""
    sample = [1, 1, 1, 1, 2, 2, 2, 3, 4, 4, 5, 6, 6, 4, 2, 4]
    assert solution(sample, 3) == [3, 5, 6, 6]
| import pytest
from remove_popular import solution
def test_solution_simple():
actual = solution([1], 0)
expected = []
assert actual == expected
def test_solution():
actual = solution([1, 1, 1, 1, 2, 2, 2, 3, 4, 4, 5, 6, 6, 4, 2, 4], 3)
expected = [3, 5, 6, 6]
assert actual == expected
| none | 1 | 2.54091 | 3 | |
tadataka/optimization/updaters.py | IshitaTakeshi/Tadataka | 54 | 6614756 | <filename>tadataka/optimization/updaters.py
from autograd import jacobian
from autograd import numpy as np
from tadataka.assertion import check_non_nan
class GaussNewtonUpdater(object):
    """Computes Gauss-Newton style update steps for a residual function.

    The jacobian of the residual is obtained by automatic
    differentiation (autograd).
    """

    def __init__(self, residual, robustifier):
        # residual: object exposing compute(theta) -> residual array
        # robustifier: weighting scheme; currently unused in compute
        # (see the TODO there)
        self.residual = residual
        self.robustifier = robustifier

    def jacobian(self, theta):
        """Jacobian of the residual at <theta>, via autograd."""
        return jacobian(self.residual.compute)(theta)

    def flattened_residual(self, theta):
        """Residual at <theta>, flattened to a 1-d array."""
        residual = self.residual.compute(theta)
        return residual.flatten()

    def compute(self, theta):
        """Return the update step delta for the parameters <theta>."""
        # Not exactly the same as the equation of Gauss-Newton update:
        # d = lstsq(J, r), not the standard update d = inv(J^T * J) * J * r;
        # however, it works better than implementing the equation manually
        r = self.flattened_residual(theta)
        J = self.jacobian(theta)
        check_non_nan(r)
        check_non_nan(J)
        assert(np.ndim(r) == 1)
        # residuals can be a multi-dimensional array, so J is reshaped to
        # match the flattened residual
        J = J.reshape(r.shape[0], theta.shape[0])
        # TODO add weighted Gauss-Newton as an option
        # weights = self.robustifier.weights(r)
        delta, error, _, _ = np.linalg.lstsq(J, r, rcond=None)
        return delta
return delta
| <filename>tadataka/optimization/updaters.py
from autograd import jacobian
from autograd import numpy as np
from tadataka.assertion import check_non_nan
class GaussNewtonUpdater(object):
def __init__(self, residual, robustifier):
self.residual = residual
self.robustifier = robustifier
def jacobian(self, theta):
return jacobian(self.residual.compute)(theta)
def flattened_residual(self, theta):
residual = self.residual.compute(theta)
return residual.flatten()
def compute(self, theta):
# Not exactly the same as the equation of Gauss-Newton update
# d = lstsq(J, r), not the stardard update d = inv (J^T * J) * J * r
# however, it works better than implementing the equation malually
r = self.flattened_residual(theta)
J = self.jacobian(theta)
check_non_nan(r)
check_non_nan(J)
assert(np.ndim(r) == 1)
# residuals can be a multi-dimensonal array so flatten them
J = J.reshape(r.shape[0], theta.shape[0])
# TODO add weighted Gauss-Newton as an option
# weights = self.robustifier.weights(r)
delta, error, _, _ = np.linalg.lstsq(J, r, rcond=None)
return delta
| en | 0.824722 | # Not exactly the same as the equation of Gauss-Newton update # d = lstsq(J, r), not the stardard update d = inv (J^T * J) * J * r # however, it works better than implementing the equation malually # residuals can be a multi-dimensonal array so flatten them # TODO add weighted Gauss-Newton as an option # weights = self.robustifier.weights(r) | 2.481192 | 2 |
labtex/document.py | CianLM/labtex | 4 | 6614757 | from labtex.linear import LinearRegression
from labtex.measurement import MeasurementList
from typing import List, Union
import os
class Document:
"A class for LaTeX template document creation with tables and graphs already inserted."
def __init__(self,title : str,author : str):
"Initialise a LaTeX document with an title and an author."
# Customise these folders and the templates as you wish.
Document.texfolder = "tex/"
Document.graphfolder = "figures/"
Document.template = r"""\documentclass[]{article}
\title{!title}
\author{!author}
\usepackage{amsmath,amssymb,amsfonts,amsthm,physics,graphicx,geometry,enumitem,booktabs}
\begin{document}
\maketitle
\abstract{
}
\section{Introduction}
\section{Theory}
\section{Method}
\section{Results}
!table
\section{Data Analysis}
!graph
\section{Discussion}
\section*{References}
\end{document}
"""
Document.tabletemplates = {
"default": r"""
\begin{table}[ht]
\centering
\caption{!caption}
\label{tab:!tablenumber}
\begin{tabular}{!columns}
\toprule
!data
\bottomrule
\end{tabular}
\end{table}
!table
"""
}
Document.graphtemplates = {
"default": r"""
\begin{figure}[ht]
\centering
\includegraphics[width=!width\textwidth]{!filename.png}
\caption{!caption}
\label{fig:!graphnumber}
\end{figure}
!graph
"""
}
self.document = Document.template.replace("!title",title).replace("!author",author)
self.tablenumber = 0
self.graphnumber = 0
def __repr__(self):
return self.document
def table(self,listheads : List[str], data : Union[List[MeasurementList]], \
headers :List[str] = [], caption : str = "",style : str = "sideways"):
"""
Add a table to the LaTeX document.
"""
assert len(listheads) == len(data)
assert all(len(data[0]) == len(line) for line in data)
columns = len(data[0])
table = Document.tabletemplates["default"]
self.tablenumber += 1
table = table.replace("!tablenumber", str(self.tablenumber))
table = table.replace("!caption",caption)
if not (all(isinstance(line, MeasurementList) for line in data)):
raise Exception("Data Error: Data should be a list of Measurement Lists.")
if(style == "sideways"):
table = table.replace("!columns", "*{" + str(1+columns) + "}c" )
if(headers != []):
table = table.replace("!data",
fr"""{headers[0]} & \multicolumn{{{columns}}}{{c}}{{{headers[1]}}} \\
\midrule !data"""
)
for i in range(len(data)):
table = table.replace("!data",
fr"""
{listheads[i]}, { data[i].tableprint("uv") } \\ !data"""
)
elif(style == "upright"):
table = table.replace("!columns", "*{" + str(len(data)) + "}c" )
for i in range(len(data)):
table = table.replace("!data",
fr"""{listheads[i]}, {data[i].tableprint("u")} & !data"""
)
table = table.replace("& !data",
r"""\\
\midrule
!data"""
)
tableprint = [m.tableprint("v")[1:].split("&") for m in data]
indexfirst = [ [index[j] for index in tableprint] for j in range(len(tableprint[0]))]
for index in indexfirst:
table = table.replace("!data",
rf""" {"&".join([*index])} \\
!data""")
else:
raise Exception("Style Error: Only 'sideways' and 'upright' styles supported.")
table = table.replace("!data","")
self.document = self.document.replace("!table",table)
def graph(self, data : List[MeasurementList], title : str = "", xnameandsymbol : str = "Name, Symbol", \
ynameandsymbol : str = "Name, Symbol", caption : str = "", width : float = 0.8, style :str = "default", \
showline : bool = True):
"Add a graph to the LaTeX document."
graph = Document.graphtemplates[style]
self.graphnumber += 1
graph = graph.replace("!graphnumber",str(self.graphnumber))
graph = graph.replace("!caption",caption)
graph = graph.replace("!width",str(width))
if len(data) != 2 or not all(
isinstance(listitem, MeasurementList) for listitem in data
):
raise Exception("2 MeasurementLists needed for graphing.")
eq = LinearRegression(*data)
filename = f"graph{self.graphnumber}"
if(Document.graphfolder != '.'): # assuming the graph folder is a subfolder
graph = graph.replace("!filename","../" + Document.graphfolder + filename)
else:
graph = graph.replace("!filename", Document.graphfolder + filename)
self.document = self.document.replace("!graph",graph)
if (not os.path.exists(Document.graphfolder)):
os.makedirs(Document.graphfolder)
eq.savefig(Document.graphfolder + filename,title,xnameandsymbol,ynameandsymbol,showline,self.graphnumber)
print(f"labtex: Wrote to '{Document.graphfolder + filename}.png'.")
def save(self,filename: str ="labdocument"):
"Save the document to 'filename.tex'."
self.document = self.document.replace("!table","").replace("!graph","")
if(not os.path.exists(Document.texfolder)):
os.makedirs(Document.texfolder)
with open(Document.texfolder + filename + '.tex','w') as outputfile:
outputfile.write(self.document)
print(f"labtex: Wrote to '{Document.texfolder + filename}.tex'.") | from labtex.linear import LinearRegression
from labtex.measurement import MeasurementList
from typing import List, Union
import os
class Document:
"A class for LaTeX template document creation with tables and graphs already inserted."
def __init__(self,title : str,author : str):
"Initialise a LaTeX document with an title and an author."
# Customise these folders and the templates as you wish.
Document.texfolder = "tex/"
Document.graphfolder = "figures/"
Document.template = r"""\documentclass[]{article}
\title{!title}
\author{!author}
\usepackage{amsmath,amssymb,amsfonts,amsthm,physics,graphicx,geometry,enumitem,booktabs}
\begin{document}
\maketitle
\abstract{
}
\section{Introduction}
\section{Theory}
\section{Method}
\section{Results}
!table
\section{Data Analysis}
!graph
\section{Discussion}
\section*{References}
\end{document}
"""
Document.tabletemplates = {
"default": r"""
\begin{table}[ht]
\centering
\caption{!caption}
\label{tab:!tablenumber}
\begin{tabular}{!columns}
\toprule
!data
\bottomrule
\end{tabular}
\end{table}
!table
"""
}
Document.graphtemplates = {
"default": r"""
\begin{figure}[ht]
\centering
\includegraphics[width=!width\textwidth]{!filename.png}
\caption{!caption}
\label{fig:!graphnumber}
\end{figure}
!graph
"""
}
self.document = Document.template.replace("!title",title).replace("!author",author)
self.tablenumber = 0
self.graphnumber = 0
def __repr__(self):
return self.document
def table(self,listheads : List[str], data : Union[List[MeasurementList]], \
headers :List[str] = [], caption : str = "",style : str = "sideways"):
"""
Add a table to the LaTeX document.
"""
assert len(listheads) == len(data)
assert all(len(data[0]) == len(line) for line in data)
columns = len(data[0])
table = Document.tabletemplates["default"]
self.tablenumber += 1
table = table.replace("!tablenumber", str(self.tablenumber))
table = table.replace("!caption",caption)
if not (all(isinstance(line, MeasurementList) for line in data)):
raise Exception("Data Error: Data should be a list of Measurement Lists.")
if(style == "sideways"):
table = table.replace("!columns", "*{" + str(1+columns) + "}c" )
if(headers != []):
table = table.replace("!data",
fr"""{headers[0]} & \multicolumn{{{columns}}}{{c}}{{{headers[1]}}} \\
\midrule !data"""
)
for i in range(len(data)):
table = table.replace("!data",
fr"""
{listheads[i]}, { data[i].tableprint("uv") } \\ !data"""
)
elif(style == "upright"):
table = table.replace("!columns", "*{" + str(len(data)) + "}c" )
for i in range(len(data)):
table = table.replace("!data",
fr"""{listheads[i]}, {data[i].tableprint("u")} & !data"""
)
table = table.replace("& !data",
r"""\\
\midrule
!data"""
)
tableprint = [m.tableprint("v")[1:].split("&") for m in data]
indexfirst = [ [index[j] for index in tableprint] for j in range(len(tableprint[0]))]
for index in indexfirst:
table = table.replace("!data",
rf""" {"&".join([*index])} \\
!data""")
else:
raise Exception("Style Error: Only 'sideways' and 'upright' styles supported.")
table = table.replace("!data","")
self.document = self.document.replace("!table",table)
def graph(self, data : List[MeasurementList], title : str = "", xnameandsymbol : str = "Name, Symbol", \
ynameandsymbol : str = "Name, Symbol", caption : str = "", width : float = 0.8, style :str = "default", \
showline : bool = True):
"Add a graph to the LaTeX document."
graph = Document.graphtemplates[style]
self.graphnumber += 1
graph = graph.replace("!graphnumber",str(self.graphnumber))
graph = graph.replace("!caption",caption)
graph = graph.replace("!width",str(width))
if len(data) != 2 or not all(
isinstance(listitem, MeasurementList) for listitem in data
):
raise Exception("2 MeasurementLists needed for graphing.")
eq = LinearRegression(*data)
filename = f"graph{self.graphnumber}"
if(Document.graphfolder != '.'): # assuming the graph folder is a subfolder
graph = graph.replace("!filename","../" + Document.graphfolder + filename)
else:
graph = graph.replace("!filename", Document.graphfolder + filename)
self.document = self.document.replace("!graph",graph)
if (not os.path.exists(Document.graphfolder)):
os.makedirs(Document.graphfolder)
eq.savefig(Document.graphfolder + filename,title,xnameandsymbol,ynameandsymbol,showline,self.graphnumber)
print(f"labtex: Wrote to '{Document.graphfolder + filename}.png'.")
def save(self,filename: str ="labdocument"):
"Save the document to 'filename.tex'."
self.document = self.document.replace("!table","").replace("!graph","")
if(not os.path.exists(Document.texfolder)):
os.makedirs(Document.texfolder)
with open(Document.texfolder + filename + '.tex','w') as outputfile:
outputfile.write(self.document)
print(f"labtex: Wrote to '{Document.texfolder + filename}.tex'.") | en | 0.262705 | # Customise these folders and the templates as you wish. \documentclass[]{article} \title{!title} \author{!author} \usepackage{amsmath,amssymb,amsfonts,amsthm,physics,graphicx,geometry,enumitem,booktabs} \begin{document} \maketitle \abstract{ } \section{Introduction} \section{Theory} \section{Method} \section{Results} !table \section{Data Analysis} !graph \section{Discussion} \section*{References} \end{document} \begin{table}[ht] \centering \caption{!caption} \label{tab:!tablenumber} \begin{tabular}{!columns} \toprule !data \bottomrule \end{tabular} \end{table} !table \begin{figure}[ht] \centering \includegraphics[width=!width\textwidth]{!filename.png} \caption{!caption} \label{fig:!graphnumber} \end{figure} !graph Add a table to the LaTeX document. {headers[0]} & \multicolumn{{{columns}}}{{c}}{{{headers[1]}}} \\ \midrule !data {listheads[i]}, { data[i].tableprint("uv") } \\ !data {listheads[i]}, {data[i].tableprint("u")} & !data \\ \midrule !data {"&".join([*index])} \\ !data # assuming the graph folder is a subfolder | 2.444226 | 2 |
examples/sand.py | ailin-nemui/sinobit-micropython | 0 | 6614758 | # Digital sand demo uses the accelerometer to move sand particiles in a
# realistic way. Tilt the board to see the sand grains tumble around and light
# up LEDs. Based on the code created by <NAME> and <NAME>, see:
# https://learn.adafruit.com/digital-sand-dotstar-circuitpython-edition/code
# https://learn.adafruit.com/animated-led-sand
# Ported to sino:bit by <NAME>
#
# The MIT License (MIT)
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
import random
import microbit
import sinobit
# Configuration:
GRAINS = 20 # Number of grains of sand
WIDTH = 12 # Display width in pixels
HEIGHT = 12 # Display height in pixels
# Class to represent the position of each grain.
class Grain:
def __init__(self):
self.x = 0
self.y = 0
self.vx = 0
self.vy = 0
# Helper to find a grain at x, y within the occupied_bits list.
def index_of_xy(x, y):
return (y >> 8) * WIDTH + (x >> 8)
# Global state
max_x = WIDTH * 256 - 1 # Grain coordinates are 256 times the pixel
max_y = HEIGHT * 256 - 1 # coordinates to allow finer sub-pixel movements.
grains = [Grain() for _ in range(GRAINS)]
occupied_bits = [False for _ in range(WIDTH * HEIGHT)]
oldidx = 0
newidx = 0
delta = 0
newx = 0
newy = 0
# Randomly place grains to start. Go through each grain and pick random
# positions until one is found. Start with no initial velocity too.
for g in grains:
placed = False
while not placed:
g.x = random.randint(0, max_x)
g.y = random.randint(0, max_y)
placed = not occupied_bits[index_of_xy(g.x, g.y)]
occupied_bits[index_of_xy(g.x, g.y)] = True
# Main loop.
while True:
# Draw each grain.
sinobit.display.clear()
for g in grains:
x = g.x >> 8 # Convert from grain coordinates to pixel coordinates by
y = g.y >> 8 # dividing by 256.
sinobit.display.set_pixel(x, y, True)
sinobit.display.write()
# Read accelerometer...
f_x, f_y, f_z = microbit.accelerometer.get_values()
# sinobit accelerometer returns values in signed -1024 to 1024 values
# that are millig's. We'll divide by 8 to get a value in the -127 to 127
# range for the sand coordinates. We invert the y axis to match the
# current display orientation too.
f_y *= -1 # Invert y
ax = f_x >> 3 # Transform accelerometer axes
ay = f_y >> 3 # to grain coordinate space (divide by 8)
az = abs(f_z) >> 6 # Random motion factor grabs a few top
# bits from Z axis.
az = 1 if (az >= 3) else (4 - az) # Clip & invert
ax -= az # Subtract motion factor from X, Y
ay -= az
az2 = (az << 1) + 1 # Range of random motion to add back in
# ...and apply 2D accel vector to grain velocities...
v2 = 0 # Velocity squared
v = 0.0 # Absolute velociy
for g in grains:
g.vx += ax + random.randint(0, az2) # A little randomness makes
g.vy += ay + random.randint(0, az2) # tall stacks topple better!
# Terminal velocity (in any direction) is 256 units -- equal to
# 1 pixel -- which keeps moving grains from passing through each other
# and other such mayhem. Though it takes some extra math, velocity is
# clipped as a 2D vector (not separately-limited X & Y) so that
# diagonal movement isn't faster
v2 = g.vx * g.vx + g.vy * g.vy
if v2 > 65536: # If v^2 > 65536, then v > 256
v = math.floor(math.sqrt(v2)) # Velocity vector magnitude
g.vx = (g.vx // v) << 8 # Maintain heading
g.vy = (g.vy // v) << 8 # Limit magnitude
# ...then update position of each grain, one at a time, checking for
# collisions and having them react. This really seems like it shouldn't
# work, as only one grain is considered at a time while the rest are
# regarded as stationary. Yet this naive algorithm, taking many not-
# technically-quite-correct steps, and repeated quickly enough,
# visually integrates into something that somewhat resembles physics.
# (I'd initially tried implementing this as a bunch of concurrent and
# "realistic" elastic collisions among circular grains, but the
# calculations and volument of code quickly got out of hand for both
# the tiny 8-bit AVR microcontroller and my tiny dinosaur brain.)
for g in grains:
newx = g.x + g.vx # New position in grain space
newy = g.y + g.vy
if newx > max_x: # If grain would go out of bounds
newx = max_x # keep it inside, and
g.vx //= -2 # give a slight bounce off the wall
elif newx < 0:
newx = 0
g.vx //= -2
if newy > max_y:
newy = max_y
g.vy //= -2
elif newy < 0:
newy = 0
g.vy //= -2
oldidx = index_of_xy(g.x, g.y) # prior pixel
newidx = index_of_xy(newx, newy) # new pixel
if oldidx != newidx and occupied_bits[newidx]: # If grain is moving to a new pixel...
# but if that pixel is already occupied...
delta = abs(newidx - oldidx) # What direction when blocked?
if delta == 1: # 1 pixel left or right
newx = g.x # cancel x motion
g.vx //= -2 # and bounce X velocity (Y is ok)
newidx = oldidx # no pixel change
elif delta == WIDTH: # 1 pixel up or down
newy = g.y # cancel Y motion
g.vy //= -2 # and bounce Y velocity (X is ok)
newidx = oldidx # no pixel change
else: # Diagonal intersection is more tricky...
# Try skidding along just one axis of motion if possible (start w/
# faster axis). Because we've already established that diagonal
# (both-axis) motion is occurring, moving on either axis alone WILL
# change the pixel index, no need to check that again.
if abs(g.vx) > abs(g.vy): # x axis is faster
newidx = index_of_xy(newx, g.y)
if not occupied_bits[newidx]: # that pixel is free, take it! But...
newy = g.y # cancel Y motion
g.vy //= -2 # and bounce Y velocity
else: # X pixel is taken, so try Y...
newidx = index_of_xy(g.x, newy)
if not occupied_bits[newidx]: # Pixel is free, take it, but first...
newx = g.x # Cancel X motion
g.vx //= -2 # Bounce X velocity
else: # both spots are occupied
newx = g.x # Cancel X & Y motion
newy = g.y
g.vx //= -2 # Bounce X & Y velocity
g.vy //= -2
newidx = oldidx # Not moving
else: # y axis is faster. start there
newidx = index_of_xy(g.x, newy)
if not occupied_bits[newidx]: # Pixel's free! Take it! But...
newx = g.x # Cancel X motion
g.vx //= -2 # Bounce X velocity
else: # Y pixel is taken, so try X...
newidx = index_of_xy(newx, g.y)
if not occupied_bits[newidx]: # Pixel is free, take it, but first...
newy = g.y # cancel Y motion
g.vy //= -2 # and bounce Y velocity
else: # both spots are occupied
newx = g.x # Cancel X & Y motion
newy = g.y
g.vx //= -2 # Bounce X & Y velocity
g.vy //= -2
newidx = oldidx # Not moving
occupied_bits[oldidx] = False
occupied_bits[newidx] = True
g.x = newx
g.y = newy
| # Digital sand demo uses the accelerometer to move sand particiles in a
# realistic way. Tilt the board to see the sand grains tumble around and light
# up LEDs. Based on the code created by <NAME> and <NAME>, see:
# https://learn.adafruit.com/digital-sand-dotstar-circuitpython-edition/code
# https://learn.adafruit.com/animated-led-sand
# Ported to sino:bit by <NAME>
#
# The MIT License (MIT)
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
import random
import microbit
import sinobit
# Configuration:
GRAINS = 20 # Number of grains of sand
WIDTH = 12 # Display width in pixels
HEIGHT = 12 # Display height in pixels
# Class to represent the position of each grain.
class Grain:
def __init__(self):
self.x = 0
self.y = 0
self.vx = 0
self.vy = 0
# Helper to find a grain at x, y within the occupied_bits list.
def index_of_xy(x, y):
return (y >> 8) * WIDTH + (x >> 8)
# Global state
max_x = WIDTH * 256 - 1 # Grain coordinates are 256 times the pixel
max_y = HEIGHT * 256 - 1 # coordinates to allow finer sub-pixel movements.
grains = [Grain() for _ in range(GRAINS)]
occupied_bits = [False for _ in range(WIDTH * HEIGHT)]
oldidx = 0
newidx = 0
delta = 0
newx = 0
newy = 0
# Randomly place grains to start. Go through each grain and pick random
# positions until one is found. Start with no initial velocity too.
for g in grains:
placed = False
while not placed:
g.x = random.randint(0, max_x)
g.y = random.randint(0, max_y)
placed = not occupied_bits[index_of_xy(g.x, g.y)]
occupied_bits[index_of_xy(g.x, g.y)] = True
# Main loop.
while True:
# Draw each grain.
sinobit.display.clear()
for g in grains:
x = g.x >> 8 # Convert from grain coordinates to pixel coordinates by
y = g.y >> 8 # dividing by 256.
sinobit.display.set_pixel(x, y, True)
sinobit.display.write()
# Read accelerometer...
f_x, f_y, f_z = microbit.accelerometer.get_values()
# sinobit accelerometer returns values in signed -1024 to 1024 values
# that are millig's. We'll divide by 8 to get a value in the -127 to 127
# range for the sand coordinates. We invert the y axis to match the
# current display orientation too.
f_y *= -1 # Invert y
ax = f_x >> 3 # Transform accelerometer axes
ay = f_y >> 3 # to grain coordinate space (divide by 8)
az = abs(f_z) >> 6 # Random motion factor grabs a few top
# bits from Z axis.
az = 1 if (az >= 3) else (4 - az) # Clip & invert
ax -= az # Subtract motion factor from X, Y
ay -= az
az2 = (az << 1) + 1 # Range of random motion to add back in
# ...and apply 2D accel vector to grain velocities...
v2 = 0 # Velocity squared
v = 0.0 # Absolute velociy
for g in grains:
g.vx += ax + random.randint(0, az2) # A little randomness makes
g.vy += ay + random.randint(0, az2) # tall stacks topple better!
# Terminal velocity (in any direction) is 256 units -- equal to
# 1 pixel -- which keeps moving grains from passing through each other
# and other such mayhem. Though it takes some extra math, velocity is
# clipped as a 2D vector (not separately-limited X & Y) so that
# diagonal movement isn't faster
v2 = g.vx * g.vx + g.vy * g.vy
if v2 > 65536: # If v^2 > 65536, then v > 256
v = math.floor(math.sqrt(v2)) # Velocity vector magnitude
g.vx = (g.vx // v) << 8 # Maintain heading
g.vy = (g.vy // v) << 8 # Limit magnitude
# ...then update position of each grain, one at a time, checking for
# collisions and having them react. This really seems like it shouldn't
# work, as only one grain is considered at a time while the rest are
# regarded as stationary. Yet this naive algorithm, taking many not-
# technically-quite-correct steps, and repeated quickly enough,
# visually integrates into something that somewhat resembles physics.
# (I'd initially tried implementing this as a bunch of concurrent and
# "realistic" elastic collisions among circular grains, but the
# calculations and volument of code quickly got out of hand for both
# the tiny 8-bit AVR microcontroller and my tiny dinosaur brain.)
for g in grains:
newx = g.x + g.vx # New position in grain space
newy = g.y + g.vy
if newx > max_x: # If grain would go out of bounds
newx = max_x # keep it inside, and
g.vx //= -2 # give a slight bounce off the wall
elif newx < 0:
newx = 0
g.vx //= -2
if newy > max_y:
newy = max_y
g.vy //= -2
elif newy < 0:
newy = 0
g.vy //= -2
oldidx = index_of_xy(g.x, g.y) # prior pixel
newidx = index_of_xy(newx, newy) # new pixel
if oldidx != newidx and occupied_bits[newidx]: # If grain is moving to a new pixel...
# but if that pixel is already occupied...
delta = abs(newidx - oldidx) # What direction when blocked?
if delta == 1: # 1 pixel left or right
newx = g.x # cancel x motion
g.vx //= -2 # and bounce X velocity (Y is ok)
newidx = oldidx # no pixel change
elif delta == WIDTH: # 1 pixel up or down
newy = g.y # cancel Y motion
g.vy //= -2 # and bounce Y velocity (X is ok)
newidx = oldidx # no pixel change
else: # Diagonal intersection is more tricky...
# Try skidding along just one axis of motion if possible (start w/
# faster axis). Because we've already established that diagonal
# (both-axis) motion is occurring, moving on either axis alone WILL
# change the pixel index, no need to check that again.
if abs(g.vx) > abs(g.vy): # x axis is faster
newidx = index_of_xy(newx, g.y)
if not occupied_bits[newidx]: # that pixel is free, take it! But...
newy = g.y # cancel Y motion
g.vy //= -2 # and bounce Y velocity
else: # X pixel is taken, so try Y...
newidx = index_of_xy(g.x, newy)
if not occupied_bits[newidx]: # Pixel is free, take it, but first...
newx = g.x # Cancel X motion
g.vx //= -2 # Bounce X velocity
else: # both spots are occupied
newx = g.x # Cancel X & Y motion
newy = g.y
g.vx //= -2 # Bounce X & Y velocity
g.vy //= -2
newidx = oldidx # Not moving
else: # y axis is faster. start there
newidx = index_of_xy(g.x, newy)
if not occupied_bits[newidx]: # Pixel's free! Take it! But...
newx = g.x # Cancel X motion
g.vx //= -2 # Bounce X velocity
else: # Y pixel is taken, so try X...
newidx = index_of_xy(newx, g.y)
if not occupied_bits[newidx]: # Pixel is free, take it, but first...
newy = g.y # cancel Y motion
g.vy //= -2 # and bounce Y velocity
else: # both spots are occupied
newx = g.x # Cancel X & Y motion
newy = g.y
g.vx //= -2 # Bounce X & Y velocity
g.vy //= -2
newidx = oldidx # Not moving
occupied_bits[oldidx] = False
occupied_bits[newidx] = True
g.x = newx
g.y = newy
| en | 0.85499 | # Digital sand demo uses the accelerometer to move sand particiles in a # realistic way. Tilt the board to see the sand grains tumble around and light # up LEDs. Based on the code created by <NAME> and <NAME>, see: # https://learn.adafruit.com/digital-sand-dotstar-circuitpython-edition/code # https://learn.adafruit.com/animated-led-sand # Ported to sino:bit by <NAME> # # The MIT License (MIT) # # Copyright (c) 2018 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Configuration: # Number of grains of sand # Display width in pixels # Display height in pixels # Class to represent the position of each grain. # Helper to find a grain at x, y within the occupied_bits list. # Global state # Grain coordinates are 256 times the pixel # coordinates to allow finer sub-pixel movements. # Randomly place grains to start. Go through each grain and pick random # positions until one is found. Start with no initial velocity too. # Main loop. # Draw each grain. 
# Convert from grain coordinates to pixel coordinates by # dividing by 256. # Read accelerometer... # sinobit accelerometer returns values in signed -1024 to 1024 values # that are millig's. We'll divide by 8 to get a value in the -127 to 127 # range for the sand coordinates. We invert the y axis to match the # current display orientation too. # Invert y # Transform accelerometer axes # to grain coordinate space (divide by 8) # Random motion factor grabs a few top # bits from Z axis. # Clip & invert # Subtract motion factor from X, Y # Range of random motion to add back in # ...and apply 2D accel vector to grain velocities... # Velocity squared # Absolute velociy # A little randomness makes # tall stacks topple better! # Terminal velocity (in any direction) is 256 units -- equal to # 1 pixel -- which keeps moving grains from passing through each other # and other such mayhem. Though it takes some extra math, velocity is # clipped as a 2D vector (not separately-limited X & Y) so that # diagonal movement isn't faster # If v^2 > 65536, then v > 256 # Velocity vector magnitude # Maintain heading # Limit magnitude # ...then update position of each grain, one at a time, checking for # collisions and having them react. This really seems like it shouldn't # work, as only one grain is considered at a time while the rest are # regarded as stationary. Yet this naive algorithm, taking many not- # technically-quite-correct steps, and repeated quickly enough, # visually integrates into something that somewhat resembles physics. # (I'd initially tried implementing this as a bunch of concurrent and # "realistic" elastic collisions among circular grains, but the # calculations and volument of code quickly got out of hand for both # the tiny 8-bit AVR microcontroller and my tiny dinosaur brain.) # New position in grain space # If grain would go out of bounds # keep it inside, and # give a slight bounce off the wall # prior pixel # new pixel # If grain is moving to a new pixel... 
# but if that pixel is already occupied... # What direction when blocked? # 1 pixel left or right # cancel x motion # and bounce X velocity (Y is ok) # no pixel change # 1 pixel up or down # cancel Y motion # and bounce Y velocity (X is ok) # no pixel change # Diagonal intersection is more tricky... # Try skidding along just one axis of motion if possible (start w/ # faster axis). Because we've already established that diagonal # (both-axis) motion is occurring, moving on either axis alone WILL # change the pixel index, no need to check that again. # x axis is faster # that pixel is free, take it! But... # cancel Y motion # and bounce Y velocity # X pixel is taken, so try Y... # Pixel is free, take it, but first... # Cancel X motion # Bounce X velocity # both spots are occupied # Cancel X & Y motion # Bounce X & Y velocity # Not moving # y axis is faster. start there # Pixel's free! Take it! But... # Cancel X motion # Bounce X velocity # Y pixel is taken, so try X... # Pixel is free, take it, but first... # cancel Y motion # and bounce Y velocity # both spots are occupied # Cancel X & Y motion # Bounce X & Y velocity # Not moving | 2.993454 | 3 |
world/gen/biome/Plains.py | uuk0/mcpython-3 | 0 | 6614759 | """
not fully implementation of plains biome of minecraft
missing: sunflower plains, animals, ores
"""
import globals as G
import world.gen.biome.IBiome
import world.gen.structure.tree.OakTree
@G.biomehandler
class Plains(world.gen.biome.IBiome.IBiome):
    """Plains biome definition, registered with the global biome handler.

    Every accessor is a static constant consumed by the world generator.
    NOTE(review): getStructurWeight / getTemperatur / getBaseHighVariation
    are misspelled but belong to the IBiome interface -- do not rename here.
    """
    @staticmethod
    def getName():
        # Namespaced biome identifier.
        return "minecraft:plains"
    @staticmethod
    def getStructures():  # structure -> weight
        # Mapping of structure generator -> relative spawn weight.
        return {world.gen.structure.tree.OakTree.oaktree: 1}
    @staticmethod
    def getStructurWeight():
        # Overall structure rarity factor -- presumably a 1-in-N roll in
        # the generator; confirm against the chunk generation code.
        return 200000
    @staticmethod
    def getBaseHighVariation():
        # Terrain height parameter (exact semantics defined by the generator).
        return 1
    @staticmethod
    def getBaseHighVariationFactor():
        # Terrain height parameter (exact semantics defined by the generator).
        return 400
    @staticmethod
    def getHighVariation():
        # Terrain height parameter (exact semantics defined by the generator).
        return 5
    @staticmethod
    def getTemperatur():
        # Biome temperature value.
        return 0.8
@G.biomehandler
class SunflowerPlains(Plains):
    """Sunflower plains variant; currently identical to Plains apart
    from its registered name.

    todo: implement this
    """
    @staticmethod
    def getName():
        return "minecraft:sunflower_plains"
| """
not fully implementation of plains biome of minecraft
missing: sunflower plains, animals, ores
"""
import globals as G
import world.gen.biome.IBiome
import world.gen.structure.tree.OakTree
@G.biomehandler
class Plains(world.gen.biome.IBiome.IBiome):
@staticmethod
def getName():
return "minecraft:plains"
@staticmethod
def getStructures(): # structure -> weight
return {world.gen.structure.tree.OakTree.oaktree: 1}
@staticmethod
def getStructurWeight():
return 200000
@staticmethod
def getBaseHighVariation():
return 1
@staticmethod
def getBaseHighVariationFactor():
return 400
@staticmethod
def getHighVariation():
return 5
@staticmethod
def getTemperatur():
return 0.8
@G.biomehandler
class SunflowerPlains(Plains):
"""
todo: implement this
"""
@staticmethod
def getName():
return "minecraft:sunflower_plains"
| en | 0.694181 | not fully implementation of plains biome of minecraft missing: sunflower plains, animals, ores # structure -> weight todo: implement this | 2.178841 | 2 |
tests/__init__.py | B-rade/scrapy-docs | 0 | 6614760 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Unit test package for scrapy_docs."""
| # -*- coding: utf-8 -*-
"""Unit test package for scrapy_docs.""" | en | 0.755993 | # -*- coding: utf-8 -*- Unit test package for scrapy_docs. | 0.923285 | 1 |
strategies/bayes.py | aladics/DeepBugHunter | 6 | 6614761 | <filename>strategies/bayes.py
import os
import math
import dbh_util as util
from sklearn.naive_bayes import GaussianNB
def predict(classifier, test, args, sargs_str, threshold=None):
    """Run the fitted classifier on the feature matrix in test[0].

    When *threshold* is given, each raw prediction is binarized to 1 if
    it is >= threshold, else 0.  *args* and *sargs_str* are unused but
    kept for the common strategy interface.
    """
    raw_predictions = classifier.predict(test[0])
    if threshold is None:
        return raw_predictions
    binarized = []
    for score in raw_predictions:
        binarized.append(1 if score >= threshold else 0)
    return binarized
def learn(train, dev, test, args, sargs_str):
    """Fit a Gaussian Naive Bayes model via the shared sklearn wrapper.

    *args* and *sargs_str* are unused but kept for the common strategy
    interface; train/dev/test splits are forwarded to the wrapper.
    """
    return util.sklearn_wrapper(train, dev, test, GaussianNB())
import os
import math
import dbh_util as util
from sklearn.naive_bayes import GaussianNB
def predict(classifier, test, args, sargs_str, threshold=None):
preds = classifier.predict(test[0])
if threshold is not None:
preds = [1 if x >= threshold else 0 for x in preds]
return preds
def learn(train, dev, test, args, sargs_str):
return util.sklearn_wrapper(train, dev, test, GaussianNB()) | none | 1 | 2.824242 | 3 | |
prg01_basic_python/basicpython05_stringtypes.py | imademethink/MachineLearning_related_Python | 0 | 6614762 | <filename>prg01_basic_python/basicpython05_stringtypes.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# string
str_a = "Obstacle is the way"
print("str_a= " , str_a)
print("str_a[5]= " , str_a[5])
print("str_a[7:]= " , str_a[7:])
print("str_a[:7]= " , str_a[:7])
print("str_a[5:9]= " , str_a[5:9])
print("str_a * 2 = " , str_a * 2)
print("str_a+' Success'=" , str_a + " Success")
print("\n")
str_b = " Obstacle is the way "
print("str_b= " , str_b)
print("str_b.split()= " , str_b.split())
print("str_b.split()[0]= " , str_b.split()[0])
print("str_b.strip()= " , str_b.strip())
str_b = str_b.strip()
print("str_b[::-1]=reverse= " , str_b[::-1])
print("str_b.replace('way','way ahead')= " , str_b.replace('way','way ahead'))
str_b = str_b.replace('way','way ahead')
print("str_b= " , str_b)
print("\n")
| <filename>prg01_basic_python/basicpython05_stringtypes.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# string
str_a = "Obstacle is the way"
print("str_a= " , str_a)
print("str_a[5]= " , str_a[5])
print("str_a[7:]= " , str_a[7:])
print("str_a[:7]= " , str_a[:7])
print("str_a[5:9]= " , str_a[5:9])
print("str_a * 2 = " , str_a * 2)
print("str_a+' Success'=" , str_a + " Success")
print("\n")
str_b = " Obstacle is the way "
print("str_b= " , str_b)
print("str_b.split()= " , str_b.split())
print("str_b.split()[0]= " , str_b.split()[0])
print("str_b.strip()= " , str_b.strip())
str_b = str_b.strip()
print("str_b[::-1]=reverse= " , str_b[::-1])
print("str_b.replace('way','way ahead')= " , str_b.replace('way','way ahead'))
str_b = str_b.replace('way','way ahead')
print("str_b= " , str_b)
print("\n")
| en | 0.253278 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # string | 3.776623 | 4 |
tests/test_db_aggregations.py | tb0hdan/twigator_project | 2 | 6614763 | import sys
sys.path.insert(1, ".")
sys.path.insert(2, "..")
import unittest
from mongoengine import connect, disconnect
from twigator.db.aggregations import (
get_top_hashtags,
get_tweet_count,
get_top_twitters)
from tests import mytestrunner
class AggregationsTestCase(unittest.TestCase):
    '''Smoke tests for the tweet aggregation queries.

    Each test runs a query against an in-memory mongomock database and
    prints the result; since no fixture data is loaded, these mainly
    verify that the aggregation pipelines execute without raising.
    '''
    def setUp(self):
        # Connect mongoengine to an in-memory mock MongoDB instance.
        connect('mongoenginetest', host='mongomock://localhost')
    def test_01_test_top_hashtags(self):
        print(get_top_hashtags())
    def test_02_get_tweet_count(self):
        print(get_tweet_count())
    def test_03_get_top_twitters(self):
        print(get_top_twitters())
    def tearDown(self):
        # Drop the mock connection so each test starts isolated.
        disconnect()
if __name__ == '__main__':
    # Run this module's test cases through the project's custom runner
    # instead of unittest.main().
    classes = [AggregationsTestCase]
    mytestrunner(classes)
| import sys
sys.path.insert(1, ".")
sys.path.insert(2, "..")
import unittest
from mongoengine import connect, disconnect
from twigator.db.aggregations import (
get_top_hashtags,
get_tweet_count,
get_top_twitters)
from tests import mytestrunner
class AggregationsTestCase(unittest.TestCase):
'''
'''
def setUp(self):
connect('mongoenginetest', host='mongomock://localhost')
def test_01_test_top_hashtags(self):
print(get_top_hashtags())
def test_02_get_tweet_count(self):
print(get_tweet_count())
def test_03_get_top_twitters(self):
print(get_top_twitters())
def tearDown(self):
disconnect()
if __name__ == '__main__':
classes = [AggregationsTestCase]
mytestrunner(classes)
| none | 1 | 2.356229 | 2 | |
src/pycliques/lists.py | rvf0068/pycliquesscaffold | 0 | 6614764 | <filename>src/pycliques/lists.py
"""
This file gives an interface to use graph data from
<NAME>'s `page
<http://cs.anu.edu.au/~bdm/data/graphs.html>`_. Currently only
includes the data for connected graphs from 6 to 10 vertices.
"""
import networkx as nx
import pkg_resources
import gzip
# Bundled graph6-format archives from B. McKay's graph data: graphN holds
# all graphs on N vertices, graphNc only the connected ones.
graph6c = pkg_resources.resource_filename('pycliques', '/data/graph6c.g6.gz')
graph7c = pkg_resources.resource_filename('pycliques', '/data/graph7c.g6.gz')
graph8 = pkg_resources.resource_filename('pycliques', '/data/graph8.g6.gz')
graph8c = pkg_resources.resource_filename('pycliques', '/data/graph8c.g6.gz')
graph9 = pkg_resources.resource_filename('pycliques', '/data/graph9.g6.gz')
graph9c = pkg_resources.resource_filename('pycliques', '/data/graph9c.g6.gz')
graph10 = pkg_resources.resource_filename('pycliques', '/data/graph10.g6.gz')
graph10c = pkg_resources.resource_filename('pycliques', '/data/graph10c.g6.gz')
# Vertex count -> archive path lookups used by list_graphs().
_dict_all = {8: graph8, 9: graph9, 10: graph10}
_dict_connected = {6: graph6c, 7: graph7c, 8: graph8c, 9: graph9c,
                   10: graph10c}
# Uncompressed graph6 file shipped with the package.
small_torsion = pkg_resources.resource_filename('pycliques', '/data/small-torsion.g6')
def list_graphs(n, connected=True):
    """List of graphs of a given order, from B. McKay's data

    Args:
        n (int): number of vertices. Only supported between 6 and 10
            (8 to 10 when connected is False)
        connected (bool): restrict to connected graphs

    Returns:
        list: List of NetworkX graphs

    Examples:
        >>> from pycliques.lists import list_graphs
        >>> len(list_graphs(6))
        112
    """
    archives = _dict_connected if connected else _dict_all
    graphs = []
    with gzip.open(archives[n], 'rt') as graph_file:
        for line in graph_file:
            graphs.append(nx.from_graph6_bytes(bytes(line.strip(), 'utf8')))
    return graphs
def small_torsion_graphs():
    """Return the list of graphs stored in the bundled small-torsion file."""
    with open(small_torsion, 'r') as graph_file:
        return [
            nx.from_graph6_bytes(bytes(line.strip(), 'utf8'))
            for line in graph_file]
| <filename>src/pycliques/lists.py
"""
This file gives an interface to use graph data from
<NAME>'s `page
<http://cs.anu.edu.au/~bdm/data/graphs.html>`_. Currently only
includes the data for connected graphs from 6 to 10 vertices.
"""
import networkx as nx
import pkg_resources
import gzip
graph6c = pkg_resources.resource_filename('pycliques', '/data/graph6c.g6.gz')
graph7c = pkg_resources.resource_filename('pycliques', '/data/graph7c.g6.gz')
graph8 = pkg_resources.resource_filename('pycliques', '/data/graph8.g6.gz')
graph8c = pkg_resources.resource_filename('pycliques', '/data/graph8c.g6.gz')
graph9 = pkg_resources.resource_filename('pycliques', '/data/graph9.g6.gz')
graph9c = pkg_resources.resource_filename('pycliques', '/data/graph9c.g6.gz')
graph10 = pkg_resources.resource_filename('pycliques', '/data/graph10.g6.gz')
graph10c = pkg_resources.resource_filename('pycliques', '/data/graph10c.g6.gz')
_dict_all = {8: graph8, 9: graph9, 10: graph10}
_dict_connected = {6: graph6c, 7: graph7c, 8: graph8c, 9: graph9c,
10: graph10c}
small_torsion = pkg_resources.resource_filename('pycliques', '/data/small-torsion.g6')
def list_graphs(n, connected=True):
"""List of connected graphs of a given order, from B. McKay data
Args:
n (int): integer. Only supported between 6 and 10
Returns:
list: List of NetworkX graphs
Examples:
>>> from pycliques.lists import list_graphs
>>> len(list_graphs(6))
112
"""
list_of_graphs = []
if connected:
the_dict = _dict_connected
else:
the_dict = _dict_all
with gzip.open(the_dict[n], 'rt') as graph_file:
for graph in graph_file:
graph = graph.strip()
graph = nx.from_graph6_bytes(bytes(graph, 'utf8'))
list_of_graphs.append(graph)
return list_of_graphs
def small_torsion_graphs():
list_of_graphs = []
with open(small_torsion, 'r') as graph_file:
for graph in graph_file:
graph = graph.strip()
graph = nx.from_graph6_bytes(bytes(graph, 'utf8'))
list_of_graphs.append(graph)
return list_of_graphs
| en | 0.789374 | This file gives an interface to use graph data from <NAME>'s `page <http://cs.anu.edu.au/~bdm/data/graphs.html>`_. Currently only includes the data for connected graphs from 6 to 10 vertices. List of connected graphs of a given order, from B. McKay data Args: n (int): integer. Only supported between 6 and 10 Returns: list: List of NetworkX graphs Examples: >>> from pycliques.lists import list_graphs >>> len(list_graphs(6)) 112 | 2.89571 | 3 |
test/vpc_create.py | peitur/aws-utils | 0 | 6614765 | <reponame>peitur/aws-utils
import sys,os
import boto3
from pprint import pprint
def print_vpcs( vpclist ):
    """Print one formatted summary line per VPC dict (the elements of
    EC2 describe_vpcs()['Vpcs'])."""
    for v in vpclist:
        print( "VPC: ID: %(id)-16s NW: %(nw)-34s State: %(state)-4s" % { 'id': v['VpcId'],'nw': v['CidrBlock'], 'state': v['State'] } )
def print_secgroups( sglist ):
    """Print a summary of each security group and its ingress rules.

    *sglist* is the 'SecurityGroups' list from EC2
    describe_security_groups().  Rules without a 'FromPort' (e.g.
    all-protocol rules) are dumped raw via pprint instead.
    """
    for sg in sglist:
        print("SEG: ID: %(id)-16s Name: %(name)-32s VPC: %(vpc)-16s" % { 'id': sg['GroupId'], 'name': sg['GroupName'], 'vpc': sg['VpcId'] } )
        for ipp in sg['IpPermissions']:
            if 'FromPort' in ipp:
                # Collect the rule's CIDR ranges for a one-line display.
                iprange = []
                for ir in ipp['IpRanges']:
                    iprange.append( ir['CidrIp'])
                # NOTE(review): the -32d width on the ToPort field looks like a
                # copy/paste of a name-column width -- confirm intended.
                print("SEC: ... %(proto)-5s : %(fport)-8d => %(tport)-32d Ranges: %(range)s" % { 'proto': ipp['IpProtocol'], 'fport': ipp['FromPort'], 'tport': ipp['ToPort'], 'range': ",".join( iprange ) } )
            else:
                pprint( ipp )
if __name__ == "__main__":
    # Target configuration for the (mostly stubbed) creation run below.
    vpc_cidr = "172.16.17.32/16"
    vpc_name = "Tester1"
    vpc_id = None
    security_group = {
        'name':'sg_test1',
        'descr': 'testing basics ',
    }
    internet_gateway = {
    }
    # NOTE(review): both subnets share the same CIDR, and it is a host
    # address with a /24 mask -- looks like placeholder/anonymized data;
    # confirm before using this for real provisioning.
    subnet_list = [
        {
            'name':'tester1_subet1',
            'cidr':"172.16.31.10/24"
        },
        {
            'name':'tester1_subet2',
            'cidr':"172.16.31.10/24"
        }
    ]
    # Show the account's current VPCs and security groups first.
    print("======== Current Network Info ========")
    client = boto3.client("ec2")
    vpcdata = client.describe_vpcs()
    print_vpcs( vpcdata['Vpcs'] )
    sgdata = client.describe_security_groups( )
    print_secgroups( sgdata['SecurityGroups'] )
    task_num = 0
    # The creation steps below are announced but not yet implemented --
    # each '## Create ...' marker is a TODO for the real boto3 call.
    print("======== Starting creation ========")
    task_num += 1
    print("%(tn)d. Creating new VPC: '%(cidr)s' as '%(name)s'" % {'tn': task_num, 'cidr': vpc_cidr, 'name': vpc_name } )
    ## Create VPC
    ## Attach Tag with name
    # Placeholder ID standing in for the real create_vpc() result.
    vpc_id = "vpc-123123"
    if not vpc_id:
        print("ERROR: Could not create VPC, aborting")
        sys.exit()
    task_num += 1
    print("%(tn)d. Getting the new VPCs (%(vpc)s) routing table" % {'tn': task_num, 'vpc': vpc_id} )
    task_num += 1
    print("%(tn)d. Create Security Group %(name)s for VPC %(vpc)s" % { 'tn': task_num, 'name': security_group['name'], 'vpc': vpc_id } )
    ## Create
    ## Attach Tag with name
    task_num += 1
    print("%(tn)d. Create Internet Gateway " % { 'tn': task_num } )
    ## Create
    ## Attach Tag with name
    task_num += 1
    stask_num = 0
    print("%(tn)d. Create %(ln)s subnets" % { 'tn': task_num, 'ln': len( subnet_list ) } )
    for net in subnet_list:
        stask_num += 1
        print("%(tn)d.%(stn)d. Creating %(cidr)s as %(name)s" % { 'tn': task_num, 'stn': stask_num,'cidr': net['cidr'],'name': net['name'] } )
        ## Create
        ## Attach Tag with name
| import sys,os
import boto3
from pprint import pprint
def print_vpcs( vpclist ):
for v in vpclist:
print( "VPC: ID: %(id)-16s NW: %(nw)-34s State: %(state)-4s" % { 'id': v['VpcId'],'nw': v['CidrBlock'], 'state': v['State'] } )
def print_secgroups( sglist ):
for sg in sglist:
print("SEG: ID: %(id)-16s Name: %(name)-32s VPC: %(vpc)-16s" % { 'id': sg['GroupId'], 'name': sg['GroupName'], 'vpc': sg['VpcId'] } )
for ipp in sg['IpPermissions']:
if 'FromPort' in ipp:
iprange = []
for ir in ipp['IpRanges']:
iprange.append( ir['CidrIp'])
print("SEC: ... %(proto)-5s : %(fport)-8d => %(tport)-32d Ranges: %(range)s" % { 'proto': ipp['IpProtocol'], 'fport': ipp['FromPort'], 'tport': ipp['ToPort'], 'range': ",".join( iprange ) } )
else:
pprint( ipp )
if __name__ == "__main__":
vpc_cidr = "172.16.17.32/16"
vpc_name = "Tester1"
vpc_id = None
security_group = {
'name':'sg_test1',
'descr': 'testing basics ',
}
internet_gateway = {
}
subnet_list = [
{
'name':'tester1_subet1',
'cidr':"172.16.31.10/24"
},
{
'name':'tester1_subet2',
'cidr':"172.16.31.10/24"
}
]
print("======== Current Network Info ========")
client = boto3.client("ec2")
vpcdata = client.describe_vpcs()
print_vpcs( vpcdata['Vpcs'] )
sgdata = client.describe_security_groups( )
print_secgroups( sgdata['SecurityGroups'] )
task_num = 0
print("======== Starting creation ========")
task_num += 1
print("%(tn)d. Creating new VPC: '%(cidr)s' as '%(name)s'" % {'tn': task_num, 'cidr': vpc_cidr, 'name': vpc_name } )
## Create VPC
## Attach Tag with name
vpc_id = "vpc-123123"
if not vpc_id:
print("ERROR: Could not create VPC, aborting")
sys.exit()
task_num += 1
print("%(tn)d. Getting the new VPCs (%(vpc)s) routing table" % {'tn': task_num, 'vpc': vpc_id} )
task_num += 1
print("%(tn)d. Create Security Group %(name)s for VPC %(vpc)s" % { 'tn': task_num, 'name': security_group['name'], 'vpc': vpc_id } )
## Create
## Attach Tag with name
task_num += 1
print("%(tn)d. Create Internet Gateway " % { 'tn': task_num } )
## Create
## Attach Tag with name
task_num += 1
stask_num = 0
print("%(tn)d. Create %(ln)s subnets" % { 'tn': task_num, 'ln': len( subnet_list ) } )
for net in subnet_list:
stask_num += 1
print("%(tn)d.%(stn)d. Creating %(cidr)s as %(name)s" % { 'tn': task_num, 'stn': stask_num,'cidr': net['cidr'],'name': net['name'] } )
## Create
## Attach Tag with name | en | 0.468528 | ## Create VPC ## Attach Tag with name ## Create ## Attach Tag with name ## Create ## Attach Tag with name ## Create ## Attach Tag with name | 2.457252 | 2 |
taiga/projects/attachments/migrations/0005_attachment_sha1.py | threefoldtech/Threefold-Circles | 1 | 6614766 | <reponame>threefoldtech/Threefold-Circles<filename>taiga/projects/attachments/migrations/0005_attachment_sha1.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('attachments', '0004_auto_20150508_1141'),
]
operations = [
migrations.AddField(
model_name='attachment',
name='sha1',
field=models.CharField(default='', verbose_name='sha1', max_length=40, blank=True),
preserve_default=True,
),
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('attachments', '0004_auto_20150508_1141'),
]
operations = [
migrations.AddField(
model_name='attachment',
name='sha1',
field=models.CharField(default='', verbose_name='sha1', max_length=40, blank=True),
preserve_default=True,
),
] | en | 0.769321 | # -*- coding: utf-8 -*- | 1.433853 | 1 |
static_share/url_resolver.py | RRMoelker/django-static-share | 2 | 6614767 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import urllib
def compute_full_url(url, parameters):
    """
    Converts page url and parameter dictionary into a sharable static
    link with relevant get parameters.

    Works on both Python 2 (urllib.urlencode) and Python 3
    (urllib.parse.urlencode); the module-level ``import urllib`` alone
    only provides urlencode on Python 2.
    """
    try:
        from urllib import urlencode  # Python 2
    except ImportError:
        from urllib.parse import urlencode  # Python 3
    querystring = urlencode(parameters)
    full_url = '{}?{}'.format(url, querystring)
    return full_url
def get_parameter_dictionary(syntax, **kwargs):
    """
    Conversion of static_share names to get parameter name for network.
    For example conversion of 'url' to 'source' and 'text' to 't' for
    Twitter.

    *syntax* maps internal names to the network's GET parameter names;
    only keys also present in **kwargs are emitted.

    Note: uses dict.items() instead of the Python-2-only iteritems() so
    the function works on both Python 2 and 3.
    """
    parameters = {}
    for key, value in syntax.items():
        if key in kwargs:
            parameters[value] = kwargs[key]
    return parameters
def get_share_url(share_url, syntax, **kwargs):
    """
    Combine share url given the syntax for the specific social network into a full url
    """
    return compute_full_url(
        share_url, get_parameter_dictionary(syntax, **kwargs))
| # -*- coding: utf-8 -*-
import urllib
def compute_full_url(url, parameters):
"""
Converts page url and parameter dictionary into a sharable static link with relevant get parameters
"""
querystring = urllib.urlencode(parameters)
full_url ='{}?{}'.format(url, querystring)
return full_url
def get_parameter_dictionary(syntax, **kwargs):
"""
Conversion of static_share names to get parameter name for network.
For example conversion of 'url' to 'source' and 'text' to 't' for Twitter.
"""
parameters = {}
for key, value in syntax.iteritems():
if key in kwargs:
parameters[value] = kwargs[key]
return parameters
def get_share_url(share_url, syntax, **kwargs):
"""
Combine share url given the syntax for the specific social network into a full url
"""
parameters = get_parameter_dictionary(syntax, **kwargs)
return compute_full_url(share_url, parameters) | en | 0.554061 | # -*- coding: utf-8 -*- Converts page url and parameter dictionary into a sharable static link with relevant get parameters Conversion of static_share names to get parameter name for network. For example conversion of 'url' to 'source' and 'text' to 't' for Twitter. Combine share url given the syntax for the specific social network into a full url | 3.489698 | 3 |
code/interp_functions/interpolate_support.py | pblankenau2/pymetric | 20 | 6614768 | <reponame>pblankenau2/pymetric
#--------------------------------
# Name: interpolate_support.py
# Purpose: Interpolator support functions
#--------------------------------
from __future__ import division
import datetime as dt
# import gc
import logging
from multiprocessing import Process, Queue, cpu_count
import os
import sys
import warnings
import drigo
import numpy as np
from osgeo import gdal, ogr
from scipy import interpolate
# import et_common
import python_common as dripy
# np.seterr(invalid='ignore')
gdal.UseExceptions()
def landsat_dt_func(image_id):
    """Return the acquisition date encoded in a Landsat product ID.

    The ID is assumed to already be validated, e.g.
    'LC08_L1TP_043030_20150415_20170227_01_T1' -> date(2015, 4, 15).
    """
    # Field 3 of the underscore-delimited ID is the YYYYMMDD acquisition date
    acquisition = image_id.split('_')[3]
    return dt.datetime.strptime(acquisition, '%Y%m%d').date()
def daterange_func(start_dt, end_dt, delta=1):
    """Yield dates from start_dt through end_dt inclusive, stepping
    *delta* days at a time."""
    step = dt.timedelta(delta)
    current = start_dt
    while current <= end_dt:
        yield current
        current = current + step
def tile_wkt_func(input_path, path_field='PATH', row_field='ROW',
                  tile_fmt='p{:03d}r{:03d}'):
    """Return a dictionary of path/rows and their geometries

    Parameters
    ----------
    input_path : str
        OGR-readable vector dataset with integer path/row fields.
    path_field : str
        Name of the Landsat path field.
    row_field : str
        Name of the Landsat row field.
    tile_fmt : str
        Format used to build each tile key (e.g. 'p043r030').

    Returns
    -------
    dict
        {tile name: geometry WKT string}

    """
    output_dict = dict()
    # Open read-only (second argument 0).
    input_ds = ogr.Open(input_path, 0)
    input_lyr = input_ds.GetLayer()
    input_ftr = input_lyr.GetNextFeature()
    # GetNextFeature() returns None after the last feature, ending the loop.
    while input_ftr:
        path = input_ftr.GetFieldAsInteger(
            input_ftr.GetFieldIndex(path_field))
        row = input_ftr.GetFieldAsInteger(
            input_ftr.GetFieldIndex(row_field))
        input_wkt = input_ftr.GetGeometryRef().ExportToWkt()
        output_dict[tile_fmt.format(path, row)] = input_wkt
        input_ftr = input_lyr.GetNextFeature()
    # Dereference the dataset to close it (GDAL/OGR convention).
    input_ds = None
    return output_dict
# def clip_project_raster_worker(args, input_q, output_q):
# """Worker function for multiprocessing with input and output queues
#
# First input argument is an index that will be passed through to the output
# Convert projection WKT parameters to OSR objects
# 4th and 7th?
#
# """
# while True:
# args = input_q.get()
# if args is None:
# break
# args_mod = args[:]
# for i, arg in enumerate(args):
# # DEADBEEF - Do all projection WKT's start with 'PROJCS'?
# # Could try testing to see if the result of proj_osr is an OSR?
# if type(arg) == str and arg.startswith('PROJCS'):
# args_mod[i] = drigo.proj_osr(arg)
# output_q.put([args_mod[0], clip_project_raster_func(*args_mod[1:])])
# # output_q.put(clip_project_raster_mp(args))
#
# def clip_project_raster_mp(args):
# """MP wrapper for calling clip_project_raster_func with Pool
#
# First input parameter is an index that will be passed through
# Convert projection WKT parameters to OSR objects
# 4th and 7th?
#
# """
# args_mod = args[:]
# for i, arg in enumerate(args):
# # DEADBEEF - Do all projection WKT's start with 'PROJCS'?
# # Could try testing to see if the result of proj_osr is an OSR?
# if type(arg) == str and arg.startswith('PROJCS'):
# args_mod[i] = drigo.proj_osr(arg)
# return args_mod[0], clip_project_raster_func(*args_mod[1:])
def clip_project_raster_func(input_raster, resampling_type,
                             input_osr, input_cs, input_extent,
                             ouput_osr, output_cs, output_extent):
    """Clip and then project an input raster

    Parameters
    ----------
    input_raster : str
        Raster path; band 1 is read.
    resampling_type :
        Resampling type passed through to drigo.project_array().
    input_osr, input_cs, input_extent :
        Spatial reference, cellsize, and extent of the clip window in
        the input raster's coordinate system.
    ouput_osr, output_cs, output_extent :
        Target spatial reference, cellsize, and extent.
        NOTE(review): 'ouput_osr' is a misspelling of 'output_osr' but is
        part of the signature; left unchanged for keyword callers.

    Returns
    -------
    ndarray

    """
    # Read array from input raster using input extent
    input_array = drigo.raster_to_array(
        input_raster, 1, input_extent, return_nodata=False)
    # Project and clip array to block
    output_array = drigo.project_array(
        input_array, resampling_type,
        input_osr, input_cs, input_extent,
        ouput_osr, output_cs, output_extent)
    return output_array
def mosaic_func(mosaic_array, input_array, mosaic_method):
    """Merge input_array into mosaic_array in place and return it.

    NaN marks nodata in both arrays; cells where input_array is NaN are
    never touched.

    Parameters
    ----------
    mosaic_array : ndarray
        Running mosaic; modified in place.
    input_array : ndarray
        New data to merge (same shape as mosaic_array).
    mosaic_method : str
        'first' - keep existing values, only fill cells that are NaN.
        'last'  - overwrite with any finite input values.
        'mean'  - fill empty cells, average where both are finite.

    Returns
    -------
    ndarray
        The same mosaic_array object that was passed in.

    Raises
    ------
    ValueError
        If mosaic_method is unrecognized (previously a typo was silently
        ignored and the mosaic returned unchanged).
    """
    input_mask = np.isfinite(input_array)
    if not np.any(input_mask):
        # No new data; nothing to merge.
        return mosaic_array
    method = mosaic_method.lower()
    if method == 'first':
        # Fill cells that are currently empty
        fill_mask = input_mask & np.isnan(mosaic_array)
        mosaic_array[fill_mask] = input_array[fill_mask]
    elif method == 'last':
        # Overwrite any cells with new data
        mosaic_array[input_mask] = input_array[input_mask]
    elif method == 'mean':
        # Fill cells that are currently empty
        fill_mask = input_mask & np.isnan(mosaic_array)
        mosaic_array[fill_mask] = input_array[fill_mask]
        # Average with existing values.  Freshly-filled cells average
        # with themselves, which leaves them unchanged (same behavior as
        # the original implementation).
        mean_mask = input_mask & np.isfinite(mosaic_array)
        mosaic_array[mean_mask] += input_array[mean_mask]
        mosaic_array[mean_mask] *= 0.5
    else:
        raise ValueError(
            'unsupported mosaic_method: {}'.format(mosaic_method))
    return mosaic_array
def load_etrf_func(array_shape, date_list, year_ws, year,
                   etrf_raster, block_tile_list, block_extent,
                   tile_image_dict, mosaic_method, resampling_type,
                   output_osr, output_cs, output_extent, debug_flag):
    """Load ETrF from rasters to an array for all images/dates

    Parameters
    ----------
    array_shape : list
        (days, rows, cols) shape of the output array.
    date_list : list
        List of dates to be processed.
    year_ws : str
        File path of the workspace to the year folder from METRIC run.
    year :
        Year that will be processed (used as a key into tile_image_dict).
    etrf_raster : str
        Path of the per-image ETrF raster relative to each image folder.
    block_tile_list : list
        List of the tiles to be processed in each block.
    block_extent : gdal_common.extent
        The extent of the block.
    tile_image_dict : dict
        A dictionary of the tiles/years to be processed.
    mosaic_method : str
        NOTE(review): accepted but not used in this function --
        overlapping tiles are always averaged with nanmean below;
        confirm that is intended.
    resampling_type :
        GDAL resampling type used to reproject the daily ETrF.
    output_osr : osr.SpatialReference
        Desired spatial reference object.
    output_cs :
        Desired cellsize of the output.
    output_extent : gdal_common.extent
        Desired extent of the output.
    debug_flag : bool
        If True, NumPy RuntimeWarnings will be printed.

    Returns
    -------
    ndarray
        (days, rows, cols) float32 array; dates/pixels with no imagery
        remain NaN.

    """
    # Read in ETrF raster from each scene folder
    days, rows, cols = array_shape
    # days, x, y = etrf_array.shape
    # 4D scratch array: one 2D slice per (date, tile); NaN marks no data.
    tile_etrf_array = np.full(
        (days, len(block_tile_list), rows, cols), np.nan, np.float32)
    for tile_i, tile_name in enumerate(block_tile_list):
        if tile_name not in tile_image_dict[year].keys():
            continue
        # Image order is shuffled (dripy.shuffle); presumably to spread
        # concurrent file access -- confirm against dripy.
        for image_id in dripy.shuffle(tile_image_dict[year][tile_name]):
            tile_ws = os.path.join(year_ws, tile_name)
            image_ws = os.path.join(tile_ws, image_id)
            image_etrf_raster = os.path.join(image_ws, etrf_raster)
            if not os.path.isfile(image_etrf_raster):
                logging.debug(' ETrF raster does not exist')
                continue
            # Get projection and extent for each image
            block_tile_ds = gdal.Open(image_etrf_raster)
            block_tile_osr = drigo.raster_ds_osr(block_tile_ds)
            block_tile_cs = drigo.raster_ds_cellsize(block_tile_ds, x_only=True)
            block_tile_x, block_tile_y = drigo.raster_ds_origin(block_tile_ds)
            # Project the block extent into the image's coordinate system
            # and snap it outward to the image grid before reading.
            block_tile_extent = drigo.project_extent(
                block_extent, output_osr, block_tile_osr, output_cs)
            block_tile_extent.adjust_to_snap(
                'EXPAND', block_tile_x, block_tile_y, block_tile_cs)
            block_tile_ds = None
            # Use image_id to determine date
            date_i = date_list.index(landsat_dt_func(image_id))
            tile_etrf_array[date_i, tile_i, :, :] = clip_project_raster_func(
                image_etrf_raster, resampling_type,
                block_tile_osr, block_tile_cs, block_tile_extent,
                output_osr, output_cs, output_extent)
            # if low_etrf_limit is not None:
            #     temp_array[temp_array < low_etrf_limit] = low_etrf_limit
            # if high_etrf_limit is not None:
            #     temp_array[temp_array > high_etrf_limit] = high_etrf_limit
    # Collapse the tile axis with nanmean (averages overlapping tiles).
    # Suppress the numpy all-NaN-slice warning if the debug flag is off.
    if not debug_flag:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=RuntimeWarning)
            etrf_array = np.nanmean(tile_etrf_array, axis=1)
    else:
        etrf_array = np.nanmean(tile_etrf_array, axis=1)
    return etrf_array
# def load_etrf_swb_func(etrf_array, etrf_raster,
# low_etrf_limit, high_etrf_limit,
# date_list, year_ws, ndvi_raster, year,
# block_tile_list, block_extent,
# tile_image_dict, mosaic_method, resampling_type,
# output_osr, output_cs, output_extent, debug_flag,
# soil_water_balance_adjust_flag,
# year_tile_ndvi_paths, tile_ndvi_dict,
# awc_path, etr_input_ws, etr_input_re, ppt_input_ws,
# ppt_input_re, ndvi_threshold):
# """
#
# Parameters
# ----------
#
# Returns
# -------
# numpy.array: class:`numpy.array`
# """
# days, x, y = etrf_array.shape
# tiles = len(block_tile_list)
# temp_etrf_array = np.full((days, tiles, x, y), np.nan)
# temp_ndvi_array = np.full((days, tiles, x, y), np.nan)
# load_etrf_func(
# etrf_array, date_list, year_ws, etrf_raster, year,
# block_tile_list, block_extent,
# tile_image_dict, mosaic_method, resampling_type,
# output_osr, output_cs, output_extent, debug_flag,
# low_etrf_limit, high_etrf_limit)
# year = int(year)
# for tile_i, tile_name in enumerate(block_tile_list):
# if tile_name not in tile_image_dict[year].keys():
# continue
# for image_id in dripy.shuffle(tile_image_dict[year][tile_name]):
# tile_ws = os.path.join(year_ws, tile_name)
# image_ws = os.path.join(tile_ws, image_id)
# image_ndvi_raster = os.path.join(image_ws, ndvi_raster)
# if not os.path.isfile(image_ndvi_raster):
# continue
# # Get projection and extent for each image
# block_tile_ds = gdal.Open(image_ndvi_raster)
# block_tile_osr = drigo.raster_ds_osr(block_tile_ds)
# block_tile_cs = drigo.raster_ds_cellsize(block_tile_ds, x_only=True)
# block_tile_x, block_tile_y = drigo.raster_ds_origin(block_tile_ds)
# block_tile_extent = drigo.project_extent(
# block_extent, output_osr, block_tile_osr, output_cs)
# # block_tile_extent.adjust_to_snap(
# # 'EXPAND', block_tile_x, block_tile_y, block_tile_cs)
# block_tile_ds = None
# awc_ds = gdal.Open(awc_path)
# awc_osr = drigo.raster_ds_osr(awc_ds)
# awc_cs = drigo.raster_ds_cellsize(awc_ds, x_only=True)
# awc_x, awc_y = drigo.raster_ds_origin(awc_ds)
# awc_extent = drigo.project_extent(
# block_extent, output_osr, awc_osr, awc_cs)
# awc_extent.adjust_to_snap(
# 'EXPAND', awc_x, awc_y, awc_cs)
# awc_ds = None
# dt_object = landsat_dt_func(image_id)
# date_i = date_list.index(dt_object)
# etrf_array = daily_etrf_array[date_i,:,:,]
# if np.all(np.isnan(etrf_array)):
# continue
# etrf_background = et_common.array_swb_func(
# dt_object, awc_path, etr_input_ws, etr_input_re,
# ppt_input_ws, ppt_input_re, awc_osr, awc_cs, awc_extent,
# output_osr, output_cs, output_extent, 30)
# ndvi_array = clip_project_raster_func(
# image_ndvi_raster, resampling_type,
# block_tile_osr, block_tile_cs, block_tile_extent,
# output_osr, output_cs, output_extent)
# ndvi_mask = (ndvi_array > ndvi_threshold).astype(np.bool)
# fc = calc_fc(
# # ndvi_array=temp_ndvi_array[date_i, tile_i,:,:,],
# ndvi_array=ndvi_array,
# ndvi_full_cover=tile_ndvi_dict[year][tile_name][image_id]['cold'],
# ndvi_bare_soil=tile_ndvi_dict[year][tile_name][image_id]['hot'])
# etrf_transpiration = etrf_array - ((1 - fc) * etrf_background)
# etrf_transpiration_adj = np.max(
# np.array([etrf_transpiration, etrf_background]),
# axis=0)
# etrf_adjusted = (
# ((1 - fc) * etrf_background) + (fc * etrf_transpiration_adj))
# etrf_adjusted[ndvi_mask] = etrf_array[ndvi_mask]
# temp_etrf_array[date_i, tile_i,:,:,] = etrf_adjusted
# # Suppress the numpy nan warning if the debug flag is off
# if not debug_flag:
# with warnings.catch_warnings():
# warnings.simplefilter('ignore', category=RuntimeWarning)
# etrf_array[:] = np.nanmean(temp_etrf_array, axis=1)
# elif debug_flag:
# etrf_array[:] = np.nanmean(temp_etrf_array, axis=1)
# else:
# logging.error(
# ('Could not calculate ETRF using ' +
# 'temp_etrf_array: {}, shape {}'.format(
# temp_etrf_array, temp_etrf_array.shape)))
# sys.exit()
def spatial_fill_func(data_array, date_list, mp_flag, mp_procs):
    """Spatially fill missing values (currently unimplemented).

    This is a placeholder: the array is returned unmodified.  The
    date_list/mp_flag/mp_procs parameters are accepted for interface
    compatibility but are unused.
    """
    return data_array
# def end_fill_func(data_array, block_mask, fill_method='linear'):
# """"""
#
# # Skip block if array is all nodata
# if not np.any(block_mask):
# return data_array
# # Skip block if array is all nodata
# # elif np.all(np.isnan(data_array)):
# # return data_array
#
# # Fill first and last Landsat ETrF rasters
# # Filling anchor rasters is independent of the fill method
# # date_str_list = [d.strftime('%Y_%m_%d') for d in date_list]
#
# data_shape = data_array.shape
# data_index = np.tile(
# np.arange(data_shape[0], dtype=np.float32)[:, np.newaxis, np.newaxis],
# (data_shape[1], data_shape[2]))
# data_index[np.isnan(data_array)] = np.nan
#
# min_index = np.nanargmin(data_index, axis=0)
# max_index = np.nanargmax(data_index, axis=0)
# print min_index
# print max_index
# return data_array
def end_fill_func(data_array, block_mask, fill_method='linear'):
    """Fill start/end/anchor values using nearest value in time

    Parameters
    ----------
    data_array : ndarray
        3D (date, row, col) stack; NaN marks missing values.  Modified
        in place and also returned.
    block_mask : ndarray
        2D boolean mask of pixels that should be filled.
    fill_method : {'linear' or 'cubicspline'}
        'cubicspline' additionally fills the second and second-to-last
        slices, which that interpolator needs as anchors.

    Returns
    -------
    ndarray

    Notes
    -----
    The actual spacing/timing of the images is not being considered.
    This approach would be inefficient if the full array was passed in.
    NOTE(review): fill_from_next rebuilds the whole slice from scratch,
    so pixels OUTSIDE block_mask end up NaN in the filled slices even if
    they previously had data -- confirm downstream only reads masked
    pixels.

    """
    # Skip block if array is all nodata
    if not np.any(block_mask):
        return data_array
    # Skip block if array is all nodata
    # elif np.all(np.isnan(data_array)):
    #     return data_array

    def fill_from_next(data_array, block_mask, data_i_list):
        """Build one 2D slice, giving each masked pixel the first finite
        value found while walking dates in data_i_list order."""
        # First axis of block array is the date/doy
        fill_array = np.empty(data_array[0].shape, dtype=data_array.dtype)
        fill_array[:] = np.nan
        for data_i in data_i_list:
            next_array = data_array[data_i,:,:]
            next_mask = np.isfinite(next_array)
            # Only fill values that are still nan
            next_mask &= np.isnan(fill_array)
            # Only fill pixels inside the block mask
            next_mask &= block_mask
            if np.any(next_mask):
                fill_array[next_mask] = next_array[next_mask]
            del next_array, next_mask
            # Stop once all masked pixels are filled
            if np.all(np.isfinite(fill_array[block_mask])):
                break
        return fill_array

    # The actual spacing/timing of the images is not being considered
    # (the walk starts at the target slice itself, then later dates).
    data_i_list = range(data_array.shape[0])
    # Calculate ETrF start raster
    if np.any(np.isnan(data_array[0, :, :])):
        data_array[0, :, :] = fill_from_next(
            data_array, block_mask, data_i_list)
    # Calculate ETrF end raster (walk dates in reverse)
    if np.any(np.isnan(data_array[-1, :, :])):
        data_array[-1, :, :] = fill_from_next(
            data_array, block_mask, sorted(data_i_list, reverse=True))
    # Calculate start/end anchor rasters
    if fill_method == 'cubicspline':
        if np.any(np.isnan(data_array[1, :, :])):
            data_array[1, :, :] = fill_from_next(
                data_array, block_mask, data_i_list)
        if np.any(np.isnan(data_array[-2, :, :])):
            data_array[-2, :, :] = fill_from_next(
                data_array, block_mask, sorted(data_i_list, reverse=True))
    return data_array
# DEADBEEF - Single core implementation
def temporal_fill_func(sub_array, sub_i_array, block_mask, fill_method='linear'):
    """Single core temporal fill function

    Fill Landsat scene dates so that interpolator only runs between known dates

    Parameters
    ----------
    sub_array : ndarray
        3D stack of scene-date rasters (scene, rows, cols); modified in
        place and returned.
    sub_i_array : ndarray
        Day index of each scene-date raster (parallel to the first axis
        of sub_array).
    block_mask : ndarray
        2D boolean mask of pixels to process.
    fill_method : {'linear' or 'cubicspline'}
        Interpolation method (the default is 'linear').

    Returns
    -------
    ndarray
    """
    # Skip block if array is all nodata
    if not np.any(block_mask):
        return sub_array
    # Skip block if array is all nodata
    # elif np.all(np.isnan(data_array)):
    #     return sub_array

    # Begin interpolating scene days with missing values
    # for interp_i, interp_doy in enumerate(sub_i_array):
    for interp_sub_i, interp_full_i in enumerate(sub_i_array):
        # Interp mask is False where pixels have data
        # (i.e. True for pixels that will be interpolated)
        interp_mask = np.isnan(sub_array[interp_sub_i, :, :])
        interp_mask &= block_mask
        if not np.any(interp_mask):
            continue
        # logging.info('  INTERP {} {}'.format(
        #     interp_sub_i, interp_full_i))
        # list of subsequent days
        for anchor_sub_i, anchor_full_i in enumerate(sub_i_array):
            # Only scenes after the one being filled can anchor it; the
            # scene before it is referenced via interp_sub_i-1 below
            if anchor_sub_i <= interp_sub_i:
                continue
            # Interpolate when next DOY has data
            anchor_mask = np.copy(interp_mask)
            anchor_mask &= np.isfinite(sub_array[anchor_sub_i, :, :])
            if not np.any(anchor_mask):
                continue
            # logging.info('  ANCHOR {} {}'.format(
            #     anchor_sub_i, anchor_full_i))
            if fill_method == 'cubicspline':
                # Cubic fill needs a second finite scene after the anchor
                for cubic_sub_i, cubic_full_i in enumerate(sub_i_array):
                    if cubic_sub_i <= anchor_sub_i:
                        continue
                    cubic_mask = np.copy(anchor_mask)
                    cubic_mask &= np.isfinite(sub_array[cubic_sub_i, :, :])
                    if not np.any(cubic_mask):
                        continue
                    # logging.info('  CUBIC {} {}'.format(
                    #     cubic_sub_i, cubic_full_i))
                    # NOTE(review): interp_sub_i-2 / interp_sub_i-1 go
                    # negative for the first scenes and wrap to the end of
                    # sub_i_array; this assumes end_fill_func already filled
                    # the start/anchor rasters so those scenes have no masked
                    # NaNs left — confirm the upstream call order
                    interp_i_array = np.array([
                        sub_i_array[interp_sub_i-2], sub_i_array[interp_sub_i-1],
                        sub_i_array[anchor_sub_i], sub_i_array[cubic_sub_i]])
                    interp_i_mask = np.in1d(sub_i_array, interp_i_array)
                    interp_array = sub_array[interp_i_mask, :, :][:, cubic_mask]
                    # kind=3 requests a 3rd-order (cubic) spline
                    f = interpolate.interp1d(
                        interp_i_array, interp_array,
                        axis=0, kind=3)
                    sub_array[interp_sub_i, :, :][cubic_mask] = f(interp_full_i)
                    # sub_array[interp_sub_i,:,:][anchor_mask] = f(interp_full_i).astype(np.float32)
                    # Mark the just-filled pixels so they are not refilled
                    interp_mask[cubic_mask] = False
                    anchor_mask[cubic_mask] = False
                    del f, interp_i_array, interp_i_mask
                    del cubic_mask, interp_array
                    if not np.any(interp_mask):
                        break
            elif fill_method == 'linear':
                # NOTE(review): interp_sub_i-1 wraps to the last scene when
                # interp_sub_i == 0; assumes the start raster was pre-filled
                # by end_fill_func — confirm
                interp_i_array = np.array(
                    [sub_i_array[interp_sub_i-1], sub_i_array[anchor_sub_i]])
                interp_i_mask = np.in1d(sub_i_array, interp_i_array)
                interp_array = sub_array[interp_i_mask, :, :][:, anchor_mask]
                f = interpolate.interp1d(
                    interp_i_array, interp_array, axis=0, kind=fill_method)
                sub_array[interp_sub_i, :, :][anchor_mask] = f(interp_full_i)
                # sub_array[interp_sub_i,:,:][anchor_mask] = f(interp_full_i).astype(np.float32)
                interp_mask[anchor_mask] = False
                del f, interp_i_array, interp_i_mask, interp_array
                if not np.any(interp_mask):
                    break
            elif fill_method == 'nearest':
                # Intentionally a no-op here; presumably nearest filling is
                # handled elsewhere — TODO confirm
                pass
            # There is a memory leak with f/interp1d
            # gc.collect()
        del interp_mask
    return sub_array
def interpolate_func(full_array, sub_array, sub_i_array,
                     block_mask, interp_method):
    """Interpolate the daily stack from the scene-date stack (single core).

    This function should be used after scene dates have already been
    filled; there is no error checking that the start/end/anchor rasters
    have data.

    Parameters
    ----------
    full_array : ndarray
        3D daily stack (day, rows, cols); modified in place and returned.
    sub_array : ndarray
        3D scene-date stack aligned with sub_i_array.
    sub_i_array : ndarray
        Day index (into full_array) of each scene-date raster.
    block_mask : ndarray
        2D boolean mask of pixels to process.
    interp_method : str
        Interpolation kind passed to scipy.interpolate.interp1d.

    Returns
    -------
    ndarray
    """
    # Skip block if the array is all nodata
    if not np.any(block_mask):
        return full_array

    sub_i_list = list(sub_i_array)
    # Assume each step of the first axis is one day
    day_indices = np.arange(full_array.shape[0])
    # Days whose values are copied straight from the scene stack
    anchor_days = [day_indices[0], day_indices[-1]]
    if interp_method in ['cubic', 'cubicspline']:
        anchor_days.extend([day_indices[1], day_indices[-2]])
    anchor_days.sort()

    for day_i in day_indices:
        # True for masked pixels that still need a value on this day
        missing = np.isnan(full_array[day_i, :, :])
        missing &= block_mask
        if not np.any(missing):
            continue
        if day_i in anchor_days:
            # Anchor days were pre-filled in sub_array; copy them directly
            full_array[day_i, :, :][missing] = sub_array[
                sub_i_list.index(day_i), :, :][missing]
            continue
        # Bracket the day with the nearest scene date(s) on each side
        if interp_method in ['cubic', 'cubicspline']:
            before_i = np.where(sub_i_array <= day_i)[0][-2:]
            after_i = np.where(sub_i_array > day_i)[0][:2]
        else:
            before_i = np.where(sub_i_array <= day_i)[0][-1:]
            after_i = np.where(sub_i_array > day_i)[0][:1]
        bracket_days = sub_i_array[np.concatenate((before_i, after_i))]
        bracket_mask = np.in1d(sub_i_array, bracket_days)
        bracket_values = sub_array[bracket_mask, :, :][:, missing]
        f = interpolate.interp1d(
            bracket_days, bracket_values, axis=0, kind=interp_method)
        full_array[day_i, :, :][missing] = f(day_i)
        # There is a memory leak with f/interp1d
        del f, bracket_values, bracket_mask, bracket_days
    return full_array
# def mp_interpolate_func(full_array, sub_array, sub_i_array,
# block_mask, interp_method,
# mp_flag=True, mp_procs=cpu_count()):
# """"""
# mp_procs = 1
#
# # Skip block if array is all nodata
# if not np.any(block_mask):
# return data_array
# # Skip block if array is all nodata
# # elif np.all(np.isnan(data_array)):
# # return data_array
#
# # Assume each step is a day
# full_i_array = np.arange(full_array.shape[0])
#
# # Create shared memory object of full_array
# print sub_array[0,:,:]
# print sub_array[:,0,0]
# sub_ctypes = RawArray(ctypes.c_float, sub_array.size)
# sub_shr_array = np.frombuffer(
# sub_ctypes, dtype=np.float32, count=sub_array.size)
# # Copy sub_array into the shared memory array
# # sub_shr_array = sub_array
# sub_shr_array = sub_array.flatten()
#
# # Begin interpolating scene days with missing values
# input_q = Queue()
# output_q = Queue()
# mp_tasks = 0
# for interp_full_i in full_i_array:
# # Interp mask is False where pixels have data
# # (i.e. True for pixels that will be interpolated)
# interp_mask = np.isnan(full_array[interp_full_i,:,:])
# interp_mask &= block_mask
# if not np.any(interp_mask):
# continue
# # Copy start/end/anchor dates directly to output
# # if interp_i in list(sub_i_array):
# if (interp_full_i == full_i_array[0] or
# interp_full_i == full_i_array[-1] or
# (interp_method in ['cubic', 'cubicspline'] and
# (interp_full_i == full_i_array[1] or
# interp_full_i == full_i_array[-2]))):
# full_array[interp_full_i,:,:][interp_mask] = sub_array[
# list(sub_i_array).index(interp_full_i),:,:][interp_mask]
# continue
# # Select anchor days for each day being interpolated
# if interp_method in ['cubic', 'cubicspline']:
# interp_sub_i_array = np.concatenate(
# (np.where(sub_i_array <= interp_full_i)[0][-2:],
# np.where(sub_i_array > interp_full_i)[0][:2]))
# else:
# interp_sub_i_array = np.concatenate(
# (np.where(sub_i_array <= interp_full_i)[0][-1:],
# np.where(sub_i_array > interp_full_i)[0][:1]))
# interp_full_i_array = sub_i_array[interp_sub_i_array]
# # Put the items into the processing queue
# input_q.put([
# interp_full_i, interp_full_i_array,
# interp_sub_i_array, interp_method])
# mp_tasks += 1
# del interp_full_i, interp_full_i_array, interp_sub_i_array
#
# # Start the workers
# for i in range(max(1, mp_procs - 1)):
# p = Process(
# target=interpolate_worker,
# args=(sub_ctypes, sub_array.shape, input_q, output_q)).start()
# # Start processing
# for i in range(mp_tasks):
# # for i in range(input_q.qsize()):
# interp_i, interp_array = output_q.get()
# full_array[interp_i,:,:][block_mask] = interp_array[block_mask]
# del interp_i, interp_array
# # Terminate the workers
# for i in range(max(1, mp_procs - 1)):
# input_q.put(None)
# input_q.close()
# output_q.close()
# del input_q, output_q
# del sub_ctypes, sub_shr_array
# return full_array
# def interpolate_worker(sub_ctypes, sub_shape, input_q, output_q):
# """Worker function for multiprocessing with input and output queues"""
# # sub_array = np.ctypeslib.as_array(sub_ctypes)
# # sub_array = sub_array.reshape(sub_shape)
# # sub_array.shape = sub_shape
# # sub_array = np.ctypeslib.as_array(sub_ctypes).reshape(sub_shape)
# sub_array = np.asarray(np.frombuffer(sub_ctypes, dtype=np.float32))
# sub_array = sub_array.reshape(sub_shape)
# print sub_array
# print sub_array.shape
# print sub_array[:,0,0]
# print sub_array.dtype
# print input_q
# print output_q
# while True:
# args = input_q.get()
# if args is None:
# break
# interp_full_i = args[0]
# interp_full_i_array = args[1]
# interp_sub_i_array = args[2]
# interp_method = args[3]
# f = interpolate.interp1d(
# interp_full_i_array, sub_array[interp_sub_i_array,:,:],
# axis=0, kind=interp_method)
# # f = interpolate.interp1d(
# # interp_i_array, sub_array[[0,2],:,:], axis=0, kind=interp_method)
# output_q.put([interp_full_i, f(interp_full_i)])
# # output_q.put(interpolate_mp(args))
# def interpolate_mp(args):
# """MP wrapper for calling interpolate
#
# First input parameter is the date index that will be passed through
#
# """
# f = interpolate.interp1d(args[1], args[2], axis=0, kind=args[3])
# return args[0], f(args[0])
# def interpolate_mp(tup):
# """MP wrapper for calling interpolate
#
# First input parameter is the date index that will be passed through
# Second input parameter is a mask that will be passed through
#
# """
# return tup[0], tup[1], interpolate_sp(*tup[2:])
# def interpolate_sp(x_array, y_array, interp_doy, interp_method):
# """Wrapper function for clipping and then projecting an input raster"""
# f = interpolate.interp1d(x_array, y_array, axis=0, kind=interp_method)
# return f(interp_doy)
def block_interpolate_func(full_array, sub_array, sub_i_array,
                           block_mask, fill_method, interp_method,
                           mp_flag=True, mp_procs=cpu_count()):
    """Fill and interpolate sub blocks using multiprocessing

    Fixes over the previous revision: queue items now carry the eight
    fields that block_interpolate_worker unpacks (the old five-field items
    would crash the workers); worker results are written back to
    full_array (previously the returned interp_array was discarded and the
    sub_array parameter was written instead); and each sub block's
    rows/cols are tracked per block instead of reusing the last block's
    shape for edge blocks.

    Parameters
    ----------
    full_array : ndarray
        3D daily stack (day, rows, cols); updated in place and returned.
    sub_array : ndarray
        3D scene-date stack aligned spatially with full_array.
    sub_i_array : ndarray
        Day index (into full_array) of each scene-date raster.
    block_mask : ndarray
        2D boolean mask of pixels to process.
    fill_method : str
    interp_method : str
    mp_flag : bool
        Currently unused; tasks are always queued to worker processes.
    mp_procs : int
        Process count; max(1, mp_procs - 1) workers are started.

    Returns
    -------
    ndarray
    """
    logging.info('  Processing by sub block')
    block_rows, block_cols = block_mask.shape
    sub_bs = 64

    # Track the shape with each sub block since edge blocks are smaller
    mp_list = []
    for s_i, s_j in drigo.block_gen(block_rows, block_cols, sub_bs):
        sub_rows, sub_cols = drigo.block_shape(
            block_rows, block_cols, s_i, s_j, sub_bs)
        mp_list.append([s_i, s_j, sub_rows, sub_cols])

    def _enqueue(input_q, item):
        """Queue one sub block in the layout block_interpolate_worker expects"""
        s_i, s_j, sub_rows, sub_cols = item
        input_q.put([
            s_i, s_j,
            full_array[:, s_i:s_i + sub_rows, s_j:s_j + sub_cols],
            sub_array[:, s_i:s_i + sub_rows, s_j:s_j + sub_cols],
            sub_i_array,
            block_mask[s_i:s_i + sub_rows, s_j:s_j + sub_cols],
            fill_method, interp_method])

    if mp_list:
        input_q = Queue()
        output_q = Queue()
        mp_tasks = len(mp_list)
        worker_count = max(1, mp_procs - 1)
        # Seed the queue with one task per worker (or fewer if fewer tasks)
        for i in range(min(worker_count, len(mp_list))):
            _enqueue(input_q, mp_list.pop())
        # Start the workers
        for i in range(worker_count):
            Process(target=block_interpolate_worker,
                    args=(i, input_q, output_q)).start()
        # Collect results and keep the queue fed until all tasks are done
        for i in range(mp_tasks):
            s_i, s_j, interp_array = output_q.get()
            out_rows, out_cols = interp_array.shape[1], interp_array.shape[2]
            full_array[:, s_i:s_i + out_rows, s_j:s_j + out_cols] = interp_array
            del s_i, s_j, interp_array
            if mp_list:
                _enqueue(input_q, mp_list.pop())
        # Signal the workers to stop, then close the queues
        for i in range(worker_count):
            input_q.put(None)
        input_q.close()
        output_q.close()
        del input_q, output_q
    return full_array
def block_interpolate_worker(worker_i, input_q, output_q):
    """Worker process for block_interpolate_func

    Reads sub block tasks from input_q until a None sentinel is received.
    Each task is [s_i, s_j, full_array, sub_array, sub_i_array, sub_mask,
    fill_method, interp_method]; the filled/interpolated full_array block
    is written to output_q as [s_i, s_j, full_array].

    The first parameter was previously named "args" even though the caller
    passes the worker index; it was immediately shadowed inside the loop.

    Parameters
    ----------
    worker_i : int
        Worker index (identification/debugging only; not otherwise used).
    input_q : multiprocessing.Queue
    output_q : multiprocessing.Queue
    """
    while True:
        args = input_q.get()
        # A None sentinel tells the worker to shut down
        if args is None:
            break
        s_i, s_j, full_array, sub_array, sub_i_array, sub_mask, fill_method, interp_method = args
        # Fill start/end/anchor scene rasters, then fill remaining scene
        # dates, then interpolate every day between scene dates
        sub_array = end_fill_func(sub_array, sub_mask, fill_method)
        sub_array = temporal_fill_func(
            sub_array, sub_i_array, sub_mask, fill_method)
        full_array = interpolate_func(
            full_array, sub_array, sub_i_array, sub_mask, interp_method)
        output_q.put([s_i, s_j, full_array])
def load_year_array_func(input_ws, input_re, date_list,
                         mask_osr, mask_cs, mask_extent,
                         name='ETr', return_geo_array=True):
    """Load annual multi-band rasters into a daily array

    Fix: the search for the first existing year raster (used to read the
    geo-spatial properties) previously called sys.exit() inside the except
    block, so a single missing year at the start of date_list aborted the
    run even when later years existed.  It now tries every date and exits
    only if no year raster is found at all.

    Parameters
    ----------
    input_ws : str
        Workspace containing one multi-band (band per DOY) raster per year.
    input_re : compiled re pattern
        Pattern with a 'YYYY' named group used to identify each raster's year.
    date_list : list
        Dates to load (one output slice per date).
    mask_osr : osr.SpatialReference
        Spatial reference of the study area mask.
    mask_cs : float
        Cellsize of the study area mask.
    mask_extent : drigo extent
        Extent of the study area mask.
    name : str
        Variable name used in log messages (the default is 'ETr').
    return_geo_array : bool
        If True, return array geo-spatial properties (the default is True).

    Returns
    -------
    ndarray, or (ndarray, input_osr, input_cs, output_extent) when
    return_geo_array is True.
    """
    logging.info('\n{}'.format(name))
    logging.debug('  {} workspace: {}'.format(name, input_ws))
    year_str_list = sorted(list(set([
        date.strftime('%Y') for date in date_list])))
    if not os.path.isdir(input_ws):
        logging.error(
            '\nERROR: The {} folder does not exist:\n  {}'.format(
                name, input_ws))
        sys.exit()
    # Map each requested year to its raster path
    input_dict = {
        input_match.group('YYYY'): os.path.join(input_ws, input_name)
        for input_name in os.listdir(os.path.join(input_ws))
        for input_match in [input_re.match(input_name)]
        if (input_match and input_match.group('YYYY') and
            input_match.group('YYYY') in year_str_list)}
    if not input_dict:
        logging.error(
            ('  No {0} files found in {1} for {2}\n'
             '  The {0} year folder may be empty or the regular '
             'expression is invalid\n  Exiting').format(
                name, input_ws, ', '.join(year_str_list)))
        sys.exit()
    # Assume all rasters have the same projection, cellsize, and snap;
    # read the geo-spatial properties from the first year that exists
    input_path = None
    for date_obj in date_list:
        try:
            input_path = input_dict[date_obj.strftime('%Y')]
            break
        except KeyError:
            # Keep checking the remaining dates before giving up
            logging.debug(
                '  {} raster for date {} does not exist'.format(
                    name, date_obj.strftime('%Y%m%d')))
    if input_path is None:
        logging.error(
            '  No {} rasters found for any target date\n  Exiting'.format(
                name))
        sys.exit()
    input_ds = gdal.Open(input_path, 0)
    input_osr = drigo.raster_ds_osr(input_ds)
    # input_proj = drigo.osr_proj(input_osr)
    input_cs = drigo.raster_ds_cellsize(input_ds, x_only=True)
    input_x, input_y = drigo.raster_ds_origin(input_ds)
    input_ds = None
    # Get mask extent in the original spatial reference
    output_extent = drigo.project_extent(
        mask_extent, mask_osr, input_osr, mask_cs)
    output_extent.adjust_to_snap('EXPAND', input_x, input_y, input_cs)
    output_rows, output_cols = output_extent.shape(cs=input_cs)
    # Initialize the common array; dates with no raster stay NaN
    output_array = np.full(
        (len(date_list), output_rows, output_cols), np.nan, np.float32)
    # Read in the raster for each date (band number == day of year)
    for date_i, date_obj in enumerate(date_list):
        try:
            input_path = input_dict[date_obj.strftime('%Y')]
        except KeyError:
            logging.debug(
                '  {} - {} raster does not exist'.format(
                    date_obj.strftime('%Y%m%d'), name))
            continue
        output_array[date_i, :, :] = drigo.raster_to_array(
            input_path, band=int(date_obj.strftime('%j')),
            mask_extent=output_extent, return_nodata=False,)
    if return_geo_array:
        return output_array, input_osr, input_cs, output_extent
    else:
        return output_array
def swb_adjust_fc(ndvi_array, ndvi_full_cover, ndvi_bare_soil):
    """Estimate fractional cover by scaling NDVI between bare soil and full cover.

    Returns 1 - (ndvi_full_cover - ndvi_array) / (ndvi_full_cover -
    ndvi_bare_soil); works element-wise on arrays as well as on scalars.
    """
    ndvi_range = ndvi_full_cover - ndvi_bare_soil
    ndvi_deficit = ndvi_full_cover - ndvi_array
    return 1 - ndvi_deficit / ndvi_range
def unknown_proj_osr(input_proj):
    """Return the spatial reference object for a projection string

    Tries, in order: EPSG string, EPSG value with the 'EPSG:' prefix
    stripped, WKT, PROJ4, raster path, and feature path.

    Fixes over the previous revision: input_proj.replace('EPSG:') was
    missing the required replacement argument (always raised TypeError, so
    the branch silently never worked under the bare except); bare except
    clauses were narrowed to Exception; and the final fall-through
    previously returned an undefined name (NameError) — it now logs an
    error and returns None.

    Parameters
    ----------
    input_proj : str

    Returns
    -------
    osr.SpatialReference or None if every conversion fails.
    """
    try:
        output_osr = drigo.epsg_osr(input_proj)
        logging.debug('  OSR from EPSG string')
        return output_osr
    except Exception:
        pass
    try:
        # Strip the 'EPSG:' prefix before retrying the EPSG lookup
        output_osr = drigo.epsg_osr(input_proj.replace('EPSG:', ''))
        logging.debug('  OSR from EPSG integer')
        return output_osr
    except Exception:
        pass
    try:
        output_osr = drigo.proj_osr(input_proj)
        logging.debug('  OSR from WKT')
        return output_osr
    except Exception:
        pass
    try:
        output_osr = drigo.proj4_osr(input_proj)
        logging.debug('  OSR from PROJ4')
        return output_osr
    except Exception:
        pass
    try:
        output_osr = drigo.raster_path_osr(input_proj)
        logging.debug('  OSR from raster path')
        return output_osr
    except Exception:
        pass
    try:
        output_osr = drigo.feature_path_osr(input_proj)
        logging.debug('  OSR from feature path')
        return output_osr
    except Exception:
        pass
    logging.error(
        '  Could not determine a spatial reference from: {}'.format(
            input_proj))
    return None
# def feature_extents(input_path):
# """Return a dictionary of zone FIDs and their extents"""
# output_dict = dict()
# # shp_driver = ogr.GetDriverByName('ESRI Shapefile')
# input_ds = ogr.Open(input_path, 0)
# input_lyr = input_ds.GetLayer()
# input_lyr.ResetReading()
# for input_ftr in input_lyr:
# input_fid = input_ftr.GetFID()
# input_extent = drigo.Extent(
# input_ftr.GetGeometryRef().GetEnvelope()).ogrenv_swap()
# output_dict[input_fid] = input_extent
# input_ds = None
# return output_dict
# def feature_geometries(input_path):
# """Return a dictionary of zone FIDs and their geometries"""
# output_dict = dict()
# # shp_driver = ogr.GetDriverByName('ESRI Shapefile')
# input_ds = ogr.Open(input_path, 0)
# input_lyr = input_ds.GetLayer()
# input_lyr.ResetReading()
# for input_ftr in input_lyr:
# input_fid = input_ftr.GetFID()
# input_geom = input_ftr.GetGeometryRef().ExportToWkt()
# output_dict[input_fid] = input_geom
# input_ds = None
# return output_dict
# def feature_field_values(input_path, field='FID'):
# """Return a dictionary of zone FIDs and their field values"""
# output_dict = dict()
# # shp_driver = ogr.GetDriverByName('ESRI Shapefile')
# input_ds = ogr.Open(input_path, 0)
# input_lyr = input_ds.GetLayer()
# input_lyr.ResetReading()
# for input_ftr in input_lyr:
# input_fid = input_ftr.GetFID()
# output_dict[input_fid] = input_ftr.GetField(field)
# input_ds = None
# return output_dict
#--------------------------------
# Name: interpolate_support.py
# Purpose: Interpolator support functions
#--------------------------------
from __future__ import division
import datetime as dt
# import gc
import logging
from multiprocessing import Process, Queue, cpu_count
import os
import sys
import warnings
import drigo
import numpy as np
from osgeo import gdal, ogr
from scipy import interpolate
# import et_common
import python_common as dripy
# np.seterr(invalid='ignore')
gdal.UseExceptions()
def landsat_dt_func(image_id):
    """Return the acquisition date parsed from a Landsat image ID.

    Assumes image_id has already been verified as a Landsat image ID,
    e.g. LC08_L1TP_043030_20150415_20170227_01_T1 — the acquisition date
    is the fourth underscore-separated field (YYYYMMDD).
    """
    acq_date_str = image_id.split('_')[3]
    return dt.datetime.strptime(acq_date_str, '%Y%m%d').date()
def daterange_func(start_dt, end_dt, delta=1):
    """Yield dates from start_dt through end_dt inclusive, stepping delta days."""
    step = dt.timedelta(delta)
    current = start_dt
    while current <= end_dt:
        yield current
        current += step
def tile_wkt_func(input_path, path_field='PATH', row_field='ROW',
                  tile_fmt='p{:03d}r{:03d}'):
    """Return a dictionary of path/rows and their geometries

    Parameters
    ----------
    input_path : str
        File path of an OGR-readable feature dataset of Landsat tiles.
    path_field : str
        Field holding the Landsat path number (the default is 'PATH').
    row_field : str
        Field holding the Landsat row number (the default is 'ROW').
    tile_fmt : str
        Format string used to build each tile key (the default is
        'p{:03d}r{:03d}').

    Returns
    -------
    dict : {tile_name: geometry WKT}
    """
    tile_geom_dict = dict()
    tile_ds = ogr.Open(input_path, 0)
    tile_lyr = tile_ds.GetLayer()
    tile_ftr = tile_lyr.GetNextFeature()
    while tile_ftr:
        path_number = tile_ftr.GetFieldAsInteger(
            tile_ftr.GetFieldIndex(path_field))
        row_number = tile_ftr.GetFieldAsInteger(
            tile_ftr.GetFieldIndex(row_field))
        geom_wkt = tile_ftr.GetGeometryRef().ExportToWkt()
        tile_geom_dict[tile_fmt.format(path_number, row_number)] = geom_wkt
        tile_ftr = tile_lyr.GetNextFeature()
    tile_ds = None
    return tile_geom_dict
# def clip_project_raster_worker(args, input_q, output_q):
# """Worker function for multiprocessing with input and output queues
#
# First input argument is an index that will be passed through to the output
# Convert projection WKT parameters to OSR objects
# 4th and 7th?
#
# """
# while True:
# args = input_q.get()
# if args is None:
# break
# args_mod = args[:]
# for i, arg in enumerate(args):
# # DEADBEEF - Do all projection WKT's start with 'PROJCS'?
# # Could try testing to see if the result of proj_osr is an OSR?
# if type(arg) == str and arg.startswith('PROJCS'):
# args_mod[i] = drigo.proj_osr(arg)
# output_q.put([args_mod[0], clip_project_raster_func(*args_mod[1:])])
# # output_q.put(clip_project_raster_mp(args))
#
# def clip_project_raster_mp(args):
# """MP wrapper for calling clip_project_raster_func with Pool
#
# First input parameter is an index that will be passed through
# Convert projection WKT parameters to OSR objects
# 4th and 7th?
#
# """
# args_mod = args[:]
# for i, arg in enumerate(args):
# # DEADBEEF - Do all projection WKT's start with 'PROJCS'?
# # Could try testing to see if the result of proj_osr is an OSR?
# if type(arg) == str and arg.startswith('PROJCS'):
# args_mod[i] = drigo.proj_osr(arg)
# return args_mod[0], clip_project_raster_func(*args_mod[1:])
def clip_project_raster_func(input_raster, resampling_type,
                             input_osr, input_cs, input_extent,
                             ouput_osr, output_cs, output_extent):
    """Clip and then project an input raster

    Parameters
    ----------
    input_raster : str
        File path of the raster to read (band 1 is used).
    resampling_type : int
        GDAL resampling type passed to drigo.project_array.
    input_osr : osr.SpatialReference
        Spatial reference of the input raster.
    input_cs : float
        Cellsize of the input raster.
    input_extent :
        Extent to read from the input raster (drigo extent).
    ouput_osr : osr.SpatialReference
        Output spatial reference.  NOTE(review): the parameter name has a
        typo ("ouput"); kept as-is since keyword callers may depend on it.
    output_cs : float
        Output cellsize.
    output_extent :
        Output extent (drigo extent).

    Returns
    -------
    ndarray
    """
    # Read array from input raster using input extent
    input_array = drigo.raster_to_array(
        input_raster, 1, input_extent, return_nodata=False)
    # Project and clip array to block
    output_array = drigo.project_array(
        input_array, resampling_type,
        input_osr, input_cs, input_extent,
        ouput_osr, output_cs, output_extent)
    return output_array
def mosaic_func(mosaic_array, input_array, mosaic_method):
    """Merge input_array into mosaic_array in place using the given method.

    'first' only fills cells that are currently NaN; 'last' overwrites any
    cell where the input is finite; 'mean' fills empty cells and averages
    cells where both arrays have data.  Unknown methods leave the mosaic
    unchanged.  NaNs in input_array are never written.
    """
    new_data = np.isfinite(input_array)
    # Nothing to mosaic when the input has no finite values
    if not np.any(new_data):
        return mosaic_array
    method = mosaic_method.lower()
    if method == 'first':
        # Fill cells that are currently empty
        fill = new_data & np.isnan(mosaic_array)
        mosaic_array[fill] = input_array[fill]
    elif method == 'last':
        # Overwrite any cells with new data
        mosaic_array[new_data] = input_array[new_data]
    elif method == 'mean':
        # Fill cells that are currently empty
        fill = new_data & np.isnan(mosaic_array)
        mosaic_array[fill] = input_array[fill]
        # Mean with existing values (overlapping cells); just-filled cells
        # are averaged with themselves, which is a no-op
        overlap = new_data & np.isfinite(mosaic_array)
        mosaic_array[overlap] += input_array[overlap]
        mosaic_array[overlap] *= 0.5
        del fill, overlap
    return mosaic_array
def load_etrf_func(array_shape, date_list, year_ws, year,
                   etrf_raster, block_tile_list, block_extent,
                   tile_image_dict, mosaic_method, resampling_type,
                   output_osr, output_cs, output_extent, debug_flag):
    """Load ETrF from rasters to an array for all images/dates

    Parameters
    ----------
    array_shape : list
        (days, rows, cols) shape of the per-tile output slices.
    date_list : list
        List of dates to be processed.
    year_ws : str
        File path of the workspace to the year folder from METRIC run.
    year :
        Key into tile_image_dict for the year that will be processed.
    etrf_raster : str
        Relative file path of each image's ETrF raster.
    block_tile_list : list
        List of the tiles to be processed in each block.
    block_extent :
        The drigo/gdal_common extent of the block.
    tile_image_dict : dict
        A dictionary of the tiles/years to be processed.
    mosaic_method : str
        NOTE(review): currently unused in this function; overlapping tiles
        are always averaged with np.nanmean below — confirm intent.
    resampling_type : int
        GDAL resampling type used to reproject the daily ETrF.
    output_osr : osr.SpatialReference
        Desired spatial reference object.
    output_cs : int
        Desired cellsize of the output.
    output_extent :
        Desired drigo/gdal_common extent of the output.
    debug_flag : bool
        If True, NumPy RuntimeWarnings will be printed.

    Returns
    -------
    ndarray
        (days, rows, cols) ETrF array averaged across overlapping tiles;
        cells with no data are NaN.
    """
    # Read in ETrF raster from each scene folder
    days, rows, cols = array_shape
    # days, x, y = etrf_array.shape
    # Per-tile stack; tiles are averaged together at the end
    tile_etrf_array = np.full(
        (days, len(block_tile_list), rows, cols), np.nan, np.float32)
    for tile_i, tile_name in enumerate(block_tile_list):
        if tile_name not in tile_image_dict[year].keys():
            continue
        for image_id in dripy.shuffle(tile_image_dict[year][tile_name]):
            tile_ws = os.path.join(year_ws, tile_name)
            image_ws = os.path.join(tile_ws, image_id)
            image_etrf_raster = os.path.join(image_ws, etrf_raster)
            if not os.path.isfile(image_etrf_raster):
                logging.debug('  ETrF raster does not exist')
                continue
            # Get projection and extent for each image
            block_tile_ds = gdal.Open(image_etrf_raster)
            block_tile_osr = drigo.raster_ds_osr(block_tile_ds)
            block_tile_cs = drigo.raster_ds_cellsize(block_tile_ds, x_only=True)
            block_tile_x, block_tile_y = drigo.raster_ds_origin(block_tile_ds)
            block_tile_extent = drigo.project_extent(
                block_extent, output_osr, block_tile_osr, output_cs)
            block_tile_extent.adjust_to_snap(
                'EXPAND', block_tile_x, block_tile_y, block_tile_cs)
            block_tile_ds = None
            # Use image_id to determine date
            date_i = date_list.index(landsat_dt_func(image_id))
            tile_etrf_array[date_i, tile_i, :, :] = clip_project_raster_func(
                image_etrf_raster, resampling_type,
                block_tile_osr, block_tile_cs, block_tile_extent,
                output_osr, output_cs, output_extent)
            # if low_etrf_limit is not None:
            #     temp_array[temp_array < low_etrf_limit] = low_etrf_limit
            # if high_etrf_limit is not None:
            #     temp_array[temp_array > high_etrf_limit] = high_etrf_limit
    # Suppress the numpy nan warning if the debug flag is off
    # (nanmean over an all-NaN tile axis raises a RuntimeWarning)
    if not debug_flag:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=RuntimeWarning)
            etrf_array = np.nanmean(tile_etrf_array, axis=1)
    else:
        etrf_array = np.nanmean(tile_etrf_array, axis=1)
    return etrf_array
# def load_etrf_swb_func(etrf_array, etrf_raster,
# low_etrf_limit, high_etrf_limit,
# date_list, year_ws, ndvi_raster, year,
# block_tile_list, block_extent,
# tile_image_dict, mosaic_method, resampling_type,
# output_osr, output_cs, output_extent, debug_flag,
# soil_water_balance_adjust_flag,
# year_tile_ndvi_paths, tile_ndvi_dict,
# awc_path, etr_input_ws, etr_input_re, ppt_input_ws,
# ppt_input_re, ndvi_threshold):
# """
#
# Parameters
# ----------
#
# Returns
# -------
# numpy.array: class:`numpy.array`
# """
# days, x, y = etrf_array.shape
# tiles = len(block_tile_list)
# temp_etrf_array = np.full((days, tiles, x, y), np.nan)
# temp_ndvi_array = np.full((days, tiles, x, y), np.nan)
# load_etrf_func(
# etrf_array, date_list, year_ws, etrf_raster, year,
# block_tile_list, block_extent,
# tile_image_dict, mosaic_method, resampling_type,
# output_osr, output_cs, output_extent, debug_flag,
# low_etrf_limit, high_etrf_limit)
# year = int(year)
# for tile_i, tile_name in enumerate(block_tile_list):
# if tile_name not in tile_image_dict[year].keys():
# continue
# for image_id in dripy.shuffle(tile_image_dict[year][tile_name]):
# tile_ws = os.path.join(year_ws, tile_name)
# image_ws = os.path.join(tile_ws, image_id)
# image_ndvi_raster = os.path.join(image_ws, ndvi_raster)
# if not os.path.isfile(image_ndvi_raster):
# continue
# # Get projection and extent for each image
# block_tile_ds = gdal.Open(image_ndvi_raster)
# block_tile_osr = drigo.raster_ds_osr(block_tile_ds)
# block_tile_cs = drigo.raster_ds_cellsize(block_tile_ds, x_only=True)
# block_tile_x, block_tile_y = drigo.raster_ds_origin(block_tile_ds)
# block_tile_extent = drigo.project_extent(
# block_extent, output_osr, block_tile_osr, output_cs)
# # block_tile_extent.adjust_to_snap(
# # 'EXPAND', block_tile_x, block_tile_y, block_tile_cs)
# block_tile_ds = None
# awc_ds = gdal.Open(awc_path)
# awc_osr = drigo.raster_ds_osr(awc_ds)
# awc_cs = drigo.raster_ds_cellsize(awc_ds, x_only=True)
# awc_x, awc_y = drigo.raster_ds_origin(awc_ds)
# awc_extent = drigo.project_extent(
# block_extent, output_osr, awc_osr, awc_cs)
# awc_extent.adjust_to_snap(
# 'EXPAND', awc_x, awc_y, awc_cs)
# awc_ds = None
# dt_object = landsat_dt_func(image_id)
# date_i = date_list.index(dt_object)
# etrf_array = daily_etrf_array[date_i,:,:,]
# if np.all(np.isnan(etrf_array)):
# continue
# etrf_background = et_common.array_swb_func(
# dt_object, awc_path, etr_input_ws, etr_input_re,
# ppt_input_ws, ppt_input_re, awc_osr, awc_cs, awc_extent,
# output_osr, output_cs, output_extent, 30)
# ndvi_array = clip_project_raster_func(
# image_ndvi_raster, resampling_type,
# block_tile_osr, block_tile_cs, block_tile_extent,
# output_osr, output_cs, output_extent)
# ndvi_mask = (ndvi_array > ndvi_threshold).astype(np.bool)
# fc = calc_fc(
# # ndvi_array=temp_ndvi_array[date_i, tile_i,:,:,],
# ndvi_array=ndvi_array,
# ndvi_full_cover=tile_ndvi_dict[year][tile_name][image_id]['cold'],
# ndvi_bare_soil=tile_ndvi_dict[year][tile_name][image_id]['hot'])
# etrf_transpiration = etrf_array - ((1 - fc) * etrf_background)
# etrf_transpiration_adj = np.max(
# np.array([etrf_transpiration, etrf_background]),
# axis=0)
# etrf_adjusted = (
# ((1 - fc) * etrf_background) + (fc * etrf_transpiration_adj))
# etrf_adjusted[ndvi_mask] = etrf_array[ndvi_mask]
# temp_etrf_array[date_i, tile_i,:,:,] = etrf_adjusted
# # Suppress the numpy nan warning if the debug flag is off
# if not debug_flag:
# with warnings.catch_warnings():
# warnings.simplefilter('ignore', category=RuntimeWarning)
# etrf_array[:] = np.nanmean(temp_etrf_array, axis=1)
# elif debug_flag:
# etrf_array[:] = np.nanmean(temp_etrf_array, axis=1)
# else:
# logging.error(
# ('Could not calculate ETRF using ' +
# 'temp_etrf_array: {}, shape {}'.format(
# temp_etrf_array, temp_etrf_array.shape)))
# sys.exit()
def spatial_fill_func(data_array, date_list, mp_flag, mp_procs):
    """Spatial gap-filling stub — returns data_array untouched.

    All parameters besides data_array are ignored; they exist so callers
    can keep a stable interface once filling is implemented.
    """
    # Spatial filling has not been implemented; return the input directly
    return data_array
# def end_fill_func(data_array, block_mask, fill_method='linear'):
# """"""
#
# # Skip block if array is all nodata
# if not np.any(block_mask):
# return data_array
# # Skip block if array is all nodata
# # elif np.all(np.isnan(data_array)):
# # return data_array
#
# # Fill first and last Landsat ETrF rasters
# # Filling anchor rasters is independent of the fill method
# # date_str_list = [d.strftime('%Y_%m_%d') for d in date_list]
#
# data_shape = data_array.shape
# data_index = np.tile(
# np.arange(data_shape[0], dtype=np.float32)[:, np.newaxis, np.newaxis],
# (data_shape[1], data_shape[2]))
# data_index[np.isnan(data_array)] = np.nan
#
# min_index = np.nanargmin(data_index, axis=0)
# max_index = np.nanargmax(data_index, axis=0)
# print min_index
# print max_index
# return data_array
def end_fill_func(data_array, block_mask, fill_method='linear'):
    """Fill start/end/anchor values using nearest value in time

    Parameters
    ----------
    data_array : ndarray
        3D float stack (time, rows, cols); modified in place and returned.
    block_mask : ndarray
        2D boolean mask of pixels to fill.
    fill_method : {'linear' or 'cubicspline'}
        For 'cubicspline', the second and second-to-last rasters are also
        filled so the spline has anchor values at both ends.

    Returns
    -------
    ndarray

    Notes
    -----
    The actual spacing/timing of the images is not being considered.
    This approach would be inefficient if the full array was passed in.
    NOTE(review): when a start/end raster is filled, pixels outside
    block_mask in that raster are overwritten with NaN — confirm intended.
    """
    # Skip block if array is all nodata
    if not np.any(block_mask):
        return data_array
    # Skip block if array is all nodata
    # elif np.all(np.isnan(data_array)):
    #     return data_array
    def fill_from_next(data_array, block_mask, data_i_list):
        """Build a raster from the first finite value per pixel, scanning
        the time axis in data_i_list order."""
        # First axis of block array is the date/doy
        fill_array = np.empty(data_array[0].shape, dtype=data_array.dtype)
        fill_array[:] = np.nan
        for data_i in data_i_list:
            next_array = data_array[data_i,:,:]
            next_mask = np.isfinite(next_array)
            # Only fill values that are nan
            next_mask &= np.isnan(fill_array)
            # Only fill values inside the block mask
            next_mask &= block_mask
            # Only fill pixels that have a usable number of scenes
            if np.any(next_mask):
                fill_array[next_mask] = next_array[next_mask]
            del next_array, next_mask
            # Stop once all usable scene pixels are filled
            if np.all(np.isfinite(fill_array[block_mask])):
                break
        return fill_array
    # The actual spacing/timing of the images is not being considered
    data_i_list = range(data_array.shape[0])
    # Calculate ETrF start raster (earliest finite value per pixel)
    if np.any(np.isnan(data_array[0, :, :])):
        data_array[0, :, :] = fill_from_next(
            data_array, block_mask, data_i_list)
    # Calculate ETrF end raster (latest finite value per pixel)
    if np.any(np.isnan(data_array[-1, :, :])):
        data_array[-1, :, :] = fill_from_next(
            data_array, block_mask, sorted(data_i_list, reverse=True))
    # Calculate start/end anchor rasters (needed by the cubic spline)
    if fill_method == 'cubicspline':
        if np.any(np.isnan(data_array[1, :, :])):
            data_array[1, :, :] = fill_from_next(
                data_array, block_mask, data_i_list)
        if np.any(np.isnan(data_array[-2, :, :])):
            data_array[-2, :, :] = fill_from_next(
                data_array, block_mask, sorted(data_i_list, reverse=True))
    return data_array
# DEADBEEF - Single core implementation
def temporal_fill_func(sub_array, sub_i_array, block_mask, fill_method='linear'):
    """Fill missing Landsat scene dates (single core).

    Fills missing pixels on each scene date by interpolating between
    surrounding scene dates, so the later daily interpolator only runs
    between dates that have data.  ``sub_array`` is modified in place.

    Parameters
    ----------
    sub_array : ndarray
        3D scene-date stack (date, row, col); NaN marks missing pixels.
    sub_i_array : ndarray
        Position of each scene-date slice on the full daily time axis.
    block_mask : ndarray
        2D bool mask of pixels to process.
    fill_method : {'linear', 'cubicspline', 'nearest'}
        Interpolation method (the default is 'linear').
        'nearest' is currently a no-op in this function.

    Returns
    -------
    ndarray

    Notes
    -----
    NOTE(review): the backward knot indexing (``interp_sub_i - 1`` /
    ``interp_sub_i - 2``) assumes the first slice(s) were already filled
    by ``end_fill_func`` (otherwise the index would wrap around to the
    end of the stack) -- TODO confirm with callers.
    """
    # Skip block if the mask is empty (all nodata)
    if not np.any(block_mask):
        return sub_array
    # Walk each scene date; interp_sub_i indexes the scene stack,
    # interp_full_i is that scene's position on the full daily axis
    for interp_sub_i, interp_full_i in enumerate(sub_i_array):
        # Interp mask is True for masked pixels that are missing (NaN)
        # on this date, i.e. the pixels that will be interpolated
        interp_mask = np.isnan(sub_array[interp_sub_i, :, :])
        interp_mask &= block_mask
        if not np.any(interp_mask):
            continue
        # Scan subsequent scene dates for usable anchor values
        for anchor_sub_i, anchor_full_i in enumerate(sub_i_array):
            if anchor_sub_i <= interp_sub_i:
                continue
            # Anchor mask: pixels still missing on the target date that
            # have data on this candidate anchor date
            anchor_mask = np.copy(interp_mask)
            anchor_mask &= np.isfinite(sub_array[anchor_sub_i, :, :])
            if not np.any(anchor_mask):
                continue
            if fill_method == 'cubicspline':
                # Cubic needs four knots: two scenes before the target,
                # the anchor scene, and one scene after the anchor
                for cubic_sub_i, cubic_full_i in enumerate(sub_i_array):
                    if cubic_sub_i <= anchor_sub_i:
                        continue
                    cubic_mask = np.copy(anchor_mask)
                    cubic_mask &= np.isfinite(sub_array[cubic_sub_i, :, :])
                    if not np.any(cubic_mask):
                        continue
                    interp_i_array = np.array([
                        sub_i_array[interp_sub_i-2], sub_i_array[interp_sub_i-1],
                        sub_i_array[anchor_sub_i], sub_i_array[cubic_sub_i]])
                    interp_i_mask = np.in1d(sub_i_array, interp_i_array)
                    # Restrict to the four knot dates and target pixels
                    interp_array = sub_array[interp_i_mask, :, :][:, cubic_mask]
                    # kind=3 -> third-order spline in interp1d
                    f = interpolate.interp1d(
                        interp_i_array, interp_array,
                        axis=0, kind=3)
                    sub_array[interp_sub_i, :, :][cubic_mask] = f(interp_full_i)
                    interp_mask[cubic_mask] = False
                    anchor_mask[cubic_mask] = False
                    # Drop interp1d objects eagerly -- the original
                    # author noted a memory leak with f/interp1d
                    del f, interp_i_array, interp_i_mask
                    del cubic_mask, interp_array
                    # Stop once every target pixel has been filled
                    if not np.any(interp_mask):
                        break
            elif fill_method == 'linear':
                # Linear needs two knots: the previous scene and the anchor
                interp_i_array = np.array(
                    [sub_i_array[interp_sub_i-1], sub_i_array[anchor_sub_i]])
                interp_i_mask = np.in1d(sub_i_array, interp_i_array)
                interp_array = sub_array[interp_i_mask, :, :][:, anchor_mask]
                f = interpolate.interp1d(
                    interp_i_array, interp_array, axis=0, kind=fill_method)
                sub_array[interp_sub_i, :, :][anchor_mask] = f(interp_full_i)
                interp_mask[anchor_mask] = False
                # Drop interp1d objects eagerly -- the original author
                # noted a memory leak with f/interp1d
                del f, interp_i_array, interp_i_mask, interp_array
                # Stop once every target pixel has been filled
                if not np.any(interp_mask):
                    break
            elif fill_method == 'nearest':
                # NOTE(review): 'nearest' is not implemented here;
                # missing pixels are left as NaN
                pass
        del interp_mask
    return sub_array
def interpolate_func(full_array, sub_array, sub_i_array,
                     block_mask, interp_method):
    """Interpolate daily values between scene dates (single core).

    This function should be used after the scene dates have already been
    filled (e.g. by ``end_fill_func``/``temporal_fill_func``).  There is
    no error checking that the start/end/anchor dates have data.
    ``full_array`` is modified in place.

    Parameters
    ----------
    full_array : ndarray
        3D daily stack (day, row, col) to fill; NaN marks missing pixels.
    sub_array : ndarray
        3D scene-date stack providing the known values.
    sub_i_array : ndarray
        Position of each scene-date slice on the full daily time axis.
    block_mask : ndarray
        2D bool mask of pixels to process.
    interp_method : str
        Interpolation kind passed to ``scipy.interpolate.interp1d``.

    Returns
    -------
    ndarray
    """
    # Skip block if the mask is empty (all nodata)
    if not np.any(block_mask):
        return full_array
    # Assume each step along the first axis is one day
    full_i_array = np.arange(full_array.shape[0])
    # Start/end (and, for cubic, second/second-to-last) days are copied
    # straight from the scene stack instead of interpolated
    copy_i_list = [full_i_array[0], full_i_array[-1]]
    if interp_method in ['cubic', 'cubicspline']:
        copy_i_list.extend([full_i_array[1], full_i_array[-2]])
    copy_i_list.sort()
    # Begin interpolating days with missing values
    for interp_full_i in full_i_array:
        # Interp mask is True for masked pixels missing on this day
        interp_mask = np.isnan(full_array[interp_full_i, :, :])
        interp_mask &= block_mask
        if not np.any(interp_mask):
            continue
        # Copy start/end/anchor dates directly to output
        # NOTE(review): assumes every copy_i_list entry is present in
        # sub_i_array -- .index() would raise ValueError otherwise
        if interp_full_i in copy_i_list:
            full_array[interp_full_i, :, :][interp_mask] = sub_array[
                list(sub_i_array).index(interp_full_i), :, :][interp_mask]
            continue
        # Select anchor days (last day(s) before interp and first day(s)
        # after); cubic needs two on each side, other kinds one
        if interp_method in ['cubic', 'cubicspline']:
            interp_i_array = sub_i_array[np.concatenate(
                (np.where(sub_i_array <= interp_full_i)[0][-2:],
                 np.where(sub_i_array > interp_full_i)[0][:2]))]
        else:
            interp_i_array = sub_i_array[np.concatenate(
                (np.where(sub_i_array <= interp_full_i)[0][-1:],
                 np.where(sub_i_array > interp_full_i)[0][:1]))]
        interp_i_mask = np.in1d(sub_i_array, interp_i_array)
        # Restrict to the anchor dates and the missing pixels
        interp_array = sub_array[interp_i_mask, :, :][:, interp_mask]
        f = interpolate.interp1d(
            interp_i_array, interp_array, axis=0, kind=interp_method)
        full_array[interp_full_i, :, :][interp_mask] = f(interp_full_i)
        # Drop interp1d objects eagerly -- the original author noted a
        # memory leak with f/interp1d
        del f, interp_array, interp_i_array
    return full_array
# def mp_interpolate_func(full_array, sub_array, sub_i_array,
# block_mask, interp_method,
# mp_flag=True, mp_procs=cpu_count()):
# """"""
# mp_procs = 1
#
# # Skip block if array is all nodata
# if not np.any(block_mask):
# return data_array
# # Skip block if array is all nodata
# # elif np.all(np.isnan(data_array)):
# # return data_array
#
# # Assume each step is a day
# full_i_array = np.arange(full_array.shape[0])
#
# # Create shared memory object of full_array
# print sub_array[0,:,:]
# print sub_array[:,0,0]
# sub_ctypes = RawArray(ctypes.c_float, sub_array.size)
# sub_shr_array = np.frombuffer(
# sub_ctypes, dtype=np.float32, count=sub_array.size)
# # Copy sub_array into the shared memory array
# # sub_shr_array = sub_array
# sub_shr_array = sub_array.flatten()
#
# # Begin interpolating scene days with missing values
# input_q = Queue()
# output_q = Queue()
# mp_tasks = 0
# for interp_full_i in full_i_array:
# # Interp mask is False where pixels have data
# # (i.e. True for pixels that will be interpolated)
# interp_mask = np.isnan(full_array[interp_full_i,:,:])
# interp_mask &= block_mask
# if not np.any(interp_mask):
# continue
# # Copy start/end/anchor dates directly to output
# # if interp_i in list(sub_i_array):
# if (interp_full_i == full_i_array[0] or
# interp_full_i == full_i_array[-1] or
# (interp_method in ['cubic', 'cubicspline'] and
# (interp_full_i == full_i_array[1] or
# interp_full_i == full_i_array[-2]))):
# full_array[interp_full_i,:,:][interp_mask] = sub_array[
# list(sub_i_array).index(interp_full_i),:,:][interp_mask]
# continue
# # Select anchor days for each day being interpolated
# if interp_method in ['cubic', 'cubicspline']:
# interp_sub_i_array = np.concatenate(
# (np.where(sub_i_array <= interp_full_i)[0][-2:],
# np.where(sub_i_array > interp_full_i)[0][:2]))
# else:
# interp_sub_i_array = np.concatenate(
# (np.where(sub_i_array <= interp_full_i)[0][-1:],
# np.where(sub_i_array > interp_full_i)[0][:1]))
# interp_full_i_array = sub_i_array[interp_sub_i_array]
# # Put the items into the processing queue
# input_q.put([
# interp_full_i, interp_full_i_array,
# interp_sub_i_array, interp_method])
# mp_tasks += 1
# del interp_full_i, interp_full_i_array, interp_sub_i_array
#
# # Start the workers
# for i in range(max(1, mp_procs - 1)):
# p = Process(
# target=interpolate_worker,
# args=(sub_ctypes, sub_array.shape, input_q, output_q)).start()
# # Start processing
# for i in range(mp_tasks):
# # for i in range(input_q.qsize()):
# interp_i, interp_array = output_q.get()
# full_array[interp_i,:,:][block_mask] = interp_array[block_mask]
# del interp_i, interp_array
# # Terminate the workers
# for i in range(max(1, mp_procs - 1)):
# input_q.put(None)
# input_q.close()
# output_q.close()
# del input_q, output_q
# del sub_ctypes, sub_shr_array
# return full_array
# def interpolate_worker(sub_ctypes, sub_shape, input_q, output_q):
# """Worker function for multiprocessing with input and output queues"""
# # sub_array = np.ctypeslib.as_array(sub_ctypes)
# # sub_array = sub_array.reshape(sub_shape)
# # sub_array.shape = sub_shape
# # sub_array = np.ctypeslib.as_array(sub_ctypes).reshape(sub_shape)
# sub_array = np.asarray(np.frombuffer(sub_ctypes, dtype=np.float32))
# sub_array = sub_array.reshape(sub_shape)
# print sub_array
# print sub_array.shape
# print sub_array[:,0,0]
# print sub_array.dtype
# print input_q
# print output_q
# while True:
# args = input_q.get()
# if args is None:
# break
# interp_full_i = args[0]
# interp_full_i_array = args[1]
# interp_sub_i_array = args[2]
# interp_method = args[3]
# f = interpolate.interp1d(
# interp_full_i_array, sub_array[interp_sub_i_array,:,:],
# axis=0, kind=interp_method)
# # f = interpolate.interp1d(
# # interp_i_array, sub_array[[0,2],:,:], axis=0, kind=interp_method)
# output_q.put([interp_full_i, f(interp_full_i)])
# # output_q.put(interpolate_mp(args))
# def interpolate_mp(args):
# """MP wrapper for calling interpolate
#
# First input parameter is the date index that will be passed through
#
# """
# f = interpolate.interp1d(args[1], args[2], axis=0, kind=args[3])
# return args[0], f(args[0])
# def interpolate_mp(tup):
# """MP wrapper for calling interpolate
#
# First input parameter is the date index that will be passed through
# Second input parameter is a mask that will be passed through
#
# """
# return tup[0], tup[1], interpolate_sp(*tup[2:])
# def interpolate_sp(x_array, y_array, interp_doy, interp_method):
# """Wrapper function for clipping and then projecting an input raster"""
# f = interpolate.interp1d(x_array, y_array, axis=0, kind=interp_method)
# return f(interp_doy)
def block_interpolate_func(full_array, sub_array, sub_i_array,
                           block_mask, fill_method, interp_method,
                           mp_flag=True, mp_procs=cpu_count()):
    """Fill and interpolate a block by sub blocks using multiprocessing.

    Each 64x64 tile is queued to ``block_interpolate_worker``, which runs
    the end-fill, temporal-fill, and daily-interpolation steps and sends
    the interpolated tile back.

    Bug fixes relative to the original implementation:
    - the queue payload now matches the 8 items the worker unpacks
      (it previously sent only 5, so every worker crashed on unpack)
    - each tile's row/col shape is tracked per tile instead of reusing
      the shape of whichever tile the setup loop saw last
    - results are written back from the worker's returned array instead
      of from the (unmodified) ``sub_array`` argument
    - worker handles are kept (``Process(...).start()`` returns None)
    - the initial queue-priming loop no longer pops past an empty list

    Parameters
    ----------
    full_array : ndarray
        3D daily stack (day, row, col) to fill; modified in place.
    sub_array : ndarray
        3D scene-date stack providing the known values.
    sub_i_array : ndarray
        Position of each scene-date slice on the full daily time axis.
    block_mask : ndarray
        2D bool mask of pixels to process.
    fill_method : str
        Method used to fill scene dates (e.g. 'linear').
    interp_method : str
        Method used for the daily interpolation.
    mp_flag : bool
        Unused; kept for interface compatibility.
    mp_procs : int
        Number of processes (the default is evaluated once at import).

    Returns
    -------
    ndarray
    """
    logging.info('  Processing by sub block')
    block_rows, block_cols = block_mask.shape
    sub_bs = 64
    # Track each tile's shape so edge tiles are written back correctly
    mp_list = []
    sub_shapes = {}
    for s_i, s_j in drigo.block_gen(block_rows, block_cols, sub_bs):
        sub_shapes[(s_i, s_j)] = drigo.block_shape(
            block_rows, block_cols, s_i, s_j, sub_bs)
        mp_list.append([s_i, s_j])

    def _task(s_i, s_j):
        # Build a queue payload matching block_interpolate_worker's unpack
        rows, cols = sub_shapes[(s_i, s_j)]
        return [
            s_i, s_j,
            full_array[:, s_i:s_i + rows, s_j:s_j + cols],
            sub_array[:, s_i:s_i + rows, s_j:s_j + cols],
            sub_i_array,
            block_mask[s_i:s_i + rows, s_j:s_j + cols],
            fill_method, interp_method]

    if mp_list:
        input_q = Queue()
        output_q = Queue()
        mp_tasks = len(mp_list)
        n_workers = max(1, mp_procs - 1)
        # Prime the queue with one task per worker (or fewer if there
        # aren't enough tiles)
        for _ in range(n_workers):
            if not mp_list:
                break
            s_i, s_j = mp_list.pop()
            input_q.put(_task(s_i, s_j))
        # Start the workers
        workers = []
        for worker_i in range(n_workers):
            p = Process(target=block_interpolate_worker,
                        args=(worker_i, input_q, output_q))
            p.start()
            workers.append(p)
        # Collect results and keep the queue fed until all tasks are done
        for _ in range(mp_tasks):
            s_i, s_j, interp_array = output_q.get()
            rows, cols = sub_shapes[(s_i, s_j)]
            full_array[:, s_i:s_i + rows, s_j:s_j + cols] = interp_array
            del interp_array
            try:
                s_i, s_j = mp_list.pop()
                input_q.put(_task(s_i, s_j))
            except IndexError:
                pass
        # Signal the workers to stop, then close the queues
        for _ in range(n_workers):
            input_q.put(None)
        input_q.close()
        output_q.close()
        del input_q, output_q
    return full_array
def block_interpolate_worker(worker_i, input_q, output_q):
    """Worker loop for multiprocessing with input and output queues.

    Pulls tile payloads off ``input_q``, runs the end-fill, temporal-fill,
    and daily-interpolation steps, and puts ``[s_i, s_j, full_array]``
    onto ``output_q``.  A ``None`` payload terminates the loop.

    Fix relative to the original: the first parameter was named ``args``
    and immediately shadowed by the queue reads; it is the worker index
    passed by the spawner and is now named accordingly (unused).
    """
    while True:
        args = input_q.get()
        if args is None:
            break
        (s_i, s_j, full_array, sub_array, sub_i_array,
         sub_mask, fill_method, interp_method) = args
        # Fill start/end/anchor scene dates, then fill between scenes
        sub_array = end_fill_func(sub_array, sub_mask, fill_method)
        sub_array = temporal_fill_func(
            sub_array, sub_i_array, sub_mask, fill_method)
        # Interpolate the daily stack from the filled scene stack
        full_array = interpolate_func(
            full_array, sub_array, sub_i_array, sub_mask, interp_method)
        output_q.put([s_i, s_j, full_array])
def load_year_array_func(input_ws, input_re, date_list,
                         mask_osr, mask_cs, mask_extent,
                         name='ETr', return_geo_array=True):
    """Load daily rasters for the years in date_list into a 3D array.

    Fix relative to the original: the projection-probe loop previously
    called ``sys.exit()`` on the FIRST date whose year raster was
    missing; it now tries the remaining dates and only exits when no
    raster exists for any of them.

    Parameters
    ----------
    input_ws : str
        Workspace containing one multi-band (band per DOY) raster per year.
    input_re :
        Compiled regular expression with a 'YYYY' named group used to
        match the yearly raster file names.
    date_list : list
        Datetime objects for the dates to load.
    mask_osr :
        Spatial reference of the study area mask.
    mask_cs : float
        Cellsize of the study area mask.
    mask_extent :
        Extent of the study area mask.
    name : str
        Display name used in log messages (the default is 'ETr').
    return_geo_array : bool
        If True, return array geo-spatial properties (the default is True).

    Returns
    -------
    ndarray, or (ndarray, input_osr, input_cs, output_extent)
    if return_geo_array is True.
    """
    logging.info('\n{}'.format(name))
    logging.debug(' {} workspace: {}'.format(name, input_ws))
    year_str_list = sorted(list(set([
        date.strftime('%Y') for date in date_list])))
    if not os.path.isdir(input_ws):
        logging.error(
            '\nERROR: The {} folder does not exist:\n {}'.format(
                name, input_ws))
        sys.exit()
    # Map year string -> raster path for files matching the regex
    input_dict = {
        input_match.group('YYYY'): os.path.join(input_ws, input_name)
        for input_name in os.listdir(os.path.join(input_ws))
        for input_match in [input_re.match(input_name)]
        if (input_match and input_match.group('YYYY') and
            input_match.group('YYYY') in year_str_list)}
    if not input_dict:
        logging.error(
            (' No {0} files found in {1} for {2}\n'
             ' The {0} year folder may be empty or the regular '
             'expression is invalid\n Exiting').format(
                name, input_ws, ', '.join(year_str_list)))
        sys.exit()
    # Assume all rasters share projection, cellsize, and snap; probe the
    # first date that has a raster to read those properties
    input_path = None
    for date_obj in date_list:
        try:
            input_path = input_dict[date_obj.strftime('%Y')]
            break
        except KeyError:
            logging.debug(
                ' {} raster for date {} does not exist'.format(
                    name, date_obj.strftime('%Y%m%d')))
    if input_path is None:
        logging.error(
            ' No {} rasters exist for any date in date_list\n Exiting'.format(
                name))
        sys.exit()
    input_ds = gdal.Open(input_path, 0)
    input_osr = drigo.raster_ds_osr(input_ds)
    input_cs = drigo.raster_ds_cellsize(input_ds, x_only=True)
    input_x, input_y = drigo.raster_ds_origin(input_ds)
    input_ds = None
    # Project the mask extent into the input rasters' spatial reference
    output_extent = drigo.project_extent(
        mask_extent, mask_osr, input_osr, mask_cs)
    output_extent.adjust_to_snap('EXPAND', input_x, input_y, input_cs)
    output_rows, output_cols = output_extent.shape(cs=input_cs)
    # Initialize the common array; NaN marks missing dates/pixels
    output_array = np.full(
        (len(date_list), output_rows, output_cols), np.nan, np.float32)
    # Read the raster band (one band per DOY) for each date
    for date_i, date_obj in enumerate(date_list):
        try:
            input_path = input_dict[date_obj.strftime('%Y')]
        except KeyError:
            logging.debug(
                ' {} - {} raster does not exist'.format(
                    date_obj.strftime('%Y%m%d'), name))
            continue
        output_array[date_i, :, :] = drigo.raster_to_array(
            input_path, band=int(date_obj.strftime('%j')),
            mask_extent=output_extent, return_nodata=False)
    if return_geo_array:
        return output_array, input_osr, input_cs, output_extent
    else:
        return output_array
def swb_adjust_fc(ndvi_array, ndvi_full_cover, ndvi_bare_soil):
    """Compute fractional cover from NDVI by linear scaling.

    Returns ``1 - (ndvi_full_cover - ndvi_array) /
    (ndvi_full_cover - ndvi_bare_soil)``: 0 at the bare-soil NDVI,
    1 at the full-cover NDVI.  Accepts scalars or ndarrays.
    """
    ndvi_deficit = ndvi_full_cover - ndvi_array
    ndvi_range = ndvi_full_cover - ndvi_bare_soil
    return 1 - ndvi_deficit / ndvi_range
def unknown_proj_osr(input_proj):
    """Return the spatial reference object for a projection string.

    Tries each supported form in turn: EPSG string ('EPSG:4326'), bare
    EPSG code, projection WKT, PROJ4 string, then a raster or feature
    dataset path.  The first parser that succeeds wins.

    Fixes relative to the original:
    - ``input_proj.replace('EPSG:')`` was missing the required second
      argument, so the EPSG-code branch always raised TypeError and was
      dead code
    - bare ``except:`` clauses narrowed to ``except Exception``
    - when every parser fails, the original fell through to
      ``return output_osr`` and raised an opaque NameError; an
      informative ValueError is raised instead

    Parameters
    ----------
    input_proj : str
        Projection string or dataset path.

    Returns
    -------
    osr.SpatialReference

    Raises
    ------
    ValueError
        If the spatial reference cannot be determined from any form.
    """
    try:
        output_osr = drigo.epsg_osr(input_proj)
        logging.debug(' OSR from EPSG string')
        return output_osr
    except Exception:
        pass
    try:
        output_osr = drigo.epsg_osr(input_proj.replace('EPSG:', ''))
        logging.debug(' OSR from EPSG integer')
        return output_osr
    except Exception:
        pass
    try:
        output_osr = drigo.proj_osr(input_proj)
        logging.debug(' OSR from WKT')
        return output_osr
    except Exception:
        pass
    try:
        output_osr = drigo.proj4_osr(input_proj)
        logging.debug(' OSR from PROJ4')
        return output_osr
    except Exception:
        pass
    try:
        output_osr = drigo.raster_path_osr(input_proj)
        logging.debug(' OSR from raster path')
        return output_osr
    except Exception:
        pass
    try:
        output_osr = drigo.feature_path_osr(input_proj)
        logging.debug(' OSR from feature path')
        return output_osr
    except Exception:
        pass
    raise ValueError(
        'Could not determine the spatial reference of: {}'.format(input_proj))
# def feature_extents(input_path):
# """Return a dictionary of zone FIDs and their extents"""
# output_dict = dict()
# # shp_driver = ogr.GetDriverByName('ESRI Shapefile')
# input_ds = ogr.Open(input_path, 0)
# input_lyr = input_ds.GetLayer()
# input_lyr.ResetReading()
# for input_ftr in input_lyr:
# input_fid = input_ftr.GetFID()
# input_extent = drigo.Extent(
# input_ftr.GetGeometryRef().GetEnvelope()).ogrenv_swap()
# output_dict[input_fid] = input_extent
# input_ds = None
# return output_dict
# def feature_geometries(input_path):
# """Return a dictionary of zone FIDs and their geometries"""
# output_dict = dict()
# # shp_driver = ogr.GetDriverByName('ESRI Shapefile')
# input_ds = ogr.Open(input_path, 0)
# input_lyr = input_ds.GetLayer()
# input_lyr.ResetReading()
# for input_ftr in input_lyr:
# input_fid = input_ftr.GetFID()
# input_geom = input_ftr.GetGeometryRef().ExportToWkt()
# output_dict[input_fid] = input_geom
# input_ds = None
# return output_dict
# def feature_field_values(input_path, field='FID'):
# """Return a dictionary of zone FIDs and their field values"""
# output_dict = dict()
# # shp_driver = ogr.GetDriverByName('ESRI Shapefile')
# input_ds = ogr.Open(input_path, 0)
# input_lyr = input_ds.GetLayer()
# input_lyr.ResetReading()
# for input_ftr in input_lyr:
# input_fid = input_ftr.GetFID()
# output_dict[input_fid] = input_ftr.GetField(field)
# input_ds = None
# return output_dict | en | 0.38241 | #-------------------------------- # Name: interpolate_support.py # Purpose: Interpolator support functions #-------------------------------- # import gc # import et_common # np.seterr(invalid='ignore') # Assume image_id has been verified as a Landsat image ID # i.e. LC08_L1TP_043030_20150415_20170227_01_T1 Return a dictionary of path/rows and their geometries # def clip_project_raster_worker(args, input_q, output_q): # """Worker function for multiprocessing with input and output queues # # First input argument is an index that will be passed through to the output # Convert projection WKT parameters to OSR objects # 4th and 7th? # # """ # while True: # args = input_q.get() # if args is None: # break # args_mod = args[:] # for i, arg in enumerate(args): # # DEADBEEF - Do all projection WKT's start with 'PROJCS'? # # Could try testing to see if the result of proj_osr is an OSR? # if type(arg) == str and arg.startswith('PROJCS'): # args_mod[i] = drigo.proj_osr(arg) # output_q.put([args_mod[0], clip_project_raster_func(*args_mod[1:])]) # # output_q.put(clip_project_raster_mp(args)) # # def clip_project_raster_mp(args): # """MP wrapper for calling clip_project_raster_func with Pool # # First input parameter is an index that will be passed through # Convert projection WKT parameters to OSR objects # 4th and 7th? # # """ # args_mod = args[:] # for i, arg in enumerate(args): # # DEADBEEF - Do all projection WKT's start with 'PROJCS'? # # Could try testing to see if the result of proj_osr is an OSR? 
# if type(arg) == str and arg.startswith('PROJCS'): # args_mod[i] = drigo.proj_osr(arg) # return args_mod[0], clip_project_raster_func(*args_mod[1:]) Clip and then project an input raster # Read array from input raster using input extent # Project and clip array to block # Only mosaic if there is new data # Fill cells that are currently empty # Overwrite any cells with new data # Fill cells that are currently empty # plt.imshow(mosaic_array) # plt.title('mosaic_array') # plt.colorbar() # plt.show() # plt.imshow(input_array) # plt.title('input_array') # plt.colorbar() # plt.show() # plt.imshow((mosaic_array - input_array)) # plt.title('mosaic_array - input_array') # plt.colorbar() # plt.show() # print((mosaic_array - input_array)) # Mean with existing value (overlapping rows) Load ETrF from rasters to an array for all images/dates Parameters ---------- array_shape : list date_list : list List of dates to be processed. year_ws : str File path of the workspace to the year folder from METRIC run. etrf_raster : str File path for the output ETrF. year : str Year that will be processed. block_tile_list : list List of the tiles to be processed in each block. block_extent(class:`gdal_common.env`): The gdal_common.extent of the block. tile_image_dict : dict A dictionary of the tiles/years to be processed. mosaic_method : str Mean, upper, or lower resampling_type : int GDAL resampling type used to reproject the daily ETrF. output_osr (class:`osr.SpatialReference): Desired spatial reference object. output_cs : int Desired cellsize of the output output_extent(class:`gdal_common.extent): Desired gdal_common.extent of the output. debug_flag : bool If True, NumPy RuntimeWarnings will be printed. 
# Read in ETrF raster from each scene folder # days, x, y = etrf_array.shape # Get projection and extent for each image # Use image_id to determine date # if low_etrf_limit is not None: # temp_array[temp_array < low_etrf_limit] = low_etrf_limit # if high_etrf_limit is not None: # temp_array[temp_array > high_etrf_limit] = high_etrf_limit # Suppress the numpy nan warning if the debug flag is off # def load_etrf_swb_func(etrf_array, etrf_raster, # low_etrf_limit, high_etrf_limit, # date_list, year_ws, ndvi_raster, year, # block_tile_list, block_extent, # tile_image_dict, mosaic_method, resampling_type, # output_osr, output_cs, output_extent, debug_flag, # soil_water_balance_adjust_flag, # year_tile_ndvi_paths, tile_ndvi_dict, # awc_path, etr_input_ws, etr_input_re, ppt_input_ws, # ppt_input_re, ndvi_threshold): # """ # # Parameters # ---------- # # Returns # ------- # numpy.array: class:`numpy.array` # """ # days, x, y = etrf_array.shape # tiles = len(block_tile_list) # temp_etrf_array = np.full((days, tiles, x, y), np.nan) # temp_ndvi_array = np.full((days, tiles, x, y), np.nan) # load_etrf_func( # etrf_array, date_list, year_ws, etrf_raster, year, # block_tile_list, block_extent, # tile_image_dict, mosaic_method, resampling_type, # output_osr, output_cs, output_extent, debug_flag, # low_etrf_limit, high_etrf_limit) # year = int(year) # for tile_i, tile_name in enumerate(block_tile_list): # if tile_name not in tile_image_dict[year].keys(): # continue # for image_id in dripy.shuffle(tile_image_dict[year][tile_name]): # tile_ws = os.path.join(year_ws, tile_name) # image_ws = os.path.join(tile_ws, image_id) # image_ndvi_raster = os.path.join(image_ws, ndvi_raster) # if not os.path.isfile(image_ndvi_raster): # continue # # Get projection and extent for each image # block_tile_ds = gdal.Open(image_ndvi_raster) # block_tile_osr = drigo.raster_ds_osr(block_tile_ds) # block_tile_cs = drigo.raster_ds_cellsize(block_tile_ds, x_only=True) # block_tile_x, block_tile_y = 
drigo.raster_ds_origin(block_tile_ds) # block_tile_extent = drigo.project_extent( # block_extent, output_osr, block_tile_osr, output_cs) # # block_tile_extent.adjust_to_snap( # # 'EXPAND', block_tile_x, block_tile_y, block_tile_cs) # block_tile_ds = None # awc_ds = gdal.Open(awc_path) # awc_osr = drigo.raster_ds_osr(awc_ds) # awc_cs = drigo.raster_ds_cellsize(awc_ds, x_only=True) # awc_x, awc_y = drigo.raster_ds_origin(awc_ds) # awc_extent = drigo.project_extent( # block_extent, output_osr, awc_osr, awc_cs) # awc_extent.adjust_to_snap( # 'EXPAND', awc_x, awc_y, awc_cs) # awc_ds = None # dt_object = landsat_dt_func(image_id) # date_i = date_list.index(dt_object) # etrf_array = daily_etrf_array[date_i,:,:,] # if np.all(np.isnan(etrf_array)): # continue # etrf_background = et_common.array_swb_func( # dt_object, awc_path, etr_input_ws, etr_input_re, # ppt_input_ws, ppt_input_re, awc_osr, awc_cs, awc_extent, # output_osr, output_cs, output_extent, 30) # ndvi_array = clip_project_raster_func( # image_ndvi_raster, resampling_type, # block_tile_osr, block_tile_cs, block_tile_extent, # output_osr, output_cs, output_extent) # ndvi_mask = (ndvi_array > ndvi_threshold).astype(np.bool) # fc = calc_fc( # # ndvi_array=temp_ndvi_array[date_i, tile_i,:,:,], # ndvi_array=ndvi_array, # ndvi_full_cover=tile_ndvi_dict[year][tile_name][image_id]['cold'], # ndvi_bare_soil=tile_ndvi_dict[year][tile_name][image_id]['hot']) # etrf_transpiration = etrf_array - ((1 - fc) * etrf_background) # etrf_transpiration_adj = np.max( # np.array([etrf_transpiration, etrf_background]), # axis=0) # etrf_adjusted = ( # ((1 - fc) * etrf_background) + (fc * etrf_transpiration_adj)) # etrf_adjusted[ndvi_mask] = etrf_array[ndvi_mask] # temp_etrf_array[date_i, tile_i,:,:,] = etrf_adjusted # # Suppress the numpy nan warning if the debug flag is off # if not debug_flag: # with warnings.catch_warnings(): # warnings.simplefilter('ignore', category=RuntimeWarning) # etrf_array[:] = np.nanmean(temp_etrf_array, 
axis=1) # elif debug_flag: # etrf_array[:] = np.nanmean(temp_etrf_array, axis=1) # else: # logging.error( # ('Could not calculate ETRF using ' + # 'temp_etrf_array: {}, shape {}'.format( # temp_etrf_array, temp_etrf_array.shape))) # sys.exit() # def end_fill_func(data_array, block_mask, fill_method='linear'): # """""" # # # Skip block if array is all nodata # if not np.any(block_mask): # return data_array # # Skip block if array is all nodata # # elif np.all(np.isnan(data_array)): # # return data_array # # # Fill first and last Landsat ETrF rasters # # Filling anchor rasters is independent of the fill method # # date_str_list = [d.strftime('%Y_%m_%d') for d in date_list] # # data_shape = data_array.shape # data_index = np.tile( # np.arange(data_shape[0], dtype=np.float32)[:, np.newaxis, np.newaxis], # (data_shape[1], data_shape[2])) # data_index[np.isnan(data_array)] = np.nan # # min_index = np.nanargmin(data_index, axis=0) # max_index = np.nanargmax(data_index, axis=0) # print min_index # print max_index # return data_array Fill start/end/anchor values using nearest value in time Parameters ---------- data_array : ndarray block_mask : ndarray fill_method : {'linear' or 'cubicspline'} Returns ------- ndarray Notes ----- The actual spacing/timing of the images is not being considered. This approach would be inefficient if the full array was passed in. 
# Skip block if array is all nodata # Skip block if array is all nodata # elif np.all(np.isnan(data_array)): # return data_array # First axis of block array is the date/doy # Only fill values that are nan # Only fill values that are nan # Only fill pixels that have a usable number of scenes # Stop once all usable scene pixels are filled # The actual spacing/timing of the images is not being considered # Calculate ETrF start raster # Calculate ETrF end raster # Calculate start/end anchor rasters # DEADBEEF - Single core implementation Single core temporal fill function Fill Landsat scene dates so that interpolator only runs between known dates Parameters ---------- sub_array : ndarray sub_i_array : ndarray block_mask : ndarray fill_method : {'linear' or 'cubicspline'} Interpolation method (the default is 'linear'). Returns ------- ndarray # Skip block if array is all nodata # Skip block if array is all nodata # elif np.all(np.isnan(data_array)): # return sub_array # Begin interpolating scene days with missing values # for interp_i, interp_doy in enumerate(sub_i_array): # Interp mask is False where pixels have data # (i.e. 
True for pixels that will be interpolated) # logging.info(' INTERP {} {}'.format( # interp_sub_i, interp_full_i)) # list of subsequent days # Interpolate when next DOY has data # logging.info(' ANCHOR {} {}'.format( # anchor_sub_i, anchor_full_i)) # logging.info(' CUBIC {} {}'.format( # cubic_sub_i, cubic_full_i)) # sub_array[interp_sub_i,:,:][anchor_mask] = f(interp_full_i).astype(np.float32) # sub_array[interp_sub_i,:,:][anchor_mask] = f(interp_full_i).astype(np.float32) # There is a memory leak with f/interp1d # gc.collect() Single core interpolator function This function should be used after scene dates have already been filled There is no error checking to see if the start/end/anchor have data Parameters ---------- full_array : ndarray sub_array : ndarray sub_i_array : ndarray block_mask : ndarray interp_method : str Returns ------- ndarray # Skip block if array is all nodata # Skip block if array is all nodata # elif np.all(np.isnan(data_array)): # return full_array # Assume each step is a day # Copy start/end/anchor dates directly to output # Begin interpolating scene days with missing values # Interp mask is False where pixels have data # (i.e. 
True for pixels that will be interpolated) # logging.info(' INTERP {}'.format(interp_full_i)) # Copy start/end/anchor dates directly to output # if interp_full_i in list(sub_i_array): # Select anchor days (last day(s) before interp and first day(s) after) # data_array[interp_full_i,:,:][:,interp_mask] = f(interp_full_i).astype(np.float32) # There is a memory leak with f/interp1d # gc.collect() # def mp_interpolate_func(full_array, sub_array, sub_i_array, # block_mask, interp_method, # mp_flag=True, mp_procs=cpu_count()): # """""" # mp_procs = 1 # # # Skip block if array is all nodata # if not np.any(block_mask): # return data_array # # Skip block if array is all nodata # # elif np.all(np.isnan(data_array)): # # return data_array # # # Assume each step is a day # full_i_array = np.arange(full_array.shape[0]) # # # Create shared memory object of full_array # print sub_array[0,:,:] # print sub_array[:,0,0] # sub_ctypes = RawArray(ctypes.c_float, sub_array.size) # sub_shr_array = np.frombuffer( # sub_ctypes, dtype=np.float32, count=sub_array.size) # # Copy sub_array into the shared memory array # # sub_shr_array = sub_array # sub_shr_array = sub_array.flatten() # # # Begin interpolating scene days with missing values # input_q = Queue() # output_q = Queue() # mp_tasks = 0 # for interp_full_i in full_i_array: # # Interp mask is False where pixels have data # # (i.e. 
True for pixels that will be interpolated) # interp_mask = np.isnan(full_array[interp_full_i,:,:]) # interp_mask &= block_mask # if not np.any(interp_mask): # continue # # Copy start/end/anchor dates directly to output # # if interp_i in list(sub_i_array): # if (interp_full_i == full_i_array[0] or # interp_full_i == full_i_array[-1] or # (interp_method in ['cubic', 'cubicspline'] and # (interp_full_i == full_i_array[1] or # interp_full_i == full_i_array[-2]))): # full_array[interp_full_i,:,:][interp_mask] = sub_array[ # list(sub_i_array).index(interp_full_i),:,:][interp_mask] # continue # # Select anchor days for each day being interpolated # if interp_method in ['cubic', 'cubicspline']: # interp_sub_i_array = np.concatenate( # (np.where(sub_i_array <= interp_full_i)[0][-2:], # np.where(sub_i_array > interp_full_i)[0][:2])) # else: # interp_sub_i_array = np.concatenate( # (np.where(sub_i_array <= interp_full_i)[0][-1:], # np.where(sub_i_array > interp_full_i)[0][:1])) # interp_full_i_array = sub_i_array[interp_sub_i_array] # # Put the items into the processing queue # input_q.put([ # interp_full_i, interp_full_i_array, # interp_sub_i_array, interp_method]) # mp_tasks += 1 # del interp_full_i, interp_full_i_array, interp_sub_i_array # # # Start the workers # for i in range(max(1, mp_procs - 1)): # p = Process( # target=interpolate_worker, # args=(sub_ctypes, sub_array.shape, input_q, output_q)).start() # # Start processing # for i in range(mp_tasks): # # for i in range(input_q.qsize()): # interp_i, interp_array = output_q.get() # full_array[interp_i,:,:][block_mask] = interp_array[block_mask] # del interp_i, interp_array # # Terminate the workers # for i in range(max(1, mp_procs - 1)): # input_q.put(None) # input_q.close() # output_q.close() # del input_q, output_q # del sub_ctypes, sub_shr_array # return full_array # def interpolate_worker(sub_ctypes, sub_shape, input_q, output_q): # """Worker function for multiprocessing with input and output queues""" # # 
sub_array = np.ctypeslib.as_array(sub_ctypes) # # sub_array = sub_array.reshape(sub_shape) # # sub_array.shape = sub_shape # # sub_array = np.ctypeslib.as_array(sub_ctypes).reshape(sub_shape) # sub_array = np.asarray(np.frombuffer(sub_ctypes, dtype=np.float32)) # sub_array = sub_array.reshape(sub_shape) # print sub_array # print sub_array.shape # print sub_array[:,0,0] # print sub_array.dtype # print input_q # print output_q # while True: # args = input_q.get() # if args is None: # break # interp_full_i = args[0] # interp_full_i_array = args[1] # interp_sub_i_array = args[2] # interp_method = args[3] # f = interpolate.interp1d( # interp_full_i_array, sub_array[interp_sub_i_array,:,:], # axis=0, kind=interp_method) # # f = interpolate.interp1d( # # interp_i_array, sub_array[[0,2],:,:], axis=0, kind=interp_method) # output_q.put([interp_full_i, f(interp_full_i)]) # # output_q.put(interpolate_mp(args)) # def interpolate_mp(args): # """MP wrapper for calling interpolate # # First input parameter is the date index that will be passed through # # """ # f = interpolate.interp1d(args[1], args[2], axis=0, kind=args[3]) # return args[0], f(args[0]) # def interpolate_mp(tup): # """MP wrapper for calling interpolate # # First input parameter is the date index that will be passed through # Second input parameter is a mask that will be passed through # # """ # return tup[0], tup[1], interpolate_sp(*tup[2:]) # def interpolate_sp(x_array, y_array, interp_doy, interp_method): # """Wrapper function for clipping and then projecting an input raster""" # f = interpolate.interp1d(x_array, y_array, axis=0, kind=interp_method) # return f(interp_doy) Interpolate sub block using multiprocessing Parameters ---------- full_array : ndarray sub_array : ndarray sub_i_array : ndarray block_mask : ndarray fill_method : str interp_method : str mp_flag : bool mp_procs : int Returns ------- ndarray # logging.info(' Sub y: {:5d} x: {:5d}'.format(s_i, s_j)) # logging.info(' Sub rows: {} cols: 
{}'.format(sub_rows, sub_cols)) # Load some inputs into the input queue # Load all inputs into the input queue # for mp_args in mp_list: # input_q.put(mp_args) # Start workers # Get data from workers and add new items to queue # Close workers # Close queues Worker function for multiprocessing with input and output queues Load Parameters ---------- input_ws : str input_re date_list : list output_osr output_cs : float output_extent name : str return_geo_array : bool If True, return array geo-spatial properties (the default is True). Returns ------- ndarray # Assume all rasters have same projection, cellsize, and snap # input_proj = drigo.osr_proj(input_osr) # Get mask extent in the original spat. ref. # Initialize the common array # Read in the raster for each date Return the spatial reference object for a projection string # def feature_extents(input_path): # """Return a dictionary of zone FIDs and their extents""" # output_dict = dict() # # shp_driver = ogr.GetDriverByName('ESRI Shapefile') # input_ds = ogr.Open(input_path, 0) # input_lyr = input_ds.GetLayer() # input_lyr.ResetReading() # for input_ftr in input_lyr: # input_fid = input_ftr.GetFID() # input_extent = drigo.Extent( # input_ftr.GetGeometryRef().GetEnvelope()).ogrenv_swap() # output_dict[input_fid] = input_extent # input_ds = None # return output_dict # def feature_geometries(input_path): # """Return a dictionary of zone FIDs and their geometries""" # output_dict = dict() # # shp_driver = ogr.GetDriverByName('ESRI Shapefile') # input_ds = ogr.Open(input_path, 0) # input_lyr = input_ds.GetLayer() # input_lyr.ResetReading() # for input_ftr in input_lyr: # input_fid = input_ftr.GetFID() # input_geom = input_ftr.GetGeometryRef().ExportToWkt() # output_dict[input_fid] = input_geom # input_ds = None # return output_dict # def feature_field_values(input_path, field='FID'): # """Return a dictionary of zone FIDs and their field values""" # output_dict = dict() # # shp_driver = ogr.GetDriverByName('ESRI 
Shapefile') # input_ds = ogr.Open(input_path, 0) # input_lyr = input_ds.GetLayer() # input_lyr.ResetReading() # for input_ftr in input_lyr: # input_fid = input_ftr.GetFID() # output_dict[input_fid] = input_ftr.GetField(field) # input_ds = None # return output_dict | 2.328259 | 2 |
test-framework/test-suites/integration/tests/load/json/test_load_json_network.py | knutsonchris/stacki | 123 | 6614769 | <reponame>knutsonchris/stacki<gh_stars>100-1000
class TestLoadJsonNetwork:
    """
    Test that loading network data works properly
    """
    def skip_test_load_json_network(self, host, test_file):
        """Round-trip check: load network JSON into stacki, dump it back,
        and verify the dump matches the imported file.

        :param host: testinfra-style host fixture used to run `stack` commands
        :param test_file: fixture resolving a test-data relative path
        """
        # open the file containing the network data, stripping the trailing new line
        path = test_file('load/json/network.json')
        with open(path) as f:
            imported_network_data = f.read().strip()

        # load the data with stack load json
        results = host.run(f'stack load json network file={path}')
        assert results.rc == 0

        # dump the data
        results = host.run('stack dump network')
        assert results.rc == 0
        dumped_network_data = results.stdout.strip()

        # make sure that they are the same
        # BUGFIX: the original compared set(<str>) == set(<str>), which builds
        # sets of individual *characters* and passes for any two strings with
        # the same character inventory. Compare the sets of lines instead
        # (order-insensitive, content-sensitive).
        assert set(dumped_network_data.splitlines()) == set(imported_network_data.splitlines())
| class TestLoadJsonNetwork:
"""
Test that loading network data works properly
"""
def skip_test_load_json_network(self, host, test_file):
# open the file containing the network data, stripping the trailing new line
path = test_file('load/json/network.json')
with open(path) as f:
imported_network_data = f.read().strip()
# load the data with stack load json
results = host.run(f'stack load json network file={path}')
assert results.rc == 0
# dump the data
results = host.run('stack dump network')
assert results.rc == 0
dumped_network_data = results.stdout.strip()
# make sure that they are the same
assert set(dumped_network_data) == set(imported_network_data) | en | 0.851294 | Test that loading network data works properly # open the file containing the network data, stripping the trailing new line # load the data with stack load json # dump the data # make sure that they are the same | 2.802148 | 3 |
FORBIDDEN = ('ab', 'cd', 'pq', 'xy')  # substrings that disqualify a string


def is_nice(s):
    """Return True if *s* is "nice" per Advent of Code 2015, day 5, part 1.

    A nice string:
      * contains none of the forbidden substrings ab/cd/pq/xy,
      * contains at least three vowels (counted with repetition),
      * contains at least one letter appearing twice in a row.
    """
    if any(bad in s for bad in FORBIDDEN):
        return False
    if sum(s.count(v) for v in 'aeiou') < 3:
        return False
    # at least one pair of equal adjacent characters
    return any(a == b for a, b in zip(s, s[1:]))


def count_nice(strings):
    """Count how many of *strings* are nice.

    :param strings: iterable of candidate strings (trailing newlines are harmless)
    :return: number of nice strings
    """
    return sum(1 for s in strings if is_nice(s))


if __name__ == '__main__':
    # BUGFIX: the original used the Python-2-only `print total` statement,
    # which is a SyntaxError under Python 3, and read the input file at
    # module level (unguarded side effect on import).
    with open('5.in') as f:
        print(count_nice(f.readlines()))
strings = f.readlines()
total = 0
for s in strings:
if any([d in s for d in ['ab', 'cd', 'pq', 'xy']]):
continue
if sum([s.count(v) for v in 'aeiou']) < 3:
continue
if 0 not in [ord(t) - ord(a) for a, t in zip(s, s[1:])]:
continue
total += 1
print total | none | 1 | 3.242716 | 3 | |
website/views.py | AbderrhmanAbdellatif/Claculate-the-Distance-Project | 0 | 6614771 | from flask import Blueprint, render_template, request, flash, jsonify
from flask_login import login_required, current_user
from . import db
import json
views = Blueprint('views', __name__)
| from flask import Blueprint, render_template, request, flash, jsonify
from flask_login import login_required, current_user
from . import db
import json
views = Blueprint('views', __name__)
| none | 1 | 1.673415 | 2 | |
coolisf/rest/apps.py | letuananh/intsem.fx | 8 | 6614772 | from django.apps import AppConfig
class DjangoisfConfig(AppConfig):
    # Django application configuration for the "djangoisf" app;
    # `name` is the dotted module path Django uses to locate the app.
    name = 'djangoisf'
| from django.apps import AppConfig
class DjangoisfConfig(AppConfig):
name = 'djangoisf'
| none | 1 | 1.122297 | 1 | |
code/resources/public/ui/downloads/nuvlabox-self-registration.py | nuvla/ui | 8 | 6614773 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""NuvlaBox Self Registration script
This script is part of the NuvlaBox industrialization process.
Given the right user credentials and NuvlaBox initialization attributes,
this script will automatically register a new NuvlaBox resource in Nuvla.
Arguments:
:param nuvlabox-installation-trigger-json: JSON string of the NuvlaBox Installation Trigger's content. See schema below
The expected JSON schema is:
{
"apikey": "credential/<uuid>",
"apisecret": "<secret>",
"endpoint": "<nuvla endpoint>",
"version": "<nuvlabox-engine release>",
"script": "<link to this script>",
"name": "<basename nuvlaboxes>",
"description": "<base description>",
"vpn": "infrastructure-service/<uuid>",
"assets": ["docker-compose.yml", <other compose files to install alongside>],
"environment": {
"HOSTNAME": "myhostname",
"SKIP_MINIMUM_REQUIREMENTS": True
},
"ssh": {
"ids": ["credential/111-bbb-ccc", ...],
"public-keys": ["ssh-rsa AAA...", ...]
}
}
:returns NuvlaBox UUID
"""
import requests
import argparse
import json
import time
import os
from subprocess import run, PIPE, STDOUT, TimeoutExpired
from uuid import getnode as get_mac
__copyright__ = "Copyright (C) 2020 SixSq"
__email__ = "<EMAIL>"
def arguments():
    """Build the NuvlaBox agent command-line parser and parse sys.argv.

    :return: parsed argparse namespace
    """
    default_workdir = '/opt/nuvlabox/installation'
    parser = argparse.ArgumentParser(description='NuvlaBox Agent')
    parser.add_argument(
        '--nuvlabox-installation-trigger-json',
        dest='nb_trigger_content', default=None, metavar='JSON',
        help="JSON content, as a string, of the NuvlaBox installation USB trigger file")
    parser.add_argument(
        '--nuvlabox-installation-dir',
        dest='nb_workdir', default=default_workdir, metavar='PATH',
        help="Location on the filesystem where to keep the NuvlaBox Engine installation files")
    return parser.parse_args()
def prepare_nuvlabox_engine_installation(version, compose_files, workdir, keep_files=None):
    """ Prepares the working environment for installing the NuvlaBox Engine

    :param version: GitHub release of the NuvlaBox Engine
    :param compose_files: list of release assets to download
    :param workdir: path where the compose files are to be saved
    :param keep_files: list of files that are not supposed to be modified
        during this preparation (default: none)

    :returns tuple of (absolute path to the NuvlaBox Engine installer script,
        list of paths of the downloaded compose files)
    """
    # Avoid a mutable default argument; behavior is unchanged for callers.
    if keep_files is None:
        keep_files = []

    github_release = 'https://github.com/nuvlabox/deployment/releases/download/{}'.format(version)

    # Make sure the working directory exists (idempotent)
    os.makedirs(workdir, exist_ok=True)

    # Backup the previous installation files, tagging each backup with the
    # current timestamp so consecutive runs keep distinct backups.
    # BUGFIX: the timestamp was passed to str.format() but the format string
    # had no placeholder for it ("{}.backup".format(filename, now)), so every
    # run produced the same "<name>.backup" path and silently clobbered the
    # previous backup.
    existing_files = os.listdir(workdir)
    now = int(time.time())
    for efile in existing_files:
        filename = "{}/{}".format(workdir, efile)
        # Skip files that are already backups and files the caller wants kept
        if not filename.endswith("backup") and filename not in keep_files:
            new_file = "{}.{}.backup".format(filename, now)
            os.rename(filename, new_file)

    # Download every release asset (compose files) into the working directory
    final_compose_files = []
    for file in compose_files:
        gh_url = "{}/{}".format(github_release, file)
        r = requests.get(gh_url)
        r.raise_for_status()
        save_compose_file_at = "{}/{}".format(workdir, file)
        with open(save_compose_file_at, 'wb') as f:
            f.write(r.content)
        final_compose_files.append(save_compose_file_at)

    # also download install file shipped with the release
    installer_file_name = "install.sh"
    installer_file = "{}/{}".format(workdir, installer_file_name)
    installer_file_gh = "{}/{}".format(github_release, installer_file_name)
    r = requests.get(installer_file_gh)
    r.raise_for_status()
    with open(installer_file, 'wb') as f:
        f.write(r.content)

    return installer_file, final_compose_files
def install_nuvlabox_engine(cmd, env=None, timeout=600):
    """ Runs a command

    :param cmd: command to be executed, as a list of arguments
    :param env: environment mapping to pass to the subprocess; defaults to a
        copy of the current process environment taken *at call time*.
        BUGFIX: the original default ``env=os.environ.copy()`` was evaluated
        once at import time, so environment changes made after import were
        silently ignored.
    :param timeout: time in seconds after which the command will abruptly be terminated

    :raises Exception: if the command times out or exits with a non-zero status
        (the combined stdout/stderr is the exception message)
    """
    if env is None:
        env = os.environ.copy()
    try:
        result = run(cmd, stdout=PIPE, stderr=STDOUT, env=env, input=None,
                     timeout=timeout, universal_newlines=True)
    except TimeoutExpired:
        raise Exception('Command execution timed out after {} seconds'.format(timeout))

    if result.returncode != 0:
        raise Exception(result.stdout)

    # Surface the installer's output in this script's log
    print(result.stdout)
if __name__ == "__main__":
    # Parse the trigger payload (see module docstring for the JSON schema)
    args = arguments()
    nb_trigger_json = json.loads(args.nb_trigger_content)
    nb_workdir = args.nb_workdir.rstrip('/')
    env_file = "{}/.env".format(nb_workdir)

    # Check if env files already exists
    # cause that will tell us if this is the first time we are self-registring this NB or not
    # if there's a previous env file (thus previous installation), we will check if it is COMMISSIONED or not
    # based on this check, we will either UPDATE or OVERWRITE the existing installation, respectively
    installation_strategy = "OVERWRITE"  # default
    nuvlabox_id = None
    previous_conf = {}

    if not os.path.exists(nb_workdir):
        os.makedirs(nb_workdir)
    else:
        if os.path.isfile(env_file):
            # .env file exists - get the previous details (KEY=VALUE per line)
            with open(env_file) as f:
                for l in f.read().splitlines():
                    if l and "=" in l:
                        varname = l.split('=', 1)[0]
                        varvalue = l.split('=', 1)[1]
                        previous_conf[varname] = varvalue

    # Extract the trigger attributes
    nuvla = nb_trigger_json['endpoint']
    nuvla_endpoint = nb_trigger_json['endpoint'].rstrip('/').rstrip('/api') + "/api"
    nb_basename = nb_trigger_json.get('name', '')
    nb_basedescription = nb_trigger_json.get('description', ' ')
    nb_release = nb_trigger_json['version']
    nb_vpn_server_id = nb_trigger_json.get('vpn')
    nb_assets = nb_trigger_json['assets']
    nb_ssh = nb_trigger_json.get('ssh', {})
    new_conf = nb_trigger_json.get('environment', {})
    nb_ssh_pubkeys = nb_ssh.get('public-keys', [])
    # NuvlaBox schema version is the release's major version number
    nb_version = nb_release.split('.')[0]

    login_apikey = {
        "template": {
            "href": "session-template/api-key",
            "key": nb_trigger_json['apikey'],
            "secret": nb_trigger_json['apisecret']
        }
    }

    s = requests.Session()

    # login; on an SSL failure, retry once with certificate verification
    # disabled and remember that choice for every later request
    connection_verify = True
    login_endpoint = nuvla_endpoint + "/session"
    print("Nuvla login at {}...".format(login_endpoint))
    try:
        session = s.post(login_endpoint, json=login_apikey)
    except requests.exceptions.SSLError:
        connection_verify = False
        session = s.post(login_endpoint, json=login_apikey, verify=connection_verify)
    session.raise_for_status()

    new_conf['NUVLA_ENDPOINT'] = nuvla
    new_conf['NUVLA_ENDPOINT_INSECURE'] = str(not connection_verify)
    if nb_ssh_pubkeys:
        # NOTE(review): keys are joined with a literal backslash-n sequence,
        # presumably so the .env file stays one line per variable — confirm
        # the consumer un-escapes it
        new_conf['NUVLABOX_SSH_PUB_KEY'] = '\\n'.join(nb_ssh_pubkeys)

    if previous_conf:
        if "NUVLABOX_UUID" in previous_conf:
            previous_uuid = previous_conf['NUVLABOX_UUID']
            print("Existing env file from previous deployment found, with NuvlaBox UUID {}".format(previous_uuid))
            check_nb_endpoint = nuvla_endpoint + "/" + previous_uuid
            nb = s.get(check_nb_endpoint, verify=connection_verify)
            if nb.status_code == 200:
                state = nb.json().get('state', 'UNKNOWN')
                if state in ["DECOMMISSIONED", 'ERROR']:
                    # this NuvlaBox has been decommissioned or is in error, just overwrite the local installation
                    print("Previous NuvlaBox {} is in state {}. Going to OVERWRITE it...".format(previous_uuid, state))
                else:
                    # Resource is still alive: keep its UUID and only UPDATE
                    # when the rest of the environment is unchanged
                    new_conf['NUVLABOX_UUID'] = previous_uuid
                    if new_conf == previous_conf:
                        print("NuvlaBox environment hasn't changed, performing an UPDATE")
                        installation_strategy = "UPDATE"
                    else:
                        print("NuvlaBox environment different from existing installation, performing an OVERWRITE")
            elif nb.status_code == 404:
                # doesn't exist, so let's just OVERWRITE this local installation
                print("Previous NuvlaBox {} doesn't exist anymore...creating new one".format(previous_uuid))
            else:
                # something went wrong, either a network issue or we have the wrong credentials to access the
                # current NuvlaBox resource...just throw the error and do nothing
                nb.raise_for_status()
        else:
            print("There's a previous NuvlaBox environment but couldn't find a NuvlaBox UUID...let's OVERWRITE")

    if installation_strategy == "OVERWRITE":
        print("Creating new NuvlaBox resource...")
        # Use the MAC address as a stable unique suffix; fall back to the
        # current timestamp if it cannot be read
        try:
            unique_id = str(get_mac())
        except:
            unique_id = str(int(time.time()))
        nb_name = nb_basename.rstrip("_") + "_" + unique_id if nb_basename else unique_id
        nb_description = "{} - self-registration number {}".format(nb_basedescription, unique_id)
        nuvlabox = {
            "name": nb_name,
            "description": nb_description,
            "version": int(nb_version)
        }
        if nb_vpn_server_id:
            nuvlabox['vpn-server-id'] = nb_vpn_server_id
        if nb_ssh and "ids" in nb_ssh and isinstance(nb_ssh.get('ids'), list):
            nuvlabox['ssh-keys'] = nb_ssh.get('ids')
        new_nb_endpoint = nuvla_endpoint + "/nuvlabox"
        nb_id = s.post(new_nb_endpoint, json=nuvlabox, verify=connection_verify)
        nb_id.raise_for_status()
        nuvlabox_id = nb_id.json()["resource-id"]
        print("Created NuvlaBox resource {} in {}".format(nuvlabox_id, nuvla))
        new_conf['NUVLABOX_UUID'] = nuvlabox_id

    # update env file
    print("Setting up environment {} at {}".format(new_conf, env_file))
    with open(env_file, 'w') as f:
        for varname, varvalue in new_conf.items():
            f.write("{}={}\n".format(varname, varvalue))

    try:
        installer_file, compose_files = prepare_nuvlabox_engine_installation(nb_release,
                                                                             nb_assets,
                                                                             nb_workdir,
                                                                             keep_files=[env_file])
        install_command = ["sh", installer_file, "--env-file={}".format(env_file),
                           "--compose-files={}".format(",".join(compose_files)),
                           "--installation-strategy={}".format(installation_strategy), "--action=INSTALL"]
        print("Installing NuvlaBox Engine - this can take a few minutes...")
        install_nuvlabox_engine(install_command)
    except:
        # On any error, cleanup the resource in Nuvla
        print("NuvlaBox Engine installation failed")
        if nuvlabox_id:
            print("removing {} from Nuvla".format(nuvlabox_id))
            s.delete(nuvla_endpoint + "/" + nuvlabox_id, verify=connection_verify)
        raise
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""NuvlaBox Self Registration script
This script is part of the NuvlaBox industrialization process.
Given the right user credentials and NuvlaBox initialization attributes,
this script will automatically register a new NuvlaBox resource in Nuvla.
Arguments:
:param nuvlabox-installation-trigger-json: JSON string of the NuvlaBox Installation Trigger's content. See schema below
The expected JSON schema is:
{
"apikey": "credential/<uuid>",
"apisecret": "<secret>",
"endpoint": "<nuvla endpoint>",
"version": "<nuvlabox-engine release>",
"script": "<link to this script>",
"name": "<basename nuvlaboxes>",
"description": "<base description>",
"vpn": "infrastructure-service/<uuid>",
"assets": ["docker-compose.yml", <other compose files to install alongside>],
"environment": {
"HOSTNAME": "myhostname",
"SKIP_MINIMUM_REQUIREMENTS": True
},
"ssh": {
"ids": ["credential/111-bbb-ccc", ...],
"public-keys": ["ssh-rsa AAA...", ...]
}
}
:returns NuvlaBox UUID
"""
import requests
import argparse
import json
import time
import os
from subprocess import run, PIPE, STDOUT, TimeoutExpired
from uuid import getnode as get_mac
__copyright__ = "Copyright (C) 2020 SixSq"
__email__ = "<EMAIL>"
def arguments():
""" Builds a generic argparse
:return: parser
"""
workdir = '/opt/nuvlabox/installation'
parser = argparse.ArgumentParser(description='NuvlaBox Agent')
parser.add_argument('--nuvlabox-installation-trigger-json', dest='nb_trigger_content', default=None, metavar='JSON',
help="JSON content, as a string, of the NuvlaBox installation USB trigger file")
parser.add_argument('--nuvlabox-installation-dir', dest='nb_workdir', default=workdir, metavar='PATH',
help="Location on the filesystem where to keep the NuvlaBox Engine installation files")
return parser.parse_args()
def prepare_nuvlabox_engine_installation(version, compose_files, workdir, keep_files=[]):
""" Prepares the working environment for installing the NuvlaBox Engine
:param version: GitHub release of the NuvlaBox Engine
:param compose_files: list of release assets to download
:param workdir: path where the compose files are to be saved
:param keep_files: list of files that is not supposed to be modified during this preparation
:returns absolute path to the NuvlaBox Engine installer script
"""
github_release = 'https://github.com/nuvlabox/deployment/releases/download/{}'.format(version)
# Double check that the workdir is created
try:
# Create working directory
os.makedirs(workdir)
except FileExistsError:
pass
# Backup the previous installation files
existing_files = os.listdir(workdir)
now = int(time.time())
for efile in existing_files:
filename = "{}/{}".format(workdir, efile)
if not filename.endswith("backup") and filename not in keep_files:
new_file = "{}.backup".format(filename, now)
os.rename(filename, new_file)
final_compose_files = []
for file in compose_files:
gh_url = "{}/{}".format(github_release, file)
r = requests.get(gh_url)
r.raise_for_status()
save_compose_file_at = "{}/{}".format(workdir, file)
with open(save_compose_file_at, 'wb') as f:
f.write(r.content)
final_compose_files.append(save_compose_file_at)
# also download install file
installer_file_name = "install.sh"
installer_file = "{}/{}".format(workdir, installer_file_name)
installer_file_gh = "{}/{}".format(github_release, installer_file_name)
r = requests.get(installer_file_gh)
r.raise_for_status()
with open(installer_file, 'wb') as f:
f.write(r.content)
return installer_file, final_compose_files
def install_nuvlabox_engine(cmd, env=os.environ.copy(), timeout=600):
""" Runs a command
:param cmd: command to be executed
:param env: environment to be passed
:param timeout: time after which the command will abruptly be terminated
"""
try:
result = run(cmd, stdout=PIPE, stderr=STDOUT, env=env, input=None,
timeout=timeout, universal_newlines=True)
except TimeoutExpired:
raise Exception('Command execution timed out after {} seconds'.format(timeout))
if result.returncode != 0:
raise Exception(result.stdout)
print(result.stdout)
if __name__ == "__main__":
args = arguments()
nb_trigger_json = json.loads(args.nb_trigger_content)
nb_workdir = args.nb_workdir.rstrip('/')
env_file = "{}/.env".format(nb_workdir)
# Check if env files already exists
# cause that will tell us if this is the first time we are self-registring this NB or not
# if there's a previous env file (thus previous installation), we will check if it is COMMISSIONED or not
# based on this check, we will either UPDATE or OVERWRITE the existing installation, respectively
installation_strategy = "OVERWRITE" # default
nuvlabox_id = None
previous_conf = {}
if not os.path.exists(nb_workdir):
os.makedirs(nb_workdir)
else:
if os.path.isfile(env_file):
# .env file exists - get the previous details
with open(env_file) as f:
for l in f.read().splitlines():
if l and "=" in l:
varname = l.split('=', 1)[0]
varvalue = l.split('=', 1)[1]
previous_conf[varname] = varvalue
# argparse
nuvla = nb_trigger_json['endpoint']
nuvla_endpoint = nb_trigger_json['endpoint'].rstrip('/').rstrip('/api') + "/api"
nb_basename = nb_trigger_json.get('name', '')
nb_basedescription = nb_trigger_json.get('description', ' ')
nb_release = nb_trigger_json['version']
nb_vpn_server_id = nb_trigger_json.get('vpn')
nb_assets = nb_trigger_json['assets']
nb_ssh = nb_trigger_json.get('ssh', {})
new_conf = nb_trigger_json.get('environment', {})
nb_ssh_pubkeys = nb_ssh.get('public-keys', [])
nb_version = nb_release.split('.')[0]
login_apikey = {
"template": {
"href": "session-template/api-key",
"key": nb_trigger_json['apikey'],
"secret": nb_trigger_json['apisecret']
}
}
s = requests.Session()
# login
connection_verify = True
login_endpoint = nuvla_endpoint + "/session"
print("Nuvla login at {}...".format(login_endpoint))
try:
session = s.post(login_endpoint, json=login_apikey)
except requests.exceptions.SSLError:
connection_verify = False
session = s.post(login_endpoint, json=login_apikey, verify=connection_verify)
session.raise_for_status()
new_conf['NUVLA_ENDPOINT'] = nuvla
new_conf['NUVLA_ENDPOINT_INSECURE'] = str(not connection_verify)
if nb_ssh_pubkeys:
new_conf['NUVLABOX_SSH_PUB_KEY'] = '\\n'.join(nb_ssh_pubkeys)
if previous_conf:
if "NUVLABOX_UUID" in previous_conf:
previous_uuid = previous_conf['NUVLABOX_UUID']
print("Existing env file from previous deployment found, with NuvlaBox UUID {}".format(previous_uuid))
check_nb_endpoint = nuvla_endpoint + "/" + previous_uuid
nb = s.get(check_nb_endpoint, verify=connection_verify)
if nb.status_code == 200:
state = nb.json().get('state', 'UNKNOWN')
if state in ["DECOMMISSIONED", 'ERROR']:
# this NuvlaBox has been decommissioned or is in error, just overwrite the local installation
print("Previous NuvlaBox {} is in state {}. Going to OVERWRITE it...".format(previous_uuid, state))
else:
new_conf['NUVLABOX_UUID'] = previous_uuid
if new_conf == previous_conf:
print("NuvlaBox environment hasn't changed, performing an UPDATE")
installation_strategy = "UPDATE"
else:
print("NuvlaBox environment different from existing installation, performing an OVERWRITE")
elif nb.status_code == 404:
# doesn't exist, so let's just OVERWRITE this local installation
print("Previous NuvlaBox {} doesn't exist anymore...creating new one".format(previous_uuid))
else:
# something went wrong, either a network issue or we have the wrong credentials to access the
# current NuvlaBox resource...just throw the error and do nothing
nb.raise_for_status()
else:
print("There's a previous NuvlaBox environment but couldn't find a NuvlaBox UUID...let's OVERWRITE")
if installation_strategy == "OVERWRITE":
print("Creating new NuvlaBox resource...")
try:
unique_id = str(get_mac())
except:
unique_id = str(int(time.time()))
nb_name = nb_basename.rstrip("_") + "_" + unique_id if nb_basename else unique_id
nb_description = "{} - self-registration number {}".format(nb_basedescription, unique_id)
nuvlabox = {
"name": nb_name,
"description": nb_description,
"version": int(nb_version)
}
if nb_vpn_server_id:
nuvlabox['vpn-server-id'] = nb_vpn_server_id
if nb_ssh and "ids" in nb_ssh and isinstance(nb_ssh.get('ids'), list):
nuvlabox['ssh-keys'] = nb_ssh.get('ids')
new_nb_endpoint = nuvla_endpoint + "/nuvlabox"
nb_id = s.post(new_nb_endpoint, json=nuvlabox, verify=connection_verify)
nb_id.raise_for_status()
nuvlabox_id = nb_id.json()["resource-id"]
print("Created NuvlaBox resource {} in {}".format(nuvlabox_id, nuvla))
new_conf['NUVLABOX_UUID'] = nuvlabox_id
# update env file
print("Setting up environment {} at {}".format(new_conf, env_file))
with open(env_file, 'w') as f:
for varname, varvalue in new_conf.items():
f.write("{}={}\n".format(varname, varvalue))
try:
installer_file, compose_files = prepare_nuvlabox_engine_installation(nb_release,
nb_assets,
nb_workdir,
keep_files=[env_file])
install_command = ["sh", installer_file, "--env-file={}".format(env_file),
"--compose-files={}".format(",".join(compose_files)),
"--installation-strategy={}".format(installation_strategy), "--action=INSTALL"]
print("Installing NuvlaBox Engine - this can take a few minutes...")
install_nuvlabox_engine(install_command)
except:
# On any error, cleanup the resource in Nuvla
print("NuvlaBox Engine installation failed")
if nuvlabox_id:
print("removing {} from Nuvla".format(nuvlabox_id))
s.delete(nuvla_endpoint + "/" + nuvlabox_id, verify=connection_verify)
raise
| en | 0.768255 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- NuvlaBox Self Registration script This script is part of the NuvlaBox industrialization process. Given the right user credentials and NuvlaBox initialization attributes, this script will automatically register a new NuvlaBox resource in Nuvla. Arguments: :param nuvlabox-installation-trigger-json: JSON string of the NuvlaBox Installation Trigger's content. See schema below The expected JSON schema is: { "apikey": "credential/<uuid>", "apisecret": "<secret>", "endpoint": "<nuvla endpoint>", "version": "<nuvlabox-engine release>", "script": "<link to this script>", "name": "<basename nuvlaboxes>", "description": "<base description>", "vpn": "infrastructure-service/<uuid>", "assets": ["docker-compose.yml", <other compose files to install alongside>], "environment": { "HOSTNAME": "myhostname", "SKIP_MINIMUM_REQUIREMENTS": True }, "ssh": { "ids": ["credential/111-bbb-ccc", ...], "public-keys": ["ssh-rsa AAA...", ...] } } :returns NuvlaBox UUID Builds a generic argparse :return: parser Prepares the working environment for installing the NuvlaBox Engine :param version: GitHub release of the NuvlaBox Engine :param compose_files: list of release assets to download :param workdir: path where the compose files are to be saved :param keep_files: list of files that is not supposed to be modified during this preparation :returns absolute path to the NuvlaBox Engine installer script # Double check that the workdir is created # Create working directory # Backup the previous installation files # also download install file Runs a command :param cmd: command to be executed :param env: environment to be passed :param timeout: time after which the command will abruptly be terminated # Check if env files already exists # cause that will tell us if this is the first time we are self-registring this NB or not # if there's a previous env file (thus previous installation), we will check if it is COMMISSIONED or not # based on 
this check, we will either UPDATE or OVERWRITE the existing installation, respectively # default # .env file exists - get the previous details # argparse # login # this NuvlaBox has been decommissioned or is in error, just overwrite the local installation # doesn't exist, so let's just OVERWRITE this local installation # something went wrong, either a network issue or we have the wrong credentials to access the # current NuvlaBox resource...just throw the error and do nothing # update env file # On any error, cleanup the resource in Nuvla | 2.316352 | 2 |
codes/PolicyGradient/task0.py | johnjim0816/rl-tutorials | 33 | 6614774 | <reponame>johnjim0816/rl-tutorials
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: <EMAIL>
Date: 2020-11-22 23:21:53
LastEditor: John
LastEditTime: 2022-02-10 06:13:21
Discription:
Environment:
'''
import sys
import os
curr_path = os.path.dirname(os.path.abspath(__file__)) # 当前文件所在绝对路径
parent_path = os.path.dirname(curr_path) # 父路径
sys.path.append(parent_path) # 添加路径到系统路径
import gym
import torch
import datetime
from itertools import count
from pg import PolicyGradient
from common.utils import save_results, make_dir
from common.utils import plot_rewards
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # 获取当前时间
class Config:
    '''Hyperparameters for training/testing the PolicyGradient agent.
    '''
    def __init__(self):
        ################################## environment hyperparameters ###################################
        self.algo_name = "PolicyGradient"  # algorithm name
        self.env_name = 'CartPole-v0'  # environment name
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")  # use GPU if available
        self.seed = 10  # random seed; 0 means no seed is set
        self.train_eps = 300  # number of training episodes
        self.test_eps = 30  # number of testing episodes
        ################################################################################

        ################################## algorithm hyperparameters ###################################
        self.batch_size = 8  # batch size for mini-batch SGD
        self.lr = 0.01  # learning rate
        self.gamma = 0.99  # RL discount factor
        self.hidden_dim = 36  # hidden layer size of the policy network
        ################################################################################

        ################################# result-saving parameters ################################
        self.result_path = curr_path + "/outputs/" + self.env_name + \
            '/' + curr_time + '/results/'  # path for saving results
        self.model_path = curr_path + "/outputs/" + self.env_name + \
            '/' + curr_time + '/models/'  # path for saving models
        self.save = True  # whether to save plots
        ################################################################################
def env_agent_config(cfg, seed=1):
    """Create the gym environment and a PolicyGradient agent for it.

    :param cfg: Config instance with hyperparameters
    :param seed: random seed applied to the environment
    :return: (env, agent) tuple
    """
    environment = gym.make(cfg.env_name)
    environment.seed(seed)
    state_dim = environment.observation_space.shape[0]
    return environment, PolicyGradient(state_dim, cfg)
def train(cfg, env, agent):
    """Train the policy-gradient agent for cfg.train_eps episodes.

    Transitions are accumulated across episodes and the policy is updated
    every cfg.batch_size episodes (REINFORCE-style batching).

    :param cfg: Config instance with hyperparameters
    :param env: gym environment
    :param agent: PolicyGradient agent
    :return: (rewards, ma_rewards) — per-episode rewards and their
        exponential moving average (factor 0.9)
    """
    print('开始训练!')
    print(f'环境:{cfg.env_name}, 算法:{cfg.algo_name}, 设备:{cfg.device}')
    state_pool = []  # states collected over the last batch_size episodes
    action_pool = []
    reward_pool = []
    rewards = []
    ma_rewards = []
    for i_ep in range(cfg.train_eps):
        state = env.reset()
        ep_reward = 0
        for _ in count():
            action = agent.choose_action(state)  # pick an action for the current state
            next_state, reward, done, _ = env.step(action)
            ep_reward += reward
            if done:
                # terminal transitions are stored with zero reward
                reward = 0
            state_pool.append(state)
            action_pool.append(float(action))
            reward_pool.append(reward)
            state = next_state
            if done:
                print('回合:{}/{}, 奖励:{}'.format(i_ep + 1, cfg.train_eps, ep_reward))
                break
        if i_ep > 0 and i_ep % cfg.batch_size == 0:
            # update the policy on the accumulated batch, then clear the pools
            agent.update(reward_pool,state_pool,action_pool)
            state_pool = []
            action_pool = []
            reward_pool = []
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(
                0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('完成训练!')
    env.close()
    return rewards, ma_rewards
def test(cfg,env,agent):
    """Evaluate the trained agent for ``cfg.test_eps`` episodes (no learning).

    Returns the per-episode rewards and their exponential moving average.
    """
    print('开始测试!')
    print(f'环境:{cfg.env_name}, 算法:{cfg.algo_name}, 设备:{cfg.device}')
    rewards = []
    ma_rewards = []  # moving average of episode returns
    for i_ep in range(cfg.test_eps):
        state = env.reset()
        ep_reward = 0
        for _ in count():
            action = agent.choose_action(state)  # act according to the learned policy
            next_state, reward, done, _ = env.step(action)
            ep_reward += reward
            if done:
                reward = 0
            state = next_state
            if done:
                # BUG FIX: the progress line printed cfg.train_eps, but the
                # evaluation loop runs cfg.test_eps episodes.
                print('回合:{}/{}, 奖励:{}'.format(i_ep + 1, cfg.test_eps, ep_reward))
                break
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(
                0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('完成测试!')
    env.close()
    return rewards, ma_rewards
if __name__ == "__main__":
    cfg = Config()
    # ----- training phase -----
    env, agent = env_agent_config(cfg)
    rewards, ma_rewards = train(cfg, env, agent)
    make_dir(cfg.result_path, cfg.model_path)  # create the result/model output folders
    agent.save(path=cfg.model_path)  # persist the trained policy network
    save_results(rewards, ma_rewards, tag='train',
                 path=cfg.result_path)  # dump the training reward curves
    plot_rewards(rewards, ma_rewards, cfg, tag="train")  # plot the training curves
    # ----- evaluation phase (fresh env, reloaded weights) -----
    env, agent = env_agent_config(cfg)
    agent.load(path=cfg.model_path)  # reload the policy saved above
    rewards, ma_rewards = test(cfg, env, agent)
    save_results(rewards, ma_rewards, tag='test',
                 path=cfg.result_path)  # dump the evaluation reward curves
    plot_rewards(rewards, ma_rewards, cfg, tag="test")  # plot the evaluation curves
| #!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: <EMAIL>
Date: 2020-11-22 23:21:53
LastEditor: John
LastEditTime: 2022-02-10 06:13:21
Discription:
Environment:
'''
import sys
import os
curr_path = os.path.dirname(os.path.abspath(__file__))  # directory containing this file
parent_path = os.path.dirname(curr_path)  # its parent directory
sys.path.append(parent_path)  # make sibling packages (pg, common) importable
import gym
import torch
import datetime
from itertools import count
from pg import PolicyGradient
from common.utils import save_results, make_dir
from common.utils import plot_rewards
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # timestamp used in output paths
class Config:
    '''Hyper-parameters for the PolicyGradient CartPole experiment.'''
    def __init__(self):
        ################################## environment settings ########################
        self.algo_name = "PolicyGradient"  # algorithm name
        self.env_name = 'CartPole-v0'  # Gym environment id
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")  # use the GPU when available
        self.seed = 10  # random seed (0 means: do not seed)
        self.train_eps = 300  # number of training episodes
        self.test_eps = 30  # number of evaluation episodes
        ################################################################################
        ################################## algorithm settings ##########################
        self.batch_size = 8  # episodes per mini-batch SGD update
        self.lr = 0.01  # learning rate
        self.gamma = 0.99  # reinforcement-learning discount factor
        self.hidden_dim = 36  # hidden-layer width of the policy network
        ################################################################################
        ################################# output settings ##############################
        self.result_path = curr_path + "/outputs/" + self.env_name + \
            '/' + curr_time + '/results/'  # where reward curves are written
        self.model_path = curr_path + "/outputs/" + self.env_name + \
            '/' + curr_time + '/models/'  # where model checkpoints are written
        self.save = True  # whether to save the reward plots
        ################################################################################
def env_agent_config(cfg, seed=1):
    """Build the Gym environment named in *cfg* plus a matching PolicyGradient agent.

    *seed* is forwarded to ``env.seed``.  Returns the ``(env, agent)`` pair.
    """
    environment = gym.make(cfg.env_name)
    environment.seed(seed)
    state_dim = environment.observation_space.shape[0]
    return environment, PolicyGradient(state_dim, cfg)
def train(cfg,env,agent):
    """Run the REINFORCE training loop for ``cfg.train_eps`` episodes.

    Transitions are accumulated across episodes and the policy is updated once
    every ``cfg.batch_size`` episodes.  Returns the per-episode rewards and
    their exponential moving average.
    """
    print('开始训练!')
    print(f'环境:{cfg.env_name}, 算法:{cfg.algo_name}, 设备:{cfg.device}')
    state_pool = []  # states collected since the last policy update
    action_pool = []
    reward_pool = []
    rewards = []  # raw return of each episode
    ma_rewards = []  # moving average of episode returns
    for i_ep in range(cfg.train_eps):
        state = env.reset()
        ep_reward = 0
        for _ in count():
            action = agent.choose_action(state)  # sample an action from the current policy
            next_state, reward, done, _ = env.step(action)
            ep_reward += reward
            if done:
                reward = 0  # store terminal transitions with zero reward
            state_pool.append(state)
            action_pool.append(float(action))
            reward_pool.append(reward)
            state = next_state
            if done:
                print('回合:{}/{}, 奖励:{}'.format(i_ep + 1, cfg.train_eps, ep_reward))
                break
        if i_ep > 0 and i_ep % cfg.batch_size == 0:
            # One policy-gradient update over the accumulated batch, then
            # start collecting a fresh batch.
            agent.update(reward_pool,state_pool,action_pool)
            state_pool = []
            action_pool = []
            reward_pool = []
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(
                0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('完成训练!')
    env.close()
    return rewards, ma_rewards
def test(cfg,env,agent):
    """Evaluate the trained agent for ``cfg.test_eps`` episodes (no learning).

    Returns the per-episode rewards and their exponential moving average.
    """
    print('开始测试!')
    print(f'环境:{cfg.env_name}, 算法:{cfg.algo_name}, 设备:{cfg.device}')
    rewards = []
    ma_rewards = []  # moving average of episode returns
    for i_ep in range(cfg.test_eps):
        state = env.reset()
        ep_reward = 0
        for _ in count():
            action = agent.choose_action(state)  # act according to the learned policy
            next_state, reward, done, _ = env.step(action)
            ep_reward += reward
            if done:
                reward = 0
            state = next_state
            if done:
                # BUG FIX: the progress line printed cfg.train_eps, but the
                # evaluation loop runs cfg.test_eps episodes.
                print('回合:{}/{}, 奖励:{}'.format(i_ep + 1, cfg.test_eps, ep_reward))
                break
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(
                0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('完成测试!')
    env.close()
    return rewards, ma_rewards
if __name__ == "__main__":
    cfg = Config()
    # ----- training phase -----
    env, agent = env_agent_config(cfg)
    rewards, ma_rewards = train(cfg, env, agent)
    make_dir(cfg.result_path, cfg.model_path)  # create the result/model output folders
    agent.save(path=cfg.model_path)  # persist the trained policy network
    save_results(rewards, ma_rewards, tag='train',
                 path=cfg.result_path)  # dump the training reward curves
    plot_rewards(rewards, ma_rewards, cfg, tag="train")  # plot the training curves
    # ----- evaluation phase (fresh env, reloaded weights) -----
    env, agent = env_agent_config(cfg)
    agent.load(path=cfg.model_path)  # reload the policy saved above
    rewards, ma_rewards = test(cfg, env, agent)
    save_results(rewards, ma_rewards, tag='test',
                 path=cfg.result_path)  # dump the evaluation reward curves
plot_rewards(rewards, ma_rewards, cfg, tag="test") # 画出结果 | zh | 0.384954 | #!/usr/bin/env python # coding=utf-8 Author: John Email: <EMAIL> Date: 2020-11-22 23:21:53 LastEditor: John LastEditTime: 2022-02-10 06:13:21 Discription: Environment: # 当前文件所在绝对路径 # 父路径 # 添加路径到系统路径 # 获取当前时间 超参数 ################################## 环境超参数 ################################### # 算法名称 # 环境名称 # 检测GPUgjgjlkhfsf风刀霜的撒发十 # 随机种子,置0则不设置随机种子 # 训练的回合数 # 测试的回合数 ################################################################################ ################################## 算法超参数 ################################### # mini-batch SGD中的批量大小 # 学习率 # 强化学习中的折扣因子 # 网络隐藏层 ################################################################################ ################################# 保存结果相关参数 ################################ # 保存结果的路径 # 保存模型的路径 # 是否保存图片 ################################################################################ # 存放每batch_size个episode的state序列 # 根据当前环境state选择action # 每个episode的state # 根据当前环境state选择action # 训练 # 创建保存结果和模型路径的文件夹 # 保存模型 # 保存结果 # 画出结果 # 测试 # 导入模型 # 保存结果 # 画出结果 | 2.090716 | 2 |
dropbox/upload.py | maruuusa83/vivado-zed-builder | 0 | 6614775 | from marconfparser import MarConfParser
import io
import dropbox
# Location of the settings file, relative to this script.
SETTINGS_FILE = "../dropbox_settings.conf"
# Schema handed to MarConfParser: section name -> list of option descriptors.
CONFIG_SETTINGS = {
    'dropbox': [
        {'name': 'token', 'type': str, 'required': True},
        # FIX: 'position' (upload destination folder) is read by the main
        # block below but was missing from the schema, so the parser never
        # validated or required it.
        {'name': 'position', 'type': str, 'required': True},
    ],
    'hardware': [
        {'name': 'bootbin', 'type': str, 'required': True},
    ],
}
if __name__ == '__main__':
    # Parse the settings file against the schema above.
    mcp = MarConfParser(SETTINGS_FILE, CONFIG_SETTINGS)
    config = mcp.getConfigDict()
    dbx_client = dropbox.client.DropboxClient(config['dropbox']['token'])
    # FIX: use a context manager so the file handle is closed even when the
    # upload raises (the original leaked the open handle).
    with open('../' + config['hardware']['bootbin'], "rb") as f:
        response = dbx_client.put_file(config['dropbox']['position'] + "BOOT.bin", f)
| from marconfparser import MarConfParser
import io
import dropbox
# Location of the settings file, relative to this script.
SETTINGS_FILE = "../dropbox_settings.conf"
# Schema handed to MarConfParser: section name -> list of option descriptors.
CONFIG_SETTINGS = {
    'dropbox': [
        {'name': 'token', 'type': str, 'required': True},
        # FIX: 'position' (upload destination folder) is read by the main
        # block below but was missing from the schema, so the parser never
        # validated or required it.
        {'name': 'position', 'type': str, 'required': True},
    ],
    'hardware': [
        {'name': 'bootbin', 'type': str, 'required': True},
    ],
}
if __name__ == '__main__':
    # Parse the settings file against the schema above.
    mcp = MarConfParser(SETTINGS_FILE, CONFIG_SETTINGS)
    config = mcp.getConfigDict()
    dbx_client = dropbox.client.DropboxClient(config['dropbox']['token'])
    # FIX: use a context manager so the file handle is closed even when the
    # upload raises (the original leaked the open handle).
    with open('../' + config['hardware']['bootbin'], "rb") as f:
        response = dbx_client.put_file(config['dropbox']['position'] + "BOOT.bin", f)
| none | 1 | 2.321716 | 2 | |
old/delete_the_data_punctuation.py | archu2020/python-2 | 48 | 6614776 | import re # 正则表达式模块,用于删除标点符号
# Text-cleaning helper.
def CleanInput(input_str):
    """Strip Chinese punctuation, whitespace, apostrophes and ASCII dots from *input_str*."""
    junk = re.compile(r"[:“”;()、,。!~《》\s'.]")
    return junk.sub("", input_str)
# Clean the raw text corpus and write the result to a sibling file.
# FIX: both reads previously used open(...).read() without ever closing the
# handle; context managers guarantee the files are closed.
with open("D:\\Users\\YeahKun\\Desktop\\play\\分词材料.txt", "rb") as source:
    file_material = source.read().decode("utf8", "ignore")
file_material = CleanInput(file_material)
with open("D:\\Users\\YeahKun\\Desktop\\play\\new_分词材料.txt", "a") as file_save:  # append the cleaned text
    file_save.write(file_material)
| import re # 正则表达式模块,用于删除标点符号
# Text-cleaning helper.
def CleanInput(input_str):
    """Strip Chinese punctuation, whitespace, apostrophes and ASCII dots from *input_str*."""
    junk = re.compile(r"[:“”;()、,。!~《》\s'.]")
    return junk.sub("", input_str)
# Clean the raw text corpus and write the result to a sibling file.
# FIX: both reads previously used open(...).read() without ever closing the
# handle; context managers guarantee the files are closed.
with open("D:\\Users\\YeahKun\\Desktop\\play\\分词材料.txt", "rb") as source:
    file_material = source.read().decode("utf8", "ignore")
file_material = CleanInput(file_material)
with open("D:\\Users\\YeahKun\\Desktop\\play\\new_分词材料.txt", "a") as file_save:  # append the cleaned text
    file_save.write(file_material)
| zh | 0.981617 | # 正则表达式模块,用于删除标点符号 # 数据清洗函数 # 正则表达式re的sub函数 # 文本数据清洗 # 将分好的词放到文件里面 | 3.261508 | 3 |
four_in_a_row.py | tobiascr/four-in-a-row-py | 0 | 6614777 | <gh_stars>0
import tkinter as tk
from engine import EngineInterface
from engine import GameState
class MainWindow(tk.Tk):
    """Top-level window: owns the board, the score, and the game flow."""
    def __init__(self):
        tk.Tk.__init__(self)
        self.resizable(False, False)
        self.board = Board(self)
        self.board.pack()
        self.player_color = "yellow"  # disk color for the human player
        self.engine_color = "red"  # disk color for the computer
        self.new_game_flag = False  # set True by DialogBox.play to request another game
        self.difficulty_level = tk.StringVar()
        self.difficulty_level.set("Medium")
        self.player_make_first_move = True  # toggled each game so first move alternates
        self.protocol("WM_DELETE_WINDOW", self.close_window)
        self.score = [0, 0]  # [player wins, engine wins]
        self.title("Four in a row: 0 - 0")
        self.animations = False  # True: disks visibly fall down the column
    def new_game_dialog_box(self):
        """Show the start dialog; keep running or destroy the window depending on the answer."""
        self.protocol("WM_DELETE_WINDOW", self.dont_close_window)  # Disable close window
        dialog_box = DialogBox(main_window, "New game")
        if self.new_game_flag:
            self.new_game_flag = False
            self.protocol("WM_DELETE_WINDOW", self.close_window)  # Enable close window
        else:
            self.destroy()
    def update_difficulty_level(self, *args):
        """Update the difficulty level in the engine and reset score if
        the level is changed.
        """
        current_level = engine_interface.difficulty_level
        if self.difficulty_level.get() == "Easy":
            engine_interface.difficulty_level = 1
        elif self.difficulty_level.get() == "Medium":
            engine_interface.difficulty_level = 2
        elif self.difficulty_level.get() == "Hard":
            engine_interface.difficulty_level = 3
        if engine_interface.difficulty_level != current_level:
            self.score = [0, 0]
            self.title_update()
    def title_update(self):
        """Refresh the window title with the current score."""
        self.title("Four in a row: " + str(self.score[0]) + " - " + str(self.score[1]))
    def update_and_pause(self, time_in_ms):
        """Redraw, wait time_in_ms, and keep the GUI responsive while clicks are ignored."""
        self.board.unbind_mouse()
        self.update_idletasks()
        self.after(time_in_ms)
        self.update()  # Handle possible events.
        self.board.rebind_mouse()
    def mouse_click(self, column_number):
        """This function is called if the column with column_number have been
        clicked on.  It plays the human move, then the engine's reply, and
        handles win/draw dialogs.
        """
        def dialog(text):
            # End-of-game dialog: either start a new game or tear the app down.
            dialog_box = DialogBox(main_window, text)
            if self.new_game_flag:
                self.protocol("WM_DELETE_WINDOW", self.close_window)  # Enable close window
                self.new_game()
            else:
                self.destroy()
        self.protocol("WM_DELETE_WINDOW", self.dont_close_window)  # Disable close window
        # Player make a move, if there is empty places left in the column.
        if engine_interface.legal(column_number):
            engine_interface.make_move(column_number)
            self.board.add_disk_to_top_of_column(column_number, self.player_color, self.animations)
            self.update_idletasks()
        else:
            self.protocol("WM_DELETE_WINDOW", self.close_window)  # Enable close window
            return
        # If player win.
        if engine_interface.four_in_a_row():
            self.score[0] += 1
            self.title_update()
            self.highlight_four_in_a_row(self.player_color)
            self.update_and_pause(1000)
            dialog("You win! Congratulations!")
            return
        # If draw.
        if engine_interface.draw():
            self.update_and_pause(600)
            dialog("Draw")
            return
        # Engine makes a move
        column_number = engine_interface.engine_move()
        engine_interface.make_move(column_number)
        if self.animations:
            self.update_and_pause(50)
        else:
            self.update_and_pause(300)
        self.board.add_disk_to_top_of_column(column_number, self.engine_color, self.animations)
        # If engine win.
        if engine_interface.four_in_a_row():
            self.score[1] += 1
            self.title_update()
            self.highlight_four_in_a_row(self.engine_color)
            self.update_and_pause(1000)
            dialog("Computer win!")
            return
        # If draw.
        if engine_interface.draw():
            self.update_and_pause(600)
            dialog("Draw")
            return
        self.protocol("WM_DELETE_WINDOW", self.close_window)  # Enable close window
    def highlight_four_in_a_row(self, color):
        """Blink the four winning disks: hide them briefly, then repaint them."""
        positions = engine_interface.four_in_a_row_positions()
        self.update_and_pause(500)
        for (column, row) in positions:
            self.board.remove_disk(column, row)
        self.update_and_pause(500)
        for (column, row) in positions:
            self.board.add_disk(column, row, color)
    def new_game(self):
        """Reset engine and board; the side that moves first alternates each game."""
        self.new_game_flag = False
        self.player_make_first_move = not self.player_make_first_move
        engine_interface.new_game()
        self.board.remove_all_disks()
        if not self.player_make_first_move:
            column_number = engine_interface.engine_move()
            engine_interface.make_move(column_number)
            self.update_and_pause(300)
            self.board.add_disk_to_top_of_column(column_number, self.engine_color, self.animations)
    def dont_close_window(self):
        # Swallow WM_DELETE_WINDOW while a move or dialog is in progress.
        pass
    def close_window(self):
        self.destroy()
class Board(tk.Frame):
    """The 7x6 playing field: a horizontal row of seven Column widgets."""
    def __init__(self, parent):
        tk.Frame.__init__(self, parent)
        self.parent = parent
        self.column_list = []  # Column widgets, index == column number
        for column_number in range(7):
            column = Column(self, column_number)
            column.pack(side=tk.LEFT)
            self.column_list.append(column)
    def mouse_click(self, column_number):
        # Forward a click on a column up to the main window.
        self.parent.mouse_click(column_number)
    def add_disk_to_top_of_column(self, column_number, color, animations):
        """column_number is 0,1 to 6. animations is True or False."""
        self.column_list[column_number].add_disk_to_top_of_column(color, animations)
    def add_disk(self, column, row, color):
        self.column_list[column].add_disk(row, color)
    def remove_disk(self, column, row):
        self.column_list[column].remove_disk(row)
    def remove_all_disks(self):
        for column in self.column_list:
            column.remove_all_disks()
    def unbind_mouse(self):
        # Ignore clicks while the engine is thinking or an animation runs.
        for column in self.column_list:
            column.unbind_mouse()
    def rebind_mouse(self):
        for column in self.column_list:
            column.rebind_mouse()
class Column(tk.Frame):
    """One playable column: a vertical stack of six Cell widgets."""
    def __init__(self, parent, column_number):
        """column_number is 0,1 to 6 and is used as an identifier."""
        tk.Frame.__init__(self, parent)
        self.parent = parent
        self.column_number = column_number
        self.disks_in_column = 0  # occupied cells, counted from the bottom
        self.column = []  # cells; index 0 is the bottom row
        for cell in range(6):
            new_cell = Cell(self, 90)
            new_cell.pack(side=tk.BOTTOM)
            self.column.append(new_cell)
    def mouse_click(self, event):
        self.parent.mouse_click(self.column_number)
    def add_disk_to_top_of_column(self, color, animations):
        """animations is True or False."""
        if animations:
            # Per-row pauses are sqrt(k+1)-sqrt(k) for k=1..5, i.e. the time a
            # uniformly accelerating disk spends crossing each successive row.
            time_in_each_row = [0.41421356237309515, 0.31783724519578205, 0.2679491924311228,
                                0.2360679774997898, 0.21342176528338808]
            total_time = 0
            min_time = 170  # ms: lower bound so very short falls are still visible
            self.add_disk(5, color)
            self.update_idletasks()
            row = 4
            # Walk the disk down one row at a time until it rests on the pile.
            while row >= self.disks_in_column:
                pause_time = round(170*time_in_each_row[row])
                self.after(pause_time)
                total_time += pause_time
                self.remove_disk(row + 1)
                self.add_disk(row, color)
                self.update_idletasks()
                row -=1
            if total_time < min_time:
                self.after(min_time - total_time)
        else:
            self.add_disk(self.disks_in_column, color)
        self.disks_in_column += 1
    def add_disk(self, row, color):
        self.column[row].add_disk(color)
    def remove_disk(self, row):
        self.column[row].remove_disk(row=None) if False else self.column[row].remove_disk()
    def remove_all_disks(self):
        self.disks_in_column = 0
        for cell in self.column:
            cell.remove_disk()
    def unbind_mouse(self):
        for cell in self.column:
            cell.unbind_mouse()
    def rebind_mouse(self):
        for cell in self.column:
            cell.rebind_mouse()
class Cell(tk.Canvas):
    def __init__(self, parent, side_length):
        """One square of the board holding a single disk slot.

        The slot is drawn as an oval; an "empty" slot is simply painted in
        the board's background color.
        """
        self.parent = parent
        self.background_color = "#1439f9"  # board blue; also used to "erase" a disk
        tk.Canvas.__init__(self, parent, width=side_length, height=side_length,
                           bg=self.background_color, highlightthickness=0)
        # An odd diameter can give a better looking circle.
        radius = (9 * side_length) // 20
        d = (side_length - (2 * radius + 1)) // 2
        self.disk = self.create_oval(d, d, d + 2 * radius + 1, d + 2 * radius + 1,
                                     width=2, outline="#0000AA")
        self.bind("<Button-1>", parent.mouse_click)
    def add_disk(self, color):
        self.itemconfig(self.disk, fill=color)
    def remove_disk(self):
        # "Removing" just repaints the disk in the background color.
        self.itemconfig(self.disk, fill=self.background_color)
    def unbind_mouse(self):
        self.unbind("<Button-1>")
    def rebind_mouse(self):
        self.bind("<Button-1>", self.parent.mouse_click)
class DialogBox(tk.Toplevel):
    """Modal start/end-of-game dialog with difficulty radio buttons.

    The user's choice is reported through ``parent.new_game_flag`` (set by
    ``play``, left False by ``quit``).
    """
    def __init__(self, parent, text):
        """Build and show the dialog; blocks until it is closed."""
        tk.Toplevel.__init__(self, parent)
        self.parent = parent
        self.transient(parent)
        self.title("Four in a row")
        box_width = 300
        box_height = 120
        parent_width = parent.winfo_width()
        parent_height = parent.winfo_height()
        # Center the box over the parent window, clamped to its top-left edge.
        if box_width >= parent_width:
            x_offset = parent.winfo_rootx()
        else:
            x_offset = parent.winfo_rootx() + (parent_width - box_width) // 2
        y_offset = parent.winfo_rooty() + (parent_height - box_height - 40) // 2
        if y_offset < parent.winfo_rooty():
            y_offset = parent.winfo_rooty()
        self.geometry("%dx%d+%d+%d" % (box_width, box_height, x_offset, y_offset))
        self.wait_visibility()  # Window needs to be visible for the grab.
        self.grab_set()  # Routes all events for this application to this widget.
        self.focus_set()
        text = tk.Label(self, text=text, font=("", 11, "bold"), borderwidth=10)
        text.pack()
        radio_button_frame = tk.Frame(master=self)
        tk.Radiobutton(radio_button_frame, text="Easy", font=("", 10),
                       variable=parent.difficulty_level, value="Easy").pack(side=tk.LEFT)
        tk.Radiobutton(radio_button_frame, text="Medium", font=("", 10),
                       variable=parent.difficulty_level, value="Medium").pack(side=tk.LEFT)
        tk.Radiobutton(radio_button_frame, text="Hard", font=("", 10),
                       variable=parent.difficulty_level, value="Hard").pack()
        radio_button_frame.pack()
        button_frame = tk.Frame(master=self, pady=10)
        button_frame.pack()
        tk.Button(button_frame, text="Play", font=("", 10), width=8,
                  command=self.play).pack(side=tk.LEFT)
        tk.Button(button_frame, text="Quit", font=("", 10), width=8,
                  command=self.quit).pack()
        self.bind("<Return>", self.play)
        self.bind("<Escape>", self.quit)
        parent.wait_window(window=self)  # Wait for the dialog box to be destroyed.
    def play(self, event=None):
        # Request another game and push the chosen difficulty to the engine.
        self.parent.new_game_flag = True
        self.parent.update_difficulty_level()
        self.destroy()
    def quit(self, event=None):
        # NOTE: intentionally shadows tk.Misc.quit; here it only closes the dialog.
        self.destroy()
# Wire everything together and enter the Tk event loop.
engine_interface = EngineInterface(2)  # start at difficulty level 2 ("Medium")
main_window = MainWindow()
main_window.update()
main_window.new_game_dialog_box()
main_window.mainloop()
| import tkinter as tk
from engine import EngineInterface
from engine import GameState
class MainWindow(tk.Tk):
    """Top-level window: owns the board, the score, and the game flow."""
    def __init__(self):
        tk.Tk.__init__(self)
        self.resizable(False, False)
        self.board = Board(self)
        self.board.pack()
        self.player_color = "yellow"  # disk color for the human player
        self.engine_color = "red"  # disk color for the computer
        self.new_game_flag = False  # set True by DialogBox.play to request another game
        self.difficulty_level = tk.StringVar()
        self.difficulty_level.set("Medium")
        self.player_make_first_move = True  # toggled each game so first move alternates
        self.protocol("WM_DELETE_WINDOW", self.close_window)
        self.score = [0, 0]  # [player wins, engine wins]
        self.title("Four in a row: 0 - 0")
        self.animations = False  # True: disks visibly fall down the column
    def new_game_dialog_box(self):
        """Show the start dialog; keep running or destroy the window depending on the answer."""
        self.protocol("WM_DELETE_WINDOW", self.dont_close_window)  # Disable close window
        dialog_box = DialogBox(main_window, "New game")
        if self.new_game_flag:
            self.new_game_flag = False
            self.protocol("WM_DELETE_WINDOW", self.close_window)  # Enable close window
        else:
            self.destroy()
    def update_difficulty_level(self, *args):
        """Update the difficulty level in the engine and reset score if
        the level is changed.
        """
        current_level = engine_interface.difficulty_level
        if self.difficulty_level.get() == "Easy":
            engine_interface.difficulty_level = 1
        elif self.difficulty_level.get() == "Medium":
            engine_interface.difficulty_level = 2
        elif self.difficulty_level.get() == "Hard":
            engine_interface.difficulty_level = 3
        if engine_interface.difficulty_level != current_level:
            self.score = [0, 0]
            self.title_update()
    def title_update(self):
        """Refresh the window title with the current score."""
        self.title("Four in a row: " + str(self.score[0]) + " - " + str(self.score[1]))
    def update_and_pause(self, time_in_ms):
        """Redraw, wait time_in_ms, and keep the GUI responsive while clicks are ignored."""
        self.board.unbind_mouse()
        self.update_idletasks()
        self.after(time_in_ms)
        self.update()  # Handle possible events.
        self.board.rebind_mouse()
    def mouse_click(self, column_number):
        """This function is called if the column with column_number have been
        clicked on.  It plays the human move, then the engine's reply, and
        handles win/draw dialogs.
        """
        def dialog(text):
            # End-of-game dialog: either start a new game or tear the app down.
            dialog_box = DialogBox(main_window, text)
            if self.new_game_flag:
                self.protocol("WM_DELETE_WINDOW", self.close_window)  # Enable close window
                self.new_game()
            else:
                self.destroy()
        self.protocol("WM_DELETE_WINDOW", self.dont_close_window)  # Disable close window
        # Player make a move, if there is empty places left in the column.
        if engine_interface.legal(column_number):
            engine_interface.make_move(column_number)
            self.board.add_disk_to_top_of_column(column_number, self.player_color, self.animations)
            self.update_idletasks()
        else:
            self.protocol("WM_DELETE_WINDOW", self.close_window)  # Enable close window
            return
        # If player win.
        if engine_interface.four_in_a_row():
            self.score[0] += 1
            self.title_update()
            self.highlight_four_in_a_row(self.player_color)
            self.update_and_pause(1000)
            dialog("You win! Congratulations!")
            return
        # If draw.
        if engine_interface.draw():
            self.update_and_pause(600)
            dialog("Draw")
            return
        # Engine makes a move
        column_number = engine_interface.engine_move()
        engine_interface.make_move(column_number)
        if self.animations:
            self.update_and_pause(50)
        else:
            self.update_and_pause(300)
        self.board.add_disk_to_top_of_column(column_number, self.engine_color, self.animations)
        # If engine win.
        if engine_interface.four_in_a_row():
            self.score[1] += 1
            self.title_update()
            self.highlight_four_in_a_row(self.engine_color)
            self.update_and_pause(1000)
            dialog("Computer win!")
            return
        # If draw.
        if engine_interface.draw():
            self.update_and_pause(600)
            dialog("Draw")
            return
        self.protocol("WM_DELETE_WINDOW", self.close_window)  # Enable close window
    def highlight_four_in_a_row(self, color):
        """Blink the four winning disks: hide them briefly, then repaint them."""
        positions = engine_interface.four_in_a_row_positions()
        self.update_and_pause(500)
        for (column, row) in positions:
            self.board.remove_disk(column, row)
        self.update_and_pause(500)
        for (column, row) in positions:
            self.board.add_disk(column, row, color)
    def new_game(self):
        """Reset engine and board; the side that moves first alternates each game."""
        self.new_game_flag = False
        self.player_make_first_move = not self.player_make_first_move
        engine_interface.new_game()
        self.board.remove_all_disks()
        if not self.player_make_first_move:
            column_number = engine_interface.engine_move()
            engine_interface.make_move(column_number)
            self.update_and_pause(300)
            self.board.add_disk_to_top_of_column(column_number, self.engine_color, self.animations)
    def dont_close_window(self):
        # Swallow WM_DELETE_WINDOW while a move or dialog is in progress.
        pass
    def close_window(self):
        self.destroy()
class Board(tk.Frame):
    """The 7x6 playing field: a horizontal row of seven Column widgets."""
    def __init__(self, parent):
        tk.Frame.__init__(self, parent)
        self.parent = parent
        self.column_list = []  # Column widgets, index == column number
        for column_number in range(7):
            column = Column(self, column_number)
            column.pack(side=tk.LEFT)
            self.column_list.append(column)
    def mouse_click(self, column_number):
        # Forward a click on a column up to the main window.
        self.parent.mouse_click(column_number)
    def add_disk_to_top_of_column(self, column_number, color, animations):
        """column_number is 0,1 to 6. animations is True or False."""
        self.column_list[column_number].add_disk_to_top_of_column(color, animations)
    def add_disk(self, column, row, color):
        self.column_list[column].add_disk(row, color)
    def remove_disk(self, column, row):
        self.column_list[column].remove_disk(row)
    def remove_all_disks(self):
        for column in self.column_list:
            column.remove_all_disks()
    def unbind_mouse(self):
        # Ignore clicks while the engine is thinking or an animation runs.
        for column in self.column_list:
            column.unbind_mouse()
    def rebind_mouse(self):
        for column in self.column_list:
            column.rebind_mouse()
class Column(tk.Frame):
    """One playable column: a vertical stack of six Cell widgets."""
    def __init__(self, parent, column_number):
        """column_number is 0,1 to 6 and is used as an identifier."""
        tk.Frame.__init__(self, parent)
        self.parent = parent
        self.column_number = column_number
        self.disks_in_column = 0  # occupied cells, counted from the bottom
        self.column = []  # cells; index 0 is the bottom row
        for cell in range(6):
            new_cell = Cell(self, 90)
            new_cell.pack(side=tk.BOTTOM)
            self.column.append(new_cell)
    def mouse_click(self, event):
        self.parent.mouse_click(self.column_number)
    def add_disk_to_top_of_column(self, color, animations):
        """animations is True or False."""
        if animations:
            # Per-row pauses are sqrt(k+1)-sqrt(k) for k=1..5, i.e. the time a
            # uniformly accelerating disk spends crossing each successive row.
            time_in_each_row = [0.41421356237309515, 0.31783724519578205, 0.2679491924311228,
                                0.2360679774997898, 0.21342176528338808]
            total_time = 0
            min_time = 170  # ms: lower bound so very short falls are still visible
            self.add_disk(5, color)
            self.update_idletasks()
            row = 4
            # Walk the disk down one row at a time until it rests on the pile.
            while row >= self.disks_in_column:
                pause_time = round(170*time_in_each_row[row])
                self.after(pause_time)
                total_time += pause_time
                self.remove_disk(row + 1)
                self.add_disk(row, color)
                self.update_idletasks()
                row -=1
            if total_time < min_time:
                self.after(min_time - total_time)
        else:
            self.add_disk(self.disks_in_column, color)
        self.disks_in_column += 1
    def add_disk(self, row, color):
        self.column[row].add_disk(color)
    def remove_disk(self, row):
        self.column[row].remove_disk()
    def remove_all_disks(self):
        self.disks_in_column = 0
        for cell in self.column:
            cell.remove_disk()
    def unbind_mouse(self):
        for cell in self.column:
            cell.unbind_mouse()
    def rebind_mouse(self):
        for cell in self.column:
            cell.rebind_mouse()
class Cell(tk.Canvas):
    def __init__(self, parent, side_length):
        """One square of the board holding a single disk slot.

        The slot is drawn as an oval; an "empty" slot is simply painted in
        the board's background color.
        """
        self.parent = parent
        self.background_color = "#1439f9"  # board blue; also used to "erase" a disk
        tk.Canvas.__init__(self, parent, width=side_length, height=side_length,
                           bg=self.background_color, highlightthickness=0)
        # An odd diameter can give a better looking circle.
        radius = (9 * side_length) // 20
        d = (side_length - (2 * radius + 1)) // 2
        self.disk = self.create_oval(d, d, d + 2 * radius + 1, d + 2 * radius + 1,
                                     width=2, outline="#0000AA")
        self.bind("<Button-1>", parent.mouse_click)
    def add_disk(self, color):
        self.itemconfig(self.disk, fill=color)
    def remove_disk(self):
        # "Removing" just repaints the disk in the background color.
        self.itemconfig(self.disk, fill=self.background_color)
    def unbind_mouse(self):
        self.unbind("<Button-1>")
    def rebind_mouse(self):
        self.bind("<Button-1>", self.parent.mouse_click)
class DialogBox(tk.Toplevel):
    """Modal start/end-of-game dialog with difficulty radio buttons.

    The user's choice is reported through ``parent.new_game_flag`` (set by
    ``play``, left False by ``quit``).
    """
    def __init__(self, parent, text):
        """Build and show the dialog; blocks until it is closed."""
        tk.Toplevel.__init__(self, parent)
        self.parent = parent
        self.transient(parent)
        self.title("Four in a row")
        box_width = 300
        box_height = 120
        parent_width = parent.winfo_width()
        parent_height = parent.winfo_height()
        # Center the box over the parent window, clamped to its top-left edge.
        if box_width >= parent_width:
            x_offset = parent.winfo_rootx()
        else:
            x_offset = parent.winfo_rootx() + (parent_width - box_width) // 2
        y_offset = parent.winfo_rooty() + (parent_height - box_height - 40) // 2
        if y_offset < parent.winfo_rooty():
            y_offset = parent.winfo_rooty()
        self.geometry("%dx%d+%d+%d" % (box_width, box_height, x_offset, y_offset))
        self.wait_visibility()  # Window needs to be visible for the grab.
        self.grab_set()  # Routes all events for this application to this widget.
        self.focus_set()
        text = tk.Label(self, text=text, font=("", 11, "bold"), borderwidth=10)
        text.pack()
        radio_button_frame = tk.Frame(master=self)
        tk.Radiobutton(radio_button_frame, text="Easy", font=("", 10),
                       variable=parent.difficulty_level, value="Easy").pack(side=tk.LEFT)
        tk.Radiobutton(radio_button_frame, text="Medium", font=("", 10),
                       variable=parent.difficulty_level, value="Medium").pack(side=tk.LEFT)
        tk.Radiobutton(radio_button_frame, text="Hard", font=("", 10),
                       variable=parent.difficulty_level, value="Hard").pack()
        radio_button_frame.pack()
        button_frame = tk.Frame(master=self, pady=10)
        button_frame.pack()
        tk.Button(button_frame, text="Play", font=("", 10), width=8,
                  command=self.play).pack(side=tk.LEFT)
        tk.Button(button_frame, text="Quit", font=("", 10), width=8,
                  command=self.quit).pack()
        self.bind("<Return>", self.play)
        self.bind("<Escape>", self.quit)
        parent.wait_window(window=self)  # Wait for the dialog box to be destroyed.
    def play(self, event=None):
        # Request another game and push the chosen difficulty to the engine.
        self.parent.new_game_flag = True
        self.parent.update_difficulty_level()
        self.destroy()
    def quit(self, event=None):
        # NOTE: intentionally shadows tk.Misc.quit; here it only closes the dialog.
        self.destroy()
# Wire everything together before entering the Tk event loop.
engine_interface = EngineInterface(2)  # start at difficulty level 2 ("Medium")
main_window = MainWindow()
main_window.update()
main_window.new_game_dialog_box()
main_window.mainloop() | en | 0.871498 | # Disable close window # Enable close window Update the difficulty level in the engine and reset score if the level is changed. # Handle possible events. This function is called if the column with column_number have been clicked on. # Enable close window # Disable close window # Player make a move, if there is empty places left in the column. # Enable close window # If player win. # If draw. # Engine makes a move # If engine win. # If draw. # Enable close window column_number is 0,1 to 6. animations is True or False. column_number is 0,1 to 6 and is used as an identifier. animations is True or False. A cell is the a square-shaped piece of the board consisting of one empty space where a disk can be placed. # An odd diameter can give a better looking circle. Return 'play' or 'quit'. # Window needs to be visible for the grab. # Routes all events for this application to this widget. # Wait for the dialog box to be destroyed. | 3.258973 | 3 |
crawl/models.py | chunky2808/SPOJ-history-Django-App- | 1 | 6614778 | <reponame>chunky2808/SPOJ-history-Django-App-<filename>crawl/models.py
from django.db import models
class paras(models.Model):
    # SPOJ user handle whose submission history is tracked.
    Spoj_Handle = models.CharField(max_length = 140)
    def __str__(self):
        # Display the handle itself in admin/listing views.
        return self.Spoj_Handle
class jain(models.Model):
    # Simple hit counter, starts at zero.
    hits = models.IntegerField(default=0)
# Create your models here.
class ta(models.Model):
name = models.CharField(max_length=140) | from django.db import models
class paras(models.Model):
    # SPOJ user handle whose submission history is tracked.
    Spoj_Handle = models.CharField(max_length = 140)
    def __str__(self):
        # Display the handle itself in admin/listing views.
        return self.Spoj_Handle
class jain(models.Model):
    # Simple hit counter, starts at zero.
    hits = models.IntegerField(default=0)
# Create your models here.
class ta(models.Model):
name = models.CharField(max_length=140) | en | 0.963489 | # Create your models here. | 2.331896 | 2 |
Sorting_and_searching/find the position fo an element in an infinite array.py | mukul20-21/python_datastructure | 0 | 6614779 | ### binary search for infinite array element..!!!
def binary_search(arr, ele, start, end):
    """Binary search over the sorted slice arr[start..end] (inclusive).

    Returns the INDEX of *ele*, or -1 when absent.
    FIX: the original returned ``arr[mid]`` (the element itself), which
    contradicts the program's advertised "position/index" result and makes a
    stored value of -1 indistinguishable from "not found".
    """
    while start <= end:
        mid = (start + end) // 2
        if ele == arr[mid]:
            return mid
        elif ele < arr[mid]:
            end = mid - 1
        else:
            start = mid + 1
    return -1
## Bracket the element with an exponentially growing [start, end] window,
## then binary-search inside that window.
def infinite_search(arr, ele):
    """Locate *ele* in a sorted, conceptually unbounded sequence.

    Doubles ``end`` until ``arr[end] >= ele`` and delegates to binary_search.
    FIX: the original computed the result into ``res`` and then returned -1
    unconditionally, discarding the answer.
    """
    start = 0
    end = 1
    while arr[end] < ele:
        start = end
        end = 2 * end
    return binary_search(arr, ele, start, end)
## Driver code.
if __name__ == '__main__':
    ## A truly infinite array cannot be typed in; a finite sorted list of
    ## space-separated integers stands in for it in this demo.
    arr = list(map(int,input().split()))
    ele = int(input())
print('index of element in infinite sorted array..',infinite_search(arr,ele)) | ### binary search for infinite array element..!!!
def binary_search(arr, ele, start, end):
    """Binary search over the sorted slice arr[start..end] (inclusive).

    Returns the INDEX of *ele*, or -1 when absent.
    FIX: the original returned ``arr[mid]`` (the element itself), which
    contradicts the program's advertised "position/index" result and makes a
    stored value of -1 indistinguishable from "not found".
    """
    while start <= end:
        mid = (start + end) // 2
        if ele == arr[mid]:
            return mid
        elif ele < arr[mid]:
            end = mid - 1
        else:
            start = mid + 1
    return -1
## Bracket the element with an exponentially growing [start, end] window,
## then binary-search inside that window.
def infinite_search(arr, ele):
    """Locate *ele* in a sorted, conceptually unbounded sequence.

    Doubles ``end`` until ``arr[end] >= ele`` and delegates to binary_search.
    FIX: the original computed the result into ``res`` and then returned -1
    unconditionally, discarding the answer.
    """
    start = 0
    end = 1
    while arr[end] < ele:
        start = end
        end = 2 * end
    return binary_search(arr, ele, start, end)
## Driver code.
if __name__ == '__main__':
    ## A truly infinite array cannot be typed in; a finite sorted list of
    ## space-separated integers stands in for it in this demo.
    arr = list(map(int,input().split()))
    ele = int(input())
print('index of element in infinite sorted array..',infinite_search(arr,ele)) | en | 0.742049 | ### binary search for infinite array element..!!! ## code to get the correct value of start and end index from which we can bound the search element..!!! ## Driver code...!!!! ## infinite array which practical not possible to take input.. ## it is a general code for it... | 4.280219 | 4 |
pacifique/forms.py | rogeruwayezu/pacifique_IO | 0 | 6614780 | from .models import Article
from django import forms
from martor.fields import MartorFormField
class NewArticleForm(forms.ModelForm):
    # Form for creating an Article; editor and pub_date are filled in by the view.
    class Meta:
        model = Article
        exclude = ['editor', 'pub_date']
class UpdateArticleForm(forms.ModelForm):
    # Form for editing an existing Article; same field set as NewArticleForm.
    class Meta:
        model = Article
        exclude = ['editor', 'pub_date']
# class NewArticleForm(forms.Form):
# title = forms.CharField(label='title', max_length=30)
# content = MartorFormField()
# article_image = forms.ImageField()
| from .models import Article
from django import forms
from martor.fields import MartorFormField
class NewArticleForm(forms.ModelForm):
class Meta:
model = Article
exclude = ['editor', 'pub_date']
class UpdateArticleForm(forms.ModelForm):
class Meta:
model = Article
exclude = ['editor', 'pub_date']
# class NewArticleForm(forms.Form):
# title = forms.CharField(label='title', max_length=30)
# content = MartorFormField()
# article_image = forms.ImageField()
| en | 0.457266 | # class NewArticleForm(forms.Form): # title = forms.CharField(label='title', max_length=30) # content = MartorFormField() # article_image = forms.ImageField() | 2.110054 | 2 |
illustration_api.py | atoledo1/deprecated-30-story-squad-ds-a | 1 | 6614781 | from fastapi import FastAPI
import uvicorn
import tensorflow as tf
from tensorflow.keras.models import load_model
app = FastAPI(
title="Labs30-StorySquad-DS-Team A",
description="An API for the illustration score",
version="0.1",
docs_url="/"
)
# this api is meant to help transfer the illustration similarity scoring model
# when set up properly, there should be a file called 'transfer_model.h5' in the same scope as this illustration_api file
# first, you'll want to download the data that the neural network will use, which can be found at this link: https://drive.google.com/drive/folders/1rWbjhPRoGj-kwvESVUWhAigfecsN6XDo?usp=sharing
# then open the Google Colaboratory notebook that can be found here: https://colab.research.google.com/drive/1J66ylaqZfZQzCiOmRYHJ4mWt7Jmh7y_B?usp=sharing
# get the data folder to the "Files" sidebar, run all the cells properly, and take the newly downloaded h5 from your Colaboratory workflow to the story-squad-ds-a main folder
# lastly, uncomment the line below this one
#model = load_model('transfer_model.h5')
if __name__ == "__main__":
uvicorn.run(app) | from fastapi import FastAPI
import uvicorn
import tensorflow as tf
from tensorflow.keras.models import load_model
app = FastAPI(
title="Labs30-StorySquad-DS-Team A",
description="An API for the illustration score",
version="0.1",
docs_url="/"
)
# this api is meant to help transfer the illustration similarity scoring model
# when set up properly, there should be a file called 'transfer_model.h5' in the same scope as this illustration_api file
# first, you'll want to download the data that the neural network will use, which can be found at this link: https://drive.google.com/drive/folders/1rWbjhPRoGj-kwvESVUWhAigfecsN6XDo?usp=sharing
# then open the Google Colaboratory notebook that can be found here: https://colab.research.google.com/drive/1J66ylaqZfZQzCiOmRYHJ4mWt7Jmh7y_B?usp=sharing
# get the data folder to the "Files" sidebar, run all the cells properly, and take the newly downloaded h5 from your Colaboratory workflow to the story-squad-ds-a main folder
# lastly, uncomment the line below this one
#model = load_model('transfer_model.h5')
if __name__ == "__main__":
uvicorn.run(app) | en | 0.858541 | # this api is meant to help transfer the illustration similarity scoring model # when set up properly, there should be a file called 'transfer_model.h5' in the same scope as this illustration_api file # first, you'll want to download the data that the neural network will use, which can be found at this link: https://drive.google.com/drive/folders/1rWbjhPRoGj-kwvESVUWhAigfecsN6XDo?usp=sharing # then open the Google Colaboratory notebook that can be found here: https://colab.research.google.com/drive/1J66ylaqZfZQzCiOmRYHJ4mWt7Jmh7y_B?usp=sharing # get the data folder to the "Files" sidebar, run all the cells properly, and take the newly downloaded h5 from your Colaboratory workflow to the story-squad-ds-a main folder # lastly, uncomment the line below this one #model = load_model('transfer_model.h5') | 2.761421 | 3 |
ec3/main_shell.py | scivey/ec3 | 0 | 6614782 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# from __future__ import print_function, unicode_literals
# import prompt_toolkit
# from prompt_toolkit import prompt
# from prompt_toolkit.contrib.completers import WordCompleter
# from prompt_toolkit.history import InMemoryHistory
# from prompt_toolkit.interface import AbortAction
# from prompt_toolkit import auto_suggest
# from ec3.boto_cache import BotoCache
# def main_shell(conf):
# print('conf: ', conf)
# boto_cache = BotoCache.get_or_create()
# all_keys = set()
# all_pairs = set()
# for tag in boto_cache.iter_tags():
# all_keys.add(tag.key)
# all_pairs.add('%s=%s' % (tag.key, tag.value))
# words = ['fish', 'cat', 'gorilla', 'ssh']
# words = list(set(words) | all_keys | all_pairs)
# completer = WordCompleter(words, ignore_case=True)
# history = InMemoryHistory()
# text = prompt('yes? ', completer=completer,
# # history = history,
# # auto_suggest=auto_suggest.AutoSuggestFromHistory(),
# display_completions_in_columns=True,
# # enable_history_search=True
# )
# print('yeah : "%s"' % text)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# from __future__ import print_function, unicode_literals
# import prompt_toolkit
# from prompt_toolkit import prompt
# from prompt_toolkit.contrib.completers import WordCompleter
# from prompt_toolkit.history import InMemoryHistory
# from prompt_toolkit.interface import AbortAction
# from prompt_toolkit import auto_suggest
# from ec3.boto_cache import BotoCache
# def main_shell(conf):
# print('conf: ', conf)
# boto_cache = BotoCache.get_or_create()
# all_keys = set()
# all_pairs = set()
# for tag in boto_cache.iter_tags():
# all_keys.add(tag.key)
# all_pairs.add('%s=%s' % (tag.key, tag.value))
# words = ['fish', 'cat', 'gorilla', 'ssh']
# words = list(set(words) | all_keys | all_pairs)
# completer = WordCompleter(words, ignore_case=True)
# history = InMemoryHistory()
# text = prompt('yes? ', completer=completer,
# # history = history,
# # auto_suggest=auto_suggest.AutoSuggestFromHistory(),
# display_completions_in_columns=True,
# # enable_history_search=True
# )
# print('yeah : "%s"' % text)
| en | 0.316575 | #!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import print_function, unicode_literals # import prompt_toolkit # from prompt_toolkit import prompt # from prompt_toolkit.contrib.completers import WordCompleter # from prompt_toolkit.history import InMemoryHistory # from prompt_toolkit.interface import AbortAction # from prompt_toolkit import auto_suggest # from ec3.boto_cache import BotoCache # def main_shell(conf): # print('conf: ', conf) # boto_cache = BotoCache.get_or_create() # all_keys = set() # all_pairs = set() # for tag in boto_cache.iter_tags(): # all_keys.add(tag.key) # all_pairs.add('%s=%s' % (tag.key, tag.value)) # words = ['fish', 'cat', 'gorilla', 'ssh'] # words = list(set(words) | all_keys | all_pairs) # completer = WordCompleter(words, ignore_case=True) # history = InMemoryHistory() # text = prompt('yes? ', completer=completer, # # history = history, # # auto_suggest=auto_suggest.AutoSuggestFromHistory(), # display_completions_in_columns=True, # # enable_history_search=True # ) # print('yeah : "%s"' % text) | 2.506301 | 3 |
app.py | dyno-marketing/text-classification-api | 1 | 6614783 | <filename>app.py
# -*- coding: utf-8 -*-
__author__ = 'daotuanvu'
create_date = '2/6/2015'
import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
import os
import logging.config
import logging
import yaml
from flask import Flask
import flask_restful
# from flask_restful.representations.json import output_json
# output_json.func_globals['settings'] = {'ensure_ascii': False, 'encoding': 'utf8'}
app = Flask(__name__)
api = flask_restful.Api(app)
from handler.text_classifier import TextClassifier
api.add_resource(TextClassifier, r"/text_classifier")
# Setup logging configuration
def setup_logging(default_path='logging.yaml', default_level=logging.INFO, env_key='LOG_CFG'):
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = yaml.load(f.read())
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
| <filename>app.py
# -*- coding: utf-8 -*-
__author__ = 'daotuanvu'
create_date = '2/6/2015'
import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
import os
import logging.config
import logging
import yaml
from flask import Flask
import flask_restful
# from flask_restful.representations.json import output_json
# output_json.func_globals['settings'] = {'ensure_ascii': False, 'encoding': 'utf8'}
app = Flask(__name__)
api = flask_restful.Api(app)
from handler.text_classifier import TextClassifier
api.add_resource(TextClassifier, r"/text_classifier")
# Setup logging configuration
def setup_logging(default_path='logging.yaml', default_level=logging.INFO, env_key='LOG_CFG'):
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = yaml.load(f.read())
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
| en | 0.367519 | # -*- coding: utf-8 -*- # reload(sys) # sys.setdefaultencoding('utf-8') # from flask_restful.representations.json import output_json # output_json.func_globals['settings'] = {'ensure_ascii': False, 'encoding': 'utf8'} # Setup logging configuration | 2.215728 | 2 |
tests/commands/xor_memory.py | ebubekirtrkr/gef | 0 | 6614784 | """
xor-memory command test module
"""
from tests.utils import GefUnitTestGeneric, gdb_run_cmd, gdb_start_silent_cmd
class XorMemoryCommand(GefUnitTestGeneric):
"""`xor-memory` command test module"""
def test_cmd_xor_memory_display(self):
cmd = "xor-memory display $sp 0x10 0x41"
self.assertFailIfInactiveSession(gdb_run_cmd(cmd))
res = gdb_start_silent_cmd(cmd)
self.assertNoException(res)
self.assertIn("Original block", res)
self.assertIn("XOR-ed block", res)
def test_cmd_xor_memory_patch(self):
cmd = "xor-memory patch $sp 0x10 0x41"
res = gdb_start_silent_cmd(cmd)
self.assertNoException(res)
self.assertIn("Patching XOR-ing ", res)
| """
xor-memory command test module
"""
from tests.utils import GefUnitTestGeneric, gdb_run_cmd, gdb_start_silent_cmd
class XorMemoryCommand(GefUnitTestGeneric):
"""`xor-memory` command test module"""
def test_cmd_xor_memory_display(self):
cmd = "xor-memory display $sp 0x10 0x41"
self.assertFailIfInactiveSession(gdb_run_cmd(cmd))
res = gdb_start_silent_cmd(cmd)
self.assertNoException(res)
self.assertIn("Original block", res)
self.assertIn("XOR-ed block", res)
def test_cmd_xor_memory_patch(self):
cmd = "xor-memory patch $sp 0x10 0x41"
res = gdb_start_silent_cmd(cmd)
self.assertNoException(res)
self.assertIn("Patching XOR-ing ", res)
| fi | 0.080089 | xor-memory command test module `xor-memory` command test module | 2.420998 | 2 |
trio2o/tests/unit/cinder_apigw/controllers/test_volume.py | OpenCloudNeXt/trio2o | 1 | 6614785 | <gh_stars>1-10
# Copyright 2016 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
import pecan
import unittest
from trio2o.cinder_apigw.controllers import volume_type
from trio2o.common import context
from trio2o.db import api as db_api
from trio2o.db import core
class FakeResponse(object):
def __new__(cls, code=500):
cls.status = code
cls.status_code = code
return super(FakeResponse, cls).__new__(cls)
class VolumeTypeTest(unittest.TestCase):
def setUp(self):
core.initialize()
core.ModelBase.metadata.create_all(core.get_engine())
self.context = context.get_admin_context()
self.project_id = 'test_project'
self.controller = volume_type.VolumeTypeController(self.project_id)
def _validate_error_code(self, res, code):
self.assertEqual(code, res[res.keys()[0]]['code'])
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(context, 'extract_context_from_environ')
def test_post(self, mock_context):
mock_context.return_value = self.context
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
res = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
self.assertEqual('vol-type-001', res['name'])
self.assertEqual('volume type 001', res['description'])
capabilities = res['extra_specs']['capabilities']
self.assertEqual('gpu', capabilities)
# failure case, only admin can create volume type
self.context.is_admin = False
res = self.controller.post(**body)
self._validate_error_code(res, 403)
self.context.is_admin = True
# failure case, volume_type body is required
body = {'name': 'vol-type-002'}
res = self.controller.post(**body)
self._validate_error_code(res, 400)
# failure case, volume type name is empty
body = {'volume_type': {'name': '',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
res = self.controller.post(**body)
self._validate_error_code(res, 400)
# failure case, volume type name has more than 255 characters
body = {'volume_type': {'name': ('a' * 500),
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu', }
}
}
res = self.controller.post(**body)
self._validate_error_code(res, 400)
# failure case, volume type description has more than 255 characters
body = {'volume_type': {'name': 'vol-type-001',
'description': ('a' * 500),
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
self._validate_error_code(res, 400)
# failure case, is_public is invalid input
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': 'a',
'extra_specs': {
'capabilities': 'gpu',
}}}
res = self.controller.post(**body)
self._validate_error_code(res, 400)
# failure case, volume type name is unique
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
res = self.controller.post(**body)
self._validate_error_code(res, 409)
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(context, 'extract_context_from_environ')
def test_get_one(self, mock_context):
mock_context.return_value = self.context
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
res = self.controller.get_one(vtype['id'])['volume_type']
self.assertEqual('vol-type-001', res['name'])
self.assertEqual('volume type 001', res['description'])
capabilities = res['extra_specs']['capabilities']
self.assertEqual('gpu', capabilities)
# failure case, volume type is not exist.
fake_id = "Fake_ID"
res = self.controller.get_one(fake_id)
self._validate_error_code(res, 404)
# failure case, the volume type is private.
body = {'volume_type': {'name': 'vol-type-002',
'description': 'volume type 002',
'os-volume-type-access:is_public': False,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-002')
self.context.is_admin = False
res = self.controller.get_one(vtype['id'])
self._validate_error_code(res, 404)
@patch.object(context, 'extract_context_from_environ')
def test_get_all(self, mock_context):
mock_context.return_value = self.context
volume_type_001 = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-'
'type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
volume_type_002 = {'volume_type': {'name': 'vol-type-002',
'description': 'volume type 002',
'os-volume-'
'type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**volume_type_001)
self.controller.post(**volume_type_002)
volume_types = self.controller.get_all()['volume_types']
self.assertEqual('vol-type-001', volume_types[0]['name'])
self.assertEqual('volume type 001', volume_types[0]['description'])
capabilities_001 = volume_types[0]['extra_specs']['capabilities']
self.assertEqual('gpu', capabilities_001)
self.assertEqual('vol-type-002', volume_types[1]['name'])
self.assertEqual('volume type 002', volume_types[1]['description'])
capabilities_002 = volume_types[1]['extra_specs']['capabilities']
self.assertEqual('gpu', capabilities_002)
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(context, 'extract_context_from_environ')
def test_put(self, mock_context):
mock_context.return_value = self.context
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
body_update = {'volume_type': {'name': 'vol-type-002',
'description': 'volume type 002',
'os-volume-'
'type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
res = self.controller.put(vtype['id'], **body_update)['volume_type']
self.assertEqual('vol-type-002', res['name'])
self.assertEqual('volume type 002', res['description'])
capabilities = res['extra_specs']['capabilities']
self.assertEqual('gpu', capabilities)
# failure case, volume type name, description, is_public
# not None at the same time
body = {'volume_type': {'extra_specs': {
'capabilities': 'gpu',
}}}
res = self.controller.put(vtype['id'], **body)
self._validate_error_code(res, 400)
# failure case, name exists in db
body = {'volume_type': {'name': 'vol-type-003',
'description': 'volume type 003',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
res = self.controller.put(vtype['id'], **body)
self._validate_error_code(res, 500)
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(db_api, 'volume_type_delete')
@patch.object(context, 'extract_context_from_environ')
def test_delete(self, mock_context, mock_delete):
mock_context.return_value = self.context
mock_delete.return_value = Exception()
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
# failure case, only admin delete create volume type
self.context.is_admin = False
res = self.controller.delete(vtype['id'])
self._validate_error_code(res, 403)
# failure case, bad request
self.context.is_admin = True
res = self.controller.delete(_id=None)
self._validate_error_code(res, 404)
res = self.controller.delete(vtype['id'])
self.assertEqual(res.status, 202)
def tearDown(self):
core.ModelBase.metadata.drop_all(core.get_engine())
| # Copyright 2016 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
import pecan
import unittest
from trio2o.cinder_apigw.controllers import volume_type
from trio2o.common import context
from trio2o.db import api as db_api
from trio2o.db import core
class FakeResponse(object):
def __new__(cls, code=500):
cls.status = code
cls.status_code = code
return super(FakeResponse, cls).__new__(cls)
class VolumeTypeTest(unittest.TestCase):
def setUp(self):
core.initialize()
core.ModelBase.metadata.create_all(core.get_engine())
self.context = context.get_admin_context()
self.project_id = 'test_project'
self.controller = volume_type.VolumeTypeController(self.project_id)
def _validate_error_code(self, res, code):
self.assertEqual(code, res[res.keys()[0]]['code'])
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(context, 'extract_context_from_environ')
def test_post(self, mock_context):
mock_context.return_value = self.context
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
res = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
self.assertEqual('vol-type-001', res['name'])
self.assertEqual('volume type 001', res['description'])
capabilities = res['extra_specs']['capabilities']
self.assertEqual('gpu', capabilities)
# failure case, only admin can create volume type
self.context.is_admin = False
res = self.controller.post(**body)
self._validate_error_code(res, 403)
self.context.is_admin = True
# failure case, volume_type body is required
body = {'name': 'vol-type-002'}
res = self.controller.post(**body)
self._validate_error_code(res, 400)
# failure case, volume type name is empty
body = {'volume_type': {'name': '',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
res = self.controller.post(**body)
self._validate_error_code(res, 400)
# failure case, volume type name has more than 255 characters
body = {'volume_type': {'name': ('a' * 500),
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu', }
}
}
res = self.controller.post(**body)
self._validate_error_code(res, 400)
# failure case, volume type description has more than 255 characters
body = {'volume_type': {'name': 'vol-type-001',
'description': ('a' * 500),
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
self._validate_error_code(res, 400)
# failure case, is_public is invalid input
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': 'a',
'extra_specs': {
'capabilities': 'gpu',
}}}
res = self.controller.post(**body)
self._validate_error_code(res, 400)
# failure case, volume type name is unique
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
res = self.controller.post(**body)
self._validate_error_code(res, 409)
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(context, 'extract_context_from_environ')
def test_get_one(self, mock_context):
mock_context.return_value = self.context
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
res = self.controller.get_one(vtype['id'])['volume_type']
self.assertEqual('vol-type-001', res['name'])
self.assertEqual('volume type 001', res['description'])
capabilities = res['extra_specs']['capabilities']
self.assertEqual('gpu', capabilities)
# failure case, volume type is not exist.
fake_id = "Fake_ID"
res = self.controller.get_one(fake_id)
self._validate_error_code(res, 404)
# failure case, the volume type is private.
body = {'volume_type': {'name': 'vol-type-002',
'description': 'volume type 002',
'os-volume-type-access:is_public': False,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-002')
self.context.is_admin = False
res = self.controller.get_one(vtype['id'])
self._validate_error_code(res, 404)
@patch.object(context, 'extract_context_from_environ')
def test_get_all(self, mock_context):
mock_context.return_value = self.context
volume_type_001 = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-'
'type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
volume_type_002 = {'volume_type': {'name': 'vol-type-002',
'description': 'volume type 002',
'os-volume-'
'type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**volume_type_001)
self.controller.post(**volume_type_002)
volume_types = self.controller.get_all()['volume_types']
self.assertEqual('vol-type-001', volume_types[0]['name'])
self.assertEqual('volume type 001', volume_types[0]['description'])
capabilities_001 = volume_types[0]['extra_specs']['capabilities']
self.assertEqual('gpu', capabilities_001)
self.assertEqual('vol-type-002', volume_types[1]['name'])
self.assertEqual('volume type 002', volume_types[1]['description'])
capabilities_002 = volume_types[1]['extra_specs']['capabilities']
self.assertEqual('gpu', capabilities_002)
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(context, 'extract_context_from_environ')
def test_put(self, mock_context):
mock_context.return_value = self.context
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
body_update = {'volume_type': {'name': 'vol-type-002',
'description': 'volume type 002',
'os-volume-'
'type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
res = self.controller.put(vtype['id'], **body_update)['volume_type']
self.assertEqual('vol-type-002', res['name'])
self.assertEqual('volume type 002', res['description'])
capabilities = res['extra_specs']['capabilities']
self.assertEqual('gpu', capabilities)
# failure case, volume type name, description, is_public
# not None at the same time
body = {'volume_type': {'extra_specs': {
'capabilities': 'gpu',
}}}
res = self.controller.put(vtype['id'], **body)
self._validate_error_code(res, 400)
# failure case, name exists in db
body = {'volume_type': {'name': 'vol-type-003',
'description': 'volume type 003',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
res = self.controller.put(vtype['id'], **body)
self._validate_error_code(res, 500)
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(db_api, 'volume_type_delete')
@patch.object(context, 'extract_context_from_environ')
def test_delete(self, mock_context, mock_delete):
mock_context.return_value = self.context
mock_delete.return_value = Exception()
body = {'volume_type': {'name': 'vol-type-001',
'description': 'volume type 001',
'os-volume-type-access:is_public': True,
'extra_specs': {
'capabilities': 'gpu',
}}}
self.controller.post(**body)
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
# failure case, only admin delete create volume type
self.context.is_admin = False
res = self.controller.delete(vtype['id'])
self._validate_error_code(res, 403)
# failure case, bad request
self.context.is_admin = True
res = self.controller.delete(_id=None)
self._validate_error_code(res, 404)
res = self.controller.delete(vtype['id'])
self.assertEqual(res.status, 202)
def tearDown(self):
core.ModelBase.metadata.drop_all(core.get_engine()) | en | 0.835345 | # Copyright 2016 OpenStack Foundation. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # failure case, only admin can create volume type # failure case, volume_type body is required # failure case, volume type name is empty # failure case, volume type name has more than 255 characters # failure case, volume type description has more than 255 characters # failure case, is_public is invalid input # failure case, volume type name is unique # failure case, volume type is not exist. # failure case, the volume type is private. # failure case, volume type name, description, is_public # not None at the same time # failure case, name exists in db # failure case, only admin delete create volume type # failure case, bad request | 1.807556 | 2 |
read.py | ep1cman/PyOnzo | 2 | 6614786 | <reponame>ep1cman/PyOnzo
import datetime
import time
import math
import onzo.device
conn = onzo.device.Connection()
try:
conn.connect()
disp = onzo.device.Display(conn)
clamp = onzo.device.Clamp(conn)
p_reactive = None
counter = 0
print("Timestamp,P_real,P_reactive,P_apparent,kWh,Battery_Voltage")
while True:
p_real = clamp.get_power()
# reactive power only updates onces every 15s, so there is no use
# querying more often, this just wastes clamp battery
if counter % 15 == 0:
p_reactive = clamp.get_powervars()
# Only update battery once every 10mins
if counter % (60 * 10) == 0:
battery = clamp.get_batteryvolts()
p_apparent = int(math.sqrt(p_real**2 + p_reactive**2))
ear = clamp.get_cumulative_kwh()
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("{},{},{},{},{},{}".format(timestamp, p_real, p_reactive, p_apparent, ear, battery))
counter += 1
time.sleep(1)
finally:
conn.disconnect()
| import datetime
import time
import math
import onzo.device
conn = onzo.device.Connection()
try:
conn.connect()
disp = onzo.device.Display(conn)
clamp = onzo.device.Clamp(conn)
p_reactive = None
counter = 0
print("Timestamp,P_real,P_reactive,P_apparent,kWh,Battery_Voltage")
while True:
p_real = clamp.get_power()
# reactive power only updates onces every 15s, so there is no use
# querying more often, this just wastes clamp battery
if counter % 15 == 0:
p_reactive = clamp.get_powervars()
# Only update battery once every 10mins
if counter % (60 * 10) == 0:
battery = clamp.get_batteryvolts()
p_apparent = int(math.sqrt(p_real**2 + p_reactive**2))
ear = clamp.get_cumulative_kwh()
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("{},{},{},{},{},{}".format(timestamp, p_real, p_reactive, p_apparent, ear, battery))
counter += 1
time.sleep(1)
finally:
conn.disconnect() | en | 0.860017 | # reactive power only updates onces every 15s, so there is no use # querying more often, this just wastes clamp battery # Only update battery once every 10mins | 2.667443 | 3 |
reduce_std.py | okadate/seasonal | 0 | 6614787 | # coding: utf-8
# (c) 2016-02-12 <NAME>
import netCDF4
import shutil
std_tmp = '/home/okada/Data/ob500_std_i_param_v1_NL1_{0:04d}.nc'
std_main = '/home/okada/Data/ob500_std_i_param_v1_NL1.nc'
std_zeros = '/home/okada/Data/ob500_std_i_zeros.nc'
grdfile = '/home/okada/Data/ob500_grd-11_3.nc'
vnames = ['temp', 'salt', 'NO3', 'NH4', 'chlorophyll', 'phytoplankton', 'zooplankton',
'LdetritusN', 'SdetritusN', 'oxygen', 'PO4', 'LdetritusP', 'SdetritusP']
shutil.copyfile(std_zeros, std_main)
main = netCDF4.Dataset(std_main, 'a')
for i in range(12):
stdfile = std_tmp.format(i+1)
nc = netCDF4.Dataset(stdfile, 'r')
for vname in vnames:
main[vname][i] = nc[vname][:]
nc.close()
main.close()
| # coding: utf-8
# (c) 2016-02-12 <NAME>
import netCDF4
import shutil
std_tmp = '/home/okada/Data/ob500_std_i_param_v1_NL1_{0:04d}.nc'
std_main = '/home/okada/Data/ob500_std_i_param_v1_NL1.nc'
std_zeros = '/home/okada/Data/ob500_std_i_zeros.nc'
grdfile = '/home/okada/Data/ob500_grd-11_3.nc'
vnames = ['temp', 'salt', 'NO3', 'NH4', 'chlorophyll', 'phytoplankton', 'zooplankton',
'LdetritusN', 'SdetritusN', 'oxygen', 'PO4', 'LdetritusP', 'SdetritusP']
shutil.copyfile(std_zeros, std_main)
main = netCDF4.Dataset(std_main, 'a')
for i in range(12):
stdfile = std_tmp.format(i+1)
nc = netCDF4.Dataset(stdfile, 'r')
for vname in vnames:
main[vname][i] = nc[vname][:]
nc.close()
main.close()
| en | 0.741166 | # coding: utf-8 # (c) 2016-02-12 <NAME> | 2.364315 | 2 |
tests/test_create_model_inputs.py | Malachov/mydatapreprocessing | 1 | 6614788 | import numpy as np
import mypythontools
mypythontools.tests.setup_tests()
import mydatapreprocessing.create_model_inputs as mdpi
def test_make_sequences():
data = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15, 16], [17, 18, 19, 20, 21, 22, 23, 24],]
).T
X, y, x_input, _ = mdpi.make_sequences(data, n_steps_in=2, n_steps_out=3, constant=1)
X_res = np.array(
[
[1.0, 1.0, 2.0, 3.0, 4.0, 9.0, 10.0, 11.0, 12.0, 17.0, 18.0, 19.0, 20.0],
[1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 11.0, 12.0, 13.0, 18.0, 19.0, 20.0, 21.0],
]
)
y_res = np.array([[5, 6, 7], [6, 7, 8]])
x_inpu_res = np.array([[1.0, 5.0, 6.0, 7.0, 8.0, 13.0, 14.0, 15.0, 16.0, 21.0, 22.0, 23.0, 24.0]])
data2 = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]]).T
X2, y2, x_input2, test_inputs2 = mdpi.make_sequences(
data2, n_steps_in=2, n_steps_out=1, constant=0, predicts=3, repeatit=2
)
X2_res = np.array(
np.array(
[
[1, 2, 11, 12],
[2, 3, 12, 13],
[3, 4, 13, 14],
[4, 5, 14, 15],
[5, 6, 15, 16],
[6, 7, 16, 17],
[7, 8, 17, 18],
[8, 9, 18, 19],
]
)
)
y2_res = np.array(([[3], [4], [5], [6], [7], [8], [9], [10]]))
x_input2_res = np.array(([[9, 10, 19, 20]]))
test_inputs2_res = np.array([[[5, 6, 15, 16]], [[6, 7, 16, 17]]])
assert all(
[
np.allclose(X, X_res),
np.allclose(y, y_res),
np.allclose(x_input, x_inpu_res),
np.allclose(X2, X2_res),
np.allclose(y2, y2_res),
np.allclose(x_input2, x_input2_res),
np.allclose(test_inputs2, test_inputs2_res),
]
)
if __name__ == "__main__":
pass
| import numpy as np
import mypythontools
mypythontools.tests.setup_tests()
import mydatapreprocessing.create_model_inputs as mdpi
def test_make_sequences():
data = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15, 16], [17, 18, 19, 20, 21, 22, 23, 24],]
).T
X, y, x_input, _ = mdpi.make_sequences(data, n_steps_in=2, n_steps_out=3, constant=1)
X_res = np.array(
[
[1.0, 1.0, 2.0, 3.0, 4.0, 9.0, 10.0, 11.0, 12.0, 17.0, 18.0, 19.0, 20.0],
[1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 11.0, 12.0, 13.0, 18.0, 19.0, 20.0, 21.0],
]
)
y_res = np.array([[5, 6, 7], [6, 7, 8]])
x_inpu_res = np.array([[1.0, 5.0, 6.0, 7.0, 8.0, 13.0, 14.0, 15.0, 16.0, 21.0, 22.0, 23.0, 24.0]])
data2 = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]]).T
X2, y2, x_input2, test_inputs2 = mdpi.make_sequences(
data2, n_steps_in=2, n_steps_out=1, constant=0, predicts=3, repeatit=2
)
X2_res = np.array(
np.array(
[
[1, 2, 11, 12],
[2, 3, 12, 13],
[3, 4, 13, 14],
[4, 5, 14, 15],
[5, 6, 15, 16],
[6, 7, 16, 17],
[7, 8, 17, 18],
[8, 9, 18, 19],
]
)
)
y2_res = np.array(([[3], [4], [5], [6], [7], [8], [9], [10]]))
x_input2_res = np.array(([[9, 10, 19, 20]]))
test_inputs2_res = np.array([[[5, 6, 15, 16]], [[6, 7, 16, 17]]])
assert all(
[
np.allclose(X, X_res),
np.allclose(y, y_res),
np.allclose(x_input, x_inpu_res),
np.allclose(X2, X2_res),
np.allclose(y2, y2_res),
np.allclose(x_input2, x_input2_res),
np.allclose(test_inputs2, test_inputs2_res),
]
)
if __name__ == "__main__":
pass
| none | 1 | 2.296879 | 2 | |
src/test/python/tranquilitybase/gcpdac/unit/test_utils.py | tranquilitybase-io/tb-gcp-dac | 2 | 6614789 | import unittest
from unittest import TestCase
from src.main.python.tranquilitybase.lib.common.utils import labellize
from src.main.python.tranquilitybase.lib.common.utils import folderize
from src.main.python.tranquilitybase.lib.common.utils import sanitize
class Utils_Test(TestCase):
def test_labellize(self):
# google label rules here - https://cloud.google.com/compute/docs/labeling-resources
self.assertEqual("abc", labellize("abc"))
self.assertEqual("ab-c", labellize("ab c"))
self.assertEqual("ab-c", labellize("ab&c"))
self.assertEqual("ab_c", labellize("ab_c"))
self.assertEqual("ab-c", labellize("ab-c"))
self.assertEqual("abc", labellize("ABC"))
self.assertEqual("123", labellize("123"))
self.assertEqual("-123", labellize("-123"))
self.assertEqual("abc-", labellize("abc-"))
self.assertEqual("_123", labellize("_123"))
self.assertEqual("èÿā", labellize("èÿā"))
self.assertEqual("èÿāć", labellize("èÿāĆ"))
self.assertEqual("abcdefghijklimnopqrstuvwxyz-0123456789_abcdefghijklimnopqrstuvw",
labellize("abcdefghijklimnopqrstuvwxyz-0123456789_abcdefghijklimnopqrstuvwxyz"))
def test_sanitize(self):
self.assertEqual("abc", sanitize("abc"))
self.assertEqual("ab-c", sanitize("ab c"))
self.assertEqual("ab-c", sanitize("ab&c"))
self.assertEqual("ab-c", sanitize("ab_c"))
self.assertEqual("ab-c", sanitize("ab-c"))
self.assertEqual("abc", sanitize("ABC"))
self.assertEqual("a123a", sanitize("123"))
self.assertEqual("a-123a", sanitize("-123"))
self.assertEqual("abc", sanitize("-abc"))
self.assertEqual("a-123a", sanitize("_123"))
self.assertEqual("abcdefghijklimnopqrstuvwxyz-0123456789-abcdefghijklimnopqrstuvw",
sanitize("abcdefghijklimnopqrstuvwxyz-0123456789-abcdefghijklimnopqrstuvwxyz"))
def test_folderize(self):
self.assertEqual("abc", folderize("abc"))
self.assertEqual("ab-c", folderize("ab c"))
self.assertEqual("ab-c", folderize("ab&c"))
self.assertEqual("ab_c", folderize("ab_c"))
self.assertEqual("ab-c", folderize("ab-c"))
self.assertEqual("ABC", folderize("ABC"))
self.assertEqual("123", folderize("123"))
self.assertEqual("123", folderize("-123"))
self.assertEqual("abc", folderize("-abc"))
self.assertEqual("123", folderize("_123"))
self.assertEqual("abcDEFghijklmnopqrstuvwxyz-012",
folderize("abcDEFghijklmnopqrstuvwxyz-0123456789-abcdefghijklimnopqrstuvwxyz"))
if __name__ == '__main__':
unittest.main()
| import unittest
from unittest import TestCase
from src.main.python.tranquilitybase.lib.common.utils import labellize
from src.main.python.tranquilitybase.lib.common.utils import folderize
from src.main.python.tranquilitybase.lib.common.utils import sanitize
class Utils_Test(TestCase):
def test_labellize(self):
# google label rules here - https://cloud.google.com/compute/docs/labeling-resources
self.assertEqual("abc", labellize("abc"))
self.assertEqual("ab-c", labellize("ab c"))
self.assertEqual("ab-c", labellize("ab&c"))
self.assertEqual("ab_c", labellize("ab_c"))
self.assertEqual("ab-c", labellize("ab-c"))
self.assertEqual("abc", labellize("ABC"))
self.assertEqual("123", labellize("123"))
self.assertEqual("-123", labellize("-123"))
self.assertEqual("abc-", labellize("abc-"))
self.assertEqual("_123", labellize("_123"))
self.assertEqual("èÿā", labellize("èÿā"))
self.assertEqual("èÿāć", labellize("èÿāĆ"))
self.assertEqual("abcdefghijklimnopqrstuvwxyz-0123456789_abcdefghijklimnopqrstuvw",
labellize("abcdefghijklimnopqrstuvwxyz-0123456789_abcdefghijklimnopqrstuvwxyz"))
def test_sanitize(self):
self.assertEqual("abc", sanitize("abc"))
self.assertEqual("ab-c", sanitize("ab c"))
self.assertEqual("ab-c", sanitize("ab&c"))
self.assertEqual("ab-c", sanitize("ab_c"))
self.assertEqual("ab-c", sanitize("ab-c"))
self.assertEqual("abc", sanitize("ABC"))
self.assertEqual("a123a", sanitize("123"))
self.assertEqual("a-123a", sanitize("-123"))
self.assertEqual("abc", sanitize("-abc"))
self.assertEqual("a-123a", sanitize("_123"))
self.assertEqual("abcdefghijklimnopqrstuvwxyz-0123456789-abcdefghijklimnopqrstuvw",
sanitize("abcdefghijklimnopqrstuvwxyz-0123456789-abcdefghijklimnopqrstuvwxyz"))
def test_folderize(self):
self.assertEqual("abc", folderize("abc"))
self.assertEqual("ab-c", folderize("ab c"))
self.assertEqual("ab-c", folderize("ab&c"))
self.assertEqual("ab_c", folderize("ab_c"))
self.assertEqual("ab-c", folderize("ab-c"))
self.assertEqual("ABC", folderize("ABC"))
self.assertEqual("123", folderize("123"))
self.assertEqual("123", folderize("-123"))
self.assertEqual("abc", folderize("-abc"))
self.assertEqual("123", folderize("_123"))
self.assertEqual("abcDEFghijklmnopqrstuvwxyz-012",
folderize("abcDEFghijklmnopqrstuvwxyz-0123456789-abcdefghijklimnopqrstuvwxyz"))
if __name__ == '__main__':
unittest.main()
| en | 0.451913 | # google label rules here - https://cloud.google.com/compute/docs/labeling-resources | 2.59739 | 3 |
utils/dataset/scrape_transet_nodes_from_osm.py | OpenGridMap/power-grid-detection | 0 | 6614790 | from __future__ import print_function
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
import config
from utils.parsers.transnet_parser import TransnetParser
from utils.scrapers.osm_nodes_scraper import OsmNodesScraper
def scrape_nodes():
region = config.config_params['loc']
min_voltage = 380000
max_voltage = 380000
print('Parsing transnet data...')
transnet_parser = TransnetParser()
print('Filtering by region : %s' % region)
transnet_parser.filter_by_regions(regions='config')
print('Filtering by voltage,\n min voltage : %d \n max voltage : %d' % (min_voltage, max_voltage))
transnet_parser.filter_by_min_max_voltage(min_voltage=min_voltage, max_voltage=max_voltage)
nodes = transnet_parser.nodes
print('Total nodes : %d' % len(nodes))
print('done..\n')
print('Scraping osm data...')
osm_scraper = OsmNodesScraper(nodes, region)
n = osm_scraper.scrape()
print('Scraped %d nodes..' % n)
print('done..')
if __name__ == '__main__':
scrape_nodes()
| from __future__ import print_function
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
import config
from utils.parsers.transnet_parser import TransnetParser
from utils.scrapers.osm_nodes_scraper import OsmNodesScraper
def scrape_nodes():
region = config.config_params['loc']
min_voltage = 380000
max_voltage = 380000
print('Parsing transnet data...')
transnet_parser = TransnetParser()
print('Filtering by region : %s' % region)
transnet_parser.filter_by_regions(regions='config')
print('Filtering by voltage,\n min voltage : %d \n max voltage : %d' % (min_voltage, max_voltage))
transnet_parser.filter_by_min_max_voltage(min_voltage=min_voltage, max_voltage=max_voltage)
nodes = transnet_parser.nodes
print('Total nodes : %d' % len(nodes))
print('done..\n')
print('Scraping osm data...')
osm_scraper = OsmNodesScraper(nodes, region)
n = osm_scraper.scrape()
print('Scraped %d nodes..' % n)
print('done..')
if __name__ == '__main__':
scrape_nodes()
| none | 1 | 2.466826 | 2 | |
SSR/monopoly/consumers/join.py | gaoyi-ai/monopoly | 0 | 6614791 | import logging
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from monopoly.consumers.message import build_join_failed_msg, build_join_reply_msg, build_start_msg
from monopoly.consumers.room import Room, RoomStatus
from monopoly.consumers.util import rooms, games, change_handlers, get_user
from monopoly.core.game import Game
from monopoly.handlers.notice_handler import NoticeHandler
logger = logging.getLogger(__name__)
async def add_player(room_name, player_name):
if room_name not in rooms:
new_room = Room(room_name)
new_room.host = player_name
new_room.join(player_name)
rooms[room_name] = new_room
else:
rooms[room_name].join(player_name)
if rooms[room_name].status == RoomStatus.FULL:
return False
return True
def handle_start(hostname):
if hostname not in games:
room: Room = rooms[hostname]
room.status = RoomStatus.GAMING
player_num = len(room)
game = Game(player_num)
games[hostname] = game
change_handler = NoticeHandler(game, hostname)
game.add_game_change_listener(change_handler)
change_handlers[hostname] = change_handler
return build_start_msg()
class QueryAuthMiddleware:
"""
Custom middleware (insecure) that takes user IDs from the query string.
"""
def __init__(self, app):
# Store the ASGI application we were passed
self.app = app
async def __call__(self, scope, receive, send):
if scope['user'].is_anonymous:
# Look up user from query string (you should also do things like
# checking if it is a valid user ID, or if scope["user"] is already
# populated).
username = scope["query_string"].decode('utf-8').split("=")[-1]
scope['user'] = await get_user(username)
return await self.app(scope, receive, send)
class JoinConsumer(AsyncJsonWebsocketConsumer):
async def receive_json(self, content, **kwargs):
player = self.scope['user']
action = content['action']
logger.info(f"{player}: {action}")
if action == 'join':
player_name = player.username
join_type = content['type']
if join_type == 1:
player_name = "AI"
if not await add_player(self.room_name, player_name):
return await self.send_json(build_join_failed_msg())
else:
msg = await build_join_reply_msg(self.room_name)
elif action == 'start':
msg = handle_start(self.room_name)
else: # action == 'refresh':
msg = await build_join_reply_msg(self.room_name)
# Send message to room group
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'game_message',
'msg': msg
}
)
# Receive message from room group
async def game_message(self, event):
msg = event['msg']
# Send message to WebSocket
await self.send_json(msg)
async def connect(self):
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'monopoly_%s' % self.room_name
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
user = self.scope['user']
player = user.username
room_name = self.room_name
room: Room = rooms.get(room_name)
if room is not None and room.status != RoomStatus.GAMING:
room.players.discard(player)
if room.host == player:
rooms.pop(player)
msg = build_join_failed_msg(status=1)
# Send message to room group
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'game_message',
'msg': msg
}
)
# Leave room group
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
| import logging
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from monopoly.consumers.message import build_join_failed_msg, build_join_reply_msg, build_start_msg
from monopoly.consumers.room import Room, RoomStatus
from monopoly.consumers.util import rooms, games, change_handlers, get_user
from monopoly.core.game import Game
from monopoly.handlers.notice_handler import NoticeHandler
logger = logging.getLogger(__name__)
async def add_player(room_name, player_name):
if room_name not in rooms:
new_room = Room(room_name)
new_room.host = player_name
new_room.join(player_name)
rooms[room_name] = new_room
else:
rooms[room_name].join(player_name)
if rooms[room_name].status == RoomStatus.FULL:
return False
return True
def handle_start(hostname):
if hostname not in games:
room: Room = rooms[hostname]
room.status = RoomStatus.GAMING
player_num = len(room)
game = Game(player_num)
games[hostname] = game
change_handler = NoticeHandler(game, hostname)
game.add_game_change_listener(change_handler)
change_handlers[hostname] = change_handler
return build_start_msg()
class QueryAuthMiddleware:
"""
Custom middleware (insecure) that takes user IDs from the query string.
"""
def __init__(self, app):
# Store the ASGI application we were passed
self.app = app
async def __call__(self, scope, receive, send):
if scope['user'].is_anonymous:
# Look up user from query string (you should also do things like
# checking if it is a valid user ID, or if scope["user"] is already
# populated).
username = scope["query_string"].decode('utf-8').split("=")[-1]
scope['user'] = await get_user(username)
return await self.app(scope, receive, send)
class JoinConsumer(AsyncJsonWebsocketConsumer):
async def receive_json(self, content, **kwargs):
player = self.scope['user']
action = content['action']
logger.info(f"{player}: {action}")
if action == 'join':
player_name = player.username
join_type = content['type']
if join_type == 1:
player_name = "AI"
if not await add_player(self.room_name, player_name):
return await self.send_json(build_join_failed_msg())
else:
msg = await build_join_reply_msg(self.room_name)
elif action == 'start':
msg = handle_start(self.room_name)
else: # action == 'refresh':
msg = await build_join_reply_msg(self.room_name)
# Send message to room group
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'game_message',
'msg': msg
}
)
# Receive message from room group
async def game_message(self, event):
msg = event['msg']
# Send message to WebSocket
await self.send_json(msg)
async def connect(self):
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'monopoly_%s' % self.room_name
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
user = self.scope['user']
player = user.username
room_name = self.room_name
room: Room = rooms.get(room_name)
if room is not None and room.status != RoomStatus.GAMING:
room.players.discard(player)
if room.host == player:
rooms.pop(player)
msg = build_join_failed_msg(status=1)
# Send message to room group
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'game_message',
'msg': msg
}
)
# Leave room group
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
| en | 0.842007 | Custom middleware (insecure) that takes user IDs from the query string. # Store the ASGI application we were passed # Look up user from query string (you should also do things like # checking if it is a valid user ID, or if scope["user"] is already # populated). # action == 'refresh': # Send message to room group # Receive message from room group # Send message to WebSocket # Join room group # Send message to room group # Leave room group | 2.293476 | 2 |
projecteuler/problem_011.py | micahpp/projecteuler | 0 | 6614792 | <gh_stars>0
from projecteuler import util
from functools import reduce
from operator import mul
def solution():
"""
In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction
(up, down, left, right, or diagonally) in the 20×20 grid?
"""
ans = 0
# create grid
grid = [[int(x) for x in line.split()] for line in
open("../data/problem_011_data.txt")]
for i in range(len(grid)):
for j in range(len(grid)):
# horizontal
tmp = reduce(mul, grid[i][j:j + 4])
if tmp > ans:
ans = tmp
# vertical
v_bound = min(i + 4, len(grid))
tmp = []
for k in range(i, v_bound):
tmp.append(grid[k][j])
tmp = reduce(mul, tmp)
if tmp > ans:
ans = tmp
# down & right
tmp = []
h_bound = min(j + 4, len(grid))
for k, l in zip(range(i, v_bound), range(j, h_bound)):
tmp.append(grid[k][l])
tmp = reduce(mul, tmp)
if tmp > ans:
ans = tmp
# down & left
tmp = []
h_bound = max(-1, j - 4)
for k, l in zip(range(i, v_bound), range(j, h_bound, -1)):
tmp.append(grid[k][l])
tmp = reduce(mul, tmp)
if tmp > ans:
ans = tmp
return ans
if __name__ == '__main__':
assert str(solution()) == util.get_answer(11)
| from projecteuler import util
from functools import reduce
from operator import mul
def solution():
"""
In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction
(up, down, left, right, or diagonally) in the 20×20 grid?
"""
ans = 0
# create grid
grid = [[int(x) for x in line.split()] for line in
open("../data/problem_011_data.txt")]
for i in range(len(grid)):
for j in range(len(grid)):
# horizontal
tmp = reduce(mul, grid[i][j:j + 4])
if tmp > ans:
ans = tmp
# vertical
v_bound = min(i + 4, len(grid))
tmp = []
for k in range(i, v_bound):
tmp.append(grid[k][j])
tmp = reduce(mul, tmp)
if tmp > ans:
ans = tmp
# down & right
tmp = []
h_bound = min(j + 4, len(grid))
for k, l in zip(range(i, v_bound), range(j, h_bound)):
tmp.append(grid[k][l])
tmp = reduce(mul, tmp)
if tmp > ans:
ans = tmp
# down & left
tmp = []
h_bound = max(-1, j - 4)
for k, l in zip(range(i, v_bound), range(j, h_bound, -1)):
tmp.append(grid[k][l])
tmp = reduce(mul, tmp)
if tmp > ans:
ans = tmp
return ans
if __name__ == '__main__':
assert str(solution()) == util.get_answer(11) | en | 0.89793 | In the 20×20 grid below, four numbers along a diagonal line have been marked in red. The product of these numbers is 26 × 63 × 78 × 14 = 1788696. What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20×20 grid? # create grid # horizontal # vertical # down & right # down & left | 3.155384 | 3 |
src/rest_app/storage/sqlite.py | dzavodnikov/restful-web-service | 0 | 6614793 | from datetime import date
from typing import List, Any, Optional
from pydantic import ValidationError
from rest_app.domain import Book, BookUpdate, BookNotFoundException
import sqlite3
class SQLiteBookStorage:
"""Storage for books that save data in SQLite 3."""
def __init__(self, storage_name: str):
self.storage_name = storage_name
# Create database if it is not exists.
from pathlib import Path
path = Path(self.storage_name)
if not path.parent.exists():
path.parent.mkdir() # Create dirs.
path.touch() # Create file.
# Create table if it was not exists.
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
create_books_table = """
CREATE TABLE IF NOT EXISTS books (
id INTEGER PRIMARY KEY AUTOINCREMENT,
author TEXT,
title TEXT,
-- SQLite does not have a storage class set aside for storing dates and/or times.
-- See: https://sqlite.org/datatype3.html
published_date TEXT
);
"""
cursor.execute(create_books_table)
connection.commit()
@staticmethod
def get_book_columns(cursor) -> List[str]:
return [column[0] for column in cursor.description]
@staticmethod
def read_book(book_columns: List[str], book_record: List[Any]):
return Book(**dict(zip(book_columns, book_record)))
@staticmethod
def string_compare_expr(key, value):
if value is None:
return None
if "?" in value or "*" in value:
value_expr = value.replace("?", "_").replace("*", "%")
return f'{key} LIKE "{value_expr}"'
else:
return f'{key} = "{value}"'
@staticmethod
def date_compare_expr(key, comparator, value):
if value is None:
return None
time_format = "%Y-%m-%d"
return f'strftime("{time_format}", {key}) {comparator} strftime("{time_format}", "{value}")'
def list(self,
author: Optional[str] = None,
title: Optional[str] = None,
published_date_from: Optional[date] = None,
published_date_to: Optional[date] = None) -> List[Book]:
"""Provide list of saved books."""
filter_conditions = [SQLiteBookStorage.string_compare_expr("author", author),
SQLiteBookStorage.string_compare_expr("title", title),
SQLiteBookStorage.date_compare_expr("published_date", ">", published_date_from),
SQLiteBookStorage.date_compare_expr("published_date", "<", published_date_to)]
condition = " AND ".join([v for v in filter_conditions if v])
where = "" if condition is "" else f"WHERE {condition}"
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
cursor.execute(f"SELECT * from books {where};")
book_record_list = cursor.fetchall()
book_columns = SQLiteBookStorage.get_book_columns(cursor)
result = []
for book_record in book_record_list:
try:
book = SQLiteBookStorage.read_book(book_columns, book_record)
result.append(book)
except ValidationError:
self.remove(book_record[0])
return result
def find(self, book_id: int) -> Book:
"""Find book from the storage. Can be used for modification or delete requests."""
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
cursor.execute(f"SELECT * from books WHERE id = {book_id};")
book_record = cursor.fetchone()
if book_record is None:
raise BookNotFoundException(book_id)
book_columns = SQLiteBookStorage.get_book_columns(cursor)
return SQLiteBookStorage.read_book(book_columns, book_record)
def create(self, book: BookUpdate) -> Book:
"""Create book in a storage. Populate unique identifier for future requests."""
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
book_dict = book.dict()
columns = ', '.join(book_dict.keys())
values = ', '.join([f'"{str(val)}"' for val in book_dict.values()])
create_book = f"INSERT INTO books({columns}) VALUES ({values});"
print(create_book)
cursor.execute(create_book)
new_id = cursor.lastrowid
connection.commit()
return self.find(new_id)
def remove(self, book_id: str) -> None:
"""Remove book from the storage."""
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
cursor.execute(f"DELETE FROM books WHERE id = {book_id};")
connection.commit()
def persist(self, book: Book) -> None:
"""Update book in a storage."""
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
update_items = []
for item in book.dict().items():
if item[0] != 'id' and item[1]:
update_items.append(item)
string_items = [f'"{item[0]}" = "{item[1]}"' for item in update_items]
update_str = ", ".join(string_items)
update_book = f"UPDATE books SET {update_str} WHERE id = {book.id};"
cursor.execute(update_book)
connection.commit()
| from datetime import date
from typing import List, Any, Optional
from pydantic import ValidationError
from rest_app.domain import Book, BookUpdate, BookNotFoundException
import sqlite3
class SQLiteBookStorage:
"""Storage for books that save data in SQLite 3."""
def __init__(self, storage_name: str):
self.storage_name = storage_name
# Create database if it is not exists.
from pathlib import Path
path = Path(self.storage_name)
if not path.parent.exists():
path.parent.mkdir() # Create dirs.
path.touch() # Create file.
# Create table if it was not exists.
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
create_books_table = """
CREATE TABLE IF NOT EXISTS books (
id INTEGER PRIMARY KEY AUTOINCREMENT,
author TEXT,
title TEXT,
-- SQLite does not have a storage class set aside for storing dates and/or times.
-- See: https://sqlite.org/datatype3.html
published_date TEXT
);
"""
cursor.execute(create_books_table)
connection.commit()
@staticmethod
def get_book_columns(cursor) -> List[str]:
return [column[0] for column in cursor.description]
@staticmethod
def read_book(book_columns: List[str], book_record: List[Any]):
return Book(**dict(zip(book_columns, book_record)))
@staticmethod
def string_compare_expr(key, value):
if value is None:
return None
if "?" in value or "*" in value:
value_expr = value.replace("?", "_").replace("*", "%")
return f'{key} LIKE "{value_expr}"'
else:
return f'{key} = "{value}"'
@staticmethod
def date_compare_expr(key, comparator, value):
if value is None:
return None
time_format = "%Y-%m-%d"
return f'strftime("{time_format}", {key}) {comparator} strftime("{time_format}", "{value}")'
def list(self,
author: Optional[str] = None,
title: Optional[str] = None,
published_date_from: Optional[date] = None,
published_date_to: Optional[date] = None) -> List[Book]:
"""Provide list of saved books."""
filter_conditions = [SQLiteBookStorage.string_compare_expr("author", author),
SQLiteBookStorage.string_compare_expr("title", title),
SQLiteBookStorage.date_compare_expr("published_date", ">", published_date_from),
SQLiteBookStorage.date_compare_expr("published_date", "<", published_date_to)]
condition = " AND ".join([v for v in filter_conditions if v])
where = "" if condition is "" else f"WHERE {condition}"
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
cursor.execute(f"SELECT * from books {where};")
book_record_list = cursor.fetchall()
book_columns = SQLiteBookStorage.get_book_columns(cursor)
result = []
for book_record in book_record_list:
try:
book = SQLiteBookStorage.read_book(book_columns, book_record)
result.append(book)
except ValidationError:
self.remove(book_record[0])
return result
def find(self, book_id: int) -> Book:
"""Find book from the storage. Can be used for modification or delete requests."""
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
cursor.execute(f"SELECT * from books WHERE id = {book_id};")
book_record = cursor.fetchone()
if book_record is None:
raise BookNotFoundException(book_id)
book_columns = SQLiteBookStorage.get_book_columns(cursor)
return SQLiteBookStorage.read_book(book_columns, book_record)
def create(self, book: BookUpdate) -> Book:
"""Create book in a storage. Populate unique identifier for future requests."""
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
book_dict = book.dict()
columns = ', '.join(book_dict.keys())
values = ', '.join([f'"{str(val)}"' for val in book_dict.values()])
create_book = f"INSERT INTO books({columns}) VALUES ({values});"
print(create_book)
cursor.execute(create_book)
new_id = cursor.lastrowid
connection.commit()
return self.find(new_id)
def remove(self, book_id: str) -> None:
"""Remove book from the storage."""
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
cursor.execute(f"DELETE FROM books WHERE id = {book_id};")
connection.commit()
def persist(self, book: Book) -> None:
"""Update book in a storage."""
with sqlite3.connect(self.storage_name) as connection:
cursor = connection.cursor()
update_items = []
for item in book.dict().items():
if item[0] != 'id' and item[1]:
update_items.append(item)
string_items = [f'"{item[0]}" = "{item[1]}"' for item in update_items]
update_str = ", ".join(string_items)
update_book = f"UPDATE books SET {update_str} WHERE id = {book.id};"
cursor.execute(update_book)
connection.commit()
| en | 0.795555 | Storage for books that save data in SQLite 3. # Create database if it is not exists. # Create dirs. # Create file. # Create table if it was not exists. CREATE TABLE IF NOT EXISTS books ( id INTEGER PRIMARY KEY AUTOINCREMENT, author TEXT, title TEXT, -- SQLite does not have a storage class set aside for storing dates and/or times. -- See: https://sqlite.org/datatype3.html published_date TEXT ); Provide list of saved books. Find book from the storage. Can be used for modification or delete requests. Create book in a storage. Populate unique identifier for future requests. Remove book from the storage. Update book in a storage. | 3.435684 | 3 |
tests/test_utils.py | Brown-University-Library/vivo-data-management | 4 | 6614794 | <gh_stars>1-10
import pytest
def test_scrub_doi():
    """scrub_doi should reduce each messy DOI representation to '10.1234'."""
    from vdm.utils import scrub_doi

    # Every raw form below encodes the same DOI.
    raw_forms = [
        'http://dx.doi.org/10.1234',
        '10.123 4',
        '<p>10.1234</p>',
        '<a href="http://dx.doi.org/10.1234">10.1234</a>',
        'DOI:10.1234',
        'doi:10.1234',
    ]
    for raw in raw_forms:
        assert scrub_doi(raw) == '10.1234'
def test_pull():
    """pull should return stored values and map empty strings to None."""
    from vdm.utils import pull

    data = {'mykey': 'Value', 'key2': '', 'key3': u''}
    # A real value comes back unchanged.
    assert pull(data, 'mykey') == 'Value'
    # Empty strings (str or unicode) are normalized to None.
    assert pull(data, 'key2') is None
    assert pull(data, 'key3') is None
def test_get_env():
    """get_env mirrors os.environ and raises for a missing variable."""
    from vdm.utils import get_env
    import os

    key, value = 'TMP', 'pytest'
    os.environ[key] = value
    assert get_env(key) == value

    # Once removed, looking the variable up should raise.
    os.environ.pop(key)
    with pytest.raises(Exception):
        get_env(key)
def test_remove_html():
    """remove_html strips markup tags and concatenates the text content."""
    from vdm.utils import remove_html

    assert remove_html("<h1>hello</h1>") == 'hello'
    nested = "<div><h1>hello</h1><span class=\"blah\">world</span></div>"
    assert remove_html(nested) == 'helloworld'
def test_user_agent():
    """
    Set user agent.

    Verifies that get_user_agent picks up the VDM_USER_AGENT environment
    variable and that requests actually sends it as the User-Agent header.
    """
    from vdm.utils import get_user_agent
    import os
    import requests
    agent = "Sample agent"
    # get_user_agent reads the VDM_USER_AGENT environment variable.
    os.environ['VDM_USER_AGENT'] = agent
    h = get_user_agent()
    # NOTE(review): live HTTP call — this test depends on httpbin.org being
    # reachable; consider mocking the request instead.
    resp = requests.get('http://httpbin.org/get', headers=h)
    # The outgoing request should carry exactly the configured agent string.
    assert(resp.request.headers.get('User-Agent') == agent)
    # Clean up so later tests observe an unset variable.
    del os.environ['VDM_USER_AGENT']
def test_user_agent_not_set():
    """
    No user agent set should trigger a warning.
    """
    from vdm.utils import get_user_agent
    import os
    import requests
    #This will cause warnings to raise an error
    # NOTE(review): the comment above looks stale -- no warning filter is installed here.
    try:
        del os.environ['VDM_USER_AGENT']
    except KeyError:
        pass
    # Without the environment variable, get_user_agent should return no headers.
    headers = get_user_agent()
    assert headers == {}
    # NOTE(review): live HTTP request to httpbin.org -- network-dependent test.
    resp = requests.get('http://httpbin.org/get', headers=headers)
    #By default the user agent will contain python.
    assert(resp.request.headers.get('User-Agent').find('python') > -1)
def test_scrub_pmid():
    """scrub_pmid accepts 7- and 8-digit PMIDs and rejects other identifiers."""
    from vdm.utils import scrub_pmid
    # PMC accession numbers and too-short strings are not valid PMIDs.
    assert scrub_pmid(u'PMC2727248') is None
    assert scrub_pmid(u'000') is None
    # Valid 8-digit and 7-digit identifiers pass through unchanged.
    for pmid in (u'18633329', u'8013034', u'9059992'):
        assert scrub_pmid(pmid) == pmid
| import pytest
def test_scrub_doi():
from vdm.utils import scrub_doi
d = 'http://dx.doi.org/10.1234'
scrubbed = scrub_doi(d)
assert(scrubbed == '10.1234')
d = '10.123 4'
assert(
scrub_doi(d) == '10.1234'
)
d = '<p>10.1234</p>'
assert(
scrub_doi(d) == '10.1234'
)
d = '<a href="http://dx.doi.org/10.1234">10.1234</a>'
assert(
scrub_doi(d) == '10.1234'
)
d = 'DOI:10.1234'
assert (
scrub_doi(d) == '10.1234'
)
d = 'doi:10.1234'
assert (
scrub_doi(d) == '10.1234'
)
def test_pull():
from vdm.utils import pull
d = {}
d['mykey'] = 'Value'
assert(
pull(d, 'mykey') == 'Value'
)
d['key2'] = ''
assert(
pull(d, 'key2') is None
)
d['key3'] = u''
assert(
pull(d, 'key3') is None
)
def test_get_env():
from vdm.utils import get_env
import os
os.environ['TMP'] = 'pytest'
assert(
get_env('TMP') == 'pytest'
)
os.environ.pop('TMP')
with pytest.raises(Exception):
get_env('TMP')
def test_remove_html():
from vdm.utils import remove_html
t = "<h1>hello</h1>"
assert(remove_html(t) == 'hello')
t = "<div><h1>hello</h1><span class=\"blah\">world</span></div>"
assert(remove_html(t) == 'helloworld')
def test_user_agent():
"""
Set user agent.
"""
from vdm.utils import get_user_agent
import os
import requests
agent = "Sample agent"
os.environ['VDM_USER_AGENT'] = agent
h = get_user_agent()
resp = requests.get('http://httpbin.org/get', headers=h)
assert(resp.request.headers.get('User-Agent') == agent)
del os.environ['VDM_USER_AGENT']
def test_user_agent_not_set():
"""
No user agent set should trigger a warning.
"""
from vdm.utils import get_user_agent
import os
import requests
#This will cause warnings to raise an error
try:
del os.environ['VDM_USER_AGENT']
except KeyError:
pass
headers = get_user_agent()
assert headers == {}
resp = requests.get('http://httpbin.org/get', headers=headers)
#By default the user agent will contain python.
assert(resp.request.headers.get('User-Agent').find('python') > -1)
def test_scrub_pmid():
from vdm.utils import scrub_pmid
assert scrub_pmid(u'PMC2727248') is None
p = u'18633329'
assert scrub_pmid(p) == p
assert(scrub_pmid(u'000') is None)
#7 digit pmid ids
assert(
scrub_pmid(u'8013034') == u'8013034'
)
assert(
scrub_pmid(u'9059992') == u'9059992'
) | en | 0.68539 | Set user agent. No user agent set should trigger a warning. #This will cause warnings to raise an error #By default the user agent will contain python. #7 digit pmid ids | 2.235869 | 2 |
src/model/history_prices.py | louces85/Yfinance | 1 | 6614795 | <filename>src/model/history_prices.py
"""
-----------------------------------------------------------------------------
@copyright 2021 <NAME>
@doc Analyze B3 stocks List
@author <NAME> <<EMAIL>>
@yfinance 1.0
-----------------------------------------------------------------------------
"""
import yfinance as yf
import warnings
from pandas.core.common import SettingWithCopyWarning
import sys
import os
from prettytable import PrettyTable
from tqdm import tqdm
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from jdbc.connection_factory import Connection_Factory
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
def get_all_stocks():
    """Return the ticker symbol of every row in the stock table.

    Opens a fresh connection via Connection_Factory, reads all rows, and
    returns the values of column index 1 (assumed to hold the ticker
    symbol -- TODO confirm against the schema).
    """
    conn = Connection_Factory().connection()
    cur = conn.cursor()
    cur.execute("select * from stock;")
    rows = cur.fetchall()  # renamed from "list": do not shadow the builtin
    cur.close()
    conn.commit()
    conn.close()  # bug fix: was "conn.close" (attribute access only; connection was never closed)
    return [row[1] for row in rows]
def uptate_history_stock(price_min, price_max, net_income, ticker):
    """Update the cached 6-month price range and net-income flag for one ticker.

    NOTE(review): values are interpolated straight into the SQL string, which is
    injection-prone; inputs come from our own stock table here, but a
    parameterized query would be safer if the driver supports it.
    """
    conn = Connection_Factory().connection()
    cur = conn.cursor()
    cur.execute(
        "update history set price_min={}, price_max={}, net_income={},"
        " date_update=CURRENT_DATE where ticker like '{}';".format(
            price_min, price_max, net_income, ticker))
    cur.close()
    conn.commit()
    conn.close()  # bug fix: was "conn.close" without parentheses, a no-op
# Build a summary table and refresh the history table for every tracked stock.
get_all_stocks()  # NOTE(review): result is discarded -- looks like a leftover warm-up call
myTable = PrettyTable(["Ticker", "pMax", "pMin", "Net Income"])
myTable.align["Ticker"] = "l"
for stock in tqdm(get_all_stocks()):
    # Pull six months of daily quotes from Yahoo Finance (B3 tickers use the .SA suffix).
    df_six_month = yf.download(stock + '.SA', period='6mo', progress=False)
    df_prices = df_six_month[['Adj Close']]
    df_prices.dropna(subset = ['Adj Close'], inplace=True) #remove values NaN
    cols_as_np_v = df_prices[df_prices.columns[0:]].to_numpy()
    flag = True  # stays True while no negative "Net Income" entry has been seen
    try:
        msft = yf.Ticker(stock + '.SA')
        df = msft.financials
        row = df.loc['Net Income', :]
        list_incomes = row.tolist()
        for i in list_incomes:
            if i < 0:
                flag = False
                break
    except Exception as e:
        # Missing/odd financials: report and fall back to treating income as positive.
        print(e)
        flag = True
    try:
        highest_price_in_the_last_six_months = round(cols_as_np_v.max(),1)
        lowest_price_in_the_last_six_months = round(cols_as_np_v.min(),1)
    except Exception as e:
        # Empty price series: use sentinel prices so the row is still recorded.
        highest_price_in_the_last_six_months = 10000.00
        lowest_price_in_the_last_six_months = 10000.00
    myTable.add_row([stock, str(highest_price_in_the_last_six_months), str(lowest_price_in_the_last_six_months),str(flag)])
    # NOTE(review): both branches below perform the identical update call.
    if(len(df_six_month) > 1):
        uptate_history_stock(lowest_price_in_the_last_six_months, highest_price_in_the_last_six_months, flag , stock)
    else:
        uptate_history_stock(lowest_price_in_the_last_six_months, highest_price_in_the_last_six_months, flag, stock)
print(myTable)
"""
-----------------------------------------------------------------------------
@copyright 2021 <NAME>
@doc Analyze B3 stocks List
@author <NAME> <<EMAIL>>
@yfinance 1.0
-----------------------------------------------------------------------------
"""
import yfinance as yf
import warnings
from pandas.core.common import SettingWithCopyWarning
import sys
import os
from prettytable import PrettyTable
from tqdm import tqdm
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from jdbc.connection_factory import Connection_Factory
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
def get_all_stocks():
conn = Connection_Factory().connection()
cur = conn.cursor()
cur.execute("select * from stock;")
list = cur.fetchall()
cur.close()
conn.commit()
conn.close
list_stocks = []
for stock in list:
list_stocks.append(stock[1])
return list_stocks
def uptate_history_stock(price_min, price_max, net_income, ticker):
conn = Connection_Factory().connection()
cur = conn.cursor()
cur.execute("update history set price_min={}, price_max={}, net_income={}, date_update=CURRENT_DATE where ticker like '{}';".format(price_min, price_max,net_income, ticker))
cur.close()
conn.commit()
conn.close
get_all_stocks()
myTable = PrettyTable(["Ticker", "pMax", "pMin", "Net Income"])
myTable.align["Ticker"] = "l"
for stock in tqdm(get_all_stocks()):
df_six_month = yf.download(stock + '.SA', period='6mo', progress=False)
df_prices = df_six_month[['Adj Close']]
df_prices.dropna(subset = ['Adj Close'], inplace=True) #remove values NaN
cols_as_np_v = df_prices[df_prices.columns[0:]].to_numpy()
flag = True
try:
msft = yf.Ticker(stock + '.SA')
df = msft.financials
row = df.loc['Net Income', :]
list_incomes = row.tolist()
for i in list_incomes:
if i < 0:
flag = False
break
except Exception as e:
print(e)
flag = True
try:
highest_price_in_the_last_six_months = round(cols_as_np_v.max(),1)
lowest_price_in_the_last_six_months = round(cols_as_np_v.min(),1)
except Exception as e:
highest_price_in_the_last_six_months = 10000.00
lowest_price_in_the_last_six_months = 10000.00
myTable.add_row([stock, str(highest_price_in_the_last_six_months), str(lowest_price_in_the_last_six_months),str(flag)])
if(len(df_six_month) > 1):
uptate_history_stock(lowest_price_in_the_last_six_months, highest_price_in_the_last_six_months, flag , stock)
else:
uptate_history_stock(lowest_price_in_the_last_six_months, highest_price_in_the_last_six_months, flag, stock)
print(myTable) | en | 0.168126 | ----------------------------------------------------------------------------- @copyright 2021 <NAME> @doc Analyze B3 stocks List @author <NAME> <<EMAIL>> @yfinance 1.0 ----------------------------------------------------------------------------- #remove values NaN | 2.606009 | 3 |
socketUpd/socketUdpClient.py | ZiqiangGe/Python | 2 | 6614796 | import socket
# UDP client: send three names to a local server on port 9999 and print each reply.
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
for data in [b'Michael',b'Tracy',b'Sarah']:
    s.sendto(data,('127.0.0.1',9999))
    # recv blocks until the server responds; replies are decoded as UTF-8 text.
    print(s.recv(1024).decode('utf-8'))
s.close()
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
for data in [b'Michael',b'Tracy',b'Sarah']:
s.sendto(data,('127.0.0.1',9999))
print(s.recv(1024).decode('utf-8'))
s.close() | none | 1 | 2.564354 | 3 | |
proso_feedback/views.py | adaptive-learning/proso-apps | 2 | 6614797 | <reponame>adaptive-learning/proso-apps<gh_stars>1-10
# -*- coding: utf-8 -*-
from proso.django.request import json_body
from proso.django.response import render, render_json
from django.template.loader import render_to_string
from django.http import HttpResponse, HttpResponseBadRequest
from django.core.mail import EmailMultiAlternatives
from logging import getLogger
from .models import Rating, Comment
from proso_user.models import Session
from lazysignup.decorators import allow_lazy_user
from proso_common.models import get_config
from django.utils.translation import ugettext as _
LOGGER = getLogger(__name__)
def is_likely_worthless(feedback):
    """Heuristic spam check: feedback whose text has at most five words is likely worthless."""
    word_count = len(feedback['text'].split())
    return word_count <= 5
@allow_lazy_user
def feedback(request):
    """
    Send feedback to the authors of the system.

    GET parameters:
      html
        turn on the HTML version of the API
    POST parameters (JSON):
      text:
        the main feedback content
      email (optional):
        user's e-mail
      username (optional):
        user's name
    """
    if request.method == 'GET':
        return render(request, 'feedback_feedback.html', {}, help_text=feedback.__doc__)
    if request.method == 'POST':
        feedback_data = json_body(request.body.decode("utf-8"))
        feedback_data['user_agent'] = Session.objects.get_current_session().http_user_agent.content
        # Fall back to the authenticated user's identity when the form omits it.
        if not feedback_data.get('username'):
            feedback_data['username'] = request.user.username
        if not feedback_data.get('email'):
            feedback_data['email'] = request.user.email
        comment = Comment.objects.create(
            username=feedback_data['username'],
            email=feedback_data['email'],
            text=feedback_data['text'])
        if get_config('proso_feedback', 'send_emails', default=True):
            feedback_domain = get_config('proso_feedback', 'domain', required=True)
            feedback_to = get_config('proso_feedback', 'to', required=True)
            # Likely-spam messages are routed through a separate sender address.
            if is_likely_worthless(feedback_data):
                mail_from = 'spam@' + feedback_domain
            else:
                mail_from = 'feedback@' + feedback_domain
            text_content = render_to_string("emails/feedback.plain.txt", {
                "feedback": feedback_data,
                "user": request.user,
            })
            html_content = render_to_string("emails/feedback.html", {
                "feedback": feedback_data,
                "user": request.user,
            })
            subject = feedback_domain + ' feedback ' + str(comment.id)
            mail = EmailMultiAlternatives(
                subject,
                text_content,
                mail_from,
                feedback_to,
            )
            mail.attach_alternative(html_content, "text/html")
            mail.send()
            LOGGER.debug("email sent %s\n", text_content)
        return HttpResponse('ok', status=201)
    else:
        # Bug fix: the message used a %s placeholder with str.format, so the
        # method name was never interpolated into the response body.
        return HttpResponseBadRequest("method {0} is not allowed".format(request.method))
@allow_lazy_user
def rating(request):
    """
    Rate the current practice.

    GET parameters:
      html
        turn on the HTML version of the API
    POST parameters (JSON):
      value:
        one of the following numbers (how difficult questions are?):
          (1) too easy,
          (2) appropriate,
          (3) too difficult
        or one of the following numbers (how difficult questions should be?):
          (4) much easier
          (5) bit easier
          (6) the same
          (7) bit harder
          (8) much harder
    """
    if request.method == 'GET':
        return render(request, 'feedback_rating.html', {}, help_text=rating.__doc__)
    if request.method == 'POST':
        data = json_body(request.body.decode("utf-8"))
        # Only the documented rating values 1..8 are accepted.
        if data['value'] not in list(range(1, 9)):
            return render_json(
                request,
                {'error': _('The given value is not valid.'), 'error_type': 'invalid_value'},
                template='feedback_json.html', status=400
            )
        rating_object = Rating(
            user=request.user,
            value=data['value'],
        )
        rating_object.save()
        return HttpResponse('ok', status=201)
    else:
        # Bug fix: the message used a %s placeholder with str.format, so the
        # method name was never interpolated into the response body.
        return HttpResponseBadRequest("method {0} is not allowed".format(request.method))
| # -*- coding: utf-8 -*-
from proso.django.request import json_body
from proso.django.response import render, render_json
from django.template.loader import render_to_string
from django.http import HttpResponse, HttpResponseBadRequest
from django.core.mail import EmailMultiAlternatives
from logging import getLogger
from .models import Rating, Comment
from proso_user.models import Session
from lazysignup.decorators import allow_lazy_user
from proso_common.models import get_config
from django.utils.translation import ugettext as _
LOGGER = getLogger(__name__)
def is_likely_worthless(feedback):
return len(feedback['text'].split()) <= 5
@allow_lazy_user
def feedback(request):
"""
Send feedback to the authors of the system.
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
text:
the main feedback content
email (optional):
user's e-mail
username (optional):
user's name
"""
if request.method == 'GET':
return render(request, 'feedback_feedback.html', {}, help_text=feedback.__doc__)
if request.method == 'POST':
feedback_data = json_body(request.body.decode("utf-8"))
feedback_data['user_agent'] = Session.objects.get_current_session().http_user_agent.content
if not feedback_data.get('username'):
feedback_data['username'] = request.user.username
if not feedback_data.get('email'):
feedback_data['email'] = request.user.email
comment = Comment.objects.create(
username=feedback_data['username'],
email=feedback_data['email'],
text=feedback_data['text'])
if get_config('proso_feedback', 'send_emails', default=True):
feedback_domain = get_config('proso_feedback', 'domain', required=True)
feedback_to = get_config('proso_feedback', 'to', required=True)
if is_likely_worthless(feedback_data):
mail_from = 'spam@' + feedback_domain
else:
mail_from = 'feedback@' + feedback_domain
text_content = render_to_string("emails/feedback.plain.txt", {
"feedback": feedback_data,
"user": request.user,
})
html_content = render_to_string("emails/feedback.html", {
"feedback": feedback_data,
"user": request.user,
})
subject = feedback_domain + ' feedback ' + str(comment.id)
mail = EmailMultiAlternatives(
subject,
text_content,
mail_from,
feedback_to,
)
mail.attach_alternative(html_content, "text/html")
mail.send()
LOGGER.debug("email sent %s\n", text_content)
return HttpResponse('ok', status=201)
else:
return HttpResponseBadRequest("method %s is not allowed".format(request.method))
@allow_lazy_user
def rating(request):
"""
Rate the current practice.
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
value:
one of the following numbers (how difficult questions are?):
(1) too easy,
(2) appropriate,
(3) too difficult
or one of the following numbers (how difficult questions should be?):
(4) much easier
(5) bit easier
(6) the same
(7) bit harder
(8) much harder
"""
if request.method == 'GET':
return render(request, 'feedback_rating.html', {}, help_text=rating.__doc__)
if request.method == 'POST':
data = json_body(request.body.decode("utf-8"))
if data['value'] not in list(range(1, 9)):
return render_json(
request,
{'error': _('The given value is not valid.'), 'error_type': 'invalid_value'},
template='feedback_json.html', status=400
)
rating_object = Rating(
user=request.user,
value=data['value'],
)
rating_object.save()
return HttpResponse('ok', status=201)
else:
return HttpResponseBadRequest("method %s is not allowed".format(request.method)) | en | 0.686965 | # -*- coding: utf-8 -*- Send feedback to the authors of the system. GET parameters: html turn on the HTML version of the API POST parameters (JSON): text: the main feedback content email (optional): user's e-mail username (optional): user's name Rate the current practice. GET parameters: html turn on the HTML version of the API POST parameters (JSON): value: one of the following numbers (how difficult questions are?): (1) too easy, (2) appropriate, (3) too difficult or one of the following numbers (how difficult questions should be?): (4) much easier (5) bit easier (6) the same (7) bit harder (8) much harder | 2.192636 | 2 |
common/config.py | weipeng/pyepi | 1 | 6614798 | from numpy import float64
data_type = float64  # alias for the numpy dtype used as this package's numeric type
| from numpy import float64
data_type = float64
| none | 1 | 1.524568 | 2 | |
UnitDict.py | mikequentel/c2ada | 4 | 6614799 | <gh_stars>1-10
# $Source: /home/CVSROOT/c2ada/UnitDict.py,v $
# $Revision: 1.1.1.1 $ $Date: 1999/02/02 12:01:51 $
# A UnitDict is a dictionary that maps unit numbers to lists.
# This module is used in aux_decls to keep track of various interesting
# types associated with a module.
class UnitDict:
    """A dictionary mapping unit numbers to lists, creating empty lists on demand."""

    def __init__(self):
        # Maps a unit key to the list of items recorded for that unit.
        self.dict = {}

    def entry(self, key):
        """Return the list for *key*, creating and registering an empty one if absent.

        Bug fix: the original used a bare ``except:`` around the lookup, which
        would also swallow unrelated errors; ``setdefault`` only creates a new
        list when the key is genuinely missing.
        """
        return self.dict.setdefault(key, [])
# The lists in use_type record the types for which the Ada module
# requires a "use type" declaration.
#
use_type = UnitDict()

# The lists in stdarg_concat record the types for which the
# Ada module requires an instantiation of Stdarg.Concat.
#
stdarg_concat = UnitDict()
| # $Source: /home/CVSROOT/c2ada/UnitDict.py,v $
# $Revision: 1.1.1.1 $ $Date: 1999/02/02 12:01:51 $
# A UnitDict is a dictionary that maps unit numbers to lists.
# This module is used in aux_decls to keep track of various interesting
# types associated with a module.
class UnitDict:
def __init__(self):
self.dict = {}
def entry(self, key):
try:
return self.dict[key]
except:
result = []
self.dict[key] = result
return result
# The lists use_type record the types for which the Ada module
# requires a "use type" declaration.
#
use_type = UnitDict()
# The lists in stdarg_concat record the types for which the
# Ada module requires an instantation of Stdarg.Concat.
#
stdarg_concat = UnitDict() | en | 0.720765 | # $Source: /home/CVSROOT/c2ada/UnitDict.py,v $ # $Revision: 1.1.1.1 $ $Date: 1999/02/02 12:01:51 $ # A UnitDict is a dictionary that maps unit numbers to lists. # This module is used in aux_decls to keep track of various interesting # types associated with a module. # The lists use_type record the types for which the Ada module # requires a "use type" declaration. # # The lists in stdarg_concat record the types for which the # Ada module requires an instantation of Stdarg.Concat. # | 2.439332 | 2 |
jiralinker.py | mnokka/JiraIssueLinker | 0 | 6614800 | <reponame>mnokka/JiraIssueLinker<filename>jiralinker.py
# This utility tool use (hardcoded) JQL rules to decide if source project issue(s)
# should be linked to target project issue(s)
#
# <EMAIL> 11.2.2020
from jira import JIRA
from datetime import datetime
import logging as log
#import pandas
import argparse
import getpass
import time
import sys, logging
from author import Authenticate # no need to use as external command
from author import DoJIRAStuff
import openpyxl
from collections import defaultdict
import re
import keyboard
start = time.clock()  # NOTE(review): time.clock() was removed in Python 3.8; this script targets Python 2

__version__ = u"0.1"

###################################################################
# should pass via parameters
# CODE CONFIGURATIONS
#####################################################################

# development vs production Jira
#ENV="DEV"
ENV="PROD"

# do only one operation for testing purposes
ONCE="NO"
#ONCE="YES"

# how many "rounds" done BE CAREFUL AS ONCE needs to be NO
ROUNDS=2000 # 15

# Custom-field ids used in JQL queries, one pair per environment.
CUSTOMFIELDDEV="customfield_10019"
CUSTOMFIELDEVID="cf[10019]"

CUSTOMFIELDPROD="customfield_10019"
CUSTOMFIELPRODID="cf[10019]"

# Select the custom-field id pair matching the chosen environment.
if (ENV=="DEV"):
    CUSTOMFIELD=CUSTOMFIELDDEV
    CUSTOMFIELDID=CUSTOMFIELDEVID
elif (ENV=="PROD"):
    CUSTOMFIELD=CUSTOMFIELDPROD
    CUSTOMFIELDID=CUSTOMFIELPRODID

# used in JQL query: "to which older project to link"
OLDPROJECTNUMBER=394

# LOGGING LEVEL: DEBUG or INFO or ERROR
logging.basicConfig(level=logging.DEBUG) # IF calling from Groovy, this must be set logging level DEBUG in Groovy side order these to be written out
###########################################################################
def main():
JIRASERVICE=u""
JIRAPROJECT=u""
PSWD=u''
USER=u''
parser = argparse.ArgumentParser(usage="""
{1} Version:{0} - <EMAIL>.com
USAGE:
python jiralinker.py -u <USERNAME> -w <PASSWORD> -s https://MYJIRA.COM -p <SOURCEPROJECTID> -l <LINKABLEPROJECTID>
Press x anytime: Stop program
""".format(__version__,sys.argv[0]))
parser.add_argument('-v','--version', help='<Version>', action='store_true')
parser.add_argument('-w','--password', help='<JIRA password>')
parser.add_argument('-u','--user', help='<JIRA username>')
parser.add_argument('-s','--service', help='<JIRA service, like https://my.jira.com>')
parser.add_argument('-l','--linked', help='<Jira linking target project ID to which source project issues to be linked, if (hardcoded) JQL rule matches') #add issue links to generated issues (target "into" linked issues must be allready in target jira)
parser.add_argument('-p','--project', help='<JIRA source project ID')
parser.add_argument('-d','--dry', help='Dry run mode ON|OFF . Default ON')
args = parser.parse_args()
if args.version:
print 'Tool version: %s' % __version__
sys.exit(2)
JIRASERVICE = args.service or ''
JIRAPROJECT = args.project or ''
PSWD= args.password or ''
USER= args.user or ''
JIRALINKED=args.linked or ''
DRYRUN=args.dry or 'ON'
#RENAME= args.rename or ''
#ASCII=args.ascii or ''
# quick old-school way to check needed parameters
if (JIRASERVICE=='' or PSWD=='' or USER=='' or JIRAPROJECT=='' or JIRALINKED==''):
parser.print_help()
print "args: {0}".format(args)
sys.exit(2)
Authenticate(JIRASERVICE,PSWD,USER)
jira=DoJIRAStuff(USER,PSWD,JIRASERVICE)
SourceCustomField="issue.fields.{0}".format(CUSTOMFIELD)
logging.debug("Using sourceCustomField==> {0}".format(SourceCustomField))
jql_query="Project = \'{0}\' and issuetype !=\'Drawings for Approval Remark\' ".format(JIRAPROJECT) # drop subtask off from first query
print "Used query:{0}".format(jql_query)
issue_list=jira.search_issues(jql_query, maxResults=4000)
#required for plan b, runtime same as used method
#allfields = jira.fields()
#nameMap = {jira.field['name']:jira.field['id'] for jira.field in allfields}
if len(issue_list) >= 1:
COUNTER=1
for issue in issue_list:
#logging.debug("One issue returned for query")
logging.debug("{0}: Issue investigated ==========> {1}".format(COUNTER,issue))
COUNTER=COUNTER+1
#data="{0}".format(SourceCustomField)
#mydata=data
#kissa=issue.raw["fields"]["customfield_10019"]
kissa=issue.raw["fields"]["{0}".format(CUSTOMFIELD)]
types=issue.raw["fields"]["issuetype"]
#koira=issue.custom_field_option(customfield_10019)
# plan b , works
#koira=getattr(issue.fields, nameMap["Drawing Number"])
#logging.debug("koira==> {0}".format(koira))
if kissa !=None:
logging.debug("Tracked custom field value ==> {0}".format(kissa))
OrinalIssueType=types.get("name")
logging.debug("Tracked's issuetype ==> {0}".format(OrinalIssueType))
regex = r"(D)(\.)(\d\d\d)(.*)" # custom field wished value: D.396.4600.401.036
match = re.search(regex, kissa)
if (match):
ProjectNumber=match.group(3)
logging.debug ("Matched: ProjectNumber:{0}".format(ProjectNumber))
#OLDPROJECTNUMBER
OldProjectValue=str(kissa)
OldProjectValue=OldProjectValue.replace(str(ProjectNumber),str(OLDPROJECTNUMBER)) # D.396.4600.401.036 ---> D.394.4600.401.036
logging.debug ("Generated customfield tracking JQL: OldProjectValue:{0}".format(OldProjectValue))
jql_query2="Project = \'{0}\' and \'{1}\' ~ \'{2}\' ".format(JIRALINKED,CUSTOMFIELDID,OldProjectValue)
logging.debug ("JQL query:{0}".format(jql_query2))
issue_list2=jira.search_issues(jql_query2)
logging.debug ("issue_list2:{0}".format(issue_list2))
#logging.debug ("DRYRUN:{0}".format(DRYRUN))
# Check all issues matched the secondary JQL query (with modified custom field value)
if len(issue_list2) >= 1:
for issue2 in issue_list2:
LINK=False
if (DRYRUN=="ON" or DRYRUN=="OFF"):
#logging.debug("DRYRUN: WOULD LIKE TO LINK {0} ==> {1}".format(issue,issue2))
types2=issue2.raw["fields"]["issuetype"]
FoundIssueType=types2.get("name")
#
#logging.debug("FoundIssueType==> {0}".format(FoundIssueType))
#logging.debug("OrinalIssueType ==> {0}".format(OrinalIssueType))
if (FoundIssueType != OrinalIssueType or ("Remark" in OrinalIssueType )): # Remarks (subtasks) not part of linking (iether source or target)
logging.debug("....Skipping this match (Remark or different types): {0}".format(issue2))
LINK=False
else:
logging.debug("OK, same issutypes")
#logging.debug("DRYRUN: WOULD LIKE TO LINK {0} ==> {1}".format(issue,issue2))
if (issue2.fields.issuelinks):
#logging.debug("HIT: LINKS FOUND, NO OPERATIONS DONE")
for link in issue2.fields.issuelinks:
names=link.type.name
logging.debug("link id:{0} name:{1}".format(link,names)) #cloners
if (names=="cloners"):
logging.debug("cloners link , no actions")
LINK=False
elif (names=="Cloners"):
logging.debug("cloners link , no actions check issue manually")
LINK=False
elif (names=="relates"):
logging.debug("existing relates link(s) , no actions, check issue manually")
LINK=False
elif (names=="Relates"):
logging.debug("existing relates link(s) , no actions, check issue manually")
LINK=False
else:
#logging.debug("action can be done")
#logging.debug("DRYRUN: WOULD LIKE TO LINK {0} ==> {1}".format(issue,issue2))
LINK=True
else:
logging.debug("No links found.")
LINK=True
if (LINK==True):
if (DRYRUN=="ON"):
logging.debug("DRYRUN: WOULD LIKE TO LINK {0} ==> {1}".format(issue,issue2))
LINK=False
elif (DRYRUN=="OFF"):
logging.debug("--REAL EXECUTION MODE ---")
logging.debug("LINKING {0} ==> {1}".format(issue,issue2))
resp=jira.create_issue_link("Relates", issue, issue2, comment={"body": "Automation created link to previously approved 1394 card",})
logging.debug("Linking done, response:{0}".format(resp))
else:
LINK=False
else:
logging.debug("NOTHING: No issues to be linked found")
else:
print "ERROR: No match for ProjectNumber, skipping this issue !!!!"
else:
print "ERROR: NULL value for customfield , skipping this issue !!!!"
logging.debug("---------------------------------------------------------------------------------------------------")
if (keyboard.is_pressed("x")):
logging.debug("x pressed, stopping now")
break
# ONCE==0
if (COUNTER >= ROUNDS):
logging.debug("Did ROUNDS=={0} times, stopping now".format(ROUNDS))
break
if (ONCE=="YES"):
logging.debug("ONCE flag active, stopping now")
break
#elif len(issue_list) > 1:
# logging.debug("ERROR ==> More than 1 issue was returned by JQL query")
# LINKEDISSUE="EMPTY"
else:
logging.debug("==> No issue(s) returned by JQL query")
#LINKEDISSUE="EMPTY"
#else:
# LINKEDISSUE="EMPTY"
time.sleep(0.7) # prevent jira crashing for script attack
if (ONCE=="YES"):
print "ONCE testing mode ,stopping now"
sys.exit(5) #testing do only once
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
#now excel has been prosessed
end = time.clock()
totaltime=end-start
print "Time taken:{0} seconds".format(totaltime)
print "*************************************************************************"
sys.exit(0)
if __name__ == '__main__':
main()
| # This utility tool use (hardcoded) JQL rules to decide if source project issue(s)
# should be linked to target project issue(s)
#
# <EMAIL> 11.2.2020
from jira import JIRA
from datetime import datetime
import logging as log
#import pandas
import argparse
import getpass
import time
import sys, logging
from author import Authenticate # no need to use as external command
from author import DoJIRAStuff
import openpyxl
from collections import defaultdict
import re
import keyboard
start = time.clock()
__version__ = u"0.1"
###################################################################
# should pass via parameters
# CODE CONFIGURATIONS
#####################################################################
# development vs production Jira
#ENV="DEV"
ENV="PROD"
# do only one operation for testing purposes
ONCE="NO"
#ONCE="YES"
# how many "rounds" done BE CAREFUL AS ONCE nneds to be NO
ROUNDS=2000 # 15
# Used in JQL query
CUSTOMFIELDDEV="customfield_10019"
CUSTOMFIELDEVID="cf[10019]"
CUSTOMFIELDPROD="customfield_10019"
CUSTOMFIELPRODID="cf[10019]"
if (ENV=="DEV"):
CUSTOMFIELD=CUSTOMFIELDDEV
CUSTOMFIELDID=CUSTOMFIELDEVID
elif (ENV=="PROD"):
CUSTOMFIELD=CUSTOMFIELDPROD
CUSTOMFIELDID=CUSTOMFIELPRODID
# used to JQL query "to which older project to link"
OLDPROJECTNUMBER=394
# LOGGING LEVEL: DEBUG or INFO or ERROR
logging.basicConfig(level=logging.DEBUG) # IF calling from Groovy, this must be set logging level DEBUG in Groovy side order these to be written out
###########################################################################
def main():
JIRASERVICE=u""
JIRAPROJECT=u""
PSWD=u''
USER=u''
parser = argparse.ArgumentParser(usage="""
{1} Version:{0} - <EMAIL>.com
USAGE:
python jiralinker.py -u <USERNAME> -w <PASSWORD> -s https://MYJIRA.COM -p <SOURCEPROJECTID> -l <LINKABLEPROJECTID>
Press x anytime: Stop program
""".format(__version__,sys.argv[0]))
parser.add_argument('-v','--version', help='<Version>', action='store_true')
parser.add_argument('-w','--password', help='<JIRA password>')
parser.add_argument('-u','--user', help='<JIRA username>')
parser.add_argument('-s','--service', help='<JIRA service, like https://my.jira.com>')
parser.add_argument('-l','--linked', help='<Jira linking target project ID to which source project issues to be linked, if (hardcoded) JQL rule matches') #add issue links to generated issues (target "into" linked issues must be allready in target jira)
parser.add_argument('-p','--project', help='<JIRA source project ID')
parser.add_argument('-d','--dry', help='Dry run mode ON|OFF . Default ON')
args = parser.parse_args()
if args.version:
print 'Tool version: %s' % __version__
sys.exit(2)
JIRASERVICE = args.service or ''
JIRAPROJECT = args.project or ''
PSWD= args.password or ''
USER= args.user or ''
JIRALINKED=args.linked or ''
DRYRUN=args.dry or 'ON'
#RENAME= args.rename or ''
#ASCII=args.ascii or ''
# quick old-school way to check needed parameters
if (JIRASERVICE=='' or PSWD=='' or USER=='' or JIRAPROJECT=='' or JIRALINKED==''):
parser.print_help()
print "args: {0}".format(args)
sys.exit(2)
Authenticate(JIRASERVICE,PSWD,USER)
jira=DoJIRAStuff(USER,PSWD,JIRASERVICE)
SourceCustomField="issue.fields.{0}".format(CUSTOMFIELD)
logging.debug("Using sourceCustomField==> {0}".format(SourceCustomField))
jql_query="Project = \'{0}\' and issuetype !=\'Drawings for Approval Remark\' ".format(JIRAPROJECT) # drop subtask off from first query
print "Used query:{0}".format(jql_query)
issue_list=jira.search_issues(jql_query, maxResults=4000)
#required for plan b, runtime same as used method
#allfields = jira.fields()
#nameMap = {jira.field['name']:jira.field['id'] for jira.field in allfields}
if len(issue_list) >= 1:
COUNTER=1
for issue in issue_list:
#logging.debug("One issue returned for query")
logging.debug("{0}: Issue investigated ==========> {1}".format(COUNTER,issue))
COUNTER=COUNTER+1
#data="{0}".format(SourceCustomField)
#mydata=data
#kissa=issue.raw["fields"]["customfield_10019"]
kissa=issue.raw["fields"]["{0}".format(CUSTOMFIELD)]
types=issue.raw["fields"]["issuetype"]
#koira=issue.custom_field_option(customfield_10019)
# plan b , works
#koira=getattr(issue.fields, nameMap["Drawing Number"])
#logging.debug("koira==> {0}".format(koira))
if kissa !=None:
logging.debug("Tracked custom field value ==> {0}".format(kissa))
OrinalIssueType=types.get("name")
logging.debug("Tracked's issuetype ==> {0}".format(OrinalIssueType))
regex = r"(D)(\.)(\d\d\d)(.*)" # custom field wished value: D.396.4600.401.036
match = re.search(regex, kissa)
if (match):
ProjectNumber=match.group(3)
logging.debug ("Matched: ProjectNumber:{0}".format(ProjectNumber))
#OLDPROJECTNUMBER
OldProjectValue=str(kissa)
OldProjectValue=OldProjectValue.replace(str(ProjectNumber),str(OLDPROJECTNUMBER)) # D.396.4600.401.036 ---> D.394.4600.401.036
logging.debug ("Generated customfield tracking JQL: OldProjectValue:{0}".format(OldProjectValue))
jql_query2="Project = \'{0}\' and \'{1}\' ~ \'{2}\' ".format(JIRALINKED,CUSTOMFIELDID,OldProjectValue)
logging.debug ("JQL query:{0}".format(jql_query2))
issue_list2=jira.search_issues(jql_query2)
logging.debug ("issue_list2:{0}".format(issue_list2))
#logging.debug ("DRYRUN:{0}".format(DRYRUN))
# Check all issues matched the secondary JQL query (with modified custom field value)
if len(issue_list2) >= 1:
for issue2 in issue_list2:
LINK=False
if (DRYRUN=="ON" or DRYRUN=="OFF"):
#logging.debug("DRYRUN: WOULD LIKE TO LINK {0} ==> {1}".format(issue,issue2))
types2=issue2.raw["fields"]["issuetype"]
FoundIssueType=types2.get("name")
#
#logging.debug("FoundIssueType==> {0}".format(FoundIssueType))
#logging.debug("OrinalIssueType ==> {0}".format(OrinalIssueType))
if (FoundIssueType != OrinalIssueType or ("Remark" in OrinalIssueType )): # Remarks (subtasks) not part of linking (iether source or target)
logging.debug("....Skipping this match (Remark or different types): {0}".format(issue2))
LINK=False
else:
logging.debug("OK, same issutypes")
#logging.debug("DRYRUN: WOULD LIKE TO LINK {0} ==> {1}".format(issue,issue2))
if (issue2.fields.issuelinks):
#logging.debug("HIT: LINKS FOUND, NO OPERATIONS DONE")
for link in issue2.fields.issuelinks:
names=link.type.name
logging.debug("link id:{0} name:{1}".format(link,names)) #cloners
if (names=="cloners"):
logging.debug("cloners link , no actions")
LINK=False
elif (names=="Cloners"):
logging.debug("cloners link , no actions check issue manually")
LINK=False
elif (names=="relates"):
logging.debug("existing relates link(s) , no actions, check issue manually")
LINK=False
elif (names=="Relates"):
logging.debug("existing relates link(s) , no actions, check issue manually")
LINK=False
else:
#logging.debug("action can be done")
#logging.debug("DRYRUN: WOULD LIKE TO LINK {0} ==> {1}".format(issue,issue2))
LINK=True
else:
logging.debug("No links found.")
LINK=True
if (LINK==True):
if (DRYRUN=="ON"):
logging.debug("DRYRUN: WOULD LIKE TO LINK {0} ==> {1}".format(issue,issue2))
LINK=False
elif (DRYRUN=="OFF"):
logging.debug("--REAL EXECUTION MODE ---")
logging.debug("LINKING {0} ==> {1}".format(issue,issue2))
resp=jira.create_issue_link("Relates", issue, issue2, comment={"body": "Automation created link to previously approved 1394 card",})
logging.debug("Linking done, response:{0}".format(resp))
else:
LINK=False
else:
logging.debug("NOTHING: No issues to be linked found")
else:
print "ERROR: No match for ProjectNumber, skipping this issue !!!!"
else:
print "ERROR: NULL value for customfield , skipping this issue !!!!"
logging.debug("---------------------------------------------------------------------------------------------------")
if (keyboard.is_pressed("x")):
logging.debug("x pressed, stopping now")
break
# ONCE==0
if (COUNTER >= ROUNDS):
logging.debug("Did ROUNDS=={0} times, stopping now".format(ROUNDS))
break
if (ONCE=="YES"):
logging.debug("ONCE flag active, stopping now")
break
#elif len(issue_list) > 1:
# logging.debug("ERROR ==> More than 1 issue was returned by JQL query")
# LINKEDISSUE="EMPTY"
else:
logging.debug("==> No issue(s) returned by JQL query")
#LINKEDISSUE="EMPTY"
#else:
# LINKEDISSUE="EMPTY"
time.sleep(0.7) # prevent jira crashing for script attack
if (ONCE=="YES"):
print "ONCE testing mode ,stopping now"
sys.exit(5) #testing do only once
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
#now excel has been prosessed
end = time.clock()
totaltime=end-start
print "Time taken:{0} seconds".format(totaltime)
print "*************************************************************************"
sys.exit(0)
if __name__ == '__main__':
main() | en | 0.508895 | # This utility tool use (hardcoded) JQL rules to decide if source project issue(s) # should be linked to target project issue(s) # # <EMAIL> 11.2.2020 #import pandas # no need to use as external command ################################################################### # should pass via parameters # CODE CONFIGURATIONS ##################################################################### # development vs production Jira #ENV="DEV" # do only one operation for testing purposes #ONCE="YES" # how many "rounds" done BE CAREFUL AS ONCE nneds to be NO # 15 # Used in JQL query # used to JQL query "to which older project to link" # LOGGING LEVEL: DEBUG or INFO or ERROR # IF calling from Groovy, this must be set logging level DEBUG in Groovy side order these to be written out ########################################################################### {1} Version:{0} - <EMAIL>.com USAGE: python jiralinker.py -u <USERNAME> -w <PASSWORD> -s https://MYJIRA.COM -p <SOURCEPROJECTID> -l <LINKABLEPROJECTID> Press x anytime: Stop program #add issue links to generated issues (target "into" linked issues must be allready in target jira) #RENAME= args.rename or '' #ASCII=args.ascii or '' # quick old-school way to check needed parameters # drop subtask off from first query #required for plan b, runtime same as used method #allfields = jira.fields() #nameMap = {jira.field['name']:jira.field['id'] for jira.field in allfields} #logging.debug("One issue returned for query") #data="{0}".format(SourceCustomField) #mydata=data #kissa=issue.raw["fields"]["customfield_10019"] #koira=issue.custom_field_option(customfield_10019) # plan b , works #koira=getattr(issue.fields, nameMap["Drawing Number"]) #logging.debug("koira==> {0}".format(koira)) # custom field wished value: D.396.4600.401.036 #OLDPROJECTNUMBER # D.396.4600.401.036 ---> D.394.4600.401.036 #logging.debug ("DRYRUN:{0}".format(DRYRUN)) # Check all issues matched the secondary JQL query (with modified custom 
field value) #logging.debug("DRYRUN: WOULD LIKE TO LINK {0} ==> {1}".format(issue,issue2)) # #logging.debug("FoundIssueType==> {0}".format(FoundIssueType)) #logging.debug("OrinalIssueType ==> {0}".format(OrinalIssueType)) # Remarks (subtasks) not part of linking (iether source or target) #logging.debug("DRYRUN: WOULD LIKE TO LINK {0} ==> {1}".format(issue,issue2)) #logging.debug("HIT: LINKS FOUND, NO OPERATIONS DONE") #cloners #logging.debug("action can be done") #logging.debug("DRYRUN: WOULD LIKE TO LINK {0} ==> {1}".format(issue,issue2)) # ONCE==0 #elif len(issue_list) > 1: # logging.debug("ERROR ==> More than 1 issue was returned by JQL query") # LINKEDISSUE="EMPTY" #LINKEDISSUE="EMPTY" #else: # LINKEDISSUE="EMPTY" # prevent jira crashing for script attack #testing do only once #now excel has been prosessed | 2.021967 | 2 |
Medium/Medium_Threesums_15_WYH.py | LinkWoong/LC-Solutions | 4 | 6614801 |
# coding: utf-8
# In[1]:
class Solution(object):
    def threeSum(self, nums):
        """
        Return all unique triplets [a, b, c] from ``nums`` with a + b + c == 0.

        :type nums: List[int]
        :rtype: List[List[int]]

        Strategy: sort ``nums`` in place, then for every anchor index fix one
        value and sweep the remaining suffix with two converging pointers,
        looking for a pair that sums to the anchor's negation.  Duplicate
        anchor values and duplicate left-pointer values are skipped so each
        triplet appears exactly once.
        """
        nums.sort()
        triplets = []
        size = len(nums)
        for anchor in range(size):
            # A repeated anchor value would only reproduce triplets
            # already collected for its first occurrence.
            if anchor > 0 and nums[anchor] == nums[anchor - 1]:
                continue
            want = -nums[anchor]
            lo, hi = anchor + 1, size - 1
            while lo < hi:
                pair_sum = nums[lo] + nums[hi]
                if pair_sum == want:
                    triplets.append([nums[anchor], nums[lo], nums[hi]])
                    lo += 1
                    # Step past equal left values to avoid duplicate triplets.
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
                elif pair_sum < want:
                    lo += 1  # pair too small: grow the smaller side
                else:
                    hi -= 1  # pair too large: shrink the bigger side
        return triplets
|
# coding: utf-8
# In[1]:
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
#算法思路:
#step1:将输入的一个list的整数从小到大排列
#step2:利用a+b+c=0等效于a+b=-c,从最小的一个开始遍历,找到这个数的后面的两个的和是这个的负数的数
#step3:像上步所述,在某个list整数的遍历中包含一个它之后数的遍历,采取首位相加的思路,如果小于target则首向后移一个,如果大于,尾向前移一位
#当和与target相等的时候,满足要求,输出结果
nums.sort()
N, result = len(nums), []
for i in range(N):
######################################## target选择中避免重复,因为已经排序,避免相邻的相等即可避免重复
if i > 0 and nums[i] == nums[i-1]:
continue
#########################################
target = nums[i]*-1
s,e = i+1, N-1
while s<e:
if nums[s]+nums[e] == target:
result.append([nums[i], nums[s], nums[e]])
s = s+1
############################################# 找两个加数中避免重复,因为已经排序,避免相邻的相等即可避免重复
while s<e and nums[s] == nums[s-1]:
s = s+1
############################################
elif nums[s] + nums[e] < target: #小于
s = s+1
else:
e = e-1 #大于
return result
| zh | 0.540771 | # coding: utf-8 # In[1]: :type nums: List[int] :rtype: List[List[int]] #算法思路: #step1:将输入的一个list的整数从小到大排列 #step2:利用a+b+c=0等效于a+b=-c,从最小的一个开始遍历,找到这个数的后面的两个的和是这个的负数的数 #step3:像上步所述,在某个list整数的遍历中包含一个它之后数的遍历,采取首位相加的思路,如果小于target则首向后移一个,如果大于,尾向前移一位 #当和与target相等的时候,满足要求,输出结果 ######################################## target选择中避免重复,因为已经排序,避免相邻的相等即可避免重复 ######################################### ############################################# 找两个加数中避免重复,因为已经排序,避免相邻的相等即可避免重复 ############################################ #小于 #大于 | 3.65335 | 4 |
scripts/python/turtleRelated/siteimages.py | jeremiahmarks/dangerzone | 1 | 6614802 | import os
from wand.image import Image
import wand
def makeindex(pictureDir, picwidth, picheight , filetypes=['jpg','gif','png']):
    # Generate an ``index.html`` Sliderman.js slideshow from every image in
    # ``pictureDir``.  Files are first renamed/ordered by blacksort(), so the
    # slideshow plays from fewest to most pure-black pixels.
    # NOTE(review): Python 2 code (print statements).  The mutable default
    # ``filetypes`` is shared between calls but only ever read, so harmless.
    blacksort(pictureDir)
    allfiles=os.listdir(pictureDir)
    allfiles.sort()
    # pictureDir is assumed to end with a path separator -- TODO confirm.
    indexname=pictureDir+'index.html'
    # Make sure the index file exists, then reopen it for read/write.
    if not os.path.exists(indexname):
        f=open(indexname, 'w')
        f.close()
    f=open(indexname, 'rb+')
    # Head of the page: loads Sliderman.js and opens the slider container.
    filecontents="""<html>
<head>
<script type="text/javascript" src="http://jlmarks.org/javascripts/sliderman.1.3.7.js"></script>
<link rel="stylesheet" type="text/css" href="http://jlmarks.org/css/sliderman.css" />
</head>
<body>
<div id="wrapper">
<div id="outer">
<div id="slider_container_2">
<div id="SliderName_2" class="SliderName_2">
"""
    # Tail of the page: closes the container and instantiates the slider at
    # the requested pixel dimensions with a 1.5 s autoplay interval.
    tail="""
</div>
<div id="SliderNameNavigation_2"></div>
</div>
<script type="text/javascript">
var myslider=Sliderman.slider({container: 'SliderName_2', width:"""+str(picwidth)+""", height: """+str(picheight)+""",effects: 'fade', display: {autoplay: 1500}});
</script>
</div>
</body>
</html>
"""
    x=0
    first=True  # NOTE(review): leftover from the commented-out imagemap branch below; never read
    total=len(allfiles)
    # Emit one <img> tag per file whose extension matches ``filetypes``.
    for eachfile in allfiles:
        print str(x)+" of "+str(total)
        #if first and eachfile[-3:] in filetypes:
        #newline='\n<img src="'+eachfile+'" width="'+str(picwidth)+'" height="'+str(picheight)+'" alt="sometext" title="'+eachfile+'" usemap="#img1map" />\n <map'
        if eachfile[-3:] in filetypes:
            newline='\n<img src="'+eachfile+'" width="'+str(picwidth)+'" height="'+str(picheight)+'" alt="sometext" title="'+eachfile+'" />\n'
            filecontents=filecontents+newline
        x+=1
    filecontents=filecontents+tail
    f.write(filecontents)
    f.close()
def wdivide(inputDir, filetypes=['gif','jpg','png'], sizediff=100):
    # Pad every matching image in ``inputDir`` with a transparent border so
    # its width and height become the next multiple of ``sizediff`` (always
    # rounding up by at least one step), writing the result into a
    # per-dimension subdirectory named "WxH".  Uses ImageMagick's ``convert``
    # via the shell.  NOTE(review): Python 2 integer division is relied on in
    # the rounding arithmetic below.
    sizediff=int(sizediff)
    allfiles=os.listdir(inputDir)
    for eachfile in allfiles:
        if eachfile[-3:] in filetypes:
            with Image(filename=inputDir+eachfile) as img:
                # Round each dimension up to a multiple of sizediff; note an
                # exact multiple still gains a full extra sizediff of padding.
                endwidth=((int(img.size[0])/sizediff)*sizediff)+sizediff
                endheight=((int(img.size[1])/sizediff)*sizediff)+sizediff
                # Half the padding on each side (integer halves).
                borderw=(endwidth-int(img.size[0]))/2
                borderh=(endheight-int(img.size[1]))/2
                #bordercommand='convert '+inputDir+eachfile+' -matte -bordercolor none -border '+borderw+'x'+borderh+' '+inputDir+size+'/'+eachfile
                size=str(endwidth)+'x'+str(endheight)
                # One output directory per resulting geometry, e.g. "400x300".
                if not os.path.exists(inputDir+size):
                    os.mkdir(inputDir+size)
                # NOTE(review): shell command built by concatenation -- file
                # names with spaces or quotes would break; confirm inputs.
                command = 'convert '+inputDir+eachfile+' -matte -bordercolor none -border '+str(borderw)+'x'+str(borderh)+' '+inputDir+size+'/'+eachfile
                os.system(command)
def bringtoonedir(mainDir, someDir=None):
    """
    This is designed to bring all of the files from different subdirectories into
    one main directory

    Recurses depth-first: ``someDir`` accumulates the relative path of the
    subdirectory currently being flattened ('' on the initial call), and every
    non-directory entry is moved up into ``mainDir`` via a shelled-out ``mv``.
    NOTE(review): Python 2 code; name collisions between subdirectories will
    silently overwrite files, and names with spaces break the shell command.
    """
    if someDir==None:someDir=''
    curDir=mainDir+someDir
    print curDir, mainDir, someDir
    allfiles=os.listdir(curDir)
    for eachfile in allfiles:
        if os.path.isdir(curDir+eachfile):
            # Descend into the subdirectory, extending the relative path.
            print 'isdir! '+someDir+eachfile+'/'
            bringtoonedir(mainDir, someDir+eachfile+'/')
        else:
            # Plain file: move it up into the root directory.
            command='mv '+curDir+eachfile+' '+mainDir
            os.system(command)
def blacksort(dirtosort, filetypes=['gif','jpg','png']):
    # Rename the images in ``dirtosort`` so that a plain alphabetical listing
    # orders them by their count of pure-black pixels, ascending.  Two passes:
    # first every file gets a unique 11-letter base-26 temporary name (so the
    # second pass cannot collide), then files are renamed to zero-padded
    # numeric names in black-pixel order.
    # NOTE(review): Python 2 code; renames go through shelled-out ``mv``.
    allfiles=os.listdir(dirtosort)
    # Encode integer x as an 11-character base-26 lowercase string
    # ('aaaaaaaaaaa', 'aaaaaaaaaab', ...); relies on py2 integer division.
    letters=lambda x: chr(97+((x/(26**10))%26))+chr(97+((x/(26**9))%26))+chr(97+((x/(26**8))%26))+chr(97+((x/(26**7))%26))+chr(97+((x/(26**6))%26))+chr(97+((x/(26**5))%26))+chr(97+((x/(26*26))%26))+chr(97+((x/(26**3))%26))+chr(97+((x/(26*26))%26))+chr(97+((x/26)%26))+chr(97+(x%26))
    x=0
    blacks=[]
    # Pass 1: record (temp name, black-pixel count) and rename to temp names.
    for eachfile in allfiles:
        if eachfile[-3:] in filetypes:
            with Image(filename=dirtosort+eachfile) as img:
                # Black-pixel count comes from wand's color histogram;
                # absent key means the image contains no pure-black pixels.
                if wand.color.Color('rgb(0,0,0)') in img.histogram.keys():
                    blacks.append([letters(x)+'.'+eachfile[-3:], img.histogram[wand.color.Color('rgb(0,0,0)')]])
                else:
                    blacks.append([letters(x)+'.'+eachfile[-3:], 0])
                os.system('mv '+dirtosort+eachfile+' '+dirtosort+letters(x)+'.'+eachfile[-3:])
                x+=1
    x=0
    # Pass 2: sort by black-pixel count and rename to '00000000.ext' style;
    # eachfiles[0][-4:] keeps the '.ext' suffix of the temporary name.
    blacks.sort(key=lambda x: x[1])
    for eachfiles in blacks:
        os.system('mv '+dirtosort+eachfiles[0]+' '+dirtosort+'%08d' %x + eachfiles[0][-4:])
        x+=1
| import os
from wand.image import Image
import wand
def makeindex(pictureDir, picwidth, picheight , filetypes=['jpg','gif','png']):
blacksort(pictureDir)
allfiles=os.listdir(pictureDir)
allfiles.sort()
indexname=pictureDir+'index.html'
if not os.path.exists(indexname):
f=open(indexname, 'w')
f.close()
f=open(indexname, 'rb+')
filecontents="""<html>
<head>
<script type="text/javascript" src="http://jlmarks.org/javascripts/sliderman.1.3.7.js"></script>
<link rel="stylesheet" type="text/css" href="http://jlmarks.org/css/sliderman.css" />
</head>
<body>
<div id="wrapper">
<div id="outer">
<div id="slider_container_2">
<div id="SliderName_2" class="SliderName_2">
"""
tail="""
</div>
<div id="SliderNameNavigation_2"></div>
</div>
<script type="text/javascript">
var myslider=Sliderman.slider({container: 'SliderName_2', width:"""+str(picwidth)+""", height: """+str(picheight)+""",effects: 'fade', display: {autoplay: 1500}});
</script>
</div>
</body>
</html>
"""
x=0
first=True
total=len(allfiles)
for eachfile in allfiles:
print str(x)+" of "+str(total)
#if first and eachfile[-3:] in filetypes:
#newline='\n<img src="'+eachfile+'" width="'+str(picwidth)+'" height="'+str(picheight)+'" alt="sometext" title="'+eachfile+'" usemap="#img1map" />\n <map'
if eachfile[-3:] in filetypes:
newline='\n<img src="'+eachfile+'" width="'+str(picwidth)+'" height="'+str(picheight)+'" alt="sometext" title="'+eachfile+'" />\n'
filecontents=filecontents+newline
x+=1
filecontents=filecontents+tail
f.write(filecontents)
f.close()
def wdivide(inputDir, filetypes=['gif','jpg','png'], sizediff=100):
sizediff=int(sizediff)
allfiles=os.listdir(inputDir)
for eachfile in allfiles:
if eachfile[-3:] in filetypes:
with Image(filename=inputDir+eachfile) as img:
endwidth=((int(img.size[0])/sizediff)*sizediff)+sizediff
endheight=((int(img.size[1])/sizediff)*sizediff)+sizediff
borderw=(endwidth-int(img.size[0]))/2
borderh=(endheight-int(img.size[1]))/2
#bordercommand='convert '+inputDir+eachfile+' -matte -bordercolor none -border '+borderw+'x'+borderh+' '+inputDir+size+'/'+eachfile
size=str(endwidth)+'x'+str(endheight)
if not os.path.exists(inputDir+size):
os.mkdir(inputDir+size)
command = 'convert '+inputDir+eachfile+' -matte -bordercolor none -border '+str(borderw)+'x'+str(borderh)+' '+inputDir+size+'/'+eachfile
os.system(command)
def bringtoonedir(mainDir, someDir=None):
"""
This is designed to bring all of the files from different subdirectories into
one main directory
"""
if someDir==None:someDir=''
curDir=mainDir+someDir
print curDir, mainDir, someDir
allfiles=os.listdir(curDir)
for eachfile in allfiles:
if os.path.isdir(curDir+eachfile):
print 'isdir! '+someDir+eachfile+'/'
bringtoonedir(mainDir, someDir+eachfile+'/')
else:
command='mv '+curDir+eachfile+' '+mainDir
os.system(command)
def blacksort(dirtosort, filetypes=['gif','jpg','png']):
allfiles=os.listdir(dirtosort)
letters=lambda x: chr(97+((x/(26**10))%26))+chr(97+((x/(26**9))%26))+chr(97+((x/(26**8))%26))+chr(97+((x/(26**7))%26))+chr(97+((x/(26**6))%26))+chr(97+((x/(26**5))%26))+chr(97+((x/(26**4))%26))+chr(97+((x/(26**3))%26))+chr(97+((x/(26*26))%26))+chr(97+((x/26)%26))+chr(97+(x%26))
x=0
blacks=[]
for eachfile in allfiles:
if eachfile[-3:] in filetypes:
with Image(filename=dirtosort+eachfile) as img:
if wand.color.Color('rgb(0,0,0)') in img.histogram.keys():
blacks.append([letters(x)+'.'+eachfile[-3:], img.histogram[wand.color.Color('rgb(0,0,0)')]])
else:
blacks.append([letters(x)+'.'+eachfile[-3:], 0])
os.system('mv '+dirtosort+eachfile+' '+dirtosort+letters(x)+'.'+eachfile[-3:])
x+=1
x=0
blacks.sort(key=lambda x: x[1])
for eachfiles in blacks:
os.system('mv '+dirtosort+eachfiles[0]+' '+dirtosort+'%08d' %x + eachfiles[0][-4:])
x+=1
| en | 0.348218 | <html> <head> <script type="text/javascript" src="http://jlmarks.org/javascripts/sliderman.1.3.7.js"></script> <link rel="stylesheet" type="text/css" href="http://jlmarks.org/css/sliderman.css" /> </head> <body> <div id="wrapper"> <div id="outer"> <div id="slider_container_2"> <div id="SliderName_2" class="SliderName_2"> </div> <div id="SliderNameNavigation_2"></div> </div> <script type="text/javascript"> var myslider=Sliderman.slider({container: 'SliderName_2', width: , height: ,effects: 'fade', display: {autoplay: 1500}}); </script> </div> </body> </html> #if first and eachfile[-3:] in filetypes: #newline='\n<img src="'+eachfile+'" width="'+str(picwidth)+'" height="'+str(picheight)+'" alt="sometext" title="'+eachfile+'" usemap="#img1map" />\n <map' #bordercommand='convert '+inputDir+eachfile+' -matte -bordercolor none -border '+borderw+'x'+borderh+' '+inputDir+size+'/'+eachfile This is designed to bring all of the files from different subdirectories into one main directory | 2.690391 | 3 |
src/leetcode/prob_9_palindrome_number.py | arnaudblois/leetcode | 1 | 6614803 | <gh_stars>1-10
"""Leetcode 009 - Palindrome number.
Check if a number is a palindrome, e.g. 1234321 is one,
-1234321 is not (1234321- is invalid), neither is 10.
"""
def is_palindrome(number: int) -> bool:
    """Check if number is palindromic.

    Works purely arithmetically: negative numbers can never be palindromes
    (the sign has no mirror), otherwise the decimal digits are reversed into
    a second integer and compared against the original.
    """
    if number < 0:
        return False
    remaining, mirrored = number, 0
    while remaining > 0:
        mirrored = mirrored * 10 + remaining % 10
        remaining //= 10
    return mirrored == number
| """Leetcode 009 - Palindrome number.
Check if a number is a palindrome, e.g. 1234321 is one,
-1234321 is not (1234321- is invalid), neither is 10.
"""
def is_palindrome(number: int) -> bool:
"""Check if number is palindromic."""
return str(number) == str(number)[::-1] | en | 0.742412 | Leetcode 009 - Palindrome number. Check if a number is a palindrome, e.g. 1234321 is one, -1234321 is not (1234321- is invalid), neither is 10. Check if number is palindromic. | 3.845416 | 4 |
hypercoref/python/test/data/test_scipy.py | UKPLab/emnlp2021-hypercoref-cdcr | 5 | 6614804 | <gh_stars>1-10
from unittest import TestCase
from numpy.linalg import norm
from numpy.random import RandomState
from numpy.testing import assert_array_almost_equal
from scipy.sparse import csr_matrix
from scipy.spatial.distance import squareform
from python.util.scipy import batch_pairwise_dot, parallel_batch_pairwise_dot
class TestScipy(TestCase):
    def test_batch_pairwise_dot(self):
        """Batched (and parallel-batched) pairwise dot products must match a
        naively computed condensed cosine-similarity matrix."""
        rng = RandomState(0)
        vectors = rng.rand(1000, 5)
        # Unit-normalize every row so dot products are cosine similarities.
        vectors = vectors / norm(vectors, axis=1).reshape((-1, 1))
        sparse_vectors = csr_matrix(vectors)

        # Reference: dense similarity matrix, zeroed diagonal, condensed form.
        similarity = sparse_vectors * sparse_vectors.transpose()
        similarity.setdiag(0)
        expected = squareform(similarity.todense())

        batched = batch_pairwise_dot(sparse_vectors, batch_size=83)
        assert_array_almost_equal(expected, batched)

        parallel = parallel_batch_pairwise_dot(sparse_vectors, batch_size=83, n_jobs=2)
        assert_array_almost_equal(expected, parallel)
from numpy.linalg import norm
from numpy.random import RandomState
from numpy.testing import assert_array_almost_equal
from scipy.sparse import csr_matrix
from scipy.spatial.distance import squareform
from python.util.scipy import batch_pairwise_dot, parallel_batch_pairwise_dot
class TestScipy(TestCase):
def test_batch_pairwise_dot(self):
rs = RandomState(0)
a = rs.rand(1000, 5)
a = a / norm(a, axis=1).reshape((-1, 1))
a = csr_matrix(a)
cosine_sim = a * a.transpose()
cosine_sim.setdiag(0)
expected = squareform(cosine_sim.todense())
actual = batch_pairwise_dot(a, batch_size=83)
assert_array_almost_equal(expected, actual)
actual_parallel = parallel_batch_pairwise_dot(a, batch_size=83, n_jobs=2)
assert_array_almost_equal(expected, actual_parallel) | none | 1 | 2.354195 | 2 | |
wsgi.py | AparnaKarve/my-aiops-publisher | 0 | 6614805 | from flask import Flask
import os
#import s3
import producer
application = Flask(__name__)
@application.route("/")
def wake_up():
    """Root endpoint: publish an 'available' message to Kafka, then greet.

    Broker address and topic come from the KAFKA_SERVER / KAFKA_TOPIC
    environment variables, falling back to the development defaults.
    (The original code read both variables and then unconditionally
    overwrote them with the hard-coded values -- dead code; the env
    lookups are now honored.)

    Returns:
        str: the literal greeting 'Hello World!'.
    """
    server = os.environ.get(
        'KAFKA_SERVER',
        'platform-mq-dev-kafka-brokers.platform-mq-dev.svc:9092',
    )
    topic = os.environ.get('KAFKA_TOPIC', 'available')

    # Debug output, preserved from the original implementation.
    print("server: \n")
    print(server)
    print("topic: \n")
    print(topic)
    print("Test var: \n")
    print(os.environ.get('MYVAR'))

    producer.publish_message(server, topic, 'available')
    return 'Hello World!'
if __name__ == '__main__':
application.run()
| from flask import Flask
import os
#import s3
import producer
application = Flask(__name__)
@application.route("/")
def wake_up():
server = os.environ.get('KAFKA_SERVER')
topic = os.environ.get('KAFKA_TOPIC')
# available_message = os.environ.get('KAFKA_AVAILABLE_MESSAGE') -- ?
print("server: \n")
print(server)
print("topic: \n")
print(topic)
print("Test var: \n")
print( os.environ.get('MYVAR'))
server = 'platform-mq-dev-kafka-brokers.platform-mq-dev.svc:9092'
topic = 'available'
# aws_key = os.environ.get('AWS_ACCESS_KEY_ID')
# aws_secret = os.environ.get('AWS_SECRET_ACCESS_KEY')
# aws_bucket = os.environ.get('AWS_S3_BUCKET_NAME')
# filesystem = s3.connect(aws_key, aws_secret)
# s3.save_data(filesystem, aws_bucket, "Hello AIOPS")
producer.publish_message(server, topic, 'available')
return 'Hello World!'
if __name__ == '__main__':
application.run()
| en | 0.130972 | #import s3 # available_message = os.environ.get('KAFKA_AVAILABLE_MESSAGE') -- ? # aws_key = os.environ.get('AWS_ACCESS_KEY_ID') # aws_secret = os.environ.get('AWS_SECRET_ACCESS_KEY') # aws_bucket = os.environ.get('AWS_S3_BUCKET_NAME') # filesystem = s3.connect(aws_key, aws_secret) # s3.save_data(filesystem, aws_bucket, "Hello AIOPS") | 2.313256 | 2 |
silex_client/action/parameter_buffer.py | ArtFXDev/silex_client | 10 | 6614806 | """
@author: <NAME>
Dataclass used to store the data related to a parameter
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Type
from silex_client.action.base_buffer import BaseBuffer
from silex_client.utils.datatypes import CommandOutput
from silex_client.utils.parameter_types import AnyParameter, CommandParameterMeta
# Forward references
if TYPE_CHECKING:
from silex_client.action.action_query import ActionQuery
# Alias the metaclass type, to avoid clash with the type attribute
Type = type
@dataclass()
class ParameterBuffer(BaseBuffer):
    """
    Store the data of a parameter; it is used as a communication payload with
    the UI.  Extends BaseBuffer with a declared type, a current value and a
    flag marking values that are resolved from another command's output.
    """

    # Field names excluded from comparison/serialization vs. fields the UI
    # must not modify -- presumably consumed by BaseBuffer's (de)serialization
    # machinery, which is not visible here; TODO confirm.
    PRIVATE_FIELDS = ["outdated_cache", "serialize_cache", "parent"]
    READONLY_FIELDS = ["type", "label"]

    #: The type of the parameter, must be a class definition or a CommandParameterMeta instance
    type: Type = field(default=type(None))
    #: The value that will return the parameter
    value: Any = field(default=None)
    #: Specify if the parameter gets its value from a command output or not
    command_output: bool = field(compare=False, repr=False, default=False)

    def __post_init__(self):
        super().__post_init__()

        # A CommandOutput value means this parameter is fed by another
        # command; mark it and hide it from the UI (hide comes from
        # BaseBuffer -- presumably controls widget visibility; confirm).
        if isinstance(self.value, CommandOutput):
            self.command_output = True
            self.hide = True

        # The AnyParameter type does not have any widget in the frontend
        if self.type is AnyParameter:
            self.hide = True

        # Derive a default value from the declared parameter type when no
        # explicit value was given.
        if self.value is None and isinstance(self.type, CommandParameterMeta):
            self.value = self.type.get_default()

    @property
    def outdated_caches(self) -> bool:
        """
        Check if the cache need to be recomputed by looking at the current cache
        and the children caches
        """
        return self.outdated_cache

    def get_value(self, action_query: ActionQuery) -> Any:
        """
        Get the value of the parameter, always use this method to get
        the value of a parameter, this will resolve references, callable...
        """
        # If the value references another command's output, resolve it
        # against the running action query.
        if isinstance(self.value, CommandOutput):
            return self.value.get_value(action_query)

        # If the value is a callable, call it (for mutable default values)
        if callable(self.value):
            return self.value()

        return self.value
| """
@author: <NAME>
Dataclass used to store the data related to a parameter
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Type
from silex_client.action.base_buffer import BaseBuffer
from silex_client.utils.datatypes import CommandOutput
from silex_client.utils.parameter_types import AnyParameter, CommandParameterMeta
# Forward references
if TYPE_CHECKING:
from silex_client.action.action_query import ActionQuery
# Alias the metaclass type, to avoid clash with the type attribute
Type = type
@dataclass()
class ParameterBuffer(BaseBuffer):
"""
Store the data of a parameter, it is used as a comunication payload with the UI
"""
PRIVATE_FIELDS = ["outdated_cache", "serialize_cache", "parent"]
READONLY_FIELDS = ["type", "label"]
#: The type of the parameter, must be a class definition or a CommandParameterMeta instance
type: Type = field(default=type(None))
#: The value that will return the parameter
value: Any = field(default=None)
#: Specify if the parameter gets its value from a command output or not
command_output: bool = field(compare=False, repr=False, default=False)
def __post_init__(self):
super().__post_init__()
# Check if the parameter gets a command output
if isinstance(self.value, CommandOutput):
self.command_output = True
self.hide = True
# The AnyParameter type does not have any widget in the frontend
if self.type is AnyParameter:
self.hide = True
# Get the default value from to the type
if self.value is None and isinstance(self.type, CommandParameterMeta):
self.value = self.type.get_default()
@property
def outdated_caches(self) -> bool:
"""
Check if the cache need to be recomputed by looking at the current cache
and the children caches
"""
return self.outdated_cache
def get_value(self, action_query: ActionQuery) -> Any:
"""
Get the value of the parameter, always use this method to get
the value of a parameter, this will resolve references, callable...
"""
# If the value is the output of an other command, get is
if isinstance(self.value, CommandOutput):
return self.value.get_value(action_query)
# If the value is a callable, call it (for mutable default values)
if callable(self.value):
return self.value()
return self.value
| en | 0.625877 | @author: <NAME> Dataclass used to store the data related to a parameter # Forward references # Alias the metaclass type, to avoid clash with the type attribute Store the data of a parameter, it is used as a comunication payload with the UI #: The type of the parameter, must be a class definition or a CommandParameterMeta instance #: The value that will return the parameter #: Specify if the parameter gets its value from a command output or not # Check if the parameter gets a command output # The AnyParameter type does not have any widget in the frontend # Get the default value from to the type Check if the cache need to be recomputed by looking at the current cache and the children caches Get the value of the parameter, always use this method to get the value of a parameter, this will resolve references, callable... # If the value is the output of an other command, get is # If the value is a callable, call it (for mutable default values) | 2.665682 | 3 |
test/utils.py | almaan/eggplant | 12 | 6614807 | import numpy as np
import pandas as pd
import anndata as ad
import eggplant as eg
from scipy.spatial.distance import cdist
import torch as t
import gpytorch as gp
from PIL import Image
def create_model_input(n_obs: int = 20, n_lmks: int = 5):
    """Build a reproducible synthetic payload of model inputs for tests.

    A regular ``n_obs x n_obs`` grid of coordinates (scaled by ``1 / n_obs``)
    is paired with uniformly random landmarks, the point-to-landmark distance
    matrix, Gaussian feature values and dummy meta labels.  The global NumPy
    RNG is seeded, so repeated calls return identical data.

    Returns a dict with float32 torch tensors under the keys ``domain``,
    ``landmarks``, ``landmark_distances``, ``feature_values``,
    ``inducing_points`` plus the raw ``meta`` array.
    """
    np.random.seed(13)

    grid_x, grid_y = np.meshgrid(np.arange(n_obs), np.arange(n_obs))
    grid_x = grid_x.flatten()
    grid_y = grid_y.flatten()
    coordinates = np.stack((grid_x, grid_y), axis=1) / n_obs

    landmarks = np.random.uniform(0, 1, size=(n_lmks, 2))
    distances = cdist(coordinates, landmarks)
    # The first half of the rows doubles as the inducing-point set.
    inducing = distances[0 : int(n_obs / 2), :]

    features = np.random.normal(0, 1, size=grid_x.shape[0])
    # randint(0, 1) can only ever draw zeros -- dummy metadata labels.
    labels = np.random.randint(0, 1, size=grid_x.shape[0])

    def as_tensor(arr):
        return t.tensor(arr.astype(np.float32))

    return dict(
        domain=as_tensor(coordinates),
        landmarks=as_tensor(landmarks),
        landmark_distances=as_tensor(distances),
        feature_values=as_tensor(features),
        meta=labels,
        inducing_points=as_tensor(inducing),
    )
def create_adata(
    n_obs: int = 20,
    n_lmks: int = 5,
    n_features: int = 2,
    pandas_landmark_distance=False,
):
    # Build a synthetic AnnData object for tests: random expression matrix,
    # spatial coordinates and landmark distances taken from
    # create_model_input (which seeds the global RNG), plus the Visium-style
    # ``uns["spatial"]`` metadata layout that downstream code expects.
    model_input = create_model_input(n_obs, n_lmks)
    # Rebind n_obs to the actual number of grid points (n_obs ** 2).
    n_obs = model_input["domain"].shape[0]
    feature_names = [f"feature_{k}" for k in range(n_features)]
    var = pd.DataFrame(
        feature_names,
        index=feature_names,
        columns=["feature"],
    )
    adata = ad.AnnData(
        np.random.random((n_obs, n_features)),
        var=var,
    )
    # Extra layer of random values under the (confusingly named) key "var".
    adata.layers["var"] = np.random.random(adata.X.shape)
    adata.obsm["spatial"] = model_input["domain"].numpy()
    lmks = model_input["landmark_distances"].numpy()
    # Random stand-in landmark coordinates, independent of the distances.
    adata.uns["curated_landmarks"] = np.random.random((n_lmks, 2))
    # Optionally wrap the landmark distances in a labeled DataFrame to
    # exercise both code paths in consumers.
    if pandas_landmark_distance:
        lmks = pd.DataFrame(
            lmks,
            columns=[f"Landmark_{k}" for k in range(n_lmks)],
            index=adata.obs.index,
        )
    adata.obsm["landmark_distances"] = lmks
    adata.layers["layer"] = adata.X.copy()
    # Minimal Visium-like spatial metadata: scale factors and two images.
    adata.uns["spatial"] = dict(
        sample_0=dict(
            scalefactors=dict(
                tissue_hires_scalef=1,
                spot_diameter_fullres=0.1,
            ),
            images=dict(
                hires=np.random.random((10, 10)),
                lowres=np.random.random((5, 5)),
            ),
        )
    )
    return adata
def create_image(
    color: bool = False,
    side_size: int = 32,
    return_counts: bool = False,
) -> Image.Image:
    """Draw a centered filled circle (radius ``side_size / 4``) test image.

    In color mode each circle pixel is painted red, green or blue with
    Dirichlet-sampled probabilities and the image is returned in "RGB" mode;
    otherwise the circle is solid white on black in "L" (grayscale) mode.

    Parameters
    ----------
    color : paint the circle with random RGB primaries instead of white.
    side_size : width and height of the square image in pixels.
    return_counts : additionally return how many circle pixels were drawn
        (an int in grayscale mode, a length-3 array of per-color counts in
        color mode).
    """
    # NOTE(review): this call discards its result and only advances the
    # global RNG stream; it was probably meant to be ``np.random.seed(3)``
    # (a sibling helper uses ``np.random.seed(13)``).  Preserved as-is so
    # externally seeded callers keep getting identical draws -- TODO confirm.
    np.random.random(3)
    probs = np.random.dirichlet(np.ones(3))

    img = np.zeros((side_size, side_size, 3))
    radius = side_size / 4
    radius_sq = radius**2
    center = [int(side_size) / 2] * 2
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    counts = np.zeros((3 if color else 1))

    # Rasterize: every pixel within ``radius`` of the center is part of
    # the circle.
    for ii in range(side_size):
        for jj in range(side_size):
            dist_sq = (ii - center[0]) ** 2 + (jj - center[1]) ** 2
            if dist_sq <= radius_sq:
                if color:
                    c = np.random.choice(3, p=probs)
                    img[ii, jj, :] = colors[c, :]
                    counts[c] += 1
                else:
                    img[ii, jj, :] = 1
                    counts[0] += 1

    img = (img * 255).astype(np.uint8)
    if color:
        img = Image.fromarray(img).convert("RGB")
    else:
        img = Image.fromarray(img).convert("L")
        # Index slot 0 explicitly: ``int(ndarray)`` on a 1-element array is
        # deprecated in NumPy >= 1.25 (and raised TypeError here whenever the
        # array had 3 elements).
        counts = int(counts[0])

    if return_counts:
        return img, counts
    return img
| import numpy as np
import pandas as pd
import anndata as ad
import eggplant as eg
from scipy.spatial.distance import cdist
import torch as t
import gpytorch as gp
from PIL import Image
def create_model_input(n_obs: int = 20, n_lmks: int = 5):
np.random.seed(13)
xx = np.arange(n_obs)
yy = np.arange(n_obs)
xx, yy = np.meshgrid(xx, yy)
xx = xx.flatten()
yy = yy.flatten()
crd = np.hstack((xx[:, np.newaxis], yy[:, np.newaxis])) / n_obs
lmks = np.random.uniform(0, 1, size=(n_lmks, 2))
lmk_dists = cdist(crd, lmks)
inducing_points = lmk_dists[0 : int(n_obs / 2), :]
values = np.random.normal(0, 1, size=xx.shape[0])
meta = np.random.randint(0, 1, size=xx.shape[0])
return dict(
domain=t.tensor(crd.astype(np.float32)),
landmarks=t.tensor(lmks.astype(np.float32)),
landmark_distances=t.tensor(lmk_dists.astype(np.float32)),
feature_values=t.tensor(values.astype(np.float32)),
meta=meta,
inducing_points=t.tensor(inducing_points.astype(np.float32)),
)
def create_adata(
    n_obs: int = 20,
    n_lmks: int = 5,
    n_features: int = 2,
    pandas_landmark_distance=False,
):
    """Build a synthetic AnnData object mirroring a spatial dataset.

    Carries random expression values, spatial coordinates and landmark
    distances from :func:`create_model_input`, plus the ``uns`` entries
    (curated landmarks, Visium-style ``spatial`` metadata) that the code
    under test expects.
    """
    inputs = create_model_input(n_obs, n_lmks)
    n_obs = inputs["domain"].shape[0]
    feature_names = [f"feature_{k}" for k in range(n_features)]
    var_table = pd.DataFrame(
        feature_names,
        index=feature_names,
        columns=["feature"],
    )
    adata = ad.AnnData(
        np.random.random((n_obs, n_features)),
        var=var_table,
    )
    adata.layers["var"] = np.random.random(adata.X.shape)
    adata.obsm["spatial"] = inputs["domain"].numpy()
    dists = inputs["landmark_distances"].numpy()
    adata.uns["curated_landmarks"] = np.random.random((n_lmks, 2))
    if pandas_landmark_distance:
        # Optionally wrap the distances so both the ndarray and the
        # DataFrame code paths can be exercised.
        dists = pd.DataFrame(
            dists,
            columns=[f"Landmark_{k}" for k in range(n_lmks)],
            index=adata.obs.index,
        )
    adata.obsm["landmark_distances"] = dists
    adata.layers["layer"] = adata.X.copy()
    adata.uns["spatial"] = {
        "sample_0": {
            "scalefactors": {
                "tissue_hires_scalef": 1,
                "spot_diameter_fullres": 0.1,
            },
            "images": {
                "hires": np.random.random((10, 10)),
                "lowres": np.random.random((5, 5)),
            },
        }
    }
    return adata
def create_image(
    color: bool = False,
    side_size: float = 32,
    return_counts: bool = False,
) -> Image.Image:
    """Create a test image of a filled circle on a black background.

    Args:
        color: if True, circle pixels are randomly red/green/blue with
            Dirichlet-sampled class probabilities; otherwise the circle is white.
        side_size: image width/height in pixels (used as an integer).
        return_counts: if True, also return the number of colored pixels.

    Returns:
        A PIL image ("RGB" if color else "L"), or ``(image, counts)`` when
        ``return_counts`` is True. ``counts`` is an int for grayscale images
        and a list of three ints (pixels per color) for color images.
    """
    # Advances the global RNG state; kept so seeded random streams match the
    # original fixture's behavior.
    np.random.random(3)
    probs = np.random.dirichlet(np.ones(3))
    img = np.zeros((side_size, side_size, 3))
    r = side_size / 4
    r2 = r**2
    center = [int(side_size) / 2] * 2
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    counts = np.zeros((3 if color else 1))
    for ii in range(side_size):
        for jj in range(side_size):
            d2 = (ii - center[0]) ** 2 + (jj - center[1]) ** 2
            if d2 <= r2:
                if color:
                    c = np.random.choice(3, p=probs)
                    img[ii, jj, :] = colors[c, :]
                    counts[c] += 1
                else:
                    img[ii, jj, :] = 1
                    counts[0] += 1
    img = (img * 255).astype(np.uint8)
    if color:
        img = Image.fromarray(img).convert("RGB")
        # BUG FIX: ``int(counts)`` raised TypeError for the size-3 color
        # counts array; expose per-color counts as plain Python ints instead.
        counts = [int(c) for c in counts]
    else:
        img = Image.fromarray(img).convert("L")
        # Size-1 array: preserve the original int return value.
        counts = int(counts[0])
    if return_counts:
        return img, counts
    else:
        return img
| none | 1 | 2.300339 | 2 | |
app/models.py | AdamVerner/flask-boilerplate | 0 | 6614808 | <gh_stars>0
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db, login
class Admin(db.Model):
    """Administrator account; only a salted password hash is stored."""

    id = db.Column(db.Integer, primary_key=True, index=True, unique=True)
    username = db.Column(db.String(64), index=True, unique=True)
    password_hash = db.Column(db.String(128))

    def __init__(self, username, password):
        self.username = username
        # Never store the plaintext password.
        self.password_hash = generate_password_hash(password)

    def __repr__(self):
        return f"<Admin {self.username}>"
class User(UserMixin, db.Model):
    """Application user account.

    Passwords are stored as werkzeug salted hashes; ``get`` looks a user up
    by either username or e-mail address.
    """

    id = db.Column(db.Integer, primary_key=True, index=True, unique=True)
    username = db.Column(db.String(32))
    email = db.Column(db.String(120))
    password = db.Column(db.String(94))

    def __init__(self, username, email, password):
        self.username = username
        # BUG FIX: this line was mangled/redacted; store the salted hash so
        # check_password (which uses check_password_hash) can verify it.
        self.password = generate_password_hash(password)
        self.email = email
        # NOTE(review): ``confirmed`` has no Column declared above, so it is
        # not persisted by SQLAlchemy — confirm whether a column is missing.
        self.confirmed = False

    @staticmethod
    def get(username):
        """Return the user whose username OR email matches, else None."""
        return User.query.filter((User.username == username) | (User.email == username)).first()

    def check_password(self, password):
        """True if ``password`` matches the stored hash."""
        return check_password_hash(self.password, password)

    def __repr__(self):
        return f'<User {self.username}>'
@login.user_loader
def load_user(id):
    """Flask-Login callback: resolve a session's stored id to a User."""
    user_id = int(id)
    return User.query.get(user_id)
| from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db, login
class Admin(db.Model):
    """Administrator account; only a salted password hash is stored."""

    id = db.Column(db.Integer, primary_key=True, index=True, unique=True)
    username = db.Column(db.String(64), index=True, unique=True)
    password_hash = db.Column(db.String(128))

    def __init__(self, username, password):
        self.username = username
        # Never store the plaintext password.
        self.password_hash = generate_password_hash(password)

    def __repr__(self):
        return f"<Admin {self.username}>"
class User(UserMixin, db.Model):
    """Application user account.

    Passwords are stored as werkzeug salted hashes; ``get`` looks a user up
    by either username or e-mail address.
    """

    id = db.Column(db.Integer, primary_key=True, index=True, unique=True)
    username = db.Column(db.String(32))
    email = db.Column(db.String(120))
    password = db.Column(db.String(94))

    def __init__(self, username, email, password):
        self.username = username
        # BUG FIX: this line was mangled/redacted; store the salted hash so
        # check_password (which uses check_password_hash) can verify it.
        self.password = generate_password_hash(password)
        self.email = email
        # NOTE(review): ``confirmed`` has no Column declared above, so it is
        # not persisted by SQLAlchemy — confirm whether a column is missing.
        self.confirmed = False

    @staticmethod
    def get(username):
        """Return the user whose username OR email matches, else None."""
        return User.query.filter((User.username == username) | (User.email == username)).first()

    def check_password(self, password):
        """True if ``password`` matches the stored hash."""
        return check_password_hash(self.password, password)

    def __repr__(self):
        return f'<User {self.username}>'
@login.user_loader
def load_user(id):
    """Flask-Login callback: resolve a session's stored id to a User."""
    user_id = int(id)
    return User.query.get(user_id)
app/gpt3.py | toanphan19/playgroundapi | 0 | 6614809 | <reponame>toanphan19/playgroundapi
import os
import json
import logging
import openai
# Configure the OpenAI client from the environment; the key is never hard-coded.
openai.api_key = os.getenv("OPENAI_API_KEY")
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def summarize(text: str) -> str:
    """Summarise a piece of text via the OpenAI completion API.

    Maximum text input: 6000 characters.

    Raises:
        ValueError: if the input exceeds the character limit.
        Exception: if the API returns no completion choices.
    """
    MAX_INPUT_CHARS = 6000
    if len(text) > MAX_INPUT_CHARS:
        raise ValueError(f"Input text exceed maximum of {MAX_INPUT_CHARS} characters")
    completion = openai.Completion.create(
        engine="text-davinci-001",
        prompt="Summarize this for a second-grade student:\n\n" + text,
        temperature=0.7,
        max_tokens=256,
        top_p=1.0,
        frequency_penalty=0.0,
        presence_penalty=0.0,
    )
    choices = completion["choices"]
    if not choices:
        raise Exception(
            f"No answer found from openai. Response: {json.dumps(completion)}"
        )
    logger.info(completion)
    return choices[0]["text"].strip()
if __name__ == "__main__":
    # Manual smoke test: summarize a short passage and print the result.
    response = summarize(
        """Tokens can be thought of as pieces of words. Before the API processes the prompts, the input is broken down into tokens. These tokens are not cut up exactly where the words start or end - tokens can include trailing spaces and even sub-words.
    """
    )
    print(response)
| import os
import json
import logging
import openai
# Configure the OpenAI client from the environment; the key is never hard-coded.
openai.api_key = os.getenv("OPENAI_API_KEY")
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def summarize(text: str) -> str:
    """Summarise a piece of text via the OpenAI completion API.

    Maximum text input: 6000 characters.

    Raises:
        ValueError: if the input exceeds the character limit.
        Exception: if the API returns no completion choices.
    """
    MAX_INPUT_CHARS = 6000
    if len(text) > MAX_INPUT_CHARS:
        raise ValueError(f"Input text exceed maximum of {MAX_INPUT_CHARS} characters")
    completion = openai.Completion.create(
        engine="text-davinci-001",
        prompt="Summarize this for a second-grade student:\n\n" + text,
        temperature=0.7,
        max_tokens=256,
        top_p=1.0,
        frequency_penalty=0.0,
        presence_penalty=0.0,
    )
    choices = completion["choices"]
    if not choices:
        raise Exception(
            f"No answer found from openai. Response: {json.dumps(completion)}"
        )
    logger.info(completion)
    return choices[0]["text"].strip()
if __name__ == "__main__":
    # Manual smoke test: summarize a short passage and print the result.
    response = summarize(
        """Tokens can be thought of as pieces of words. Before the API processes the prompts, the input is broken down into tokens. These tokens are not cut up exactly where the words start or end - tokens can include trailing spaces and even sub-words.
    """
    )
    print(response)
tagAirWatchInbox.py | DrMachin/airwatch | 3 | 6614810 | #!/Library/Frameworks/Python.framework/Versions/3.6/bin/python3
"""
Find devices AirWatch Inbox app and tag them
Creating this to migrate devices from AirWatch Inbox to VMWare Boxer.
Step 1: Identify all devices with AW Inbox installed
Step 2: Tag All devices
Step 3: Create smart group with 'AWInbox' tag
Step 4: Limit Inbox Assignment to 'AWInbox' Group
-- This should stop new devices from getting inbox without removing existing users
Go from there...
"""
from toolbox.AirWatchAPI import AirWatchAPI as airwatch
api = airwatch()
def searchTags():
    """Return the list of all tags defined in AirWatch, or None if none exist."""
    ## tagDevice(self, tagID, deviceID=None, bulkDevices=None, verbose=False):
    search = api.searchTag()
    if search is None:
        print('No tags available.')
        return None
    # BUG FIX: removed unreachable ``return 0`` that followed this if/else
    # (both branches already returned).
    return search['Tags']
def getTagID(tagName):
    """Look up the numeric AirWatch ID for the tag named ``tagName``.

    Returns None (after printing a notice) when no matching tag exists or
    no tags are available at all.
    """
    wanted = str(tagName)
    for tag in searchTags() or []:
        if tag['TagName'] == wanted:
            return tag['Id']['Value']
    print('Could not find tag with name:', tagName)
    return None
# Find every application named "AirWatch Inbox" and tag the devices that have
# it installed, reporting per-app success/failure counts.
search = api.searchApplications('AirWatch Inbox')
appList = search['Application']
tagID = getTagID('AirWatch Inbox')
#"""
for app in appList:
    appID = app['Id']['Value']
    appName = app['ApplicationName']
    print(str(appID) + ' - ' + appName)
    deviceList = api.getDevicesWithInstalledPublicApp(appID)
    if deviceList is not None:
        idList = deviceList['DeviceId']
        if len(idList) < 1:
            print('\nNo devices found.')
        else:
            print('Sending request to AirWatch')
            # Bulk-tag all devices in one request.
            response = api.tagDevice(tagID, bulkDevices=idList, verbose=True)
            accepted = failed = ignored = 0
            faults = []
            if response is not None:
                try:
                    accepted = response['AcceptedItems']
                    failed = response['FailedItems']
                    if failed > 0:
                        # Faults with an ErrorCode are treated as already-tagged
                        # (ignored); anything else is collected for the report.
                        for error in response['Faults']['Fault']:
                            if error['ErrorCode']:
                                ignored += 1
                            else:
                                faults.append(error)
                except KeyError:
                    # Unexpected response shape — dump it for inspection.
                    print(api.prettyJSON(response))
            print('Tag Count:', len(idList))
            print()
            print("Devices Tagged:", accepted)
            print("Devices Ignored:", ignored)
            print()
            if len(faults) > 0:
                print()
                print("*****Errors Report*****")
                print(api.prettyJSON(faults))
    else:
        print('\nNo devices found.')
#"""
"""
Find devices AirWatch Inbox app and tag them
Creating this to migrate devices from AirWatch Inbox to VMWare Boxer.
Step 1: Identify all devices with AW Inbox installed
Step 2: Tag All devices
Step 3: Create smart group with 'AWInbox' tag
Step 4: Limit Inbox Assignment to 'AWInbox' Group
-- This should stop new devices from getting inbox without removing existing users
Go from there...
"""
from toolbox.AirWatchAPI import AirWatchAPI as airwatch
api = airwatch()
def searchTags():
    """Return the list of all tags defined in AirWatch, or None if none exist."""
    ## tagDevice(self, tagID, deviceID=None, bulkDevices=None, verbose=False):
    search = api.searchTag()
    if search is None:
        print('No tags available.')
        return None
    # BUG FIX: removed unreachable ``return 0`` that followed this if/else
    # (both branches already returned).
    return search['Tags']
def getTagID(tagName):
    """Look up the numeric AirWatch ID for the tag named ``tagName``.

    Returns None (after printing a notice) when no matching tag exists or
    no tags are available at all.
    """
    wanted = str(tagName)
    for tag in searchTags() or []:
        if tag['TagName'] == wanted:
            return tag['Id']['Value']
    print('Could not find tag with name:', tagName)
    return None
# Find every application named "AirWatch Inbox" and tag the devices that have
# it installed, reporting per-app success/failure counts.
search = api.searchApplications('AirWatch Inbox')
appList = search['Application']
tagID = getTagID('AirWatch Inbox')
#"""
for app in appList:
    appID = app['Id']['Value']
    appName = app['ApplicationName']
    print(str(appID) + ' - ' + appName)
    deviceList = api.getDevicesWithInstalledPublicApp(appID)
    if deviceList is not None:
        idList = deviceList['DeviceId']
        if len(idList) < 1:
            print('\nNo devices found.')
        else:
            print('Sending request to AirWatch')
            # Bulk-tag all devices in one request.
            response = api.tagDevice(tagID, bulkDevices=idList, verbose=True)
            accepted = failed = ignored = 0
            faults = []
            if response is not None:
                try:
                    accepted = response['AcceptedItems']
                    failed = response['FailedItems']
                    if failed > 0:
                        # Faults with an ErrorCode are treated as already-tagged
                        # (ignored); anything else is collected for the report.
                        for error in response['Faults']['Fault']:
                            if error['ErrorCode']:
                                ignored += 1
                            else:
                                faults.append(error)
                except KeyError:
                    # Unexpected response shape — dump it for inspection.
                    print(api.prettyJSON(response))
            print('Tag Count:', len(idList))
            print()
            print("Devices Tagged:", accepted)
            print("Devices Ignored:", ignored)
            print()
            if len(faults) > 0:
                print()
                print("*****Errors Report*****")
                print(api.prettyJSON(faults))
    else:
        print('\nNo devices found.')
#"""
src/astro/sql/operators/truncate.py | astro-projects/astro | 71 | 6614811 | from typing import Dict
from airflow.decorators.base import get_unique_task_id
from airflow.models import BaseOperator
from astro.databases import create_database
from astro.sql.table import Table
class TruncateOperator(BaseOperator):
    """Airflow Operator for truncating SQL tables."""

    def __init__(
        self,
        table: Table,
        task_id: str = "",
        **kwargs,
    ):
        # Target table; its metadata is resolved lazily in execute().
        self.table = table
        # Derive a unique task id from the table name when none is supplied.
        task_id = task_id or get_unique_task_id(table.name + "_truncate")
        super().__init__(
            task_id=task_id,
            **kwargs,
        )

    def execute(self, context: Dict) -> None:  # skipcq: PYL-W0613
        """Method run when the Airflow runner calls the operator."""
        database = create_database(self.table.conn_id)
        self.table = database.populate_table_metadata(self.table)
        # NOTE(review): despite the operator's name, this calls drop_table
        # rather than a truncate — confirm dropping is the intended behavior.
        database.drop_table(self.table)
| from typing import Dict
from airflow.decorators.base import get_unique_task_id
from airflow.models import BaseOperator
from astro.databases import create_database
from astro.sql.table import Table
class TruncateOperator(BaseOperator):
    """Airflow Operator for truncating SQL tables."""

    def __init__(
        self,
        table: Table,
        task_id: str = "",
        **kwargs,
    ):
        # Target table; its metadata is resolved lazily in execute().
        self.table = table
        # Derive a unique task id from the table name when none is supplied.
        task_id = task_id or get_unique_task_id(table.name + "_truncate")
        super().__init__(
            task_id=task_id,
            **kwargs,
        )

    def execute(self, context: Dict) -> None:  # skipcq: PYL-W0613
        """Method run when the Airflow runner calls the operator."""
        database = create_database(self.table.conn_id)
        self.table = database.populate_table_metadata(self.table)
        # NOTE(review): despite the operator's name, this calls drop_table
        # rather than a truncate — confirm dropping is the intended behavior.
        database.drop_table(self.table)
| en | 0.656804 | Airflow Operator for truncating SQL tables. # skipcq: PYL-W0613 Method run when the Airflow runner calls the operator. | 2.400241 | 2 |
experiments/chouse48_auxiliary.py | jkulhanek/a2cat-vn-pytorch | 7 | 6614812 | from experiments.data import TRAIN3, VALIDATION3
from environments.gym_house.multi import create_multiscene
from deep_rl.common.env import RewardCollector, TransposeImage, ScaledFloatFrame
from deep_rl.common.vec_env import DummyVecEnv, SubprocVecEnv
from deep_rl.a2c_unreal.util import UnrealEnvBaseWrapper
from deep_rl.configuration import configuration
import deep_rl
import environments
import os
import numpy as np
import torch
from deep_rl import register_trainer
from experiments.ai2_auxiliary.trainer import AuxiliaryTrainer
from models import AuxiliaryBigGoalHouseModel as Model
from deep_rl.common.schedules import LinearSchedule, MultistepSchedule
from torch import nn
from deep_rl.model import TimeDistributed, Flatten, MaskedRNN
from deep_rl.common.tester import TestingEnv, TestingVecEnv
import math
VALIDATION_PROCESSES = 1 # note: single environment is supported at the moment
TestingEnv.set_hardness = lambda _, hardness: print('Hardnes was set to %s' % hardness)
TestingVecEnv.set_hardness = lambda _, hardness: print('Hardnes was set to %s' % hardness)
@register_trainer(max_time_steps = 40e6, validation_period = 200, validation_episodes = 20, episode_log_interval = 10, saving_period = 100000, save = True)
class Trainer(AuxiliaryTrainer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_processes = 16
self.max_gradient_norm = 0.5
self.rms_alpha = 0.99
self.rms_epsilon = 1e-5
self.num_steps = 20
self.gamma = .99
self.allow_gpu = True
self.learning_rate = LinearSchedule(7e-4, 0, self.max_time_steps)
self.rp_weight = 1.0
self.pc_weight = 0.05
self.vr_weight = 1.0
self.auxiliary_weight = 0.1
#self.pc_cell_size =
self.scene_complexity = MultistepSchedule(0.3, [
(5000000, LinearSchedule(0.3, 1.0, 5000000)),
(10000000, 1.0)
])
def _get_input_for_pixel_control(self, inputs):
return inputs[0][0]
def create_env(self, kwargs):
env, self.validation_env = create_envs(self.num_processes, kwargs)
return env
def create_model(self):
model = Model(self.env.observation_space.spaces[0].spaces[0].shape[0], self.env.action_space.n)
model_path = os.path.join(configuration.get('models_path'),'chouse-multienv4-auxiliary', 'weights.pth')
print('Loading weights from %s' % model_path)
model.load_state_dict(torch.load(model_path))
return model
def process(self, *args, **kwargs):
a, b, metric_context = super().process(*args, **kwargs)
self.env.set_hardness(self.scene_complexity)
metric_context.add_last_value_scalar('scene_complexity', self.scene_complexity)
return a, b, metric_context
def create_envs(num_training_processes, env_kwargs):
def wrap(env):
env = RewardCollector(env)
env = TransposeImage(env)
env = ScaledFloatFrame(env)
env = UnrealEnvBaseWrapper(env)
return env
env = create_multiscene(num_training_processes, TRAIN3, wrap = wrap, **env_kwargs)
env.set_hardness = lambda hardness: env.call_unwrapped('set_hardness', hardness)
val_env = create_multiscene(VALIDATION_PROCESSES, VALIDATION3, wrap = wrap, **env_kwargs)
val_env.set_hardness = lambda hardness: val_env.call_unwrapped('set_hardness', hardness)
val_env.set_hardness(0.6)
return env, val_env
def default_args():
return dict(
env_kwargs = dict(
id = 'AuxiliaryGoalHouse-v1',
screen_size=(172,172),
enable_noise = True,
hardness = 0.3,
configuration=deep_rl.configuration.get('house3d').as_dict()),
model_kwargs = dict()
) | from experiments.data import TRAIN3, VALIDATION3
from environments.gym_house.multi import create_multiscene
from deep_rl.common.env import RewardCollector, TransposeImage, ScaledFloatFrame
from deep_rl.common.vec_env import DummyVecEnv, SubprocVecEnv
from deep_rl.a2c_unreal.util import UnrealEnvBaseWrapper
from deep_rl.configuration import configuration
import deep_rl
import environments
import os
import numpy as np
import torch
from deep_rl import register_trainer
from experiments.ai2_auxiliary.trainer import AuxiliaryTrainer
from models import AuxiliaryBigGoalHouseModel as Model
from deep_rl.common.schedules import LinearSchedule, MultistepSchedule
from torch import nn
from deep_rl.model import TimeDistributed, Flatten, MaskedRNN
from deep_rl.common.tester import TestingEnv, TestingVecEnv
import math
VALIDATION_PROCESSES = 1 # note: single environment is supported at the moment
TestingEnv.set_hardness = lambda _, hardness: print('Hardnes was set to %s' % hardness)
TestingVecEnv.set_hardness = lambda _, hardness: print('Hardnes was set to %s' % hardness)
@register_trainer(max_time_steps = 40e6, validation_period = 200, validation_episodes = 20, episode_log_interval = 10, saving_period = 100000, save = True)
class Trainer(AuxiliaryTrainer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_processes = 16
self.max_gradient_norm = 0.5
self.rms_alpha = 0.99
self.rms_epsilon = 1e-5
self.num_steps = 20
self.gamma = .99
self.allow_gpu = True
self.learning_rate = LinearSchedule(7e-4, 0, self.max_time_steps)
self.rp_weight = 1.0
self.pc_weight = 0.05
self.vr_weight = 1.0
self.auxiliary_weight = 0.1
#self.pc_cell_size =
self.scene_complexity = MultistepSchedule(0.3, [
(5000000, LinearSchedule(0.3, 1.0, 5000000)),
(10000000, 1.0)
])
def _get_input_for_pixel_control(self, inputs):
return inputs[0][0]
def create_env(self, kwargs):
env, self.validation_env = create_envs(self.num_processes, kwargs)
return env
def create_model(self):
model = Model(self.env.observation_space.spaces[0].spaces[0].shape[0], self.env.action_space.n)
model_path = os.path.join(configuration.get('models_path'),'chouse-multienv4-auxiliary', 'weights.pth')
print('Loading weights from %s' % model_path)
model.load_state_dict(torch.load(model_path))
return model
def process(self, *args, **kwargs):
a, b, metric_context = super().process(*args, **kwargs)
self.env.set_hardness(self.scene_complexity)
metric_context.add_last_value_scalar('scene_complexity', self.scene_complexity)
return a, b, metric_context
def create_envs(num_training_processes, env_kwargs):
def wrap(env):
env = RewardCollector(env)
env = TransposeImage(env)
env = ScaledFloatFrame(env)
env = UnrealEnvBaseWrapper(env)
return env
env = create_multiscene(num_training_processes, TRAIN3, wrap = wrap, **env_kwargs)
env.set_hardness = lambda hardness: env.call_unwrapped('set_hardness', hardness)
val_env = create_multiscene(VALIDATION_PROCESSES, VALIDATION3, wrap = wrap, **env_kwargs)
val_env.set_hardness = lambda hardness: val_env.call_unwrapped('set_hardness', hardness)
val_env.set_hardness(0.6)
return env, val_env
def default_args():
return dict(
env_kwargs = dict(
id = 'AuxiliaryGoalHouse-v1',
screen_size=(172,172),
enable_noise = True,
hardness = 0.3,
configuration=deep_rl.configuration.get('house3d').as_dict()),
model_kwargs = dict()
) | en | 0.866248 | # note: single environment is supported at the moment #self.pc_cell_size = | 1.930095 | 2 |
app/process.py | varandrew/room-air-quality-forecast | 0 | 6614813 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project :bottle-iot
@File :database.py
@Author :<NAME>
@Date :2021/11/12 3:47 下午
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
def process_data(db):
    """Load air-quality records from Mongo and build imputed train/valid/test frames.

    Documents of the ``air`` collection (minus ``_id`` and their first field)
    are read, empty strings are coerced to 0, and the rows are split into
    three equal parts used as features (X), targets (y) and a held-out test
    set.  Missing values are mean-imputed with scikit-learn's SimpleImputer.

    Returns:
        [X_train, X_valid, y_train, y_valid, X_test] as imputed DataFrames.
    """
    col = db["air"]
    rows = [[0 if value == '' else value for value in list(doc.values())[1:]]
            for doc in list(col.find({}, {"_id": 0}))]
    part_test, part_x, part_y = np.array_split(rows, 3)
    X = pd.DataFrame(part_x)
    y = pd.DataFrame(part_y)
    X_test = pd.DataFrame(part_test)
    my_imputer = SimpleImputer()
    X_train, X_valid, y_train, y_valid = train_test_split(
        X, y, train_size=0.8, test_size=0.2, random_state=0)

    def _impute_pair(train_df, valid_df):
        # Fit on the training split only, then apply to both splits.
        train_imp = pd.DataFrame(my_imputer.fit_transform(train_df))
        valid_imp = pd.DataFrame(my_imputer.transform(valid_df))
        train_imp.columns = train_df.columns
        valid_imp.columns = valid_df.columns
        return train_imp, valid_imp

    imputed_X_train, imputed_X_valid = _impute_pair(X_train, X_valid)
    imputed_y_train, imputed_y_valid = _impute_pair(y_train, y_valid)
    imputed_X_test = pd.DataFrame(my_imputer.fit_transform(X_test))
    return [imputed_X_train, imputed_X_valid,
            imputed_y_train, imputed_y_valid, imputed_X_test]
| #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project :bottle-iot
@File :database.py
@Author :<NAME>
@Date :2021/11/12 3:47 下午
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
def process_data(db):
    """Load air-quality records from Mongo and build imputed train/valid/test frames.

    Documents of the ``air`` collection (minus ``_id`` and their first field)
    are read, empty strings are coerced to 0, and the rows are split into
    three equal parts used as features (X), targets (y) and a held-out test
    set.  Missing values are mean-imputed with scikit-learn's SimpleImputer.

    Returns:
        [X_train, X_valid, y_train, y_valid, X_test] as imputed DataFrames.
    """
    col = db["air"]
    rows = [[0 if value == '' else value for value in list(doc.values())[1:]]
            for doc in list(col.find({}, {"_id": 0}))]
    part_test, part_x, part_y = np.array_split(rows, 3)
    X = pd.DataFrame(part_x)
    y = pd.DataFrame(part_y)
    X_test = pd.DataFrame(part_test)
    my_imputer = SimpleImputer()
    X_train, X_valid, y_train, y_valid = train_test_split(
        X, y, train_size=0.8, test_size=0.2, random_state=0)

    def _impute_pair(train_df, valid_df):
        # Fit on the training split only, then apply to both splits.
        train_imp = pd.DataFrame(my_imputer.fit_transform(train_df))
        valid_imp = pd.DataFrame(my_imputer.transform(valid_df))
        train_imp.columns = train_df.columns
        valid_imp.columns = valid_df.columns
        return train_imp, valid_imp

    imputed_X_train, imputed_X_valid = _impute_pair(X_train, X_valid)
    imputed_y_train, imputed_y_valid = _impute_pair(y_train, y_valid)
    imputed_X_test = pd.DataFrame(my_imputer.fit_transform(X_test))
    return [imputed_X_train, imputed_X_valid,
            imputed_y_train, imputed_y_valid, imputed_X_test]
| zh | 0.423912 | #!/usr/bin/env python # -*- coding: UTF-8 -*- @Project :bottle-iot
@File :database.py
@Author :<NAME>
@Date :2021/11/12 3:47 下午 | 2.916559 | 3 |
analysis_scripts/plot_metrics.py | dezeraecox/Behind-the-scenes---Investigator-Grants-2019 | 0 | 6614814 | <reponame>dezeraecox/Behind-the-scenes---Investigator-Grants-2019
import os
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import ptitprince as pt
from loguru import logger
from GEN_Utils import FileHandling
from GEN_Utils.HDF5_Utils import hdf_to_dict
logger.info('Import OK')
# Input summary produced by the SciVal analysis step; plots go to images/.
input_path = 'analysis_results/scival_test/ten_year_metrics_summary.xlsx'
output_folder = 'images/'
if not os.path.exists(output_folder):
    os.mkdir(output_folder)
# Print all lone variables during execution
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
# Set plotting backgrounds to white
# NOTE(review): only ``matplotlib.pyplot`` is imported (as plt), so the bare
# ``matplotlib`` name below is a NameError outside an interactive session, and
# ``_VSCode_defaultMatplotlib_Params`` exists only in the VSCode interactive
# window — this script will not run standalone as-is; confirm.
matplotlib.rcParams.update(_VSCode_defaultMatplotlib_Params)
matplotlib.rcParams.update({'figure.facecolor': (1,1,1,1)})
metrics = pd.read_excel(input_path)
metrics.head(100)
def value_checker(value):
    """Coerce ``value`` to float, returning NaN for anything unparseable."""
    try:
        return float(value)
    except (TypeError, ValueError):
        # BUG FIX: bare ``except:`` replaced — only conversion failures mean
        # "missing"; other exceptions (e.g. KeyboardInterrupt) now propagate.
        return np.nan
# Coerce the two metric columns to numeric, discarding unreadable values.
metrics['fwci_awarded'] = metrics['fwci_awarded'].apply(value_checker)
metrics['pubs_awarded'] = metrics['pubs_awarded'].apply(value_checker)
for_plotting = metrics.copy().reset_index()
numeric_cols = ['Year', 'type_cat']
for_plotting[numeric_cols] = for_plotting[numeric_cols].astype(float)
year_dict = {2015: 0, 2016: 1, 2017: 2, 2018: 3, 2019: 4}
for_plotting['Year_num'] = for_plotting['Year'].map(year_dict)
# One blue shade per fellowship level (light to dark).
col_pal = [sns.color_palette('Blues')[x] for x in [2, 3, 5]]
# colors = {1.0: ['#89bedc'], 2.0: ['#539ecd'], 3.0: ['#0b559f']}
colors = {1.0: ['#0b559f'], 2.0: ['#0b559f'], 3.0: ['#0b559f']}
# NOTE(review): ``labels`` is defined but never used below.
labels = ['ECF/EL1', 'CDF/EL2', 'RF/L']
# Test histogram for years
fig, ax = plt.subplots(figsize=(12, 5))
for year, test_df in for_plotting.groupby('Year'):
    # Lone expression: displays the group in an interactive session only.
    test_df
    sns.distplot(test_df['pubs_awarded'].dropna(), ax=ax, hist=False, kde=True, label=year)
# Plot all together
fig, ax = plt.subplots(figsize=(12, 12))
pt.RainCloud(x='Year', y='pubs_awarded', hue='type_cat', data=for_plotting, palette=col_pal, width_viol=.6, ax=ax, orient='h', move=.25, alpha=0.5, dodge=True)
plt.xlabel('Number of publications in ten years prior to award.')
plt.ylabel('Year of award.')
# Test raincloud plots for each level
for level, df in for_plotting.groupby('type_cat'):
    # Lone expression: interactive display only.
    level
    fig, ax = plt.subplots(figsize=(12, 5))
    pt.RainCloud(x=df['Year'], y=df['pubs_awarded'],
             palette=sns.color_palette(colors[level]), width_viol=.6, ax=ax, orient='h', move=.25)
    plt.xlabel('Number of publications in ten years prior to award')
    plt.ylabel('Year of award')
    ax.set_yticklabels([2015, 2016, 2017, 2018, 2019])
    # Reference line: 2015 median of the metric for this level.
    ax.axvline(df[df['Year'] == 2015.0]['pubs_awarded'].median(), color='firebrick', linestyle='--', alpha=0.5)
    ax.set_axisbelow(True)
    plt.title(f'Ten-year publication record of successful awardees at level {int(level)}.', loc='left', fontdict={'fontsize': 15, 'fontweight': 'bold'}, pad=20)
    plt.savefig(f'{output_folder}publications_level_{level}.png')
# Repeat for FWCI
for level, df in for_plotting.groupby('type_cat'):
    # Lone expression: interactive display only.
    level
    fig, ax = plt.subplots(figsize=(12, 5))
    pt.RainCloud(x=df['Year'], y=df['fwci_awarded'],
             palette=sns.color_palette(colors[level]), width_viol=.6, ax=ax, orient='h', move=.25)
    plt.xlabel('Average FWCI in ten years prior to award')
    plt.ylabel('Year of award')
    plt.xlim(-2.5, 25)
    ax.set_yticklabels([2015, 2016, 2017, 2018, 2019])
    ax.axvline(df[df['Year'] == 2015.0]['fwci_awarded'].median(),
           color='firebrick', linestyle='--', alpha=0.5)
    ax.set_axisbelow(True)
    plt.title(f'Average FWCI of successful awardees at level {int(level)}.', loc='left', fontdict={
        'fontsize': 15, 'fontweight': 'bold'}, pad=20)
    plt.savefig(f'{output_folder}fwci_level_{level}.png')
| import os
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import ptitprince as pt
from loguru import logger
from GEN_Utils import FileHandling
from GEN_Utils.HDF5_Utils import hdf_to_dict
logger.info('Import OK')
# Input summary produced by the SciVal analysis step; plots go to images/.
input_path = 'analysis_results/scival_test/ten_year_metrics_summary.xlsx'
output_folder = 'images/'
if not os.path.exists(output_folder):
    os.mkdir(output_folder)
# Print all lone variables during execution
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
# Set plotting backgrounds to white
# NOTE(review): only ``matplotlib.pyplot`` is imported (as plt), so the bare
# ``matplotlib`` name below is a NameError outside an interactive session, and
# ``_VSCode_defaultMatplotlib_Params`` exists only in the VSCode interactive
# window — this script will not run standalone as-is; confirm.
matplotlib.rcParams.update(_VSCode_defaultMatplotlib_Params)
matplotlib.rcParams.update({'figure.facecolor': (1,1,1,1)})
metrics = pd.read_excel(input_path)
metrics.head(100)
# in any case where values were not read properly, discard
def value_checker(value):
    """Coerce ``value`` to float, returning NaN for anything unparseable."""
    try:
        return float(value)
    except (TypeError, ValueError):
        # BUG FIX: bare ``except:`` replaced — only conversion failures mean
        # "missing"; other exceptions (e.g. KeyboardInterrupt) now propagate.
        return np.nan
# Coerce the two metric columns to numeric, discarding unreadable values.
metrics['fwci_awarded'] = metrics['fwci_awarded'].apply(value_checker)
metrics['pubs_awarded'] = metrics['pubs_awarded'].apply(value_checker)
for_plotting = metrics.copy().reset_index()
numeric_cols = ['Year', 'type_cat']
for_plotting[numeric_cols] = for_plotting[numeric_cols].astype(float)
year_dict = {2015: 0, 2016: 1, 2017: 2, 2018: 3, 2019: 4}
for_plotting['Year_num'] = for_plotting['Year'].map(year_dict)
# One blue shade per fellowship level (light to dark).
col_pal = [sns.color_palette('Blues')[x] for x in [2, 3, 5]]
# colors = {1.0: ['#89bedc'], 2.0: ['#539ecd'], 3.0: ['#0b559f']}
colors = {1.0: ['#0b559f'], 2.0: ['#0b559f'], 3.0: ['#0b559f']}
# NOTE(review): ``labels`` is defined but never used below.
labels = ['ECF/EL1', 'CDF/EL2', 'RF/L']
# Test histogram for years
fig, ax = plt.subplots(figsize=(12, 5))
for year, test_df in for_plotting.groupby('Year'):
    # Lone expression: displays the group in an interactive session only.
    test_df
    sns.distplot(test_df['pubs_awarded'].dropna(), ax=ax, hist=False, kde=True, label=year)
# Plot all together
fig, ax = plt.subplots(figsize=(12, 12))
pt.RainCloud(x='Year', y='pubs_awarded', hue='type_cat', data=for_plotting, palette=col_pal, width_viol=.6, ax=ax, orient='h', move=.25, alpha=0.5, dodge=True)
plt.xlabel('Number of publications in ten years prior to award.')
plt.ylabel('Year of award.')
# Test raincloud plots for each level
for level, df in for_plotting.groupby('type_cat'):
    # Lone expression: interactive display only.
    level
    fig, ax = plt.subplots(figsize=(12, 5))
    pt.RainCloud(x=df['Year'], y=df['pubs_awarded'],
             palette=sns.color_palette(colors[level]), width_viol=.6, ax=ax, orient='h', move=.25)
    plt.xlabel('Number of publications in ten years prior to award')
    plt.ylabel('Year of award')
    ax.set_yticklabels([2015, 2016, 2017, 2018, 2019])
    # Reference line: 2015 median of the metric for this level.
    ax.axvline(df[df['Year'] == 2015.0]['pubs_awarded'].median(), color='firebrick', linestyle='--', alpha=0.5)
    ax.set_axisbelow(True)
    plt.title(f'Ten-year publication record of successful awardees at level {int(level)}.', loc='left', fontdict={'fontsize': 15, 'fontweight': 'bold'}, pad=20)
    plt.savefig(f'{output_folder}publications_level_{level}.png')
# Repeat for FWCI
for level, df in for_plotting.groupby('type_cat'):
    # Lone expression: interactive display only.
    level
    fig, ax = plt.subplots(figsize=(12, 5))
    pt.RainCloud(x=df['Year'], y=df['fwci_awarded'],
             palette=sns.color_palette(colors[level]), width_viol=.6, ax=ax, orient='h', move=.25)
    plt.xlabel('Average FWCI in ten years prior to award')
    plt.ylabel('Year of award')
    plt.xlim(-2.5, 25)
    ax.set_yticklabels([2015, 2016, 2017, 2018, 2019])
    ax.axvline(df[df['Year'] == 2015.0]['fwci_awarded'].median(),
           color='firebrick', linestyle='--', alpha=0.5)
    ax.set_axisbelow(True)
    plt.title(f'Average FWCI of successful awardees at level {int(level)}.', loc='left', fontdict={
        'fontsize': 15, 'fontweight': 'bold'}, pad=20)
    plt.savefig(f'{output_folder}fwci_level_{level}.png')
nightshift.py | komax/tinker-micropython | 0 | 6614815 | <filename>nightshift.py
import collections
Time = collections.namedtuple('Time', 'hour minute')
def duration_m(time_a, time_b):
    """Minutes from ``time_a`` to ``time_b``.

    When the hour components differ the signed difference is returned
    (negative if ``time_b`` is earlier); within the same hour the absolute
    minute difference is returned.
    """
    dh = time_b.hour - time_a.hour
    dm = time_b.minute - time_a.minute
    return dh * 60 + dm if dh else abs(dm)
class Nightshift:
    """A nightly time window delimited by ``begin`` and ``end`` Times.

    Provides predicates for where a clock time falls relative to the window
    and computes how long a caller should sleep until the next boundary.
    """

    def __init__(self, begin, end):
        # Both are Time(hour, minute); begin is presumably earlier than end
        # on the same day — TODO confirm (a window crossing midnight would
        # break the comparisons below).
        self.begin = begin
        self.end = end

    def duration_s(self):
        """Return the shift length in seconds (also prints the minutes)."""
        duration_minutes = duration_m(self.begin, self.end)
        print("duration in minutes {}".format(duration_minutes))
        return duration_minutes * 60

    def is_before(self, time):
        """True if ``time`` is strictly before the shift starts."""
        return time.hour < self.begin.hour or \
            (time.hour == self.begin.hour and time.minute < self.begin.minute)

    def is_within(self, time):
        """True if ``time`` falls inside the shift (both ends inclusive)."""
        if time.hour == self.begin.hour \
                and self.begin.minute <= time.minute:
            return True
        elif time.hour == self.end.hour \
                and time.minute <= self.end.minute:
            return True
        elif self.begin.hour < time.hour < self.end.hour:
            return True
        else:
            return False

    def is_after(self, time):
        """True if ``time`` is strictly after the shift ends."""
        return time.hour > self.end.hour or \
            (time.hour == self.end.hour and time.minute > self.end.minute)

    def is_at_begin(self, time, delta=Time(0,5)):
        # Component-wise tolerance: e.g. 1:59 vs 2:00 is NOT "close" because
        # the hour components differ by 1 — presumably acceptable; confirm.
        return abs(self.begin.hour - time.hour) <= delta.hour and \
            abs(self.begin.minute - time.minute) <= delta.minute

    def is_at_end(self, time, delta=Time(0,5)):
        # Same component-wise tolerance as is_at_begin, against the end time.
        return abs(self.end.hour - time.hour) <= delta.hour and \
            abs(self.end.minute - time.minute) <= delta.minute

    def sleep_time(self, time, max_sleep=Time(0,3), min_sleep=Time(0, 1)):
        """Minutes to sleep from ``time`` until the next shift boundary.

        After (or at the end of) the shift: sleep through midnight until
        tomorrow's begin.  Inside or at the begin: sleep until the end.
        Before it: sleep until it begins.  The result is floored to
        ``min_sleep`` when falsy and capped at ``max_sleep``.
        """
        st = None
        if self.is_at_end(time) or self.is_after(time):
            # Wrap past midnight: remainder of today plus tomorrow until begin.
            duration_today = duration_m(time, Time(23, 59))
            duration_tomorrow = duration_m(Time(0, 0), self.begin)
            st = duration_today + duration_tomorrow
        elif self.is_at_begin(time) or self.is_within(time):
            # Calculate sleep time from time to end.
            st = duration_m(time, self.end)
        elif self.is_before(time):
            # Calculate sleep time between time and begin
            st = duration_m(time, self.begin)
        else:
            raise RuntimeError("{} needs to be either before, within or after the duration".format(time))
        if not st:
            # NOTE(review): triggers for st == 0 as well as None — presumably
            # intended as a minimum-sleep floor; confirm.
            st = min_sleep.hour * 60 + min_sleep.minute
        print("I calculated a sleeping time of {} minutes".format(st))
        max_sleep_minutes = max_sleep.hour * 60 + max_sleep.minute
        if st > max_sleep_minutes:
            st = max_sleep_minutes
        print("I am sleeping for {} minutes".format(st))
        return st

    def __repr__(self):
        return "Nightshift(begin={}, end={})".format(self.begin, self.end)
| <filename>nightshift.py
import collections
Time = collections.namedtuple('Time', 'hour minute')
def duration_m(time_a, time_b):
hours_delta = time_b.hour - time_a.hour
minutes_delta = time_b.minute - time_a.minute
if hours_delta:
duration_minutes = hours_delta * 60 + minutes_delta
else:
duration_minutes = abs(minutes_delta)
return duration_minutes
class Nightshift:
def __init__(self, begin, end):
self.begin = begin
self.end = end
def duration_s(self):
duration_minutes = duration_m(self.begin, self.end)
print("duration in minutes {}".format(duration_minutes))
return duration_minutes * 60
def is_before(self, time):
return time.hour < self.begin.hour or \
(time.hour == self.begin.hour and time.minute < self.begin.minute)
def is_within(self, time):
if time.hour == self.begin.hour \
and self.begin.minute <= time.minute:
return True
elif time.hour == self.end.hour \
and time.minute <= self.end.minute:
return True
elif self.begin.hour < time.hour < self.end.hour:
return True
else:
return False
def is_after(self, time):
return time.hour > self.end.hour or \
(time.hour == self.end.hour and time.minute > self.end.minute)
def is_at_begin(self, time, delta=Time(0,5)):
return abs(self.begin.hour - time.hour) <= delta.hour and \
abs(self.begin.minute - time.minute) <= delta.minute
def is_at_end(self, time, delta=Time(0,5)):
return abs(self.end.hour - time.hour) <= delta.hour and \
abs(self.end.minute - time.minute) <= delta.minute
def sleep_time(self, time, max_sleep=Time(0,3), min_sleep=Time(0, 1)):
st = None
if self.is_at_end(time) or self.is_after(time):
duration_today = duration_m(time, Time(23, 59))
duration_tomorrow = duration_m(Time(0, 0), self.begin)
st = duration_today + duration_tomorrow
elif self.is_at_begin(time) or self.is_within(time):
# Calculate sleep time from time to end.
st = duration_m(time, self.end)
elif self.is_before(time):
# Calculate sleep time between time and begin
st = duration_m(time, self.begin)
else:
raise RuntimeError("{} needs to be either before, within or after the duration".format(time))
if not st:
st = min_sleep.hour * 60 + min_sleep.minute
print("I calculated a sleeping time of {} minutes".format(st))
max_sleep_minutes = max_sleep.hour * 60 + max_sleep.minute
if st > max_sleep_minutes:
st = max_sleep_minutes
print("I am sleeping for {} minutes".format(st))
return st
def __repr__(self):
return "Nightshift(begin={}, end={})".format(self.begin, self.end)
| en | 0.913751 | # Calculate sleep time from time to end. # Calculate sleep time between time and begin | 3.394659 | 3 |
codraft/core/gui/__init__.py | CODRA-Software/CodraFT | 0 | 6614816 | <reponame>CODRA-Software/CodraFT<filename>codraft/core/gui/__init__.py
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause or the CeCILL-B License
# (see codraft/__init__.py for details)
"""
CodraFT core.gui module
This module handles all GUI features which are specific to CodraFT:
* core.gui.main: handles CodraFT main window which relies on signal and image panels
* core.gui.panel: handles CodraFT signal and image panels, relying on:
* core.gui.actionhandler
* core.gui.objectlist
* core.gui.plotitemlist
* core.gui.roieditor
* core.gui.processor
* core.gui.docks: handles CodraFT dockwidgets
* core.gui.h5io: handles HDF5 browser widget and related features
"""
| # -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause or the CeCILL-B License
# (see codraft/__init__.py for details)
"""
CodraFT core.gui module
This module handles all GUI features which are specific to CodraFT:
* core.gui.main: handles CodraFT main window which relies on signal and image panels
* core.gui.panel: handles CodraFT signal and image panels, relying on:
* core.gui.actionhandler
* core.gui.objectlist
* core.gui.plotitemlist
* core.gui.roieditor
* core.gui.processor
* core.gui.docks: handles CodraFT dockwidgets
* core.gui.h5io: handles HDF5 browser widget and related features
""" | en | 0.620273 | # -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause or the CeCILL-B License # (see codraft/__init__.py for details) CodraFT core.gui module This module handles all GUI features which are specific to CodraFT: * core.gui.main: handles CodraFT main window which relies on signal and image panels * core.gui.panel: handles CodraFT signal and image panels, relying on: * core.gui.actionhandler * core.gui.objectlist * core.gui.plotitemlist * core.gui.roieditor * core.gui.processor * core.gui.docks: handles CodraFT dockwidgets * core.gui.h5io: handles HDF5 browser widget and related features | 1.049727 | 1 |
Scripts/simulation/rewards/reward_operation.py | velocist/TS4CheatsInfo | 0 | 6614817 | <reponame>velocist/TS4CheatsInfo
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\rewards\reward_operation.py
# Compiled at: 2016-03-03 03:00:16
# Size of source mod 2**32: 1062 bytes
from interactions.utils.loot_basic_op import BaseLootOperation
from rewards.reward import Reward
import sims4.log
logger = sims4.log.Logger('RewardOperation', default_owner='rmccord')
class RewardOperation(BaseLootOperation):
    """Loot operation that grants a tuned Reward to the subject Sim."""
    FACTORY_TUNABLES = {'reward': Reward.TunableReference(description='\n The reward given to the subject of the loot operation.\n ')}
    def __init__(self, *args, reward, **kwargs):
        (super().__init__)(*args, **kwargs)
        # Tuned Reward instance handed out when the operation is applied.
        self.reward = reward
    def _apply_to_subject_and_target(self, subject, target, resolver):
        """Give self.reward to `subject`; log and return False if it is not a Sim."""
        if not subject.is_sim:
            logger.error('Attempting to apply Reward Loot Op to {} which is not a Sim.', subject)
            return False
        self.reward.give_reward(subject)
        return True
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\rewards\reward_operation.py
# Compiled at: 2016-03-03 03:00:16
# Size of source mod 2**32: 1062 bytes
from interactions.utils.loot_basic_op import BaseLootOperation
from rewards.reward import Reward
import sims4.log
logger = sims4.log.Logger('RewardOperation', default_owner='rmccord')
class RewardOperation(BaseLootOperation):
FACTORY_TUNABLES = {'reward': Reward.TunableReference(description='\n The reward given to the subject of the loot operation.\n ')}
def __init__(self, *args, reward, **kwargs):
(super().__init__)(*args, **kwargs)
self.reward = reward
def _apply_to_subject_and_target(self, subject, target, resolver):
if not subject.is_sim:
logger.error('Attempting to apply Reward Loot Op to {} which is not a Sim.', subject)
return False
self.reward.give_reward(subject)
return True | en | 0.515676 | # uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\InGame\Gameplay\Scripts\Server\rewards\reward_operation.py # Compiled at: 2016-03-03 03:00:16 # Size of source mod 2**32: 1062 bytes | 2.012253 | 2 |
torch/preprocess.py | zhmz90/first_step_with_julia_kaggle.jl | 1 | 6614818 | <reponame>zhmz90/first_step_with_julia_kaggle.jl
#!/usr/local/env python
import h5py
from skimage.io import imread
import numpy as np
import pandas as pd
def read_data(typeData, labelsInfo, imageSize, path):
    """Load the resized 20x20 RGB images listed in labelsInfo["ID"].

    Files are read from "<path>/<typeData>Resized/<id>.Bmp" and returned as a
    float array of shape (n_images, 3, 20, 20).
    """
    images = np.zeros((labelsInfo.shape[0], 3, 20, 20))
    for idx, image_id in enumerate(labelsInfo["ID"]):
        file_name = "{0}/{1}Resized/{2}.Bmp".format(path, typeData, image_id)
        pixels = imread(file_name, as_grey=False)
        # Flatten, then regroup the pixel values as 3 channels of 20x20.
        images[idx, :] = pixels.ravel().reshape(-1, 20, 20)
    return images
def class_dict(classes):
    """Build the label<->code mappings for a collection of class labels.

    Returns (class2num, num2class): a dict mapping each label to its integer
    code, and the sorted array of distinct labels indexed by that code.
    """
    num2class = pd.unique(classes)
    num2class.sort()  # in-place sort so codes follow lexicographic order
    class2num = {label: code for code, label in enumerate(num2class)}
    return class2num, num2class
# --- Script entry: build the train/test arrays and persist them to HDF5. ---
imageSize = 400
data_path = "/home/guo/haplox/Github/first_step_with_julia_kaggle/data/data"
labelsInfoTrain = pd.read_csv("{0}/trainLabels.csv".format(data_path))
xTrain = read_data("train", labelsInfoTrain, imageSize, data_path)
class2num, num2class = class_dict(labelsInfoTrain["Class"])
# BUG FIX: on Python 3, np.array(map(...)) wrapped the map iterator in a
# 0-d object array instead of materializing the labels; build a list first.
yTrain = np.array([class2num[c] for c in labelsInfoTrain["Class"]])
labelsInfoTest = pd.read_csv("{0}/sampleSubmission.csv".format(data_path))
xTest = read_data("test", labelsInfoTest, imageSize, data_path)
IDTest = labelsInfoTest["ID"]
classes = labelsInfoTrain["Class"]
# (dropped a redundant class_dict(classes) call whose result was discarded)
with h5py.File("data.hdf5", "w") as f:
    f.create_dataset("XTr", data = xTrain)
    f.create_dataset("yTr", data = yTrain)
    f.create_dataset("XTe", data = xTest)
    f.create_dataset("IDTe", data = IDTest)
| #!/usr/local/env python
import h5py
from skimage.io import imread
import numpy as np
import pandas as pd
def read_data(typeData, labelsInfo, imageSize, path):
x = np.zeros((labelsInfo.shape[0], 3, 20, 20))
for (index, idImage) in enumerate(labelsInfo["ID"]):
nameFile = "{0}/{1}Resized/{2}.Bmp".format(path, typeData, idImage)
img = imread(nameFile, as_grey=False)
#print(img.shape)
#print(len(img.ravel()))
x[index, :] = np.reshape(img.ravel(), (-1, 20, 20))
return x
def class_dict(classes):
num2class = pd.unique(classes) # num2class
num2class.sort()
class2num = {}
for num,c in enumerate(num2class):
class2num[c] = num
return class2num,num2class
imageSize = 400
data_path = "/home/guo/haplox/Github/first_step_with_julia_kaggle/data/data"
labelsInfoTrain = pd.read_csv("{0}/trainLabels.csv".format(data_path))
xTrain = read_data("train", labelsInfoTrain, imageSize, data_path)
class2num,num2class = class_dict(labelsInfoTrain["Class"])
yTrain = map(lambda x:class2num[x], labelsInfoTrain["Class"])
yTrain = np.array(yTrain)
labelsInfoTest = pd.read_csv("{0}/sampleSubmission.csv".format(data_path))
xTest = read_data("test", labelsInfoTest, imageSize, data_path)
IDTest = labelsInfoTest["ID"]
classes = labelsInfoTrain["Class"]
class_dict(classes)
with h5py.File("data.hdf5", "w") as f:
f.create_dataset("XTr", data = xTrain)
f.create_dataset("yTr", data = yTrain)
f.create_dataset("XTe", data = xTest)
f.create_dataset("IDTe", data = IDTest) | ru | 0.236726 | #!/usr/local/env python #print(img.shape) #print(len(img.ravel())) # num2class | 2.501337 | 3 |
API/apps.py | MrAbdelaziz/GestionStoc_django | 3 | 6614819 | <filename>API/apps.py
from django.apps import AppConfig
class BackofficeConfig(AppConfig):
    """Django application configuration for the 'API' app.

    NOTE(review): the class is named "Backoffice" but registers the 'API'
    app -- presumably a leftover from a rename; confirm intended.
    """
    name = 'API'
| <filename>API/apps.py
from django.apps import AppConfig
class BackofficeConfig(AppConfig):
name = 'API'
| none | 1 | 1.322248 | 1 | |
django/apps/attachment/migrations/0011_enable_audit.py | wykys/project-thesaurus | 0 | 6614820 | # Generated by Django 3.0.6 on 2020-05-29 16:43
from django.db import migrations
from apps.audit.operations import EnableAuditOperation
class Migration(migrations.Migration):
    """Enable audit logging on the Attachment model via a custom operation."""
    dependencies = [
        ('attachment', '0010_attachment_size'),
    ]
    operations = [
        # Custom operation from apps.audit that switches auditing on for Attachment.
        EnableAuditOperation('Attachment'),
    ]
| # Generated by Django 3.0.6 on 2020-05-29 16:43
from django.db import migrations
from apps.audit.operations import EnableAuditOperation
class Migration(migrations.Migration):
dependencies = [
('attachment', '0010_attachment_size'),
]
operations = [
EnableAuditOperation('Attachment'),
]
| en | 0.805461 | # Generated by Django 3.0.6 on 2020-05-29 16:43 | 1.324616 | 1 |
seguimiento/planes/admin.py | nnrcschmdt/pislea2 | 1 | 6614821 | from django.contrib import admin
from django.utils.html import format_html
from .models import Plan
@admin.register(Plan)
class PlanAdmin(admin.ModelAdmin):
    """Admin configuration for Plan: grouped edit form, list columns/filters,
    and a read-only clickable source link rendered by enlace_fuente()."""
    fieldsets = (
        (None, {
            'fields': ('entidad', 'enlace_fuente', 'estatus', 'documento', 'comentarios')
        }),
        ('Fechas', {
            'fields': ('recibido', 'publicado', 'revisado')
        }),
        ('Páginas', {
            'fields': ('páginas', 'páginas_quitadas')
        })
    )
    list_display = ('entidad', 'recibido', 'publicado')
    list_filter = ('estatus',)
    # Rendered but never editable; 'enlace_fuente' is the computed link below.
    readonly_fields = ['entidad', 'enlace_fuente', 'documento']
    def enlace_fuente(self, obj):
        """Render the plan's source URL (obj.fuente) as a clickable link."""
        return format_html("<a href='{url}'>{url}</a>", url=obj.fuente)
| from django.contrib import admin
from django.utils.html import format_html
from .models import Plan
@admin.register(Plan)
class PlanAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('entidad', 'enlace_fuente', 'estatus', 'documento', 'comentarios')
}),
('Fechas', {
'fields': ('recibido', 'publicado', 'revisado')
}),
('Páginas', {
'fields': ('páginas', 'páginas_quitadas')
})
)
list_display = ('entidad', 'recibido', 'publicado')
list_filter = ('estatus',)
readonly_fields = ['entidad', 'enlace_fuente', 'documento']
def enlace_fuente(self, obj):
return format_html("<a href='{url}'>{url}</a>", url=obj.fuente)
| none | 1 | 2.039095 | 2 | |
picturebot.py | YOricH/ImagesOnScheduleBot | 0 | 6614822 | # -*- coding: utf-8 -*-
# Main class of the application.
import logging
import config
import threading
import time
import re
import schedule
import telebot
import dbmanager
from grabber import PictureGrabber
log = logging.getLogger(f'logger.{__name__}')
class PictureBot:
    """The class to manage behavior of the bot. It binds the database and the Telegram bot class each other."""

    # Interval choices offered to the user during schedule setup.
    days_of_week = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')
    countable_intervals = ('Second', 'Minute', 'Hour', 'Day', 'Week')

    def __init__(self, db_manager: dbmanager.DBManager, bot: telebot.TeleBot):
        self.__db_manager = db_manager
        self.__schedule_thread = None
        self.__bot = bot
        # Per-chat setup wizard state:
        # chat_id -> {'state', 'prev_state', 'query', 'schedule': {'interval': {...}, 'time'}}
        self.schedule_setup_state = {}

    def __repr__(self):
        return f'self.schedule_setup_state: {str(self.schedule_setup_state)}\n\r' \
               f'self.__db_manager: {str(self.__db_manager)}\n\r' \
               f'self.__bot: {str(self.__bot)}\n\r' \
               f'self.__schedule_thread: {str(self.__schedule_thread)}\n\r'

    def start(self):
        """Must be called when the application starts.
        It starts sending images to all subscriber from the database.
        """
        sub_set = self.__db_manager.get_all_subscribers()
        for sub_id in sub_set:
            try:
                # NOTE(security): sub_id[2] is a schedule expression stored in the
                # DB and executed with eval(); only this application must ever
                # write that column.
                eval('schedule.' + sub_id[2])
                log.info('Started schedule: ' + sub_id[2])
            except Exception as e:
                log.exception(f'Error when run schedule job on running bot: {type(e)} - {str(e)}')
        self.__schedule_thread = threading.Thread(target=self.run_schedule_thread)
        log.info('Starting thread for schedules')
        self.__schedule_thread.daemon = True
        self.__schedule_thread.start()
        log.info('Thread for schedules has started')

    def run_schedule_thread(self):
        """The separate thread for schedules."""
        while True:
            schedule.run_pending()
            time.sleep(1)

    def add_subscriber(self, chat_id, keywords, schedule_str=None):
        """Adds a new record to database and launches the new schedule for sending images.

        Keywords arguments:
        chat_id -- chat identifier, int
        keywords -- query to Google Image Search, str
        schedule_str -- schedule for sending images, str. If empty or None, a new image will be sent every minute."""
        if schedule_str is None:
            schedule_str = f'every(1).minutes.do(self.send_picture, chat_id={str(chat_id)}).tag(str({str(chat_id)}))'
        log.debug(f'schedule_str: {schedule_str}')
        try:
            eval('schedule.' + schedule_str)
        except Exception as e:
            log.exception(f'Error when running schedule job: {type(e)} - {str(e)}')
        else:
            # Persist only once the schedule expression proved evaluable.
            self.__db_manager.save_schedule(chat_id, keywords, schedule_str)

    def delete_subscriber(self, chat_id):
        """Stops sending images and delete all data about the specific subscriber from the database.

        Keywords arguments:
        chat_id -- chat identifier, int
        """
        try:
            # Jobs were tagged with str(chat_id) when created.
            schedule.clear(str(chat_id))
        except Exception as e:
            log.exception(f'Error when clearing schedule job for chat {str(chat_id)}: {type(e)} - {str(e)}')
        else:
            self.__db_manager.delete_schedule(chat_id)

    def send_picture(self, chat_id):
        """Sends the unused image to the chat.

        Keywords arguments:
        chat_id -- chat identifier, int
        """
        kl_list = self.__db_manager.get_keywords_and_links(chat_id)
        if len(kl_list) == 0:
            return
        keywords = kl_list[0]
        used_links_list = kl_list[1]
        picture_link = PictureGrabber.get_img_url(keywords, used_links_list)
        if picture_link:
            self.__bot.send_photo(chat_id, picture_link)
            # Remember the link so the same image is not sent twice.
            self.__db_manager.save_link(chat_id, picture_link)

    def get_setup_schedule_state(self, chat_id):
        """Returns the state of the schedule setup (str) for the chat or an empty string.

        Keywords arguments:
        chat_id -- chat identifier, int
        """
        schedule_options = self.schedule_setup_state.get(chat_id)
        if schedule_options is not None:
            return schedule_options.get('state')
        return ''

    def get_previous_state(self, chat_id):
        """Returns the previous state of the schedule setup. It needs to verify the input.

        Keywords arguments:
        chat_id -- chat identifier, int
        """
        schedule_options = self.schedule_setup_state.get(chat_id)
        if schedule_options is not None:
            return schedule_options.get('prev_state')
        return ''

    def wrong_input(self, message):
        """Returns True if the message.text is INVALID for the expected input,
        or False if it is acceptable.

        (Doc fix: the previous docstring stated the opposite of the actual
        return convention -- the method name `wrong_input` is authoritative.)

        Keywords arguments:
        message -- Telegram message (an object of the class Message from pyTelegramBotAPI)
        """
        prev_state = self.get_previous_state(message.chat.id)
        if prev_state is None:
            return True
        interval_name = self.schedule_setup_state[message.chat.id]['schedule']['interval']['name']
        message_str = message.text.strip()
        if prev_state == 'day_schedule' or (prev_state == 'interval' and (interval_name in PictureBot.days_of_week)):
            # Expecting a time of day, HH:MM, 00:00-23:59.
            # BUG FIX: the hour class was [0,1], which also matched a literal
            # comma (accepting e.g. ",5:30"); corrected to [01].
            if re.match(r'^(([01][0-9])|(2[0-3])):[0-5][0-9]$', message_str) is None:
                return True
            else:
                return False
        else:
            # Expecting a positive integer (interval count / sample size).
            if message_str.isdecimal() and (int(message_str) > 0):
                return False
            else:
                return True

    def set_schedule_state(self, message, state):
        """Sets the state of the schedule setup to continue setting.

        Keywords arguments:
        message -- Telegram message (an object of the class Message from pyTelegramBotAPI)
        state -- the new state for the schedule setup, str.
        """
        if state == 'start':
            # Fresh wizard: reset everything for this chat.
            self.schedule_setup_state[message.chat.id] = \
                {'state': state, 'prev_state': '', 'query': '',
                 'schedule': {'interval': {'name': '', 'value': 0}, 'time': ''}}
        if state == 'setup':
            # First 'setup' message carries the search query; sanitize it.
            if len(self.schedule_setup_state[message.chat.id]['query']) == 0:
                query_string = message.text.strip().replace('\n', '').replace('\r', '').replace('\\', '')
                self.schedule_setup_state[message.chat.id]['query'] = query_string
        if state == 'random':
            # Random interval: no further input needed, finish immediately.
            self.translate_schedule(message.chat.id, True)
            return
        if state == 'day_schedule' or state == 'last':
            if self.schedule_setup_state[message.chat.id]['state'] == 'day_schedule':
                self.schedule_setup_state[message.chat.id]['schedule']['interval']['value'] = int(message.text)
            else:
                self.schedule_setup_state[message.chat.id]['schedule']['interval']['name'] = message.text
        if state == 'end':
            # Final answer is either an interval count or a HH:MM time.
            if (message.text.find(':') == -1) and message.text.isdecimal():
                self.schedule_setup_state[message.chat.id]['schedule']['interval']['value'] = int(message.text)
            else:
                self.schedule_setup_state[message.chat.id]['schedule']['time'] = message.text
            log.debug(f'self.schedule_setup_state: {self.schedule_setup_state}')
            self.translate_schedule(message.chat.id)
            return
        self.schedule_setup_state[message.chat.id]['prev_state'] = self.schedule_setup_state[message.chat.id]['state']
        self.schedule_setup_state[message.chat.id]['state'] = state

    def translate_schedule(self, chat_id, random=False):
        """Translates the schedule dictionary to the correct string.

        Keywords arguments:
        chat_id -- chat identifier, int
        random -- if True, images will be sent at random intervals. Parameters of random are set in config.py.
        """
        schedule_settings = self.schedule_setup_state[chat_id]
        if random:
            schedule_str = f'every({config.INTERVAL_START}).to({config.INTERVAL_END}).minutes.' \
                           f'do(self.send_picture, chat_id={str(chat_id)}).tag(str({str(chat_id)}))'
        else:
            interval_name = schedule_settings['schedule']['interval']['name'].lower()
            interval_value = schedule_settings['schedule']['interval']['value']
            # Pluralize the unit ("minute" -> "minutes") for counts above one,
            # and omit the count entirely for 0/1 (schedule's every() default).
            interval_name = interval_name if interval_value <= 1 else interval_name + 's'
            interval_value = '' if (interval_value == 0 or interval_value == 1) else str(interval_value)
            start_time = schedule_settings['schedule']['time']
            start_time = f'at("{start_time}").' if start_time != '' else start_time
            schedule_str = f'every({interval_value}).{interval_name}.{start_time}do(self.send_picture, ' \
                           f'chat_id={str(chat_id)}).tag(str({str(chat_id)}))'
        log.debug(f'schedule_str: {schedule_str}')
        self.add_subscriber(chat_id, schedule_settings['query'], schedule_str)
        # Wizard complete: drop this chat's transient state.
        self.schedule_setup_state.pop(chat_id)

    def is_schedule_exist(self, chat_id):
        """Returns True, if the schedule for the chat is already exists.

        Keywords arguments:
        chat_id -- chat identifier, int
        """
        result = self.__db_manager.get_schedule(chat_id)
        return len(result) > 0
| # -*- coding: utf-8 -*-
# Main class of the application.
import logging
import config
import threading
import time
import re
import schedule
import telebot
import dbmanager
from grabber import PictureGrabber
log = logging.getLogger(f'logger.{__name__}')
class PictureBot:
"""The class to manage behavior of the bot. It binds the database and the Telegram bot class each other."""
days_of_week = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')
countable_intervals = ('Second', 'Minute', 'Hour', 'Day', 'Week')
def __init__(self, db_manager: dbmanager.DBManager, bot: telebot.TeleBot):
self.__db_manager = db_manager
self.__schedule_thread = None
self.__bot = bot
self.schedule_setup_state = {}
def __repr__(self):
return f'self.schedule_setup_state: {str(self.schedule_setup_state)}\n\r' \
f'self.__db_manager: {str(self.__db_manager)}\n\r' \
f'self.__bot: {str(self.__bot)}\n\r' \
f'self.__schedule_thread: {str(self.__schedule_thread)}\n\r'
def start(self):
"""Must be called when the application starts.
It starts sending images to all subscriber from the database.
"""
sub_set = self.__db_manager.get_all_subscribers()
for sub_id in sub_set:
try:
eval('schedule.' + sub_id[2])
log.info('Started schedule: ' + sub_id[2])
except Exception as e:
log.exception(f'Error when run schedule job on running bot: {type(e)} - {str(e)}')
self.__schedule_thread = threading.Thread(target=self.run_schedule_thread)
log.info('Starting thread for schedules')
self.__schedule_thread.daemon = True
self.__schedule_thread.start()
log.info('Thread for schedules has started')
def run_schedule_thread(self):
"""The separate thread for schedules."""
while True:
schedule.run_pending()
time.sleep(1)
def add_subscriber(self, chat_id, keywords, schedule_str=None):
"""Adds a new record to database and launches the new schedule for sending images.
Keywords arguments:
chat_id -- chat identifier, int
keywords -- query to Google Image Search, str
schedule_str -- schedule for sending images, str. If empty or None, a new image will be sent every minute."""
if schedule_str is None:
schedule_str = f'every(1).minutes.do(self.send_picture, chat_id={str(chat_id)}).tag(str({str(chat_id)}))'
log.debug(f'schedule_str: {schedule_str}')
try:
eval('schedule.' + schedule_str)
except Exception as e:
log.exception(f'Error when running schedule job: {type(e)} - {str(e)}')
else:
self.__db_manager.save_schedule(chat_id, keywords, schedule_str)
def delete_subscriber(self, chat_id):
"""Stops sending images and delete all data about the specific subscriber from the database.
Keywords arguments:
chat_id -- chat identifier, int
"""
try:
schedule.clear(str(chat_id))
except Exception as e:
log.exception(f'Error when clearing schedule job for chat {str(chat_id)}: {type(e)} - {str(e)}')
else:
self.__db_manager.delete_schedule(chat_id)
def send_picture(self, chat_id):
"""Sends the unused image to the chat.
Keywords arguments:
chat_id -- chat identifier, int
"""
kl_list = self.__db_manager.get_keywords_and_links(chat_id)
if len(kl_list) == 0:
return
keywords = kl_list[0]
used_links_list = kl_list[1]
picture_link = PictureGrabber.get_img_url(keywords, used_links_list)
if picture_link:
self.__bot.send_photo(chat_id, picture_link)
self.__db_manager.save_link(chat_id, picture_link)
def get_setup_schedule_state(self, chat_id):
"""Returns the state of the schedule setup (str) for the chat or an empty string.
Keywords arguments:
chat_id -- chat identifier, int
"""
schedule_options = self.schedule_setup_state.get(chat_id)
if schedule_options is not None:
return schedule_options.get('state')
return ''
def get_previous_state(self, chat_id):
"""Returns the previous state of the schedule setup. It needs to verify the input.
Keywords arguments:
chat_id -- chat identifier, int
"""
schedule_options = self.schedule_setup_state.get(chat_id)
if schedule_options is not None:
return schedule_options.get('prev_state')
return ''
def wrong_input(self, message):
"""Returns True if the message.text is correct, or False if it's not.
Keywords arguments:
message -- Telegram message (an object of the class Message from pyTelegramBotAPI)
"""
prev_state = self.get_previous_state(message.chat.id)
if prev_state is None:
return True
interval_name = self.schedule_setup_state[message.chat.id]['schedule']['interval']['name']
message_str = message.text.strip()
if prev_state == 'day_schedule' or (prev_state == 'interval' and (interval_name in PictureBot.days_of_week)):
if re.match(r'^(([0,1][0-9])|(2[0-3])):[0-5][0-9]$', message_str) is None:
return True
else:
return False
else:
if message_str.isdecimal() and (int(message_str) > 0):
return False
else:
return True
def set_schedule_state(self, message, state):
"""Sets the state of the schedule setup to continue setting.
Keywords arguments:
message -- Telegram message (an object of the class Message from pyTelegramBotAPI)
state -- the new state for the schedule setup, str.
"""
if state == 'start':
self.schedule_setup_state[message.chat.id] = \
{'state': state, 'prev_state': '', 'query': '',
'schedule': {'interval': {'name': '', 'value': 0}, 'time': ''}}
if state == 'setup':
if len(self.schedule_setup_state[message.chat.id]['query']) == 0:
query_string = message.text.strip().replace('\n', '').replace('\r', '').replace('\\', '')
self.schedule_setup_state[message.chat.id]['query'] = query_string
if state == 'random':
self.translate_schedule(message.chat.id, True)
return
if state == 'day_schedule' or state == 'last':
if self.schedule_setup_state[message.chat.id]['state'] == 'day_schedule':
self.schedule_setup_state[message.chat.id]['schedule']['interval']['value'] = int(message.text)
else:
self.schedule_setup_state[message.chat.id]['schedule']['interval']['name'] = message.text
if state == 'end':
if (message.text.find(':') == -1) and message.text.isdecimal():
self.schedule_setup_state[message.chat.id]['schedule']['interval']['value'] = int(message.text)
else:
self.schedule_setup_state[message.chat.id]['schedule']['time'] = message.text
log.debug(f'self.schedule_setup_state: {self.schedule_setup_state}')
self.translate_schedule(message.chat.id)
return
self.schedule_setup_state[message.chat.id]['prev_state'] = self.schedule_setup_state[message.chat.id]['state']
self.schedule_setup_state[message.chat.id]['state'] = state
def translate_schedule(self, chat_id, random=False):
"""Translates the schedule dictionary to the correct string.
Keywords arguments:
chat_id -- chat identifier, int
random -- if True, images will be sent at random intervals. Parameters of random are set in config.py.
"""
schedule_settings = self.schedule_setup_state[chat_id]
if random:
schedule_str = f'every({config.INTERVAL_START}).to({config.INTERVAL_END}).minutes.' \
f'do(self.send_picture, chat_id={str(chat_id)}).tag(str({str(chat_id)}))'
else:
interval_name = schedule_settings['schedule']['interval']['name'].lower()
interval_value = schedule_settings['schedule']['interval']['value']
interval_name = interval_name if interval_value <= 1 else interval_name + 's'
interval_value = '' if (interval_value == 0 or interval_value == 1) else str(interval_value)
start_time = schedule_settings['schedule']['time']
start_time = f'at("{start_time}").' if start_time != '' else start_time
schedule_str = f'every({interval_value}).{interval_name}.{start_time}do(self.send_picture, ' \
f'chat_id={str(chat_id)}).tag(str({str(chat_id)}))'
log.debug(f'schedule_str: {schedule_str}')
self.add_subscriber(chat_id, schedule_settings['query'], schedule_str)
self.schedule_setup_state.pop(chat_id)
def is_schedule_exist(self, chat_id):
"""Returns True, if the schedule for the chat is already exists.
Keywords arguments:
chat_id -- chat identifier, int
"""
result = self.__db_manager.get_schedule(chat_id)
return len(result) > 0
| en | 0.60998 | # -*- coding: utf-8 -*- # Main class of the application. The class to manage behavior of the bot. It binds the database and the Telegram bot class each other. Must be called when the application starts. It starts sending images to all subscriber from the database. The separate thread for schedules. Adds a new record to database and launches the new schedule for sending images. Keywords arguments: chat_id -- chat identifier, int keywords -- query to Google Image Search, str schedule_str -- schedule for sending images, str. If empty or None, a new image will be sent every minute. Stops sending images and delete all data about the specific subscriber from the database. Keywords arguments: chat_id -- chat identifier, int Sends the unused image to the chat. Keywords arguments: chat_id -- chat identifier, int Returns the state of the schedule setup (str) for the chat or an empty string. Keywords arguments: chat_id -- chat identifier, int Returns the previous state of the schedule setup. It needs to verify the input. Keywords arguments: chat_id -- chat identifier, int Returns True if the message.text is correct, or False if it's not. Keywords arguments: message -- Telegram message (an object of the class Message from pyTelegramBotAPI) Sets the state of the schedule setup to continue setting. Keywords arguments: message -- Telegram message (an object of the class Message from pyTelegramBotAPI) state -- the new state for the schedule setup, str. Translates the schedule dictionary to the correct string. Keywords arguments: chat_id -- chat identifier, int random -- if True, images will be sent at random intervals. Parameters of random are set in config.py. Returns True, if the schedule for the chat is already exists. Keywords arguments: chat_id -- chat identifier, int | 2.430045 | 2 |
Problem 1_2.py | rghvat/-TitlePython-Programming-A-Concise-Introduction | 0 | 6614823 | <reponame>rghvat/-TitlePython-Programming-A-Concise-Introduction
def problem1_2(x, y):
    """Print the sum of x and y on one line, then their product on the next."""
    total = x + y
    product = x * y
    print(total)
    print(product)
| def problem1_2(x,y):
print(x+y)
print(x*y) | none | 1 | 2.929898 | 3 | |
profit/sur/linreg/__init__.py | krystophny/unsur | 0 | 6614824 | <filename>profit/sur/linreg/__init__.py
from .linear_regression import LinearRegression
from .chaospy_linreg import ChaospyLinReg
| <filename>profit/sur/linreg/__init__.py
from .linear_regression import LinearRegression
from .chaospy_linreg import ChaospyLinReg
| none | 1 | 0.937951 | 1 | |
python_research/experiments/multiple_feature_learning/pso_multiple_features.py | ESA-PhiLab/hypernet | 34 | 6614825 | <reponame>ESA-PhiLab/hypernet
from train_multiple_features import build_training_set
from python_research.fastPSO.pso import Pso, Particle, Bounds
from keras.callbacks import EarlyStopping
import numpy as np
import os
import argparse
class MultipleFeaturesPso:
    """PSO-based hyper-parameter search for multiple-feature learning.

    Searches over three training hyper-parameters -- batch size, number
    of training samples and pixel neighborhood size -- minimising
    ``1 - validation accuracy`` of the model built by
    ``build_training_set``.
    """
    def __init__(
        self,
        original_path,
        gt_path,
        area_path,
        stddev_path,
        diagonal_path,
        moment_path,
        patience
    ):
        """Store the dataset paths (.npy files) and early-stopping patience.

        patience -- epochs without validation improvement before training
                    of a single candidate stops.
        """
        self.original_path = original_path
        self.gt_path = gt_path
        self.area_path = area_path
        self.stddev_path = stddev_path
        self.diagonal_path = diagonal_path
        self.moment_path = moment_path
        self.patience = patience
        # Keras History.history dicts cached under the key
        # "<batch>_<samples>_<neighborhood>" so re-visited PSO positions
        # are scored without retraining.
        self.archive = {}
    def run(
        self,
        swarm_size,
        min_batch_size,
        max_batch_size,
        min_nb_samples,
        max_nb_samples,
        min_neighborhood,
        max_neighborhood
    ):
        """Run the swarm search within the given bounds; print the winner.

        Raises ValueError when a sample-count bound is below 10 or when a
        neighborhood bound is non-positive or even (the neighborhood must
        be odd so the classified pixel sits at its centre).
        """
        if min_nb_samples < 10:
            raise ValueError('min_nb_samples must be greater than or equal to 10')
        if max_nb_samples < 10:
            raise ValueError('max_nb_samples must be greater than or equal to 10')
        if min_neighborhood <= 0:
            raise ValueError('min_neighborhood must be positive')
        if max_neighborhood <= 0:
            raise ValueError('max_neighborhood must be positive')
        if min_neighborhood % 2 == 0:
            raise ValueError('min_neighborhood must be odd')
        if max_neighborhood % 2 == 0:
            raise ValueError('max_neighborhood must be odd')
        lower_bounds = np.array([min_batch_size, min_nb_samples, min_neighborhood])
        upper_bounds = np.array([max_batch_size, max_nb_samples, max_neighborhood])
        pso = Pso(
            swarm_size=swarm_size,
            objective_function=self._objective_function,
            lower_bound=lower_bounds,
            upper_bound=upper_bounds,
            threads=1
        )
        best_position, best_score = pso.run()
        batch_size, nb_samples, neighborhood = self._extract_parameters(best_position)
        print(
            'Best result: batch size = {}, samples = {}, neighborhood = {} (score = {})'.format(
                batch_size,
                nb_samples,
                neighborhood,
                best_score
            )
        )
    def _objective_function(self, particle: Particle):
        """PSO fitness: ``1 - final validation accuracy`` (lower is better)."""
        batch_size, nb_samples, neighborhood = self._extract_parameters(particle.position())
        print(
            'Processing: batch size = {}, samples = {}, neighborhood = {}'.format(
                batch_size,
                nb_samples,
                neighborhood
            )
        )
        archive_index = '{}_{}_{}'.format(batch_size, nb_samples, neighborhood)
        # Reuse the cached score when this position was already evaluated.
        # NOTE(review): assumes the Keras metric key is 'val_acc'
        # (pre-2.3 naming); newer Keras reports 'val_accuracy' -- confirm
        # against the installed version.
        if archive_index in self.archive:
            return 1 - self.archive[archive_index]['val_acc'][-1]
        training_set = build_training_set(
            self.original_path,
            self.gt_path,
            self.area_path,
            self.stddev_path,
            self.diagonal_path,
            self.moment_path,
            nb_samples,
            (neighborhood, neighborhood)
        )
        early = EarlyStopping(patience=self.patience)
        history = training_set.model.fit(
            x=training_set.x_train,
            y=training_set.y_train,
            validation_data=(training_set.x_val, training_set.y_val),
            epochs=200,
            batch_size=batch_size,
            verbose=1,
            callbacks=[
                early
            ]
        )
        # Record the test-set accuracy alongside the training history.
        history.history['eval'] = training_set.model.evaluate(
            training_set.x_test,
            training_set.y_test,
            verbose=1
        )[1]
        self.archive[archive_index] = history.history
        score = 1 - history.history['val_acc'][-1]
        print('Score = {}'.format(score))
        return score
    def _extract_parameters(self, position):
        """Convert a continuous PSO position into integer hyper-parameters.

        Even neighborhood values are rounded up to the next odd value.
        NOTE(review): the rounding can exceed max_neighborhood by one --
        confirm that is acceptable for the data loader.
        """
        batch_size, nb_samples, neighborhood = position
        batch_size = int(batch_size)
        nb_samples = int(nb_samples)
        neighborhood = int(neighborhood)
        if neighborhood % 2 == 0:
            neighborhood += 1
        return batch_size, nb_samples, neighborhood
if __name__ == '__main__':
    # Command-line entry point: collect dataset paths, early-stopping
    # patience and the PSO search bounds, then run the optimisation.
    parser = argparse.ArgumentParser(
        description='Script for Multiple Feature Learning'
    )
    # NOTE(review): the dashed options below are not marked required=True,
    # so omitting one silently passes None through to the data loader --
    # confirm whether that is intended.
    parser.add_argument(
        '-o',
        action='store',
        dest='original_path',
        type=str,
        help='Path to the original dataset in .npy format'
    )
    parser.add_argument(
        '-a',
        action='store',
        dest='area_path',
        type=str,
        help='Path to the EAP dataset for area attribute in .npy format'
    )
    parser.add_argument(
        '-s',
        action='store',
        dest='stddev_path',
        type=str,
        help='Path to the EAP dataset for standard deviation attribute in .npy format'
    )
    parser.add_argument(
        '-d',
        action='store',
        dest='diagonal_path',
        type=str,
        help='Path to the EAP dataset for diagonal attribute in .npy format'
    )
    parser.add_argument(
        '-m',
        action='store',
        dest='moment_path',
        type=str,
        help='Path to the EAP dataset for moment attribute in .npy format'
    )
    parser.add_argument(
        '-t',
        action='store',
        dest='gt_path',
        type=str,
        help='Path to the ground truth file in .npy format'
    )
    parser.add_argument(
        '-p',
        action='store',
        dest='patience',
        type=int,
        help='Number of epochs without improvement on validation score before stopping the learning'
    )
    # Positional arguments: swarm size and the inclusive PSO search bounds.
    parser.add_argument(
        'swarm',
        action='store',
        type=int,
        help='Swarm size'
    )
    parser.add_argument(
        'minBatchSize',
        action='store',
        type=int,
        help='Minimal size of training batch'
    )
    parser.add_argument(
        'maxBatchSize',
        action='store',
        type=int,
        help='Maximal size of training batch'
    )
    parser.add_argument(
        'minSamples',
        action='store',
        type=int,
        help='Minimal number of training samples used'
    )
    parser.add_argument(
        'maxSamples',
        action='store',
        type=int,
        help='Maximal number of training samples used'
    )
    parser.add_argument(
        'minneighborhood',
        action='store',
        type=int,
        help='Minimal neighborhood size of the pixel'
    )
    parser.add_argument(
        'maxneighborhood',
        action='store',
        type=int,
        help='Maximal neighborhood size of the pixel'
    )
    args = parser.parse_args()
    pso = MultipleFeaturesPso(
        args.original_path,
        args.gt_path,
        args.area_path,
        args.stddev_path,
        args.diagonal_path,
        args.moment_path,
        args.patience
    )
    pso.run(
        args.swarm,
        args.minBatchSize,
        args.maxBatchSize,
        args.minSamples,
        args.maxSamples,
        args.minneighborhood,
        args.maxneighborhood
    )
| from train_multiple_features import build_training_set
from python_research.fastPSO.pso import Pso, Particle, Bounds
from keras.callbacks import EarlyStopping
import numpy as np
import os
import argparse
class MultipleFeaturesPso:
def __init__(
self,
original_path,
gt_path,
area_path,
stddev_path,
diagonal_path,
moment_path,
patience
):
self.original_path = original_path
self.gt_path = gt_path
self.area_path = area_path
self.stddev_path = stddev_path
self.diagonal_path = diagonal_path
self.moment_path = moment_path
self.patience = patience
self.archive = {}
def run(
self,
swarm_size,
min_batch_size,
max_batch_size,
min_nb_samples,
max_nb_samples,
min_neighborhood,
max_neighborhood
):
if min_nb_samples < 10:
raise ValueError('min_nb_samples must greater or equal to 10')
if max_nb_samples < 10:
raise ValueError('max_nb_samples must greater or equal to 10')
if min_neighborhood <= 0:
raise ValueError('min_neighborhood must be positive')
if max_neighborhood <= 0:
raise ValueError('max_neighborhood must be positive')
if min_neighborhood % 2 == 0:
raise ValueError('min_neighborhood must be odd')
if max_neighborhood % 2 == 0:
raise ValueError('max_neighborhood must be odd')
lower_bounds = np.array([min_batch_size, min_nb_samples, min_neighborhood])
upper_bounds = np.array([max_batch_size, max_nb_samples, max_neighborhood])
pso = Pso(
swarm_size=swarm_size,
objective_function=self._objective_function,
lower_bound=lower_bounds,
upper_bound=upper_bounds,
threads=1
)
best_position, best_score = pso.run()
batch_size, nb_samples, neighborhood = self._extract_parameters(best_position)
print(
'Best result: batch size = {}, samples = {}, neighborhood = {} (score = {})'.format(
batch_size,
nb_samples,
neighborhood,
best_score
)
)
def _objective_function(self, particle: Particle):
batch_size, nb_samples, neighborhood = self._extract_parameters(particle.position())
print(
'Processing: batch size = {}, samples = {}, neighborhood = {}'.format(
batch_size,
nb_samples,
neighborhood
)
)
archive_index = '{}_{}_{}'.format(batch_size, nb_samples, neighborhood)
if archive_index in self.archive:
return 1 - self.archive[archive_index]['val_acc'][-1]
training_set = build_training_set(
self.original_path,
self.gt_path,
self.area_path,
self.stddev_path,
self.diagonal_path,
self.moment_path,
nb_samples,
(neighborhood, neighborhood)
)
early = EarlyStopping(patience=self.patience)
history = training_set.model.fit(
x=training_set.x_train,
y=training_set.y_train,
validation_data=(training_set.x_val, training_set.y_val),
epochs=200,
batch_size=batch_size,
verbose=1,
callbacks=[
early
]
)
history.history['eval'] = training_set.model.evaluate(
training_set.x_test,
training_set.y_test,
verbose=1
)[1]
self.archive[archive_index] = history.history
score = 1 - history.history['val_acc'][-1]
print('Score = {}'.format(score))
return score
def _extract_parameters(self, position):
batch_size, nb_samples, neighborhood = position
batch_size = int(batch_size)
nb_samples = int(nb_samples)
neighborhood = int(neighborhood)
if neighborhood % 2 == 0:
neighborhood += 1
return batch_size, nb_samples, neighborhood
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Script for Multiple Feature Learning'
)
parser.add_argument(
'-o',
action='store',
dest='original_path',
type=str,
help='Path to the original dataset in .npy format'
)
parser.add_argument(
'-a',
action='store',
dest='area_path',
type=str,
help='Path to the EAP dataset for area attribute in .npy format'
)
parser.add_argument(
'-s',
action='store',
dest='stddev_path',
type=str,
help='Path to the EAP dataset for standard deviation attribute in .npy format'
)
parser.add_argument(
'-d',
action='store',
dest='diagonal_path',
type=str,
help='Path to the EAP dataset for diagonal attribute in .npy format'
)
parser.add_argument(
'-m',
action='store',
dest='moment_path',
type=str,
help='Path to the EAP dataset for moment attribute in .npy format'
)
parser.add_argument(
'-t',
action='store',
dest='gt_path',
type=str,
help='Path to the ground truth file in .npy format'
)
parser.add_argument(
'-p',
action='store',
dest='patience',
type=int,
help='Number of epochs without improvement on validation score before stopping the learning'
)
parser.add_argument(
'swarm',
action='store',
type=int,
help='Swarm size'
)
parser.add_argument(
'minBatchSize',
action='store',
type=int,
help='Minimal size of training batch'
)
parser.add_argument(
'maxBatchSize',
action='store',
type=int,
help='Maximal size of training batch'
)
parser.add_argument(
'minSamples',
action='store',
type=int,
help='Minimal number of training samples used'
)
parser.add_argument(
'maxSamples',
action='store',
type=int,
help='Maximal number of training samples used'
)
parser.add_argument(
'minneighborhood',
action='store',
type=int,
help='Minimal neighborhood size of the pixel'
)
parser.add_argument(
'maxneighborhood',
action='store',
type=int,
help='Maximal neighborhood size of the pixel'
)
args = parser.parse_args()
pso = MultipleFeaturesPso(
args.original_path,
args.gt_path,
args.area_path,
args.stddev_path,
args.diagonal_path,
args.moment_path,
args.patience
)
pso.run(
args.swarm,
args.minBatchSize,
args.maxBatchSize,
args.minSamples,
args.maxSamples,
args.minneighborhood,
args.maxneighborhood
) | none | 1 | 2.306499 | 2 | |
depthai_sdk/src/test/test_encoding_manager.py | Luxonis-Brandon/Hardware | 27 | 6614826 | import unittest
from pathlib import Path
from depthai_sdk.managers import EncodingManager, PipelineManager
from depthai_sdk import Previews
import depthai as dai
import os
# Keep the test methods in definition order instead of alphabetical order.
# NOTE(review): this assigns on the unittest.TestLoader class itself, so it
# affects every loader in the process, not just this module.
unittest.TestLoader.sortTestMethodsUsing = None
class TestEncodingManager(unittest.TestCase):
    """Unit tests for depthai_sdk's EncodingManager.

    NOTE(review): every test from test_CreateDefaultQueues1 on opens a real
    OAK camera via ``dai.Device``, so this suite requires connected
    hardware and cannot run in a plain CI environment.
    """
    def test_Init1(self):
        """Testing init with an empty dict and a real path"""
        test = EncodingManager(encodeConfig={}, encodeOutput=Path(""))
        self.assertIsNotNone(test)
    def test_Init2(self):
        """Testing init with an empty dict and a false path"""
        with self.assertRaises(RuntimeError):
            EncodingManager(encodeConfig={}, encodeOutput=Path("/NotARealPath"))
    def test_Init3(self):
        """Testing if everything in init is stored correctly if used with every attribute"""
        test = EncodingManager(encodeConfig={Previews.color.name: 30}, encodeOutput=Path(""))
        self.assertDictEqual(test.encodeConfig, {Previews.color.name: 30})
        self.assertEqual(test.encodeOutput, Path(""))
    def test_CreateEncoders1(self):
        """Testing createEncoders with a valid pipeline"""
        pm = PipelineManager()
        pm.createColorCam()
        # 30 is the encoder frame rate for the preview stream.
        test = EncodingManager({Previews.color.name: 30}, Path(""))
        test.createEncoders(pm)
        self.assertTrue("color" in test._encodingNodes)
    def test_CreateEncoders2(self):
        """Testing createEncoders with a valid pipeline(all nodes)"""
        pm = PipelineManager()
        pm.createColorCam()
        pm.createLeftCam()
        pm.createRightCam()
        test = EncodingManager({
            Previews.color.name: 30,
            Previews.left.name: 30,
            Previews.right.name: 30}, Path(""))
        test.createEncoders(pm)
        self.assertTrue("color" in test._encodingNodes and
                        "left" in test._encodingNodes and
                        "right" in test._encodingNodes)
    def test_CreateDefaultQueues1(self):
        """Testing createDefaultQueues with a valid pipeline"""
        pm = PipelineManager()
        pm.createColorCam()
        test = EncodingManager({Previews.color.name: 30}, Path(""))
        test.createEncoders(pm)
        # Requires a physically connected device.
        with dai.Device(pm.pipeline) as device:
            test.createDefaultQueues(device)
            self.assertEqual(len(test._encodingQueues), 1)
            self.assertTrue("color" in test._encodingQueues)
            self.assertTrue("color" in test._encodingFiles)
    def test_CreateDefaultQueues2(self):
        """Testing createDefaultQueues with a valid pipeline(all nodes)"""
        pm = PipelineManager()
        pm.createColorCam()
        pm.createLeftCam()
        pm.createRightCam()
        test = EncodingManager({
            Previews.color.name: 30,
            Previews.left.name: 30,
            Previews.right.name: 30}, Path(""))
        test.createEncoders(pm)
        with dai.Device(pm.pipeline) as device:
            test.createDefaultQueues(device)
            self.assertEqual(len(test._encodingQueues), 3)
            self.assertTrue("color" in test._encodingQueues and
                            "left" in test._encodingQueues and
                            "right" in test._encodingQueues)
            self.assertTrue("color" in test._encodingFiles and
                            "left" in test._encodingFiles and
                            "right" in test._encodingFiles)
    def test_close1(self):
        """Testing close with a valid pipeline, if closed correctly the file will be deleted (files are in .h264)"""
        pm = PipelineManager()
        pm.createColorCam()
        test = EncodingManager({Previews.color.name: 30}, Path(""))
        test.createEncoders(pm)
        with dai.Device(pm.pipeline) as device:
            test.createDefaultQueues(device)
            test.close()
        # os.remove doubles as the assertion: it raises if close() did not
        # flush/release the output file.
        os.remove("color.h264")
    def test_close2(self):
        """Testing close with a valid pipeline, if closed correctly the files will be deleted (files are in .h264)"""
        pm = PipelineManager()
        pm.createColorCam()
        pm.createLeftCam()
        pm.createRightCam()
        test = EncodingManager({
            Previews.color.name: 30,
            Previews.left.name: 30,
            Previews.right.name: 30}, Path(""))
        test.createEncoders(pm)
        with dai.Device(pm.pipeline) as device:
            test.createDefaultQueues(device)
            test.close()
        os.remove("color.h264")
        os.remove("left.h264")
        os.remove("right.h264")
| import unittest
from pathlib import Path
from depthai_sdk.managers import EncodingManager, PipelineManager
from depthai_sdk import Previews
import depthai as dai
import os
unittest.TestLoader.sortTestMethodsUsing = None
class TestEncodingManager(unittest.TestCase):
def test_Init1(self):
"""Testing init with an empty dict and a real path"""
test = EncodingManager(encodeConfig={}, encodeOutput=Path(""))
self.assertIsNotNone(test)
def test_Init2(self):
"""Testing init with an empty dict and a false path"""
with self.assertRaises(RuntimeError):
EncodingManager(encodeConfig={}, encodeOutput=Path("/NotARealPath"))
def test_Init3(self):
"""Testing if everything in init is stored correctly if used with every attribute"""
test = EncodingManager(encodeConfig={Previews.color.name: 30}, encodeOutput=Path(""))
self.assertDictEqual(test.encodeConfig, {Previews.color.name: 30})
self.assertEqual(test.encodeOutput, Path(""))
def test_CreateEncoders1(self):
"""Testing createEncoders with a valid pipeline"""
pm = PipelineManager()
pm.createColorCam()
test = EncodingManager({Previews.color.name: 30}, Path(""))
test.createEncoders(pm)
self.assertTrue("color" in test._encodingNodes)
def test_CreateEncoders2(self):
"""Testing createEncoders with a valid pipeline(all nodes)"""
pm = PipelineManager()
pm.createColorCam()
pm.createLeftCam()
pm.createRightCam()
test = EncodingManager({
Previews.color.name: 30,
Previews.left.name: 30,
Previews.right.name: 30}, Path(""))
test.createEncoders(pm)
self.assertTrue("color" in test._encodingNodes and
"left" in test._encodingNodes and
"right" in test._encodingNodes)
def test_CreateDefaultQueues1(self):
"""Testing createDefaultQueues with a valid pipeline"""
pm = PipelineManager()
pm.createColorCam()
test = EncodingManager({Previews.color.name: 30}, Path(""))
test.createEncoders(pm)
with dai.Device(pm.pipeline) as device:
test.createDefaultQueues(device)
self.assertEqual(len(test._encodingQueues), 1)
self.assertTrue("color" in test._encodingQueues)
self.assertTrue("color" in test._encodingFiles)
def test_CreateDefaultQueues2(self):
"""Testing createDefaultQueues with a valid pipeline(all nodes)"""
pm = PipelineManager()
pm.createColorCam()
pm.createLeftCam()
pm.createRightCam()
test = EncodingManager({
Previews.color.name: 30,
Previews.left.name: 30,
Previews.right.name: 30}, Path(""))
test.createEncoders(pm)
with dai.Device(pm.pipeline) as device:
test.createDefaultQueues(device)
self.assertEqual(len(test._encodingQueues), 3)
self.assertTrue("color" in test._encodingQueues and
"left" in test._encodingQueues and
"right" in test._encodingQueues)
self.assertTrue("color" in test._encodingFiles and
"left" in test._encodingFiles and
"right" in test._encodingFiles)
def test_close1(self):
"""Testing close with a valid pipeline, if closed correctly the file will be deleted (files are in .h264)"""
pm = PipelineManager()
pm.createColorCam()
test = EncodingManager({Previews.color.name: 30}, Path(""))
test.createEncoders(pm)
with dai.Device(pm.pipeline) as device:
test.createDefaultQueues(device)
test.close()
os.remove("color.h264")
def test_close2(self):
"""Testing close with a valid pipeline, if closed correctly the files will be deleted (files are in .h264)"""
pm = PipelineManager()
pm.createColorCam()
pm.createLeftCam()
pm.createRightCam()
test = EncodingManager({
Previews.color.name: 30,
Previews.left.name: 30,
Previews.right.name: 30}, Path(""))
test.createEncoders(pm)
with dai.Device(pm.pipeline) as device:
test.createDefaultQueues(device)
test.close()
os.remove("color.h264")
os.remove("left.h264")
os.remove("right.h264")
| en | 0.770229 | Testing init with an empty dict and a real path Testing init with an empty dict and a false path Testing if everything in init is stored correctly if used with every attribute Testing createEncoders with a valid pipeline Testing createEncoders with a valid pipeline(all nodes) Testing createDefaultQueues with a valid pipeline Testing createDefaultQueues with a valid pipeline(all nodes) Testing close with a valid pipeline, if closed correctly the file will be deleted (files are in .h264) Testing close with a valid pipeline, if closed correctly the files will be deleted (files are in .h264) | 2.754735 | 3 |
data_driven_acquisition/migrations/0006_auto_20191029_1820.py | adam-grandt-tts/data-driven-acquisition | 1 | 6614827 | # Generated by Django 2.2.6 on 2019-10-29 18:20
import django.contrib.postgres.fields.hstore
from django.db import migrations
class Migration(migrations.Migration):
    """Adds an HStore `properties` field to PackageTemplate and drops ACL."""
    dependencies = [
        ('data_driven_acquisition', '0005_auto_20191029_1531'),
    ]
    operations = [
        # Free-form key/value properties; requires the PostgreSQL hstore
        # extension to be enabled on the database.
        migrations.AddField(
            model_name='packagetemplate',
            name='properties',
            field=django.contrib.postgres.fields.hstore.HStoreField(blank=True, null=True),
        ),
        # The ACL model is removed entirely; its table and data are dropped.
        migrations.DeleteModel(
            name='ACL',
        ),
    ]
| # Generated by Django 2.2.6 on 2019-10-29 18:20
import django.contrib.postgres.fields.hstore
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data_driven_acquisition', '0005_auto_20191029_1531'),
]
operations = [
migrations.AddField(
model_name='packagetemplate',
name='properties',
field=django.contrib.postgres.fields.hstore.HStoreField(blank=True, null=True),
),
migrations.DeleteModel(
name='ACL',
),
]
| en | 0.842128 | # Generated by Django 2.2.6 on 2019-10-29 18:20 | 1.721702 | 2 |
src/unibet.py | MiladC4/betting-crawler | 39 | 6614828 | #!/usr/bin/env python3
import urllib.request as request
import json, re
import database
# Site label stored alongside every scraped odds row.
site = "Unibet"
# Splits Kambi event start timestamps like "2019-05-01T18:30:00Z":
# group 1 = date part, group 2 = clock time.
time_regex = "([0-9\-]+)T([0-9:]+)Z"
# Shared match database handle used by scrape_json below.
db = database.match_database()
def scrape_json(url):
    """Fetch one Kambi bet-offer JSON document and store its 1X2 odds.

    For every full-time-result offer (betOfferType id 2) the matching
    event is looked up to obtain the competition, kick-off date/time and
    team names, and the 1/X/2 odds are passed to the match database.
    Offers whose event is missing or whose start time does not parse are
    skipped instead of crashing the whole scrape.
    """
    # (a leftover `import ipdb; ipdb.set_trace()` debug breakpoint was
    # removed here)
    info = json.loads(request.urlopen(url).read().decode())
    for offer in info["betoffers"]:
        if offer["betOfferType"]['id'] != 2:
            continue  # only full-time 1X2 offers are of interest
        match_details = None
        for event in info['events']:
            if event["id"] != offer["eventId"]:
                continue
            m = re.match(time_regex, event["start"])
            if m is None:
                print("Regex failed: %s" % event["start"])
            else:
                match_details = (
                    event["group"],     # competition name
                    m.group(1),         # date part (YYYY-MM-DD)
                    m.group(2),         # clock time (HH:MM:SS)
                    event['homeName'],
                    event['awayName'],
                )
            break
        if match_details is None:
            # Previously these fields stayed undefined and the db call
            # below raised NameError; now the offer is skipped cleanly.
            continue
        comp, sql_date, clock_time, home_team, away_team = match_details
        odds = {}
        for outcome in offer['outcomes']:
            # NOTE(review): Kambi odds appear to be integers in
            # thousandths (e.g. 1850 -> "1.850") -- TODO confirm.
            # Splitting off the LAST three digits also handles odds >= 10,
            # which the old "first digit . rest" split mangled
            # (10500 -> "1.0500" instead of "10.500").
            raw_odds = str(outcome['odds'])
            float_odds = "%s.%s" % (raw_odds[:-3], raw_odds[-3:])
            if outcome['type'] == "OT_ONE":
                odds['1'] = float_odds
            elif outcome['type'] == "OT_CROSS":
                odds['X'] = float_odds
            elif outcome['type'] == "OT_TWO":
                odds['2'] = float_odds
        db.process_match(comp, home_team, away_team, sql_date, clock_time, site, odds)
# Kambi bet-offer API endpoint pieces; the group id goes in between.
# NOTE(review): cat=1295 presumably selects the match-result market
# category -- confirm against the Kambi offering API.
url_prefix = "https://e4-api.kambi.com/offering/api/v2/ub/betoffer/group/"
url_suffix = ".json?cat=1295&range_size=100&range_start=0"
# Kambi group ids for the competitions of interest (documentation only;
# the loop below hard-codes a subset of these values).
leagues = {
    'CL' : 1000093381,
    'EL' : 2000051195,
    'Spain' : 1000461813,
    'Italy' : 1000461745,
    'Germany' : 1000461728,
    'France' : 1000461727,
    'England' : 1000461733,
    'Sweden' : 1000461814}
# Scrape EL, England, CL and Sweden.  NOTE(review): consider iterating
# over `leagues` instead of duplicating the ids here.
for league_nbr in [2000051195, 1000461733, 1000093381, 1000461814]:
    url = url_prefix + str(league_nbr) + url_suffix
    scrape_json(url)
| #!/usr/bin/env python3
import urllib.request as request
import json, re
import database
site = "Unibet"
time_regex = "([0-9\-]+)T([0-9:]+)Z"
db = database.match_database()
def scrape_json(url):
import ipdb
ipdb.set_trace()
info = json.loads(request.urlopen(url).read().decode())
for offer in info["betoffers"]:
if offer["betOfferType"]['id'] != 2:
continue
for event in info['events']:
if event["id"] != offer["eventId"]:
continue
comp = event["group"]
m = re.match(time_regex, event["start"])
if m is None:
print("Regex failed: %s" % event["start"])
break
sql_date = m.group(1)
clock_time = m.group(2)
home_team = event['homeName']
away_team = event['awayName']
break
odds = {}
for outcome in offer['outcomes']:
raw_odds = str(outcome['odds'])
float_odds = "%s.%s" % (raw_odds[0], raw_odds[1:])
if outcome['type'] == "OT_ONE":
odds['1'] = float_odds
elif outcome['type'] == "OT_CROSS":
odds['X'] = float_odds
elif outcome['type'] == "OT_TWO":
odds['2'] = float_odds
db.process_match(comp, home_team, away_team, sql_date, clock_time, site, odds)
url_prefix = "https://e4-api.kambi.com/offering/api/v2/ub/betoffer/group/"
url_suffix = ".json?cat=1295&range_size=100&range_start=0"
leagues = {
'CL' : 1000093381,
'EL' : 2000051195,
'Spain' : 1000461813,
'Italy' : 1000461745,
'Germany' : 1000461728,
'France' : 1000461727,
'England' : 1000461733,
'Sweden' : 1000461814}
for league_nbr in [2000051195, 1000461733, 1000093381, 1000461814]:
url = url_prefix + str(league_nbr) + url_suffix
scrape_json(url)
| fr | 0.221828 | #!/usr/bin/env python3 | 2.763237 | 3 |
aless_art_shop/migrations/0005_donation.py | AlessioMartello/art_shop | 0 | 6614829 | <filename>aless_art_shop/migrations/0005_donation.py
# Generated by Django 3.2.6 on 2021-09-04 10:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Introduces the Donation model (fixed amounts mapped to Stripe prices)."""
    dependencies = [
        ('aless_art_shop', '0004_product_photo'),
    ]
    operations = [
        migrations.CreateModel(
            name='Donation',
            fields=[
                # NOTE(review): the donation amount itself is the primary
                # key, so only one Donation row can exist per amount.
                ('amount', models.IntegerField(primary_key=True, serialize=False)),
                ('stripe_price_id', models.CharField(max_length=100)),
            ],
        ),
    ]
| <filename>aless_art_shop/migrations/0005_donation.py
# Generated by Django 3.2.6 on 2021-09-04 10:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aless_art_shop', '0004_product_photo'),
]
operations = [
migrations.CreateModel(
name='Donation',
fields=[
('amount', models.IntegerField(primary_key=True, serialize=False)),
('stripe_price_id', models.CharField(max_length=100)),
],
),
]
| en | 0.80605 | # Generated by Django 3.2.6 on 2021-09-04 10:16 | 1.520908 | 2 |
src/body.py | papaljuka/Barnes-Hut | 0 | 6614830 | import numpy as np
# Standard library
import abc
import math
# Project modules (`import constants` added: the code below references
# `constants.G` / `constants.cons_m`, which `from constants import *`
# does not bind; `math` was used but never imported)
import constants
from constants import *
from dist import Dist
from vector import Vector
# Definition of a body
class Body():
    """A point mass in a 2-D gravitational N-body simulation.

    NOTE(review): the original definition did not parse (non-default
    argument after a default, ``size=,``, undefined names such as ``vx``,
    ``mass`` and ``color``, ``math.flor``, a comment embedded inside an
    expression, and merge code pasted into ``energy``).  This is a
    syntactically valid reconstruction that keeps the intended structure
    -- softened pairwise gravity, leap-frog integration, energy
    book-keeping and a pygame draw hook -- but every formula should be
    verified against the callers before being trusted.  It assumes the
    project ``Vector`` type exposes mutable ``.x`` / ``.y`` components --
    TODO confirm.
    """
    def __init__(self, pos, m=constants.cons_m, v=None, size=1, i=0):
        """Create a body at position `pos` (Vector) with mass `m`.

        v    -- initial velocity Vector; defaults to the zero vector
                (a shared mutable default argument is deliberately avoided)
        size -- display size hint
        i    -- index of this body in the simulation's body list
        """
        self.m = m
        self.pos = pos
        self.v = Vector() if v is None else v
        self.a = Vector()   # acceleration, filled in by the integrator
        self.size = size
        self.i = i
    @staticmethod
    def accelleration(m, pos, epsilon):
        """Pairwise gravitational accelerations for the whole system.

        m       -- (N, 1) array of masses
        pos     -- (N, 2) array of x/y positions
        epsilon -- Plummer softening length (avoids the r -> 0 singularity)
        Returns an (N, 2) array of accelerations.
        """
        # Keep 2-D column shapes so the transposed differences broadcast
        # into full N x N matrices (the original's pos[:, 0, 1] indexing
        # was invalid for a 2-D array).
        x = pos[:, 0:1]
        y = pos[:, 1:2]
        dx = x.T - x
        dy = y.T - y
        r3_inv = (dx**2 + dy**2 + epsilon**2) ** (-1.5)
        ax = constants.G * (dx * r3_inv) @ m
        ay = constants.G * (dy * r3_inv) @ m
        return np.hstack((ax, ay))
    def move(self, dt, ax, ay):
        """First half of a leap-frog step: half-kick then drift.

        The caller recomputes accelerations from the new positions and
        finishes the step with `kick` (kick-drift-kick scheme).
        """
        self.v.x += ax * 0.5 * dt
        self.v.y += ay * 0.5 * dt
        self.pos.x += self.v.x * dt
        self.pos.y += self.v.y * dt
    def kick(self, dt, ax, ay):
        """Second leap-frog half-kick, using post-drift accelerations."""
        self.v.x += ax * 0.5 * dt
        self.v.y += ay * 0.5 * dt
    @staticmethod
    def energy(m, G, v, pos):
        """Total kinetic and potential energy of the system.

        m -- (N, 1) masses, v -- (N, 2) velocities, pos -- (N, 2) positions.
        Returns (EKin, EPot); each pair is counted once in the potential.
        """
        EKin = 0.5 * np.sum(m * v**2)
        x = pos[:, 0:1]
        y = pos[:, 1:2]
        dx = x.T - x
        dy = y.T - y
        r = np.sqrt(dx**2 + dy**2)
        # Guard the self-interaction diagonal (r == 0) instead of
        # dividing by zero.
        inv_r = np.divide(1.0, r, out=np.zeros_like(r), where=r > 0.0)
        EPot = -G * np.sum(np.triu(m @ m.T * inv_r, k=1))
        return EKin, EPot
    def merge(self, other):
        """Absorb `other` into this body, conserving linear momentum.

        (Reconstructed from merge fragments that were pasted into the
        original `energy` body.)
        """
        total = self.m + other.m
        self.v.x = (self.m * self.v.x + other.m * other.v.x) / total
        self.v.y = (self.m * self.v.y + other.m * other.v.y) / total
        self.m += other.m
    def pydraw(self, pd, surface, color=(255, 255, 255)):
        """Draw the body as a circle via a pygame.draw-compatible module.

        `color` was an undefined name in the original; it is now an
        overridable keyword parameter.  The radius grows slowly with mass.
        """
        x = math.floor(self.pos.x)
        y = math.floor(self.pos.y)
        radius = 1 + math.floor(0.2 * self.m / constants.cons_m)
        pd.circle(surface, color, (x, y), radius)
    def __repr__(self):
        # The original format string printed the literal text ".x"/".y"/".m"
        # instead of the attribute values.
        return "Body: ({0}, {1}), mass={2}".format(self.pos.x, self.pos.y, self.m)
class Drawable(object, metaclass=abc.ABCMeta):
    """Abstract interface for objects that can render themselves.

    Implementors receive a pygame.draw-compatible module `pd` and the
    target `surface` to draw on.
    """
    # Fix: the original decorator `@abctractmethod` was an undefined name;
    # only `abc` itself is imported, so the qualified form is required.
    @abc.abstractmethod
    def pydraw(self, pd, surface):
        """Draw this object onto `surface` using drawing module `pd`."""
        raise NotImplementedError('Must implement Pydraw function!')
| import numpy as np
import abc
from constants import *
from vector import Vector
from dist import Dist
# Definition of a body
class Body():
def __init__(self, m=constants.cons_m, pos, v = Vector(), size=, i):
self.x, self.y = x, y
self.vx, self.vy = vx, vy
self.m = m
self.pos = pos
self.v = v
self.a = Vector()
def accelleration(self, m, pos, epsilon):
x = pos[:, 0, 1]
y = pos[:, 1, 2]
dx = x.T - x
dy = y.T - y
r3_inv = (dx**2 + dy**2 + epsilon**2)** (-1.5)
ax = constants.G * (dx * r3_inv) @ mass
ay = constants.G * (dy * r3_inv) @ mass
a = np.hstack((ax, ay))
return a
# a = (ax, ay, az) gravitacijski!
# Leap-Frog integration: kick + drift + kick
def move(self, dt, ax, ay):
vx += ax * 0.5 * dt
vy += ay * 0.5 * dt
x += vx * dt
y += vy * dt
acc = accelleration(self, m, x, y, G, epsilon)
vx += ax * 0.5 * dt
vy += ay * 0.5 * dt
t += dt
EKin, EPot = energy(self, m, G, vx, vy, x, y)
# E = E_k + E_pot
def energy(self, m, G, v, pos):
Ekin = 0.5 * np.sum(np.sum( m * v **2))
x = pos[:, 0, 1]
y = pos[:, 1, 2]
dx = x.T - x
dy = y.T - y
r_inv = np.sqrt(dx**2 + dy**2)
self.vy = (self.m * vy + other.m * other.vy) / (self.m + other.m)
self.m += other.m
def pydraw(self, pd, surface):
vmag = self.v.mag()
#color =
x = math.floor(self.pos.x)
y = math.flor(self.pos.y)
pd.circle(surface, color, (x, y), 1 + math.floor(0.2 * self.m/#particle mass))
def __repr__(self):
return "Body: ({0}.x, {0}.y), mass= {0}.m".format(self)
class Drawable(object, metaclass=abc.ABCMeta):
@abctractmethod
def pydraw(self, pd, surface):
raise NotImplementedError('Must implement Pydraw function!')
| en | 0.467363 | # Definition of a body # a = (ax, ay, az) gravitacijski! # Leap-Frog integration: kick + drift + kick # E = E_k + E_pot #color = #particle mass)) | 2.892743 | 3 |
yoh-wrapper/app/routes.py | Svyat935/YOH | 1 | 6614831 | <filename>yoh-wrapper/app/routes.py
from modules.game import game_bp
from modules.service_page import service_bp
from modules.api import api_bp
def route(app):
    """Register the application's Flask blueprint modules on `app`."""
    for blueprint in (game_bp, service_bp, api_bp):
        app.register_blueprint(blueprint)
| <filename>yoh-wrapper/app/routes.py
from modules.game import game_bp
from modules.service_page import service_bp
from modules.api import api_bp
def route(app):
"""
Регистрируем модули Flask сервера
"""
app.register_blueprint(game_bp)
app.register_blueprint(service_bp)
app.register_blueprint(api_bp)
| ru | 0.645067 | Регистрируем модули Flask сервера | 1.862441 | 2 |
backend/booking/migrations/0001_initial.py | uncle-yura/simple_booking | 0 | 6614832 | <filename>backend/booking/migrations/0001_initial.py
# Generated by Django 3.2.3 on 2022-01-12 11:56
import datetime
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import base.storage
class Migration(migrations.Migration):
    """Initial schema for the booking app.

    Creates JobType, Order, Profile and PriceList, then wires Order's
    foreign keys (added separately because Order and Profile reference
    each other).
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # -- JobType: a bookable service with a duration and cover image.
        migrations.CreateModel(
            name="JobType",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "name",
                    models.CharField(
                        help_text="Enter here job name.",
                        max_length=100,
                        verbose_name="Job name",
                    ),
                ),
                (
                    "description",
                    models.TextField(
                        blank=True,
                        help_text="Enter here the text to be displayed as description of job. ",
                        max_length=200,
                        verbose_name="Description",
                    ),
                ),
                (
                    # Default duration is 15 minutes (900 seconds).
                    "time_interval",
                    models.DurationField(
                        default=datetime.timedelta(seconds=900),
                        help_text="Enter here the time it takes for this job.",
                        verbose_name="Time",
                    ),
                ),
                (
                    "image",
                    models.ImageField(
                        blank=True,
                        help_text="Upload your cover image here.",
                        storage=base.storage.UUIDStorage,
                        upload_to="images/",
                        verbose_name="Image",
                    ),
                ),
            ],
        ),
        # -- Order: one booking; its FKs to Profile are added further down.
        migrations.CreateModel(
            name="Order",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "booking_date",
                    models.DateTimeField(
                        help_text="Select your booking date here.",
                        null=True,
                        verbose_name="Date",
                    ),
                ),
                (
                    "client_comment",
                    models.CharField(
                        blank=True,
                        help_text="Enter a comment for your booking here.",
                        max_length=200,
                        verbose_name="Comment",
                    ),
                ),
                (
                    "state",
                    models.CharField(
                        choices=[("A", "Active"), ("C", "Canceled")],
                        default="A",
                        help_text="Select your booking status here.",
                        max_length=1,
                        verbose_name="State",
                    ),
                ),
                (
                    "gcal_event_id",
                    models.CharField(
                        blank=True,
                        help_text="Google calendar event ID.",
                        max_length=30,
                        verbose_name="Event",
                    ),
                ),
            ],
        ),
        # -- Profile: per-user settings; self-referencing M2Ms model
        #    client/master relations and allow/deny lists.
        migrations.CreateModel(
            name="Profile",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "avatar",
                    models.ImageField(
                        blank=True,
                        help_text="Upload your avatar image here.",
                        storage=base.storage.UUIDStorage,
                        upload_to="images/",
                        verbose_name="Profile photo",
                    ),
                ),
                (
                    "phone_number",
                    models.CharField(
                        blank=True,
                        help_text="Enter your phone number here (Example: +380123456789)",
                        max_length=17,
                        validators=[
                            django.core.validators.RegexValidator(
                                message="Phone number must be entered in the format: "
                                + "'+380123456789'.",
                                regex="^\\+?1?\\d{9,15}$",
                            )
                        ],
                        verbose_name="Phone",
                    ),
                ),
                (
                    "comment",
                    models.TextField(
                        blank=True,
                        help_text="Enter the text about the profile owner here.",
                        max_length=200,
                        verbose_name="Comment",
                    ),
                ),
                (
                    # NOTE(review): max_digits=2 with decimal_places=2
                    # limits the discount to values below 1.00 -- confirm
                    # that is the intended range.
                    "discount",
                    models.DecimalField(
                        decimal_places=2,
                        default=0,
                        help_text="Enter the profile discount value here.",
                        max_digits=2,
                        verbose_name="Discount",
                    ),
                ),
                (
                    "gcal_link",
                    models.CharField(
                        blank=True,
                        help_text="Enter your google calendar link here.",
                        max_length=200,
                        verbose_name="GCalendar link",
                    ),
                ),
                (
                    "timetable",
                    models.CharField(
                        choices=[
                            ("A", "All"),
                            ("M", "My clients"),
                            ("V", "Verified clients"),
                            ("N", "Nobody"),
                        ],
                        default="A",
                        help_text="Select your current service booking mode.",
                        max_length=1,
                        verbose_name="Timetable",
                    ),
                ),
                (
                    # Default delay is one hour (3600 seconds).
                    "booking_time_delay",
                    models.DurationField(
                        default=datetime.timedelta(seconds=3600),
                        help_text="Enter the minimum delay for booking today.",
                        verbose_name="Booking delay",
                    ),
                ),
                (
                    "booking_time_range",
                    models.IntegerField(
                        default=30,
                        help_text="Enter how many days in advance the booking can be made.",
                        verbose_name="Booking range",
                    ),
                ),
                (
                    "black_list",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Select users who cannot book with you.",
                        related_name="_booking_profile_black_list_+",
                        to="booking.Profile",
                        verbose_name="Black list",
                    ),
                ),
                (
                    # clients/masters go through Order, i.e. the relation
                    # exists only where bookings exist.
                    "clients",
                    models.ManyToManyField(
                        help_text="Your clients listed here.",
                        related_name="_booking_profile_clients_+",
                        through="booking.Order",
                        to="booking.Profile",
                        verbose_name="Clients",
                    ),
                ),
                (
                    "masters",
                    models.ManyToManyField(
                        help_text="Your masters listed here.",
                        related_name="_booking_profile_masters_+",
                        through="booking.Order",
                        to="booking.Profile",
                        verbose_name="Masters",
                    ),
                ),
                (
                    "user",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "white_list",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Select users who can always book with you.",
                        related_name="_booking_profile_white_list_+",
                        to="booking.Profile",
                        verbose_name="White list",
                    ),
                ),
            ],
        ),
        # -- Order foreign keys, added after Profile exists.
        migrations.AddField(
            model_name="order",
            name="client",
            field=models.ForeignKey(
                help_text="Select the client here.",
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="orders",
                to="booking.profile",
                verbose_name="Client",
            ),
        ),
        migrations.AddField(
            model_name="order",
            name="master",
            field=models.ForeignKey(
                help_text="Select the master here.",
                limit_choices_to={"user__groups__name": "Master"},
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="jobs",
                to="booking.profile",
                verbose_name="Master",
            ),
        ),
        migrations.AddField(
            model_name="order",
            name="work_type",
            field=models.ManyToManyField(
                help_text="Select the job for this order here.",
                to="booking.JobType",
                verbose_name="Job",
            ),
        ),
        # -- PriceList: a master's price for one job; unique per pair.
        migrations.CreateModel(
            name="PriceList",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "price",
                    models.DecimalField(
                        decimal_places=2,
                        help_text="Enter the price for this job here.",
                        max_digits=10,
                        verbose_name="Price",
                    ),
                ),
                (
                    "job",
                    models.ForeignKey(
                        help_text="Select the job here.",
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="prices",
                        to="booking.jobtype",
                        verbose_name="Job",
                    ),
                ),
                (
                    "profile",
                    models.ForeignKey(
                        help_text="Select the pricelist owner here.",
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="prices",
                        to="booking.profile",
                        verbose_name="Owner",
                    ),
                ),
            ],
            options={
                "unique_together": {("profile", "job")},
            },
        ),
    ]
| <filename>backend/booking/migrations/0001_initial.py
# Generated by Django 3.2.3 on 2022-01-12 11:56
import datetime
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import base.storage
class Migration(migrations.Migration):
    """Initial schema for the ``booking`` app.

    Creates the ``JobType``, ``Order``, ``Profile`` and ``PriceList`` models.
    ``Order`` is created before ``Profile``, so its relational fields
    (``client``, ``master``, ``work_type``) are attached afterwards via
    separate ``AddField`` operations.

    NOTE(review): this file is auto-generated by ``makemigrations``; do not
    hand-edit the schema declarations — create a new migration instead.
    """
    initial = True
    dependencies = [
        # The user model is swappable, so depend on whatever
        # settings.AUTH_USER_MODEL resolves to rather than hard-coding
        # django.contrib.auth.models.User.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A kind of bookable work/service, with a default time slot and an
        # optional cover image.
        migrations.CreateModel(
            name="JobType",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "name",
                    models.CharField(
                        help_text="Enter here job name.",
                        max_length=100,
                        verbose_name="Job name",
                    ),
                ),
                (
                    "description",
                    models.TextField(
                        blank=True,
                        help_text="Enter here the text to be displayed as description of job. ",
                        max_length=200,
                        verbose_name="Description",
                    ),
                ),
                (
                    "time_interval",
                    models.DurationField(
                        # 900 seconds == 15 minutes default slot length.
                        default=datetime.timedelta(seconds=900),
                        help_text="Enter here the time it takes for this job.",
                        verbose_name="Time",
                    ),
                ),
                (
                    "image",
                    models.ImageField(
                        blank=True,
                        help_text="Upload your cover image here.",
                        # Custom storage backend from the project's ``base`` app.
                        storage=base.storage.UUIDStorage,
                        upload_to="images/",
                        verbose_name="Image",
                    ),
                ),
            ],
        ),
        # A single booking. Its foreign keys to Profile are added below via
        # AddField because Profile does not exist yet at this point in the
        # operation list.
        migrations.CreateModel(
            name="Order",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "booking_date",
                    models.DateTimeField(
                        help_text="Select your booking date here.",
                        null=True,
                        verbose_name="Date",
                    ),
                ),
                (
                    "client_comment",
                    models.CharField(
                        blank=True,
                        help_text="Enter a comment for your booking here.",
                        max_length=200,
                        verbose_name="Comment",
                    ),
                ),
                (
                    "state",
                    models.CharField(
                        # Single-character state code: A = Active, C = Canceled.
                        choices=[("A", "Active"), ("C", "Canceled")],
                        default="A",
                        help_text="Select your booking status here.",
                        max_length=1,
                        verbose_name="State",
                    ),
                ),
                (
                    "gcal_event_id",
                    models.CharField(
                        # ID of the mirrored Google Calendar event, if any.
                        blank=True,
                        help_text="Google calendar event ID.",
                        max_length=30,
                        verbose_name="Event",
                    ),
                ),
            ],
        ),
        # Per-user profile (one-to-one with the user model) holding contact
        # data, booking policy and self-referential client/master relations.
        migrations.CreateModel(
            name="Profile",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "avatar",
                    models.ImageField(
                        blank=True,
                        help_text="Upload your avatar image here.",
                        storage=base.storage.UUIDStorage,
                        upload_to="images/",
                        verbose_name="Profile photo",
                    ),
                ),
                (
                    "phone_number",
                    models.CharField(
                        blank=True,
                        help_text="Enter your phone number here (Example: +380123456789)",
                        # max_length 17 = "+" plus up to 16 digits.
                        max_length=17,
                        validators=[
                            django.core.validators.RegexValidator(
                                message="Phone number must be entered in the format: "
                                + "'+380123456789'.",
                                regex="^\\+?1?\\d{9,15}$",
                            )
                        ],
                        verbose_name="Phone",
                    ),
                ),
                (
                    "comment",
                    models.TextField(
                        blank=True,
                        help_text="Enter the text about the profile owner here.",
                        max_length=200,
                        verbose_name="Comment",
                    ),
                ),
                (
                    "discount",
                    models.DecimalField(
                        decimal_places=2,
                        default=0,
                        help_text="Enter the profile discount value here.",
                        # NOTE(review): max_digits=2 with decimal_places=2 only
                        # allows values 0.00-0.99, i.e. the discount is a
                        # fraction, not a percentage — confirm this is intended.
                        max_digits=2,
                        verbose_name="Discount",
                    ),
                ),
                (
                    "gcal_link",
                    models.CharField(
                        blank=True,
                        help_text="Enter your google calendar link here.",
                        max_length=200,
                        verbose_name="GCalendar link",
                    ),
                ),
                (
                    "timetable",
                    models.CharField(
                        # Who is allowed to book with this profile.
                        choices=[
                            ("A", "All"),
                            ("M", "My clients"),
                            ("V", "Verified clients"),
                            ("N", "Nobody"),
                        ],
                        default="A",
                        help_text="Select your current service booking mode.",
                        max_length=1,
                        verbose_name="Timetable",
                    ),
                ),
                (
                    "booking_time_delay",
                    models.DurationField(
                        # 3600 seconds == 1 hour minimum lead time for
                        # same-day bookings.
                        default=datetime.timedelta(seconds=3600),
                        help_text="Enter the minimum delay for booking today.",
                        verbose_name="Booking delay",
                    ),
                ),
                (
                    "booking_time_range",
                    models.IntegerField(
                        default=30,
                        help_text="Enter how many days in advance the booking can be made.",
                        verbose_name="Booking range",
                    ),
                ),
                # The M2M fields below are self-referential (Profile <->
                # Profile); a related_name ending in "+" tells Django not to
                # create a reverse accessor.
                (
                    "black_list",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Select users who cannot book with you.",
                        related_name="_booking_profile_black_list_+",
                        to="booking.Profile",
                        verbose_name="Black list",
                    ),
                ),
                (
                    "clients",
                    models.ManyToManyField(
                        # Mediated by Order: a profile's clients are the
                        # profiles linked to it through orders.
                        help_text="Your clients listed here.",
                        related_name="_booking_profile_clients_+",
                        through="booking.Order",
                        to="booking.Profile",
                        verbose_name="Clients",
                    ),
                ),
                (
                    "masters",
                    models.ManyToManyField(
                        # Mirror of ``clients``, also mediated by Order.
                        help_text="Your masters listed here.",
                        related_name="_booking_profile_masters_+",
                        through="booking.Order",
                        to="booking.Profile",
                        verbose_name="Masters",
                    ),
                ),
                (
                    "user",
                    models.OneToOneField(
                        # Deleting the user deletes the profile.
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "white_list",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Select users who can always book with you.",
                        related_name="_booking_profile_white_list_+",
                        to="booking.Profile",
                        verbose_name="White list",
                    ),
                ),
            ],
        ),
        # Attach Order's relations now that Profile exists. Both FKs use
        # SET_NULL so deleting a profile keeps the order history.
        migrations.AddField(
            model_name="order",
            name="client",
            field=models.ForeignKey(
                help_text="Select the client here.",
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="orders",
                to="booking.profile",
                verbose_name="Client",
            ),
        ),
        migrations.AddField(
            model_name="order",
            name="master",
            field=models.ForeignKey(
                help_text="Select the master here.",
                # Only profiles whose user belongs to the "Master" group are
                # selectable as the order's master.
                limit_choices_to={"user__groups__name": "Master"},
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="jobs",
                to="booking.profile",
                verbose_name="Master",
            ),
        ),
        migrations.AddField(
            model_name="order",
            name="work_type",
            field=models.ManyToManyField(
                help_text="Select the job for this order here.",
                to="booking.JobType",
                verbose_name="Job",
            ),
        ),
        # Per-profile price for a given job; one row per (profile, job) pair.
        migrations.CreateModel(
            name="PriceList",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "price",
                    models.DecimalField(
                        decimal_places=2,
                        help_text="Enter the price for this job here.",
                        max_digits=10,
                        verbose_name="Price",
                    ),
                ),
                (
                    "job",
                    models.ForeignKey(
                        help_text="Select the job here.",
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="prices",
                        to="booking.jobtype",
                        verbose_name="Job",
                    ),
                ),
                (
                    "profile",
                    models.ForeignKey(
                        help_text="Select the pricelist owner here.",
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="prices",
                        to="booking.profile",
                        verbose_name="Owner",
                    ),
                ),
            ],
            options={
                # Enforce a single price per (profile, job) combination.
                "unique_together": {("profile", "job")},
            },
        ),
    ]
| en | 0.892294 | # Generated by Django 3.2.3 on 2022-01-12 11:56 | 1.925383 | 2 |
utils/cli.py | dyelax/selfie2bitmoji | 3 | 6614833 | <filename>utils/cli.py
import os
import argparse
from tensorpack.utils.logger import set_logger_dir
from utils.misc import get_dir, date_str
def get_avatar_synth_args():
parser = argparse.ArgumentParser()
parser.add_argument('--train_dir',
help='Directory of train data',
default='./data/bitmoji/train')
parser.add_argument('--test_dir',
help='Directory of test data',
default='./data/bitmoji/test')
parser.add_argument('--logger_dir',
help='Directory to save logs and model checkpoints',
default=os.path.join('save', 'log', date_str()))
parser.add_argument('--load_path',
help='Path of the model checkpoint to load')
parser.add_argument('--epochs',
help='Number of epochs to train',
default=100000,
type=int)
parser.add_argument('--batch_size',
help='Minibatch size',
default=512,
type=int)
parser.add_argument('--lr',
help='Learning rate',
default=1e-4,
type=float)
parser.add_argument('--lr_decay',
help='The multiple by which to decay the learning rate every epoch',
default=0.96,
type=float)
parser.add_argument('--resume_lr',
help='Resume the learning rate from the previous run',
action='store_true')
parser.add_argument('--keep_prob',
help='The keep probability for dropout (always 1 for testing)',
default=0.5,
type=float)
parser.add_argument('--summary_freq',
help='Frequency (in steps) with which to write tensorboard summaries',
default=100,
type=int)
parser.add_argument('--gpu',
help='Comma separated list of GPU(s) to use',
default='0')
parser.add_argument('--num_threads',
help='The number of threads to read and process data',
default=32,
type=int)
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
set_logger_dir(args.logger_dir)
return args
def get_s2b_args():
parser = argparse.ArgumentParser()
parser.add_argument('--train_dir_bitmoji',
help='Directory of bitmoji train data',
default='./data/bitmoji/train')
parser.add_argument('--test_dir_bitmoji',
help='Directory of bitmoji test data',
default='./data/bitmoji/test')
parser.add_argument('--train_dir_face',
help='Directory of real face train data',
default='./data/celeba/train')
parser.add_argument('--test_dir_face',
help='Directory of real face test data',
default='./data/celeba/test')
parser.add_argument('--logger_dir',
help='Directory to save logs and model checkpoints',
default=os.path.join('save', 's2b', date_str()))
parser.add_argument('--load_path',
help='Path of the model checkpoint to load',
default=os.path.join('save', 's2b', 'default', 'model'))
parser.add_argument('--epochs',
help='Number of epochs to train',
default=100000,
type=int)
parser.add_argument('--batch_size',
help='Minibatch size',
default=128,
type=int)
parser.add_argument('--lr',
help='Learning rate',
default=1e-4,
type=float)
parser.add_argument('--decay',
help='The multiple by which to decay learning rate, instance noise stddev '
'and discriminator uncertainty threshhold every epoch',
default=0.98,
type=float)
parser.add_argument('--resume_lr',
help='Resume the learning rate from the previous run',
action='store_true')
parser.add_argument('--keep_prob',
help='The keep probability for dropout (always 1 for testing)',
default=0.5,
type=float)
parser.add_argument('--summary_freq',
help='Frequency (in steps) with which to write tensorboard summaries',
default=20,
type=int)
parser.add_argument('--gpu',
help='Which GPU to use')
parser.add_argument('--num_threads',
help='The number of threads to read and process data',
default=32,
type=int)
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
set_logger_dir(args.logger_dir)
return args | <filename>utils/cli.py
import os
import argparse
from tensorpack.utils.logger import set_logger_dir
from utils.misc import get_dir, date_str
def get_avatar_synth_args():
parser = argparse.ArgumentParser()
parser.add_argument('--train_dir',
help='Directory of train data',
default='./data/bitmoji/train')
parser.add_argument('--test_dir',
help='Directory of test data',
default='./data/bitmoji/test')
parser.add_argument('--logger_dir',
help='Directory to save logs and model checkpoints',
default=os.path.join('save', 'log', date_str()))
parser.add_argument('--load_path',
help='Path of the model checkpoint to load')
parser.add_argument('--epochs',
help='Number of epochs to train',
default=100000,
type=int)
parser.add_argument('--batch_size',
help='Minibatch size',
default=512,
type=int)
parser.add_argument('--lr',
help='Learning rate',
default=1e-4,
type=float)
parser.add_argument('--lr_decay',
help='The multiple by which to decay the learning rate every epoch',
default=0.96,
type=float)
parser.add_argument('--resume_lr',
help='Resume the learning rate from the previous run',
action='store_true')
parser.add_argument('--keep_prob',
help='The keep probability for dropout (always 1 for testing)',
default=0.5,
type=float)
parser.add_argument('--summary_freq',
help='Frequency (in steps) with which to write tensorboard summaries',
default=100,
type=int)
parser.add_argument('--gpu',
help='Comma separated list of GPU(s) to use',
default='0')
parser.add_argument('--num_threads',
help='The number of threads to read and process data',
default=32,
type=int)
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
set_logger_dir(args.logger_dir)
return args
def get_s2b_args():
parser = argparse.ArgumentParser()
parser.add_argument('--train_dir_bitmoji',
help='Directory of bitmoji train data',
default='./data/bitmoji/train')
parser.add_argument('--test_dir_bitmoji',
help='Directory of bitmoji test data',
default='./data/bitmoji/test')
parser.add_argument('--train_dir_face',
help='Directory of real face train data',
default='./data/celeba/train')
parser.add_argument('--test_dir_face',
help='Directory of real face test data',
default='./data/celeba/test')
parser.add_argument('--logger_dir',
help='Directory to save logs and model checkpoints',
default=os.path.join('save', 's2b', date_str()))
parser.add_argument('--load_path',
help='Path of the model checkpoint to load',
default=os.path.join('save', 's2b', 'default', 'model'))
parser.add_argument('--epochs',
help='Number of epochs to train',
default=100000,
type=int)
parser.add_argument('--batch_size',
help='Minibatch size',
default=128,
type=int)
parser.add_argument('--lr',
help='Learning rate',
default=1e-4,
type=float)
parser.add_argument('--decay',
help='The multiple by which to decay learning rate, instance noise stddev '
'and discriminator uncertainty threshhold every epoch',
default=0.98,
type=float)
parser.add_argument('--resume_lr',
help='Resume the learning rate from the previous run',
action='store_true')
parser.add_argument('--keep_prob',
help='The keep probability for dropout (always 1 for testing)',
default=0.5,
type=float)
parser.add_argument('--summary_freq',
help='Frequency (in steps) with which to write tensorboard summaries',
default=20,
type=int)
parser.add_argument('--gpu',
help='Which GPU to use')
parser.add_argument('--num_threads',
help='The number of threads to read and process data',
default=32,
type=int)
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
set_logger_dir(args.logger_dir)
return args | none | 1 | 2.347436 | 2 | |
lightnion/cell/address.py | pthevenet/lightnion | 120 | 6614834 | <reponame>pthevenet/lightnion<gh_stars>100-1000
from . import view as _view
import ipaddress
class addr_type(_view.enum(1, cached=True)):
HOSTNAME = 0x00
IPV4_ADDR = 0x04
IPV6_ADDR = 0x06
ERROR_TRANS = 0xF0
ERROR_NON_TRANS = 0xF1
header_view = _view.fields(**{
'type': addr_type,
'length': _view.cache(_view.uint, init=[1])})
class address_view(_view.packet):
_default_extra_fields = ['type']
_default_header_view = header_view
_default_data_name = 'host'
def __init__(self, *kargs, **kwargs):
super().__init__(*kargs, **kwargs)
length = self.header._fields['length']
self._fields['host'] = _view.union(
view_table={
addr_type.HOSTNAME: _view.codec('utf8', size=length),
addr_type.IPV4_ADDR: _view.ip_address(version=4),
addr_type.IPV6_ADDR: _view.ip_address(version=6),
addr_type.ERROR_TRANS: _view.data(length),
addr_type.ERROR_NON_TRANS: _view.data(length)
}, union_tag=self.header._fields['type'])
view = address_view()
address = _view.like(view, 'address')
def pack(host, type_hint=None):
if type_hint is None:
try:
ipaddress.IPv4Address(host)
type_hint = addr_type.IPV4_ADDR
except ValueError:
ipaddress.IPv6Address(host)
type_hint = addr_type.IPV6_ADDR
base = address(b'')
base.header.set(**{'type': type_hint, 'length': 0})
length = base._view.host.width()
if length == 0:
length = len(host)
base.header.set(length=length)
base.set(host=host)
return base
| from . import view as _view
import ipaddress
class addr_type(_view.enum(1, cached=True)):
HOSTNAME = 0x00
IPV4_ADDR = 0x04
IPV6_ADDR = 0x06
ERROR_TRANS = 0xF0
ERROR_NON_TRANS = 0xF1
header_view = _view.fields(**{
'type': addr_type,
'length': _view.cache(_view.uint, init=[1])})
class address_view(_view.packet):
_default_extra_fields = ['type']
_default_header_view = header_view
_default_data_name = 'host'
def __init__(self, *kargs, **kwargs):
super().__init__(*kargs, **kwargs)
length = self.header._fields['length']
self._fields['host'] = _view.union(
view_table={
addr_type.HOSTNAME: _view.codec('utf8', size=length),
addr_type.IPV4_ADDR: _view.ip_address(version=4),
addr_type.IPV6_ADDR: _view.ip_address(version=6),
addr_type.ERROR_TRANS: _view.data(length),
addr_type.ERROR_NON_TRANS: _view.data(length)
}, union_tag=self.header._fields['type'])
view = address_view()
address = _view.like(view, 'address')
def pack(host, type_hint=None):
if type_hint is None:
try:
ipaddress.IPv4Address(host)
type_hint = addr_type.IPV4_ADDR
except ValueError:
ipaddress.IPv6Address(host)
type_hint = addr_type.IPV6_ADDR
base = address(b'')
base.header.set(**{'type': type_hint, 'length': 0})
length = base._view.host.width()
if length == 0:
length = len(host)
base.header.set(length=length)
base.set(host=host)
return base | none | 1 | 2.111698 | 2 | |
lambda-code/layer/stream_decompressor.py | aws-samples/amazon-s3-object-lambda-decompression | 5 | 6614835 | class StreamDecompressor:
def __init__(self, compressed_file_obj, decompressor):
self.data = StreamDecompressor._decompressor_chunk_gen(compressed_file_obj, decompressor)
def read(self, _len):
for d in self.data:
return d
@staticmethod
def _decompressor_chunk_gen(compressed_file_obj, decompressor):
"""This function is used for the snappy and zlib methods only"""
while True:
compressed_chunk = compressed_file_obj.read(4096)
# If end of file reached
if not compressed_chunk:
break
decompressed = decompressor.decompress(compressed_chunk)
# Need to make sure we don't send empty chunks, could close connection
if decompressed:
yield decompressed
yield decompressor.flush() | class StreamDecompressor:
def __init__(self, compressed_file_obj, decompressor):
self.data = StreamDecompressor._decompressor_chunk_gen(compressed_file_obj, decompressor)
def read(self, _len):
for d in self.data:
return d
@staticmethod
def _decompressor_chunk_gen(compressed_file_obj, decompressor):
"""This function is used for the snappy and zlib methods only"""
while True:
compressed_chunk = compressed_file_obj.read(4096)
# If end of file reached
if not compressed_chunk:
break
decompressed = decompressor.decompress(compressed_chunk)
# Need to make sure we don't send empty chunks, could close connection
if decompressed:
yield decompressed
yield decompressor.flush() | en | 0.933024 | This function is used for the snappy and zlib methods only # If end of file reached # Need to make sure we don't send empty chunks, could close connection | 3.236423 | 3 |
src/local_reload/templatetags/local_reload.py | quadrant-newmedia/local_reload | 0 | 6614836 | import time
from django import template
register = template.Library()
@register.simple_tag
def millisecond_timestamp():
return str(int(round(time.time()*1000))) | import time
from django import template
register = template.Library()
@register.simple_tag
def millisecond_timestamp():
return str(int(round(time.time()*1000))) | none | 1 | 1.990013 | 2 | |
ACM-Solution/numwordscp.py | wasi0013/Python-CodeBase | 2 | 6614837 | def a(b):
if b==0:return'zero'
T=('one','two','three','four','five','six','seven','eight','nine');n='teen';x='ty';c=[];d=b//1000%1000
if d:c+=[a(d),'thousand']
d=b//100%10
if d:c+=[a(d),'hundred']
h=b//10%10;i=b%10
if h==1:c+=[['ten','eleven','twelve','thir'+n,T[3]+n,'fif'+n,T[5]+n,T[6]+n,T[7]+'een',T[8]+n][i]]
else:
if h:c+=[['twenty','thirty','forty','fifty',T[5]+x,T[6]+x,T[7]+'y',T[8]+x][h-2]]
if i:c+=[T[i-1]]
return' '.join(c)
print(a(int(input())))
| def a(b):
if b==0:return'zero'
T=('one','two','three','four','five','six','seven','eight','nine');n='teen';x='ty';c=[];d=b//1000%1000
if d:c+=[a(d),'thousand']
d=b//100%10
if d:c+=[a(d),'hundred']
h=b//10%10;i=b%10
if h==1:c+=[['ten','eleven','twelve','thir'+n,T[3]+n,'fif'+n,T[5]+n,T[6]+n,T[7]+'een',T[8]+n][i]]
else:
if h:c+=[['twenty','thirty','forty','fifty',T[5]+x,T[6]+x,T[7]+'y',T[8]+x][h-2]]
if i:c+=[T[i-1]]
return' '.join(c)
print(a(int(input())))
| none | 1 | 2.965192 | 3 | |
server.py | m-primo/Secure-Local-Server-Chat | 0 | 6614838 | import socket
import sys
import time
import config
host_key = config.host_key
host = config.host_name
password = <PASSWORD>
s = config.s
print("Host:", host)
port = int(input("Port: "))
print("Port:", port)
print("Password:", password)
print("Host Key:", host_key)
s.bind((host, port))
print("Server done binding to host and port successfully.")
print("Server is waiting for incoming connections...")
s.listen(1)
conn, addr = s.accept()
print(addr, "New connection to the server.")
print("")
while 1:
message = input(str(">> "))
message = str(message+host_key).encode()
conn.send(message)
print("Message has been sent.")
print("Waiting for any incoming message...")
print("-----------------------------------")
incoming_message = conn.recv(1024)
incoming_message = ((incoming_message.decode()).replace(host_key, ''))
print("Client : ", incoming_message)
print("-----------------------------------")
| import socket
import sys
import time
import config
host_key = config.host_key
host = config.host_name
password = <PASSWORD>
s = config.s
print("Host:", host)
port = int(input("Port: "))
print("Port:", port)
print("Password:", password)
print("Host Key:", host_key)
s.bind((host, port))
print("Server done binding to host and port successfully.")
print("Server is waiting for incoming connections...")
s.listen(1)
conn, addr = s.accept()
print(addr, "New connection to the server.")
print("")
while 1:
message = input(str(">> "))
message = str(message+host_key).encode()
conn.send(message)
print("Message has been sent.")
print("Waiting for any incoming message...")
print("-----------------------------------")
incoming_message = conn.recv(1024)
incoming_message = ((incoming_message.decode()).replace(host_key, ''))
print("Client : ", incoming_message)
print("-----------------------------------")
| none | 1 | 3.34652 | 3 | |
records/09-07/asdad.py | AaronYang2333/CSCI_570 | 36 | 6614839 | __author__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '9/7/2020 11:52 PM'
class Solution:
def combine(self, n: int, k: int):
self.result = []
def backtrace(n, k, start, subset):
if len(subset) == k:
self.result.append(subset[:])
return
for i in range(start, n + 1):
subset.append(i)
backtrace(n, k, i + 1, subset)
subset.pop()
backtrace(n, k, 1, [])
return self.result
if __name__ == '__main__':
Solution().combine(4, 2)
| __author__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '9/7/2020 11:52 PM'
class Solution:
def combine(self, n: int, k: int):
self.result = []
def backtrace(n, k, start, subset):
if len(subset) == k:
self.result.append(subset[:])
return
for i in range(start, n + 1):
subset.append(i)
backtrace(n, k, i + 1, subset)
subset.pop()
backtrace(n, k, 1, [])
return self.result
if __name__ == '__main__':
Solution().combine(4, 2)
| none | 1 | 3.299207 | 3 | |
debufftracker/screen_tools.py | nstatz/PoEDebuffTracker | 0 | 6614840 | <reponame>nstatz/PoEDebuffTracker<gh_stars>0
import numpy as np
import datetime as dt
import mss
import toml
from debufftracker import errors as customErrors
from debufftracker import status
import time
import os
from threading import Thread
class ConfigReader:
"""
This class contains functions to read and return configuration Data
"""
def __init__(self):
self.__config_path = os.path.join("resources", "config.toml")
self.__toml_content = toml.load(f=self.__config_path)
def get_imagetransformation_config(self):
"""
Get config for image transformation
:return: self.__toml_content["imagetransformation"], dictionary with image transformation config from config.toml
:rtype: dict
"""
allowed_colors = ["color"]
if self.__toml_content["imagetransformation"]["color_type"].lower() not in allowed_colors:
raise customErrors.ColorConfigError(self.__toml_content["imagetransformation"]["color_type"])
return self.__toml_content["imagetransformation"]
def get_debuff_configs(self, status_type): # ailment/curse/ground
"""
Returns Config of a status type. Status type
:param status_type: Name of the status (ailment/curse/ground)
:type status_type: str
:return: status_config, a dictionary containing the config data of a status from config.toml
:rtype: dict
"""
status_config = self.__toml_content[status_type]
return status_config
class ScreenTracker:
"""
This class contains functions to track the screen content
"""
def __init__(self):
self._config_reader = ConfigReader()
self.image_config = self._config_reader.get_imagetransformation_config()
self.status_instances = {}
def create_removestatus_dict(self):
"""
Iterates over each status type in ["ailment", "curse", "ground"] and adds the status specific config
to dictionary relevant_dicts. Then return relevant dict
:return: relevant_dicts, a dictionary with status configs.
:rtype: Dictionary
"""
def get_relevant_dicts(d):
"""
A helpfunction, only callable inside create_removestatus_dict, to "flatten" a dictionary and
only return results where remove_debuff is True
:param d: dictionary that contains sub dictionaries. Each subdictionary represents a status config
:return: big_dict. Da Dictionary that only contains configs where subdict["remove_debuff"] == True)
:rtype: Dictionary
"""
big_dict = {}
for key in d.keys():
# "Flatten" dictionary if True
if (d[key]["key"] !="") and (d[key]["remove_debuff"] == True):
big_dict[key] = d[key]
elif (d[key]["key"] =="") and (d[key]["remove_debuff"] == True):
raise customErrors.StatusConfigError("if remove_debuff is true, then keybinding must be set")
return big_dict
relevant_dicts = {}
status_types = ["ailment", "curse", "ground"]
for status_type in status_types:
status_type_all_dict = self._config_reader.get_debuff_configs(status_type=status_type)
status_type_remove_dict = get_relevant_dicts(status_type_all_dict)
relevant_dicts.update(status_type_remove_dict)
self.__removestatus_dicts = relevant_dicts #dict contains dicts
# dict structure
# removestatus_dicts=\
# {
# "shocK":
# {
# "type" : "shock",
# }
# }
return relevant_dicts
def create_status_instances(self):
"""
Create instances of status.Status and add them to a dictionary self.__status_instances.
Using this dictionary enables managing those instances, when necessary
:return: None
"""
# config example needed to initiate status classes
# config = \
# {
# "type" : "bleed",
# "flask" : "1",
# "color_type" : "gray",
# "remove_debuff" : True
# }
try:
remove_status_dicts = self.__removestatus_dicts
except:
remove_status_dicts = self.create_removestatus_dict()
status_instances_dict = {}
for status_type in remove_status_dicts.keys():
#print(remove_status_dicts)
status_config = remove_status_dicts[status_type]
#add color_type to config. This is required to read the template with the correct method (gray/color)
status_config["color_type"] = self.image_config["color_type"]
status_instance = status.Status(status_config)
status_instances_dict[status_type] = status_instance
self.status_instances = status_instances_dict
def manage_status_instances(self):
"""
Takes a partial screenshot, then iterates over the status.Status instances and checks if a harmful effect of
type of instance was found. If so, remove the effect. Threads will be joined to prevent chaotic behaviour.
:return: debuffs_dict, a dict that contains the negative effect and a dt stamp when it was recognized
:rtype: Dictionary
"""
#https://www.geeksforgeeks.org/how-to-create-a-new-thread-in-python/
screen = self.grab_transform_screen()
debuffs_dict = {}
thread_list = []
for status_name in self.status_instances.keys():
status_instance = self.status_instances[status_name]
#status_instance.run(screen) # each instance is run as a seperate Thread
t = Thread(target=status_instance.run, args=(screen, ))
thread_list.append(t)
t.start()
# wait for threads to finish. Not waiting caused chaotic behavior.
for t in thread_list:
t.join()
return debuffs_dict
def run(self):
"""
Infinitive loop that calls self.manage_status_instances() which causes any found negative effects to be removed.
:return: None
"""
continue_run = True
print("Debuff Tracker started")
while continue_run==True:
self.manage_status_instances()
time.sleep(1)
def grab_transform_screen(self):
"""
Make a partial Screenshot, transform to screenshot to numpy array and return transformed screenshot.
:return: screen_cv2, partial screenshot that contains all 3 color channels. Order is BGR
:rtype: np.array
"""
# I compared 3 methods over 1000 iterations:
# pyautogui: take screenshot, then cut and transform (avg time 0:00:00.054545)
# PIL: take partial screenshot, then transform (avg time 0:00:00.035084)
# mss: take partial screenshot, then transform (avg time 0:00:00.013324)
# mss is lightweight and fast
with mss.mss() as sct:
# The screen part to capture
monitor_area = \
{
"top": 0,
"left": 0,
"width": self.image_config["width"],
"height": self.image_config["height"]
}
screen = sct.grab(monitor_area)
screen_cv2 = np.array(screen)
screen_cv2 = screen_cv2[:,:,:3] # 4th channel contains value 255 (uint8). Remove fourth channel
end_dt = dt.datetime.now()
fname = str(end_dt).replace(":", "") + ".png"
p = os.path.join(os.getcwd(), os.pardir, "resources", "track_screen", fname)
return screen_cv2
if __name__ == "__main__":
current_dir = os.path.dirname( os.path.abspath(__file__))
project_dir = os.path.join(current_dir, os.path.pardir)
# set project source folder as working directory
os.chdir(project_dir)
screentracker = ScreenTracker()
screentracker.create_status_instances()
screentracker.run() | import numpy as np
import datetime as dt
import mss
import toml
from debufftracker import errors as customErrors
from debufftracker import status
import time
import os
from threading import Thread
class ConfigReader:
"""
This class contains functions to read and return configuration Data
"""
def __init__(self):
self.__config_path = os.path.join("resources", "config.toml")
self.__toml_content = toml.load(f=self.__config_path)
def get_imagetransformation_config(self):
"""
Get config for image transformation
:return: self.__toml_content["imagetransformation"], dictionary with image transformation config from config.toml
:rtype: dict
"""
allowed_colors = ["color"]
if self.__toml_content["imagetransformation"]["color_type"].lower() not in allowed_colors:
raise customErrors.ColorConfigError(self.__toml_content["imagetransformation"]["color_type"])
return self.__toml_content["imagetransformation"]
def get_debuff_configs(self, status_type): # ailment/curse/ground
"""
Returns Config of a status type. Status type
:param status_type: Name of the status (ailment/curse/ground)
:type status_type: str
:return: status_config, a dictionary containing the config data of a status from config.toml
:rtype: dict
"""
status_config = self.__toml_content[status_type]
return status_config
class ScreenTracker:
"""
This class contains functions to track the screen content
"""
def __init__(self):
self._config_reader = ConfigReader()
self.image_config = self._config_reader.get_imagetransformation_config()
self.status_instances = {}
def create_removestatus_dict(self):
"""
Iterates over each status type in ["ailment", "curse", "ground"] and adds the status specific config
to dictionary relevant_dicts. Then return relevant dict
:return: relevant_dicts, a dictionary with status configs.
:rtype: Dictionary
"""
def get_relevant_dicts(d):
"""
A helpfunction, only callable inside create_removestatus_dict, to "flatten" a dictionary and
only return results where remove_debuff is True
:param d: dictionary that contains sub dictionaries. Each subdictionary represents a status config
:return: big_dict. Da Dictionary that only contains configs where subdict["remove_debuff"] == True)
:rtype: Dictionary
"""
big_dict = {}
for key in d.keys():
# "Flatten" dictionary if True
if (d[key]["key"] !="") and (d[key]["remove_debuff"] == True):
big_dict[key] = d[key]
elif (d[key]["key"] =="") and (d[key]["remove_debuff"] == True):
raise customErrors.StatusConfigError("if remove_debuff is true, then keybinding must be set")
return big_dict
relevant_dicts = {}
status_types = ["ailment", "curse", "ground"]
for status_type in status_types:
status_type_all_dict = self._config_reader.get_debuff_configs(status_type=status_type)
status_type_remove_dict = get_relevant_dicts(status_type_all_dict)
relevant_dicts.update(status_type_remove_dict)
self.__removestatus_dicts = relevant_dicts #dict contains dicts
# dict structure
# removestatus_dicts=\
# {
# "shocK":
# {
# "type" : "shock",
# }
# }
return relevant_dicts
def create_status_instances(self):
"""
Create instances of status.Status and add them to a dictionary self.__status_instances.
Using this dictionary enables managing those instances, when necessary
:return: None
"""
# config example needed to initiate status classes
# config = \
# {
# "type" : "bleed",
# "flask" : "1",
# "color_type" : "gray",
# "remove_debuff" : True
# }
try:
remove_status_dicts = self.__removestatus_dicts
except:
remove_status_dicts = self.create_removestatus_dict()
status_instances_dict = {}
for status_type in remove_status_dicts.keys():
#print(remove_status_dicts)
status_config = remove_status_dicts[status_type]
#add color_type to config. This is required to read the template with the correct method (gray/color)
status_config["color_type"] = self.image_config["color_type"]
status_instance = status.Status(status_config)
status_instances_dict[status_type] = status_instance
self.status_instances = status_instances_dict
def manage_status_instances(self):
"""
Takes a partial screenshot, then iterates over the status.Status instances and checks if a harmful effect of
type of instance was found. If so, remove the effect. Threads will be joined to prevent chaotic behaviour.
:return: debuffs_dict, a dict that contains the negative effect and a dt stamp when it was recognized
:rtype: Dictionary
"""
#https://www.geeksforgeeks.org/how-to-create-a-new-thread-in-python/
screen = self.grab_transform_screen()
debuffs_dict = {}
thread_list = []
for status_name in self.status_instances.keys():
status_instance = self.status_instances[status_name]
#status_instance.run(screen) # each instance is run as a seperate Thread
t = Thread(target=status_instance.run, args=(screen, ))
thread_list.append(t)
t.start()
# wait for threads to finish. Not waiting caused chaotic behavior.
for t in thread_list:
t.join()
return debuffs_dict
def run(self):
"""
Infinitive loop that calls self.manage_status_instances() which causes any found negative effects to be removed.
:return: None
"""
continue_run = True
print("Debuff Tracker started")
while continue_run==True:
self.manage_status_instances()
time.sleep(1)
def grab_transform_screen(self):
"""
Make a partial Screenshot, transform to screenshot to numpy array and return transformed screenshot.
:return: screen_cv2, partial screenshot that contains all 3 color channels. Order is BGR
:rtype: np.array
"""
# I compared 3 methods over 1000 iterations:
# pyautogui: take screenshot, then cut and transform (avg time 0:00:00.054545)
# PIL: take partial screenshot, then transform (avg time 0:00:00.035084)
# mss: take partial screenshot, then transform (avg time 0:00:00.013324)
# mss is lightweight and fast
with mss.mss() as sct:
# The screen part to capture
monitor_area = \
{
"top": 0,
"left": 0,
"width": self.image_config["width"],
"height": self.image_config["height"]
}
screen = sct.grab(monitor_area)
screen_cv2 = np.array(screen)
screen_cv2 = screen_cv2[:,:,:3] # 4th channel contains value 255 (uint8). Remove fourth channel
end_dt = dt.datetime.now()
fname = str(end_dt).replace(":", "") + ".png"
p = os.path.join(os.getcwd(), os.pardir, "resources", "track_screen", fname)
return screen_cv2
if __name__ == "__main__":
current_dir = os.path.dirname( os.path.abspath(__file__))
project_dir = os.path.join(current_dir, os.path.pardir)
# set project source folder as working directory
os.chdir(project_dir)
screentracker = ScreenTracker()
screentracker.create_status_instances()
screentracker.run() | en | 0.736726 | This class contains functions to read and return configuration Data Get config for image transformation
:return: self.__toml_content["imagetransformation"], dictionary with image transformation config from config.toml
:rtype: dict # ailment/curse/ground Returns Config of a status type. Status type
:param status_type: Name of the status (ailment/curse/ground)
:type status_type: str
:return: status_config, a dictionary containing the config data of a status from config.toml
:rtype: dict This class contains functions to track the screen content Iterates over each status type in ["ailment", "curse", "ground"] and adds the status specific config
to dictionary relevant_dicts. Then return relevant dict
:return: relevant_dicts, a dictionary with status configs.
:rtype: Dictionary A helpfunction, only callable inside create_removestatus_dict, to "flatten" a dictionary and
only return results where remove_debuff is True
:param d: dictionary that contains sub dictionaries. Each subdictionary represents a status config
:return: big_dict. Da Dictionary that only contains configs where subdict["remove_debuff"] == True)
:rtype: Dictionary # "Flatten" dictionary if True #dict contains dicts # dict structure # removestatus_dicts=\ # { # "shocK": # { # "type" : "shock", # } # } Create instances of status.Status and add them to a dictionary self.__status_instances.
Using this dictionary enables managing those instances, when necessary
:return: None # config example needed to initiate status classes # config = \ # { # "type" : "bleed", # "flask" : "1", # "color_type" : "gray", # "remove_debuff" : True # } #print(remove_status_dicts) #add color_type to config. This is required to read the template with the correct method (gray/color) Takes a partial screenshot, then iterates over the status.Status instances and checks if a harmful effect of
type of instance was found. If so, remove the effect. Threads will be joined to prevent chaotic behaviour.
:return: debuffs_dict, a dict that contains the negative effect and a dt stamp when it was recognized
:rtype: Dictionary #https://www.geeksforgeeks.org/how-to-create-a-new-thread-in-python/ #status_instance.run(screen) # each instance is run as a seperate Thread # wait for threads to finish. Not waiting caused chaotic behavior. Infinitive loop that calls self.manage_status_instances() which causes any found negative effects to be removed.
:return: None Make a partial Screenshot, transform to screenshot to numpy array and return transformed screenshot.
:return: screen_cv2, partial screenshot that contains all 3 color channels. Order is BGR
:rtype: np.array # I compared 3 methods over 1000 iterations: # pyautogui: take screenshot, then cut and transform (avg time 0:00:00.054545) # PIL: take partial screenshot, then transform (avg time 0:00:00.035084) # mss: take partial screenshot, then transform (avg time 0:00:00.013324) # mss is lightweight and fast # The screen part to capture # 4th channel contains value 255 (uint8). Remove fourth channel # set project source folder as working directory | 2.406258 | 2 |
apps/users/tests.py | python3-7/tupian | 1 | 6614841 | <reponame>python3-7/tupian<gh_stars>1-10
from django.test import TestCase
from django.http import HttpResponse
# Create your tests here.
# 测试
from django.core.mail import send_mail
def sendmail():# SMTP
send_mail(
'Subject here',
'Here is the message.',
'<EMAIL>',
['<EMAIL>'],
fail_silently=False,
html_message='asdfasda'
)
def test(request):
sendmail()
return HttpResponse('aaaa')
| from django.test import TestCase
from django.http import HttpResponse
# Create your tests here.
# 测试
from django.core.mail import send_mail
def sendmail():# SMTP
send_mail(
'Subject here',
'Here is the message.',
'<EMAIL>',
['<EMAIL>'],
fail_silently=False,
html_message='asdfasda'
)
def test(request):
sendmail()
return HttpResponse('aaaa') | en | 0.819708 | # Create your tests here. # 测试 # SMTP | 2.33677 | 2 |
blog/views.py | sanjaysheel/blogx | 1 | 6614842 | from django.shortcuts import render
from .models import Post
from django.http import HttpResponse
# Create your views here.
post=[
{
"author":'jorge',
'title':'blog post',
'conetnt':'first blog post',
'date post':'august 27,2020'
},
{
"author": 'jorge',
'title': 'blog post',
'conetnt': 'second blog post',
'datepost': 'august 28,2020'
}
]
def home(request):
context={
'posts':Post.objects.all()
}
return render(request,'blog/template.html',context)
def about(request):
return render(request,'blog/about.html',{'title':'About page'}) | from django.shortcuts import render
from .models import Post
from django.http import HttpResponse
# Create your views here.
post=[
{
"author":'jorge',
'title':'blog post',
'conetnt':'first blog post',
'date post':'august 27,2020'
},
{
"author": 'jorge',
'title': 'blog post',
'conetnt': 'second blog post',
'datepost': 'august 28,2020'
}
]
def home(request):
context={
'posts':Post.objects.all()
}
return render(request,'blog/template.html',context)
def about(request):
return render(request,'blog/about.html',{'title':'About page'}) | en | 0.968116 | # Create your views here. | 2.326097 | 2 |
tests/test_io_binary.py | akki2825/CorpusTools | 97 | 6614843 | <reponame>akki2825/CorpusTools<filename>tests/test_io_binary.py
import pytest
import os
from corpustools.corpus.io.binary import download_binary, save_binary, load_binary
def test_save(export_test_dir, unspecified_test_corpus):
save_path = os.path.join(export_test_dir, 'testsave.corpus')
save_binary(unspecified_test_corpus,save_path)
c = load_binary(save_path)
assert(unspecified_test_corpus == c)
#class BinaryCorpusLoadTest(unittest.TestCase):
#def setUp(self):
#self.example_path = os.path.join(TEST_DIR,'example.corpus')
#def test_load(self):
#return
#if not os.path.exists(TEST_DIR):
#return
#c = load_binary(self.example_path)
#example_c = create_unspecified_test_corpus()
#self.assertEqual(c,example_c)
#class BinaryFeatureMatrixSaveTest(unittest.TestCase):
#def setUp(self):
#self.basic_path = os.path.join(TEST_DIR,'test_feature_matrix.txt')
#self.basic_save_path = os.path.join(TEST_DIR,'basic.feature')
#self.missing_segment_path = os.path.join(TEST_DIR,'test_feature_matrix_missing_segment.txt')
#self.missing_save_path = os.path.join(TEST_DIR,'missing_segments.feature')
#def test_save(self):
#if not os.path.exists(TEST_DIR):
#return
#fm = load_feature_matrix_csv('test',self.basic_path,',')
#save_binary(fm,self.basic_save_path)
#saved_fm = load_binary(self.basic_save_path)
#self.assertEqual(fm,saved_fm)
#fm = load_feature_matrix_csv('test',self.missing_segment_path,',')
#save_binary(fm,self.missing_save_path)
#saved_fm = load_binary(self.missing_save_path)
#self.assertEqual(fm,saved_fm)
| import pytest
import os
from corpustools.corpus.io.binary import download_binary, save_binary, load_binary
def test_save(export_test_dir, unspecified_test_corpus):
save_path = os.path.join(export_test_dir, 'testsave.corpus')
save_binary(unspecified_test_corpus,save_path)
c = load_binary(save_path)
assert(unspecified_test_corpus == c)
#class BinaryCorpusLoadTest(unittest.TestCase):
#def setUp(self):
#self.example_path = os.path.join(TEST_DIR,'example.corpus')
#def test_load(self):
#return
#if not os.path.exists(TEST_DIR):
#return
#c = load_binary(self.example_path)
#example_c = create_unspecified_test_corpus()
#self.assertEqual(c,example_c)
#class BinaryFeatureMatrixSaveTest(unittest.TestCase):
#def setUp(self):
#self.basic_path = os.path.join(TEST_DIR,'test_feature_matrix.txt')
#self.basic_save_path = os.path.join(TEST_DIR,'basic.feature')
#self.missing_segment_path = os.path.join(TEST_DIR,'test_feature_matrix_missing_segment.txt')
#self.missing_save_path = os.path.join(TEST_DIR,'missing_segments.feature')
#def test_save(self):
#if not os.path.exists(TEST_DIR):
#return
#fm = load_feature_matrix_csv('test',self.basic_path,',')
#save_binary(fm,self.basic_save_path)
#saved_fm = load_binary(self.basic_save_path)
#self.assertEqual(fm,saved_fm)
#fm = load_feature_matrix_csv('test',self.missing_segment_path,',')
#save_binary(fm,self.missing_save_path)
#saved_fm = load_binary(self.missing_save_path)
#self.assertEqual(fm,saved_fm) | en | 0.335526 | #class BinaryCorpusLoadTest(unittest.TestCase): #def setUp(self): #self.example_path = os.path.join(TEST_DIR,'example.corpus') #def test_load(self): #return #if not os.path.exists(TEST_DIR): #return #c = load_binary(self.example_path) #example_c = create_unspecified_test_corpus() #self.assertEqual(c,example_c) #class BinaryFeatureMatrixSaveTest(unittest.TestCase): #def setUp(self): #self.basic_path = os.path.join(TEST_DIR,'test_feature_matrix.txt') #self.basic_save_path = os.path.join(TEST_DIR,'basic.feature') #self.missing_segment_path = os.path.join(TEST_DIR,'test_feature_matrix_missing_segment.txt') #self.missing_save_path = os.path.join(TEST_DIR,'missing_segments.feature') #def test_save(self): #if not os.path.exists(TEST_DIR): #return #fm = load_feature_matrix_csv('test',self.basic_path,',') #save_binary(fm,self.basic_save_path) #saved_fm = load_binary(self.basic_save_path) #self.assertEqual(fm,saved_fm) #fm = load_feature_matrix_csv('test',self.missing_segment_path,',') #save_binary(fm,self.missing_save_path) #saved_fm = load_binary(self.missing_save_path) #self.assertEqual(fm,saved_fm) | 2.465498 | 2 |
code/src/target.py | tomboulier/dcc-translation | 0 | 6614844 | <reponame>tomboulier/dcc-translation<gh_stars>0
import numpy as np
class RTKEllipse(object):
"""
Class of ellipse where projections are simulated with the module 'RTK', by <NAME>
"""
def __init__(self, params):
self.params = params
def get_density(self):
return self.params.ellipseDensity
def get_angle(self):
return self.params.ellipseAngle
def get_center(self, t):
"""
Since it is moving, the position depends on t
"""
T = self.params.T
v = self.params.v
v2 = self.params.v2
return [self.params.ellipseCenterX - (t + T / 2) * v,
self.params.ellipseCenterY - (t + T / 2) * v2,
0]
def get_axis(self):
return [self.params.ellipseSemiAxisX,
self.params.ellipseSemiAxisY,
self.params.ellipseSemiAxisY]
def compute_projection(self, t, source, detector):
"""
Simulate fan-beam acquisition of the object with given
source and detector, at time t
"""
import SimpleRTK as srtk
# create geometry of the source at time t
geometry = source.get_geometry(t)
# compute intersection of fan-beam with ellipse
empty_image_detector = detector.get_empty_image()
rei = srtk.RayEllipsoidIntersectionImageFilter()
rei.SetDensity(self.get_density())
rei.SetAngle(self.get_angle())
rei.SetCenter(self.get_center(t)) #
rei.SetAxis(self.get_axis())
rei.SetGeometry(geometry)
reiImage = rei.Execute(empty_image_detector)
return srtk.GetArrayFromImage(reiImage)[0, 0, :]
class AnalyticalEllipse(object):
"""
Class of ellipse where projections are computed according to analytical (i.e. exact)
formulas. Hence, this is not a simulation but a computation.
"""
def __init__(self, params):
self.params = params
def get_density(self):
return self.params.ellipseDensity
def get_angle(self):
return self.params.ellipseAngle
def get_center(self, t):
"""
Since it is moving, the position depends on t
"""
T = self.params.T
v = self.params.v
v2 = self.params.v2
return [self.params.ellipseCenterX + (t + T / 2) * v,
self.params.ellipseCenterY + (t + T / 2) * v2,
0]
def get_axis(self):
return [self.params.ellipseSemiAxisX,
self.params.ellipseSemiAxisY,
self.params.ellipseSemiAxisY]
def compute_projection(self, t, source, detector):
"""
Simulate fan-beam acquisition of the object with given
source and detector, at time t
"""
N = self.params.imageSize
results = np.zeros(N)
# general parameters
alpha = self.params.get_alpha_range()
omega = self.params.omega / 360 * 2 * np.pi
# ellipse parameters
x, y, _ = self.get_center(t)
a, b, _ = self.get_axis()
if (a != b):
raise ValueError("Ellipse is not a circle (the analytical formula only works with circle)", a, b)
s1, s2 = source.get_position(t)
# for i in np.arange(N):
# # i is the number of the pixel in the image printed on the detector
# # there is no "resolution" parameter, meaning that there is 1 pixel
# # per millimeter
# # TODO : add this parameter?
# # phi is the angle between the beam and the y-axis
# phi = omega*t + alpha[i]
# # computation of the distance between the center of the circle
# # and the beam
# dist = np.abs( (s1-x)*np.cos(phi) + (s2-y)*np.sin(phi) )
# # stores in the array
# if dist > a:
# results[i] = 0
# else:
# results[i] = 2 * np.sqrt(a**2 - dist**2)
# phi is the angle between the beam and the y-axis
phi = omega * t + alpha
# distance between the center of the circle and the beam
dist = np.abs((s1 - x) * np.cos(phi) + (s2 - y) * np.sin(phi))
# results = (dist<a) * 2 * np.sqrt(a**2 - dist**2)
# [x if x < 5 else 0 for x in np.arange(10)]
# ipdb.set_trace()
results[dist < a] = (2 * np.sqrt(a ** 2 - dist ** 2))[dist < a]
return self.get_density() * results | import numpy as np
class RTKEllipse(object):
"""
Class of ellipse where projections are simulated with the module 'RTK', by <NAME>
"""
def __init__(self, params):
self.params = params
def get_density(self):
return self.params.ellipseDensity
def get_angle(self):
return self.params.ellipseAngle
def get_center(self, t):
"""
Since it is moving, the position depends on t
"""
T = self.params.T
v = self.params.v
v2 = self.params.v2
return [self.params.ellipseCenterX - (t + T / 2) * v,
self.params.ellipseCenterY - (t + T / 2) * v2,
0]
def get_axis(self):
return [self.params.ellipseSemiAxisX,
self.params.ellipseSemiAxisY,
self.params.ellipseSemiAxisY]
def compute_projection(self, t, source, detector):
"""
Simulate fan-beam acquisition of the object with given
source and detector, at time t
"""
import SimpleRTK as srtk
# create geometry of the source at time t
geometry = source.get_geometry(t)
# compute intersection of fan-beam with ellipse
empty_image_detector = detector.get_empty_image()
rei = srtk.RayEllipsoidIntersectionImageFilter()
rei.SetDensity(self.get_density())
rei.SetAngle(self.get_angle())
rei.SetCenter(self.get_center(t)) #
rei.SetAxis(self.get_axis())
rei.SetGeometry(geometry)
reiImage = rei.Execute(empty_image_detector)
return srtk.GetArrayFromImage(reiImage)[0, 0, :]
class AnalyticalEllipse(object):
"""
Class of ellipse where projections are computed according to analytical (i.e. exact)
formulas. Hence, this is not a simulation but a computation.
"""
def __init__(self, params):
self.params = params
def get_density(self):
return self.params.ellipseDensity
def get_angle(self):
return self.params.ellipseAngle
def get_center(self, t):
"""
Since it is moving, the position depends on t
"""
T = self.params.T
v = self.params.v
v2 = self.params.v2
return [self.params.ellipseCenterX + (t + T / 2) * v,
self.params.ellipseCenterY + (t + T / 2) * v2,
0]
def get_axis(self):
return [self.params.ellipseSemiAxisX,
self.params.ellipseSemiAxisY,
self.params.ellipseSemiAxisY]
def compute_projection(self, t, source, detector):
"""
Simulate fan-beam acquisition of the object with given
source and detector, at time t
"""
N = self.params.imageSize
results = np.zeros(N)
# general parameters
alpha = self.params.get_alpha_range()
omega = self.params.omega / 360 * 2 * np.pi
# ellipse parameters
x, y, _ = self.get_center(t)
a, b, _ = self.get_axis()
if (a != b):
raise ValueError("Ellipse is not a circle (the analytical formula only works with circle)", a, b)
s1, s2 = source.get_position(t)
# for i in np.arange(N):
# # i is the number of the pixel in the image printed on the detector
# # there is no "resolution" parameter, meaning that there is 1 pixel
# # per millimeter
# # TODO : add this parameter?
# # phi is the angle between the beam and the y-axis
# phi = omega*t + alpha[i]
# # computation of the distance between the center of the circle
# # and the beam
# dist = np.abs( (s1-x)*np.cos(phi) + (s2-y)*np.sin(phi) )
# # stores in the array
# if dist > a:
# results[i] = 0
# else:
# results[i] = 2 * np.sqrt(a**2 - dist**2)
# phi is the angle between the beam and the y-axis
phi = omega * t + alpha
# distance between the center of the circle and the beam
dist = np.abs((s1 - x) * np.cos(phi) + (s2 - y) * np.sin(phi))
# results = (dist<a) * 2 * np.sqrt(a**2 - dist**2)
# [x if x < 5 else 0 for x in np.arange(10)]
# ipdb.set_trace()
results[dist < a] = (2 * np.sqrt(a ** 2 - dist ** 2))[dist < a]
return self.get_density() * results | en | 0.83135 | Class of ellipse where projections are simulated with the module 'RTK', by <NAME> Since it is moving, the position depends on t Simulate fan-beam acquisition of the object with given source and detector, at time t # create geometry of the source at time t # compute intersection of fan-beam with ellipse # Class of ellipse where projections are computed according to analytical (i.e. exact) formulas. Hence, this is not a simulation but a computation. Since it is moving, the position depends on t Simulate fan-beam acquisition of the object with given source and detector, at time t # general parameters # ellipse parameters # for i in np.arange(N): # # i is the number of the pixel in the image printed on the detector # # there is no "resolution" parameter, meaning that there is 1 pixel # # per millimeter # # TODO : add this parameter? # # phi is the angle between the beam and the y-axis # phi = omega*t + alpha[i] # # computation of the distance between the center of the circle # # and the beam # dist = np.abs( (s1-x)*np.cos(phi) + (s2-y)*np.sin(phi) ) # # stores in the array # if dist > a: # results[i] = 0 # else: # results[i] = 2 * np.sqrt(a**2 - dist**2) # phi is the angle between the beam and the y-axis # distance between the center of the circle and the beam # results = (dist<a) * 2 * np.sqrt(a**2 - dist**2) # [x if x < 5 else 0 for x in np.arange(10)] # ipdb.set_trace() | 2.955117 | 3 |
login.py | Gwk7/test | 0 | 6614845 | <reponame>Gwk7/test
num=0
num=1
num=2
| num=0
num=1
num=2 | none | 1 | 1.616984 | 2 | |
server/commServer.py | daqbroker/daqbroker | 1 | 6614846 | import time
import zmq
import multiprocessing
import json
import traceback
import sys
import concurrent.futures
import daqbrokerDatabase
import daqbrokerSettings
from sqlalchemy import text
from sqlalchemy import bindparam
from sqlalchemy import func
from sqlalchemy.orm import sessionmaker, scoped_session
from supportFuncs import *
def collector(servers, port, logPort, backupInfo, localPath):
""" Communications server main process loop. This process is responsible for listening for inbound DAQBroker client communications and handling the sent requests. Each client request will have a specific node identifier associated with it as well as an order to be fulfilled.
:param servers: (`multiporcessing.Manager().list`_) process-shared list of database servers under monitoring by DAQBroker. They are used here to update the state of instruments in the databases
:param port: (Integer) Port for network communications
:param logPort: (Integer) The local event logging port. See :py:mod:`logServer` for more information
:param backupInfo: (`multiporcessing.Manager().list`_) process-shared dict with relevant backup information
.. _multiporcessing.Manager().list: https://docs.python.org/2/library/multiprocessing.html#sharing-state-between-processes
.. warning::
This is a long running process and blocks execution of the main task, it should therefore be called on a separate process.
"""
manager = multiprocessing.Manager()
context = zmq.Context()
theLogSocket = context.socket(zmq.REQ)
theLogSocket.connect("tcp://127.0.0.1:" + str(logPort))
toSend = {'req': 'LOG', 'type': 'INFO', 'process': 'COLLECTOR',
'message': "started collector server", 'method': 'collector'}
theLogSocket.send(json.dumps(toSend).encode())
theLogSocket.close()
results_receiver = context.socket(zmq.PULL)
results_receiver.bind("tcp://*:" + str(port))
workerpool = concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count() * 2) # Using threads
lockList = manager.list() # Make a structure that is dependent of database
timeStart = time.time()
BACKUPPATH = ''
IMPORTPATH = ''
ADDONPATH = ''
daqbrokerSettings.setupLocalVars(localPath)
newPaths = checkPaths(context, BACKUPPATH, IMPORTPATH, ADDONPATH, logPort)
paths = {"BACKUPPATH": newPaths[0], "IMPORTPATH": newPaths[1], "ADDONPATH": newPaths[2]}
sessions = {}
while True:
try:
result = results_receiver.recv_json()
if 'order' in result:
if result["order"] == "METASYNCOVER": # Lock the instrument for parsing
#print(result)
for server in servers:
if server["server"] == result["server"] and server["engine"] == result["engine"]:
if server["server"]+server["engine"] not in sessions:
sessions[server["server"]+server["engine"]]={}
else:
if result["database"] not in sessions[server["server"]+server["engine"]]:
serverURL = server["engine"] + "://" + server["username"] + ":" + \
server["password"] + "@" + server["server"] + "/daqbro_" + result["database"]
eng = create_engine(serverURL, connect_args={'connect_timeout': 120}, isolation_level ="READ_COMMITTED")
sessions[server["server"] + server["engine"]][result["database"]] = {'session': scoped_session(sessionmaker(bind=eng)), 'engine': eng}
if server["server"]+server["engine"] in sessions:
if result["database"] in sessions[server["server"]+server["engine"]]:
daqbrokerDatabase.daqbroker_database.metadata.reflect(bind=sessions[server["server"] + server["engine"]][result["database"]]["engine"])
workerpool.submit(
backupOver,
sessions[server["server"] + server["engine"]][result["database"]]["session"],
server,
result["database"],
result["metaid"],
result["instrument"],
logPort,
lockList,
paths)
if time.time() - timeStart > 10:
BACKUPPATH = ''
IMPORTPATH = ''
ADDONPATH = ''
newPaths = checkPaths(context, BACKUPPATH, IMPORTPATH, ADDONPATH, logPort)
paths = {"BACKUPPATH": newPaths[0], "IMPORTPATH": newPaths[1], "ADDONPATH": newPaths[2]}
except Exception as e:
_, _, tb = sys.exc_info()
tbResult = traceback.format_list(traceback.extract_tb(tb)[-1:])[-1]
filename = tbResult.split(',')[0].replace('File', '').replace('"', '')
lineno = tbResult.split(',')[1].replace('line', '')
funname = tbResult.split(',')[2].replace('\n', '').replace(' in ', '')
line = str(e)
theLogSocket = context.socket(zmq.REQ)
theLogSocket.connect("tcp://127.0.0.1:" + str(logPort))
toSend = {
'req': 'LOG',
'type': 'ERROR',
'process': 'COLLECTOR',
'message': str(e),
'filename': filename,
'lineno': lineno,
'funname': funname,
'line': line}
theLogSocket.send(json.dumps(toSend).encode())
theLogSocket.close()
# Should be able to protect with string from xsfr (TODO LATER)
def backupOver(scopedSession, server, database, metaid, instrument, logPort, lockList, paths):
""" Supporting function that updates the state of the database when a remote instrument's data backup is completed
:param server: (Dict) server dictionary, contains the address and the database engine
:param database: (String) database name
:param metaid: (Integer) unique data source identifier
:param instrument: (String) instrument name
:param logPort: (String) database server address
:param lockList: (String) database server address
:param paths: (`multiporcessing.Manager().list`_) database server address
.. _multiporcessing.Manager().list: https://docs.python.org/2/library/multiprocessing.html#sharing-state-between-processes
"""
try:
session = scopedSession()
theMeta = session.query(daqbrokerDatabase.instmeta).filter_by(metaid=metaid).first()
theMeta.sentRequest=False
#session.commit()
if theMeta:
theMetaRemarks = json.loads(theMeta.remarks)
theParsingRemarks = json.loads(theMeta.parsing[0].remarks)
if theMetaRemarks['toParse']:
parseThis = True
thisIdx = -1
notFound = True
for q, el in enumerate(lockList):
# Found the entry, must alter this
#print(el)
if el['instrument'] == instrument and el["meta"] == theMeta.name and el["database"] == database and el["server"] == server["server"]:
if el['locked']:
parseThis = False
notFound = False
thisIdx = q
break
if notFound:
lockList.append({'server': server["server"], 'database': database,
'instrument': instrument, 'meta': theMeta.name, 'locked': False})
if parseThis:
lockList[thisIdx] = {
'server': server["server"],
'database': database,
'instrument': instrument,
'meta': theMeta.name,
'locked': True}
#print("AMPARSING",instrument,metaid) #GOTTA START HERE NOW TO THE PARSEMETA FUNCTION
#theTable_data = daqbrokerDatabase.daqbroker_database.metadata.tables[instrument + "_data"]
#print(theTable_data.c)
parseMeta(server["server"], database, {
"Name": instrument, "instid": theMeta.meta.instid}, theMeta, paths, logPort, lockList, session)
session.commit()
except BaseException:
traceback.print_exc()
session.rollback()
poop = "poop"
| import time
import zmq
import multiprocessing
import json
import traceback
import sys
import concurrent.futures
import daqbrokerDatabase
import daqbrokerSettings
from sqlalchemy import text
from sqlalchemy import bindparam
from sqlalchemy import func
from sqlalchemy.orm import sessionmaker, scoped_session
from supportFuncs import *
def collector(servers, port, logPort, backupInfo, localPath):
""" Communications server main process loop. This process is responsible for listening for inbound DAQBroker client communications and handling the sent requests. Each client request will have a specific node identifier associated with it as well as an order to be fulfilled.
:param servers: (`multiporcessing.Manager().list`_) process-shared list of database servers under monitoring by DAQBroker. They are used here to update the state of instruments in the databases
:param port: (Integer) Port for network communications
:param logPort: (Integer) The local event logging port. See :py:mod:`logServer` for more information
:param backupInfo: (`multiporcessing.Manager().list`_) process-shared dict with relevant backup information
.. _multiporcessing.Manager().list: https://docs.python.org/2/library/multiprocessing.html#sharing-state-between-processes
.. warning::
This is a long running process and blocks execution of the main task, it should therefore be called on a separate process.
"""
manager = multiprocessing.Manager()
context = zmq.Context()
theLogSocket = context.socket(zmq.REQ)
theLogSocket.connect("tcp://127.0.0.1:" + str(logPort))
toSend = {'req': 'LOG', 'type': 'INFO', 'process': 'COLLECTOR',
'message': "started collector server", 'method': 'collector'}
theLogSocket.send(json.dumps(toSend).encode())
theLogSocket.close()
results_receiver = context.socket(zmq.PULL)
results_receiver.bind("tcp://*:" + str(port))
workerpool = concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count() * 2) # Using threads
lockList = manager.list() # Make a structure that is dependent of database
timeStart = time.time()
BACKUPPATH = ''
IMPORTPATH = ''
ADDONPATH = ''
daqbrokerSettings.setupLocalVars(localPath)
newPaths = checkPaths(context, BACKUPPATH, IMPORTPATH, ADDONPATH, logPort)
paths = {"BACKUPPATH": newPaths[0], "IMPORTPATH": newPaths[1], "ADDONPATH": newPaths[2]}
sessions = {}
while True:
try:
result = results_receiver.recv_json()
if 'order' in result:
if result["order"] == "METASYNCOVER": # Lock the instrument for parsing
#print(result)
for server in servers:
if server["server"] == result["server"] and server["engine"] == result["engine"]:
if server["server"]+server["engine"] not in sessions:
sessions[server["server"]+server["engine"]]={}
else:
if result["database"] not in sessions[server["server"]+server["engine"]]:
serverURL = server["engine"] + "://" + server["username"] + ":" + \
server["password"] + "@" + server["server"] + "/daqbro_" + result["database"]
eng = create_engine(serverURL, connect_args={'connect_timeout': 120}, isolation_level ="READ_COMMITTED")
sessions[server["server"] + server["engine"]][result["database"]] = {'session': scoped_session(sessionmaker(bind=eng)), 'engine': eng}
if server["server"]+server["engine"] in sessions:
if result["database"] in sessions[server["server"]+server["engine"]]:
daqbrokerDatabase.daqbroker_database.metadata.reflect(bind=sessions[server["server"] + server["engine"]][result["database"]]["engine"])
workerpool.submit(
backupOver,
sessions[server["server"] + server["engine"]][result["database"]]["session"],
server,
result["database"],
result["metaid"],
result["instrument"],
logPort,
lockList,
paths)
if time.time() - timeStart > 10:
BACKUPPATH = ''
IMPORTPATH = ''
ADDONPATH = ''
newPaths = checkPaths(context, BACKUPPATH, IMPORTPATH, ADDONPATH, logPort)
paths = {"BACKUPPATH": newPaths[0], "IMPORTPATH": newPaths[1], "ADDONPATH": newPaths[2]}
except Exception as e:
_, _, tb = sys.exc_info()
tbResult = traceback.format_list(traceback.extract_tb(tb)[-1:])[-1]
filename = tbResult.split(',')[0].replace('File', '').replace('"', '')
lineno = tbResult.split(',')[1].replace('line', '')
funname = tbResult.split(',')[2].replace('\n', '').replace(' in ', '')
line = str(e)
theLogSocket = context.socket(zmq.REQ)
theLogSocket.connect("tcp://127.0.0.1:" + str(logPort))
toSend = {
'req': 'LOG',
'type': 'ERROR',
'process': 'COLLECTOR',
'message': str(e),
'filename': filename,
'lineno': lineno,
'funname': funname,
'line': line}
theLogSocket.send(json.dumps(toSend).encode())
theLogSocket.close()
# Should be able to protect with string from xsfr (TODO LATER)
def backupOver(scopedSession, server, database, metaid, instrument, logPort, lockList, paths):
""" Supporting function that updates the state of the database when a remote instrument's data backup is completed
:param server: (Dict) server dictionary, contains the address and the database engine
:param database: (String) database name
:param metaid: (Integer) unique data source identifier
:param instrument: (String) instrument name
:param logPort: (String) database server address
:param lockList: (String) database server address
:param paths: (`multiporcessing.Manager().list`_) database server address
.. _multiporcessing.Manager().list: https://docs.python.org/2/library/multiprocessing.html#sharing-state-between-processes
"""
try:
session = scopedSession()
theMeta = session.query(daqbrokerDatabase.instmeta).filter_by(metaid=metaid).first()
theMeta.sentRequest=False
#session.commit()
if theMeta:
theMetaRemarks = json.loads(theMeta.remarks)
theParsingRemarks = json.loads(theMeta.parsing[0].remarks)
if theMetaRemarks['toParse']:
parseThis = True
thisIdx = -1
notFound = True
for q, el in enumerate(lockList):
# Found the entry, must alter this
#print(el)
if el['instrument'] == instrument and el["meta"] == theMeta.name and el["database"] == database and el["server"] == server["server"]:
if el['locked']:
parseThis = False
notFound = False
thisIdx = q
break
if notFound:
lockList.append({'server': server["server"], 'database': database,
'instrument': instrument, 'meta': theMeta.name, 'locked': False})
if parseThis:
lockList[thisIdx] = {
'server': server["server"],
'database': database,
'instrument': instrument,
'meta': theMeta.name,
'locked': True}
#print("AMPARSING",instrument,metaid) #GOTTA START HERE NOW TO THE PARSEMETA FUNCTION
#theTable_data = daqbrokerDatabase.daqbroker_database.metadata.tables[instrument + "_data"]
#print(theTable_data.c)
parseMeta(server["server"], database, {
"Name": instrument, "instid": theMeta.meta.instid}, theMeta, paths, logPort, lockList, session)
session.commit()
except BaseException:
traceback.print_exc()
session.rollback()
poop = "poop"
| en | 0.726328 | Communications server main process loop. This process is responsible for listening for inbound DAQBroker client communications and handling the sent requests. Each client request will have a specific node identifier associated with it as well as an order to be fulfilled. :param servers: (`multiporcessing.Manager().list`_) process-shared list of database servers under monitoring by DAQBroker. They are used here to update the state of instruments in the databases :param port: (Integer) Port for network communications :param logPort: (Integer) The local event logging port. See :py:mod:`logServer` for more information :param backupInfo: (`multiporcessing.Manager().list`_) process-shared dict with relevant backup information .. _multiporcessing.Manager().list: https://docs.python.org/2/library/multiprocessing.html#sharing-state-between-processes .. warning:: This is a long running process and blocks execution of the main task, it should therefore be called on a separate process. # Using threads # Make a structure that is dependent of database # Lock the instrument for parsing #print(result) # Should be able to protect with string from xsfr (TODO LATER) Supporting function that updates the state of the database when a remote instrument's data backup is completed :param server: (Dict) server dictionary, contains the address and the database engine :param database: (String) database name :param metaid: (Integer) unique data source identifier :param instrument: (String) instrument name :param logPort: (String) database server address :param lockList: (String) database server address :param paths: (`multiporcessing.Manager().list`_) database server address .. 
_multiporcessing.Manager().list: https://docs.python.org/2/library/multiprocessing.html#sharing-state-between-processes #session.commit() # Found the entry, must alter this #print(el) #print("AMPARSING",instrument,metaid) #GOTTA START HERE NOW TO THE PARSEMETA FUNCTION #theTable_data = daqbrokerDatabase.daqbroker_database.metadata.tables[instrument + "_data"] #print(theTable_data.c) | 2.198109 | 2 |
seg/seg.py | manuel-castro/reco-suave | 0 | 6614847 | import argparse
from cloudvolume import CloudVolume
from cloudvolume.lib import Bbox, Vec
import numpy as np
import waterz
from taskqueue import LocalTaskQueue
import igneous.task_creation as tc
from time import strftime
def vec3(s):
try:
z, y, x = map(int, s.split(','))
return (z,y,x)
except:
raise argparse.ArgumentTypeError("Vec3 must be z,y,x")
def segment(args):
"""Run segmentation on contiguous block of affinities from CV
Args:
args: ArgParse object from main
"""
bbox_start = Vec(*args.bbox_start)
bbox_size = Vec(*args.bbox_size)
chunk_size = Vec(*args.chunk_size)
bbox = Bbox(bbox_start, bbox_start + bbox_size)
src_cv = CloudVolume(args.src_path, fill_missing=True,
parallel=args.parallel)
info = CloudVolume.create_new_info(
num_channels = 1,
layer_type = 'segmentation',
data_type = 'uint64',
encoding = 'raw',
resolution = src_cv.info['scales'][args.mip]['resolution'],
voxel_offset = bbox_start,
chunk_size = chunk_size,
volume_size = bbox_size,
mesh = 'mesh_mip_{}_err_{}'.format(args.mip,
args.max_simplification_error)
)
dst_cv = CloudVolume(args.dst_path, info=info, parallel=args.parallel)
dst_cv.provenance.description = 'ws+agg using waterz'
dst_cv.provenance.processing.append({
'method': {
'task': 'watershed+agglomeration',
'src_path': args.src_path,
'dst_path': args.dst_path,
'mip': args.mip,
'shape': bbox_size.tolist(),
'bounds': [
bbox.minpt.tolist(),
bbox.maxpt.tolist(),
],
},
'by': args.owner,
'date': strftime('%Y-%m-%d%H:%M %Z'),
})
dst_cv.provenance.owners = [args.owner]
dst_cv.commit_info()
dst_cv.commit_provenance()
if args.segment:
print('Downloading affinities')
aff = src_cv[bbox.to_slices()]
aff = np.transpose(aff, (3,0,1,2))
aff = np.ascontiguousarray(aff, dtype=np.float32)
thresholds = [args.threshold]
print('Starting ws+agg')
seg_gen = waterz.agglomerate(aff, thresholds)
seg = next(seg_gen)
print('Deleting affinities')
del aff
print('Uploading segmentation')
dst_cv[bbox.to_slices()] = seg
if args.mesh:
print('Starting meshing')
with LocalTaskQueue(parallel=args.parallel) as tq:
tasks = tc.create_meshing_tasks(layer_path=args.dst_path,
mip=args.mip,
shape=args.chunk_size,
simplification=True,
max_simplification_error=args.max_simplification_error,
progress=True)
tq.insert_all(tasks)
tasks = tc.create_mesh_manifest_tasks(layer_path=args.dst_path,
magnitude=args.magnitude)
tq.insert_all(tasks)
print("Meshing complete")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--src_path', type=str,
help='CloudVolume path for affinities')
parser.add_argument('--dst_path', type=str,
help='CloudVolume path to store the segmentation')
parser.add_argument('--bbox_start', type=vec3,
help='bbox origin, int list, commas & no space, e.g. x,y,z')
parser.add_argument('--bbox_size', type=vec3,
help='bbox size, int list, commas & no space, e.g. x,y,z')
parser.add_argument('--mip', type=int, default=0,
help='int MIP level for affinities')
parser.add_argument('--parallel', type=int, default=1,
help='int number of processes to use for parallel ops')
parser.add_argument('--threshold', type=float, default=0.7,
help='float for agglomeration threshold')
parser.add_argument('--chunk_size', type=vec3,
help='cloudvolume chunk, int list, commas & no space, e.g. x,y,z')
parser.add_argument('--owner', type=str,
help='email address for cloudvolume provenance')
parser.add_argument('--segment',
help='run segmentation on affinities',
action='store_true')
parser.add_argument('--mesh',
help='mesh existing segmentation',
action='store_true')
parser.add_argument('--max_simplification_error', type=int, default=40,
help='int for mesh simplification')
parser.add_argument('--magnitude', type=int, default=4,
help='int for magnitude used in igneous mesh manifest')
args = parser.parse_args()
segment(args)
| import argparse
from cloudvolume import CloudVolume
from cloudvolume.lib import Bbox, Vec
import numpy as np
import waterz
from taskqueue import LocalTaskQueue
import igneous.task_creation as tc
from time import strftime
def vec3(s):
try:
z, y, x = map(int, s.split(','))
return (z,y,x)
except:
raise argparse.ArgumentTypeError("Vec3 must be z,y,x")
def segment(args):
"""Run segmentation on contiguous block of affinities from CV
Args:
args: ArgParse object from main
"""
bbox_start = Vec(*args.bbox_start)
bbox_size = Vec(*args.bbox_size)
chunk_size = Vec(*args.chunk_size)
bbox = Bbox(bbox_start, bbox_start + bbox_size)
src_cv = CloudVolume(args.src_path, fill_missing=True,
parallel=args.parallel)
info = CloudVolume.create_new_info(
num_channels = 1,
layer_type = 'segmentation',
data_type = 'uint64',
encoding = 'raw',
resolution = src_cv.info['scales'][args.mip]['resolution'],
voxel_offset = bbox_start,
chunk_size = chunk_size,
volume_size = bbox_size,
mesh = 'mesh_mip_{}_err_{}'.format(args.mip,
args.max_simplification_error)
)
dst_cv = CloudVolume(args.dst_path, info=info, parallel=args.parallel)
dst_cv.provenance.description = 'ws+agg using waterz'
dst_cv.provenance.processing.append({
'method': {
'task': 'watershed+agglomeration',
'src_path': args.src_path,
'dst_path': args.dst_path,
'mip': args.mip,
'shape': bbox_size.tolist(),
'bounds': [
bbox.minpt.tolist(),
bbox.maxpt.tolist(),
],
},
'by': args.owner,
'date': strftime('%Y-%m-%d%H:%M %Z'),
})
dst_cv.provenance.owners = [args.owner]
dst_cv.commit_info()
dst_cv.commit_provenance()
if args.segment:
print('Downloading affinities')
aff = src_cv[bbox.to_slices()]
aff = np.transpose(aff, (3,0,1,2))
aff = np.ascontiguousarray(aff, dtype=np.float32)
thresholds = [args.threshold]
print('Starting ws+agg')
seg_gen = waterz.agglomerate(aff, thresholds)
seg = next(seg_gen)
print('Deleting affinities')
del aff
print('Uploading segmentation')
dst_cv[bbox.to_slices()] = seg
if args.mesh:
print('Starting meshing')
with LocalTaskQueue(parallel=args.parallel) as tq:
tasks = tc.create_meshing_tasks(layer_path=args.dst_path,
mip=args.mip,
shape=args.chunk_size,
simplification=True,
max_simplification_error=args.max_simplification_error,
progress=True)
tq.insert_all(tasks)
tasks = tc.create_mesh_manifest_tasks(layer_path=args.dst_path,
magnitude=args.magnitude)
tq.insert_all(tasks)
print("Meshing complete")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--src_path', type=str,
help='CloudVolume path for affinities')
parser.add_argument('--dst_path', type=str,
help='CloudVolume path to store the segmentation')
parser.add_argument('--bbox_start', type=vec3,
help='bbox origin, int list, commas & no space, e.g. x,y,z')
parser.add_argument('--bbox_size', type=vec3,
help='bbox size, int list, commas & no space, e.g. x,y,z')
parser.add_argument('--mip', type=int, default=0,
help='int MIP level for affinities')
parser.add_argument('--parallel', type=int, default=1,
help='int number of processes to use for parallel ops')
parser.add_argument('--threshold', type=float, default=0.7,
help='float for agglomeration threshold')
parser.add_argument('--chunk_size', type=vec3,
help='cloudvolume chunk, int list, commas & no space, e.g. x,y,z')
parser.add_argument('--owner', type=str,
help='email address for cloudvolume provenance')
parser.add_argument('--segment',
help='run segmentation on affinities',
action='store_true')
parser.add_argument('--mesh',
help='mesh existing segmentation',
action='store_true')
parser.add_argument('--max_simplification_error', type=int, default=40,
help='int for mesh simplification')
parser.add_argument('--magnitude', type=int, default=4,
help='int for magnitude used in igneous mesh manifest')
args = parser.parse_args()
segment(args)
| en | 0.665804 | Run segmentation on contiguous block of affinities from CV Args: args: ArgParse object from main | 2.454425 | 2 |
apollo_ex3.py | Mateus-Colaco/Apollo-Ex3 | 0 | 6614848 | # -*- coding: utf-8 -*-
"""Apollo_Ex3.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1cYBFPb_gDQLZntf5S8lkQjKH87FnSjCN
"""
# Commented out IPython magic to ensure Python compatibility.
!git clone https://github.com/ultralytics/yolov5 # clone repo
# %cd yolov5
!pip install -qr requirements.txt # install dependencies (ignore errors)
from google.colab import drive
drive.mount('/content/drive')
import cv2
import torch
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from PIL import Image, ImageDraw,ImageFont
class Detec:
def __init__(self,video_path,shrinked_video_path,track,font_path,final_video_path):
self.track = track
self.final_video_path = final_video_path #directory to save final video
self.font = ImageFont.truetype(font_path, 20)
self.cap = cv2.VideoCapture(video_path)
self.shrinked_video_path = shrinked_video_path
fourcc_1 = cv2.VideoWriter_fourcc(*'mp4v')
self.size = (960,800)
self.fps = 25
self.out = cv2.VideoWriter(shrinked_video_path,fourcc_1,self.fps,self.size)
while True:
success,frame = self.cap.read()
if not success:
break
else:
frame_resized = cv2.resize(frame,(size_X,size_Y),fx=0,fy=0)
self.out.write(frame_resized)
self.cap.release()
self.out.release()
return
# |#################################|
###############################################################| HUMAN DETECTION |###############################################################
# |#################################|
def Human(self):
self.Human_detection = True
self.bgr_color = (0,255,255)
self.human_coordinates = []
self.human_limits_list = []
self.first_append = 0
self.Human_frames_to_video = list()
self.human_model = torch.hub.load('ultralytics/yolov5', 'yolov5x')
self.human_model.classes = [0]
self.new_cap = cv2.VideoCapture(self.shrinked_video_path)
while True:
success,frames = self.new_cap.read()
if not success:
break
self.human_bounding_box = self.human_model(frames)
self.human_detec_number = self.human_bounding_box.pandas().xywh
max_and_min = (self.human_bounding_box.pandas().xyxy[0]) # [xmin, ymin, xmax, ymax, confidence, class, name] all bounding boxes
if self.track:
img = Image.fromarray(self.human_bounding_box.render()[0])
draw = ImageDraw.Draw(img)
for row in range(max_and_min['xmin'].count()):
x_c = ( (max_and_min.iat[row,2] - max_and_min.iat[row,0]) / 2) + max_and_min.iat[row,0]
y_c = ( (max_and_min.iat[row,3] - max_and_min.iat[row,1]) / 2) + max_and_min.iat[row,1]
x_min_plot = max_and_min.iat[row,0]
x_max = 1.25*x_c
x_min = 0.75*x_c
y_max = 1.22*y_c
y_min = 0.8*y_c
if self.first_append==0:
self.human_limits_list.append( (x_max, x_min, y_max, y_min) )
self.human_coordinates.append( (x_c,y_c) )
self.first_append = 1
else:
for index,element in enumerate(self.human_limits_list):
x_max_test, x_min_test, y_max_test, y_min_test = element
text=str(int(index))
#if limits conditions True, new coordinate to existent ID
if (x_c < x_max_test) and (x_c > x_min_test) and (y_c < y_max_test) and (y_c > y_min_test):
if (x_c,y_c) in self.human_coordinates:
None
else:
self.human_limits_list[index] = (x_max, x_min, y_max, y_min)
self.human_coordinates[index] = (x_c,y_c)
draw.text( (x_min_plot ,y_c ), "ID."+ str(self.human_coordinates.index((x_c,y_c))),fill=self.bgr_color,font = self.font)
if (x_c,y_c) in self.human_coordinates:
None
else:
self.human_limits_list.append((x_max, x_min, y_max, y_min))
self.human_coordinates.append((x_c,y_c))
draw.text( (x_min_plot ,y_c ), "ID."+ str(self.human_coordinates.index((x_c,y_c))),fill=self.bgr_color,font = self.font)
human_img = np.asarray(img)
text_top = 'People Detected:' + str(np.shape(self.human_detec_number)[1])
cv2.putText(human_img,text_top,(50,50),cv2.FONT_HERSHEY_DUPLEX, 1.1, (255,0,255), 1, cv2.LINE_AA)
self.Human_frames_to_video.append(human_img)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
if self.Human_detection == True:
self.human_final_video = cv2.VideoWriter(self.final_video_path + "Track_Human.mp4",fourcc,fps,(size_X,size_Y))
for frame in self.Human_frames_to_video:
self.human_final_video.write(frame)
self.human_final_video.release()
return
# |#################################|
###############################################################| RIFLE DETECTION |#################################################################
# |#################################|
def Rifle(self,weights_path):
self.Rifle_detection = True
self.bgr_color = (0,255,255)
self.Rifle_coordinates = []
self.Rifle_limits_list = []
self.Rifle_first_append = 0
self.Rifle_frames_to_video = list()
self.Rifle_model = torch.hub.load('ultralytics/yolov5', 'custom',weights_path)
self.new_cap = cv2.VideoCapture(self.shrinked_video_path)
while True:
success,frames = self.new_cap.read()
if not success:
break
self.Rifle_bounding_box = self.Rifle_model(frames)
self.Rifle_detec_number = self.Rifle_bounding_box.pandas().xywh
max_and_min = (self.Rifle_bounding_box.pandas().xyxy[0]) # [xmin, ymin, xmax, ymax, confidence, class, name] all bounding boxes
if self.track:
img = Image.fromarray(self.Rifle_bounding_box.render()[0])
draw = ImageDraw.Draw(img)
for row in range(max_and_min['xmin'].count()):
x_c = ( (max_and_min.iat[row,2] - max_and_min.iat[row,0]) / 2) + max_and_min.iat[row,0]
y_c = ( (max_and_min.iat[row,3] - max_and_min.iat[row,1]) / 2) + max_and_min.iat[row,1]
x_min_plot = max_and_min.iat[row,0]
x_max = 1.1*x_c
x_min = 0.7*x_c
y_max = 1.06*y_c
y_min = 0.9*y_c
if self.Rifle_first_append==0:
self.Rifle_limits_list.append( (x_max, x_min, y_max, y_min) )
self.Rifle_coordinates.append( (x_c,y_c) )
self.Rifle_first_append = 1
else:
for index,element in enumerate(self.Rifle_limits_list):
x_max_test, x_min_test, y_max_test, y_min_test = element
text=str(int(index))
#if limits conditions True, new coordinate to existent ID
if (x_c < x_max_test) and (x_c > x_min_test) and (y_c < y_max_test) and (y_c > y_min_test):
if (x_c,y_c) in self.Rifle_coordinates:
None
else:
self.Rifle_limits_list[index] = (x_max, x_min, y_max, y_min)
self.Rifle_coordinates[index] = (x_c,y_c)
draw.text( (x_min_plot ,y_c ), "ID."+ str(self.Rifle_coordinates.index((x_c,y_c))),fill=self.bgr_color,font = self.font)
if (x_c,y_c) in self.Rifle_coordinates:
None
else:
self.Rifle_limits_list.append((x_max, x_min, y_max, y_min))
self.Rifle_coordinates.append((x_c,y_c))
draw.text( (x_min_plot ,y_c ), "ID."+ str(self.Rifle_coordinates.index((x_c,y_c))),fill=self.bgr_color,font = self.font)
Rifle_img = np.asarray(img)
text_top = 'Rifle Detected:' + str(np.shape(self.Rifle_detec_number)[1])
cv2.putText(Rifle_img,text_top,(50,50),cv2.FONT_HERSHEY_DUPLEX, 1.1, (255,0,255), 1, cv2.LINE_AA)
self.Rifle_frames_to_video.append(Rifle_img)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
self.Rifle_final_video = cv2.VideoWriter(self.final_video_path + "Track_Rifle.mp4",fourcc,fps,(size_X,size_Y))
for frame in self.Rifle_frames_to_video:
self.Rifle_final_video.write(frame)
self.Rifle_final_video.release()
return
if __name__ == "__main__":
Human_Tracker = Detec('/content/track_people.mp4','/content/shrinked_video.mp4',True,'/content/Amplesoft.ttf','/content/')
Human_Tracker.Human()
Rifle_Tracker = Detec('/content/drive/MyDrive/Colab Notebooks/video_ex02.mp4','/content/shrinked_video_2.mp4',True,'/content/Amplesoft.ttf','/content/')
Rifle_Tracker.Rifle('/content/drive/MyDrive/Colab Notebooks/best_ex2.pt')
#(self,video_path,shrinked_video_path,track = True,font_path,final_video_path) | # -*- coding: utf-8 -*-
"""Apollo_Ex3.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1cYBFPb_gDQLZntf5S8lkQjKH87FnSjCN
"""
# Commented out IPython magic to ensure Python compatibility.
!git clone https://github.com/ultralytics/yolov5 # clone repo
# %cd yolov5
!pip install -qr requirements.txt # install dependencies (ignore errors)
from google.colab import drive
drive.mount('/content/drive')
import cv2
import torch
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from PIL import Image, ImageDraw,ImageFont
class Detec:
def __init__(self,video_path,shrinked_video_path,track,font_path,final_video_path):
self.track = track
self.final_video_path = final_video_path #directory to save final video
self.font = ImageFont.truetype(font_path, 20)
self.cap = cv2.VideoCapture(video_path)
self.shrinked_video_path = shrinked_video_path
fourcc_1 = cv2.VideoWriter_fourcc(*'mp4v')
self.size = (960,800)
self.fps = 25
self.out = cv2.VideoWriter(shrinked_video_path,fourcc_1,self.fps,self.size)
while True:
success,frame = self.cap.read()
if not success:
break
else:
frame_resized = cv2.resize(frame,(size_X,size_Y),fx=0,fy=0)
self.out.write(frame_resized)
self.cap.release()
self.out.release()
return
# |#################################|
###############################################################| HUMAN DETECTION |###############################################################
# |#################################|
def Human(self):
self.Human_detection = True
self.bgr_color = (0,255,255)
self.human_coordinates = []
self.human_limits_list = []
self.first_append = 0
self.Human_frames_to_video = list()
self.human_model = torch.hub.load('ultralytics/yolov5', 'yolov5x')
self.human_model.classes = [0]
self.new_cap = cv2.VideoCapture(self.shrinked_video_path)
while True:
success,frames = self.new_cap.read()
if not success:
break
self.human_bounding_box = self.human_model(frames)
self.human_detec_number = self.human_bounding_box.pandas().xywh
max_and_min = (self.human_bounding_box.pandas().xyxy[0]) # [xmin, ymin, xmax, ymax, confidence, class, name] all bounding boxes
if self.track:
img = Image.fromarray(self.human_bounding_box.render()[0])
draw = ImageDraw.Draw(img)
for row in range(max_and_min['xmin'].count()):
x_c = ( (max_and_min.iat[row,2] - max_and_min.iat[row,0]) / 2) + max_and_min.iat[row,0]
y_c = ( (max_and_min.iat[row,3] - max_and_min.iat[row,1]) / 2) + max_and_min.iat[row,1]
x_min_plot = max_and_min.iat[row,0]
x_max = 1.25*x_c
x_min = 0.75*x_c
y_max = 1.22*y_c
y_min = 0.8*y_c
if self.first_append==0:
self.human_limits_list.append( (x_max, x_min, y_max, y_min) )
self.human_coordinates.append( (x_c,y_c) )
self.first_append = 1
else:
for index,element in enumerate(self.human_limits_list):
x_max_test, x_min_test, y_max_test, y_min_test = element
text=str(int(index))
#if limits conditions True, new coordinate to existent ID
if (x_c < x_max_test) and (x_c > x_min_test) and (y_c < y_max_test) and (y_c > y_min_test):
if (x_c,y_c) in self.human_coordinates:
None
else:
self.human_limits_list[index] = (x_max, x_min, y_max, y_min)
self.human_coordinates[index] = (x_c,y_c)
draw.text( (x_min_plot ,y_c ), "ID."+ str(self.human_coordinates.index((x_c,y_c))),fill=self.bgr_color,font = self.font)
if (x_c,y_c) in self.human_coordinates:
None
else:
self.human_limits_list.append((x_max, x_min, y_max, y_min))
self.human_coordinates.append((x_c,y_c))
draw.text( (x_min_plot ,y_c ), "ID."+ str(self.human_coordinates.index((x_c,y_c))),fill=self.bgr_color,font = self.font)
human_img = np.asarray(img)
text_top = 'People Detected:' + str(np.shape(self.human_detec_number)[1])
cv2.putText(human_img,text_top,(50,50),cv2.FONT_HERSHEY_DUPLEX, 1.1, (255,0,255), 1, cv2.LINE_AA)
self.Human_frames_to_video.append(human_img)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
if self.Human_detection == True:
self.human_final_video = cv2.VideoWriter(self.final_video_path + "Track_Human.mp4",fourcc,fps,(size_X,size_Y))
for frame in self.Human_frames_to_video:
self.human_final_video.write(frame)
self.human_final_video.release()
return
# |#################################|
###############################################################| RIFLE DETECTION |#################################################################
# |#################################|
def Rifle(self,weights_path):
self.Rifle_detection = True
self.bgr_color = (0,255,255)
self.Rifle_coordinates = []
self.Rifle_limits_list = []
self.Rifle_first_append = 0
self.Rifle_frames_to_video = list()
self.Rifle_model = torch.hub.load('ultralytics/yolov5', 'custom',weights_path)
self.new_cap = cv2.VideoCapture(self.shrinked_video_path)
while True:
success,frames = self.new_cap.read()
if not success:
break
self.Rifle_bounding_box = self.Rifle_model(frames)
self.Rifle_detec_number = self.Rifle_bounding_box.pandas().xywh
max_and_min = (self.Rifle_bounding_box.pandas().xyxy[0]) # [xmin, ymin, xmax, ymax, confidence, class, name] all bounding boxes
if self.track:
img = Image.fromarray(self.Rifle_bounding_box.render()[0])
draw = ImageDraw.Draw(img)
for row in range(max_and_min['xmin'].count()):
x_c = ( (max_and_min.iat[row,2] - max_and_min.iat[row,0]) / 2) + max_and_min.iat[row,0]
y_c = ( (max_and_min.iat[row,3] - max_and_min.iat[row,1]) / 2) + max_and_min.iat[row,1]
x_min_plot = max_and_min.iat[row,0]
x_max = 1.1*x_c
x_min = 0.7*x_c
y_max = 1.06*y_c
y_min = 0.9*y_c
if self.Rifle_first_append==0:
self.Rifle_limits_list.append( (x_max, x_min, y_max, y_min) )
self.Rifle_coordinates.append( (x_c,y_c) )
self.Rifle_first_append = 1
else:
for index,element in enumerate(self.Rifle_limits_list):
x_max_test, x_min_test, y_max_test, y_min_test = element
text=str(int(index))
#if limits conditions True, new coordinate to existent ID
if (x_c < x_max_test) and (x_c > x_min_test) and (y_c < y_max_test) and (y_c > y_min_test):
if (x_c,y_c) in self.Rifle_coordinates:
None
else:
self.Rifle_limits_list[index] = (x_max, x_min, y_max, y_min)
self.Rifle_coordinates[index] = (x_c,y_c)
draw.text( (x_min_plot ,y_c ), "ID."+ str(self.Rifle_coordinates.index((x_c,y_c))),fill=self.bgr_color,font = self.font)
if (x_c,y_c) in self.Rifle_coordinates:
None
else:
self.Rifle_limits_list.append((x_max, x_min, y_max, y_min))
self.Rifle_coordinates.append((x_c,y_c))
draw.text( (x_min_plot ,y_c ), "ID."+ str(self.Rifle_coordinates.index((x_c,y_c))),fill=self.bgr_color,font = self.font)
Rifle_img = np.asarray(img)
text_top = 'Rifle Detected:' + str(np.shape(self.Rifle_detec_number)[1])
cv2.putText(Rifle_img,text_top,(50,50),cv2.FONT_HERSHEY_DUPLEX, 1.1, (255,0,255), 1, cv2.LINE_AA)
self.Rifle_frames_to_video.append(Rifle_img)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
self.Rifle_final_video = cv2.VideoWriter(self.final_video_path + "Track_Rifle.mp4",fourcc,fps,(size_X,size_Y))
for frame in self.Rifle_frames_to_video:
self.Rifle_final_video.write(frame)
self.Rifle_final_video.release()
return
if __name__ == "__main__":
Human_Tracker = Detec('/content/track_people.mp4','/content/shrinked_video.mp4',True,'/content/Amplesoft.ttf','/content/')
Human_Tracker.Human()
Rifle_Tracker = Detec('/content/drive/MyDrive/Colab Notebooks/video_ex02.mp4','/content/shrinked_video_2.mp4',True,'/content/Amplesoft.ttf','/content/')
Rifle_Tracker.Rifle('/content/drive/MyDrive/Colab Notebooks/best_ex2.pt')
#(self,video_path,shrinked_video_path,track = True,font_path,final_video_path) | de | 0.288121 | # -*- coding: utf-8 -*- Apollo_Ex3.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1cYBFPb_gDQLZntf5S8lkQjKH87FnSjCN # Commented out IPython magic to ensure Python compatibility. # clone repo # %cd yolov5 # install dependencies (ignore errors) # %matplotlib inline #directory to save final video # |#################################| ###############################################################| HUMAN DETECTION |############################################################### # |#################################| # [xmin, ymin, xmax, ymax, confidence, class, name] all bounding boxes #if limits conditions True, new coordinate to existent ID # |#################################| ###############################################################| RIFLE DETECTION |################################################################# # |#################################| # [xmin, ymin, xmax, ymax, confidence, class, name] all bounding boxes #if limits conditions True, new coordinate to existent ID #(self,video_path,shrinked_video_path,track = True,font_path,final_video_path) | 2.220438 | 2 |
Question 4.py | Mkez45634/Python-Coding-Challenges | 0 | 6614849 | def getDigits(s):
digits = ""
if s.isdigit():
return s
else:
for x in range(0, len(s)):
if s[x].isdigit():
digits = digits + s[x]
return digits
print(getDigits("**1.23a-42"))
| def getDigits(s):
digits = ""
if s.isdigit():
return s
else:
for x in range(0, len(s)):
if s[x].isdigit():
digits = digits + s[x]
return digits
print(getDigits("**1.23a-42"))
| none | 1 | 3.747156 | 4 | |
app.py | Mayur-Debu/Final_Year_Project | 0 | 6614850 | <reponame>Mayur-Debu/Final_Year_Project<filename>app.py
# Package importing
from flask import Flask, render_template, url_for, redirect, jsonify, request
from authlib.integrations.flask_client import OAuth
import util
# Declaring the flasks app name
app = Flask(__name__)
# ============================================= Authentication configration for Google and Github ========================================
oauth = OAuth(app)
# Secret key is the one asset that defines your are the authorized owner of the software
app.config['SECRET_KEY'] = "THIS SHOULD BE SECRET"
# CLIENT_ID and CLIENT_SECRET are the credentials from the developer account of Google
app.config['GOOGLE_CLIENT_ID'] = "790276491366-hf1untelphhtvafl00o5beagffj918d1.apps.googleusercontent.com"
app.config['GOOGLE_CLIENT_SECRET'] = "<KEY>"
# CLIENT_ID and CLIENT_SECRET are the credentials from the developer account of Github
app.config['GITHUB_CLIENT_ID'] = "67beeb3d9297f11e3102"
app.config['GITHUB_CLIENT_SECRET'] = "8f8a06364b62b470c02da78e5adf2c25bbe22de2"
# Autlib Oauth2.0 configration for Google
google = oauth.register(
name='google',
client_id=app.config["GOOGLE_CLIENT_ID"],
client_secret=app.config["GOOGLE_CLIENT_SECRET"],
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_params=None,
authorize_url='https://accounts.google.com/o/oauth2/auth',
authorize_params=None,
api_base_url='https://www.googleapis.com/oauth2/v1/',
# This is only needed if using openId to fetch user info
userinfo_endpoint='https://openidconnect.googleapis.com/v1/userinfo',
client_kwargs={'scope': 'openid email profile'},
)
# Autlib Oauth2.0 configration for Github
github = oauth.register(
name='github',
client_id=app.config["GITHUB_CLIENT_ID"],
client_secret=app.config["GITHUB_CLIENT_SECRET"],
access_token_url='https://github.com/login/oauth/access_token',
access_token_params=None,
authorize_url='https://github.com/login/oauth/authorize',
authorize_params=None,
api_base_url='https://api.github.com/',
client_kwargs={'scope': 'user:email'},
)
# ========================================================================================================================================
# ================================================== Authentication routing for Google and Github ========================================
# Default route to the home page
@app.route('/')
def index():
return render_template('index.html')
# Route to the login page
@app.route('/login')
def login():
return render_template('login.html')
# Google login route
@app.route('/login/google')
def google_login():
google = oauth.create_client('google')
redirect_uri = url_for('google_authorize', _external=True)
return google.authorize_redirect(redirect_uri)
# Google authorized route
@app.route('/login/google/authorize')
def google_authorize():
google = oauth.create_client('google')
token = google.authorize_access_token()
resp = google.get('userinfo').json()
print(f"\n{resp}\n")
redirect_uri = url_for('estimate_Price', _external=False)
# return "You are successfully signed in using google"
return redirect(redirect_uri)
# Github login route
@app.route('/login/github')
def github_login():
github = oauth.create_client('github')
redirect_uri = url_for('github_authorize', _external=True)
return github.authorize_redirect(redirect_uri)
# Github authorized route
@app.route('/login/github/authorize')
def github_authorize():
    """Finish the GitHub OAuth2 flow.

    Exchanges the authorization code for an access token, fetches the
    user's profile, then redirects to the price-estimator page.
    """
    client = oauth.create_client('github')
    # The token exchange must run to complete the OAuth handshake, even
    # though the token itself is not stored anywhere yet.
    client.authorize_access_token()
    profile = client.get('user').json()
    # NOTE(review): `profile` is fetched but never persisted (no session
    # login). The previous debug print leaked the user's profile data to
    # the logs, so it was removed; wire this into a session/user store to
    # actually finish sign-in.
    del profile
    redirect_uri = url_for('estimate_Price', _external=False)
    return redirect(redirect_uri)
# Contact the developer's route
@app.route('/contact')
def contact_page():
    """Serve the contact-the-developer page."""
    contact_template = 'contact.html'
    return render_template(contact_template)
# Price-estimator page route
@app.route('/estimatePrice')
def estimate_Price():
    """Serve the house price estimator form."""
    estimator_template = 'PriceEstimator.html'
    return render_template(estimator_template)
# ========================================================================================================================================
# =================================================== Machine Learning Backend Routing ===================================================
# Get the location info.
def _cors_json(payload):
    """Wrap *payload* in a JSON response with the permissive CORS header the UI expects."""
    response = jsonify(payload)
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
# Get the location info.
@app.route('/get_location_names')
def get_location_names():
    """Return the known location names as JSON."""
    return _cors_json({'location': util.get_location_names()})
# Get the parking info.
@app.route('/get_parking')
def get_parking():
    """Return the parking-facility options as JSON."""
    return _cors_json({'parking': util.get_parking()})
# Get the type of house info.
@app.route('/get_houseType')
def get_houseType():
    """Return the house-type options as JSON."""
    return _cors_json({'houseType': util.get_houseType()})
# Get the type of street info.
@app.route('/get_streetType')
def get_streetType():
    """Return the street-type options as JSON."""
    return _cors_json({'streetType': util.get_streetType()})
# Route to predict the house prices
@app.route('/predict_home_price', methods=['GET', 'POST'])
def predict_home_price():
    '''
    Estimate a house price from the submitted form features.

    POST: reads the form fields below, runs the model through
    util.get_estimated_price(), and re-renders the estimator page with
    the JSON result embedded.
    GET: renders the empty estimator form. (Previously the GET path fell
    through and returned None, which makes Flask raise a TypeError.)

    House Features:
        INT_SQFT - The interior Sq. Ft of the property
        N_BEDROOM - The number of bedrooms
        N_BATHROOM - The number of bathrooms
        N_ROOM - Total number of rooms
        QS_ROOMS / QS_BATHROOM / QS_BEDROOM / QS_OVERALL - quality scores
            assigned based on buyer reviews
        BUILD TYPE - House (ready to move-in) / Commercial / Others
    Surrounding and Locality:
        Parking Facility - whether a parking facility is available
        STREET TYPE - Gravel / Paved / No Access
    '''
    if request.method == "POST":
        # Categorical attributes (strings straight from the form)
        location = request.form.get('ui-location')
        parking = request.form.get('ui-parking-facility')
        houseType = request.form.get('ui-house-type')
        streetType = request.form.get('ui-street-type')
        # Numeric attributes; int() raises on a missing/blank field,
        # surfacing a bad submission instead of silently mis-predicting.
        INT_SQFT = int(request.form.get('ui-int-sqft'))
        N_BEDROOM = int(request.form.get('ui-n-bedroom'))
        N_BATHROOM = int(request.form.get('ui-n-bathroom'))
        N_ROOM = int(request.form.get('ui-n-room'))
        QS_ROOMS = int(request.form.get('ui-qs-room'))
        QS_BATHROOM = int(request.form.get('ui-qs-bathroom'))
        QS_BEDROOM = int(request.form.get('ui-qs-bedroom'))
        QS_OVERALL = int(request.form.get('ui-qs-overall'))
        # Debug prints removed; the prediction goes straight into the
        # JSON response embedded in the rendered page.
        response = jsonify({
            'estimated_price':
            util.get_estimated_price(location, parking, houseType, streetType,
                                     INT_SQFT, N_BEDROOM, N_BATHROOM, N_ROOM,
                                     QS_ROOMS, QS_BATHROOM, QS_BEDROOM,
                                     QS_OVERALL)
        })
        response.headers.add('Access-Control-Allow-Origin', '*')
        return render_template('PriceEstimator.html', response=response.json)
    # GET: show the empty estimator form (bug fix: this path used to
    # return None, which Flask rejects with a TypeError).
    return render_template('PriceEstimator.html')
# ======================================================================================================================================
# =============================================================== Driver Code ==========================================================
if __name__ == '__main__':
    # Load the saved model artifacts before serving any requests.
    util.load_saved_artifacts()
    # SECURITY(review): debug=True enables the Werkzeug debugger and code
    # reloader; it must be disabled before deploying to production.
    app.run(debug=True)
| # Package importing
# Standard library
import os
# Third-party packages
from flask import Flask, render_template, url_for, redirect, jsonify, request
from authlib.integrations.flask_client import OAuth
# Local application helpers (model loading / prediction)
import util
# Declaring the Flask app
app = Flask(__name__)
# ============================================= Authentication configuration for Google and Github ========================================
oauth = OAuth(app)
# SECURITY(review): the secret key and OAuth credentials were hard-coded in
# source control. They are now read from the environment, falling back to the
# previous literals so existing setups keep working; rotate these secrets and
# remove the fallbacks.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', "THIS SHOULD BE SECRET")
# CLIENT_ID and CLIENT_SECRET are the credentials from the developer account of Google
app.config['GOOGLE_CLIENT_ID'] = os.environ.get('GOOGLE_CLIENT_ID', "790276491366-hf1untelphhtvafl00o5beagffj918d1.apps.googleusercontent.com")
app.config['GOOGLE_CLIENT_SECRET'] = os.environ.get('GOOGLE_CLIENT_SECRET', "<KEY>")
# CLIENT_ID and CLIENT_SECRET are the credentials from the developer account of Github
app.config['GITHUB_CLIENT_ID'] = os.environ.get('GITHUB_CLIENT_ID', "67beeb3d9297f11e3102")
app.config['GITHUB_CLIENT_SECRET'] = os.environ.get('GITHUB_CLIENT_SECRET', "8f8a06364b62b470c02da78e5adf2c25bbe22de2")
# Authlib OAuth2.0 configuration for Google
google = oauth.register(
    name='google',
    # Credentials issued via the Google developer console
    client_id=app.config["GOOGLE_CLIENT_ID"],
    client_secret=app.config["GOOGLE_CLIENT_SECRET"],
    # Google OAuth2 endpoints
    authorize_url='https://accounts.google.com/o/oauth2/auth',
    authorize_params=None,
    access_token_url='https://accounts.google.com/o/oauth2/token',
    access_token_params=None,
    api_base_url='https://www.googleapis.com/oauth2/v1/',
    # Only needed when using OpenID Connect to fetch the user profile
    userinfo_endpoint='https://openidconnect.googleapis.com/v1/userinfo',
    client_kwargs={'scope': 'openid email profile'},
)
# Authlib OAuth2.0 configuration for GitHub
github = oauth.register(
    name='github',
    # Credentials issued via the GitHub developer settings page
    client_id=app.config["GITHUB_CLIENT_ID"],
    client_secret=app.config["GITHUB_CLIENT_SECRET"],
    # GitHub OAuth2 endpoints
    authorize_url='https://github.com/login/oauth/authorize',
    authorize_params=None,
    access_token_url='https://github.com/login/oauth/access_token',
    access_token_params=None,
    api_base_url='https://api.github.com/',
    # Request only the scope needed to read the user's email address
    client_kwargs={'scope': 'user:email'},
)
# ========================================================================================================================================
# ================================================== Authentication routing for Google and Github ========================================
# Default route to the home page
@app.route('/')
def index():
    """Serve the application home page."""
    home_template = 'index.html'
    return render_template(home_template)
# Route to the login page
@app.route('/login')
def login():
    """Serve the login page offering the Google/GitHub sign-in options."""
    login_template = 'login.html'
    return render_template(login_template)
# Google login route
@app.route('/login/google')
def google_login():
    """Start the Google OAuth2 flow by redirecting to Google's consent screen."""
    client = oauth.create_client('google')
    callback = url_for('google_authorize', _external=True)
    return client.authorize_redirect(callback)
# Google authorized route
@app.route('/login/google/authorize')
def google_authorize():
    """Finish the Google OAuth2 flow.

    Exchanges the authorization code for an access token, fetches the
    user's profile, then redirects to the price-estimator page.
    """
    client = oauth.create_client('google')
    # The token exchange must run to complete the OAuth handshake, even
    # though the token itself is not stored anywhere yet.
    client.authorize_access_token()
    profile = client.get('userinfo').json()
    # NOTE(review): `profile` is fetched but never persisted (no session
    # login). The previous debug print leaked the user's profile data to
    # the logs, so it was removed; wire this into a session/user store to
    # actually finish sign-in.
    del profile
    redirect_uri = url_for('estimate_Price', _external=False)
    return redirect(redirect_uri)
# Github login route
@app.route('/login/github')
def github_login():
    """Start the GitHub OAuth2 flow by redirecting to GitHub's consent screen."""
    client = oauth.create_client('github')
    callback = url_for('github_authorize', _external=True)
    return client.authorize_redirect(callback)
# Github authorized route
@app.route('/login/github/authorize')
def github_authorize():
    """Finish the GitHub OAuth2 flow.

    Exchanges the authorization code for an access token, fetches the
    user's profile, then redirects to the price-estimator page.
    """
    client = oauth.create_client('github')
    # The token exchange must run to complete the OAuth handshake, even
    # though the token itself is not stored anywhere yet.
    client.authorize_access_token()
    profile = client.get('user').json()
    # NOTE(review): `profile` is fetched but never persisted (no session
    # login). The previous debug print leaked the user's profile data to
    # the logs, so it was removed; wire this into a session/user store to
    # actually finish sign-in.
    del profile
    redirect_uri = url_for('estimate_Price', _external=False)
    return redirect(redirect_uri)
# Contact the developer's route
@app.route('/contact')
def contact_page():
    """Serve the contact-the-developer page."""
    contact_template = 'contact.html'
    return render_template(contact_template)
# Price-estimator page route
@app.route('/estimatePrice')
def estimate_Price():
    """Serve the house price estimator form."""
    estimator_template = 'PriceEstimator.html'
    return render_template(estimator_template)
# ========================================================================================================================================
# =================================================== Machine Learning Backend Routing ===================================================
# Get the location info.
def _json_with_cors(payload):
    """Wrap *payload* in a JSON response with the permissive CORS header the UI expects."""
    response = jsonify(payload)
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
# Get the location info.
@app.route('/get_location_names')
def get_location_names():
    """Return the known location names as JSON."""
    return _json_with_cors({'location': util.get_location_names()})
# Get the parking info.
@app.route('/get_parking')
def get_parking():
    """Return the parking-facility options as JSON."""
    return _json_with_cors({'parking': util.get_parking()})
# Get the type of house info.
@app.route('/get_houseType')
def get_houseType():
    """Return the house-type options as JSON."""
    return _json_with_cors({'houseType': util.get_houseType()})
# Get the type of street info.
@app.route('/get_streetType')
def get_streetType():
    """Return the street-type options as JSON."""
    return _json_with_cors({'streetType': util.get_streetType()})
# Route to predict the house prices
@app.route('/predict_home_price', methods=['GET', 'POST'])
def predict_home_price():
    '''
    Estimate a house price from the submitted form features.

    POST: reads the form fields below, runs the model through
    util.get_estimated_price(), and re-renders the estimator page with
    the JSON result embedded.
    GET: renders the empty estimator form. (Previously the GET path fell
    through and returned None, which makes Flask raise a TypeError.)

    House Features:
        INT_SQFT - The interior Sq. Ft of the property
        N_BEDROOM - The number of bedrooms
        N_BATHROOM - The number of bathrooms
        N_ROOM - Total number of rooms
        QS_ROOMS / QS_BATHROOM / QS_BEDROOM / QS_OVERALL - quality scores
            assigned based on buyer reviews
        BUILD TYPE - House (ready to move-in) / Commercial / Others
    Surrounding and Locality:
        Parking Facility - whether a parking facility is available
        STREET TYPE - Gravel / Paved / No Access
    '''
    if request.method == "POST":
        # Categorical attributes (strings straight from the form)
        location = request.form.get('ui-location')
        parking = request.form.get('ui-parking-facility')
        houseType = request.form.get('ui-house-type')
        streetType = request.form.get('ui-street-type')
        # Numeric attributes; int() raises on a missing/blank field,
        # surfacing a bad submission instead of silently mis-predicting.
        INT_SQFT = int(request.form.get('ui-int-sqft'))
        N_BEDROOM = int(request.form.get('ui-n-bedroom'))
        N_BATHROOM = int(request.form.get('ui-n-bathroom'))
        N_ROOM = int(request.form.get('ui-n-room'))
        QS_ROOMS = int(request.form.get('ui-qs-room'))
        QS_BATHROOM = int(request.form.get('ui-qs-bathroom'))
        QS_BEDROOM = int(request.form.get('ui-qs-bedroom'))
        QS_OVERALL = int(request.form.get('ui-qs-overall'))
        # Debug prints removed; the prediction goes straight into the
        # JSON response embedded in the rendered page.
        response = jsonify({
            'estimated_price':
            util.get_estimated_price(location, parking, houseType, streetType,
                                     INT_SQFT, N_BEDROOM, N_BATHROOM, N_ROOM,
                                     QS_ROOMS, QS_BATHROOM, QS_BEDROOM,
                                     QS_OVERALL)
        })
        response.headers.add('Access-Control-Allow-Origin', '*')
        return render_template('PriceEstimator.html', response=response.json)
    # GET: show the empty estimator form (bug fix: this path used to
    # return None, which Flask rejects with a TypeError).
    return render_template('PriceEstimator.html')
# ======================================================================================================================================
# =============================================================== Driver Code ==========================================================
if __name__ == '__main__':
# Loading the artifacts....
util.load_saved_artifacts()
app.run(debug=True) | en | 0.720325 | # Package importing # Declaring the flasks app name # ============================================= Authentication configration for Google and Github ======================================== # Secret key is the one asset that defines your are the authorized owner of the software # CLIENT_ID and CLIENT_SECRET are the credentials from the developer account of Google # CLIENT_ID and CLIENT_SECRET are the credentials from the developer account of Github # Autlib Oauth2.0 configration for Google # This is only needed if using openId to fetch user info # Autlib Oauth2.0 configration for Github # ======================================================================================================================================== # ================================================== Authentication routing for Google and Github ======================================== # Default route to the home page # Route to the login page # Google login route # Google authorized route # return "You are successfully signed in using google" # Github login route # Github authorized route # return "You are successfully signed in using google" # Contact the developer's route # Contact the developer's route # ======================================================================================================================================== # =================================================== Machine Learning Backend Routing =================================================== # Get the location info. # Get the parking info. # Get the type of house info. # Get the type of street info # Route to predict the house prices @ The predict_home_price docs: House Features: INT_SQFT – The interior Sq. 
Ft of the property N_BEDROOM – The number of Bed rooms N_BATHROOM - The number of bathrooms N_ROOM – Total Number of Rooms QS_ROOMS – The quality score assigned for rooms based on buyer reviews QS_BATHROOM – The quality score assigned for bathroom based on buyer reviews QS_BEDROOM – The quality score assigned for bedroom based on buyer reviews QS_OVERALL – The Overall quality score assigned for the property BUILD TYPE – House (ready to move-in) Commercial (it's a property for rental / business) Others (can be villa, penthouse etc.) Surrounding and Locality Parking Facility – Whether parking facility is available. STREET TYPE - Gravel Paved No Access # String datatype attributes # int datatype attributes # ====================================================================================================================================== # =============================================================== Driver Code ========================================================== # Loading the artifacts.... | 2.719212 | 3 |