id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5052679 | import sqlite3
import sys, os
# Absolute path of the directory containing this script; used both for
# sibling-module imports and to locate the database file next to the script.
THIS_PATH = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
sys.path.append(THIS_PATH)  # make sibling modules (methods_lexicon) importable
# Module-level connection/cursor shared by everything that imports this module.
conn = sqlite3.connect(THIS_PATH + '/syncat.db')
c = conn.cursor()
import methods_lexicon
def findallwords(string_in):
    """Return a list of (word, rest) tuples for every lexicon word that
    prefixes *string_in*, where ``rest`` is the remainder of the string."""
    splits = []
    for end in range(1, len(string_in) + 1):
        prefix = string_in[:end]
        if methods_lexicon.is_word(prefix):
            splits.append((prefix, string_in[end:]))
    return splits
1704121 | import unittest
from Frame import Frame
from Game import Game
class FrameTest(unittest.TestCase):
    """Unit tests for the bowling ``Frame`` class."""

    def test_frame_init(self):
        frame = Frame()
        self.assertFalse(frame.is_full())

    def test_invalid_pins(self):
        frame = Frame()
        self.assertRaises(Exception, frame.add_ball, 11)

    def test_invalid_pins_neg(self):
        frame = Frame()
        self.assertRaises(Exception, frame.add_ball, -1)

    def test_is_full_False_AfterOneBall(self):
        frame = Frame()
        frame.add_ball(7)
        self.assertFalse(frame.is_full())

    def test_is_strike_True(self):
        frame = Frame()
        frame.add_ball(10)
        self.assertTrue(frame.is_strike())

    def test_is_strike_False(self):
        frame = Frame()
        frame.add_ball(7)
        self.assertFalse(frame.is_strike())
        frame.add_ball(3)
        self.assertFalse(frame.is_strike())

    def test_is_spare(self):
        frame = Frame()
        frame.add_ball(7)
        self.assertFalse(frame.is_spare())
        frame.add_ball(3)
        self.assertTrue(frame.is_spare())

    def test_invalid_frame(self):
        # Two balls totalling more than 10 pins is illegal.
        frame = Frame()
        frame.add_ball(3)
        self.assertRaises(Exception, frame.add_ball, 8)

    def test_additionalBallAfterSpare(self):
        # A spare allows exactly one bonus ball; a second must raise.
        frame = self.get_spare_frame()
        frame.add_ball(4)
        self.assertRaises(Exception, frame.add_ball, 8)

    def test_pin_count_ok(self):
        frame = Frame()
        self.assertFalse(frame.is_pin_count_ok(11))
        self.assertTrue(frame.is_pin_count_ok(8))
        frame.add_ball(8)
        self.assertFalse(frame.is_pin_count_ok(3))
        self.assertTrue(frame.is_pin_count_ok(2))
        self.assertTrue(frame.is_pin_count_ok(1))
        frame.add_ball(1)
        self.assertFalse(frame.is_pin_count_ok(2))
        self.assertFalse(frame.is_pin_count_ok(1))

    def test_pin_count_strike(self):
        frame = self.get_strike_frame()
        self.assertTrue(frame.is_pin_count_ok(7))
        frame.add_ball(7)
        self.assertFalse(frame.is_pin_count_ok(10))
        self.assertTrue(frame.is_pin_count_ok(3))
        frame.add_ball(3)
        self.assertFalse(frame.is_pin_count_ok(1))

    def test_pin_count_spare(self):
        frame = self.get_spare_frame()
        self.assertTrue(frame.is_pin_count_ok(7))
        frame.add_ball(6)
        self.assertFalse(frame.is_pin_count_ok(1))

    def test_pin_count_ok_tooMany(self):
        frame = Frame()
        frame.add_ball(8)
        self.assertFalse(frame.is_pin_count_ok(3))

    def test_score(self):
        frame = Frame()
        self.assertEqual(0, frame.score())
        frame.add_ball(8)
        self.assertEqual(8, frame.score())
        frame.add_ball(1)
        self.assertEqual(9, frame.score())

    def test_score_strike(self):
        frame = self.get_strike_frame()
        self.assertEqual(10, frame.score())
        frame.add_ball(8)
        self.assertEqual(18, frame.score())
        frame.add_ball(1)
        self.assertEqual(19, frame.score())

    # BUG FIX: this test was also named ``test_score_strike``, which
    # shadowed the strike test above so one of them never ran.  It
    # exercises the spare case, so name it accordingly.
    def test_score_spare(self):
        frame = self.get_spare_frame()
        self.assertEqual(10, frame.score())
        frame.add_ball(8)
        self.assertEqual(18, frame.score())

    # Renamed from the misspelled ``test_is_speical``.
    def test_is_special(self):
        self.assertFalse(Frame().is_special())
        self.assertTrue(self.get_strike_frame().is_special())
        self.assertTrue(self.get_spare_frame().is_special())

    def get_spare_frame(self):
        """Helper: a completed frame scoring a spare (3 + 7)."""
        frame = Frame()
        frame.add_ball(3)
        frame.add_ball(7)
        return frame

    def get_strike_frame(self):
        """Helper: a frame containing a single strike ball."""
        frame = Frame()
        frame.add_ball(10)
        return frame
class GameTest(unittest.TestCase):
    """Unit tests for the bowling ``Game`` class.

    Uses ``assertEqual`` throughout -- ``assertEquals`` is a deprecated
    alias that raises DeprecationWarning on modern Python.
    """

    def test_game_init(self):
        game = Game()
        self.assertEqual(game.current_frame(), 1)
        self.assertEqual(game.score(), 0)

    def test_add_ball(self):
        game = Game()
        game.add_ball(1)
        self.assertEqual(game.current_frame(), 1)
        self.assertEqual(game.score(), 1)

    def test_add_second_ball(self):
        game = Game()
        game.add_ball(1)
        game.add_ball(2)
        self.assertEqual(game.score(), 3)
        self.assertEqual(game.current_frame(), 2)

    def test_strike(self):
        # A strike immediately advances to the next frame.
        game = Game()
        game.add_ball(10)
        self.assertEqual(game.score(), 10)
        self.assertEqual(game.current_frame(), 2)

    def check_full_game(self, expectedScore, balls):
        """Helper: roll *balls* into a fresh game and assert the final score."""
        game = Game()
        for ball in balls:
            game.add_ball(ball)
        self.assertEqual(expectedScore, game.score())

    def score_full_game(self, balls):
        """Helper: roll *balls* and return the score (used for error cases)."""
        game = Game()
        for ball in balls:
            game.add_ball(ball)
        return game.score()

    def test_game_full(self):
        self.check_full_game(20, [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])

    def test_game_full2(self):
        self.check_full_game(47, [1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 10, 10 ,9])

    def test_game_full3(self):
        # A perfect game: twelve strikes.
        self.check_full_game(300, [10,10,10,10,10,10,10,10,10,10,10,10])

    def test_game_full4(self):
        self.check_full_game(173, [7,3, 7,3, 7,3, 7,3, 7,3, 7,3, 7,3, 7,3, 7,3, 7,3, 10])

    def test_game_full_suite(self):
        self.check_full_game(0, [0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0])
        self.check_full_game(20, [1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1])
        self.check_full_game(6, [1,1, 1,1, 1,1])  # incomplete
        self.check_full_game(18, [1,1, 6,4, 3])  # incomplete w/ spare
        self.check_full_game(150, [5,5, 5,5, 5,5, 5,5, 5,5, 5,5, 5,5, 5,5, 5,5, 5,5, 5])
        self.check_full_game(47, [1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 10, 10 ,9])
        self.check_full_game(173, [7,3, 7,3, 7,3, 7,3, 7,3, 7,3, 7,3, 7,3, 7,3, 7,3, 10])
        self.check_full_game(300, [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10])
        self.check_full_game(280, [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 5])  # incomplete
        self.check_full_game(300, [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10])  # extras
        self.check_full_game(240, [10, 10, 10, 0,0, 10, 10, 10, 10, 10, 10, 10, 10])
        self.check_full_game(245, [10, 10, 10, 10, 10, 10, 10, 10, 10, 1,1])

    def test_game_full_error_1(self):
        self.assertRaises(Exception, self.score_full_game, [1,1, 12,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1])  # invalid roll

    def test_game_full_error_2(self):
        self.assertRaises(Exception, self.score_full_game, [1,1, 6,-1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1])  # invalid roll

    def test_game_full_error_3(self):
        self.assertRaises(Exception, self.score_full_game, [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9,2])  # invalid extras

    def test_game_full_error_4(self):
        self.assertRaises(Exception, self.score_full_game, [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11])  # invalid extras

    def test_game_full_error_5(self):
        self.assertRaises(Exception, self.score_full_game, [10, 10, 10, 10, 10, 10, 10, 10, 10, 9,1, 11])  # invalid extras

    def test_game_full_error_6(self):
        self.assertRaises(Exception, self.score_full_game, [10, 10, 5,6, 10, 10, 10, 10, 10, 10, 1,1])
if __name__ == "__main__":
    # Discover and run all Frame/Game test cases when executed directly.
    unittest.main()
| StarcoderdataPython |
8146521 | <gh_stars>10-100
from tracker.models import *
for c in Contatto.objects.all():
if len( c.nota_set.all() ) > 1:
print 'maggiore ', c
continue
try:
nota = c.nota_set.all()[0]
print nota.testo.encode('latin-1')
if nota.testo == 'Fascia 1':
c.priorita = 1
c.save()
nota.testo = ''
nota.save()
if nota.testo == 'Fascia 2':
c.priorita = 2
c.save()
nota.testo = ''
nota.save()
if nota.testo == 'Fascia 3':
c.priorita = 3
c.save()
nota.testo = ''
nota.save()
except Nota.DoesNotExist: pass
| StarcoderdataPython |
8108618 | <reponame>Pzzzzz5142/animal-forest-QQ-group-bot
from nonebot import on_command, CommandSession, on_startup
from nonebot.message import unescape
import asyncio
import asyncpg
from datetime import datetime
import nonebot
import pytz
from aiocqhttp.exceptions import Error as CQHttpError
import yaml
import os
from nonebot.argparse import ArgumentParser
import sys
from nonebot.log import logger
from random import randint
import random
import bisect
from db import db
import cq
from utils import *
random.seed(114514)  # fixed seed so any randomized behaviour is reproducible

# Usage/help text for the turnip ("大头菜") command's shell-like parser.
# This is user-facing runtime text -- do not edit casually.
BIGUSAGE = r"""
大头菜命令!
命令:'大头菜'
功能参数:
-p, --price X
以X的价格发布你岛上的大头菜
-l, --list
查看当前所有大头菜价格
-d, --delt
删除你自己的房间
例如:
使用
大头菜 -p 650
或者
大头菜 --price 650
来让你的大头菜以650的价格挂牌上市!
注意!
1)当大头菜价格刷新时,你的大头菜价格将会被强制下市。
2)如果你输错了,直接重新输入命令来更新大头菜价格!
""".strip()

# Maximum number of price quotes echoed into the group chat before the
# remainder is sent via private message.
maxbig_head = 0
#__plugin_name__ = '大头菜挂牌上市'
#__plugin_usage__ = BIGUSAGE
def roomPaser(value, lv: int = 0) -> str:
    """Render one turnip-price record as display text.

    ``value`` is a mapping with ``qid`` and ``price`` keys; ``lv`` is the
    number of tab stops each line is indented by.
    """
    indent = '\t' * lv
    lines = (f"QQ号:{value['qid']}", f"大头菜价格:{value['price']}")
    return '\n'.join(indent + line for line in lines)
@on_command('大头菜', only_to_me=False, shell_like=True)
async def bighead(session: CommandSession):
    """Handle the turnip ("大头菜") command: publish, list, or delete a price.

    ``-p/--price`` inserts/updates the caller's quote, ``-l/--list`` sends
    all quotes (spilling to private messages past ``maxbig_head``), and
    ``-d/--delt`` removes the caller's quote.
    """
    parser = ArgumentParser(session=session, usage=BIGUSAGE)
    group = parser.add_argument_group()
    group.add_argument('-p', '--price', type=int, help="大头菜的价格")
    group.add_argument('-l', '--list', action='store_true',
                       help="列出当前有的大头菜价格", default=False)
    group.add_argument('-d', '--delt', action='store_true',
                       help='删除你自己的大头菜价格', default=False)
    args = parser.parse_args(session.argv)
    if args.price != None:
        # 810 is the accepted maximum; anything outside [0, 810] is rejected.
        if args.price > 810 or args.price < 0:
            session.finish('小老弟,你怎么回事?')
        # NOTE(review): these queries build SQL by str.format instead of
        # asyncpg's $n parameters.  user_id comes from the QQ event and is
        # presumably numeric, but this should be parameterized -- confirm
        # and migrate to ``conn.execute(sql, *args)``.
        async with db.pool.acquire() as conn:
            try:
                # Normal path: first quote from this user.
                state = await conn.execute('''insert into datou (qid,price) values ({0},{1});'''.format(session.event.user_id, args.price))
            except asyncpg.exceptions.ForeignKeyViolationError as e:
                # User row missing: create it (switch id from the group card,
                # or -1 for private chats), then retry the insert.
                await conn.execute('''insert into quser (qid,swid) values ({0},{1});'''.format(session.event.user_id, swFormatter(session.event.sender['card'] if session.event['message_type'] != 'private' else '-1')))
                state = await conn.execute('''insert into datou (qid,price) values ({0},{1});'''.format(session.event.user_id, args.price))
            except asyncpg.exceptions.UniqueViolationError as e:
                # User already has a quote: update it instead.
                state = await conn.execute('''update datou set price = {1} where qid='{0}';'''.format(session.event.user_id, args.price))
            values = await conn.fetch('''select * from datou where qid = {0}'''.format(session.event.user_id))
            logger.info('大头菜上市完成')
            session.finish(
                '已{}如下记录:\n'.format('添加' if 'UPDATE' not in state else '更新') + roomPaser(values[0], 1))
    elif args.list == True:
        bot = nonebot.get_bot()
        async with db.pool.acquire() as conn:
            values = await conn.fetch('''select * from datou order by price DESC''')
        if len(values) == 0:
            session.finish('很遗憾,当前没有大头菜报价。')
        # Echo at most maxbig_head quotes into the chat directly.
        for i in range(min(maxbig_head, len(values))):
            await session.send(roomPaser(values[i]))
        if session.event['message_type'] == 'group' and len(values) <= maxbig_head:
            await session.send(unescape(f"{cq.at(session.event.user_id)} 全部报价如上。"))
        try:
            # Remaining quotes go out as private messages.
            for value in values[maxbig_head:]:
                await bot.send_private_msg(message=roomPaser(value), user_id=session.event.user_id)
            if len(values) > maxbig_head:
                await bot.send_private_msg(message='全部报价如上。', user_id=session.event.user_id)
        except CQHttpError:
            # Private delivery can fail when the bot is not a friend.
            session.finish(unescape(f'{cq.at(session.event.user_id)} 剩余大头菜报价信息发送失败,请尝试与我发送临时消息。'))
        if len(values) > maxbig_head and session.event['message_type'] == 'group':
            pass
            # await session.send(unescape(f'{cq.at(session.event.user_id)} 剩余大头菜报价已私发,请查收。'))
    elif args.delt == True:
        async with db.pool.acquire() as conn:
            value = await conn.execute(f'select * from datou where qid={session.event.user_id}')
            if len(value) == 0:
                session.finish('你貌似并没有上市的大头菜。')
            await conn.execute(f'''delete from datou where qid={session.event.user_id};''')
            session.finish('删除完成')
| StarcoderdataPython |
254063 | from random import random
from Actuation.IVehicleActuator import IVehicleActuator
from Decision.IDecisionMaker import IDecisionMaker
from Vision.ICamera import ICamera
class RandomMovement(IDecisionMaker):
    """Decision maker that drives the vehicle with random motion values."""

    def __init__(self, vehicle_actuator: IVehicleActuator, camera: ICamera):
        self._vehicle_actuator = vehicle_actuator
        self._camera = camera

    def start(self):
        """Pick a random forward and turning speed and report them."""
        actuator = self._vehicle_actuator
        actuator.speed = random() * 0.5
        actuator.turning_speed = random() * 15
        self._report()

    def stop(self):
        """Bring the vehicle to a halt and report the zeroed speeds."""
        actuator = self._vehicle_actuator
        actuator.speed = 0
        actuator.turning_speed = 0
        self._report()

    def _report(self):
        # Same output format as before: "<speed> <turning_speed>\n".
        actuator = self._vehicle_actuator
        print(str(actuator.speed) + " " + str(actuator.turning_speed) + "\n")
| StarcoderdataPython |
6669052 | """."""
from time import sleep
from queue import Queue
from threading import Thread
from dearpygui import core
from adheya import DPGObject
from adheya.layout import Group
class Label(DPGObject):
    """Static text widget wrapping ``dearpygui.core.add_text``."""

    def __init__(self, parent, **kw):
        super().__init__(parent, **kw)
        kw['parent'] = self.parent.guid
        # Fall back to this object's own label when none was supplied.
        label = kw.pop('label', self.label)
        kw.pop('width', None)  # add_text does not accept a width keyword
        kw['default_value'] = kw.get('default_value', label)
        core.add_text(self.guid, **kw)
class Field(DPGObject):
    """A "label: value" pair rendered as two Labels in one horizontal row."""

    def __init__(self, parent, **kw):
        super().__init__(parent, **kw)
        dfv = kw.get('default_value', ' ')
        g = Group(self.parent, horizontal=True)
        self.__label = Label(g.guid, default_value=self.label)
        self.__value = Label(g.guid, default_value=dfv)

    @property
    def text(self):
        # Left-hand (label) text.
        return self.__label.value

    @text.setter
    def text(self, val):
        self.__label.value = val

    @property
    def value(self):
        # Right-hand (value) text.
        return self.__value.value

    @value.setter
    def value(self, val):
        self.__value.value = val
class ThreadUpdate(Thread):
    """Daemon thread that applies widget-value updates pulled from a queue.

    Each queue item is a dict mapping widget -> new value; a ``None`` item
    is the sentinel that stops the thread, after which *callback* (if any)
    is invoked.
    """

    def __init__(self, queue: Queue, callback=None):
        super().__init__()
        self.daemon = True
        self.__q = queue
        # Invoked once, after the sentinel drains the queue.
        self.__callback = callback

    def run(self):
        # BUG FIX: the original looped on ``self.is_alive`` -- a method
        # *reference* (never called), which is always truthy.  The loop only
        # ever exits via the sentinel, so make that explicit.
        while True:
            widgets: dict = self.__q.get()
            if widgets is None:
                break
            for widget, value in widgets.items():
                widget.value = value
            sleep(0.01)  # yield so the UI thread can repaint
        if self.__callback:
            self.__callback()
class ThreadProgress(Thread):
    """Daemon thread that feeds (value, overlay) pairs to a progress bar.

    A queue item whose value is ``None`` is the stop sentinel; *callback*
    (if any) is invoked once the loop ends.
    """

    def __init__(self, progressBar, queue: Queue, callback=None):
        super().__init__()
        self.daemon = True
        self.__q = queue
        self.__progressBar = progressBar
        self.__callback = callback

    def run(self):
        # BUG FIX: ``self.is_alive`` without parentheses is always truthy;
        # the loop really terminates only on the ``None`` sentinel.
        while True:
            val, overlay = self.__q.get()
            if val is None:
                break
            self.__progressBar.overlay = overlay
            self.__progressBar.value = val
            sleep(0.01)  # yield so the UI thread can repaint
        if self.__callback:
            self.__callback()
class ProgressBar(DPGObject):
    """Progress bar widget updated asynchronously via a queue + worker thread.

    Assign to ``value`` (0..1) to advance the bar; a value above 1 queues the
    sentinel that stops the worker.
    """

    def __init__(self, parent, **kw):
        super().__init__(parent, **kw)
        self.__callback = kw.pop('callback', None)
        self.__worker = None
        self.__q = Queue()
        self.__value = 0  # cache of the most recently assigned value
        width, _ = self.parent.rect
        kw['width'] = kw.get('width', width)
        kw['show'] = kw.get('show', False)
        kw.pop('parent', None)
        self.__group = Group(self, width=width)
        core.add_progress_bar(self.__group, **kw)

    @property
    def value(self):
        # BUG FIX: the original getter returned ``self.value``, recursing
        # forever.  Return the cached last-assigned value instead.
        return self.__value

    @value.setter
    def value(self, val):
        self.__value = val
        overlay = f'{round(val * 100)}%'
        if val < 0:
            val = 0
        if val > 1.:
            val = None  # sentinel: stops the worker thread
        self.__q.put((val, overlay))

    def start(self):
        """Show the bar and launch the worker that drains the queue."""
        self.show = True
        # BUG FIX: the original lambda referenced ``self.__guid`` (mangled to
        # a nonexistent ``_ProgressBar__guid``) and called ``self.__callback``
        # even when it was None.  Use the inherited ``guid`` and only build a
        # completion callback when one was supplied.
        done = (lambda: self.__callback(self.guid)) if self.__callback else None
        self.__worker = ThreadProgress(self, self.__q, callback=done)
        self.__worker.start()
| StarcoderdataPython |
# Package version as a (major, minor, patch) tuple.
VERSION = (0, 4, 2)


def get_version():
    """Return the dotted version string, e.g. ``'0.4.2'``."""
    return '.'.join(str(part) for part in VERSION)


version = get_version()
| StarcoderdataPython |
1715404 | <filename>utils/xterm.py
# SGR escape sequence that resets all terminal text attributes.
ANSI_RESET = "\x1b[0m"


def rgb(r, g, b):
    """Return the xterm-256 palette index for an RGB triple.

    Each component must be an integer in [0, 5]; the 6x6x6 colour cube
    occupies palette slots 16..231.
    """
    for component in (r, g, b):
        if component < 0 or component > 5:
            raise ValueError("Value out of range")
    return 16 + 36 * r + 6 * g + b
def gray(graylevel):
    """Return the xterm-256 palette index for a gray level in [0, 25].

    Level 0 maps to ANSI black (0), level 25 to cube white (231), and
    levels 1..24 to the dedicated grayscale ramp (232..255).
    """
    if not 0 <= graylevel <= 25:
        raise ValueError("Value out of range")
    if graylevel == 0:
        return 0
    if graylevel == 25:
        return 231
    return 231 + graylevel
def sequence(fore=None, back=None):
    """Build the SGR escape sequence selecting 256-colour fore/background.

    Returns an empty string when neither colour is given.
    """
    parts = []
    if fore is not None:
        parts += [38, 5, fore]
    if back is not None:
        parts += [48, 5, back]
    if not parts:
        return ""
    return "\x1b[" + ";".join(map(str, parts)) + "m"
def wrap(text, fore=None, back=None):
    """Return *text* wrapped in colour escape codes plus a trailing reset.

    With neither colour set, the text is returned unstyled.
    """
    if fore is None and back is None:
        return str(text)
    return sequence(fore, back) + str(text) + ANSI_RESET
def ignore(text, fore=None, back=None):
    """Colour-disabled stand-in for :func:`wrap`: returns plain text only."""
    plain = str(text)
    return plain
def wrap_for_stream(stream):
    """Return :func:`wrap` when *stream* is an interactive TTY, else :func:`ignore`."""
    try:
        interactive = stream.isatty()
    except AttributeError:
        # Stream has no isatty() (e.g. StringIO on old versions): no colour.
        return ignore
    return wrap if interactive else ignore
| StarcoderdataPython |
6611846 | <reponame>kalekundert/autosnapgene
#!/usr/bin/env python3
import pytest
import autosnapgene as snap
from pathlib import Path
def test_getters(parse_and_write):
    """Round-tripping a file with three traces preserves count and names."""
    for dna in parse_and_write('puc19_bsai_abc.dna'):
        assert dna.count_traces() == count_seq_blocks(dna) == 3
        assert dna.trace_names == [
            'puc19_bsai_a', 'puc19_bsai_b', 'puc19_bsai_c']


@pytest.mark.parametrize(
    'path, count', [
        ('puc19_bsai.dna', 0),
        ('puc19_bsai_a.dna', 1),
        ('puc19_bsai_ab.dna', 2),
        ('puc19_bsai_abc.dna', 3),
    ])
def test_count_traces(examples, path, count):
    """count_traces() matches the number of traces embedded in each file."""
    dna = snap.parse(examples / path)
    assert dna.count_traces() == count


@pytest.mark.parametrize(
    'path, name, has_trace', [
        ('puc19_bsai.dna', 'puc19_bsai_a', False),
        ('puc19_bsai.dna', 'puc19_bsai_b', False),
        ('puc19_bsai.dna', 'puc19_bsai_c', False),
        ('puc19_bsai_a.dna', 'puc19_bsai_a', True),
        ('puc19_bsai_a.dna', 'puc19_bsai_b', False),
        ('puc19_bsai_a.dna', 'puc19_bsai_c', False),
        ('puc19_bsai_ab.dna', 'puc19_bsai_a', True),
        ('puc19_bsai_ab.dna', 'puc19_bsai_b', True),
        ('puc19_bsai_ab.dna', 'puc19_bsai_c', False),
        ('puc19_bsai_abc.dna', 'puc19_bsai_a', True),
        ('puc19_bsai_abc.dna', 'puc19_bsai_b', True),
        ('puc19_bsai_abc.dna', 'puc19_bsai_c', True),
    ])
def test_have_trace(examples, path, name, has_trace):
    """has_trace() reports whether each named trace is present."""
    dna = snap.parse(examples / path)
    assert dna.has_trace(name) == has_trace


@pytest.mark.parametrize(
    'path, names', [
        ('puc19_bsai.dna', [
        ]),
        ('puc19_bsai_a.dna', [
            'puc19_bsai_a',
        ]),
        ('puc19_bsai_ab.dna', [
            'puc19_bsai_a',
            'puc19_bsai_b',
        ]),
        ('puc19_bsai_abc.dna', [
            'puc19_bsai_a',
            'puc19_bsai_b',
            'puc19_bsai_c',
        ]),
    ])
def test_trace_names(examples, path, names):
    """trace_names lists the embedded traces in file order."""
    dna = snap.parse(examples / path)
    assert dna.trace_names == names
def test_add_trace(examples):
    """add_trace() is idempotent: re-adding an existing trace is a no-op."""
    dna = snap.parse(examples / 'puc19_bsai.dna')
    assert dna.count_traces() == count_seq_blocks(dna) == 0
    assert dna.trace_names == []

    dna.add_trace(examples / 'puc19_bsai_a.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 1
    assert dna.trace_names == ['puc19_bsai_a']

    # Adding the same trace again must not create a duplicate.
    dna.add_trace(examples / 'puc19_bsai_a.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 1
    assert dna.trace_names == ['puc19_bsai_a']

    dna.add_trace(examples / 'puc19_bsai_b.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 2
    assert dna.trace_names == ['puc19_bsai_a', 'puc19_bsai_b']

    dna.add_trace(examples / 'puc19_bsai_b.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 2
    assert dna.trace_names == ['puc19_bsai_a', 'puc19_bsai_b']


def test_append_trace(examples):
    """append_trace() always appends, so duplicates are allowed."""
    dna = snap.parse(examples / 'puc19_bsai.dna')
    assert dna.count_traces() == count_seq_blocks(dna) == 0
    assert dna.trace_names == []

    dna.append_trace(examples / 'puc19_bsai_a.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 1
    assert dna.trace_names == [
        'puc19_bsai_a']

    dna.append_trace(examples / 'puc19_bsai_a.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 2
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_a']

    dna.append_trace(examples / 'puc19_bsai_b.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 3
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_a', 'puc19_bsai_b']

    dna.append_trace(examples / 'puc19_bsai_b.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 4
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_a', 'puc19_bsai_b', 'puc19_bsai_b']


def test_prepend_trace(examples):
    """prepend_trace() inserts each new trace at the front of the list."""
    dna = snap.parse(examples / 'puc19_bsai.dna')
    assert dna.count_traces() == count_seq_blocks(dna) == 0
    assert dna.trace_names == []

    dna.prepend_trace(examples / 'puc19_bsai_a.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 1
    assert dna.trace_names == [
        'puc19_bsai_a']

    dna.prepend_trace(examples / 'puc19_bsai_a.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 2
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_a']

    dna.prepend_trace(examples / 'puc19_bsai_b.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 3
    assert dna.trace_names == [
        'puc19_bsai_b', 'puc19_bsai_a', 'puc19_bsai_a']

    dna.prepend_trace(examples / 'puc19_bsai_b.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 4
    assert dna.trace_names == [
        'puc19_bsai_b', 'puc19_bsai_b', 'puc19_bsai_a', 'puc19_bsai_a']
def test_insert_trace(examples):
    """insert_trace() places the new trace at the given index."""
    dna = snap.parse(examples / 'puc19_bsai_ab.dna')
    assert dna.count_traces() == count_seq_blocks(dna) == 2
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_b']

    dna.insert_trace(1, examples / 'puc19_bsai_c.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 3
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_c', 'puc19_bsai_b']


def test_remove_trace(examples):
    """remove_trace() accepts a trace name and errors on unknown names."""
    dna = snap.parse(examples / 'puc19_bsai_abc.dna')
    assert dna.count_traces() == count_seq_blocks(dna) == 3
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_b', 'puc19_bsai_c']

    dna.remove_trace('puc19_bsai_b')
    assert dna.count_traces() == count_seq_blocks(dna) == 2
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_c']

    dna.remove_trace('puc19_bsai_c')
    assert dna.count_traces() == count_seq_blocks(dna) == 1
    assert dna.trace_names == [
        'puc19_bsai_a']

    # NOTE(review): a Path is passed here while the calls above pass bare
    # names -- presumably remove_trace() accepts both; confirm intended.
    dna.remove_trace(examples / 'puc19_bsai_a.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 0
    assert dna.trace_names == []

    with pytest.raises(ValueError):
        dna.remove_trace('xxx')


def test_remove_trace_dups(examples):
    """Removing a name removes every duplicate trace with that name."""
    dna = snap.parse(examples / 'puc19_bsai_ab.dna')
    dna.append_trace(examples / 'puc19_bsai_a.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 3
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_b', 'puc19_bsai_a']

    dna.remove_trace('puc19_bsai_a')
    assert dna.count_traces() == count_seq_blocks(dna) == 1
    assert dna.trace_names == [
        'puc19_bsai_b']


def test_replace_target(examples):
    """replace_trace() swaps one trace for another in place."""
    dna = snap.parse(examples / 'puc19_bsai_ab.dna')
    assert dna.count_traces() == count_seq_blocks(dna) == 2
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_b']

    # NOTE(review): the target path has no '.ab1' suffix, unlike the
    # replacement -- confirm this is intended.
    dna.replace_trace(examples / 'puc19_bsai_b', examples / 'puc19_bsai_c.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 2
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_c']


def test_replace_target_dups(examples):
    """Replacing a duplicated trace collapses the duplicates into one."""
    dna = snap.parse(examples / 'puc19_bsai_ab.dna')
    dna.append_trace(examples / 'puc19_bsai_a.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 3
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_b', 'puc19_bsai_a']

    dna.replace_trace(examples / 'puc19_bsai_a', examples / 'puc19_bsai_c.ab1')
    assert dna.count_traces() == count_seq_blocks(dna) == 2
    assert dna.trace_names == [
        'puc19_bsai_c', 'puc19_bsai_b']
def test_sort_traces(examples):
    """sort_traces() reorders traces (here: reverse-alphabetically)."""
    dna = snap.parse(examples / 'puc19_bsai_abc.dna')
    assert dna.count_traces() == count_seq_blocks(dna) == 3
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_b', 'puc19_bsai_c']

    dna.sort_traces(reverse=True)
    assert dna.count_traces() == count_seq_blocks(dna) == 3
    assert dna.trace_names == [
        'puc19_bsai_c', 'puc19_bsai_b', 'puc19_bsai_a']


def test_clear_traces(examples):
    """clear_traces() removes every trace and is safe to call twice."""
    dna = snap.parse(examples / 'puc19_bsai_abc.dna')
    assert dna.count_traces() == count_seq_blocks(dna) == 3
    assert dna.trace_names == [
        'puc19_bsai_a', 'puc19_bsai_b', 'puc19_bsai_c']

    dna.clear_traces()
    assert dna.count_traces() == count_seq_blocks(dna) == 0
    assert dna.trace_names == []

    # Not an error to clear an empty sequence.
    dna.clear_traces()
    assert dna.count_traces() == count_seq_blocks(dna) == 0
    assert dna.trace_names == []


def test_extract_traces(examples, tmp_path):
    """extract_traces() writes each embedded trace as a .ztr file."""
    dna = snap.parse(examples / 'puc19_bsai_abc.dna')
    dna.extract_traces(tmp_path)

    assert (tmp_path / 'puc19_bsai_a.ztr').exists()
    assert (tmp_path / 'puc19_bsai_b.ztr').exists()
    assert (tmp_path / 'puc19_bsai_c.ztr').exists()


def count_seq_blocks(dna):
    # Helper: number of AlignedSequenceBlock entries backing the traces.
    return len(dna.find_blocks(snap.blocks.AlignedSequenceBlock))
| StarcoderdataPython |
4835297 |
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import os
import threading
import time
import subprocess
from subprocess import PIPE
import argparse
import json
import sys
# this script should be executed in parent dir of scripts
# Root of the image-process benchmark; required for invoking the helper
# shell scripts below.  Aborts immediately when the env var is missing.
try:
    IMAGE_PROCESS_HOME = os.environ['TESTCASE4_HOME'] + "/image-process"
except Exception as e:
    print("Error: TESTCASE4_HOME environment variable not set. Exiting...")
    exit(-1)
def client(client_num, i, single_results, single_logs, single_errors):
    """Run one invocation for client *client_num*, storing outcome at index *i*.

    Parsed timing data lands in ``single_results[i]``; the raw activation
    record goes to ``single_logs[i]`` on success or ``single_errors[i]`` on
    any failure.
    """
    # run invoke command
    command = f"{IMAGE_PROCESS_HOME}/scripts/action_invoke.sh"
    result = subprocess.run(command, stdout=PIPE, stderr=PIPE)
    if result.returncode != 0:
        print(f"Client {client_num} iteration {i} failed to invoke function with returncode {result.returncode}\n")
        # BUG FIX: the original concatenated stderr with itself; record
        # stdout followed by stderr instead.
        single_errors[i] = result.stdout.decode("utf-8").strip() + result.stderr.decode("utf-8").strip()
        return
    activation_id = result.stdout.decode("utf-8").strip()

    # Fetch the activation record for the returned id.
    command = f"{IMAGE_PROCESS_HOME}/scripts/get_activation.sh"
    result = subprocess.run([command, activation_id], stdout=PIPE, stderr=PIPE)
    if result.returncode != 0:
        print(f"Client {client_num} iteration {i} failed to fetch activation record for {activation_id} with returncode {result.returncode}\n")
        # BUG FIX: same stderr-twice duplication as above.
        single_errors[i] = result.stdout.decode("utf-8").strip() + result.stderr.decode("utf-8").strip()
        return
    result = result.stdout.decode("utf-8").strip()

    # Parse and record results.
    parsed_result = parse_result(result)
    if parsed_result:
        single_results[i] = parsed_result
        # A full result has start, end, plus five DB communication times.
        if len(parsed_result) == 7:
            single_logs[i] = result
        else:
            print(f"Client {client_num} had error for invocation {i} (activation_id={activation_id})")
            single_errors[i] = result
def looping_client(client_num, results, logs, errors, num_iters, delay):
    """Run *num_iters* concurrent invocations for one client, *delay* apart.

    Each invocation gets its own thread; per-iteration outcomes are
    collected into slot *client_num* of the shared result lists.
    """
    print(f"client {client_num} start")
    threads = []
    single_results = []
    single_logs = []
    single_errors = []
    # Pre-create one thread and one result/log/error slot per iteration so
    # each worker writes to a fixed index (no locking needed).
    for invoke_num in range(num_iters):
        t = threading.Thread(target=client, args=(client_num, invoke_num, single_results, single_logs, single_errors))
        threads.append(t)
        single_results.append([])
        single_logs.append("")
        single_errors.append("")
    # Launch the invocations, pacing them by *delay* seconds.
    for invoke_num in range(num_iters):
        print(f"client {client_num} started {invoke_num}")
        threads[invoke_num].start()
        time.sleep(delay)
    for invoke_num in range(num_iters):
        threads[invoke_num].join()
    results[client_num] = single_results
    logs[client_num] = single_logs
    errors[client_num] = single_errors
    print(f"client {client_num} finished")
def main():
    """Drive N concurrent clients, then write logs, errors, and results."""
    args = parse_args()
    print(f"About to run {args.num_clients} clients with {args.num_iters} iterations each and {args.delay} delay")

    # Second: invoke the actions
    # Initialize the results and the clients
    threads = []
    results = []
    logs = []
    errors = []
    # One slot per client; each looping_client fills its own index.
    for i in range(args.num_clients):
        results.append([])
        logs.append([])
        errors.append([])
    # Create the clients
    for i in range(args.num_clients):
        t = threading.Thread(target=looping_client, args=(i, results, logs, errors, args.num_iters, args.delay))
        threads.append(t)
    # start the clients
    for i in range(args.num_clients):
        threads[i].start()
    # wait for the clients to complete
    for i in range(args.num_clients):
        threads[i].join()

    # write log to logfile
    with open(args.logfile, 'w', encoding='utf-8') as f:
        for l in logs:
            for log in l:
                if len(log) > 0:
                    f.write(log)
                    f.write('\n\n')
    # write to error file
    with open(args.errfile, 'w', encoding='utf-8') as f:
        for e in errors:
            for err in e:
                if len(err) > 0:
                    f.write(err)
                    f.write('\n\n')
    # write to results file (CSV of per-request timings)
    with open(args.resfile, 'w', encoding='utf-8') as f:
        f.write("start, end, dbtime1, dbtime2, dbtime3, dbtime4, dbtime5\n")
        for rl in results:
            for r in rl:
                if len(r) > 0:
                    string_r = [str(i) for i in r]
                    f.write(', '.join(string_r))
                    f.write('\n')
    write_summary(args.sumfile, results, args.num_clients, args.num_iters, args.delay)
def write_summary(sumfile, results, num_clients, num_iters, delay):
    """Aggregate per-request latencies and write summary sections to *sumfile*.

    ``results`` is a list (per client) of lists of parsed records; a record
    with 7 entries carries start, end, and five DB communication times.
    """
    # Indices into each parsed record.
    START = 0
    END = 1
    DB_TIME_START = 2
    latencies = []
    latencies_no_db = []
    min_start = 0x7fffffffffffffff
    max_end = 0
    # NOTE: the original also tallied an ``err_count`` here that was never
    # read; the dead accumulator has been removed.
    for rs in results:
        for r in rs:
            # Records shorter than 2 entries are failed/empty slots.
            if len(r) >= 2:
                min_start = min(r[START], min_start)
                max_end = max(r[END], max_end)
                # Calculate latency for each result
                latency = r[END] - r[START]
                latencies.append(latency)
                # Latency minus DB time, only for fully successful invocations.
                if len(r) == 7:
                    latency_no_db = latency - sum(r[DB_TIME_START:])
                    latencies_no_db.append(latency_no_db)
    # Calculate number of requests and duration
    num_requests = float(num_clients * num_iters)
    num_successful_requests = float(len(latencies_no_db))
    duration = float(max_end - min_start)
    # Sorted ascending so percentile indexing in format_summary works.
    latencies.sort()
    latencies_no_db.sort()
    with open(sumfile, 'w') as fh:
        format_summary(fh, "Including DB Time", num_requests, latencies, num_successful_requests, duration, sumfile)
        format_summary(fh, "Excluding DB Time", num_requests, latencies_no_db, num_successful_requests, duration, sumfile)
    print(f"Lost {num_requests - len(latencies)} requests....\n")
def format_summary(fh, title, num_requests, latencies, num_successful_requests, duration, sumfile):
    """Write one latency/throughput section to the open file handle *fh*.

    *latencies* must already be sorted ascending; *duration* is in ms.
    """
    # calculate the average latency
    total = 0
    for latency in latencies:
        total += latency
    num_results = len(latencies)
    fh.write("\n")
    fh.write(f"------------------ {title} ---------------------\n")
    fh.write(f"{num_successful_requests} / {num_requests} requests, duration is {duration}\n")
    fh.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
    # BUG FIX: guard on the number of *recorded* latencies -- the original
    # tested num_requests, which raised IndexError (and ZeroDivisionError)
    # when every request failed and ``latencies`` was empty.
    if num_results > 0:
        average_latency = float(total) / num_results
        _50_pc_latency = latencies[int(num_results * 0.5) - 1]
        _75_pc_latency = latencies[int(num_results * 0.75) - 1]
        _90_pc_latency = latencies[int(num_results * 0.9) - 1]
        _95_pc_latency = latencies[int(num_results * 0.95) - 1]
        _99_pc_latency = latencies[int(num_results * 0.99) - 1]
        fh.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" % (average_latency, _50_pc_latency, _75_pc_latency, _90_pc_latency, _95_pc_latency, _99_pc_latency))
    fh.write("throughput (n/s):\n%.2f\n" % (num_requests / (duration / 1000)))
    fh.write("goodput (n/s):\n%.2f\n" % (num_successful_requests / (duration / 1000)))
def parse_result(result):
    """Extract timing data from an activation-record string.

    The record is whatever follows the first ``{`` in *result*, parsed as
    JSON.  Returns:

    * ``(start, end)`` when ``statusCode`` is non-zero,
    * ``[start, end] + commTimes`` when five comm times are present,
    * ``[start, end]`` when the comm times are malformed,
    * ``None`` when the payload cannot be parsed at all.
    """
    json_start = result.find('{')
    json_str = result[json_start:]
    try:
        json_result = json.loads(json_str)
        start = json_result['start']
        end = json_result['end']
        if json_result['statusCode'] != 0:
            return (start, end)
        comm_times = json_result['response']['result']['commTimes']
        if len(comm_times) == 5:
            return [start, end] + comm_times
        return [start, end]
    # BUG FIX: the original bare ``except:`` also swallowed
    # KeyboardInterrupt/SystemExit; catch only parse/shape errors
    # (json.JSONDecodeError subclasses ValueError).
    except (ValueError, KeyError, TypeError):
        print(f"Could not parse results json: {json_str}")
        return None
def parse_args():
    """Parse and validate the benchmark's command-line arguments.

    Positional: num_clients, num_iters, delay.  Options select the four
    output file paths.  Exits with -1 on out-of-range values.
    """
    # Create arg parser
    parser = argparse.ArgumentParser(description='Run image-process sequences.')
    parser.add_argument(
        'num_clients',
        type=int,
        help='Number of concurrent clients'
    )
    parser.add_argument(
        'num_iters',
        type=int,
        help='Number of invocations per client'
    )
    parser.add_argument(
        'delay',
        type=float,
        help='Delay between invocations on each client. Measured in seconds.'
    )
    parser.add_argument(
        '-s',
        '--sumfile',
        default='eval-result.log',
        help='Path to summary output file'
    )
    parser.add_argument(
        '-e',
        '--errfile',
        default='error.dat',
        help='Path to error results output file'
    )
    parser.add_argument(
        '-r',
        '--resfile',
        default="results.csv",
        help='Path to results output file'
    )
    parser.add_argument(
        '-l',
        '--logfile',
        default='log.dat',
        help='Path to log output file'
    )
    args = parser.parse_args()
    # Check bounds
    if args.num_clients < 1:
        print("Error: num_clients must be >= 1")
        parser.print_help()
        exit(-1)
    if args.num_iters < 1:
        print("Error: num_iters must be >= 1")
        parser.print_help()
        exit(-1)
    if args.delay < 0.0:
        print("Error: delay must be >= 0")
        parser.print_help()
        exit(-1)
    return args
if __name__ == "__main__":
    # Entry point: parse args, drive the clients, and write the reports.
    main()
| StarcoderdataPython |
371276 | import types
class Logger:
    """Logger that writes messages to a file, registered callbacks, or stdout.

    Exactly one primary sink is used per message: the log file while one is
    open, otherwise the callback registered for the message's key.  Echoing
    to stdout (``is_stdout``) is applied independently of the primary sink.

    Ported from Python 2: ``print`` statements and the removed
    ``dict.has_key`` made the original a syntax/runtime error on Python 3;
    behavior is otherwise unchanged.
    """

    def __init__(self, filepath=None, is_stdout=True):
        # Path of the log file, or None to log via callbacks/stdout only.
        self._filepath = filepath
        # Open file handle while logging is active; None otherwise.
        self._logfile = None
        # Mapping of message key -> callable(data, **kwargs).
        self._callbacks = {}
        self._is_stdout = is_stdout

    def start(self, text=''):
        """Open the log file (if one is configured) and emit *text* as a header."""
        if self._filepath is not None:
            self._logfile = open(self._filepath, 'w')
            self._logfile.write(text + '\n')
        else:
            self._logfile = None
            print(text)

    def add_callback(self, function, key='message'):
        """Register *function* to receive messages logged under *key*."""
        self._callbacks[key] = function

    def progress(self, percent):
        """Progress hook; intentionally a no-op in this base implementation."""
        pass

    def w(self, data, key='message', **kwargs):
        """Write *data*: to the open file, else to the callback for *key*.

        The registered callback also receives ``key`` in its kwargs.
        """
        if self._logfile is not None:
            self._logfile.write(str(data) + '\n')
        elif key in self._callbacks:
            kwargs['key'] = key
            self._callbacks[key](data, **kwargs)
        if self._is_stdout:
            print(str(data))

    def stop(self, text=''):
        """Emit *text* as a footer and close the log file if one is open."""
        if self._logfile is not None:
            self._logfile.write(text + '\n')
            self._logfile.close()
            self._logfile = None
        else:
            print(text)
| StarcoderdataPython |
6523472 | <reponame>LieonShelly/iOS-RelateServer<gh_stars>0
from App.Log import log_api
from flask import send_from_directory, current_app
@log_api.route('/get', methods=['GET'])
def get_log():
    """Serve the application's output.log as a downloadable attachment."""
    return send_from_directory(
        directory=current_app.config['BASE_DIR'],
        filename="output.log",
        as_attachment=True,
    )
| StarcoderdataPython |
1831494 | """These calculations are for the Critical Natural Capital paper."""
#cd C:\Users\Becky\Documents\raster_calculations
#conda activate py38_gdal312
import glob
import sys
import os
import logging
import multiprocessing
import datetime
import subprocess
import raster_calculations_core
from osgeo import gdal
from osgeo import osr
import taskgraph
#import pygeoprocessing
import ecoshard.geoprocessing as pygeoprocessing
gdal.SetCacheMax(2**30)  # cap GDAL's raster block cache at 1 GiB

# Directory that holds intermediate and final rasters for these analyses.
WORKSPACE_DIR = 'CNC_workspace'
NCPUS = multiprocessing.cpu_count()

# exist_ok replaces the old try/except-OSError idiom, which also
# swallowed unrelated failures such as permission errors.
os.makedirs(WORKSPACE_DIR, exist_ok=True)

logging.basicConfig(
    level=logging.DEBUG,
    format=(
        '%(asctime)s (%(relativeCreated)d) %(levelname)s %(name)s'
        ' [%(funcName)s:%(lineno)d] %(message)s'),
    stream=sys.stdout)
LOGGER = logging.getLogger(__name__)
def main():
"""Write your expression here."""
# CNC calculations
#to find nodata value:
#gdalinfo [raster path]
calculation_list = [
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
},
'target_nodata': -1,
'target_raster_path': "coastal_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
},
'target_nodata': -1,
'target_raster_path': "timber_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
},
'target_nodata': -1,
'target_raster_path': "flood_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': -1,
'target_raster_path': "fuelwood_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': -1,
'target_raster_path': "fwfish_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': -1,
'target_raster_path': "grazing_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Y_90_md5_f8393b73f3548658f610ac47acea72e7.tif",
},
'target_nodata': -1,
'target_raster_path': "marinefish_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': -1,
'target_raster_path': "natureaccess_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': -1,
'target_raster_path': "nitrogen_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': -1,
'target_raster_path': "pollination_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C1_90_md5_3246d7fc06267a18f59ca9a8decf64fe.tif",
},
'target_nodata': -1,
'target_raster_path': "reeftourism_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': -1,
'target_raster_path': "sediment_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
},
'target_nodata': -1,
'target_raster_path': "coastal_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
},
'target_nodata': -1,
'target_raster_path': "carbon_overlapping_A90.tif",
},
{
'expression': '(raster1>0)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
},
'target_nodata': -1,
'target_raster_path': "moisture_overlapping_A90.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
calculation_list = [
# {
# 'expression': 'raster1*(raster2<2)*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\raster_calculations\lspop2017_compressed_md5_53e326f463a2c8a8fa92d8dea6f37df1.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\downstream_mask_A_90_WARPED_near_md5_0ed997ee57533433c6372e070592e880_compressed.tif",
# },
# 'target_nodata': 0,
# 'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
# 'resample_method': 'average',
# 'target_raster_path': "lspop_on_downstream_of_10sA90.tif",
# },
# {
# 'expression': 'raster1*(raster2<2)*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\raster_calculations\lspop2017_compressed_md5_53e326f463a2c8a8fa92d8dea6f37df1.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\downstream_mask_C_90_WARPED_near_md5_6a33ab63b7ac8fb9a679e192741bcac5_compressed.tif",
# },
# 'target_nodata': 0,
# 'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
# 'resample_method': 'average',
# 'target_raster_path': "lspop_on_downstream_of_10sC90.tif",
# },
# {
# 'expression': 'raster1*(raster2<2)*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\raster_calculations\lspop2017_compressed_md5_53e326f463a2c8a8fa92d8dea6f37df1.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\downstream_mask_masked_all_nathab_wstreams_esa2015_md5_c291ff6ef7db1d5ff4d95a82e0f035de.tif",
# },
# 'target_nodata': 0,
# 'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
# 'resample_method': 'average',
# 'target_raster_path': "lspop_on_downstream_of_nathab.tif",
# },
# {
# 'expression': 'raster1*(raster2<2)*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\floodplains\floodplains_masked_pop_30s_md5_c027686bb9a9a36bdababbe8af35d696.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\downstream_mask_A_90_WARPED_near_md5_0ed997ee57533433c6372e070592e880_compressed.tif",
# },
# 'target_nodata': 0,
# 'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
# 'resample_method': 'average',
# 'target_raster_path': "lspopfloodplains_on_downstream_of_10sA90.tif",
# },
{
'expression': 'raster1*(raster2<2)*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\floodplains\floodplains_masked_pop_30s_md5_c027686bb9a9a36bdababbe8af35d696.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\downstream_mask_masked_all_nathab_wstreams_esa2015_md5_c291ff6ef7db1d5ff4d95a82e0f035de.tif",
},
'target_nodata': 0,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'average',
'target_raster_path': "lspopfloodplains_on_downstream_of_nathab.tif",
},
# {
# 'expression': 'raster1*(raster2>0)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\cnc_cv\normalized_pop_on_hab\norm_total_pop_hab_mask_coverage_md5_8f31e5fc65bf07488b4945b35f493d3f.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\A_90_WARPED_near_md5_0ed997ee57533433c6372e070592e880.tif",
# },
# 'target_nodata': 0,
# 'target_pixel_size': (0.002777777777999999864,-0.002777777777999999864),
# 'resample_method': 'average',
# 'target_raster_path': "coastal_pop-on-hab_A90.tif",
# },
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
calculation_list = [
{
'expression': 'raster1*(raster2<2)*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\lspop2017_compressed_md5_53e326f463a2c8a8fa92d8dea6f37df1.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\global_mask_access_A_90_60.0m_WARPED_wgs_near_md5_b8de1aaeec4a800b7626944dd6df52ba.tif",
},
'target_nodata': 0,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'target_raster_path': "lspop_within_60min_of_10sA90.tif",
},
{
'expression': 'raster1*(raster2<2)*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\lspop2017_compressed_md5_53e326f463a2c8a8fa92d8dea6f37df1.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\global_mask_access_C_90_60.0m_WARPED_wgs_near_md5_71f1dc947f32915ab153873c64fa3827.tif",
},
'target_nodata': 0,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'target_raster_path': "lspop_within_60min_of_10sC90.tif",
},
{
'expression': 'raster1*(raster2<2)*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\lspop2017_compressed_md5_53e326f463a2c8a8fa92d8dea6f37df1.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\global_mask_access_masked_all_nathab_wstreams_esa2015_nodata_60.0m_WARPED_wgs_near_md5_21e1df6d6d886b6f388948d7fc660e77.tif",
},
'target_nodata': 0,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'target_raster_path': "lspop_within_60min_of_nathab.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#did average_rasters.py on downstream and within 60 minutes and then did a nodata_replace with the coastal pop:
# python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\lspop_downstream_within_60min_A90_average_raster.tif" "C:\Users\Becky\Documents\raster_calculations\align_to_mask_workspace\coastalpop_on_A90_WARPED_near_md5_131687f40a4985e81e23331f6d479105.tif" "lspop_downstream_within60min_coastal.tif"
# BUT THIS IS ACTUALLY NO GOOD BECAUSE IT'S THE COASTAL POP BENEFITTING FROM A90 MAPPED TO A90, NOT THE PEOPLE WITHIN PROTECTIVE DISTANCE OF A90 HABITAT!!!
calculation_list = [
{
'expression': '(raster1>11)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\overlap\drop1\sum_drop1_compressed_md5_c67b069d9e812dad11f67daf6cd04435.tif",
},
'target_nodata': -1,
'target_raster_path': "pixels_always_selected.tif",
},
{
'expression': '(raster1>=11)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\overlap\drop1\sum_drop1_compressed_md5_c67b069d9e812dad11f67daf6cd04435.tif",
},
'target_nodata': -1,
'target_raster_path': "pixels_90pct_selected.tif",
},
{
'expression': '(raster1>=6)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\overlap\drop1\sum_drop1_compressed_md5_c67b069d9e812dad11f67daf6cd04435.tif",
},
'target_nodata': -1,
'target_raster_path': "pixels_50pct_selected.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#Fig 1:
#First - Run cna_masks.py with the following inputs using (near) for reproject to WGS & mask to land and eez
#MASK_ECOSHARD_URL = (
#'https://storage.googleapis.com/critical-natural-capital-ecoshards/habmasks/landmask_10s_md5_748981cbf6ebf22643a3a3e655ec50ce_compressed_reduce8x.tif')
#'https://storage.googleapis.com/critical-natural-capital-ecoshards/habmasks/EEZ_mask_0027_compressed_md5_0f25e6a690fef616d34c5675b57e76f8_reduce8x.tif')
# RASTER_LIST = [
#('solution_A_all_targets_2km_compressed_md5_46647c1d514427417a588674a98fd93b.tif', True, False),
#('solution_B_all_targets_2km_compressed_md5_46640e0340231bc3f7a3d9c286985d3f.tif', True, False),
#Then do: python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\solution_A_all_targets_2km_compressed_WARPED_near_MASKED_land_md5_d95883e02b205e300b232ef156bcc45b.tif" "C:\Users\Becky\Documents\cnc_project\supporting_layers\natural_assets_wstreams_.0027_to_.022_0s_md5_48c16399d89fe8f9411c4e905873b40f.tif" solution_A_all_targets_2km_land_wgs.02_fill.tif
#python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\solution_A_all_targets_2km_compressed_WARPED_near_MASKED_eez_md5_227e3df7cb09bfb6c7f183f8dc721157.tif" "C:\Users\Becky\Documents\cnc_project\supporting_layers\EEZ_mask_0027_compressed_md5_0f25e6a690fef616d34c5675b57e76f8_reduce8x.tif" solution_A_all_targets_2km_eez_wgs.02_fill.tif
#python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\solution_B_all_targets_2km_compressed_WARPED_near_MASKED_land_md5_9e08e0b58df950a4e9772c0a8e36e867.tif" "C:\Users\Becky\Documents\cnc_project\supporting_layers\natural_assets_wstreams_.0027_to_.022_0s_md5_48c16399d89fe8f9411c4e905873b40f.tif" solution_B_all_targets_2km_land_wgs.02_fill.tif
#python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\solution_B_all_targets_2km_compressed_WARPED_near_MASKED_eez_md5_ef870a38c66a26c1b718a8ffde07c4fa.tif" "C:\Users\Becky\Documents\cnc_project\supporting_layers\EEZ_mask_0027_compressed_md5_0f25e6a690fef616d34c5675b57e76f8_reduce8x.tif" solution_B_all_targets_2km_eez_wgs.02_fill.tif
#python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\solution_C_all_targets_2km_compressed_WARPED_near_md5_c2733d7dc996e039f2ffdcf4a1ce412b.tif" "C:\Users\Becky\Documents\cnc_project\supporting_layers\natural_assets_wstreams_.0027_to_.022_0s_md5_48c16399d89fe8f9411c4e905873b40f.tif" solution_C_all_targets_2km_land_wgs.02_fill.tif
#Overlap for Fig 2
single_expression = {
'expression': '(raster1>0)*(raster2<1)*(raster3<1) + 2*(raster1<1)*(raster2>0)*(raster3<1) + 3*(raster3>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C_90_md5_bdf604015a7b1c7c78845ad716d568ef.tif",
'raster3': 'A90_C90_overlap.tif',
},
'target_nodata': -1,
'target_raster_path': "ncp1_climate2_overlap3.tif",
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
calculation_list = [
{
'expression': '(raster1>0)*(raster2<1)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C_90_md5_bdf604015a7b1c7c78845ad716d568ef.tif",
},
'target_nodata': -1,
'target_raster_path': "A90_nonoverlapping_C90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C_90_md5_bdf604015a7b1c7c78845ad716d568ef.tif",
},
'target_nodata': -1,
'target_raster_path': "C90_nonoverlapping_A90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C_90_md5_bdf604015a7b1c7c78845ad716d568ef.tif",
},
'target_nodata': -1,
'target_raster_path': "A90_C90_overlap.tif",
},
{
'expression': '(raster1>0)*(raster2<1)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B_90_md5_b08de6ccc0fc3e122450c1ccfcb8b60d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D_90_md5_893abc862f38d66e222a99fa1808dd34.tif",
},
'target_nodata': -1,
'target_raster_path': "B90_nonoverlapping_D90.tif",
},
{
'expression': '(raster1>0)*(raster2<1)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C_90_md5_bdf604015a7b1c7c78845ad716d568ef.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D_90_md5_893abc862f38d66e222a99fa1808dd34.tif",
},
'target_nodata': -1,
'target_raster_path': "C90_nonoverlapping_D90.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#Overlap analyses to make correlation table
#coastal - S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif
#timber - T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif
#flood - U_90_md5_258160b638e742e91b84979e6b2c748f.tif
#fuelwood - V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif
#fwfish - W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif
#grazing - X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif
#marinefish - Y_90_md5_f8393b73f3548658f610ac47acea72e7.tif
#natureacces - Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif
#nitrogen - A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif
#pollination - B1_90_md5_14484122eba5a970559c57a48621d3fd.tif
#reeftourism - C1_90_md5_3246d7fc06267a18f59ca9a8decf64fe.tif
#sediment - D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif
#carbon - H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d
#moisture - I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47
#SINGLESERVICE_NONOVERLAPS
calculation_list = [
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
},
'target_nodata': -1,
'target_raster_path': "coastal_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
},
'target_nodata': -1,
'target_raster_path': "timber_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
},
'target_nodata': -1,
'target_raster_path': "flood_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': -1,
'target_raster_path': "fuelwood_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': -1,
'target_raster_path': "fwfish_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': -1,
'target_raster_path': "grazing_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Y_90_md5_f8393b73f3548658f610ac47acea72e7.tif",
},
'target_nodata': -1,
'target_raster_path': "marinefish_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': -1,
'target_raster_path': "natureaccess_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': -1,
'target_raster_path': "nitrogen_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': -1,
'target_raster_path': "pollination_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C1_90_md5_3246d7fc06267a18f59ca9a8decf64fe.tif",
},
'target_nodata': -1,
'target_raster_path': "reeftourism_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': -1,
'target_raster_path': "sediment_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
},
'target_nodata': -1,
'target_raster_path': "coastal_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
},
'target_nodata': -1,
'target_raster_path': "carbon_nonoverlapping_A90.tif",
},
{
'expression': '(raster1<1)*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
},
'target_nodata': -1,
'target_raster_path': "moisture_nonoverlapping_A90.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#PAIRWISE OVERLAPS
calculation_list = [
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_coastal_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_coastal_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_timber_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_flood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_fuelwood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_grazing_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C1_90_md5_3246d7fc06267a18f59ca9a8decf64fe.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_reeftourism_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_moisture_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_timber_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_flood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_fuelwood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_grazing_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_timber_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_flood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_fuelwood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_grazing_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Y_90_md5_f8393b73f3548658f610ac47acea72e7.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_marinefish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C1_90_md5_3246d7fc06267a18f59ca9a8decf64fe.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_reeftourism_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C1_90_md5_3246d7fc06267a18f59ca9a8decf64fe.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Y_90_md5_f8393b73f3548658f610ac47acea72e7.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_marinefish_reeftourism_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_flood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_fuelwood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_fuelwood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_grazing_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fuelwood_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fuelwood_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fuelwood_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fuelwood_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fuelwood_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fwfish_grazing_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fwfish_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fwfish_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fwfish_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fwfish_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_grazing_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_grazing_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_grazing_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_grazing_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_natureaccess_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_natureaccess_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_natureaccess_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_nitrogen_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_nitrogen_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_pollination_sediment_90.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#POPULATION STUFF##
# (review) Unreachable: an earlier `return` above exits the function first.
# Masks the lspop2017 population-count raster by a warped 10s-resolution
# solution raster.  The A_90 and B_90 entries are disabled; only C_90 would
# run if this section were re-enabled.
calculation_list = [ #Trying at a finer resolution - 10s A90 -- there are significantly fewer people so this is probably the correct way to do it
#{
# 'expression': 'raster1*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\A_90_WARPED_near_md5_0ed997ee57533433c6372e070592e880.tif",
# 'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
# },
# 'target_nodata': -9999,
# 'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
# 'resample_method': 'near',
# 'target_raster_path': "lspop2017_on_10sA90.tif",
#},
#{
# 'expression': 'raster1*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\B_90_WARPED_near_md5_2b44cf1e234acbd8d12156068ba8ce2e.tif",
# 'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
# },
# 'target_nodata': -9999,
# 'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
# 'resample_method': 'near',
# 'target_raster_path': "lspop2017_on_10sB90.tif",
#},
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\C_90_WARPED_near_md5_6a33ab63b7ac8fb9a679e192741bcac5.tif",
'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'near',
'target_raster_path': "lspop2017_on_10sC90.tif",
},
]
# Run the queued jobs and exit (same pattern as the active section above).
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# (review) Unreachable (earlier `return`).  Overlays the lspop2017 population
# raster on the land-masked, 2km-warped A/B/C 90%-target solutions, resampled
# with 'average' to the ~30-arcsecond (0.00833 deg) population grid.  The
# trailing commented entries did the same with the Chi relative-wealth index
# at a coarser pixel size.
calculation_list = [
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\A_90_WARPED_near_MASKED_land_2km_md5_66c8b850ace04761abef3a1d7a02f04a.tif",
'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'average',
'target_raster_path': "lspop2017_on_A90.tif",
},
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\B_90_WARPED_near_MASKED_land_2km_md5_8e7a1e1badc25b30b5dd20d9c8ae4c85.tif",
'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'average',
'target_raster_path': "lspop2017_on_B90.tif",
},
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\C_90_WARPED_near_2km_md5_f54c83a0078f91a2c5cb98c9bd23b22f.tif",
'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'average',
'target_raster_path': "lspop2017_on_C90.tif",
},
#{
# 'expression': 'raster1*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\raster_calculations\align-to-mask-and-normalize\workspace\A_90_WARPED_near_md5_1e9f19fadc8ba5e2b32c5c11bb4154cf.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\poverty\chi_relative_wealth_index.tif",
# },
# 'target_nodata': -9999,
# 'target_pixel_size': (0.02222222222222399943,-0.02222222222222399943),
# 'resample_method': 'near',
# 'target_raster_path': "chi_relative_wealth_on_A90.tif",
#},
#{
# 'expression': 'raster1*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\raster_calculations\align-to-mask-and-normalize\workspace\B_90_WARPED_near_md5_27f59aaa7d7e4abf71b3f80567bb66db.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\poverty\chi_relative_wealth_index.tif",
# },
# 'target_nodata': -9999,
# 'target_pixel_size': (0.02222222222222399943,-0.02222222222222399943),
# 'resample_method': 'near',
# 'target_raster_path': "chi_relative_wealth_on_B90.tif",
#},
#{
# 'expression': 'raster1*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\raster_calculations\align-to-mask-and-normalize\workspace\C_90_WARPED_near_md5_931c49db12100ab5837c1d0ff199f933.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\poverty\chi_relative_wealth_index.tif",
# },
# 'target_nodata': -9999,
# 'target_pixel_size': (0.02222222222222399943,-0.02222222222222399943),
# 'resample_method': 'near',
# 'target_raster_path': "chi_relative_wealth_on_C90.tif",
#},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# (review) Unreachable (earlier `return`).  Masks the normalized
# nature-access beneficiary rasters (rural/urban population within 1h and 6h
# travel time) by the A_90 solution raster; target nodata 0 so non-solution
# pixels drop out.
calculation_list = [
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\nature_access\global_normalized_people_access_lspop_2017_URCA_rural_60_noneg_md5_dcc342357e635e511e9d43ad1e057c1e.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
},
'target_nodata': 0,
'target_raster_path': "ruralpop_within1hr_A90.tif",
},
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\nature_access\global_normalized_people_access_lspop_2017_URCA_urban_60_noneg_md5_24f9290d317e8985f47a8ae58b67c7b3.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
},
'target_nodata': 0,
'target_raster_path': "urbanpop_within1hr_A90.tif",
},
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\nature_access\global_normalized_people_access_lspop_2017_URCA_rural_360_noneg_md5_d7b34c31cd72b84974da08471dd6620d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
},
'target_nodata': 0,
'target_raster_path': "ruralpop_within6hr_A90.tif",
},
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\nature_access\global_normalized_people_access_lspop_2017_URCA_urban_360_noneg_md5_da47d209c5ca2be346e939a4c33cf7c1.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif",
},
'target_nodata': 0,
'target_raster_path': "urbanpop_within6hr_A90.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# (review) Unreachable (earlier `return`).  Beneficiary-population overlays:
# pollination beneficiaries on A_90, plus lspop masked to binary downstream
# masks (`raster2<2` guards against mask values other than 0/1 -- assumed;
# confirm against the mask rasters).  Two entries were abandoned as the wrong
# approach (see their inline notes) and left commented for the record.
calculation_list = [
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\pollination_norm\norm_ppl_fed_within_2km_per_pixel_mask_to_hab_compressed_md5_e32a0dd59de79a8dfc0d34dc08c18c41.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\A_90_WARPED_near_MASKED_land_2km_md5_66c8b850ace04761abef3a1d7a02f04a.tif",
},
'target_nodata': 0,
'target_pixel_size': (0.002777777777777778,-0.002777777777777778),
'resample_method': 'average',
'target_raster_path': "pollinationpop_on_A90.tif",
},
#{
# 'expression': 'raster1*(raster2)', # this is not the right way to do this!! I need the population mapped back to habitat but that seems like it hasn't been normalized correctly because it exceeds this total coastal pop
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\cnc_cv\normalized_pop_on_hab\total_pop_masked_by_10m_md5_ef02b7ee48fa100f877e3a1671564be2.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\A_90_WARPED_near_MASKED_land_2km_md5_66c8b850ace04761abef3a1d7a02f04a.tif",
# },
# 'target_nodata': 0,
# 'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
# 'resample_method': 'average',
# 'target_raster_path': "coastalpop_on_A90.tif",
#},
#{
# 'expression': 'raster1*(raster2)', #this is not actually the right way to do this - need to delineate the downstream area of A90 and then just mask to population
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\normalized\downstream_bene_2017_hab_normalized_compressed_overviews_md5_7e8c9ecd4092068afaebc1a4b1efe3ce.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\A_90_WARPED_near_MASKED_land_2km_md5_66c8b850ace04761abef3a1d7a02f04a.tif",
# },
# 'target_nodata': 0,
# 'target_pixel_size': (0.005555555555555555768,-0.005555555555555555768),
# 'resample_method': 'average',
# 'target_raster_path': "downstreampop_A90.tif",
#},
{
'expression': 'raster1*(raster2<2)*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\lspop2017_compressed_md5_53e326f463a2c8a8fa92d8dea6f37df1.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\downstream_mask_A_90_WARPED_near_md5_0ed997ee57533433c6372e070592e880_compressed.tif",
},
'target_nodata': 0,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'average',
'target_raster_path': "lspop_on_downstream_of_10sA90.tif",
},
{
'expression': 'raster1*(raster2<2)*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\lspop2017_compressed_md5_53e326f463a2c8a8fa92d8dea6f37df1.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\downstream_mask_C_90_WARPED_near_md5_6a33ab63b7ac8fb9a679e192741bcac5_compressed.tif",
},
'target_nodata': 0,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'average',
'target_raster_path': "lspop_on_downstream_of_10sC90.tif",
},
{
'expression': 'raster1*(raster2<2)*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\lspop2017_compressed_md5_53e326f463a2c8a8fa92d8dea6f37df1.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\beneficiaries\downstream_mask_masked_all_nathab_wstreams_esa2015_md5_c291ff6ef7db1d5ff4d95a82e0f035de.tif",
},
'target_nodata': 0,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'average',
'target_raster_path': "lspop_on_downstream_of_nathab.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# (review) Unreachable (earlier `return`).  Poverty overlays: `raster2<0`
# selects pixels whose Chi relative-wealth index is negative (poorer than
# average -- assumed; confirm index semantics), so the product is the
# population living in those pixels, first globally, then per solution.
single_expression = {
'expression': 'raster1*(raster2<0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\poverty\chi_relative_wealth_index.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'near',
'target_raster_path': "lspop_negchi.tif",
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# Same population-below-zero-wealth overlay, restricted to each of the A/B/C
# 90%-target solutions (the chi_relative_wealth_on_*90 inputs were produced
# by an earlier, now-commented section above).
calculation_list = [
{
'expression': '(raster1<0)*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\overlap\population\chi_relative_wealth_on_A90.tif",
'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'near',
'target_raster_path': "pop_negchi_on_A90.tif",
},
{
'expression': '(raster1<0)*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\overlap\population\chi_relative_wealth_on_B90.tif",
'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'near',
'target_raster_path': "pop_negchi_on_B90.tif",
},
{
'expression': '(raster1<0)*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\overlap\population\chi_relative_wealth_on_C90.tif",
'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'near',
'target_raster_path': "pop_negchi_on_C90.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#For Pat's analysis
#This doesn't work anymore, something about bounding box not fitting... so use cna_align_to_mask.py instead:
#MASK_ECOSHARD_URL = ('https://storage.googleapis.com/critical-natural-capital-ecoshards/habmasks/landmask_0s_2km_moisturextent_md5_b91bdc0eed9397d0ed104be8cb145880.tif')
#RASTER_LIST= [('A_90_md5_79f5e0d5d5029d90e8f10d5932da93ff.tif', True, False),]
#RESAMPLE_MODE = 'near'
#it yields A_90_WARPED_near_MASKED_md5_66c8b850ace04761abef3a1d7a02f04a.tif which I renamed A_90_WARPED_near_MASKED_land0s_2km_md5_66c8b850ace04761abef3a1d7a02f04a.tif
# wgs84_srs = osr.SpatialReference()
# wgs84_srs.ImportFromEPSG(4326)
#
# single_expression = {
# 'expression': 'raster1*(raster2>-1)',
# 'symbol_to_path_map': {
# #'raster1': r"C:\Users\Becky\Documents\raster_calculations\align-to-mask-and-normalize\workspace\A_90_WARPED_near_md5_1e9f19fadc8ba5e2b32c5c11bb4154cf.tif",
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A_90.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\landmask_0s_2km_moisturextent.tif",
# },
# 'target_nodata': -9999,
# 'target_projection_wkt': wgs84_srs.ExportToWkt(),
# 'target_pixel_size': (0.02131900000000000114,-0.02131900000000000114),
# 'resample_method': 'near',
# 'target_raster_path': "A_90_land_nathab.tif",
# }
#
# raster_calculations_core.evaluate_calculation(
# single_expression, TASK_GRAPH, WORKSPACE_DIR)
#
# TASK_GRAPH.join()
# TASK_GRAPH.close()
#
# return
#then use nodata_replace using landmask_0s_2km_moisturextent and then do the below on that resulting raster
# python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\A_90_WARPED_near_MASKED_land_2km_md5_66c8b850ace04761abef3a1d7a02f04a.tif" "C:\Users\Becky\Documents\cnc_project\supporting_layers\landmask_0s_2km_moisturextent_md5_b91bdc0eed9397d0ed104be8cb145880.tif" A_90_land_nodata0s.tif
# (review) Unreachable (earlier `return`).  Aggregates the nodata-filled A_90
# solution to a ~1.5-degree grid with 'average' resampling, i.e. the
# proportion of each coarse cell covered by the solution; `raster2>-6`
# appears to act as a valid-data mask on the e-source raster -- confirm the
# -6 threshold against that raster's value range.
single_expression = {
'expression': '(raster1)*(raster2>-6)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\A_90_land_nodata0s.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_e_source_ratio_ann_mean.tif",
},
'target_nodata': -9999,
'target_pixel_size': (1.493750063578287657,-1.486111252396195015),
'resample_method': 'average',
'target_raster_path': "A_90_1.5d_prop_area.tif",
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# Difference map between the old CNA solution and the new one at the 1.5-degree
# grid (positive where the old solution selected area the new one does not).
single_expression = {
'expression': 'raster1-raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Downloads\cntr_2km_nocarb_land_resampled15_mode.tif",
'raster2': r"C:\Users\Becky\Documents\raster_calculations\align-to-mask-and-normalize\workspace\solution_A_all_targets_resampled1.5d_near_md5_98d52ff13ca9a38784a687339e30b2fd.tif",
},
'target_nodata': -9999,
'target_pixel_size': (1.493750063578287657,-1.486111252396195015),
'resample_method': 'near',
'target_raster_path': "diff_oldcna_newcna_1.5d.tif",
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# (review) Unreachable (earlier `return`).  Clamps negative values (artifacts
# of nodata overlap) to 0 in the four normalized nature-access rasters; these
# produce the *_noneg inputs consumed by a section above.
calculation_list = [ #scrubbing out the accidental negatives from nodata overlap in the nature access
{
'expression': '(raster1>=0)*raster1 + (raster1<0)*0',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\align_to_mask_workspace\ecoshards\global_normalized_people_access_lspop_2017_URCA_rural_60.0m_md5_77e111769dcab34cf992fb0d3a9eb49c.tif",
},
'target_nodata': -1,
'target_raster_path': "global_normalized_people_access_lspop_2017_URCA_rural_60_noneg.tif",
},
{
'expression': '(raster1>=0)*raster1 + (raster1<0)*0',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\align_to_mask_workspace\ecoshards\global_normalized_people_access_lspop_2017_URCA_rural_360.0m_md5_5cd804c489ab949c4891410d65b71057.tif",
},
'target_nodata': -1,
'target_raster_path': "global_normalized_people_access_lspop_2017_URCA_rural_360_noneg.tif",
},
{
'expression': '(raster1>=0)*raster1 + (raster1<0)*0',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\align_to_mask_workspace\ecoshards\global_normalized_people_access_lspop_2017_URCA_urban_60.0m_md5_77d3af07d88721543128205645f75b8d.tif",
},
'target_nodata': -1,
'target_raster_path': "global_normalized_people_access_lspop_2017_URCA_urban_60_noneg.tif",
},
{
'expression': '(raster1>=0)*raster1 + (raster1<0)*0',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\align_to_mask_workspace\ecoshards\global_normalized_people_access_lspop_2017_URCA_urban_360.0m_md5_e7720b3032df6ea8293cddcb2be26802.tif",
},
'target_nodata': -1,
'target_raster_path': "global_normalized_people_access_lspop_2017_URCA_urban_360_noneg.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# (review) Unreachable (earlier `return`).  Scales three masks by distinct
# constants (10/2/3) so that averaging them reveals, by the resulting value,
# which combination of layers covers each pixel (a coverage-diagnostic trick).
calculation_list = [ #making masks so I can try average_rasters on these and masked_all_nathab_wstreams_esa2015_nodata_WARPED_near_md5_d801fffb0e3fbfd8d7ffb508f18ebb7c.tif to see where these exist outside that mask (they shouldn't, but they do)
{
'expression': 'raster1*10',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\resampled_Eckert2km\masked_all_nathab_wstreams_esa2015_nodata_WARPED_near_md5_d801fffb0e3fbfd8d7ffb508f18ebb7c.tif",
},
'target_nodata': 0,
'target_raster_path': "nathab_10.tif",
},
{
'expression': 'raster1*2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\churn\target_stitch_dir\A_100_md5_7474de70786c3dce0b760c691368c839.tif",
},
'target_nodata': 0,
'target_raster_path': "A100_2.tif",
},
{
'expression': 'raster1*3',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\churn\target_stitch_dir\A_90_md5_396196b740bcbb151e033ff9f9609fe5.tif",
},
'target_nodata': 0,
'target_raster_path': "A90_3.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# Realized sediment service = potential deposition x downstream beneficiaries
# (500km stream-attenuated), deliberately NOT masked to natural habitat.
calculation_list = [ #making a realized sediment layer that's not masked to nathab, for a presentation
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\potential_sedimentdeposition_md5_aa9ee6050c423b6da37f8c2723d9b513.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_2017_500000.0_compressed_overviews_md5_a73557e0c216e390d4e288816c9838bb.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.005555555555555556, -0.005555555555555556),
'resample_method': 'near',
'target_raster_path': "realized_sediment_attn_500k.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#marinefish problems
# (review) Unreachable (earlier `return`).  Three successive attempts at
# building a marine-fish extent mask; the inline notes record why the first
# two were rejected and the shell commands that complete the final approach.
single_expression = {
'expression': '(raster1>=0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\solutions\D-I1_90s\Y_90_md5_68117f49cd41e41f3a8915a2a8c941b1.tif",
},
'target_nodata': 0,
'target_raster_path': "marinefish_extent_Eckert.tif", #this is not a good mask to use because the optimization created haloes around some islands
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#create mask by rasterizing EEZ vector and then use align_to_mask but without using a mask to align that mask to Eckert 2km
single_expression = {
'expression': 'raster1*(raster2>0)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\Eckert\90_targets\Y_90_md5_81cd585dcfadd703e24c0a9229c1cdc9.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\eez_mask_eckert_2km_md5_3208b8094dbece295374bddf4d99d192.tif",
},
'target_nodata': 0,
'target_raster_path': "Y_90_md5_81cd585dcfadd703e24c0a9229c1cdc9_nodata0.tif", #still has haloes but at least the mask is the full EEZ area
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#this is the way to fix it:
# Convert the EEZ mask to 0-inside / nodata-outside so nodata_replace (below)
# can fill the optimization haloes with zeros across the whole EEZ.
single_expression = {
'expression': '(raster1>0)*0 + (raster1<1)*-9999',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\align_to_mask_workspace\ecoshards\eez_mask_eckert_2km_md5_3208b8094dbece295374bddf4d99d192.tif",
},
'target_nodata': -9999,
'target_raster_path': "eez_mask0s_eckert_2km.tif",
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# then do python nodata_replace to fill the nodata haloes so that the whole EEZ can show up as CNA area
# python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\resampled_Eckert2km\realized_marinefish_watson_2010_2014_clamped_WARPED_average_MASKED_md5_1c9ea302eeadd8027f6a17e03f943888.tif" "C:\Users\Becky\Documents\cnc_project\supporting_layers\eez_mask0s_eckert_2km_md5_72e7907ce7380f95e20d3c2b4448605b.tif" realized_marinefish_watson_2010_2014_clamped_0sfill_WARPED_average_MASKED.tif
# python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Y_90_md5_f8393b73f3548658f610ac47acea72e7.tif" "C:\Users\Becky\Documents\cnc_project\supporting_layers\eez_mask0s_eckert_2km_md5_72e7907ce7380f95e20d3c2b4448605b.tif" Y_90_0sfill.tif
# (review) Unreachable (earlier `return`).  Clamp: NNth_fl is the
# hand-computed 99th-percentile value (per the inline comment) of the flood
# mitigation layer; values above it are pinned to it and negatives become
# nodata.
NNth_fl = 12445
clamped_service_list = [ #some services just have crazy high values that throw the whole percentiles off so we're clamping them to the 99th percentile
{
'expression': f'(service>{NNth_fl})*{NNth_fl} + (service<={NNth_fl})*(service>=0)*service + -9999*(service<0)', #sets anything above the 99th percentile value to that value, anything negative to nodata
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\realized_floodmitigation_attn_50km_nathab_md5_3cbadb2d1b4207f029a264e090783c6d.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_floodmitigation_attn_50km_nathab_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
]
for calculation in clamped_service_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# Mask the attenuated flood-mitigation services to natural habitat:
# keep service where mask==1, write nodata (-9999) where mask<1.
calculation_list = [
{
'expression': 'service*mask + (mask<1)*-9999',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\masked_all_nathab_wstreams_esa2015_md5_c291ff6ef7db1d5ff4d95a82e0f035de.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\service\realized_floodmitigation_attn_500km_md5_1b659e3fd93e5f0b6aac396245258517.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_floodmitigation_attn_500km_nathab.tif",
'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
'resample_method': 'near',
},
{
'expression': 'service*mask + (mask<1)*-9999',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\masked_all_nathab_wstreams_esa2015_md5_c291ff6ef7db1d5ff4d95a82e0f035de.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\service\realized_floodmitigation_attn_50km_md5_029cbd998fc4464cf04861cf58dddc1d.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_floodmitigation_attn_50km_nathab.tif",
'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
'resample_method': 'near',
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# (review) Unreachable (earlier `return`).  Realized nitrogen retention
# (unmasked potential x stream-attenuated downstream beneficiaries) at two
# attenuation distances: 500km and 50km.
calculation_list = [
{
'expression': 'service*benes',
'symbol_to_path_map': {
'benes': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_2017_500000.0_compressed_overviews_md5_a73557e0c216e390d4e288816c9838bb.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\potential_nitrogenretention_nci_unmasked_md5_09425dff042ea8dbb94a8d1977be472a.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_nitrogenretention_unmasked_attn_500k.tif",
'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
'resample_method': 'near',
},
{
'expression': 'service*benes',
'symbol_to_path_map': {
'benes': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_2017_50000.0_compressed_overviews_md5_ddbc9006bbfb21ef681a42bf78046b69.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\potential_nitrogenretention_nci_unmasked_md5_09425dff042ea8dbb94a8d1977be472a.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_nitrogenretention_unmasked_attn_50k.tif",
'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
'resample_method': 'near',
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
calculation_list = [
# {
# 'expression': 'raster1*raster2',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_nitrogenretention_nci_nathab_clamped_md5_fff6f944bfaf13baf24129f7fbfb1107.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_2017_50000.0_compressed_overviews_md5_ddbc9006bbfb21ef681a42bf78046b69.tif",
# },
# 'target_nodata': -9999,
# 'target_raster_path': "realized_nitrogenretention_attn_50km.tif",
# 'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
# 'resample_method': 'near',
# },
# {
# 'expression': 'raster1*raster2',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_sedimentdeposition_nathab_clamped_md5_1d826c8885c6479b6307bc345b95d8bf.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_2017_50000.0_compressed_overviews_md5_ddbc9006bbfb21ef681a42bf78046b69.tif",
# },
# 'target_nodata': -9999,
# 'target_raster_path': "realized_sedimentdeposition_attn_50km.tif",
# 'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
# 'resample_method': 'near',
# },
# {
# 'expression': 'raster1*raster2',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_nitrogenretention_nci_nathab_clamped_md5_fff6f944bfaf13baf24129f7fbfb1107.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_2017_500000.0_compressed_overviews_md5_a73557e0c216e390d4e288816c9838bb.tif",
# },
# 'target_nodata': -9999,
# 'target_raster_path': "realized_nitrogenretention_attn_500km.tif",
# 'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
# 'resample_method': 'near',
# },
# {
# 'expression': 'raster1*raster2',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_sedimentdeposition_nathab_clamped_md5_1d826c8885c6479b6307bc345b95d8bf.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_2017_500000.0_compressed_overviews_md5_a73557e0c216e390d4e288816c9838bb.tif",
# },
# 'target_nodata': -9999,
# 'target_raster_path': "realized_sedimentdeposition_attn_500km.tif",
# 'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
# 'resample_method': 'near',
# },
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\potential_floodmitigation_PotInflGStorage.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_floodplain_500000.0_compressed_overviews_md5_2ce1f378646fcfe8c9ddf98bd6212d03.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_floodmitigation_attn_500km.tif",
'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
'resample_method': 'near',
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\potential_floodmitigation_PotInflGStorage.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_floodplain_50000.0_compressed_overviews_md5_6c604be0fc0d87225dd81adeeb4b67a3.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_floodmitigation_attn_50km.tif",
'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
'resample_method': 'near',
},
#{
# 'expression': 'raster1*raster2',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_nitrogenretention_nci_nathab_clamped_md5_fff6f944bfaf13baf24129f7fbfb1107.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\downstream_bene_2017_compressed_overviews_md5_32c17fb4ab0eb2b1fe193839dbc7e85b.tif",
# },
# 'target_nodata': -9999,
# 'target_raster_path': "realized_nitrogenretention_0attn.tif",
# 'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
# 'resample_method': 'near',
#},
#{
# 'expression': 'raster1*raster2',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_sedimentdeposition_nathab_clamped_md5_1d826c8885c6479b6307bc345b95d8bf.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\downstream_bene_2017_compressed_overviews_md5_32c17fb4ab0eb2b1fe193839dbc7e85b.tif",
# },
# 'target_nodata': -9999,
# 'target_raster_path': "realized_sedimentdeposition_0attn.tif",
# 'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
# 'resample_method': 'near',
#},
]
    # Queue every calculation on the shared task graph, wait for all tasks
    # to finish, then exit the enclosing function early.  Everything after
    # this `return` is retained only as a record of earlier runs and is
    # unreachable until the `return` is removed/moved.
    for calculation in calculation_list:
        raster_calculations_core.evaluate_calculation(
            calculation, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Realized nitrogen retention and sediment deposition: potential-service
    # raster x stream-attenuated downstream-beneficiary raster (per the input
    # filenames), at two attenuation factors (0.9999 and 0.999).
    # NOTE: unreachable — follows an earlier `return` in this function.
    calculation_list = [
        {
            'expression': 'raster1*raster2',
            'symbol_to_path_map': {
                'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_nitrogenretention_nci_nathab_clamped_md5_fff6f944bfaf13baf24129f7fbfb1107.tif",
                'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_2017_0.9999_normalized_compressed_overviews_md5_afbbfe893a6fb155aa6fffc54c6e8b69.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "realized_nitrogenretention_attn_0.9999.tif",
            'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
            'resample_method': 'near',
        },
        {
            'expression': 'raster1*raster2',
            'symbol_to_path_map': {
                'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_sedimentdeposition_nathab_clamped_md5_1d826c8885c6479b6307bc345b95d8bf.tif",
                'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_2017_0.9999_normalized_compressed_overviews_md5_afbbfe893a6fb155aa6fffc54c6e8b69.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "realized_sedimentdeposition_attn_0.9999.tif",
            'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
            'resample_method': 'near',
        },
        {
            'expression': 'raster1*raster2',
            'symbol_to_path_map': {
                'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_nitrogenretention_nci_nathab_clamped_md5_fff6f944bfaf13baf24129f7fbfb1107.tif",
                'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_2017_0.999_compressed_overviews_md5_d15639dbfd5914f44c59642c459b6ced.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "realized_nitrogenretention_attn_0.999.tif",
            'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
            'resample_method': 'near',
        },
        {
            'expression': 'raster1*raster2',
            'symbol_to_path_map': {
                'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_sedimentdeposition_nathab_clamped_md5_1d826c8885c6479b6307bc345b95d8bf.tif",
                'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\stream_attenuated\downstream_bene_2017_0.999_compressed_overviews_md5_d15639dbfd5914f44c59642c459b6ced.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "realized_sedimentdeposition_attn_0.999.tif",
            'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
            'resample_method': 'near',
        },
    ]
    for calculation in calculation_list:
        raster_calculations_core.evaluate_calculation(
            calculation, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Clip the normalized coastal-protection service to offshore areas by
    # multiplying it with an EEZ mask.  Unreachable (follows an earlier
    # `return`); kept as a record of a prior run.
    masked_service_list = [
        {
            'expression': 'service*mask',
            'symbol_to_path_map': {
                'mask': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\EEZ_mask_0027_compressed_md5_0f25e6a690fef616d34c5675b57e76f8.tif",
                'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_coastalprotection_norm_md5_485aef1d6c412bde472bdaa1393100d7.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "realized_coastalprotection_norm_offshore.tif",
            'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
            'resample_method': 'near',
        },
    ]
    for calculation in masked_service_list:
        raster_calculations_core.evaluate_calculation(
            calculation, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Normalized coastal protection: product of the CV population and CV
    # value layers, with negative values in either input zeroed out by the
    # (raster>0) guards.  Unreachable; record of a prior run.
    single_expression = {
        'expression': '(raster1>0)*raster1*(raster2>0)*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\cnc_cv\global_cv_pop_md5_d7af43a2656b44838f01796f523fb696.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\cnc_cv\global_cv_value_md5_0f6c1b3a2904d7de5c263490814c4a44.tif",
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.0027777777777777778, -0.0027777777777777778),
        'resample_method': 'near',
        'target_raster_path': "realized_coastalprotection_norm.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Realized (normalized) nitrogen retention and sediment deposition:
    # potential service x normalized downstream beneficiaries.  Unreachable;
    # record of a prior run.
    expression_list = [
        {
            'expression': 'raster1*raster2',
            'symbol_to_path_map': {
                'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_nitrogenretention_nci_nathab_clamped_md5_0403ac4f961b259a89c013d939c39463.tif",
                'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\downstream_bene_2017_normalized_compressed_overviews_md5_0da01aaa9d5d03c652a03b64afde24f8.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "realized_nitrogenretention_norm.tif",
            'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
            'resample_method': 'near',
        },
        {
            'expression': 'raster1*raster2',
            'symbol_to_path_map': {
                'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_sedimentdeposition_nathab_clamped_md5_1d826c8885c6479b6307bc345b95d8bf.tif",
                'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\downstream_bene_2017_normalized_compressed_overviews_md5_0da01aaa9d5d03c652a03b64afde24f8.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "realized_sedimentdeposition_norm.tif",
            'target_pixel_size': (0.002777777777777778, -0.002777777777777778),
            'resample_method': 'near',
        },
    ]
    for calculation in expression_list:
        raster_calculations_core.evaluate_calculation(
            calculation, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Realized (normalized) flood mitigation: Mark Mulligan's groundwater
    # storage ratio x normalized floodplain downstream beneficiaries.
    # Unreachable; record of a prior run.
    single_expression = {
        'expression': 'raster1*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\MarkMulligansLayer\acc_gr_storage_ratio__lt_10_globally.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\floodplains\downstream_bene_floodplain_hab_normalized_compressed_overviews_md5_07d02a635bc908fed74d0a6e73152dc6.tif",
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.0027777777777777778, -0.0027777777777777778),
        'resample_method': 'near',
        'target_raster_path': "realized_floodmitigation_norm.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    #first make this mask. then upload it to a bucket. then use align to mask and normalize to reproject it in Eckert 2km.
    #then use that mask to remask/project all of the layers in align to mask and normalize. do Mark's both ways
    # Natural-habitat (with streams) mask intersected with the land mask;
    # target_nodata of 0 makes all masked-out cells nodata.  Unreachable;
    # record of a prior run.
    single_expression = {
        'expression': 'raster1*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\masked_all_nathab_wstreams_esa2015_md5_c291ff6ef7db1d5ff4d95a82e0f035de.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\landmask_10s_md5_748981cbf6ebf22643a3a3e655ec50ce.tif",
        },
        'target_nodata': 0,
        'target_pixel_size': (0.0027777777777777778, -0.0027777777777777778),
        'resample_method': 'near',
        'target_raster_path': "masked_all_nathab_wstreams_esa2015_nodata.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    #99.9th percentiles of the new layers (NCI nitrogen retention and new normalized pollination)
    # Clamp each service to its 99.9th-percentile value (constants below) and
    # send negative values to nodata.  Unreachable; record of a prior run.
    NNth_nit = 2325
    NNth_poll = 48
    NNth_fl = 31755
    clamped_service_list = [ #some services just have crazy high values that throw the whole percentiles off so we're clamping them to the 99th percentile
        {
            'expression': f'(service>{NNth_fl})*{NNth_fl} + (service<={NNth_fl})*(service>=0)*service + -9999*(service<0)', #sets anything above the 99th percentile value to that value, anything negative to nodata
            'symbol_to_path_map': {
                'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\realized_floodmitigation_attn_500km_nathab_md5_bc788aea3fd99c82ef38b51693fc2ed5.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "realized_floodmitigation_attn_500km_nathab.tif",
            'target_pixel_size': (0.002777777777778, -0.002777777777778),
        },
        {
            'expression': f'(service>{NNth_nit})*{NNth_nit} + (service<={NNth_nit})*(service>=0)*service + -9999*(service<0)', #sets anything above the 99th percentile value to that value, anything negative to nodata
            'symbol_to_path_map': {
                'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\potential_nitrogenretention_nci_nathab.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "potential_nitrogenretention_nci_nathab_clamped.tif",
            'target_pixel_size': (0.002777777777778, -0.002777777777778),
        },
        {
            'expression': f'(service>{NNth_poll})*({NNth_poll})+(service<={NNth_poll})*(service>=0)*service + -9999*(service<0)',
            'symbol_to_path_map': {
                'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\norm_ppl_fed_within_2km_per_pixel_mask_to_hab_compressed_md5_e32a0dd59de79a8dfc0d34dc08c18c41.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "realized_pollination_norm_nathab_clamped.tif",
            'target_pixel_size': (0.002777777777778, -0.002777777777778),
        },
    ]
    for calculation in clamped_service_list:
        raster_calculations_core.evaluate_calculation(
            calculation, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Back-calculate Mark Mulligan's downstream-beneficiary layer as
    # RealInflGStoragePop / storage ratio; NaN and inf results (e.g. from
    # division by zero) are mapped to nodata via default_nan/default_inf.
    # Unreachable; record of a prior run.
    single_expression = {
        'expression': 'raster2/raster1',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\MarkMulligansLayer\acc_gr_storage_ratio__lt_10_globally.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\MarkMulligansLayer\RealInflGStoragePop.tif",
        },
        'target_nodata': -9999,
        'default_nan': -9999,
        'default_inf': -9999,
        'target_pixel_size': (0.0833333333333333, -0.0833333333333333),
        'resample_method': 'near',
        'target_raster_path': "backcalculated_MM_downstreambenes.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # NOTE(review): exact duplicate of the coastal-protection-norm stanza
    # earlier in this function (same inputs, same output path).  Both are
    # unreachable records of prior runs.
    single_expression = {
        'expression': '(raster1>0)*raster1*(raster2>0)*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\cnc_cv\global_cv_pop_md5_d7af43a2656b44838f01796f523fb696.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\cnc_cv\global_cv_value_md5_0f6c1b3a2904d7de5c263490814c4a44.tif",
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.0027777777777777778, -0.0027777777777777778),
        'resample_method': 'near',
        'target_raster_path': "realized_coastalprotection_norm.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Mask the 2017 population raster to cells where the floodplain mask is
    # defined (>= 0).  Nodata is the int32 minimum-style sentinel used by the
    # population layer.  Unreachable; record of a prior run.
    masked_service_list = [
        {
            'expression': '(raster1>=0)*raster2',
            'symbol_to_path_map': {
                'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\floodplains\global_floodplains_mask.tif",
                'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
            },
            'target_nodata': -2147483647,
            'target_raster_path': "floodplains_masked_pop_30s.tif",
            'target_pixel_size': (0.008333333333333333, -0.008333333333333333),
            'resample_method': 'near',
        },
    ]
    for calculation in masked_service_list:
        raster_calculations_core.evaluate_calculation(
            calculation, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Potential nitrogen retention masked to natural habitat: where the
    # habitat mask is positive keep mask*service, otherwise write -1 (which
    # is also the target nodata).  Unreachable; record of a prior run.
    single_expression = {
        'expression': '(raster1>0)*raster1*raster2 + (raster1<=0)*-1',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\masked_all_nathab_esa2015_md5_50debbf5fba6dbdaabfccbc39a9b1670.tif",
            'raster2':r"C:\Users\Becky\Documents\cnc_project\original_rasters\potential_nitrogenretention_nci_md5_09425dff042ea8dbb94a8d1977be472a.tif",
        },
        'target_nodata': -1,
        'target_pixel_size': (0.0027777777777777778, -0.0027777777777777778),
        'resample_method': 'near',
        'target_raster_path': "potential_nitrogenretention_nci_nathab.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Potential nitrogen retention from NCI NDR outputs: modified load minus
    # export.  Unreachable; record of a prior run.
    single_expression = {
        'expression': 'raster1-raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\nci\ndr\compressed_baseline_currentpractices_300.0_D8_modified_load_md5_a836509e72dacd536764249ea7beb4d7.tif",
            'raster2':r"C:\Users\Becky\Documents\nci\ndr\compressed_baseline_currentpractices_300.0_D8_export_md5_eb9855f076fdc8d45a42ca45b5c23219.tif",
        },
        'target_nodata': -1,
        'target_pixel_size': (0.0027777777777777778, -0.0027777777777777778),
        'resample_method': 'near',
        'target_raster_path': "potential_nitrogenretention_nci.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Normalize 2017 downstream beneficiaries: (bene / dsbsum) * lspopsum,
    # guarded by (raster2>0) against division by zero; default_nan mops up
    # any remaining NaNs.  Unreachable; record of a prior run.
    single_expression = {
        'expression': '(raster1/raster2)*raster3*(raster2>0)',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\downstream_bene_2017_compressed_overviews_md5_a2d9f969617c728311b4f3d33bc5f1f8.tif",
            'raster2':r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\global_stitch_dsbsum_md5_55441129edcc27880861bf448309481a.tif",
            'raster3':r"C:\Users\Becky\Documents\cnc_project\original_rasters\downstream_beneficiaries\global_stitch_lspopsum_md5_a2db49316a2d47840a9a8f17657fff3b.tif",
        },
        'target_nodata': -9999,
        'default_nan': -9999,
        'target_pixel_size': (0.005555555555, -0.005555555555),
        'resample_method': 'near',
        'target_raster_path': "downstream_bene_2017_norm.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Split 2017 population by Urban-Rural Catchment Area class: rural is
    # URCA class >= 8, urban is class < 8 (next stanza).
    # NOTE(review): both components of target_pixel_size are positive here,
    # unlike the negative-y convention used elsewhere in this file — confirm
    # this is intended and not a sign typo.
    # Unreachable; record of a prior run.
    single_expression = {
        'expression': '(raster1>=8)*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\Urban-Rural Catchment Areas_md5_942ffae026b526a1044680e28ef58b89.tif",
            'raster2':r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
        },
        'target_nodata': -1,
        'target_pixel_size': (0.0083333333, 0.0083333333),
        'resample_method': 'near',
        'target_raster_path': "lspop_2017_URCA_rural.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Urban counterpart of the rural split above (URCA class < 8).
    single_expression = {
        'expression': '(raster1<8)*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\Urban-Rural Catchment Areas_md5_942ffae026b526a1044680e28ef58b89.tif",
            'raster2':r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
        },
        'target_nodata': -1,
        'target_pixel_size': (0.0083333333, 0.0083333333),
        'resample_method': 'near',
        'target_raster_path': "lspop_2017_URCA_urban.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    #unfortunately the VuC is nodata in a bunch of places where we have CNA. So need to go back and nodata_replace copy b to where a is nodata:
    # python nodata_replace.py [raster_a_path] [raster_b_path] [target_path]
    # python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\overlap\ctr90_outside_VuC_2km.tif" "C:\Users\Becky\Documents\cnc_project\optimization\ctr90_2km_VuCextent.tif" "full_cntr90_outside_VuC_2km.tif"
    # so actually we need to go back and fill all the nodata in the Vulnerable Carbon layer with 0's instead of nodata
    # python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\supporting_layers\carbon\VuC_top90_2km.tif" "C:\Users\Becky\Documents\cnc_project\supporting_layers\carbon\landmask_0s_2km_VuCextent.tif" "VuC_top90_2km_0s.tif"
    # python nodata_replace.py "C:\Users\Becky\Documents\cnc_project\overlap\moisture_top90_2km_ext.tif" "C:\Users\Becky\Documents\cnc_project\supporting_layers\landmask_0s_2km_moisturextent.tif" "moisture_top90_2km_0s.tif"
    # Binary natural-assets (without streams) layer at 2 km: 1 where the
    # habitat mask is positive and the 0-filled landmask is < 1.
    # Unreachable; record of a prior run.
    single_expression = {
        'expression': '(raster1>0)*(raster2<1)',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\masked_all_nathab_esa2015_md5_50debbf5fba6dbdaabfccbc39a9b1670.tif",
            'raster2':r"C:\Users\Becky\Documents\cnc_project\supporting_layers\landmask_0s_2km.tif",
        },
        'target_nodata': -1,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'near',
        'target_raster_path': "natural_assets_wostreams_300m_to_2km_0s.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Reproject the 2 km carbon-country layer to WGS84 while building a
    # binary natural-assets mask.  Unreachable; record of a prior run.
    wgs84_srs = osr.SpatialReference()
    wgs84_srs.ImportFromEPSG(4326)
    single_expression = {
        'expression': '(raster1>0)*(raster2<1)',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\prioritiz-2km-country\cntr_2km_carb_wgs.tif",
            'raster2':r"C:\Users\Becky\Documents\cnc_project\supporting_layers\landmask_0s_2km.tif",
        },
        'target_nodata': -1,
        'target_projection_wkt': wgs84_srs.ExportToWkt(),
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'near',
        'target_raster_path': "natural_assets_full_2km_0s.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Same natural-assets mask construction as above, but from the
    # with-streams habitat layer.  Unreachable; record of a prior run.
    single_expression = {
        'expression': '(raster1>0)*(raster2<1)',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\masked_all_nathab_wstreams_esa2015_md5_c291ff6ef7db1d5ff4d95a82e0f035de.tif",
            'raster2':r"C:\Users\Becky\Documents\cnc_project\supporting_layers\landmask_0s_2km.tif",
        },
        'target_nodata': -1,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'near',
        'target_raster_path': "natural_assets_300m_to_2km_0s.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Extract class 1 (country-only priority, per the expression) from the
    # non-overlapping composite as a binary mask.
    single_expression = {
        'expression': '(raster1==1)',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\overlap\nonoverlapping_ctr90_moisture_VuC_2km_0s.tif",
        },
        'target_nodata': -9999,
        'target_raster_path': "ctr90_outside_VuC_moisture_2km_0s.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Country-level priorities (class > 2) falling outside the VuC overlap
    # (raster1 < 1).  Unreachable; record of a prior run.
    single_expression = {
        'expression': '(raster1<1)*(raster4>2)',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\overlap\ctr90_VuCtop90_2km_0s.tif",
            'raster4': r"C:\Users\Becky\Documents\cnc_project\optimization\prioritiz-2km-country\cntr_2km_nocarb.tif",
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'near',
        'target_raster_path': "ctr90_outside_VuC_2km_0s.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Composite class map of areas NOT in any pairwise overlap (rasters 1-3
    # all < 1): 1 = country priority (raster4 > 2), 2 = moisture top-90
    # (raster5), 3 = VuC top-90 (raster6).
    single_expression = {
        'expression': '(raster1<1)*(raster2<1)*(raster3<1)*(raster4>2) + 2*(raster1<1)*(raster2<1)*(raster3<1)*raster5 + 3*(raster1<1)*(raster2<1)*(raster3<1)*raster6 ',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\overlap\ctr90_VuCtop90_2km_0s.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\ctr90_moisturetop90_2km_0s.tif",
            'raster3': r"C:\Users\Becky\Documents\cnc_project\overlap\ctr90_moisture_VuC_2km_0s.tif",
            'raster4': r"C:\Users\Becky\Documents\cnc_project\optimization\prioritiz-2km-country\cntr_2km_nocarb.tif",
            'raster5': r"C:\Users\Becky\Documents\cnc_project\overlap\moisture_top90_2km_0s.tif",
            'raster6': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\carbon\VuC_top90_2km_0s.tif",
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'near',
        'target_raster_path': "nonoverlapping_ctr90_moisture_VuC_2km_0s.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Overlap of the country/VuC and country/moisture masks (product of two
    # binary layers).  Unreachable; record of a prior run.
    single_expression = {
        'expression': 'raster1*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\overlap\ctr90_VuCtop90_2km_0s.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\ctr90_moisturetop90_2km_0s.tif",
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'near',
        'target_raster_path': "ctr90_moisture_VuC_2km_0s.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Country priorities (class > 2) intersected with the 0-filled
    # moisture top-90 mask.
    single_expression = {
        'expression': '(raster1>2)*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\prioritiz-2km-country\cntr_2km_nocarb.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\moisture_top90_2km_0s.tif",
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'near',
        'target_raster_path': "ctr90_moisturetop90_2km_0s.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
single_expression = {
'expression': '(raster1>2)*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\prioritiz-2km-country\cntr_2km_nocarb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\carbon\VuC_top90_2km_0s.tif",
},
'target_nodata': 66535,
'target_pixel_size': (0.021319, 0.021319),
'resample_method': 'near',
'target_raster_path': "ctr90_VuCtop90_2km_0s.tif",
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
    # Binary mask of classes below 2 in the non-overlapping composite.
    # Unreachable; record of a prior run.
    single_expression = {
        'expression': '(raster1<2)',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\overlap\nonoverlapping_ctr90_moisture_VuC_2km.tif",
        },
        'target_nodata': -9999,
        'target_raster_path': "ctr90_2km.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Country priorities (class > 2) intersected with the (not 0-filled)
    # moisture top-90 mask, resampled to 2 km.
    single_expression = {
        'expression': '(raster1>2)*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\prioritiz-2km-country\cntr_2km_nocarb.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\overlap\moisture_top90.tif",
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'near',
        'target_raster_path': "ctr90_moisturetop90_2km.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Moisture-recycling top-90 mask: threshold at 2.61 (presumably the
    # layer's 10th-percentile cutoff — TODO confirm against the percentile
    # notes elsewhere in this file).
    single_expression = {
        'expression': '(raster1>2.61)',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_moisturerecycling_nathab30s.tif",
        },
        'target_nodata': -9999,
        'target_raster_path': "moisture_top90.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
single_expression = {
'expression': '(raster2>=0)*raster1',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\raster_calculations\VuC_top90.tif",
'raster2': r"C:\Users\Becky\Documents\raster_calculations\ctr90_VuCtop90_2km.tif",
},
'target_nodata': 66535,
'target_pixel_size': (0.021319, 0.021319),
'resample_method': 'near',
'target_raster_path': "VuC_top90_2km.tif",
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
single_expression = {
'expression': '(raster1>2)*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\prioritiz-2km-country\cntr_2km_nocarb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\carbon\VuC_top90.tif",
},
'target_nodata': 66535,
'target_pixel_size': (0.021319, 0.021319),
'resample_method': 'near',
'target_raster_path': "ctr90_VuCtop90_2km.tif",
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
    # VuC top-90 mask: 1 where total vulnerable carbon (2018) exceeds 37.
    # Nodata 65535 (uint16 max).  Unreachable; record of a prior run.
    single_expression = {
        'expression': '(raster1>37)',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\carbon\Vulnerable_C_Total_2018.tif",
        },
        'target_nodata': 65535,
        'target_raster_path': "VuC_top90.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Overlap of the country/moisture-61 and country/carbon-65 masks.
    # Unreachable; record of a prior run.
    single_expression = {
        'expression': 'raster1*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\ctr90_moisture61_2km.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\ctr90_C65_2km.tif",
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'near',
        'target_raster_path': "ctr90_moisture61_C65_2km.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Country priorities (class > 2) intersected with the moisture top-39
    # mask, resampled to 2 km.
    single_expression = {
        'expression': '(raster1>2)*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\prioritiz-2km-country\cntr_2km_nocarb.tif",
            'raster2': r"C:\Users\Becky\Documents\raster_calculations\moisture_top39.tif",
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'near',
        'target_raster_path': "ctr90_moisture61_2km.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Moisture-recycling top-39 mask: threshold at 4.55.
    single_expression = {
        'expression': '(raster1>4.55)',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_moisturerecycling_nathab30s.tif",
        },
        'target_nodata': -9999,
        'target_raster_path': "moisture_top39.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Binary top-decile masks (> 0.9) from the CBD average-service rasters.
    # Unreachable; record of a prior run.
    raster_calculation_list = [
        {
            'expression': 'raster1>0.9',
            'symbol_to_path_map': {
                'raster1': r"C:\Users\Becky\Documents\cnc_project\output_CBD\food_water_average_raster.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "food_water_mask.tif",
        },
        {
            'expression': 'raster1>0.9',
            'symbol_to_path_map': {
                'raster1': r"C:\Users\Becky\Documents\cnc_project\output_CBD\food_hazards_average_raster.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "food_hazards_mask.tif",
        },
        {
            'expression': 'raster1>0.9',
            'symbol_to_path_map': {
                'raster1': r"C:\Users\Becky\Documents\cnc_project\output_CBD\hazards_water_average_raster.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "hazards_water_mask.tif",
        },
        {
            'expression': 'raster1>0.9',
            'symbol_to_path_map': {
                'raster1': r"C:\Users\Becky\Documents\cnc_project\output_CBD\food_water_hazards_average_raster.tif",
            },
            'target_nodata': -9999,
            'target_raster_path': "food_water_hazards_mask.tif",
        },
    ]
    for calculation in raster_calculation_list:
        raster_calculations_core.evaluate_calculation(
            calculation, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Resample the top-80 optimization solution to ~1.5 degrees (average
    # resampling) over the extent of the e-source layer, in WGS84.
    # Unreachable; record of a prior run.
    wgs84_srs = osr.SpatialReference()
    wgs84_srs.ImportFromEPSG(4326)
    single_expression = {
        'expression': '(raster2>-9999)*raster1',
        'symbol_to_path_map': {
            'raster1': r"solution_111_tar_80_res_2km_carbon_0.tif",
            'raster2': r"realized_e_source_abs_ann_mean.tif"
        },
        'target_nodata': -9999,
        'default_nan': -9999,
        'target_projection_wkt': wgs84_srs.ExportToWkt(),
        'target_pixel_size': (1.495833333333333348,1.5092592592592593),
        'resample_method': 'average',
        'target_raster_path': "top80_solution_1.5d_avg.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Resample the country prioritization layer to ~1.5 degrees using mode
    # resampling over the e-source extent.
    # NOTE(review): this stanza uses the key 'target_sr_wkt' while other
    # stanzas in this file use 'target_projection_wkt' — confirm which key
    # raster_calculations_core.evaluate_calculation actually honors.
    # Unreachable; record of a prior run.
    wgs84_srs = osr.SpatialReference()
    wgs84_srs.ImportFromEPSG(4326)
    single_expression = {
        'expression': '(raster2>-9999)*raster1',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Dropbox\NatCap\projects\CI-CNC\Final figs\Fig1_green_blue\cntr_2km_nocarb_land2.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_e_source_abs_ann_mean.tif"
        },
        'target_nodata': -9999,
        'default_nan': -9999,
        'target_sr_wkt': wgs84_srs.ExportToWkt(),
        'target_pixel_size': (1.495833333333333348,1.5092592592592593),
        'resample_method': 'mode',
        'target_raster_path': "cntr_2km_nocarb_land_resampled15_mode.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Country priorities (class > 2) intersected with the 65%-target carbon
    # optimal mask.  Unreachable; record of a prior run.
    single_expression = {
        'expression': '(raster1>2)*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\prioritiz-2km-country\cntr_2km_nocarb.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\Total_C_v10_2km_optimization_output_2020_08_18\optimal_mask_0.65.tif"
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'average',
        'target_raster_path': "ctr90_C65_2km.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Resample an optimization solution to ~1.5 degrees with mode resampling
    # over the e-source extent (alternate inputs/outputs kept commented).
    # NOTE(review): uses 'target_sr_wkt', unlike 'target_projection_wkt'
    # elsewhere in this file — confirm which key the core honors.
    # Unreachable; record of a prior run.
    wgs84_srs = osr.SpatialReference()
    wgs84_srs.ImportFromEPSG(4326)
    single_expression = {
        'expression': '(raster2>-9999)*raster1',
        'symbol_to_path_map': {
            #'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\output_2km_masks\solution_111_tar_90_res_2km_carbon_0.tif",
            'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\output_2km_masks\solution_222_tar_90_res_2km.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_e_source_abs_ann_mean.tif"
        },
        'target_nodata': -9999,
        'default_nan': -9999,
        'target_sr_wkt': wgs84_srs.ExportToWkt(),
        'target_pixel_size': (1.495833333333333348,1.5092592592592593),
        'resample_method': 'mode',
        #'target_raster_path': "solution_111_tar_90_res_2km_carbon_0_resampled15_mode.tif",
        'target_raster_path': "solution_222_tar_90_res_2km_resampled15_mode.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # NOTE(review): exact duplicate of the ctr90_C65_2km stanza earlier in
    # this function (same inputs and output path).  Both are unreachable
    # records of prior runs.
    single_expression = {
        'expression': '(raster1>2)*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\prioritiz-2km-country\cntr_2km_nocarb.tif",
            'raster2': r"C:\Users\Becky\Documents\cnc_project\Total_C_v10_2km_optimization_output_2020_08_18\optimal_mask_0.65.tif"
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'average',
        'target_raster_path': "ctr90_C65_2km.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
    # Average-resample total carbon from 300 m to 2 km over the extent where
    # the country layer is defined.  Unreachable; record of a prior run.
    single_expression = {
        'expression': '(raster1>=0)*raster2',
        'symbol_to_path_map': {
            'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\prioritiz-2km-country\cntr_2km_nocarb.tif",
            'raster2': r"C:\Users\Becky\Documents\raster_calculations\Total_C_v10_300m.tif"
        },
        'target_nodata': -9999,
        'target_pixel_size': (0.021319, 0.021319),
        'resample_method': 'average',
        'target_raster_path': "Total_C_v10_2km.tif",
    }
    raster_calculations_core.evaluate_calculation(
        single_expression, TASK_GRAPH, WORKSPACE_DIR)
    TASK_GRAPH.join()
    TASK_GRAPH.close()
    return
# Clamp thresholds used by the clamped_service_list below.  Upper cut-offs
# (NNth_*, Max_*) cap extreme high values; lower cut-offs (LO, LOth_*) zero
# out negligible or artifact values before percentile binning.
NNth_poll = 38
Max_lang = 43 #original map did not exceed 43 languages per degree grid cell; higher than that must be an error
LO = 0.001 # not contributing much below this point!
LOth_ffish = 0.001 # Min values are regression artifacts. Should be cut off at 10-1 tons per grid cell (~100 sq km). That’s 1 kg per sq km
NNth_ffish = 30 # Max cut-off should be 3000 tons per grid cell. That’s 30 tons per sq km. (In between the 99 and 99.9th percentiles once small values are excluded)
#Max_mfish = 400 #this one's different because even though it's higher than the 99th percentile, there are some realistic values of up to 346 kg /km2
#NOTE: Rachel subsequently asked <NAME> about this and he said it should NOT be clamped - if anything his upper values (of a few thousand) are underestimates
LOth_MM = 0.001
clamped_service_list = [ #some services just have crazy high values that throw the whole percentiles off so we're clamping them to the 99th percentile
{
'expression': f'(service>{NNth_poll})*({NNth_poll})+(service<={NNth_poll})*(service>={LO})*service + 0*(service<{LO})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_pollination_nathab30s.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_pollination_nathab30s_clamped.tif",
},
{
'expression': f'(service>{Max_lang})*(128) + (service<={Max_lang})*service',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_cultural_language_nathab30s.tif",
},
'target_nodata': 128,
'target_raster_path': "realized_cultural_language_nathab30s_clamped.tif",
},
{
'expression': f'(service>{NNth_ffish})*{NNth_ffish} + (service<={NNth_ffish})*(service>={LOth_ffish})*service + 0*(service<{LOth_ffish})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_fwfish_nathab30s.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_fwfish_per_km2_30s_clamped.tif",
},
# {
# 'expression': f'(service>{Max_mfish})*({Max_mfish})+(service<={Max_mfish})*(service>={LO})*service+ 0*(service<{LO})',
# 'symbol_to_path_map': {
# 'service': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_marinefish_watson_2010_2014_30s.tif",
# },
# 'target_nodata': -9999,
# 'target_raster_path': "realized_marinefish_watson_2010_2014_30s_clamped.tif",
# },
{
'expression': f'(service>{LOth_MM})*service + 0*(service<={LOth_MM})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_commercialtimber_forest30s.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_commercialtimber_forest30s_clamped.tif",
},
{
'expression': f'(service>{LOth_MM})*service + 0*(service<={LOth_MM})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_domestictimber_forest30s.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_domestictimber_forest30s_clamped.tif",
},
{
'expression': f'(service>{LOth_MM})*service + 0*(service<={LOth_MM})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_flood_nathab30s.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_flood_nathab30s_clamped.tif",
},
{
'expression': f'(service>{LOth_MM})*service + 0*(service<={LOth_MM})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_fuelwood_forestshrub30s.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_fuelwood_forest30s_clamped.tif",
},
{
'expression': f'(service>{LOth_MM})*service + 0*(service<={LOth_MM})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_grazing_natnotforest30s.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_grazing_natnotforest30s_clamped.tif",
},
{
'expression': f'(service>{LO})*service + 0*(service<={LO})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_natureaccess10_nathab30s.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_natureaccess10_nathab30s_clamped.tif",
},
{
'expression': f'(service>{LO})*service + 0*(service<={LO})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\resampled_30s\realized_natureaccess100_nathab30s.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_natureaccess100_nathab30s_clamped.tif",
},
]
for calculation in clamped_service_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
masked_service_list = [
{
'expression': 'service*mask + 128*(1-mask)', #this sets all values not in the mask to nodata (in this case, 128)
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\half_degree_grid_langa_19_dslv_density.tif",
},
'target_nodata': 128,
'target_raster_path': "realized_cultural_language_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_sedimentdeposition_nathab_clamped_md5_30d4d6ac5ff4bca4b91a3a462ce05bfe.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_sedimentdeposition_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_pollination_md5_443522f6688011fd561297e9a556629b.tif"
},
'target_nodata': -9999,
'target_raster_path': "realized_pollination_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_nitrogenretention_downstream3s_10s.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_nitrogenretention_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': '((service<0)*(-9999)+(service>=0)*service)*mask + -9999*(1-mask)', #this both sets all negative values to nodata AND sets anything outside the mask to nodata
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_e_source_ratio_ann_mean.tif",
},
'target_nodata': -9999,
'default_nan': -9999, # this is necessary because there are apparently nans in this list!
'target_raster_path': "realized_moisturerecycling_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\RealInflGStoragePop.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_flood_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
]
for calculation in masked_service_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
masked_service_list = [
{
'expression': 'service*mask + 128*(1-mask)', #this sets all values not in the mask to nodata (in this case, 128)
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_wstreams_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\half_degree_grid_langa_19_dslv_density.tif",
},
'target_nodata': 128,
'target_raster_path': "realized_cultural_language_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': '((service<0)*(-9999)+(service>=0)*service)*mask + -9999*(1-mask)', #this both sets all negative values to nodata AND sets anything outside the mask to nodata
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_wstreams_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_e_source_ratio_ann_mean.tif",
},
'target_nodata': -9999,
'default_nan': -9999, # this is necessary because there are apparently nans in this list!
'target_raster_path': "realized_moisturerecycling_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_wstreams_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\RealInflGStoragePop.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_flood_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_nathab_forest_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realised_commercial_timber_value.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_commercialtimber_forest30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_nathab_forest_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realised_domestic_timber_value.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_domestictimber_forest30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_nathab_forestshrub_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_fuelwood.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_fuelwood_forestshrub30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_nathab_notforest_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_grazing_md5_19085729ae358e0e8566676c5c7aae72.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_grazing_natnotforest30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_wstreams_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\total_pop_near_nature_10.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_natureaccess10_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_wstreams_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\total_pop_near_nature_100.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_natureaccess100_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_wstreams_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_pollination_md5_443522f6688011fd561297e9a556629b.tif"
},
'target_nodata': -9999,
'target_raster_path': "realized_pollination_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_wstreams_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_nitrogenretention_downstream3s_10s.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_nitrogenretention_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_wstreams_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_sedimentdeposition_nathab_clamped_md5_30d4d6ac5ff4bca4b91a3a462ce05bfe.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_sedimentdeposition_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_wstreams_esa2015_30s.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\per_km_2_realized_fwfish_distrib_catch_md5_995d3d330ed5fc4462a47f7db44225e9.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_fwfish_nathab30s.tif",
'target_pixel_size': (0.008333333333333333218, -0.008333333333333333218),
'resample_method': 'average',
},
]
for calculation in masked_service_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
single_expression = {
'expression': 'service*pop',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_nitrogenretention3s_10s_clamped.tif",
'pop': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\beneficiaries_downstream_nathab_md5_db1311d54c0174c932cc676bbd621643.tif",
},
'target_nodata': -9999,
'default_nan': -9999,
'target_raster_path': "realized_nitrogenretention_downstream3s_10s.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
'resample_method': 'average'
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
single_expression = {
'expression': '(service>=0)*(service<186)*service + (service>=186)*186 + (service<0)*0',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\potential_nitrogenretention3s_10s.tif",
},
'target_nodata': -9999,
'default_nan': -9999,
'target_raster_path': "potential_nitrogenretention3s_10s_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
'resample_method': 'average'
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
single_expression = {
'expression': 'load - export',
'symbol_to_path_map': {
'load': r"C:\Users\Becky\Documents\modified_load_n_baseline_napp_rate_global_md5_00d3e7f1abc5d6aee99d820cd22ef7da.tif",
'export': r"C:\Users\Becky\Documents\n_export_baseline_napp_rate_global_md5_b210146a5156422041eb7128c147512f.tif",
},
'target_nodata': -9999,
'default_nan': -9999,
'target_raster_path': "potential_nitrogenretention3s_10s.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
'resample_method': 'average'
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# resampling of just one raster doesn't work in raster calculations, so just use pygeoprocessing directly
pygeoprocessing.warp_raster(
r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
(30/3600, -30/3600), 'masked_all_nathab_esa2015_30s.tif',
'mode'
)
TASK_GRAPH.join()
pygeoprocessing.warp_raster(
r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_wstreams_esa2015.tif",
(30/3600, -30/3600), 'masked_all_nathab_wstreams_esa2015_30s.tif',
'mode'
)
TASK_GRAPH.join()
pygeoprocessing.warp_raster(
r"C:\Users\Becky\Documents\raster_calculations\masked_nathab_forest_esa2015.tif",
(30/3600, -30/3600), 'masked_nathab_forest_esa2015_30s.tif',
'mode'
)
TASK_GRAPH.join()
pygeoprocessing.warp_raster(
r"C:\Users\Becky\Documents\raster_calculations\masked_nathab_forestshrub_esa2015.tif",
(30/3600, -30/3600), 'masked_nathab_forestshrub_esa2015_30s.tif',
'mode'
)
TASK_GRAPH.join()
pygeoprocessing.warp_raster(
r"C:\Users\Becky\Documents\raster_calculations\masked_nathab_notforest_esa2015.tif",
(30/3600, -30/3600), 'masked_nathab_notforest_esa2015_30s.tif',
'mode'
)
TASK_GRAPH.join()
#now doing all the layers that don't need to get masked by habitat (because they're already on the habitat or they can't be)
pygeoprocessing.warp_raster(
r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_coastalprotection_md5_b8e0ec0c13892c2bf702c4d2d3e50536.tif",
(30/3600, -30/3600), 'realized_coastalprotection_30s.tif',
'average'
)
TASK_GRAPH.join()
pygeoprocessing.warp_raster(
r"C:\Users\Becky\Documents\cnc_project\original_rasters\watson_2010_2014_catch_per_sqkm_AVG.tif",
(30/3600, -30/3600), 'realized_marinefish_watson_2010_2014_30s.tif',
'average'
)
TASK_GRAPH.join()
pygeoprocessing.warp_raster(
r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_coastalprotection_barrierreef_md5_126320d42827adc0f7504d4693c67e18.tif",
(30/3600, -30/3600), 'realized_coastalprotection_barrierreef_30s.tif',
'average'
)
TASK_GRAPH.join()
#this one's also in a different CRS so needs to be reprojected
wgs84_srs = osr.SpatialReference()
wgs84_srs.ImportFromEPSG(4326)
pygeoprocessing.warp_raster(
r"C:\Users\Becky\Documents\cnc_project\original_rasters\Modelled_Total_Dollar_Value_of_Reef_Tourism_USD_per_km2.tif",
(30/3600, -30/3600), 'realized_reeftourism_30s.tif',
'average', target_sr_wkt=wgs84_srs.ExportToWkt()
)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
masker_list = [
{
# this is for masking out forest from natural habitat, for livestock production
# this counts the >50% herbaceous / < 50% tree cover category as "not forest"; also includes lichens, mosses and shrubland which maybe isn't totally edible by cattle either
'expression': 'mask(raster, %s, invert=False)'%(str([x for x in range(100,154)]+[30]+[40]+[180])[1:-1]),
'symbol_to_path_map': {
'raster': r"C:\Users\Becky\Documents\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'target_raster_path': "masked_nathab_notforest_esa2015.tif",
},
{
'expression': 'mask(raster, %s, invert=False)'%(str([x for x in range(30,111)]+[150]+[151]+[160]+[170])[1:-1]),
'symbol_to_path_map': {
'raster': r"C:\Users\Becky\Documents\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'target_raster_path': "masked_nathab_forest_esa2015.tif",
},
{
'expression': 'mask(raster, %s, invert=False)'%(str([x for x in range(30,123)]+[150]+[151]+[152]+[160]+[170]+[180])[1:-1]),
'symbol_to_path_map': {
'raster': r"C:\Users\Becky\Documents\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'target_raster_path': "masked_nathab_forestshrub_esa2015.tif",
},
{
'expression': 'mask(raster, %s, invert=False)'%(str([]+[x for x in range(30,181)]+[210])[1:-1]),
'symbol_to_path_map': {
'raster': r"C:\Users\Becky\Documents\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'target_raster_path': "masked_all_nathab_wstreams_esa2015.tif",
},
{
'expression': 'mask(raster, %s, invert=False)'%(str([]+[x for x in range(30,181)])[1:-1]),
'symbol_to_path_map': {
'raster': r"C:\Users\Becky\Documents\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'target_raster_path': "masked_all_nathab_esa2015.tif",
},
]
for masker in masker_list:
raster_calculations_core.evaluate_calculation(
masker, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
# single_expression = {
# 'expression': '(raster3 > 0) + (raster4 > 0) + (raster5 > 0) + (raster6 > 0) + (raster7 > 0) + (raster11 > 0) + (raster12 > 0) + (raster13 > 0) + (raster15 > 0)',
# 'symbol_to_path_map': {
# #'raster1': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_coastalprotection_barrierreef_md5_126320d42827adc0f7504d4693c67e18.tif",
# #'raster2': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_coastalprotection_md5_b8e0ec0c13892c2bf702c4d2d3e50536.tif",
# 'raster3': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_commercialtimber_forest_clamped0_md5_24844213f0f65a6c0bedfebe2fbd089e.tif",
# 'raster4': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_cultural_language_nathab_md5_8e517eaa7db482d1446be5b82152c79b.tif",
# 'raster5': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_domestictimber_forest_clamped0_md5_dca99ceb7dd9f96d54b3fcec656d3180.tif",
# 'raster6': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_flood_nathab_clamped0_md5_eb8fd58621e00c6aeb80f4483da1b35c.tif",
# 'raster7': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_fuelwood_forest_clamped0_md5_4ee236f5400ac400c07642356dd358d1.tif",
# #'raster8': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_fwfish_per_km2_clamped_1e-3_30_md5_0b4455185988a9e2062a39b27910eb8b.tif",
# #'raster9': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_grazing_natnotforest_clamped0_md5_8eeb02139f0fabf552658f7641ab7576.tif",
# #'raster10': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_marinefish_watson_2010_2014_clamped_md5_167448a2c010fb2f20f9727b024efab8.tif",
# 'raster11': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_natureaccess10_nathab_md5_af07e76ecea7fb5be0fa307dc7ff4eed.tif",
# 'raster12': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_nitrogenretention_nathab_clamped_md5_fe63ffd7c6633f336c91241bbd47bddd.tif",
# 'raster13': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_pollination_nathab_clamped_md5_c9486d6c8d55cea16d84ff4e129b005a.tif",
# #'raster14': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_reeftourism_Modelled_Total_Dollar_Value_md5_171a993b8ff40d0447f343dd014c72e0.tif",
# 'raster15': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\realized_sedimentdeposition_nathab_clamped_md5_30d4d6ac5ff4bca4b91a3a462ce05bfe.tif"
# },
# 'target_nodata': -9999,
# 'default_nan': -9999,
# 'target_raster_path': "zeroes_in_forest.tif"
# }
#
# raster_calculations_core.evaluate_calculation(
# single_expression, TASK_GRAPH, WORKSPACE_DIR)
#
# TASK_GRAPH.join()
# TASK_GRAPH.close()
#
# return
wgs84_srs = osr.SpatialReference()
wgs84_srs.ImportFromEPSG(4326)
single_expression = {
'expression': 'mask*raster',
#'expression': '(service>=0)*(service<101)*service + (service>=101)*101 + (service<0)*0',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Dropbox\NatCap\projects\NASA GEOBON\data\CR_intersecting_wsheds_26917.tif",
#'raster': r"C:\Users\Becky\Documents\ESACCI_LC_L4_LCCS_borrelli_sed_export_compressed_md5_19cd746cdeb63bd0ced4815071b252bf.tif",
#'raster': r"C:\Users\Becky\Documents\n_export_baseline_napp_rate_global_md5_b210146a5156422041eb7128c147512f.tif"
#'raster': r"C:\Users\Becky\Documents\cnc_project\original_rasters\potential_nitrogenretention3s_10s_clamped.tif"
#'raster': r"C:\Users\Becky\Documents\cnc_project\original_rasters\potential_sedimentdeposition_md5_aa9ee6050c423b6da37f8c2723d9b513.tif"
#'service':r"C:\Users\Becky\Documents\raster_calculations\ESA_sed_retention_CR.tif",
'raster': r"C:\Users\Becky\Documents\cnc_project\original_rasters\cv_service_sum_md5_0f86665de086aba2e16dca68ac859428.tif",
},
'target_nodata': -9999,
'default_nan': -9999,
#'target_raster_path': "ESA_sed_export_CR.tif",
#'target_raster_path': "ESA_n_export_CR.tif",
#'target_raster_path': "ESA_n_retention_CR.tif",
#'target_raster_path': "ESA_sed_retention_CR.tif",
'target_raster_path': "ESA_WCMC_coastal_protection_CR.tif",
'target_sr_wkt': wgs84_srs.ExportToWkt(),
'target_pixel_size': (0.002777777777778, -0.002777777777778),
'resample_method': 'average'
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
Max_mfish = 400 #this one's different because even though it's higher than the 99th percentile, there are some realistic values of up to 346 kg /km2
clamped_service_list = [ #some services just have crazy high values that throw the whole percentiles off so we're clamping them to the 99th percentile
{
'expression': f'(service>{Max_mfish})*({Max_mfish})+(service<={Max_mfish})*service',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\watson_2010_2014_catch_per_sqkm_AVG.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_marinefish_watson_2010_2014_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
]
for calculation in clamped_service_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
single_expression = {
'expression': 'service * pop',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\reefs\barrier_reef_service_average_raster_md5_e12c2928e16bdbad45ce4220d18a5889.tif",
'pop': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\need_processing\reefs\barrier_reef_pop_average_raster_md5_8387777dc970a55e7b5f5949791cf1ef.tif",
},
'target_nodata': -9999,
'default_nan': -9999,
'target_raster_path': "realized_coastalprotection_barrierreef.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
'resample_method': 'average'
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
single_expression = {
'expression': 'service * pop',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\cv_pop_sum_md5_954b755a9300ceb03a284197672b3656.tif",
'pop': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\cv_service_sum_md5_0f86665de086aba2e16dca68ac859428.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_coastalprotection.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
'resample_method': 'average'
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
calculation_list = [
{
'expression': 'service*pop',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\potential_nitrogenretention_nathab_clamped_md5_bf6ce40d6d9e8c8c1b2774b375b85b8a.tif",
'pop':r"C:\Users\Becky\Documents\cnc_project\masked_rasters\beneficiaries_downstream_nathab_md5_db1311d54c0174c932cc676bbd621643.tif"
},
'target_nodata': -9999,
'target_raster_path': "realized_nitrogenretention_nathab_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*pop',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\potential_sedimentdeposition_nathab_clamped_md5_1d826c8885c6479b6307bc345b95d8bf.tif",
'pop':r"C:\Users\Becky\Documents\cnc_project\masked_rasters\beneficiaries_downstream_nathab_md5_db1311d54c0174c932cc676bbd621643.tif"
},
'target_nodata': -9999,
'target_raster_path': "realized_sedimentdeposition_nathab_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*pop',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\cv_service_sum_bin_raster_md5_bc04cd7112c865fc12f8229ad4757af5.tif",
'pop':r"C:\Users\Becky\Documents\cnc_project\masked_rasters\cv_pop_sum_bin_raster_md5_27be87e1a0c5a789c82d84122ebf61b8.tif"
},
'target_nodata': -9999,
'target_raster_path': "realized_coastalprotectionbin.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service * pop / 10',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\barrier_reef_service_average_raster_md5_e12c2928e16bdbad45ce4220d18a5889_eez__GLOBAL_bin_nodata0_raster_md5_c271e54f1b04174d3e620df344a52bd9.tif",
'pop': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\barrier_reef_pop_average_raster_md5_8387777dc970a55e7b5f5949791cf1ef_eez__GLOBAL_bin_nodata0_raster_md5_b36485a7d4f837804982e5e9272d34fe.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_coastalprotectionbin_barrierreef.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
'resample_method': 'average'
}
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
masker_list = [
{
# the %s is a placeholder for the string we're passing it using this function that lists every number in the range and takes away the [] of the list and turns it into a string
'expression': 'mask(raster, %s, invert=False)'%(str([]+[x for x in range(50,181)])[1:-1]),
#'expression': 'mask(raster, %s, invert=False)'%(str([]+[x for x in range(10,200)]+[220])[1:-1]),
'symbol_to_path_map': {
'raster': r"C:\Users\Becky\Documents\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'target_raster_path': "masked_all_nathab_esa2015.tif",
},
{
# this is for masking out forest from natural habitat, for livestock production
# this counts the >50% herbaceous / < 50% tree cover category as "not forest"; also includes lichens, mosses and shrubland which maybe isn't totally edible by cattle either
'expression': 'mask(raster, %s, invert=False)'%(str([x for x in range(110,154)]+[180])[1:-1]),
'symbol_to_path_map': {
'raster': r"C:\Users\Becky\Documents\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'target_raster_path': "masked_nathab_notforest_esa2015.tif",
},
{
'expression': 'mask(raster, %s, invert=False)'%(str([x for x in range(50,111)]+[150]+[151]+[160]+[170])[1:-1]),
'symbol_to_path_map': {
'raster': r"C:\Users\Becky\Documents\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'target_raster_path': "masked_nathab_forest_esa2015.tif",
},
{
'expression': 'mask(raster, %s, invert=False)'%(str([]+[x for x in range(50,181)]+[210])[1:-1]),
'symbol_to_path_map': {
'raster': r"C:\Users\Becky\Documents\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'target_raster_path': "masked_all_nathab_wstreams_esa2015.tif",
},
{
'expression': 'mask(raster, %s, invert=False)'%(str([]+[x for x in range(10,31)])[1:-1]),
'symbol_to_path_map': {
'raster': r"C:\Users\Becky\Documents\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'target_raster_path': "agmask_esa2015.tif",
},
]
for masker in masker_list:
raster_calculations_core.evaluate_calculation(
masker, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
masked_service_list = [
{
'expression': 'service*mask + 128*(1-mask)', #this sets all values not in the mask to nodata (in this case, 128)
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\half_degree_grid_langa_19_dslv_density.tif",
},
'target_nodata': 128,
'target_raster_path': "realized_cultural_language_nathab.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': '((service<0)*(-9999)+(service>=0)*service)*mask + -9999*(1-mask)', #this both sets all negative values to nodata AND sets anything outside the mask to nodata
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_e_source_ratio_ann_mean.tif",
},
'target_nodata': -9999,
'default_nan': -9999, # this is necessary because there are apparently nans in this list!
'target_raster_path': "realized_moisturerecycling_nathab.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\RealInflGStoragePop.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_flood_nathab.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_nathab_forest_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\realised_commercial_timber_value.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_commercialtimber_forest.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_nathab_forest_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\realised_domestic_timber_value.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_domestictimber_forest.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_nathab_forest_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\realized_fuelwood.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_fuelwood_forest.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_nathab_notforest_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\realized_grazing_md5_19085729ae358e0e8566676c5c7aae72.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_grazing_natnotforest.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\total_pop_near_nature_10.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_natureaccess10_nathab.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\total_pop_near_nature_100.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_natureaccess100_nathab.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\potential_nitrogenretention_md5_286c51393042973f71884ddc701be03d.tif",
},
'target_nodata': -9999,
'target_raster_path': "potential_nitrogenretention_nathab.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\potential_sedimentdeposition_md5_aa9ee6050c423b6da37f8c2723d9b513.tif",
},
'target_nodata': -9999,
'target_raster_path': "potential_sedimentdeposition_nathab.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\downstream_beneficiaries_md5_68495f4bbdd889d7aaf9683ce958a4fe.tif",
},
'target_nodata': -9999,
'target_raster_path': "beneficiaries_downstream_nathab.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\original_rasters\realized_pollination_md5_443522f6688011fd561297e9a556629b.tif"
},
'target_nodata': -9999,
'target_raster_path': "realized_pollination_nathab.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\realized_nitrogenretention_downstream_md5_82d4e57042482eb1b92d03c0d387f501.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_nitrogenretention_nathab.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'service*mask + -9999*(1-mask)',
'symbol_to_path_map': {
'mask': r"C:\Users\Becky\Documents\raster_calculations\masked_all_nathab_esa2015.tif",
'service': r"C:\Users\Becky\Documents\cnc_project\realized_sedimentdeposition_downstream_md5_1613b12643898c1475c5ec3180836770.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_sedimentdeposition_nathab.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
]
for calculation in masked_service_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
NNth_nit = 322
NNth_sed = 161
NNth_poll = 982
NNth_ffish = 75
LOth_ffish = 0.001 # Min values are regression artifacts. Should be cut off at 10-1 tons per grid cell (~100 sq km). That’s 1 kg per sq km
NNth_ffish = 30 # Max cut-off should be 3000 tons per grid cell. That’s 30 tons per sq km. (In between the 99 and 99.9th percentiles once small values are excluded)
Max_mfish = 400 #this one's different because even though it's higher than the 99th percentile, there are some realistic values of up to 346 kg /km2
LOth_MM = 0.00001
clamped_service_list = [ #some services just have crazy high values that throw the whole percentiles off so we're clamping them to the 99th percentile
{
'expression': f'(service>{NNth_nit})*{NNth_nit} + (service<={NNth_nit})*(service>=0)*service + -9999*(service<0)', #sets anything above the 99th percentile value to that value, anything negative to nodata
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\potential_nitrogenretention_nathab_md5_95b25783b6114b63738f8d6b20d2af51.tif",
},
'target_nodata': -9999,
'target_raster_path': "potential_nitrogenretention_nathab_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>{NNth_sed})*({NNth_sed})+(service<={NNth_sed})*(service>=0)*service + -9999*(service<0)',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\potential_sedimentdeposition_nathab_md5_1a0dd289bee1fe09c30453ab80f9ddf4.tif",
},
'target_nodata': -9999,
'target_raster_path': "potential_sedimentdeposition_nathab_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>{NNth_poll})*({NNth_poll})+(service<={NNth_poll})*(service>=0)*service + -9999*(service<0)',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\realized_pollination_nathab_md5_feab479b3d6bf25a928c355547c9d9ab.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_pollination_nathab_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>{NNth_ffish})*{NNth_ffish} + (service<={NNth_ffish})*(service>={LOth_ffish})*service + -9999*(service<{LOth_ffish})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\per_km_2_realized_fwfish_distrib_catch_md5_995d3d330ed5fc4462a47f7db44225e9.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_fwfish_per_km2_clamped_3e-2_13.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>{Max_mfish})*({Max_mfish})+(service<={Max_mfish})*service',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\needed_clamping\realized_marinefish_watson_2015_catch_Ind_Non_Ind_Rprt_IUU_md5_61e08ed60006e9ad23b74bcd44c61548.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_marinefish_watson_2015_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>{LOth_MM})*service + 0*(service<={LOth_MM})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\realized_commercialtimber_forest_md5_99153e7a8177fd7ed6bb75a5fdc426e5.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_commercialtimber_forest_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>{LOth_MM})*service + 0*(service<={LOth_MM})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\realized_domestictimber_forest_md5_3ee8a15ce8ed38b0710b8f6d74640b70.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_domestictimber_forest_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>{LOth_MM})*service + 0*(service<={LOth_MM})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\realized_flood_nathab_md5_bf277802945a0a7067d2a90941e355e1.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_flood_nathab_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>{LOth_MM})*service + 0*(service<={LOth_MM})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\realized_fuelwood_forest_md5_e86706b0ebe0d296acac30db78f2c284.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_fuelwood_forest_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>{LOth_MM})*service + 0*(service<={LOth_MM})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\masked_rasters\needed_clamping\realized_grazing_natnotforest_md5_fbc4907814187d1be75b35932617af65.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_grazing_natnotforest_clamped.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
]
for calculation in clamped_service_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
N90 = 7.3
S90 = 9.8
P90 = 8.4
FF90 = 9.5
MF90 = 9.3
CT90 = 4.2
DT90 = 5.8
FW90 = 6.2
F90 = 7.9
G90 = 4.9
CL90 = 6.1
MR90 = 4.4
NA90 = 8.2
RT90 = 3.9
CP90 = 2.7
top_values_list = [
{
'expression': f'(service>={N90}) + 0*(service<{N90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_nitrogenretention_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_nitrogenretention_nathab_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={S90}) + 0*(service<{S90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_sedimentdeposition_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_sedimentdeposition_nathab_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={P90}) + 0*(service<{P90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_pollination_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_pollination_nathab_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={FF90}) + 0*(service<{FF90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_fwfish_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_fwfish_per_km2_top90_3e-2_13.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={MF90}) + 0*(service<{MF90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_marinefish_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_marinefish_watson_2015_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={CT90}) + 0*(service<{CT90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_commercialtimber_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_commercialtimber_forest_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={DT90}) + 0*(service<{DT90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_domestictimber_binf.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_domestictimber_forest_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={F90}) + 0*(service<{F90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_flood_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_flood_nathab_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={FW90}) + 0*(service<{FW90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_fuelwood_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_fuelwood_forest_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={G90}) + 0*(service<{G90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_grazing_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_grazing_natnotforest_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={CL90}) + 0*(service<{CL90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_cultural_language_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_cultural_language_nathab_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={MR90}) + 0*(service<{MR90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_moisturerecycling_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_moisturerecycling_nathab_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={NA90}) + 0*(service<{NA90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_natureaccess10_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_natureaccess10_nathab_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={RT90}) + 0*(service<{RT90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_reeftourism_bin.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_reeftourism_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': f'(service>={CP90}) + 0*(service<{CP90})',
'symbol_to_path_map': {
'service': r"C:\Users\Becky\Documents\cnc_project\binned_services_global\realized_coastalprotectionbin_plusbarrierreefs_md5_a3f43a2e60e5976799d257ad9561731f.tif",
},
'target_nodata': -9999,
'target_raster_path': "realized_coastalprotection_top90.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
]
for calculation in top_values_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#loop to set thresholds
for base_raster_path, threshold, target_raster_path in [
#(r"C:\Users\Becky\Documents\raster_calculations\CNC_workspace\normalized_realized_pollination_md5_06f52f2854ae1c584742d587b1c31359.tif", 0.06, "top04_pollination.tif"),
#(r"C:\Users\Becky\Documents\raster_calculations\CNC_workspace\normalized_realized_flood_md5_f1237e76a41039e22629abb85963ba16.tif", 0.05, "top30_flood.tif"),
#(r"C:\Users\Becky\Documents\raster_calculations\CNC_workspace\normalized_realized_grazing_md5_d03b584dac965539a77bf96cba3f8096_masked_md5_db038b499342efa926c3c5815c822fe3.tif", 0.1, "top15_grazing.tif"),
#(r"C:\Users\Becky\Documents\raster_calculations\CNC_workspace\normalized_realized_nitrogen_downstream_md5_437e1759b0f994b47add4baf76509bbe_masked_md5_ac82368cedcfc692b0440b0cc0ed7fdb.tif", 0.06, "top25_nitrogen.tif"),
#(r"C:\Users\Becky\Documents\raster_calculations\CNC_workspace\normalized_realized_nwfp_masked_md5_754ba4d8cd0c54399fd816748a9e0091_masked_md5_f48ada73cb74cd59726b066db2f03855.tif", 0.05, "top10_nwfp.tif"),
#(r"C:\Users\Becky\Documents\raster_calculations\CNC_workspace\normalized_realized_sediment_downstream_md5_daa86f70232c5e1a8a0efaf0b2653db2_masked_md5_6e9050a9fcf3f08925343a48208aeab8.tif", 0.09, "top05_sediment.tif"),
#(r"C:\Users\Becky\Documents\raster_calculations\CNC_workspace\normalized_realized_timber_masked_md5_fc5ad0ff1f4702d75f204267fc90b33f_masked_md5_68df861a8e4c5cbb0e800f389690a792.tif", 0.13, "top15_timber.tif"),
(r"C:\Users\Becky\Documents\raster_calculations\aggregate_realized_ES_score_nspwogf_md5_0ab07f38ed0290fea6142db188ae51f8.tif", 0.30, "top40_nspwogf.tif"),
]:
mask_expr_dict = {
'expression': 'raster > %f' % threshold,
'symbol_to_path_map': {
'raster': base_raster_path,
},
'target_nodata': -1,
'target_raster_path': target_raster_path,
}
raster_calculations_core.evaluate_calculation(
mask_expr_dict, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#looping the same mask over a bunch of rasters
base_directory = r"C:\Users\Becky\Documents\raster_calculations\CNC_workspace"
masked_workspace_dir = 'masked_workspace_dir'
ecoshard_workspace_dir = 'ecoshard_dir'
for dirname in [masked_workspace_dir, ecoshard_workspace_dir]:
try:
os.makedirs(dirname)
except OSError:
pass
for path in glob.glob(os.path.join(base_directory, '*.tif')):
path_root_name = os.path.splitext(os.path.basename(path))[0]
target_raster_path = os.path.join(
masked_workspace_dir, '%s_masked.tif' % (path_root_name))
remasking_expression = {
'expression': 'mask*service',
'symbol_to_path_map': {
'mask': 'masked_nathab_esa_nodata_md5_7c9acfe052cb7bdad319f011e9389fb1.tif',
'service': path,
},
'target_nodata': -1,
'target_raster_path': target_raster_path,
###file name split off from its path and its ecoshard too because it will be re-ecosharded
'target_pixel_size': (0.002777777777778, -0.002777777777778),
}
raster_calculations_core.evaluate_calculation(
remasking_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
subprocess.check_call("python -m ecoshard ./masked_workspace_dir/*.tif --hash_file --rename --buildoverviews --interpolation_method average")
TASK_GRAPH.join()
TASK_GRAPH.close()
clamping_service_list = [
{
'expression': '(val >= 0) * (val < 1) * val + (val >= 1)',
'symbol_to_path_map': {
'val': "raw_normalized_potential_flood.tif",
},
'target_nodata': -1,
'target_raster_path': "normalized_potential_flood.tif",
},
{
'expression': '(val >= 0) * (val < 1) * val + (val >= 1)',
'symbol_to_path_map': {
'val': "raw_normalized_potential_moisture.tif",
},
'target_nodata': -1,
'target_raster_path': "normalized_potential_moisture.tif",
},
{
'expression': '(val >= 0) * (val < 1) * val + (val >= 1)',
'symbol_to_path_map': {
'val': "raw_normalized_realized_flood.tif",
},
'target_nodata': -1,
'target_raster_path': "normalized_realized_flood.tif",
},
{
'expression': '(val >= 0) * (val < 1) * val + (val >= 1)',
'symbol_to_path_map': {
'val': "raw_normalized_realized_moisture.tif",
},
'target_nodata': -1,
'target_raster_path': "normalized_realized_moisture.tif",
},
]
for calculation in clamping_service_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return #terminates at this point
# just build overviews
raster_calculation_list = [
{
'expression': 'x',
'symbol_to_path_map': {
'x': '../nathab_potential_pollination.tif',
},
'target_nodata': -1,
'target_raster_path': "potential_pollination.tif",
'build_overview': True,
},
]
for calculation in raster_calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
#dasgupta calcs:
raster_list = [
{
'expression': 'total_realized /total_potential',
'symbol_to_path_map': {
'total_realized': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4\prod_total_realized_en_10s_ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
'total_potential': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4\prod_total_potential_en_10s_ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'default_nan': -1, # this is necessary because divides by 0's; could also set them to 0 instead
'target_raster_path': "percent_realized_current.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'total_realized /total_potential',
'symbol_to_path_map': {
'total_realized': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_bau_esa_classes_md5_b411f14d7cff237e3415c5afa26d4b78\prod_total_realized_en_10s_lulc_WB_bau_esa_classes_md5_b411f14d7cff237e3415c5afa26d4b78.tif",
'total_potential': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_bau_esa_classes_md5_b411f14d7cff237e3415c5afa26d4b78\prod_total_potential_en_10s_lulc_WB_bau_esa_classes_md5_b411f14d7cff237e3415c5afa26d4b78.tif",
},
'target_nodata': -1,
'default_nan': -1,
'target_raster_path': "percent_realized_bau.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'total_realized /total_potential',
'symbol_to_path_map': {
'total_realized': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_cons_esa_classes_md5_8c150474406a3f230b992399429bd182\prod_total_realized_en_10s_lulc_WB_cons_esa_classes_md5_8c150474406a3f230b992399429bd182.tif",
'total_potential': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_cons_esa_classes_md5_8c150474406a3f230b992399429bd182\prod_total_potential_en_10s_lulc_WB_cons_esa_classes_md5_8c150474406a3f230b992399429bd182.tif",
},
'target_nodata': -1,
'default_nan': -1,
'target_raster_path': "percent_realized_cons.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'total_realized /total_potential',
'symbol_to_path_map': {
'total_realized': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_mid\prod_total_realized_en_10s_lulc_WB_mid.tif",
'total_potential': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_mid\prod_total_potential_en_10s_lulc_WB_mid.tif",
},
'target_nodata': -1,
'default_nan': -1,
'target_raster_path': "percent_realized_mid.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'total_realized /total_potential',
'symbol_to_path_map': {
'total_realized': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4\prod_total_realized_va_10s_ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
'total_potential': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4\prod_total_potential_va_10s_ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7_md5_1254d25f937e6d9bdee5779d377c5aa4.tif",
},
'target_nodata': -1,
'default_nan': -1,
'target_raster_path': "percent_realized_current_va.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'total_realized /total_potential',
'symbol_to_path_map': {
'total_realized': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_bau_esa_classes_md5_b411f14d7cff237e3415c5afa26d4b78\prod_total_realized_va_10s_lulc_WB_bau_esa_classes_md5_b411f14d7cff237e3415c5afa26d4b78.tif",
'total_potential': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_bau_esa_classes_md5_b411f14d7cff237e3415c5afa26d4b78\prod_total_potential_va_10s_lulc_WB_bau_esa_classes_md5_b411f14d7cff237e3415c5afa26d4b78.tif",
},
'target_nodata': -1,
'default_nan': -1,
'target_raster_path': "percent_realized_bau_va.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'total_realized /total_potential',
'symbol_to_path_map': {
'total_realized': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_cons_esa_classes_md5_8c150474406a3f230b992399429bd182\prod_total_realized_va_10s_lulc_WB_cons_esa_classes_md5_8c150474406a3f230b992399429bd182.tif",
'total_potential': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_cons_esa_classes_md5_8c150474406a3f230b992399429bd182\prod_total_potential_va_10s_lulc_WB_cons_esa_classes_md5_8c150474406a3f230b992399429bd182.tif",
},
'target_nodata': -1,
'default_nan': -1,
'target_raster_path': "percent_realized_cons_va.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
{
'expression': 'total_realized /total_potential',
'symbol_to_path_map': {
'total_realized': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_mid\prod_total_realized_va_10s_lulc_WB_mid.tif",
'total_potential': r"C:\Users\Becky\Documents\dasgupta\nci_ag_multi_lulc\lulc_WB_mid\prod_total_potential_va_10s_lulc_WB_mid.tif",
},
'target_nodata': -1,
'default_nan': -1,
'target_raster_path': "percent_realized_mid_va.tif",
'target_pixel_size': (0.002777777777778, -0.002777777777778),
},
]
for calculation in raster_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
#NCI
single_expression = {
'expression': 'averageraster*mask - raster2*(mask>1)',
'symbol_to_path_map': {
'raster2': r"C:\Users\Becky\Documents\raster_calculations\fertilizers\NitrogenApplication_Rate_md5_caee837fa0e881be0c36c1eba1dea44e.tif",
'averageraster': r"C:\Users\Becky\Documents\raster_calculations\fertilizer_average_raster.tif",
'mask': r"C:\Users\Becky\Documents\raster_calculations\fertilizer_valid_count_raster.tif",
},
'target_nodata': -9999,
'target_raster_path': "Intensified_NitrogenApplication_Rate_gapfilled.tif",
'target_pixel_size': (0.08333333333333332871, -0.08333333333333332871),
'resample_method': 'average'
}
raster_calculations_core.evaluate_calculation(
single_expression, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
if __name__ == '__main__':
    # Script entry point: build the shared TaskGraph used by all the
    # calculation lists above (NCPUS workers; the 5.0 is presumably a
    # reporting/update interval -- TODO confirm against taskgraph docs),
    # then run main().
    TASK_GRAPH = taskgraph.TaskGraph(WORKSPACE_DIR, NCPUS, 5.0)
    main()
| StarcoderdataPython |
9373 | import random
from otp.ai.AIBase import *
from direct.distributed.ClockDelta import *
from toontown.battle.BattleBase import *
from toontown.battle.BattleCalculatorAI import *
from toontown.toonbase.ToontownBattleGlobals import *
from toontown.battle.SuitBattleGlobals import *
from pandac.PandaModules import *
from toontown.battle import BattleExperienceAI
from direct.distributed import DistributedObjectAI
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
from direct.directnotify import DirectNotifyGlobal
from toontown.ai import DatabaseObject
from toontown.toon import DistributedToonAI
from toontown.toon import InventoryBase
from toontown.toonbase import ToontownGlobals
from toontown.toon import NPCToons
from otp.ai.MagicWordGlobal import *
from toontown.pets import DistributedPetProxyAI
class DistributedBattleBaseAI(DistributedObjectAI.DistributedObjectAI, BattleBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleBaseAI')
    def __init__(self, air, zoneId, finishCallback = None, maxSuits = 4, bossBattle = 0, tutorialFlag = 0, interactivePropTrackBonus = -1):
        """Set up server-side battle state.

        air -- the AI repository (passed to DistributedObjectAI).
        zoneId -- zone this battle lives in.
        finishCallback -- callable invoked when the battle finishes
            (stored; invocation happens elsewhere in this class).
        maxSuits -- cap on simultaneous suits (default 4).
        bossBattle -- forwarded to setBossBattle().
        tutorialFlag -- forwarded to BattleCalculatorAI; marks tutorial battles.
        interactivePropTrackBonus -- track index granted a prop bonus,
            -1 for none.
        """
        DistributedObjectAI.DistributedObjectAI.__init__(self, air)
        self.serialNum = 0
        self.zoneId = zoneId
        self.maxSuits = maxSuits
        self.setBossBattle(bossBattle)
        self.tutorialFlag = tutorialFlag
        self.interactivePropTrackBonus = interactivePropTrackBonus
        self.finishCallback = finishCallback
        self.avatarExitEvents = []
        # Per-avatar response bookkeeping for join/adjust handshakes.
        self.responses = {}
        self.adjustingResponses = {}
        self.joinResponses = {}
        self.adjustingSuits = []
        self.adjustingToons = []
        self.numSuitsEver = 0
        BattleBase.__init__(self)
        self.streetBattle = 1
        self.pos = Point3(0, 0, 0)
        self.initialSuitPos = Point3(0, 0, 0)
        # Per-toon reward bookkeeping, keyed by toonId (built up elsewhere).
        self.toonExp = {}
        self.toonOrigQuests = {}
        self.toonItems = {}
        self.toonOrigMerits = {}
        self.toonMerits = {}
        self.toonParts = {}
        self.battleCalc = BattleCalculatorAI(self, tutorialFlag)
        # Both an invasion and the more-XP holiday raise the skill-credit
        # multiplier; if both run, the second set wins (not multiplied).
        if self.air.suitInvasionManager.getInvading():
            mult = getInvasionMultiplier()
            self.battleCalc.setSkillCreditMultiplier(mult)
        if self.air.holidayManager.isMoreXpHolidayRunning():
            mult = getMoreXpHolidayMultiplier()
            self.battleCalc.setSkillCreditMultiplier(mult)
        # fsm is set to None before clearAttacks() and replaced below.
        self.fsm = None
        self.clearAttacks()
        self.ignoreFaceOffDone = 0
        self.needAdjust = 0
        self.movieHasBeenMade = 0
        self.movieHasPlayed = 0
        self.rewardHasPlayed = 0
        self.movieRequested = 0
        self.ignoreResponses = 0
        self.ignoreAdjustingResponses = 0
        self.taskNames = []
        self.exitedToons = []
        self.suitsKilled = []
        self.suitsKilledThisBattle = []
        self.suitsKilledPerFloor = []
        self.suitsEncountered = []
        self.newToons = []
        self.newSuits = []
        self.numNPCAttacks = 0
        self.npcAttacks = {}
        self.pets = {}
        self.fireCount = 0
        # Main battle FSM: FaceOff/WaitForJoin -> WaitForInput -> MakeMovie
        # -> PlayMovie -> (Reward|WaitForJoin) -> Resume.
        self.fsm = ClassicFSM.ClassicFSM('DistributedBattleAI', [State.State('FaceOff', self.enterFaceOff, self.exitFaceOff, ['WaitForInput', 'Resume']),
         State.State('WaitForJoin', self.enterWaitForJoin, self.exitWaitForJoin, ['WaitForInput', 'Resume']),
         State.State('WaitForInput', self.enterWaitForInput, self.exitWaitForInput, ['MakeMovie', 'Resume']),
         State.State('MakeMovie', self.enterMakeMovie, self.exitMakeMovie, ['PlayMovie', 'Resume']),
         State.State('PlayMovie', self.enterPlayMovie, self.exitPlayMovie, ['WaitForJoin', 'Reward', 'Resume']),
         State.State('Reward', self.enterReward, self.exitReward, ['Resume']),
         State.State('Resume', self.enterResume, self.exitResume, []),
         State.State('Off', self.enterOff, self.exitOff, ['FaceOff', 'WaitForJoin'])], 'Off', 'Off')
        # Secondary two-state FSMs gating joining, running and adjusting.
        self.joinableFsm = ClassicFSM.ClassicFSM('Joinable', [State.State('Joinable', self.enterJoinable, self.exitJoinable, ['Unjoinable']), State.State('Unjoinable', self.enterUnjoinable, self.exitUnjoinable, ['Joinable'])], 'Unjoinable', 'Unjoinable')
        self.joinableFsm.enterInitialState()
        self.runableFsm = ClassicFSM.ClassicFSM('Runable', [State.State('Runable', self.enterRunable, self.exitRunable, ['Unrunable']), State.State('Unrunable', self.enterUnrunable, self.exitUnrunable, ['Runable'])], 'Unrunable', 'Unrunable')
        self.runableFsm.enterInitialState()
        self.adjustFsm = ClassicFSM.ClassicFSM('Adjust', [State.State('Adjusting', self.enterAdjusting, self.exitAdjusting, ['NotAdjusting', 'Adjusting']), State.State('NotAdjusting', self.enterNotAdjusting, self.exitNotAdjusting, ['Adjusting'])], 'NotAdjusting', 'NotAdjusting')
        self.adjustFsm.enterInitialState()
        self.fsm.enterInitialState()
        self.startTime = globalClock.getRealTime()
        self.adjustingTimer = Timer()
def clearAttacks(self):
self.toonAttacks = {}
self.suitAttacks = getDefaultSuitAttacks()
def requestDelete(self):
if hasattr(self, 'fsm'):
self.fsm.request('Off')
self.__removeTaskName(self.uniqueName('make-movie'))
DistributedObjectAI.DistributedObjectAI.requestDelete(self)
    def delete(self):
        """Tear the battle down: stop all FSMs and timers, release per-suit
        battle state, delete pet proxies, and chain to the base class."""
        self.notify.debug('deleting battle')
        self.fsm.request('Off')
        self.ignoreAll()
        self.__removeAllTasks()
        del self.fsm
        del self.joinableFsm
        del self.runableFsm
        del self.adjustFsm
        self.__cleanupJoinResponses()
        self.timer.stop()
        del self.timer
        self.adjustingTimer.stop()
        del self.adjustingTimer
        self.battleCalc.cleanup()
        del self.battleCalc
        for suit in self.suits:
            # battleTrap was attached in addSuit(); strip it so the suit
            # object carries no stale battle state after this battle dies.
            del suit.battleTrap
        del self.finishCallback
        for petProxy in self.pets.values():
            petProxy.requestDelete()
        DistributedObjectAI.DistributedObjectAI.delete(self)
def pause(self):
self.timer.stop()
self.adjustingTimer.stop()
def unpause(self):
self.timer.resume()
self.adjustingTimer.resume()
    def abortBattle(self):
        """Force-end the battle: remove every toon, broadcast the final
        roster, jump to Resume, and cancel all outstanding work."""
        self.notify.debug('%s.abortBattle() called.' % self.doId)
        # Iterate over a copy: __removeToon mutates self.toons.
        toonsCopy = self.toons[:]
        for toonId in toonsCopy:
            self.__removeToon(toonId)
            # Toons removed mid-movie are recorded so clients can reconcile.
            if self.fsm.getCurrentState().getName() == 'PlayMovie' or self.fsm.getCurrentState().getName() == 'MakeMovie':
                self.exitedToons.append(toonId)
        self.d_setMembers()
        self.b_setState('Resume')
        self.__removeAllTasks()
        self.timer.stop()
        self.adjustingTimer.stop()
    def __removeSuit(self, suit):
        """Drop a suit from every membership list and flag the roster change.

        NOTE(review): this method is defined twice in this class; the later
        identical definition is the binding actually in effect.
        """
        self.notify.debug('__removeSuit(%d)' % suit.doId)
        self.suits.remove(suit)
        self.activeSuits.remove(suit)
        if self.luredSuits.count(suit) == 1:
            self.luredSuits.remove(suit)
        self.suitGone = 1
        # battleTrap was attached in addSuit(); remove the per-battle state.
        del suit.battleTrap
def findSuit(self, id):
for s in self.suits:
if s.doId == id:
return s
return None
def __removeTaskName(self, name):
if self.taskNames.count(name):
self.taskNames.remove(name)
self.notify.debug('removeTaskName() - %s' % name)
taskMgr.remove(name)
def __removeAllTasks(self):
for n in self.taskNames:
self.notify.debug('removeAllTasks() - %s' % n)
taskMgr.remove(n)
self.taskNames = []
def __removeToonTasks(self, toonId):
name = self.taskName('running-toon-%d' % toonId)
self.__removeTaskName(name)
name = self.taskName('to-pending-av-%d' % toonId)
self.__removeTaskName(name)
def getLevelDoId(self):
return 0
def getBattleCellId(self):
return 0
def getPosition(self):
self.notify.debug('getPosition() - %s' % self.pos)
return [self.pos[0], self.pos[1], self.pos[2]]
def getInitialSuitPos(self):
p = []
p.append(self.initialSuitPos[0])
p.append(self.initialSuitPos[1])
p.append(self.initialSuitPos[2])
return p
def setBossBattle(self, bossBattle):
self.bossBattle = bossBattle
def getBossBattle(self):
return self.bossBattle
def b_setState(self, state):
self.notify.debug('network:setState(%s)' % state)
stime = globalClock.getRealTime() + SERVER_BUFFER_TIME
self.sendUpdate('setState', [state, globalClockDelta.localToNetworkTime(stime)])
self.setState(state)
def setState(self, state):
self.fsm.request(state)
    def getState(self):
        """Return [current FSM state name, network timestamp] for clients."""
        return [self.fsm.getCurrentState().getName(), globalClockDelta.getRealNetworkTime()]
    def d_setMembers(self):
        """Broadcast the current battle roster (see getMembers) to clients."""
        self.notify.debug('network:setMembers()')
        self.sendUpdate('setMembers', self.getMembers())
def getMembers(self):
suits = []
for s in self.suits:
suits.append(s.doId)
joiningSuits = ''
for s in self.joiningSuits:
joiningSuits += str(suits.index(s.doId))
pendingSuits = ''
for s in self.pendingSuits:
pendingSuits += str(suits.index(s.doId))
activeSuits = ''
for s in self.activeSuits:
activeSuits += str(suits.index(s.doId))
luredSuits = ''
for s in self.luredSuits:
luredSuits += str(suits.index(s.doId))
suitTraps = ''
for s in self.suits:
if s.battleTrap == NO_TRAP:
suitTraps += '9'
elif s.battleTrap == BattleCalculatorAI.TRAP_CONFLICT:
suitTraps += '9'
else:
suitTraps += str(s.battleTrap)
toons = []
for t in self.toons:
toons.append(t)
joiningToons = ''
for t in self.joiningToons:
joiningToons += str(toons.index(t))
pendingToons = ''
for t in self.pendingToons:
pendingToons += str(toons.index(t))
activeToons = ''
for t in self.activeToons:
activeToons += str(toons.index(t))
runningToons = ''
for t in self.runningToons:
runningToons += str(toons.index(t))
self.notify.debug('getMembers() - suits: %s joiningSuits: %s pendingSuits: %s activeSuits: %s luredSuits: %s suitTraps: %s toons: %s joiningToons: %s pendingToons: %s activeToons: %s runningToons: %s' % (suits,
joiningSuits,
pendingSuits,
activeSuits,
luredSuits,
suitTraps,
toons,
joiningToons,
pendingToons,
activeToons,
runningToons))
return [suits,
joiningSuits,
pendingSuits,
activeSuits,
luredSuits,
suitTraps,
toons,
joiningToons,
pendingToons,
activeToons,
runningToons,
globalClockDelta.getRealNetworkTime()]
    def d_adjust(self):
        """Tell clients to start the adjust animation (timestamped)."""
        self.notify.debug('network:adjust()')
        self.sendUpdate('adjust', [globalClockDelta.getRealNetworkTime()])
def getInteractivePropTrackBonus(self):
return self.interactivePropTrackBonus
def getZoneId(self):
return self.zoneId
def getTaskZoneId(self):
return self.zoneId
    def d_setMovie(self):
        """Broadcast the round movie to clients, then record cog encounters."""
        self.notify.debug('network:setMovie()')
        self.sendUpdate('setMovie', self.getMovie())
        self.__updateEncounteredCogs()
    def getMovie(self):
        """Serialize the round's toon and suit attacks into the flat movie list.

        Layout: [movieHasBeenMade, activeToons, suitIds] followed by one
        attack record per active toon (padded to 4 with empty attacks), then
        one record per suit attack. Targets are encoded as indices into
        activeToons (heals) or suitIds (offense); -1 means none/group.
        """
        suitIds = []
        for s in self.activeSuits:
            suitIds.append(s.doId)
        p = [self.movieHasBeenMade]
        p.append(self.activeToons)
        p.append(suitIds)
        for t in self.activeToons:
            if t in self.toonAttacks:
                ta = self.toonAttacks[t]
                index = -1
                id = ta[TOON_ID_COL]
                if id != -1:
                    index = self.activeToons.index(id)
                track = ta[TOON_TRACK_COL]
                if (track == NO_ATTACK or attackAffectsGroup(track, ta[TOON_LVL_COL])) and track != NPCSOS and track != PETSOS:
                    target = -1
                    if track == HEAL:
                        if ta[TOON_LVL_COL] == 1:
                            # Group heal level 1 gets a random hp-bonus seed
                            # shared with clients through the movie.
                            ta[TOON_HPBONUS_COL] = random.randint(0, 10000)
                elif track == SOS or track == NPCSOS or track == PETSOS:
                    target = ta[TOON_TGT_COL]
                elif track == HEAL:
                    if self.activeToons.count(ta[TOON_TGT_COL]) != 0:
                        target = self.activeToons.index(ta[TOON_TGT_COL])
                    else:
                        target = -1
                elif suitIds.count(ta[TOON_TGT_COL]) != 0:
                    target = suitIds.index(ta[TOON_TGT_COL])
                else:
                    target = -1
                p = p + [index,
                 track,
                 ta[TOON_LVL_COL],
                 target]
                p = p + ta[4:]
            else:
                # Toon chose nothing this round: emit an empty attack record.
                index = self.activeToons.index(t)
                attack = getToonAttack(index)
                p = p + attack
        for i in range(4 - len(self.activeToons)):
            p = p + getToonAttack(-1)
        for sa in self.suitAttacks:
            index = -1
            id = sa[SUIT_ID_COL]
            if id != -1:
                index = suitIds.index(id)
            if sa[SUIT_ATK_COL] == -1:
                targetIndex = -1
            else:
                targetIndex = sa[SUIT_TGT_COL]
                if targetIndex == -1:
                    self.notify.debug('suit attack: %d must be group' % sa[SUIT_ATK_COL])
                else:
                    toonId = self.activeToons[targetIndex]
            p = p + [index, sa[SUIT_ATK_COL], targetIndex]
            sa[SUIT_TAUNT_COL] = 0
            if sa[SUIT_ATK_COL] != -1:
                suit = self.findSuit(id)
                sa[SUIT_TAUNT_COL] = getAttackTauntIndexFromIndex(suit, sa[SUIT_ATK_COL])
            p = p + sa[3:]
        return p
    def d_setChosenToonAttacks(self):
        """Broadcast every active toon's currently chosen attack to clients."""
        self.notify.debug('network:setChosenToonAttacks()')
        self.sendUpdate('setChosenToonAttacks', self.getChosenToonAttacks())
def getChosenToonAttacks(self):
ids = []
tracks = []
levels = []
targets = []
for t in self.activeToons:
if t in self.toonAttacks:
ta = self.toonAttacks[t]
else:
ta = getToonAttack(t)
ids.append(t)
tracks.append(ta[TOON_TRACK_COL])
levels.append(ta[TOON_LVL_COL])
targets.append(ta[TOON_TGT_COL])
return [ids,
tracks,
levels,
targets]
    def d_setBattleExperience(self):
        """Broadcast the end-of-battle experience payload to clients."""
        self.notify.debug('network:setBattleExperience()')
        self.sendUpdate('setBattleExperience', self.getBattleExperience())
def getBattleExperience(self):
returnValue = BattleExperienceAI.getBattleExperience(4, self.activeToons, self.toonExp, self.battleCalc.toonSkillPtsGained, self.toonOrigQuests, self.toonItems, self.toonOrigMerits, self.toonMerits, self.toonParts, self.suitsKilled, self.helpfulToons)
return returnValue
def getToonUberStatus(self):
fieldList = []
uberIndex = LAST_REGULAR_GAG_LEVEL + 1
for toon in self.activeToons:
toonList = []
for trackIndex in range(MAX_TRACK_INDEX):
toonList.append(toon.inventory.numItem(track, uberIndex))
fieldList.append(encodeUber(toonList))
return fieldList
def addSuit(self, suit):
self.notify.debug('addSuit(%d)' % suit.doId)
self.newSuits.append(suit)
self.suits.append(suit)
suit.battleTrap = NO_TRAP
self.numSuitsEver += 1
def __joinSuit(self, suit):
self.joiningSuits.append(suit)
toPendingTime = MAX_JOIN_T + SERVER_BUFFER_TIME
taskName = self.taskName('to-pending-av-%d' % suit.doId)
self.__addJoinResponse(suit.doId, taskName)
self.taskNames.append(taskName)
taskMgr.doMethodLater(toPendingTime, self.__serverJoinDone, taskName, extraArgs=(suit.doId, taskName))
    def __serverJoinDone(self, avId, taskName):
        """Task callback: the join window expired on the server; force the
        avatar (toon or suit) into the pending list."""
        self.notify.debug('join for av: %d timed out on server' % avId)
        self.__removeTaskName(taskName)
        self.__makeAvPending(avId)
        return Task.done
    def __makeAvPending(self, avId):
        """Move a joining avatar (toon or suit) to the pending list, broadcast
        the roster and kick off an adjust pass."""
        self.notify.debug('__makeAvPending(%d)' % avId)
        self.__removeJoinResponse(avId)
        self.__removeTaskName(self.taskName('to-pending-av-%d' % avId))
        if self.toons.count(avId) > 0:
            self.joiningToons.remove(avId)
            self.pendingToons.append(avId)
        else:
            suit = self.findSuit(avId)
            if suit != None:
                if not suit.isEmpty():
                    if not self.joiningSuits.count(suit) == 1:
                        # Suit is not in the joining list — dump the full
                        # roster for diagnosis before removing anyway.
                        self.notify.warning('__makeAvPending(%d) in zone: %d' % (avId, self.zoneId))
                        self.notify.warning('toons: %s' % self.toons)
                        self.notify.warning('joining toons: %s' % self.joiningToons)
                        self.notify.warning('pending toons: %s' % self.pendingToons)
                        self.notify.warning('suits: %s' % self.suits)
                        self.notify.warning('joining suits: %s' % self.joiningSuits)
                        self.notify.warning('pending suits: %s' % self.pendingSuits)
                    self.joiningSuits.remove(suit)
                    self.pendingSuits.append(suit)
            else:
                self.notify.warning('makeAvPending() %d not in toons or suits' % avId)
                return
        self.d_setMembers()
        self.needAdjust = 1
        self.__requestAdjust()
def suitRequestJoin(self, suit):
self.notify.debug('suitRequestJoin(%d)' % suit.getDoId())
if self.suitCanJoin():
self.addSuit(suit)
self.__joinSuit(suit)
self.d_setMembers()
suit.prepareToJoinBattle()
return 1
else:
self.notify.warning('suitRequestJoin() - not joinable - joinable state: %s max suits: %d' % (self.joinableFsm.getCurrentState().getName(), self.maxSuits))
return 0
    def addToon(self, avId):
        """Register a toon with the battle; return 1 on success, 0 if the toon
        is not in the repository.

        Hooks exit events, snapshots the toon's experience/merits/quests for
        end-of-battle rewards, and initializes its response counters.
        """
        self.notify.debug('addToon(%d)' % avId)
        toon = self.getToon(avId)
        if toon == None:
            return 0
        toon.stopToonUp()
        event = simbase.air.getAvatarExitEvent(avId)
        self.avatarExitEvents.append(event)
        self.accept(event, self.__handleUnexpectedExit, extraArgs=[avId])
        event = 'inSafezone-%s' % avId
        self.avatarExitEvents.append(event)
        self.accept(event, self.__handleSuddenExit, extraArgs=[avId, 0])
        self.newToons.append(avId)
        self.toons.append(avId)
        toon = simbase.air.doId2do.get(avId)
        if toon:
            if hasattr(self, 'doId'):
                toon.b_setBattleId(self.doId)
            else:
                toon.b_setBattleId(-1)
            messageToonAdded = 'Battle adding toon %s' % avId
            messenger.send(messageToonAdded, [avId])
        # A toon joining mid-movie counts as having already responded.
        if self.fsm != None and self.fsm.getCurrentState().getName() == 'PlayMovie':
            self.responses[avId] = 1
        else:
            self.responses[avId] = 0
        self.adjustingResponses[avId] = 0
        # Snapshot pre-battle state so rewards can be computed as deltas.
        if avId not in self.toonExp:
            p = []
            for t in Tracks:
                p.append(toon.experience.getExp(t))
            self.toonExp[avId] = p
        if avId not in self.toonOrigMerits:
            self.toonOrigMerits[avId] = toon.cogMerits[:]
        if avId not in self.toonMerits:
            self.toonMerits[avId] = [0,
             0,
             0,
             0,
             0]
        if avId not in self.toonOrigQuests:
            flattenedQuests = []
            for quest in toon.quests:
                flattenedQuests.extend(quest)
            self.toonOrigQuests[avId] = flattenedQuests
        if avId not in self.toonItems:
            self.toonItems[avId] = ([], [])
        return 1
def __joinToon(self, avId, pos):
self.joiningToons.append(avId)
toPendingTime = MAX_JOIN_T + SERVER_BUFFER_TIME
taskName = self.taskName('to-pending-av-%d' % avId)
self.__addJoinResponse(avId, taskName, toon=1)
taskMgr.doMethodLater(toPendingTime, self.__serverJoinDone, taskName, extraArgs=(avId, taskName))
self.taskNames.append(taskName)
    def __updateEncounteredCogs(self):
        """Record, for each newly active toon/suit pairing, which cog types the
        active toons have now encountered (feeds the cog page)."""
        for toon in self.activeToons:
            if toon in self.newToons:
                for suit in self.activeSuits:
                    if hasattr(suit, 'dna'):
                        self.suitsEncountered.append({'type': suit.dna.name,
                         'activeToons': self.activeToons[:]})
                    else:
                        self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons))
                        # NOTE(review): this return aborts the whole update,
                        # skipping any remaining suits and toons — looks
                        # suspicious but is preserved as-is.
                        return
                self.newToons.remove(toon)
        for suit in self.activeSuits:
            if suit in self.newSuits:
                if hasattr(suit, 'dna'):
                    self.suitsEncountered.append({'type': suit.dna.name,
                     'activeToons': self.activeToons[:]})
                else:
                    self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons))
                    # NOTE(review): same early-abort behavior as above.
                    return
                self.newSuits.remove(suit)
def __makeToonRun(self, toonId, updateAttacks):
self.activeToons.remove(toonId)
self.toonGone = 1
self.runningToons.append(toonId)
taskName = self.taskName('running-toon-%d' % toonId)
taskMgr.doMethodLater(TOON_RUN_T, self.__serverRunDone, taskName, extraArgs=(toonId, updateAttacks, taskName))
self.taskNames.append(taskName)
    def __serverRunDone(self, toonId, updateAttacks, taskName):
        """Task callback: the toon's run window expired; remove it and either
        resume (battle empty) or rebroadcast choices and re-adjust."""
        self.notify.debug('run for toon: %d timed out on server' % toonId)
        self.__removeTaskName(taskName)
        self.__removeToon(toonId)
        self.d_setMembers()
        if len(self.toons) == 0:
            self.notify.debug('last toon is gone - battle is finished')
            self.b_setState('Resume')
        else:
            if updateAttacks == 1:
                self.d_setChosenToonAttacks()
            self.needAdjust = 1
            self.__requestAdjust()
        return Task.done
    def __requestAdjust(self):
        """Kick off an adjust pass (move pending avatars into position) if the
        battle is in a state that allows it and one is actually needed."""
        if not self.fsm:
            return
        cstate = self.fsm.getCurrentState().getName()
        if cstate == 'WaitForInput' or cstate == 'WaitForJoin':
            if self.adjustFsm.getCurrentState().getName() == 'NotAdjusting':
                if self.needAdjust == 1:
                    self.d_adjust()
                    # Snapshot who is being adjusted so completion can promote
                    # exactly these avatars.
                    self.adjustingSuits = []
                    for s in self.pendingSuits:
                        self.adjustingSuits.append(s)
                    self.adjustingToons = []
                    for t in self.pendingToons:
                        self.adjustingToons.append(t)
                    self.adjustFsm.request('Adjusting')
                else:
                    self.notify.debug('requestAdjust() - dont need to')
            else:
                self.notify.debug('requestAdjust() - already adjusting')
        else:
            self.notify.debug('requestAdjust() - in state: %s' % cstate)
    def __handleUnexpectedExit(self, avId):
        """Avatar-exit event handler: a toon disconnected mid-battle."""
        #TODO: fixme
        #disconnectCode = self.air.getAvatarDisconnectReason(avId)
        disconnectCode = "placeHolder dc code, need self.air.getAvatarDisconnectReason(avId)"
        self.notify.warning('toon: %d exited unexpectedly, reason %s' % (avId, disconnectCode))
        #userAborted = disconnectCode == ToontownGlobals.DisconnectCloseWindow
        #TODO: fixme
        # Always treated as an involuntary exit until the disconnect reason
        # plumbing above is restored.
        userAborted = False
        self.__handleSuddenExit(avId, userAborted)
    def __handleSuddenExit(self, avId, userAborted):
        """Remove a toon that left abruptly; end the battle if it was the last
        one, otherwise trigger a re-adjust."""
        self.__removeToon(avId, userAborted=userAborted)
        if self.fsm.getCurrentState().getName() == 'PlayMovie' or self.fsm.getCurrentState().getName() == 'MakeMovie':
            self.exitedToons.append(avId)
        self.d_setMembers()
        if len(self.toons) == 0:
            self.notify.debug('last toon is gone - battle is finished')
            self.__removeAllTasks()
            self.timer.stop()
            self.adjustingTimer.stop()
            self.b_setState('Resume')
        else:
            self.needAdjust = 1
            self.__requestAdjust()
    def __removeSuit(self, suit):
        """Drop a suit from every membership list and flag the roster change.

        NOTE(review): duplicate of the identical definition earlier in this
        class; being the later definition, this is the binding in effect.
        """
        self.notify.debug('__removeSuit(%d)' % suit.doId)
        self.suits.remove(suit)
        self.activeSuits.remove(suit)
        if self.luredSuits.count(suit) == 1:
            self.luredSuits.remove(suit)
        self.suitGone = 1
        del suit.battleTrap
    def __removeToon(self, toonId, userAborted = 0):
        """Fully detach a toon from the battle.

        Scrubs the toon from every membership list and response tally,
        releases its pet proxy and exit-event hooks, then either flushes its
        hp/inventory back to it (normal exit) or, for a user abort of a
        non-street battle, wipes its inventory and hp via the database.
        """
        self.notify.debug('__removeToon(%d)' % toonId)
        if self.toons.count(toonId) == 0:
            return
        self.battleCalc.toonLeftBattle(toonId)
        self.__removeToonTasks(toonId)
        self.toons.remove(toonId)
        if self.joiningToons.count(toonId) == 1:
            self.joiningToons.remove(toonId)
        if self.pendingToons.count(toonId) == 1:
            self.pendingToons.remove(toonId)
        if self.activeToons.count(toonId) == 1:
            # Keep the suit-attack HP columns aligned with activeToons.
            activeToonIdx = self.activeToons.index(toonId)
            self.notify.debug('removing activeToons[%d], updating suitAttacks SUIT_HP_COL to match' % activeToonIdx)
            for i in range(len(self.suitAttacks)):
                if activeToonIdx < len(self.suitAttacks[i][SUIT_HP_COL]):
                    del self.suitAttacks[i][SUIT_HP_COL][activeToonIdx]
                else:
                    self.notify.warning("suitAttacks %d doesn't have an HP column for active toon index %d" % (i, activeToonIdx))
            self.activeToons.remove(toonId)
        if self.runningToons.count(toonId) == 1:
            self.runningToons.remove(toonId)
        if self.adjustingToons.count(toonId) == 1:
            self.notify.warning('removeToon() - toon: %d was adjusting!' % toonId)
            self.adjustingToons.remove(toonId)
        self.toonGone = 1
        if toonId in self.pets:
            self.pets[toonId].requestDelete()
            del self.pets[toonId]
        self.__removeResponse(toonId)
        self.__removeAdjustingResponse(toonId)
        self.__removeJoinResponses(toonId)
        event = simbase.air.getAvatarExitEvent(toonId)
        self.avatarExitEvents.remove(event)
        self.ignore(event)
        event = 'inSafezone-%s' % toonId
        self.avatarExitEvents.remove(event)
        self.ignore(event)
        toon = simbase.air.doId2do.get(toonId)
        if toon:
            toon.b_setBattleId(0)
            messageToonReleased = 'Battle releasing toon %s' % toon.doId
            messenger.send(messageToonReleased, [toon.doId])
        if not userAborted:
            # Normal exit: hand hp/inventory ownership back to the toon.
            toon = self.getToon(toonId)
            if toon != None:
                toon.hpOwnedByBattle = 0
                toon.d_setHp(toon.hp)
                toon.d_setInventory(toon.inventory.makeNetString())
                self.air.cogPageManager.toonEncounteredCogs(toon, self.suitsEncountered, self.getTaskZoneId())
        elif len(self.suits) > 0 and not self.streetBattle:
            # Abort penalty: zero hp and inventory directly in the database
            # through a throwaway toon object.
            self.notify.info('toon %d aborted non-street battle; clearing inventory and hp.' % toonId)
            toon = DistributedToonAI.DistributedToonAI(self.air)
            toon.doId = toonId
            empty = InventoryBase.InventoryBase(toon)
            toon.b_setInventory(empty.makeNetString())
            toon.b_setHp(0)
            db = DatabaseObject.DatabaseObject(self.air, toonId)
            db.storeObject(toon, ['setInventory', 'setHp'])
            self.notify.info('killing mem leak from temporary DistributedToonAI %d' % toonId)
            toon.deleteDummy()
def getToon(self, toonId):
if toonId in self.air.doId2do:
return self.air.doId2do[toonId]
else:
self.notify.warning('getToon() - toon: %d not in repository!' % toonId)
return
    def toonRequestRun(self):
        """Distributed handler: the sender toon wants to flee the battle.

        Any heal aimed at the runner (or a group heal that would be left with
        too few targets) is reset before the toon is moved to running.
        """
        toonId = self.air.getAvatarIdFromSender()
        if self.ignoreResponses == 1:
            self.notify.debug('ignoring response from toon: %d' % toonId)
            return
        self.notify.debug('toonRequestRun(%d)' % toonId)
        if not self.isRunable():
            self.notify.warning('toonRequestRun() - not runable')
            return
        updateAttacks = 0
        if self.activeToons.count(toonId) == 0:
            self.notify.warning('toon tried to run, but not found in activeToons: %d' % toonId)
            return
        for toon in self.activeToons:
            if toon in self.toonAttacks:
                ta = self.toonAttacks[toon]
                track = ta[TOON_TRACK_COL]
                level = ta[TOON_LVL_COL]
                if ta[TOON_TGT_COL] == toonId or track == HEAL and attackAffectsGroup(track, level) and len(self.activeToons) <= 2:
                    healerId = ta[TOON_ID_COL]
                    self.notify.debug('resetting toon: %ds attack' % healerId)
                    self.toonAttacks[toon] = getToonAttack(toon, track=UN_ATTACK)
                    self.responses[healerId] = 0
                    updateAttacks = 1
        self.__makeToonRun(toonId, updateAttacks)
        self.d_setMembers()
        self.needAdjust = 1
        self.__requestAdjust()
def toonRequestJoin(self, x, y, z):
toonId = self.air.getAvatarIdFromSender()
self.notify.debug('toonRequestJoin(%d)' % toonId)
self.signupToon(toonId, x, y, z)
    def toonDied(self):
        """Distributed handler: the sender toon reports it went sad; zero it
        out and remove it as a sudden (non-abort) exit."""
        toonId = self.air.getAvatarIdFromSender()
        self.notify.debug('toonDied(%d)' % toonId)
        if toonId in self.toons:
            toon = self.getToon(toonId)
            if toon:
                # hp = -1 marks the toon as sad; wipe the regular inventory.
                toon.hp = -1
                toon.inventory.zeroInv(1)
                self.__handleSuddenExit(toonId, 0)
def signupToon(self, toonId, x, y, z):
if self.toons.count(toonId):
return
if self.toonCanJoin():
if self.addToon(toonId):
self.__joinToon(toonId, Point3(x, y, z))
self.d_setMembers()
else:
self.notify.warning('toonRequestJoin() - not joinable')
self.d_denyLocalToonJoin(toonId)
    def d_denyLocalToonJoin(self, toonId):
        """Tell one client its local toon may not join this battle."""
        self.notify.debug('network: denyLocalToonJoin(%d)' % toonId)
        self.sendUpdateToAvatarId(toonId, 'denyLocalToonJoin', [])
def resetResponses(self):
self.responses = {}
for t in self.toons:
self.responses[t] = 0
self.ignoreResponses = 0
def allToonsResponded(self):
for t in self.toons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __allPendingActiveToonsResponded(self):
for t in self.pendingToons + self.activeToons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __allActiveToonsResponded(self):
for t in self.activeToons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
    def __removeResponse(self, toonId):
        """Drop a toon from the response tally; if its removal completes the
        current poll, advance the state machine immediately."""
        del self.responses[toonId]
        if self.ignoreResponses == 0 and len(self.toons) > 0:
            currStateName = self.fsm.getCurrentState().getName()
            if currStateName == 'WaitForInput':
                if self.__allActiveToonsResponded():
                    self.notify.debug('removeResponse() - dont wait for movie')
                    self.__requestMovie()
            elif currStateName == 'PlayMovie':
                if self.__allPendingActiveToonsResponded():
                    self.notify.debug('removeResponse() - surprise movie done')
                    self.__movieDone()
            elif currStateName == 'Reward' or currStateName == 'BuildingReward':
                if self.__allActiveToonsResponded():
                    self.notify.debug('removeResponse() - surprise reward done')
                    self.handleRewardDone()
def __resetAdjustingResponses(self):
self.adjustingResponses = {}
for t in self.toons:
self.adjustingResponses[t] = 0
self.ignoreAdjustingResponses = 0
def __allAdjustingToonsResponded(self):
for t in self.toons:
if self.adjustingResponses[t] == 0:
return 0
self.ignoreAdjustingResponses = 1
return 1
def __removeAdjustingResponse(self, toonId):
if toonId in self.adjustingResponses:
del self.adjustingResponses[toonId]
if self.ignoreAdjustingResponses == 0 and len(self.toons) > 0:
if self.__allAdjustingToonsResponded():
self.__adjustDone()
def __addJoinResponse(self, avId, taskName, toon = 0):
if toon == 1:
for jr in self.joinResponses.values():
jr[avId] = 0
self.joinResponses[avId] = {}
for t in self.toons:
self.joinResponses[avId][t] = 0
self.joinResponses[avId]['taskName'] = taskName
def __removeJoinResponses(self, avId):
self.__removeJoinResponse(avId)
removedOne = 0
for j in self.joinResponses.values():
if avId in j:
del j[avId]
removedOne = 1
if removedOne == 1:
for t in self.joiningToons:
if self.__allToonsRespondedJoin(t):
self.__makeAvPending(t)
def __removeJoinResponse(self, avId):
if avId in self.joinResponses:
taskMgr.remove(self.joinResponses[avId]['taskName'])
del self.joinResponses[avId]
def __allToonsRespondedJoin(self, avId):
jr = self.joinResponses[avId]
for t in self.toons:
if jr[t] == 0:
return 0
return 1
def __cleanupJoinResponses(self):
for jr in self.joinResponses.values():
taskMgr.remove(jr['taskName'])
del jr
def adjustDone(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreAdjustingResponses == 1:
self.notify.debug('adjustDone() - ignoring toon: %d' % toonId)
return
elif self.adjustFsm.getCurrentState().getName() != 'Adjusting':
self.notify.warning('adjustDone() - in state %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('adjustDone() - toon: %d not in toon list' % toonId)
return
self.adjustingResponses[toonId] += 1
self.notify.debug('toon: %d done adjusting' % toonId)
if self.__allAdjustingToonsResponded():
self.__adjustDone()
def timeout(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('timeout() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'WaitForInput':
self.notify.warning('timeout() - in state: %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('timeout() - toon: %d not in toon list' % toonId)
return
self.toonAttacks[toonId] = getToonAttack(toonId)
self.d_setChosenToonAttacks()
self.responses[toonId] += 1
self.notify.debug('toon: %d timed out' % toonId)
if self.__allActiveToonsResponded():
self.__requestMovie(timeout=1)
def movieDone(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('movieDone() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'PlayMovie':
self.notify.warning('movieDone() - in state %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('movieDone() - toon: %d not in toon list' % toonId)
return
self.responses[toonId] += 1
self.notify.debug('toon: %d done with movie' % toonId)
if self.__allPendingActiveToonsResponded():
self.__movieDone()
else:
self.timer.stop()
self.timer.startCallback(TIMEOUT_PER_USER, self.__serverMovieDone)
def rewardDone(self):
toonId = self.air.getAvatarIdFromSender()
stateName = self.fsm.getCurrentState().getName()
if self.ignoreResponses == 1:
self.notify.debug('rewardDone() - ignoring toon: %d' % toonId)
return
elif stateName not in ('Reward', 'BuildingReward', 'FactoryReward', 'MintReward', 'StageReward', 'CountryClubReward'):
self.notify.warning('rewardDone() - in state %s' % stateName)
return
elif self.toons.count(toonId) == 0:
self.notify.warning('rewardDone() - toon: %d not in toon list' % toonId)
return
self.responses[toonId] += 1
self.notify.debug('toon: %d done with reward' % toonId)
if self.__allActiveToonsResponded():
self.handleRewardDone()
else:
self.timer.stop()
self.timer.startCallback(TIMEOUT_PER_USER, self.serverRewardDone)
def assignRewards(self):
if self.rewardHasPlayed == 1:
self.notify.debug('handleRewardDone() - reward has already played')
return
self.rewardHasPlayed = 1
BattleExperienceAI.assignRewards(self.activeToons, self.battleCalc.toonSkillPtsGained, self.suitsKilled, self.getTaskZoneId(), self.helpfulToons)
def joinDone(self, avId):
toonId = self.air.getAvatarIdFromSender()
if self.toons.count(toonId) == 0:
self.notify.warning('joinDone() - toon: %d not in toon list' % toonId)
return
if avId not in self.joinResponses:
self.notify.debug('joinDone() - no entry for: %d - ignoring: %d' % (avId, toonId))
return
jr = self.joinResponses[avId]
if toonId in jr:
jr[toonId] += 1
self.notify.debug('client with localToon: %d done joining av: %d' % (toonId, avId))
if self.__allToonsRespondedJoin(avId):
self.__makeAvPending(avId)
    def requestAttack(self, track, level, av):
        """Distributed handler: record the sender toon's chosen attack.

        track is a gag track or special action (SOS, NPCSOS, PETSOS,
        UN_ATTACK, PASS, FIRE); level is the gag level (or pet trick id); av
        is the target doId (-1 for none/group). A valid choice bumps the
        toon's response count; once all active toons respond, the movie is
        requested.
        """
        toonId = self.air.getAvatarIdFromSender()
        if self.ignoreResponses == 1:
            self.notify.debug('requestAttack() - ignoring toon: %d' % toonId)
            return
        elif self.fsm.getCurrentState().getName() != 'WaitForInput':
            self.notify.warning('requestAttack() - in state: %s' % self.fsm.getCurrentState().getName())
            return
        elif self.activeToons.count(toonId) == 0:
            self.notify.warning('requestAttack() - toon: %d not in toon list' % toonId)
            return
        self.notify.debug('requestAttack(%d, %d, %d, %d)' % (toonId,
         track,
         level,
         av))
        toon = self.getToon(toonId)
        if toon == None:
            self.notify.warning('requestAttack() - no toon: %d' % toonId)
            return
        validResponse = 1
        if track == SOS:
            self.notify.debug('toon: %d calls for help' % toonId)
            self.air.writeServerEvent('friendSOS', toonId, '%s' % av)
            self.toonAttacks[toonId] = getToonAttack(toonId, track=SOS, target=av)
        elif track == NPCSOS:
            self.notify.debug('toon: %d calls for help' % toonId)
            self.air.writeServerEvent('NPCSOS', toonId, '%s' % av)
            toon = self.getToon(toonId)
            if toon == None:
                return
            if av in toon.NPCFriendsDict:
                # Only one toon may summon a given NPC per round; a second
                # request for the same NPC becomes a PASS.
                npcCollision = 0
                if av in self.npcAttacks:
                    callingToon = self.npcAttacks[av]
                    if self.activeToons.count(callingToon) == 1:
                        self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
                        npcCollision = 1
                if npcCollision == 0:
                    self.toonAttacks[toonId] = getToonAttack(toonId, track=NPCSOS, level=5, target=av)
                    self.numNPCAttacks += 1
                    self.npcAttacks[av] = toonId
        elif track == PETSOS:
            self.notify.debug('toon: %d calls for pet: %d' % (toonId, av))
            self.air.writeServerEvent('PETSOS', toonId, '%s' % av)
            toon = self.getToon(toonId)
            if toon == None:
                return
            if not self.validate(toonId, level in toon.petTrickPhrases, 'requestAttack: invalid pet trickId: %s' % level):
                return
            self.toonAttacks[toonId] = getToonAttack(toonId, track=PETSOS, level=level, target=av)
        elif track == UN_ATTACK:
            # Un-choosing resets the toon's response so it must pick again.
            self.notify.debug('toon: %d changed its mind' % toonId)
            self.toonAttacks[toonId] = getToonAttack(toonId, track=UN_ATTACK)
            if toonId in self.responses:
                self.responses[toonId] = 0
            validResponse = 0
        elif track == PASS:
            self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
        elif track == FIRE:
            if simbase.air.doId2do[toonId].getPinkSlips() < self.getFireCount() + 1:
                #Not allowed to fire, force them to pass >:D
                self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
            else:
                #Allowed to fire
                self.setFireCount(self.fireCount + 1)
                self.toonAttacks[toonId] = getToonAttack(toonId, track=FIRE, target=av)
        else:
            # Regular gag: validate track/level and inventory before accepting.
            if not self.validate(toonId, track >= 0 and track <= MAX_TRACK_INDEX, 'requestAttack: invalid track %s' % track):
                return
            if not self.validate(toonId, level >= 0 and level <= MAX_LEVEL_INDEX, 'requestAttack: invalid level %s' % level):
                return
            if toon.inventory.numItem(track, level) == 0:
                self.notify.warning('requestAttack() - toon has no item track: %d level: %d' % (track, level))
                self.toonAttacks[toonId] = getToonAttack(toonId)
                return
            if track == HEAL:
                # A heal aimed at a running toon, or a group heal with too
                # few targets, is rejected back to unchosen.
                if self.runningToons.count(av) == 1 or attackAffectsGroup(track, level) and len(self.activeToons) < 2:
                    self.toonAttacks[toonId] = getToonAttack(toonId, track=UN_ATTACK)
                    validResponse = 0
                else:
                    self.toonAttacks[toonId] = getToonAttack(toonId, track=track, level=level, target=av)
            else:
                self.toonAttacks[toonId] = getToonAttack(toonId, track=track, level=level, target=av)
                if av == -1 and not attackAffectsGroup(track, level):
                    validResponse = 0
        self.d_setChosenToonAttacks()
        if validResponse == 1:
            self.responses[toonId] += 1
        self.notify.debug('toon: %d chose an attack' % toonId)
        if self.__allActiveToonsResponded():
            self.__requestMovie()
    def requestPetProxy(self, av):
        """Distributed handler: the sender toon asks for its pet to be usable
        in battle.

        Builds a DistributedPetProxyAI copy of the real pet object, deletes
        the original, and generates the proxy under the same doId once the
        original has fully left the repository.
        """
        toonId = self.air.getAvatarIdFromSender()
        if self.ignoreResponses == 1:
            self.notify.debug('requestPetProxy() - ignoring toon: %d' % toonId)
            return
        elif self.fsm.getCurrentState().getName() != 'WaitForInput':
            self.notify.warning('requestPetProxy() - in state: %s' % self.fsm.getCurrentState().getName())
            return
        elif self.activeToons.count(toonId) == 0:
            self.notify.warning('requestPetProxy() - toon: %d not in toon list' % toonId)
            return
        self.notify.debug('requestPetProxy(%s, %s)' % (toonId, av))
        toon = self.getToon(toonId)
        if toon == None:
            self.notify.warning('requestPetProxy() - no toon: %d' % toonId)
            return
        petId = toon.getPetId()
        zoneId = self.zoneId
        # Only the toon's own pet may be summoned, and only once per battle.
        if petId == av:
            if not toonId in self.pets:
                def handleGetPetProxy(success, pet, petId = petId, zoneId = zoneId, toonId = toonId):
                    if success:
                        # Copy every persisted field from the real pet onto
                        # the battle proxy.
                        petProxy = DistributedPetProxyAI.DistributedPetProxyAI(self.air)
                        petProxy.setOwnerId(pet.getOwnerId())
                        petProxy.setPetName(pet.getPetName())
                        petProxy.setTraitSeed(pet.getTraitSeed())
                        petProxy.setSafeZone(pet.getSafeZone())
                        petProxy.setForgetfulness(pet.getForgetfulness())
                        petProxy.setBoredomThreshold(pet.getBoredomThreshold())
                        petProxy.setRestlessnessThreshold(pet.getRestlessnessThreshold())
                        petProxy.setPlayfulnessThreshold(pet.getPlayfulnessThreshold())
                        petProxy.setLonelinessThreshold(pet.getLonelinessThreshold())
                        petProxy.setSadnessThreshold(pet.getSadnessThreshold())
                        petProxy.setFatigueThreshold(pet.getFatigueThreshold())
                        petProxy.setHungerThreshold(pet.getHungerThreshold())
                        petProxy.setConfusionThreshold(pet.getConfusionThreshold())
                        petProxy.setExcitementThreshold(pet.getExcitementThreshold())
                        petProxy.setAngerThreshold(pet.getAngerThreshold())
                        petProxy.setSurpriseThreshold(pet.getSurpriseThreshold())
                        petProxy.setAffectionThreshold(pet.getAffectionThreshold())
                        petProxy.setHead(pet.getHead())
                        petProxy.setEars(pet.getEars())
                        petProxy.setNose(pet.getNose())
                        petProxy.setTail(pet.getTail())
                        petProxy.setBodyTexture(pet.getBodyTexture())
                        petProxy.setColor(pet.getColor())
                        petProxy.setColorScale(pet.getColorScale())
                        petProxy.setEyeColor(pet.getEyeColor())
                        petProxy.setGender(pet.getGender())
                        petProxy.setLastSeenTimestamp(pet.getLastSeenTimestamp())
                        petProxy.setBoredom(pet.getBoredom())
                        petProxy.setRestlessness(pet.getRestlessness())
                        petProxy.setPlayfulness(pet.getPlayfulness())
                        petProxy.setLoneliness(pet.getLoneliness())
                        petProxy.setSadness(pet.getSadness())
                        petProxy.setAffection(pet.getAffection())
                        petProxy.setHunger(pet.getHunger())
                        petProxy.setConfusion(pet.getConfusion())
                        petProxy.setExcitement(pet.getExcitement())
                        petProxy.setFatigue(pet.getFatigue())
                        petProxy.setAnger(pet.getAnger())
                        petProxy.setSurprise(pet.getSurprise())
                        petProxy.setTrickAptitudes(pet.getTrickAptitudes())
                        pet.requestDelete()
                        def deleted(task):
                            # Generate the proxy only after the real pet's
                            # doId has been released from the repository.
                            petProxy.doNotDeallocateChannel = True
                            petProxy.generateWithRequiredAndId(petId, self.air.districtId, self.zoneId)
                            petProxy.broadcastDominantMood()
                            self.pets[toonId] = petProxy
                            return task.done
                        self.acceptOnce(self.air.getAvatarExitEvent(petId),
                                        lambda: taskMgr.doMethodLater(0,
                                                        deleted, self.uniqueName('petdel-%d' % petId)))
                    else:
                        self.notify.warning('error generating petProxy: %s' % petId)
                self.getPetProxyObject(petId, handleGetPetProxy)
def suitCanJoin(self):
    """Return True if the battle has room for another suit and is currently joinable."""
    hasRoom = len(self.suits) < self.maxSuits
    return hasRoom and self.isJoinable()
def toonCanJoin(self):
    """Return True if fewer than four toons are present and the battle is joinable."""
    hasRoom = len(self.toons) < 4
    return hasRoom and self.isJoinable()
def __requestMovie(self, timeout = 0):
    # Ask the battle FSM to build the round movie. If suits/toons are still
    # adjusting into position, just flag the request; enterNotAdjusting() will
    # retry it once the adjust completes.
    if self.adjustFsm.getCurrentState().getName() == 'Adjusting':
        self.notify.debug('__requestMovie() - in Adjusting')
        self.movieRequested = 1
    else:
        movieDelay = 0
        if len(self.activeToons) == 0:
            self.notify.warning('only pending toons left in battle %s, toons = %s' % (self.doId, self.toons))
        elif len(self.activeSuits) == 0:
            self.notify.warning('only pending suits left in battle %s, suits = %s' % (self.doId, self.suits))
        elif len(self.activeToons) > 1 and not timeout:
            # With multiple toons and no server timeout, wait briefly so all
            # clients' chosen attacks can arrive before the movie is built.
            movieDelay = 1
        self.fsm.request('MakeMovie')
        if movieDelay:
            taskMgr.doMethodLater(0.8, self.__makeMovie, self.uniqueName('make-movie'))
            self.taskNames.append(self.uniqueName('make-movie'))
        else:
            self.__makeMovie()
def __makeMovie(self, task = None):
    # Finalize each toon's attack choice, run the battle calculator for the
    # round, push earned experience, and transition to the PlayMovie state.
    # May run as a deferred task (hence the optional `task` argument).
    self.notify.debug('makeMovie()')
    if self._DOAI_requestedDelete:
        # The battle object was asked to delete mid-round; bail out.
        self.notify.warning('battle %s requested delete, then __makeMovie was called!' % self.doId)
        if hasattr(self, 'levelDoId'):
            self.notify.warning('battle %s in level %s' % (self.doId, self.levelDoId))
        return
    self.__removeTaskName(self.uniqueName('make-movie'))
    if self.movieHasBeenMade == 1:
        # Guard against double invocation (timeout + explicit request).
        self.notify.debug('__makeMovie() - movie has already been made')
        return
    self.movieRequested = 0
    self.movieHasBeenMade = 1
    self.movieHasPlayed = 0
    self.rewardHasPlayed = 0
    for t in self.activeToons:
        if t not in self.toonAttacks:
            self.toonAttacks[t] = getToonAttack(t)
        attack = self.toonAttacks[t]
        if attack[TOON_TRACK_COL] == PASS or attack[TOON_TRACK_COL] == UN_ATTACK:
            # Normalize pass/un-attack choices to a fresh default attack record.
            self.toonAttacks[t] = getToonAttack(t)
        if self.toonAttacks[t][TOON_TRACK_COL] != NO_ATTACK:
            self.addHelpfulToon(t)
    self.battleCalc.calculateRound()
    for t in self.activeToons:
        self.sendEarnedExperience(t)
        toon = self.getToon(t)
        if toon != None:
            # The battle owns the toon's HP while the movie plays.
            toon.hpOwnedByBattle = 1
            if toon.immortalMode:
                toon.toonUp(toon.maxHp)
    self.d_setMovie()
    self.b_setState('PlayMovie')
    return Task.done
def sendEarnedExperience(self, toonId):
    """Send the given toon its per-track skill points gained this round.

    Reads the battle calculator's `toonSkillPtsGained` table and pushes the
    values (rounded to the nearest int) to the client via
    `d_setEarnedExperience`. Sends an empty list when no experience record
    exists. No-op when the toon object is unavailable.
    """
    toon = self.getToon(toonId)
    if toon is None:
        # Toon may have disconnected mid-round; nothing to send.
        return
    expList = self.battleCalc.toonSkillPtsGained.get(toonId, None)
    if expList is None:
        toon.d_setEarnedExperience([])
    else:
        # Round each track's float experience to the nearest whole point.
        toon.d_setEarnedExperience([int(exp + 0.5) for exp in expList])
def enterOff(self):
    # 'Off' state: battle inactive; nothing to do on the AI side.
    return

def exitOff(self):
    return

def enterFaceOff(self):
    # Face-off sequencing is driven by the clients; the AI is passive here.
    return

def exitFaceOff(self):
    return
def enterWaitForJoin(self):
    # Waiting for combatants to join. If suits are already active, move
    # straight on to WaitForInput; otherwise stay runable and request an
    # adjust so pending combatants get positioned.
    self.notify.debug('enterWaitForJoin()')
    if len(self.activeSuits) > 0:
        self.b_setState('WaitForInput')
    else:
        self.notify.debug('enterWaitForJoin() - no active suits')
        self.runableFsm.request('Runable')
        self.resetResponses()
        self.__requestAdjust()

def exitWaitForJoin(self):
    pass
def enterWaitForInput(self):
    # Collect attack choices from the clients. The battle is joinable and
    # runable while waiting; a server-side timer forces the round if clients
    # take too long (disabled during the tutorial).
    self.notify.debug('enterWaitForInput()')
    self.joinableFsm.request('Joinable')
    self.runableFsm.request('Runable')
    self.resetResponses()
    self.__requestAdjust()
    if not self.tutorialFlag:
        self.timer.startCallback(SERVER_INPUT_TIMEOUT, self.__serverTimedOut)
    self.npcAttacks = {}
    for toonId in self.toons:
        # Developer convenience: auto-refill gags when the bboard flag is set.
        if bboard.get('autoRestock-%s' % toonId, False):
            toon = self.air.doId2do.get(toonId)
            if toon is not None:
                toon.doRestock(0)
def exitWaitForInput(self):
    self.npcAttacks = {}
    self.timer.stop()

def __serverTimedOut(self):
    # Input window elapsed: stop listening for further responses and force
    # the round movie with whatever attacks were received.
    self.notify.debug('wait for input timed out on server')
    self.ignoreResponses = 1
    self.__requestMovie(timeout=1)
def enterMakeMovie(self):
    # Building the round movie: toons may no longer run from the battle.
    self.notify.debug('enterMakeMovie()')
    self.runableFsm.request('Unrunable')
    self.resetResponses()

def exitMakeMovie(self):
    pass
def enterPlayMovie(self):
    # Clients play the round movie. Estimate an upper bound on its duration
    # from the per-attack time constants and arm a timeout so a stalled
    # client cannot hang the battle.
    self.notify.debug('enterPlayMovie()')
    self.joinableFsm.request('Joinable')
    self.runableFsm.request('Unrunable')
    self.resetResponses()
    movieTime = TOON_ATTACK_TIME * (len(self.activeToons) + self.numNPCAttacks) + SUIT_ATTACK_TIME * len(self.activeSuits) + SERVER_BUFFER_TIME
    self.numNPCAttacks = 0
    self.notify.debug('estimated upper bound of movie time: %f' % movieTime)
    self.timer.startCallback(movieTime, self.__serverMovieDone)
def __serverMovieDone(self):
    # Movie playback timed out: stop accepting client acks and apply results.
    self.notify.debug('movie timed out on server')
    self.ignoreResponses = 1
    self.__movieDone()

def serverRewardDone(self):
    # Reward movie timed out: stop accepting acks and finish the reward phase.
    self.notify.debug('reward timed out on server')
    self.ignoreResponses = 1
    self.handleRewardDone()

def handleRewardDone(self):
    self.b_setState('Resume')

def exitPlayMovie(self):
    self.timer.stop()
def __movieDone(self):
    # Apply the results of the round that just played: consume gags, set and
    # clear traps, record lures, remove dead suits, apply heal/damage totals
    # to toons, and remove dead toons. Runs exactly once per round movie.
    self.notify.debug('__movieDone() - movie is finished')
    if self.movieHasPlayed == 1:
        self.notify.debug('__movieDone() - movie had already finished')
        return
    self.movieHasBeenMade = 0
    self.movieHasPlayed = 1
    self.ignoreResponses = 1
    needUpdate = 0
    # toonHpDict[toonId] = [heal total, damage total, died flag]
    toonHpDict = {}
    for toon in self.activeToons:
        toonHpDict[toon] = [0, 0, 0]
        actualToon = self.getToon(toon)
        self.notify.debug('BEFORE ROUND: toon: %d hp: %d' % (toon, actualToon.hp))
    deadSuits = []
    trapDict = {}
    suitsLuredOntoTraps = []
    npcTrapAttacks = []
    # --- Pass 1: process every toon attack (including toons that exited). ---
    for activeToon in self.activeToons + self.exitedToons:
        if activeToon in self.toonAttacks:
            attack = self.toonAttacks[activeToon]
            track = attack[TOON_TRACK_COL]
            npc_level = None
            if track == NPCSOS:
                # Resolve the NPC friend into a concrete track/level.
                track, npc_level, npc_hp = NPCToons.getNPCTrackLevelHp(attack[TOON_TGT_COL])
                if track == None:
                    track = NPCSOS
                elif track == TRAP:
                    # NPC trap: consume one friend use and defer to pass 4.
                    npcTrapAttacks.append(attack)
                    toon = self.getToon(attack[TOON_ID_COL])
                    av = attack[TOON_TGT_COL]
                    if toon != None and av in toon.NPCFriendsDict:
                        toon.NPCFriendsDict[av] -= 1
                        if toon.NPCFriendsDict[av] <= 0:
                            del toon.NPCFriendsDict[av]
                        toon.d_setNPCFriendsDict(toon.NPCFriendsDict)
                    continue
            if track != NO_ATTACK:
                toonId = attack[TOON_ID_COL]
                level = attack[TOON_LVL_COL]
                if npc_level != None:
                    level = npc_level
                if attack[TOON_TRACK_COL] == NPCSOS:
                    # Consume one use of the NPC friend card.
                    toon = self.getToon(toonId)
                    av = attack[TOON_TGT_COL]
                    if toon != None and av in toon.NPCFriendsDict:
                        toon.NPCFriendsDict[av] -= 1
                        if toon.NPCFriendsDict[av] <= 0:
                            del toon.NPCFriendsDict[av]
                        toon.d_setNPCFriendsDict(toon.NPCFriendsDict)
                elif track == PETSOS:
                    pass
                elif track == FIRE:
                    pass
                elif track != SOS:
                    # Ordinary gag: deduct it from the toon's inventory.
                    toon = self.getToon(toonId)
                    if toon != None:
                        check = toon.inventory.useItem(track, level)
                        if check == -1:
                            self.air.writeServerEvent('suspicious', toonId, 'Toon generating movie for non-existant gag track %s level %s' % (track, level))
                            self.notify.warning('generating movie for non-existant gag track %s level %s! avId: %s' % (track, level, toonId))
                        toon.d_setInventory(toon.inventory.makeNetString())
                hps = attack[TOON_HP_COL]
                if track == SOS:
                    self.notify.debug('toon: %d called for help' % toonId)
                elif track == NPCSOS:
                    self.notify.debug('toon: %d called for help' % toonId)
                elif track == PETSOS:
                    # Pet trick: distribute the per-toon heal amounts.
                    self.notify.debug('toon: %d called for pet' % toonId)
                    for i in range(len(self.activeToons)):
                        toon = self.getToon(self.activeToons[i])
                        if toon != None:
                            if i < len(hps):
                                hp = hps[i]
                                if hp > 0:
                                    toonHpDict[toon.doId][0] += hp
                                    self.notify.debug('pet heal: toon: %d healed for hp: %d' % (toon.doId, hp))
                            else:
                                self.notify.warning('Invalid targetIndex %s in hps %s.' % (i, hps))
                elif track == NPC_RESTOCK_GAGS:
                    for at in self.activeToons:
                        toon = self.getToon(at)
                        if toon != None:
                            toon.inventory.NPCMaxOutInv(npc_level)
                            toon.d_setInventory(toon.inventory.makeNetString())
                elif track == HEAL:
                    if levelAffectsGroup(HEAL, level):
                        # Group heal: everyone except the healer (unless NPC).
                        for i in range(len(self.activeToons)):
                            at = self.activeToons[i]
                            if at != toonId or attack[TOON_TRACK_COL] == NPCSOS:
                                toon = self.getToon(at)
                                if toon != None:
                                    if i < len(hps):
                                        hp = hps[i]
                                    else:
                                        self.notify.warning('Invalid targetIndex %s in hps %s.' % (i, hps))
                                        hp = 0
                                    toonHpDict[toon.doId][0] += hp
                                    self.notify.debug('HEAL: toon: %d healed for hp: %d' % (toon.doId, hp))
                    else:
                        # Single-target heal.
                        targetId = attack[TOON_TGT_COL]
                        toon = self.getToon(targetId)
                        if toon != None and targetId in self.activeToons:
                            targetIndex = self.activeToons.index(targetId)
                            if targetIndex < len(hps):
                                hp = hps[targetIndex]
                            else:
                                self.notify.warning('Invalid targetIndex %s in hps %s.' % (targetIndex, hps))
                                hp = 0
                            toonHpDict[toon.doId][0] += hp
                elif attackAffectsGroup(track, level, attack[TOON_TRACK_COL]):
                    # Group attack on all active suits.
                    for suit in self.activeSuits:
                        targetIndex = self.activeSuits.index(suit)
                        if targetIndex < 0 or targetIndex >= len(hps):
                            self.notify.warning('Got attack (%s, %s) on target suit %s, but hps has only %s entries: %s' % (track,
                             level,
                             targetIndex,
                             len(hps),
                             hps))
                        else:
                            hp = hps[targetIndex]
                            if hp > 0 and track == LURE:
                                # A successful lure springs/clears the suit's trap.
                                if suit.battleTrap == UBER_GAG_LEVEL_INDEX:
                                    pass
                                suit.battleTrap = NO_TRAP
                                needUpdate = 1
                                if suit.doId in trapDict:
                                    del trapDict[suit.doId]
                                if suitsLuredOntoTraps.count(suit) == 0:
                                    suitsLuredOntoTraps.append(suit)
                            if track == TRAP:
                                # Collect trap attacks per suit; resolved in pass 2.
                                targetId = suit.doId
                                if targetId in trapDict:
                                    trapDict[targetId].append(attack)
                                else:
                                    trapDict[targetId] = [attack]
                                needUpdate = 1
                            died = attack[SUIT_DIED_COL] & 1 << targetIndex
                            if died != 0:
                                if deadSuits.count(suit) == 0:
                                    deadSuits.append(suit)
                else:
                    # Single-target suit attack.
                    targetId = attack[TOON_TGT_COL]
                    target = self.findSuit(targetId)
                    if target != None:
                        targetIndex = self.activeSuits.index(target)
                        if targetIndex < 0 or targetIndex >= len(hps):
                            self.notify.warning('Got attack (%s, %s) on target suit %s, but hps has only %s entries: %s' % (track,
                             level,
                             targetIndex,
                             len(hps),
                             hps))
                        else:
                            hp = hps[targetIndex]
                            if track == TRAP:
                                if targetId in trapDict:
                                    trapDict[targetId].append(attack)
                                else:
                                    trapDict[targetId] = [attack]
                            if hp > 0 and track == LURE:
                                oldBattleTrap = target.battleTrap
                                if oldBattleTrap == UBER_GAG_LEVEL_INDEX:
                                    pass
                                target.battleTrap = NO_TRAP
                                needUpdate = 1
                                if target.doId in trapDict:
                                    del trapDict[target.doId]
                                if suitsLuredOntoTraps.count(target) == 0:
                                    suitsLuredOntoTraps.append(target)
                                if oldBattleTrap == UBER_GAG_LEVEL_INDEX:
                                    # Springing a train trap clears it for all suits.
                                    for otherSuit in self.activeSuits:
                                        if not otherSuit == target:
                                            otherSuit.battleTrap = NO_TRAP
                                            if otherSuit.doId in trapDict:
                                                del trapDict[otherSuit.doId]
                            died = attack[SUIT_DIED_COL] & 1 << targetIndex
                            if died != 0:
                                if deadSuits.count(target) == 0:
                                    deadSuits.append(target)
    self.exitedToons = []
    # --- Pass 2: resolve trap placements (colliding traps cancel out). ---
    for suitKey in trapDict.keys():
        attackList = trapDict[suitKey]
        attack = attackList[0]
        target = self.findSuit(attack[TOON_TGT_COL])
        if attack[TOON_LVL_COL] == UBER_GAG_LEVEL_INDEX:
            targetId = suitKey
            target = self.findSuit(targetId)
        if len(attackList) == 1:
            if suitsLuredOntoTraps.count(target) == 0:
                self.notify.debug('movieDone() - trap set')
                target.battleTrap = attack[TOON_LVL_COL]
                needUpdate = 1
            else:
                target.battleTrap = NO_TRAP
        else:
            self.notify.debug('movieDone() - traps collided')
            if target != None:
                target.battleTrap = NO_TRAP
    if self.battleCalc.trainTrapTriggered:
        self.notify.debug('Train trap triggered, clearing all traps')
        for otherSuit in self.activeSuits:
            self.notify.debug('suit =%d, oldBattleTrap=%d' % (otherSuit.doId, otherSuit.battleTrap))
            otherSuit.battleTrap = NO_TRAP
    # --- Pass 3: sync the lured-suit list with the battle calculator. ---
    currLuredSuits = self.battleCalc.getLuredSuits()
    if len(self.luredSuits) == len(currLuredSuits):
        for suit in self.luredSuits:
            if currLuredSuits.count(suit.doId) == 0:
                needUpdate = 1
                break
    else:
        needUpdate = 1
    self.luredSuits = []
    for i in currLuredSuits:
        suit = self.air.doId2do[i]
        self.luredSuits.append(suit)
        self.notify.debug('movieDone() - suit: %d is lured' % i)
    # --- Pass 4: NPC traps apply to every untrapped, unlured suit. ---
    for attack in npcTrapAttacks:
        track, level, hp = NPCToons.getNPCTrackLevelHp(attack[TOON_TGT_COL])
        for suit in self.activeSuits:
            if self.luredSuits.count(suit) == 0 and suit.battleTrap == NO_TRAP:
                suit.battleTrap = level
        needUpdate = 1
    # --- Pass 5: remove dead suits and record kill encounters. ---
    for suit in deadSuits:
        self.notify.debug('removing dead suit: %d' % suit.doId)
        if suit.isDeleted():
            self.notify.debug('whoops, suit %d is deleted.' % suit.doId)
        else:
            self.notify.debug('suit had revives? %d' % suit.getMaxSkeleRevives())
            encounter = {'type': suit.dna.name,
             'level': suit.getActualLevel(),
             'track': suit.dna.dept,
             'isSkelecog': suit.getSkelecog(),
             'isForeman': suit.isForeman(),
             'isVP': 0,
             'isCFO': 0,
             'isSupervisor': suit.isSupervisor(),
             'isVirtual': suit.isVirtual(),
             'hasRevives': suit.getMaxSkeleRevives(),
             'activeToons': self.activeToons[:]}
            self.suitsKilled.append(encounter)
            self.suitsKilledThisBattle.append(encounter)
            self.air.suitInvasionManager.handleSuitDefeated()
        self.__removeSuit(suit)
        needUpdate = 1
        suit.resume()
    lastActiveSuitDied = 0
    if len(self.activeSuits) == 0 and len(self.pendingSuits) == 0:
        lastActiveSuitDied = 1
    # --- Pass 6: apply suit attacks (damage) to toons. ---
    for i in range(4):
        attack = self.suitAttacks[i][SUIT_ATK_COL]
        if attack != NO_ATTACK:
            suitId = self.suitAttacks[i][SUIT_ID_COL]
            suit = self.findSuit(suitId)
            if suit == None:
                self.notify.warning('movieDone() - suit: %d is gone!' % suitId)
                continue
            if not (hasattr(suit, 'dna') and suit.dna):
                toonId = self.air.getAvatarIdFromSender()
                self.notify.warning('_movieDone avoiding crash, sender=%s but suit has no dna' % toonId)
                self.air.writeServerEvent('suspicious', toonId, '_movieDone avoiding crash, suit has no dna')
                continue
            adict = getSuitAttack(suit.getStyleName(), suit.getLevel(), attack)
            hps = self.suitAttacks[i][SUIT_HP_COL]
            if adict['group'] == ATK_TGT_GROUP:
                for activeToon in self.activeToons:
                    toon = self.getToon(activeToon)
                    if toon != None:
                        targetIndex = self.activeToons.index(activeToon)
                        toonDied = self.suitAttacks[i][TOON_DIED_COL] & 1 << targetIndex
                        if targetIndex >= len(hps):
                            self.notify.warning('DAMAGE: toon %s is no longer in battle!' % activeToon)
                        else:
                            hp = hps[targetIndex]
                            if hp > 0:
                                self.notify.debug('DAMAGE: toon: %d hit for dmg: %d' % (activeToon, hp))
                                if toonDied != 0:
                                    toonHpDict[toon.doId][2] = 1
                                toonHpDict[toon.doId][1] += hp
            elif adict['group'] == ATK_TGT_SINGLE:
                targetIndex = self.suitAttacks[i][SUIT_TGT_COL]
                if targetIndex >= len(self.activeToons):
                    self.notify.warning('movieDone() - toon: %d gone!' % targetIndex)
                    break
                toonId = self.activeToons[targetIndex]
                toon = self.getToon(toonId)
                toonDied = self.suitAttacks[i][TOON_DIED_COL] & 1 << targetIndex
                if targetIndex >= len(hps):
                    self.notify.warning('DAMAGE: toon %s is no longer in battle!' % toonId)
                else:
                    hp = hps[targetIndex]
                    if hp > 0:
                        self.notify.debug('DAMAGE: toon: %d hit for dmg: %d' % (toonId, hp))
                        if toonDied != 0:
                            toonHpDict[toon.doId][2] = 1
                        toonHpDict[toon.doId][1] += hp
    # --- Pass 7: net out heal/damage per toon and remove the dead. ---
    deadToons = []
    for activeToon in self.activeToons:
        hp = toonHpDict[activeToon]
        toon = self.getToon(activeToon)
        if toon != None:
            self.notify.debug('AFTER ROUND: currtoonHP: %d toonMAX: %d hheal: %d damage: %d' % (toon.hp,
             toon.maxHp,
             hp[0],
             hp[1]))
            toon.hpOwnedByBattle = 0
            hpDelta = hp[0] - hp[1]
            if hpDelta >= 0:
                toon.toonUp(hpDelta, quietly=1)
            else:
                toon.takeDamage(-hpDelta, quietly=1)
            if toon.hp <= 0:
                self.notify.debug('movieDone() - toon: %d was killed' % activeToon)
                toon.inventory.zeroInv(1)
                deadToons.append(activeToon)
            self.notify.debug('AFTER ROUND: toon: %d setHp: %d' % (toon.doId, toon.hp))
            if toon.unlimitedGags:
                toon.doRestock(noUber=0, noPaid=0)
    for deadToon in deadToons:
        self.__removeToon(deadToon)
        needUpdate = 1
    self.clearAttacks()
    self.d_setMovie()
    self.d_setChosenToonAttacks()
    self.localMovieDone(needUpdate, deadToons, deadSuits, lastActiveSuitDied)
def enterResume(self):
    # Battle is over: release all suits back to their normal behavior, detach
    # the toons, and log a summary of defeated cogs to the server event log.
    for suit in self.suits:
        self.notify.info('battle done, resuming suit: %d' % suit.doId)
        if suit.isDeleted():
            self.notify.info('whoops, suit %d is deleted.' % suit.doId)
        else:
            suit.resume()
    self.suits = []
    self.joiningSuits = []
    self.pendingSuits = []
    self.adjustingSuits = []
    self.activeSuits = []
    self.luredSuits = []
    for toonId in self.toons:
        toon = simbase.air.doId2do.get(toonId)
        if toon:
            toon.b_setBattleId(0)
            messageToonReleased = 'Battle releasing toon %s' % toon.doId
            messenger.send(messageToonReleased, [toon.doId])
    for exitEvent in self.avatarExitEvents:
        self.ignore(exitEvent)
    # Aggregate the kills into "<cog><level>[+]": count pairs ('+' = skelecog).
    eventMsg = {}
    for encounter in self.suitsKilledThisBattle:
        cog = encounter['type']
        level = encounter['level']
        msgName = '%s%s' % (cog, level)
        if encounter['isSkelecog']:
            msgName += '+'
        if msgName in eventMsg:
            eventMsg[msgName] += 1
        else:
            eventMsg[msgName] = 1
    msgText = ''
    for msgName, count in eventMsg.items():
        if msgText != '':
            msgText += ','
        msgText += '%s%s' % (count, msgName)
    self.air.writeServerEvent('battleCogsDefeated', self.doId, '%s|%s' % (msgText, self.getTaskZoneId()))
def exitResume(self):
    pass

def isJoinable(self):
    # True while new combatants may still join this battle.
    return self.joinableFsm.getCurrentState().getName() == 'Joinable'

def enterJoinable(self):
    self.notify.debug('enterJoinable()')

def exitJoinable(self):
    pass

def enterUnjoinable(self):
    self.notify.debug('enterUnjoinable()')

def exitUnjoinable(self):
    pass

def isRunable(self):
    # True while toons are allowed to flee the battle.
    return self.runableFsm.getCurrentState().getName() == 'Runable'

def enterRunable(self):
    self.notify.debug('enterRunable()')

def exitRunable(self):
    pass

def enterUnrunable(self):
    self.notify.debug('enterUnrunable()')

def exitUnrunable(self):
    pass
def __estimateAdjustTime(self):
    # Estimate how long the current adjust (combatants walking to their battle
    # positions) will take, based on the distance from the first pending point
    # to the first active point. Suit movement dominates if both must move.
    self.needAdjust = 0
    adjustTime = 0
    if len(self.pendingSuits) > 0 or self.suitGone == 1:
        self.suitGone = 0
        pos0 = self.suitPendingPoints[0][0]
        pos1 = self.suitPoints[0][0][0]
        adjustTime = self.calcSuitMoveTime(pos0, pos1)
    if len(self.pendingToons) > 0 or self.toonGone == 1:
        self.toonGone = 0
        if adjustTime == 0:
            pos0 = self.toonPendingPoints[0][0]
            pos1 = self.toonPoints[0][0][0]
            adjustTime = self.calcToonMoveTime(pos0, pos1)
    return adjustTime
def enterAdjusting(self):
    # Combatants are walking to their positions; arm a timeout slightly longer
    # than the estimated walk time in case client acks never arrive.
    self.notify.debug('enterAdjusting()')
    self.timer.stop()
    self.__resetAdjustingResponses()
    self.adjustingTimer.startCallback(self.__estimateAdjustTime() + SERVER_BUFFER_TIME, self.__serverAdjustingDone)

def __serverAdjustingDone(self):
    # Adjust finished (or timed out). If another adjust was requested in the
    # meantime, restart the cycle; otherwise finalize this one.
    if self.needAdjust == 1:
        self.adjustFsm.request('NotAdjusting')
        self.__requestAdjust()
    else:
        self.notify.debug('adjusting timed out on the server')
        self.ignoreAdjustingResponses = 1
        self.__adjustDone()
def exitAdjusting(self):
    # Resume the interrupted input timer, or promote WaitForJoin to
    # WaitForInput now that combatants are in position.
    currStateName = self.fsm.getCurrentState().getName()
    if currStateName == 'WaitForInput':
        self.timer.restart()
    elif currStateName == 'WaitForJoin':
        self.b_setState('WaitForInput')
    self.adjustingTimer.stop()
def __addTrainTrapForNewSuits(self):
    """If any active suit carries a train (uber) trap, extend that trap to every
    active suit that does not yet have it, and register the addition with the
    battle calculator. Called after suits finish joining/adjusting."""
    trainTrapPresent = any(s.battleTrap == UBER_GAG_LEVEL_INDEX for s in self.activeSuits)
    if not trainTrapPresent:
        return
    for suit in self.activeSuits:
        if suit.battleTrap == UBER_GAG_LEVEL_INDEX:
            continue
        previousTrap = suit.battleTrap
        suit.battleTrap = UBER_GAG_LEVEL_INDEX
        self.battleCalc.addTrainTrapForJoiningSuit(suit.doId)
        self.notify.debug('setting traintrack trap for joining suit %d oldTrap=%s' % (suit.doId, previousTrap))
def __adjustDone(self):
    # Promote every adjusting suit/toon from pending to active, propagate
    # train traps to newcomers, broadcast the new membership, and re-run the
    # adjust cycle if another request came in while adjusting.
    for s in self.adjustingSuits:
        self.pendingSuits.remove(s)
        self.activeSuits.append(s)
    self.adjustingSuits = []
    for toon in self.adjustingToons:
        if self.pendingToons.count(toon) == 1:
            self.pendingToons.remove(toon)
        else:
            self.notify.warning('adjustDone() - toon: %d not pending!' % toon.doId)
        if self.activeToons.count(toon) == 0:
            self.activeToons.append(toon)
            self.ignoreResponses = 0
            self.sendEarnedExperience(toon)
        else:
            self.notify.warning('adjustDone() - toon: %d already active!' % toon.doId)
    self.adjustingToons = []
    self.__addTrainTrapForNewSuits()
    self.d_setMembers()
    self.adjustFsm.request('NotAdjusting')
    if self.needAdjust == 1:
        self.notify.debug('__adjustDone() - need to adjust again')
        self.__requestAdjust()
def enterNotAdjusting(self):
    # If a movie was requested while adjusting, retry it now that positions
    # are settled and all active toons have responded.
    self.notify.debug('enterNotAdjusting()')
    if self.movieRequested == 1:
        if len(self.activeToons) > 0 and self.__allActiveToonsResponded():
            self.__requestMovie()

def exitNotAdjusting(self):
    pass
def getPetProxyObject(self, petId, callback):
    # Activate the pet object on the state server and invoke `callback(1, pet)`
    # once its generate event fires.
    # NOTE(review): the callback is only ever called with success=1; if
    # activation fails the event never fires and the callback is never
    # invoked -- confirm callers tolerate that.
    doneEvent = 'generate-%d' % petId

    def handlePetProxyRead(pet):
        callback(1, pet)

    self.air.sendActivate(petId, self.air.districtId, 0)
    self.acceptOnce(doneEvent, handlePetProxyRead)
def _getNextSerialNum(self):
num = self.serialNum
self.serialNum += 1
return num
def setFireCount(self, amount):
    # Number of "fire" uses remaining for this battle.
    self.fireCount = amount

def getFireCount(self):
    return self.fireCount
@magicWord(category=CATEGORY_PROGRAMMER)
def skipMovie():
    """Magic word: force the invoker's current battle movie to finish immediately.

    Returns a status string describing what happened.
    """
    invoker = spellbook.getInvoker()
    battleId = invoker.getBattleId()
    if not battleId:
        return 'You are not currently in a battle!'
    battle = simbase.air.doId2do.get(battleId)
    # Guard against a stale battleId: .get() returns None when the battle
    # object is already gone, which previously raised AttributeError below.
    if battle is None:
        return 'Battle %s not found!' % battleId
    battle._DistributedBattleBaseAI__movieDone()
    return 'Battle movie skipped.'
| StarcoderdataPython |
287098 | #!/usr/bin/env python
import random
class C(object):
    """Demo class: silently discards attribute writes and fabricates reads."""

    def __setattr__(self, name, value):
        # Swallow every assignment; nothing is ever stored on the instance.
        pass

    def __getattr__(self, name):
        # Called for every missing attribute -- i.e. always, since writes are
        # discarded -- and returns a random value in [100, 200].
        return random.randint(100, 200)
# START OMIT
c = C()
c.foo = 42  # discarded by __setattr__
print c.foo  # Python 2 print: shows a random value in [100, 200], not 42
# END OMIT
| StarcoderdataPython |
9706357 | <gh_stars>1-10
#!/usr/bin/python3
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright 2015 Raritan Inc. All rights reserved.
import sys, time
sys.path.append("pdu-python-api")
from raritan.rpc import Agent, pdumodel, firmware
# Connection defaults; optionally overridden by argv: <ip> <user> <password>.
ip = "10.0.42.2"
user = "admin"
pw = "<PASSWORD>"
try:
    ip = sys.argv[1]
    user = sys.argv[2]
    pw = sys.argv[3]
except IndexError:
    pass # use defaults

# Connect to the PDU over HTTPS (self-signed certs allowed) and build proxies.
agent = Agent("https", ip, user, pw, disable_certificate_verification=True)
pdu = pdumodel.Pdu("/model/pdu/0", agent)
firmware_proxy = firmware.Firmware("/firmware", agent)

# Print a summary of the unit.
inlets = pdu.getInlets()
ocps = pdu.getOverCurrentProtectors()
outlets = pdu.getOutlets()
print ("PDU: %s" % (ip))
print ("Firmware version: %s" % (firmware_proxy.getVersion()))
print ("Number of inlets: %d" % (len(inlets)))
print ("Number of over current protectors: %d" % (len(ocps)))
print ("Number of outlets: %d" % (len(outlets)))

# Inspect the first outlet: name, switchability, and live sensor readings.
outlet = outlets[0]
outlet_sensors = outlet.getSensors()
outlet_metadata = outlet.getMetaData()
outlet_settings = outlet.getSettings()
print ("Outlet %s:" % (format(outlet_metadata.label)))
print ("  Name: %s" % (outlet_settings.name if outlet_settings.name != "" else "(none)"))
print ("  Switchable: %s" % ("yes" if outlet_metadata.isSwitchable else "no"))
if outlet_sensors.voltage:
    sensor_reading = outlet_sensors.voltage.getReading()
    print ("  Voltage: %s" % (("%d V" % (sensor_reading.value)) if sensor_reading.valid else "n/a"))
if outlet_sensors.current:
    sensor_reading = outlet_sensors.current.getReading()
    print ("  Current: %s" % (("%d A" % (sensor_reading.value)) if sensor_reading.valid else "n/a"))

# Power-cycle the outlet if it is switchable: off, wait 4 s, back on.
if outlet_metadata.isSwitchable:
    outlet_state_sensor = outlet_sensors.outletState
    outlet_state = outlet_state_sensor.getState()
    if outlet_state.available:
        print ("  Status :%s" % ("on" if outlet_state.value == outlet_state_sensor.OnOffState.ON.val else "off"))
    print ("  Turning outlet off...")
    outlet.setPowerState(outlet.PowerState.PS_OFF)
    print ("  Sleeping 4 seconds...")
    time.sleep(4)
    print ("  Turning outlet on...")
    outlet.setPowerState(outlet.PowerState.PS_ON)
    outlet_state = outlet_state_sensor.getState()
    if outlet_state.available:
        print ("  Status :%s" % ("on" if outlet_state.value == outlet_state_sensor.OnOffState.ON.val else "off"))
6464573 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from scipy.stats import gaussian_kde
from pprint import pprint
import sys
import os
from astropy.io import ascii
from astropy.table import vstack
# THIS FILE: UTILITY FUNCTIONS FOR PLOTTING!
def loadChainFolder(chainfolder):
    """Load an MCMC chain folder into a single astropy Table.

    Reads the parameter names from the folder's ``.paramnames`` file, then
    reads every ``.txt`` chain file, drops the first 100 burn-in rows of each,
    renames the columns (acceptance, likelihood, then the parameter names),
    and vertically stacks all chains.

    Raises IOError if no ``.paramnames`` file is present.
    """
    params = None
    for filename in os.listdir(chainfolder):
        if '.paramnames' in filename:
            paramfile = os.path.join(chainfolder, filename)
            params = np.array(ascii.read(paramfile, delimiter="\t", format="no_header"))['col1']
    if params is None:
        # Previously this fell through to a NameError on `params`; fail clearly.
        raise IOError('no .paramnames file found in %s' % chainfolder)
    data_all = None
    for filename in os.listdir(chainfolder):
        if filename.endswith(".txt"):
            chainfile = os.path.join(chainfolder, filename)
            data = (ascii.read(chainfile, delimiter="\s"))[100:]
            # Set up column names (read in from param file).
            data['col1'].name = 'acceptance'
            data['col2'].name = 'likelihood'
            for i in range(3, len(params) + 3):
                data['col' + str(i)].name = params[i - 3]
            # Use identity comparison: `table == None` triggers elementwise
            # comparison on astropy Tables instead of a truth value.
            if data_all is None:
                data_all = data
            else:
                data_all = vstack([data_all, data])
    return data_all
def repeatRows(data, acceptance):
    """Return a copy of *data* with extra repetitions appended at the end.

    For each (row, acc) pair, acc - 1 additional references to the row are
    appended, so a row with acceptance count N ends up appearing N times in
    total (original position plus N - 1 copies at the tail).
    """
    repeated = data[:]
    for row, count in zip(data, acceptance):
        # list * negative-or-zero is empty, matching range(count - 1).
        repeated.extend([row] * (count - 1))
    return repeated
def denplot( list_data, ax, acc, name="data", \
             lower=0.0, upper=0.25, nbins=20, extend=False, \
             extent=0.1, cov=0.2, fmt="k-", mylabel="label" ):
    """Draw a normalized Gaussian-KDE density curve of `list_data` on axes `ax`.

    extend=True mirrors the data about zero before fitting (for quantities
    defined on [0, inf) so the density does not drop at the boundary).
    `cov` fixes the KDE covariance factor (bandwidth). `acc`, `nbins` and
    `extent` are currently unused (kept from the weighted-histogram version,
    left commented out below).
    """
    # print("repeating")
    # list_data = np.array(list_data).tolist()
    # list_data = repeatRows(list_data, acc)
    # list_data = np.array(list_data)
    x = np.linspace(lower, upper, 300)
    # new_weights = data['acceptance']
    if extend:
        new_list_data = np.hstack( (list_data,-list_data) )
        density = gaussian_kde(new_list_data)
    else:
        density = gaussian_kde( list_data )
    # Override the automatic bandwidth selection with the fixed factor `cov`.
    density.covariance_factor = lambda : cov
    density._compute_covariance()
    # Normalize the curve so its peak is 1 (shape comparison, not a true PDF).
    ax.plot( x, density(x) / np.max(density(x)), fmt, label=mylabel )
    # counts, bins = np.histogram( list_data, bins=x, weights=new_weights, density=True )
    #ax.plot( x[:-1], counts, "r." )
    ax.get_yaxis().set_ticks([])
    # ax.set_ylim( 0.0, counts.max() )
    ax.set_xlim( lower, upper )
    ax.set_xlabel( name )
def plotRow(data, ax1, ax2, ax3, ax4, c, mylabel):
    """Plot one chain's derived isocurvature quantities onto four axes.

    From the chain's power-spectrum columns at two pivot scales, derive the
    isocurvature fractions beta_iso(k_low/k_high), the correlation cos(Delta),
    and the spectral index n_II, and density-plot each on ax1..ax4 in line
    style `c` (n_II carries the legend label).
    """
    prr1 = data['P_{RR}^1']; pii1 = data['P_{II}^1']; pri1 = data['P_{RI}^1'];
    # P_RI at the second scale follows from assuming a constant correlation.
    prr2 = data['P_{RR}^2']; pii2 = data['P_{II}^2']; pri2 = pri1 * np.sqrt(pii2 * prr2 / (pii1 * prr1))
    # Isocurvature fraction at each pivot scale.
    beta_iso1 = pii1 / (prr1 + pii1)
    beta_iso2 = pii2 / (prr2 + pii2)
    # Correlation angle cos(Delta).
    alpha = pri1 / np.sqrt( pii1 * prr1 )
    # Spectral indices: n_AB = log(P_AB^2 / P_AB^1) / log(k_2 / k_1)
    k1 = 0.002 # Mpc^{-1}
    k2 = 0.1 # Mpc^{-1}
    nRR = np.log(prr2/prr1) / np.log(k2/k1)
    nRI = np.log(pri2/pri1) / np.log(k2/k1)
    nII = np.log(pii2/pii1) / np.log(k2/k1)
    denplot( beta_iso1, ax1, data['acceptance'], r"$\beta_{iso}(k_{low})$", 0.0, 0.1, extend=True, fmt=c )
    denplot( beta_iso2, ax2, data['acceptance'], r"$\beta_{iso}(k_{high})$", 0.0, 0.8, extend=True, fmt=c)
    denplot( alpha, ax3, data['acceptance'], r"$\cos \Delta$", -0.5, 0.5, fmt=c)
    denplot( nII, ax4, data['acceptance'], r"$n_{II}$", -1.0, 2.8, fmt=c, mylabel=mylabel )
    ax4.legend()
| StarcoderdataPython |
4805681 | # GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
    # Human-readable description of this plugin action.
    DESCRIPTION = "Retrieve a list of software updates"


class Input:
    # Input parameter key.
    MACHINE = "machine"


class Output:
    # Output parameter key.
    UPDATES = "updates"
class GetMissingSoftwareUpdatesInput(insightconnect_plugin_runtime.Input):
    # JSON schema for the action input: a single required machine identifier.
    schema = json.loads("""
{
  "type": "object",
  "title": "Variables",
  "properties": {
    "machine": {
      "type": "string",
      "title": "Machine",
      "description": "Machine IP address, hostname or machine ID",
      "order": 1
    }
  },
  "required": [
    "machine"
  ]
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
class GetMissingSoftwareUpdatesOutput(insightconnect_plugin_runtime.Output):
    # JSON schema for the action output: a required list of `update` objects.
    schema = json.loads("""
{
  "type": "object",
  "title": "Variables",
  "properties": {
    "updates": {
      "type": "array",
      "title": "Updates",
      "description": "List of updates",
      "items": {
        "$ref": "#/definitions/update"
      },
      "order": 1
    }
  },
  "required": [
    "updates"
  ],
  "definitions": {
    "update": {
      "type": "object",
      "title": "update",
      "properties": {
        "cveAddressed": {
          "type": "integer",
          "title": "CVE Addressed",
          "description": "Update CVE addressed",
          "order": 7
        },
        "id": {
          "type": "string",
          "title": "ID",
          "description": "Update ID",
          "order": 1
        },
        "machineMissedOn": {
          "type": "integer",
          "title": "Machine Missed On",
          "description": "Update machine missed on",
          "order": 6
        },
        "name": {
          "type": "string",
          "title": "Name",
          "description": "Update name",
          "order": 2
        },
        "osBuild": {
          "type": "integer",
          "title": "OS Build",
          "description": "Update OS build",
          "order": 3
        },
        "productsNames": {
          "type": "array",
          "title": "Products Names",
          "description": "Update products names",
          "items": {
            "type": "string"
          },
          "order": 4
        },
        "url": {
          "type": "string",
          "title": "URL",
          "description": "Update URL",
          "order": 5
        }
      }
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
| StarcoderdataPython |
11251399 | <gh_stars>1-10
import os
import keras.backend as K
from keras.layers import Input
from keras.utils import multi_gpu_model
from yolov3.model import yolo_eval, yolo_body
from yolov3.utils import letterbox_image, wh2xy, draw_box, nms, segmentation
from timeit import default_timer as timer
import numpy as np
class YOLO:
def __init__(self):
# 设置默认属性
self.weight_path = 'model_data/weight.h5'
self.anchors_path = 'model_data/anchors.txt'
self.classes_path = 'model_data/classes.txt' # English only
self.model_image_size = (576, 576) # hw
# cv is GBR
self.colors = [(0, 191, 255), (127, 255, 212), (238, 130, 238)]
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.score = 0.3
self.iou = 0.45
self.gpu_num = 1
self.yolo_model = None
self.input_image_shape = None
# 3个运算结果 tensor
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
# expanduser: 把path中包含的"~"和"~user"转换成用户目录
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path, 'r') as f:
class_names = f.readlines()
class_names = [s.strip() for s in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
"""未运算, 返回model结果的tensor"""
weight_path = os.path.expanduser(self.weight_path)
assert weight_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
h, w = self.model_image_size
assert os.path.exists(self.weight_path), 'weight文件不存在'
self.yolo_model = yolo_body(Input(shape = (h, w, 3)), num_anchors // 3, num_classes)
self.yolo_model.load_weights(self.weight_path) # make sure model, anchors and classes match
print('{} model, anchors, and classes loaded.'.format(weight_path))
# Generate output tensor targets for filtered bounding boxes.
# placeholder: 实例化一个占位符张量并返回它
# input_image_shape 只输入wh
self.input_image_shape = K.placeholder(shape = (2,))
if self.gpu_num >= 2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus = self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output,
self.anchors,
len(self.class_names),
self.input_image_shape,
score_threshold = self.score)
# boxes: xywh
return boxes, scores, classes
def detect_image(self, image: np.ndarray):
"""检测图像"""
start = timer()
image_h, image_w = image.shape[0:2]
assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
# reversed: 反向迭代器, 默认输入为hw, 要转化为wh
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
image_data = boxed_image.astype('float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
# run run run
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict = {
self.yolo_model.input: image_data, # 替换图中的某个tensor的值
self.input_image_shape: [image_w, image_h],
# learning_phase, 学习阶段标志是一个布尔张量(0 = test,1 = train)
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
out_boxes = wh2xy(out_boxes)
keep = nms(out_boxes, out_scores, self.iou) # box中为角度
out_boxes = out_boxes[keep] # [N, 5]
out_scores = out_scores[keep] # [N,]
out_classes = out_classes[keep] # [N,]
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
out_boxes = np.floor(out_boxes).astype(np.int)
# draw
image = draw_box(image, out_boxes, out_scores, out_classes, self.colors, self.class_names)
end = timer()
print('time: ', end - start)
return image
def detect_big_image(self, image: np.ndarray):
"""检测大图像"""
start = timer()
assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
# reversed: 反向迭代器, 默认输入为hw, 要转化为wh
size = segmentation(image, self.model_image_size)
H, W, _ = image.shape
all_box, all_score, all_classes = [], [], []
print(image.shape)
for t in size:
img = image[t[1]:t[3], t[0]:t[2]]
boxed_image = letterbox_image(img, tuple(reversed(self.model_image_size)))
image_data = boxed_image.astype('float32')
image_h, image_w, _ = image_data.shape
image_data /= 255.0
image_data = np.expand_dims(image_data, 0)
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict = {
self.yolo_model.input: image_data, # 替换图中的某个tensor的值
self.input_image_shape: [image_w, image_h],
# learning_phase, 学习阶段标志是一个布尔张量(0 = test,1 = train)
K.learning_phase(): 0
})
out_boxes[..., 0] += t[0]
out_boxes[..., 1] += t[1]
all_box.append(out_boxes)
all_score.append(out_scores)
all_classes.append(out_classes)
out_boxes = np.concatenate(all_box)
out_scores = np.concatenate(all_score)
out_classes = np.concatenate(all_classes)
out_boxes = wh2xy(out_boxes)
keep = nms(out_boxes, out_scores, self.iou)
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
out_boxes = np.floor(out_boxes[keep]).astype(np.int) # [N, 5]
out_scores = out_scores[keep] # [N,]
out_classes = out_classes[keep] # [N,]
out_boxes[..., 0:2][out_boxes[..., 0:2] < 0] = 0
out_boxes[..., 2:3][out_boxes[..., 2:3] > (W - 1)] = W - 1
out_boxes[..., 3:4][out_boxes[..., 3:4] > (H - 1)] = H - 1
image = draw_box(image, out_boxes, out_scores, out_classes, self.colors, self.class_names)
end = timer()
print('time: ', end - start)
return image
    def close_session(self):
        """Release the underlying TensorFlow session."""
        self.sess.close()
| StarcoderdataPython |
58655 | <reponame>Niveshpai/University_Login_System
class Student:
    """A registered student with a per-term course list."""

    # Class-wide registry of every Student ever constructed.
    numReg = []

    def __init__(self, number, name, family, courses=None):
        self.number = number
        self.name = name
        self.family = family
        # A fresh list per instance unless the caller supplies one.
        self.courses = [] if courses is None else courses
        self.numReg.append(self)

    def displayStudent(self):
        """Print the logged-in student's full name."""
        print(f'You are logged in as {self.name} {self.family}')

    def displayStudentCourses(self, numStudent):
        """Print the course list for the student matching *numStudent*."""
        if self.number != numStudent:
            return
        if not self.courses:
            print('You have not selected any courses. Please choose a course.')
        else:
            print(self.courses)

    def studentCourseAdding(self, wantedCourse, numStudent):
        """Add *wantedCourse* to this student's schedule if the id matches."""
        if self.number != numStudent:
            return
        self.courses.append(wantedCourse)
        print('You Added ' + wantedCourse + ' to your schedule, successfully!')
3522492 | __author__ = '<NAME>, <EMAIL>'
from memetic import MemeticSearch
class InverseMemeticSearch(MemeticSearch):
    """ Interleaving local search with topology search (inverse of memetic search) """
    def _learnStep(self):
        # Temporarily swap the mutation operators so the parent's learn step
        # searches in the opposite space, then swap back to restore the
        # original configuration.
        self.switchMutations()
        MemeticSearch._learnStep(self)
        self.switchMutations()
| StarcoderdataPython |
6565820 | <reponame>ram-nad/wildfirepy<gh_stars>0
from wildfirepy.net.util import URLOpenerWithRedirect, USGSHtmlParser
from wildfirepy.coordinates.util import SinusoidalCoordinate
from pathlib import Path
from urllib.error import HTTPError
# Public names exported by ``from ... import *``.
__all__ = ['AbstractUSGSDownloader', 'ModisBurntAreaDownloader']
class AbstractUSGSDownloader:
    """
    Description
    -----------
    An Abstract Base Class Downloader for USGS products.

    Subclasses narrow ``base_url`` to a specific product archive; the
    download helpers then resolve tile filenames from coordinates and
    fetch hdf/xml/jpg artifacts.
    """
    def __init__(self):
        self.base_url = 'https://e4ftl01.cr.usgs.gov/'
        self.regex_traverser = USGSHtmlParser()
        self.converter = SinusoidalCoordinate()
        self.url_opener = URLOpenerWithRedirect()
        # True once get_files_from_date() has loaded a month's file listing.
        self.has_files = False

    @staticmethod
    def _date_dir(year, month):
        """Return the ``YYYY.MM.01/`` archive directory for the given month.

        Centralises the zero-padded month formatting that was previously
        duplicated in five methods.
        """
        return f"{year}.{month:02d}.01/"

    def _fetch_product(self, year, month, filename, **kwargs):
        """Download *filename* from the month's archive directory via fetch()."""
        url = self.base_url + self._date_dir(year, month) + filename
        return self.fetch(url=url, filename=filename, **kwargs)

    def get_available_dates(self):
        """
        Returns dates for which data is available.
        """
        self.regex_traverser(self.base_url)
        return self.regex_traverser.get_all_dates()

    def get_files_from_date(self, year, month):
        """
        Returns names of all available files.
        Parameters
        ----------
        year: `int`
            Year for which filenames are to be retrieved.
        month: `int`
            Month for which filenames are to be retrieved.
        """
        self.regex_traverser(self.base_url + self._date_dir(year, month))
        self.has_files = True
        return self.regex_traverser.get_all_files()

    def get_available_jpg_files(self):
        """
        Returns names of available jpg files.
        """
        return self.regex_traverser.get_all_jpg_files()

    def get_available_xml_files(self):
        """
        Returns names of available xml files.
        """
        return self.regex_traverser.get_all_xml_files()

    def get_available_hdf_files(self):
        """
        Returns names of available hdf files.
        """
        return self.regex_traverser.get_all_hdf_files()

    def get_filename(self, latitude, longitude):
        """
        Returns name of file for given latitude and longitude.
        Parameters
        ----------
        latitude: `float`
            latitude of the observation.
        longitude: `float`
            longitude of the observation.
        """
        h, v = self.converter(latitude, longitude)
        return self.regex_traverser.get_filename(h, v)

    def get_hdf(self, *, year, month, latitude, longitude, **kwargs):
        """
        Downloads the `hdf` file and stores it on the disk.
        Parameters
        ----------
        year: `int`
            Year of the observation.
        month: `int`
            Month of the observation.
        latitude: `float`
            latitude of the observation.
        longitude: `float`
            longitude of the observation.
        kwargs: keyword arguments to be passed to `AbstractUSGSDownloader.fetch`
        Returns
        -------
        path: `str`
            Absolute path to the downloaded `hdf` file.
        """
        if not self.has_files:
            self.get_files_from_date(year, month)
        filename = self.get_filename(latitude, longitude)
        return self._fetch_product(year, month, filename, **kwargs)

    def get_xml(self, *, year, month, latitude, longitude, **kwargs):
        """
        Downloads the `xml` file and stores it on the disk.
        Parameters
        ----------
        year: `int`
            Year of the observation.
        month: `int`
            Month of the observation.
        latitude: `float`
            latitude of the observation.
        longitude: `float`
            longitude of the observation.
        kwargs: keyword arguments to be passed to `AbstractUSGSDownloader.fetch`
        Returns
        -------
        path: `str`
            Absolute path to the downloaded `xml` file.
        """
        if not self.has_files:
            self.get_files_from_date(year, month)
        filename = self.get_filename(latitude, longitude) + ".xml"
        return self._fetch_product(year, month, filename, **kwargs)

    def get_jpg(self, *, year, month, latitude, longitude, **kwargs):
        """
        Downloads the `jpg` browse image and stores it on the disk.
        Parameters
        ----------
        year: `int`
            Year of the observation.
        month: `int`
            Month of the observation.
        latitude: `float`
            latitude of the observation.
        longitude: `float`
            longitude of the observation.
        kwargs: keyword arguments to be passed to `AbstractUSGSDownloader.fetch`
        Returns
        -------
        path: `str`
            Absolute path to the downloaded `jpg` file.
        """
        if not self.has_files:
            self.get_files_from_date(year, month)
        # Browse images share the hdf stem: strip the trailing "hdf" and
        # append "1.jpg" (yielding "....1.jpg"), prefixed with "BROWSE.".
        filename = "BROWSE." + self.get_filename(latitude, longitude)[:-3] + "1.jpg"
        return self._fetch_product(year, month, filename, **kwargs)

    def fetch(self, url, path='./', filename='temp.hdf'):
        """
        Fetches data from `url`.
        Parameters
        ----------
        url: `str`
            URL to get the data from.
        path: `str`
            path to store the downloaded file.
        filename: `str`
            name of the downloaded file.
        Returns
        -------
        path: `str`
            Absolute path to the downloaded file, or None when the server
            responds with an HTTP error (the error is printed, not raised).
        """
        data_folder = Path(path)
        filename = data_folder / filename
        try:
            response = self.url_opener(url)
            print("Download Successful!")
            print("Writing file!")
            with open(filename, 'wb') as file:
                file.write(response.read())
            response.close()
            return filename.absolute().as_posix()
        except HTTPError as err:
            output = format(err)
            print(output)
class ModisBurntAreaDownloader(AbstractUSGSDownloader):
    """
    Description
    -----------
    MODIS downloader for `MCD64A1` (Burnt Area).

    By default downloads data from the 6th collection.
    """
    def __init__(self, collection='006'):
        super().__init__()
        # Narrow the generic USGS root to the MCD64A1 product archive.
        self.base_url += f"MOTA/MCD64A1.{collection}/"
| StarcoderdataPython |
6618393 | <filename>libscampi/contrib/cms/communism/storage.py
from django.core.files.storage import Storage
class URLStorage(Storage):
    """Read-only Django storage whose stored name *is* the public URL.

    All mutating and filesystem-style operations are unsupported.
    """

    def url(self, name):
        # The stored name is already the full URL.
        return name

    def exists(self, name):
        # Names are external URLs; assume they always resolve.
        return True

    def size(self, name):
        # Remote size is unknown without a request; report zero.
        return 0

    def delete(self, name):
        raise NotImplementedError()

    def listdir(self, path):
        raise NotImplementedError()

    def _open(self, name, mode):
        raise NotImplementedError()

    def _save(self, name, content):
        raise NotImplementedError()

    def get_available_name(self, name):
        raise NotImplementedError()

    def get_valid_name(self, name):
        raise NotImplementedError()
| StarcoderdataPython |
11273948 | <filename>tests/test_httpkit.py
# -*- coding: UTF-8 -*-
# Copyright (C) 2012-2016 <NAME> <<EMAIL>> and contributors.
# Licensed under the MIT license: http://opensource.org/licenses/mit-license
from ganggu import httpkit as http
from ganggu.resolvecache import smart_url
import requests
import json
import pytest
# httpbin.org can be very slow on some networks (severe packet loss),
# so use generous (connect, read) timeouts for the whole test module.
http.TIMEOUT = (10.0, 30.0)
def test_get():
    """GET with query params round-trips through httpbin."""
    params = {'foo': 'bar'}
    url = 'http://httpbin.org/get'
    resp = http.get(url, params=params)
    payload = resp.json()
    assert resp.status_code == 200
    # The client must identify itself via its own User-Agent.
    assert 'httpkit' in resp.request.headers['User-Agent']
    assert payload['url'] == url + '?foo=bar'
def test_debug_mode(capfd):
    """Request logging happens inside debug_mode() and stops outside it."""
    # capfd is the pytest fixture capturing stdout/stderr at the fd level.
    url = 'http://httpbin.org/get'
    with http.debug_mode():
        http.get(url)
    out, err = capfd.readouterr()
    assert 'GET /get HTTP/1.1' in out
    # Outside the context manager the exchange must no longer be echoed.
    # The original asserted against the stale first capture (trivially true);
    # re-read the capture after the second request so the check is real.
    url2 = 'http://httpbin.org/ip'
    http.get(url2)
    out, err = capfd.readouterr()
    assert 'GET /ip HTTP/1.1' not in out
def test_timeout():
    """Connect/read timeouts raise the matching requests exceptions."""
    url = 'http://httpbin.org/delay/3'
    old_timeout = http.TIMEOUT
    try:
        # Tiny connect timeout -> connection phase fails.
        http.TIMEOUT = (0.01, 10.0)
        with pytest.raises(requests.ConnectTimeout):
            http.get(url)
        # Tiny read timeout -> the server's 3 s delay exceeds it.
        http.TIMEOUT = (5, 0.01)
        with pytest.raises(requests.ReadTimeout):
            http.get(url)
    finally:
        # Always restore the module-level timeout so a failing assertion
        # cannot leave other tests running with a broken configuration.
        http.TIMEOUT = old_timeout
def test_post():
    """Form-encoded and JSON bodies are both echoed back by httpbin."""
    url = 'http://httpbin.org/post'
    payload = {'foo': 'bar'}
    # data= sends application/x-www-form-urlencoded; echoed under 'form'.
    form_result = http.post(url, data=payload).json()
    assert form_result['form'] == payload
    # json= sends a JSON body; echoed verbatim under 'data'.
    json_result = http.post(url, json=payload).json()
    assert json.loads(json_result['data']) == payload
def test_callable_url():
    # smart_url presumably wraps the URL in a resolve-cached form — confirm
    # in resolvecache; http.get must accept either form with the same result.
    url1 = 'http://httpbin.org/ip'
    url2 = smart_url(url1)
    resp1 = http.get(url1)
    resp2 = http.get(url2)
    assert resp1.text == resp2.text
| StarcoderdataPython |
9678618 | from graphics import *
from sprite import Sprite
import numpy as np
import time
# Demo: spin a sprite in a graphics window.
win = GraphWin(width = 1024, height = 512)
win.setCoords(0, 0, 1024, 512)
win.setBackground("cyan3")
# A yellow square sprite with a thick green outline.
sprit = Sprite([(200,200), (400,200), (400,400), (200, 400)],
               fill="yellow", outline="green", width=10)
sprit.translate(200, 50)
# Rotate the sprite through seven 45-degree steps, redrawing each time and
# pausing one second between frames (the original repeated this block
# verbatim seven times; a loop preserves the exact rotate/draw/sleep order).
for step in range(7):
    if step:
        time.sleep(1)
    sprit.rotate(np.pi * 0.25)
    sprit.draw(win)
# Keep the window open until the user clicks.
win.getMouse()
11212159 | '''
Code for synthesizing built-in functions.
'''
from ..... import inspect
from .. import ir, statics
from ...runtime.currylib.prelude.math import apply_unboxed
import operator as op
from six.moves import range
# Only the dispatcher is part of this module's public API.
__all__ = ['synthesize_function']
def synthesize_function(*args, **kwds):
    '''
    Synthesize a special function, if possible, or return None.

    Tries each metadata-specific compiler in turn and returns the first
    non-empty result.
    '''
    for compiler in (compile_boxedfunc, compile_rawfunc, compile_unboxedfunc):
        lines = compiler(*args, **kwds)
        if lines:
            return lines
    return None
def compile_boxedfunc(interp, ifun, closure, entry):
    '''
    Compiles code for a built-in function. See README.md. Corresponds to the
    "py.boxedfunc" metadata.

    The Python implementation function must accept the arguments in head-normal
    form, but without any other preprocessing (e.g., unboxing). It returns a
    sequence of arguments accepted by ``runtime.Node.__new__``.
    '''
    boxedfunc = ifun.metadata.get('py.boxedfunc')
    if boxedfunc is None:
        return None
    h_impl = closure.intern(boxedfunc)
    body = [
        'args = (rts.variable(_0, i).hnf() for i in range(len(_0.successors)))',
        '_0.rewrite(%s(rts, *args))' % h_impl,
    ]
    return ['def %s(rts, _0):' % entry, body]
def compile_rawfunc(interp, ifun, closure, entry):
    '''
    Compiles code for a raw built-in function. See README.md. Corresponds to
    the "py.rawfunc" metadata.

    Like compile_boxedfunc but does not head-normalize the arguments. The
    left-hand-side expression is simply passed to the implementation function.
    '''
    rawfunc = ifun.metadata.get('py.rawfunc')
    if rawfunc is None:
        return None
    h_impl = closure.intern(rawfunc)
    body = ['rts.Node(%s(rts, _0), target=_0.target)' % h_impl]
    return ['def %s(rts, _0):' % entry, body]
def compile_unboxedfunc(interp, ifun, closure, entry):
    '''
    Compiles a function over primitive data.

    See README.md. Corresponds to the "py.unboxedfunc" metadata. The
    generated step delegates to ``apply_unboxed``, which handles the
    unboxing/boxing around the primitive implementation.
    '''
    unboxedfunc = ifun.metadata.get('py.unboxedfunc')
    if unboxedfunc is None:
        return None
    h_impl = closure.intern(unboxedfunc)
    h_eval = closure.intern(apply_unboxed)
    return [
        'def %s(rts, _0):' % entry,
        ['return %s(rts, %s, _0)' % (h_eval, h_impl)],
    ]
| StarcoderdataPython |
6462878 | """
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
def __relative_imports(number_of_descent):
    """
    Append the directory *number_of_descent* levels above this file (plus
    '..') to sys.path so sibling packages can be imported, then de-duplicate
    the path list and print it.
    """
    file = __file__
    for _ in range(number_of_descent):
        file = os.path.dirname(file)
    sys.path.append(file)
    sys.path.append("..")
    # De-duplicate while preserving search order: the original
    # list(set(sys.path)) shuffled sys.path nondeterministically, which can
    # change which of two same-named modules gets imported.
    sys.path = list(dict.fromkeys(sys.path))
    # Plain loop instead of a list comprehension used only for its side effect.
    for entry in sys.path:
        print(entry)
import src.config.GlobalSettings as GS
# Hide all GPUs from the backend when prediction is configured to run on CPU.
if not GS.GPU_TO_PREDICT:
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# Signal the rest of the project to skip heavyweight optional imports.
GS.MINIMUM_IMPORTS = True
from src.utils.AsynchronousThreading import object_to_json
from src.utils.Folders import write_string_to_pathfile
from src.utils.Datetimes import date_from_format
from src.utils.Prints import pt
from src.utils.PetitionObject import Petition, JSON_PETITION_NAME
from src.config.Configurator import Configurator
from src.services.processing.CPrediction import CPrediction
from src.config.Projects import Projects
from timeit import default_timer as timer
import time, datetime, argparse
# Load updated config
CONFIG = Projects.get_problem_config()
# Load updated settings
SETTINGS = Projects.get_settings()
# Full path of the serialized model used for every prediction in this run.
MODEL_USED_FULLPATH = SETTINGS.model_path + CONFIG.model_name_saved
# Root folder polled for incoming petition uploads.
UPLOADS_PATH = GS.GLOBAL_CONFIG_JSON["upload_aimodel_python_path"]
class AnswerConfiguration():
    """Resolves a petition's folder layout and builds its answer payload.

    Combines the prediction outcome with run metadata (user, model, date)
    read from module-level globals that are set in the ``__main__`` guard.
    """
    # File names shared with the web backend.
    json_petition_name = JSON_PETITION_NAME
    json_answer_name = GS.GLOBAL_CONFIG_JSON["json_answer_name"]
    def __init__(self, petition_id, prediction_results=None):
        # Layout: <UPLOADS_PATH>\<petition_id>\<model_folder>\ holds the files.
        self.petition_src = UPLOADS_PATH + "\\" + petition_id + "\\"
        # Assumes exactly one model folder inside the petition folder —
        # TODO(review): confirm; os.listdir(...)[0] raises IndexError otherwise.
        self.model_folder = os.listdir(self.petition_src)[0]
        self.final_petition_dir = self.petition_src + self.model_folder + "\\"
        self.json_petition_src = self.final_petition_dir + self.json_petition_name
        self.json_answer_src = self.final_petition_dir + self.json_answer_name
        self.date = date_from_format(date=datetime.datetime.now())
        # Run metadata taken from module-level globals set in __main__.
        self.user_id = USER_ID
        self.user_id_path = USER_ID_PATH
        self.model_selected = MODEL_SELECTED
        self.model_used_fullpath = MODEL_USED_FULLPATH
        prediction_results = self.__get_results(prediction_results)
        if prediction_results:
            if prediction_results.results:
                self.answer = prediction_results.readable_results
            else:
                # Prediction ran but produced no results.
                self.answer = "NOK1"
        else:
            # No prediction supplied (path-resolution-only instance).
            self.answer = "NOK2"
    def __get_results(self, prediction_results: CPrediction):
        # Indirection kept for symmetry; simply returns the argument.
        return prediction_results
def execute_clasification(PETITIONS):
    """
    Get each petition and classify its elements, writing an answer JSON
    next to the petition files. (Name kept for caller compatibility;
    'clasification' is a typo for 'classification'.)
    Args:
        PETITIONS: List with new petitions
    Returns: petitions_end_ok
    """
    petitions_end_ok = []
    for petition_id in PETITIONS:
        GS.LOGGER.write_to_logger("Petition was found: " + petition_id)
        # Read petition json
        # TODO (@gabvaztor) Create a different object class to manage paths logic
        # First instance only resolves the petition's folder/file paths.
        path_config = AnswerConfiguration(petition_id=petition_id)
        petition = Petition(path=path_config.json_petition_src, petition_id=petition_id)
        # Run the model on the petition's payload.
        prediction_results = CPrediction(current_petition=petition)
        # Second instance bundles the prediction outcome into the answer JSON.
        new_answer_configuration = AnswerConfiguration(petition_id=petition_id,
                                                       prediction_results=prediction_results)
        json_answer_str = object_to_json(object=new_answer_configuration)
        pt(json_answer_str)
        # Persist the answer next to the petition so the backend can pick it up.
        write_string_to_pathfile(string=json_answer_str, filepath=new_answer_configuration.json_answer_src)
        petitions_end_ok.append(petition_id)
        GS.LOGGER.write_to_logger("Petition finished")
    return petitions_end_ok
def __get_new_online_petitions():
    """
    Poll the uploads folder for new petitions and classify them.

    Runs until either one batch of petitions has been processed or ten
    minutes have elapsed, then terminates the process via sys.exit() so an
    external supervisor can relaunch it.
    """
    global PETITIONS
    start = timer()
    petitions_counts = 0
    sleeps_counts = 0
    while True:
        PETITIONS = __get_new_folders(petitions=PETITIONS)
        if PETITIONS:
            pt("\n")
            pt("Petitions:", PETITIONS, "|@@| Date:[" + str(date_from_format(date=datetime.datetime.now()) + "]"))
            pt("\n")
        elif sleeps_counts % 10 == 0:
            # Heartbeat log roughly every 10 idle sleeps (~2 s).
            pt("Total Counts: " + str(petitions_counts) + " ### Petitions:", PETITIONS, "|@@| Date:[" +
               str(date_from_format(date=datetime.datetime.now()) + "]"))
        if PETITIONS:
            execute_clasification(PETITIONS)
            # TODO: delete/move processed petition folders instead of exiting.
            petitions_counts += 1
            # Exit after a single processed batch. The original followed
            # sys.exit() with unreachable exit()/quit() calls; removed.
            sys.exit()
        end = timer()
        if end - start >= 600:
            # Ten-minute timeout: terminate so the supervisor restarts us.
            # sys.exit() is used instead of the site-dependent exit() builtin.
            sys.exit()
        time.sleep(0.2)
        sleeps_counts += 1
def __get_new_folders(petitions):
    """
    Return [USER_ID] when that user's folder exists under UPLOADS_PATH,
    otherwise an empty list.

    NOTE(review): the *petitions* argument is currently unused; kept only
    for interface compatibility with the polling loop.
    """
    candidates = os.listdir(UPLOADS_PATH)
    return [USER_ID] if USER_ID in candidates else []
def run():
    """Entry point: start the blocking petition-polling loop."""
    __get_new_online_petitions()
if __name__ == "__main__":
USER_ID = None
MODEL_SELECTED = None
try:
# Example:
# python "..\API.py" -i 192.168.3.11_[29-10-2019_14.34.19] -m retinopathy_k_id
Configurator().run_basics()
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--userID", required=False,
help="userID")
ap.add_argument("-m", "--userModelSelection", required=False,
help="userModelSelection")
args = vars(ap.parse_args())
GS.LOGGER.write_to_logger("API executed")
USER_ID = args["userID"] if "userID" in args else None
MODEL_SELECTED = args["userModelSelection"] if "userModelSelection" in args else None
GS.PROBLEM_ID = MODEL_SELECTED
PETITIONS = []
TRIES = 0
USER_ID_PATH = UPLOADS_PATH + "\\" + USER_ID if USER_ID else UPLOADS_PATH + "\\"
run()
except Exception as e:
import traceback
traceback.print_exc()
USER_ID = "" if not USER_ID else USER_ID # To avoid warning
MODEL_SELECTED = "" if not MODEL_SELECTED else MODEL_SELECTED # To avoid warning
info = "USER_ID: " + USER_ID + " || MODEL_SELECTED: " + MODEL_SELECTED
GS.LOGGER.write_log_error(err=e, info=info)
sys.exit()
| StarcoderdataPython |
4969933 |
from tests.utils.runtest import makesuite, run
from tests.utils.testcase import TestCase
from tools.utils.dllreader import DllReader
class DllReaderTest(TestCase):
    """Checks that DllReader enumerates a DLL's exported functions and data."""

    def testInit(self):
        sm = DllReader('tests/data/exportsymbols.dll')
        # assertEquals is a deprecated alias; assertEqual is the supported name.
        self.assertEqual(sm.functions, ['Func', 'Funk', 'Jazz'])
        self.assertEqual(sm.data, ['Alphabetised', 'AnotherExportedSymbol', 'ExportedSymbol'])
# Collect the tests into a suite; run it when this file is executed directly.
suite = makesuite(
    DllReaderTest,
)
if __name__ == '__main__':
    run(suite)
| StarcoderdataPython |
1742885 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# Make the local ``utils`` package importable when run from the repo root.
sys.path.append('utils')
import json
import numpy as np
from .utils.box import *
from .utils.draw import *
from .utils.infrastructure import *
from .utils.detbox import *
def save_results(records, fpath):
    """Write *records* to *fpath* as JSON Lines and return the path."""
    with open(fpath, 'w') as fid:
        fid.writelines(json.dumps(record) + '\n' for record in records)
    return fpath
def load_func(fpath):
    """Read a JSON Lines file and return its records as a list of objects."""
    assert os.path.exists(fpath)
    with open(fpath, 'r') as fid:
        return [json.loads(line.strip('\n')) for line in fid]
def clip_boundary(dtboxes, height, width):
    """
    Clip corner-format boxes in place to the image bounds.

    Parameters
    ----------
    dtboxes : ndarray of shape (N, >=4), columns starting [x1, y1, x2, y2]
    height, width : image size; x is clipped to [0, width], y to [0, height]

    Returns
    -------
    The same (mutated) array.
    """
    # (Removed unused local ``num``.)
    dtboxes[:, 0] = np.maximum(dtboxes[:, 0], 0)
    dtboxes[:, 1] = np.maximum(dtboxes[:, 1], 0)
    dtboxes[:, 2] = np.minimum(dtboxes[:, 2], width)
    dtboxes[:, 3] = np.minimum(dtboxes[:, 3], height)
    return dtboxes
def recover_func(dtboxes):
    """Convert [x, y, w, h, ...] boxes to corner format in place (x2=x+w, y2=y+h)."""
    assert dtboxes.shape[1] >= 4
    # Single vectorized pass over both coordinate columns.
    dtboxes[:, 2:4] += dtboxes[:, 0:2]
    return dtboxes
| StarcoderdataPython |
310242 | from django.db import models
from django.contrib.auth.models import User
class SampleModel(models.Model):
    '''
    a model with usual fields used as a typical workflow object.
    '''
    date = models.DateField(auto_now_add=True)
    text = models.CharField(max_length = 100)
    number = models.IntegerField(null=True, blank=True)
    requester = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='requester')
    def __str__(self):
        # __unicode__ is Python 2 only and never called by Django on Python 3
        # (this project uses on_delete, i.e. Django 2+); __str__ provides the
        # human-readable label in the admin and shells.
        return self.text
| StarcoderdataPython |
6500121 | from pyspark import SparkContext
from pyspark.sql.types import StructType
from pyspark.sql.functions import col, udf
import json
def requests_to_spark(p):
    """Convert a prepared HTTP request object (e.g. requests.PreparedRequest)
    into the dict layout matching the HTTPRequestData Spark struct."""
    entity = None
    if p.body is not None:
        entity = {
            "content": p.body,
            "isChunked": False,
            "isRepeatable": True,
            "isStreaming": False,
        }
    # Content-Length headers are dropped (recomputed downstream).
    headers = [
        {"name": name, "value": value}
        for name, value in p.headers.items()
        if name != "Content-Length"
    ]
    return {
        "requestLine": {"method": p.method, "uri": p.url},
        "headers": headers,
        "entity": entity,
    }
# SparkContext._active_spark_context._jvm.com.microsoft.ml.spark.io.http.HTTPRequestData.schema().json()
# TODO figure out why we cannot just grab from SparkContext on databricks
# Hard-coded copy of the HTTPRequestData struct schema (requestLine/headers/
# entity), parsed from its JSON form so no JVM round-trip is needed.
HTTPRequestDataType = StructType().fromJson(json.loads(
    '{"type":"struct","fields":[{"name":"requestLine","type":{"type":"struct","fields":[{"name":"method",'
    '"type":"string","nullable":true,"metadata":{}},{"name":"uri","type":"string","nullable":true,"metadata":{}},'
    '{"name":"protocolVersion","type":{"type":"struct","fields":[{"name":"protocol","type":"string",'
    '"nullable":true,"metadata":{}},{"name":"major","type":"integer","nullable":false,"metadata":{}},'
    '{"name":"minor","type":"integer","nullable":false,"metadata":{}}]},"nullable":true,"metadata":{}}]},'
    '"nullable":true,"metadata":{}},{"name":"headers","type":{"type":"array","elementType":{"type":"struct",'
    '"fields":[{"name":"name","type":"string","nullable":true,"metadata":{}},{"name":"value","type":"string",'
    '"nullable":true,"metadata":{}}]},"containsNull":true},"nullable":true,"metadata":{}},{"name":"entity",'
    '"type":{"type":"struct","fields":[{"name":"content","type":"binary","nullable":true,"metadata":{}},'
    '{"name":"contentEncoding","type":{"type":"struct","fields":[{"name":"name","type":"string","nullable":true,'
    '"metadata":{}},{"name":"value","type":"string","nullable":true,"metadata":{}}]},"nullable":true,"metadata":{}},'
    '{"name":"contentLength","type":"long","nullable":true,"metadata":{}},{"name":"contentType",'
    '"type":{"type":"struct","fields":[{"name":"name","type":"string","nullable":true,"metadata":{}},'
    '{"name":"value","type":"string","nullable":true,"metadata":{}}]},"nullable":true,"metadata":{}},'
    '{"name":"isChunked","type":"boolean","nullable":false,"metadata":{}},'
    '{"name":"isRepeatable","type":"boolean","nullable":false,"metadata":{}},'
    '{"name":"isStreaming","type":"boolean","nullable":false,"metadata":{}}]},"nullable":true,"metadata":{}}]}'
))
def http_udf(func):
    """Wrap *func* (which must return a request object exposing .prepare())
    into a Spark UDF producing HTTPRequestData rows."""
    def composition(*args):
        # .prepare() builds the final request; convert it to the struct layout.
        return requests_to_spark(func(*args).prepare())
    return udf(composition, HTTPRequestDataType)
| StarcoderdataPython |
# Split n into exactly k powers of two (if possible) and print them.
n, k = map(int, input().split())
# b holds the binary digits of n, most significant first.
b = list(map(int, bin(n)[2:]))
# s = popcount(n): the minimum number of powers of two summing to n.
s = sum(b)
# Impossible when k exceeds n (each summand is >= 1) or k is below the popcount.
if k>n or k<s :
    print("NO")
else :
    ind = 0  # (unused)
    # excess = how many extra summands we still need to create; each split of
    # one 2^p into two copies of 2^(p-1) adds exactly one summand.
    excess = k - s
    l = len(b)
    for i in range(l-1) :
        if excess >= b[i] :
            # Split every copy of this power into two of the next-lower power.
            b[i+1] += b[i]*2
            excess -= b[i]
            b[i] = 0
        else :
            # Split only as many copies as still needed, then stop.
            b[i] -= excess
            b[i+1] += excess*2
            excess = 0
            break
    n = 1
    print("YES")
    # b[i] now counts how many copies of 2^(l-i-1) appear in the answer.
    for i in range(l) :
        for j in range(b[i]):
            print(2**(l-i-1), end=" ")
5190567 | from datetime import timedelta
import re
from math import floor
from ebu_tt_live.strings import ERR_TIME_NEGATIVE, \
ERR_TIME_FRAMES_OUT_OF_RANGE, \
ERR_TIME_FRAME_IS_DROPPED
from ebu_tt_live.errors import TimeNegativeError, TimeFormatError
class ISMPTEtoTimedeltaConverter(object):
    """
    Interface for converters from SMPTE timecodes to datetime.timedelta.

    Concrete implementations (e.g. FixedOffsetSMPTEtoTimedeltaConverter)
    must override every method; this base class cannot be instantiated.
    """
    def __init__(self):
        raise NotImplementedError()

    def timedelta(self, smpte_time):
        # Original interface stubs were missing the ``self`` parameter,
        # which would make any instance-method call fail with the wrong
        # arity; fixed to match the concrete implementation's signature.
        raise NotImplementedError()

    def can_convert(self, smpte_time):
        raise NotImplementedError()
class FixedOffsetSMPTEtoTimedeltaConverter(ISMPTEtoTimedeltaConverter):
    """
    Converts SMPTE timecodes to timedeltas with a fixed offset.

    This converter utility class uses a strategy that assumes a fixed offset,
    a reference SMPTE timecode value that is considered the zero point, and
    a continuous set of SMPTE timecodes monotonically increasing (aside
    from drop frames). It should not be used in cases where there may be
    discontinuities in the timecode, since it will give incorrect results.

    The object
    uses the ``frameRate``, ``frameRateMultiplier`` and ``dropMode`` to
    calculate the equivalent timedelta output value for any
    given input SMPTE timecode, and raises an exception if an attempt
    is made to convert a timecode that is earlier than the zero point.
    This can be avoided by calling :py:func:`can_convert()` to check first.
    Alternatively call :py:func:`timedelta()` directly in a ``try`` block
    and catch the :py:class:`ebu_tt_live.errors.TimeNegativeError` instead,
    which avoids essentially running the same code twice.
    """
    # Seconds-since-zero of the reference (zero-point) timecode.
    _smpteReferenceS = None
    # Nominal integer frame rate (e.g. 30).
    _frameRate = None
    # frameRate scaled by the frameRateMultiplier fraction (e.g. 29.97).
    _effectiveFrameRate = None
    # 'dropNTSC', 'dropPAL', or any other value for non-drop behaviour.
    _dropMode = None
    # Parses "numerator denominator" frame-rate multiplier strings.
    _frm_regex = re.compile('(?P<numerator>\\d+)\\s(?P<denominator>\\d+)')
    # Parses HH:MM:SS:FF timecodes.
    _tc_regex = \
        re.compile('([0-9][0-9]):([0-5][0-9]):([0-5][0-9]):([0-9][0-9])')
    def __init__(self, smpteReference, frameRate,
                 frameRateMultiplier, dropMode):
        self._frameRate = int(frameRate)
        self._effectiveFrameRate = \
            self._calc_effective_frame_rate(
                int(frameRate), frameRateMultiplier)
        self._dropMode = dropMode
        # Must come last: _calculate_s needs the rates and drop mode above.
        self._smpteReferenceS = self._calculate_s(smpteReference)
    def timedelta(self, smpte_time):
        """
        Convert a timecode to a timedelta.

        :param smpte_time: The timecode value to convert
        :return timedelta: The equivalent timedelta
        :raises TimeNegativeError: if the timecode occurs before the reference zero point
        :raises TimeFormatError: if the frames value is illegal
        """
        s = self._calculate_s(smpte_time)
        if self._smpteReferenceS > s:
            raise TimeNegativeError(ERR_TIME_NEGATIVE)
        return timedelta(seconds=s-self._smpteReferenceS)
    def can_convert(self, smpte_time):
        """
        Check if a given timecode can successfully be converted to a timedelta.

        :param smpte_time: The test value
        :return Boolean: True if the timecode can successfully be converted
        :raises TimeFormatError: if the frames value is illegal
        """
        s = self._calculate_s(smpte_time)
        return self._smpteReferenceS <= s
    @classmethod
    def _calc_effective_frame_rate(cls, frameRate, frameRateMultiplier):
        # See https://www.w3.org/TR/ttml1/#time-expression-semantics-smpte
        # for the semantics of effective frame rate calculation
        frm_numerator_s, frm_denominator_s = \
            cls._frm_regex.match(frameRateMultiplier).groups()
        return float(frameRate) * \
            float(frm_numerator_s) / \
            float(frm_denominator_s)
    def _dropped_frames(self, hours, minutes):
        # See https://www.w3.org/TR/ttml1/#time-expression-semantics-smpte
        # for the semantics of dropped frame calculation
        dropped_frames = 0
        if self._dropMode == 'dropNTSC':
            dropped_frames = \
                (hours * 54 + minutes - floor(minutes/10)) * 2
        elif self._dropMode == 'dropPAL':
            dropped_frames = \
                (hours * 27 + floor(minutes / 2) - floor(minutes / 20)) * 4
        return dropped_frames
    def _counted_frames(self, hours, minutes, seconds, frames):
        # See https://www.w3.org/TR/ttml1/#time-expression-semantics-smpte
        # for the semantics of counted frame calculation
        return (3600 * hours + 60 * minutes + seconds) * \
            self._frameRate + frames
    def _calculate_s(self, smpte_time):
        # Thie method mplements
        # https://www.w3.org/TR/ttml1/#time-expression-semantics-smpte
        # which specifies the calculation of S
        hours, minutes, seconds, frames = \
            [int(x) for x in self._tc_regex.match(smpte_time).groups()]
        if frames >= self._frameRate:
            raise TimeFormatError(ERR_TIME_FRAMES_OUT_OF_RANGE)
        if self._is_dropped_frame(minutes, seconds, frames):
            raise TimeFormatError(ERR_TIME_FRAME_IS_DROPPED)
        # Seconds = (counted frames - frames that never existed) / real rate.
        s = (self._counted_frames(hours, minutes, seconds, frames) -
             self._dropped_frames(hours, minutes)) / \
            self._effectiveFrameRate
        return s
    def _is_dropped_frame(self, minutes, seconds, frames):
        # This method implements
        # https://www.w3.org/TR/ttml1/#parameter-attribute-dropMode
        # which defines the rules for dropped frames.
        is_dropped_frame = False
        if seconds == 0:  # in NTSC and PAL frames are only dropped at 0s
            if self._dropMode == 'dropNTSC' and \
                    minutes not in [0, 10, 20, 30, 40, 50]:
                is_dropped_frame = frames in [0, 1]
            elif self._dropMode == 'dropPAL' and \
                    minutes % 2 == 0 and minutes not in [0, 20, 40]:
                is_dropped_frame = frames in [0, 1, 2, 3]
        return is_dropped_frame
| StarcoderdataPython |
5091242 | <gh_stars>1-10
# Generated by Django 2.2.5 on 2019-10-04 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.5; schema changes for the data_process app.
    dependencies = [
        ('data_process', '0011_auto_20191004_1125'),
    ]
    operations = [
        # Order first-level categories by their explicit order field and set
        # the (Chinese) verbose names shown in the admin.
        migrations.AlterModelOptions(
            name='firstcatage',
            options={'ordering': ['first_catage_order'], 'verbose_name': '一级目录', 'verbose_name_plural': '一级目录'},
        ),
        # Make the category slug fields unique.
        migrations.AlterField(
            model_name='firstcatage',
            name='first_catage_slug',
            field=models.SlugField(help_text='一级目录连接,禁止为空', unique=True, verbose_name='一级目录连接'),
        ),
        migrations.AlterField(
            model_name='secondcatage',
            name='second_catage_slug',
            field=models.SlugField(help_text='二级目录连接,禁止为空', unique=True, verbose_name='二级目录连接'),
        ),
    ]
| StarcoderdataPython |
6545848 | #!/usr/bin/env python
#-*- encoding: utf8 -*-
import sys
# Python 2 only: reload sys to re-expose setdefaultencoding, then force UTF-8
# as the default codec for implicit str/unicode conversions.
reload(sys)
sys.setdefaultencoding('utf-8')
import modules.util_kor as util
class Data():
    """Prepares (utterance, action-template-id) training pairs from raw dialogs."""
    def __init__(self, entity_tracker, action_tracker):
        self.action_templates = action_tracker.get_action_templates()
        self.et = entity_tracker
        # NOTE(review): prepare_data() returns (trainset, dialog_indices), so
        # self.trainset holds that 2-tuple, not just the pair list — confirm
        # that callers unpack it accordingly.
        self.trainset = self.prepare_data()
    def prepare_data(self):
        """Return (trainset, dialog_indices) built from the dialog corpus."""
        # get dialogs from raw text
        dialogs, dialog_indices = util.read_dialogs(with_indices=True)
        # get utterances
        utterances = util.get_utterances(dialogs)
        # get responses, mapped to their template ids
        responses = util.get_response(dialogs)
        responses = [self.get_template_id(response) for response in responses]
        # make actual trainset
        trainset = []
        for u,r in zip(utterances, responses):
            trainset.append((u,r))
        return trainset, dialog_indices
    def get_template_id(self, response):
        """Map a response string to the index of its entity-abstracted template."""
        return self.action_templates.index(self.et.extract_entities(response))
| StarcoderdataPython |
6609017 | <reponame>TobiaMarcucci/sos4hjb
import unittest
from sos4hjb.polynomials import (Variable, MonomialVector, ChebyshevVector,
Polynomial)
class TestChebyshevVector(unittest.TestCase):
    def test_call(self):
        # Evaluating the product vector at a point must match the explicit
        # degree-5, -2, and -3 Chebyshev polynomial formulas.
        x = Variable('x')
        y = Variable('y')
        z = Variable('z')
        power_dict = {x: 5, y: 2, z: 3}
        m = ChebyshevVector(power_dict)
        x_eval = {x: - 2.1, y: 1.5, z: 5}
        value = 16 * (- 2.1) ** 5 - 20 * (- 2.1) ** 3 + 5 * (- 2.1)  # degree 5
        value *= 2 * 1.5 ** 2 - 1                                    # degree 2
        value *= 4 * 5 ** 3 - 3 * 5                                  # degree 3
        self.assertAlmostEqual(m(x_eval), value)
    def test_substitute(self):
        # Partial evaluation: substituting x and y leaves a polynomial in z
        # whose coefficient is the evaluated x/y factors.
        x = Variable('x')
        y = Variable('y')
        z = Variable('z')
        power_dict = {x: 5, y: 2, z: 3}
        m = ChebyshevVector(power_dict)
        x_eval = {x: - 2.1, y: 1.5}
        m_eval = ChebyshevVector({z: 3})
        c_eval = 16 * (- 2.1) ** 5 - 20 * (- 2.1) ** 3 + 5 * (- 2.1)
        c_eval *= 2 * 1.5 ** 2 - 1
        p = Polynomial({m_eval: c_eval})
        self.assertAlmostEqual(m.substitute(x_eval), p)
        # Complete evaluation: substituting z too leaves a constant polynomial.
        x_eval[z] = 5
        m_eval = ChebyshevVector({})
        c_eval *= 4 * 5 ** 3 - 3 * 5
        p = Polynomial({m_eval: c_eval})
        self.assertAlmostEqual(m.substitute(x_eval), p)
        # Cancellation: the degree-3 factor vanishes at z = 0, so the whole
        # polynomial collapses to the empty (zero) polynomial.
        x_eval = {z: 0}
        p = Polynomial({})
        self.assertAlmostEqual(m.substitute(x_eval), p)
def test_mul(self):
# Bivariate times univariate.
x = Variable('x')
y = Variable('y')
c0 = ChebyshevVector({x: 5, y: 3})
c1 = ChebyshevVector({x: 2})
p = c0 * c1
self.assertEqual(len(p), 2)
self.assertEqual(p[ChebyshevVector({x: 7, y: 3})], 1 / 2)
self.assertEqual(p[ChebyshevVector({x: 3, y: 3})], 1 / 2)
# Bivariate times bivariate.
c1 = ChebyshevVector({x: 8, y: 1})
p = c0 * c1
self.assertEqual(len(p), 4)
self.assertEqual(p[ChebyshevVector({x: 13, y: 4})], 1 / 4)
self.assertEqual(p[ChebyshevVector({x: 13, y: 2})], 1 / 4)
self.assertEqual(p[ChebyshevVector({x: 3, y: 4})], 1 / 4)
self.assertEqual(p[ChebyshevVector({x: 3, y: 2})], 1 / 4)
# With zero power.
c1 = ChebyshevVector({x: 5, y: 1})
p = c0 * c1
self.assertEqual(len(p), 4)
self.assertEqual(p[ChebyshevVector({x: 10, y: 4})], 1 / 4)
self.assertEqual(p[ChebyshevVector({x: 10, y: 2})], 1 / 4)
self.assertEqual(p[ChebyshevVector({y: 4})], 1 / 4)
self.assertEqual(p[ChebyshevVector({y: 2})], 1 / 4)
# Multiplication by wrong type.
c = ChebyshevVector({x: 3, y: 4})
m = MonomialVector({x: 5, y: 2})
with self.assertRaises(TypeError):
c * m
def test_derivative(self):
# Derivative of 1 is 0.
x = Variable('x')
y = Variable('y')
z = Variable('z')
c = ChebyshevVector({x: 5, y: 4})
p = c.derivative(z)
self.assertEqual(len(p), 0)
self.assertEqual(p[ChebyshevVector({})], 0)
# Derivative of odd power.
p = c.derivative(x)
self.assertEqual(len(p), 3)
self.assertEqual(p[ChebyshevVector({y: 4})], 5)
self.assertEqual(p[ChebyshevVector({x: 2, y: 4})], 10)
self.assertEqual(p[ChebyshevVector({x: 4, y: 4})], 10)
# Derivative of even power.
p = c.derivative(y)
self.assertEqual(len(p), 2)
self.assertEqual(p[ChebyshevVector({x: 5, y: 1})], 8)
self.assertEqual(p[ChebyshevVector({x: 5, y: 3})], 8)
def test_integral(self):
# Power 0.
x = Variable('x')
y = Variable('y')
z = Variable('z')
c = ChebyshevVector({y: 1, z: 3})
p = c.integral(x)
self.assertEqual(len(p), 1)
self.assertEqual(p[ChebyshevVector({x: 1, y: 1, z: 3})], 1)
# Power 1.
p = c.integral(y)
self.assertEqual(len(p), 2)
self.assertEqual(p[ChebyshevVector({z: 3})], 1 / 4)
self.assertEqual(p[ChebyshevVector({y: 2, z: 3})], 1 / 4)
# Power > 1.
p = c.integral(z)
self.assertEqual(len(p), 2)
self.assertEqual(p[ChebyshevVector({y: 1, z: 2})], - 1 / 4)
self.assertEqual(p[ChebyshevVector({y: 1, z: 4})], 1 / 8)
def test_in_monomial_basis(self):
# Zero-dimensional.
m = ChebyshevVector({})
p = Polynomial({MonomialVector({}): 1})
self.assertEqual(m.in_monomial_basis(), p)
# One-dimensional.
x = Variable('x')
m = ChebyshevVector({x: 9})
p = Polynomial({
MonomialVector({x: 1}): 9,
MonomialVector({x: 3}): - 120,
MonomialVector({x: 5}): 432,
MonomialVector({x: 7}): - 576,
MonomialVector({x: 9}): 256,
})
self.assertEqual(m.in_monomial_basis(), p)
# Two-dimensional.
y = Variable('y')
m = ChebyshevVector({x: 4, y: 3})
p = Polynomial({
MonomialVector({y: 1}): - 3,
MonomialVector({y: 3}): 4,
MonomialVector({x: 2, y: 1}): 24,
MonomialVector({x: 2, y: 3}): - 32,
MonomialVector({x: 4, y: 1}): - 24,
MonomialVector({x: 4, y: 3}): 32,
})
self.assertEqual(m.in_monomial_basis(), p)
def test_repr(self):
x = Variable('x')
x3 = Variable('x', 3)
c = ChebyshevVector({x: 5, x3: 2})
self.assertEqual(c.__repr__(), 'T_{5}(x)T_{2}(x_{3})')
| StarcoderdataPython |
3567319 | <filename>algos/GLASSO/pISTA.py
import numpy as np
from numpy import linalg
from algos.GLASSO.base import base
from utils.common import np_soft_threshold
from utils.GLASSO.glasso import objective_F_cholesky
class pISTA(base):
    """Proximal ISTA-type solver for the graphical lasso (GLASSO) problem.

    Iterates on the precision-matrix estimate A, taking proximal steps whose
    size is chosen by a Cholesky-based backtracking line search
    (pista_cholesky_linesearch). All arithmetic is float32.
    """
    def __init__(self, T, N, lam, ls_iter, step_lim, init_step):
        """T: max iterations, N: problem dimension, lam: l1 weight,
        ls_iter: line-search iterations, step_lim: smallest step,
        init_step: initial step size."""
        super(pISTA, self).__init__(T, N, lam)
        self.ls_iter = ls_iter
        self.step_lim = step_lim
        self.init_step = init_step
        # Identifier used when persisting results; encodes the hyperparameters.
        self.save_name = "pISTA_N{N}_T{T}_step{step}_LsIter{ls_iter}_StepLim{step_lim}"\
            .format(N=self.N, T=self.T, step=self.init_step, ls_iter=self.ls_iter, step_lim=self.step_lim)
    def compute(self, S, A0, status_f, history, test_check_f):
        """Run up to self.T pISTA iterations on covariance S.

        A0: optional warm start (None -> diagonal init 1/(lam + diag(S))).
        status_f: optional per-iteration status callback (A, step).
        history: if truthy, keep a copy of A after every iteration in As.
        test_check_f: optional early-stopping predicate (A, S, lam, A_inv).
        Returns (A, status, As, iterations); iterations is inf if the line
        search ever failed completely (step collapsed to 0).

        NOTE(review): if self.T == 0 the final `return ... t+1` references an
        unbound `t` — confirm T >= 1 is always used.
        """
        init_step = np.float32(self.init_step)
        As = []
        status = []
        lam = np.float32(self.lam)
        if A0 is None:
            # Diagonal initialization: A = diag(1 / (lam + diag(S))).
            A_diag = self.lam * np.ones(self.N, dtype='float32')
            A_diag = A_diag + np.diag(S)
            A_diag = 1.0 / A_diag
            A = np.diag(A_diag)
            A_diag = None
        else:
            A = np.array(A0, dtype='float32')
        if history:
            As.append(A.copy())
        if status_f is not None: status.append(status_f(A, 0.0))
        for t in range(self.T):
            A_inv = np.linalg.inv(A)
            if test_check_f is not None:
                if test_check_f(A, S, self.lam, A_inv):
                    # Converged before taking this step; don't count it.
                    t -= 1
                    break
            sign_A = np.sign(A, dtype='float32')
            mask_A = np.abs(sign_A, dtype='float32').astype('int8')
            # Smooth-part gradient of the GLASSO objective.
            G = S - A_inv
            sign_soft_G = np.sign(np_soft_threshold(G, lam), dtype='float32')
            mask_G = np.abs(sign_soft_G).astype('int8')
            # Active set: entries nonzero in A or with large enough gradient.
            mask = np.bitwise_or(mask_A, mask_G)
            mask_G = None
            AgA = A @ (mask * G) @ A
            G = None
            # For entries entering the active set, use the gradient's sign.
            sign_A -= np.bitwise_xor(mask, mask_A) * sign_soft_G
            sign_soft_G = None
            AhA = A @ sign_A @ A
            AhA *= lam
            # Build AAt = lam * (A*A + diag outer-product correction) * sign_A
            # without allocating a second full matrix: zero the diagonal of A
            # in place, then restore it.
            A_diag = np.diag(A).copy().reshape(-1, 1)
            np.fill_diagonal(A, 0)
            A_no_diag = A
            AAt = ((A_no_diag * A_no_diag) + (A_diag * A_diag.T)) * sign_A
            AAt *= lam
            np.fill_diagonal(A, A_diag)
            A_diag = None
            A_no_diag = None
            sign_A = None
            # Full (scaled) proximal-gradient direction; intermediates are
            # dropped eagerly to bound peak memory.
            AghA = AgA + AhA - AAt
            AgA = None
            AhA = None
            A, step = pista_cholesky_linesearch(A, S, lam, mask, AghA, AAt, A,
                                                step=init_step, max_iter=self.ls_iter, step_lim=self.step_lim)
            # A zero step means the line search failed; stop taking steps.
            if step == 0: init_step = 0
            if history:
                As.append(A.copy())
            if status_f is not None: status.append(status_f(A, step))
        if init_step == 0: t = np.inf
        return A, status, As, t+1
def pista_cholesky_linesearch(A, S, lam, mask, a, b, c, step, max_iter, step_lim):
    """Backtracking line search for the pISTA proximal step.

    Candidate iterates have the form
        A_next = sym( mask * soft_threshold(c - beta*a, |beta*b|) )
    where sym(X) = (X + X^T)/2. A candidate is accepted when it is positive
    definite (Cholesky succeeds) and decreases objective_F_cholesky.

    Phase 1: halve beta from `step` for up to max_iter tries (stopping below
    step_lim), returning the first PD candidate that improves the objective.
    Phase 2 (fallback): restart from a conditioning-based estimate
    0.81*(lambda_min/lambda_max)^2 (capped by the first PD beta seen) and
    halve until a PD candidate is found, accepted *without* the objective
    test, or beta drops below float32 machine epsilon.

    Returns (A_next, beta) on success, or (A, 0.0) when step == 0 or no
    acceptable step exists.
    """
    if step == 0:
        return A, 0.0
    beta = step
    # Cholesky of the current iterate is reused for the reference objective.
    L = np.linalg.cholesky(A)
    init_F_value = objective_F_cholesky(A,S,lam,L)
    L = None
    beta_psd = None
    for _ in range(max_iter):
        if beta < step_lim: break
        try:
            beta_a = beta * a
            beta_b = np.abs(beta * b, dtype='float32')
            A_next = mask * np_soft_threshold(c - beta_a, beta_b)
            beta_a = None
            beta_b = None
            # Symmetrize the candidate.
            A_next = A_next + np.transpose(A_next)
            A_next *= 0.5
            # Raises LinAlgError if the candidate is not positive definite.
            L = np.linalg.cholesky(A_next)
            # Remember the largest beta that produced a PD candidate.
            if beta_psd is None: beta_psd = beta
            if objective_F_cholesky(A_next,S,lam,L) < init_F_value:
                return A_next, beta
        except linalg.LinAlgError:
            pass
        beta *= 0.5
    # Fallback: restart beta from a condition-number-based safe estimate.
    eigs = np.linalg.eigvalsh(A)
    beta = (eigs[0]/eigs[-1]) ** 2
    beta = np.float32(0.81 * beta)
    eigs = None
    if beta_psd is not None and beta > beta_psd: beta = beta_psd
    beta_eps = np.finfo(np.float32).eps
    while True:
        try:
            beta_a = beta * a
            beta_b = np.abs(beta * b, dtype='float32')
            A_next = mask * np_soft_threshold(c - beta_a, beta_b)
            beta_a = None
            beta_b = None
            A_next = A_next + np.transpose(A_next)
            A_next *= 0.5
            L = np.linalg.cholesky(A_next)
            # Accept the first PD candidate; no objective decrease required.
            return A_next, beta
        except linalg.LinAlgError:
            pass
        beta *= 0.5
        #Emulate do while
        if beta < beta_eps: break
    return A, 0.0
def init_pISTA_parser(pISTA_pasrser):
    """Register the pISTA algorithm's command-line options on *pISTA_pasrser*.

    Sets algo='pISTA' as a default and adds the iteration count, line-search
    iteration count, minimum step size and initial step size options.
    """
    pISTA_pasrser.set_defaults(algo='pISTA')
    # (flags, type, default, dest, help) for each option.
    option_specs = [
        (('-T', '--T'), int, 15, 'T', "Number of iterations."),
        (('-linesearch', '--linesearch'), int, 15, 'ls_iter',
         "Number of linesearch iterations."),
        (('-step_lim', '--step_limit'), float, 1e-4, 'step_lim',
         "The smallest step size possible."),
        (('-st', '--step'), float, 1.0, 'init_step', 'init_step.'),
    ]
    for flags, value_type, default, dest, help_text in option_specs:
        pISTA_pasrser.add_argument(
            *flags, required=False, type=value_type, default=default,
            dest=dest, help=help_text)
3337995 | <reponame>warrd18-meet/meet201617YL1cs-mod4
#FIX THE LINE BELOW
class MyStr(str):
    """A subclass of str with some new, fun methods."""

    def exclaim(self, num):
        """
        Add num exclamation points to string.

        :param num: number of exclamation points to add
        :returns: a string with num exclamation points added to the end
        """
        return self + '!' * num

    def replace(self, take_out, use_this):
        """
        Override the replace method of string.

        The new replace method is case-insensitive,
        and the output will always be lower-case.

        Examples:
        >>> test = MyStr('aAaA')
        >>> test.replace('a', 'b')
        'bbbb'
        >>> test = MyStr('aAaADD')
        >>> test.replace('AA', 'c')
        'ccdd'

        :param take_out: the substring that will be replaced
        :param use_this: the substring that will be used in place of take_out
        :returns: a new string with replacement complete
        """
        # self.lower() returns a plain str, so this invokes str.replace and
        # cannot recurse back into MyStr.replace. Bug fix: use_this is now
        # lower-cased too, so the documented "output is always lower-case"
        # contract holds even for an upper-case replacement string.
        return self.lower().replace(take_out.lower(), use_this.lower())
| StarcoderdataPython |
4998749 | <reponame>whtngus/chatbot_copy
from utils.Preprocess import Preprocess
from models.ner.NerModel import NerModel
# Shared text preprocessor: word-to-index dictionary plus a user lexicon.
p = Preprocess(word2index_dic='../train_tools/dict/chatbot_dict.bin',
               userdic='../utils/user_dic.tsv')
# Named-entity-recognition model loaded from a pre-trained Keras checkpoint.
ner = NerModel(model_name='../models/ner/ner_model.h5', proprocess=p)
# Sample Korean query ("I want to order sweet-and-sour pork at 13:02 today").
query = '오늘 오전 13시 2분에 탕수육 주문 하고 싶어요'
# Per-token (word, tag) predictions and the extracted tag sequence.
predicts = ner.predict(query)
tags = ner.predict_tags(query)
print(predicts)
print(tags)
| StarcoderdataPython |
1674277 | """
Test cases for codeop.py
<NAME>
"""
import sys
import unittest
import warnings
from test import support
from test.support import warnings_helper
from codeop import compile_command, PyCF_DONT_IMPLY_DEDENT
import io
if support.is_jython:
    def unify_callables(d):
        """Replace every callable value in dict *d* with True, in place.

        Jython-only helper: lets namespaces produced by exec/eval be compared
        for equality without requiring the compiled function objects
        themselves to compare equal. Returns *d* for convenience.
        """
        for n,v in d.items():
            if hasattr(v, '__call__'):
                d[n] = True
        return d
class CodeopTests(unittest.TestCase):
    """Tests for codeop.compile_command: valid, incomplete and invalid input.

    On Jython the check is behavioral (execute and compare namespaces); on
    CPython the compiled code object is compared against a direct compile()
    with PyCF_DONT_IMPLY_DEDENT.
    """

    def assertValid(self, str, symbol='single'):
        '''succeed iff str is a valid piece of code'''
        if support.is_jython:
            code = compile_command(str, "<input>", symbol)
            self.assertTrue(code)
            if symbol == "single":
                d,r = {},{}
                saved_stdout = sys.stdout
                sys.stdout = io.StringIO()
                try:
                    exec(code, d)
                    exec(compile(str,"<input>","single"), r)
                finally:
                    sys.stdout = saved_stdout
            elif symbol == 'eval':
                ctx = {'a': 2}
                d = { 'value': eval(code,ctx) }
                r = { 'value': eval(str,ctx) }
            self.assertEqual(unify_callables(r),unify_callables(d))
        else:
            expected = compile(str, "<input>", symbol, PyCF_DONT_IMPLY_DEDENT)
            self.assertEqual(compile_command(str, "<input>", symbol), expected)

    def assertIncomplete(self, str, symbol='single'):
        '''succeed iff str is the start of a valid piece of code'''
        self.assertEqual(compile_command(str, symbol=symbol), None)

    def assertInvalid(self, str, symbol='single', is_syntax=1):
        '''succeed iff str is the start of an invalid piece of code'''
        try:
            compile_command(str,symbol=symbol)
            self.fail("No exception raised for invalid code")
        except SyntaxError:
            self.assertTrue(is_syntax)
        except OverflowError:
            self.assertTrue(not is_syntax)

    def test_valid(self):
        """Complete statements/expressions must compile successfully."""
        av = self.assertValid

        # special case
        if not support.is_jython:
            self.assertEqual(compile_command(""),
                             compile("pass", "<input>", 'single',
                                     PyCF_DONT_IMPLY_DEDENT))
            self.assertEqual(compile_command("\n"),
                             compile("pass", "<input>", 'single',
                                     PyCF_DONT_IMPLY_DEDENT))
        else:
            av("")
            av("\n")

        av("a = 1")
        av("\na = 1")
        av("a = 1\n")
        av("a = 1\n\n")
        av("\n\na = 1\n\n")

        av("def x():\n pass\n")
        av("if 1:\n pass\n")

        av("\n\nif 1: pass\n")
        av("\n\nif 1: pass\n\n")

        av("def x():\n\n pass\n")
        av("def x():\n pass\n  \n")
        av("def x():\n pass\n \n")

        av("pass\n")
        av("3**3\n")

        av("if 9==3:\n   pass\nelse:\n   pass\n")
        av("if 1:\n pass\n if 1:\n  pass\n else:\n  pass\n")

        av("#a\n#b\na = 3\n")
        av("#a\n\n   \na=3\n")
        av("a=3\n\n")
        av("a = 9+ \\\n3")

        av("3**3","eval")
        av("(lambda z: \n z**3)","eval")

        av("9+ \\\n3","eval")
        av("9+ \\\n3\n","eval")

        av("\n\na**3","eval")
        av("\n \na**3","eval")
        av("#a\n#b\na**3","eval")

        av("\n\na = 1\n\n")
        av("\n\nif 1: a=1\n\n")

        av("if 1:\n pass\n if 1:\n  pass\n else:\n  pass\n")
        av("#a\n\n   \na=3\n\n")

        av("\n\na**3","eval")
        av("\n \na**3","eval")
        av("#a\n#b\na**3","eval")

        av("def f():\n try: pass\n finally: [x for x in (1,2)]\n")
        av("def f():\n pass\n#foo\n")
        av("@a.b.c\ndef f():\n pass\n")

    def test_incomplete(self):
        """Prefixes of valid code must yield None (more input expected)."""
        ai = self.assertIncomplete

        ai("(a **")
        ai("(a,b,")
        ai("(a,b,(")
        ai("(a,b,(")
        ai("a = (")
        ai("a = {")
        ai("b + {")

        ai("print([1,\n2,")
        ai("print({1:1,\n2:3,")
        ai("print((1,\n2,")

        ai("if 9==3:\n   pass\nelse:")
        ai("if 9==3:\n   pass\nelse:\n")
        ai("if 9==3:\n   pass\nelse:\n   pass")
        ai("if 1:")
        ai("if 1:\n")
        ai("if 1:\n pass\n if 1:\n  pass\n else:")
        ai("if 1:\n pass\n if 1:\n  pass\n else:\n")
        ai("if 1:\n pass\n if 1:\n  pass\n else:\n  pass")

        ai("def x():")
        ai("def x():\n")
        ai("def x():\n\n")

        ai("def x():\n  pass")
        ai("def x():\n  pass\n ")
        ai("def x():\n  pass\n  ")
        ai("\n\ndef x():\n  pass")

        ai("a = 9+ \\")
        ai("a = 'a\\")
        ai("a = '''xy")

        ai("","eval")
        ai("\n","eval")
        ai("(","eval")
        ai("(9+","eval")
        ai("9+ \\","eval")
        ai("lambda z: \\","eval")

        ai("if True:\n if True:\n  if True:   \n")

        ai("@a(")
        ai("@a(b")
        ai("@a(b,")
        ai("@a(b,c")
        ai("@a(b,c,")

        ai("from a import (")
        ai("from a import (b")
        ai("from a import (b,")
        ai("from a import (b,c")
        ai("from a import (b,c,")

        ai("[");
        ai("[a");
        ai("[a,");
        ai("[a,b");
        ai("[a,b,");

        ai("{");
        ai("{a");
        ai("{a:");
        ai("{a:b");
        ai("{a:b,");
        ai("{a:b,c");
        ai("{a:b,c:");
        ai("{a:b,c:d");
        ai("{a:b,c:d,");

        ai("a(")
        ai("a(b")
        ai("a(b,")
        ai("a(b,c")
        ai("a(b,c,")

        ai("a[")
        ai("a[b")
        ai("a[b,")
        ai("a[b:")
        ai("a[b:c")
        ai("a[b:c:")
        ai("a[b:c:d")

        ai("def a(")
        ai("def a(b")
        ai("def a(b,")
        ai("def a(b,c")
        ai("def a(b,c,")

        ai("(")
        ai("(a")
        ai("(a,")
        ai("(a,b")
        ai("(a,b,")

        ai("if a:\n pass\nelif b:")
        ai("if a:\n pass\nelif b:\n pass\nelse:")

        ai("while a:")
        ai("while a:\n pass\nelse:")

        ai("for a in b:")
        ai("for a in b:\n pass\nelse:")

        ai("try:")
        ai("try:\n pass\nexcept:")
        ai("try:\n pass\nfinally:")
        ai("try:\n pass\nexcept:\n pass\nfinally:")

        ai("with a:")
        ai("with a as b:")

        ai("class a:")
        ai("class a(")
        ai("class a(b")
        ai("class a(b,")
        ai("class a():")

        ai("[x for")
        ai("[x for x in")
        ai("[x for x in (")

        ai("(x for")
        ai("(x for x in")
        ai("(x for x in (")

    def test_invalid(self):
        """Syntactically broken input must raise SyntaxError."""
        ai = self.assertInvalid
        ai("a b")

        ai("a @")
        ai("a b @")
        ai("a ** @")

        ai("a = ")
        ai("a = 9 +")

        ai("def x():\n\npass\n")

        ai("\n\n if 1: pass\n\npass")

        ai("a = 9+ \\\n")
        ai("a = 'a\\ ")
        ai("a = 'a\\\n")

        ai("a = 1","eval")
        ai("]","eval")
        ai("())","eval")
        ai("[}","eval")
        ai("9+","eval")
        ai("lambda z:","eval")
        ai("a b","eval")

        ai("return 2.3")
        ai("if (a == 1 and b = 2): pass")

        ai("del 1")
        ai("del (1,)")
        ai("del [1]")
        ai("del '1'")

        ai("[i for i in range(10)] = (1, 2, 3)")

    def test_invalid_exec(self):
        """Invalid code in 'exec' mode must raise SyntaxError."""
        ai = self.assertInvalid
        ai("raise = 4", symbol="exec")
        ai('def a-b', symbol='exec')
        ai('await?', symbol='exec')
        ai('=!=', symbol='exec')
        ai('a await raise b', symbol='exec')
        ai('a await raise b?+1', symbol='exec')

    def test_filename(self):
        """The filename argument must be propagated to the code object."""
        self.assertEqual(compile_command("a = 1\n", "abc").co_filename,
                         compile("a = 1\n", "abc", 'single').co_filename)
        self.assertNotEqual(compile_command("a = 1\n", "abc").co_filename,
                            compile("a = 1\n", "def", 'single').co_filename)

    def test_warning(self):
        """Warnings are emitted exactly once per compile_command call."""
        # Test that the warning is only returned once.
        with warnings_helper.check_warnings(
                (".*literal", SyntaxWarning),
                (".*invalid", DeprecationWarning),
                ) as w:
            compile_command(r"'\e' is 0")
            self.assertEqual(len(w.warnings), 2)

        # bpo-41520: check SyntaxWarning treated as an SyntaxError
        with warnings.catch_warnings(), self.assertRaises(SyntaxError):
            warnings.simplefilter('error', SyntaxWarning)
            compile_command('1 is 1', symbol='exec')
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
274393 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
TEMPLATE_SIMPLE = '''
heat_template_version: 2016-04-08
parameters:
string-length:
type: number
resources:
my-chain:
type: OS::Heat::ResourceChain
properties:
resources: ['OS::Heat::RandomString', 'OS::Heat::RandomString']
resource_properties:
length: { get_param: string-length }
outputs:
resource-ids:
value: { get_attr: [my-chain, refs] }
resource-0-value:
value: { get_attr: [my-chain, resource.0, value] }
all-resource-attrs:
value: { get_attr: [my-chain, attributes, value] }
'''
TEMPLATE_PARAM_DRIVEN = '''
heat_template_version: 2016-04-08
parameters:
chain-types:
type: comma_delimited_list
resources:
my-chain:
type: OS::Heat::ResourceChain
properties:
resources: { get_param: chain-types }
'''
class ResourceChainTests(functional_base.FunctionalTestsBase):
    """Functional tests for OS::Heat::ResourceChain: creation, update, and
    parameter/environment-driven chain membership."""

    def test_create(self):
        """Creating the simple chain yields a nested stack with two
        RandomString resources and the expected stack outputs."""
        # Test
        params = {'string-length': 8}
        stack_id = self.stack_create(template=TEMPLATE_SIMPLE,
                                     parameters=params)

        # Verify
        stack = self.client.stacks.get(stack_id)
        self.assertTrue(stack is not None)

        # Top-level resource for chain
        expected = {'my-chain': 'OS::Heat::ResourceChain'}
        found = self.list_resources(stack_id)
        self.assertEqual(expected, found)

        # Nested stack exists and has two resources
        nested_id = self.group_nested_identifier(stack_id, 'my-chain')
        expected = {'0': 'OS::Heat::RandomString',
                    '1': 'OS::Heat::RandomString'}
        found = self.list_resources(nested_id)
        self.assertEqual(expected, found)

        # Outputs
        resource_ids = self._stack_output(stack, 'resource-ids')
        self.assertTrue(resource_ids is not None)
        self.assertEqual(2, len(resource_ids))

        resource_value = self._stack_output(stack, 'resource-0-value')
        self.assertTrue(resource_value is not None)
        self.assertEqual(8, len(resource_value))  # from parameter

        resource_attrs = self._stack_output(stack, 'all-resource-attrs')
        self.assertTrue(resource_attrs is not None)
        self.assertIsInstance(resource_attrs, dict)
        self.assertEqual(2, len(resource_attrs))
        self.assertEqual(8, len(resource_attrs['0']))
        self.assertEqual(8, len(resource_attrs['1']))

    def test_update(self):
        """Updating to a single-element chain replaces the nested stack's
        resources accordingly."""
        # Setup
        params = {'string-length': 8}
        stack_id = self.stack_create(template=TEMPLATE_SIMPLE,
                                     parameters=params)

        update_tmpl = '''
heat_template_version: 2016-04-08
parameters:
  string-length:
    type: number
resources:
  my-chain:
    type: OS::Heat::ResourceChain
    properties:
      resources: ['OS::Heat::None']
'''

        # Test
        self.update_stack(stack_id, template=update_tmpl, parameters=params)

        # Verify
        # Nested stack only has the None resource
        nested_id = self.group_nested_identifier(stack_id, 'my-chain')
        expected = {'0': 'OS::Heat::None'}
        found = self.list_resources(nested_id)
        self.assertEqual(expected, found)

    def test_resources_param_driven(self):
        """Chain members follow the comma-separated chain-types parameter."""
        # Setup
        params = {'chain-types':
                  'OS::Heat::None,OS::Heat::RandomString,OS::Heat::None'}

        # Test
        stack_id = self.stack_create(template=TEMPLATE_PARAM_DRIVEN,
                                     parameters=params)

        # Verify
        nested_id = self.group_nested_identifier(stack_id, 'my-chain')
        expected = {'0': 'OS::Heat::None',
                    '1': 'OS::Heat::RandomString',
                    '2': 'OS::Heat::None'}
        found = self.list_resources(nested_id)
        self.assertEqual(expected, found)

    def test_resources_env_defined(self):
        """Chain members can also be supplied via the environment."""
        # Setup
        env = {'parameters': {'chain-types': 'OS::Heat::None'}}

        # Test
        stack_id = self.stack_create(template=TEMPLATE_PARAM_DRIVEN,
                                     environment=env)

        # Verify
        nested_id = self.group_nested_identifier(stack_id, 'my-chain')
        expected = {'0': 'OS::Heat::None'}
        found = self.list_resources(nested_id)
        self.assertEqual(expected, found)
| StarcoderdataPython |
6517779 | # coding: utf-8
from zaglushka_tests import ZaglushkaAsyncHTTPTestCase
class DefaultResponseTestCase(ZaglushkaAsyncHTTPTestCase):
    """With an empty stub config every request gets the default response."""
    def get_zaglushka_config(self):
        return {}
    def test_default_response(self):
        self.assertIsDefaultResponse(self.fetch('/path'))
class DefaultResponseBodyTestCase(ZaglushkaAsyncHTTPTestCase):
    """A url rule with code/headers but no body yields an empty body while
    still applying the configured status code and headers."""
    def get_zaglushka_config(self):
        return {
            'urls': [
                {
                    'path': '/my_response',
                    'code': 500,
                    'headers': {
                        'X-Custom-Header': 'my;header',
                    }
                }
            ]
        }
    def test_default_response(self):
        response = self.fetch('/my_response')
        self.assertResponseBody(b'', response)
        self.assertEquals(500, response.code)
        self.assertResponseHeaders({'X-Custom-Header': 'my;header'}, response)
| StarcoderdataPython |
5060103 | #
# Copyright (c) 2020 Bitdefender
# SPDX-License-Identifier: Apache-2.0
#
import os
import sys
from pybddisasm.bddisasm import *
try:
from termcolor import colored
except:
colored = None
_SPACES = [
'',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
]
def print_internal(string, foreground=None, highlight=True):
    """Write *string* to stdout, colorized with termcolor only when the
    library is available, a foreground color was given, and highlighting
    is enabled; otherwise write it plain."""
    use_color = colored and foreground is not None and highlight
    if use_color:
        sys.stdout.write(colored(string, foreground))
    else:
        sys.stdout.write(string)
def print_instruction(instruction, rip, highlight=False, ext_info=False):
    """Print one decoded instruction: RIP, color-coded hex bytes, mnemonic.

    Byte groups are colored by role when *highlight* is set: prefixes plain,
    opcodes green, ModRM/SIB yellow, displacement blue, immediates/offsets
    red. *ext_info* additionally dumps the instruction's repr.
    """
    # Running offset of the next undumped byte within InstructionBytes.
    k = 0
    print_internal('%016x ' % rip)
    # prefixes
    for ibyte in range(0, instruction.PrefLength):
        print_internal('%02x' % instruction.InstructionBytes[ibyte])
    # opcodes
    k += instruction.PrefLength
    for ibyte in range(k, k + instruction.OpLength):
        print_internal('%02x' % instruction.InstructionBytes[ibyte], 'green', highlight)
    # modrm and sib
    k += instruction.OpLength
    for ibyte in range(k, k + instruction.HasModRm + instruction.HasSib):
        print_internal('%02x' % instruction.InstructionBytes[ibyte], 'yellow', highlight)
    # displacement
    k += instruction.HasModRm + instruction.HasSib
    for ibyte in range(k, k + instruction.DispLength):
        print_internal('%02x' % instruction.InstructionBytes[ibyte], 'blue', highlight)
    # relative offset/moffset/immediates
    rest = instruction.Imm1Length + instruction.Imm2Length + instruction.RelOffsLength + \
        instruction.MoffsetLength + instruction.HasSseImm + instruction.AddrLength
    k += instruction.DispLength
    for ibyte in range(k, k + rest):
        print_internal('%02x' % instruction.InstructionBytes[ibyte], 'red', highlight)
    # the rest of the bytes
    k += rest
    for ibyte in range(k, instruction.Length):
        print_internal('%02x' % instruction.InstructionBytes[ibyte])
    # Pad the hex column to 16 byte slots, then print the disassembly text.
    print_internal('%s' % _SPACES[16 - instruction.Length])
    print_internal('%s' % instruction.Text)
    if ext_info:
        print_internal('\n')
        print_internal('%r' % instruction)
    print_internal('\n')
def disassemble_file(filepath, offset=0, size=0, rip=0, arch=64,
                     highlight=True, vendor='any', ext_info=False):
    """Disassemble and print up to *size* bytes of *filepath*.

    Starts at file *offset*, displaying addresses relative to *rip*.
    size == 0 means "to end of file". Bytes that fail to decode are printed
    as a single `db 0xNN` line and skipped one byte at a time.
    """
    if not filepath:
        return
    with open(filepath, 'rb') as f:
        total = 0
        file_size = os.path.getsize(filepath)
        if not size:
            size = file_size
        while offset < file_size and total < size:
            # Read at most 15 bytes: the maximum x86 instruction length.
            to_read = file_size - offset
            if to_read > 15:
                to_read = 15
            f.seek(offset, 0)
            buff = f.read(to_read)
            current_rip = rip + total
            instr = nd_decode_ex2(buff, arch, arch, arch, vendor, current_rip)
            if instr:
                print_instruction(instr, current_rip, highlight, ext_info)
                offset += instr['Length']
                total += instr['Length']
            else:
                # Undecodable byte: emit it as raw data and advance one byte.
                sys.stdout.write('%016x %02x %s db 0x%02x' % (current_rip, buff[0],
                                                              _SPACES[15], buff[0]))
                if str.isalpha(chr(buff[0])):
                    sys.stdout.write(str(buff[0]))
                sys.stdout.write('\n')
                offset += 1
                total += 1
def disassemble_hexstring(hexstring, offset=0, size=0, rip=0, arch=64,
                          highlight=True, vendor='any', ext_info=False):
    """Disassemble and print a hex-encoded byte string.

    *hexstring* is an iterable of hex fragments that are joined and decoded.
    size == 0 means the whole buffer; addresses are shown relative to *rip*.

    NOTE(review): decoding indexes the buffer with *total* but the `db`
    fallback indexes it with *offset*; these only agree when the caller
    leaves offset at its default 0 — confirm a nonzero offset is supported.
    """
    if not hexstring:
        return
    buff = bytes.fromhex(''.join(hexstring))
    total = 0
    if not size:
        size = len(buff)
    while total < size:
        current_rip = rip + total
        # Feed at most 16 bytes per decode attempt.
        instr = nd_decode_ex2(buff[total:total+16], arch, arch, arch, vendor, current_rip)
        if instr:
            print_instruction(instr, current_rip, highlight, ext_info)
            offset += instr['Length']
            total += instr['Length']
        else:
            # Undecodable byte: print it as raw data and advance one byte.
            sys.stdout.write('%016x %02x %s db 0x%02x\n' % (current_rip, buff[offset],
                                                            _SPACES[15], buff[offset]))
            offset += 1
            total += 1
| StarcoderdataPython |
5056077 | # jsb/plugs/socket/jira.py
"""
jira.py - jsonbot module for performing lookups on a jira server
Copyright 2011, <NAME>
Special thanks to <NAME> for his phenny module; many of the ideas for
this were adapted from that plugin
http://inamidst.com/phenny/
"""
## jsb imports
from jsb.lib.callbacks import callbacks
from jsb.lib.commands import cmnds
from jsb.lib.persist import PlugPersist
from jsb.lib.examples import examples
from jsb.plugs.common.tinyurl import get_tinyurl
## basic imports
import logging
import xmlrpclib
import re
import time
#import modules.activecollab
## defines
# Module state. recent_tickets/min_age/rpc_clients are presumably meant to
# rate-limit repeat announcements and cache RPC clients — none of them is
# used by the code visible in this module (TODO confirm / remove).
recent_tickets = {}
min_age = 60 * 5
rpc_clients = {}
# Persistent plugin configuration: servers, per-channel prefixes, etc.
cfg = PlugPersist('jira', {})
## getServerInfo function
def getServerInfo(server, auth):
    """Fetch status/priority id->name maps and the base URL from a jira server.

    Returns {"statusMap": ..., "priorityMap": ..., "baseUrl": ...}; on an
    XML-RPC fault it prints a message and implicitly returns None.
    """
    try:
        server_statuses = server.jira1.getStatuses(auth)
        statusMap = {}
        for status in server_statuses:
            statusMap[status['id']] = status['name']
        server_priorities = server.jira1.getPriorities(auth)
        priorityMap = {}
        for priority in server_priorities:
            priorityMap[priority['id']] = priority['name']
        info = server.jira1.getServerInfo(auth)
        jira_baseurl = info['baseUrl']
        return {
            "statusMap": statusMap,
            "priorityMap": priorityMap,
            "baseUrl": jira_baseurl
        }
    except xmlrpclib.Error, v:
        print "XMLRPC ERROR: ",
def getRpcClient(sInfo):
    """Log in to the jira XML-RPC endpoint described by config dict *sInfo*.

    Side effect: refreshes sInfo["serverInfo"] with the server's status and
    priority maps. Returns (server_proxy, auth_token).
    """
    base_url = "%s/rpc/xmlrpc" % sInfo["url"]
    server = xmlrpclib.ServerProxy(base_url)
    username, password = sInfo["username"], sInfo["password"]
    auth = server.jira1.login(username, password)
    sInfo["serverInfo"] = getServerInfo(server, auth)
    logging.info("Server info: %s" % sInfo)
    return (server, auth)
def getJiraIssue(s, ticket):
    """Return the issue dict for *ticket* from server config *s*,
    or None on an XML-RPC fault."""
    server, auth = getRpcClient(s)
    try:
        info = server.jira1.getIssue(auth, ticket)
        return info
    except xmlrpclib.Error, v:
        print "XMLRPC ERROR:", v
        return None
def getJiraIssueMessage(s, ticket):
    """Build the channel-output lines (summary, assignee, priority/status/url)
    for *ticket* on server config *s*.

    NOTE(review): when the lookup fails (info is falsy) no lines are built and
    nothing is returned from that path — confirm callers tolerate None.
    """
    info = getJiraIssue(s, ticket)
    logging.info("jira ticket: %s is %s" % (ticket, info))
    if info:
        outInfo = []
        outInfo.append("%s: Summary: %s" % (info['key'], info['summary']))
        if info.has_key('assignee'):
            outInfo.append( "%s: Assigned To: %s" % (info['key'], info['assignee']))
        else:
            outInfo.append( "%s: Assigned To: Unassigned" % (info['key']))
        data = (info["key"], s["serverInfo"]["priorityMap"][info['priority']], s["serverInfo"]["statusMap"][info['status']], s["serverInfo"]["baseUrl"], info["key"])
        outInfo.append( "%s: Priority: %s, Status: %s, %s/browse/%s" % data)
        logging.info("Jira ticket text: %s" % outInfo)
        return outInfo
def getMatchRegEx(prefixList):
    """Compile a regex matching a jira ticket id with any prefix in
    *prefixList*: group 1 is the full 'PREFIX-123' id, group 2 the prefix."""
    alternation = "|".join(prefixList)
    pattern = ".*((%s)-[0-9]+).*" % alternation
    return re.compile(pattern)
def containsJiraTag(bot, ievent):
    """Callback precondition: return 1 iff the event text contains a ticket id
    whose prefix is enabled for this channel on some configured server.

    NOTE(review): "backgound" looks like a typo for "background" — confirm
    which value ievent.how actually carries before fixing.
    """
    if ievent.how == "backgound": return 0
    if not cfg.data.servers: return 0
    # Collect every prefix enabled for this channel across all servers.
    prefixList = set()
    for server, serverData in cfg.data["servers"].iteritems():
        if ievent.channel in serverData["channels"]:
            prefixList.update(serverData["channels"][ievent.channel])
    test = getMatchRegEx(prefixList)
    fnd = test.match(ievent.txt)
    if fnd:
        return 1
    return 0
def doLookup(bot, ievent):
    """Extract a ticket id from the event text, look it up on the server that
    owns its prefix, and say the resulting lines in the channel."""
    if not cfg.data.servers: logging.warn("servers is not defined in config.") ; return 0
    prefixList = set()
    # Map each enabled prefix for this channel back to its server name.
    serversForPrefix = {}
    for server, serverData in cfg.data["servers"].iteritems():
        if ievent.channel in serverData["channels"]:
            for prefix in serverData["channels"][ievent.channel]:
                serversForPrefix[prefix] = server
                prefixList.add(prefix)
    test = getMatchRegEx(prefixList)
    fnd = test.match(ievent.txt)
    if fnd:
        # group(1) is the full 'PREFIX-123' id, group(2) just the prefix.
        ticket = fnd.group(1)
        prefix = fnd.group(2)
        if not ticket: logging.warn("ticket missing: %s" % str(fnd)) ; return
        if not prefix: logging.warn("prefix missing: %s" % str(fnd)) ; return
        logging.info("Found: %s %s" % (ticket, prefix))
        logging.info("servers: %s" % cfg.data["servers"])
        try: server = serversForPrefix[prefix]
        except KeyError: return
        logging.warn("server is %s" % server)
        msg = getJiraIssueMessage(cfg.data["servers"][server], ticket)
        for line in msg:
            bot.say(ievent.channel, line)
# Register doLookup (guarded by the cheap containsJiraTag pre-check) for every
# event type that can carry channel text; handlers run on a worker thread.
callbacks.add('PRIVMSG', doLookup, containsJiraTag, threaded=True)
callbacks.add('CONSOLE', doLookup, containsJiraTag, threaded=True)
callbacks.add('MESSAGE', doLookup, containsJiraTag, threaded=True)
callbacks.add('DISPATCH', doLookup, containsJiraTag, threaded=True)
callbacks.add('TORNADO', doLookup, containsJiraTag, threaded=True)
## add_jira_server command
def handle_add_jira_server(bot, ievent):
    """ configure a new jira server; syntax: add_jira_server [server name] [url] [username] [password] """
    if len(ievent.args) != 4:
        ievent.reply("syntax: add_jira_server [server name] [url] [username] [password]")
        return
    server = {
        "name": ievent.args[0],
        "url": ievent.args[1].strip("/"),
        "username": ievent.args[2],
        "password": ievent.args[3],
        # channel -> list of ticket prefixes enabled in that channel.
        "channels": {},
        # Filled lazily by getRpcClient() on first lookup.
        "serverInfo": {},
    }
    if not cfg.data.has_key("servers"):
        cfg.data["servers"] = {}
    cfg.data["servers"][server["name"]] = server
    cfg.save()
    ievent.reply("Added jira server %s" % server["name"])
cmnds.add("add_jira_server", handle_add_jira_server, ["OPER"])
examples.add("add_jira_server", "add a jira server", "add_jira_server FireBreath http://jira.firebreath.org myuser mypassword")
## del_jira_server command
def handle_del_jira_server(bot, ievent):
    """ remove a jira server; syntax: del_jira_server [server name] """
    if len(ievent.args) != 1:
        ievent.reply("syntax: del_jira_server [server name]")
        return
    serverName = ievent.args[0]
    if not cfg.data.has_key("servers"):
        cfg.data["servers"] = {}
    if serverName in cfg.data["servers"]:
        del cfg.data["servers"][serverName]
        cfg.save()
        ievent.reply("Deleted jira server %s" % serverName)
    else:
        ievent.reply("Unknown jira server %s" % serverName)
cmnds.add("del_jira_server", handle_del_jira_server, ["OPER"])
examples.add("del_jira_server", "del a jira server", "del_jira_server FireBreath http://jira.firebreath.org myuser mypassword")
## jira_issue_lookup_enable command
def handle_jira_issue_lookup_enable(bot, ievent):
    """ enable lookups for jira issues in the current channel; syntax: jira_issue_lookup_enable [server] [prefix] """
    if len(ievent.args) != 2:
        ievent.reply("syntax: jira_issue_lookup_enable [server] [prefix]")
        return
    serverName, prefix = ievent.args
    if not "servers" in cfg.data or not serverName in cfg.data["servers"]:
        ievent.reply("Unknown server %s" % serverName)
        return
    # Global prefix registry, kept alongside the per-channel lists.
    prefixSet = set(cfg.data["prefixes"]) if "prefixes" in cfg.data else set()
    server = cfg.data["servers"][serverName]
    if ievent.channel not in server["channels"]:
        server["channels"][ievent.channel] = []
    if not prefix in server["channels"][ievent.channel]:
        server["channels"][ievent.channel].append(prefix)
    prefixSet.add(prefix)
    cfg.data["prefixes"] = list(prefixSet)
    cfg.save()
    ievent.reply("enabled lookups of %s-* jira tickets in this channel on server %s" % (prefix, serverName))
cmnds.add("jira_issue_lookup_enable", handle_jira_issue_lookup_enable, ["OPER"])
examples.add("jira_issue_lookup_enable", "enable lookups of jira tickets in the channel", "jira_issue_lookup_enable jiraserver FIREBREATH")
## jira_issue_lookup_disable command
def handle_jira_issue_lookup_disable(bot, ievent):
    """ disable lookups for jira issues in the current channel for a given server; syntax: jira_issue_lookup_disable [server] """
    if len(ievent.args) != 1:
        ievent.reply("syntax: jira_issue_lookup_disable [server]")
        return
    serverName = ievent.args[0]
    if not "servers" in cfg.data or not serverName in cfg.data["servers"]:
        ievent.reply("Unknown server %s" % serverName)
        return
    server = cfg.data["servers"][serverName]
    if ievent.channel in server["channels"]:
        # "channels" maps channel -> prefix list (see the enable handler), so
        # the entry must be removed with del; the original called
        # list.remove() on the dict, which raised AttributeError.
        del server["channels"][ievent.channel]
        # Persist the change, matching the add/del/enable handlers.
        cfg.save()
        ievent.reply("disabled lookups of jira tickets on server %s from this server" % serverName)
| StarcoderdataPython |
6646100 | #REVERSE A STRING /////////////////////////////////////////////////////////////////////////
# string = '<NAME>' [::-1]
# print(string)
#other solutions //////////////////
#Make a function
# def this_function(backwards):
# return backwards[::-1]
# theOtherSting = this_function('This is what the string looks like backwards')
# print(theOtherSting)
#REVERSE A NUMBER ///////////////////////////////////////////////////////////////////////////
# num = 4562
# reverse = 0
# while (num > 0):
# remainder = num % 10
# reverse = reverse * 10 + remainder
# num = num / 10
# print(reverse)
# FAILED ATTEMPT
# Python 3 program to reverse digits
# of a number
# Kept for backward compatibility with any code that reads these module-level
# names; the function below no longer depends on them. The original used them
# as global accumulators, so a second call to reversDigits() returned garbage.
rev_num = 0
base_pos = 1


def reversDigits(num):
    """Return the decimal digits of non-negative integer ``num`` reversed.

    Examples: 4562 -> 2654, 100 -> 1, 0 -> 0.

    Unlike the original global-based recursion, this is a pure function and
    is safe to call repeatedly.
    """
    reversed_value = 0
    while num > 0:
        # Pop the least-significant digit of num and push it onto the result.
        reversed_value = reversed_value * 10 + num % 10
        num //= 10
    return reversed_value


# Driver code, guarded so importing this module has no side effects.
if __name__ == '__main__':
    num = 4562
    print("Reverse of no. is ", reversDigits(num))
#////////////////////////////////////////////////////////////////////////////////////// | StarcoderdataPython |
11280159 | import random
import pytest
from etcd3 import Client
from tests.docker_cli import docker_run_etcd_main
from .envs import protocol, host
from .etcd_go_cli import etcdctl, NO_ETCD_SERVICE
@pytest.fixture(scope='module')
def client():
    """Module-scoped etcd3 Client fixture.

    Starts (or reuses) a dockerized etcd main node, yields a connected
    Client, and closes the client's connection pool at teardown.
    """
    # Only the host-mapped port of the container is needed here.
    _, p, _ = docker_run_etcd_main()
    c = Client(host, p, protocol)
    yield c
    c.close()
@pytest.mark.timeout(60)
@pytest.mark.skipif(NO_ETCD_SERVICE, reason="no etcd service available")
def test_transaction(client):
    # Basic compare/success: the compare holds, so the success op runs.
    etcdctl('put foo bar')
    txn = client.Txn()
    txn.compare(txn.key('foo').value == 'bar')
    txn.success(txn.put('foo', 'bra'))
    r = txn.commit()
    assert r.succeeded
    assert client.range('foo').kvs[0].value == b'bra'
    # If/Then/Else aliases: the compare fails (value is now 'bra'), so the
    # Else branch restores 'bar'.
    txn = client.Txn()
    txn.If(txn.key('foo').value == 'bar')
    txn.Then(txn.put('foo', 'bra'))
    txn.Else(txn.put('foo', 'bar'))
    txn.commit()
    assert client.range('foo').kvs[0].value == b'bar'
    # Multiple compares are ANDed together.
    etcdctl('put foo 2')
    txn = client.Txn()
    txn.If(txn.key('foo').value > b'1')
    txn.If(txn.key('foo').value < b'3')
    txn.If(txn.key('foo').value != b'0')
    txn.Then(txn.put('foo', 'bra'))
    r = txn.commit()
    assert r.succeeded
    assert client.range('foo').kvs[0].value == b'bra'
    # A transaction may carry several success ops; responses come back in
    # the same order, each tagged with its response type.
    etcdctl('put foo bar')
    etcdctl('put fizz buzz')
    txn = client.Txn()
    txn.success(txn.range('foo'))
    txn.success(txn.delete('fizz'))
    r = txn.commit()
    assert r.succeeded
    for i in r.responses:
        if 'response_range' in i:
            assert i.response_range.kvs[0].value == b'bar'
        else: # delete
            assert i.response_delete_range.deleted == 1
    assert not client.range('fizz').kvs
    # no gt and lt
    with pytest.raises(NotImplementedError):
        txn.If(txn.key('foo').value >= b'1')
    with pytest.raises(NotImplementedError):
        txn.If(txn.key('foo').value <= b'1')
    # type should match with target
    with pytest.raises(TypeError):
        txn.If(txn.key('foo').value < 1)
    with pytest.raises(TypeError):
        txn.If(txn.key('foo').version < 'a')
    with pytest.raises(TypeError):
        txn.If(txn.key('foo').create < 'a')
    with pytest.raises(TypeError):
        txn.If(txn.key('foo').mod < 'a')
    with pytest.raises(TypeError):
        txn.If(txn.key('foo').mod.value < 1)
    with pytest.raises(TypeError):
        client.Txn().key(123)
    # range_end: key(all=True) compares over every key in the store.
    etcdctl('del', '--from-key', '')
    etcdctl('put foo 1')
    etcdctl('put bar 2')
    etcdctl('put fiz 3')
    etcdctl('put buz 4')
    txn = client.Txn()
    r = txn.compare(txn.key(all=True).value > b'0').commit()
    assert r.succeeded
    txn = client.Txn()
    r = txn.compare(txn.key(all=True).value < b'3').commit()
    assert not r.succeeded
    # target lease: compare on the lease id attached to a key.
    ID = random.randint(10000, 100000)
    TTL = 60
    r = client.lease_grant(TTL, ID=ID)
    assert r.ID == ID
    # etcdctl expects the lease id in hex without the '0x' prefix.
    hexid = hex(ID)[2:]
    etcdctl('put --lease=%s foo bar' % hexid)
    txn = client.Txn()
    r = txn.compare(txn.key('foo').lease == ID).commit()
    assert r.succeeded
assert r.succeeded
def test_txn_clone(client):
    # A clone must deep-copy the compare/success lists so that mutating one
    # transaction never leaks into the other.
    txn0 = client.Txn()
    txn0.If(txn0.key('foo').value < b'1')
    txn1 = txn0.clone()
    assert id(txn0._compare) != id(txn1._compare)
    txn0.Then(txn0.range('foo'))
    assert len(txn0._success) == 1
    assert len(txn1._success) == 0
| StarcoderdataPython |
4896838 | from anytree import NodeMixin, PostOrderIter, RenderTree, ContStyle
__all__ = ["ScheduleTree", "NodeSection", "NodeIteration", "NodeConditional",
"NodeExprs", "NodeHalo"]
class ScheduleTree(NodeMixin):

    """Base node of a schedule tree; subclasses flip exactly one flag."""

    is_Section = False
    is_Iteration = False
    is_Conditional = False
    is_Exprs = False
    is_Halo = False

    def __init__(self, parent=None):
        self.parent = parent

    def __repr__(self):
        return render(self)

    def visit(self):
        """Yield every node in the subtree rooted here, in post-order."""
        yield from PostOrderIter(self)

    @property
    def last(self):
        """The most recently attached child, or None when there is none."""
        if not self.children:
            return None
        return self.children[-1]
class NodeSection(ScheduleTree):

    # Marks a code section boundary in the schedule tree.
    is_Section = True

    @property
    def __repr_render__(self):
        # Fixed label used by render() for section nodes.
        return "<S>"
class NodeIteration(ScheduleTree):

    # Represents one loop level; wraps an iteration space (ispace) and
    # exposes convenience accessors over its first interval.
    is_Iteration = True

    def __init__(self, ispace, parent=None):
        super(NodeIteration, self).__init__(parent)
        self.ispace = ispace

    @property
    def interval(self):
        # The iteration interval this node stands for.
        return self.ispace.intervals[0]

    @property
    def dim(self):
        # The dimension being iterated over.
        return self.interval.dim

    @property
    def limits(self):
        return self.interval.limits

    @property
    def direction(self):
        # Iteration direction (e.g. forward/backward) for this dimension.
        return self.ispace.directions[self.dim]

    @property
    def sub_iterators(self):
        return self.ispace.sub_iterators.get(self.dim, [])

    @property
    def __repr_render__(self):
        # E.g. "x++" / "t--" depending on dimension and direction.
        return "%s%s" % (self.dim, self.direction)
class NodeConditional(ScheduleTree):

    # Guards its subtree with a condition expression.
    is_Conditional = True

    def __init__(self, guard, parent=None):
        super(NodeConditional, self).__init__(parent)
        self.guard = guard

    @property
    def __repr_render__(self):
        return "If"
class NodeExprs(ScheduleTree):

    """Leaf node carrying a group of expressions plus their metadata."""

    is_Exprs = True

    def __init__(self, exprs, ispace, dspace, shape, ops, traffic, parent=None):
        super(NodeExprs, self).__init__(parent)
        self.exprs = exprs
        self.ispace = ispace
        self.dspace = dspace
        self.shape = shape
        self.ops = ops
        self.traffic = traffic

    @property
    def __repr_render__(self):
        # Show at most two "Eq" placeholders, then elide the remainder.
        max_shown = 2
        total = len(self.exprs)
        body = ",".join(["Eq"] * min(total, max_shown))
        if total > max_shown:
            body += ",..."
        return "[" + body + "]"
class NodeHalo(ScheduleTree):

    # Represents a halo (ghost-region) exchange point.
    is_Halo = True

    # NOTE(review): unlike the sibling nodes, __init__ takes no ``parent``
    # and does not call super().__init__() -- confirm whether halo nodes are
    # always attached via insert() and this asymmetry is intentional.
    def __init__(self, halo_scheme):
        self.halo_scheme = halo_scheme

    @property
    def __repr_render__(self):
        return "<Halo>"
def insert(node, parent, children):
    """
    Insert ``node`` between ``parent`` and ``children``, where ``children``
    are a subset of nodes in ``parent.children``.
    """
    new_order = []
    # Walk a snapshot of the current children: the selected ones are
    # re-parented under ``node`` (which takes their place, once), the rest
    # keep their position.
    for child in list(parent.children):
        if child in children:
            child.parent = node
            if node not in new_order:
                new_order.append(node)
        else:
            new_order.append(child)
    parent.children = new_order
def render(stree):
    # ASCII-art rendering of the tree using each node's __repr_render__ label.
    return RenderTree(stree, style=ContStyle()).by_attr('__repr_render__')
| StarcoderdataPython |
3282763 | <reponame>toelen/beam<gh_stars>1-10
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper to render pipeline graph in IPython when running interactively.
This module is experimental. No backwards-compatibility guarantees.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from apache_beam.runners.interactive import pipeline_graph
def nice_str(o):
  """Return a short, DOT-safe repr of *o*.

  Double quotes become single quotes, backslashes become '|', characters
  outside printable ASCII become spaces, and anything longer than 35
  characters is truncated with an ellipsis.
  """
  text = repr(o).replace('"', "'").replace('\\', '|')
  text = re.sub(r'[^\x20-\x7F]', ' ', text)
  assert '"' not in text
  return text if len(text) <= 35 else text[:35] + '...'


def format_sample(contents, count=1000):
  """Render up to *count* elements of *contents* as a '{a, b, ...}' sample."""
  items = list(contents)
  pieces = [nice_str(element) for element in items[:count]]
  rendered = ', '.join(pieces)
  if len(items) > count:
    rendered += ', ...'
  assert '"' not in rendered
  return '{%s}' % rendered
class InteractivePipelineGraph(pipeline_graph.PipelineGraph):
  """Creates the DOT representation of an interactive pipeline. Thread-safe."""

  def __init__(self,
               pipeline_proto,
               required_transforms=None,
               referenced_pcollections=None,
               cached_pcollections=None):
    """Constructor of PipelineGraph.

    Examples:
      pipeline_graph = PipelineGraph(pipeline_proto)
      print(pipeline_graph.get_dot())
      pipeline_graph.display_graph()

    Args:
      pipeline_proto: (Pipeline proto) Pipeline to be rendered.
      required_transforms: (dict from str to PTransform proto) Mapping from
          transform ID to transforms that leads to visible results.
      referenced_pcollections: (dict from str to PCollection proto) PCollection
          ID mapped to PCollection referenced during pipeline execution.
      cached_pcollections: (set of str) A set of PCollection IDs of those whose
          cached results are used in the execution.
    """
    self._pipeline_proto = pipeline_proto
    self._required_transforms = required_transforms or {}
    self._referenced_pcollections = referenced_pcollections or {}
    self._cached_pcollections = cached_pcollections or set()
    # Gray defaults make nodes/edges that are irrelevant to the current
    # interactive execution recede visually; relevant ones are re-colored
    # by the update dicts computed below.
    super(InteractivePipelineGraph, self).__init__(
        pipeline_proto=pipeline_proto,
        default_vertex_attrs={'color': 'gray', 'fontcolor': 'gray'},
        default_edge_attrs={'color': 'gray'}
    )
    transform_updates, pcollection_updates = self._generate_graph_update_dicts()
    self._update_graph(transform_updates, pcollection_updates)

  def display_graph(self):
    """Displays graph via IPython or prints DOT if not possible."""
    try:
      from IPython.core import display # pylint: disable=import-error
      display.display(display.HTML(self._get_graph().create_svg())) # pylint: disable=protected-access
    except ImportError:
      # Not running inside IPython/Jupyter; fall back to plain DOT text.
      print(str(self._get_graph()))

  def update_pcollection_stats(self, pcollection_stats):
    """Updates PCollection stats.

    Args:
      pcollection_stats: (dict of dict) maps PCollection IDs to informations. In
          particular, we only care about the field 'sample' which should be a
          the PCollection result in as a list.
    """
    edge_dict = {}
    for pcoll_id, stats in pcollection_stats.items():
      attrs = {}
      pcoll_list = stats['sample']
      if pcoll_list:
        # Short label on the edge itself, fuller sample in the hover tooltip.
        attrs['label'] = format_sample(pcoll_list, 1)
        attrs['labeltooltip'] = format_sample(pcoll_list, 10)
      else:
        attrs['label'] = '?'
      edge_dict[pcoll_id] = attrs
    self._update_graph(edge_dict=edge_dict)

  def _generate_graph_update_dicts(self):
    """Generate updates specific to interactive pipeline.

    Returns:
      vertex_dict: (Dict[str, Dict[str, str]]) maps vertex name to attributes
      edge_dict: (Dict[str, Dict[str, str]]) maps vertex name to attributes
    """
    transforms = self._pipeline_proto.components.transforms
    transform_dict = {}  # maps PTransform IDs to properties
    pcoll_dict = {}  # maps PCollection IDs to properties

    # Recursively expand composite transforms down to their leaves.
    def leaf_transform_ids(parent_id):
      parent = transforms[parent_id]
      if parent.subtransforms:
        for child in parent.subtransforms:
          for leaf in leaf_transform_ids(child):
            yield leaf
      else:
        yield parent_id

    for transform_id, transform in transforms.items():
      if not super(
          InteractivePipelineGraph, self)._is_top_level_transform(transform):
        continue
      # A composite is "required" only if every one of its leaves is.
      transform_dict[transform.unique_name] = {
          'required':
              all(
                  leaf in self._required_transforms
                  for leaf in leaf_transform_ids(transform_id))
      }
      for pcoll_id in transform.outputs.values():
        pcoll_dict[pcoll_id] = {
            'cached': pcoll_id in self._cached_pcollections,
            'referenced': pcoll_id in self._referenced_pcollections
        }

    def vertex_properties_to_attributes(vertex):
      """Converts PCollection properties to DOT vertex attributes."""
      attrs = {}
      if 'leaf' in vertex:
        attrs['style'] = 'invis'
      elif vertex.get('required'):
        attrs['color'] = 'blue'
        attrs['fontcolor'] = 'blue'
      else:
        attrs['color'] = 'grey'
      return attrs

    def edge_properties_to_attributes(edge):
      """Converts PTransform properties to DOT edge attributes."""
      attrs = {}
      if edge.get('cached'):
        attrs['color'] = 'red'
      elif edge.get('referenced'):
        attrs['color'] = 'black'
      else:
        attrs['color'] = 'grey'
      return attrs

    vertex_dict = {}  # maps vertex names to attributes
    edge_dict = {}  # maps edge names to attributes
    for transform_name, transform_properties in transform_dict.items():
      vertex_dict[transform_name] = vertex_properties_to_attributes(
          transform_properties)
    for pcoll_id, pcoll_properties in pcoll_dict.items():
      edge_dict[pcoll_id] = edge_properties_to_attributes(pcoll_properties)
    return vertex_dict, edge_dict
| StarcoderdataPython |
8171862 | """
Constant variables shared among packages that constitute bedbase project
"""
import os
SCHEMA_DIRNAME = "schemas"
# Schemas ship inside the installed package, next to this module.
SCHEMAS_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), SCHEMA_DIRNAME)
BED_TABLE_SCHEMA = os.path.join(SCHEMAS_PATH, "bedfiles_schema.yaml")
BEDSET_TABLE_SCHEMA = os.path.join(SCHEMAS_PATH, "bedsets_schema.yaml")
DIST_TABLE_SCHEMA = os.path.join(SCHEMAS_PATH, "distance_schema.yaml")
PKG_NAME = "bbconf"
DOC_URL = "TBA" # TODO: add documentation URL once it's established
# Database table names.
BED_TABLE = "bedfiles"
BEDSET_TABLE = "bedsets"
BEDFILES_REL_KEY = "bedfiles"
BEDSETS_REL_KEY = "bedsets"
REL_TABLE = "bedset_bedfiles"
DIST_TABLE = "distances"
# Environment variables probed for a config file path.
CFG_ENV_VARS = ["BEDBASE"]
DB_DEFAULT_HOST = "localhost"
DB_DEFAULT_USER = "postgres"
# NOTE(review): "<PASSWORD>" looks like a redaction placeholder rather than
# a usable default credential -- confirm the intended default.
DB_DEFAULT_PASSWORD = "<PASSWORD>"
DB_DEFAULT_NAME = "postgres"
DB_DEFAULT_PORT = 5432
DB_DEFAULT_DIALECT = "postgresql"
SERVER_DEFAULT_PORT = 80
SERVER_DEFAULT_HOST = "0.0.0.0"
PATH_DEFAULT_REMOTE_URL_BASE = None
PIPESTATS_KEY = "__pipestats"
COMMON_DECL_BASE_KEY = "__common_declarative_base"
# Internal attributes that must never be exposed on config objects.
HIDDEN_ATTR_KEYS = [PIPESTATS_KEY, COMMON_DECL_BASE_KEY]
# bedset_bedfiles table definition
REL_BED_ID_KEY = "bedfile_id"
REL_BEDSET_ID_KEY = "bedset_id"
# config file constants
CFG_PATH_KEY = "path"
CFG_SERVER_KEY = "server"
CFG_DATABASE_KEY = "database"
CFG_NAME_KEY = "name"
CFG_HOST_KEY = "host"
CFG_PORT_KEY = "port"
CFG_PASSWORD_KEY = "password"
CFG_USER_KEY = "user"
CFG_BEDSTAT_DIR_KEY = "bedstat_dir"
CFG_BEDBUNCHER_DIR_KEY = "bedbuncher_dir"
CFG_PIPELINE_OUT_PTH_KEY = "pipeline_output_path"
CFG_REMOTE_KEY = "remotes"
# Fallback values used when a config file omits these sections.
DEFAULT_SECTION_VALUES = {
    CFG_DATABASE_KEY: {
        CFG_USER_KEY: DB_DEFAULT_USER,
        CFG_PASSWORD_KEY: DB_DEFAULT_PASSWORD,
        CFG_NAME_KEY: DB_DEFAULT_NAME,
        CFG_PORT_KEY: DB_DEFAULT_PORT,
        CFG_HOST_KEY: DB_DEFAULT_HOST,
    },
    CFG_SERVER_KEY: {
        CFG_HOST_KEY: SERVER_DEFAULT_HOST,
        CFG_PORT_KEY: SERVER_DEFAULT_PORT,
    },
}
# Names (not values) of the config-key constants, re-exported via __all__.
CFG_KEYS = [
    "CFG_PATH_KEY",
    "CFG_SERVER_KEY",
    "CFG_DATABASE_KEY",
    "CFG_HOST_KEY",
    "CFG_PORT_KEY",
    "CFG_NAME_KEY",
    "CFG_PASSWORD_KEY",
    "CFG_USER_KEY",
    "CFG_PIPELINE_OUT_PTH_KEY",
    "CFG_BEDSTAT_DIR_KEY",
    "CFG_BEDBUNCHER_DIR_KEY",
    "CFG_REMOTE_KEY",
]
__all__ = [
    "BED_TABLE",
    "BEDSET_TABLE",
    "REL_TABLE",
    "DIST_TABLE",
    "CFG_ENV_VARS",
    "DB_DEFAULT_HOST",
    "SERVER_DEFAULT_PORT",
    "SERVER_DEFAULT_HOST",
    "PKG_NAME",
    "DEFAULT_SECTION_VALUES",
    "HIDDEN_ATTR_KEYS",
    "REL_BED_ID_KEY",
    "REL_BEDSET_ID_KEY",
    "BED_TABLE_SCHEMA",
    "BEDSET_TABLE_SCHEMA",
    "DIST_TABLE_SCHEMA",
    "PIPESTATS_KEY",
    "COMMON_DECL_BASE_KEY",
    "BEDSETS_REL_KEY",
    "BEDFILES_REL_KEY",
] + CFG_KEYS
| StarcoderdataPython |
1939780 | <reponame>xudongmit/Statistics-Computation
import pandas as pd
import numpy as np
from scipy import stats
from numpy.linalg import inv
import matplotlib.pylab as plt
import os
# Notebook-style analysis script: bare expressions (e.g. ``lam``) only print
# in an interactive session; they are no-ops when run as a script.
os.chdir('e:/MIT4/6.439/pset1')
# 1.2
df_gamma = pd.read_csv('data/gamma-ray.csv')
df_gamma.head()
# MLE of the Poisson rate: total counts over total observation time.
lam = np.sum(df_gamma['count'])/np.sum(df_gamma['seconds'])
lam
df_gamma['count'] = pd.to_numeric(df_gamma['count'], downcast='signed')
df_gamma['count'].dtype
df_gamma['x!'] = df_gamma['count'].apply(np.math.factorial)
# 1.4
df_golub = pd.read_csv('data/golub_data/golub.csv', index_col=0)
df_cl = pd.read_csv('data/golub_data/golub_cl.csv' ,index_col=0)
df_names = pd.read_csv('data/golub_data/golub_gnames.csv', index_col=0)
df_golub.columns = list(range(1,39))
# 3051 genes of 18 patients
df_golub
df_cl
df_cl
(1-df_cl)['x'].sum()
# How many genes are associated with the different tumor types (meaning that their expression level differs between the two tumor types) using (i) the uncorrected p-values, (ii)the Holm-Bonferroni correction, and (iii) the Benjamini-Hochberg correction?
# split the data into ALL and AML
# df_cl marks tumor type per patient: 0 = ALL, 1 = AML.
col_ALL = (df_cl.loc[df_golub.columns] == 0).transpose().values.tolist()[0]
col_AML = (df_cl.loc[df_golub.columns] == 1).transpose().values.tolist()[0]
df_ALL = df_golub[df_golub.columns[col_ALL]]
df_AML = df_golub[df_golub.columns[col_AML]]
df_ALL
# hypothesis testing
from scipy.stats import ttest_ind
# test each gene
t_stat, p_value = [],[]
for i in range(df_golub.shape[0]):
    # t, p = ttest_ind( df_ALL.iloc[i], df_AML.iloc[i])
    # Welch's t-test (unequal variances) per gene across the two groups.
    t, p = ttest_ind( df_ALL.iloc[i], df_AML.iloc[i], equal_var=False )
    t_stat.append(t)
    p_value.append(p)
df_welch_ttest = pd.DataFrame(index = range(1, df_golub.shape[0]+1))
df_welch_ttest['t_stat'] = t_stat
df_welch_ttest['p_value'] = p_value
df_welch_ttest
df_welch_ttest['significant_uncorrected'] = df_welch_ttest['p_value']<0.05
pvals = df_welch_ttest['p_value']
# bonferroni correction
def holm_bonferroni(pvals, alpha=0.05):
    """Holm-Bonferroni step-down multiple-testing correction.

    Parameters
    ----------
    pvals : array-like of float
        Raw p-values, one per hypothesis.
    alpha : float
        Target family-wise error rate.

    Returns
    -------
    numpy.ndarray of bool
        Boolean mask aligned with ``pvals`` marking rejected hypotheses.

    The original compared the k-th smallest p-value (0-based k) against
    alpha/(m+1-k) instead of Holm's alpha/(m-k), and it counted failures
    over the whole sequence instead of stopping at the first one, which is
    required by the step-down procedure.
    """
    pvals = np.asarray(pvals)
    m = len(pvals)
    significant = np.zeros(m, dtype='bool')
    # Walk p-values from smallest to largest; reject while
    # p_(k) <= alpha/(m-k), stop at the first failure.
    for k, idx in enumerate(np.argsort(pvals)):
        if pvals[idx] > alpha / (m - k):
            break
        significant[idx] = True
    return significant
# Benjamini-Hochberg procedure
def BH(pvals, q=0.05):
    """Benjamini-Hochberg FDR control.

    Parameters
    ----------
    pvals : array-like of float
        Raw p-values, one per hypothesis.
    q : float
        Target false-discovery rate.

    Returns
    -------
    numpy.ndarray of bool
        Boolean mask aligned with ``pvals`` marking rejected hypotheses.

    The original shifted the argsort indices by +1 and then used them to
    index ``pvals`` directly, which both misaligned the lookups and ran
    past the end of the array (IndexError) on every input.
    """
    pvals = np.asarray(pvals)
    m = len(pvals)
    order = np.argsort(pvals)
    # BH rule: find the largest 1-based rank k with p_(k) <= k*q/m, then
    # reject the k smallest p-values.
    max_k = 0
    for k, idx in enumerate(order, start=1):
        if pvals[idx] <= k * q / m:
            max_k = k
    significant = np.zeros(m, dtype='bool')
    significant[order[:max_k]] = True
    return significant
# Apply both corrections and record which genes survive each one.
significant_pvals = holm_bonferroni(pvals, alpha=0.05)
df_welch_ttest['significant_pvals'] = significant_pvals
significant_pvals_BH = BH(pvals, q=0.05)
df_welch_ttest['significant_pvals_BH'] = significant_pvals_BH
df_welch_ttest
df_welch_ttest.sum()
# 1.6
syn_x = pd.read_csv('data/syn_X.csv',header=None)
syn_y = pd.read_csv('data/syn_Y.csv',header=None)
syn_x.columns = ['x1','x2']
# Prepend an all-ones intercept column.
syn_x['x0'] = 1
syn_x = syn_x[['x0','x1','x2']]
X = syn_x.values
Y = syn_y.values
X.shape
#beta = (X'X)^-1X'Y
# Closed-form OLS solution (normal equations) for comparison with GD below.
a = np.matmul(inv(np.matmul(np.transpose(X),X)),np.transpose(X))
beta = np.matmul(a,Y)
beta
def gradientDescent(X, Y, beta_0, alpha, t):
    """Run ``t`` steps of batch gradient descent for linear regression.

    X: (m, n) design matrix; Y: (m, 1) targets; beta_0: (n, 1) start point;
    alpha: learning rate; t: number of iterations.
    Returns (beta, cost) and, as a side effect, scatter-plots cost vs. step.
    """
    m, n = X.shape # m is number of cases, n is the number of variables
    cost = pd.DataFrame(np.zeros([t,2]))
    cost.columns = ['step','cost']
    beta = beta_0
    for i in range(t):
        # vectorized gradient: X'*(Y-X*beta)
        res = Y- np.matmul(X, beta)
        # NOTE(review): with calCost's 1/(2m) scaling the gradient step would
        # be alpha*(1/m)*X'res; the extra factor 2 here effectively doubles
        # the learning rate -- confirm this is intentional.
        beta = beta + 2 * alpha * (1/m) * np.matmul(np.transpose(X), res)
        # calculate the cost base on current beta
        cost['step'][i] = i
        cost['cost'][i] = calCost(X, Y, beta)
    cost.plot(kind = 'scatter', x = 'step',y = 'cost')
    return beta, cost
def calCost(X, Y, beta):
    """Mean-squared-error cost (1/2m) * ||Y - X*beta||^2, as a 1x1 matrix."""
    n_samples, _ = X.shape
    residual = Y - X @ beta
    # Vectorized cost: r' r scaled by 1/(2m).
    return (1/(2*n_samples)) * (residual.T @ residual)
# Sweep learning rates on the synthetic problem to observe convergence.
beta_0 = np.matrix('0.5; 0.5; 0.5')
alpha = 0.1
t = 50
beta, cost = gradientDescent(X, Y, beta_0,alpha, t )
beta_0 = np.matrix('0.5; 0.5; 0.5')
alpha = 0.01
t = 50
beta, cost = gradientDescent(X, Y, beta_0,alpha, t )
beta_0 = np.matrix('0.5; 0.5; 0.5')
alpha = 0.05
t = 50
beta, cost = gradientDescent(X, Y, beta_0,alpha, t )
beta_0 = np.matrix('0.5; 0.5; 0.5')
alpha = 0.8
t = 50
beta, cost = gradientDescent(X, Y, beta_0,alpha, t )
beta_0 = np.random.rand(3,1)
alpha = 0.05
t = 100
beta, cost = gradientDescent(X, Y, beta_0,alpha, t )
# Mortality dataset: exploratory plots, then regression via GD.
df_mort_0 = pd.read_csv('data/mortality.csv')
df_mort = df_mort_0
# check the scatterplot
df_mort.head()
# check the correlation matrix
import seaborn as sns
corr = df_mort.iloc[:,2:].corr()
corrplot = sns.heatmap(corr,xticklabels=corr.columns,yticklabels=corr.columns, linewidths=.01,cmap="YlGnBu")
fig = corrplot.get_figure()
fig.savefig('corrplot.png')
df_mort.columns
df_mort = df_mort.drop(['HC'], axis = 1) # drop the intercorrelated
sns.set(style="ticks")
pairplot = sns.pairplot(df_mort.iloc[:,1:], diag_kind="kde",markers="+",plot_kws=dict(s=50, edgecolor="b", linewidth=1),diag_kws=dict(shade=True))
pairplot.savefig('pairplot1.png')
# log-transformation
df_mort[['SO2','NOx','Pop']] = np.log(df_mort[['SO2','NOx','Pop']])
# Fixed: original had ``df_morthead()`` (missing dot), which raised
# NameError at runtime.
df_mort.head()
# normalize the Data
# Center on the mean and scale by the range (min-max style scaling).
data = df_mort.iloc[:,1:]
data = (data - data.mean())/(data.max() - data.min())
df_mort.iloc[:,1:] = data
df_mort.plot(kind = 'bar', x = 'City', y = 'Mortality',fontsize = 5)
plt.savefig('city.pdf')
df_mort[df_mort['Mortality'] == df_mort['Mortality'].max()]
df_mort[df_mort['Mortality'] == df_mort['Mortality'].min()]
# GD on raw data
Y_r = pd.DataFrame(df_mort_0['Mortality']).values
X_r = df_mort_0.iloc[:,2:].values
Y_r.shape
m, n = X_r.shape
beta_0 = np.random.rand(n,1)
beta_0
# Raw (unnormalized) features need a much smaller learning rate.
alpha = 0.00001
t = 1000
beta, cost = gradientDescent(X_r, Y_r, beta_0, alpha, t )
cost
Y= pd.DataFrame(df_mort['Mortality']).values
X = df_mort.iloc[:,2:].values
m, n = X.shape
beta_0 = np.random.rand(n,1)
alpha = 0.05
t = 2000
beta, cost = gradientDescent(X, Y, beta_0, alpha, t )
beta
#plot the residual
import scipy.stats as stats
import pylab
residuals = np.transpose(Y - np.matmul(X, beta))
res_list = sorted(residuals[0].tolist())
def q_q_plot(data):
    """Draw a (randomized) normal Q-Q plot of ``data`` and save it.

    Side effects: writes 'qqplot.png' and calls plt.show().
    """
    # NOTE(review): this samples N(0, 2) rather than using exact theoretical
    # quantiles, so the plot differs between runs -- confirm this is intended.
    norm=np.random.normal(0,2,len(data))
    norm.sort()
    plt.plot(norm,data,"o")
    # Fit and overlay a straight reference line through the points.
    z = np.polyfit(norm,data, 1)
    p = np.poly1d(z)
    plt.plot(norm,p(norm),"k--", linewidth=2)
    plt.title("Normal Q-Q plot", size=20)
    plt.xlabel("Theoretical quantiles", size=18)
    # NOTE(review): "Expreimental" is a typo in the rendered axis label
    # (runtime string, left untouched here).
    plt.ylabel("Expreimental quantiles", size=18)
    plt.tick_params(labelsize=16)
    plt.savefig('qqplot.png')
    plt.show()
q_q_plot(res_list)
#
| StarcoderdataPython |
1777277 | <gh_stars>0
import os
import time
from core.worker import worker, box
from ppadb.client import Client
from cv2 import cv2
import json
import sys
from tqdm import tqdm
import argparse
__author__ = "Paver(Zhen_Bo)"
os.system('cls')
def app_path():
    """Returns the base application path."""
    # Under PyInstaller the interpreter is "frozen" into the executable,
    # so the app lives next to sys.executable instead of this .py file.
    frozen = hasattr(sys, 'frozen')
    base = sys.executable if frozen else __file__
    return os.path.dirname(base)
root_path = app_path()
def setup():
    """Start the bundled adb server and return an interactively chosen device."""
    adb_path = "{}/adb/adb.exe".format(root_path)
    # Launch the adb daemon shipped with the bot, then clear its startup noise.
    os.system("{0} start-server".format(adb_path))
    os.system("cls")
    client = Client(host="127.0.0.1", port=5037)
    devices = client.devices()
    return select_devices(client, devices)
def get_template(version, folder=None):
    """Load all template images for ``version`` (optionally a sub-folder).

    Returns a dict mapping each file name (without the '.png' suffix) to the
    image as loaded by OpenCV.
    """
    templates = dict()
    templates_path = os.path.join(root_path, "templates")
    templates_path = os.path.join(templates_path, version)
    if folder is not None:
        templates_path = os.path.join(templates_path, folder)
    for name in os.listdir(templates_path):
        img = cv2.imread(os.path.join(templates_path, name))
        templates[name.replace('.png', '')] = img
    return templates
def select_devices(client, devices, error=0):
    """Interactively pick an ADB device from ``devices``.

    Prints the numbered device list and reads an index from stdin. ``error``
    selects the warning shown on a re-prompt (1 = index too large, 2 =
    malformed input). Entering "a" lets the user attach a new emulator by
    its 4-digit port; a negative index exits.
    """
    for i in range(len(devices)):
        print("\033[1;32m{}: {}\033[0m".format(i, devices[i].serial))
    if error == 1:
        print("\033[1;31m{}\033[0m".format("!!!輸入設備編號過大!!!"))
    elif error == 2:
        print("\033[1;31m{}\033[0m".format("!!!編號輸入錯誤,請在試一次!!!"))
    print("輸入a以新增設備")
    try:
        inputIndex = input("請輸入編號 [1 ~ {0}]:".format(len(devices)))
        value = int(inputIndex)
        if value < 0:
            # NOTE(review): SystemExit raised here is caught below and
            # re-raised as a plain Exception; behavior kept as-is.
            exit()
        elif value >= len(devices):
            # Valid indices are 0..len(devices)-1. The original used ``>``,
            # so entering exactly len(devices) raised IndexError and was
            # misreported as malformed input instead of "index too large".
            os.system('cls')
            return select_devices(client, devices, 1)
        else:
            return devices[value]
    except (KeyboardInterrupt, SystemExit):
        raise Exception("KeyboardInterrupt")
    except ValueError:
        # Non-numeric input: "a" attaches a new emulator by port; anything
        # else re-prompts with the malformed-input warning. (Narrowed from a
        # bare ``except:`` -- only int() can raise here after the fix above.)
        if inputIndex.lower() == "a":
            port = input("port號為?")
            if len(port) == 4 and port.isdigit():
                client.remote_connect("127.0.0.1", int(port))
                devices = client.devices()
                os.system('cls')
                return select_devices(client, devices)
            else:
                os.system('cls')
                return select_devices(client, devices, 2)
        else:
            os.system('cls')
            return select_devices(client, devices, 2)
def recovery_plane():
    """Interactively choose the AP-recovery item and its usage limit.

    Returns ``{"apple": <item key>, "count": <max uses>}``; empty strings
    mean "wait for natural AP regeneration".
    """
    # NOTE(review): key "0" (聖晶石/quartz) is accepted as input, but the
    # menu below only prints options 1-3 -- confirm whether that is intended.
    apple_dict = {"": "", "0": "quartz",
                  "1": "goldden", "2": "silver", "3": "copper"}
    apple_name = {"": "自然回體", "0": "聖晶石", "1": "金蘋果", "2": "銀蘋果", "3": "銅蘋果"}
    while True:
        os.system('cls')
        print("回體方案:")
        for i in range(1, 4):
            print("{} = {}".format(i, apple_name["{}".format(i)]))
        print("不輸入為自然回體")
        apple = input("請選擇方案: ")
        if apple not in apple_dict:
            continue
        else:
            apple = apple_dict[apple]
            if apple == "":
                # Natural regeneration needs no usage limit.
                return {"apple": "", "count": ""}
            else:
                break
    while True:
        count = input("請輸入使用上限: ")
        if count.isdigit():
            return {"apple": apple, "count": count}
def get_script():
    """Load UserData/script.json and let the user pick one script entry.

    Returns the chosen script dict (name/version/team/support/recover/battle).
    """
    with open('{}/UserData/script.json'.format(root_path), newline='', encoding='utf8') as jsonfile:
        data = json.load(jsonfile)
    print("請選擇要使用的腳本")
    for i in range(len(data['script'])):
        print("{}: {}".format(i, data['script'][i]['name']))
    number = input("請輸入編號:")
    # Valid indices are 0..len-1. The original accepted exactly len(script)
    # (``>`` instead of ``>=``), which raised IndexError on return; its
    # ``int(number) < 0`` test was unreachable because isdigit() never
    # matches a leading sign.
    while not number.isdigit() or int(number) >= len(data['script']):
        print("輸入編號錯誤,請重新輸入")
        number = input("請輸入編號:")
    return data['script'][int(number)]
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # NOTE(review): argparse ``type=bool`` treats ANY non-empty string as
    # True (bool("False") is True) -- these flags only behave as expected
    # when omitted entirely.
    parser.add_argument("--debug", type=bool, default=False)
    parser.add_argument("--boxmode", type=bool, default=False)
    parser.add_argument("--version", type=str, default="JP")
    args = parser.parse_args()
    debug = args.debug
    boxmode = args.boxmode
    version = args.version
    os.system('cls')
    dev = setup()
    os.system('cls')
    if boxmode == False:
        # Battle mode: run the selected script for a number of iterations.
        script_data = get_script()
        templates = get_template(script_data["version"])
        while True:
            times = input("請問要執行幾次:")
            if times == "":
                times = 999
                break
            elif times.isdigit():
                break
        apple = recovery_plane()
        # Progress bar width: one slot per battle instruction plus two.
        width = len(script_data['battle'])+2
        bar_format = "{{desc:}}{{percentage:3.0f}}%|{{bar:{}}}|".format(
            width)
        progress = tqdm(range(width), desc="腳本進度",
                        bar_format=bar_format, file=sys.stdout)
        bot = worker(root=root_path, device=dev, templates=templates, name=script_data["name"], times=times,
                     apple=apple['apple'], count=apple['count'], team=script_data['team'],
                     support=script_data['support'], recover=script_data['recover'], progress_bar=progress)
        print("\r\x1b[2K", end='')
        total_runtime = 0
        singel_runtime = 0
        if debug:
            # Debug REPL: each line typed is executed as a bot method call.
            # NOTE(review): exec() on raw user input -- acceptable only
            # because this is a local, operator-driven tool.
            while debug:
                shell = input("\r\x1b[2K指令: ")
                exec("bot.{}".format(shell))
        else:
            while True:
                tstart = time.time()
                bot.pbar.reset()
                # Instructions are stored as method-call strings in the
                # script file; start_battle() gets the runtimes injected.
                for instruct in script_data["battle"]:
                    if instruct == "start_battle()":
                        instruct = "start_battle({},{})".format(
                            round(total_runtime, 1), round(singel_runtime, 1))
                    exec("bot.{}".format(instruct))
                    time.sleep(1)
                # for instruct in progress:
                #     if instruct == "start_battle()":
                #         instruct = "start_battle({},{})".format(
                #             round(total_runtime, 1), round(singel_runtime, 1))
                #     print("\r", end='')
                #     exec("bot.{}".format(instruct))
                #     time.sleep(1)
                tend = time.time()
                singel_runtime = int(tend)-int(tstart)
                total_runtime += singel_runtime
    else:
        # Box-gacha mode: repeatedly draw lottery boxes.
        game = {"0": "JP", "1": "TW"}
        print("\033[31m Scrpit made by\033[0m\033[41;37mPaver\033[0m,github:\033[37;34mhttps://github.com/Zhen-Bo\033[0m")
        print(
            "\033[31m此腳本作者為\033[0m\033[41;37mPaver\033[0m,github頁面:\033[37;34mhttps://github.com/Zhen-Bo\033[0m")
        print("\033[31m請勿使用於商業用途,此程式包含MIT授權\033[0m")
        while True:
            print("請問遊戲版本?")
            print("輸入0 = 日版")
            print("輸入1 = 台版")
            version = input("請輸入版本(0/1): ")
            if version in ["0", "1"]:
                break
        bot = box(device=dev, templates=get_template(
            "{}".format(game[version]), folder="box"))
        while True:
            os.system('cls')
            print(
                "\033[31m Scrpit made by\033[0m\033[41;37mPaver\033[0m,github:\033[37;34mhttps://github.com/Zhen-Bo\033[0m")
            print(
                "\033[31m此腳本作者為\033[0m\033[41;37mPaver\033[0m,github頁面:\033[37;34mhttps://github.com/Zhen-Bo\033[0m")
            print("\033[31m請勿使用於商業用途,此程式包含MIT授權\033[0m")
            i = 0
            times = int(input("請問要抽幾箱: "))
            while i <= times:
                i += 1
                if i > times:
                    break
                elif i > 1:
                    # Between boxes: confirm the reset dialog before drawing.
                    bot.tap(bot.result[1])
                    bot.standby("execute")
                    bot.standby("close")
                print("目前在抽第 {} 箱".format(i))
                status = bot.box_gacha()
                if status == True:
                    break
            os.system('PAUSE')
| StarcoderdataPython |
3552173 | <gh_stars>1-10
from rx.core import ObservableBase, AnonymousObservable
from rx.internal.basic import identity, default_comparer
def distinct_until_changed(self, key_mapper=None, comparer=None) -> ObservableBase:
    """Returns an observable sequence that contains only distinct
    contiguous elements according to the key_mapper and the comparer.

    1 - obs = observable.distinct_until_changed();
    2 - obs = observable.distinct_until_changed(lambda x: x.id)
    3 - obs = observable.distinct_until_changed(lambda x: x.id,
                                                lambda x, y: x == y)

    key_mapper -- [Optional] A function to compute the comparison key
        for each element. If not provided, it projects the value.
    comparer -- [Optional] Equality comparer for computed key values. If
        not provided, defaults to an equality comparer function.

    Return an observable sequence only containing the distinct
    contiguous elements, based on a computed key value, from the source
    sequence.
    """
    source = self
    key_mapper = key_mapper or identity
    comparer = comparer or default_comparer

    def subscribe(observer, scheduler=None):
        # State shared across on_next calls: whether we have seen any key
        # yet, and the key of the last emitted element.
        seen_key = False
        last_key = None

        def on_next(value):
            nonlocal seen_key, last_key
            try:
                key = key_mapper(value)
            except Exception as exception:
                observer.on_error(exception)
                return

            keys_equal = False
            if seen_key:
                try:
                    keys_equal = comparer(last_key, key)
                except Exception as exception:
                    observer.on_error(exception)
                    return

            # Emit the first element unconditionally, and every element
            # whose key differs from its predecessor's.
            if not seen_key or not keys_equal:
                seen_key = True
                last_key = key
                observer.on_next(value)

        return source.subscribe_(on_next, observer.on_error, observer.on_completed)
    return AnonymousObservable(subscribe)
| StarcoderdataPython |
290019 | """ HelpMaker: Builds help message from comments in the code.
"""
"""
There are two types of help messages:
the general help,
and the specific helps to corresponding functions/commands.
The general help message is the first comment in the .py file,
wrapped by the triple-double-quotes.
The general help describes what the .py file is about, or gives a brief
introduction of the module.
A specific help message is wrapped by the triple-double-quotes, and has only
one word (no space in between) in the first line of the comment.
A specific help explains/illustrates the usage of a function/command.
The specific helps are stored in an internal dictionary. The only-one-word
in the first line of comment serves as the key to the help message in the
internal dictionary.
"""
import sys
import re
class Phase:
    """Parser states for HelpMaker's line-by-line scan."""
    # No general message collected yet; still scanning for the first comment.
    HAVE_NOTHING = 0
    # General message captured; now collecting specific (keyed) messages.
    HAVE_GENERAL_MSG = 1
    # NOTE(review): never assigned by the parser below -- confirm intended use.
    HAVE_SPECIFIC_MSG = 2
class HelpMaker:
    """Parses triple-double-quoted comments of a .py file into help messages.

    The first comment becomes the general help; each comment whose first
    line holds a single word becomes a specific help keyed by that word.
    """

    def __init__(self, filename):
        """Scan ``filename`` and prepare its comments as help messages.

        On IOError a message is printed and the instance is left without
        any parsed help (reported methods may then raise AttributeError).
        """
        try:
            with open(filename) as pyfile:
                self._comment_head = re.compile(r"""^\s*\"{3}""")
                self._key_word = re.compile(r"""^\s*\"{3}\s*\S{1,}\s*\Z""")
                self._phase = Phase.HAVE_NOTHING
                self._is_comment = False
                self._general_help = ''
                self._specific_help = dict()
                self._tmp_key = ''
                self._tmp_msg = ''
                # Fixed: the original initialized ``_tmp_index`` (a typo),
                # leaving the ``_tmp_indent`` field used by the filters
                # unset until the first comment head was seen.
                self._tmp_indent = 0
                for line in pyfile:
                    if self._phase is Phase.HAVE_NOTHING:
                        self._general_help_filter(line)
                    elif self._phase is Phase.HAVE_GENERAL_MSG:
                        self._specific_help_filter(line)
        except IOError:
            print('Could not find the file: %s'%filename)

    def available_help(self):
        """Generate all the items with help messages."""
        return self._specific_help.keys()

    def show_help(self, showitem = None):
        """Print a help message.

        Without ``showitem`` the general help is shown; otherwise the
        specific help for that item is printed, if it exists.
        """
        if showitem is None:
            print(self._general_help)
        else:
            try:
                print(self._specific_help[showitem])
            except KeyError:
                print('No help on %s available'%showitem)

    def _general_help_filter(self, line):
        """Accumulate the general help from the file's first comment."""
        if self._comment_head.search(line) is not None:
            # Remember where the comment body starts so continuation lines
            # can be de-indented by the same amount.
            self._tmp_indent = line.find('"') + 3
            self._is_comment = not self._is_comment
        if self._is_comment:
            self._general_help += line[self._tmp_indent:]
        # Fixed: the original used ``is not ''`` (identity comparison with a
        # literal, a SyntaxWarning on CPython >= 3.8); truthiness is intended.
        if self._general_help != '' and not self._is_comment:
            self._phase = Phase.HAVE_GENERAL_MSG

    def _specific_help_filter(self, line):
        """Collect a specific help message keyed by its one-word first line."""
        if self._key_word.search(line) is not None:
            # A '""" word' line opens a specific-help comment.
            self._tmp_indent = line.find('"') + 3
            self._tmp_key = line[self._tmp_indent:].strip()
            self._is_comment = True
            return
        if self._is_comment:
            if self._comment_head.search(line) is not None:
                # Closing quotes: store the accumulated message.
                self._specific_help[self._tmp_key] = self._tmp_msg
                self._tmp_msg = ''
                self._is_comment = False
            else:
                self._tmp_msg += line[self._tmp_indent:]
if __name__ == '__main__':
    # Self-demo: parse this very script and show its own help messages.
    hm = HelpMaker(sys.argv[0])
    print('General help of HelpMaker:')
    hm.show_help()
    print("Functions with help message:")
    for item in hm.available_help():
        print(item)
    print('')
    print('Specific help of HelpMaker:show_help():')
    hm.show_help('show_help')
    print('Specific help of a nonexist function:')
    hm.show_help('nonsense')
| StarcoderdataPython |
8005128 | <reponame>buildbuddy-io/rules_xcodeproj
"""Constants for fixture declarations."""
_FIXTURE_BASENAMES = [
"cc",
"command_line",
"generator",
"tvos_app",
]
_FIXTURE_SUFFIXES = ["bwx", "bwb"]
_FIXTURE_PACKAGES = ["//test/fixtures/{}".format(b) for b in _FIXTURE_BASENAMES]
FIXTURE_TARGETS = [
"{}:xcodeproj_{}".format(package, suffix)
for package in _FIXTURE_PACKAGES
for suffix in _FIXTURE_SUFFIXES
]
| StarcoderdataPython |
148591 | <gh_stars>0
import unittest
import sys
sys.path.insert(1, "..")
from aws_api_mock.RDS_Data_Generator import RDS_Data_Generator
class test_RDS_Data_Generator(unittest.TestCase):
    """Unit tests for the mock RDS describe-db-instances payload generator."""

    def setUp(self):
        self.rds_data_generator = RDS_Data_Generator()

    def test_generate_return_type(self):
        """generate() must yield a plain dict."""
        payload = self.rds_data_generator.generate()
        self.assertTrue(isinstance(payload, dict))

    def test_generate_securities_groups_count_default(self):
        """By default a single VPC security group is attached."""
        payload = self.rds_data_generator.generate()
        groups = payload["DBInstances"][0]["VpcSecurityGroups"]
        self.assertEqual(1, len(groups))

    def test_generate_instances_count_default(self):
        """By default exactly one DB instance is described."""
        payload = self.rds_data_generator.generate()
        self.assertEqual(1, len(payload["DBInstances"]))

    def test_set_security_group_id(self):
        """A forced security-group id must show up in the generated data."""
        forced_id = "sg-123412abcd123"
        self.rds_data_generator.set_security_group_id(forced_id)
        payload = self.rds_data_generator.generate()
        reported_id = payload["DBInstances"][0]["VpcSecurityGroups"][0]["VpcSecurityGroupId"]
        self.assertEqual(forced_id, reported_id)
| StarcoderdataPython |
6402736 | import datetime
import enum
import pydantic
import schemas.financing_statement
import schemas.payment
class SearchType(enum.Enum):
    """Kinds of registry search; each member's value mirrors its name so raw
    strings can be validated with ``SearchType[value]`` (see SearchBase)."""
    AIRCRAFT_DOT = 'AIRCRAFT_DOT'
    BUSINESS_DEBTOR = 'BUSINESS_DEBTOR'
    INDIVIDUAL_DEBTOR = 'INDIVIDUAL_DEBTOR'
    MHR_NUMBER = 'MHR_NUMBER'
    REGISTRATION_NUMBER = 'REGISTRATION_NUMBER'
    SERIAL_NUMBER = 'SERIAL_NUMBER'
class SearchResultType(enum.Enum):
    """Exactness of a search match; boolean values distinguish exact hits
    from merely similar ones."""
    EXACT = True
    SIMILAR = False
class SearchBase(pydantic.BaseModel): # pylint:disable=no-member
    """Request payload for creating a search: a type plus its criteria dict.

    The criteria layout depends on the type: individual-debtor searches
    need a structured ``debtorName``; every other type needs a ``value``.
    """
    type: str
    criteria: dict
    @pydantic.validator('type')
    def type_must_match_search_type(cls, search_type): # pylint:disable=no-self-argument # noqa: N805
        """Reject any type string that is not a SearchType member name."""
        try:
            # Name lookup raises KeyError for unknown members.
            SearchType[search_type]
        except KeyError:
            raise ValueError('type must be one of: {}'.format(list(map(lambda st: st.name, SearchType))))
        return search_type
    @pydantic.validator('criteria')
    def criteria_must_match_format_for_type(cls, criteria, values): # pylint:disable=no-self-argument # noqa: N805
        """Check the criteria structure against the already-validated type.

        If 'type' failed its own validation it is absent from *values*,
        so the criteria check is skipped rather than double-reported.
        """
        if 'type' not in values:
            return criteria
        if values['type'] == SearchType.INDIVIDUAL_DEBTOR.value:
            # Individual-debtor searches need a structured name.
            if 'debtorName' not in criteria:
                raise ValueError('"debtorName" is required in criteria')
            elif 'last' not in criteria['debtorName']:
                raise ValueError('"last" is required in criteria.debtorName')
            elif 'first' not in criteria['debtorName']:
                raise ValueError('"first" is required in criteria.debtorName')
        elif 'value' not in criteria:
            raise ValueError('"value" is required in criteria')
        return criteria
    class Config:
        # Populate from ORM rows where the column is named 'type_code'.
        orm_mode = True
        allow_population_by_alias = True
        fields = {
            'type': {'alias': 'type_code'}
        }
class Search(SearchBase): # pylint:disable=no-member
    """A persisted search: the base payload plus server-assigned fields
    (id, creation timestamp and optional payment record)."""
    id: int
    searchDateTime: datetime.datetime
    payment: schemas.payment.Payment = None
    class Config:
        orm_mode = True
        allow_population_by_alias = True
        fields = {
            # ORM column 'creation_date_time' maps onto searchDateTime.
            'searchDateTime': {'alias': 'creation_date_time'}
        }
        json_encoders = {
            # Serialize timestamps as ISO-8601 truncated to whole seconds.
            datetime.datetime: lambda dt: dt.isoformat(timespec='seconds')
        }
class SearchResult(pydantic.BaseModel): # pylint:disable=no-member
    """One hit of a search: its match exactness plus the matched
    financing statement (if any)."""
    type: str
    financingStatement: schemas.financing_statement.FinancingStatement = None
    @pydantic.validator('type')
    def type_must_match_search_result_type(cls, value): # pylint:disable=no-self-argument # noqa: N805
        """Reject any type string that is not a SearchResultType member name."""
        try:
            SearchResultType[value]
        except KeyError:
            raise ValueError('type must be one of: {}'.format(list(map(lambda st: st.name, SearchResultType))))
        return value
    class Config:
        json_encoders = {
            # Serialize timestamps as ISO-8601 truncated to whole seconds.
            datetime.datetime: lambda dt: dt.isoformat(timespec='seconds')
        }
| StarcoderdataPython |
1635740 | # Generated by Django 3.1.3 on 2021-08-27 05:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch the track_history JSON columns to Django's native
    models.JSONField (available since Django 3.1)."""

    dependencies = [
        ("track_history", "0002_auto_20200524_1009"),
    ]
    operations = [
        migrations.AlterField(
            model_name="trackhistoryfullsnapshot",
            name="history_data",
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name="trackhistoryrecord",
            name="changes",
            field=models.JSONField(default=dict),
        ),
    ]
| StarcoderdataPython |
5139236 | <reponame>usc-isi-i2/datamart-upload
from abc import ABC, abstractmethod
import typing
from etk.document import Document
from typing import TypeVar
DatasetID = TypeVar('DatasetID') # a string indicate the dataset id
class PreParsedResult(object):
    """Container pairing preprocessed content rows with optional metadata."""

    def __init__(self, content: list, metadata: typing.List[dict] = None):
        # Both values are stored as-is and exposed read-only via properties.
        self._content = content
        self._metadata = metadata

    @property
    def content(self):
        """The preprocessed data rows."""
        return self._content

    @property
    def metadata(self):
        """Per-item metadata dicts, or None when none was supplied."""
        return self._metadata
class ParserBase(ABC):
    """Abstract base class for dataset parsers; subclass for new formats.
    """
    @abstractmethod
    def load_and_preprocess(self, **kwargs) -> PreParsedResult:
        """
        Load the raw input and return it as a PreParsedResult.
        Must be implemented by subclasses.
        """
        pass
    def model_data(self, doc: Document, inputs: PreParsedResult, **kwargs) -> typing.Union[Document, DatasetID]:
        """
        Model the preprocessed *inputs* onto *doc* and return the updated
        document (or a dataset id).

        NOTE(review): unlike load_and_preprocess this is not decorated with
        @abstractmethod — possibly an oversight, possibly an optional hook;
        confirm before changing.
        """
        pass
| StarcoderdataPython |
5084130 | <filename>house-finder.py
#!/usr/bin/python3
# Author: <NAME>, <NAME>
from lxml import html
import argparse
import csv
import datetime
import json
import requests
import os
import sys
import unicodedata
import webbrowser
estate_status = {'n': 'new', 'a': 'available', 'd': 'discarded', 't': 'tainted', 'r': 'removed'}
def format_name(name):
    """Slugify *name*: lower-case, spaces to dashes, accents stripped.

    Returns None for an empty/None *name* (argparse defaults pass through
    unchanged).
    """
    if not name:
        return None
    decomposed = unicodedata.normalize('NFKD', name.replace(' ', '-').lower())
    # Drop the combining marks left over from the NFKD decomposition.
    return ''.join(ch for ch in decomposed if not unicodedata.combining(ch))
def format_status(category):
    """Translate a CLI category keyword into the list of matching states.

    Unknown keywords are returned unchanged (argparse restricts the
    choices, so this only happens for internal callers).
    """
    mapping = {
        'todas': [estate_status['n'], estate_status['a'], estate_status['r']],
        'nuevas': [estate_status['n']],
        'disponibles': [estate_status['n'], estate_status['a']],
        'removidas': [estate_status['r'], estate_status['d']],
        'descartadas': [estate_status['d']],
    }
    return mapping.get(category, category)
def init(arguments):
    """Parse the CLI *arguments* and derive the search URL and search key.

    Besides the parsed options, two synthesized attributes are attached to
    the returned namespace: ``initial_link`` (the classifieds search URL)
    and ``search`` (a stable key identifying this search in the history
    file).  When no subcommand is given, 'listar disponibles' is assumed.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): the -f help text looks copy-pasted from -d; it should
    # describe the history file, not a price — confirm before changing.
    parser.add_argument('-f', '--propiedades', default='avisos.json', help='Precio mínimo de la propiedad')
    parser.add_argument('-o', '--operacion', default='alquileres', choices=['alquileres', 'ventas'],
                        help='Especifica el tipo de operación')
    parser.add_argument('-d', '--precio_desde', type=int, default=35000, help='Precio mínimo de la propiedad')
    parser.add_argument('-u', '--precio_hasta', type=int, default=70000, help='Precio máximo de la propiedad')
    parser.add_argument('-m', '--moneda', default='pesos', choices=['pesos', 'dolares'],
                        help='Moneda en que se ofrece la propiedad')
    # 'canditad' is a typo but also the public flag name; kept for
    # backward compatibility.
    parser.add_argument('-cd', '--canditad_de_dormitorios', type=int, default=3,
                        help='Cantidad de dormitorios de la propiedad')
    parser.add_argument('-p', '--provincia', default='cordoba', type=format_name,
                        help='Provincia en la que se encuentra la propiedad')
    parser.add_argument('-B', '--barrio', default=None, type=format_name,
                        help='Barrio en la que se encuentra la propiedad')
    parser.add_argument('-c', '--ciudad', default=None, type=format_name,
                        help='Ciudad en la que se encuentra la propiedad')
    parser.add_argument('-b', '--tipo_de_barrio', default=None,
                        choices=['abierto', 'country', 'cerrado', 'con-seguridad'],
                        help='Tipo de barrio')
    parser.add_argument('-t', '--tipo_de_unidad', default=None,
                        choices=['casa', 'duplex', 'triplex', 'chalet', 'casa-quinta', 'Cabana', 'prefabricada'],
                        help='Tipo de unidad')
    parser.add_argument('-w', '--browser', action='store_true', help='Abrir las propiedades en un browser')
    # Internal attributes filled in below; hidden from --help.
    parser.add_argument('--initial_link', default=None, help=argparse.SUPPRESS)
    parser.add_argument('--search', default=None, help=argparse.SUPPRESS)
    subparsers = parser.add_subparsers(help='Acciones', dest='command')
    show_parser = subparsers.add_parser("listar")
    show_parser.add_argument('categoria', choices=['todas', 'nuevas', 'disponibles', 'removidas', 'descartadas'],
                             default='nuevas', help='Busca y lista propiedades basandose en la caracteristica')
    remove_parser = subparsers.add_parser("quitar")
    remove_parser.add_argument('id', type=int,
                               help='Busca el identificador y marca la propiedad como borrada')
    args = parser.parse_args(arguments)
    if args.command is None:
        # No subcommand: default to listing the available properties.
        args.categoria = format_status('disponibles')
        args.command = 'listar'
    elif args.command == 'listar':
        if args.categoria is not None:
            args.categoria = format_status(args.categoria)
    # Build the classifieds search URL and the history key for this search.
    initial_link = f'https://clasificados.lavoz.com.ar/inmuebles/casas/alquileres?list=true' \
                   f'&provincia={args.provincia}&precio-desde={args.precio_desde}&precio-hasta={args.precio_hasta}' \
                   f'&moneda={args.moneda}&operacion={args.operacion}' \
                   f'&cantidad-de-dormitorios%5B1%5D={args.canditad_de_dormitorios}-dormitorios'
    search = f'{args.provincia}_{args.precio_desde}_{args.precio_hasta}_{args.moneda}_{args.operacion}_' \
             f'{args.canditad_de_dormitorios}'
    # Optional filters extend both the URL and the history key.
    if args.ciudad:
        initial_link += f'&ciudad={args.ciudad}'
        search += f'_{args.ciudad}'
    if args.tipo_de_barrio:
        initial_link += f'&tipo-de-barrio={args.tipo_de_barrio}'
        search += f'_{args.tipo_de_barrio}'
    if args.barrio:
        initial_link += f'&barrio[1]={args.barrio}'
        search += f'_{args.barrio}'
    if args.tipo_de_unidad:
        initial_link += f'&tipo-de-unidad={args.tipo_de_unidad}'
        search += f'_{args.tipo_de_unidad}'
    args.search = search
    args.initial_link = initial_link
    return args
def get_announcement(announcement):
    """Extract one listing from an announcement DOM node.

    Returns a dict with id/description/nbhd/price/link/detail, or an empty
    dict when the node carries no usable link.  The listing id is the
    sixth path segment of the link.
    """
    def _first(query, fallback):
        # First xpath hit, stripped; *fallback* when the node is missing.
        found = announcement.xpath(query)
        return found[0].strip() if found else fallback

    hrefs = announcement.xpath('div[2]/div/a/@href')
    if not hrefs:
        return {}
    link = hrefs[0].strip()
    key = link.split('/')[5]
    if not key:
        return {}
    return {'id': key,
            'description': _first('div[2]/div/a/div/text()', "***falta la descripcion***"),
            'nbhd': _first('div[2]/div/div[3]/span[2]/text()', "no especificado"),
            'price': _first('div[2]/div/div[1]/div[1]/p/text()', "consultar"),
            'link': link,
            'detail': _first('div[2]/div/div[4]/text()', "Sin informacion adicional")}
def load_history(database):
    """Return the saved searches mapping from *database*.

    An empty dict is returned when the file does not exist yet (first run).
    """
    if not os.path.exists(database):
        return {}
    with open(database, 'r', encoding='utf8') as json_file:
        return json.load(json_file)
def save_data(history, data, store, search):
    """Persist *data* under key *search* in the JSON history file *store*
    and mirror it as a spreadsheet-friendly '<search>.csv' in the cwd."""
    history[search] = data
    with open(store, 'w', encoding='utf8') as fh:
        json.dump(history, fh, ensure_ascii=False)
    with open(f'{search}.csv', 'w', encoding='utf8') as fh:
        writer = csv.writer(fh)
        # First row names the search; the header is derived from the
        # field names of the first record.
        writer.writerow([search])
        for index, (estate_id, info) in enumerate(data.items()):
            if index == 0:
                writer.writerow(['id'] + list(info.keys()))
            writer.writerow([estate_id] + list(info.values()))
def taint_properties(properties):
    """Mark every active listing as 'tainted' ahead of a fresh crawl.

    Discarded and removed listings keep their state; everything else is
    flagged so listings that were not re-encountered can be detected
    afterwards.  Mutates and returns *properties*.
    """
    untouched = (estate_status['d'], estate_status['r'])
    for info in properties.values():
        if info['status'] not in untouched:
            info['status'] = estate_status['t']
    return properties
def remove_tainted(properties):
    """Turn listings that stayed 'tainted' after a crawl into 'removed'.

    Prints a notice for each listing that disappeared from the site and
    returns the updated (shallow-copied) mapping.
    """
    updated = properties.copy()
    for estate, info in properties.items():
        if info['status'] == estate_status['t']:
            print(f'La propiedad {estate} no se encuentra mas en la lista')
            print(info)
            updated[estate]['status'] = estate_status['r']
    return updated
def show_estate(items_shown, headline, description, url, browser):
    """Print one listing; optionally open its page in the web browser.

    The first listing opens a new browser window, subsequent ones open
    tabs; when *browser* is false the URL is printed instead.
    """
    print(f'{items_shown}- {headline}')
    print(f'\t{description}')
    if not browser:
        print(f'\t{url}')
    elif items_shown == 1:
        webbrowser.open_new(url)
    else:
        webbrowser.open_new_tab(url)
def get_page_estates(announces, properties):
    """Merge the listings found on one result page into *properties*.

    Unknown listings are recorded as 'new' with today's date; previously
    seen ones are refreshed to 'available' unless the user discarded them.
    Mutates and returns *properties*.
    """
    for node in announces:
        estate = get_announcement(node)
        if not estate:
            continue
        key = estate['id']
        record = properties.get(key)
        if record is None:
            properties[key] = {'description': estate['description'],
                               'detail': estate['detail'],
                               'nbhd': estate['nbhd'],
                               'price': estate['price'],
                               'link': estate['link'],
                               'status': estate_status['n'],
                               'date': f'{datetime.date.today()}'
                               }
        elif record['status'] != estate_status['d']:
            record['status'] = estate_status['a']
    return properties
def display_estates(properties, shown, estate_type, operation, browser):
    """Print every listing whose status is in *shown*.

    Returns the number of printed listings plus one (the next counter
    value), matching the original behaviour.
    """
    counter = 1
    kind = estate_type if estate_type else 'propiedad'
    for key, info in properties.items():
        if info['status'] not in shown:
            continue
        headline = (f'Aviso {key} de {kind} en {operation} en barrio '
                    f'{info["nbhd"]} a {info["price"]}')
        show_estate(counter, headline, info["description"], info["link"], browser)
        counter += 1
    return counter
def get_content(base_link, page_number):
    """Fetch one result page and return its parsed HTML tree.

    Page 1 uses *base_link* as-is; later pages get a '&page=N' suffix.
    """
    suffix = f"&page={page_number}" if page_number > 1 else ""
    response = requests.get(base_link + suffix)
    return html.fromstring(response.content)
def find_last_page_number(content):
    """Read the highest page number from the pagination bar (1 if absent).

    The last pagination link ends in '...page=N'; N is the final page.
    """
    links = content.xpath('/html/body/div[3]/div/div[2]/div/div[2]/div[2]/div[4]/nav/div/ul/*/a/@href')
    if not links:
        return 1
    return int(links[-1].split("=")[-1])
def find_properties(params, properties):
    """Crawl every result page of the search and refresh *properties*.

    Existing entries are tainted first so that listings no longer online
    can be recognised afterwards.  Returns the updated mapping.
    """
    properties = taint_properties(properties)
    single_page_xpath = '/html/body/div[3]/div/div[2]/div/div[2]/div/div[2]/*'
    multi_page_xpath = '/html/body/div[3]/div/div[2]/div/div[2]/div[2]/div[2]/*'
    page = 1
    total_pages = 1
    while page <= total_pages:
        content = get_content(params.initial_link, page)
        # NOTE(review): the multi-page layout is only applied on page 1;
        # later pages fall back to the single-page xpath.  Preserved
        # as-is from the original — confirm against the live site.
        announces_location = single_page_xpath
        if page == 1:
            total_pages = find_last_page_number(content)
            if total_pages > 1:
                announces_location = multi_page_xpath
        get_page_estates(content.xpath(announces_location), properties)
        page += 1
    return properties
def main(arguments):
    """Entry point: list or discard properties according to the CLI args.

    Loads the history for this search, runs the requested subcommand and
    saves the (possibly updated) history back.  Returns None, which
    sys.exit treats as exit status 0.
    """
    params = init(arguments)
    history = load_history(params.propiedades)
    properties = history.get(params.search, {})
    if params.command == 'listar':
        # Only crawl the site when 'new' listings were requested.
        if estate_status['n'] in params.categoria:
            properties = find_properties(params, properties)
        display_estates(properties, params.categoria, params.tipo_de_unidad, params.operacion, params.browser)
    # BUG FIX: the subcommand is registered as "quitar" in init(); the
    # original compared against 'remover', so discarding never ran.
    elif params.command == 'quitar':
        # Guard against unknown ids instead of raising KeyError.
        if str(params.id) in properties:
            properties[str(params.id)]['status'] = estate_status['d']
    save_data(history, remove_tainted(properties), params.propiedades, params.search)
if __name__ == '__main__':
    # Pass only the actual CLI arguments (drop the script name).
    sys.exit(main(sys.argv[1:]))
| StarcoderdataPython |
1893879 | <gh_stars>10-100
import argparse
import os
import re
import sys
from sqf.parser import parse
import sqf.analyzer
from sqf.exceptions import SQFParserError, SQFWarning
class Writer:
    """Minimal write-only sink that records every message in memory.

    Mimics the write() interface of a text stream so analyze() can target
    either stdout or this buffer interchangeably.
    """
    def __init__(self):
        # Messages are kept in the order they were written.
        self.strings = []

    def write(self, message):
        """Record *message* instead of emitting it anywhere."""
        self.strings.append(message)
def analyze(code, writer, exceptions_list):
    """Parse and statically analyze SQF *code*, reporting issues to *writer*.

    Every issue is written as '[line,col]:message' (column is 0-based) and
    appended to *exceptions_list*, which is mutated in place.  A parse
    failure is itself the single reported issue.
    """
    def _report(issue):
        writer.write('[%d,%d]:%s\n' % (issue.position[0], issue.position[1] - 1, issue.message))

    try:
        tree = parse(code)
    except SQFParserError as parse_error:
        _report(parse_error)
        exceptions_list += [parse_error]
        return
    issues = sqf.analyzer.analyze(tree).exceptions
    for issue in issues:
        _report(issue)
    exceptions_list += issues
def analyze_dir(directory, writer, exceptions_list, exclude):
    """
    Analyzes a directory recursively.

    Every '.sqf' file under *directory* is analyzed; any root or file path
    matching one of the *exclude* regexes is skipped and reported as
    EXCLUDED.  Issues are written to *writer* grouped per file (relative
    path header, then tab-indented messages) and collected into
    *exceptions_list*.  Returns *writer*.
    """
    for root, dirs, files in os.walk(directory):
        # PERF FIX: the original copied `exclude` for every any() check and
        # built throwaway lists; the list is never mutated here, so match
        # lazily against it directly.
        if any(re.match(pattern, root) for pattern in exclude):
            writer.write(root + ' EXCLUDED\n')
            continue
        # Deterministic report order regardless of filesystem ordering.
        files.sort()
        for file in files:
            if not file.endswith(".sqf"):
                continue
            file_path = os.path.join(root, file)
            if any(re.match(pattern, file_path) for pattern in exclude):
                writer.write(file_path + ' EXCLUDED\n')
                continue
            # Buffer per-file output so the header is only printed for
            # files that actually produced messages.
            writer_helper = Writer()
            with open(file_path) as f:
                analyze(f.read(), writer_helper, exceptions_list)
            if writer_helper.strings:
                writer.write(os.path.relpath(file_path, directory) + '\n')
                for string in writer_helper.strings:
                    writer.write('\t%s' % string)
    return writer
def readable_dir(prospective_dir):
    """argparse ``type=`` validator: the value must be a readable directory.

    Returns the path unchanged when valid.

    BUG FIX: the original raised a bare Exception, which argparse does not
    intercept — an invalid -d produced an unhandled traceback instead of a
    usage error.  argparse.ArgumentTypeError (still an Exception subclass,
    so existing handlers keep working) yields a proper error message.
    """
    if not os.path.isdir(prospective_dir):
        raise argparse.ArgumentTypeError(
            "readable_dir:{0} is not a valid path".format(prospective_dir))
    if not os.access(prospective_dir, os.R_OK):
        raise argparse.ArgumentTypeError(
            "readable_dir:{0} is not a readable dir".format(prospective_dir))
    return prospective_dir
def parse_args(args):
    """Build the CLI parser and parse *args*.

    Input precedence (see entry_point): an explicit file, else a directory
    to walk recursively, else stdin.  Output defaults to stdout.  -x may
    be repeated to exclude paths by regex; -e controls whether findings
    affect the exit code.
    """
    parser = argparse.ArgumentParser(description="Static Analyzer of SQF code")
    parser.add_argument('file', nargs='?', type=argparse.FileType('r'), default=None,
                        help='The full path of the file to be analyzed')
    parser.add_argument('-d', '--directory', nargs='?', type=readable_dir, default=None,
                        help='The full path of the directory to recursively analyse sqf files on')
    parser.add_argument('-o', '--output', nargs='?', type=argparse.FileType('w'), default=None,
                        help='File path to redirect the output to (default to stdout)')
    parser.add_argument('-x', '--exclude', action='append', nargs='?', help='Path that should be ignored (regex)', default=[])
    parser.add_argument('-e', '--exit', type=str, default='',
                        help='How the parser should exit. \'\': exit code 0;\n'
                             '\'e\': exit with code 1 when any error is found;\n'
                             '\'w\': exit with code 1 when any error or warning is found.')
    return parser.parse_args(args)
def entry_point(args):
    """Run the analyzer according to the parsed CLI options.

    Input comes from the given file, else the given directory, else stdin.
    Returns the process exit code (0, or 1 when -e/-w is set and matching
    findings exist).
    """
    opts = parse_args(args)
    writer = sys.stdout if opts.output is None else opts.output
    issues = []
    if opts.file is None and opts.directory is None:
        # No explicit input: analyze whatever arrives on stdin.
        analyze(sys.stdin.read(), writer, issues)
    elif opts.file is not None:
        source = opts.file.read()
        opts.file.close()
        analyze(source, writer, issues)
    else:
        base_dir = opts.directory.rstrip('/')
        # Anchor relative exclusion patterns at the analyzed directory.
        patterns = [p if p.startswith('/') else os.path.join(base_dir, p)
                    for p in opts.exclude]
        analyze_dir(base_dir, writer, issues, patterns)
    if opts.output is not None:
        writer.close()
    code = 0
    if opts.exit == 'e':
        code = int(any(isinstance(e, SQFParserError) for e in issues))
    elif opts.exit == 'w':
        code = int(any(isinstance(e, (SQFWarning, SQFParserError)) for e in issues))
    return int(code)
def main():
    """Console-script entry point: run the linter on the real CLI args."""
    sys.exit(entry_point(sys.argv[1:]))
if __name__ == "__main__":
    main()
| StarcoderdataPython |
5074182 | <filename>ChemTopicModel/topicModelGUI.py<gh_stars>10-100
#
# Copyright (c) 2016, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by <NAME>, December 2016
from ipywidgets import *
from IPython.display import display, clear_output
from ChemTopicModel import drawTopicModel, chemTopicModel
# allows choosing of topic colors
from matplotlib.colors import hex2color, rgb2hex, cnames
import seaborn as sns
import matplotlib.pyplot as plt
import os
import time
import pandas as pd
import numpy as np
from collections import defaultdict
# some nice interactive bokeh plots
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.plotting import show, figure
from bokeh.io import output_notebook
# outputy bokeh plots within the notebook
output_notebook()
# use seaborn style
sns.set()
# for encoding the png images
import base64
def to_base64(png):
    """Return *png* bytes as a data-URI string suitable for an <img> src."""
    encoded = base64.b64encode(png).decode("utf-8")
    return "data:image/png;base64," + encoded
# main GUI
def TopicModel():
def buildModel(sender):
clear_output()
showTopicButton.disabled = True
showMoleculesButton.disabled = True
saveAsButton.disabled = True
saveAsButton2.disabled = True
statsButton.disabled = True
statsButton2.disabled = True
progressBar = widgets.FloatProgress(min=0, max=100, width='300px', margin='10px 5px 10px 10px')
labelProgressBar.value = 'Loading data'
display(progressBar)
filename = dataSetSelector.value
if filename == '':
print('No data set specified, please check your input.')
progressBar.close()
return
try:
data = pd.read_csv(filename)
labelProgressBar.value = 'Generating fragments (may take several minutes for larger data sets)'
progressBar.value +=33
except:
progressBar.value +=100
labelProgressBar.value = 'Reading data failed'
print('Invalid data file, please check your file.')
progressBar.close()
return
try:
starttime = time.time()
topicModel=chemTopicModel.ChemTopicModel(fragmentMethod=fragmentmethodSelector.value, rareThres=rareFilterSelector.value, commonThres=commonFilterSelector.value)
topicModel.loadData(data)
topicModel.generateFragments()
labelProgressBar.value = 'Building the model (may take several minutes for larger data sets and many topics)'
progressBar.value +=33
topicModel.buildTopicModel(numTopicSelector.value)
finaltime = time.time() - starttime
progressBar.value +=34
labelProgressBar.value = 'Finished model successfully in %.3f sec'%finaltime
# Update parameters,dropdown options etc.
labelSelector.options = topicModel.oriLabelNames
labelSelector2.options = topicModel.oriLabelNames
labelSelector2a.options = topicModel.oriLabelNames
numDocs, numTopics = topicModel.documentTopicProbabilities.shape
labelID = topicModel.oriLabelNames.index(labelSelector2.value)
labelSelector3.options = sorted(list(set(topicModel.moldata['label_'+str(labelID)])))
labelSelector3a.options = sorted(list(set(topicModel.moldata['label_'+str(labelID)])))
topicSelector.max=numTopics
params['labels'] = topicModel.oriLabelNames
params['topicModel'] = topicModel
params['colors'] = sns.husl_palette(numTopics, s=.6)
params['numTopics'] = numTopics
showTopicButton.disabled = False
showMoleculesButton.disabled = False
saveAsButton.disabled = False
saveAsButton2.disabled = False
statsButton.disabled = False
statsButton2.disabled = False
progressBar.close()
except:
progressBar.value +=100
labelProgressBar.value = 'Model building failed'
print('Topic model could not be built.')
return
_tooltipstr="""
<div>
<div>
<span style="font-size: 17px;">Topic $index</span><br>
<span style="font-size: 12px;">Top 3 fragments</span>
</div>
<div style="display: flex">
<figure style="text-align: center">
<img
src="@desc1" height="20"
style="float: left; margin: 0px 5px 5px 0px;"
border="2"
></img>
<figcaption> [@desc4] </figcaption>
</figure>
<figure style="text-align: center">
<img
src="@desc2" height="20"
style="float: center; margin: 0px 5px 5px 0px; "
border="2"
></img>
<figcaption> [@desc5] </figcaption>
</figure>
<figure style="text-align: center">
<img
src="@desc3" height="20"
style="float: right; margin: 0px 5px 5px 0px; "
border="2"
></img>
<figcaption> [@desc6] </figcaption>
</figure>
</div>
</div>
"""
def _getToolTipImages(topicModel, numTopics, nTopFrags):
tmp=[]
name=[]
scores=topicModel.getTopicFragmentProbabilities()
for i in range(0,numTopics):
try:
imgs = drawTopicModel.generateTopicRelatedFragmentSVGs(topicModel, i, n_top_frags=nTopFrags,molSize=(100,80),svg=False)
t = [to_base64(i) for i in imgs]
if len(t) < nTopFrags:
for j in range(len(t),nTopFrags):
t.append('')
tmp.append(t)
except:
pass
names = list(map(lambda x: "Score %.2f" % x, filter(lambda y: y > 0.0, sorted(scores[i,:], reverse=True)[:nTopFrags])))
name.append(names)
name = np.array(name)
edges = np.arange(numTopics+1)
if len(tmp) == 0:
tmp=[['','','']]*numTopics
tmp = np.array(tmp)
return name,tmp,edges
def calcOverallStatistics(sender):
clear_output()
labelProgressBar.value=''
topicModel = params['topicModel']
numDocs, numTopics = topicModel.documentTopicProbabilities.shape
topicDocStats=[0]*numTopics
for doc in range(0,numDocs):
topicDocStats[np.argmax(topicModel.documentTopicProbabilities[doc,:])]+=1
topicDocStatsNorm=np.array(topicDocStats).astype(float)/numDocs
name,tmp,edges = _getToolTipImages(topicModel, numTopics, 3)
source = ColumnDataSource( data = dict( y = topicDocStatsNorm, l = edges[ :-1 ], r = edges[ 1: ], desc1 = tmp[:,0], \
desc2 = tmp[:,1], desc3 = tmp[:,2], desc4 = name[:,0], \
desc5 = name[:,1], desc6 = name[:,2]))
hover=HoverTool()
hover.tooltips= _tooltipstr
p = figure(width=800, height=400, tools=[hover], toolbar_location=None, title="Overall topic distribution")
p.quad( top = 'y', bottom = 0, left = 'l', right = 'r',
fill_color = "#036564", line_color = "#033649", source = source )
p.xaxis.axis_label = "Topics"
p.yaxis.axis_label = "% molecules per topic"
p.xaxis.minor_tick_line_color = None
show(p)
def calcSubsetStatistics(sender):
clear_output()
labelProgressBar.value=''
topicModel = params['topicModel']
label = labelSelector3a.value
labelID = params['labels'].index(labelSelector2a.value)
numDocs, numTopics = topicModel.documentTopicProbabilities.shape
data = topicModel.moldata.loc[topicModel.moldata['label_'+str(labelID)] == label]
topicProfile = np.zeros((numTopics,), dtype=np.int)
for idx in data.index:
topicProfile = np.sum([topicProfile, topicModel.documentTopicProbabilities[idx]], axis=0)
topicProfileNorm=np.array(topicProfile).astype(float)/data.shape[0]
name,tmp,edges = _getToolTipImages(topicModel, numTopics, 3)
source = ColumnDataSource( data = dict( y = topicProfileNorm, l = edges[ :-1 ], r = edges[ 1: ], desc1 = tmp[:,0], \
desc2 = tmp[:,1], desc3 = tmp[:,2], desc4 = name[:,0], \
desc5 = name[:,1], desc6 = name[:,2]))
hover=HoverTool()
hover.tooltips= _tooltipstr
p = figure(width=800, height=400, tools=[hover],toolbar_location=None, title="Topic profile for "+str(label))
p.quad( top = 'y', bottom = 0, left = 'l', right = 'r',
fill_color = "#036564", line_color = "#033649", source = source )
p.xaxis.axis_label = "Topics"
p.yaxis.axis_label = "Mean probability of topics"
p.xaxis.minor_tick_line_color = None
show(p)
def showTopic(sender):
topicModel = params['topicModel']
clear_output()
labelProgressBar.value=''
topicID = topicSelector.value
labelID = params['labels'].index(labelSelector.value)
if chooseColor.value:
c = colorSelector.value
if not c.startswith('#'):
c = cnames[c]
hex_color = c
rgb_color = hex2color(hex_color)
else:
rgb_color = tuple(params['colors'][topicSelector.value])
colorSelector.value = rgb2hex(rgb_color)
temp=None
if topicID == '' or labelID == '':
print("Please check your input")
else:
drawTopicModel.drawFragmentsbyTopic(topicModel, topicID, n_top_frags=20, numRowsShown=1.2,\
numColumns=8, tableHeader='Top fragments of topic '+str(topicID))
drawTopicModel.drawMolsByTopic(topicModel, topicID, idsLabelToShow=[labelID], topicProbThreshold = 0.1, baseRad=0.9,\
numRowsShown=3, color=rgb_color)
def showMolecules(sender):
topicModel = params['topicModel']
clear_output()
labelProgressBar.value=''
label = labelSelector3.value
labelID = params['labels'].index(labelSelector2.value)
if label == '' or labelID == '':
print("Please check your input")
else:
drawTopicModel.drawMolsByLabel(topicModel, label, idLabelToMatch=labelID, baseRad=0.9, \
molSize=(250,150), numRowsShown=3)
def saveTopicAs(sender):
topicModel = params['topicModel']
topicID = topicSelector.value
labelID = params['labels'].index(labelSelector.value)
path = filePath.value
if chooseColor.value:
c = colorSelector.value
if not c.startswith('#'):
c = cnames[c]
hex_color = c
rgb_color = hex2color(hex_color)
else:
rgb_color = tuple(params['colors'][topicSelector.value])
colorSelector.value = rgb2hex(rgb_color)
temp=None
if topicID == '' or labelID == '':
print("Please check your input")
else:
svgGrid = drawTopicModel.generateSVGGridMolsbyTopic(topicModel, 0, idLabelToShow=labelID, topicProbThreshold = 0.1,
baseRad=0.9, color=rgb_color)
with open(path+'.svg','w') as out:
out.write(svgGrid)
print("Saved topic image to: "+os.getcwd()+'/'+path+'.svg')
def saveMolSetAs(sender):
topicModel = params['topicModel']
if topicModel == None:
print('No topic model available, please build a valid model first.')
return
path = filePath2.value
label = labelSelector3.value
labelID = params['labels'].index(labelSelector2.value)
if label == '' or labelID == '':
print("Please check your input")
else:
svgGrid = drawTopicModel.generateSVGGridMolsByLabel(topicModel, label, idLabelToMatch=labelID, baseRad=0.9)
with open(path+'.svg','w') as out:
out.write(svgGrid)
print("Saved molecule set image to: "+os.getcwd()+'/'+path+'.svg')
def getMolLabels(labelName):
topicModel = params['topicModel']
try:
labelID = params['labels'].index(labelName)
return list(set(topicModel.moldata['label_'+str(labelID)]))
except:
return []
def selectMolSet(sender):
    # Refresh the molecule-set dropdown of the explore tab whenever its
    # label dropdown changes.
    labelSelector3.options = sorted(getMolLabels(labelSelector2.value))
def selectMolSeta(sender):
    # Refresh the molecule-set dropdown of the statistics tab whenever its
    # label dropdown changes.
    labelSelector3a.options = sorted(getMolLabels(labelSelector2a.value))
def topicColor(sender):
    # Sync the colour picker with the palette colour of the selected topic.
    rgb_color = tuple(params['colors'][topicSelector.value])
    colorSelector.value = rgb2hex(rgb_color)
# init values
# Shared mutable state for the whole UI: label names, topic count, per-topic
# colour palette, the fitted model, and the fragment-frequency thresholds.
params=dict([('labels',[]),('numTopics',50),('colors',sns.husl_palette(20, s=.6)),('topicModel',None),('rareThres',0.001),('commonThres',0.1)])
labelProgressBar = widgets.Label(value='')  # status line displayed below the accordion
########### Model building widgets
# widgets
dataSetSelector = widgets.Text(description='Data set:',value='data/datasetA.csv', width='450px', margin='10px 5px 10px 10px')
numTopicSelector = widgets.IntText(description='Number of topics', width='200px', value=params['numTopics'],\
                                   margin='10px 5px 10px 10px')
rareFilterSelector = widgets.BoundedFloatText(min=0,max=1.0,description='Threshold rare fragments', width='200px', value=params['rareThres'], margin='10px 5px 10px 10px')
commonFilterSelector = widgets.BoundedFloatText(min=0,max=1.0,description='Threshold common fragments', width='200px', value=params['commonThres'], margin='10px 5px 10px 10px')
fragmentmethodSelector = widgets.Dropdown(options=['Morgan', 'RDK', 'Brics'], description='Fragment method:',\
                                          width='200px',margin='10px 5px 10px 10px')
doItButton = widgets.Button(description="Build model", button_style='danger', width='300px', margin='10px 5px 10px 10px')
# actions
# NOTE(review): Button.on_click returns None, so `labels` is always None;
# the assignment looks like a leftover - verify before removing.
labels = doItButton.on_click(buildModel)
# layout widgets
set1 = widgets.HBox()
set1.children = [dataSetSelector]
set2 = widgets.HBox()
set2.children = [numTopicSelector, fragmentmethodSelector]
set2a = widgets.HBox()
set2a.children = [rareFilterSelector, commonFilterSelector]
set3 = widgets.HBox()
set3.children = [doItButton]
finalLayout = widgets.VBox()
finalLayout.children = [set1, set2, set2a, set3]
########### Model statistics widget
statsButton = widgets.Button(description="Show overall topic distribution", disabled=True, button_style='danger',\
                             width='300px', margin='10px 5px 10px 10px')
labelSelector2a = widgets.Dropdown(options=params['labels'], description='Label:', width='300px', margin='10px 5px 10px 10px')
init = labelSelector2a.value
labelSelector3a = widgets.Dropdown(options=getMolLabels(init), description='Molecule set:', width='300px', margin='10px 5px 10px 10px')
statsButton2 = widgets.Button(description="Show topic profile by label", disabled=True, button_style='danger',\
                              width='300px', margin='10px 5px 10px 10px')
# actions
statsButton.on_click(calcOverallStatistics)
statsButton2.on_click(calcSubsetStatistics)
labelSelector2a.observe(selectMolSeta)
# layout
statsLayout = widgets.HBox()
statsLayout.children = [statsButton]
statsLayout2 = widgets.HBox()
statsLayout2.children = [labelSelector2a, labelSelector3a, statsButton2]
finalLayoutStats= widgets.VBox()
finalLayoutStats.children = [statsLayout, statsLayout2]
########### Model exploration widgets
# choose topic tab
labelSelector = widgets.Dropdown(options=params['labels'], description='Label to show:', width='300px', margin='10px 5px 10px 10px')
topicSelector = widgets.BoundedIntText(description="Topic to show", min=0, max=params['numTopics']-1, width='200px',\
                                       margin='10px 5px 10px 10px')
lableChooseColor = widgets.Label(value='Define topic color',margin='10px 5px 10px 10px')
chooseColor = widgets.Checkbox(value=False,margin='10px 5px 10px 10px')
showTopicButton = widgets.Button(description="Show the topic", button_style='danger',disabled=True,\
                                 width='200px', margin='10px 5px 10px 10px')
# choose molecules tab
labelSelector2 = widgets.Dropdown(options=params['labels'], description='Label:', width='300px', margin='10px 5px 10px 10px')
init = labelSelector2.value
labelSelector3 = widgets.Dropdown(options=getMolLabels(init), description='Molecule set:', width='300px', margin='10px 5px 10px 10px')
showMoleculesButton = widgets.Button(description="Show the molecules", button_style='danger',disabled=True, width='200px',\
                                     margin='10px 5px 10px 10px')
# choose color tab
colorSelector = widgets.ColorPicker(concise=False, description='Topic highlight color', value='#e0e3e4',width='200px', \
                                    margin='10px 5px 10px 10px')
# save as tab
filePath = widgets.Text(description="Save file as:", width='450px', margin='10px 5px 10px 10px')
saveAsButton = widgets.Button(description="Save topic image", button_style='info',disabled=True,width='200px',\
                              margin='10px 5px 10px 10px')
filePath2 = widgets.Text(description="Save file as:", width='450px', margin='10px 5px 10px 10px')
saveAsButton2 = widgets.Button(description="Save molecule set image", button_style='info',disabled=True,width='200px', \
                               margin='10px 5px 10px 10px')
# actions
# NOTE(review): topicColor is defined above but never registered via
# topicSelector.observe(topicColor) - confirm whether that wiring is missing.
showTopicButton.on_click(showTopic)
saveAsButton.on_click(saveTopicAs)
saveAsButton2.on_click(saveMolSetAs)
showMoleculesButton.on_click(showMolecules)
labelSelector2.observe(selectMolSet)
# layout widgets
tab1 = widgets.HBox()
tab1.children = [topicSelector, labelSelector, lableChooseColor, chooseColor, showTopicButton]
tab2 = widgets.HBox()
tab2.children = [labelSelector2, labelSelector3, showMoleculesButton]
tab3 = widgets.HBox()
tab3.children = [colorSelector]
tab4a = widgets.HBox()
tab4a.children = [filePath, saveAsButton]
tab4b = widgets.HBox()
tab4b.children = [filePath2, saveAsButton2]
tab4 = widgets.VBox()
tab4.children = [tab4a, tab4b]
children = [tab1, tab2, tab3, tab4]
tabs = widgets.Tab(children=children)
tabs.set_title(0,'Topic to explore')
tabs.set_title(1,'Molecule set to explore')
tabs.set_title(2,'Choose color')
tabs.set_title(3,'Save images as')
accordion = widgets.Accordion(children=[finalLayout, finalLayoutStats, tabs])
accordion.set_title(0, 'Build Topic model')
accordion.set_title(1, 'Statistics Topic model')
accordion.set_title(2, 'Explore Topic model')
display(accordion)
display(labelProgressBar)
11383340 | <filename>i2c.py
from smbus import SMBus
import time
bus = SMBus(1)    # I2C bus 1 (the user-accessible bus on a Raspberry Pi header)
#address = 0x60
address = 0x40    # 7-bit I2C address of the target device
# NOTE(review): `data` is defined but never sent (the block write below is
# commented out) - verify before removing.
data = [1,2,3,4,5,6,7,8]
#bus.write_i2c_block_data(address, 0, data)
def fun_data():
    # NOTE(review): smbus's write_byte_data typically returns None, so data1
    # is presumably always None here even though the caller expects a byte
    # value 0-255; this may have been meant to be read_byte_data - verify.
    data1 = bus.write_byte_data(address, 1, 1)
    return data1
def bearing3599():
    """Read the 16-bit bearing register pair and return degrees (0.0-359.9).

    The shift/scale in this code implies register 2 holds the high byte and
    register 3 the low byte of a bearing in tenths of a degree (0-3599).
    """
    bear1 = bus.read_byte_data(address, 2)  # high byte
    bear2 = bus.read_byte_data(address, 3)  # low byte
    # BUGFIX: the low byte was read but never combined into the result, so
    # the bearing was quantised to whole multiples of 25.6 degrees.
    bear = (bear1 << 8) + bear2
    return bear / 10.0
# Poll the device twice a second forever.
while True:
    bearing = bearing3599() #this returns the value to 1 decimal place in degrees.
    # NOTE(review): `bearing` is computed but never printed or used - confirm intent.
    time.sleep(0.5)
    data1 = fun_data() #this returns the value as a byte between 0 and 255.
    print (data1)
    time.sleep(0.5)
9765353 | <filename>.archived/snakecode/0091.py
class Solution:
    def numDecodings(self, s: str) -> int:
        """Count the ways to decode digit string ``s`` where '1'..'26' map to 'A'..'Z'.

        Rolling two-variable dynamic programme: ``curr_ways`` counts decodings of
        the prefix processed so far, ``prev_ways`` of the prefix one character
        shorter.
        """
        prev_ways, curr_ways = 0, 1
        prev_digit = ''
        for digit in s:
            ways = 0
            if digit != '0':
                # The digit decodes on its own (1-9).
                ways += curr_ways
            if prev_digit and 10 <= int(prev_digit + digit) <= 26:
                # The previous digit and this one decode together as 10..26.
                ways += prev_ways
            prev_ways, curr_ways, prev_digit = curr_ways, ways, digit
        return curr_ways
| StarcoderdataPython |
# Minimal Django settings for exercising the `softdelete` app.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'my_db',
    }
}
# NOTE(review): TEMPLATE_LOADERS (and the eggs loader in particular) are
# settings from older Django releases - verify against the Django version
# this project targets.
TEMPLATE_LOADERS = (
    'django.template.loaders.app_directories.Loader',
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.eggs.Loader',
)
INSTALLED_APPS = ['softdelete', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.admin', 'registration', 'registration_defaults']
DOMAIN='http://testserver'
ROOT_URLCONF = 'softdelete.urls'
| StarcoderdataPython |
6532050 | <gh_stars>0
from datetime import datetime
from functools import wraps
try:
from IPython import get_ipython
except:
pass
import numpy as np
import os
import sys
import traceback
import time
import types
from warnings import warn
import PyQt5.QtCore as QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QApplication, QCheckBox, QFormLayout, QGridLayout, QHBoxLayout, QLabel, \
QLineEdit, QMainWindow, QPushButton, QShortcut, QSizePolicy, QSpacerItem, QVBoxLayout, QWidget, \
QFileDialog, QListWidget
from PyQt5.Qt import QImage
import matplotlib
try:
matplotlib.use('Qt5Agg')
except:
pass
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib.axes import Axes
from matplotlib.transforms import Bbox
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
import paramiko
try:
from torch import Tensor
except:
Tensor = type(None)
class rd(QMainWindow):
    """Main window of the remote debugger.

    Opens an SFTP session to the given host and shows the files found in the
    remote exchange directory, with a button to refresh the listing.
    """
    sftp = None  # SFTP wrapper, created in __init__
    app = None   # owning QApplication (created or reused)
    def __init__(self, hostname, username, port=22, **kwargs):
        # Open the SFTP session first; abort early if it cannot be established.
        self.sftp = SFTP(hostname, username, port)
        self.sftp.connect()
        assert self.sftp.connected, 'Could not open SFTP connection'
        # Reuse an already-running Qt application (e.g. inside IPython),
        # otherwise create a fresh one.
        self.app = QtCore.QCoreApplication.instance()
        if self.app is None:
            self.app = QApplication([''])
        QMainWindow.__init__(self, parent=None)
        self.timestamp = datetime.now().strftime("%y%m%d_%H%M%S")
        self.setWindowTitle('rd ' + self.timestamp)
        try:
            # When running under IPython, switch matplotlib to the qt backend.
            shell = get_ipython()
            if not shell is None:
                shell.magic('%matplotlib qt')
        except:
            pass
        self.initUI()
        self.show()
    def initUI(self):
        """Build the layout: a remote-file list plus a refresh button.

        A host/user/port form is created but currently not added to the
        layout (the addLayout call is commented out below).
        """
        self.widget = QWidget()
        form = QFormLayout()
        self.uiLEHost = QLineEdit('localhost')
        self.uiLEUser = QLineEdit('mazlov')
        self.uiLEPort = QLineEdit('22')
        form.addRow(QLabel('Hostname:'), self.uiLEHost)
        form.addRow(QLabel('Username:'), self.uiLEUser)
        form.addRow(QLabel('Port:'), self.uiLEPort)
        self.uiBtnRefresh = QPushButton("Refresh")
        self.uiBtnRefresh.clicked.connect(self.updateFileList)
        vbox = QVBoxLayout()
        # vbox.addLayout(form)
        vbox.addWidget(self.uiBtnRefresh)
        vbox.addStretch()
        # vbox.addItem(QSpacerItem(1, 1, vPolicy=QSizePolicy.Expanding))
        # vbox.addLayout(form_bottom)
        # vbox.addLayout(form_bottom2)
        self.list = QListWidget()
        # self.list.addItem('hey there!')
        hbox = QHBoxLayout()
        hbox.addWidget(self.list)
        hbox.addLayout(vbox)
        hbox.addStretch()
        self.widget.setLayout(hbox)
        self.setCentralWidget(self.widget)
        # keyboard shortcuts
        closeShortcut = QShortcut(QKeySequence('Escape'), self.widget)
        closeShortcut.activated.connect(self.close)
    def updateFileList(self):
        """Refresh the list widget with the current remote file names."""
        files = self.sftp.getFileList()
        self.list.clear()
        for file in files:
            self.list.addItem(file)
class SFTP:
    """Thin wrapper around a paramiko SFTP session rooted at a fixed remote directory."""
    client = None       # paramiko.SSHClient once connected
    sftp = None         # paramiko SFTP channel once connected
    connected = False   # set True after a successful connect()
    def __init__(self, hostname, username, port, dir='/tmp/remdeb/'):
        self.hostname = hostname
        self.username = username
        self.port = port
        self.dir = dir
        paramiko.util.log_to_file("remote_debugger.sftp.log")
    def connect(self):
        """Open the SSH/SFTP session and chdir into self.dir, creating it if needed."""
        self.client = paramiko.SSHClient()
        self.client.load_system_host_keys()
        # HACK: hard-coded WSL key path for a specific user; make configurable.
        self.client.load_host_keys("\\\\wsl$\\Ubuntu-18.04\\home\\mazlov\\.ssh\\id_rsa")
        self.client.connect(self.hostname, self.port, self.username)
        self.sftp = self.client.open_sftp()
        try:
            self.sftp.chdir(self.dir)  # Test if remote_path exists
        except IOError:
            self.sftp.mkdir(self.dir)  # Create remote_path
            self.sftp.chdir(self.dir)
        if (self.sftp):
            self.connected = True
    def getFileList(self):
        """Return the names of the non-directory entries in the remote cwd."""
        import stat
        # Check st_mode directly instead of parsing the `ls -l`-style string
        # form of lstat(), which was fragile.
        return [name for name in self.listdir()
                if not stat.S_ISDIR(self.sftp.lstat(name).st_mode)]
    def listdir(self):
        """Return all entry names (files and directories) in the remote cwd."""
        return self.sftp.listdir()
if __name__ == '__main__':
    # Launch the remote-debugger window against a local SSH endpoint and
    # hand control to the Qt event loop.
    rdeb = rd('localhost', 'mazlov', 2040)
    sys.exit(rdeb.app.exec_())
# Classic phone-keypad letter mapping.  Digits 0 and 1 carry no letters and
# are represented by a single empty string so they leave words unchanged.
phone_number = {
    '0': [''],
    '1': [''],
    '2': ['a','b','c'],
    '3': ['d','e','f'],
    '4': ['g','h','i'],
    '5': ['j','k','l'],
    '6': ['m','n','o'],
    '7': ['p','q','r','s'],
    '8': ['t','u','v'],
    '9': ['w','x','y','z']
}
def generate_all_possible_words(number):
    """Return every letter string the digit string `number` can spell.

    Returns [] for an empty input (matching the original behaviour).  Result
    ordering is preserved: the letter of the most recently processed digit
    varies slowest.
    """
    if not number:
        return []
    words = ['']
    for digit in number:
        # Build a fresh list each step.  The previous implementation returned
        # the mapping list itself for single-digit input, so mutating the
        # result corrupted the phone_number table (aliasing bug).
        words = [word + letter for letter in phone_number[digit] for word in words]
    return words
def add_character_to_word(character, words):
    """Return a new list in which `character` has been appended to every word."""
    return ["%s%s" % (word, character) for word in words]
def load_dictionary():
    """Load the system word list into a dict keyed by word (newline stripped).

    The stored value is the raw line including its trailing newline, matching
    the original behaviour; callers only use key membership.
    """
    d = {}
    # `with` guarantees the file handle is closed even if reading fails.
    with open('/usr/share/dict/words', 'r') as f:
        for line in f:
            d[line[:-1]] = line
    return d
def filter_real_worlds(words):
    """Return the subset of `words` found in the system dictionary, order preserved."""
    dictionary = load_dictionary()  # renamed: the local previously shadowed the builtin `dict`
    return [word for word in words if word in dictionary]
if __name__ == '__main__':
    # Demo: spell out a number and keep only real dictionary words.
    phone = '6666'
    #phone = '3254773'
    words = generate_all_possible_words(phone)
    result = filter_real_worlds(words)
    # Parenthesised so the script runs under both Python 2 and Python 3.
    print(result)
| StarcoderdataPython |
77746 | <reponame>lmbaeza/Crypto
from os import environ
from sys import stdin, stdout
from math import gcd
import numpy as np
from sympy import Matrix
class Hill:
    """2x2 Hill cipher over the 26-letter uppercase alphabet A-Z."""
    def __init__(self):
        self.N = 2     # key matrix rows
        self.M = 2     # key matrix columns
        self.MOD = 26  # alphabet size
    def pair_to_matrix(self, txt):
        """Convert a two-letter string into a 2x1 column vector of 0-based indices."""
        assert len(txt) == 2
        mtx = [ord(txt[0]) - ord('A'), ord(txt[1]) - ord('A')]
        return np.array(mtx).reshape((2, 1))
    def matrix_to_pair(self, matrix):
        """Convert a 2x1 vector of letter indices back into a two-letter string."""
        return chr(matrix[0][0] + ord('A')) + chr(matrix[1][0] + ord('A'))
    def _apply_key(self, text, key):
        # Shared core of encrypt/decrypt (previously duplicated in both):
        # multiply each consecutive letter pair by the key matrix mod 26.
        key = np.array(key).reshape((self.N, self.M))
        out = ""
        for i in range(1, len(text), 2):
            block = (key @ self.pair_to_matrix(text[i-1] + text[i])) % self.MOD
            out += self.matrix_to_pair(block)
        return out
    def encrypt(self, txt, key):
        """Encrypt uppercase `txt` with 2x2 `key`; odd-length input is padded with 'X'."""
        if len(txt) % 2:
            txt += 'X'
        return self._apply_key(txt, key)
    def decrypt(self, encry, key):
        """Decrypt `encry`; the caller must supply the key's inverse mod 26."""
        return self._apply_key(encry, key)
if __name__ == '__main__':
    # Demo: encrypt with the textbook 2x2 key, then decrypt with its
    # modular inverse (computed by sympy's inv_mod) to recover the input.
    key = [
        [3, 3],
        [2, 5]
    ]
    txt = "HELP"
    handle = Hill()
    encrypt = handle.encrypt(txt, key)
    print("Encrypt:", encrypt)
    key_inverse = np.array(Matrix(key).inv_mod(Hill().MOD))
    decrypt = handle.decrypt(encrypt, key_inverse)
    print("Decrypt:", decrypt)
| StarcoderdataPython |
4947156 | import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
from .evaluator import Evaluator
class MulticlassEvaluator(Evaluator):
    """
    Evaluator for multiclass classification.

    Accumulates a confusion matrix over batches and derives per-class rates
    (TPR/FPR/TNR/FNR, PPV/FOR/NPV/FDR), accuracies and F1 scores from it.
    Per-batch means of every tracked metric are appended to ``self.history``.

    The confusion matrix may be replaced by the PyCM version (https://github.com/sepandhaghighi/pycm).
    This e.g. supports class weights and activation thresholds for computing the confusion matrix from
    class probabilities rather than labels. The only issue is how we accumulate the confusion matrices
    in this case since the classes don't support addition.
    """

    def __init__(self, n_classes=None, labels=None, evaluation_metric='accuracy'):
        """
        Initialize the MulticlassEvaluator object

        Args:
            n_classes (int): Number of classes
            labels (list): The labels for each class
            evaluation_metric (str): The attribute to use as evaluation metric
        """
        super().__init__(evaluation_metric)
        if n_classes is not None:
            self._n_classes = n_classes
            if labels is not None:
                assert self._n_classes == len(labels), 'Must have as many labels as classes'
                self._labels = labels
            else:
                # np.int was removed in NumPy >= 1.24; the builtin int is equivalent here.
                self._labels = np.arange(0, n_classes, dtype=int)
        self.batch = 0
        self._track_metrics = ('loss', 'accuracies', 'f1_scores', 'tprs', 'fprs', 'tnrs', 'fnrs', 'ppvs', 'fors', 'npvs', 'fdrs')
        self.history = pd.DataFrame(columns=('batch',) + self._track_metrics)
        self.reset()  # Setting all tracked metrics of the evaluator to default values.

    def update(self, predictions, labels, loss):
        """
        Update the tracked metrics: Confusion matrix, accuracy

        Args:
            predictions (list): List of predictions (per-class scores; argmax recovers the class).
            labels (list): The labels corresponding to the predictions (same one-hot/score layout).
            loss (None or list): List of the loss for each example for each GPU.
        """
        # Update loss related values; remember to filter out infs and nans.
        loss_filter = np.invert(np.logical_or(np.isinf(loss), np.isnan(loss)))
        loss = loss[loss_filter]
        self.loss_sum += loss.sum()
        self.num_examples += loss.size
        loss = loss.mean()
        # Update confusion matrix
        # Confusion matrix with model predictions in rows, true labels in columns
        # Batch statistics for history
        cm = confusion_matrix(labels.argmax(axis=1), predictions.argmax(axis=1), labels=self._labels)
        tps, fps, tns, fns = self.compute_tp_fp_tn_fn(cm)
        tprs, fprs, tnrs, fnrs = self.compute_tpr_fpr_tnr_fnr(tps, fps, tns, fns)
        ppvs, fors, npvs, fdrs = self.compute_ppv_for_npv_fdr(tps, fps, tns, fns)
        accuracies = self.compute_accuracies(tps, fps, tns, fns)
        f1_scores = self.compute_f1_scores(tps, fps, tns, fns)
        # Explicit lookup table instead of eval() on the metric names: eval is
        # fragile and an injection hazard if the tuple is ever user-controlled.
        batch_values = {'loss': loss, 'accuracies': accuracies, 'f1_scores': f1_scores,
                        'tprs': tprs, 'fprs': fprs, 'tnrs': tnrs, 'fnrs': fnrs,
                        'ppvs': ppvs, 'fors': fors, 'npvs': npvs, 'fdrs': fdrs}
        d = {'batch': self.batch}
        for name in self._track_metrics:
            d[name] = np.mean(batch_values[name])
        # DataFrame.append was removed in pandas 2.0; concat is the replacement.
        self.history = pd.concat([self.history, pd.DataFrame([d])], ignore_index=True)
        # accumulated statistics
        self.cm += cm
        self.tps, self.fps, self.tns, self.fns = self.compute_tp_fp_tn_fn(self.cm)
        self.tprs, self.fprs, self.tnrs, self.fnrs = self.compute_tpr_fpr_tnr_fnr(self.tps, self.fps, self.tns, self.fns)
        self.ppvs, self.fors, self.npvs, self.fdrs = self.compute_ppv_for_npv_fdr(self.tps, self.fps, self.tns, self.fns)
        self.accuracies = self.compute_accuracies(self.tps, self.fps, self.tns, self.fns)
        self.f1_scores = self.compute_f1_scores(self.tps, self.fps, self.tns, self.fns)
        # Bump batch counter
        self.batch += 1

    @staticmethod
    def compute_tp_fp_tn_fn(cm):
        """Derive per-class TP/FP/TN/FN counts from a confusion matrix."""
        tp = np.diag(cm)  # TPs are diagonal elements
        fp = cm.sum(axis=0) - tp  # FPs is sum of row minus true positives
        fn = cm.sum(axis=1) - tp  # FNs is sum of column minus true positives
        tn = cm.sum() - np.array([cm[i, :].sum() + cm[:, i].sum() - cm[i, i] for i in range(cm.shape[0])])  # total count minus false positives and false negatives plus true positives (which are otherwise subtracted twice)
        return tp, fp, tn, fn

    @staticmethod
    def compute_tpr_fpr_tnr_fnr(tp, fp, tn, fn):
        """Per-class rates; np.maximum(..., 1) guards against division by zero."""
        TPRs = tp / np.maximum(tp + fn, 1)  # True positive rate (recall)
        FPRs = fp / np.maximum(tn + fp, 1)  # False positive rate
        TNRs = tn / np.maximum(tn + fp, 1)  # True negative rate (specificity)
        FNRs = fn / np.maximum(tp + fn, 1)  # False negative rate
        return TPRs, FPRs, TNRs, FNRs

    @staticmethod
    def compute_ppv_for_npv_fdr(tp, fp, tn, fn):
        """Per-class predictive values; np.maximum(..., 1) guards against division by zero."""
        PPVs = tp / np.maximum(tp + fp, 1)  # Positive predictive value (precision)
        FORs = fn / np.maximum(tn + fn, 1)  # False omission rate
        NPVs = tn / np.maximum(tn + fn, 1)  # Negative predictive value
        FDRs = fp / np.maximum(tp + fp, 1)  # False discovery rate
        return PPVs, FORs, NPVs, FDRs

    @staticmethod
    def compute_accuracies(tp, fp, tn, fn):
        """Per-class accuracy (TP+TN over all)."""
        return (tp + tn) / np.maximum(tp + fp + tn + fn, 1)

    @staticmethod
    def compute_f1_scores(tp, fp, tn, fn):
        """Per-class F1 score 2TP / (2TP + FN + FP)."""
        return 2 * tp / np.maximum(2 * tp + fn + fp, 1)

    @property
    def loss(self):
        """Mean loss over all examples seen since the last reset."""
        return self.loss_sum / self.num_examples

    @property
    def tpr(self):
        """Macro-averaged recall."""
        return self.tprs.mean()

    @property
    def ppv(self):
        """Macro-averaged precision."""
        return self.ppvs.mean()

    @property
    def f1_score(self):
        """Macro-averaged F1 score."""
        return self.f1_scores.mean()

    @property
    def accuracy(self):
        """Macro-averaged per-class accuracy."""
        return self.accuracies.mean()

    def reset(self):
        """
        Reset the tracked metrics.
        """
        self.loss_sum = 0
        self.num_examples = 0
        self.tps = np.zeros(shape=self._n_classes)
        self.fps = np.zeros(shape=self._n_classes)
        self.fns = np.zeros(shape=self._n_classes)
        self.tns = np.zeros(shape=self._n_classes)
        self.cm = np.zeros((self._n_classes, self._n_classes))
| StarcoderdataPython |
283194 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Myme and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import msgprint
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and, cint
from frappe import _
from frappe.model.mapper import get_mapped_doc
import operator
class GroupItem(Document):
    """Frappe doctype controller for Group Item; the document lifecycle logic
    lives in the module-level hook functions below."""
    pass
@frappe.whitelist()
def save_group_item(doc,method):
    # Validation hook: every row of doc.data_group must already exist in
    # `tabData Inventory` (matching variant, roll length, warehouse, colour
    # and UOM); otherwise abort the save listing the missing items.
    temp = ""
    if doc.data_group :
        for i in doc.data_group :
            # SECURITY NOTE: values are interpolated into the SQL text via
            # str.format - prefer frappe.db.sql's parameterized form
            # (%s placeholders plus a values tuple) to avoid SQL injection.
            cek_data = frappe.db.sql("""
                SELECT
                    di.`item_code_variant`,
                    di.`total_roll`,
                    di.`total_yard_atau_meter`
                FROM `tabData Inventory` di
                WHERE di.`item_code_variant` = "{}"
                and di.`yard_atau_meter_per_roll` = "{}"
                and di.`warehouse` = "{}"
                and di.`colour` = "{}"
                and di.`inventory_uom` = "{}"
                """.format(i.item_code_variant, i.yard_atau_meter, i.warehouse, i.colour, i.inventory_uom))
            if cek_data :
                count = 0  # no-op placeholder: the row exists, nothing to record
            else :
                # Collect a human-readable list of the missing rows.
                temp = temp + "(" + str(i.item_code_variant) + "," + str(i.yard_atau_meter) + "," + str(i.colour) + ") "
    if temp :
        frappe.throw("Item "+temp+" tidak ada di dalam inventory")
@frappe.whitelist()
def submit_group_item(doc,method):
    # Submit hook: for every group row, record the reservation in the row's
    # Master Inventory document and deduct the reserved roll/yard quantities
    # from the matching `tabData Inventory` entry.
    count = 0
    # not yet checking whether the item exists in the master inventory or not
    if doc.packing_list_receipt :
        # NOTE(review): when linked to a Packing List Receipt nothing is
        # deducted here (count is a no-op flag) - confirm this is intended.
        count = 1
    else :
        for i in doc.data_group :
            mi = frappe.get_doc("Master Inventory", i.item_code_variant)
            mi.append("data_inventory", {
                "doctype": "Data Inventory",
                "item_code_variant" : i.item_code_variant,
                "yard_atau_meter_per_roll" : i.yard_atau_meter,
                "total_roll" : i.total_qty_roll,
                "total_yard_atau_meter" : i.yard_atau_meter * i.total_qty_roll,
                "warehouse" : i.warehouse,
                "colour" : i.colour,
                "group" : doc.group_code,
                "inventory_uom" : i.inventory_uom
            })
            # SECURITY NOTE: values interpolated via str.format - prefer
            # parameterized queries (%s placeholders) to avoid SQL injection.
            cek_data = frappe.db.sql("""
                SELECT
                    di.`item_code_variant`,
                    di.`total_roll`,
                    di.`total_yard_atau_meter`
                FROM `tabData Inventory` di
                WHERE di.`item_code_variant` = "{}"
                and di.`yard_atau_meter_per_roll` = "{}"
                and di.`warehouse` = "{}"
                and di.`colour` = "{}"
                and di.`inventory_uom` = "{}"
                """.format(i.item_code_variant, i.yard_atau_meter, i.warehouse, i.colour, i.inventory_uom))
            if cek_data :
                # Deduct the reserved quantity from the current stock figures.
                current_total_roll = cek_data[0][1]
                current_total_yard = cek_data[0][2]
                new_total_roll = current_total_roll - i.total_qty_roll
                new_total_yard = current_total_yard - (i.yard_atau_meter * i.total_qty_roll)
                frappe.db.sql ("""
                    UPDATE
                        `tabData Inventory` di
                    SET
                        di.`total_roll`="{0}",
                        di.`total_yard_atau_meter`="{1}"
                    WHERE di.`item_code_variant`="{2}"
                    AND di.`yard_atau_meter_per_roll`="{3}"
                    AND di.`warehouse`="{4}"
                    AND di.`colour` = "{5}"
                    AND di.`inventory_uom` = "{6}"
                    """.format(new_total_roll, new_total_yard, i.item_code_variant, i.yard_atau_meter, i.warehouse, i.colour, i.inventory_uom))
            # Submit one Master Inventory document per group row.
            mi.flags.ignore_permissions = 1
            mi.submit()
@frappe.whitelist()
def cancel_group_item(doc,method):
    # Cancel hook: refuse to cancel groups linked to a Packing List Receipt
    # or already used; otherwise restore the reserved quantities to
    # `tabData Inventory` and delete the group's own inventory rows.
    #
    # BUGFIX: the restore/delete code referenced the loop variable `i`
    # without any enclosing loop, raising NameError on every cancel.  Iterate
    # doc.data_group exactly as submit_group_item does.
    if doc.packing_list_receipt :
        # menghapus data group dari packing list receipt nya
        frappe.throw("Tidak dapat menghapus karena terhubung dengan Packing List Receipt")
    else :
        if doc.is_used == 1 :
            frappe.throw("Tidak dapat di cancel karen Group telah di gunakan")
        else :
            for i in doc.data_group :
                # SECURITY NOTE: values interpolated via str.format - prefer
                # parameterized queries (%s placeholders) to avoid SQL injection.
                cek_data = frappe.db.sql("""
                    SELECT
                        di.`item_code_variant`,
                        di.`total_roll`,
                        di.`total_yard_atau_meter`
                    FROM `tabData Inventory` di
                    WHERE di.`item_code_variant` = "{}"
                    and di.`yard_atau_meter_per_roll` = "{}"
                    and di.`warehouse` = "{}"
                    and di.`colour` = "{}"
                    and di.`inventory_uom` = "{}"
                    """.format(i.item_code_variant, i.yard_atau_meter, i.warehouse, i.colour, i.inventory_uom))
                if cek_data :
                    # Return the reserved quantity to the stock figures.
                    current_total_roll = cek_data[0][1]
                    current_total_yard = cek_data[0][2]
                    new_total_roll = current_total_roll + i.total_roll
                    new_total_yard = current_total_yard + (i.yard_atau_meter * i.total_roll)
                    frappe.db.sql ("""
                        UPDATE
                            `tabData Inventory` di
                        SET
                            di.`total_roll`="{0}",
                            di.`total_yard_atau_meter`="{1}"
                        WHERE di.`item_code_variant`="{2}"
                        AND di.`yard_atau_meter_per_roll`="{3}"
                        AND di.`warehouse`="{4}"
                        AND di.`colour` = "{5}"
                        AND di.`inventory_uom` = "{6}"
                        """.format(new_total_roll, new_total_yard, i.item_code_variant, i.yard_atau_meter, i.warehouse, i.colour, i.inventory_uom))
                # Remove the rows this group had created in the inventory table.
                frappe.db.sql ("""
                    DELETE FROM
                        `tabData Inventory` di
                    WHERE di.`item_code_variant`="{0}"
                    AND di.`yard_atau_meter_per_roll`="{1}"
                    AND di.`warehouse`="{2}"
                    AND di.`colour` = "{3}"
                    AND di.`inventory_uom` = "{4}"
                    AND di.`group` = "{5}"
                    """.format(i.item_code_variant, i.yard_atau_meter, i.warehouse, i.colour, i.inventory_uom, doc.group_code))
130833 | <filename>setup.py
from setuptools import setup
# Runtime dependencies of DeepInsight.
install_requires = [
    'numpy',
    # BUGFIX: a missing comma after 'scipy' made Python concatenate the two
    # adjacent string literals into the bogus requirement 'scipyscikit-learn',
    # silently dropping both real dependencies.
    'scipy',
    'scikit-learn',
    'pandas',
    'matplotlib'
]

setup(
    name='DeepInsight',
    version='0.1.0',
    packages=['pyDeepInsight'],
    url='https://github.com/alok-ai-lab/deepinsight',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='A methodology to transform a non-image data to an image for convolution neural network architecture',
    install_requires=install_requires
)
| StarcoderdataPython |
6582757 | <gh_stars>0
import re
from typing import List
from collections import defaultdict
from autobridge.Opt.Slot import Slot
class DeviceBase:
    """Base class describing an FPGA device for floorplanning.

    Subclasses fill in the per-part tables (CR_AREA, DDR_TO_CLOCK_REGIONS,
    VITIS_REGION, ...) and implement _getVitisRegions(); this base provides
    the clock-region pblock arithmetic shared by all devices.
    """
    NAME = 'Base'
    CR_AREA = None          # per clock-region resource table, set by subclasses
    CR_NUM_VERTICAL = None  # number of clock-region rows, set by subclasses
    FPGA_PART_NAME = None

    def __init__(self, ddr_list=None, is_vitis_enabled=True):
        """Record the instantiated DDR controllers and precompute the areas
        already occupied by DDR/Vitis infrastructure.

        ddr_list defaults to "no DDRs"; a None sentinel replaces the previous
        mutable default argument ([]), which is a shared-state pitfall.
        """
        self.ddr_list = ddr_list if ddr_list is not None else []
        self.pre_existing_area = self._getVitisRegions(self.ddr_list, is_vitis_enabled)

    def _getCRPblockIntersect(self, cr_pblock1, cr_pblock2):
        """
        get the overlapped part of two pblocks of clock regions
        Returns None when the two ranges do not intersect.
        """
        assert re.search(r'CLOCKREGION_X\d+Y\d+:CLOCKREGION_X\d+Y\d+', cr_pblock1), cr_pblock1
        assert re.search(r'CLOCKREGION_X\d+Y\d+:CLOCKREGION_X\d+Y\d+', cr_pblock2), cr_pblock2
        pblock1_DL_x, pblock1_DL_y, pblock1_UR_x, pblock1_UR_y = \
            [int(val) for val in re.findall(r'[XY](\d+)', cr_pblock1)]  # DownLeft & UpRight
        pblock2_DL_x, pblock2_DL_y, pblock2_UR_x, pblock2_UR_y = \
            [int(val) for val in re.findall(r'[XY](\d+)', cr_pblock2)]  # DownLeft & UpRight
        # Intersection of two axis-aligned rectangles.
        intersect_DL_x = max(pblock1_DL_x, pblock2_DL_x)
        intersect_DL_y = max(pblock1_DL_y, pblock2_DL_y)
        intersect_UR_x = min(pblock1_UR_x, pblock2_UR_x)
        intersect_UR_y = min(pblock1_UR_y, pblock2_UR_y)
        if intersect_DL_x <= intersect_UR_x and intersect_DL_y <= intersect_UR_y:
            overlap_pblock = f'CLOCKREGION_X{intersect_DL_x}Y{intersect_DL_y}:CLOCKREGION_X{intersect_UR_x}Y{intersect_UR_y}'
        else:
            overlap_pblock = None
        return overlap_pblock

    def _getPblockArea(self, pblock_def):
        """
        get the total resources in the specified pblock
        Note that if a slot is outside the range of the device, we return an area of 0
        """
        assert re.search(r'CLOCKREGION_X\d+Y\d+:CLOCKREGION_X\d+Y\d+', pblock_def), f'unexpected format of the slot name {pblock_def}'
        DL_x, DL_y, UR_x, UR_y = [int(val) for val in re.findall(r'[XY](\d+)', pblock_def)]  # DownLeft & UpRight
        # treat the pseudo SLR with 0 area
        UR_y = min(self.CR_NUM_VERTICAL-1, UR_y)
        area = {
            'BRAM' : 0,
            'DSP' : 0,
            'FF' : 0,
            'LUT' : 0,
            'URAM' : 0
        }
        if DL_y > self.CR_NUM_VERTICAL-1:
            return area
        for item in ['BRAM', 'DSP', 'FF', 'LUT', 'URAM']:
            # the total area of one row
            area[item] = sum(self.CR_AREA[i][j][item] for i in range(DL_x, UR_x + 1) for j in range(DL_y, UR_y+1))
        return area

    def getArea(self, slot_pblock):
        """
        get the resources available to user. Exclude any pre-exising IPs
        """
        slot_user_area = self._getPblockArea(slot_pblock)
        for ip_pblock in self.pre_existing_area:
            overlap_pblock = self._getCRPblockIntersect(slot_pblock, ip_pblock)
            if overlap_pblock:
                overlap_area = self._getPblockArea(overlap_pblock)
                for item in slot_user_area.keys():
                    slot_user_area[item] -= overlap_area[item]
        return slot_user_area

    def getSlotPblockTcl(self, slot: Slot) -> List[str]:
        """
        remove the overlaps with vitis IPs
        If the slot is an SLR-level slot, then do not remove the DDR region in the middle
        """
        tcl = []
        pblock_name = slot.getRTLModuleName()
        pblock_def = slot.getName()
        tcl.append(f'create_pblock {pblock_name}')
        tcl.append(f'resize_pblock {pblock_name} -add {pblock_def}')
        tcl.append(f'# remove the reserved clock regions for the Vitis infra')
        tcl.append(f'resize_pblock {pblock_name} -remove {self.VITIS_REGION}')
        # exclude the DDR region if the DDR region is at the boundary of the slot
        for ddr in self.ddr_list:
            ddr_pblock = self.DDR_TO_CLOCK_REGIONS[ddr]
            ddr_slot = Slot(slot.board, ddr_pblock)
            if slot.down_left_x == ddr_slot.down_left_x or slot.up_right_x == ddr_slot.up_right_x:
                if slot.containsChildSlot(ddr_slot):
                    if self.getDDRSlolenRegion(ddr):
                        tcl.append(f'# exclude the area for DDR {ddr}')
                        tcl.append(f'resize_pblock {pblock_name} -remove {self.DDR_TO_CLOCK_REGIONS[ddr]}')
                        tcl.append(f'resize_pblock {pblock_name} -add {{ {self.getDDRSlolenRegion(ddr)} }}')
        return tcl

    def getDDRSlolenRegion(self, ddr: int) -> str:
        """Region given back to the user inside a DDR column; empty by default
        (subclasses override).  NOTE(review): 'Slolen' appears to be a typo of
        'Stolen' - renaming would break subclass overrides, so it is kept."""
        return ''

    # Mapping from DDR controller index to its clock-region pblock; filled in
    # by subclasses.
    DDR_TO_CLOCK_REGIONS = {}

    def getDDRPblock(self, ddr):
        """Return the clock-region pblock string occupied by the given DDR."""
        return self.DDR_TO_CLOCK_REGIONS[ddr]
class DeviceU250(DeviceBase):
    """Device description for the Xilinx Alveo U250 (4 SLRs, 8x16 clock regions)."""
    def _getVitisRegions(self, ddr_list, is_vitis_enabled):
        # Collect the pblocks already occupied by the DDR controllers and,
        # when targeting Vitis, by the Vitis platform column.
        assert all(ddr in [0, 1, 2, 3] for ddr in ddr_list) # u250 has 4 ddrs
        # the area used by implicit IPs
        pre_existing_area = []
        for ddr in ddr_list:
            pre_existing_area.append(self.DDR_TO_CLOCK_REGIONS[ddr])
        # the vitis platform will take away the rightmost column
        if is_vitis_enabled:
            pre_existing_area.append(self.VITIS_REGION) # the area consumed by Vitis platform
        return pre_existing_area
    def getDDRSlolenRegion(self, ddr: int) -> str:
        # Slice/DSP/BRAM ranges inside DDR column `ddr` that are returned to
        # the user (each DDR spans a quarter of the column vertically).
        assert 0 <= ddr <= 3, ddr
        # entire sloten region is {SLICE_X144Y0:SLICE_X145Y959 DSP48E2_X19Y0:DSP48E2_X19Y383 RAMB18_X9Y0:RAMB18_X9Y383 RAMB36_X9Y0:RAMB36_X9Y191}
        return f'SLICE_X144Y{240 * ddr}:SLICE_X145Y{240 * (ddr+1) - 1} DSP48E2_X19Y{96 * ddr}:DSP48E2_X19Y{96 * (ddr+1) - 1} RAMB18_X9Y{96 * ddr}:RAMB18_X9Y{96 * (ddr+1) - 1} RAMB36_X9Y{48 * ddr}:RAMB36_X9Y{48 * (ddr+1) - 1}'
    NAME = 'U250'
    FPGA_PART_NAME = 'xcu250-figd2104-2L-e'
    # SLR level
    SLR_NUM = 4
    SLR_AREA = defaultdict(lambda: defaultdict(list))
    SLR_AREA['BRAM'][0] = 768
    SLR_AREA['DSP'][0] = 1536
    SLR_AREA['FF'][0] = 433920
    SLR_AREA['LUT'][0] = 216960
    SLR_AREA['BRAM'][1] = 384
    SLR_AREA['DSP'][1] = 1344
    SLR_AREA['FF'][1] = 329280
    SLR_AREA['LUT'][1] = 164640
    SLR_AREA_DDR = defaultdict(lambda: defaultdict(list))
    SLR_AREA_DDR['BRAM'][0] = 768
    SLR_AREA_DDR['DSP'][0] = 1536
    SLR_AREA_DDR['FF'][0] = 433920
    SLR_AREA_DDR['LUT'][0] = 216960
    SLR_AREA_DDR['BRAM'][1] = 288
    SLR_AREA_DDR['DSP'][1] = 1152
    SLR_AREA_DDR['FF'][1] = 245760
    SLR_AREA_DDR['LUT'][1] = 122800
    CR_NUM_HORIZONTAL = 8
    CR_NUM_VERTICAL = 16
    CR_NUM_VERTICAL_PER_SLR = 4 # each die has 4 CRs vertically
    # to be compatible with U280
    ACTUAL_SLR_NUM = 4
    ACTUAL_CR_NUM_VERTICAL = 16
    # Clock Region level
    # Per-column (x = 0..7) resource counts for every clock-region row y.
    CR_AREA = defaultdict(lambda: defaultdict(dict))
    for y in range(ACTUAL_CR_NUM_VERTICAL):
        # clock regions with Laguna columns have less FF and LUT
        if y % 4 == 0 or y % 3 == 0:
            LUT_adjustment = 0
            FF_adjustment = 0
        else:
            LUT_adjustment = 960
            FF_adjustment = 1920
        CR_AREA[0][y]['BRAM'] = 48
        CR_AREA[0][y]['DSP'] = 96
        CR_AREA[0][y]['FF'] = 27840 + FF_adjustment
        CR_AREA[0][y]['LUT'] = 13920 + LUT_adjustment
        CR_AREA[0][y]['URAM'] = 0
        CR_AREA[1][y]['BRAM'] = 48
        CR_AREA[1][y]['DSP'] = 96
        CR_AREA[1][y]['FF'] = 23040 + FF_adjustment
        CR_AREA[1][y]['LUT'] = 11520 + LUT_adjustment
        CR_AREA[1][y]['URAM'] = 16
        CR_AREA[2][y]['BRAM'] = 72
        CR_AREA[2][y]['DSP'] = 120
        CR_AREA[2][y]['FF'] = 34560 + FF_adjustment
        CR_AREA[2][y]['LUT'] = 17280 + LUT_adjustment
        CR_AREA[2][y]['URAM'] = 0
        CR_AREA[3][y]['BRAM'] = 24
        CR_AREA[3][y]['DSP'] = 72
        CR_AREA[3][y]['FF'] = 19200 + FF_adjustment
        CR_AREA[3][y]['LUT'] = 9600 + LUT_adjustment
        CR_AREA[3][y]['URAM'] = 16
        CR_AREA[4][y]['BRAM'] = 48
        CR_AREA[4][y]['DSP'] = 96
        CR_AREA[4][y]['FF'] = 25920 + FF_adjustment
        CR_AREA[4][y]['LUT'] = 12960 + LUT_adjustment
        CR_AREA[4][y]['URAM'] = 16
        CR_AREA[5][y]['BRAM'] = 24
        CR_AREA[5][y]['DSP'] = 120
        CR_AREA[5][y]['FF'] = 26880 + FF_adjustment
        CR_AREA[5][y]['LUT'] = 13440 + LUT_adjustment
        CR_AREA[5][y]['URAM'] = 16
        CR_AREA[6][y]['BRAM'] = 24
        CR_AREA[6][y]['DSP'] = 120
        CR_AREA[6][y]['FF'] = 26880 + FF_adjustment
        CR_AREA[6][y]['LUT'] = 13440 + LUT_adjustment
        CR_AREA[6][y]['URAM'] = 16
        CR_AREA[7][y]['BRAM'] = 48
        CR_AREA[7][y]['DSP'] = 48
        CR_AREA[7][y]['FF'] = 24000 + FF_adjustment
        CR_AREA[7][y]['LUT'] = 12000 + LUT_adjustment
        CR_AREA[7][y]['URAM'] = 0
    # Whole-device totals for sanity checking against CR_AREA.
    TOTAL_AREA = {}
    TOTAL_AREA['BRAM'] = 5376
    TOTAL_AREA['DSP'] = 12288
    TOTAL_AREA['FF'] = 3456000
    TOTAL_AREA['LUT'] = 1728000
    TOTAL_AREA['URAM'] = 1280
    # for item in ('BRAM', 'DSP', 'FF', 'LUT', 'URAM'):
    #   assert TOTAL_AREA[item] == sum(CR_AREA[x][y][item] for x in range(8) for y in range(16))
    # note that the location of DDR 3 is not consistent with other DDRs
    DDR_TO_CLOCK_REGIONS = {
        0: 'CLOCKREGION_X4Y1:CLOCKREGION_X4Y3',
        1: 'CLOCKREGION_X4Y5:CLOCKREGION_X4Y7',
        2: 'CLOCKREGION_X4Y9:CLOCKREGION_X4Y11',
        3: 'CLOCKREGION_X4Y12:CLOCKREGION_X4Y14'
    }
    # the rightmost column
    VITIS_REGION = 'CLOCKREGION_X7Y0:CLOCKREGION_X7Y15'
class DeviceU280(DeviceBase):
    def _getVitisRegions(self, ddr_list, is_vitis_enabled):
        """Return the clock-region ranges already consumed by implicit IPs.

        Covers the region of each enabled DDR controller plus, when the
        Vitis platform is enabled, the rightmost clock-region column.
        """
        # u280 only has 2 ddrs on SLR0 and SLR1
        assert all(ddr in [0, 1] for ddr in ddr_list)

        # the area used by implicit IPs (each DDR spans 4 vertical regions)
        pre_existing_area = [
            f'CLOCKREGION_X4Y{4 * ddr}:CLOCKREGION_X4Y{4 * ddr + 3}'
            for ddr in ddr_list
        ]

        # the vitis platform will take away the rightmost column
        if is_vitis_enabled:
            pre_existing_area.append(self.VITIS_REGION)

        return pre_existing_area
NAME = 'U280'
FPGA_PART_NAME = 'xcu280-fsvh2892-2L-e'

SLR_NUM = 4  # add a pseudo SLR at the top with area 0
CR_NUM_HORIZONTAL = 8
CR_NUM_VERTICAL = 12
CR_NUM_VERTICAL_PER_SLR = 4

# Device-wide resource totals for the U280 fabric.
TOTAL_AREA = {}
TOTAL_AREA['BRAM'] = 4032
TOTAL_AREA['DSP'] = 9024
TOTAL_AREA['FF'] = 2607360
TOTAL_AREA['LUT'] = 1303680
TOTAL_AREA['URAM'] = 960
# Clock Region level: CR_AREA[x][y][resource] is the amount of `resource`
# available in the clock region at column x (0-7) and row y (0-11).
CR_AREA = defaultdict(lambda: defaultdict(dict))

# Per-column resource patterns, derived from the original literal tables
# (every derived value was checked against them, and the five per-resource
# totals reproduce TOTAL_AREA exactly):
#   - FF / LUT: rows 3, 4, 7, 8 and 11 are "short" rows holding 1920 fewer
#     FFs than the other rows of the same column; LUT is always FF / 2.
#   - DSP: the bottom row (y == 0) holds 3/4 of a regular row's DSPs.
#   - BRAM / URAM: constant within a column.
_FF_REGULAR = [29760, 24960, 36480, 21120, 27840, 28800, 28800, 25920]
_DSP_REGULAR = [96, 96, 120, 72, 96, 120, 120, 48]
_BRAM_PER_ROW = [48, 48, 72, 24, 48, 24, 24, 48]
_URAM_PER_ROW = [0, 16, 0, 16, 16, 16, 16, 0]
_SHORT_ROWS = (3, 4, 7, 8, 11)

for _x in range(8):
    for _y in range(12):
        _ff = _FF_REGULAR[_x] - (1920 if _y in _SHORT_ROWS else 0)
        CR_AREA[_x][_y]['FF'] = _ff
        CR_AREA[_x][_y]['LUT'] = _ff // 2
        _dsp = _DSP_REGULAR[_x]
        CR_AREA[_x][_y]['DSP'] = _dsp * 3 // 4 if _y == 0 else _dsp
        CR_AREA[_x][_y]['BRAM'] = _BRAM_PER_ROW[_x]
        CR_AREA[_x][_y]['URAM'] = _URAM_PER_ROW[_x]

# Drop the construction helpers so they don't become class attributes.
del _x, _y, _ff, _dsp
del _FF_REGULAR, _DSP_REGULAR, _BRAM_PER_ROW, _URAM_PER_ROW, _SHORT_ROWS

# Sanity check (disabled: a genexp in a class body cannot see class-level
# names such as CR_AREA/TOTAL_AREA):
# for item in ('BRAM', 'DSP', 'FF', 'LUT', 'URAM'):
#     assert TOTAL_AREA[item] == sum(CR_AREA[x][y][item] for x in range(8) for y in range(12))

# The rightmost column (minus the bottom and top rows) is reserved for the
# Vitis platform.
VITIS_REGION = 'CLOCKREGION_X7Y1:CLOCKREGION_X7Y11'

# Clock-region ranges consumed by each DDR controller on the U280.
DDR_TO_CLOCK_REGIONS = {
    0: 'CLOCKREGION_X4Y1:CLOCKREGION_X4Y3',
    1: 'CLOCKREGION_X4Y5:CLOCKREGION_X4Y7',
}
def getDDRSlolenRegion(self, ddr: int) -> str:
    """Return the pblock range string for DDR `ddr`'s quarter of the
    "sloten" column.

    The full region is split vertically into four equal quarters, one
    per DDR index 0-3.
    """
    assert 0 <= ddr <= 3, ddr
    # entire sloten region is {SLICE_X144Y0:SLICE_X145Y959 DSP48E2_X19Y0:DSP48E2_X19Y383 RAMB18_X9Y0:RAMB18_X9Y383 RAMB36_X9Y0:RAMB36_X9Y191}
    slice_lo, slice_hi = 240 * ddr, 240 * (ddr + 1) - 1
    dsp_lo, dsp_hi = 96 * ddr, 96 * (ddr + 1) - 1
    ramb18_lo, ramb18_hi = 96 * ddr, 96 * (ddr + 1) - 1
    ramb36_lo, ramb36_hi = 48 * ddr, 48 * (ddr + 1) - 1
    return (f'SLICE_X144Y{slice_lo}:SLICE_X145Y{slice_hi} '
            f'DSP48E2_X19Y{dsp_lo}:DSP48E2_X19Y{dsp_hi} '
            f'RAMB18_X9Y{ramb18_lo}:RAMB18_X9Y{ramb18_hi} '
            f'RAMB36_X9Y{ramb36_lo}:RAMB36_X9Y{ramb36_hi}')
class DeviceManager:
    """Factory that maps a board name to its Device* implementation."""

    def __init__(self, board_name, ddr_list=None, is_vitis_enabled=False):
        """Create the device object for `board_name` ('U250' or 'U280').

        `ddr_list` defaults to no DDRs.  A fresh list is created per call
        instead of using a mutable default argument, which would be shared
        across all instances.
        """
        if ddr_list is None:
            ddr_list = []
        if board_name == 'U250':
            self.board = DeviceU250(ddr_list, is_vitis_enabled)
        elif board_name == 'U280':
            self.board = DeviceU280(ddr_list, is_vitis_enabled)
        else:
            assert False, f'unsupported device: {board_name}'

    def getBoard(self):
        """Return the constructed device object."""
        return self.board
207343 | # -*- coding: utf-8 -*-
"""
fixtures.py
This module is for storing all of the relavant fixtures used in testing.
"""
from .fixtures_data import (
JSON15min2day,
two_sites_two_params_iv,
nothing_avail,
mult_flags,
diff_freq,
startDST,
endDST,
)
from .fixtures_daily_dupe import daily_dupe, daily_dupe_altered
from .fixtures_multiple_methods import multi_meth
from .fixtures_tzfail import tzfail
from .fixtures_recent_only import recent_only
from .fixtures_usgs_rdb import (
field_fixture,
rating_fixture,
peaks_fixture,
parsing_error_fixture,
)
class fakeResponse(object):
    """Minimal stand-in for a `requests.Response` used in tests."""

    def __init__(
        self,
        code=200,
        url="fake url",
        reason="fake reason",
        text="fake text",
        json=JSON15min2day,
    ):
        self.status_code = code
        self.url = url
        self.reason = reason
        self.text = text
        # .json will return a function
        # .json() will return JSON15min2day
        self.json = lambda: json
        # Mirror requests: `ok` is True exactly for a 200 status.
        self.ok = self.status_code == 200

    def raise_for_status(self):
        """Return (not raise) the status code; tests inspect the value."""
        return self.status_code
| StarcoderdataPython |
1601282 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from re import sub
import sublime_plugin
from ..api import deviot
from ..libraries.messages import Messages
from ..libraries.thread_progress import ThreadProgress
class DeviotCheckPioUpdatesCommand(sublime_plugin.WindowCommand):
    """Sublime command: asynchronously check whether PlatformIO has updates."""
    def run(self):
        Update().check_update_async()
class DeviotUpdatePioCommand(sublime_plugin.WindowCommand):
    """Sublime command: asynchronously upgrade PlatformIO."""
    def run(self):
        Update().update_async()
class DeviotDevPioCommand(sublime_plugin.WindowCommand):
    """Sublime command: asynchronously toggle stable/developer PlatformIO."""
    def run(self):
        Update().developer_async()
class Update:
    """Update PlatformIO.

    Upgrades PlatformIO (`update_pio`) or installs the developer branch
    (`developer_pio`).  Both long-running operations are run in a separate
    thread (`update_async`, `developer_async`, `check_update_async`) to
    avoid blocking the Sublime Text UI.
    """

    def __init__(self):
        super(Update, self).__init__()
        self.cwd = None
        # Bound to the message panel's print function by show_feedback().
        self.dprint = None
        self.env_paths = deviot.get_sysetting('env_paths', False)

    def _run_async(self, target):
        """Run `target` in a worker thread with a status-bar progress bar.

        Shared by the three *_async entry points, which previously
        duplicated this spawn logic.
        """
        from threading import Thread
        thread = Thread(target=target)
        thread.start()
        ThreadProgress(thread, 'processing', '')

    def show_feedback(self):
        """Open the Deviot message panel and bind `self.dprint` to it."""
        messages = Messages()
        messages.initial_text("_deviot_{0}", deviot.version())
        messages.create_panel()
        self.dprint = messages.print

    def update_pio(self):
        """Update PlatformIO to the latest version (blocks the thread)."""
        self.show_feedback()
        self.dprint('searching_pio_updates')

        cmd = deviot.pio_command(['upgrade'])
        out = deviot.run_command(cmd)

        self.dprint(out[1])

    def update_async(self):
        """Run `update_pio` in a background thread."""
        self._run_async(self.update_pio)

    def developer_async(self):
        """Run `developer_pio` in a background thread."""
        self._run_async(self.developer_pio)

    def developer_pio(self):
        """Switch between the stable and developer PlatformIO.

        Uninstalls the current version of platformio, installs the other
        flavor based on the stored `pio_developer` preference, and toggles
        the preference on success.
        """
        self.show_feedback()
        self.dprint('uninstall_old_pio')

        cmd = ['pip', 'uninstall', '--yes', 'platformio']
        out = deviot.run_command(cmd)

        developer = deviot.get_sysetting('pio_developer', False)

        if not developer:
            self.dprint('installing_dev_pio')
            option = 'https://github.com/platformio/' \
                     'platformio/archive/develop.zip'
        else:
            self.dprint('installing_stable_pio')
            option = 'platformio'

        cmd = deviot.prepare_command(['pip', 'install', '-U', option])
        out = deviot.run_command(cmd)

        if out[0] == 0:
            self.dprint('button_ok')
            deviot.save_sysetting('pio_developer', not developer)
        else:
            self.dprint('setup_error')

    def check_update_async(self):
        """Run `check_update` in a background thread."""
        self._run_async(self.check_update)

    def check_update(self):
        """Check for PlatformIO updates at most once every 5 days.

        The installed version is read from `pio --version`; the latest
        version is queried from PyPI.  If a newer one exists, the user is
        prompted and, on confirmation, the upgrade is run.
        """
        installed = deviot.get_sysetting('installed', False)
        if not installed:
            return

        from datetime import datetime, timedelta

        date_now = datetime.now()
        last_check = deviot.get_sysetting('last_check_update', False)

        try:
            last_check = datetime.strptime(last_check, '%Y-%m-%d %H:%M:%S.%f')
            if date_now < last_check:
                return
        except TypeError:
            # No previous check stored (last_check is False/None).
            pass

        if not last_check or date_now > last_check:
            last_check = date_now + timedelta(5, 0)  # re-check in 5 days
            deviot.save_sysetting('last_check_update', str(last_check))

        cmd = deviot.pio_command(['--version'])
        out = deviot.run_command(cmd, env_paths=self.env_paths)
        # Collapse e.g. "PlatformIO, version 3.5.2" into the int 352.
        pio_version = int(sub(r'\D', '', out[1]))

        last_pio_version = self.online_pio_version()

        if pio_version < last_pio_version:
            from sublime import ok_cancel_dialog
            from ..libraries.I18n import I18n

            translate = I18n().translate
            update = ok_cancel_dialog(translate('new_pio_update{0}{1}',
                                                last_pio_version,
                                                pio_version),
                                      translate('update_button'))
            if update:
                self.show_feedback()
                self.update_pio()

    def online_pio_version(self):
        """Return the latest PlatformIO version on PyPI as an int (0 on error)."""
        from urllib.request import Request
        from urllib.request import urlopen
        from urllib.error import HTTPError
        from json import loads

        try:
            url = 'https://pypi.python.org/pypi/platformio/json'
            req = Request(url, headers=deviot.header())
            response = urlopen(req)
            pypi_list = loads(response.read().decode())
            last_pio_version = pypi_list['info']['version']
        except (KeyError, HTTPError):
            return 0

        return int(sub(r'\D', '', last_pio_version))
| StarcoderdataPython |
6682468 | <filename>polling_stations/apps/data_collection/management/commands/import_malvern_hills.py
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Importer config for Malvern Hills council (GSS code E07000235).

    Pure configuration: the Xpress Democracy Club CSV importer base class
    does all the work; addresses and stations come from the same CSV file.
    """
    council_id = 'E07000235'
    addresses_name = 'parl.2017-06-08/Version 1/Malvern Hills Democracy_Club__08June2017.CSV'
    stations_name = 'parl.2017-06-08/Version 1/Malvern Hills Democracy_Club__08June2017.CSV'
    elections = ['parl.2017-06-08']
| StarcoderdataPython |
265987 | # coding=UTF-8
from __future__ import print_function, absolute_import, division
import datetime
import falcon
import logging
import six
import time
from falcon import testing
from freezegun import freeze_time
from falconratelimit import rate_limit
logger = logging.getLogger(__name__)
class NoRedisResource(object):
    """POST endpoint limited in-memory (no redis): 1 req/s over a 5 s window."""
    @falcon.before(rate_limit(per_second=1, window_size=5))
    def on_post(self, req, resp):
        resp.status = falcon.HTTP_200
class RedisResource(object):
    """GET endpoint limited via redis: 1 req/s over a 5 s window."""
    @falcon.before(rate_limit(redis_url='localhost:6379', per_second=1, window_size=5, resource='on_get'))
    def on_get(self, req, resp):
        resp.status = falcon.HTTP_200
# Module-level WSGI app under test; one route per limiter flavour.
app = falcon.API()
app.add_route('/noredis', NoRedisResource())
app.add_route('/redis', RedisResource())
class TestRatelimit(testing.TestCase):
    """Exercises both limiter flavours through the falcon test client."""

    def setUp(self):
        super(TestRatelimit, self).setUp()
        self.app = app

    def _check_rate_limit(self, simulate, path):
        """Shared scenario for both endpoints (was duplicated per test).

        With time frozen: the first request passes, 5 more requests within
        the 5 s window trip the limiter (429), and a request 10 s later
        passes again.
        """
        with freeze_time("2018-01-01 00:00:00") as frozen_datetime:
            resp = simulate(path)
            self.assertEqual(resp.status, falcon.HTTP_200)
            for _ in range(4):
                frozen_datetime.tick()
                simulate(path)
            frozen_datetime.tick()
            resp = simulate(path)
            self.assertEqual(resp.status, falcon.HTTP_429)
        with freeze_time("2018-01-01 00:00:10"):
            resp = simulate(path)
            self.assertEqual(resp.status, falcon.HTTP_200)

    def test_limit_ok(self):
        self._check_rate_limit(self.simulate_post, '/noredis')

    def test_get_rate_limit(self):
        self._check_rate_limit(self.simulate_get, '/redis')
| StarcoderdataPython |
11253432 | <filename>models/wide_resnet.py
# network definition
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# wildcard import for legacy reasons
from .blocks import *
def parse_options(convtype, blocktype):
# legacy cmdline argument parsing
if isinstance(convtype, str):
conv = conv_function(convtype)
elif isinstance(convtype, list):
conv = [conv_function(item) for item in convtype]
else:
raise NotImplementedError("conv must be a string or list")
if isinstance(blocktype, str):
block = block_function(blocktype)
elif isinstance(blocktype, list):
block = [block_function(item) for item in blocktype]
else:
raise NotImplementedError("conv must be a string or list")
return conv, block
class WideResNet(nn.Module):
    """Wide Residual Network (WRN-depth-widen_factor) with pluggable
    conv/block types and each group optionally split into `s` sub-blocks.

    forward() returns ``(logits, activations)`` where `activations` holds
    the output tensor of every sub-block (e.g. for distillation losses).
    """

    def __init__(
        self,
        depth,            # total depth; must satisfy (depth - 4) % 6 == 0
        widen_factor,     # channel multiplier k in WRN-d-k
        conv=Conv,        # conv factory used for every layer unless `convs` given
        block=BasicBlock, # residual block type passed to NetworkBlock
        num_classes=10,
        dropRate=0.0,
        s=1,              # sub-blocks per group; n % s must be 0
        convs=[],         # optional per-layer conv factories (read-only default)
        masked=False,
        darts=False,
    ):
        super(WideResNet, self).__init__()
        self.depth = depth
        self.widen_factor = widen_factor
        # Channel widths for the stem and the three groups.
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        nChannels = [int(a) for a in nChannels]

        # for indexing conv list
        l = 0

        assert (depth - 4) % 6 == 0
        n = (depth - 4) // 6  # residual blocks per group
        assert n % s == 0, "n mod s must be zero"

        # 1st conv before any network block
        self.conv1 = nn.Conv2d(
            3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False
        )

        nb_layers = n
        self.nb_layers = nb_layers

        # Default: the same conv factory everywhere.
        if len(convs) == 0:
            convs = [conv for i in range(2 * nb_layers * s * 3)]
        self.convs = convs

        # 1st block
        self.block1 = torch.nn.ModuleList()
        for i in range(s):
            self.block1.append(
                NetworkBlock(
                    nb_layers,
                    nChannels[0] if i == 0 else nChannels[1],
                    nChannels[1],
                    1,
                    dropRate,
                    convs[l : l + nb_layers],
                    masked=masked,
                    darts=darts,
                )
            )
        # NOTE(review): `l` only advances after the sub-block loop, so for
        # s > 1 all sub-blocks of a group share the same convs[l:l+nb_layers]
        # slice — confirm this is intended.
        l += nb_layers * s

        # 2nd block (stride 2 on the first sub-block to downsample)
        self.block2 = torch.nn.ModuleList()
        for i in range(s):
            self.block2.append(
                NetworkBlock(
                    nb_layers,
                    nChannels[1] if i == 0 else nChannels[2],
                    nChannels[2],
                    2 if i == 0 else 1,
                    dropRate,
                    convs[l : l + nb_layers],
                    masked=masked,
                    darts=darts,
                )
            )
        l += nb_layers * s

        # 3rd block (stride 2 on the first sub-block to downsample)
        self.block3 = torch.nn.ModuleList()
        for i in range(s):
            self.block3.append(
                NetworkBlock(
                    nb_layers,
                    nChannels[2] if i == 0 else nChannels[3],
                    nChannels[3],
                    2 if i == 0 else 1,
                    dropRate,
                    convs[l : l + nb_layers],
                    masked=masked,
                    darts=darts,
                )
            )
        l += nb_layers * s

        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # Set on every forward() call; None until the first one.
        self.input_spatial_dims = None

        # normal is better than uniform initialisation
        # this should really be in `self.reset_parameters`
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                try:
                    # He-style init; some custom conv types may not expose
                    # a plain .weight, hence the AttributeError guard.
                    m.weight.data.normal_(0, math.sqrt(2.0 / n))
                except AttributeError:
                    pass
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def _get_convs(self):
        """Return a shallow copy of the per-layer conv factory list."""
        cs = []
        for c in self.convs:
            cs.append(c)
        return cs

    def forward(self, x):
        """Return (logits, activations); records the input size as a side effect."""
        self.input_spatial_dims = x.size()
        activations = []
        out = self.conv1(x)
        # activations.append(out)
        for sub_block in self.block1:
            out = sub_block(out)
            activations.append(out)
        for sub_block in self.block2:
            out = sub_block(out)
            activations.append(out)
        for sub_block in self.block3:
            out = sub_block(out)
            activations.append(out)
        out = self.relu(self.bn1(out))
        # assumes 8x8 spatial output, i.e. 32x32 input — TODO confirm
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out), activations
def test():
    """Smoke test: forward one CIFAR-sized batch through a WRN-40-2."""
    model = WideResNet(40, 2, Conv, BasicBlock)
    batch = torch.randn(1, 3, 32, 32)
    logits, _ = model(Variable(batch))
    print(logits.size())


if __name__ == "__main__":
    test()
| StarcoderdataPython |
5003071 | <filename>tests/correctness/targets/SampleBuildFile/Input/root.xpybuild.py
from xpybuild.propertysupport import *
from xpybuild.buildcommon import *
from xpybuild.pathsets import *
from xpybuild.targets.java import *
from xpybuild.targets.copy import *
from xpybuild.targets.archive import *
# xpybuild properties are immutable substitution values
# which can be overridden on the command line if needed
# (type can be string/path/outputdir/list/enumeration/bool)
defineStringProperty('APP_VERSION', '1.0.0.0')
defineOutputDirProperty('OUTPUT_DIR', 'build-output')
definePathProperty('MY_DEPENDENT_LIBRARY_DIR', './libs', mustExist=True)

# Build the application jar from all sources plus a version-stamped file.
Jar('${OUTPUT_DIR}/myapp.jar',
    # FindPaths walks a directory tree, supporting complex ant-style globbing patterns for include/exclude
    compile=[
        FindPaths('./src/', excludes=['**/VersionConstants.java']),
        '${BUILD_WORK_DIR}/filtered-java-src/VersionConstants.java',
    ],
    # DirBasedPathSet statically lists dependent paths under a directory
    classpath=[DirBasedPathSet('${MY_DEPENDENT_LIBRARY_DIR}/', 'mydep-api.jar', 'mydep-core.jar')],
    # Specify Jar-specific key/values for the MANIFEST.MF (in addition to any set globally via options)
    manifest={'Implementation-Title':'My Amazing Java Application'},
).tags('myapp') # tags make it easy to build a subset of targets on the command line

# Stamp APP_VERSION into the source file consumed by the Jar target above.
FilteredCopy('${BUILD_WORK_DIR}/filtered-java-src/VersionConstants.java', './src/VersionConstants.java',
    StringReplaceLineMapper('@APP_VERSION@', '${APP_VERSION}'),
)

# Global 'options' provide an easy way to apply common settings to all targets;
# options can be overridden for individual targets using `BaseTarget.option(key,value)`
setGlobalOption('jar.manifest.defaults', {'Implementation-Version': '${APP_VERSION}'})

# Ship the jar together with the license files in one distributable zip.
Zip('${OUTPUT_DIR}/myapp-${APP_VERSION}.zip', [
    '${OUTPUT_DIR}/myapp.jar',
    # The xpybuild "PathSet" concept provides a powerful way to specify sets of source paths,
    # and to map each to a corresponding destination (in this case by adding on a prefix)
    AddDestPrefix('licenses/', FindPaths('./license-files/', includes='**/*.txt'))
])

# In a large build, you'd split your build across multiple files, included like this:
include('subdir/otherbits.xpybuild.py')
| StarcoderdataPython |
9718010 | <reponame>haideraltahan/datasets
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for binarized_mnist dataset module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.image import binarized_mnist
# testing/binarized_mnist.py generates fake input data
class MNISTTest(testing.DatasetBuilderTestCase):
    """Exercises the BinarizedMNIST builder against the fake data."""
    DATASET_CLASS = binarized_mnist.BinarizedMNIST
    # Expected example count per split in the fake data.
    SPLITS = {
        "train": 10,
        "validation": 2,
        "test": 2,
    }
    # Maps download-manager keys to the fake extracted filenames.
    DL_EXTRACT_RESULT = {
        "train_data": binarized_mnist._TRAIN_DATA_FILENAME,
        "validation_data": binarized_mnist._VALID_DATA_FILENAME,
        "test_data": binarized_mnist._TEST_DATA_FILENAME,
    }


if __name__ == "__main__":
    testing.test_main()
| StarcoderdataPython |
_base_ = "base.py"

# Prefix prepended to the annotation file paths below (empty here).
root = ''

data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        # Labeled (supervised) split, selected by the ${fold}/${percent}
        # template variables defined near the bottom of this file.
        sup=dict(
            type="CocoDataset",
            ann_file=root + "../data/kaggle/annotations/semi_supervised/instances_train2021.${fold}@${percent}.json",
            img_prefix="/home/ace19/dl_data/sartorius-cell-instance-segmentation/train/",
        ),
        # Unlabeled split used by the semi-supervised branch.
        unsup=dict(
            type="CocoDataset",
            ann_file=root + "../data/kaggle/annotations/semi_supervised/instances_unlabeled2021.json",
            img_prefix="/home/ace19/dl_data/sartorius-cell-instance-segmentation/train_semi_supervised/",
        ),
    ),
    sampler=dict(
        train=dict(
            # 1 labeled : 4 unlabeled images per batch.
            sample_ratio=[1, 4],
        )
    ),
)

# Template variables: fold seed and labeled-data percentage.
fold = 42
percent = 25

work_dir = "work_dirs/${cfg_name}/${percent}/${fold}"

log_config = dict(
    interval=50,
    hooks=[
        dict(type="TextLoggerHook"),
        # dict(
        #     type="WandbLoggerHook",
        #     init_kwargs=dict(
        #         project="pre_release",
        #         name="${cfg_name}",
        #         config=dict(
        #             fold="${fold}",
        #             percent="${percent}",
        #             work_dirs="${work_dir}",
        #             total_step="${runner.max_iters}",
        #         ),
        #     ),
        #     by_epoch=False,
        # ),
    ],
)

# COCO-pretrained Mask R-CNN R50-FPN weights used as initialization.
load_from='https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_bbox_mAP-0.403__segm_mAP-0.365_20200504_231822-a75c98ce.pth'
| StarcoderdataPython |
379820 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module handles :ref:`catalog spectra <stsynphot-spec-atlas>`."""
# STDLIB
import numbers
# THIRD-PARTY
import numpy as np
# ASTROPY
from astropy import units as u
from astropy.io import fits
# SYNPHOT
from synphot import exceptions as synexceptions
from synphot import units
from synphot.spectrum import SourceSpectrum
from synphot.utils import validate_totalflux
# LOCAL
from . import exceptions, stio
__all__ = ['reset_cache', 'get_catalog_index', 'grid_to_spec',
           'find_valid_g_phoenix', 'plot_phoenix']
# Human-readable names of the grid axes, in column order (used in errors).
_PARAM_NAMES = ['T_eff', 'metallicity', 'log_g']
_CACHE = {}  # Stores grid look-up parameters to reduce file I/O.
def reset_cache():
    """Clear the cached catalog grid look-up tables.

    ``dict.clear`` mutates the module-level cache in place, so no
    ``global`` declaration is required.
    """
    _CACHE.clear()
def _par_from_parser(x):
    """Return ``x`` unchanged if it is already numeric, else coerce to float.

    Parser output arrives as strings; real numbers and Quantity are
    passed through untouched.
    """
    if isinstance(x, (numbers.Real, u.Quantity)):
        return x
    return float(x)
def _break_list(in_list, index, parameter):
"""Break input list into upper and lower lists."""
array = np.array([parameters[index] for parameters in in_list],
dtype=np.float64)
upper_array = array[array >= parameter]
lower_array = array[array <= parameter]
if upper_array.size == 0:
raise exceptions.ParameterOutOfBounds(
f"Parameter '{_PARAM_NAMES[index]}' exceeds data. "
f"Max allowed={array.max()}, entered={parameter}.")
if lower_array.size == 0:
raise exceptions.ParameterOutOfBounds(
f"Parameter '{_PARAM_NAMES[index]}' exceeds data. "
f"Min allowed={array.min()}, entered={parameter}.")
upper = upper_array.min()
lower = lower_array.max()
upper_list = []
lower_list = []
for i, parameters in enumerate(in_list):
if array[i] >= parameter and array[i] <= upper:
upper_list.append(parameters)
if array[i] >= lower and array[i] <= parameter:
lower_list.append(parameters)
return upper_list, lower_list
def _get_spectrum(parlist, catdir):
    """Load the spectrum referenced by a grid row.

    ``parlist`` ends with a ``'relative/path[fluxcolumn]'`` reference; the
    returned list carries the same leading grid parameters with that
    reference replaced by the loaded `~synphot.spectrum.SourceSpectrum`.
    Raises `~stsynphot.exceptions.ParameterOutOfBounds` when the loaded
    spectrum has no valid total flux.
    """
    ref = parlist[3]
    pieces = ref.split('[')
    base = pieces[0]
    column = pieces[1][:-1]  # strip trailing ']'
    filename = stio.resolve_filename(catdir, *base.split('/'))
    sp = SourceSpectrum.from_file(filename, flux_col=column)
    try:
        validate_totalflux(sp.integrate())
    except synexceptions.SynphotError:
        raise exceptions.ParameterOutOfBounds(
            f"Parameter '{parlist}' has no valid data.")
    return parlist[:-1] + [sp]
def _interpolate_spectrum(sp1, sp2, par):
"""Interpolate spectra to the given parameter value."""
spectrum1 = sp1.pop()
spectrum2 = sp2.pop()
par1 = sp1.pop()
par2 = sp2.pop()
if par1 == par2:
sp = spectrum1
else:
a = (par1 - par) / (par1 - par2)
b = 1.0 - a
sp = a * spectrum2 + b * spectrum1
result = [member for member in sp1]
result.append(sp)
return result
def get_catalog_index(gridname):
    """Extract catalog index (grid parameters) for ``gridname``.

    The index is read from ``catalog.fits`` once and then served from the
    module cache until the cache is cleared explicitly using
    :func:`reset_cache`.

    Parameters
    ----------
    gridname : str
        See :func:`grid_to_spec`.

    Returns
    -------
    cat_index : list
        List of ``[t_eff, metallicity, log_g, filename]``.

    catdir : str
        Directory containing the requested catalog.

    """
    catalog_roots = {'ck04models': 'crgridck04$',
                     'k93models': 'crgridk93$',
                     'phoenix': 'crgridphoenix$'}
    if gridname not in catalog_roots:
        raise synexceptions.SynphotError(
            f'{gridname} is not a supported catalog grid.')
    catdir = stio.irafconvert(catalog_roots[gridname])
    filename = stio.resolve_filename(catdir, 'catalog.fits')
    # Read from the grid catalog only on a cache miss.
    if filename not in _CACHE:
        table = stio.read_catalog(filename)  # EXT 1
        _CACHE[filename] = [
            list(map(float, key.split(','))) + [table['FILENAME'][i]]
            for i, key in enumerate(table['INDEX'])]
    return _CACHE[filename], catdir
def grid_to_spec(gridname, t_eff, metallicity, log_g):
    """Extract spectrum from given catalog grid parameters.
    Interpolate if necessary.
    Grid parameters are read with :func:`get_catalog_index`.
    Parameters
    ----------
    gridname : {'ck04models', 'k93models', 'phoenix'}
        Model to use:
        * ``ck04models`` - Castelli & Kurucz (2004)
        * ``k93models`` - Kurucz (1993)
        * ``phoenix`` - Allard et al. (2009)
    t_eff : str, float or `astropy.units.quantity.Quantity`
        Effective temperature of model.
        If not Quantity, assumed to be in Kelvin.
        If string (from parser), convert to Quantity.
    metallicity : str or float
        Metallicity of model.
        If string (from parser), convert to float.
    log_g : str or float
        Log surface gravity for model.
        If string (from parser), convert to float.
    Returns
    -------
    sp : `synphot.spectrum.SourceSpectrum`
        Empirical source spectrum.
    Raises
    ------
    stsynphot.exceptions.ParameterOutOfBounds
        Grid parameter out of bounds.
    synphot.exceptions.SynphotError
        Invalid inputs.
    """
    indices, catdir = get_catalog_index(gridname)
    # Metallicity and log_g are dimensionless: Quantity input is rejected.
    metallicity = _par_from_parser(metallicity)
    if isinstance(metallicity, u.Quantity):
        raise synexceptions.SynphotError(
            'Quantity is not supported for metallicity.')
    log_g = _par_from_parser(log_g)
    if isinstance(log_g, u.Quantity):
        raise synexceptions.SynphotError(
            'Quantity is not supported for log surface gravity.')
    t_eff = units.validate_quantity(_par_from_parser(t_eff), u.K).value
    # Successive bisection of the grid: bracket T_eff (column 0), then
    # metallicity (column 1) within each T_eff bound, then log_g
    # (column 2) within each of those -- eight corner lists in total.
    list0, list1 = _break_list(indices, 0, t_eff)
    list2, list3 = _break_list(list0, 1, metallicity)
    list4, list5 = _break_list(list1, 1, metallicity)
    list6, list7 = _break_list(list2, 2, log_g)
    list8, list9 = _break_list(list3, 2, log_g)
    list10, list11 = _break_list(list4, 2, log_g)
    list12, list13 = _break_list(list5, 2, log_g)
    # Load the spectrum at each corner of the bracketing cube.
    sp1 = _get_spectrum(list6[0], catdir)
    sp2 = _get_spectrum(list7[0], catdir)
    sp3 = _get_spectrum(list8[0], catdir)
    sp4 = _get_spectrum(list9[0], catdir)
    sp5 = _get_spectrum(list10[0], catdir)
    sp6 = _get_spectrum(list11[0], catdir)
    sp7 = _get_spectrum(list12[0], catdir)
    sp8 = _get_spectrum(list13[0], catdir)
    # Collapse the cube by linear interpolation: first along log_g,
    # then along metallicity, finally along T_eff.
    spa1 = _interpolate_spectrum(sp1, sp2, log_g)
    spa2 = _interpolate_spectrum(sp3, sp4, log_g)
    spa3 = _interpolate_spectrum(sp5, sp6, log_g)
    spa4 = _interpolate_spectrum(sp7, sp8, log_g)
    spa5 = _interpolate_spectrum(spa1, spa2, metallicity)
    spa6 = _interpolate_spectrum(spa3, spa4, metallicity)
    spa7 = _interpolate_spectrum(spa5, spa6, t_eff)
    sp = spa7[0]
    sp.meta['expr'] = (f'{gridname}(T_eff={t_eff:g},'
                       f'metallicity={metallicity:g},log_g={log_g:g})')
    return sp
# NOTE: The following functions need refactoring if you want to generalize
# them to work on all the supported catalogs. This is because the catalogs
# do not share the same header structure. Generalization requires reading all
# the spectra tables, not just header, which will have a significant
# performance impact.
# Also see https://github.com/spacetelescope/stsynphot_refactor/issues/45
def find_valid_g_phoenix():  # pragma: no cover
    """Find valid ``log_g`` values in the Phoenix catalog.

    .. note::

        Takes time to run because it has to find the gaps by
        parsing the headers of individual data files in the catalog.

    Returns
    -------
    valid_g : dict
        Dictionary mapping ``(t_eff, metallicity)`` to a list of valid
        ``log_g`` values.

    uniq_metallicity : list
        Unique metallicity values in the catalog.

    """
    indices, catdir = get_catalog_index('phoenix')
    spec_files = {}
    all_g = set()
    metallicities = set()
    for t_eff, metallicity, log_g, ref in indices:
        metallicities.add(metallicity)
        all_g.add(log_g)
        # Remember the first data file seen for each (T_eff, [M/H]) pair.
        spec_files.setdefault((t_eff, metallicity), ref.split('[')[0])
    valid_g = {}
    for key, spfile in spec_files.items():
        # The LOGG* header cards of each data file list the gravities
        # actually present; keep only those that appear in the index.
        logg_cards = fits.getheader(stio.resolve_filename(
            catdir, *spfile.split('/')))['LOGG*']
        valid_g[key] = [float(v) for v in logg_cards.values()
                       if float(v) in all_g]
    return valid_g, sorted(metallicities)
def plot_phoenix(filename=''):  # pragma: no cover
    """Visualize the Phoenix catalog index (grid parameters).
    .. note:: Uses ``matplotlib``.
    .. note::
        Takes time to run because it has to find the gaps by
        parsing the headers of individual data files in the catalog.
    Parameters
    ----------
    filename : str
        If provided, plot is saved to given filename.
    """
    import matplotlib.pyplot as plt
    valid_g, uniq_metallicity = find_valid_g_phoenix()
    n_plots = len(uniq_metallicity)
    ax_map = {}
    # One subplot per metallicity value, on a fixed 3x4 grid.
    fig, axes = plt.subplots(nrows=3, ncols=4)
    axes = axes.flatten()
    n_axes = axes.size
    if n_plots > n_axes:
        raise ValueError(f'Need {n_plots} but only {n_axes} subplots created')
    for i, metallicity in enumerate(uniq_metallicity):
        ax_map[metallicity] = axes[i]
    # Drop the unused trailing subplots.
    for i in range(n_plots, n_axes):
        axes[i].remove()
    # Plot each (T_eff, log_g) point on the subplot for its metallicity.
    for key, log_g in valid_g.items():
        metallicity = key[1]
        t_eff = [key[0]] * len(log_g)
        ax = ax_map[metallicity]
        ax.plot(t_eff, log_g, 'b,')
        ax.set_xlabel(r'$T_{\mathrm{eff}}$')
        ax.set_ylabel(r'$\log g$')
        ax.set_title(f'[M/H] = {metallicity:.2f}')
        ax.axvline(0, ls='--', lw=0.5, c='k')
    plt.tight_layout()
    plt.draw()
    if filename:
        fig.savefig(filename)
| StarcoderdataPython |
def filter_list(l):
    """Return a new list containing only the plain integers of *l*.

    The original comparison ``type(x) == type(1)`` is non-idiomatic; it
    kept only exact ``int`` objects, notably excluding ``bool`` (a
    subclass of ``int``).  That behavior is preserved here, which is why
    a bare ``isinstance(x, int)`` is not enough.
    """
    return [x for x in l if isinstance(x, int) and not isinstance(x, bool)]
# utils/conf.py (from vanvibig/chatbotAL)
__author__ = 'liuyuemaicha'
import os
class disc_config(object):
    """Hyper-parameters for the discriminator model."""
    # batch_size = 256
    batch_size = 16
    lr = 0.001  # initial learning rate
    lr_decay = 0.9  # multiplicative learning-rate decay factor
    embed_dim = 512  # embedding dimension
    steps_per_checkpoint = 1
    #hidden_neural_size = 128
    num_layers = 2  # number of RNN layers
    train_dir = './disc_data/'
    name_model = "disc_model"
    tensorboard_dir = "./tensorboard/disc_log/"
    name_loss = "disc_loss"
    max_len = 50  # maximum sequence length
    # Number of examples gathered between checkpoints.
    piece_size = batch_size * steps_per_checkpoint
    piece_dir = "./disc_data/batch_piece/"
    #query_len = 0
    valid_num = 100  # validation set size
    init_scale = 0.1  # uniform weight-initialization range
    num_class = 2  # binary real/fake classification
    keep_prob = 0.5  # dropout keep probability
    #num_epoch = 60
    #max_decay_epoch = 30
    max_grad_norm = 5  # gradient clipping threshold
    # (query_len, answer_len) bucket sizes shared with the generator.
    buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
    epoch_num = 100
class gen_config(object):
    """Hyper-parameters for the generator (seq2seq) model."""
    # batch_size = 128
    batch_size = 8
    beam_size = 7  # beam width for beam-search decoding
    learning_rate = 0.001
    learning_rate_decay_factor = 0.99
    max_gradient_norm = 5.0  # gradient clipping threshold
    disc_data_batch_num = 100  # batches of data generated for the discriminator
    emb_dim = 512  # embedding dimension
    num_layers = 2  # number of RNN layers
    train_dir = "./gen_data/"
    name_model = "gen_model"
    tensorboard_dir = "./tensorboard/gen_log/"
    name_loss = "gen_loss"
    teacher_loss = "teacher_loss"
    reward_name = "reward"
    max_train_data_size = 0  # 0 means no limit
    steps_per_checkpoint = 1
    # bucket->(source_size, target_size), source is the query, target is the answer
    buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
    buckets_concat = [(5, 10), (10, 15), (20, 25), (40, 50), (100, 50)]
| StarcoderdataPython |
# Copyright (c) 2021 zfit
import tensorflow_probability as tfp
import zfit_interface.variables
import zfit.util.container
@tfp.experimental.auto_composite_tensor()
class VarSupports(tfp.experimental.AutoCompositeTensor):
    """Declares which kinds of values a variable ``var`` supports.

    Exactly one of three mutually exclusive forms must be used: an
    explicit list of ``types``, blanket ``full`` support, or any
    combination of the individual capability flags (``space``,
    ``scalar``, ``vectorspace``, ``binned``, ``data``).
    ``data`` implies ``scalar`` and ``vectorspace`` implies ``space``.
    """

    def __init__(self, var, *, full=None, space=None, scalar=None, vectorspace=None, binned=None, data=None,
                 types=None):
        types = zfit.util.container.convert_to_container(types)
        if types:
            if full or space or binned or data or scalar or vectorspace:
                # Fix: the original raised a bare ValueError with no message.
                raise ValueError(
                    "'types' cannot be combined with 'full' or any "
                    "individual support flag.")
        elif full:
            # NOTE: 'types' is necessarily falsy on this branch, so the
            # original's dead 'or types' test was dropped from the condition.
            if space or binned or data or scalar or vectorspace:
                raise ValueError(
                    "'full' cannot be combined with any individual "
                    "support flag.")
        elif not (space or scalar or vectorspace or binned or data):
            raise ValueError("Need to support at least something.")
        if data:
            scalar = True  # data support implies scalar support
        if vectorspace:
            space = True  # a vector space implies space support
        if not isinstance(var, zfit_interface.variables.ZfitVar):
            raise TypeError(f"var has to be a ZfitVar, not {var}.")
        self.var = var
        self.full = full or False
        self.scalar = scalar or False
        self.vectorspace = vectorspace or False
        self.space = space or False
        self.binned = binned or False
        self.data = data or False
        self.types = types or []
| StarcoderdataPython |
# abcgraph.py
# Copyright (c) 2015 The MITRE Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import sys
import cgi
import json
import hashlib
import binascii
import argparse
from abcd import ABCParser
from abcd.ABCParser import ABCdException as ABCdException
from swf.movie import SWF
import signal
import traceback
# Debug aid: sending SIGUSR1 to the process prints the current stack trace.
signal.signal(signal.SIGUSR1, lambda sig, stack: traceback.print_stack(stack))
def b2a_printable(s):
    """Return *s* with every non-printable-ASCII byte replaced by '.'."""
    return ''.join(chr(c) if 0x20 <= c <= 0x7e else '.' for c in map(ord, s))
def hexdump(data):
    """Return an HTML-escaped hex dump of ``data``, 16 bytes per row.

    Each row is ``offset: hex-bytes |ascii|``.  NOTE: Python-2 only --
    ``binascii.hexlify`` is applied to one-character str slices and
    ``cgi.escape`` was removed in Python 3.8.
    """
    result = ''
    for i in range(0, len(data), 16):
        # Hex column: space-separated two-digit codes for this 16-byte row.
        hexstring = ' '.join([binascii.hexlify(a) for a in data[i:i+16]])
        asciistring = b2a_printable(data[i:i+16])
        result += cgi.escape("%07x: %-48s |%-16s|\n" % (i,
                                                        hexstring,
                                                        asciistring))
    return result
def disassembly_to_dict(body):
    """Return one {'name', 'opcode', 'operands'} dict per instruction of ``body``."""
    return [{'name': ins.name,
             'opcode': ins.opcode,
             'operands': ins.operands}
            for ins in body.disassemble()]
def create_method_node(parser,
body,
nodes,
edges,
bodies,
relate_to,
color,
label,
level):
if body == None:
opc_hash = "NO BODY"
disassembly = []
dump = ''
else:
#opc_hash = hashlib.md5(body.strip_operands()).hexdigest()
opc_hash = hashlib.md5(body.code).hexdigest()
disassembly = disassembly_to_dict(body)
dump = hexdump(body.code)
if opc_hash in bodies:
id_ = bodies[opc_hash]
node = nodes[id_]
if 'aka' in node:
node['aka'].append(label)
else:
node['aka'] = [label]
print " [-] Duplicate method body: %s (%s) (node: %s)" % (opc_hash,
label,
id_)
# Don't duplicate edges...
edge = {'from': id_, 'to': relate_to}
if edge not in edges:
edges.append(edge)
else:
id_ = len(nodes)
bodies[opc_hash] = id_
nodes.append({'label': label,
'id': id_,
'color': color,
'default_color': color,
'dump': dump,
'disassembly': disassembly,
'level': level})
edges.append({'from': id_, 'to': relate_to})
print " [-] New method body: %s (%s) (node: %s)" % (opc_hash,
label,
id_)
def add_method(parser,
               meth_index,
               nodes,
               edges,
               bodies,
               relate_to,
               color,
               label,
               level=5):
    """Graph the body of method ``meth_index``, linked to node ``relate_to``.

    Not every method has a body; when none is found an empty body node is
    created instead so the method still appears in the graph.
    """
    matching_body = None
    for candidate in parser.method_bodies:
        if candidate.method == meth_index:
            matching_body = candidate
            break
    create_method_node(parser,
                       matching_body,
                       nodes,
                       edges,
                       bodies,
                       relate_to,
                       color,
                       label,
                       level)
def add_method_nodes(parser, obj, index, nodes, edges, bodies):
    """Graph every method trait of ``obj``, linking each to node ``index``."""
    method_traits = (t for t in obj.traits
                     if (t.kind & 0x0F) == parser.TRAIT_METHOD)
    for trait in method_traits:
        meth_name = parser.resolve_multiname(trait.name)
        meth_index = parser.resolve_trait(trait)['method_index']
        add_method(parser,
                   meth_index,
                   nodes,
                   edges,
                   bodies,
                   index,
                   '#CCBBAA',
                   meth_name)
def get_traits(parser, traits):
    """Summarize each trait as a {'name': ..., 'type': ...} dict."""
    return [{'name': parser.resolve_multiname(trait.name),
             'type': parser.TRAIT_KIND[trait.kind & 0x0F]}
            for trait in traits]
# Return a list of node indexes this file relates to...
def dump_graph(parser,
nodes,
edges,
args,
bodies={},
classes={},
instances={}):
indexes = []
for i, script in enumerate(parser.scripts):
#sname = "script_%s" % i
# Make a node for this script. Every script is unique...
#id_ = len(nodes)
#nodes.append({'label': sname,
# 'id': id_,
# 'color': 'magenta',
# 'default_color': 'magenta',
# 'level': 2})
#indexes.append(id_)
#script_index = id_
#print " [+] Found script: %s" % sname
for trait in script.traits:
if (trait.kind & 0x0F) != parser.TRAIT_CLASS:
continue
cname = parser.resolve_multiname(trait.name)
# If filtering and not a match, skip...
if args.class_names and cname not in args.class_names:
print " [-] Skipping class due to filter (%s)" % cname
continue
# If we have this class already, just use the node index.
# Otherwise, make a new node. Relate node to script node.
if cname in classes:
class_index = classes[cname]
print " [-] Duplicate class: %s (node: %s)!" % (cname,
class_index)
else:
id_ = len(nodes)
nodes.append({'label': "class: %s" % cname,
'id': id_,
'color': '#00CC00',
'default_color': '#00CC00',
'level': 3})
classes[cname] = id_
class_index = id_
print " [-] New class: %s (node: %s)!" % (cname, class_index)
#edges.append({'from': script_index, 'to': class_index})
indexes.append(class_index)
# Handle method for script init...
#add_method(parser,
# script.init,
# nodes,
# edges,
# bodies,
# class_index,
# '#00FFFF',
# "script init %s" % cname,
# level=5)
if not args.full:
continue
# Make instance node for this class and handle init and method nodes.
for instance in parser.instances:
iname = parser.resolve_multiname(instance.name)
if iname != cname:
continue
# Make a node (or use existing one) for this instance.
if iname in instances:
instance_index = instances[iname]
print " [-] Duplicate instance: %s (node: %s)" % (iname,
instance_index)
else:
id_ = len(nodes)
traits = get_traits(parser, instance.traits)
nodes.append({'label': "instance: %s" % iname,
'id': id_,
'color': 'grey',
'default_color': 'grey',
'traits': traits,
'level': 4})
edges.append({'from': class_index, 'to': id_})
instances[iname] = id_
instance_index = id_
print " [-] New instance: %s (node: %s)" % (iname,
instance_index)
# Handle methods and init for this instance.
add_method_nodes(parser,
instance,
instance_index,
nodes,
edges,
bodies)
# Add instance init method too...
add_method(parser,
instance.iinit,
nodes,
edges,
bodies,
instance_index,
'orange',
"instance init %s" % iname,
level=5)
# Got one instance, move along...
break
# Make class node for this script and handle init and method nodes.
for trait in script.traits:
if (trait.kind & 0x0F) != parser.TRAIT_CLASS:
continue
class_index = parser.resolve_trait(trait)['class_index']
klass = parser.classes[class_index]
# Add method for class init.
add_method(parser,
klass.cinit,
nodes,
edges,
bodies,
instance_index,
'yellow',
"class init %s" % cname,
level=5)
add_method_nodes(parser,
klass,
class_index,
nodes,
edges,
bodies)
break
return indexes
def __main__():
parser = argparse.ArgumentParser(description='Dump actionscript stuff.')
parser.add_argument('-s', '--class_names', action='append',
metavar='class', help='class name to dump')
parser.add_argument('-f', '--full', action='store_true',
help='full graph including methods and inits')
parser.add_argument('-m', '--metadata', action='store_true',
help='enable SWF metadata tags')
parser.add_argument('-b', '--binaries', action='store_true',
help='enable SWF binary tags')
parser.add_argument('files', metavar='file', nargs='+',
help='file to parse')
args = parser.parse_args()
if not args.files:
print "[!] Must provide a filename..."
return
nodes = []
edges = []
binaries = {}
metadata = {}
bodies = {}
classes = {}
instances = {}
for file_ in args.files:
print "[+] Opening file: %s" % file_
try:
f = open(file_, 'rb')
except Exception as e:
print "[!] %s" % str(e)
continue
try:
swiff = SWF(f)
except Exception as e:
print "[!] pyswf failure: %s" % str(e)
f.close()
continue
f.close()
parser = None
indexes = []
# Metadata and binary tags are stored until we have nodes returned
# for ABC elements. This ensures that we don't create nodes for these
# tags without also having something else meaningful.
metadata_tags = []
binary_tags = []
for tag in swiff.tags:
#print "Tag: %s" % tag.name
if tag.name == "Metadata" and args.metadata:
metadata_tags.append(tag)
if tag.name == "TagDefineBinaryData" and args.binaries:
binary_tags.append(tag)
elif tag.name in ["DoABC", "DoABCDefine"]:
if hasattr(tag, 'abcName'):
print " [-] ABCName: %s" % tag.abcName
parser = ABCParser.ABCParser(tag.bytes)
try:
parser.parse()
except ABCdException as e:
print "[!] Parsing error: %s" % str(e)
continue
indexes += dump_graph(parser,
nodes,
edges,
args,
bodies=bodies,
classes=classes,
instances=instances)
if indexes:
new_id = len(nodes)
nodes.append({'id': new_id,
'label': os.path.basename(file_),
'color': 'purple',
'default_color': 'purple',
'level': 0})
# Create edge between this new node and all returned indexes
for index in indexes:
edges.append({'from': new_id, 'to': index})
for tag in metadata_tags:
# Create a node for metadata blobs.
md_hash = hashlib.md5(tag.xmlString).hexdigest()
if md_hash in metadata:
mid_id = metadata[md_hash]
else:
md_id = len(nodes)
metadata[md_hash] = md_id
nodes.append({'id': md_id,
'label': md_hash,
'details': tag.xmlString,
'color': 'blue',
'default_color': 'blue',
'level': 1})
edges.append({'from': new_id, 'to': md_id})
print " [-] Metadata: %s" % md_hash
for tag in binary_tags:
# Add a node for binary data blobs.
bin_hash = hashlib.md5(tag.data).hexdigest()
if bin_hash in binaries:
bin_id = binaries[bin_hash]
else:
bin_id = len(nodes)
binaries[bin_hash] = bin_id
# Include hexdump of first 512 bytes...
nodes.append({'id': bin_id,
'label': bin_hash,
'details': "Length: %s" % len(tag.data),
'color': 'pink',
'default_color': 'pink',
'dump': hexdump(tag.data[:512]),
'level': 1})
edges.append({'from': new_id, 'to': bin_id})
print " [-] Binary: %s" % bin_hash
else:
print "[!] No nodes created..."
print "[-] Nodes: %s" % len(nodes)
f = open("nodes.json", 'w')
f.write(json.dumps(nodes))
f.close()
print "[-] Edges: %s" % len(edges)
f = open("edges.json", 'w')
f.write(json.dumps(edges))
f.close()
if __name__ == '__main__':
__main__()
| StarcoderdataPython |
from scipy.signal import butter, lfilter
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth bandpass filter for the given frequency range.

    Source: https://stackoverflow.com/questions/30659579/calculate-energy-for-each-frequency-band-around-frequency-f-of-interest-in-pytho

    :param lowcut: scalar
        Low-frequency cutoff (same units as ``fs``)
    :param highcut: scalar
        High-frequency cutoff (same units as ``fs``)
    :param fs: float
        Sampling frequency of the signal
    :param order: int
        Filter order
    :return: ndarray
        Numerator (b) and denominator (a) polynomials of the IIR filter.
    """
    nyquist = 0.5 * fs
    normalized_band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, normalized_band, btype='band')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Apply an order-``order`` Butterworth bandpass filter to ``data``.

    Source: https://stackoverflow.com/questions/30659579/calculate-energy-for-each-frequency-band-around-frequency-f-of-interest-in-pytho

    :param data: ndarray
        Signal to be filtered
    :param lowcut: scalar
        Low-frequency cutoff
    :param highcut: scalar
        High-frequency cutoff
    :param fs: float
        Sampling frequency of the signal
    :param order: int
        Filter order
    :return: ndarray
        Output of the digital IIR filter
    """
    numer, denom = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(numer, denom, data)
def get_energy(filtered_signal):
    """Return the energy of a (band-filtered) signal.

    Signal energy is the sum of the squared sample amplitudes
    (Parseval).  Fix: the original summed ``x*2`` -- i.e. ``2*sum(x)`` --
    which is not an energy and contradicts this function's own docstring;
    ``x**2`` is almost certainly what was intended.

    :param filtered_signal: sequence of samples, e.g. the output of
        :func:`butter_bandpass_filter` for one frequency band
    :return: float
        The sum of squared amplitudes in the band
    """
    return sum(x ** 2 for x in filtered_signal)
| StarcoderdataPython |
# From eahrold/Crypt
#-*- coding: utf-8 -*-
#
# Filevault_ServerAppDelegate.py
# Filevault Server
#
# Created by <NAME> on 04/11/2012.
#
# Copyright 2012 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Foundation import *
import FVUtils
from AppKit import *
class FVAppDelegate(NSObject):
    """NSApplication delegate for the Filevault Server app.

    Hides the menu bar, quits when the last window closes, and terminates
    early unless its preconditions (network reachable, running as root)
    hold.
    """
    def applicationWillFinishLaunching_(self, sender):
        # don't show menu bar
        NSMenu.setMenuBarVisible_(NO)
    def applicationShouldTerminateAfterLastWindowClosed_(self, sender):
        # Quit as soon as the last window is closed.
        return YES
    def applicationDidFinishLaunching_(self, sender):
        # Prevent automatic relaunching at login on Lion
        if NSApp.respondsToSelector_('disableRelaunchOnLogin'):
            NSApp.disableRelaunchOnLogin()
        # No point running without connectivity; quit immediately.
        if not FVUtils.internet_on():
            NSApp.terminate_(self)
    def awakeFromNib(self):
        # Root privileges are required; otherwise quit.
        if not FVUtils.root_user():
            NSApp.terminate_(self)
        # NOTE(review): Cocoa ignores awakeFromNib's return value; this
        # presumably just short-circuits when the drive is already
        # encrypted -- confirm intent.
        if FVUtils.driveIsEncrypted(self):
            return True
#
# MLDB-1359_procedure_latest_run.py
# Mich, 2016-02-05
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
import time
from dateutil import parser as date_parser
from mldb import mldb, MldbUnitTest, ResponseException
class ProcedureLatestRunTest(MldbUnitTest):  # noqa
    """Tests the /latestrun procedure endpoint (MLDB-1359)."""
    @classmethod
    def setUpClass(cls):
        # Minimal dataset for the transform procedures to read from.
        ds = mldb.create_dataset({
            'id' : 'ds',
            'type' : 'sparse.mutable',
        })
        ds.record_row('row1', [['colA', 1, 1]])
        ds.commit()
    def test_base(self):
        """latestrun tracks the most recently *started* run, however created."""
        url = '/v1/procedures/testProc'
        mldb.put(url, {
            'type' : 'transform',
            'params' : {
                'inputData' : 'SELECT *, coco AS sanchez FROM ds',
                'outputDataset' : {
                    'id' : 'dsOut'
                },
                'runOnCreation' : True
            }
        })
        res = mldb.get(url + '/latestrun').json()
        run_date = date_parser.parse(res['runStarted'])
        # Sleeps guarantee strictly increasing runStarted timestamps.
        time.sleep(0.01)
        # Run created via PUT with an explicit id.
        mldb.put(url + '/runs/999')
        new_res = mldb.get(url + '/latestrun').json()
        latest_run_date = date_parser.parse(new_res['runStarted'])
        self.assertGreater(latest_run_date, run_date)
        run_date = latest_run_date
        time.sleep(0.01)
        # Run created via POST with a generated id.
        mldb.post(url + '/runs')
        new_res = mldb.get(url + '/latestrun').json()
        latest_run_date = date_parser.parse(new_res['runStarted'])
        self.assertGreater(latest_run_date, run_date)
        run_date = latest_run_date
        time.sleep(0.01)
        # An id that sorts *before* previous ones must still become latest.
        mldb.put(url + '/runs/000')
        new_res = mldb.get(url + '/latestrun').json()
        latest_run_date = date_parser.parse(new_res['runStarted'])
        self.assertGreater(latest_run_date, run_date)
        self.assertEqual(new_res['id'], '000')
    def test_no_latest(self):
        """A procedure that never ran returns 404 for /latestrun."""
        url = '/v1/procedures/testProcNoLatest'
        mldb.put(url, {
            'type' : 'transform',
            'params' : {
                'inputData' : 'SELECT *, coco AS sanchez FROM ds',
                'outputDataset' : {
                    'id' : 'dsOut'
                },
                'runOnCreation' : 0
            }
        })
        with self.assertMldbRaises(status_code=404):
            mldb.get(url + '/latestrun')
    def test_latest_on_unexisting_proc(self):
        """/latestrun on a nonexistent procedure returns 404."""
        with self.assertMldbRaises(status_code=404):
            mldb.get('/v1/procedures/unexisting/latestrun')
if __name__ == '__main__':
    mldb.run_tests()
| StarcoderdataPython |
# common/wienerseries.py
import numpy as np
from scipy import signal
import math
from .utils import nexpow2
## TODO: Complete this class and unify all calculation in this class
## TODO: Add plotting functions to this class
class Wiener_class(object):
    """Wiener-filter front end for gravitational-wave strain data.

    Wraps a ``gw_array`` object (expected to carry ``strain`` -- a dict
    of detector name -> samples -- plus ``sample_times``, ``merger_time``,
    ``fs`` and optionally ``merger_name``) and derives the STFT
    parameters used by :meth:`_get_fft`.
    """
    def __init__(self, gw_array, fs = None, nfft = None, nperseg = None, noverlap = None,
                 window = 'hann', filt_type = 'hrnr'):
        """
        :param gw_array: object bundling detector strains and timing
            (attributes ``strain``, ``sample_times``, ``merger_time``,
            ``fs``, optionally ``merger_name``)
        :param fs: sampling rate; falls back to ``gw_array.fs``
            (presumably Hz -- ``delta_t`` is computed as ``1/fs``)
        :param nfft: FFT length; defaults to ``max(256, nexpow2(nperseg))``
        :param nperseg: STFT segment length; defaults to ~0.06*fs samples,
            rounded to an even number
        :param noverlap: overlap between segments; defaults to 50% of
            ``nperseg``
        :param window: window name passed to ``scipy.signal.get_window``
        :param filt_type: accepted but unused in the code shown here
            (see the module TODOs)
        """
        try:
            self.merger_name = gw_array.merger_name
        except:  # NOTE(review): bare except; AttributeError is what is expected
            print("merger name is unknown")
            pass
        # NOTE(review): message says "does not exist" but the check actually
        # requires more than one detector strain -- confirm intent.
        assert len(gw_array.strain) > 1, "strain array does not exist"
        try:
            start_time = gw_array.sample_times[0]
            merger_time = gw_array.merger_time
        except:  # NOTE(review): bare except
            raise TypeError('start time and merger time should be defined')
        self.start_time = start_time
        self.merger_time = merger_time
        # Copy each detector's strain into a plain numpy array.
        self.strain = {}
        for ifo in gw_array.strain.keys():
            self.strain[ifo] = np.array(gw_array.strain[ifo])
        if not fs:
            try:
                fs = gw_array.fs
            except:  # NOTE(review): bare except
                raise ValueError('sampling rate is not defined')
        self.fs = fs
        self.delta_t = 1/fs
        try:
            self.sample_times = np.array(gw_array.sample_times)
        except:  # NOTE(review): bare except
            print("GPS Time is not available. Switch to seconds")
            # Convert the merger time to a sample offset from the start.
            self.merger_time = (self.merger_time - self.start_time) * self.fs
            # self.sample_times = np.arange()
        if not nperseg:
            # Default segment length: ~60 ms of data, forced even.
            nperseg = np.fix(0.06*self.fs).astype(int)
            if np.remainder(nperseg, 2) == 1:
                nperseg = nperseg + 1
        self.nperseg = nperseg
        if not noverlap:
            self.noverlap = np.fix(0.5*self.nperseg).astype(int)
            self.offset = self.nperseg - self.noverlap
        else:
            self.noverlap = noverlap
            self.offset = self.nperseg - self.noverlap
        if not nfft:
            nfft = max([256, nexpow2(self.nperseg)])
        self.nfft = nfft
        # if data_cut:
        self.window = signal.get_window(window, self.nperseg)
    def _get_fft(self, cut_sec = None, axis = -1, astype = None):
        """Return the STFT of every detector strain as a list of arrays.

        :param cut_sec: currently unused (see commented-out code below)
        :param axis: currently unused
        :param astype: currently unused
        :return: list of complex STFT arrays, one per detector
        """
        # if not cut_sec:
        strain = list(self.strain.values())
        strain_psd = []
        for val in strain:
            # NOTE(review): ``self.offset`` (= nperseg - noverlap, the hop
            # size) is passed as scipy's ``noverlap`` argument here --
            # confirm this is intentional and not a parameter mix-up.
            _, _, wkn = signal.stft(val, self.fs, window = self.window,
                                    nperseg= self.nperseg, noverlap= self.offset,
                                    nfft= self.nfft)
            strain_psd.append(wkn)
        # if astype == 'init_noise':
        #     wkn = np.mean(abs(wkn), axis = axis)
        return strain_psd
# Hungarian (hu-HU) public-holiday records for 2011.
# Types: NF = national fixed, NRF = national religious fixed,
# NRV = national religious variable (Easter-dependent).
[
    {
        'date': '2011-01-01',
        'description': 'Újév',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NF'
    },
    {
        'date': '2011-03-15',
        'description': 'Az 1848-as forradalom ünnepe',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NF'
    },
    {
        'date': '2011-04-24',
        'description': 'Húsvét',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NRV'
    },
    {
        'date': '2011-04-25',
        'description': 'Húsvéthétfő',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NRV'
    },
    {
        'date': '2011-05-01',
        'description': 'A munka ünnepe',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NF'
    },
    {
        'date': '2011-06-12',
        'description': 'Pünkösd',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NRV'
    },
    {
        'date': '2011-06-13',
        'description': 'Pünkösdhétfő',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NRV'
    },
    {
        'date': '2011-08-20',
        'description': 'Az államalapítás ünnepe',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NF'
    },
    {
        'date': '2011-10-23',
        'description': 'Az 1956-os forradalom ünnepe',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NF'
    },
    {
        'date': '2011-11-01',
        'description': 'Mindenszentek',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NRF'
    },
    {
        'date': '2011-12-25',
        'description': 'Karácsony',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NRF'
    },
    {
        'date': '2011-12-26',
        'description': 'Karácsony',
        'locale': 'hu-HU',
        'notes': '',
        'region': '',
        'type': 'NRF'
    }
]
import pytest
import numpy as np
import pandas as pd
from study_lib.read_data import SML2010Data
def test_data_01_exists():
    """Dataset 1 can be constructed."""
    # BUILD
    # OPERATE
    data = SML2010Data.new_data_1()
    # CHECK
    assert(data is not None)
def test_data_01_table():
    """Dataset 1 contains at least one row."""
    # BUILD
    # OPERATE
    data = SML2010Data.new_data_1()
    # CHECK
    assert(len(data) > 0)
def test_data_02_exists():
    """Dataset 2 can be constructed."""
    # BUILD
    # OPERATE
    data = SML2010Data.new_data_2()
    # CHECK
    assert(data is not None)
def test_data_02_table():
    """Dataset 2 contains at least one row."""
    # BUILD
    # OPERATE
    data = SML2010Data.new_data_2()
    # CHECK
    assert(len(data) > 0)
def test_data_table():
    """get_data() returns a pandas DataFrame."""
    # BUILD
    # OPERATE
    data = SML2010Data.get_data()
    # CHECK
    assert(isinstance(data, pd.DataFrame))
def test_data_shape():
    """The combined dataset has the expected 4137x24 shape."""
    # BUILD
    # OPERATE
    data = SML2010Data.get_data()
    # CHECK
    assert(data.shape[0] == 4137)
    assert(data.shape[1] == 24)
def test_data_colnames():
    """Column names match the declared COLNAMES constant."""
    # BUILD
    # OPERATE
    data = SML2010Data.get_data()
    # CHECK
    np.testing.assert_array_equal(data.columns, SML2010Data.COLNAMES)
#!/usr/bin/env python
def gini(list):
    """Return the Gini coefficient of a sequence of non-negative values.

    Uses the mean-absolute-difference form:

        G = sum_i sum_j |x_i - x_j| / (2 * N * sum(x))

    Returns 0.0 for perfectly equal values and approaches 1.0 as one
    element holds everything.  Raises ZeroDivisionError for an empty
    sequence or one that sums to zero (same as the original code).
    """
    # NOTE: the parameter name shadows the builtin `list`; kept as-is so
    # existing keyword calls stay valid.
    N = len(list)
    numerator = 0
    for i in range(N):
        for j in range(N):
            numerator += abs(list[i] - list[j])
    # Hoisted out of the double loop: the original accumulated 2 * list[i]
    # once per (i, j) pair, which totals exactly 2 * N * sum(list).
    denominator = 2 * N * sum(list)
    return float(numerator) / denominator
# Speaking time per candidate (minutes), expressed as min + sec/60.
debate = [31+5/60.0, 28+5/60.0, 17+56/60.0, 15+35/60.0, 9+11/60.0]
# source https://twitter.com/nytimes/status/654131249234247682
# Reference distributions: everyone equal vs. one person holding everything.
perfect_eq = [42] * 7
perfect_in = [99999] + ([0] * 999)
# NOTE: Python 2 print statements -- this script will not run under Python 3.
print "debate ", round(gini(debate), 3)
print "perfect equality ", gini(perfect_eq)
print "perfect inequality ", gini(perfect_in)
| StarcoderdataPython |
4918269 | <filename>Exercises/ex36_my_text_adventure.py
# Designing and debugging - coding up my own game
"""
To-Do:
- Make nope() exit back to previous method
"""
from sys import exit
# Game-state flags shared by the handlers below.
# NOTE(review): a `global` statement at module level is a no-op -- `global`
# only has an effect inside a function body.  These declarations document
# intent but do not make the assignments inside look_left()/look_right()
# target the module scope.
global looked_left
looked_left = 0
global looked_right
looked_right = 0
global inquisitive
inquisitive = 0
def look_right():
print "You look further up the road."
print "You see a charming row of houses with neat gardens."
looked_right = 1
start_again()
def look_left():
print "You look back up the road."
print "You see the trail of breadcrumbs that lead you here."
looked_left = 1
start_again()
def start_again():
print looked_left
print looked_right
if looked_left == 1 and looked_right == 1:
print "You are an inquisitive one!"
print "You got Inquisitive +1!"
inquisitive = 1
print "You refocus on the door in front of you."
print "Do you open the door, look left, or look right?"
next = raw_input("> ")
if "open" in next:
hallway()
elif "left" in next:
look_left()
elif "right" in next:
look_right()
else:
nope()
def nope():
    """Punish unrecognised input by ending the game immediately."""
    print "Nope! That's not valid. Start again to teach you a lesson!"
    exit(0)
def start():
    """Entry point: set the scene and dispatch the player's first choice."""
    print "You are on a cul-de-sac in front of a newly built house."
    print "It is a sunny day and the house has a beautiful frontage."
    print "For some reason though, you don't feel quite right..."
    print "Do you open the door, look left, or look right?"
    next = raw_input("> ")
    if "open" in next:
        # NOTE(review): hallway() is never defined in this file; choosing
        # "open" will raise NameError at runtime -- confirm intent.
        hallway()
    elif "left" in next:
        look_left()
    elif "right" in next:
        look_right()
    else:
        nope()

# Kick off the game as soon as the script is executed.
start()
11376337 | <gh_stars>10-100
import json
import os
from datetime import datetime
from ocd_backend.items.saenredam import SaenredamItem
from . import ItemTestCase
class SaenredamItemTestCase(ItemTestCase):
    """Unit tests for SaenredamItem.

    setUp() loads a canned JSON dump of one collection item and records the
    expected value of every extracted field; each test instantiates the item
    and checks a single field against those expectations.

    NOTE(review): this file is Python 2 (it uses ``unicode`` and
    ``dict.iteritems``); it will not run under Python 3 as-is.
    """

    def setUp(self):
        """Load the test dump (raw and parsed) and the expected values."""
        super(SaenredamItemTestCase, self).setUp()
        self.PWD = os.path.dirname(__file__)

        self.source_definition = {
            'id': 'test_definition',
            'extractor': (
                'ocd_backend.extractors.staticfile.'
                'StaticJSONExtractor'
            ),
            'transformer': 'ocd_backend.transformers.BaseTransformer',
            # NOTE(review): this names the NijmegenGrintenItem class even
            # though the tests exercise SaenredamItem -- confirm whether the
            # 'item' entry is ever used to resolve the class under test.
            'item': 'ocd_backend.items.gemeente_ede.NijmegenGrintenItem',
            'loader': 'ocd_backend.loaders.ElasticsearchLoader',
            'file_url': '',
            'media_base_url': 'http://static.opencultuurdata.nl/utrechts_archief/saenredam/'
        }

        # The same dump file is read twice: once as the raw string and once
        # parsed to a dict, matching the (raw, parsed) item constructor pair.
        with open(os.path.abspath(os.path.join(
                self.PWD, (
                    '../test_dumps/saenredam_item.json'))), 'r') as f:
            self.raw_item = f.read()
        with open(os.path.abspath(os.path.join(
                self.PWD, (
                    '../test_dumps/saenredam_item.json'))), 'r') as f:
            self.item = json.load(f)

        # Expected field values for the item in the dump above.
        self.collection = u'Het Utrechts Archief - Saenredam Collectie'
        self.rights = u'http://creativecommons.org/publicdomain/zero/1.0/deed.nl'
        self.original_object_id = u'28593'
        self.original_object_urls = {
            u'html': (
                u'http://www.hetutrechtsarchief.nl/collectie/beeldmateriaal/'
                u'tekeningen_en_prenten/1400-1410/28593')}
        self.media_urls = [{
            'original_url': (
                u'http://static.opencultuurdata.nl/utrechts_archief/saenredam/X34-28593.jpg'),
            'content_type': 'image/jpeg'}]
        self.item_date = datetime(1636, 10, 15)
        self.item_gran = 8
        self.title = u'Oudegracht bij de Stadhuisbrug'
        self.description = u'Gezicht op de Stadhuisbrug te Utrecht met links het huis Keyserrijk,'
        self.authors = [u'<NAME>.']

    def _instantiate_item(self):
        """Build a SaenredamItem from the loaded dump."""
        return SaenredamItem(
            self.source_definition, 'application/json',
            self.raw_item, self.item)

    def test_item_collection(self):
        """Collection name matches."""
        item = self._instantiate_item()
        self.assertEqual(item.get_collection(), self.collection)

    def test_get_rights(self):
        """Rights URL matches."""
        item = self._instantiate_item()
        self.assertEqual(item.get_rights(), self.rights)

    def test_get_original_object_id(self):
        """Original object id matches."""
        item = self._instantiate_item()
        self.assertEqual(
            item.get_original_object_id(), self.original_object_id)

    def test_get_original_object_urls(self):
        """Original object URL dict matches."""
        item = self._instantiate_item()
        self.assertDictEqual(
            item.get_original_object_urls(), self.original_object_urls)

    def test_get_combined_index_data(self):
        """Combined index data is a dict."""
        item = self._instantiate_item()
        self.assertIsInstance(item.get_combined_index_data(), dict)

    def test_get_index_data(self):
        """Index data is a dict."""
        item = self._instantiate_item()
        self.assertIsInstance(item.get_index_data(), dict)

    def test_get_all_text(self):
        """Full-text blob is a non-empty unicode string (Python 2)."""
        item = self._instantiate_item()
        self.assertEqual(type(item.get_all_text()), unicode)
        self.assertTrue(len(item.get_all_text()) > 0)

    def test_media_urls(self):
        """Media URL list matches."""
        item = self._instantiate_item()
        data = item.get_combined_index_data()
        self.assertEqual(data['media_urls'], self.media_urls)

    def test_date_and_granularity(self):
        """Parsed date and granularity match."""
        item = self._instantiate_item()
        item_gran, item_date = item._get_date_and_granularity()
        self.assertEqual(item_date, self.item_date)
        self.assertEqual(item_gran, self.item_gran)

    def test_title(self):
        """Title matches."""
        item = self._instantiate_item()
        data = item.get_combined_index_data()
        self.assertEqual(data['title'], self.title)

    def test_description(self):
        """Description starts with the expected prefix."""
        item = self._instantiate_item()
        data = item.get_combined_index_data()
        self.assertTrue(data['description'].startswith(self.description))

    def test_authors(self):
        """Author list matches."""
        item = self._instantiate_item()
        data = item.get_combined_index_data()
        self.assertEqual(data['authors'], self.authors)

    def test_combined_index_data_types(self):
        """Every declared index field is present and correctly typed."""
        item = self._instantiate_item()
        data = item.get_combined_index_data()
        for field, field_type in item.combined_index_fields.iteritems():
            self.assertIn(field, data)
            if data[field] is not None:
                self.assertIsInstance(data[field], field_type)
| StarcoderdataPython |
import Pro_crescendi2

# Emit a Csound-style function-table line, then two accelerando note streams.
# NOTE(review): accelerando() argument meanings are inferred as
# (start_time, end_time, initial_duration, final_duration, frequency) --
# confirm against Pro_crescendi2.  Python 2 print statement below.
print "f1 0 4096 10 1"
Pro_crescendi2.accelerando(0.5,3.7,0.4,0.01, 550)
Pro_crescendi2.accelerando(5.2,11.7,1.2,0.5, 1312)
| StarcoderdataPython |
3300878 | <reponame>sintefneodroid/vision
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__doc__ = r"""
Created on 07/03/2020
"""
import numpy
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
__all__ = ["bb_from_mask", "get_model_instance_segmentation"]
def bb_from_mask(hard_mask):
    """Return the tight bounding box of a binary mask's non-zero region as
    ``[row_min, col_min, row_max, col_max]`` (inclusive indices).

    Raises ValueError if the mask has no non-zero entries.
    """
    nonzero_idx = numpy.nonzero(hard_mask)
    rows, cols = nonzero_idx[0], nonzero_idx[1]
    return [rows.min(), cols.min(), rows.max(), cols.max()]
def get_model_instance_segmentation(num_classes, hidden_layer: int = 256):
    """
    Build a COCO-pretrained Mask R-CNN (ResNet-50 FPN backbone) adapted to
    predict *num_classes* classes by swapping out both prediction heads.

    :param num_classes: number of output classes (including background)
    :type num_classes: int
    :param hidden_layer: channel width of the new mask predictor's hidden layer
    :type hidden_layer: int
    :return: the adapted torchvision detection model
    :rtype: torch.nn.Module
    """
    # load an instance segmentation model pre-trained pre-trained on COCO
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)

    # get number of input features for the classifier
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    # replace the pre-trained head with a new one
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

    # now get the number of input features for the mask classifier
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    # and replace the mask predictor with a new one
    model.roi_heads.mask_predictor = MaskRCNNPredictor(
        in_features_mask, hidden_layer, num_classes
    )
    return model
| StarcoderdataPython |
11214708 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-11-27 15:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add surrogate ``id`` primary keys to every wdapp model and make the
    old natural-key fields nullable.

    NOTE(review): the original migration passed the *string* ``'True'`` to
    several ``null=``/``blank=`` arguments (e.g. ``null='True'``).  A
    non-empty string is truthy so Django behaved as if ``True``, but it is
    inconsistent with the boolean literals used elsewhere in this same
    migration and serializes differently; fixed to real booleans below.
    If this migration has already been applied, no schema change results.
    """

    dependencies = [
        ('wdapp', '0004_auto_20181127_0500'),
    ]

    operations = [
        migrations.AddField(
            model_name='business',
            name='id',
            field=models.AutoField(auto_created=True, default=0, primary_key=True, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='businessorder',
            name='id',
            field=models.AutoField(auto_created=True, default=0, primary_key=True, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='cargo',
            name='id',
            field=models.AutoField(auto_created=True, default=0, primary_key=True, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='cargomanifest',
            name='id',
            field=models.AutoField(auto_created=True, default=0, primary_key=True, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='company',
            name='id',
            field=models.AutoField(auto_created=True, default=0, primary_key=True, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='driver',
            name='id',
            field=models.AutoField(auto_created=True, default=0, primary_key=True, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='driverexpense',
            name='id',
            field=models.AutoField(auto_created=True, default=0, primary_key=True, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='stop',
            name='id',
            field=models.AutoField(auto_created=True, default=0, primary_key=True, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='trip',
            name='id',
            field=models.AutoField(auto_created=True, default=0, primary_key=True, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='business',
            name='business_id',
            field=models.CharField(max_length=25, null=True),
        ),
        migrations.AlterField(
            model_name='businessorder',
            name='order_id',
            field=models.CharField(max_length=25, null=True),
        ),
        migrations.AlterField(
            model_name='cargo',
            name='cargo_id',
            field=models.CharField(blank=True, max_length=25, null=True),
        ),
        migrations.AlterField(
            model_name='cargomanifest',
            name='manifest_id',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='company',
            name='company_id',
            field=models.CharField(max_length=9, null=True),
        ),
        migrations.AlterField(
            model_name='driver',
            name='driver_id',
            field=models.CharField(max_length=25, null=True),
        ),
        migrations.AlterField(
            model_name='driverexpense',
            name='expenses_id',
            field=models.CharField(max_length=25, null=True),
        ),
        migrations.AlterField(
            model_name='stop',
            name='stop_id',
            field=models.CharField(blank=True, max_length=25, null=True),
        ),
        migrations.AlterField(
            model_name='trip',
            name='trip_id',
            field=models.IntegerField(null=True),
        ),
    ]
| StarcoderdataPython |
271426 | import asyncio
import logging
import sys
import traceback
from asyncio import AbstractEventLoop
from typing import Optional, List, Dict
from dacite import from_dict
from TikTokLive.client.http import TikTokHTTPClient
from TikTokLive.client.proxy import ProxyContainer
from TikTokLive.types import AlreadyConnecting, AlreadyConnected, LiveNotFound, FailedConnection, ExtendedGift, InvalidSessionId, ChatMessageSendFailure, ChatMessageRepeat, FailedFetchRoomInfo, FailedFetchGifts, \
FailedRoomPolling
from TikTokLive.utils import validate_and_normalize_unique_id, get_room_id_from_main_page_html
class BaseClient:
    """
    Base client responsible for long polling to the TikTok Webcast API
    """

    def __init__(
            self,
            unique_id: str,
            loop: Optional[AbstractEventLoop] = None,
            client_params: Optional[dict] = None,
            headers: Optional[dict] = None,
            timeout_ms: Optional[int] = None,
            polling_interval_ms: int = 1000,
            process_initial_data: bool = True,
            fetch_room_info_on_connect: bool = True,
            enable_extended_gift_info: bool = True,
            trust_env: bool = False,
            proxy_container: Optional[ProxyContainer] = None,
            lang: Optional[str] = "en-US"
    ):
        """
        Initialize the base client

        :param unique_id: The unique id of the creator to connect to
        :param loop: Optionally supply your own asyncio loop
        :param client_params: Additional client parameters to include when making requests to the Webcast API
        :param headers: Additional headers to include when making requests to the Webcast API
        :param timeout_ms: The timeout (in ms) for requests made to the Webcast API
        :param polling_interval_ms: The interval between requests made to the Webcast API
        :param process_initial_data: Whether to process the initial data (including cached chats)
        :param fetch_room_info_on_connect: Whether to fetch room info (check if everything is kosher) on connect
        :param enable_extended_gift_info: Whether to retrieve extended gift info including its icon & other important things
        :param trust_env: Whether to trust environment variables that provide proxies to be used in aiohttp requests
        :param proxy_container: A proxy container that allows you to submit an unlimited # of proxies for rotation
        :param lang: Change the language. Payloads *will* be in English, but this will change stuff like the extended_gift Gift attribute to the desired language!

        """
        # Get Event Loop
        if isinstance(loop, AbstractEventLoop):
            self.loop: AbstractEventLoop = loop
        else:
            try:
                self.loop: AbstractEventLoop = asyncio.get_event_loop()
            except RuntimeError:
                self.loop: AbstractEventLoop = asyncio.get_running_loop()

        # Private Attributes
        self.__unique_id: str = validate_and_normalize_unique_id(unique_id)
        # NOTE(review): __discard_extra_events is assigned here but never
        # read anywhere in this class -- possibly dead state.
        self.__discard_extra_events: Optional[bool] = None
        self.__room_info: Optional[dict] = None
        self.__available_gifts: Dict[int, ExtendedGift] = dict()
        self.__room_id: Optional[str] = None
        self._viewer_count: Optional[int] = None
        self.__connecting: bool = False
        self.__connected: bool = False
        self.__session_id: Optional[str] = None

        # Change Language
        # NOTE(review): this mutates the *class-level* DEFAULT_CLIENT_PARAMS
        # dict on TikTokHTTPClient, so the language set by the most recently
        # constructed client affects every client in the process.
        TikTokHTTPClient.DEFAULT_CLIENT_PARAMS["app_language"] = lang
        TikTokHTTPClient.DEFAULT_CLIENT_PARAMS["webcast_language"] = lang

        # Protected Attributes
        self._client_params: dict = {**TikTokHTTPClient.DEFAULT_CLIENT_PARAMS, **(client_params if isinstance(client_params, dict) else dict())}
        self._http: TikTokHTTPClient = TikTokHTTPClient(headers if headers is not None else dict(), timeout_ms=timeout_ms, proxy_container=proxy_container, trust_env=trust_env)
        self._polling_interval_ms: int = polling_interval_ms
        self._process_initial_data: bool = process_initial_data
        self._fetch_room_info_on_connect: bool = fetch_room_info_on_connect
        self._enable_extended_gift_info: bool = enable_extended_gift_info

    async def _on_error(self, original: Exception, append: Optional[Exception]) -> None:
        """
        Send errors to the _on_error handler for handling, appends a custom exception

        :param original: The original Python exception
        :param append: The specific exception
        :return: None

        """
        raise NotImplementedError()

    async def __fetch_room_id(self) -> Optional[str]:
        """
        Fetch room ID of a given user

        :return: Their Room ID
        :raises: asyncio.TimeoutError

        """
        try:
            html: str = await self._http.get_livestream_page_html(self.__unique_id)
            self.__room_id = get_room_id_from_main_page_html(html)
            # Cache the room id into the params reused by every later request.
            self._client_params["room_id"] = self.__room_id
            return self.__room_id
        except Exception as ex:
            await self._on_error(ex, FailedFetchRoomInfo("Failed to fetch room id from WebCast, see stacktrace for more info."))
            return None

    async def __fetch_room_info(self) -> Optional[dict]:
        """
        Fetch room information from Webcast API

        :return: Room info dict

        """
        try:
            response = await self._http.get_json_object_from_webcast_api("room/info/", self._client_params)
            self.__room_info = response
            return self.__room_info
        except Exception as ex:
            await self._on_error(ex, FailedFetchRoomInfo("Failed to fetch room info from WebCast, see stacktrace for more info."))
            return None

    async def __fetch_available_gifts(self) -> Optional[Dict[int, ExtendedGift]]:
        """
        Fetch available gifts from Webcast API

        :return: Gift info dict

        """
        try:
            response = await self._http.get_json_object_from_webcast_api("gift/list/", self._client_params)
            gifts: Optional[List] = response.get("gifts")

            if isinstance(gifts, list):
                for gift in gifts:
                    try:
                        _gift: ExtendedGift = from_dict(ExtendedGift, gift)
                        self.__available_gifts[_gift.id] = _gift
                    except:
                        # Bad entries are logged and skipped so one malformed
                        # gift does not abort the whole list.
                        logging.error(traceback.format_exc() + "\nFailed to parse gift's extra info")

            return self.__available_gifts
        except Exception as ex:
            await self._on_error(ex, FailedFetchGifts("Failed to fetch gift data from WebCast, see stacktrace for more info."))
            return None

    async def __fetch_room_polling(self) -> None:
        """
        Main loop containing polling for the client

        :return: None

        """
        self.__is_polling_enabled = True
        # NOTE(review): integer division truncates -- any
        # polling_interval_ms below 1000 becomes a 0-second sleep here.
        polling_interval: int = int(self._polling_interval_ms / 1000)

        while self.__is_polling_enabled:
            try:
                await self.__fetch_room_data()
            except Exception as ex:
                await self._on_error(ex, FailedRoomPolling("Failed to retrieve events from WebCast, see stacktrace for more info."))

            await asyncio.sleep(polling_interval)

    async def __fetch_room_data(self, is_initial: bool = False) -> None:
        """
        Fetch room data from the Webcast API and deserialize it

        :param is_initial: Is it the initial request to the API
        :return: None

        """
        webcast_response = await self._http.get_deserialized_object_from_webcast_api("im/fetch/", self._client_params, "WebcastResponse")
        # Advance the pagination cursor; a "0" cursor from the API means
        # "no new position", so the previous cursor is kept in that case.
        _last_cursor, _next_cursor = self._client_params["cursor"], webcast_response.get("cursor")
        self._client_params["cursor"] = _last_cursor if _next_cursor == "0" else _next_cursor

        if is_initial and not self._process_initial_data:
            return

        await self._handle_webcast_messages(webcast_response)

    async def _handle_webcast_messages(self, webcast_response) -> None:
        """
        Handle the parsing of webcast messages, meant to be overridden by superclass

        """
        raise NotImplementedError

    async def _connect(self) -> str:
        """
        Connect to the Websocket API

        :return: The room ID, if connection is successful

        """
        if self.__connecting:
            raise AlreadyConnecting()

        if self.__connected:
            raise AlreadyConnected()

        self.__connecting = True

        try:
            await self.__fetch_room_id()

            # Fetch room info when connecting
            if self._fetch_room_info_on_connect:
                await self.__fetch_room_info()

                # If offline
                if self.__room_info.get("status", 4) == 4:
                    raise LiveNotFound()

            # Get extended gift info
            if self._enable_extended_gift_info:
                await self.__fetch_available_gifts()

            # Make initial request to Webcast Messaging
            await self.__fetch_room_data(True)
            self.__connected = True
            # NOTE(review): __connecting is only reset to False in the
            # except-branch below and in _disconnect(); after a successful
            # connect it stays True, so a second _connect() raises
            # AlreadyConnecting rather than AlreadyConnected.

            # Use request polling (Websockets not implemented)
            self.loop.create_task(self.__fetch_room_polling())

            return self.__room_id
        except Exception as ex:
            message: str
            tb: str = traceback.format_exc()

            if "SSLCertVerificationError" in tb:
                message = (
                    "Your certificates might be out of date! Navigate to your base interpreter's "
                    "directory and click on (execute) \"Install Certificates.command\".\nThis package is reading the interpreter path as "
                    f"{sys.executable}, but if you are using a venv please navigate to your >> base << interpreter."
                )
            else:
                message = str(ex)

            self.__connecting = False
            await self._on_error(ex, FailedConnection(message))

    def _disconnect(self) -> None:
        """
        Set unconnected status

        :return: None

        """
        self.__is_polling_enabled = False
        self.__room_info: Optional[dict] = None
        self.__connecting: Optional[bool] = False
        self.__connected: Optional[bool] = False
        self._client_params["cursor"]: str = ""

    async def stop(self) -> None:
        """
        Stop the client

        :return: None

        """
        if self.__connected:
            self._disconnect()
            return

    async def start(self, session_id: Optional[str] = None) -> Optional[str]:
        """
        Start the client without blocking the main thread

        :return: Room ID that was connected to

        """
        self.__set_session_id(session_id)
        return await self._connect()

    def run(self, session_id: Optional[str] = None) -> None:
        """
        Run client while blocking main thread

        :return: None

        """
        self.__set_session_id(session_id)
        self.loop.run_until_complete(self._connect())
        self.loop.run_forever()

    def __set_session_id(self, session_id: Optional[str]) -> None:
        """
        Set the Session ID for authenticated requests

        :param session_id: New session ID
        :return: None

        """
        if session_id:
            self.__session_id = session_id
            # The session id doubles as the "sessionid" cookie on requests.
            self._http.cookies["sessionid"] = session_id

    async def send_message(self, text: str, session_id: Optional[str] = None) -> Optional[str]:
        """
        Send a message to the TikTok Live Chat

        :param text: The message you want to send to the chat
        :param session_id: The Session ID (If you've already supplied one, you don't need to)
        :return: None

        """
        self.__set_session_id(session_id)

        if not self.__session_id:
            raise InvalidSessionId("Missing Session ID. Please provide your current Session ID to use this feature.")

        params: dict = {**self._client_params, "content": text}
        response: dict = await self._http.post_json_to_webcast_api("room/chat/", params, None)
        status_code: Optional[int] = response.get("status_code")
        data: Optional[dict] = response.get("data")

        if status_code == 0:
            return data

        # Map known WebCast error codes onto specific exceptions; anything
        # else becomes a generic ChatMessageSendFailure.
        try:
            raise {
                20003: InvalidSessionId("Your Session ID has expired. Please provide a new one"),
                50007: ChatMessageRepeat("You cannot send repeated chat messages!")
            }.get(
                status_code, ChatMessageSendFailure(f"TikTok responded with status code {status_code}: {data.get('message')}")
            )
        except Exception as ex:
            await self._on_error(ex, None)

    async def retrieve_room_info(self) -> Optional[dict]:
        """
        Method to retrieve room information

        :return: Dictionary containing all room info

        """
        # If not connected yet, get their room id
        if not self.__connected:
            await self.__fetch_room_id()

        # Fetch their info & return it
        return await self.__fetch_room_info()

    async def retrieve_available_gifts(self) -> Optional[Dict[int, ExtendedGift]]:
        """
        Retrieve available gifts from Webcast API

        :return: None

        """
        return await self.__fetch_available_gifts()

    async def set_proxies_enabled(self, enabled: bool) -> None:
        """
        Set whether to use proxies in requests

        :param enabled: Whether proxies are enabled or not
        :return: None

        """
        self._http.proxy_container.set_enabled(enabled)

    async def add_proxies(self, *proxies: str) -> None:
        """
        Add proxies to the proxy container for request usage

        :param proxies: Proxies for usage
        :return: None

        """
        for proxy in proxies:
            self._http.proxy_container.proxies.append(proxy)

    async def remove_proxies(self, *proxies: str) -> None:
        """
        Remove proxies from the proxy container for request usage

        :param proxies: Proxies to remove
        :raises ValueError: Raises ValueError if proxy is not present
        :return: None

        """
        for proxy in proxies:
            self._http.proxy_container.proxies.remove(proxy)

    async def get_proxies(self) -> List[str]:
        """
        Get a list of the current proxies in the proxy container being used for requests

        :return: The proxies in the request container

        """
        return self._http.proxy_container.proxies

    @property
    def viewer_count(self) -> Optional[int]:
        """
        Return viewer count of user

        :return: Viewer count

        """
        return self._viewer_count

    @property
    def room_id(self) -> Optional[int]:
        """
        Room ID if the connection was successful

        :return: Room's ID

        """
        return self.__room_id

    @property
    def room_info(self) -> Optional[dict]:
        """
        Room info dict if the connection was successful

        :return: Room Info Dict

        """
        return self.__room_info

    @property
    def unique_id(self) -> str:
        """
        Unique ID of the streamer

        :return: Their unique ID

        """
        return self.__unique_id

    @property
    def connected(self) -> bool:
        """
        Whether the client is connected

        :return: Result

        """
        return self.__connected

    @property
    def available_gifts(self) -> Dict[int, ExtendedGift]:
        """
        Available gift information for live room

        :return: Gift info

        """
        return self.__available_gifts
| StarcoderdataPython |
3292546 | """
Filename: plot_interhemispheric_heat_difference.py
Author: <NAME>, <EMAIL>
Description: Plot ensemble interhemispheric heat difference timeseries for OHC, hfds and rndt
"""
# Import general Python modules
import sys, os, pdb
import argparse
import numpy
import pandas
import iris
import matplotlib.pyplot as plt
import seaborn
seaborn.set_context('talk')
# Import my modules
# Locate the 'ocean-analysis' repository root by walking the components of
# the current working directory, then put its modules/ directory on the
# import path so the project helpers below can be imported.
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
    repo_dir = os.path.join(repo_dir, directory)
    if directory == 'ocean-analysis':
        break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)

try:
    import general_io as gio
    import convenient_universal as uconv
except ImportError:
    raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
# Map each short variable tag used in this script to the long cube name it
# appears under in the input data files.
names = {'OHC': 'ocean heat content',
         'OHU': 'Downward Heat Flux at Sea Water Surface',
         'netTOA': 'TOA Incoming Net Radiation'}

# Column order for the boxplot DataFrame: identifiers first, then one
# column per (variable, experiment) combination.
columns = ['model', 'mip',
           'netTOA, historical', 'netTOA, historicalAA', 'netTOA, historicalGHG',
           'OHU, historical', 'OHU, historicalAA', 'OHU, historicalGHG',
           'OHC, historical', 'OHC, historicalAA', 'OHC, historicalGHG']
def calc_anomaly(cube):
    """Return a copy of *cube* re-based so its first value is zero
    (i.e. data expressed as an anomaly relative to the first time step)."""
    rebased = cube.copy()
    baseline = rebased.data[0]
    rebased.data = rebased.data - baseline
    return rebased
def get_simulation_attributes(cube):
    """Extract (model, experiment, mip) identifiers from a cube's attributes.

    The MIP string is assembled as r<realization>i1p<physics_version>, and
    the 'historicalMisc' experiment is relabelled 'historicalAA'.
    """
    attrs = cube.attributes
    model = attrs['model_id']
    experiment = attrs['experiment_id']
    mip = 'r%si1p%s' % (attrs['realization'], attrs['physics_version'])
    if experiment == 'historicalMisc':
        experiment = 'historicalAA'
    return model, experiment, mip
def generate_data_dict(diff, model, experiment, mip, var):
    """Build one DataFrame row: *diff* goes under its '<var>, <experiment>'
    column, every other data column is NaN."""
    target_label = var + ', ' + experiment
    row = {'model': model, 'mip': mip}
    for column_label in columns[2:]:
        row[column_label] = diff if column_label == target_label else numpy.nan
    return row
def calc_interhemispheric_diff(nh_file, sh_file, var, time_constraint):
    """Load NH and SH hemispheric-sum cubes for *var*, convert each to an
    anomaly relative to its first time step, and return the final-time
    NH-minus-SH difference plus the (model, experiment, mip) identifiers.

    Raises AssertionError if the two files come from different simulations.
    """
    long_name = names[var]

    nh_cube = iris.load_cube(nh_file, (long_name + ' nh sum') & time_constraint)
    nh_attrs = get_simulation_attributes(nh_cube)
    nh_anom = calc_anomaly(nh_cube)

    sh_cube = iris.load_cube(sh_file, (long_name + ' sh sum') & time_constraint)
    sh_attrs = get_simulation_attributes(sh_cube)
    sh_anom = calc_anomaly(sh_cube)

    assert nh_attrs == sh_attrs
    model, experiment, mip = nh_attrs

    diff = nh_anom.data[-1] - sh_anom.data[-1]
    return diff, model, experiment, mip
def main(inargs):
    """Run the program.

    Builds one DataFrame row per input file pair (a single non-NaN value in
    the matching '<var>, <experiment>' column) and draws a horizontal
    boxplot of the interhemispheric differences.
    """
    time_constraint = gio.get_time_constraint(inargs.time)
    #metadata_dict = {}

    fig, ax = plt.subplots()
    # Reference line at zero difference between the hemispheres.
    plt.axvline(x=0, color='0.5', linestyle='--')
    data_list = []

    for nh_file, sh_file in inargs.rndt_files:
        diff, model, experiment, mip = calc_interhemispheric_diff(nh_file, sh_file, 'netTOA', time_constraint)
        data_list.append(generate_data_dict(diff, model, experiment, mip, 'netTOA'))
    for nh_file, sh_file in inargs.hfds_files:
        diff, model, experiment, mip = calc_interhemispheric_diff(nh_file, sh_file, 'OHU', time_constraint)
        data_list.append(generate_data_dict(diff, model, experiment, mip, 'OHU'))
    for nh_file, sh_file in inargs.ohc_files:
        diff, model, experiment, mip = calc_interhemispheric_diff(nh_file, sh_file, 'OHC', time_constraint)
        data_list.append(generate_data_dict(diff, model, experiment, mip, 'OHC'))

    data_df = pandas.DataFrame(data_list)
    # Palette order corresponds to the order of the `columns` list:
    # a strong colour for each historical column, pale for AA/GHG.
    seaborn.boxplot(data=data_df[columns], orient="h", palette=['red', '#FFDDDD', '#FFDDDD', 'yellow', '#fdffdd', '#fdffdd', 'blue', '#ddddff', '#ddddff'])
    plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0), useMathText=True)
    ax.xaxis.major.formatter._useMathText = True
    ax.set_xlabel('Northern Hemisphere minus Southern Hemisphere (Joules)')
    plt.title('Interhemispheric difference in accumulated heat, 1861-2005')

    plt.savefig(inargs.outfile, bbox_inches='tight')
    gio.write_metadata(inargs.outfile)
# Command-line entry point: each --*_files flag takes a (NH file, SH file)
# pair and may be repeated once per model/experiment.
if __name__ == '__main__':

    extra_info ="""
author:
<NAME>, <EMAIL>
"""

    description = 'Plot ensemble interhemispheric heat difference boxplot for OHC, hfds and rndt'
    parser = argparse.ArgumentParser(description=description,
                                     epilog=extra_info,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("outfile", type=str, help="output file")

    parser.add_argument("--rndt_files", type=str, nargs=2, action='append',
                        help="NH and SH integrated netTOA file, in that order (dedrifted)")
    parser.add_argument("--hfds_files", type=str, nargs=2, action='append',
                        help="NH and SH integrated hfds file, in that order (dedrifted)")
    parser.add_argument("--ohc_files", type=str, nargs=2, action='append',
                        help="NH and SH OHC file, in that order (dedrifted)")

    parser.add_argument("--time", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'),
                        default=['1861-01-01', '2005-12-31'], help="Time bounds")

    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
11203423 | import torch.utils.data as data
from PIL import Image
import os
import os.path
import numpy as np
import glob
# Recognised image extensions; both lower- and upper-case variants are
# listed explicitly because the check below is case-sensitive.
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]


def is_image_file(filename):
    """Return True when *filename* ends with a recognised image extension."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def dataloader(filepath):
    """Collect (t0 image, t1 image, flow map) file triples.

    Every '<filepath>/flow/*.pfm' map is paired with its corresponding
    t0/t1 PNG image paths, derived by substituting the directory name and
    extension.  Returns three parallel lists.
    """
    first_frames = []
    second_frames = []
    flow_maps = []

    for flow_map in sorted(glob.glob('%s/flow/*.pfm' % filepath)):
        first_frames.append(flow_map.replace('flow', 't0').replace('.pfm', '.png'))
        second_frames.append(flow_map.replace('flow', 't1').replace('.pfm', '.png'))
        flow_maps.append(flow_map)

    return first_frames, second_frames, flow_maps
| StarcoderdataPython |
9751572 | <filename>src/utils/boneyard/sim_score_v2.py
import os
import pickle
import yaml
import json
import pandas as pd
from difflib import SequenceMatcher
import numpy as np
from numpy import dot
from numpy.linalg import norm
# Project root and config, loaded once at import time.
# NOTE(review): replace('src','') removes *every* occurrence of 'src' from
# the CWD string and assumes the process was started from inside .../src --
# confirm the intended working directory.
base_dir = os.getcwd().replace('src','')
cfg = yaml.full_load(open(base_dir + "/config.yml", 'r'))
paths = cfg['PATHS']
def simscore(gen_tokens, run_id, step_count):
    """Score generated text against the training corpus by n-gram similarity
    and append one row per call to <CSV>/<run_id>/simscores.csv.

    For each chunk size n in cfg['TRAIN_PARAMS']['SIM_SCORES'], the generated
    text and the corpus are split into n-word chunks and each generated chunk
    is scored with its best SequenceMatcher ratio against any corpus chunk;
    the mean over generated chunks is recorded.
    """

    def get_score(gen_tokens, n, run_id):
        # Split the generated text into n-word chunks.
        gen_tokens = gen_tokens.split(' ')
        gen_tokens = [' '.join(gen_tokens[i * n:(i + 1) * n]) for i in range((len(gen_tokens) + n - 1) // n )]

        # Ensure the per-run token-cache directory exists.
        # NOTE(review): the bare except silently swallows *all* errors here,
        # not just "directory already exists".
        try:
            os.makedirs(os.path.join(base_dir, cfg['PATHS']['SIM_SCORE_TOKENS'], run_id))
        except:
            pass

        # Load the pre-chunked corpus from cache, or build and cache it.
        try:
            with open(os.path.join(base_dir, cfg['PATHS']['SIM_SCORE_TOKENS'], run_id, 'corpus_tokn-{}'.format(n)), "rb") as fp:
                corpus = pickle.load(fp)
        except:
            with open(os.path.join(base_dir, cfg['PATHS']['CORPUS'])) as f:
                corpus = f.read()
            corpus = corpus.split(' ')
            corpus = [' '.join(corpus[i * n:(i + 1) * n]) for i in range((len(corpus) + n - 1) // n )]
            #Save to pickle for faster loading later
            with open(os.path.join(base_dir, cfg['PATHS']['SIM_SCORE_TOKENS'], run_id, 'corpus_tokn-{}'.format(n)), "wb") as fp:
                pickle.dump(corpus, fp)

        def sscore(a, b):
            return SequenceMatcher(None, a, b).ratio()

        # Best-match similarity for each generated chunk (O(chunks * corpus)).
        scores = []
        for ele in gen_tokens:
            s = max([sscore(ele, x) for x in corpus])
            scores.append(s)
        return np.mean(scores)

    # NOTE(review): if SIM_SCORES is a single int rather than a list, the
    # TypeError from iterating it triggers the except-branch, but then
    # `scores` is a scalar and `columns` an int, so len(columns)/scores[i]
    # below still fail -- the fallback path looks broken; confirm config shape.
    try:
        scores = [get_score(gen_tokens, n, run_id) for n in cfg['TRAIN_PARAMS']['SIM_SCORES']]
    except:
        scores = get_score(gen_tokens, cfg['TRAIN_PARAMS']['SIM_SCORES'], run_id)

    columns = cfg['TRAIN_PARAMS']['SIM_SCORES']
    df = pd.DataFrame()
    if step_count is not None:
        df['Steps'] = [step_count]
        df = df.drop(df[df.Steps >= [step_count]].index) #If training from "existing", delete existing printed steps to mitigate a confusing graph
    for i in range(len(columns)):
        df[str(columns[i])] = [scores[i]]
    # Create the CSV with a header on first use, append thereafter.
    if not os.path.isfile(os.path.join(base_dir, cfg['PATHS']['CSV'], run_id,'simscores.csv')):
        df.to_csv(os.path.join(base_dir, cfg['PATHS']['CSV'], run_id, 'simscores.csv'), index=False)
    else:
        df.to_csv(os.path.join(base_dir, cfg['PATHS']['CSV'], run_id, 'simscores.csv'), mode='a', header=False, index=False)
def cos_score(gentext):
    """Return the best cosine similarity between *gentext* (a token-vector
    sequence) and any equal-length chunk of the cached corpus tokens.

    NOTE(review): raises ValueError from max() if no corpus chunk matches
    len(gentext) (e.g. the corpus is shorter than gentext), and divides by
    zero inside cos_sim for a zero-norm vector -- confirm callers guard this.
    """

    def chunks(lst, n):
        # Split lst into consecutive chunks of length n (last may be shorter).
        return [lst[i:i + n] for i in range(0, len(lst), n)]

    def cos_sim(a,b):
        return dot(a, b)/(norm(a)*norm(b))

    # Load the pickled corpus token vectors and flatten one nesting level.
    with open(os.path.join(base_dir, paths['CORPUS_TOKENS']), "rb") as fp:
        corpus = pickle.load(fp)
    corpus = [item.tolist() for sublist in corpus for item in sublist]

    # Compare against every corpus chunk of exactly the same length;
    # a shorter trailing chunk is filtered out by the len() check.
    corpus = chunks(corpus, len(gentext))
    return max([cos_sim(gentext, x) for x in corpus if len(x) == len(gentext)])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.