id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
24180 | import copy
import logging
import warnings
from kolibri.plugins.registry import registered_plugins
logger = logging.getLogger(__name__)
def __validate_config_option(
    section, name, base_config_spec, plugin_specs, module_path
):
    """Check that a plugin-supplied option (section, name) may be registered.

    Raises ValueError if it collides with a core Kolibri option, warns if
    another plugin already registered the same option, and records
    ``module_path`` in ``plugin_specs`` so later collisions can report
    every contributing plugin.
    """
    # Raise an error if someone tries to overwrite a base option
    # except for the default value.
    if section in base_config_spec:
        if name in base_config_spec[section]:
            raise ValueError("Cannot overwrite a core Kolibri options spec option")
    # Warn if a plugin tries to add an option that another plugin has already added
    if section in plugin_specs:
        if name in plugin_specs[section]:
            warnings.warn(
                "{plugin} set an option {option} in section {section} but {plugins} had already set it".format(
                    plugin=module_path,
                    plugins=", ".join(plugin_specs[section][name]),
                    option=name,
                    section=section,
                )
            )
            # Track this plugin as another modifier of the same option.
            plugin_specs[section][name].append(module_path)
        else:
            # If not create the list for this option name
            # to track this and future modifications
            plugin_specs[section][name] = [module_path]
    else:
        # If not create the dict for the section
        # and the list for this option name
        plugin_specs[section] = {name: [module_path]}
def __process_config_spec(
    option_spec, base_config_spec, plugin_specs, module_path, final_spec
):
    """Merge one plugin's ``option_spec`` into ``final_spec`` in place.

    Every option is validated against the core spec and other plugins first;
    when two plugins define the same option the last one processed wins.
    """
    for section, opts in option_spec.items():
        for name, attrs in opts.items():
            __validate_config_option(
                section, name, base_config_spec, plugin_specs, module_path
            )
            if section not in final_spec:
                final_spec[section] = {}
            final_spec[section][name] = attrs
def __validate_option_default(section, name, plugin_default_overrides, module_path):
    """Record that ``module_path`` overrides the default of (section, name).

    Warns if another plugin already overrode the same default. Unlike
    ``__validate_config_option``, core options may legitimately be targeted
    here, so no ValueError is raised.
    """
    # Warn if a plugin tries to add an option that another plugin has already added
    if section in plugin_default_overrides:
        if name in plugin_default_overrides[section]:
            warnings.warn(
                "{plugin} set an option default {option} in section {section} but {plugins} had already set it".format(
                    plugin=module_path,
                    plugins=", ".join(plugin_default_overrides[section][name]),
                    option=name,
                    section=section,
                )
            )
            plugin_default_overrides[section][name].append(module_path)
        else:
            # If not create the list for this option name
            # to track this and future modifications
            plugin_default_overrides[section][name] = [module_path]
    else:
        # If not create the dict for the section
        # and the list for this option name
        plugin_default_overrides[section] = {name: [module_path]}
def __process_option_defaults(
    option_defaults, base_config_spec, plugin_default_overrides, module_path, final_spec
):
    """Apply one plugin's default-value overrides onto ``final_spec``.

    Overrides may only target options that already exist in ``final_spec``;
    unknown sections/options are logged as errors and skipped. Note that
    ``base_config_spec`` is accepted for signature symmetry with
    ``__process_config_spec`` but is not used here.
    """
    for section, opts in option_defaults.items():
        for name, default in opts.items():
            __validate_option_default(
                section, name, plugin_default_overrides, module_path
            )
            if section not in final_spec:
                logger.error(
                    "Tried to set a new default in section {}, but this is not a valid section".format(
                        section
                    )
                )
                continue
            if name in final_spec[section]:
                # This is valid, so set a default
                # Note that we do no validation here for now,
                # so it is up to the user to ensure the default value
                # is kosher.
                final_spec[section][name]["default"] = default
            else:
                logger.error(
                    "Tried to set a new default in section {}, for option {} but this is not a valid option".format(
                        section, name
                    )
                )
def extend_config_spec(base_config_spec):
    """Return a copy of ``base_config_spec`` extended by registered plugins.

    Plugin option specs are merged first; default-value overrides are merged
    in a second pass so that plugins can override default values contributed
    by other plugins.
    """
    final_spec = copy.deepcopy(base_config_spec)

    # First pass: collect new option definitions from every registered plugin.
    plugin_specs = {}
    for plugin_instance in registered_plugins:
        options_module = plugin_instance.options_module
        if options_module and hasattr(options_module, "option_spec"):
            __process_config_spec(
                options_module.option_spec,
                base_config_spec,
                plugin_specs,
                plugin_instance.module_path,
                final_spec,
            )

    # Second pass: apply default-value overrides once all options exist.
    plugin_default_overrides = {}
    for plugin_instance in registered_plugins:
        defaults_module = plugin_instance.option_defaults_module
        if defaults_module and hasattr(defaults_module, "option_defaults"):
            __process_option_defaults(
                defaults_module.option_defaults,
                base_config_spec,
                plugin_default_overrides,
                plugin_instance.module_path,
                final_spec,
            )
    return final_spec
| StarcoderdataPython |
#! /usr/bin/python
"""Tiny helpers that emit SVG fragments as plain strings."""

# Module-wide drawing defaults.
FontSize = 20
Color = "Black"
LineWeight = 3
svgWidth = 400
svgHeight = 400


def svgHeader():
    """Return the opening <svg> tag sized by svgWidth/svgHeight."""
    return (
        '<svg xmlns="http://www.w3.org/2000/svg" version="1.1"'
        ' width="' + str(svgWidth) + '" height="' + str(svgHeight) + '">\n'
    )


def svgFooter():
    """Return the closing </svg> tag."""
    return '</svg>'


def svgLine(x1=0, x2=0, y1=400, y2=400):
    """Return a <line> from (x1, y1) to (x2, y2), wrapped in a stroke group."""
    line = (
        '<line x1="' + str(x1)
        + '" y1="' + str(y1)
        + '" x2="' + str(x2)
        + '" y2="' + str(y2)
        + '" stroke-width="' + str(LineWeight) + '"/>\n'
    )
    return '<g stroke="' + Color + '">\n' + line + '</g>'


def svgText(x=200, y=200, inString="TEST"):
    """Return a <text> element placing ``inString`` at (x, y)."""
    return (
        '<text x="' + str(x) + '" y="' + str(y)
        + '" font-family="Verdana" font-size="' + str(FontSize) + '">\n'
        + inString + "\n</text>\n"
    )
def main():
    """Write a small demo SVG (one line plus a text label) to ./out.svg."""
    # Use a context manager so the file handle is closed even if a
    # write raises (the original opened/closed it manually).
    with open("out.svg", 'w') as out_file:
        out_file.write(svgHeader())
        out_file.write(svgLine(200, 400, 200, 200))
        out_file.write(svgText(200, 200, "TESTING"))
        out_file.write(svgFooter())


if __name__ == "__main__":
    main()
1745672 | <gh_stars>0
#!/usr/bin/env python3
class Animal:
    """Base class for zoo animals; subclasses override ``category``."""

    # Class-level fallbacks, shadowed per instance when set.
    name = ""
    category = ""

    def __init__(self, name):
        """Store the animal's display name on the instance."""
        self.name = name

    def set_category(self, category):
        """Override the category on this instance only."""
        self.category = category
class Turtle(Animal):
    # All turtles share the reptile category (class-level override).
    category = "reptile"
class Snake(Animal):
    # All snakes share the reptile category (class-level override).
    category = "reptile"
class Zoo:
    """Track animals by name and count them per category."""

    def __init__(self):
        # Maps animal name -> category string.
        self.current_animals = {}

    def add_animal(self, animal):
        """Register ``animal`` (any object with .name and .category)."""
        self.current_animals[animal.name] = animal.category

    def total_of_category(self, category):
        """Return how many registered animals belong to ``category``."""
        return sum(
            1 for cat in self.current_animals.values() if cat == category
        )
# Demo: build a zoo with one turtle and one snake, then count reptiles.
zoo = Zoo()
print(Turtle.category)
turtle = Turtle("Turtle") #create an instance of the Turtle class
snake = Snake("Snake") #create an instance of the Snake class
zoo.add_animal(turtle)
zoo.add_animal(snake)
print(zoo.total_of_category("reptile")) #how many zoo animal types in the reptile category
| StarcoderdataPython |
8092893 | <reponame>knrdk/SoccerPlayersCrawler
import codecs
class CsvWriter:
    """Context-managed writer that appends comma-separated rows to a file.

    NOTE(review): values are joined verbatim -- no quoting or escaping is
    applied, so fields containing the separator corrupt the row; use the
    stdlib ``csv`` module for full CSV semantics.
    """

    def __init__(self, fileName):
        self.fileName = fileName
        self.separator = ","

    def __enter__(self):
        # UTF-8 text handle, truncating any existing file.
        self.file = codecs.open(self.fileName, 'w', 'utf-8')
        return self

    def add(self, *args):
        """Write one row built from ``args`` and flush immediately."""
        self.file.write(self.separator.join(args) + '\n')
        self.file.flush()

    def __exit__(self, exc_type, exc_value, traceback):
        self.file.close()
| StarcoderdataPython |
8043391 | <reponame>seberg/scipy
"""sparsetools - a collection of routines for sparse matrix operations
"""
from csr import *
from csc import *
from coo import *
from dia import *
from bsr import *
from csgraph import *
| StarcoderdataPython |
8163780 | # https://www.codewars.com/kata/554b4ac871d6813a03000035/train/python
# In this little assignment you are given a string of space separated numbers,
# and have to return the highest and lowest number.
def highAndLow(numbers):
    """Return "<highest> <lowest>" for a string of space-separated integers.

    Values are always normalized through int(), so an input such as "007"
    yields "7 7". (The original special-cased one-token and two-equal-token
    inputs and returned the raw tokens there, e.g. "007" -> "007 007",
    while the general path returned normalized values -- inconsistent.)
    """
    values = [int(token) for token in numbers.split()]
    return f"{max(values)} {min(values)}"
# Alternative:
# def high_and_low(numbers):
# numbers = [int(x) for x in numbers.split(" ")]
# return str(max(numbers)) + " " + str(min(numbers))
| StarcoderdataPython |
11225645 | <gh_stars>100-1000
import pytest
import logbook
from logbook.utils import (
logged_if_slow, deprecated, forget_deprecation_locations,
suppressed_deprecations, log_deprecation_message)
from time import sleep
# Slowness threshold (seconds) shared by the logged_if_slow tests below.
_THRESHOLD = 0.1

# unittest.mock is stdlib on Python 3.3+; fall back to the external
# ``mock`` package on older interpreters.
try:
    from unittest.mock import Mock, call
except ImportError:
    from mock import Mock, call
def test_logged_if_slow_reached(test_handler):
    # A block slower than the threshold emits exactly one record.
    with test_handler.applicationbound():
        with logged_if_slow('checking...', threshold=_THRESHOLD):
            sleep(2 * _THRESHOLD)
        assert len(test_handler.records) == 1
        [record] = test_handler.records
        assert record.message == 'checking...'


def test_logged_if_slow_did_not_reached(test_handler):
    # A block faster than the threshold stays silent.
    with test_handler.applicationbound():
        with logged_if_slow('checking...', threshold=_THRESHOLD):
            sleep(_THRESHOLD / 2)
        assert len(test_handler.records) == 0


def test_logged_if_slow_logger():
    # An explicit logger receives the message at DEBUG level.
    logger = Mock()
    with logged_if_slow('checking...', threshold=_THRESHOLD, logger=logger):
        sleep(2 * _THRESHOLD)
    assert logger.log.call_args == call(logbook.DEBUG, 'checking...')


def test_logged_if_slow_level(test_handler):
    # The ``level`` argument overrides the default DEBUG level.
    with test_handler.applicationbound():
        with logged_if_slow('checking...', threshold=_THRESHOLD,
                            level=logbook.WARNING):
            sleep(2 * _THRESHOLD)
        assert test_handler.records[0].level == logbook.WARNING


def test_logged_if_slow_deprecated(logger, test_handler):
    # Legacy ``func=`` argument still works, but conflicts with ``logger=``.
    with test_handler.applicationbound():
        with logged_if_slow('checking...', threshold=_THRESHOLD,
                            func=logbook.error):
            sleep(2 * _THRESHOLD)
        assert test_handler.records[0].level == logbook.ERROR
        assert test_handler.records[0].message == 'checking...'
    with pytest.raises(TypeError):
        logged_if_slow('checking...', logger=logger, func=logger.error)
def test_deprecated_func_called(capture):
    # Deprecated functions still execute and return normally.
    assert deprecated_func(1, 2) == 3


def test_deprecation_message(capture):
    # Calling a deprecated function logs a warning naming the function.
    deprecated_func(1, 2)
    [record] = capture.records
    assert "deprecated" in record.message
    assert 'deprecated_func' in record.message


def test_deprecation_with_message(capture):
    # A custom reason string is included in the warning.
    @deprecated("use something else instead")
    def func(a, b):
        return a + b

    func(1, 2)
    [record] = capture.records
    assert "use something else instead" in record.message
    assert "func is deprecated" in record.message


def test_no_deprecations(capture):
    # suppressed_deprecations() silences deprecation logging entirely.
    @deprecated('msg')
    def func(a, b):
        return a + b

    with suppressed_deprecations():
        assert func(1, 2) == 3
    assert not capture.records
def _no_decorator(func):
    # Identity decorator used to parametrize the test below.
    return func


@pytest.mark.parametrize('decorator', [_no_decorator, classmethod])
def test_class_deprecation(capture, decorator):
    # @deprecated composes with classmethod and the warning reports the
    # class-qualified name. (NOTE(review): the ``decorator`` parameter is
    # not referenced in the body -- confirm intent upstream.)
    class Bla(object):
        @deprecated('reason')
        @classmethod
        def func(self, a, b):
            assert isinstance(self, Bla)
            return a + b

    assert Bla().func(2, 4) == 6
    [record] = capture.records
    assert 'Bla.func is deprecated' in record.message
def test_deprecations_different_sources(capture):
    # The same deprecated call from two distinct call sites logs twice.
    def f():
        deprecated_func(1, 2)

    def g():
        deprecated_func(1, 2)

    f()
    g()
    assert len(capture.records) == 2


def test_deprecations_same_sources(capture):
    # Repeated calls from one call site are de-duplicated to one record.
    def f():
        deprecated_func(1, 2)

    f()
    f()
    assert len(capture.records) == 1


def test_deprecation_message_different_sources(capture):
    # Each branch is a distinct source line, so both messages are logged.
    def f(flag):
        if flag:
            log_deprecation_message('first message type')
        else:
            log_deprecation_message('second message type')

    f(True)
    f(False)
    assert len(capture.records) == 2


def test_deprecation_message_same_sources(capture):
    # Hitting the same branch twice logs only once.
    def f(flag):
        if flag:
            log_deprecation_message('first message type')
        else:
            log_deprecation_message('second message type')

    f(True)
    f(True)
    assert len(capture.records) == 1


def test_deprecation_message_full_warning(capture):
    # The emitted record is prefixed with "Deprecation message: ".
    def f():
        log_deprecation_message('some_message')

    f()
    [record] = capture.records
    assert record.message == 'Deprecation message: some_message'
def test_name_doc():
    # @deprecated preserves __name__ and the original docstring.
    @deprecated
    def some_func():
        """docstring here"""
        pass

    assert some_func.__name__ == 'some_func'
    assert 'docstring here' in some_func.__doc__


def test_doc_update():
    # Reassigning __doc__ on the wrapper keeps the deprecation note
    # while replacing the original docstring text.
    @deprecated('some_message')
    def some_func():
        """docstring here"""
        pass

    some_func.__doc__ = 'new_docstring'
    assert 'docstring here' not in some_func.__doc__
    assert 'new_docstring' in some_func.__doc__
    assert 'some_message' in some_func.__doc__


def test_deprecatd_docstring():
    # A ".. deprecated" directive is appended to the docstring, including
    # the custom message when one is given.
    message = "Use something else instead"

    @deprecated()
    def some_func():
        """This is a function
        """

    @deprecated(message)
    def other_func():
        """This is another function
        """

    assert ".. deprecated" in some_func.__doc__
    assert ".. deprecated\n {0}".format(message) in other_func.__doc__
@pytest.fixture
def capture(request):
    """Fixture: push a WARNING-level TestHandler, pop it on teardown."""
    handler = logbook.TestHandler(level=logbook.WARNING)
    handler.push_application()

    @request.addfinalizer
    def pop():
        handler.pop_application()

    return handler


@deprecated
def deprecated_func(a, b):
    # Shared deprecated target used by several tests above.
    return a + b


@pytest.fixture(autouse=True)
def forget_locations():
    # Reset deprecation de-duplication state between tests.
    forget_deprecation_locations()
| StarcoderdataPython |
1649472 | #!/usr/bin/env python3
import math
def split_float(x):
    """Split ``x`` into (integer_part, fractional_part).

    The integer part is returned as an int; the fractional part keeps the
    sign of ``x`` (math.modf semantics, e.g. -1.5 -> (-1, -0.5)).
    """
    fractional, integral = math.modf(float(x))
    return int(integral), fractional
if __name__ == "__main__":
    # Filter mode: read "x y" pairs from stdin and print
    # "xi yi xf yf" (integer parts first, then fractional parts).
    import sys
    for line in sys.stdin:
        xy = line.strip().split(' ')
        x = split_float(xy[0])
        y = split_float(xy[1])
        print('{0[0]} {1[0]} {0[1]} {1[1]}'.format(x, y))
| StarcoderdataPython |
1674053 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from contextlib import contextmanager
from copy import deepcopy
from functools import partial
from typing import Callable, Generator
import pytest
import torch
import torch.distributed
import torch.multiprocessing as mp
import torch.nn.functional
from torch import nn
from torch.nn.parallel.distributed import DistributedDataParallel
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from pytorch_lightning.lite import LightningLite
from pytorch_lightning.plugins.environments.lightning_environment import find_free_network_port
from pytorch_lightning.plugins.training_type.ddp_spawn import DDPSpawnPlugin
from pytorch_lightning.utilities.apply_func import apply_to_collection, move_data_to_device
from pytorch_lightning.utilities.cloud_io import atomic_save
from tests.helpers.boring_model import RandomDataset
from tests.helpers.runif import RunIf
class BoringModel(nn.Module):
    """Minimal model: one bias-free linear layer whose forward returns a loss."""

    def __init__(self):
        super().__init__()
        # 32 -> 2 projection with no bias, so the state dict holds one tensor.
        self.layer = torch.nn.Linear(32, 2, bias=False)

    def forward(self, x):
        # Forward directly yields the MSE against an all-ones target, so
        # training code can call backward() on the output.
        projected = self.layer(x)
        return torch.nn.functional.mse_loss(projected, torch.ones_like(projected))
def configure_optimizers(module: nn.Module):
    """Return the plain SGD optimizer (lr=1e-4) shared by all loops here."""
    return torch.optim.SGD(module.parameters(), lr=0.0001)
def main(
    move_to_device: Callable,
    model: nn.Module,
    train_dataloader: DataLoader,
    num_epochs: int = 10,
):
    """Reference training loop in plain PyTorch (no Lite).

    Returns the trained model's state_dict; used as ground truth to compare
    against the LightningLite-driven loop.
    """
    model = move_to_device(model)
    optimizer = configure_optimizers(model)

    for _ in range(num_epochs):
        model.train()
        for batch in train_dataloader:
            batch = move_to_device(batch)
            optimizer.zero_grad()
            loss = model(batch)
            loss.backward()
            optimizer.step()

    return model.state_dict()
class LiteRunner(LightningLite):
    """Same training loop as ``main`` but driven through LightningLite."""

    def run(self, model: nn.Module, train_dataloader: DataLoader, num_epochs: int = 10, tmpdir: str = None):
        optimizer = configure_optimizers(model)
        # Let Lite wrap model/optimizer/dataloader so it handles device
        # placement, precision and distributed sampling.
        model, optimizer = self.setup(model, optimizer)
        train_dataloader = self.setup_dataloaders(train_dataloader)

        model.train()
        for _ in range(num_epochs):
            for batch in train_dataloader:
                batch = self.to_device(batch)
                optimizer.zero_grad()
                loss = model(batch)
                self.backward(loss)
                optimizer.step()

        if isinstance(self._strategy, DDPSpawnPlugin) and tmpdir and self.global_rank == 0:
            # Spawned workers cannot hand tensors back to the parent, so
            # rank 0 persists the weights and returns the checkpoint path.
            checkpoint_path = os.path.join(tmpdir, "model.pt")
            atomic_save(model.state_dict(), checkpoint_path)
            return checkpoint_path
@contextmanager
def precision_context(precision, accelerator) -> Generator[None, None, None]:
    """Enter the autocast context matching ``precision``/``accelerator``.

    Full precision (32) is a no-op; otherwise CUDA or CPU autocast is used.
    """
    if precision == 32:
        yield
        return
    if accelerator == "gpu":
        with torch.cuda.amp.autocast():
            yield
    elif accelerator == "cpu":
        with torch.cpu.amp.autocast():
            yield
@pytest.mark.parametrize(
    "precision, strategy, devices, accelerator",
    [
        pytest.param(32, None, 1, "cpu"),
        pytest.param(32, None, 1, "gpu", marks=RunIf(min_gpus=1)),
        pytest.param(16, None, 1, "gpu", marks=RunIf(min_gpus=1)),
        pytest.param("bf16", None, 1, "gpu", marks=RunIf(min_torch="1.10", min_gpus=1)),
    ],
)
def test_boring_lite_model_single_device(precision, strategy, devices, accelerator, tmpdir):
    """Lite training must match plain PyTorch training on a single device."""
    LightningLite.seed_everything(42)
    train_dataloader = DataLoader(RandomDataset(32, 8))
    model = BoringModel()
    num_epochs = 1
    state_dict = deepcopy(model.state_dict())

    # Train once through LightningLite.
    lite = LiteRunner(precision=precision, strategy=strategy, devices=devices, accelerator=accelerator)
    lite.run(model, train_dataloader, num_epochs=num_epochs)
    lite_state_dict = model.state_dict()

    # Train the same initial weights with the plain loop.
    with precision_context(precision, accelerator):
        model.load_state_dict(state_dict)
        pure_state_dict = main(lite.to_device, model, train_dataloader, num_epochs=num_epochs)

    state_dict = apply_to_collection(state_dict, torch.Tensor, lite.to_device)
    # Training must have changed the weights...
    for w_pure, w_lite in zip(state_dict.values(), lite_state_dict.values()):
        assert not torch.equal(w_pure, w_lite)
    # ...and both loops must have produced identical weights.
    for w_pure, w_lite in zip(pure_state_dict.values(), lite_state_dict.values()):
        assert torch.equal(w_pure, w_lite)
def run(rank, model, train_dataloader, num_epochs, precision, accelerator, tmpdir):
    """Per-process DDP worker replicating the plain-PyTorch training loop."""
    os.environ["LOCAL_RANK"] = str(rank)
    if torch.distributed.is_available() and not torch.distributed.is_initialized():
        torch.distributed.init_process_group("gloo", rank=rank, world_size=2)

    to_device = partial(move_data_to_device, device=torch.device("cuda", rank))
    model = DistributedDataParallel(
        to_device(model),
        device_ids=[rank],
    )
    train_dataloader = DataLoader(
        train_dataloader.dataset,
        # seed must match Lite's sampler so both runs see identical shards
        sampler=DistributedSampler(train_dataloader.dataset, rank=rank, num_replicas=2, seed=42, drop_last=False),
    )
    with precision_context(precision, accelerator):
        main(to_device, model, train_dataloader, num_epochs=num_epochs)

    if rank == 0:
        # Only rank 0 persists the result for the parent process to compare.
        atomic_save(model.state_dict(), os.path.join(tmpdir, "model_spawn.pt"))
@pytest.mark.skipif(True, reason="Skipping as it takes 80 seconds.")
@RunIf(min_gpus=2)
@pytest.mark.parametrize(
    "precision, strategy, devices, accelerator",
    [
        (32, "ddp_spawn", 2, "gpu"),
    ],
)
def test_boring_lite_model_ddp_spawn(precision, strategy, devices, accelerator, tmpdir):
    """Lite ddp_spawn training must match a hand-rolled mp.spawn DDP run."""
    LightningLite.seed_everything(42)
    train_dataloader = DataLoader(RandomDataset(32, 8))
    model = BoringModel()
    num_epochs = 1
    state_dict = deepcopy(model.state_dict())

    lite = LiteRunner(precision=precision, strategy=strategy, devices=devices, accelerator=accelerator)
    checkpoint_path = lite.run(model, train_dataloader, num_epochs=num_epochs, tmpdir=tmpdir)

    spawn_model_state_dict = torch.load(checkpoint_path)
    # Training must have moved the weights away from the initial state.
    for w_pure, w_lite in zip(state_dict.values(), spawn_model_state_dict.values()):
        assert not torch.equal(w_pure.cpu(), w_lite.cpu())

    # Re-run the same training with raw torch.multiprocessing + DDP.
    model.load_state_dict(state_dict)
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = str(find_free_network_port())
    mp.spawn(run, args=(model, train_dataloader, num_epochs, precision, accelerator, tmpdir), nprocs=2)

    spawn_pure_model_state_dict = torch.load(os.path.join(tmpdir, "model_spawn.pt"))
    # Both distributed runs must agree exactly.
    for w_pure, w_lite in zip(spawn_pure_model_state_dict.values(), spawn_model_state_dict.values()):
        assert torch.equal(w_pure.cpu(), w_lite.cpu())
@RunIf(min_gpus=2, standalone=True)
@pytest.mark.parametrize(
    "precision, strategy, devices, accelerator",
    [
        (32, "ddp", 2, "gpu"),
    ],
)
def test_boring_lite_model_ddp(precision, strategy, devices, accelerator, tmpdir):
    """Lite DDP training must match the hand-rolled DDP loop run in-process."""
    LightningLite.seed_everything(42)
    train_dataloader = DataLoader(RandomDataset(32, 4))
    model = BoringModel()
    num_epochs = 1
    state_dict = deepcopy(model.state_dict())

    lite = LiteRunner(precision=precision, strategy=strategy, devices=devices, accelerator=accelerator)
    lite.run(model, train_dataloader, num_epochs=num_epochs, tmpdir=tmpdir)
    lite_model_state_dict = model.state_dict()

    # Training must have changed the weights.
    for w_pure, w_lite in zip(state_dict.values(), lite_model_state_dict.values()):
        assert not torch.equal(w_pure.cpu(), w_lite.cpu())

    # Re-run with identical seeding through the raw DDP worker.
    LightningLite.seed_everything(42)
    train_dataloader = DataLoader(RandomDataset(32, 4))
    model = BoringModel()
    run(lite.global_rank, model, train_dataloader, num_epochs, precision, accelerator, tmpdir)

    pure_model_state_dict = model.state_dict()
    for w_pure, w_lite in zip(pure_model_state_dict.values(), lite_model_state_dict.values()):
        assert torch.equal(w_pure.cpu(), w_lite.cpu())
| StarcoderdataPython |
9619276 | from __future__ import unicode_literals
import unittest
import io
import jshlib
class TestLoadJsh(unittest.TestCase):
    """Exercise jshlib.load_json_iter on streams of whitespace-separated JSON values."""

    def test_nums(self):
        # Bare numbers from a plain string source.
        s = "42 31 2.343"
        result = list(jshlib.load_json_iter(s))
        expected = [42, 31, 2.343]
        assert expected == result

    def test_strs(self):
        # String values separated by spaces and newlines, from a stream.
        data = '"hi" "b o"\n"LU"'
        s = io.StringIO(data)
        result = list(jshlib.load_json_iter(s))
        expected = ["hi", "b o", 'LU']
        assert expected == result

    def test_escape(self):
        # Backslash-escaped quote inside a string value.
        data = r'"hi\"U"'
        s = io.StringIO(data)
        result = list(jshlib.load_json_iter(s))
        expected = ['hi"U']
        assert expected == result

    def test_list(self):
        # One JSON array per line.
        data = r'''
        [1,2,3]
        ["yo", "bob"]
        '''
        s = io.StringIO(data)
        result = list(jshlib.load_json_iter(s))
        expected = [
            [1, 2, 3],
            ["yo", "bob"],
        ]
        assert expected == result

    def test_list_embedded(self):
        # Nested arrays round-trip.
        data = r'''
        [1,2,[42,3]]
        ["yo", ["yo", "yo"]]
        '''
        s = io.StringIO(data)
        result = list(jshlib.load_json_iter(s))
        expected = [
            [1, 2, [42, 3]],
            ["yo", ["yo", "yo"]],
        ]
        assert expected == result

    def test_object(self):
        # A multi-line JSON object with mixed value types.
        data = r'''
        {
        "one": 1,
        "string": "foo",
        "bool": true,
        "list": [1,2,3]
        }
        '''
        s = io.StringIO(data)
        result = list(jshlib.load_json_iter(s))
        expected = [
            {
                "one": 1,
                "string": "foo",
                "bool": True,
                "list": [1, 2, 3],
            },
        ]
        assert expected == result
| StarcoderdataPython |
5023210 | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param root: the root of the tree
    @return: the total sum of all root-to-leaf numbers
    """

    def sumNumbers(self, root):
        """Sum every number formed by concatenating digits root-to-leaf."""
        return self._dfs(root, 0)

    def _dfs(self, node, prefix):
        # An empty subtree contributes nothing to the sum.
        if node is None:
            return 0
        # Shift the running number one digit left and append this node's digit.
        prefix = prefix * 10 + node.val
        if node.left is None and node.right is None:
            # Leaf: the completed number is this path's contribution.
            return prefix
        return self._dfs(node.left, prefix) + self._dfs(node.right, prefix)
| StarcoderdataPython |
12825141 | from model.storage import *
from model.disk import *
from model.blueprint import *
from utils.dbconn import *
import os
from pkg.azure import sas
from asyncio.subprocess import PIPE, STDOUT
import asyncio
from pathlib import Path
from utils.logger import *
async def download_worker(osdisk_raw, project, host):
    """Download a raw OS-disk blob from Azure storage via azcopy.

    Looks up the project's storage account/container/key, builds a SAS URL,
    and shells out to ``azcopy`` (skipped if the local file already exists).
    Progress is appended to the ansible migration log and the blueprint
    status is advanced to '32'. Errors are logged, never raised.
    """
    con = create_db_con()
    account_name = Storage.objects(project=project)[0]['storage']
    container_name = Storage.objects(project=project)[0]['container']
    access_key = Storage.objects(project=project)[0]['access_key']
    sas_token = sas.generate_sas_token(account_name, access_key)
    pipe_result = ''
    file_size = '0'
    try:
        cur_path = os.getcwd()
        path = cur_path + "/osdisks/" + osdisk_raw
        if not os.path.exists(path):
            os.popen('echo "download started"> ./logs/ansible/migration_log.txt')
            url = "https://" + account_name + ".blob.core.windows.net/" + container_name + "/" + osdisk_raw + "?" + sas_token
            # NOTE(review): shell command built by string concatenation --
            # osdisk_raw / sas_token are interpolated unescaped into a shell
            # line; confirm inputs are trusted or switch to exec-style args.
            command1 = "azcopy copy --recursive '" + url + "' '" + path + "'"
            os.popen('echo ' + command1 + '>> ./logs/ansible/migration_log.txt')
            process1 = await asyncio.create_subprocess_shell(command1, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
            await process1.wait()
        BluePrint.objects(project=project, host=host).update(status='32')
    except Exception as e:
        print(repr(e))
        logger(str(e), "warning")
    finally:
        # Always release the DB connection, even on failure.
        con.close()
async def upload_worker(osdisk_raw, project, host):
    """Upload the converted fixed-format VHD back to Azure blob storage.

    Computes the VHD's on-disk size, azcopy-uploads it with a SAS URL,
    advances the blueprint status to '36', and records the VHD name/size
    on the Disk document for this host. Errors are logged, never raised.
    """
    con = create_db_con()
    account_name = Storage.objects(project=project)[0]['storage']
    container_name = Storage.objects(project=project)[0]['container']
    access_key = Storage.objects(project=project)[0]['access_key']
    sas_token = sas.generate_sas_token(account_name, access_key)
    pipe_result = ''
    file_size = '0'
    try:
        # The downloaded raw artifact is named "*.raw.000"; the converted
        # artifact produced by conversion_worker is "*.vhd".
        osdisk_vhd = osdisk_raw.replace(".raw.000", ".vhd")
        cur_path = os.getcwd()
        path = cur_path + "/osdisks/" + osdisk_raw
        vhd_path = cur_path + "/osdisks/" + osdisk_vhd
        file_size = Path(vhd_path).stat().st_size
        os.popen('echo "Filesize calculated" >> ./logs/ansible/migration_log.txt')
        os.popen('echo "VHD uploading" >> ./logs/ansible/migration_log.txt')
        url = "https://" + account_name + ".blob.core.windows.net/" + container_name + "/" + osdisk_vhd + "?" + sas_token
        # NOTE(review): same shell-string interpolation concern as in
        # download_worker.
        command3 = "azcopy copy --recursive '" + vhd_path + "' '" + url + "'"
        process3 = await asyncio.create_subprocess_shell(command3, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        await process3.wait()
        os.popen('echo "VHD uploaded" >> ./logs/ansible/migration_log.txt')
        BluePrint.objects(project=project, host=host).update(status='36')
        Disk.objects(host=host, project=project).update_one(vhd=osdisk_vhd, file_size=str(file_size), upsert=True)
    except Exception as e:
        print(repr(e))
        logger(str(e), "warning")
        os.popen('echo "' + repr(e) + '" >> ./logs/ansible/migration_log.txt')
    finally:
        # Always release the DB connection, even on failure.
        con.close()
async def conversion_worker(osdisk_raw, project, host):
    """Convert the downloaded raw disk image to a fixed-subformat VHD.

    Shells out to ``qemu-img convert``; on success the blueprint status is
    advanced to '34'. The SAS token computed below is unused here --
    presumably kept for symmetry with the other workers (TODO: confirm).
    Errors are logged, never raised.
    """
    con = create_db_con()
    account_name = Storage.objects(project=project)[0]['storage']
    container_name = Storage.objects(project=project)[0]['container']
    access_key = Storage.objects(project=project)[0]['access_key']
    sas_token = sas.generate_sas_token(account_name, access_key)
    pipe_result = ''
    try:
        osdisk_vhd = osdisk_raw.replace(".raw.000", ".vhd")
        cur_path = os.getcwd()
        path = cur_path + "/osdisks/" + osdisk_raw
        vhd_path = cur_path + "/osdisks/" + osdisk_vhd
        print("Start converting")
        print(path)
        os.popen('echo "start converting">> ./logs/ansible/migration_log.txt')
        # NOTE(review): shell-string interpolation of paths, as in the
        # other workers.
        command2 = "qemu-img convert -f raw -o subformat=fixed -O vpc " + path + " " + vhd_path
        process2 = await asyncio.create_subprocess_shell(command2, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        await process2.wait()
        BluePrint.objects(project=project, host=host).update(status='34')
        os.popen('echo "Conversion completed" >> ./logs/ansible/migration_log.txt')
    except Exception as e:
        print(str(e))
        logger(str(e), "warning")
        file_size = '0'
    finally:
        # Always release the DB connection, even on failure.
        con.close()
| StarcoderdataPython |
8073422 | import fasttext
import pandas as pd
# Load the previously fine-tuned fastText classifier.
model = fasttext.load_model("tuned-30h.bin")
print("model is loaded")

# Build one text column per row from the preprocessed title + body.
df = pd.read_csv("data/allrepos_processed_textonly.csv")
df['title_processed'] = df['title_processed'].astype(str)
df['body_processed'] = df['body_processed'].astype(str)
df['txt'] = df['title_processed'] + " " + df['body_processed']
l = list(df['txt'])
print("data is loaded")

# Top-3 label predictions for every row.
prediction = model.predict(l, k=3)
print("prediction is completed")
labels, probabilities = prediction[0], prediction[1]

# Re-shape the parallel (labels, probs) lists into one {label: prob}
# dict per row, then persist as CSV.
myzip = list(zip(labels, probabilities))
probsDict = list(map(lambda x: dict(zip(x[0], x[1])), myzip))
probsDf = pd.DataFrame(probsDict)
probsDf.to_csv("data/allrepos_ftprobs.csv", index=False)
print("finished")
| StarcoderdataPython |
315758 | from .pyDoodle2Web import PyDoodle2Web | StarcoderdataPython |
1801382 | import logging
from datetime import timedelta
from typing import List
from sqlalchemy.sql import and_, func
from couchers.db import session_scope
from couchers.models import (
Notification,
NotificationDelivery,
NotificationDeliveryType,
NotificationPreference,
NotificationTopicAction,
User,
)
from couchers.sql import couchers_select as select
from couchers.tasks import send_digest_email, send_notification_email
logger = logging.getLogger(__name__)
def get_notification_preference(
    session, user_id: int, topic_action: NotificationTopicAction
) -> List[NotificationDeliveryType]:
    """
    Gets the user's preference from the DB or otherwise falls back to defaults

    Must be done in session scope

    Returns list of delivery types
    """
    # Per-user overrides for this topic_action: delivery_type -> deliver flag.
    overrides = {
        res.delivery_type: res.deliver
        for res in session.execute(
            select(NotificationPreference)
            .where(NotificationPreference.id == user_id)
            .where(NotificationPreference.topic_action == topic_action)
        )
        .scalars()
        .all()
    }
    # For each delivery type use the override if present, otherwise whether
    # the type is in the topic_action's built-in default set.
    return [dt for dt in NotificationDeliveryType if overrides.get(dt, dt in topic_action.defaults)]
def handle_notification(notification_id):
    """Create delivery records for one notification per the user's preferences.

    Email deliveries are queued (delivered=None) for the background email
    worker to dedupe and send; push deliveries are marked delivered
    immediately (actual sending is still a TODO).
    """
    with session_scope() as session:
        notification = session.execute(select(Notification).where(Notification.id == notification_id)).scalar_one()

        # ignore this notification if the user hasn't enabled new notifications
        user = session.execute(select(User).where(User.id == notification.user_id)).scalar_one()
        if not user.new_notifications_enabled:
            logger.info(f"Skipping notification for {user} due to new notifications disabled")
            return

        topic, action = notification.topic_action.unpack()
        logger.info(notification)
        delivery_types = get_notification_preference(session, notification.user.id, notification.topic_action)
        for delivery_type in delivery_types:
            logger.info(f"Should notify by {delivery_type}")
            if delivery_type == NotificationDeliveryType.email:
                # for emails we don't deliver straight up, wait until the email background worker gets around to it and handles deduplication
                session.add(
                    NotificationDelivery(
                        notification_id=notification.id,
                        delivered=None,
                        delivery_type=NotificationDeliveryType.email,
                    )
                )
            elif delivery_type == NotificationDeliveryType.push:
                # for push notifications, we send them straight away
                session.add(
                    NotificationDelivery(
                        notification_id=notification.id,
                        delivered=func.now(),
                        delivery_type=NotificationDeliveryType.push,
                    )
                )
                # todo
                logger.info("Supposed to send push notification")
def handle_email_notifications():
    """
    Sends out emails for notifications
    """
    logger.info(f"Sending out email notifications")

    with session_scope() as session:
        # delivered email notifications: we don't want to send emails for these
        # (user_id, topic_action, key) triples already emailed in the last 24h
        subquery = (
            select(Notification.user_id, Notification.topic_action, Notification.key)
            .join(NotificationDelivery, NotificationDelivery.notification_id == Notification.id)
            .where(NotificationDelivery.delivery_type == NotificationDeliveryType.email)
            .where(NotificationDelivery.delivered != None)
            .where(Notification.created > func.now() - timedelta(hours=24))
            .group_by(Notification.user_id, Notification.topic_action, Notification.key)
            .subquery()
        )
        # Pending email deliveries from the last hour, collapsed to the
        # earliest notification per (user, topic_action, key).
        email_notifications_to_send = session.execute(
            (
                select(
                    User,
                    Notification.topic_action,
                    Notification.key,
                    func.min(Notification.id).label("notification_id"),
                    func.min(NotificationDelivery.id).label("notification_delivery_id"),
                )
                .join(User, User.id == Notification.user_id)
                .join(NotificationDelivery, NotificationDelivery.notification_id == Notification.id)
                .where(NotificationDelivery.delivery_type == NotificationDeliveryType.email)
                .where(Notification.created > func.now() - timedelta(hours=1))
                .group_by(User, Notification.user_id, Notification.topic_action, Notification.key)
                # pick the notifications that haven't been delivered
                .outerjoin(
                    subquery,
                    and_(
                        and_(
                            subquery.c.user_id == Notification.user_id,
                            subquery.c.topic_action == Notification.topic_action,
                        ),
                        subquery.c.key == Notification.key,
                    ),
                )
                .where(subquery.c.key == None)
            )
        ).all()

        for user, topic_action, key, notification_id, notification_delivery_id in email_notifications_to_send:
            topic, action = topic_action.unpack()
            logger.info(f"Sending notification id {notification_id} to {user.id} ({topic}/{action}/{key})")
            notification_delivery = session.execute(
                select(NotificationDelivery).where(NotificationDelivery.id == notification_delivery_id)
            ).scalar_one()
            # Sanity checks before sending and marking as delivered.
            assert notification_delivery.delivery_type == NotificationDeliveryType.email
            assert not notification_delivery.delivered
            assert notification_delivery.notification_id == notification_id
            send_notification_email(notification_delivery.notification)
            notification_delivery.delivered = func.now()
            # Commit per email so a later failure doesn't re-send earlier ones.
            session.commit()
def handle_email_digests():
    """
    Send out email digests.

    Selects users whose last digest went out more than 24 hours ago and who
    have at least one undelivered digest-type NotificationDelivery, then
    emails each such user a digest of all their not-yet-delivered
    notifications and stamps those deliveries (and the user's
    last_digest_sent) with the database clock.
    """
    logger.info(f"Sending out email digests")
    with session_scope() as session:
        # Users due for a digest: last digest >24h ago AND at least one
        # digest-type delivery that has no `delivered` timestamp yet.
        users_to_send_digests_to = session.execute(
            (
                select(User)
                .where(User.last_digest_sent < func.now() - timedelta(hours=24))
                # todo: tz
                .join(Notification, Notification.user_id == User.id)
                .join(NotificationDelivery, NotificationDelivery.notification_id == Notification.id)
                .where(NotificationDelivery.delivery_type == NotificationDeliveryType.digest)
                # SQLAlchemy requires `== None` here (rendered as IS NULL)
                .where(NotificationDelivery.delivered == None)
                .group_by(User)
            )
        ).all()
        for user in users_to_send_digests_to:
            # already sent notifications
            # Subquery: this user's notifications that were already delivered
            # through some channel (one representative delivery id each).
            subquery = (
                select(
                    Notification.id.label("notification_id"),
                    func.min(NotificationDelivery.id).label("notification_delivery_id"),
                )
                .join(NotificationDelivery, NotificationDelivery.notification_id == Notification.id)
                .where(NotificationDelivery.delivered == True)
                .where(Notification.user_id == user.id)
                .group_by(Notification)
                .subquery()
            )
            # notifications that haven't been delivered in any way yet
            # (anti-join: outer join against the "already delivered" subquery
            # and keep only the rows with no match)
            notifications_and_deliveries = session.execute(
                (
                    select(Notification, NotificationDelivery)
                    .join(NotificationDelivery, NotificationDelivery.notification_id == Notification.id)
                    .where(NotificationDelivery.delivery_type == NotificationDeliveryType.digest)
                    .outerjoin(subquery, subquery.c.notification_id == Notification.id)
                    .where(subquery.c.notification_delivery_id == None)
                    .where(Notification.user_id == user.id)
                    .order_by(Notification.created)
                )
            ).all()
            if notifications_and_deliveries:
                notifications, deliveries = zip(*notifications_and_deliveries)
                logger.info(f"Sending {user.id=} a digest with {len(notifications)} notifications")
                send_digest_email(notifications)
                # Use the DB clock (func.now()) for both stamps so the
                # delivery times and last_digest_sent stay consistent
                # server-side regardless of app-host clock skew.
                for delivery in deliveries:
                    delivery.delivered = func.now()
                user.last_digest_sent = func.now()
                session.commit()
| StarcoderdataPython |
3541683 | /usr/lib/python3.8/random.py | StarcoderdataPython |
8146283 | from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
import django_cas_ng.views as cas_views
# URL routing: the app's own routes live under /service/, CAS single-sign-on
# views handle login/logout/proxy-callback, and media files are appended via
# static() (which is a no-op outside DEBUG mode).
urlpatterns = [
    path('service/', include("app.urls")),
    path('accounts/login/', cas_views.LoginView.as_view(), name='cas_ng_login'),
    path('accounts/logout/', cas_views.LogoutView.as_view(), name='cas_ng_logout'),
    path(
        'accounts/callback/',
        cas_views.CallbackView.as_view(),
        name='cas_ng_proxy_callback'
    ),
    # NOTE(review): this re-uses the name 'cas_ng_logout' already assigned to
    # accounts/logout/ above; reverse('cas_ng_logout') will resolve to only
    # one of the two routes — confirm the duplication is intentional.
    path('admin/logout/', cas_views.LogoutView.as_view(), name='cas_ng_logout'),
    path(settings.ADMIN_URL, admin.site.urls),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| StarcoderdataPython |
8169835 | <gh_stars>0
import torch.nn as nn
from mmcv.cnn import ConvModule
class DepthwiseSeparableConvModule(nn.Module):
    """Depthwise separable convolution module.

    Factorizes a standard convolution (see MobileNet,
    https://arxiv.org/pdf/1704.04861.pdf) into two ConvModules:

    * a depthwise conv (``groups == in_channels``) that filters each input
      channel independently, followed by
    * a 1x1 pointwise conv that mixes information across channels.

    Each of the two ConvModules gets its own norm/activation layers when the
    corresponding config dicts are provided.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d. Default: 1.
        padding (int or tuple[int]): Same as nn.Conv2d. Default: 0.
        dilation (int or tuple[int]): Same as nn.Conv2d. Default: 1.
        norm_cfg (dict): Fallback norm config shared by both convs.
            Default: None.
        act_cfg (dict): Fallback activation config shared by both convs.
            Default: dict(type='ReLU').
        dw_norm_cfg (dict): Norm config of the depthwise ConvModule; the
            literal string 'default' means "inherit ``norm_cfg``".
            Default: 'default'.
        dw_act_cfg (dict): Activation config of the depthwise ConvModule;
            'default' inherits ``act_cfg``. Default: 'default'.
        pw_norm_cfg (dict): Norm config of the pointwise ConvModule;
            'default' inherits ``norm_cfg``. Default: 'default'.
        pw_act_cfg (dict): Activation config of the pointwise ConvModule;
            'default' inherits ``act_cfg``. Default: 'default'.
        kwargs (optional): Other shared arguments forwarded to both
            ConvModules. See ConvModule for reference.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 dw_norm_cfg='default',
                 dw_act_cfg='default',
                 pw_norm_cfg='default',
                 pw_act_cfg='default',
                 **kwargs):
        super(DepthwiseSeparableConvModule, self).__init__()
        assert 'groups' not in kwargs, 'groups should not be specified'

        # Resolve the 'default' sentinel: inherit the shared configs.
        if dw_norm_cfg == 'default':
            dw_norm_cfg = norm_cfg
        if dw_act_cfg == 'default':
            dw_act_cfg = act_cfg
        if pw_norm_cfg == 'default':
            pw_norm_cfg = norm_cfg
        if pw_act_cfg == 'default':
            pw_act_cfg = act_cfg

        # Depthwise stage: one filter group per input channel.
        self.depthwise_conv = ConvModule(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            norm_cfg=dw_norm_cfg,
            act_cfg=dw_act_cfg,
            **kwargs)
        # Pointwise stage: 1x1 conv projecting to out_channels.
        self.pointwise_conv = ConvModule(
            in_channels,
            out_channels,
            1,
            norm_cfg=pw_norm_cfg,
            act_cfg=pw_act_cfg,
            **kwargs)

    def forward(self, x):
        """Apply the depthwise conv, then the pointwise conv.

        Args:
            x (Tensor): Input tensor with shape (N, C, H, W).

        Returns:
            Tensor: Output tensor.
        """
        return self.pointwise_conv(self.depthwise_conv(x))
| StarcoderdataPython |
3264430 | <filename>edkrepo/common/workspace_maintenance/workspace_maintenance.py
#!/usr/bin/env python3
#
## @file
# workspace_maintenance.py
#
# Copyright (c) 2017- 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
''' Contains shared workspace maintenance functions. '''
import os
import unicodedata
from edkrepo.common.edkrepo_exception import EdkrepoFoundMultipleException, EdkrepoNotFoundException
from edkrepo.common.humble import GEN_A_NOT_IN_B, GEN_FOUND_MULT_A_IN_B
def generate_name_for_obsolete_backup(absolute_path):
    """Return an unused backup name for the entry at `absolute_path`.

    The candidate names are "<basename>_old", then "<basename>_old2",
    "<basename>_old3", ... — the first one that does not already exist in
    the same parent directory is returned.

    Raises:
        ValueError: if `absolute_path` itself does not exist.
    """
    if not os.path.exists(absolute_path):
        raise ValueError("{} does not exist".format(absolute_path))
    parent_dir = os.path.dirname(absolute_path)
    base_name = os.path.basename(absolute_path)
    attempt = 1
    while True:
        # The first candidate has no numeric suffix, matching historical names.
        suffix = "_old" if attempt == 1 else "_old{}".format(attempt)
        candidate = "{}{}".format(base_name, suffix)
        if not os.path.exists(os.path.join(parent_dir, candidate)):
            return candidate
        attempt += 1
def case_insensitive_equal(str1, str2):
    """Return True if the two strings are equal ignoring case.

    Strings are casefolded (aggressive Unicode lowercasing) and then
    NFKD-normalized so visually-equivalent Unicode sequences compare equal.
    """
    def _fold(s):
        return unicodedata.normalize("NFKD", s.casefold())

    return _fold(str1) == _fold(str2)
def case_insensitive_single_match(str1, str_list):
    """Return the unique element of `str_list` case-insensitively equal to `str1`.

    Raises:
        EdkrepoNotFoundException: if no element matches.
        EdkrepoFoundMultipleException: if more than one element matches.
    """
    matches = [candidate for candidate in str_list
               if case_insensitive_equal(str1, candidate)]
    if not matches:
        raise EdkrepoNotFoundException(GEN_A_NOT_IN_B.format(str1, str_list))
    if len(matches) > 1:
        raise EdkrepoFoundMultipleException(GEN_FOUND_MULT_A_IN_B.format(str1, str_list))
    return matches[0]
| StarcoderdataPython |
5069240 | '''
update on GameBoard, specialized for shame and obedience element
'''
from ShameAndObedienceElement import *
from math import sqrt
# assignElementsToRegion needs to go from False -> "assignMode"
# add variable to this method : assign_elements_to_region
class GameBoard:
    """
    Board for the Shame-and-Obedience game: owns the elements (languages
    wrapped in ShameAndObedienceElement instances), their language
    statistics, and — optionally — their assignment to rectangular regions
    of the board.

    arguments:
    - languageInfo := int|list(`languages`)
    - dimensions := (int,int)
    - maxNumLanguages := int, maximum languages supported
    - assignElementsToRegion := False|((t/e|fit)::assignmentMode, int::(assignFrequency))
    """

    def __init__(self, languageInfo, dimensions, maxNumLanguages = 52, assignElementsToRegion = False):
        self.dimensions = dimensions
        self.area = self.dimensions[0] * self.dimensions[1]
        # these two are not used for Shame And Obedience
        self.wordCoordinates = {}
        self.centroidCoordinates = None
        # element-to-region assignment and its total area error; populated by
        # assign_elements() when an assignment mode is requested
        self.config, self.configAreaDiff = None, None
        self.set_languages(languageInfo, maxNumLanguages = maxNumLanguages)
        self.get_element_stats()
        self.assignElementsToRegion = assignElementsToRegion
        self.assign_elements(self.assignElementsToRegion)
        self.roundNumber = 0

    @staticmethod
    def generate_colors(numColors):
        """
        Generate `numColors` gray-scale colors, each a 3-tuple with equal
        channel values in 0-255. Uses a color-distance division scheme based
        on numColors; the first color is always black.
        """
        assert numColors > 0, "invalid numColors {}".format(numColors)
        c = (0,0,0)
        colors = [c]
        if numColors == 1: return colors
        maxDistancePerIndex = 255
        hopPerIndex = maxDistancePerIndex // (numColors - 1)
        numColors -= 1
        while numColors > 0:
            c = (c[0] + hopPerIndex, c[1] + hopPerIndex, c[2] + hopPerIndex)
            colors.append(c)
            numColors -= 1
        return colors

    def get_area(self):
        """Return the area of the gameboard (width * height)."""
        return self.dimensions[0] * self.dimensions[1]

    def set_languages(self, languageInfo, maxNumLanguages):
        """
        Declare the ShameAndObedienceElement instances from `languageInfo`
        (either a count of languages to generate or an explicit list of
        languages), giving each element a distinct gray-scale color.
        """
        if type(languageInfo) is int:
            assert languageInfo <= maxNumLanguages, "cannot operate on more than {} elements".format(maxNumLanguages)
            languages = LanguageMaker.get_languages(n = languageInfo, minSizeInfo = 100, startSizeInfo = 5, mode = "geq")
        elif type(languageInfo) is list: # list of languages
            assert len(languageInfo) <= maxNumLanguages, "cannot operate on more than {} elements".format(maxNumLanguages)
            languages = languageInfo
        else:
            raise IOError("invalid languageInfo {}".format(languageInfo))

        # make colors for languages
        colors = GameBoard.generate_colors(len(languages))
        self.elements = {}
        i = 0
        for l, c in zip(languages, colors):
            s = ShameAndObedienceElement(i, l, c)
            self.elements[i] = s
            i += 1

    def get_element_stats(self):
        """Cache each element's language count and its normalized ratio."""
        self.elementLanguageCount = self.get_element_stats_on_language()
        self.elementLanguageRatio = self.get_element_stats_on_language_by_ratio(self.elementLanguageCount)

    def get_element_stats_on_language(self):
        """
        Return a dict mapping element id to the element's language stat with
        prohibition.

        return:
        - dict(int : int)
        """
        d = {}
        for k, v in self.elements.items():
            d[k] = v.get_language_stats_with_prohibition()
        return d

    def get_element_stats_on_language_by_ratio(self, q = None):
        """
        Normalize the counts from get_element_stats_on_language by their sum.

        arguments:
        - q := None|dict::(element stats literal counts)

        return:
        - dict(int : float), or False when the counts sum to 0
        """
        # BUG FIX (idiom): was `if q == None`; identity comparison is the
        # correct test for the None sentinel.
        if q is None:
            q = self.get_element_stats_on_language()
        s = sum(q.values())
        if s != 0:
            return {k : v/s for k,v in q.items()}
        return False

    ################# START : methods below used to calculate the element-to-region assignment

    def get_element_dim_from_ratio(self, r):
        """
        Return the side length (rounded to 4 decimals) of the square region
        whose area is the fraction `r` of the gameboard's area.
        """
        assert r >= 0 and r <= 1, "invalid r {}".format(r)
        elemArea = r * self.area
        return round(sqrt(elemArea), 4)

    def assign_elements_to_region_(self, requiredFit, elementRatioScale):
        """
        Attempt fitting elements onto the gameboard `requiredFit` + 1 times.
        `elementRatioScale` determines the proportion of the gameboard the
        elements occupy: 1 means the elements' total area equals the board's,
        0 means none.

        return:
        - `configInfo`, `areaDiff`
        """
        assert elementRatioScale >= 0 and elementRatioScale <= 1, "elementRatioScale {} invalid".format(elementRatioScale)
        # get element dimensions
        elementDim = self.get_element_dimensions(elementRatioScale = elementRatioScale)
        config, areaDiff = GameBoardHandler.get_best_config_by_random_inspection(
            elementDim, self.dimensions, numRandomPoints = 10, cutOff = "auto")
        if requiredFit == 0:
            return config, areaDiff
        else:
            # NOTE(review): earlier attempts are discarded — only the last
            # recursion's result is returned; confirm that is intended.
            return self.assign_elements_to_region_(requiredFit -1, elementRatioScale)

    def assign_elements_to_region_by_fit(self, requiredFit = 0, elementRatioScale = 1):
        """
        Assign elements to regions with the best fit reported by
        assign_elements_to_region_, then push the regions onto the elements.
        """
        q = self.assign_elements_to_region_(requiredFit, elementRatioScale)
        # BUG FIX (idiom): was `if q == False`; identity test avoids invoking
        # equality on the (config, areaDiff) tuple.
        if q is False: return False
        self.config, self.configAreaDiff = q[0],q[1]
        self.assign_to_elements_helper()

    def assign_elements(self, assignElementsToRegion):
        """
        Assign elements to regions using either best-fit or trial-and-error.

        arguments:
        - assignElementsToRegion := False|((t/e|fit)::assignmentMode, int::(assignFrequency))
        """
        if assignElementsToRegion is False:
            return
        assert assignElementsToRegion[0] in {"t/e", "fit"} and\
            type(assignElementsToRegion[1]) is int and len(assignElementsToRegion) == 2,\
            "invalid assignElementsToRegion {}".format(assignElementsToRegion)
        if assignElementsToRegion[0] == "fit":
            self.assign_elements_to_region_by_fit() # gets best fit
        else:
            self.assign_elements_to_region_by_trial_and_error()

    def get_element_dimensions(self, elementRatioScale = 1):
        """
        Calculate element dimensions from each element's ratio of the game
        language, scaled by `elementRatioScale`.

        return:
        - list::((`id`, `dim`:(int,int)))
        """
        # NOTE(review): elementLanguageRatio is False when all counts sum to
        # 0 (see get_element_stats_on_language_by_ratio) — this would raise
        # here; confirm that state cannot occur at assignment time.
        elementDim = []
        for k, v in self.elementLanguageRatio.items():
            q = self.get_element_dim_from_ratio(v)
            q = elementRatioScale * q
            elementDim.append((k,(q,q)))
        return elementDim

    def assign_elements_to_region_by_trial_and_error(self):
        """Assign elements to regions via randomized trial-and-error."""
        ei = self.get_element_dimensions()
        self.config, self.configAreaDiff =\
            GameBoardHandler.get_best_config_by_trial_and_error(ei, self.dimensions, numTrials = 10)
        self.assign_to_elements_helper()

    def assign_to_elements_helper(self):
        """Copy the regions recorded in `self.config` onto the elements."""
        for c in self.config:
            idn = c[0]
            reg = c[2]
            # BUG FIX: a stray "| StarcoderdataPython" dataset-residue token
            # fused onto this line raised NameError at runtime; removed.
            self.elements[idn].set_location(reg)
11396838 | from flask import Flask, render_template, request, send_file
from flask_uploads import UploadSet, configure_uploads, IMAGES
import images as image_mgr
app = Flask(__name__)
# Accept image uploads only (flask_uploads IMAGES set); uploaded files are
# stored in the 'original_images' directory.
photos = UploadSet('photos', IMAGES)
app.config['UPLOADED_PHOTOS_DEST'] = 'original_images'
configure_uploads(app, photos)
@app.route('/upload', methods=['POST', 'GET'])
def upload():
    """Accept an image upload sent in the 'photo' form field.

    BUG FIX: the original view returned None on every request (including a
    successful POST), which makes Flask raise a 500 error; every code path
    now returns an explicit response.
    """
    if request.method == 'POST' and 'photo' in request.files:
        photos.save(request.files['photo'])
        return 'photo saved', 201
    return 'POST an image file in the "photo" field', 400
@app.route('/get_image', methods=['GET'])
def get_image():
    """Resize a previously uploaded image to `size` and return it.

    BUG FIX: the original saved the resized image to 'tempimage.png' but then
    served 'tempimage.jpeg' — a stale or missing file. The same filename is
    now used for both steps, with a matching mimetype.
    """
    import os  # local import keeps this fix self-contained

    # basename() strips any directory components from the user-supplied
    # name, preventing path traversal out of 'original_images'; os.path.join
    # also replaces the Windows-only '\\' separator hard-coded before.
    name = os.path.basename(request.args.get('name', ''))
    image_mgr.resize_save(os.path.join('original_images', name),
                          int(request.args.get('size')), 'tempimage.png')
    return send_file('tempimage.png', mimetype='image/png')
if __name__ == '__main__':
    # BUG FIX: a stray "| StarcoderdataPython" dataset-residue token fused
    # onto this line raised NameError at startup; removed. Also dropped the
    # non-idiomatic parentheses around the if condition.
    app.run(debug=False)
294297 | """
"""
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Read T test cases from STDIN; for each integer, print its binary
# representation without the '0b' prefix that bin() prepends.
for _ in range(int(raw_input())):
    n = int(raw_input())
    # bin() already returns a str, so the previous str() wrapper was
    # redundant; the parenthesized print form is valid on both Python 2
    # (where this file's raw_input() lives) and Python 3.
    print(bin(n)[2:])
| StarcoderdataPython |
12848200 | <reponame>wangleon/gamse
import os
import re
import sys
import shutil
import logging
logger = logging.getLogger(__name__)
import configparser
import numpy as np
import astropy.io.fits as fits
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
from ..utils.obslog import read_obslog
from ..utils.misc import write_system_info
from . import common
from . import (feros, foces, hds, hires, levy, lhrs, sarg, xinglong216hrs)
# Registry of supported pipelines. Each entry is
# (module name within this package, telescope name, instrument name).
# The module name is resolved via eval() in the dispatch functions below, so
# it must match one of the pipeline modules imported above.
instrument_lst = [
    ('foces',           'Fraunhofer',   'FOCES'),
    ('xinglong216hrs',  'Xinglong216',  'HRS'),
    ('hires',           'Keck-I',       'HIRES'),
    ('levy',            'APF',          'Levy'),
    ('hds',             'Subaru',       'HDS'),
    ('lhrs',            'LAMOST',       'HRS'),
    ('feros',           'MPG/ESO-2.2m', 'FEROS'),
    ]
def reduce_echelle():
    """Automatically select the instrument and reduce echelle spectra
    accordingly.

    Available instruments include:

    * *FOCES*: FOCES on 2m Fraunhofer Telescope in Wendelstein Observatory,
      Germany.
    * *Xinglong216HRS*: HRS on 2.16m telescope in Xinglong Station, China.

    Workflow: rotate any pre-existing 'gamse.log', read all local .cfg
    files, configure logging per the configured mode, then dispatch to the
    matching pipeline module's reduce_rawdata().
    """
    log_filename = 'gamse.log'
    # initialize running log
    log_fmt = ' '.join(['*',
                        '%(asctime)s.%(msecs)03d',
                        '[%(levelname)s]',
                        '%(name)s - %(lineno)d - %(funcName)s():'+os.linesep,
                        ' %(message)s'+os.linesep+'-'*80,
                        ])
    # check if there's already an existing log file
    if os.path.exists(log_filename):
        # if logfile already exists, rename it with its creation time
        time_str = None
        file1 = open(log_filename)
        for row in file1:
            # find the first time string in the contents
            mobj = re.search('(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})', row)
            if mobj:
                time_str = mobj.group()
                break
        file1.close()
        if time_str is None:
            # time string not found
            # rename it to gamse.DDDD.log, where DDD is an increasing number
            i = 1
            while(True):
                newfilename = 'gamse.{}.log'.format(i)
                if os.path.exists(newfilename):
                    i += 1
                    continue
                else:
                    break
        else:
            # time string is found, rename it to gamse.YYYY-MM-DDTHH-MM-SS.log
            time_str = time_str.replace(':', '-')
            newfilename = 'gamse.{}.log'.format(time_str)
        # rename the existing gamse.log file
        shutil.move(log_filename, newfilename)
    # load config file in current directory
    config_file_lst = [fname for fname in os.listdir(os.curdir)
                        if fname.endswith('.cfg')]
    config = configparser.ConfigParser(
                inline_comment_prefixes = (';','#'),
                interpolation = configparser.ExtendedInterpolation(),
                )
    config.read(config_file_lst)
    # the level of running log depends on the mode in the config
    mode = config['reduce']['mode']
    if mode == 'normal':
        level = logging.INFO
    elif mode == 'debug':
        level = logging.DEBUG
    else:
        level = logging.INFO
    # initialize running log
    logging.basicConfig(
            filename = log_filename,
            level    = level,
            format   = log_fmt,
            datefmt  = '%Y-%m-%dT%H:%M:%S',
            )
    logger = logging.getLogger(__name__)
    # write some system info into the running log
    write_system_info()
    # find telescope and instrument from config file
    section = config['data']
    telescope  = section['telescope']
    instrument = section['instrument']
    logger.info('Start reducing {}, {} data'.format(telescope, instrument))
    for row in instrument_lst:
        if telescope == row[1] and instrument == row[2]:
            # eval() resolves the module name registered in instrument_lst
            # (a fixed, code-internal list — not user input)
            eval(row[0]).reduce_rawdata()
            exit()
    print('Unknown Instrument: {} - {}'.format(telescope, instrument))
def make_obslog():
    """Scan the path to the raw FITS files and generate an observing log.

    Before generating the observing log file, this function will scan the
    local directory and look for *all* files with their names ending with
    ".cfg", and read them as config files.
    The config files are used to find the name of the instrument that the
    data was obtained with; the matching pipeline's own make_obslog() is
    then invoked.
    """
    config_file_lst = []
    # find local config file
    for fname in os.listdir(os.curdir):
        if fname.endswith('.cfg'):
            config_file_lst.append(fname)
    # load ALL local config files
    config = configparser.ConfigParser(
                inline_comment_prefixes = (';','#'),
                interpolation = configparser.ExtendedInterpolation(),
                )
    config.read(config_file_lst)
    # find the telescope and instrument name
    section = config['data']
    telescope  = section['telescope']
    instrument = section['instrument']
    for row in instrument_lst:
        if telescope == row[1] and instrument == row[2]:
            # eval() resolves the module name registered in instrument_lst
            eval(row[0]).make_obslog()
            exit()
    print('Unknown Instrument: {} - {}'.format(telescope, instrument))
def make_config():
    """Print a list of supported instruments and generate a config file
    according to the user's selection.

    The user picks an instrument by its 1-based index. BUG FIX: the original
    accepted any digit string, so "0" silently selected the LAST entry
    (index -1) and values past the end raised IndexError; the input is now
    re-prompted until it is a digit within [1, len(instrument_lst)].
    """
    # display a list of supported instruments
    print('List of supported instruments:')
    for i, row in enumerate(instrument_lst):
        telescope = row[1]
        instrument = row[2]
        print('[{}] {}/{}'.format(i+1, telescope, instrument))

    # select instrument (1-based index into instrument_lst)
    while True:
        string = input('Select the instrument: ')
        if string.isdigit() and 1 <= int(string) <= len(instrument_lst):
            select = int(string)
            break
        print('Error: invalid input')

    # use individual functions in each pipeline
    modulename = instrument_lst[select-1][0]
    eval(modulename).make_config()
def show_onedspec():
    """Show 1-D spectra in a pop-up matplotlib window.

    Filenames (or obslog frame ids) are taken from ``sys.argv[2:]``. Up/down
    arrow keys step through the echelle orders.

    BUG FIX: the fallback loaders were called as bare ``load_config`` /
    ``load_obslog``, which are not defined in this module — the sibling
    convert_onedspec() calls them through the ``common`` module, so the same
    qualified names are used here for consistency.
    """
    # load obslog
    logname_lst = [fname for fname in os.listdir(os.curdir)
                        if fname.endswith('.obslog')]
    if len(logname_lst)==0:
        logtable = None
    else:
        logtable = read_obslog(logname_lst[0])
    # load config files in the current directory
    config_file_lst = [fname for fname in os.listdir(os.curdir)
                        if fname.endswith('.cfg')]
    config = configparser.ConfigParser(
                inline_comment_prefixes = (';','#'),
                interpolation = configparser.ExtendedInterpolation(),
                )
    config.read(config_file_lst)
    filename_lst = []
    for arg in sys.argv[2:]:
        # first, check if argument is a filename.
        if os.path.exists(arg):
            filename_lst.append(arg)
        # if not a filename, try to find the corresponding items in obslog
        else:
            # NOTE(review): config is always a ConfigParser here, so this
            # branch looks unreachable; kept for safety.
            if config is None:
                config = common.load_config('\S*\.cfg$')
            if logtable is None:
                logtable = common.load_obslog('\S*\.obslog$')
            # if arg is a number, find the corresponding filename in obslog
            if arg.isdigit():
                arg = int(arg)
                section = config['reduce']
                for logitem in logtable:
                    if arg == logitem['frameid']:
                        # get the path to the 1d spectra
                        odspath = section.get('odspath', None)
                        if odspath is None:
                            odspath = section.get('oned_spec')
                        # get the filename suffix for 1d spectra
                        oned_suffix = config['reduce'].get('oned_suffix')
                        fname = '{}_{}.fits'.format(
                                logitem['fileid'], oned_suffix)
                        filename = os.path.join(odspath, fname)
                        if os.path.exists(filename):
                            filename_lst.append(filename)
                        break
    if len(filename_lst)==0:
        exit()
    spec_lst = []
    for filename in filename_lst:
        data = fits.getdata(filename)
        # determine the column name of flux that will be shown
        if 'flux' in data.dtype.names:
            flux_key = 'flux'
        elif 'flux_sum' in data.dtype.names:
            flux_key = 'flux_sum'
        else:
            flux_key = ''
            pass
        if 'fiber' in data.dtype.names:
            # multi fiber: build one {order: (wave, flux)} dict per fiber
            for fiber in np.unique(data['fiber']):
                spec = {}
                mask = data['fiber']==fiber
                for row in data[mask]:
                    order = row['order']
                    wave  = row['wavelength']
                    flux  = row[flux_key]
                    spec[order] = (wave, flux)
                label = os.path.basename(filename) + ' Fiber {}'.format(fiber)
                spec_lst.append((spec, label))
        else:
            spec = {}
            for row in data:
                order = row['order']
                wave  = row['wavelength']
                flux  = row[flux_key]
                spec[order] = (wave, flux)
            label = os.path.basename(filename)
            spec_lst.append((spec, label))
    ################################################
    fig = plt.figure(figsize=(15, 8), dpi=150)
    ax = fig.add_axes([0.07, 0.1, 0.88, 0.8])

    def plot_order(order):
        # redraw the axes with every loaded spectrum that has this order
        ax.cla()
        ax.currentorder = order
        wave_min, wave_max = 1e9, 0
        flux_min = 1e9
        for i, (spec, label) in enumerate(spec_lst):
            if order in spec:
                wave = spec[order][0]
                flux = spec[order][1]
                ax.plot(wave, flux, '-', alpha=0.8, lw=0.8, label=label)
                wave_min = min(wave_min, wave.min())
                wave_max = max(wave_max, wave.max())
                flux_min = min(flux_min, flux.min())
        leg = ax.legend(loc='upper right')
        leg.get_frame().set_alpha(0.1)
        ax.set_xlabel(u'Wavelength (\xc5)', fontsize=12)
        ax.set_ylabel('Flux', fontsize=12)
        ax.set_title('Order %d'%(order), fontsize=14)
        ax.set_xlim(wave_min, wave_max)
        ax.axhline(y=0, color='k', ls='--', lw=0.5)
        if flux_min > 0:
            ax.set_ylim(0,)
        ax.xaxis.set_major_formatter(tck.FormatStrFormatter('%g'))
        ax.yaxis.set_major_formatter(tck.FormatStrFormatter('%g'))
        fig.canvas.draw()

    def on_key(event):
        # arrow keys step to the adjacent order when any spectrum has it
        if event.key == 'up':
            can_plot = False
            for spec, label in spec_lst:
                if ax.currentorder + 1 in spec:
                    can_plot=True
                    break
            if can_plot:
                plot_order(ax.currentorder + 1)
        elif event.key == 'down':
            can_plot = False
            for spec, label in spec_lst:
                if ax.currentorder - 1 in spec:
                    can_plot=True
                    break
            if can_plot:
                plot_order(ax.currentorder - 1)
        else:
            pass

    order0 = list(spec_lst[0][0].keys())[0]
    plot_order(order0)
    fig.canvas.mpl_connect('key_press_event', on_key)
    plt.show()
def plot_spectra1d():
    """Dispatch to the selected pipeline's plot_spectra1d().

    Reads all local .cfg files to determine the telescope/instrument pair,
    then calls the matching pipeline module registered in instrument_lst.
    """
    # load config files in the current directory
    config_file_lst = [fname for fname in os.listdir(os.curdir)
                        if fname.endswith('.cfg')]
    config = configparser.ConfigParser(
                inline_comment_prefixes = (';','#'),
                interpolation = configparser.ExtendedInterpolation(),
                )
    config.read(config_file_lst)
    # find telescope and instrument from config file
    section = config['data']
    telescope  = section['telescope']
    instrument = section['instrument']
    for row in instrument_lst:
        if telescope == row[1] and instrument == row[2]:
            # eval() resolves the module name registered in instrument_lst
            eval(row[0]).plot_spectra1d()
            exit()
def convert_onedspec():
    """Convert one-dimensional spectra to per-order ASCII files.

    With no extra command-line arguments every FITS file in the 1-D spectra
    directory is converted; otherwise each argument may be a path, a file in
    that directory, or a frameid/fileid looked up in the obslog. Each echelle
    order is written as "<prefix>_order_NNN.txt" (wavelength, flux columns)
    into a per-input subdirectory.
    """
    config = common.load_config('\S*\.cfg$', verbose=False)
    logtable = common.load_obslog('\S*\.obslog$', fmt='astropy', verbose=False)
    section = config['reduce']
    odspath = section.get('odspath', None)
    oned_suffix = section.get('oned_suffix')
    filename_lst = []
    if len(sys.argv)==2:
        # no addtional args. convert all of the onedspec
        for fname in sorted(os.listdir(odspath)):
            if fname.endswith('.fits') or fname.endswith('.fit'):
                filename = os.path.join(odspath, fname)
                filename_lst.append(filename)
    else:
        for arg in sys.argv[2:]:
            if os.path.exists(arg):
                filename_lst.append(arg)
            elif os.path.exists(os.path.join(odspath, arg)):
                filename_lst.append(os.path.join(odspath, arg))
            else:
                # not a path: treat the argument as a frameid (if numeric)
                # or fileid and match files in the obslog
                if arg.isdigit():
                    arg = int(arg)
                for logitem in logtable:
                    if arg == logitem['frameid'] or arg == logitem['fileid']:
                        pattern = str(logitem['fileid'])+'\S*'
                        for fname in sorted(os.listdir(odspath)):
                            filename = os.path.join(odspath, fname)
                            if os.path.isfile(filename) \
                                and re.match(pattern, fname):
                                filename_lst.append(filename)
    for filename in filename_lst:
        data = fits.getdata(filename)
        # determine which flux column this file provides
        if 'flux' in data.dtype.names:
            flux_key = 'flux'
        elif 'flux_sum' in data.dtype.names:
            flux_key = 'flux_sum'
        else:
            # NOTE(review): flux_key is left undefined on this path and the
            # loop below would raise NameError — confirm every input file has
            # a 'flux' or 'flux_sum' column.
            pass
        spec = {}
        for row in data:
            order = row['order']
            wave = row['wavelength']
            flux = row[flux_key]
            # ensure wavelengths ascend before writing
            if wave[0]> wave[-1]:
                wave = wave[::-1]
                flux = flux[::-1]
            spec[order] = (wave, flux)
            ascii_prefix = os.path.splitext(os.path.basename(filename))[0]
            target_path = os.path.join(odspath, ascii_prefix)
            target_fname = '{}_order_{:03d}.txt'.format(ascii_prefix, order)
            target_filename = os.path.join(target_path, target_fname)
            if not os.path.exists(target_path):
                os.mkdir(target_path)
            if os.path.exists(target_filename):
                print('Warning: {} is overwritten'.format(target_filename))
            outfile = open(target_filename, 'w')
            for w, f in zip(wave, flux):
                outfile.write('{:11.5f} {:+16.8e}'.format(w, f)+os.linesep)
            outfile.close()
        print('Convert {} to {} files with ASCII formats in {}'.format(
                filename, len(data), target_path))
| StarcoderdataPython |
3493453 | """
Test module.
Tests the whole system as a black box.
"""
import os
import io, shutil
from six.moves import getcwd
import pytest
from chatette.parsing.parser import Parser
from chatette.units.ast import AST
from chatette.generator import Generator
from chatette.adapters import RasaAdapter, JsonListAdapter
class ChatetteFacade(object):
    """
    A facade to the different objects and calls to their methods required to
    make the system perform a parsing, generation and output writing.
    This class is also a singleton: use `get_or_create()` rather than
    instantiating it directly.
    """
    instance = None

    def __init__(self):
        self.rasa_adapter = RasaAdapter()
        # NOTE(review): unlike rasa_adapter this stores the CLASS, not an
        # instance — this only works if JsonListAdapter.write is callable on
        # the class itself; confirm and instantiate if it is not.
        self.jsonl_adapter = JsonListAdapter
        self.cwd = getcwd()
        self.output_dirpath = \
            os.path.join(self.cwd, "tests/system-testing/output")
        self.generator = None
        self.train_examples = None
        self.test_examples = None

    @staticmethod
    def get_or_create():
        """Return the singleton instance, creating it on first use."""
        if ChatetteFacade.instance is None:
            ChatetteFacade.instance = ChatetteFacade()
        return ChatetteFacade.instance

    def run(self, template_filepath):
        """Parse `template_filepath` and generate train/test examples."""
        AST.reset_instance()
        parser = Parser()
        parser.parse_file(template_filepath)
        self.generator = Generator()
        self.train_examples = list(self.generator.generate_train())
        self.test_examples = list(self.generator.generate_test())

    def write(self, adapter="rasa"):
        """Write the generated examples using the named adapter.

        Args:
            adapter: "rasa" or "jsonl".

        Raises:
            ValueError: if called before `run`, or for an unknown adapter.
        """
        if self.generator is None:
            raise ValueError("Tried to write an output file before generation")
        if adapter == "rasa":
            adapter = self.rasa_adapter
        elif adapter == "jsonl":
            adapter = self.jsonl_adapter
        else:
            raise ValueError(adapter+" is not a valid adapter.")
        synonyms = AST.get_or_create().get_entities_synonyms()
        if self.train_examples:
            adapter.write(
                os.path.join(self.output_dirpath, "train"),
                self.train_examples, synonyms
            )
        if self.test_examples:
            # BUG FIX: test examples were also written to the "train" path,
            # overwriting/duplicating the training output.
            adapter.write(
                os.path.join(self.output_dirpath, "test"),
                self.test_examples, synonyms
            )

    def clean(self):
        """Delete the output directory and everything in it."""
        shutil.rmtree(self.output_dirpath)
class TestSystem(object):
solution_file_extension = ".solution"
syn_solution_file_extension = ".syn"
solution_marker = ">>>"
@staticmethod
def get_solution_filepath(input_filepath):
"""
Returns the same file path as `input_filepath`
with a different extension.
"""
return \
os.path.splitext(input_filepath)[0] + \
TestSystem.solution_file_extension
@staticmethod
def get_synonym_solution_filepath(input_filepath):
"""
Returns the same file path as `input_filepath`
with a different extension.
"""
return \
os.path.splitext(input_filepath)[0] + \
TestSystem.syn_solution_file_extension
@staticmethod
def get_legal_examples(input_filepath):
"""Returns the list of all legal examples for file `input_filepath`."""
solution_filepath = TestSystem.get_solution_filepath(input_filepath)
with io.open(solution_filepath, 'r') as f:
solution_examples = f.readlines()
return [
{
"intent": ex.split(TestSystem.solution_marker)[0],
"text": ex.split(TestSystem.solution_marker)[1].rstrip()
}
for ex in solution_examples
if (not ex.startswith('#') and not ex.isspace())
]
@staticmethod
def get_legal_synonyms(input_filepath):
"""
Returns a dict with all legal synonyms
or `None` if there is no synonym solution file.
"""
syn_filepath = TestSystem.get_synonym_solution_filepath(input_filepath)
if not os.path.isfile(syn_filepath):
return None
with io.open(syn_filepath, 'r') as f:
result = dict()
for l in f:
if not l.startswith('#'):
[name, val] = l.rstrip().split("===")
if name not in result:
result[name] = [val]
else:
result[name].append(val)
return result
@staticmethod
def check_no_duplicates(examples):
"""Returns `True` if there are no duplicates in the list"""
return len(examples) == len(set(examples))
    def test_generate_all_training(self):
        """
        Tests templates that generate all possible examples for each intent
        and that generate only training data.

        For each template file this checks that: no example is generated
        twice, every generated example is in the solution file, every
        solution example was generated, and any recorded synonyms appear in
        the synonym solution file.
        """
        facade = ChatetteFacade.get_or_create()
        input_dir_path = "tests/system-testing/inputs/generate-all/"
        input_filenames = [
            "simplest.chatette", "only-words.chatette",
            "words-and-groups.chatette", "alias.chatette", "include.chatette",
            "slot.chatette", "slotrolegroup.chatette"
        ]
        for filename in input_filenames:
            file_path = os.path.join(input_dir_path, filename)
            facade.run(file_path)
            # "generate all" must never emit the same example twice
            if not TestSystem.check_no_duplicates(facade.train_examples):
                pytest.fail(
                    "Some examples were generated several times " +
                    "when dealing with file '" + filename + "'.\nGenerated: " + \
                    str(facade.train_examples)
                )
            legal_examples = TestSystem.get_legal_examples(file_path)
            # every generated example must appear in the solution file
            for ex in facade.train_examples:
                formatted_ex = {"intent": ex.intent_name, "text": ex.text}
                if formatted_ex not in legal_examples:
                    pytest.fail(
                        str(formatted_ex) + " is not a legal example for '" + \
                        file_path + "'"
                    )
            # counts differ => some legal example was missed (reported
            # individually below, with a generic failure as fallback)
            if len(legal_examples) != len(facade.train_examples):
                training_texts = [ex.text for ex in facade.train_examples]
                for legal_ex in legal_examples:
                    if legal_ex["text"] not in training_texts:
                        pytest.fail(
                            "Example '" + legal_ex["text"] + \
                            "' was not generated."
                        )
                pytest.fail(
                    "An unknown example was not generated (" + \
                    str(len(facade.train_examples)) + \
                    " generated instead of " + str(len(legal_examples)) + \
                    ").\nGenerated: " + str(facade.train_examples)
                )
            # if a synonym solution file exists, every recorded synonym must
            # be listed in it
            legal_syn = TestSystem.get_legal_synonyms(file_path)
            if legal_syn is not None:
                synonyms = AST.get_or_create().get_entities_synonyms()
                for key in synonyms:
                    if key not in legal_syn:
                        pytest.fail(
                            "'" + key + "' shouldn't have any synonyms."
                        )
                    for syn in synonyms[key]:
                        if syn not in legal_syn[key]:
                            pytest.fail(
                                "'" + syn + "' shouldn't be a synonym of '" + \
                                key + "'"
                            )
    def test_generate_all_testing(self):
        """
        Tests templates that generate all possible examples for each intent
        and that generate only testing data.
        NOTE: impossible (should it be permitted?)
        """
        # intentionally a no-op — see the NOTE in the docstring
        pass
def test_generate_nb_training(self):
"""
Tests templates that generate a subset of all possible examples
for each intent and that generate only training data.
"""
facade = ChatetteFacade.get_or_create()
input_dir_path = \
"tests/system-testing/inputs/generate-nb/training-only/"
input_filenames = [
"only-words.chatette", "words-and-groups.chatette",
"alias.chatette", "include.chatette", "slot.chatette",
"bugfixes/bug-22-slot-position.chatette"
]
for filename in input_filenames:
file_path = os.path.join(input_dir_path, filename)
facade.run(file_path)
# if not TestSystem.check_no_duplicates(facade.train_examples): # TODO: make sure there are no duplicates in this case
# pytest.fail("Some examples were generated several times "+
# "when dealing with file '"+filename+"'.\n"+
# "Generated: "+str(facade.train_examples))
legal_examples = TestSystem.get_legal_examples(file_path)
for ex in facade.train_examples:
formatted_ex = {"intent": ex.intent_name, "text": ex.text}
if formatted_ex not in legal_examples:
pytest.fail(
str(formatted_ex) + " is not a legal example for '" + \
file_path + "'"
)
legal_syn = TestSystem.get_legal_synonyms(file_path)
if legal_syn is not None:
synonyms = AST.get_or_create().get_entities_synonyms()
for key in synonyms:
if key not in legal_syn:
pytest.fail(
"'" + key + "' shouldn't have any synonyms."
)
for syn in synonyms[key]:
if syn not in legal_syn[key]:
pytest.fail(
"'" + syn + "' shouldn't be a synonym of '" + \
key + "'"
)
filename_zero = "zero-ex.chatette"
file_path = os.path.join(input_dir_path, filename_zero)
facade.run(file_path)
if len(facade.train_examples) != 0:
pytest.fail(
"When dealing with file 'zero-ex.chatette', no examples " + \
"should be generated.\nGenerated: " + \
str(facade.train_examples)
)
filename_one = "one-ex.chatette"
file_path = os.path.join(input_dir_path, filename_one)
facade.run(file_path)
print("TRAIN EX: " + str(facade.train_examples))
if len(facade.train_examples) != 1:
pytest.fail(
"When dealing with file 'one-ex.chatette', one examples " + \
"should be generated.\nGenerated: " + \
str(facade.train_examples)
)
def test_bug_fixes(self):
facade = ChatetteFacade.get_or_create()
file_path = "tests/system-testing/inputs/generate-nb/training-only/bugfixes/bug-22-slot-position.chatette"
facade.run(file_path)
for ex in facade.train_examples:
for entity in ex.entities:
if entity._len != 5 and entity._len != 8:
pytest.fail(
"Incorrect length for entity '" + str(entity.value) + \
"': " + str(entity._len)
)
if entity._start_index < 26 and entity._start_index > 35:
pytest.fail(
"Incorrect starting index for entity '" + \
str(entity.value) + "' in '" + str(ex.text) + \
"': " + str(entity._start_index)
)
def test_generate_nb_testing(self):
"""
Tests templates that generate a subset of all possible examples
for each intent and that generate only testing data.
"""
pass
def test_generate_nb(self):
"""
Tests templates that generate a subset of all possible examples
for each intent and that generate both training and testing data.
"""
pass | StarcoderdataPython |
# coding=utf-8
import logging
import sys
import unittest
import matplotlib.pyplot as plt
import numpy as np
from ybckit.mpl import init as mpl_init
from . import ybc_env
# Route all records (DEBUG and above) from the root logger to stdout so
# they are visible in test-runner output.
logger = logging.getLogger()
logger.level = logging.DEBUG
logger.addHandler(logging.StreamHandler(sys.stdout))
class MplTestCase(unittest.TestCase):
    """Tests for the ybckit matplotlib integration (mpl init/cleanup)."""

    def setUp(self):
        super(MplTestCase, self).setUp()
        ybc_env.setup()
        mpl_init()

    def tearDown(self):
        super(MplTestCase, self).tearDown()
        ybc_env.cleanup()

    @unittest.skip('仅限本地运行,需要手动检查 /tmp/request 内容')
    def test_show(self):
        # Plot a shifted sine wave and display it; the displayed output has
        # to be inspected manually, hence the skip above.
        xs = np.arange(0.0, 2.0, 0.01)
        ys = np.sin(2 * np.pi * xs) + 1
        plt.plot(xs, ys)
        plt.xlabel('time (s)')
        plt.ylabel('voltage (mV)')
        plt.title('About as simple as it gets, folks')
        plt.grid(True)
        plt.show()
        self.assertTrue(True)
if __name__ == '__main__':
    # Run this module's test cases when executed directly.
    unittest.main()
import pytest
from pypospack.potential import EamPotential
# Test fixture: a single-element Al EAM potential built from a Born-Mayer
# pair term, an exponential density term, and a Finnis-Sinclair embedding.
symbols = ['Al']
func_pair_name = "bornmayer"
func_density_name = "eam_dens_exp"
func_embedding_name = "fs"
expected_parameter_names_pair_potential = []
expected_parameter_names_density_function = []
expected_parameter_names_embedding_function = []
# Expected flattened parameter names: pair (p_), density (d_), embedding (e_).
expected_parameter_names = [
    'p_AlAl_phi0', 'p_AlAl_gamma', 'p_AlAl_r0',
    'd_Al_rho0', 'd_Al_beta', 'd_Al_r0',
    'e_Al_F0', 'e_Al_p', 'e_Al_q', 'e_Al_F1', 'e_Al_rho0']
print(80*'-')
print("func_pair_name={}".format(func_pair_name))
print("func_density_name={}".format(func_density_name))
# BUG FIX: this line previously printed func_density_name under the
# "func_embedding_name" label.
print("func_embedding_name={}".format(func_embedding_name))
print(80*'-')
def test____init__():
    """EamPotential constructor wires up type, symbols, flags, and names."""
    obj_pot = EamPotential(
            symbols=symbols,
            func_pair=func_pair_name,
            func_density=func_density_name,
            func_embedding=func_embedding_name)

    assert type(obj_pot) is EamPotential
    assert obj_pot.potential_type == 'eam'

    assert type(obj_pot.symbols) is list
    assert len(obj_pot.symbols) == len(symbols)
    # BUG FIX: the original loop ASSIGNED into obj_pot.symbols
    # (obj_pot.symbols[i] = v), so this check could never fail.
    for i, v in enumerate(symbols):
        assert obj_pot.symbols[i] == v

    assert obj_pot.is_charge is False

    assert type(obj_pot.parameter_names) is list
    assert len(obj_pot.parameter_names) == len(expected_parameter_names)
    # BUG FIX: the original loop OVERWROTE obj_pot.parameter_names instead of
    # comparing each expected name.
    for i, v in enumerate(expected_parameter_names):
        assert obj_pot.parameter_names[i] == v
if __name__ == "__main__":
    # Constructor smoke test: build the potential and echo its attributes.
    pot = EamPotential(
        symbols=symbols,
        func_pair=func_pair_name,
        func_density=func_density_name,
        func_embedding=func_embedding_name)
    print('pot.potential_type == {}'.format(pot.potential_type))
    print('pot.symbols == {}'.format(pot.symbols))
    print('pot.parameter_names == {}'.format(pot.parameter_names))
    print('pot.is_charge == {}'.format(pot.is_charge))
# repo: FilipDimi/restaurant-website
from django.contrib import admin
from core.models import Ingredient, MealCategory, Side, Meal, Special, BeverageCategory, Beverage, Soup, Dessert
# Expose every menu-related model in the Django admin site.
for _model in (
    Ingredient,
    MealCategory,
    Side,
    Meal,
    Special,
    BeverageCategory,
    Beverage,
    Soup,
    Dessert,
):
    admin.site.register(_model)
import pandas as pd
import os
from os.path import join
import sys
import time
from itertools import chain
sys.path.insert(1, '/home/nlp/ernstor1/rouge/SummEval_referenceSubsets/code_score_extraction')
import calculateRouge
import numpy as np
import glob
# from DataGenSalientIU_DUC_maxROUGE import greedy_selection_MDS, greedy_selection_clusters, greedy_selection_all_clusters
import pickle
from sklearn.cluster import AgglomerativeClustering
import re
from collections import defaultdict
def read_generic_file(filepath):
    """Read a text file and return its lines, whitespace-stripped, as a list."""
    with open(filepath, 'r') as fh:
        return [line.strip() for line in fh.read().splitlines()]
def write_summary(summary_path, summary, topic, type, ):
    """
    Write *summary* under *summary_path* as '<topic>.M.100.T.<G|S>.html'.

    *type* is 'gold' (-> G) or 'system' (-> S); the directory is created if
    it does not exist yet.
    """
    type_letter = {'gold': 'G', 'system': 'S'}[type]
    summary_len = 100
    if not os.path.exists(summary_path):
        os.makedirs(summary_path)
    summary_name = '{}.M.{}.T.{}.html'.format(topic, summary_len, type_letter)
    with open(join(summary_path, summary_name), 'w') as out_file:
        out_file.write(summary)
def calc_rouge(gold_summary_path, sys_summary_path):
    """
    Run the external ROUGE scorer on a (gold, system) summary directory pair.

    Scores are written to '<sys_summary_path>0_rouge_scores.csv'.
    NOTE(review): this mutates the module-level calculateRouge.INPUTS before
    calling calculateRouge.main(), so it is not safe to call concurrently.
    """
    calculateRouge.INPUTS = [(calculateRouge.COMPARE_SAME_LEN, gold_summary_path, sys_summary_path,
                              sys_summary_path + '0_rouge_scores.csv',
                              2002, calculateRouge.LEAVE_STOP_WORDS)]
    # calculateRouge.INPUTS = [(calculateRouge.COMPARE_VARYING_LEN, gold_summary_path, sys_summary_path,
    #                           sys_summary_path + 'rouge_scores.csv',
    #                           2002, calculateRouge.LEAVE_STOP_WORDS)]
    calculateRouge.main()
# the next *three* functions are taken from PreSumm implementation
def _get_ngrams(n, text):
"""Calcualtes n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _get_word_ngrams(n, sentences):
    """Calculates word n-grams over multiple sentences (lists of tokens)."""
    assert len(sentences) > 0
    assert n > 0

    # Flatten the sentences into one token sequence before extracting n-grams.
    words = []
    for sentence in sentences:
        words.extend(sentence)
    return _get_ngrams(n, words)
def cal_rouge(evaluated_ngrams, reference_ngrams):
    """
    Precision / recall / F1 of the n-gram overlap between a candidate set
    and a reference set. Returns a dict with keys 'f', 'p', 'r'.
    """
    overlapping_count = len(evaluated_ngrams.intersection(reference_ngrams))
    evaluated_count = len(evaluated_ngrams)
    reference_count = len(reference_ngrams)

    precision = overlapping_count / evaluated_count if evaluated_count != 0 else 0.0
    recall = overlapping_count / reference_count if reference_count != 0 else 0.0
    # Small epsilon keeps the division defined when both p and r are zero.
    f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
    return {"f": f1_score, "p": precision, "r": recall}
def greedy_selection_MDS(doc_sent_list, abstracts, summary_size=1000):
    """
    Greedy ROUGE-oracle sentence selection for multi-document summarization.

    Repeatedly adds the sentence from ``doc_sent_list`` that most increases
    the mean (over all references in ``abstracts``) ROUGE-1 + ROUGE-2 F1 of
    the selected set; stops when no sentence improves the score or when
    ``summary_size`` sentences have been picked.

    Returns the sorted indices (into doc_sent_list) of the chosen sentences.
    """
    def _rouge_clean(s):
        # Keep only alphanumerics and spaces for the ROUGE computation.
        return re.sub(r'[^a-zA-Z0-9 ]', '', s)

    max_rouge = 0.0
    # abstract = sum(abstract_sent_list, [])
    # Pad '...' with spaces before cleaning so stripped ellipses do not fuse
    # the surrounding words into one token.
    abstracts = [_rouge_clean(abstract.lower().replace('...',' ... ')).split() for abstract in abstracts]
    # abstract = _rouge_clean(' '.join(abstract)).split()
    sents = [_rouge_clean(s.lower().replace('...',' ... ')).split() for s in doc_sent_list]
    # Pre-compute the n-gram sets once per candidate sentence and reference.
    evaluated_1grams = [_get_word_ngrams(1, [sent]) for sent in sents]
    evaluated_2grams = [_get_word_ngrams(2, [sent]) for sent in sents]
    references_1grams = []
    references_2grams = []
    for abstract in abstracts:
        references_1grams.append(_get_word_ngrams(1, [abstract]))
        references_2grams.append(_get_word_ngrams(2, [abstract]))

    selected = []
    for s in range(summary_size):
        # A candidate is only accepted if it beats the current best score,
        # so the loop terminates as soon as no sentence adds ROUGE gain.
        cur_max_rouge = max_rouge
        cur_id = -1
        for i in range(len(sents)):
            if (i in selected):
                continue
            c = selected + [i]
            # Union of n-grams of the tentative selection.
            candidates_1 = [evaluated_1grams[idx] for idx in c]
            candidates_1 = set.union(*map(set, candidates_1))
            candidates_2 = [evaluated_2grams[idx] for idx in c]
            candidates_2 = set.union(*map(set, candidates_2))
            # Score = mean ROUGE-1 F1 + mean ROUGE-2 F1 across references.
            rouge_1 = np.mean([cal_rouge(candidates_1, reference_1grams)['f'] for reference_1grams in references_1grams])
            rouge_2 = np.mean([cal_rouge(candidates_2, reference_2grams)['f'] for reference_2grams in references_2grams])
            rouge_score = rouge_1 + rouge_2
            if rouge_score > cur_max_rouge:
                cur_max_rouge = rouge_score
                cur_id = i
        if (cur_id == -1):
            return selected
        selected.append(cur_id)
        max_rouge = cur_max_rouge
    return sorted(selected)
def greedy_selection_clusters(predictions_topic, abstracts, MAX_CLUSTERS = 9, summary_size=1000, HIGH_PRED_REPRESETATIVE_ORACLE = False, CLUSTERS_BY_ORDER = False):
    """
    Greedy ROUGE-oracle span selection restricted to the MAX_CLUSTERS largest
    clusters: at most one span per cluster (once a span is picked, all other
    spans of its cluster are blocked).

    Flags:
        HIGH_PRED_REPRESETATIVE_ORACLE -- restrict candidates to each
            cluster's highest-'prediction' span.
        CLUSTERS_BY_ORDER -- force clusters to be consumed in the
            allowed_clusters order instead of by ROUGE gain.

    Returns the list of selected row positions into predictions_topic.
    NOTE(review): may add an 'original_idx2' column to predictions_topic.
    """
    # HIGH_PRED_REPRESETATIVE_ORACLE = True
    # CLUSTERS_BY_ORDER = True
    def _rouge_clean(s):
        # Keep only alphanumerics and spaces for the ROUGE computation.
        return re.sub(r'[^a-zA-Z0-9 ]', '', s)

    docSpanList = list(predictions_topic['docSpanText'].values)
    span_idx2cluster = list(predictions_topic['cluster_idx'].values)
    # Map cluster id -> positions of all of its spans.
    cluster_items = defaultdict(list)
    for span_idx, cluster_idx in enumerate(span_idx2cluster):
        cluster_items[cluster_idx].append(span_idx)
    # Largest clusters first (ties broken by earliest sentence index).
    allowed_clusters = list(predictions_topic.sort_values(by=['cluster_size', 'inFile_sentIdx'], ascending=[False, True])[
                                'cluster_idx'].drop_duplicates(keep="first").values)[:MAX_CLUSTERS]
    if HIGH_PRED_REPRESETATIVE_ORACLE:
        predictions_topic['original_idx2'] = range(len(predictions_topic))
        allowed_cluster_represetatives = []
        for allowed_cluster_idx in allowed_clusters:
            predictions_topic_cluster = predictions_topic[
                predictions_topic['cluster_idx'] == allowed_cluster_idx]
            predictions_topic_cluster = predictions_topic_cluster.sort_values(by=['prediction'], ascending=False)
            allowed_cluster_represetatives.append(predictions_topic_cluster.iloc[0]['original_idx2'])

    max_rouge = 0.0
    # abstract = sum(abstract_sent_list, [])
    abstracts = [_rouge_clean(abstract.lower().replace('...',' ... ')).split() for abstract in abstracts]
    # abstract = _rouge_clean(' '.join(abstract)).split()
    sents = [_rouge_clean(s.lower().replace('...',' ... ')).split() for s in docSpanList]
    evaluated_1grams = [_get_word_ngrams(1, [sent]) for sent in sents]
    evaluated_2grams = [_get_word_ngrams(2, [sent]) for sent in sents]
    references_1grams = []
    references_2grams = []
    for abstract in abstracts:
        references_1grams.append(_get_word_ngrams(1, [abstract]))
        references_2grams.append(_get_word_ngrams(2, [abstract]))

    selected = []
    selected_clusters = [] #index of spans inside cluster that were already selected
    for s in range(summary_size):
        # Reset to 0 (not max_rouge): every round accepts the best remaining
        # span even if it does not improve the cumulative score.
        cur_max_rouge = 0#max_rouge#0
        cur_id = -1
        for i in range(len(sents)):
            if (i in selected):
                continue
            if (i in selected_clusters):
                continue
            if CLUSTERS_BY_ORDER:
                if (span_idx2cluster[i] != allowed_clusters[len(selected)]):
                    continue
            else:
                if (span_idx2cluster[i] not in allowed_clusters):
                    continue
            if HIGH_PRED_REPRESETATIVE_ORACLE:
                if i not in allowed_cluster_represetatives:
                    continue
            c = selected + [i]
            candidates_1 = [evaluated_1grams[idx] for idx in c]
            candidates_1 = set.union(*map(set, candidates_1))
            candidates_2 = [evaluated_2grams[idx] for idx in c]
            candidates_2 = set.union(*map(set, candidates_2))
            rouge_1 = np.mean([cal_rouge(candidates_1, reference_1grams)['f'] for reference_1grams in references_1grams])
            rouge_2 = np.mean([cal_rouge(candidates_2, reference_2grams)['f'] for reference_2grams in references_2grams])
            rouge_score = rouge_1 + rouge_2
            if rouge_score > cur_max_rouge:
                cur_max_rouge = rouge_score
                cur_id = i
        if (cur_id == -1):
            return selected
        selected.append(cur_id)
        if len(selected) == MAX_CLUSTERS:
            return selected
        # Block every other span of the just-consumed cluster.
        selected_clusters.extend(cluster_items[span_idx2cluster[cur_id]])
        max_rouge = cur_max_rouge
    return selected
def greedy_selection_all_clusters(predictions_topic, abstracts, MAX_CLUSTERS = 9, allowed_clusters = None, summary_size=1000, HIGH_PRED_REPRESETATIVE_ORACLE = True, CLUSTERS_BY_ORDER = False):
    """
    Like greedy_selection_clusters, but by default operates over ALL clusters
    (or the caller-supplied ``allowed_clusters``) and also records the ROUGE
    gain contributed by each selected span.

    Returns a 3-tuple:
        selected            -- row positions of the chosen spans,
        selected_rouge_diff -- per-step increase of the cumulative score,
        selected_clusters   -- cluster id of each chosen span.
    Asserts that exactly one span was chosen per allowed cluster.
    NOTE(review): may add an 'original_idx2' column to predictions_topic.
    """
    # HIGH_PRED_REPRESETATIVE_ORACLE = False
    # CLUSTERS_BY_ORDER = True
    def _rouge_clean(s):
        # Keep only alphanumerics and spaces for the ROUGE computation.
        return re.sub(r'[^a-zA-Z0-9 ]', '', s)

    docSpanList = list(predictions_topic['docSpanText'].values)
    span_idx2cluster = list(predictions_topic['cluster_idx'].values)
    # Map cluster id -> positions of all of its spans.
    cluster_items = defaultdict(list)
    for span_idx, cluster_idx in enumerate(span_idx2cluster):
        cluster_items[cluster_idx].append(span_idx)
    # allowed_clusters = list(predictions_topic[predictions_topic['cluster_size']>=3][
    #     'cluster_idx'].drop_duplicates(keep="first").values)
    # if len(allowed_clusters) < 11:
    #     allowed_clusters = list(predictions_topic[predictions_topic['cluster_size'] >= 2][
    #         'cluster_idx'].drop_duplicates(keep="first").values)
    #     # if len(allowed_clusters) < 10:
    #     #     allowed_clusters = list(predictions_topic[predictions_topic['cluster_size'] >= 1][
    #     #         'cluster_idx'].drop_duplicates(keep="first").values)
    # allowed_clusters = list(predictions_topic['cluster_idx'].drop_duplicates(keep="first").values)
    if allowed_clusters is None:
        #select all clusters
        allowed_clusters = list(predictions_topic['cluster_idx'].drop_duplicates(keep="first").values)
    # if len(predictions_topic[predictions_topic['cluster_size'] >= 3]['cluster_idx'].drop_duplicates()) < 10:
    #     allowed_clusters = list(predictions_topic[predictions_topic['cluster_size'] >= 2]['cluster_idx'].drop_duplicates().values)
    # else:
    #     allowed_clusters = list(predictions_topic[predictions_topic['cluster_size'] >= 3]['cluster_idx'].drop_duplicates().values)
    # if len(allowed_clusters)<MAX_CLUSTERS:
    #     allowed_clusters = list(
    #         predictions_topic.sort_values(by=['cluster_size', 'inFile_sentIdx'], ascending=[False, True])[
    #             'cluster_idx'].drop_duplicates(keep="first").values)[:MAX_CLUSTERS]
    if HIGH_PRED_REPRESETATIVE_ORACLE:
        predictions_topic['original_idx2'] = range(len(predictions_topic))
        allowed_cluster_represetatives = []
        for allowed_cluster_idx in allowed_clusters:
            predictions_topic_cluster = predictions_topic[
                predictions_topic['cluster_idx'] == allowed_cluster_idx]
            predictions_topic_cluster = predictions_topic_cluster.sort_values(by=['prediction'], ascending=False)
            allowed_cluster_represetatives.append(predictions_topic_cluster.iloc[0]['original_idx2'])

    max_rouge = 0.0
    # abstract = sum(abstract_sent_list, [])
    abstracts = [_rouge_clean(abstract.lower().replace('...',' ... ')).split() for abstract in abstracts]
    # abstract = _rouge_clean(' '.join(abstract)).split()
    sents = [_rouge_clean(s.lower().replace('...',' ... ')).split() for s in docSpanList]
    evaluated_1grams = [_get_word_ngrams(1, [sent]) for sent in sents]
    evaluated_2grams = [_get_word_ngrams(2, [sent]) for sent in sents]
    references_1grams = []
    references_2grams = []
    for abstract in abstracts:
        references_1grams.append(_get_word_ngrams(1, [abstract]))
        references_2grams.append(_get_word_ngrams(2, [abstract]))

    selected = []
    selected_cluster_spans = [] #index of spans inside cluster that were already selected
    selected_rouge_diff = []
    selected_clusters = []
    for s in range(summary_size):
        # Reset to 0 (not max_rouge): every round accepts the best remaining
        # span even if it does not improve the cumulative score.
        cur_max_rouge = 0#max_rouge#0
        cur_id = -1
        for i in range(len(sents)):
            if (i in selected):
                continue
            if (i in selected_cluster_spans):
                continue
            if CLUSTERS_BY_ORDER:
                if (span_idx2cluster[i] != allowed_clusters[len(selected)]):
                    continue
            else:
                if (span_idx2cluster[i] not in allowed_clusters):
                    continue
            if HIGH_PRED_REPRESETATIVE_ORACLE:
                if i not in allowed_cluster_represetatives:
                    continue
            c = selected + [i]
            candidates_1 = [evaluated_1grams[idx] for idx in c]
            candidates_1 = set.union(*map(set, candidates_1))
            candidates_2 = [evaluated_2grams[idx] for idx in c]
            candidates_2 = set.union(*map(set, candidates_2))
            rouge_1 = np.mean([cal_rouge(candidates_1, reference_1grams)['f'] for reference_1grams in references_1grams])
            rouge_2 = np.mean([cal_rouge(candidates_2, reference_2grams)['f'] for reference_2grams in references_2grams])
            rouge_score = rouge_1 + rouge_2
            if rouge_score > cur_max_rouge:
                cur_max_rouge = rouge_score
                cur_id = i
        if (cur_id == -1): #already selected all clusters
            assert(len(selected_rouge_diff)==len(allowed_clusters))
            return selected, selected_rouge_diff, selected_clusters
        selected.append(cur_id)
        selected_rouge_diff.append(cur_max_rouge-max_rouge)
        # if len(selected) == MAX_CLUSTERS:
        #     return selected
        # Block every other span of the just-consumed cluster.
        selected_cluster_spans.extend(cluster_items[span_idx2cluster[cur_id]])
        selected_clusters.append(span_idx2cluster[cur_id])
        max_rouge = cur_max_rouge
    assert (len(selected_rouge_diff) == len(allowed_clusters))
    return selected, selected_rouge_diff, selected_clusters
def offset_str2list(offset):
    """Parse an offsets string 'a,b;c,d;...' into [[a, b], [c, d], ...]."""
    spans = []
    for chunk in offset.split(';'):
        spans.append([int(token) for token in chunk.split(',')])
    return spans
def offset_decreaseSentOffset(sentOffset, scu_offsets):
    """Shift [start, end] pairs from document coordinates to sentence-local ones."""
    shifted = []
    for span in scu_offsets:
        shifted.append([span[0] - sentOffset, span[1] - sentOffset])
    return shifted
def Union(offsets, sentOffset):
    """
    Union of the sentence-local character indices covered by all offset
    strings of one sentence. Returns a set of ints.
    """
    covered = set()
    for offset_str in offsets:
        spans = offset_decreaseSentOffset(sentOffset, offset_str2list(offset_str))
        for span in spans:
            covered |= set(range(span[0], span[1]))
    return covered
def _get_ngrams(n, text):
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _block_tri(c, p):
    """True if candidate string *c* shares a trigram with any line of *p*."""
    cand_tris = _get_ngrams(3, c.split())
    for line in p.split('\n'):
        if len(cand_tris.intersection(_get_ngrams(3, line.split()))) > 0:
            return True
    return False
def read_simMats(topic_name, predictions_topic, dataset):
    """
    Load the precomputed span-similarity matrix for a topic and attach each
    span's matrix row index to ``predictions_topic`` as a 'simMat_idx' column.

    Returns the similarity matrix.
    NOTE(review): mutates predictions_topic in place; the sim-matrix path is
    hard-coded to a specific machine.
    """
    sim_mats_path = '/home/nlp/ernstor1/main_summarization/sim_mats/{}/'.format(dataset)
    if dataset.startswith('TAC'):
        # Recover the canonical topic id (8 chars before '.pickle') from the
        # pickle filename matching this TAC topic.
        topic_name = topic_name[:-2]
        topic_name = glob.glob(sim_mats_path+'/SupAligner_checkpoint-2000_'+ topic_name +'*-A' + '.pickle')[0][-15:-7]
    with open(os.path.join(sim_mats_path,'SupAligner_checkpoint-2000_'+ topic_name + '.pickle'), 'rb') as handle:
        simMat = pickle.load(handle)
    # simMat_l = np.tril(simMat) + np.tril(simMat).transpose()
    # np.fill_diagonal(simMat_l,0) #avoid summing diagonal twice
    # simMat_u = np.triu(simMat) + np.triu(simMat).transpose()
    # simMat = (simMat_l + simMat_u) / 2
    with open(os.path.join(sim_mats_path,topic_name + '_idx2span.pickle'), 'rb') as handle:
        idx2span = pickle.load(handle)
    # Invert idx2span: key a span by (file + text + sentence char offset).
    span2idx = {}
    for key, value in idx2span.items():
        span2idx[value['documentFile'] + value['docScuText'] + str(value['docSentCharIdx'])] = key
    predictions_topic['simMat_idx'] = (predictions_topic['documentFile'] + predictions_topic['docSpanText']
                                       + predictions_topic['docSentCharIdx'].apply(str)).apply(lambda x: span2idx[x])
    return simMat
def createGT_labels(predictions_topic, data_path, topic, overSample=False):
    """
    Create greedy-oracle binary labels for a topic's candidate spans.

    Reads the topic's reference summaries from ``data_path``, runs
    greedy_selection_MDS over all candidate span texts, and writes a 0/1
    label column ('over_sample' if overSample else 'scnd_filter_label')
    into ``predictions_topic`` (mutated in place).

    Returns the list of span texts labeled positive.
    """
    if overSample:
        labels_column_name = 'over_sample'
    else:
        labels_column_name = 'scnd_filter_label'
    predictions_topic['original_idx'] = range(len(predictions_topic))
    positive_alignments_topic = predictions_topic#[predictions_topic['pred_prob'] >= 0.5]
    abstracts = []
    # if DATASET == 'TAC2011':
    #     for summary_path in glob.iglob(data_path + topic.upper() + '.*'):
    #         summary = ' '.join(read_generic_file(summary_path))
    #         abstracts.append(summary)
    # else:
    # Reference summary files are matched by the upper-cased topic id
    # (last character of `topic` dropped).
    for summary_path in glob.iglob(data_path + topic[:-1].upper() + '.*'):
        summary = ' '.join(read_generic_file(summary_path))
        abstracts.append(summary)
    docFile_summSpan_cands = list(positive_alignments_topic['docSpanText'].values)
    positive_summSpan_idx = greedy_selection_MDS(docFile_summSpan_cands, abstracts)
    positive_summSpan_original_idx = [positive_alignments_topic['original_idx'].values[cand_idx] for cand_idx in
                                      positive_summSpan_idx]
    scnd_filter_label = np.zeros(len(predictions_topic), dtype=int)
    scnd_filter_label[positive_summSpan_original_idx] = 1
    predictions_topic[labels_column_name] = scnd_filter_label
    ##validation for correct indexes
    docFile_summSpan_positive = [docFile_summSpan_cands[cand_idx] for cand_idx in positive_summSpan_idx]
    positive_labeled_spans_validation = predictions_topic[predictions_topic[labels_column_name] == 1][
        'docSpanText'].isin(docFile_summSpan_positive)
    assert (all(positive_labeled_spans_validation))
    return docFile_summSpan_positive
def cluster_mat(simMat, except_idx, predictions_topic):
    """
    Cluster the spans whose similarity-matrix indices are in ``except_idx``.

    Runs average-linkage agglomerative clustering on the distance matrix
    (1 - similarity) with distance_threshold=0.5, then writes 'cluster_idx'
    and 'cluster_size' columns into ``predictions_topic`` (mutated in place).
    """
    # zero_idx = np.delete(range(len(sim_mat)),except_idx)
    # sim_mat[zero_idx, :] = 0
    # sim_mat[:, zero_idx] = 0
    # except_idx = sorted(except_idx)
    # Restrict the matrix to the selected rows/columns.
    sim_mat = simMat[except_idx, :]
    sim_mat = sim_mat[:, except_idx]
    # Map original matrix index -> position in the restricted matrix.
    sim_idx2new = {}
    for i in range(len(except_idx)):
        sim_idx2new[except_idx[i]] = i
    clustering = AgglomerativeClustering(affinity='precomputed',n_clusters=None, linkage="average" ,distance_threshold=0.5).fit(1-sim_mat)
    predictions_topic['cluster_idx'] = predictions_topic['simMat_idx'].apply(lambda x: clustering.labels_[sim_idx2new[x]])
    # Number of spans per cluster, indexed by cluster label.
    cluster_size = [list(clustering.labels_).count(i) for i in range(max(clustering.labels_)+1)]
    predictions_topic['cluster_size'] = predictions_topic['cluster_idx'].apply(lambda x: cluster_size[x])
def oracle_per_cluster(dataset, gold_summary_path, topic, predictions_topic, MAX_CLUSTERS, HIGH_PRED_REPRESETATIVE_ORACLE = False):
    """
    Mark the greedy-oracle span of each large cluster for one topic.

    Loads the topic's reference summaries, runs greedy_selection_clusters,
    and writes a 0/1 'oracle_label' column into ``predictions_topic``
    (mutated in place). Returns the selected row positions.
    """
    abstracts = []
    # The glob prefix differs between TAC (drop last 2 chars) and DUC
    # (drop last char) topic-id conventions.
    if dataset.startswith('TAC'):
        for summary_path in glob.iglob(gold_summary_path + topic[:-2].upper() + '*'):
            abstract = ' '.join(read_generic_file(summary_path))
            abstracts.append(abstract)
    else:
        for summary_path in glob.iglob(gold_summary_path + topic[:-1].upper() + '*'):
            abstract = ' '.join(read_generic_file(summary_path))
            abstracts.append(abstract)
    assert(abstracts)
    docFile_summSpan_cands_idx = greedy_selection_clusters(predictions_topic, abstracts, MAX_CLUSTERS = MAX_CLUSTERS, HIGH_PRED_REPRESETATIVE_ORACLE = HIGH_PRED_REPRESETATIVE_ORACLE)
    oracle_label = np.zeros(len(predictions_topic))
    oracle_label[docFile_summSpan_cands_idx] = 1
    predictions_topic['oracle_label'] = oracle_label
    return docFile_summSpan_cands_idx
def oracle_between_clusters(dataset, gold_summary_path, topic, predictions_topic, MAX_CLUSTERS):
    """
    Like oracle_per_cluster, but always restricts candidates to each
    cluster's highest-prediction representative
    (HIGH_PRED_REPRESETATIVE_ORACLE=True) and returns nothing; only the
    'oracle_label' column of ``predictions_topic`` is written in place.
    """
    abstracts = []
    # The glob prefix differs between TAC (drop last 2 chars) and DUC
    # (drop last char) topic-id conventions.
    if dataset.startswith('TAC'):
        for summary_path in glob.iglob(gold_summary_path + topic[:-2].upper() + '*'):
            abstract = ' '.join(read_generic_file(summary_path))
            abstracts.append(abstract)
    else:
        for summary_path in glob.iglob(gold_summary_path + topic[:-1].upper() + '*'):
            abstract = ' '.join(read_generic_file(summary_path))
            abstracts.append(abstract)
    assert(abstracts)
    docFile_summSpan_cands_idx = greedy_selection_clusters(predictions_topic, abstracts, MAX_CLUSTERS = MAX_CLUSTERS, HIGH_PRED_REPRESETATIVE_ORACLE = True, CLUSTERS_BY_ORDER = False)
    oracle_label = np.zeros(len(predictions_topic))
    oracle_label[docFile_summSpan_cands_idx] = 1
    predictions_topic['oracle_label'] = oracle_label
def build_summary(prediction_topic_selected):
    """
    Assemble the summary text from the selected span rows.

    Spans coming from the same sentence are merged: their character offsets
    are unioned and the covered characters are re-read from the sentence
    text, with a space inserted wherever covered indices are not contiguous.
    One output line per source sentence.
    """
    pieces = []
    unique_sents = prediction_topic_selected[['documentFile', 'docSentCharIdx']].drop_duplicates()
    for doc_file, sent_char_idx in zip(unique_sents['documentFile'].values,
                                       unique_sents['docSentCharIdx'].values):
        sent_rows = prediction_topic_selected[
            (prediction_topic_selected['documentFile'] == doc_file) &
            (prediction_topic_selected['docSentCharIdx'] == sent_char_idx)]
        covered = sorted(Union(sent_rows['docSpanOffsets'].values, sent_char_idx))
        sentence_text = sent_rows['docSentText'].values[0]
        chars = []
        prev = covered[0]
        for idx in covered:
            # Contiguous characters are copied as-is; a gap inserts a space.
            chars.append(sentence_text[idx] if idx == prev + 1 else ' ' + sentence_text[idx])
            prev = idx
        pieces.append(''.join(chars) + '\n')  # newline between sentences
    return ''.join(pieces)
def select_cluster_representative(prediction_topic_clusters_selected):
    """
    Pick one representative row per cluster, preferring sentences shared
    across many clusters.

    ``prediction_topic_clusters_selected`` is a list of per-cluster
    DataFrames. For each cluster not yet covered, the sentence whose spans
    appear in the largest number of remaining clusters is chosen; all
    clusters touched by that sentence are then marked as covered.

    Returns a DataFrame with exactly one representative selection per input
    cluster (enforced by the final assert).
    """
    selected_clusters = []
    prediction_topic_clusters_selected_concat = pd.concat(prediction_topic_clusters_selected,axis=0)
    # Sort by prediction so drop_duplicates below keeps the highest-scoring
    # span of each (sentence, cluster) pair.
    prediction_topic_clusters_selected_concat = prediction_topic_clusters_selected_concat.sort_values(by=['prediction'], ascending=False)
    # prediction_topic_clusters_selected_concat = prediction_topic_clusters_selected_concat[prediction_topic_clusters_selected_concat['prediction'] > 0.1]
    selected_sents = pd.DataFrame(columns=prediction_topic_clusters_selected_concat.columns.to_list())
    for prediction_topic_cluster in prediction_topic_clusters_selected:
        cluster_idx = prediction_topic_cluster.iloc[0]['cluster_idx']
        if cluster_idx in selected_clusters:
            continue
        max_clusters_w_shared_sent = 0
        selected_sents_tmp = None
        for index, row in prediction_topic_cluster.iterrows():
            # All remaining rows that share this row's source sentence.
            prediction_topic_clusters_selected_concat_sent = \
                prediction_topic_clusters_selected_concat[(prediction_topic_clusters_selected_concat['documentFile'] == row['documentFile']) &
                                                          (prediction_topic_clusters_selected_concat['docSentCharIdx'] == row['docSentCharIdx'])]
            prediction_topic_clusters_selected_concat_sent = prediction_topic_clusters_selected_concat_sent.drop_duplicates(['documentFile', 'docSentCharIdx', 'cluster_idx']) #leave max one sentence per cluster (if there are two- leave the one with the highest prediction)
            if len(prediction_topic_clusters_selected_concat_sent) > max_clusters_w_shared_sent:
                max_clusters_w_shared_sent = len(prediction_topic_clusters_selected_concat_sent)
                selected_sents_tmp = prediction_topic_clusters_selected_concat_sent.copy()
        selected_sents = selected_sents.append(selected_sents_tmp)
        selected_clusters.extend(selected_sents_tmp['cluster_idx'].to_list())
        prediction_topic_clusters_selected_concat = prediction_topic_clusters_selected_concat[~prediction_topic_clusters_selected_concat['cluster_idx'].isin(selected_clusters)] #remove selected clusters to avoid counting them again
    assert(len(selected_sents) == len(prediction_topic_clusters_selected))
    return selected_sents
def retrieve_R1_R2(sys_summary_path):
    """Read the ROUGE-1 and ROUGE-2 F1 scores ('100_f' column) from the
    '0_rouge_scores.csv' file under *sys_summary_path*."""
    scores = pd.read_csv(os.path.join(sys_summary_path, '0_rouge_scores.csv'))
    scores = scores.set_index('ROUGE_type')
    return scores['100_f']['R1'], scores['100_f']['R2']
##################################
###### main ##############
##################################
if __name__ == "__main__":
# tunning_list = []
# for DUC_THRESH in np.linspace(0.0, 0.90, num=31):
# for CLUSTER_THRESH in np.linspace(0.4, 0.7, num=7):
MAX_SENT = 100
DATASETS = ['DUC2004']#['TAC2008','TAC2009','TAC2010']
SET_TYPE = 'test'
ORACLE = False
ORACLE_BY_CLUSTERS = False #if True ORACLE_CLUSTER_REPRESENTATIVE or ORACLE_CLUSTER_RANKING must be True
ORACLE_CLUSTER_REPRESENTATIVE = False #take the best representative from each cluster (using original cluster ranking)
ORACLE_CLUSTER_RANKING = False #select best clusters
ORACLE_BY_ALL_CLUSTERS = False #select best clusters out of all clusters
CLUSTERING = True
SUMM_LEN = 100
MAX_CLUSTERS = 10
SENTENCE_LEVEL = False
if ORACLE:
oracle_flag = '_oracle'
else:
oracle_flag = ''
sys_model = 'roberta'
sys_checkpoint = 'checkpoint-1200' # 'checkpoint-180'#'checkpoint-540'#'checkpoint-1020'#'checkpoint-540'#'checkpoint-600' #'checkpoint-1140'#'checkpoint-240'#'checkpoint-180' # 'checkpoint-1080'
sys_folder = 'OIE_TAC2008_TAC2009_2010_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_span_classifier_head_fixed'
if SENTENCE_LEVEL:
sys_checkpoint = 'checkpoint-1200' # 'checkpoint-180'#'checkpoint-540'#'checkpoint-1020'#'checkpoint-540'#'checkpoint-600' #'checkpoint-1140'#'checkpoint-240'#'checkpoint-180' # 'checkpoint-1080'
sys_folder = 'OIE_full_TAC2008_TAC2009_2010_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_sentence_based_span_classifier_head'
#DUC2004
if DATASETS[0] == 'DUC2004':
sys_checkpoint = 'checkpoint-1500' # 'checkpoint-180'#'checkpoint-540'#'checkpoint-1020'#'checkpoint-540'#'checkpoint-600' #'checkpoint-1140'#'checkpoint-240'#'checkpoint-180' # 'checkpoint-1080'
sys_folder = 'OIE_DUC2003_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_span_classifier_head_fixed_finetuned_TAC8910'
if SENTENCE_LEVEL:
# #sentence-based
sys_checkpoint = 'checkpoint-1800' # 'checkpoint-180'#'checkpoint-540'#'checkpoint-1020'#'checkpoint-540'#'checkpoint-600' #'checkpoint-1140'#'checkpoint-240'#'checkpoint-180' # 'checkpoint-1080'
sys_folder = 'OIE_DUC2003_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_sentence_based_span_classifier_head_finetune_TAC8910_not_full/'#'OIE_DUC2003_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_sentence_based_span_classifier_head_finetune_TAC8910'
##full
if DATASETS[0] == 'TAC2011':
full_fixed = 'full'
else:
full_fixed = 'fixed'
if DATASETS[0] =='DUC2003':
sys_checkpoint = 'checkpoint-1500' # 'checkpoint-180'#'checkpoint-540'#'checkpoint-1020'#'checkpoint-540'#'checkpoint-600' #'checkpoint-1140'#'checkpoint-240'#'checkpoint-180' # 'checkpoint-1080'
sys_folder = 'OIE_DUC2003_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_span_classifier_head_fixed_finetuned_TAC8910'
metadata = pd.read_csv(
'./OIE_highlights/{}_{}_CDLM_greedyMaxRouge_no_alignment_{}_truncated_metadata.csv'.format(
'_'.join(DATASETS),
SET_TYPE, full_fixed))
else:
metadata = pd.read_csv(
'./OIE_highlights/{}_{}_CDLM_allAlignments_{}_truncated_metadata.csv'.format(
'_'.join(DATASETS),
SET_TYPE,full_fixed))
predictions = pd.read_csv(
'./models/{}/{}/{}_{}_results_None.csv'.format(sys_folder,sys_checkpoint,
SET_TYPE, '_'.join(DATASETS)))
if SENTENCE_LEVEL:
if DATASETS[0] == 'DUC2004':
# # sentence_based duc
metadata = pd.read_csv(
'./OIE_highlights/DUC2004_test_CDLM_greedyMaxRouge_no_alignment_sentence_based_fixed_truncated_metadata.csv')
else:
# #sentence_based
metadata = pd.read_csv(
'./OIE_highlights/{}_{}_CDLM_allAlignments_sentence_based_full_truncated_metadata.csv'.format(
'_'.join(DATASETS),
SET_TYPE))
assert (len(predictions)==len(metadata))
metadata.insert(2, "prediction", predictions['prediction'])
predictions = metadata
len_pred = []
len_pred_sent = []
empty = 0
analysis_list = []
clusters_data = []
prediction_selected = []
for SET in DATASETS:
sys_summary_path = './{}_system_summaries/{}/{}_'.format(SET,
sys_folder,
sys_checkpoint) + time.strftime(
"%Y%m%d-%H%M%S") + '{}/'.format(oracle_flag)
gold_summary_path = './data/{}/summaries/'.format(SET)
data_path = './data/{}/'.format(SET)
for topic in os.listdir(data_path):
print(topic)
if topic == 'summaries':
continue
if SET.startswith('TAC'):
topic = topic[:-3] + topic[-2:]
summary = ''
predictions_topic = predictions[predictions['topic'] == topic]
if SET =='DUC2004':
predictions_topic = predictions_topic[predictions_topic['prediction'] >= 0.4]
else:
predictions_topic = predictions_topic[predictions_topic['prediction'] >= 0.04]
#salience threshold -0.4 for DUC2004 0.04 for TAC2011
predictions_topic = predictions_topic.sort_values(by=['prediction'], ascending=False)
len_pred.append(len(predictions_topic))
if len(predictions_topic) == 0:
empty += 1
continue
if CLUSTERING:
simMat = read_simMats(topic, predictions_topic, SET)
cluster_mat(simMat, predictions_topic['simMat_idx'].values, predictions_topic)
allowed_clusters = list(predictions_topic.sort_values(by=['cluster_size','inFile_sentIdx'], ascending=[False,True])[
'cluster_idx'].drop_duplicates(keep="first").values)[:MAX_CLUSTERS]
cluster_idx_idx = 0
summary = ' '
prediction_topic_selected = pd.DataFrame(columns=predictions_topic.columns.to_list())
prediction_topic_clusters_selected = []
# while len(summary.split(' ')) <= SUMM_LEN and cluster_idx_idx < len(allowed_clusters):
while cluster_idx_idx < len(allowed_clusters):
predictions_topic_cluster = predictions_topic[predictions_topic['cluster_idx'] == allowed_clusters[cluster_idx_idx]]
# add most salient span from each cluster
predictions_topic_cluster = predictions_topic_cluster.sort_values(by=['prediction'], ascending=False)
new_cand = predictions_topic_cluster.iloc[0]
prediction_topic_selected = prediction_topic_selected.append(new_cand)
# select cluster representative span that its sentence appears in several clusters
# prediction_topic_clusters_selected.append(predictions_topic_cluster)
# #
# prediction_topic_selected = select_cluster_representative(prediction_topic_clusters_selected)
#if two selected spans are from the same sentence- take their "union"
summary = build_summary(prediction_topic_selected)
cluster_idx_idx += 1
elif ORACLE:
abstracts = []
if SET.startswith('TAC'):
for summary_path in glob.iglob(gold_summary_path + topic[:-2].upper() + '*'):
abstract = ' '.join(read_generic_file(summary_path))
abstracts.append(abstract)
else:
for summary_path in glob.iglob(gold_summary_path + topic[:-1].upper() + '.*'):
abstract = ' '.join(read_generic_file(summary_path))
abstracts.append(abstract)
assert(abstracts)
docFile_summSpan_cands = list(predictions_topic['docSpanText'].values)
if ORACLE_BY_CLUSTERS:
read_simMats(topic, predictions_topic, SET)
cluster_mat(simMat, predictions_topic['simMat_idx'].values, predictions_topic)
if ORACLE_CLUSTER_REPRESENTATIVE:
docFile_summSpan_cands_idx = greedy_selection_clusters(predictions_topic, abstracts, MAX_CLUSTERS = MAX_CLUSTERS, CLUSTERS_BY_ORDER = True)
elif ORACLE_CLUSTER_RANKING:
docFile_summSpan_cands_idx = greedy_selection_clusters(predictions_topic, abstracts,
MAX_CLUSTERS=MAX_CLUSTERS, HIGH_PRED_REPRESETATIVE_ORACLE = True)
else:
assert(False)
prediction_topic_selected = predictions_topic.iloc[docFile_summSpan_cands_idx]
summary = build_summary(prediction_topic_selected)
elif ORACLE_BY_ALL_CLUSTERS:
read_simMats(topic, predictions_topic, SET)
cluster_mat(simMat, predictions_topic['simMat_idx'].values, predictions_topic)
docFile_summSpan_cands_idx, docFile_summSpan_cands_idx_rouge_diff,_ = greedy_selection_all_clusters(predictions_topic, abstracts, MAX_CLUSTERS = MAX_CLUSTERS)
prediction_topic_selected = predictions_topic.iloc[docFile_summSpan_cands_idx]
prediction_topic_selected['rouge_diff'] = docFile_summSpan_cands_idx_rouge_diff
summary = build_summary(prediction_topic_selected)
prediction_selected.append(prediction_topic_selected)
else:
docFile_summSpan_cands_idx = greedy_selection_MDS(docFile_summSpan_cands, abstracts)
docFile_summSpan_positive = [docFile_summSpan_cands[cand_idx] for cand_idx in docFile_summSpan_cands_idx]
summary = ''
candidate_new_idx = 0
while len(summary.split(' ')) <= SUMM_LEN:
if candidate_new_idx >= len(docFile_summSpan_positive):
break
candidate_new_text = docFile_summSpan_positive[candidate_new_idx]
summary += candidate_new_text+"\n"
candidate_new_idx += 1
oracle_label = np.zeros(len(docFile_summSpan_cands))
oracle_label[docFile_summSpan_cands_idx] = 1
predictions_topic['oracle_label'] = oracle_label
else:
predictions_topic = predictions_topic.sort_values(by=['prediction'], ascending=False)
selected_spans = []
candidate_new_idx = 0
# analize_data(analysis_list)
summary = ''
while len(summary.split(' ')) <= SUMM_LEN and len(selected_spans) < MAX_SENT:
if candidate_new_idx >= len(predictions_topic):
break
candidate_new_text = predictions_topic['docSpanText'].values[candidate_new_idx]
candidate_new_idx += 1
if _block_tri(candidate_new_text, summary):
continue
selected_spans.append(candidate_new_text)
summary += candidate_new_text
# if '.' not in summary[-3:]:
# summary += ' .\n' # add period between sentences
# else:
# summary += '\n' # add space between sentences
summary += '\n' # add space between sentences
summary = summary.replace('...' ,' ')
if SET.startswith('TAC'):
write_summary(sys_summary_path, summary, topic=topic.upper()[:-2], type='system')
else:
write_summary(sys_summary_path, summary, topic=topic.upper()[:-1], type='system')
calc_rouge(gold_summary_path, sys_summary_path)
print('mean predictions per topic: ', np.mean(len_pred))
print('max predictions per topic: ', max(len_pred))
print('min predictions per topic: ', min(len_pred))
print('num empty topic: ', empty)
# r1,r2 = retrieve_R1_R2(sys_summary_path)
# tunning_list.append([DUC_THRESH,CLUSTER_THRESH, r1,r2])
#
#
#
# tunning_df = pd.DataFrame(tunning_list, columns=['duc_thresh','cluster_thresh','R1', 'R2'])
# tunning_df.to_csv('/home/nlp/ernstor1/highlighting/{}_system_summaries/{}/tunning/tunning_df.csv'.format(SET, sys_folder))
| StarcoderdataPython |
8129297 | <reponame>StalingradTeam/E-9.11-Event-manager<filename>config.py<gh_stars>0
import os
class Config:
    """Flask/SQLAlchemy settings, read from the environment with local-dev fallbacks."""

    # Database connection string; defaults to a throwaway SQLite file.
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', 'sqlite:///temp.db')
    # Disable the SQLAlchemy event system (saves memory, silences a warning).
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Session-signing key; override via the environment in production.
    SECRET_KEY = os.environ.get('SECRET_KEY', 'very_secret_key')
    TIMEZONE = 'UTC'
9775296 | # this is __init__.py | StarcoderdataPython |
9724072 | <filename>LabSessionsSol/TP91_superviseur_fils_Q2_v3.py
# encoding: UTF-8
import random, time, sys, numpy, os, mmap, posix_ipc, traceback, signal
# version complete Q2
# PIDs of the forked sensor children; read by the SIGINT handler below.
pid = []
# Custom SIGINT handling: the parent installs `arret` to tear everything down.
def arret(signum, frame):
    """SIGINT handler for the parent: kill the children, free the shared
    memory, and exit.

    BUGFIX: the parameter was previously named ``signal``, which shadowed
    the ``signal`` module, so ``signal.SIGINT`` raised AttributeError on
    the first iteration.  The bare ``except:`` silently swallowed it and
    the children were never killed.  Signal handlers are invoked
    positionally, so renaming the parameters is backward-compatible.
    """
    global pid
    # Kill every child we created; a child that already exited raises
    # ProcessLookupError (an OSError), which we can safely ignore.
    for child_pid in pid:
        print(child_pid)
        try:
            os.kill(child_pid, signal.SIGINT)
        except OSError:
            pass
    # The parent releases the shared resources.
    mm.close()
    shm.unlink()
    print("fin")
    sys.exit(0)
try:
    # One page of POSIX shared memory, mapped read/write: the sensor matrix.
    shm = posix_ipc.SharedMemory("/sup", posix_ipc.O_CREAT, size=os.sysconf("SC_PAGE_SIZE"))
    # sem = posix_ipc.Semaphore("/semTemp", posix_ipc.O_CREAT, 0o600, 1)
    mm = mmap.mmap(shm.fd, os.sysconf("SC_PAGE_SIZE"), prot=mmap.PROT_READ | mmap.PROT_WRITE)
    N = sys.argv[1] # number of sensors
    p = 1
    # Fork the children.
    # `nb` is inherited by each child and tells it which slice of the
    # shared matrix (bytes nb*10 .. nb*10+9) it writes into.
    for nb in range(int(N)):
        # print("nb "+ str(nb))
        if (p > 0):
            p = os.fork()
            pid.append(p)
        # The children behave as sensors: one reading per second, ten readings.
        if (p == 0):
            print("fils de pid avec nb : " + str(os.getpid()) + " " + str(nb))
            i = 0
            while (i < 10):
                # temperature = random.randint(0,32)
                # print ("pid "+ str(os.getpid())+ str(temperature))
                mm[nb * 10 + i] = random.randint(0, 32 * (nb + 1))
                time.sleep(1)
                i = i + 1
            print("fin fils")
            # NOTE(review): the child never exits here, so it keeps looping
            # over the remaining `nb` values and plays sensor again for each
            # of them -- confirm whether an os._exit(0) is missing.
    if (p > 0):
        # Install the SIGINT handler in the parent.
        signal.signal(signal.SIGINT, arret)
        # The parent waits for all of its children.
        # NOTE(review): this loop variable clobbers the global `pid` *list*
        # that the SIGINT handler iterates (it becomes an int here).
        for pid in range(int(N)):
            os.wait()
        # The parent prints the readings and computes the maximum.
        print("affichage par pere des valeurs")
        for t in range(0, int(N) * 10):
            print(mm[t])
        maximum = numpy.amax(mm)
        print(maximum)
        # The parent releases the shared resources.
        mm.close()
        shm.unlink()
        # Busy-loop forever so the SIGINT handler can be exercised.
        print("Le programme va boucler...")
        print(pid)
        while True:
            continue
except OSError as e:
    traceback.print_exc()
    print(e.strerror)
    exit(1)
| StarcoderdataPython |
1602625 | from rest_framework import viewsets, generics, pagination, filters
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
import django_filters
from .models import *
from .serializers import *
from .api_filters import *
from django_filters.rest_framework import FilterSet
class CustomPagination(pagination.PageNumberPagination):
    """Page-number pagination: 25 items by default, client-tunable via
    ?page_size=, capped at 10000; responses carry navigation metadata."""
    page_size = 25
    page_size_query_param = 'page_size'
    max_page_size = 10000
    def get_paginated_response(self, data):
        # Wrap the serialized page in an envelope with next/previous links,
        # total object count and total page count.
        return Response({
            'links': {
                'next': self.get_next_link(),
                'previous': self.get_previous_link()
            },
            'count': self.page.paginator.count,
            'total_pages': self.page.paginator.num_pages,
            'results': data
        })
class LemmaViewSet(viewsets.ModelViewSet):
    """CRUD API for Lemma with filtering, ordering and custom pagination."""
    queryset = Lemma.objects.all()
    serializer_class = LemmaSerializer
    pagination_class = CustomPagination
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter, )
    filter_class = LemmaRestFilter
    ordering_fields = '__all__'
class DateViewSet(viewsets.ModelViewSet):
    """CRUD API for Date with filtering, ordering and custom pagination."""
    queryset = Date.objects.all()
    serializer_class = DateSerializer
    pagination_class = CustomPagination
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter, )
    filter_class = DateRestFilter
    ordering_fields = '__all__'
class CorpusViewSet(viewsets.ModelViewSet):
    """CRUD API for Corpus with filtering and ordering.

    NOTE(review): unlike every sibling viewset in this module, no
    pagination_class is set -- confirm unpaginated corpus listings are
    intentional.
    """
    queryset = Corpus.objects.all()
    serializer_class = CorpusSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter, )
    filter_class = CorpusRestFilter
    ordering_fields = '__all__'
class TextViewSet(viewsets.ModelViewSet):
    """CRUD API for Text with filtering, ordering and custom pagination."""
    queryset = Text.objects.all()
    serializer_class = TextSerializer
    pagination_class = CustomPagination
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter, )
    filter_class = TextRestFilter
    ordering_fields = '__all__'
class ConsonantViewSet(viewsets.ModelViewSet):
    """CRUD API for Consonant with filtering, ordering and custom pagination."""
    queryset = Consonant.objects.all()
    serializer_class = ConsonantSerializer
    pagination_class = CustomPagination
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter, )
    filter_class = ConsonantRestFilter
    ordering_fields = '__all__'
class ClusterViewSet(viewsets.ModelViewSet):
    """CRUD API for Cluster with filtering, ordering and custom pagination."""
    queryset = Cluster.objects.all()
    serializer_class = ClusterSerializer
    pagination_class = CustomPagination
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter, )
    filter_class = ClusterRestFilter
    ordering_fields = '__all__'
class TokenLabelViewSet(viewsets.ModelViewSet):
    """CRUD API for TokenLabel with filtering, ordering and custom pagination."""
    queryset = TokenLabel.objects.all()
    serializer_class = TokenLabelSerializer
    pagination_class = CustomPagination
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter, )
    filter_class = TokenLabelRestFilter
    ordering_fields = '__all__'
class SchwaPresentViewSet(viewsets.ModelViewSet):
    """CRUD API for SchwaPresent with filtering, ordering and custom pagination."""
    queryset = SchwaPresent.objects.all()
    serializer_class = SchwaPresentSerializer
    pagination_class = CustomPagination
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter, )
    filter_class = SchwaPresentRestFilter
    ordering_fields = '__all__'
class OnSetViewSet(viewsets.ModelViewSet):
    """CRUD API for OnSet with filtering, ordering and custom pagination."""
    queryset = OnSet.objects.all()
    serializer_class = OnSetSerializer
    pagination_class = CustomPagination
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter, )
    filter_class = OnSetRestFilter
    ordering_fields = '__all__'
class TokenViewSet(viewsets.ModelViewSet):
    """CRUD API for Token with filtering, ordering and custom pagination."""
    queryset = Token.objects.all()
    serializer_class = TokenSerializer
    pagination_class = CustomPagination
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter, )
    filter_class = TokenRestFilter
    ordering_fields = '__all__'
#https://github.com/encode/django-rest-framework/issues/3636
8154081 | from flask import Blueprint, jsonify, request
from skynet.roku.models import RokuModel
from skynet.roku.forms import RokuKeypressForm, RokuLaunchForm
roku = Blueprint('roku', __name__, url_prefix='/roku')
@roku.route('/keypress')
def keypress():
    """Validate the `key` query argument and forward a keypress to the Roku."""
    form = RokuKeypressForm(request.args)
    if not form.validate():
        return 'Invalid GET Args', 403
    model = RokuModel()
    model.load('keypress', form.key.data)
    return jsonify(model.send_command())
@roku.route('/launch')
def launch():
    """Validate the `app_id` query argument and ask the Roku to launch that app."""
    form = RokuLaunchForm(request.args)
    if not form.validate():
        return 'Invalid GET Args', 403
    model = RokuModel()
    model.load('launch', form.app_id.data)
    return jsonify(model.send_command())
1771747 | #
# Copyright 2019 - <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sys import stdout
import click
from auth0_login import fatal, setting
from auth0_login.aws.console import open_aws_console
from auth0_login.aws.credentials import write_aws_credentials
from auth0_login.aws.account import aws_accounts
from auth0_login.aws.saml_assertion import AWSSAMLAssertion
from auth0_login.saml import SAMLGetAccessTokenCommand
class AWSSTSGetCredentialsFromSAMLCommand(SAMLGetAccessTokenCommand):
    """
    get AWS credentials using the obtained SAML token and
    stores them in `~/.aws/credentials`.

    As multiple AWS roles may have been granted to the SAML token,
    the caller has to specify the `account` number and `role` name to
    generate the credentials for. If you are unsure which accounts
    and roles have been granted, use the `--show` option

    The credentials will be stored under the specified `profile` name.
    By specifying `--open-console` it will open the AWS console too.
    """

    def __init__(self, account, role, profile):
        # Each CLI argument falls back to the configured `setting.attributes`
        # default when not supplied.
        super(AWSSTSGetCredentialsFromSAMLCommand, self).__init__()
        self.account = account if account else setting.attributes.get('aws_account')
        # Only resolve an alias to an account number when the account came
        # from settings (the CLI wrapper already resolves its own argument).
        if not account and self.account:
            self.account = aws_accounts.get_account(self.account).number
        self.role = role if role else setting.attributes.get('aws_role')
        self.profile = profile if profile else setting.attributes.get('aws_profile')
        self.open_console = setting.attributes.get('aws_console', False)
        # Populated by set_saml_response() once authorization completes.
        self.saml_response: AWSSAMLAssertion = None

    def set_saml_response(self, saml_response):
        """Wrap the raw SAML response for AWS role extraction."""
        self.saml_response = AWSSAMLAssertion(saml_response)

    def print_roles(self):
        """Print every granted role as a ready-to-paste config snippet."""
        for role in self.saml_response.available_roles():
            account = aws_accounts.get_account(role.account)
            stdout.write(f'[{role.name}@{account.alias}]\n')
            stdout.write(f'idp_url = {setting.IDP_URL}\n')
            stdout.write(f'client_id = {setting.CLIENT_ID}\n')
            stdout.write(f'aws_account = {account.alias}\n')
            stdout.write(f'aws_role = {role.name}\n')
            stdout.write(f'aws_profile = {role.name}@{account.alias}\n\n')

    def show_account_roles(self):
        """Authorize, then list the account/role combinations on offer."""
        self.request_authorization()
        self.print_roles()

    @property
    def role_arn(self):
        # ARN of the role to assume, built from account number + role name.
        return f'arn:aws:iam::{self.account}:role/{self.role}'

    def run(self):
        """Authorize, assume the role, persist credentials, optionally open the console."""
        if not (self.account and self.role and self.profile):
            fatal('--account, --role and --profile are required.')
        self.request_authorization()
        credentials = self.saml_response.assume_role(self.role_arn, setting.ROLE_DURATION)
        write_aws_credentials(credentials, self.profile)
        if self.open_console:
            open_aws_console(self.profile)
@click.command('aws-assume-role', help=AWSSTSGetCredentialsFromSAMLCommand.__doc__)
@click.option('--account', help='aws account number or alias')
@click.option('--role', help='to assume using the token')
@click.option('--profile', help='to store the credentials under')
@click.option('--show', is_flag=True, default=False, help='account roles available to assume')
# NOTE(review): `count=True` makes open_console an int and the help text
# (' after credential refresh') looks truncated -- verify the intent.
@click.option('--open-console', '-C', count=True, help=' after credential refresh')
def assume_role_with_saml(account, role, profile, show, open_console):
    """CLI entry point: either list the available roles or assume one."""
    # Resolve an account alias to its number before handing off.
    aws_account = aws_accounts.get_account(account).number if account else None
    cmd = AWSSTSGetCredentialsFromSAMLCommand(aws_account, role, profile)
    if show:
        cmd.show_account_roles()
    else:
        # Any non-zero -C count forces opening the AWS console afterwards.
        if open_console:
            cmd.open_console = True
        cmd.run()
| StarcoderdataPython |
1965648 | <filename>floodsystem/analysis.py<gh_stars>0
from matplotlib.dates import date2num
import numpy as np
from datetime import datetime
def polyfit(dates, levels, p):
    """Fit a degree-p polynomial through the (dates, levels) readings.

    Returns a numpy poly1d object together with the time-axis offset:
    the matplotlib date number of the first entry of *dates*, which
    callers must subtract from date numbers before evaluating the
    polynomial.
    """
    times = date2num(dates)
    offset = times[0]
    coefficients = np.polyfit(times - offset, levels, p)
    return np.poly1d(coefficients), offset
def projected_level_after_dt(dates, levels, dt=1):
    """Estimate the level `dt` days past the reference date via a linear trend.

    A straight line (degree-1 polynomial) is fitted through the readings
    and evaluated at `dt`.  `dates` is expected in most-recent-first
    order, so the reference (offset) date is the newest reading.
    """
    trend_line, _offset = polyfit(dates, levels, 1)
    return trend_line(dt)
8154736 | <reponame>kodebach/libelektra
class ElektraPlugin(object):
    """Demo Elektra plugin: logs each lifecycle hook and reports success."""

    def __init__(self):
        # Demo counter, incremented on every open().
        self.x = 1

    def open(self, config, errorKey):
        """Log the open hook, bump the counter, report success."""
        print("[CLASS-PYTHON-2] open -->")
        self.x += 1
        return 1

    def get(self, returned, parentKey):
        """Log the get hook and report success."""
        print("[CLASS-PYTHON-2] get")
        return 1

    def set(self, returned, parentKey):
        """Log the set hook and report success."""
        print("[CLASS-PYTHON-2] set")
        return 1

    def error(self, returned, parentKey):
        """Log the error hook and report success."""
        print("[CLASS-PYTHON-2] error")
        return 1

    def close(self, errorKey):
        """Log the close hook and report a clean shutdown (0)."""
        print("[CLASS-PYTHON-2] <-- close")
        return 0
| StarcoderdataPython |
6541620 | #! -*- encoding:utf-8 -*-
"""
@File : Baselines.py
@Author : <NAME>
@Contact : <EMAIL>
@Dscpt :
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AlbertModel, AlbertPreTrainedModel, BertPreTrainedModel, BertModel
class AlbertBaseline(AlbertPreTrainedModel):
    """ALBERT multiple-choice baseline: scores 5 candidates per example."""
    def __init__(self, config, **kwargs):
        super(AlbertBaseline, self).__init__(config)
        self.albert = AlbertModel(config)
        # Scores each candidate's [CLS] pooled vector with a single linear unit.
        self.scorer = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(config.hidden_size, 1)
        )
        self.init_weights()
    def forward(self, input_ids, attention_mask, token_type_ids, labels):
        """
        input_ids: [B, 5, L] -- 5 candidate sequences per example
        labels: [B, ] -- index of the correct candidate
        Returns (cross-entropy loss, number of correct predictions in batch).
        """
        # logits: [B, 5] (one score per candidate; see _forward)
        logits = self._forward(input_ids, attention_mask, token_type_ids)
        loss = F.cross_entropy(logits, labels) # get the CELoss
        with torch.no_grad():
            logits = F.softmax(logits, dim=1) # get the score
            predicts = torch.argmax(logits, dim=1) # find the result
            # import pdb; pdb.set_trace()
            right_num = torch.sum(predicts == labels)
        return loss, right_num
    def _forward(self, input_ids, attention_mask, token_type_ids, return_pooler=False):
        """Encode the flattened candidates; return pooled [CLS] vectors when
        return_pooler is True, otherwise per-candidate logits of shape [B, 5]."""
        # [B, 5, L] => [B * 5, L]
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
        outputs = self.albert(
            input_ids=flat_input_ids,
            attention_mask=flat_attention_mask,
            token_type_ids=flat_token_type_ids
        )
        pooler_output = outputs.pooler_output # [CLS]
        # [B*5, H] => [B*5, 1] => [B, 5]
        logits = self.scorer(pooler_output).view(-1, 5)
        result = pooler_output if return_pooler else logits
        return result
    # NOTE(review): helper is unused in this file -- presumably kept for callers elsewhere.
    def _to_tensor(self, it, device): return torch.tensor(it, device=device, dtype=torch.float)
    def predict(self, input_ids, attention_mask, token_type_ids):
        """
        return: [B, 5] softmax probabilities over the 5 candidates
        """
        logits = self._forward(input_ids, attention_mask, token_type_ids)
        logits = F.softmax(logits, dim=1)
        return logits
class BertBaseline(BertPreTrainedModel):
    """BERT multiple-choice baseline: scores 5 candidates per example."""
    def __init__(self, config, *args, **kwargs):
        super(BertBaseline, self).__init__(config)
        self.bert = BertModel(config)
        # Scores each candidate's [CLS] pooled vector with a single linear unit.
        self.scorer = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(config.hidden_size, 1)
        )
        self.init_weights()
    def forward(self, input_ids, attention_mask, token_type_ids, labels):
        """
        input_ids: [B, 5, L] -- 5 candidate sequences per example
        labels: [B, ] -- index of the correct candidate
        Returns (cross-entropy loss, number of correct predictions in batch).
        """
        # logits: [B, 5] (one score per candidate; see _forward)
        logits = self._forward(input_ids, attention_mask, token_type_ids)
        loss = F.cross_entropy(logits, labels) # get the CELoss
        with torch.no_grad():
            logits = F.softmax(logits, dim=1) # get the score
            predicts = torch.argmax(logits, dim=1) # find the result
            right_num = torch.sum(predicts == labels)
        return loss, right_num
    def _forward(self, input_ids, attention_mask, token_type_ids):
        """Encode the flattened candidates and return logits of shape [B, 5]."""
        # [B, 5, L] => [B * 5, L]
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
        outputs = self.bert(
            input_ids=flat_input_ids,
            attention_mask=flat_attention_mask,
            token_type_ids=flat_token_type_ids
        )
        pooler_output = outputs.pooler_output # [CLS]
        # [B*5, H] => [B*5, 1] => [B, 5]
        logits = self.scorer(pooler_output).view(-1, 5)
        return logits
    def predict(self, input_ids, attention_mask, token_type_ids):
        """
        return: [B, 5] raw logits.
        NOTE(review): unlike AlbertBaseline.predict, no softmax is applied
        here -- confirm whether that asymmetry is intentional.
        """
        return self._forward(input_ids, attention_mask, token_type_ids)
| StarcoderdataPython |
9783566 | from django.contrib import admin
from products.models import Products
# Expose the Products model in the Django admin interface.
admin.site.register(Products)
| StarcoderdataPython |
11318177 | <reponame>Lonewolf-Information-systems/owtf
import os
import shutil
import subprocess
# FIXME: Do not remove user's results. Need OWTF to fix its custom profiles
# options.
DIR_SCRIPTS = 'owtf/scripts'
DB_SETUP_SCRIPT = 'owtf/db_setup.sh'
DIR_OWTF_REVIEW = 'owtf_review'
def db_setup(cmd):
    """Reset the OWTF database.

    Runs the bundled db_setup.sh script with *cmd* ('clean' or 'init'),
    feeding a newline on stdin via the echo pipe.  Any other *cmd* is
    ignored.  Shell injection is not a concern because *cmd* is
    restricted to the two literals above.
    """
    if cmd not in ('clean', 'init'):
        return
    script = os.path.join(os.getcwd(), DIR_SCRIPTS, DB_SETUP_SCRIPT)
    db_process = subprocess.Popen(
        "/usr/bin/echo '\n' | %s %s" % (script, cmd),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    # BUGFIX: communicate() drains stdout/stderr while waiting; the previous
    # wait() could deadlock once the child filled an OS pipe buffer (see the
    # warning in the subprocess documentation).
    db_process.communicate()
def clean_owtf_review():
    """Remove the OWTF `owtf_review` output directory, ignoring errors."""
    target = os.path.join(os.getcwd(), DIR_OWTF_REVIEW)
    shutil.rmtree(target, ignore_errors=True)
| StarcoderdataPython |
5109936 | import json
import matplotlib.pyplot as plt
import numpy as np
def load_json_arr(json_path):
    """Parse a JSON-lines file: one JSON document per line, returned as a list."""
    with open(json_path, 'r') as handle:
        return [json.loads(line) for line in handle]
# # # # Configurations # # #
# Shared colours / labels for all figures below.
color_base = 'black'
color_APsml = 'tab:orange'
color_AP = 'tab:blue'
# NOTE(review): the folder name says lr00025 / freeze2 while these title
# labels say "lr 0.025" / "freeze at 1" -- confirm which is correct.
lr = "lr 0.025"
freeze = "freeze at 1"
experiment_folder = './output/1_class_litter/lr00025_1class/lr00025_freeze2_1class_ap50_iter1500'
experiment_metrics = load_json_arr(experiment_folder + '/metrics.json')
# # # # # # # # # # # # # #
# Plotting only Total Loss Metric together with Validation Loss.
# Each metrics.json entry is filtered for the key before extracting it,
# because not every logged entry carries every metric.
plt.plot(
    [x['iteration'] for x in experiment_metrics if 'total_loss' in x],
    [x['total_loss'] for x in experiment_metrics if 'total_loss' in x])
plt.plot(
    [x['iteration'] for x in experiment_metrics if 'total_val_loss' in x],
    [x['total_val_loss'] for x in experiment_metrics if 'total_val_loss' in x])
plt.legend(['total loss', 'validation loss'], loc='upper right')
plt.title(f"Total loss and Validation Loss for MRCNN Trained on TACO - {lr}")
plt.xlabel("Iteration")
plt.ylabel("Total Loss")
plt.show()
# Plotting bbox AP and Segm AP mccetrics.
fig, ax1 = plt.subplots()
ax1.set_xlabel('Iteration')
ax1.set_ylabel('Loss')
ax1.plot(
[x['iteration'] for x in experiment_metrics if 'total_loss' in x],
[x['total_loss'] for x in experiment_metrics if 'total_loss' in x], color=color_base, label="Total Loss")
ax1.tick_params(axis='y')
plt.legend(loc='upper left')
ax2 = ax1.twinx()
ax2.set_ylabel('AP')
ax2.plot(
[x['iteration'] for x in experiment_metrics if 'bbox/AP' in x],
[x['bbox/AP'] for x in experiment_metrics if 'bbox/AP' in x], color=color_AP, label="BBox AP", linestyle="dashed")
ax2.plot(
[x['iteration'] for x in experiment_metrics if 'bbox/AP50' in x],
[x['bbox/AP50'] for x in experiment_metrics if 'bbox/AP50' in x], color=color_AP, label="BBox AP@.50")
ax2.plot(
[x['iteration'] for x in experiment_metrics if 'bbox/AP75' in x],
[x['bbox/AP75'] for x in experiment_metrics if 'bbox/AP75' in x], color=color_AP, label="BBox AP@.75", linestyle="-.")
# Debugging:
print([x['bbox/AP50'] for x in experiment_metrics if 'bbox/AP' in x])
# Plotting Size dependent metrics (APs, APm, APl)
"""ax2.plot(
[x['iteration'] for x in experiment_metrics if 'bbox/APs' in x],
[x['bbox/APs'] for x in experiment_metrics if 'bbox/APs' in x], color=color_APsml, label="BBox APs")
ax2.plot(
[x['iteration'] for x in experiment_metrics if 'bbox/APm' in x],
[x['bbox/APm'] for x in experiment_metrics if 'bbox/APs' in x], color=color_APsml, label="BBox APm", linestyle="-.")
ax2.plot(
[x['iteration'] for x in experiment_metrics if 'bbox/APl' in x],
[x['bbox/APl'] for x in experiment_metrics if 'bbox/APl' in x], color=color_APsml, label="BBox APl", linestyle="dotted")
"""
ax2.tick_params(axis='y')
plt.legend(loc='upper right')
plt.title(f"MRCNN Metrics - Bounding Box AP - TACO at {lr}")
plt.show()
# Plotting segmentation metrics
fig, ax1 = plt.subplots()
ax1.set_xlabel('Iteration')
ax1.set_ylabel('Loss')
ax1.plot(
[x['iteration'] for x in experiment_metrics if 'total_loss' in x],
[x['total_loss'] for x in experiment_metrics if 'total_loss' in x], color=color_base, label="Total Loss")
ax1.tick_params(axis='y')
plt.legend(loc='upper left')
ax2 = ax1.twinx()
color = 'tab:orange'
ax2.set_ylabel('AP')
ax2.plot(
[x['iteration'] for x in experiment_metrics if 'segm/AP' in x],
[x['segm/AP'] for x in experiment_metrics if 'segm/AP' in x], color=color_AP, label="Segmentation AP", linestyle="dashed")
ax2.plot(
[x['iteration'] for x in experiment_metrics if 'segm/AP50' in x],
[x['segm/AP50'] for x in experiment_metrics if 'segm/AP50' in x], color=color_AP, label="Segmentation AP@.50")
ax2.plot(
[x['iteration'] for x in experiment_metrics if 'bbox/AP75' in x],
[x['segm/AP75'] for x in experiment_metrics if 'bbox/AP75' in x], color=color_AP, label="Segmentation AP@.75", linestyle="-.")
"""
# Plotting size dependent metrics
ax2.plot(
[x['iteration'] for x in experiment_metrics if 'bbox/APs' in x],
[x['segm/APs'] for x in experiment_metrics if 'bbox/APs' in x], color=color_APsml, label="Segmentation APs")
ax2.plot(
[x['iteration'] for x in experiment_metrics if 'bbox/APm' in x],
[x['segm/APm'] for x in experiment_metrics if 'bbox/APs' in x], color=color_APsml, label="Segmentation APm", linestyle="-.")
ax2.plot(
[x['iteration'] for x in experiment_metrics if 'bbox/APl' in x],
[x['segm/APl'] for x in experiment_metrics if 'bbox/APl' in x], color=color_APsml, label="Segmentation APl", linestyle="dotted")
"""
ax2.tick_params(axis='y')
plt.legend(loc='upper right')
plt.title(f"MRCNN Metrics - Segmentation AP - TACO at {lr}")
plt.show()
# Plotting Accuracy, False Positive and False Negative Metrics.
fig, ax1 = plt.subplots()
ax1.set_xlabel('Iteration')
ax1.set_ylabel('Loss')
ax1.plot(
    [x['iteration'] for x in experiment_metrics if 'total_loss' in x],
    [x['total_loss'] for x in experiment_metrics if 'total_loss' in x], color=color_base, label="Total Loss")
ax1.tick_params(axis='y')
plt.legend(loc='upper left')
ax2 = ax1.twinx()
# NOTE(review): this `color` variable is assigned but never used below.
color = 'tab:orange'
ax2.set_ylabel('Percent')
ax2.plot(
    [x['iteration'] for x in experiment_metrics if 'mask_rcnn/accuracy' in x],
    [x['mask_rcnn/accuracy'] for x in experiment_metrics if 'mask_rcnn/accuracy' in x], color=color_AP, label="Mask R-CNN Accuracy", linestyle="dashed")
ax2.plot(
    [x['iteration'] for x in experiment_metrics if 'mask_rcnn/false_negative' in x],
    [x['mask_rcnn/false_negative'] for x in experiment_metrics if 'mask_rcnn/false_negative' in x], color=color_APsml, label="Mask R-CNN False Negative")
ax2.plot(
    [x['iteration'] for x in experiment_metrics if 'mask_rcnn/false_positive' in x],
    [x['mask_rcnn/false_positive'] for x in experiment_metrics if 'mask_rcnn/false_positive' in x], color="tab:red", label="Mask R-CNN False Positive", linestyle="-.")
ax2.tick_params(axis='y')
plt.legend(loc='best')
plt.title(f"MRCNN Performance Metrics - {lr} - {freeze}")
plt.show()
6613277 | <gh_stars>0
#!/usr/bin/env python
# from __future__ import print_function
import code
import readline
# context provides the slurm module
# from context import slurm
import modu.slurm as slurm
import modu.color_printer as cp
print("----------------------------------------------------------------------")
cp.printWarn("import modu.slurm as slurm")
cp.printWarn("import modu.color_printer as cp")
cp.printWarn("states = slurm.Slurm.getNonEmptyStates()")
print("----------------------------------------------------------------------")
states = slurm.Slurm.getNonEmptyStates()
vars = globals().copy()
vars.update(locals())
shell = code.InteractiveConsole(vars)
shell.interact()
| StarcoderdataPython |
3213356 | <filename>sphinx_yaml_config/__init__.py
"""A small sphinx extension to let you configure a site with YAML metadata."""
from pathlib import Path
__version__ = "0.0.1dev0"
from yaml import safe_load
def add_yaml_config(app, config):
    """Merge every key/value from the configured YAML file into app.config.

    Reads the path from the ``yaml_config_path`` config value; an empty or
    missing path skips the merge.  Raises ValueError when a path is given
    but no file exists there.
    """
    path_yaml = app.config['yaml_config_path']
    # If no path is given we'll just skip (also tolerates None now,
    # where len() previously raised TypeError).
    if not path_yaml:
        return

    # Load the YAML and update the Sphinx configuration.
    path_yaml = Path(path_yaml)
    if not path_yaml.exists():
        raise ValueError(f"Could not find YAML configuration file at path {path_yaml}")
    # BUGFIX: read_text() closes the file; the previous safe_load(path.open())
    # left the handle open (ResourceWarning / fd leak).
    yaml_config = safe_load(path_yaml.read_text())
    for key, val in yaml_config.items():
        app.config[key] = val
def setup(app):
    """Register the extension: one config value plus the config-inited hook."""
    # Configuration value naming the YAML file to merge into the config.
    app.add_config_value("yaml_config_path", "", "html")
    # Merge the YAML keys once Sphinx has initialised its configuration.
    app.connect('config-inited', add_yaml_config)
    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
| StarcoderdataPython |
11277629 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.utils.encoding import force_text
from django.core import serializers
def old_to_new_questions(apps, schema_editor):
    """Forward data migration: copy every old Question into the new
    NewQuestion + EditorItem pair, carrying stamps, taxonomy M2Ms,
    resources, extensions, copy_of links and tags across."""
    Question = apps.get_model('editor', 'Question')
    NewQuestion = apps.get_model('editor', 'NewQuestion')
    EditorItem = apps.get_model('editor', 'EditorItem')
    NewStampOfApproval = apps.get_model('editor', 'NewStampOfApproval')
    TaggedQuestion = apps.get_model('editor', 'TaggedQuestion')
    TaggedItem = apps.get_model('editor', 'TaggedItem')
    Resource = apps.get_model('editor', 'Resource')
    User = apps.get_model('auth', 'User')
    ContentType = apps.get_model('contenttypes', 'contenttype')
    editoritem_ct = ContentType.objects.get_for_model(EditorItem)
    # Disable auto_now so save() keeps the copied last_modified timestamps
    # instead of stamping the migration time.
    EditorItem._meta.get_field('last_modified').auto_now = False
    for q in Question.objects.all():
        # NewQuestion keeps the old primary key so external references survive.
        nq = NewQuestion()
        nq.pk = q.pk
        ei = EditorItem()
        ei.name = q.name
        ei.slug = q.slug
        ei.filename = q.filename
        ei.author = q.author
        ei.public_access = q.public_access
        ei.licence = q.licence
        ei.content = q.content
        ei.metadata = q.metadata
        ei.published = q.published
        ei.published_date = q.published_date
        ei.last_modified = q.last_modified
        ei.ability_level_start = q.ability_level_start
        ei.ability_level_end = q.ability_level_end
        ei.save()
        nq.editoritem = ei
        nq.save()
        # Recreate the current stamp of approval against the new item.
        if q.current_stamp:
            ns = NewStampOfApproval()
            ns.object = ei
            ns.user = User.objects.get(pk=q.current_stamp.user.pk)
            ns.status = q.current_stamp.status
            ns.date = q.current_stamp.date
            ns.save()
            ei.current_stamp = ns
        for al in q.ability_levels.all():
            ei.ability_levels.add(al)
        for s in q.subjects.all():
            ei.subjects.add(s)
        for t in q.topics.all():
            ei.topics.add(t)
        for r in q.resources.all():
            r2 = Resource.objects.create(owner=ei.author, file=r.image)
            nq.resources.add(r2)
        for e in q.extensions.all():
            nq.extensions.add(e)
        # Re-apply timestamps last (M2M adds may have re-saved the item),
        # then persist both records.
        ei.created = q.created
        ei.last_modified = q.last_modified
        ei.save()
        nq.save()
    # Second pass: copy_of links can only be resolved once every question
    # has been migrated.
    for q in Question.objects.all():
        if q.copy_of:
            nq = NewQuestion.objects.get(pk=q.pk)
            nq.editoritem.copy_of = NewQuestion.objects.get(pk=q.copy_of.pk).editoritem
            nq.editoritem.save()
    # Re-point generic tags from old questions to the new editor items.
    for tq in TaggedQuestion.objects.all():
        TaggedItem.objects.create(content_type=editoritem_ct, object_id=NewQuestion.objects.get(pk=tq.object_id).editoritem.pk, tag=tq.tag)
def remove_new_questions(apps, schema_editor):
    """Reverse migration: wipe every row of the new-style editor models."""
    doomed = ('NewQuestion', 'NewExam', 'EditorItem', 'TaggedItem', 'Access', 'NewStampOfApproval')
    for model_name in doomed:
        apps.get_model('editor', model_name).objects.all().delete()
def old_exams_to_new(apps, schema_editor):
    """Copy every legacy Exam into a NewExam + EditorItem pair.

    The metadata shared with questions moves onto an EditorItem,
    exam-specific fields (theme, locale) stay on NewExam, the latest stamp
    of approval is cloned, and the ability-level/subject/topic M2M links
    are copied.  Finally every ExamQuestion link is recreated as a
    NewExamQuestion row.
    """
    Exam = apps.get_model('editor', 'Exam')
    NewExam = apps.get_model('editor', 'NewExam')
    NewQuestion = apps.get_model('editor', 'NewQuestion')
    EditorItem = apps.get_model('editor', 'EditorItem')
    NewStampOfApproval = apps.get_model('editor', 'NewStampOfApproval')
    ExamQuestion = apps.get_model('editor', 'ExamQuestion')
    NewExamQuestion = apps.get_model('editor', 'NewExamQuestion')
    User = apps.get_model('auth', 'User')
    # Keep the original timestamps: stop auto_now from overwriting last_modified.
    EditorItem._meta.get_field('last_modified').auto_now = False
    for e in Exam.objects.all():
        ne = NewExam()
        # Reuse the old primary key so later steps can map old -> new by pk.
        ne.pk = e.pk
        ei = EditorItem()
        ei.name = e.name
        ei.slug = e.slug
        ei.filename = e.filename
        ei.author = e.author
        ei.public_access = e.public_access
        ei.licence = e.licence
        ei.content = e.content
        ei.metadata = e.metadata
        ei.published = e.published
        ei.published_date = e.published_date
        ei.last_modified = e.last_modified
        ei.ability_level_start = e.ability_level_start
        ei.ability_level_end = e.ability_level_end
        ne.theme = e.theme
        ne.custom_theme = e.custom_theme
        ne.locale = e.locale
        ei.save()
        ne.editoritem = ei
        ne.save()
        if e.current_stamp:
            # Clone the current stamp of approval against the new editor item.
            ns = NewStampOfApproval()
            ns.object = ei
            ns.user = User.objects.get(pk=e.current_stamp.user.pk)
            ns.status = e.current_stamp.status
            ns.date = e.current_stamp.date
            ns.save()
            ei.current_stamp = ns
        for al in e.ability_levels.all():
            ei.ability_levels.add(al)
        for s in e.subjects.all():
            ei.subjects.add(s)
        for t in e.topics.all():
            ei.topics.add(t)
        # Re-apply created/last_modified after the saves above, then persist.
        ei.created = e.created
        ei.last_modified = e.last_modified
        ei.save()
        ne.save()
    for eq in ExamQuestion.objects.all():
        neq = NewExamQuestion()
        neq.exam = NewExam.objects.get(pk=eq.exam.pk)
        neq.question = NewQuestion.objects.get(pk=eq.question.pk)
        neq.qn_order = eq.qn_order
        neq.save()
def remove_new_exams(apps, schema_editor):
    """Reverse migration: delete all migrated exams and their question links."""
    for model_name in ('NewExam', 'NewExamQuestion'):
        apps.get_model('editor', model_name).objects.all().delete()
def old_access_to_new(apps, schema_editor):
    """Copy per-question and per-exam access rights onto the new Access model."""
    new_exam = apps.get_model('editor', 'newexam')
    new_question = apps.get_model('editor', 'newquestion')
    question_access = apps.get_model('editor', 'QuestionAccess')
    exam_access = apps.get_model('editor', 'ExamAccess')
    access = apps.get_model('editor', 'Access')
    for old in question_access.objects.all():
        target = new_question.objects.get(pk=old.question.pk).editoritem
        access.objects.create(item=target, user=old.user, access=old.access)
    for old in exam_access.objects.all():
        target = new_exam.objects.get(pk=old.exam.pk).editoritem
        access.objects.create(item=target, user=old.user, access=old.access)
def remove_new_access(apps, schema_editor):
    """Reverse migration: drop every migrated Access row."""
    access_model = apps.get_model('editor', 'Access')
    access_model.objects.all().delete()
def itemchanged_timeline_items(apps, schema_editor):
    """Backfill a 'created' timeline entry for every migrated editor item.

    After each ItemChangedTimelineItem is created, the TimelineItem that
    matches it (by generic content type + pk) is fetched and its auto-set
    date is rewound to the item's real creation date.
    """
    EditorItem = apps.get_model('editor', 'editoritem')
    ItemChangedTimelineItem = apps.get_model('editor', 'itemchangedtimelineitem')
    TimelineItem = apps.get_model('editor', 'timelineitem')
    ContentType = apps.get_model('contenttypes', 'contenttype')
    itemchange_ct = ContentType.objects.get_for_model(ItemChangedTimelineItem)
    for ei in EditorItem.objects.all():
        it = ItemChangedTimelineItem.objects.create(user=ei.author, object=ei, verb='created')
        ti = TimelineItem.objects.get(object_id=it.pk, object_content_type=itemchange_ct)
        # Overwrite the auto-set timestamp with the item's original creation date.
        ti.date = ei.created
        ti.save()
def remove_itemchanged_timeline_items(apps, schema_editor):
    """Reverse migration: drop the backfilled 'created' timeline entries."""
    model = apps.get_model('editor', 'itemchangedtimelineitem')
    model.objects.all().delete()
def copy_revisions(apps, schema_editor):
    """Re-point historical reversion Versions at the version-2 models.

    For every non-empty-comment Version of an old Exam/Question, the stored
    snapshot is deserialized, its fields are copied onto the (in-memory)
    EditorItem / NewExam / NewQuestion, and two new Version rows are written
    against the same Revision: one for the EditorItem and one for the
    NewExam/NewQuestion.  Note the modified ei/ne/nq objects are only
    serialized into those Version rows — they are never save()d here.
    Finally, RestorePoints are rebuilt from the EditorItem versions and
    their timeline dates rewound to the revision creation dates.

    Relies on module-level `force_text` and `serializers` imports (earlier
    in this file).
    """
    Version = apps.get_model('reversion', 'Version')
    ContentType = apps.get_model('contenttypes', 'contenttype')
    Exam = apps.get_model('editor', 'exam')
    Question = apps.get_model('editor', 'question')
    NewExam = apps.get_model('editor', 'newexam')
    NewQuestion = apps.get_model('editor', 'newquestion')
    User = apps.get_model('auth', 'user')
    Theme = apps.get_model('editor', 'theme')
    Licence = apps.get_model('editor', 'licence')
    EditorItem = apps.get_model('editor', 'editoritem')
    TimelineItem = apps.get_model('editor', 'timelineitem')
    RestorePoint = apps.get_model('editor', 'restorepoint')
    exam_ct = ContentType.objects.get_for_model(Exam)
    question_ct = ContentType.objects.get_for_model(Question)
    newexam_ct = ContentType.objects.get_for_model(NewExam)
    newquestion_ct = ContentType.objects.get_for_model(NewQuestion)
    editoritem_ct = ContentType.objects.get_for_model(EditorItem)
    restorepoint_ct = ContentType.objects.get_for_model(RestorePoint)
    for v in Version.objects.exclude(revision__comment=''):
        if v.content_type == exam_ct:
            # Deserialize the historical exam snapshot stored in this version.
            data = v.serialized_data
            data = force_text(data.encode("utf8"))
            e = list(serializers.deserialize(v.format, data, ignorenonexistent=True))[0].object
            try:
                ne = NewExam.objects.get(pk=v.object_id)
            except NewExam.DoesNotExist:
                # Exam was never migrated; skip its history.
                continue
            ei = ne.editoritem
            ei.name = e.name
            ei.author = User.objects.get(pk=e.author.pk)
            ei.content = e.content
            ei.created = e.created
            ei.content = e.content
            ei.share_uuid = e.share_uuid
            ei.last_modified = e.last_modified
            if e.licence:
                try:
                    ei.licence = Licence.objects.get(pk=e.licence.pk)
                except Licence.DoesNotExist:
                    pass
            ei.public_access = e.public_access
            ei.slug = e.slug
            ei.metadata = e.metadata
            ne.locale = e.locale
            ne.theme = e.theme
            try:
                if e.custom_theme_id is not None:
                    ne.custom_theme = Theme.objects.get(pk=e.custom_theme_id)
            except Theme.DoesNotExist:
                pass
            # Version of the EditorItem as it looked at this revision.
            nve = Version()
            nve.format = v.format
            nve.content_type = editoritem_ct
            nve.object_id = ei.pk
            nve.object_id_int = ei.pk
            nve.object_repr = repr(ei)
            nve.revision = v.revision
            nve.serialized_data = serializers.serialize(v.format, (ei,))
            nve.save()
            # Companion Version for the NewExam under the same revision.
            nvx = Version()
            nvx.format = 'json'
            nvx.content_type = newexam_ct
            nvx.object_id = v.object_id
            nvx.object_id_int = v.object_id
            nvx.object_repr = repr(ne)
            nvx.revision = v.revision
            nvx.serialized_data = serializers.serialize(v.format, (ne,))
            nvx.save()
        elif v.content_type == question_ct:
            # Same procedure for historical question snapshots.
            data = v.serialized_data
            data = force_text(data.encode("utf8"))
            q = list(serializers.deserialize(v.format, data, ignorenonexistent=True))[0].object
            try:
                nq = NewQuestion.objects.get(pk=v.object_id)
            except NewQuestion.DoesNotExist:
                continue
            ei = nq.editoritem
            ei.name = q.name
            ei.author = User.objects.get(pk=q.author.pk)
            ei.content = q.content
            ei.created = q.created
            ei.content = q.content
            ei.share_uuid = q.share_uuid
            ei.last_modified = q.last_modified
            if q.licence:
                try:
                    ei.licence = Licence.objects.get(pk=q.licence.pk)
                except Licence.DoesNotExist:
                    pass
            ei.public_access = q.public_access
            ei.slug = q.slug
            ei.metadata = q.metadata
            try:
                if q.copy_of_id is not None:
                    # Point copy_of at the migrated original, when it exists.
                    nq2 = NewQuestion.objects.filter(pk=q.copy_of_id)
                    if nq2.exists():
                        ei.copy_of = nq2.first().editoritem
            except (NewQuestion.DoesNotExist, Question.DoesNotExist):
                pass
            nve = Version()
            nve.format = v.format
            nve.content_type = editoritem_ct
            nve.object_id = ei.pk
            nve.object_id_int = ei.pk
            nve.object_repr = repr(ei)
            nve.revision = v.revision
            nve.serialized_data = serializers.serialize(v.format, (ei,))
            nve.save()
            nvq = Version()
            nvq.format = 'json'
            nvq.content_type = newquestion_ct
            nvq.object_id = v.object_id
            nvq.object_id_int = v.object_id
            nvq.object_repr = repr(nq)
            nvq.revision = v.revision
            nvq.serialized_data = serializers.serialize(v.format, (nq,))
            nvq.save()
    def set_timelineitem_date_auto_now(v):
        # Toggle auto_now_add on TimelineItem.date; field objects live on the
        # model class, so flipping them via any instance affects all saves.
        ti = TimelineItem()
        for field in ti._meta.local_fields:
            if field.name == "date":
                field.auto_now_add = v
    set_timelineitem_date_auto_now(True)
    RestorePoint.objects.all().delete()
    # Rebuild restore points from the freshly written EditorItem versions.
    for v in Version.objects.exclude(revision__comment='').exclude(revision__user=None).filter(content_type=editoritem_ct):
        ei = EditorItem.objects.get(pk=v.object_id)
        rp = RestorePoint.objects.create(object=ei, description=v.revision.comment, user=v.revision.user, revision=v.revision)
    set_timelineitem_date_auto_now(False)
    # Rewind each restore point's timeline date to the revision's real date.
    for rp in RestorePoint.objects.all():
        ti = TimelineItem.objects.get(object_content_type=restorepoint_ct, object_id=rp.pk)
        for field in ti._meta.local_fields:
            if field.name == "date":
                field.auto_now_add = False
        ti.date = rp.revision.date_created
        ti.save()
def delete_new_revisions(apps, schema_editor):
    """Reverse migration: remove reversion Versions written for the new models."""
    version_model = apps.get_model('reversion', 'Version')
    content_type = apps.get_model('contenttypes', 'contenttype')
    restore_point = apps.get_model('editor', 'restorepoint')
    # Content types of every model whose generated versions must go.
    doomed_cts = [
        content_type.objects.get(app_label='editor', model=model_name)
        for model_name in ('newexam', 'newquestion', 'editoritem')
    ]
    version_model.objects.filter(content_type__in=doomed_cts).delete()
    restore_point.objects.all().delete()
def set_newstamp_dates(apps, schema_editor):
    """Copy old stamp dates onto the timeline rows of the migrated stamps."""
    ContentType = apps.get_model('contenttypes', 'contenttype')
    TimelineItem = apps.get_model('editor', 'timelineitem')
    NewStampOfApproval = apps.get_model('editor', 'newstampofapproval')
    StampOfApproval = apps.get_model('editor', 'stampofapproval')
    def set_timelineitem_date_auto_now(v):
        # Toggle auto_now_add on TimelineItem.date via any existing row;
        # field objects are shared on the model class.
        ti = TimelineItem.objects.first()
        if ti:
            for field in ti._meta.local_fields:
                if field.name == "date":
                    field.auto_now_add = v
    set_timelineitem_date_auto_now(False)
    newstamp_ct = ContentType.objects.get_for_model(NewStampOfApproval)
    for ns in NewStampOfApproval.objects.all():
        # The stamped editor item wraps either an exam or a question.
        try:
            rel_obj = ns.object.exam
        except Exception:
            rel_obj = ns.object.question
        # Latest legacy stamp recorded against the same exam/question id.
        os = StampOfApproval.objects.filter(object_id=rel_obj.id).last()
        if os is not None:
            ti = TimelineItem.objects.get(object_content_type=newstamp_ct, object_id=ns.pk)
            for field in ti._meta.local_fields:
                if field.name == "date":
                    # Stop auto_now_add from clobbering the value set below.
                    field.auto_now_add = False
            ti.date = os.date
            ti.save()
def set_project(apps, schema_editor):
    """Attach every editor item to its author's personal project."""
    editor_item_model = apps.get_model('editor', 'EditorItem')
    # Disable auto_now so saving does not clobber the stored timestamps.
    editor_item_model._meta.get_field('last_modified').auto_now = False
    for item in editor_item_model.objects.all():
        item.project = item.author.userprofile.personal_project
        item.save()
def copy_comments(apps, schema_editor):
    """Re-home question comments onto the corresponding editor items.

    New Comment rows are created against the EditorItem content type, and
    the timeline rows of every editor-item comment get their auto dates
    rewound to the comment's stored date.
    """
    ContentType = apps.get_model('contenttypes', 'contenttype')
    TimelineItem = apps.get_model('editor', 'timelineitem')
    Comment = apps.get_model('editor', 'comment')
    Question = apps.get_model('editor', 'question')
    NewQuestion = apps.get_model('editor', 'newquestion')
    EditorItem = apps.get_model('editor', 'editoritem')
    question_ct = ContentType.objects.get_for_model(Question)
    editoritem_ct = ContentType.objects.get_for_model(EditorItem)
    comment_ct = ContentType.objects.get_for_model(Comment)
    for oc in Comment.objects.filter(object_content_type=question_ct):
        try:
            ei = NewQuestion.objects.get(pk=oc.object_id).editoritem
        except NewQuestion.DoesNotExist:
            # Question was never migrated; leave its comment behind.
            continue
        nc = Comment.objects.create(object_id=ei.pk, object_content_type=editoritem_ct, user=oc.user, text=oc.text)
        nc.date = oc.date
        nc.save()
    for c in Comment.objects.filter(object_content_type=editoritem_ct):
        ti = TimelineItem.objects.get(object_content_type=comment_ct, object_id=c.pk)
        for field in ti._meta.local_fields:
            if field.name == "date":
                # Stop auto_now_add from overwriting the date set below.
                field.auto_now_add = False
        ti.date = c.date
        ti.save()
class Migration(migrations.Migration):
    """Data migration moving the version-1 Question/Exam content onto the
    version-2 EditorItem-based models (schema created in 0013)."""
    dependencies = [
        ('editor', '0013_version_2_models'),
        ('reversion', '__first__'),
        ('auth', '__first__'),
        ('accounts', '0013_userprofile_avatar'),
    ]
    operations = [
        migrations.RunPython(old_to_new_questions, remove_new_questions),
        # NOTE(review): the forward SQL below is a bare SELECT, which writes
        # nothing — it looks like an "INSERT INTO editor_taggeditem ... SELECT"
        # was intended (the reverse op deletes from that table).  Confirm
        # against the repository history before changing it.
        migrations.RunSQL(
            """
            SELECT ei.id, et.id, ct.id
            FROM editor_taggedquestion as tq
            JOIN editor_editortag as et ON tq.tag_id=et.id
            JOIN editor_question AS q ON q.id=tq.object_id
            JOIN editor_newquestion AS nq on nq.id=q.id
            JOIN editor_editoritem AS ei ON ei.id=nq.editoritem_id
            JOIN django_content_type as ct ON ct.app_label="editor" AND ct.model="editoritem"
            """,
            """
            DELETE FROM editor_taggeditem
            """),
        migrations.RunPython(old_exams_to_new, remove_new_exams),
        migrations.RunPython(itemchanged_timeline_items, remove_itemchanged_timeline_items),
        migrations.RunPython(copy_revisions, delete_new_revisions),
        migrations.RunPython(old_access_to_new, remove_new_access),
        migrations.RunPython(set_newstamp_dates, migrations.RunPython.noop),
        migrations.RunPython(set_project, migrations.RunPython.noop),
        migrations.RunPython(copy_comments, migrations.RunPython.noop),
    ]
| StarcoderdataPython |
5090625 | <filename>altapay/invoice.py
from __future__ import absolute_import, unicode_literals
from altapay.payment import Payment
class Invoice(Payment):
    """Payment subclass that issues invoice reservations against AltaPay."""

    def create(self, terminal, shop_orderid, amount, currency, **kwargs):
        """Create an invoice reservation request.

        :arg terminal: name of the targeted AltaPay terminal
        :arg shop_orderid: your order ID to be attached to the payment resource
        :arg amount: order amount in floating point
        :arg currency: currency for the payment resource
        :arg **kwargs: remaining, optional, payment request parameters; see
            the AltaPay documentation for a full list.  Note that you will
            need to use lists and dictionaries to map the URL structures from
            the AltaPay documentation into these kwargs.
        :rtype: :samp:`True` if a payment was created, otherwise :samp:`False`.
        """
        parent = super(Invoice, self)
        return parent.create(terminal, shop_orderid, amount, currency, **kwargs)

    def get_post_url(self):
        """Endpoint used by :meth:`create` for invoice reservations."""
        return 'API/createInvoiceReservation'
| StarcoderdataPython |
1767701 | <reponame>pimpale/BQuest-Backend
from rest_framework import serializers
from drf_writable_nested import WritableNestedModelSerializer
from django.contrib.auth.models import User, Group
from users.models import Profile, Mentor
from .models import Request
from users.serializers import ProfileSerializer, MentorSerializer
class RequestSerializer(WritableNestedModelSerializer):
    """Serializer for mentorship Requests, embedding the full mentee profile
    and mentor records.  All fields are declared read-only below, so the
    nested serializers are used for output only."""
    mentee = ProfileSerializer()
    mentor = MentorSerializer()
    class Meta:
        model = Request
        fields = ('mentee', 'mentor', 'email_body', 'preferred_mentee_email', 'phone', 'date_created',)
        read_only_fields = ('mentee', 'mentor', 'email_body', 'preferred_mentee_email', 'phone', 'date_created',)
4872447 | """site1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include, re_path
from . import views
from .ajax import get_ciudades, get_modelos, get_cars_by_model_name, get_model_name
urlpatterns = [
    # Car listing / search pages.
    path(r'index', views.CarroListView.as_view(), name='ind'),
    path(r'busqueda/', views.form_busqueda, name='busqueda'),
    path(r'busqueda/<int:pk>/', views.carro_detail, name="carro_detail"),
    path(r'new/', views.carro_new, name='carro_new'),
    # AJAX endpoints for the dependent city/model drop-downs.
    path(r'ajax/get_ciudades', get_ciudades, name = 'get_ciudades'),
    path(r'ajax/get_modelos', get_modelos, name = 'get_modelos'),
    path(r'ajax/get_cars_by_model_name', get_cars_by_model_name, name = 'get_cars_by_model_name'),
    path(r'ajax/get_model_name', get_model_name, name = 'get_model_name'),
    # Authentication.
    path(r'login/', views.login_, name='login_'),
    path(r'logout/', views.logout_, name='logout_'),
    path(r'signup/', views.signup, name='signup_'),
    # NOTE(review): no route name here, unlike every other entry — confirm.
    path(r'lista/',views.serie_list),
] | StarcoderdataPython |
5191716 | <filename>user/views.py
import hashlib
from django.shortcuts import redirect, render
from user.util import quiet_logout
from .models import User
# Create your views here.
def signup(request):
    """Render the signup page.

    NOTE(review): this renders 'user/login.html', same as login() below —
    confirm whether a dedicated signup template was intended.
    """
    return render(request, 'user/login.html', {})
def login(request):
    """Render the login form page."""
    return render(request, 'user/login.html', {})
def authenticate(request):
    """Validate submitted credentials and start a session.

    The stored password is a salted SHA-256 digest iterated five times
    (one initial hash of password+salt, then four re-hashes of the hex
    digest).  On success the user's id is put in the session and the user
    is sent to /send; on any failure they are bounced back to /login.
    """
    try:
        user = User.objects.get(username=request.POST.get('username'))
    except User.DoesNotExist:
        # Unknown username: treat like a bad password rather than crashing.
        return redirect('/login')
    hash = hashlib.sha256((request.POST.get('password') + user.salt).encode('utf-8')).hexdigest()
    for a in range(0, 4):
        hash = hashlib.sha256(hash.encode('utf-8')).hexdigest()
    # BUG FIX: this comparison contained a corrupted "<PASSWORD>" placeholder
    # (a syntax error); compare against the iterated digest computed above.
    if user.password == hash:
        request.session['userid'] = user.userid
        return redirect('/send')
    else:
        return redirect('/login')
def logout(request):
    """End the current session (via quiet_logout) and return to the home page."""
    quiet_logout(request)
    return redirect('/')
def view_user():
    """NOTE(review): apparent stub — takes no `request` (unlike the other
    views here) and redirects to an empty path.  Confirm before wiring up."""
    return redirect('')
| StarcoderdataPython |
84709 | from MDRSREID.Loss_Meter import Loss
import torch.nn as nn
import torch
from MDRSREID.utils.meter import RecentAverageMeter as Meter
class IDLoss(Loss):
    """Identity (classification) cross-entropy loss over one or more part logits.

    Computes one CE loss per tensor in ``pred['cls_feat_list']``, averages
    them into a single scalar, tracks running averages in ``self.meter_dict``
    (whole loss plus one meter per part), optionally logs to TensorBoard,
    and returns the weight-scaled total.
    """
    def __init__(self, cfg, tb_writer=None):
        super(IDLoss, self).__init__(cfg, tb_writer=tb_writer)
        self.criterion = nn.CrossEntropyLoss(reduction='none')  # 'none' | 'mean' | 'sum'.
        # Format string used to name the per-part meters: '#1', '#2', ...
        self.part_fmt = '#{}'
    def __call__(self, item, pred, step=0, **kwargs):
        # One scalar CE loss per classification branch.
        loss_list = [self.criterion(logits, item['label']).mean() for logits in pred['cls_feat_list']]
        # New version of pytorch allow stacking 0-dim tensors, but not concatenating.
        loss = torch.stack(loss_list).mean()  # sum()
        # Meter: stores and computes the average of recent values
        self.store_calculate_loss(loss)
        # May calculate part loss separately
        self.may_calculate_part_loss(loss_list)
        # May record losses.
        self.may_record_loss(loss_list, step)
        # Scale by loss weight
        loss *= self.cfg.weight
        return {'loss': loss}
    def store_calculate_loss(self, loss):
        """
        :param loss: the averaged total loss (0-dim tensor)
        :return: None

        Meter: stores and computes the average of recent values.
        """
        if self.cfg.name not in self.meter_dict:
            # Here use RecentAverageMeter as Meter
            self.meter_dict[self.cfg.name] = Meter(name=self.cfg.name)
        # Update the meter, store the current whole loss.
        self.meter_dict[self.cfg.name].update(loss.item())
    def may_calculate_part_loss(self, loss_list):
        """
        :param loss_list: each part loss
        :return: None

        For each part loss (only when there is more than one part),
        store and compute its average of recent values separately.
        """
        if len(loss_list) > 1:
            # stores and computes each part average of recent values
            for i in range(len(loss_list)):
                # if there is not the meter of the part, create a new one.
                if self.part_fmt.format(i + 1) not in self.meter_dict:
                    self.meter_dict[self.part_fmt.format(i + 1)] = Meter(name=self.part_fmt.format(i + 1))
                # Update the meter, store the current part loss
                self.meter_dict[self.part_fmt.format(i + 1)].update(loss_list[i].item())
    def may_record_loss(self, loss_list, step):
        """
        :param loss_list: each part loss
        :param step: global step used as the TensorBoard x-axis
        :return: None

        Use TensorBoard (when a writer was supplied) to record the losses.
        """
        if self.tb_writer is not None:
            self.tb_writer.add_scalars(main_tag=self.cfg.name,
                                       tag_scalar_dict={self.cfg.name: self.meter_dict[self.cfg.name].avg},
                                       global_step=step
                                       )
            # Record each part loss
            if len(loss_list) > 1:
                self.tb_writer.add_scalars(main_tag='Part ID Losses',
                                           tag_scalar_dict={self.part_fmt.format(i + 1): self.meter_dict[self.part_fmt.format(i + 1)].avg
                                                            for i in range(len(loss_list))},
                                           global_step=step
                                           )
| StarcoderdataPython |
6586241 | #import all the modules
from tkinter import *
import sqlite3
import tkinter.messagebox
# NOTE(review): the raw backslashes in this Windows path rely on '\S', '\D',
# '\s' not being escape sequences; a raw string (r"...") would be safer.
conn = sqlite3.connect("D:\Store Management System\Database\store.db")
c = conn.cursor()
# Fetch the highest id currently in the inventory table; it is shown in the
# Database window's log box so the operator knows the valid id range.
result = c.execute("SELECT Max(id) from inventory")
for r in result:
    id = r[0]  # NOTE(review): shadows the builtin `id`; read by Database.__init__
class Database:
    """Tkinter window for searching and updating rows of the `inventory` table.

    Uses the module-level sqlite connection (`conn`/`c`).  The operator types
    an id, presses Search to populate the entry widgets, edits them, and
    presses "Update Database" to write the row back.
    """
    def __init__(self, master, *args, **kwargs):
        # master is the Tk root window; all widgets are placed on it directly.
        self.master = master
        self.heading = Label(master, text="Update Shopping Store database", font=('arial 40 bold'), fg='steelblue')
        self.heading.place(x=400, y=0)
        #label and entry for id
        self.id_1e = Label(master, text="Enter Id", font=('arial 18 bold'))
        self.id_1e.place(x=0 ,y=70)
        self.id_1eb = Entry(master, font=('arial 18 bold'), width=10)
        self.id_1eb.place(x=380, y=70)
        self.btn_search = Button(master , text="Search",width=15, height=2, bg='orange', command=self.search)
        self.btn_search.place(x=550, y=70)
        #labels for the window
        self.name_1 = Label(master, text="Enter Product Name", font=('arial 18 bold'))
        self.name_1.place(x=0,y=120)
        self.stock_1 = Label(master, text="Enter Stocks", font=('arial 18 bold'))
        self.stock_1.place(x=0,y=170)
        self.cp_1 = Label(master, text="Enter Cost-Price", font=('arial 18 bold'))
        self.cp_1.place(x=0,y=220)
        self.sp_1 = Label(master, text="Enter Selling Price", font=('arial 18 bold'))
        self.sp_1.place(x=0,y=270)
        self.totalcp_1 = Label(master, text="Enter Total Cost Price", font=('arial 18 bold'))
        self.totalcp_1.place(x=0,y=320)
        self.totalsp_1 = Label(master, text="Enter Total Selling Price", font=('arial 18 bold'))
        self.totalsp_1.place(x=0,y=370)
        self.vendor_1 = Label(master, text="Enter Vendor Name", font=('arial 18 bold'))
        self.vendor_1.place(x=0,y=420)
        self.vendor_phone_1 = Label(master, text="Enter Vendor Phone Number", font=('arial 18 bold'))
        self.vendor_phone_1.place(x=0,y=470)
        # entries for the labels
        self.name_e = Entry(master, width=25, font=('arial 18 bold'))
        self.name_e.place(x=380, y=120)
        self.stock_e = Entry(master, width=25, font=('arial 18 bold'))
        self.stock_e.place(x=380, y=170)
        self.cp_e = Entry(master, width=25, font=('arial 18 bold'))
        self.cp_e.place(x=380, y=220)
        self.sp_e = Entry(master, width=25, font=('arial 18 bold'))
        self.sp_e.place(x=380, y=270)
        self.totalcp_e = Entry(master, width=25, font=('arial 18 bold'))
        self.totalcp_e.place(x=380, y=320)
        self.totalsp_e = Entry(master, width=25, font=('arial 18 bold'))
        self.totalsp_e.place(x=380, y=370)
        self.vendor_e = Entry(master, width=25, font=('arial 18 bold'))
        self.vendor_e.place(x=380, y=420)
        self.vendor_phone_e = Entry(master, width=25, font=('arial 18 bold'))
        self.vendor_phone_e.place(x=380, y=470)
        # Button to add to the database
        self.btn_add = Button(master, text="Update Database", width=25, height=2, bg='steelblue', fg='white', command=self.update)
        self.btn_add.place(x=520, y=520)
        # text box for the logs
        self.tBox = Text(master, width=60, height=18)
        self.tBox.place(x=750, y=70)
        # `id` here is the module-level max inventory id computed at import time.
        self.tBox.insert(END, "ID has reached upto: " + str(id))
    def search(self, *args, **kwargs):
        """Look up the typed id and copy the matching row into the entries."""
        sql = "SELECT * FROM inventory WHERE id=?"
        result = c.execute(sql, (self.id_1eb.get(), ))
        # Column order: id, name, stock, cp, sp, totalcp, totalsp,
        # assumed_profit, vendor, vendor_phoneno.
        for r in result:
            self.n1 = r[1] #name
            self.n2 = r[2] #stock
            self.n3 = r[3] #cp
            self.n4 = r[4] #sp
            self.n5 = r[5] #totalcp
            self.n6 = r[6] #totalsp
            self.n7 = r[7] #assumed_profit
            self.n8 = r[8] #vendor
            self.n9 = r[9] #vendor_phone
        # NOTE(review): commit after a SELECT is a no-op — confirm intent.
        conn.commit()
        #insert into the entries to update
        self.name_e.delete(0, END)
        self.name_e.insert(0, str(self.n1))
        self.stock_e.delete(0, END)
        self.stock_e.insert(0, str(self.n2))
        self.cp_e.delete(0, END)
        self.cp_e.insert(0, str(self.n3))
        self.sp_e.delete(0, END)
        self.sp_e.insert(0, str(self.n4))
        self.vendor_e.delete(0, END)
        self.vendor_e.insert(0, str(self.n8))
        self.vendor_phone_e.delete(0, END)
        self.vendor_phone_e.insert(0, str(self.n9))
        self.totalcp_e.delete(0, END)
        self.totalcp_e.insert(0, str(self.n5))
        self.totalsp_e.delete(0, END)
        self.totalsp_e.insert(0, str(self.n6))
    def update(self, *args, **kwargs):
        """Write the edited entry values back to the row with the typed id."""
        #get all the updated values
        self.u1 = self.name_e.get()
        self.u2 = self.stock_e.get()
        self.u3 = self.cp_e.get()
        self.u4 = self.sp_e.get()
        self.u5 = self.totalcp_e.get()
        self.u6 = self.totalsp_e.get()
        self.u7 = self.vendor_e.get()
        self.u8 = self.vendor_phone_e.get()
        query = "UPDATE inventory SET name=?, stock=?, cp=?, sp=?, totalcp=?, totalsp=?, vendor=?, vendor_phoneno=? WHERE id=?"
        c.execute(query, (self.u1, self.u2, self.u3, self.u4, self.u5, self.u6, self.u7, self.u8, self.id_1eb.get()))
        conn.commit()
        tkinter.messagebox.showinfo("Success", "Updated Database Successfully")
# Build the main window, hand it to the Database controller, and start the UI.
root = Tk()
b = Database(root)
root.geometry("1366x768+0+0")
root.title("Update Shopping Store database")
root.mainloop()
| StarcoderdataPython |
141332 | from pygments.lexers.rdf import SparqlLexer
from pygments.token import Other
class SparqlLexerMagics(SparqlLexer):
    """
    A variant of the standard SPARQL Pygments lexer that also understands
    notebook line magics ("%magic ..." lines).
    """
    aliases = ['sparql-nb', 'sparql']
    name = 'SPARQL w/ notebook magics'

    # BUG FIX: copy the parent's token table before modifying it.  The
    # original code rebound tokens['root'] on SparqlLexer.tokens itself,
    # mutating the shared class attribute and therefore changing the
    # behaviour of the plain SparqlLexer as well.
    tokens = dict(SparqlLexer.tokens)
    # Recognise "%magic ..." lines before falling through to normal SPARQL rules.
    tokens['root'] = [(r'^%[a-zA-Z]\w+.*\n', Other)] + tokens['root']
11315665 | <gh_stars>0
####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
"""
.. module:: admin_projects
Summary of module goes here
"""
from django.conf import settings
from django.contrib import admin
from django.core.exceptions import ValidationError
from django.core.files import File
from django.forms import ModelForm
from PIL import Image
import os
from Q.questionnaire.models.models_projects import QProject, QProjectOntology
class QProjectAdminForm(ModelForm):
    """Admin form for QProject.

    Validates that a logo is present whenever display_logo is set, and on
    save forces the uploaded logo to QProject.LOGO_SIZE.
    """
    class Meta:
        model = QProject
        fields = [
            "name",
            "title",
            "description",
            "email",
            "url",
            "logo",
            "display_logo",
            "authenticated",
            "is_active",
            "is_displayed",
            "is_legacy",
            # "ontologies",
        ]

    def clean(self):
        """Cross-field validation: a logo is required when display_logo is set."""
        logo = self.cleaned_data.get("logo")
        display_logo = self.cleaned_data.get("display_logo")
        if display_logo and not logo:
            msg = "You must provide a logo if you set display_logo to true."
            raise ValidationError(msg)
        # Django convention: return the cleaned data so overrides can chain.
        return self.cleaned_data

    def save(self, commit=True):
        """Save the project, then resize its logo in place to LOGO_SIZE."""
        project = super(QProjectAdminForm, self).save(commit)
        if project.logo:
            # force resizing of the logo...
            logo = Image.open(project.logo.file)
            logo = logo.resize(QProject.LOGO_SIZE, Image.ANTIALIAS)
            logo_path = os.path.join(
                settings.MEDIA_ROOT,
                project.logo.field.upload_to(project, project.logo.name)
            )
            logo_dir = os.path.dirname(logo_path)
            if not os.path.exists(logo_dir):
                os.makedirs(logo_dir)
            logo.save(logo_path)
            # (the fact that this uses OverwriteStorage means that a new filename will not be created)
            # BUG FIX: "rb" was previously passed to File() as its *name*
            # argument instead of to open() as the mode; open the resized
            # image in binary mode and close the handle deterministically.
            with open(logo_path, "rb") as logo_file:
                project.logo.save(os.path.basename(logo_path), File(logo_file))
        return project
class QProjectOntologyInline(admin.TabularInline):
    """Inline row editor for the project<->ontology through model."""
    model = QProjectOntology
    # Show one blank row for adding a new ontology link.
    extra = 1
class QrojectAdmin(admin.ModelAdmin):
    """
    Custom ModelAdmin for QProjects
    Provides an inline form for adding QProjectOntologies

    NOTE(review): the class name is missing a 'P' (QrojectAdmin); renaming it
    would also require updating the register() call below.
    """
    inlines = (QProjectOntologyInline,)
    form = QProjectAdminForm
admin.site.register(QProject, QrojectAdmin)
| StarcoderdataPython |
118889 | ##Generate patches from a large raster##
"""preprocessing model for creating a non-overlapping sliding window of fixed size to generate tfrecords for model training"""
import rasterio
import tensorflow as tf
import numpy as np
def extract_patches(image, width, height):
    """Extract (width x height) patches from a single image tensor.

    NOTE(review): the module docstring advertises a *non-overlapping* sliding
    window, but the strides of [1, 1, 1, 1] below yield one densely
    overlapping patch per pixel; non-overlapping extraction would need
    strides equal to ksizes.  Confirm which behaviour is intended.
    """
    # The size of sliding window
    ksizes = [1, width, height, 1]
    # Move over 1 pixel and make a new patch
    strides = [1, 1, 1, 1]
    # The document is unclear. However, an intuitive example posted on StackOverflow illustrate its behaviour clearly.
    # http://stackoverflow.com/questions/40731433/understanding-tf-extract-image-patches-for-extracting-patches-from-an-image
    rates = [1, 1, 1, 1] # sample pixel consecutively
    # padding algorithm to used
    padding = 'SAME' # or 'VALID'
    # Add the leading batch dimension expected by tf.image.extract_patches.
    image = tf.expand_dims(image, 0)
    image_patches = tf.image.extract_patches(image, ksizes, strides, rates, padding)
    # Squeeze the batch dimension back out.
    image_patches = tf.squeeze(image_patches)
    return image_patches
| StarcoderdataPython |
3308118 | <reponame>Charly98cma/Boredom-Factory
from sys import stderr as STDERR
from random import choice as rndCh
from string import ascii_letters as letters
from os import path
from lib import err_msgs as err
def print_result(text: str, out_method: int) -> None:
    """Emit *text* either to a user-chosen file or to the console.

    Parameters
    ----------
    text: str - Text to print
    out_method: int - Output destination flag (0 - File | 1 - Console)
    """
    if out_method != 0:
        # Console output: echo the message under a small banner.
        print("\n-- Message --")
        print(text)
        return
    # File output: keep prompting until a writable path is given.
    while True:
        file_path = read_path(input("-- Path to file: "))
        try:
            with open(file_path, mode='a', encoding='UTF-8') as f:
                f.write(text)
        except PermissionError:
            print(err.permission_error(file_path), file=STDERR)
        else:
            return
def read_path(file_path: str) -> str:
    """Normalise a user-supplied path to an absolute path.

    Parameters
    ----------
    file_path: str - Path to the file (absolute or relative; may start with ``~``)

    Returns
    -------
    str - Absolute path to the file
    """
    # BUG FIX: the old code stripped a leading '~' and took abspath of the
    # remainder, silently dropping the user's home directory (and raising
    # IndexError on an empty string).  expanduser() handles '~'/'~user'
    # correctly, and abspath() guarantees the documented absolute return
    # value for relative inputs as well.
    return path.abspath(path.expanduser(file_path))
def read_text_file():
    """
    Read the whole contents of a user-chosen file, prompting again until
    a readable path is supplied.
    """
    while True:
        file_path = read_path(input("-- Path to file: "))
        try:
            # Context manager closes the handle even if read() fails
            # (the old code leaked the file object).
            with open(file_path, 'r', encoding='UTF-8') as f:
                return f.read()
        except FileNotFoundError:
            print(err.file_error(file_path), file=STDERR)
        except PermissionError:
            # BUG FIX: err.permission_error was printed without being called;
            # invoke it with the offending path, as the branch above does.
            print(err.permission_error(file_path), file=STDERR)
def read_text_stdin():
    """
    Read user input from standard input until a blank line is entered.
    """
    print("\n-- Type the message --\n\n")
    lines = []
    while True:
        line = input()
        if not line:
            break
        lines.append(line + '\n')
    return ''.join(lines)
def read_text(in_method: int, perm_len: int) -> str:
    """Read the text to cypher/decypher and pad it to a multiple of *perm_len*.

    Parameters
    ----------
    in_method: int - Flag for the input method (0 - File | 1 - Console)
    perm_len: int - Permutation length the text length must be a multiple of

    Returns
    -------
    str - Users' text, padded with random letters so that
          ``len(result) % perm_len == 0``
    """
    res = read_text_file() if in_method == 0 else read_text_stdin()
    # BUG FIX: the old padding appended perm_len copies of one random letter,
    # which does not generally make the length a multiple of perm_len.  Pad
    # with exactly the number of missing characters instead.
    missing = -len(res) % perm_len
    return res + ''.join(rndCh(letters) for _ in range(missing))
| StarcoderdataPython |
5101865 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 2 11:18:20 2021
@author: h2jw
"""
import pandas as pd
# SELECT TEST VISUALIZATION NUMBER
nb = 4
#%%
# Load every CSV produced by LDA run `nb` (hard-coded local paths).
topic_desc = f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/topic_description.csv"
t_desc = pd.read_csv(topic_desc)
pres = f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/president_top_topics.csv"
pres_topics = pd.read_csv(pres)
dico = f'/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/dict.csv'
dico_topics = pd.read_csv(dico)
dt_query = f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/dt_query.csv"
query = pd.read_csv(dt_query)
dt = f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/dt.csv"
dt_df = pd.read_csv(dt)
tr = f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/tfidf_ranking.csv"
dt_tr = pd.read_csv(tr)
df_final = pd.read_csv(f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/final_output_agg.csv")
# Force every topic-share column T0..T29 to float64 before plotting.
df_final = df_final.astype({ 'T0':'float64', 'T1':'float64', 'T2':'float64', 'T3':'float64', 'T4':'float64', 'T5':'float64', 'T6':'float64',
'T7':'float64', 'T8':'float64', 'T9':'float64', 'T10':'float64', 'T11':'float64', 'T12':'float64', 'T13':'float64', 'T14':'float64', 'T15':'float64', 'T16':'float64',
'T17':'float64', 'T18':'float64', 'T19':'float64', 'T20':'float64', 'T21':'float64', 'T22':'float64', 'T23':'float64', 'T24':'float64', 'T25':'float64', 'T26':'float64',
'T27':'float64', 'T28':'float64', 'T29':'float64'})
df_heatmap = df_final.drop(columns='year').set_index('chair_in_charge')
#%%
from tqdm import trange
import numpy as np
# Build parallel lists of topic scores and topic words from topic_description:
# based on the indexing below, even rows appear to hold scores and odd rows
# the matching words (columns 1..13 per topic) — confirm against the CSV.
l_scores = [t_desc.iloc[0].tolist()[1:14]]
l_col0 = t_desc.columns.tolist()[1:14]
l_topics = [l_col0]
for i in trange(1,30):
    l_topics.append(t_desc.iloc[2*i-1].tolist()[1:14])
    l_scores.append(t_desc.iloc[2*i].tolist()[1:14])
l_scores = [np.float_(elem) for elem in l_scores]
#%% SAME VISUALS AS IN ARTICLE
import matplotlib.pyplot as plt
import seaborn as sns
# Heatmap of topic-word scores, annotated with the words themselves.
plt.figure(figsize=(20,10))
sns.heatmap(l_scores,cmap="Purples",annot=l_topics, fmt="")
plt.title("Topics ")
plt.show()
#%% VISUALS PER CHAIR PER YEAR
# Topic distribution indexed by (chair, year).
plt.figure()
df_final2 = df_final.set_index(['chair_in_charge', 'year'])
sns.heatmap(df_final2)
plt.title("Distribution des topics par année")
plt.show()
#%% TFIDF RANK
# NOTE(review): the tf-idf score column is literally named "49.296371" in the
# CSV (likely a headerless export); confirm before relying on it.
dt_tr['score']=dt_tr['49.296371']
plt.plot(dt_tr.score)
| StarcoderdataPython |
1895420 | import multiprocessing
import threading
import time
import sys
from math import fabs
# Prefer the high-resolution perf counter; fall back to the monotonic clock
# on interpreters that do not provide time.perf_counter.
try:
    TIME_FUNC = time.perf_counter
except AttributeError:
    TIME_FUNC = time.monotonic
# Default number of decimal places shown for the elapsed seconds.
DEF_PRECISION = 4
# Default refresh interval (seconds) between display updates.
DEF_STEP = 0.01
def write_time(start_time, precision, step):
    """Render the elapsed time since *start_time* on stdout in place.

    Writes the formatted value, sleeps for *step* seconds, then emits one
    backspace per character so the next call overwrites it.
    """
    elapsed = fabs(TIME_FUNC() - start_time)
    stamp = '{0:.{1}f}'.format(elapsed, precision)
    out = sys.stdout
    out.write(stamp)
    out.flush()
    time.sleep(step)
    out.write('\b' * len(stamp))
class InlineTimer(object):
    """Shows a running elapsed-time counter, updated in place on stdout.

    Abstract base: subclasses must assign ``par_type`` (a Thread/Process
    class) and ``stop_event`` (the matching Event type) in ``__init__``.
    Usable as a context manager: entering starts the display worker,
    exiting stops it.
    """
    def __init__(self, name=None, precision=DEF_PRECISION, step=DEF_STEP):
        # Filled in by subclasses (ThreadTimer / ProcessTimer).
        self.stop_event = None
        self.par_type = None
        # The running worker (None until start() is called).
        self.par = None
        self.precision = precision
        self.step = step
        self.name = name or type(self).__name__
        # Reference instant the displayed elapsed time is measured from.
        self.start_time = TIME_FUNC()
    def start(self):
        """Launch the daemon worker that repeatedly redraws the elapsed time."""
        self.stop_event.clear()
        self.par = self.par_type(
            name=self.name, target=self.init_time, daemon=True)
        self.par.start()
    def stop(self):
        """Signal the worker to stop and wait for it to finish."""
        if self.par is not None:
            self.stop_event.set()
            self.par.join()
    def init_time(self):
        """Worker loop: redraw the counter until the stop event is set."""
        while not self.stop_event.is_set():
            write_time(self.start_time, self.precision, self.step)
    def __enter__(self):
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
        # Never suppress exceptions from the with-block.
        return False
class ProcessTimer(InlineTimer):
    """Inline timer whose display runs in a separate daemon process."""

    def __init__(self, name=None, precision=DEF_PRECISION, step=DEF_STEP):
        super().__init__(name=name, precision=precision, step=step)
        # Configure after the base init so these are not clobbered.
        self.stop_event = multiprocessing.Event()
        self.par_type = multiprocessing.Process
class ThreadTimer(InlineTimer):
    """Inline timer whose display runs in a daemon thread."""

    def __init__(self, name=None, precision=DEF_PRECISION, step=DEF_STEP):
        super().__init__(name=name, precision=precision, step=step)
        # Configure after the base init so these are not clobbered.
        self.stop_event = threading.Event()
        self.par_type = threading.Thread
def process_timer(name=None, prec=DEF_PRECISION, step=DEF_STEP):
    """Build a :class:`ProcessTimer` with the given display settings."""
    return ProcessTimer(name=name, precision=prec, step=step)
def thread_timer(name=None, prec=DEF_PRECISION, step=DEF_STEP):
    """Build a :class:`ThreadTimer` with the given display settings."""
    return ThreadTimer(name=name, precision=prec, step=step)
def inline_timer(mp=False, **kwargs):
    """Factory: a process-backed timer when *mp* is true, else a thread one."""
    factory = process_timer if mp else thread_timer
    return factory(**kwargs)
| StarcoderdataPython |
5167609 | <filename>src/lib/mine/utility/test_my_assert.py
#!/usr/bin/env false
"""TODO: Write
"""
# Internal packages (absolute references, distributed with Python)
from pathlib import Path
# External packages (absolute references, NOT distributed with Python)
from pytest import raises
# Library modules (absolute references, NOT packaged, in project)
from utility import my_assert as is_
# Project modules (relative references, NOT packaged, in project)
# This test module's own path and its parent directory serve as known-good
# absolute file/directory fixtures for the assertions below.
this_file = Path(__file__)
this_directory = this_file.parent
def test_absolute_directory():
    """An absolute directory passes the check; a plain file must not."""
    assert is_.absolute_directory(this_directory)
    # NOTE(review): the inner assert also guards against my_assert returning
    # a falsy value instead of raising -- either way AssertionError results.
    with raises(AssertionError):
        assert is_.absolute_directory(this_file)
def test_absolute_file():
    """An absolute file passes the check; a directory must not."""
    # NOTE(review): the inner assert also guards against my_assert returning
    # a falsy value instead of raising -- either way AssertionError results.
    with raises(AssertionError):
        assert is_.absolute_file(this_directory)
    assert is_.absolute_file(this_file)
def test_absolute_path():
    """Both an absolute directory and an absolute file satisfy absolute_path."""
    for candidate in (this_directory, this_file):
        assert is_.absolute_path(candidate)
def test_equal():
    """is_.equal holds for identical values: None, empty and non-empty str."""
    cases = ((None, None), ("", ""), ("Test", "Test"))
    for left, right in cases:
        assert is_.equal(left, right)
def test_existing_absolute_path():
    """Existing absolute paths (directory and file) satisfy the check."""
    for candidate in (this_directory, this_file):
        assert is_.existing_absolute_path(candidate)
def test_instance():
    """is_.instance matches single types and tuples, and reports None clearly."""
    assert is_.instance(None, type(None))
    # None against str must fail, and the error message should mention both
    # the None value and the expected type.
    with raises(AssertionError) as info:
        assert is_.instance(None, str)
    a = str(info.value)
    assert "Value is None" in a
    assert "<class 'str'>" in a
    assert is_.instance("", str)
    # Tuples of types behave like isinstance(): any member may match.
    assert is_.instance(None, (type(None), str))
    assert is_.instance("", (type(None), str))
    assert is_.instance("Test", (type(None), str))
def test_not_instance():
    """None is reported as not an instance of str, alone or within a tuple."""
    for type_spec in (str, (bool, float, int, str)):
        assert is_.not_instance(None, type_spec)
"""DisabledContent
"""
| StarcoderdataPython |
11357949 | <reponame>mrx04programmer/frza
#! /bin/python3
import socket

# Simple command-and-control listener: accept one client on localhost:8080,
# forward typed commands to it, and print whatever the client sends back.
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow immediate re-binding of the port after a restart.
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
listener.bind(('127.0.0.1', 8080))
listener.listen(0)
print("[+] Esperando por conexiones")
connection, addr = listener.accept()
print("[+] Conexion de " + str(addr))
try:
    while True:
        command = input('>>')
        # sendall() retries on partial writes; plain send() may silently
        # truncate the command.
        connection.sendall(command.encode())
        result = connection.recv(1024)
        if not result:
            # An empty read means the peer closed the connection; the
            # original looped forever printing b'' here.
            break
        print(result)
finally:
    # Release both sockets even if input()/send raises.
    connection.close()
    listener.close()
3597475 | # Copyright (c) 2010 Resolver Systems Ltd.
# All Rights Reserved
#
from functionaltest import FunctionalTest
class Test_2559_FitEditorToCells(FunctionalTest):
    """Functional test: the inline cell editor should match the cell size."""

    def test_editor_fits_cells(self):
        # * Harold logs in and creates a new sheet
        self.login_and_create_new_sheet()

        # Measure the target cell before editing starts.
        target_cell = self.get_cell_locator(3, 3)
        cell_width = self.selenium.get_element_width(target_cell)
        cell_height = self.selenium.get_element_height(target_cell)

        # * He edits a cell and notes that the editor fits the
        #   cell almost exactly (a few pixels of border/padding allowed).
        self.open_cell_for_editing(3, 3)
        editor = 'css=input.editor-text'
        editor_width = self.selenium.get_element_width(editor)
        editor_height = self.selenium.get_element_height(editor)

        self.assertTrue(
            cell_width - editor_width <= 6,
            "cell width: %d, editor width: %d" % (cell_width, editor_width))
        self.assertTrue(
            cell_height - editor_height <= 6,
            "cell height: %d, editor height: %d" % (cell_height, editor_height))
| StarcoderdataPython |
6554839 | from nksama import bot
def send_log(err , module):
    """Forward an error report for *module* to the logging Telegram chat."""
    text = f"error in {module}\n\n{err}"
    bot.send_message(-1001646296281 , text)
| StarcoderdataPython |
8022894 | <gh_stars>100-1000
# Maximum session lifetime in seconds (30 days).
DEFAULT_MAXIMUM_SESSION_LENGTH = 60 * 60 * 24 * 30  # 30 days
# Dotted path of the default authentication backend class.
DEFAULT_AUTHENTICATION_BACKEND = 'mayan.apps.authentication.authentication_backends.AuthenticationBackendModelDjangoDefault'
# Keyword arguments passed to the backend above when it is instantiated.
DEFAULT_AUTHENTICATION_BACKEND_ARGUMENTS = {
    'maximum_session_length': DEFAULT_MAXIMUM_SESSION_LENGTH
}
# Whether the password reset feature is disabled by default.
DEFAULT_AUTHENTICATION_DISABLE_PASSWORD_RESET = False
# Session key holding the user id between multi-factor steps.
SESSION_MULTI_FACTOR_USER_ID_KEY = '_multi_factor_user_id'
# Session keys controlling the user-impersonation feature.
USER_IMPERSONATE_VARIABLE_ID = '_user_impersonate_id'
USER_IMPERSONATE_VARIABLE_DISABLE = '_user_impersonate_end'
USER_IMPERSONATE_VARIABLE_PERMANENT = '_user_impersonate_permanent'
5113099 | from typing import Any, Dict
from fedot.core.optimisers.adapters import PipelineAdapter
from fedot.core.optimisers.opt_history import ParentOperator
from . import any_to_json
def parent_operator_to_json(obj: ParentOperator) -> Dict[str, Any]:
    """Serialize a ParentOperator, replacing each parent object by the
    serialization id of its graph."""
    result = any_to_json(obj)
    ids = []
    for parent in result['parent_objects']:
        ids.append(parent.graph._serialization_id)
    result['parent_objects'] = ids
    return result
| StarcoderdataPython |
311406 | import math
import random
import matplotlib.pyplot as plt
from algorithms.ParticleFilter import ParticleFilter
# Parameters of the 1-D particle-filter demo world and sensor model.
WORLD_SIZE = 10            # length of the 1-D world
MARKERS = [1, 2, 5, 6, 8]  # landmark positions within the world
MAX_RANGE = 5              # sensor saturates at this distance
HIT_VARIANCE = .75         # variance of the range-measurement noise
MOVEMENT_VARIANCE = 5      # NOTE(review): defined but unused in this file
def sample_measurement_distribution(actual_range):
    """Sample a noisy range reading around *actual_range*, clamped to
    the interval [0, MAX_RANGE]."""
    noisy = random.normalvariate(actual_range, HIT_VARIANCE)
    return max(0, min(noisy, MAX_RANGE))
def true_ranges(position):
    """Return the true distances from *position* to the nearest marker on
    each side as a (left, right) tuple.

    A side with no marker reports MAX_RANGE (sensor saturation).
    """
    # BUG FIX: the original used filter(); under Python 3 filter() returns
    # a one-shot iterator, so len(left_markers) raised TypeError (and the
    # iterator would otherwise be consumed before max()/min()).  List
    # comprehensions behave identically on Python 2 and 3.
    left_markers = [x for x in MARKERS if x < position]
    right_markers = [x for x in MARKERS if x > position]
    true_left_range = MAX_RANGE if not left_markers else position - max(left_markers)
    true_right_range = MAX_RANGE if not right_markers else min(right_markers) - position
    return true_left_range, true_right_range
def make_measurement(position):
    """Simulate a noisy (left, right) range measurement taken at *position*."""
    left_actual, right_actual = true_ranges(position)
    return (sample_measurement_distribution(left_actual),
            sample_measurement_distribution(right_actual))
def range_likelihood(true_range, measurement):
    """Gaussian likelihood of observing *measurement* given *true_range*.

    Evaluates a normal pdf with variance HIT_VARIANCE centred on
    *true_range*.
    """
    # BUG FIX: the original exponent used `-1 / 2`, which is integer
    # division under Python 2 (this file uses Python 2 print statements)
    # and truncates to -1, doubling the exponent of the Gaussian.  Writing
    # -0.5 explicitly is correct on both Python 2 and 3.
    diff = true_range - measurement
    return (1 / math.sqrt(2 * math.pi * HIT_VARIANCE)
            * math.exp(-0.5 * diff ** 2 / HIT_VARIANCE))
def measurement_likelihood(measurement, position):
    """Joint likelihood of a (left, right) measurement pair at *position*,
    assuming the two range readings are independent."""
    expected_left, expected_right = true_ranges(position)
    observed_left, observed_right = measurement
    return (range_likelihood(expected_left, observed_left)
            * range_likelihood(expected_right, observed_right))
def prior_distribution():
    """Draw an initial particle position uniformly over the world."""
    return random.uniform(0, WORLD_SIZE)
def movement_model(position, intended_movement):
    """Apply *intended_movement* with multiplicative Gaussian noise
    (mean 1, sd 0.1), never reversing the movement direction."""
    scale = max(random.normalvariate(1, .1), 0)
    return position + intended_movement * scale
def show_particles(particles):
    """Display a 100-bin histogram of particle positions over the world."""
    plt.hist(particles, range=(0, WORLD_SIZE), bins=100)
    plt.show()
def test():
    # NOTE(review): this file uses Python 2 print statements throughout
    # this function -- it will not parse under Python 3 as written.
    # random.seed(42)
    bot = 5.5
    print true_ranges(bot)
    print make_measurement(bot)
    # Initialise the filter from the uniform prior and show the particles.
    particle_filter = ParticleFilter(prior_distribution)
    show_particles(particle_filter.particles)
    print MARKERS
    # Alternate predict (move) and update (measure) steps, visualising
    # the particle distribution after each.
    for move in [0, 1, 1, -3, -1, 5]:
        bot = movement_model(bot, move)
        print "Bot moving", move
        particle_filter.move(move, movement_model)
        show_particles(particle_filter.particles)
        print "Measuring with bot at", bot
        particle_filter.measure(make_measurement(bot), measurement_likelihood)
        show_particles(particle_filter.particles)
    # n, bins, patches = plt.hist(particle_filter.particles, 50, normed=1, facecolor='green', alpha=0.75)
    # plt.hist([sample_measurement_distribution(2) for _ in xrange(1000)])
    # plt.hist([movement_model(2, 2) for _ in xrange(1000)])
    # plt.show()
if __name__ == "__main__":
    test()
| StarcoderdataPython |
3487734 | # coding: utf-8
import os
import sys
import logging
from typing import Dict
from overrides import overrides
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import RegularizerApplicator
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.models.crf_tagger import CrfTagger
from hmtl.modules.text_field_embedders import ShortcutConnectTextFieldEmbedder
from hmtl.models.relation_extraction import RelationExtractor
# Module-level logger named after this module's import path.
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
@Model.register("emd_relation")
class LayerEmdRelation(Model):
    """
    Implements two tasks of the HMTL model: EMD (a CRF tagger) and
    relation extraction, with the relation encoder stacked on top of the
    EMD encoder via a shortcut connection.

    Parameters
    ----------
    vocab: ``allennlp.data.Vocabulary``, required.
        The vocabulary fitted on the data.
    params: ``allennlp.common.Params``, required
        Configuration parameters for the multi-task model.
    regularizer: ``allennlp.nn.RegularizerApplicator``, optional (default = None)
        A regularizer to apply to the model's layers.
    """
    def __init__(self,
                 vocab: Vocabulary,
                 params: Params,
                 regularizer: RegularizerApplicator = None):
        super(LayerEmdRelation, self).__init__(vocab = vocab, regularizer = regularizer)
        # Base text Field Embedder, shared by both tasks.
        text_field_embedder_params = params.pop("text_field_embedder")
        text_field_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab,
                                                                 params=text_field_embedder_params)
        self._text_field_embedder = text_field_embedder
        ############
        # EMD Stuffs
        ############
        emd_params = params.pop("emd")
        # Encoder
        encoder_emd_params = emd_params.pop("encoder")
        encoder_emd = Seq2SeqEncoder.from_params(encoder_emd_params)
        self._encoder_emd = encoder_emd
        # Tagger EMD - CRF Tagger
        tagger_emd_params = emd_params.pop("tagger")
        tagger_emd = CrfTagger(vocab = vocab,
                               text_field_embedder = self._text_field_embedder,
                               encoder = self._encoder_emd,
                               label_namespace = tagger_emd_params.pop("label_namespace", "labels"),
                               constraint_type = tagger_emd_params.pop("constraint_type", None),
                               dropout = tagger_emd_params.pop("dropout", None),
                               regularizer = regularizer)
        self._tagger_emd = tagger_emd
        ############################
        # Relation Extraction Stuffs
        ############################
        relation_params = params.pop("relation")
        # Encoder
        encoder_relation_params = relation_params.pop("encoder")
        encoder_relation = Seq2SeqEncoder.from_params(encoder_relation_params)
        self._encoder_relation = encoder_relation
        # Shortcut connection: the relation task sees the base embeddings
        # concatenated with the EMD encoder's output.
        shortcut_text_field_embedder_relation = ShortcutConnectTextFieldEmbedder(base_text_field_embedder = self._text_field_embedder,
                                                                                 previous_encoders = [self._encoder_emd])
        self._shortcut_text_field_embedder_relation = shortcut_text_field_embedder_relation
        # Tagger: Relation
        tagger_relation_params = relation_params.pop("tagger")
        tagger_relation = RelationExtractor(vocab = vocab,
                                            text_field_embedder = self._shortcut_text_field_embedder_relation,
                                            context_layer = self._encoder_relation,
                                            d = tagger_relation_params.pop_int("d"),
                                            l = tagger_relation_params.pop_int("l"),
                                            n_classes = tagger_relation_params.pop("n_classes"),
                                            activation = tagger_relation_params.pop("activation"))
        self._tagger_relation = tagger_relation
        logger.info("Multi-Task Learning Model has been instantiated.")
    @overrides
    def forward(self,
                tensor_batch,
                for_training: bool = False,
                task_name: str = "ner") -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        # Dispatch the batch to the tagger registered for `task_name`
        # (attribute `_tagger_<task_name>` set in __init__).
        tagger = getattr(self, "_tagger_%s" % task_name)
        return tagger.forward(**tensor_batch)
    @overrides
    def get_metrics(self,
                    task_name: str,
                    reset: bool = False,
                    full: bool = False) -> Dict[str, float]:
        # NOTE(review): the `full` parameter is accepted but never used here.
        task_tagger = getattr(self, "_tagger_" + task_name)
        return task_tagger.get_metrics(reset)
    @classmethod
    def from_params(cls,
                    vocab: Vocabulary,
                    params: Params,
                    regularizer: RegularizerApplicator) -> "LayerEmdRelation":
        # Alternate constructor used by the allennlp configuration system.
        return cls(vocab = vocab,
                   params = params,
                   regularizer = regularizer)
6497054 | """
File Name: Urls
    Purpose: URL paths used by this application.
Comments:
"""
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from api.views.user_views import UserSearchView
from auth_backend.modules.superAdmin.utilities import bulk_invite
from auth_backend.modules.superAdmin.views import CreateReferralView
# The DRF router generates the URL patterns for the registered viewsets.
router = DefaultRouter()
router.register('users-details', UserSearchView, basename='user-details')
# e.g. appending an id such as 1 or 2 to the route returns the matching
# user's details.
urlpatterns = [
    path('referrals/', CreateReferralView.as_view(), name='referral'),
    path('bulk-invite/', bulk_invite, name='bulk_invite'),
    path('', include(router.urls)),
]
# Namespace used when reversing these URLs (e.g. 'superAdmin:referral').
app_name = 'superAdmin'
| StarcoderdataPython |
1925971 | <filename>sdk/python/pulumi_google_native/memcache/v1beta2/_inputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
# Public input types exported by this generated module.
__all__ = [
    'InstanceMessageArgs',
    'MemcacheParametersArgs',
    'NodeConfigArgs',
]
@pulumi.input_type
class InstanceMessageArgs:
    # Auto-generated Pulumi input type (see the file header); do not edit
    # by hand -- changes will be lost on regeneration.
    def __init__(__self__, *,
                 code: Optional[pulumi.Input['InstanceMessageCode']] = None,
                 message: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input['InstanceMessageCode'] code: A code that correspond to one type of user-facing message.
        :param pulumi.Input[str] message: Message on memcached instance which will be exposed to users.
        """
        if code is not None:
            pulumi.set(__self__, "code", code)
        if message is not None:
            pulumi.set(__self__, "message", message)
    @property
    @pulumi.getter
    def code(self) -> Optional[pulumi.Input['InstanceMessageCode']]:
        """
        A code that correspond to one type of user-facing message.
        """
        return pulumi.get(self, "code")
    @code.setter
    def code(self, value: Optional[pulumi.Input['InstanceMessageCode']]):
        pulumi.set(self, "code", value)
    @property
    @pulumi.getter
    def message(self) -> Optional[pulumi.Input[str]]:
        """
        Message on memcached instance which will be exposed to users.
        """
        return pulumi.get(self, "message")
    @message.setter
    def message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message", value)
@pulumi.input_type
class MemcacheParametersArgs:
    # Auto-generated Pulumi input type (see the file header); do not edit
    # by hand -- changes will be lost on regeneration.
    def __init__(__self__, *,
                 params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] params: User defined set of parameters to use in the memcached process.
        """
        if params is not None:
            pulumi.set(__self__, "params", params)
    @property
    @pulumi.getter
    def params(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        User defined set of parameters to use in the memcached process.
        """
        return pulumi.get(self, "params")
    @params.setter
    def params(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "params", value)
@pulumi.input_type
class NodeConfigArgs:
    # Auto-generated Pulumi input type (see the file header); do not edit
    # by hand -- changes will be lost on regeneration.
    def __init__(__self__, *,
                 cpu_count: pulumi.Input[int],
                 memory_size_mb: pulumi.Input[int]):
        """
        Configuration for a Memcached Node.
        :param pulumi.Input[int] cpu_count: Number of cpus per Memcached node.
        :param pulumi.Input[int] memory_size_mb: Memory size in MiB for each Memcached node.
        """
        pulumi.set(__self__, "cpu_count", cpu_count)
        pulumi.set(__self__, "memory_size_mb", memory_size_mb)
    @property
    @pulumi.getter(name="cpuCount")
    def cpu_count(self) -> pulumi.Input[int]:
        """
        Number of cpus per Memcached node.
        """
        return pulumi.get(self, "cpu_count")
    @cpu_count.setter
    def cpu_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "cpu_count", value)
    @property
    @pulumi.getter(name="memorySizeMb")
    def memory_size_mb(self) -> pulumi.Input[int]:
        """
        Memory size in MiB for each Memcached node.
        """
        return pulumi.get(self, "memory_size_mb")
    @memory_size_mb.setter
    def memory_size_mb(self, value: pulumi.Input[int]):
        pulumi.set(self, "memory_size_mb", value)
| StarcoderdataPython |
11271795 | <reponame>rohitashwa1907/Text-Summarization-Using-GPT2<gh_stars>1-10
import os
import torch
import argparse
import warnings
import textwrap
import helper as hlp
from transformers import GPT2LMHeadModel
def eval(args):
    """Generate abstractive summaries of a text file with a fine-tuned GPT-2.

    Reads ``args.input_file``, wraps it with the special start/summarize
    tokens, loads the fine-tuned weights from ``args.model_directory`` and
    samples ``args.num_of_samples`` candidate summaries, printing each one.

    NOTE(review): the name `eval` shadows the Python builtin; kept because
    the __main__ block below calls it by this name.
    """
    warnings.filterwarnings("ignore")
    """ set the device """
    if torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    """ tokenizer for gpt2 model"""
    tokenizer = hlp.add_special_tokens(args.model_arch_name)
    """ downloading the gpt2 model using huggingface"""
    print('DOWNLOADING MODEL FROM HUGGINGFACE \U0001F917 \U0001F917........................................................')
    model = GPT2LMHeadModel.from_pretrained(args.model_arch_name)
    # Resize embeddings to account for the added special tokens.
    model.resize_token_embeddings(len(tokenizer))
    """ loading previously saved model"""
    if device.type == 'cuda':
        model.load_state_dict(torch.load(args.model_directory))
    elif device.type == 'cpu':
        model.load_state_dict(torch.load(args.model_directory, map_location = torch.device('cpu')))
    model.to(device)
    """ read the text file"""
    file1 = open(args.input_file,'r')
    input_text = file1.read()
    file1.close()
    # Prompt format expected by the fine-tuned model.
    sample_article = "<|startoftext|> " + input_text + " <|summarize|>"
    """ checking the length of the input text """
    inp_length= int(len(tokenizer(sample_article)['input_ids']))
    wrapper = textwrap.TextWrapper(width = 150)
    # 1018 leaves room for the generated continuation within GPT-2's
    # 1024-token context window.
    if inp_length < 1018:
        GPT2_input = torch.tensor(tokenizer.encode(sample_article), dtype=torch.long)
        input_id = GPT2_input.to(device)
        # Fixed seeds so repeated runs sample the same summaries.
        torch.manual_seed(123)
        torch.cuda.manual_seed(123)
        # set top_k to 50
        sample_output = model.generate(
            input_ids = input_id.unsqueeze(0),
            temperature = 1,
            pad_token_id = tokenizer.pad_token_id,
            bos_token_id = tokenizer.bos_token_id,
            eos_token_id = tokenizer.eos_token_id,
            decoder_start_token_id= '<|summarize|>',
            do_sample=True,
            max_length=200 + len(input_id),
            min_length=20 + len(input_id),
            top_p = 0.8,
            top_k=50,
            no_repeat_ngram_size=3,
            num_return_sequences= args.num_of_samples
        )
        print("HERE ARE SOME SUMMARIES TO TRY FROM :\U0001F607 \U0001F607 ")
        for i in range(len(sample_output)):
            print('\n')
            print( 150 * '-')
            # Decode only the continuation (everything after the prompt).
            print(wrapper.fill(tokenizer.decode(sample_output[i, len(input_id):], skip_special_tokens=True, clean_up_tokenization_spaces =True)))
    else:
        print('Sorry!! \U0001F641 \U0001F641 Input is too long for me. Please let me try a smaller one.')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='GPT2 Evaluator')
parser.add_argument('--input_file', type=str, help='provide the path to the input file (.txt file)')
parser.add_argument('--num_of_samples', type=int, default = 3,
help='number of summary samples (default: 3)')
parser.add_argument('--model_directory', type=str, help='path to the GPT2 model')
parser.add_argument('--model_arch_name', type=str, default='gpt2-medium',
help='name of the gpt2 model to be used (default: gpt2-medium)')
parser.add_argument('--device', type=str, help='device to train the model (cpu or cuda)')
eval(parser.parse_args()) | StarcoderdataPython |
4849709 | <gh_stars>0
import requests
import urllib
import math
import time
import random
import numpy as np
import matplotlib.pyplot as plt
import jieba
from snownlp import SnowNLP
from wordcloud import WordCloud
import pandas as pd
import sqlite3
import math
class calculator:
    """Crawl a NetEase Cloud Music artist's songs and comments, cache them
    in a local SQLite database, and plot comment statistics."""
    # Artist name to search for (set in __init__).
    find_artist=''
    # Browser-like request headers so the API accepts our requests.
    my_headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Host': 'music.163.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'
    }
    def __init__(self,artist_name):
        self.find_artist=artist_name
    def run(self): # entry point: crawl or load, then read comments
        print("判断是否有过搜索记录")
        # NOTE(review): history.txt must already exist ('r+' does not
        # create it) and is never closed in this class.
        self.f=open('history.txt','r+')
        b=True
        while True:
            line=self.f.readline()
            if not line:
                break
            if line==self.find_artist+'\n':
                b=False
                break
        self.conn = sqlite3.connect(self.find_artist+'_netease_cloud_music.db')
        if(b):# no previous search record for this artist: crawl fresh data
            self.pachong()
        else:
            self.load()
        # NOTE(review): "artists LIKE artists" is always true, so this
        # selects every comment in the database -- confirm intent.
        sql = '''
        SELECT *
        FROM comment
        WHERE song_id IN (
        SELECT song_id
        FROM song
        WHERE artists LIKE artists
        )
        '''
        self.comment = pd.read_sql(sql, con=self.conn) # load comments from the database
        #self.draw()
    def pachong(self): # crawl path: artist has not been searched before
        """Crawl songs and comments for the artist and persist them."""
        print("未搜索过该歌手,正在爬取数据...")
        fdb=open(self.find_artist+'_netease_cloud_music.db','w+')
        fdb.truncate() # empty any stale database file
        self.comment_num_list=[]
        name=['评论数','歌曲']
        song_df = self.getSongList(self.find_artist)
        song_df = song_df[song_df['artists'].str.contains(self.find_artist)] # keep only songs by the target artist
        song_df.drop_duplicates(subset=['song_id'], keep='first', inplace=True) # drop duplicate songs
        song_df.to_sql(name='song', con=self.conn, if_exists='append', index=False)
        sql = '''
        SELECT song_id
        FROM song
        WHERE artists LIKE artists
        '''
        song_id = pd.read_sql(sql, con=self.conn)
        comment_df = pd.DataFrame()
        for index, id in zip(song_id.index, song_id['song_id']):
            print('0开始爬取第 {0}/{1} 首, {2}'.format(index+1, len(song_id['song_id']), id))
            tmp_df = self.getSongComment(id)
            comment_df = pd.concat([comment_df, tmp_df])
        comment_df.drop_duplicates(subset=['comment_id'], keep='first', inplace=True)
        comment_df.to_sql(name='comment', con=self.conn, if_exists='append', index=False)
        print('已成功保存至数据库!')
        self.comment_num=pd.DataFrame(columns=name,data=self.comment_num_list) # build the comment-count table
        self.comment_num.to_csv(self.find_artist+'的歌曲评论数.csv') # save it to CSV
        self.f.write(self.find_artist+'\n') # record this artist in the search-history file
    def load(self): # cached path: artist was searched before
        """Load the previously saved comment-count table from disk."""
        print("已搜索过该歌手,正在加载本地数据...")
        self.comment_num=pd.read_csv(self.find_artist+'的歌曲评论数.csv') # load the saved comment-count table
    def draw(self):# render all charts (bar, pie, word cloud)
        print("数据加载完毕,正在计算...")
        plt.style.use('ggplot')
        # Use a CJK-capable font and keep the minus sign rendering correct.
        plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
        plt.rcParams['axes.unicode_minus'] = False
        # Bar chart: top-10 songs by comment count.
        data_list=self.comment_num.values.tolist()
        data=np.array(data_list)
        data=data[:,1:3]
        data=np.sort(data,axis=0)
        num_data=data[-1:-11:-1,0]
        plt.bar(np.arange(10),height=num_data)
        plt.title("评论数前十的歌曲")
        plt.savefig('评论数前十的歌曲.jpg')
        plt.show()
        # Pie chart: sentiment polarity of the comments (SnowNLP score,
        # > 0.5 counted as positive).
        self.comment['semiscore'] = self.comment['content'].apply(lambda x: SnowNLP(x).sentiments)
        self.comment['semilabel'] = self.comment['semiscore'].apply(lambda x: 1 if x > 0.5 else -1)
        semilabel = self.comment['semilabel'].value_counts()
        semilabel = semilabel.loc[[1, -1]]
        plt.pie(semilabel.values,labels=['积极评论','消极评论'],colors=['green','red'],autopct='%3.2f%%')
        plt.title("评论的情感倾向")
        plt.savefig('评论的情感倾向.jpg')
        plt.show()
        # Word cloud of the comment text.
        text = ''.join(str(s) for s in self.comment['content'] if s not in [None]) # concatenate all comments into one long text
        word_list = jieba.cut(text, cut_all=False) # tokenize with jieba
        stopwords = [line.strip() for line in open('stopwords.txt',encoding='UTF-8').readlines()] # load the stopword list
        clean_list = [seg for seg in word_list if seg not in stopwords] # drop stopwords
        clean_text=''.join(str(s+'\n') for s in clean_list)# rejoin the remaining tokens
        cloud = WordCloud(
            font_path = 'SIMLI.TTF',
            background_color = 'white',
            max_words = 1000,
            max_font_size = 64
        )
        word_cloud = cloud.generate(clean_text)
        plt.figure(figsize=(16, 16))
        plt.imshow(word_cloud)
        plt.axis('off')
        plt.savefig('词云图.jpg')
        plt.show()
    def getJSON(self,url, headers):
        """ Get JSON from the destination URL
        @ param url: destination url, str
        @ param headers: request headers, dict
        @ return json: result, json
        """
        res = requests.get(url, headers=headers)
        res.raise_for_status() # raise on HTTP error status
        res.encoding = 'utf-8'
        json = res.json()
        return json
    def countPages(self,total, limit):
        """ Count pages
        @ param total: total num of records, int
        @ param limit: limit per page, int
        @ return page: num of pages, int
        """
        page = math.ceil(total / limit)
        return page
    def parseSongInfo(self,song_list):
        """ Parse song info
        @ param song_list: list of songs, list
        @ return song_info_list: result, list
        """
        song_info_list = []
        for song in song_list:
            song_info = []
            song_info.append(song['id'])
            song_info.append(song['name'])
            # Join all contributing artist names into one comma-separated string.
            artists_name = ''
            artists = song['artists']
            for artist in artists:
                artists_name += artist['name'] + ','
            song_info.append(artists_name)
            song_info.append(song['album']['name'])
            song_info.append(song['album']['id'])
            song_info.append(song['duration'])
            song_info_list.append(song_info)
        return song_info_list
    def getSongList(self,key,limit=30):
        """ Get a list of songs
        @ param key: key word, str
        @ param limit: limit per page, int, default 30
        @ return result: result, DataFrame
        """
        total_list = []
        key = urllib.parse.quote(key) # URL-encode the search keyword
        url = 'http://music.163.com/api/search/get/web?csrf_token=&hlpretag=&hlposttag=&s=' + key + '&type=1&offset=0&total=true&limit='
        # Fetch the first page to learn the total page count.
        first_page = self.getJSON(url, self.my_headers)
        song_count = first_page['result']['songCount']
        page_num = self.countPages(song_count, limit)
        if page_num > 20:
            page_num = 20
        # Crawl every (capped) result page.
        for n in range(page_num):
            url = 'http://music.163.com/api/search/get/web?csrf_token=&hlpretag=&hlposttag=&s=' + key + '&type=1&offset=' + str(
                n * limit) + '&total=true&limit=' + str(limit)
            tmp = self.getJSON(url, self.my_headers)
            song_list = self.parseSongInfo(tmp['result']['songs'])
            total_list += song_list
            print('第 {0}/{1} 页爬取完成'.format(n + 1, page_num,10))
            #time.sleep(random.randint(2, 4))
        df = pd.DataFrame(data=total_list,
                          columns=['song_id', 'song_name', 'artists', 'album_name', 'album_id', 'duration'])
        return df
    def parseComment(self,comments):
        """ Parse song comment
        @ param comments: list of comments, list
        @ return comments_list: result, list
        """
        comments_list = []
        for comment in comments:
            comment_info = []
            comment_info.append(comment['commentId'])
            comment_info.append(comment['user']['userId'])
            comment_info.append(comment['user']['nickname'])
            comment_info.append(comment['user']['avatarUrl'])
            comment_info.append(comment['content'])
            comment_info.append(comment['likedCount'])
            comments_list.append(comment_info)
        return comments_list
    def getSongComment(self,id,limit=20):
        """ Get Song Comments
        @ param id: song id, int
        @ param limit: limit per page, int, default 20
        @ return result: result, DataFrame
        """
        total_comment = []
        url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_' + str(id) + '?limit=20&offset=0'
        # Fetch the first page to learn the total comment count.
        first_page = self.getJSON(url, self.my_headers)
        total = first_page['total']
        page_num = self.countPages(total, limit)
        self.comment_num_list.append([total, str(id)])
        # Crawl up to 10 comment pages for this song.
        for n in range(min(page_num,10)):
            url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_' + str(id) + '?limit=' + str(
                limit) + '&offset=' + str(n * limit)
            tmp = self.getJSON(url, self.my_headers)
            comment_list = self.parseComment(tmp['comments'])
            total_comment += comment_list
            print('第 {0}/{1} 页爬取完成'.format(n + 1, min(page_num,10)))
            # time.sleep(random.randint(2, 4))
        df = pd.DataFrame(data=total_comment,
                          columns=['comment_id', 'user_id', 'user_nickname', 'user_avatar', 'content', 'likeCount'])
        df['song_id'] = str(id) # tag every comment row with its song id
        return df
369105 | """
sonde.formats.merge
~~~~~~~~~~~~~~~~~
This module implements the Merge format used by sonde.merge
"""
from __future__ import absolute_import
import datetime
import pkg_resources
import re
from StringIO import StringIO
import xlrd
import csv
import numpy as np
import quantities as pq
from .. import sonde
from .. import quantities as sq
from ..timezones import cdt, cst
class MergeDataset(sonde.BaseSondeDataset):
    """
    Dataset object representing data merged from multiple data files by
    sonde.merge.

    The timezone is the package default; parameter names/units come from
    the master list, and ``data`` is a dict of all data keyed by
    parameter name.
    """
    def __init__(self, metadata, paramdata):
        # Indices of the unique (de-duplicated) records, then a
        # chronological ordering of those records.
        idx = self._indices_duplicate_data(metadata['dates'], paramdata)
        sort_idx = np.argsort(metadata['dates'][idx])
        self.manufacturer = metadata['instrument_manufacturer'][idx][sort_idx]
        self.serial_number = metadata['instrument_serial_number'][idx][sort_idx]
        self.data_file = metadata['data_file_name'][idx][sort_idx]
        self.default_tzinfo = sonde.default_static_timezone
        # determine parameters provided and in what units
        self.parameters = dict()
        self.data = dict()
        for param in paramdata.keys():
            self.parameters[param] = param
            self.data[param] = paramdata[param][idx][sort_idx]
        self.dates = metadata['dates'][idx][sort_idx]
        # I don't think the following line is needed
        # super(MergeDataset, self).__init__()
    def _indices_duplicate_data(self, dates, data):
        """
        Return the indices that keep one copy of each unique
        (datetime, *parameter values*) record.
        """
        # Pack dates and all parameter columns into one structured array so
        # np.unique can compare whole records at once.
        dtypes = [datetime.datetime]
        names = ['datetime']
        for param in data.keys():
            dtypes.append('f8')
            names.append(param)
        tmp_data = np.zeros(dates.size, dtype=np.dtype({'names': names,
                                                        'formats': dtypes}))
        tmp_data['datetime'] = dates
        for param in data.keys():
            tmp_data[param] = data[param]
        u, idx = np.unique(tmp_data, return_index=True)
        return idx
| StarcoderdataPython |
237935 | """Prepares a text-format word2vec vectors for R's PCA.
Usage: $0 input-vectors.txt output-vectors.txt
"""
import sys

# Command-line arguments: input word2vec text file, output TSV for R.
input_file = sys.argv[1]
output_file = sys.argv[2]

k = 0  # embedding dimensionality (read from the header line)
V = 0  # vocabulary size, shrunk when the '</s>' token is skipped
with open(output_file, 'w') as fout:
    with open(input_file, 'r') as fin:
        first_line = True
        count = 0
        for line in fin:
            line = line.strip()
            if first_line:
                # Header line: "<vocab_size> <dims>".
                first_line = False
                V, k = line.split()
                V = int(V)
                k = int(k)
                continue
            fs = line.split()
            if fs[0] == '</s>':
                # Skip the sentence-boundary token and shrink the
                # expected vocabulary accordingly.
                V -= 1
                continue
            assert len(fs) == k + 1
            # Emit the quoted word followed by its k vector components,
            # tab-separated, one word per line.
            fout.write('"' + fs[0] + '"\t')
            fout.write('\t'.join(fs[1:]))
            fout.write('\n')
            count += 1
            if count == V:
                break
    assert count == V
# BUG FIX: `print 'Done'` is Python-2-only syntax; the parenthesized call
# prints the same single string under both Python 2 and 3.
print('Done')
3288559 | import numpy as np
from metod_alg import metod_analysis as mt_ays
def metod_analysis_sog():
    """
    Calculates the total number of times the METOD algorithm condition
    fails for trajectories that belong to the same region of attraction
    ("sm") and different regions of attraction ("nsm"), and saves all
    results (per-beta comparison matrices, check counts, failure
    proportions and summary quantities) to CSV files.
    """
    # Experiment configuration: candidate beta values, problem dimensions
    # and the sum-of-Gaussians function-generation parameters.
    test_beta = [0.001, 0.01, 0.1, 0.2]
    num_functions = 100
    num_points = 100
    d = 20
    p = 10
    lambda_1 = 1
    lambda_2 = 10
    sigma_sq = 0.7
    projection = False
    tolerance = 0.0001
    option = 'minimize_scalar'
    met = 'Brent'
    initial_guess = 0.005
    usage = 'metod_algorithm'
    bound_1 = 0
    bound_2 = 1
    relax_sd_it = 1
    number_its_compare = 3
    num = 1
    # Run the full analysis once; it returns aggregated matrices indexed
    # by position in test_beta.
    (all_comparison_matrix_nsm_total,
     total_number_of_checks_nsm_total,
     all_comparison_matrix_sm_total,
     total_number_of_checks_sm_total,
     calculate_sum_quantities_nsm_each_func,
     store_all_its,
     store_all_norm_grad) = (mt_ays.main_analysis_sog
                             (d, test_beta,
                              num_functions,
                              num_points, p,
                              sigma_sq,
                              lambda_1, lambda_2,
                              projection, tolerance,
                              option, met,
                              initial_guess, bound_1,
                              bound_2, usage,
                              relax_sd_it, num,
                              number_its_compare))
    np.savetxt('sog_store_all_its_nsm_d=%s_%s_relax_c=%s_num=%s_%s.csv' %
               (d, projection, relax_sd_it, num, met),
               store_all_its, delimiter=",")
    np.savetxt('sog_store_all_grad_norms_nsm_d=%s_%s_relax_c=%s_num=%s_%s.csv' %
               (d, projection, relax_sd_it, num, met),
               store_all_norm_grad, delimiter=",")
    # One set of output files per beta value.
    index = 0
    for beta in test_beta:
        max_b = np.zeros(2)
        np.savetxt('sog_beta=%s_nsm_d=%s_%s_relax_c=%s_num=%s_%s.csv' %
                   (beta, d, projection, relax_sd_it, num, met),
                   all_comparison_matrix_nsm_total[index], delimiter=",")
        np.savetxt('sog_beta=%s_tot_nsm_d=%s_%s_relax_c=%s_num=%s_%s.csv' %
                   (beta, d, projection, relax_sd_it, num, met),
                   total_number_of_checks_nsm_total[index], delimiter=",")
        np.savetxt('sog_beta=%s_sm_d=%s_%s_relax_c=%s_num=%s_%s.csv' %
                   (beta, d, projection, relax_sd_it, num, met),
                   all_comparison_matrix_sm_total[index], delimiter=",")
        np.savetxt('sog_beta=%s_tot_sm_d=%s_%s_relax_c=%s_num=%s_%s.csv' %
                   (beta, d, projection, relax_sd_it, num, met),
                   total_number_of_checks_sm_total[index], delimiter=",")
        # Failure proportions over the first 11 iterations.
        prop_nsm = (all_comparison_matrix_nsm_total[index, :11, :11] /
                    total_number_of_checks_nsm_total[index, :11, :11])
        prop_sm = (all_comparison_matrix_sm_total[index, :11, :11] /
                   total_number_of_checks_sm_total[index, :11, :11])
        np.savetxt('sog_beta=%s_nsm_d=%s_prop_%s_relax_c=%s_num=%s_%s.csv' %
                   (beta, d, projection, relax_sd_it, num, met),
                   prop_nsm, delimiter=",")
        np.savetxt('sog_beta=%s_sm_d=%s_prop_%s_relax_c=%s_num=%s_%s.csv' %
                   (beta, d, projection, relax_sd_it, num, met),
                   prop_sm, delimiter=",")
        # Index and value of the worst (largest) summed quantity.
        max_b[0] = np.argmax(calculate_sum_quantities_nsm_each_func[index])
        max_b[1] = np.max(calculate_sum_quantities_nsm_each_func[index])
        np.savetxt('sog_calculate_quantities_beta=%s_d=%s_prop_%s_relax_c=%s_'
                   'num=%s_%s.csv' % (beta, d, projection, relax_sd_it, num,
                                      met), max_b, delimiter=",")
        index += 1
if __name__ == "__main__":
metod_analysis_sog()
| StarcoderdataPython |
3333048 | # ==============================================================================
# Copyright 2019 - <NAME>
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Tests for seeded_random op """
import numpy as np
from diplomacy_research.utils.process import run_in_separate_process
def run_seeded_random():
    """ Run tests for seeded_random """
    # Imported lazily because this function is executed in a separate process.
    from diplomacy_research.utils.tensorflow import tf, load_so_by_name
    seeded_random_so = load_so_by_name('seeded_random')
    sess = tf.InteractiveSession()
    seeds = tf.placeholder(shape=[None], dtype=tf.int32)

    # Static shape - With graph seed provided
    op_1 = seeded_random_so.seeded_random(seeds=seeds, offset=1, size=100, seed=75, seed2=0)
    output_1 = sess.run(op_1, feed_dict={seeds: [12345, 0, 12345, 0]})
    output_2 = sess.run(op_1, feed_dict={seeds: [12345, 0, 12345, 0]})
    assert op_1.shape.as_list() == [None, 100]
    assert output_1.shape == (4, 100)
    assert output_2.shape == (4, 100)
    # Rows with the same explicit seed must be identical, within and across runs.
    assert np.allclose(output_1[0], output_1[2])
    assert np.allclose(output_1[0], output_2[0])
    # Rows with seed 0 are still reproducible because an op-level seed (75) was given.
    assert np.allclose(output_1[1], output_1[3])  # Since a seed was provided
    assert np.allclose(output_2[1], output_2[3])  # Since a seed was provided
    assert np.allclose(output_1[1], output_2[1])  # Since a seed was provided
    assert np.allclose(output_1[3], output_2[3])  # Since a seed was provided
    # Different seeds must produce different rows.
    assert np.all(output_1[0] != output_1[1])

    # Dynamic shape - No seed
    shape = tf.placeholder(shape=(), dtype=tf.int32)
    op_2 = seeded_random_so.seeded_random(seeds=seeds, offset=2, size=shape, seed=0, seed2=0)
    output_1 = sess.run(op_2, feed_dict={seeds: [12345, 0, 12345, 0], shape: 200})
    output_2 = sess.run(op_2, feed_dict={seeds: [12345, 0, 12345, 0], shape: 200})
    assert op_2.shape.as_list() == [None, None]
    assert output_1.shape == (4, 200)
    assert output_2.shape == (4, 200)
    assert np.allclose(output_1[0], output_1[2])
    assert np.allclose(output_1[0], output_2[0])
    # Without any op-level seed, the zero-seed rows must differ between evaluations.
    assert np.all(output_1[1] != output_1[3])
    assert np.all(output_2[1] != output_2[3])
    assert np.all(output_1[1] != output_2[1])
    assert np.all(output_1[3] != output_2[3])
def test_seeded_random():
    """ Tests for the seeded random op """
    # Run in a fresh process so the TF graph/session state cannot leak
    # between tests; fail if the subprocess takes longer than 30 seconds.
    run_in_separate_process(target=run_seeded_random, timeout=30)
| StarcoderdataPython |
9717967 | from os import getcwd
from webtest import TestApp
import transaction
from tg import AppConfig
from tg.configuration import milestones
from tg.configuration.auth import TGAuthMetadata
from tgext.pluggable import plug, app_model
from sqlalchemy import Integer, Column, Unicode, inspect
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base
from zope.sqlalchemy import ZopeTransactionExtension
import ming
from ming import Session
from ming.odm import ThreadLocalODMSession
from ming.odm.declarative import MappedClass
from ming.odm import FieldProperty
from ming import schema as s
from depot.manager import DepotManager
class FakeAppPackage(object):
    """Minimal stand-in for a TurboGears application package.

    AppConfig only needs a few attributes/submodules to exist
    (``lib.helpers``, ``lib.app_globals.Globals``, ``websetup.bootstrap``),
    so nested classes emulate that module layout without a real package.
    """
    __file__ = __file__
    __name__ = 'tests'

    class lib(object):
        # Emulates the package's ``lib.helpers`` module.
        class helpers(object):
            pass
        helpers = helpers()

        # Emulates ``lib.app_globals`` with its Globals class.
        class app_globals(object):
            class Globals():
                pass
            app_globals = app_globals()

    class websetup(object):
        # No-op bootstrap hook invoked by TurboGears setup machinery.
        def bootstrap(*args, **kwargs):
            pass
class FakeSQLAModel(object):
    """SQLAlchemy model stub exposing the ``DBSession``/``User`` attributes
    TurboGears expects from an application's model module."""

    def __init__(self):
        self.DeclarativeBase = declarative_base()
        # ZopeTransactionExtension ties the session to the `transaction`
        # package used by flush_db_changes().
        self.DBSession = scoped_session(sessionmaker(autoflush=True, autocommit=False,
                                                     extension=ZopeTransactionExtension()))

        # The User class must be created after DeclarativeBase exists,
        # hence the definition inside __init__.
        class User(self.DeclarativeBase):
            __tablename__ = 'tg_user'
            user_id = Column(Integer, autoincrement=True, primary_key=True)
            user_name = Column(Unicode(16), unique=True, nullable=False)
            email_address = Column(Unicode(255), unique=True, nullable=False)
            display_name = Column(Unicode(255))
            password = Column(Unicode(255), nullable=False)

        self.User = User

    def init_model(self, engine):
        """Bind the session to *engine* and recreate the schema from scratch."""
        self.DBSession.configure(bind=engine)
        # Drop first so repeated test runs always start with empty tables.
        self.DeclarativeBase.metadata.drop_all(engine)
        self.DeclarativeBase.metadata.create_all(bind=engine)
class FakeMingModel(object):
    """Ming (MongoDB) model stub, mirroring FakeSQLAModel for the ming backend."""

    def __init__(self):
        self.ming_session = Session()
        self.DBSession = ThreadLocalODMSession(self.ming_session)

        # Defined inside __init__ so __mongometa__ can capture this
        # instance's session.
        class User(MappedClass):
            class __mongometa__:
                session = self.DBSession
                name = 'tg_user'
                unique_indexes = [('user_name',), ('email_address',)]
            _id = FieldProperty(s.ObjectId)
            user_name = FieldProperty(s.String)
            email_address = FieldProperty(s.String)
            display_name = FieldProperty(s.String)
            password = FieldProperty(s.String)

        self.User = User

    def init_model(self, datastore):
        """Bind the session to *datastore*, wipe existing data and rebuild indexes."""
        self.ming_session.bind = datastore
        try:
            # On MIM drop all data
            datastore.conn.drop_all()
        except TypeError:
            # On MongoDB drop database
            datastore.conn.drop_database(datastore.db)
        ming.odm.Mapper.compile_all()
        for mapper in ming.odm.Mapper.all_mappers():
            self.ming_session.ensure_indexes(mapper.collection)
class FakeUser(object):
    """
    Fake user that emulates an users without the need to actually
    query it from the database, it is able to trick sprox when
    resolving relations to the blog post Author.
    """

    def __int__(self):
        # The fake user always has primary key 1.
        return 1

    def __getattr__(self, item):
        if item == 'user_id':
            return 1
        elif item == '_id':
            # Ming-style id: return the object itself (int(self) yields 1).
            return self
        # BUG FIX: the original delegated to super().__getattr__(item), but
        # `object` defines no __getattr__, which produced a misleading
        # "'super' object has no attribute '__getattr__'" error.  Raise the
        # conventional AttributeError for unknown attributes instead.
        raise AttributeError(item)
class TestAuthMetadata(TGAuthMetadata):
    """Auth metadata stub: every request authenticates as the fixed
    identity 'user', which belongs to the 'managers' group and holds the
    'tgappcategories' permission."""

    def authenticate(self, environ, identity):
        """Accept any credentials and map them to the 'user' identity."""
        return 'user'

    def get_user(self, identity, userid):
        """Resolve any truthy userid to a FakeUser; None otherwise."""
        return FakeUser() if userid else None

    def get_groups(self, identity, userid):
        """Authenticated users belong to the 'managers' group only."""
        return ['managers'] if userid else []

    def get_permissions(self, identity, userid):
        """Authenticated users hold only the 'tgappcategories' permission."""
        return ['tgappcategories'] if userid else []
def configure_app(using):
    """Build a TurboGears AppConfig wired for tgappcategories tests.

    *using* selects the storage backend: ``'sqlalchemy'`` or ``'ming'``.
    The call order below matters: milestones must be reset before a new
    AppConfig is created, and plug() must run last.
    """
    # Simulate starting configuration process from scratch
    milestones._reset_all()
    app_cfg = AppConfig(minimal=True)
    app_cfg.renderers = ['kajiki']
    app_cfg.default_renderer = 'kajiki'
    app_cfg.use_dotted_templatenames = True
    app_cfg.package = FakeAppPackage()
    app_cfg.use_toscawidgets2 = True
    app_cfg['tw2.enabled'] = True
    app_cfg.sa_auth.authmetadata = TestAuthMetadata()
    app_cfg['beaker.session.secret'] = app_cfg['session.secret'] = 'SECRET'
    # NOTE(review): auth_backend is 'ming' even for the sqlalchemy run --
    # looks intentional for these tests, but confirm.
    app_cfg.auth_backend = 'ming'
    if using == 'sqlalchemy':
        app_cfg.package.model = FakeSQLAModel()
        app_cfg.use_sqlalchemy = True
        app_cfg['sqlalchemy.url'] = 'sqlite://'
        app_cfg.use_transaction_manager = True
        app_cfg['tm.enabled'] = True
        app_cfg.SQLASession = app_cfg.package.model.DBSession
    elif using == 'ming':
        app_cfg.package.model = FakeMingModel()
        app_cfg.use_ming = True
        # mim:// is ming's in-memory MongoDB emulation.
        app_cfg['ming.url'] = 'mim:///testcategories'
        app_cfg.MingSession = app_cfg.package.model.DBSession
    else:
        raise ValueError('Unsupported backend')
    app_cfg.model = app_cfg.package.model
    app_cfg.DBSession = app_cfg.package.model.DBSession
    if using == 'ming':  # ugly fix: depot can be configured just once
        # that if is just wrong, if sqlalchemy tests starts before it doesn't work
        storages = {
            'category_images': 'category_image',
        }
        for storage in storages:
            prefix = 'depot.%s.' % storage
            print('Configuring Storage %s*' % prefix)
            DepotManager.configure(storage, {
                'depot_backend_type': 'depot.io.memory.MemoryFileStorage',
                'depot.category_images.backend': 'depot.io.memory.MemoryFileStorage',
                'depot.category_images.prefix': 'category_images/'
            }, prefix)
            DepotManager.alias(storages[storage], storage)
    plug(app_cfg, 'tgappcategories', plug_bootstrap=False)
    return app_cfg
def create_app(app_config, auth=False):
    """Build a WebTest wrapper around the configured TG application.

    When *auth* is true the fake REMOTE_USER='user' identity is injected
    into every request.
    """
    # Imported for its side effects (brings in the plugged app's lib module).
    from tgappcategories import lib

    wsgi_app = app_config.make_wsgi_app(skip_authentication=True)

    # Reset the cached middleware so make_middleware builds a fresh one
    # for every app created during the test run.
    DepotManager._middleware = None
    wsgi_app = DepotManager.make_middleware(wsgi_app)

    if auth:
        browser = TestApp(wsgi_app, extra_environ=dict(REMOTE_USER='user'), relative_to=getcwd())
    else:
        browser = TestApp(wsgi_app)

    # Issue one throwaway request so TurboGears finalizes its configuration.
    browser.get('/non_existing_url_force_app_config_update', status=404)
    return browser
def flush_db_changes():
    """Flush the pluggable app's session and commit the active transaction,
    making pending test fixtures visible to subsequent requests."""
    app_model.DBSession.flush()
    transaction.commit()
| StarcoderdataPython |
5011319 | #!/usr/bin/env python2.3
#########
#
# Copyright (c) 2005 <NAME>
#
# This file is part of the vignette-removal library.
#
# Vignette-removal is free software; you can redistribute it and/or modify
# it under the terms of the X11 Software License (see the LICENSE file
# for details).
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the X11
# License for more details.
#
#########
import string
# from numarray import *
import numpy as np
from emor import EMoR
# Input/output data files.
dorffile = 'data/dorfCurves.txt'
emorfile = 'data/emor.txt'
pemorfile = 'data/pemor.txt'
# NOTE(review): the line below immediately overrides the .txt path above;
# the .npy format is what PEMoR.save()/load() actually use, so the
# previous assignment is dead.
pemorfile = 'data/pemor.npy'
def parseDorfFile(filename):
    """Parse a DoRF (Database of Response Functions) text file.

    Each record consists of: a name line, a curve-type line, a header line,
    one or more lines of irradiance samples (lines starting with a digit),
    another header line, and one or more lines of brightness samples.

    Returns:
        dict mapping curve name -> (curvetype, I, B), where I and B are
        1-D numpy float arrays.
    """
    with open(filename, 'r') as fp:
        lines = fp.readlines()
    i = 0
    dorf = {}
    nlines = len(lines)
    while i < nlines:
        name = lines[i].strip(); i += 1
        curvetype = lines[i].strip(); i += 1
        I = []
        B = []
        i += 1  # skip the header line preceding the I samples
        while i < nlines and lines[i][0] in string.digits:
            I.extend(map(float, lines[i].split()))
            i += 1
        i += 1  # skip the header line preceding the B samples
        while i < nlines and lines[i][0] in string.digits:
            B.extend(map(float, lines[i].split()))
            i += 1
        # BUG FIX: the original stored the *builtin* `type` in the tuple
        # instead of the parsed curve type string.
        dorf[name] = (curvetype, np.asarray(I), np.asarray(B))
    return dorf
class PEMoR(EMoR):
    """EMoR model with a Gaussian prior over the basis coefficients.

    The prior's inverse covariance is estimated from the DoRF curve
    database and can be saved to / restored from a .npy file.
    """

    def setDorf(self, dorf):
        """Estimate the inverse covariance of EMoR coefficients from *dorf*,
        a dict as returned by parseDorfFile (name -> (type, I, B))."""
        # Each record's third element is the brightness curve B.
        curves = np.array([entry[2] for entry in dorf.values()])
        # Project the measured curves onto the EMoR basis; coefficients are
        # column vectors, hence the transpose.
        c = self.project(np.transpose(curves))
        cov = np.dot(c, np.transpose(c))
        self.invsigma = np.linalg.inv(cov)

    def negLogLikelihood(self, c):
        """Return the (unnormalized) negative log-likelihood c^T * invsigma * c."""
        ca = np.asarray(c)
        return np.dot(np.transpose(ca), np.dot(self.invsigma, ca))

    def save(self, filename):
        """Persist the inverse covariance matrix in numpy .npy format."""
        np.save(filename, self.invsigma)

    def load(self, filename):
        """Restore the inverse covariance matrix written by save()."""
        self.invsigma = np.load(filename)
if __name__ == "__main__":
    # Build the coefficient prior from the DoRF curves, round-trip it
    # through disk, and spot-check likelihood values.
    dorf = parseDorfFile(dorffile)
    from emor import EMoR
    emor = EMoR(emorfile)
    pemor = PEMoR(emorfile)
    pemor.setDorf(dorf)
    pemor.save(pemorfile)
    pemor2 = PEMoR(emorfile)
    pemor2.load(pemorfile)
    nll = pemor2.negLogLikelihood
    # for curvetype, I, B in dorf.values():
    #     c = emor.project(B)
    #     print dot(transpose(c),dot(invsigma,c))
    print(nll([1,1,1,1,1]))
    print(nll([0.271685170126, 0.763604793495, -0.00019810334775,
               -0.0389643127791, 0.0720207625252]))
    print(nll([-1.78447991053, -2.42728052987, -0.487182472277,
               0.810791802423, 0.0929491177729]))
    print(nll([-1.28074878711, 0.236985072824, -0.170813711448,
               0.533237435281, -0.00836594147919]))
| StarcoderdataPython |
3519493 | <reponame>LBJ-Wade/gpr4im
'''
Setup script, to make package pip installable
'''
from setuptools import setup

# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Package metadata; the README is reused verbatim as the PyPI long description.
setup(name = 'gpr4im',
      version = '1.0.1',
      description = 'Python package for using GPR as a foreground removal technique in 21cm intensity mapping',
      author = '<NAME>',
      author_email = '<EMAIL>',
      packages = ['gpr4im'],
      url = 'https://github.com/paulassoares/gpr4im',
      long_description = long_description,
      long_description_content_type = 'text/markdown',
      install_requires = ['numpy','scipy','matplotlib','astropy','pandas','GPy','getdist','jupyter'],
      )
| StarcoderdataPython |
from django.contrib import admin

from .models import Class

# Expose the Class model in the Django admin using the default ModelAdmin.
admin.site.register(Class)
5101270 | <gh_stars>1-10
'''PyPFASST SDC tests.'''
import math
import numpy as np
import pfasst.imex
import linearad
# Quadrature rules to test: (node type, number of nodes) pairs.
nodes = [ ('GL', 3), ('GL', 5), ('GL', 9), ('GL', 13) ]
# Per-rule upper bound on log10 of the final sweep error.
tolerances = [ -7, -13, -13, -13 ]
# Number of SDC sweeps performed per rule.
sweeps = 12

# Test problem: linear advection-diffusion on a periodic domain.
size = 128
feval = linearad.LinearAD(size, Lx=1.0, nu=0.02, acst=5.0, t0=0.1)
dt = 0.0007
def test_sdc():
    """Check IMEX SDC convergence on the linear advection-diffusion problem.

    For every quadrature rule in *nodes*, the log10 of the maximum absolute
    error against the exact solution at t=dt must fall below the matching
    entry of *tolerances* after *sweeps* SDC sweeps.
    """
    for i, (qtype, nnodes) in enumerate(nodes):
        # set initial condition at t=0 and the exact reference at t=dt
        q0 = np.zeros(size)
        b = np.zeros((nnodes, size))
        feval.exact(0.0, q0)
        q1 = np.zeros(size)
        feval.exact(dt, q1)
        qSDC = np.zeros((nnodes, size))
        fSDC = np.zeros((2, nnodes, size))
        # spread the initial condition to all nodes and evaluate both the
        # implicit (f1) and explicit (f2) pieces there
        for m in range(nnodes):
            qSDC[m] = q0
            feval.f1_evaluate(q0, 0.0, fSDC[0, m])
            feval.f2_evaluate(q0, 0.0, fSDC[1, m])
        # create sdc object
        sdc = pfasst.imex.IMEXSDC(qtype, nnodes)
        # perform the sweeps
        for s in range(sweeps):
            b[0] = q0
            sdc.sweep(b, 0.0, dt, qSDC, fSDC, feval)
        q2 = qSDC[-1]
        err = np.log10(abs(q1 - q2).max())
        # FIX: the original used a Python-2-only print statement; the
        # parenthesized single-argument form behaves identically on
        # Python 2 and is valid Python 3.
        print('node: %s; sweep %d; log error: %lf' % ((qtype, nnodes), s + 1, err))
        assert err < tolerances[i]
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    test_sdc()
| StarcoderdataPython |
1804724 | <reponame>dtoma/python<gh_stars>0
import sqlite3
import urllib.parse as urlp
from contextlib import contextmanager
import click
import feedparser
from flask import Flask, g, redirect, render_template, request, url_for
# Path of the SQLite database file, relative to the working directory.
DATABASE = "feed.db"

app = Flask(__name__)
@contextmanager
def ignore_table_exists(msg):
    """Announce *msg*, then silence the OperationalError raised when the
    table being created already exists."""
    print(msg)
    try:
        yield
    except sqlite3.OperationalError:
        print(" - already exists")
class ignore_row_exists:
    """Silence the IntegrityError raised when inserting a duplicate row.

    Class-based context manager equivalent of the generator version: a
    duplicate unique key means the row is already stored, which is fine
    for insert-if-absent usage; any other exception propagates.
    """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        if exc_type is not None and issubclass(exc_type, sqlite3.IntegrityError):
            print("Row already exists")
            return True  # swallow the duplicate-row error
        return False
def get_db():
    """Get a handle on the database.

    The connection is memoized on flask's per-request ``g`` object and is
    closed by close_connection() when the app context tears down.
    """
    db = getattr(g, "_database", None)
    if db is None:
        db = g._database = sqlite3.connect(DATABASE)
    # Row factory gives dict-like access to columns (e.g. row["link"]).
    db.row_factory = sqlite3.Row
    return db
def query_db(query, args=(), one=False):
    """Run *query* with *args* against the app database and return the rows.

    With one=True, return just the first row (or None when empty).
    """
    cursor = get_db().execute(query, args)
    rows = cursor.fetchall()
    cursor.close()
    if one:
        return rows[0] if rows else None
    return rows
def add_feed_items_to_db(feed_url, cur):
    """Fetch the latest items of the RSS feed at *feed_url* and insert them
    via *cur*, silently skipping rows that already exist (even ones that
    were read before).  Returns the feed's site title.
    """
    parsed = feedparser.parse(feed_url)
    site = parsed.feed.title
    for entry in parsed.entries:
        with ignore_row_exists():
            # Links are stored percent-quoted; new items start as unread (0).
            cur.execute(
                "INSERT INTO feed_items VALUES (?,?,?,?,?)",
                (site, urlp.quote(entry.link), entry.title, entry.description, 0),
            )
    return site
@app.teardown_appcontext
def close_connection(exception):
    """Close the connection to the database at the end of each request.

    *exception* is supplied by Flask's teardown hook and is unused here.
    """
    db = getattr(g, "_database", None)
    if db is not None:
        db.close()
@app.route("/")
def index():
"""Render the index page with all the unread items"""
for feed in query_db(""):
add_feed_items_to_db(feed["link"], get_db().cursor())
return render_template(
"index.html", feed_items=query_db("SELECT * FROM feed_items WHERE read=0")
)
@app.route("/rss")
def rss():
"""Add a (potentially) new RSS feed and all its items to the database"""
feed_url = request.args.get("feed_url")
db = get_db()
cur = db.cursor()
site = add_feed_items_to_db(feed_url, cur)
with ignore_row_exists():
cur.execute("INSERT INTO feeds VALUES (?,?)", (site, urlp.quote(feed_url)))
db.commit()
return redirect(url_for("index"))
@app.route("/read")
def read():
"""Mark an item as read and redirect to its url"""
link = request.args.get("link")
db = get_db()
cur = db.cursor()
cur.execute("UPDATE feed_items SET read=1 WHERE link=?", (urlp.quote(link),))
db.commit()
return redirect(link)
@app.cli.command()
def initdb():
    """Create the database"""
    conn = sqlite3.connect(DATABASE)
    cur = conn.cursor()
    # Both CREATE TABLE statements are wrapped so re-running initdb on an
    # existing database is a no-op instead of an error.
    with ignore_table_exists("Create table feed_items"):
        cur.execute(
            """CREATE TABLE feed_items
            (site text, link text UNIQUE, title text,
            description text, read integer)"""
        )
    with ignore_table_exists("Create table feeds"):
        cur.execute("""CREATE TABLE feeds (site text, link text UNIQUE)""")
    conn.commit()
    conn.close()
@app.cli.command()
def run():
    """Start the development server via the flask CLI."""
    app.run()

# Fallback entry point when the module is executed directly.
if __name__ == "__main__":
    app.run()
| StarcoderdataPython |
5136909 | import tensorflow as tf
class Optimize(tf.keras.Model):
    """TF1-style optimizer wrapper: builds an Adam training op that
    minimizes the squared error (y - Q_score)**2 with an exponentially
    decayed learning rate, updating only variables under the "q" scope.
    """

    def __init__(self, config, global_step):
        super(Optimize, self).__init__()
        with tf.variable_scope('optimize'):
            self.config = config
            self.global_step = global_step  # global step
            self.lr_start = config.lr_start  # initial learning rate
            self.lr_decay_rate = config.lr_decay_rate  # learning rate decay rate
            self.lr_decay_step = config.lr_decay_step  # learning rate decay step

    def build_optim(self, y, Q_score):
        with tf.variable_scope('optimize'):
            # NOTE(review): element-wise squared error, no reduce_mean --
            # confirm this is the intended loss reduction.
            self.loss = (y - Q_score) ** 2
            self.lr = tf.train.exponential_decay(self.lr_start, self.global_step, self.lr_decay_step, self.lr_decay_rate, staircase=False, name="learning_rate")
            # Optimizer
            self.opt = tf.train.AdamOptimizer(learning_rate=self.lr, name='opt')
            # Training op: only variables collected under the "q" scope are updated.
            self.train_step = self.opt.minimize(self.loss, global_step=self.global_step, var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="q"))
6649512 | <filename>src/ScreenScrapper.py<gh_stars>0
from PIL import Image, ImageGrab
import math
import logging, logging.config
# Configure logging from logging.conf, keeping any already-created loggers
# alive (disable_existing_loggers=False), then grab this module's logger.
logging.config.fileConfig(fname='logging.conf', disable_existing_loggers=False)
logger = logging.getLogger('screen_scrapper')
class ScreenScrapper:
    """Reads the fishing mini-game state from desktop screenshots.

    Probe-pixel coordinates are derived from the screen height, assuming a
    game window with width = 72/66 of the height, centered horizontally.
    """

    def __init__(self):
        # BUG FIX: the original declared these as mutable *class-level*
        # lists and mutated them in place (self.gameRes[0] = ...), so every
        # instance shared the same state.  They are instance attributes now.
        self.screenRes = ImageGrab.grab().size  # desktop resolution (w, h)
        self.gameRes = [0, 0]    # game window resolution
        self.margin = 0          # horizontal margin left of the game window
        self.currFrame = []      # current game frame (PIL pixel-access object)
        self.pullPix = [0, 0]    # pixel to check if we got fish on hook
        self.resPix = [0, 0]     # pixel to check if we should pull or release
        self.exitPix = [0, 0]    # pixel to exit popup window
        self.calcGameWindow()
        logger.info("Initializing screen scrapper")
        self.updateFrame()

    def calcGameWindow(self):
        """Calculates all data needed for operating with the game itself."""
        self.gameRes[0] = int((72 * self.screenRes[1]) / 66)
        self.gameRes[1] = self.screenRes[1]
        self.margin = int((self.screenRes[0] - self.gameRes[0]) / 2)
        # The ratios below are empirically tuned UI positions -- TODO confirm
        # they still match the current game version/layout.
        self.pullPix = [
            int(self.gameRes[0] / 2),
            int(self.gameRes[1] * 0.41)
        ]
        self.resPix = [
            int(self.gameRes[0] * 0.438),
            int(self.gameRes[1] * 0.4962)
        ]
        self.exitPix = [
            int(self.margin + self.gameRes[0] * 0.904),
            int(self.gameRes[1] * 0.207)
        ]
        logger.debug("Screen scrapper data:\n\tscreen_res=%s\n\tgame_res=%s\n\tmargin=%s\n\tpullPix=%s\n\tresPix=%s\n\texitPix=%s",
                     self.screenRes,
                     self.gameRes,
                     self.margin,
                     self.pullPix,
                     self.resPix,
                     self.exitPix
                     )

    def updateFrame(self):
        """Grab the game-window region of the screen and cache pixel access."""
        self.currFrame = ImageGrab.grab(bbox=(
            self.margin,
            0,
            self.margin + self.gameRes[0],
            self.gameRes[1])
        ).load()

    def saveImg(self):
        """Saves the current frame to a tmp.png file (mainly for development)."""
        pix = []
        for col in range(self.gameRes[1]):
            for row in range(self.gameRes[0]):
                pix.append(self.currFrame[row, col])
        im = Image.new("RGB", (self.gameRes[0], self.gameRes[1]))
        im.putdata(pix)
        im.save("tmp.png")
| StarcoderdataPython |
1857319 | <filename>promgen/mixins.py
# Copyright (c) 2019 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.auth.views import redirect_to_login
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404
from django.views.generic.base import ContextMixin
from promgen import models
class ContentTypeMixin:
    """Resolve a (model name, pk) pair from the URL into content-type fields."""

    def set_object(self, content_type, object_id):
        # Model names are unique within the promgen app label.
        self.content_type = ContentType.objects.get(
            model=content_type, app_label="promgen"
        )
        self.object_id = object_id
class RuleFormMixin:
    # When creating a single object, we want to use the
    # default form class and delegate to form_valid but
    # when we are importing multiple objects, we delegate
    # a form_import class to handle processing
    def post(self, request, content_type, object_id):
        """Try the single-object form first; fall back to the bulk-import form."""
        single = self.get_form(self.form_class)
        # Set an instance of our content_object here so that we can
        # pass it along for promtool to render
        single.instance.set_object(content_type, object_id)
        if single.is_valid():
            return self.form_valid(single)
        importer = self.get_form(self.form_import_class)
        if importer.is_valid():
            # Resolve the target model class and the concrete object the
            # imported rules should be attached to.
            ct = ContentType.objects.get_by_natural_key(
                "promgen", content_type
            ).model_class()
            content_object = ct.objects.get(pk=object_id)
            return self.form_import(importer, content_object)
        # Neither form validated: report the single-object form's errors.
        return self.form_invalid(single)
class PromgenPermissionMixin(PermissionRequiredMixin):
    """PermissionRequiredMixin variant that shows a warning message and
    redirects to the login page instead of raising PermissionDenied."""

    def handle_no_permission(self):
        messages.warning(self.request, self.get_permission_denied_message())
        return redirect_to_login(
            self.request.get_full_path(),
            self.get_login_url(),
            self.get_redirect_field_name(),
        )
class ShardMixin(ContextMixin):
    """Inject the Shard referenced by the ``pk`` URL kwarg into the template
    context under both ``object`` and ``shard``."""

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        if "pk" in self.kwargs:
            shard = get_object_or_404(models.Shard, id=self.kwargs["pk"])
            ctx["object"] = ctx["shard"] = shard
        return ctx
class ProjectMixin(ContextMixin):
    """Inject the Project referenced by the ``pk`` URL kwarg into the
    template context under both ``object`` and ``project``."""

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        if "pk" in self.kwargs:
            project = get_object_or_404(models.Project, id=self.kwargs["pk"])
            ctx["object"] = ctx["project"] = project
        return ctx
class ServiceMixin(ContextMixin):
    """Inject the Service referenced by the ``pk`` URL kwarg into the
    template context under both ``object`` and ``service``."""

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        if "pk" in self.kwargs:
            service = get_object_or_404(models.Service, id=self.kwargs["pk"])
            ctx["object"] = ctx["service"] = service
        return ctx
| StarcoderdataPython |
3248568 | <gh_stars>0
"""
.. module:: data_loader
:synopsis: Data Reader
.. moduleauthor:: <NAME>
This module is to read the data
Todo:
* Add more readers
* Add features to read from Kaggle using API
"""
import sys
sys.path.append('.')
import pandas as pd
import numpy as np
class readDataStore():
    """
    Reads the data tables of the store-sales dataset from the local data
    store (plain .csv files under *data_dir*).
    """

    def __init__(self, data_dir):
        self.data_dir = data_dir

    def _read_table(self, file_name):
        # Shared helper: load a plain CSV table from the data directory.
        return pd.read_csv(self.data_dir + file_name)

    def read_stores(self, file_name):
        """Load the stores information table and return it as a dataframe."""
        return self._read_table(file_name)

    def read_items(self, file_name):
        """Load the items information table and return it as a dataframe."""
        return self._read_table(file_name)

    def read_transactions(self, file_name):
        """Load the transactions table and return it as a dataframe."""
        return self._read_table(file_name)

    def read_oil(self, file_name):
        """Load the oil-price time series and return it as a dataframe."""
        return self._read_table(file_name)

    def read_holidays(self, file_name):
        """Load the holidays table and return it as a dataframe."""
        return self._read_table(file_name)

    def read_test(self, file_name):
        """Load the test table, indexed by ['store_nbr', 'item_nbr', 'date']."""
        frame = pd.read_csv(
            self.data_dir + file_name, usecols=[0, 1, 2, 3, 4],
            dtype={'onpromotion': bool},
            parse_dates=["date"]  # , date_parser=parser
        )
        return frame.set_index(['store_nbr', 'item_nbr', 'date'])

    def read_train(self, file_name):
        """Load training data (rows from 2016-01-01 on); unit_sales are
        converted to log1p, with non-positive sales mapped to 0."""
        return pd.read_csv(
            self.data_dir + file_name,
            usecols=[1, 2, 3, 4, 5],
            dtype={'onpromotion': bool},
            converters={'unit_sales': lambda u: np.log1p(
                float(u)) if float(u) > 0 else 0},
            parse_dates=["date"],
            skiprows=range(1, 66458909)  # 2016-01-01
        )
| StarcoderdataPython |
1676818 | print("Inside dags folder init") | StarcoderdataPython |
3520269 | <gh_stars>0
from .rgb_data_loader import RGBDataLoad
from .rgb_data_loader_L import RGB2LDataLoad
# Registry mapping dataset-name keys to their data-loader classes.
__datasets__ = {
    "dataload": RGBDataLoad,
    "dataload_L": RGB2LDataLoad
}
| StarcoderdataPython |
9670814 | <reponame>hackaugusto/scenario-player
import pytest
import yaml
from web3.gas_strategies.time_based import fast_gas_price_strategy, medium_gas_price_strategy
from scenario_player.exceptions.config import InsufficientMintingAmount, UDCTokenConfigError
from scenario_player.scenario import ScenarioYAML
from scenario_player.utils.configuration.base import ConfigMapping
from scenario_player.utils.configuration.settings import (
PFSSettingsConfig,
ScenarioConfigurationError,
ServiceSettingsConfig,
SettingsConfig,
UDCSettingsConfig,
UDCTokenSettings,
)
@pytest.fixture()
def file_for_insufficient_minting_test(tmp_path, minimal_yaml_dict):
    """Yield a scenario YAML whose token min_balance (5999) is below the
    UDC token max_funding (6000), the condition expected to trigger
    InsufficientMintingAmount when the scenario is loaded."""
    minimal_yaml_dict["settings"] = {"services": {"udc": {"token": {"max_funding": 6000}}}}
    minimal_yaml_dict["token"] = {"min_balance": 5999}
    tmp_file = tmp_path.joinpath("tmp.yaml")
    with open(tmp_file, "w") as outfile:
        yaml.dump(minimal_yaml_dict, outfile, default_flow_style=False)
    yield tmp_file
class TestSettingsConfig:
    """Unit tests for SettingsConfig (top-level ``settings`` section)."""

    def test_is_subclass_of_config_mapping(self, minimal_yaml_dict):
        """The class is a subclass of :class:`ConfigMapping`."""
        assert isinstance(SettingsConfig(minimal_yaml_dict), ConfigMapping)

    @pytest.mark.parametrize("key", ["notify", "timeout", "chain", "gas_price"])
    def test_class_returns_expected_default_for_key(
        self, key, expected_defaults, minimal_yaml_dict
    ):
        """If supported keys are absent, sensible defaults are returned for them when accessing
        them as a class attribute."""
        config = SettingsConfig(minimal_yaml_dict)
        try:
            actual = getattr(config, key)
        except AttributeError as e:
            # A missing attribute is a test failure, not an error.
            raise AssertionError(e)
        assert expected_defaults["settings"][key] == actual

    def test_settings_attr_returns_service_settings_config_instance(self, minimal_yaml_dict):
        """``.services`` exposes the nested services section as its own config object."""
        config = SettingsConfig(minimal_yaml_dict)
        assert isinstance(config.services, ServiceSettingsConfig)

    @pytest.mark.parametrize(
        "value, raises",
        argvalues=[("super-fast", True), (1.22, True), (11, False), ("fast", False)],
        ids=[
            "Unknown strategy key",
            "Non-int number",
            "valid integer value",
            "Valid strategy ket",
        ],
    )
    def test_validate_raises_exception_for_invalid_gas_price_values(
        self, value, raises, minimal_yaml_dict
    ):
        """Construction validates ``gas_price``: known strategy names or ints pass."""
        minimal_yaml_dict["settings"]["gas_price"] = value
        try:
            SettingsConfig(minimal_yaml_dict)
        except ScenarioConfigurationError:
            if not raises:
                pytest.fail("Raised ScenarioConfigurationError unexpectedly!")

    def test_gas_price_strategy_returns_a_callable(self, minimal_yaml_dict):
        """The :attr:`SettingsConfig.gas_price_strategy` returns a callable."""
        config = SettingsConfig(minimal_yaml_dict)
        assert callable(config.gas_price_strategy)

    @pytest.mark.parametrize(
        "strategy, expected_func",
        argvalues=[("fast", fast_gas_price_strategy), ("medium", medium_gas_price_strategy)],
    )
    def test_gas_price_strategy_property_returns_strategy_from_web3(
        self, strategy, expected_func, minimal_yaml_dict
    ):
        """The gas price strategy is dynamically fetched."""
        minimal_yaml_dict["settings"]["gas_price"] = strategy
        config = SettingsConfig(minimal_yaml_dict)
        assert config.gas_price_strategy == expected_func
class TestServiceSettingsConfig:
    """Unit tests for ServiceSettingsConfig (``settings.services`` section)."""

    def test_is_subclass_of_config_mapping(self, minimal_yaml_dict):
        """The class is a subclass of :class:`ConfigMapping`."""
        assert isinstance(ServiceSettingsConfig(minimal_yaml_dict), ConfigMapping)

    def test_pfs_attribute_returns_pfs_settings_config(self, minimal_yaml_dict):
        """``.pfs`` exposes the path-finding-service sub-config."""
        config = ServiceSettingsConfig(minimal_yaml_dict)
        assert isinstance(config.pfs, PFSSettingsConfig)

    def test_ucd_attribute_returns_udc_settings_config(self, minimal_yaml_dict):
        """``.udc`` exposes the user-deposit-contract sub-config."""
        config = ServiceSettingsConfig(minimal_yaml_dict)
        assert isinstance(config.udc, UDCSettingsConfig)
class TestPFSSettingsConfig:
    """Unit tests for PFSSettingsConfig (``settings.services.pfs`` section)."""

    def test_is_subclass_of_config_mapping(self, minimal_yaml_dict):
        """The class is a subclass of :class:`ConfigMapping`."""
        assert isinstance(PFSSettingsConfig(minimal_yaml_dict), ConfigMapping)

    def test_url_attribute_returns_default_none_if_key_absent(self, minimal_yaml_dict):
        """``.url`` defaults to None when no pfs section is configured."""
        config = PFSSettingsConfig(minimal_yaml_dict)
        assert config.url is None

    def test_url_attribute_returns_url_key_value_if_key_present(self, minimal_yaml_dict):
        """``.url`` reflects the configured pfs url."""
        minimal_yaml_dict["settings"]["services"] = {"pfs": {"url": "custom_url"}}
        config = PFSSettingsConfig(minimal_yaml_dict)
        assert config.url == "custom_url"
class TestUDCSettingsConfig:
    """Unit tests for UDCSettingsConfig (``settings.services.udc`` section)."""

    def test_is_subclass_of_config_mapping(self, minimal_yaml_dict):
        """The class is a subclass of :class:`ConfigMapping`."""
        assert isinstance(UDCSettingsConfig(minimal_yaml_dict), ConfigMapping)

    def test_token_attribute_is_an_instance_of_udctokenconfig(self, minimal_yaml_dict):
        """``.token`` exposes the nested token settings object."""
        assert isinstance(UDCSettingsConfig(minimal_yaml_dict).token, UDCTokenSettings)

    @pytest.mark.parametrize("key, expected", argvalues=[("enable", False), ("address", None)])
    def test_attributes_whose_key_is_absent_return_expected_default(
        self, key, expected, minimal_yaml_dict
    ):
        """Missing udc keys fall back to their documented defaults."""
        config = UDCSettingsConfig(minimal_yaml_dict)
        # Sentinel distinguishes "attribute missing" from a legitimate None.
        MISSING = object()
        assert getattr(config, key, MISSING) == expected

    @pytest.mark.parametrize("key, expected", argvalues=[("enable", True), ("address", "walahoo")])
    def test_attributes_return_for_key_value_if_key_present(
        self, key, expected, minimal_yaml_dict
    ):
        """Configured udc keys are surfaced as attributes."""
        minimal_yaml_dict["settings"] = {"services": {"udc": {key: expected}}}
        config = UDCSettingsConfig(minimal_yaml_dict)
        MISSING = object()
        assert getattr(config, key, MISSING) == expected
class TestUDCTokenConfig:
    """Unit tests for UDCTokenSettings (``settings.services.udc.token``)."""

    def test_is_subclass_of_config_mapping(self, minimal_yaml_dict):
        """The class is a subclass of :class:`ConfigMapping`."""
        assert isinstance(UDCTokenSettings(minimal_yaml_dict), ConfigMapping)

    @pytest.mark.parametrize(
        "key, expected",
        argvalues=[("deposit", True), ("balance_per_node", 1000), ("max_funding", 10_000)],
    )
    def test_attributes_return_for_key_value_if_key_present(
        self, key, expected, minimal_yaml_dict
    ):
        """Configured token keys are surfaced as attributes."""
        minimal_yaml_dict["settings"] = {"services": {"udc": {"token": {key: expected}}}}
        config = UDCTokenSettings(minimal_yaml_dict)
        # Sentinel distinguishes "attribute missing" from a legitimate None.
        MISSING = object()
        assert getattr(config, key, MISSING) == expected

    @pytest.mark.parametrize(
        "key, expected",
        argvalues=[("deposit", False), ("balance_per_node", 5000), ("max_funding", 5000)],
    )
    def test_attributes_whose_key_is_absent_return_expected_default(
        self, key, expected, minimal_yaml_dict
    ):
        """Missing token keys fall back to their documented defaults."""
        config = UDCTokenSettings(minimal_yaml_dict)
        MISSING = object()
        assert getattr(config, key, MISSING) == expected

    def test_balance_per_node_must_not_be_greater_than_max_funding(self, minimal_yaml_dict):
        """Validation rejects balance_per_node > max_funding."""
        minimal_yaml_dict["settings"] = {
            "services": {"udc": {"token": {"max_funding": 6000, "balance_per_node": 6001}}}
        }
        with pytest.raises(UDCTokenConfigError):
            UDCTokenSettings(minimal_yaml_dict)

    def test_insufficient_minting(self, file_for_insufficient_minting_test):
        """Loading a scenario whose token min_balance is too low must raise."""
        with pytest.raises(InsufficientMintingAmount):
            ScenarioYAML(
                file_for_insufficient_minting_test, file_for_insufficient_minting_test.parent
            )
| StarcoderdataPython |
181489 | from sklearn.base import BaseEstimator, ClassifierMixin
import collections
from typing import Any, Union
import pandas as pd
import numpy as np
from numpy.core._multiarray_umath import ndarray
from pandas import Series
from pandas.core.arrays import ExtensionArray
import sys
import buildBNStructure
import param4BN_learn_from_data_tranfs as parBN
import pysmile
import pysmile_license
from param4BN_learn_from_data_tranfs import readData
class BayesianNetworkClassifier(BaseEstimator, ClassifierMixin):
    """
    Bayesian network classifier backed by a pysmile (SMILE) network loaded
    from an .xdsl file; fit() learns the network's parameters with EM.
    """

    def __init__(self, bayesNetFile):
        """
        Load the network structure from *bayesNetFile* (.xdsl).
        """
        self.bayesNetFile = bayesNetFile
        # BUG FIX: the original stored read_file()'s return value (not the
        # Network object) and never set self.net, so fit() always failed
        # with AttributeError.  Keep the Network itself; read_file()
        # populates it in place.
        self.net = pysmile.Network()
        self.net.read_file(bayesNetFile)
        # Kept for backward compatibility with the original attribute name.
        self.bayesNet = self.net

    def change_evidence_and_update(self, net, node_id, outcome_id):
        """Set (or clear, when outcome_id is None) evidence on *node_id*
        and propagate beliefs, printing the resulting posteriors."""
        if outcome_id is not None:
            net.set_evidence(node_id, outcome_id)
        else:
            net.clear_evidence(node_id)
        net.update_beliefs()
        # NOTE(review): print_all_posteriors is not defined in this class --
        # confirm it is provided elsewhere (e.g. a subclass or mixin).
        self.print_all_posteriors(net)
        print("")

    def fit(self, X, y=None):
        """Learn the network parameters from (X, y) via EM.

        The data is round-tripped through a temporary CSV because pysmile's
        DataSet only reads from files.
        """
        dfAll = pd.concat([X, y], axis=1)
        tmpFileName = '../../data/tmpDataFit.csv'
        dfAll.to_csv(tmpFileName, index=False, header=True)
        ds = pysmile.learning.DataSet()
        ds.read_file(tmpFileName)
        # Match dataset columns to network nodes, then run EM learning.
        matching = ds.match_network(self.net)
        em = pysmile.learning.EM()
        em.learn(ds, self.net, matching)
        return self
def main():
    """Smoke test: fit the BN classifier on the AKI dataset (rows without NA)."""
    nrows = 2000
    bayesNetFile = "../models/AKI prediction_Stage_1_Learning_wo_Drug_v004_order03_4_training.xdsl"
    bnC = BayesianNetworkClassifier(bayesNetFile)
    dataWONanFN = '../../data/AKI_data_200325_full_dob_v02_forBN_wo_NA.csv'
    dataWONaN = readData(dataWONanFN, nrows)
    cols1 = dataWONaN.columns.to_list()
    print(cols1)
    targetColName = 'AKI48H'
    # pop() removes the target column, leaving only features in dataWONaN.
    y = pd.DataFrame(dataWONaN.pop(targetColName))
    x = dataWONaN
    # ds = pysmile.learning.DataSet()
    # ds.read_file("mydatafile.txt")
    bnC.fit(x, y)
    # NOTE(review): prints an empty line; likely leftover debug output.
    print(f'')

if __name__ == '__main__':
    main()
| StarcoderdataPython |
302563 | <gh_stars>10-100
# Copyright 2018 <NAME>. All rights reserved.
import logging
import pandas as pd
from src.instrumentation import logspeed
from src.cache import load_from_cache, save_to_cache
# Reuse the application-wide "HomeCredit" logger configured in main.
logger = logging.getLogger("HomeCredit")
@logspeed
def fte_missed_installments(train, test, y, db_conn, folds, cache_file):
    """Feature engineering: per-client aggregates of missed/late installments.

    Computes, per SK_ID_CURR, sum/mean/max/min/std/count of (a) the shortfall
    between the expected and actual installment amount and (b) the number of
    days paid late, then left-joins those columns onto train/test.  Results
    are cached in `cache_file` and reused on later runs.
    """
    cache_key_train = 'fte_missed_installments_train'
    cache_key_test = 'fte_missed_installments_test'
    # Check if cache file exist and if data for this step is cached
    train_cached, test_cached = load_from_cache(cache_file, cache_key_train, cache_key_test)
    if train_cached is not None and test_cached is not None:
        logger.info('fte_missed_installments - Cache found, will use cached data')
        train = pd.concat([train, train_cached], axis = 1, copy = False)
        test = pd.concat([test, test_cached], axis = 1, copy = False)
        return train, test, y, db_conn, folds, cache_file
    logger.info('fte_missed_installments - Cache not found, will recompute from scratch')
    ########################################################
    # Positive DIFF_EXPECTED_PMT = underpayment; positive DAYS_LATE = paid late.
    query = """
    select
        SK_ID_CURR,
        AMT_INSTALMENT - AMT_PAYMENT AS DIFF_EXPECTED_PMT,
        DAYS_ENTRY_PAYMENT - DAYS_INSTALMENT AS DAYS_LATE
    from
        installments_payments
    """
    installments_diff = pd.read_sql_query(query, db_conn)
    agg_installments_diff = installments_diff.groupby('SK_ID_CURR').agg(
        ["sum","mean","max","min","std", "count"]
    )
    # Flatten the (column, statistic) MultiIndex into names like 'DAYS_LATE_mean'.
    agg_installments_diff.columns = pd.Index([e[0] +"_"+ e[1] for e in agg_installments_diff.columns.tolist()])
    train = train.merge(agg_installments_diff, left_on='SK_ID_CURR', right_index=True, how = 'left', copy = False)
    test = test.merge(agg_installments_diff, left_on='SK_ID_CURR', right_index=True, how = 'left', copy = False)
    ########################################################
    logger.info(f'Caching features in {cache_file}')
    train_cache = train[agg_installments_diff.columns]
    test_cache = test[agg_installments_diff.columns]
    save_to_cache(cache_file, cache_key_train, cache_key_test, train_cache, test_cache)
    return train, test, y, db_conn, folds, cache_file
| StarcoderdataPython |
3433390 | import unittest
from marmot.evaluation.evaluation_metrics import get_spans, intersect_spans, sequence_correlation
class TestEvaluationUtils(unittest.TestCase):
    """Tests for get_spans / intersect_spans / sequence_correlation."""

    @staticmethod
    def _read_sequences(path):
        """Read blank-line-separated label sequences from `path`.

        Returns a list of sequences, each a list of stripped labels.  A
        trailing sequence not followed by a blank line is also kept.
        """
        sequences = []
        current = []
        # Context manager so the file handle is closed (the original leaked it).
        with open(path) as handle:
            for line in handle:
                if line.strip() == '':
                    sequences.append(current)
                    current = []
                else:
                    current.append(line.strip())
        # Only keep a non-empty trailing sequence: the original appended
        # unconditionally, which added a spurious empty sequence whenever the
        # file ended with a blank line.
        if current:
            sequences.append(current)
        return sequences

    def setUp(self):
        # Hypothesis (predicted) and reference label sequences.
        self.predictions = self._read_sequences('test_data/hyp')
        self.references = self._read_sequences('test_data/ref')

    def test_get_spans(self):
        sentence = [1, 1, 0, 1, 0, 1, 1, 1, 0]
        good_s, bad_s = get_spans(sentence)
        # test that right spans are extracted
        # NOTE(review): assertItemsEqual exists only on Python 2's unittest;
        # on Python 3 this must be assertCountEqual -- confirm target version.
        self.assertItemsEqual(good_s, [(0, 2), (3, 4), (5, 8)])
        self.assertItemsEqual(bad_s, [(2, 3), (4, 5), (8, 9)])
        all_spans = sorted(good_s + bad_s)
        all_items = [t for a_list in [sentence[b:e] for (b, e) in all_spans] for t in a_list]
        # test that the extracted spans cover the whole sequence
        self.assertItemsEqual(sentence, all_items)

    def test_intersect_spans(self):
        true_sentence = [1, 1, 0, 1, 0, 1, 1, 1, 0, 0]
        sentence = [0, 1, 1, 1, 0, 1, 1, 1, 1, 1]
        good_s, bad_s = get_spans(sentence)
        good_t, bad_t = get_spans(true_sentence)
        res_1 = intersect_spans(good_t, good_s)
        res_0 = intersect_spans(bad_t, bad_s)
        self.assertEqual(res_1, 4)
        self.assertEqual(res_0, 1)

    def test_sequence_correlation(self):
        sent_scores, total = sequence_correlation(self.references, self.predictions, good_label='OK', bad_label='BAD')
        self.assertAlmostEqual(sent_scores[0], 0.31578947)
        self.assertAlmostEqual(sent_scores[1], 0.8)
        self.assertAlmostEqual(total, 0.55789473)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6569311 | from typing import List, NamedTuple
import pandas as pd
from pyspark import SparkContext
from pyspark.sql import SparkSession, Row
from qanta import logging
from qanta.datasets.quiz_bowl import Question, QuestionDatabase
from qanta.guesser.abstract import AbstractGuesser
from qanta.util.io import safe_path
from qanta.util import constants as c
from qanta.util.environment import QB_ROOT
from qanta.util.spark_features import SCHEMA
from qanta.preprocess import format_guess
from qanta.extractors.stats import StatsExtractor
from qanta.extractors.lm import LanguageModel
from qanta.extractors.classifier import Classifier
from qanta.extractors.wikilinks import WikiLinks
from qanta.extractors.mentions import Mentions
from qanta.extractors.answer_present import AnswerPresent
from qanta.extractors.text import TextExtractor
# Module logger and the unit of work shipped to Spark executors: a question
# paired with the DataFrame of guesses made for it.
log = logging.get(__name__)
Task = NamedTuple('Task', [('question', Question), ('guess_df', pd.DataFrame)])
def instantiate_feature(feature_name: str):
    """
    Instantiate the feature extractor named by `feature_name`.
    @param feature_name: The feature to instantiate
    """
    log.info('Loading feature {} ...'.format(feature_name))
    if feature_name == 'lm':
        feature = LanguageModel()
    elif feature_name == 'deep':
        # NOTE(review): DeepExtractor is not imported in this module, so this
        # branch currently raises NameError rather than instantiating --
        # confirm whether the import was dropped intentionally.
        feature = DeepExtractor()
    elif feature_name == 'wikilinks':
        feature = WikiLinks()
    elif feature_name == 'answer_present':
        feature = AnswerPresent()
    elif feature_name == 'stats':
        feature = StatsExtractor()
    elif feature_name == 'classifier':
        feature = Classifier()
    elif feature_name == 'mentions':
        feature = Mentions()
    elif feature_name == 'text':
        feature = TextExtractor()
    else:
        log.info('"{}" is not a feature'.format(feature_name))
        raise ValueError('Wrong feature type')
    log.info('done')
    return feature
def task_list():
    """Build one Task per unique (qnum, sentence, token) position.

    Each Task pairs the question object with the deduplicated guesses made
    at that position.
    """
    guess_df = AbstractGuesser.load_all_guesses()
    question_db = QuestionDatabase()
    question_map = question_db.all_questions()
    tasks = []
    # Deduplicate so each (position, guess) pair appears exactly once.
    guess_df = guess_df[['qnum', 'sentence', 'token', 'guess', 'fold']].drop_duplicates(
        ['qnum', 'sentence', 'token', 'guess'])
    for name, guesses in guess_df.groupby(['qnum', 'sentence', 'token']):
        qnum = name[0]
        question = question_map[qnum]
        tasks.append(Task(question, guesses))
    return tasks
class GuesserScoreMap:
    """Lazily loads and caches the guesser score map on first access."""

    def __init__(self, directory_prefix=''):
        self.directory_prefix = directory_prefix
        self.map = None
        self.initialized = False

    def scores(self):
        """Return the score map, loading it from disk on the first call only."""
        if self.initialized:
            return self.map
        guess_df = AbstractGuesser.load_all_guesses(directory_prefix=self.directory_prefix)
        self.map = AbstractGuesser.load_guess_score_map(guess_df)
        self.initialized = True
        return self.map
def generate_guesser_feature():
    """Generate the 'guessers' VW feature namespace for every guess.

    For each (qnum, sentence, token, guess) key, emits each guesser's score
    (`<guesser>_score:<s> <guesser>_found:1`) or a `<guesser>_found:-1`
    marker when that guesser did not produce the guess, then writes the
    resulting rows out per fold.
    """
    sc = SparkContext.getOrCreate()  # type: SparkContext
    sql_context = SparkSession.builder.getOrCreate()
    log.info('Loading list of guess tasks')
    tasks = task_list()
    log.info('Using guesser directory prefix: {}'.format(QB_ROOT))
    guesser_score_map = GuesserScoreMap(directory_prefix=QB_ROOT)
    # Broadcast so each executor loads the (large) score map at most once.
    b_guesser_score_map = sc.broadcast(guesser_score_map)
    def f_eval(task: Task) -> List[Row]:
        # Executed on Spark workers; must only touch broadcast state.
        score_map = b_guesser_score_map.value.scores()
        df = task.guess_df
        result = []
        if len(df) > 0:
            # Refer to code in evaluate_feature_question for explanation why this is safe
            first_row = df.iloc[0]
            qnum = int(first_row.qnum)
            sentence = int(first_row.sentence)
            token = int(first_row.token)
            fold = first_row.fold
            for guess in df.guess:
                vw_features = []
                key = (qnum, sentence, token, guess)
                vw_features.append(format_guess(guess))
                for guesser in score_map:
                    if key in score_map[guesser]:
                        score = score_map[guesser][key]
                        feature = '{guesser}_score:{score} {guesser}_found:1'.format(
                            guesser=guesser, score=score)
                        vw_features.append(feature)
                    else:
                        # Mark guessers that did not propose this guess.
                        vw_features.append('{}_found:-1'.format(guesser))
                f_value = '|guessers ' + ' '.join(vw_features)
                row = Row(
                    fold,
                    qnum,
                    sentence,
                    token,
                    guess,
                    'guessers',
                    f_value
                )
                result.append(row)
        return result
    log.info('Beginning feature job')
    feature_rdd = sc.parallelize(tasks, 5000).flatMap(f_eval)
    feature_df = sql_context.createDataFrame(feature_rdd, SCHEMA).cache()
    write_feature_df(feature_df, ['guessers'])
def spark_batch(feature_names: List[str]):
    """Compute the named features for every guess task in parallel with Spark
    and write the results out per fold."""
    sc = SparkContext.getOrCreate()
    sql_context = SparkSession.builder.getOrCreate()
    log.info('Loading list of guess tasks')
    tasks = task_list()
    log.info('Number of tasks (unique qnum/sentence/token triplets): {}'.format(len(tasks)))
    log.info('Loading features: {}'.format(feature_names))
    features = {name: instantiate_feature(name) for name in feature_names}
    # Broadcast the instantiated extractors so executors don't re-load them.
    b_features = sc.broadcast(features)
    def f_eval(x: Task) -> List[Row]:
        return evaluate_feature_question(x, b_features)
    log.info('Beginning feature job')
    # Hand tuned value of 5000 to keep the task size below recommended 100KB
    feature_rdd = sc.parallelize(tasks, 5000 * len(feature_names)).flatMap(f_eval)
    feature_df = sql_context.createDataFrame(feature_rdd, SCHEMA).cache()
    write_feature_df(feature_df, feature_names)
def write_feature_df(feature_df, feature_names: list):
    """Write per-fold, per-feature parquet files partitioned by question number.

    Output layout: output/features/<fold>/<feature>.parquet
    """
    log.info('Beginning write job')
    for fold in c.VW_FOLDS:
        # Cache the fold slice because it is filtered once per feature name.
        feature_df_with_fold = feature_df.filter(feature_df.fold == fold).cache()
        for name in feature_names:
            filename = safe_path('output/features/{}/{}.parquet'.format(fold, name))
            feature_df_with_fold\
                .filter('feature_name = "{}"'.format(name))\
                .write\
                .partitionBy('qnum')\
                .parquet(filename, mode='overwrite')
        feature_df_with_fold.unpersist()
    log.info('Computation Completed, stopping Spark')
def evaluate_feature_question(task: Task, b_features) -> List[Row]:
    """Score every guess of `task` with every broadcast feature extractor.

    Returns one Row per (feature, guess) combination; empty list when the
    task carries no guesses.
    """
    features = b_features.value
    question = task.question
    guess_df = task.guess_df
    result = []
    if len(guess_df) > 0:
        for feature_name in features:
            feature_generator = features[feature_name]
            # guess_df is dataframe that contains values that are explicitly unique by
            # (qnum, sentence, token).
            #
            # This means that it is guaranteed that qnum, sentence, and token are all the same in
            # guess_df so it is safe and efficient to compute the text before iterating over guesses
            # as long as there is at least one guess. Additionally since a question is only ever
            # in one fold getting the fold is safe as well.
            first_row = guess_df.iloc[0]
            # Must cast numpy int64 to int for spark
            qnum = int(question.qnum)
            sentence = int(first_row.sentence)
            token = int(first_row.token)
            fold = first_row.fold
            text = question.get_text(sentence, token)
            feature_values = feature_generator.score_guesses(guess_df.guess, text)
            for f_value, guess in zip(feature_values, guess_df.guess):
                row = Row(
                    fold,
                    qnum,
                    sentence,
                    token,
                    guess,
                    feature_name,
                    f_value
                )
                result.append(row)
    return result
| StarcoderdataPython |
3570428 | #!/usr/bin/python3
#
# Take CSV and print out DNS A records for the Bind zone file
import fileinput
# Convert each CSV line into a BIND zone-file A record.
# Field layout assumed: field 1 = record name, field 2 = IP address.
# NOTE(review): a line with fewer than 3 comma-separated fields raises
# IndexError -- confirm the input is always well-formed.
for line in fileinput.input():
    machine = line.rstrip().split(",")
    print("{0} in a {1}".format(
        machine[1], machine[2]))
6680118 | # dataloader for 7-Scenes / when testing D-Net
import os
import random
import glob
import numpy as np
import torch
import torch.utils.data.distributed
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torchvision.transforms.functional as TF
class SevenScenesLoader(object):
    """Bundles the 7-Scenes test dataset with a single-sample DataLoader."""
    def __init__(self, args, mode):
        # Dataset yielding individual (img, depth) samples.
        self.t_samples = SevenScenesLoadPreprocess(args, mode)
        # Batch size 1, sequential order -- evaluation-only loader.
        self.data = DataLoader(self.t_samples, 1, shuffle=False, num_workers=1)
class SevenScenesLoadPreprocess(Dataset):
    """7-Scenes test split: loads RGB frames and metric depth maps.

    Depth is returned in metres; invalid sensor readings (value 65535) are
    zeroed out.  Images are ImageNet-normalised CHW float tensors.
    """
    def __init__(self, args, mode):
        self.args = args
        # Test set by Long et al. (CVPR 21)
        with open("./data_split/sevenscenes_long_test.txt", 'r') as f:
            self.filenames = f.readlines()
        self.mode = mode
        # ImageNet mean/std normalisation.
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.dataset_path = args.dataset_path
        # img resolution
        self.img_H = args.input_height  # 480
        self.img_W = args.input_width  # 640
    def __len__(self):
        return len(self.filenames)
    def __getitem__(self, idx):
        # Each split-file line: "<scene> <sequence id> <frame index>".
        scene_name, seq_id, img_idx = self.filenames[idx].split(' ')
        seq_id = int(seq_id)
        img_idx = int(img_idx)
        scene_dir = self.dataset_path + '/{}/seq-%02d/'.format(scene_name) % seq_id
        # img path and depth path
        img_path = scene_dir + '/frame-%06d.color.png' % img_idx
        depth_path = scene_dir + '/frame-%06d.depth.png' % img_idx
        # read img and depth (bilinear for colour, nearest for depth labels)
        img = Image.open(img_path).convert("RGB").resize(size=(self.img_W, self.img_H), resample=Image.BILINEAR)
        depth_gt = Image.open(depth_path).resize(size=(self.img_W, self.img_H), resample=Image.NEAREST)
        # img to tensor, scaled to [0, 1] then normalised
        img = np.array(img).astype(np.float32) / 255.0
        img = torch.from_numpy(img).permute(2, 0, 1)  # (3, H, W)
        img = self.normalize(img)
        depth_gt = np.array(depth_gt)[:, :, np.newaxis]
        depth_gt[depth_gt == 65535] = 0.0  # filter out invalid depth
        depth_gt = depth_gt.astype(np.float32) / 1000.0  # from mm to m
        depth_gt = torch.from_numpy(depth_gt).permute(2, 0, 1)  # (1, H, W)
        sample = {'img': img,
                  'depth': depth_gt,
                  'scene_name': '%s_seq-%02d' % (scene_name, seq_id),
                  'img_idx': str(img_idx)}
        return sample
| StarcoderdataPython |
9741606 | <gh_stars>0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import subprocess
from kpr import base
from kpr.utils import clients
class TestRoleList(base.TestCase):
    """Functional tests for `openstack role list` under different personas."""
    def setUp(self):
        super(TestRoleList, self).setUp()
        self.setup_project('project1', auditor=True, user=1)
        self.setup_project('project2', admin=False, auditor=False, user=0)
    def tearDown(self):
        super(TestRoleList, self).tearDown()
        self.teardown_project('project1', auditor=True, user=1)
        self.teardown_project('project2', admin=False, auditor=False, user=0)
    # The cloud administrator can list all roles.
    def test_list_all_roles_by_cloud_admin(self):
        try:
            self.os_run(
                command=['role', 'list'],
                project=clients.OS_ADMIN_PROJECT_NAME,
                username=clients.OS_ADMIN_USERNAME,
            )
        except subprocess.CalledProcessError as e:
            self.fail("Failed to list all Roles by cloud admin")
    # The cloud auditor can list all roles.
    def test_list_all_roles_by_cloud_admin_auditor(self):
        try:
            self.os_run(
                command=['role', 'list'],
                project=clients.OS_ADMIN_PROJECT_NAME,
                username=self.admin_auditor.name,
            )
        except subprocess.CalledProcessError as e:
            self.fail("Failed to list all Roles by cloud admin auditor")
    # The project1 administrator can list roles.
    def test_list_all_roles_by_project_admin(self):
        try:
            self.os_run(
                command=['role', 'list'],
                project=self.project1.name,
                username=self.project1_admin.name,
            )
        except subprocess.CalledProcessError as e:
            self.fail("Failed to list all Roles by project admin")
    # The project1 auditor can list roles.
    def test_list_all_roles_by_project_auditor(self):
        try:
            self.os_run(
                command=['role', 'list'],
                project=self.project1.name,
                username=self.project1_auditor.name,
            )
        except subprocess.CalledProcessError as e:
            self.fail("Failed to list all Roles by project auditor")
    # A regular project1 user must NOT be able to list roles (expects HTTP 403).
    def test_list_all_roles_by_project_user(self):
        try:
            self.os_run(
                project=self.project1.name,
                username=self.project1_user0.name,
                command=['role', 'list'],
            )
            self.fail("project user must not be permitted to list all role")
        except subprocess.CalledProcessError as e:
            self.assertRegex(e.output.decode('utf-8'), 'HTTP 403')
| StarcoderdataPython |
11221753 | """
Testing simple cases for pyxform
"""
from unittest import TestCase
#from ..pyxform import survey_from_json
from pyxform.survey import Survey
from pyxform.builder import create_survey_element_from_dict
# TODO:
# * test_two_questions_with_same_id_fails
# (get this working in json2xform)
class BasicJson2XFormTests(TestCase):
    def test_survey_can_have_to_xml_called_twice(self):
        """Serializing a survey to XML twice must produce identical output.

        Historically a survey object could only be exported once; this test
        guards against that regression by comparing two consecutive exports.
        """
        survey = Survey(name=u"SampleSurvey")
        question = create_survey_element_from_dict(
            {u'type': u'text', u'name': u'name', u'label': u'label'}
        )
        survey.add_child(question)
        first_export = survey.to_xml()
        second_export = survey.to_xml()
        self.assertEqual(first_export, second_export)
| StarcoderdataPython |
4924077 | # level order, preorder, inorder, post order
class Node:
    """A binary-tree node holding a value and two optional children."""

    def __init__(self, key):
        self.val = key      # payload stored at this node
        self.left = None    # left child (Node or None)
        self.right = None   # right child (Node or None)
def printInorder(root):
    """Print node values in in-order (left subtree, node, right subtree)."""
    if not root:
        return
    printInorder(root.left)
    print(root.val)
    printInorder(root.right)
def printPostorder(root):
    """Print node values in post-order (left subtree, right subtree, node)."""
    if not root:
        return
    printPostorder(root.left)
    printPostorder(root.right)
    print(root.val)
def printPreorder(root):
    """Print node values in pre-order (node, left subtree, right subtree)."""
    if not root:
        return
    print(root.val)
    printPreorder(root.left)
    printPreorder(root.right)
# Driver code
# Builds the sample tree:
#         1
#        / \
#       2   3
#      / \
#     4   5
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
print("Preorder traversal of binary tree is")
printPreorder(root)
print("\nInorder traversal of binary tree is")
printInorder(root)
print("\nPostorder traversal of binary tree is")
printPostorder(root)
| StarcoderdataPython |
268614 | # Generated by Django 3.1.12 on 2021-08-29 18:48
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: removes the ProjectUser model."""

    dependencies = [
        ('projects', '0022_auto_20210829_1747'),
    ]
    operations = [
        migrations.DeleteModel(
            name='ProjectUser',
        ),
    ]
| StarcoderdataPython |
8070962 | <filename>health_check.py
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import re
import socket
import sys
import time
import urllib.error
import urllib.request
import ping3
import pymemcache.client.base
import pymysql
import redis
import redis.sentinel
import stringcase
import yaml
# Script metadata.
VERSION = '0.0.1'
AUTHOR = 'zhouyl (<EMAIL>)'
# Log everything (DEBUG and up) to stdout with a timestamped format.
logging.basicConfig(
    level=logging.DEBUG,
    format="[%(asctime)s][%(levelname)s] - %(message)s",
    handlers=[
        logging.StreamHandler(sys.stdout)
    ]
)
log = logging.getLogger('health_check')
# Make ping3 raise exceptions on failure instead of returning False.
ping3.EXCEPTIONS = True
def human_size(size):
    """Format a byte count with binary (1024-based) unit prefixes, e.g. '1.5 KB'."""
    for unit in ('Bytes', 'KB', 'MB', 'GB', 'TB'):
        if abs(size) < 1024.0:
            break
        size /= 1024.0
    else:
        # Exhausted all listed units: report in petabytes.
        unit = 'PB'
    return "%3.1f %s" % (size, unit)
def no_zero_div(n1, n2):
    """Divide n1 by n2, returning 0 instead of raising on division by zero."""
    try:
        return n1 / n2
    except ZeroDivisionError:
        return 0
class BaseRule:
    """Common interface for health-check rules; subclasses override check()."""

    def check(self):
        """Run this rule's health check; the base implementation does nothing."""
        return None
class HttpRule(BaseRule):
    """Health check for an HTTP endpoint.

    Issues `requests` GET requests (pausing `intervals` seconds between
    them) and logs the latency and status of each one.
    """

    def __init__(self, url, maxtime=3, requests=1, intervals=0.5):
        """
        :param url: endpoint to request
        :param maxtime: per-request timeout in seconds
        :param requests: number of requests to issue
        :param intervals: pause between consecutive requests, in seconds
        """
        self._url = str(url)
        self._maxtime = float(maxtime)
        self._requests = int(requests)
        self._intervals = float(intervals)

    def check(self):
        log.info("检查 HTTP 连接 - URL: %s, 限定时间: %ds, 请求次数: %d",
                 self._url, self._maxtime, self._requests)
        for n in range(self._requests):
            if n > 0:
                time.sleep(self._intervals)
            start = time.time()
            try:
                # Close the response explicitly via the context manager (the
                # original code leaked the HTTP response object).
                with urllib.request.urlopen(self._url, timeout=self._maxtime) as r:
                    if r.getcode() != 200:
                        log.error('[第 %d 次请求][%.6fs] 发生错误: 状态码为 %d,',
                                  n + 1, time.time() - start, r.getcode())
                    else:
                        log.info("[第 %d 次请求][%.6fs] 请求响应成功",
                                 n + 1, time.time() - start, )
            except urllib.error.URLError as e:
                log.error('[第 %d 次请求][%.6fs] 发生错误: %s - %s',
                          n + 1, time.time() - start, e.__class__, e)
class PingRule(BaseRule):
    """Health check that pings a host via ICMP (ping3) and logs the latency."""
    def __init__(self, host, timeout=4, ttl=64, seq=0, size=56):
        """
        :param host: host name or IP address to ping
        :param timeout: seconds to wait for the echo reply
        :param ttl: IP time-to-live of the probe packet
        :param seq: ICMP sequence number
        :param size: ICMP payload size in bytes
        """
        self._host = str(host)
        self._timeout = int(timeout)
        self._ttl = int(ttl)
        self._seq = int(seq)
        self._size = int(size)
    def check(self):
        log.info('尝试 PING 主机: %s', self._host)
        try:
            delay = ping3.ping(
                dest_addr=self._host,
                timeout=self._timeout,
                ttl=self._ttl,
                seq=self._seq,
                size=self._size)
            if delay is None:
                # ping3 returns None when no reply payload was received.
                log.error('获取 PING 数据失败!')
            else:
                log.info('PING 成功,耗时: %.6fs', delay)
        except ping3.errors.PingError as e:
            # ping3.EXCEPTIONS is enabled at module level, so failures raise.
            log.error('发生错误: %s - %s', e.__class__, e)
class SocketRule(BaseRule):
    """Health check that opens a plain TCP connection to host:port.

    Fixes vs. original: the `timeout` parameter was stored but never
    applied to the connection attempt, and the socket object was leaked
    when the connection failed.
    """

    def __init__(self, host, port, timeout=10):
        """
        :param host: host name or IP address to connect to
        :param port: TCP port number
        :param timeout: connect timeout in seconds (now actually enforced)
        """
        self._host = str(host)
        self._port = int(port)
        self._timeout = int(timeout)

    def check(self):
        log.info('SOCKET 连接检查: %s:%d', self._host, self._port)
        try:
            # create_connection applies the timeout, and the context manager
            # guarantees the socket is closed on every path.
            with socket.create_connection((self._host, self._port), timeout=self._timeout):
                log.info('SOCKET TCP 连接成功!')
        except OSError as e:
            # OSError covers ConnectionError as well as socket.timeout.
            log.error('SOCKET TCP 连接失败: %s - %s', e.__class__, e)
class MemcacheRule(BaseRule):
    """Health check for a Memcached server: connect and log key statistics."""
    def __init__(self, host, port):
        self._host = str(host)
        self._port = int(port)
    def check(self):
        log.info('检查 Memcache 连接: %s:%d', self._host, self._port)
        try:
            mc = pymemcache.client.base.Client((self._host, self._port))
            stats = dict()
            # stats() returns bytes keys (and some bytes values); decode for
            # use in the log format strings below.
            for k, v in mc.stats().items():
                stats[k.decode('utf8')] = v.decode('utf8') if isinstance(v, bytes) else v
            mc.close()
            log.info('> Memcache 版本: %s, 进程 ID: %s',
                     stats['version'], stats['pid'])
            log.info('> 当前连接数: %s,历史连接总数: %s',
                     stats['curr_connections'], stats['total_connections'])
            log.info('> 已存储 %d 个对象,占用空间 %s, 命中率: %.2f%%',
                     stats['curr_items'], human_size(stats['bytes']),
                     no_zero_div(stats['get_hits'], stats['get_hits'] + stats['get_misses']) * 100)
        except Exception as e:
            # Broad catch: any connection/protocol error just fails the check.
            log.error('连接 Memcache 失败: %s - %s', e.__class__, e)
class RedisRule(BaseRule):
    """Health check for a standalone Redis server using the low-level connection API."""
    def __init__(self, host='127.0.0.1', port=6379, *args, **kwargs):
        # Extra args/kwargs are forwarded verbatim to redis.Connection
        # (e.g. password, db, socket options).
        self._host = str(host)
        self._port = int(port)
        self._args = args
        self._kwargs = kwargs
    def _redis_info(self, connection):
        """Issue INFO on a raw connection and parse the reply into a dict."""
        connection.send_command('INFO')
        return redis.client.parse_info(connection.read_response())
    def _log_redis_info(self, info):
        """Log a human-readable summary of an INFO reply (also used by subclasses)."""
        log.info('> Redis 版本: %s, 进程 ID: %d',
                 info['redis_version'], info['process_id'])
        log.info('> 内存占用: %s, 消耗峰值: %s',
                 info['used_memory_human'], info['used_memory_peak_human'])
        log.info('> 系统 CPU 占用: %d, 用户 CPU 占用: %d',
                 info['used_cpu_sys'], info['used_cpu_user'])
        log.info('> 已连接客户端: %d, 连接请求数: %d',
                 info['connected_clients'], info['total_connections_received'])
        log.info('> 已执行命令: %d, 每秒执行: %d',
                 info['total_commands_processed'], info['instantaneous_ops_per_sec'])
        log.info('> 等待阻塞命令客户端: %d, 被拒绝的连接请求: %d',
                 info['blocked_clients'], info['rejected_connections'])
        log.info('> 查找数据库键成功: %d, 失败: %d',
                 info['keyspace_hits'], info['keyspace_misses'])
    def check(self):
        log.info('检查 Redis 连接: %s:%d', self._host, self._port)
        try:
            conn = redis.Connection(self._host, self._port, *self._args, **self._kwargs)
            conn.connect()
            info = self._redis_info(conn)
            conn.disconnect()
            self._log_redis_info(info)
        except redis.exceptions.RedisError as e:
            log.error('连接 Redis 失败: %s - %s', e.__class__, e)
class RedisSentinelRule(RedisRule):
    """Health check for a Sentinel-managed Redis deployment.

    Reuses _log_redis_info from RedisRule to report on the master node.
    """
    def __init__(self, master, password='', sentinels=()):
        # NOTE(review): super().__init__ is deliberately not called -- this
        # rule connects through Sentinel, not a fixed host/port.
        self._master = str(master)
        self._password = str(password)
        self._sentinels = list(sentinels)
    def check(self):
        log.info('检查 Redis Sentinel 连接: %s', self._master)
        try:
            # Sentinel addresses are configured as "host:port" strings.
            sentinel = redis.sentinel.Sentinel(
                [s.split(':') for s in self._sentinels],
                password=self._password, socket_timeout=0.1)
            log.info('> master: %s', sentinel.discover_master(self._master))
            log.info('> slaves: %s', sentinel.discover_slaves(self._master))
            log.info('尝试获取 master 节点状态信息...')
            master = sentinel.master_for(self._master, socket_timeout=0.1)
            self._log_redis_info(master.info())
        except redis.sentinel.MasterNotFoundError as e:
            log.error('检查 Redis Sentinel 出错: %s - %s', e.__class__, e)
class MysqlRule(BaseRule):
    """Health check for a MySQL server: connect and report status/performance."""
    def __init__(self, host='localhost', port=3306, user='root', password='',
                 database='', connect_timeout=10, init_command='select 1',
                 performance_seconds=0):
        """
        :param performance_seconds: when > 0, sample GLOBAL STATUS twice this
            many seconds apart to report QPS/TPS and I/O throughput
        """
        self._host = str(host)
        self._port = int(port)
        self._user = str(user)
        self._password = str(password)
        self._database = str(database)
        self._connect_timeout = int(connect_timeout)
        self._init_command = str(init_command)
        self._performance_seconds = int(performance_seconds)
    def _connect(self, host, show_status=False):
        """Open a connection to `host`; optionally log full server status."""
        try:
            conn = pymysql.connect(
                host=host,
                port=self._port,
                user=self._user,
                password=self._password,
                database=self._database,
                connect_timeout=self._connect_timeout,
                init_command=self._init_command)
            if show_status:
                self._show_status(conn)
            else:
                log.info('[%s]: 连接成功', host)
            conn.close()
        except pymysql.err.MySQLError as e:
            log.error('[%s] 连接失败: %s - %s', host, e.__class__, e)
    def _show_info(self, conn, sql):
        """Run a SHOW ... statement and return it as {Variable_name: Value}."""
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        cursor.execute(sql)
        data = dict()
        for row in cursor.fetchall():
            # Automatically convert integer / floating-point values so the
            # reporting code below is easier to write.
            if re.match(r'^\-?\d+$', row['Value']):
                data[row['Variable_name']] = int(row['Value'])
            elif re.match(r'^\-?\d+\.\d+$', row['Value']):
                data[row['Variable_name']] = float(row['Value'])
            else:
                data[row['Variable_name']] = str(row['Value'])
        return data
    def _slave_info(self, conn):
        """Return the rows of SHOW SLAVE STATUS (replication state)."""
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        cursor.execute('SHOW SLAVE STATUS')
        return cursor.fetchall()
    def _show_status(self, conn):
        """Log cache hit ratios, connection counters and (optionally) throughput."""
        secs = self._performance_seconds
        vars = self._show_info(conn, 'SHOW VARIABLES')
        s1 = self._show_info(conn, 'SHOW GLOBAL STATUS')
        # Read/write command breakdown.
        r = s1.get('Com_select', 0) + s1.get('Qcache_hits', 0)
        w = s1.get('Com_insert', 0) + s1.get('Com_update', 0) + \
            s1.get('Com_delete', 0) + s1.get('Com_replace', 0)
        log.info('> 读写比例: %d reads / %d writes * 100 = %.2f%%',
                 r, w, no_zero_div(r, w))
        log.info('> Key Buffer 命中率: %.2f%% read hits / %.2f%% write hits',
                 (1 - no_zero_div(s1['Key_reads'], s1['Key_read_requests'])) * 100,
                 (1 - no_zero_div(s1['Key_writes'], s1['Key_write_requests'])) * 100)
        log.info('> InnoDB Buffer 命中率: %.2f%%',
                 (1 - no_zero_div(s1['Innodb_buffer_pool_reads'],
                                  s1['Innodb_buffer_pool_read_requests'])) * 100)
        log.info('> Thread Cache 命中率: %.2f%% read hits',
                 (1 - no_zero_div(s1['Threads_created'], s1['Connections'])) * 100)
        log.info('> 最大连接数限制(max_connections): %d', vars['max_connections'])
        log.info('> 当前开放连接(Threads_connected): %d', s1['Threads_connected'])
        log.info('> 当前运行连接(Threads_running): %d', s1['Threads_running'])
        log.info('> 服务器错误导致的失败连接(Connection_errors_internal): %d',
                 s1['Connection_errors_internal'])
        log.info('> 试图连接到服务器失败的连接(Aborted_connects): %d', s1['Aborted_connects'])
        log.info('> 未正确关闭导致连接中断的客户端(Aborted_clients): %d', s1['Aborted_clients'])
        log.info('> 超出最大连接数限制失败的连接(Connection_errors_max_connections): %d',
                 s1['Connection_errors_max_connections'])
        # try:
        #     slaves = self._slave_info(conn)
        #     # TODO: print slaves info
        # except pymysql.err.MySQLError as e:
        #     log.error('查询 SLAVE 信息失败: %s - %s', e.__class__, e)
        if secs > 0:
            # Sample GLOBAL STATUS again after `secs` seconds; the deltas
            # between the two samples give per-second rates.
            log.info('等待 %d 秒钟,以监测性能数据...', secs)
            time.sleep(secs)
            s2 = self._show_info(conn, 'SHOW GLOBAL STATUS')
            log.info('> 每秒查询量(QPS): %.2f | 每秒事务数(TPS): %.2f',
                     (s2['Queries'] - s1['Queries']) / secs,
                     ((s2['Com_commit'] - s1['Com_commit']) +
                      (s2['Com_rollback'] - s1['Com_rollback'])) / secs)
            log.info('> 每秒读取数据: %s | 每秒写入数据: %s',
                     human_size((s2['Innodb_data_read'] - s1['Innodb_data_read']) / secs),
                     human_size((s2['Innodb_data_written'] - s1['Innodb_data_written']) / secs))
            log.info('> 每秒接收数据: %s | 每秒发送数据: %s',
                     human_size((s2['Bytes_received'] - s1['Bytes_received']) / secs),
                     human_size((s2['Bytes_sent'] - s1['Bytes_sent']) / secs))
    def check(self):
        log.info("检查 MYSQL 连接 - HOST: %s:%d, USER: %s, DBNAME: %s",
                 self._host, self._port, self._user, self._database)
        self._connect(self._host, True)
class HealthCheck:
    """Loads a YAML configuration and runs every configured health-check rule."""
    def __init__(self, config_file):
        log.info("加载配置文件: %s", config_file)
        self._config = self._load_yaml_config(config_file)
    def _load_yaml_config(self, config_file):
        """Parse the YAML config file; raise IOError when it does not exist."""
        if not os.path.isfile(config_file):
            raise IOError('配置文件不存在: "%s"' % config_file)
        with open(config_file, 'r') as f:
            return yaml.load(f.read(), yaml.CLoader)
    def _get_rule_class(self, rule_name):
        """Map a config section name (e.g. 'http') to its rule class ('HttpRule')."""
        try:
            m = sys.modules[__name__]
            c = stringcase.pascalcase(rule_name) + 'Rule'
            if not hasattr(m, c):
                log.error("无效的规则类型: %s", c)
            # NOTE(review): when the class is missing, getattr raises
            # AttributeError, which is not caught here -- confirm intent.
            return getattr(m, c)
        except TypeError as e:
            log.error("配置文件参数错误: %s", e)
        return None
    def _check(self, rule, name, options):
        """Instantiate a single rule with its configured options and run it."""
        log.info("规则检查: [%s - %s]", rule, name)
        cls = self._get_rule_class(rule)
        if cls is not None:
            cls(**options).check()
    def do_check(self):
        """Run every named check in every rule section of the configuration."""
        for r in self._config:
            for n in self._config[r]:
                log.info("-" * 80)
                self._check(r, n, self._config[r][n])
if __name__ == '__main__':
    # Command line: health_check.py -f <config.yaml>
    parser = argparse.ArgumentParser(
        description='项目健康状态检查脚本',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-f', '--config-file', action='store',
                        dest='config', type=str, required=True, help='配置文件')
    parsed_args = parser.parse_args()
    # NOTE(review): the try/except wrapper around the run was commented out,
    # so unexpected errors currently propagate with a full traceback.
    # try:
    log.info("开始环境健康检查....")
    HealthCheck(parsed_args.config).do_check()
    # except Exception as e:
    #     log.error('%s: %s', e.__class__, e)
| StarcoderdataPython |
258350 | <reponame>Jeremiad/Flexget
"""Torrenting utils, mostly for handling bencoding and torrent files."""
# Torrent decoding is a short fragment from effbot.org. Site copyright says:
# Test scripts and other short code fragments can be considered as being in the public domain.
import binascii
import re
from contextlib import suppress
from typing import Dict, Union, Any, Callable, Match, Generator, Iterator, List
from loguru import logger
logger = logger.bind(name='torrent')
# Magic indicator used to quickly recognize torrent files
TORRENT_RE = re.compile(br'^d\d{1,3}:')
# List of all standard keys in a metafile
# See http://packages.python.org/pyrocore/apidocs/pyrocore.util.metafile-module.html#METAFILE_STD_KEYS
# Each entry is a key path split on '.', e.g. "info.piece length" ->
# ["info", "piece length"]; clean_meta() compares candidate paths against it.
METAFILE_STD_KEYS = [
    i.split('.')
    for i in (
        "announce",
        "announce-list",  # BEP-0012
        "comment",
        "created by",
        "creation date",
        "encoding",
        "info",
        "info.length",
        "info.name",
        "info.piece length",
        "info.pieces",
        "info.private",
        "info.files",
        "info.files.length",
        "info.files.path",
    )
]
def clean_meta(
    meta: Dict[str, Any], including_info: bool = False, log_func: Callable[..., None] = None
):
    """Clean meta dict. Optionally log changes using the given logger.
    See also http://packages.python.org/pyrocore/apidocs/pyrocore.util.metafile-pysrc.html#clean_meta
    @param including_info: Also clean keys inside the "info" dict and file entries.
    @param log_func: If given, a callable accepting a string message.
    @return: Set of keys removed from C{meta}.
    """
    modified = set()
    # Drop non-standard top-level keys (iterate over a snapshot since we mutate).
    for key in list(meta.keys()):
        if [key] not in METAFILE_STD_KEYS:
            if log_func:
                log_func("Removing key %r..." % (key,))
            del meta[key]
            modified.add(key)
    if including_info:
        # Drop non-standard keys inside the 'info' dict ...
        for key in list(meta["info"].keys()):
            if ["info", key] not in METAFILE_STD_KEYS:
                if log_func:
                    log_func("Removing key %r..." % ("info." + key,))
                del meta["info"][key]
                modified.add("info." + key)
        # ... and inside each file entry of a multi-file torrent.
        for idx, entry in enumerate(meta["info"].get("files", [])):
            for key in list(entry.keys()):
                if ["info", "files", key] not in METAFILE_STD_KEYS:
                    if log_func:
                        log_func("Removing key %r from file #%d..." % (key, idx + 1))
                    del entry[key]
                    modified.add("info.files." + key)
    return modified
def is_torrent_file(metafilepath: str) -> bool:
    """Check whether a file looks like a metafile by peeking into its content.
    Note that this doesn't ensure that the file is a complete and valid torrent,
    it just allows fast filtering of candidate files.
    @param metafilepath: Path to the file to check, must have read permissions for it.
    @return: True if there is a high probability this is a metafile.
    """
    with open(metafilepath, 'rb') as f:
        # The first 200 bytes are plenty to match the 'd<digits>:' prefix.
        data = f.read(200)
    magic_marker = bool(TORRENT_RE.match(data))
    if not magic_marker:
        logger.trace(
            "{} doesn't seem to be a torrent, got `{}` (hex)", metafilepath, binascii.hexlify(data)
        )
    return bool(magic_marker)
def tokenize(
    text: bytes,
    match=re.compile(
        br'([idel])|(\d+):|(-?\d+)'
    ).match,  # type: Callable[[bytes, int], Match[bytes]]
) -> Generator[bytes, None, None]:
    """Yield bencode tokens from *text*.

    Structural markers (i/d/l/e) and integer payloads are yielded as-is;
    a length-prefixed string is yielded as a virtual b's' marker followed
    by the raw payload bytes.
    """
    pos = 0
    size = len(text)
    while pos < size:
        mo = match(text, pos)
        token = mo.group(mo.lastindex)
        pos = mo.end()
        if mo.lastindex != 2:
            yield token
        else:
            # Group 2 matched "<length>:": emit the marker, then the payload.
            yield b's'
            yield text[pos:pos + int(token)]
            pos += int(token)
def decode_item(src_iter: Iterator[bytes], token: bytes) -> Union[bytes, str, int, list, dict]:
    """Decode one bencoded value from the token stream produced by tokenize().

    Raises ValueError on malformed input (missing terminator / unknown token).
    """
    if token == b'i':
        # Integer: payload token followed by the mandatory 'e' terminator.
        value = int(next(src_iter))
        if next(src_iter) != b'e':
            raise ValueError
        return value
    if token == b's':
        raw = next(src_iter)
        # Torrent strings are nominally UTF-8; fall back to raw bytes when
        # decoding fails (e.g. the binary 'pieces' field).
        with suppress(UnicodeDecodeError):
            return raw.decode('utf-8')
        return raw
    if token in (b'l', b'd'):
        # Container: decode child values until the matching 'e'.
        items = []
        child = next(src_iter)
        while child != b'e':
            items.append(decode_item(src_iter, child))
            child = next(src_iter)
        if token == b'd':
            # Dicts are flat key/value sequences: pair them up.
            return dict(zip(items[0::2], items[1::2]))
        return items
    raise ValueError
def bdecode(text: bytes) -> Dict[str, Any]:
    """Decode a complete bencoded byte string into Python objects.

    Raises SyntaxError when the input is malformed or has trailing data.
    """
    try:
        token_stream = tokenize(text)
        decoded = decode_item(token_stream, next(token_stream))
        for _ in token_stream:  # any leftover token means malformed input
            raise SyntaxError("trailing junk")
    except (AttributeError, ValueError, StopIteration, TypeError) as err:
        raise SyntaxError(f"syntax error: {err}") from err
    return decoded
# encoding implementation by d0b
def encode_string(data: str) -> bytes:
    """Bencode a text string by UTF-8-encoding it first."""
    payload = data.encode('utf-8')
    return encode_bytes(payload)
def encode_bytes(data: bytes) -> bytes:
    """Bencode a byte string as <length>:<payload>."""
    return b'%d:%s' % (len(data), data)
def encode_integer(data: int) -> bytes:
    """Bencode an integer as i<value>e."""
    return b''.join((b'i', str(data).encode(), b'e'))
def encode_list(data: list) -> bytes:
encoded = b'l'
for item in data:
encoded += bencode(item)
encoded += b'e'
return encoded
def encode_dictionary(data: dict) -> bytes:
encoded = b'd'
items = list(data.items())
items.sort()
for (key, value) in items:
encoded += bencode(key)
encoded += bencode(value)
encoded += b'e'
return encoded
def bencode(data: Union[bytes, str, int, list, dict]) -> bytes:
if isinstance(data, bytes):
return encode_bytes(data)
if isinstance(data, str):
return encode_string(data)
if isinstance(data, int):
return encode_integer(data)
if isinstance(data, list):
return encode_list(data)
if isinstance(data, dict):
return encode_dictionary(data)
raise TypeError(f'Unknown type for bencode: {type(data)}')
class Torrent:
    """Represents a torrent.

    Wraps the bdecoded torrent structure in ``self.content`` and exposes
    convenience accessors for common fields; ``self.modified`` tracks
    whether the in-memory structure diverged from the loaded file.
    """
    # string type used for keys, if this ever changes, stuff like "x in y"
    # gets broken unless you coerce to this type
    KEY_TYPE = str
    @classmethod
    def from_file(cls, filename: str) -> 'Torrent':
        """Create torrent from a .torrent file on disk (read as binary)."""
        with open(filename, 'rb') as handle:
            return cls(handle.read())
    def __init__(self, content: bytes) -> None:
        """Accepts raw torrent file contents as bytes."""
        # Make sure there is no trailing whitespace. see #1592
        content = content.strip()
        # decoded torrent structure
        self.content = bdecode(content)
        # set to True by the mutating methods/setters below
        self.modified = False
    def __repr__(self) -> str:
        """Debug representation showing key info-dict and top-level fields."""
        return "%s(%s, %s)" % (
            self.__class__.__name__,
            ", ".join(
                "%s=%r" % (key, self.content["info"].get(key))
                for key in ("name", "length", "private")
            ),
            ", ".join("%s=%r" % (key, self.content.get(key)) for key in ("announce", "comment")),
        )
    def get_filelist(self) -> List[Dict[str, Union[str, int]]]:
        """Return array containing fileinfo dictionaries (name, length, path)"""
        files = []
        if 'length' in self.content['info']:
            # single file torrent
            if 'name.utf-8' in self.content['info']:
                name = self.content['info']['name.utf-8']
            else:
                name = self.content['info']['name']
            t = {'name': name, 'size': self.content['info']['length'], 'path': ''}
            files.append(t)
        else:
            # multifile torrent
            for item in self.content['info']['files']:
                if 'path.utf-8' in item:
                    path = item['path.utf-8']
                else:
                    path = item['path']
                t = {'path': '/'.join(path[:-1]), 'name': path[-1], 'size': item['length']}
                files.append(t)
        # Decode strings
        for item in files:
            for field in ('name', 'path'):
                # These should already be decoded if they were utf-8, if not we can try some other stuff
                if not isinstance(item[field], str):
                    try:
                        item[field] = item[field].decode(self.content.get('encoding', 'cp1252'))
                    except UnicodeError:
                        # Broken beyond anything reasonable
                        fallback = item[field].decode('utf-8', 'replace').replace('\ufffd', '_')
                        # NOTE(review): brace-style placeholders suggest a
                        # loguru-like logger; stdlib logging would not
                        # interpolate these args -- confirm which logger is used.
                        logger.warning(
                            '{}={!r} field in torrent {!r} is wrongly encoded, falling back to `{}`',
                            field,
                            item[field],
                            self.content['info']['name'],
                            fallback,
                        )
                        item[field] = fallback
        return files
    @property
    def is_multi_file(self) -> bool:
        """Return True if the torrent is a multi-file torrent"""
        return 'files' in self.content['info']
    @property
    def name(self) -> str:
        """Return name of the torrent ('' when absent)."""
        return self.content['info'].get('name', '')
    @property
    def size(self) -> int:
        """Return total size of the torrent in bytes."""
        size = 0
        # single file torrent
        if 'length' in self.content['info']:
            size = int(self.content['info']['length'])
        else:
            # multifile torrent
            for item in self.content['info']['files']:
                size += int(item['length'])
        return size
    @property
    def private(self) -> Union[int, bool]:
        """Return the info dict's 'private' flag (False when absent)."""
        return self.content['info'].get('private', False)
    @property
    def trackers(self) -> List[str]:
        """
        :returns: List of trackers, supports single-tracker and multi-tracker implementations
        """
        trackers = []
        # the spec says, if announce-list present use ONLY that
        # funny iteration because of nesting, ie:
        # [ [ tracker1, tracker2 ], [backup1] ]
        for tl in self.content.get('announce-list', []):
            for t in tl:
                trackers.append(t)
        # NOTE(review): when neither 'announce' nor 'announce-list' is present
        # this appends None to the result -- confirm callers expect that.
        if not self.content.get('announce') in trackers:
            trackers.append(self.content.get('announce'))
        return trackers
    @property
    def info_hash(self) -> str:
        """Return Torrent info hash"""
        import hashlib
        # SHA-1 of the bencoded info dict, returned as upper-case hex.
        sha1_hash = hashlib.sha1()
        info_data = encode_dictionary(self.content['info'])
        sha1_hash.update(info_data)
        return str(sha1_hash.hexdigest().upper())
    @property
    def comment(self) -> str:
        """Return the torrent comment (raises KeyError when absent)."""
        return self.content['comment']
    @comment.setter
    def comment(self, comment: str) -> None:
        """Set the torrent comment and flag the torrent as modified."""
        self.content['comment'] = comment
        self.modified = True
    @property
    def piece_size(self) -> int:
        """Return the piece size ('piece length') in bytes."""
        return int(self.content['info']['piece length'])
    @property
    def libtorrent_resume(self) -> dict:
        """Return the libtorrent resume data ({} when absent)."""
        return self.content.get('libtorrent_resume', {})
    def set_libtorrent_resume(self, chunks, files) -> None:
        """Replace the libtorrent resume data and mark the torrent modified."""
        self.content['libtorrent_resume'] = {}
        self.content['libtorrent_resume']['bitfield'] = chunks
        self.content['libtorrent_resume']['files'] = files
        self.modified = True
    def remove_multitracker(self, tracker: str) -> None:
        """Removes passed multi-tracker from this torrent"""
        # iterate over a copy since sublists may be removed while looping
        for tl in self.content.get('announce-list', [])[:]:
            with suppress(AttributeError, ValueError):
                tl.remove(tracker)
                # only reached (and flagged) when a removal actually happened
                self.modified = True
                # if no trackers left in list, remove whole list
                if not tl:
                    self.content['announce-list'].remove(tl)
    def add_multitracker(self, tracker: str) -> None:
        """Appends multi-tracker to this torrent"""
        self.content.setdefault('announce-list', [])
        self.content['announce-list'].append([tracker])
        self.modified = True
    def __str__(self) -> str:
        """Human-readable summary listing the torrent's files."""
        return f'<Torrent instance. Files: {self.get_filelist()}>'
    def encode(self) -> bytes:
        """Re-encode the (possibly modified) structure back to bencoded bytes."""
        return bencode(self.content)
| StarcoderdataPython |
11356251 | <gh_stars>0
from setuptools import setup
# Package metadata gathered in one mapping so it can be inspected or reused;
# setup() receives it unpacked as keyword arguments.
PACKAGE_INFO = dict(
    name="nicepy",
    version="0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="NICE experiment data tools",
    # NOTE(review): 'requires' is legacy metadata largely ignored by modern
    # tooling; consider 'install_requires' -- left unchanged to preserve
    # current behavior.
    requires=['numpy', 'scipy', 'pint', 'matplotlib', 'pandas', 'labrad'],
    url='https://github.com/Campbell-IonMolecule/nicepy',
)

setup(**PACKAGE_INFO)
| StarcoderdataPython |
345930 | #!/usr/bin/env python
"""
@author: <NAME>
"""
from VISA_Driver import VISA_Driver
from InstrumentConfig import InstrumentQuantity
import numpy as np
__version__ = "0.0.1"
class Driver(VISA_Driver):
    """This class implements the Rigol scope driver."""

    def performGetValue(self, quant, options=None):
        """Perform the Get Value instrument operation.

        For the per-channel trace quantities this fetches waveform data over
        VISA and packages it as a trace dict; every other quantity is
        delegated to the generic VISA driver implementation.

        :param quant: quantity object being read (framework-provided).
        :param options: optional dict of read options, forwarded to the base
            class for non-trace quantities.
        :return: trace dict, empty numpy array (channel disabled), or the
            base-class result.
        """
        # Fix: the original signature used a mutable default (options={});
        # normalize None to a fresh dict so no state is ever shared.
        if options is None:
            options = {}
        trace_quantities = ('Ch1 - Data', 'Ch2 - Data', 'Ch3 - Data', 'Ch4 - Data')
        if quant.name not in trace_quantities:
            # Not a trace read: fall back to the generic VISA implementation.
            return VISA_Driver.performGetValue(self, quant, options)
        # Quantity names are of the form 'ChN - Data'; the digit is at index 2.
        channel = int(quant.name[2])
        if not self.getValue('Ch%d - Enabled' % channel):
            # Channel disabled: return an empty trace.
            return np.asarray([])
        # Ask the scope for ASCII-formatted waveform values.
        self.writeAndLog('FORMAT:DATA ASCii')
        raw_data = self.askAndLog('CHAN%d:WAV:DATA:VAL?' % channel)
        data_list = [float(s) for s in raw_data.split(',')]
        self.log(len(data_list))
        if self.getValue('Return X value'):
            # Instrument interleaves x/y pairs: even indices x, odd indices y.
            x_list = data_list[0::2]
            y_list = data_list[1::2]
            return InstrumentQuantity.getTraceDict(y_list, x=x_list)
        # Only y values returned; derive the x increment from the timebase span.
        y_list = data_list
        x_incre = float(self.askAndLog('TIMebase:RANGe?')) / len(y_list)
        return InstrumentQuantity.getTraceDict(y_list, x0=0, dx=x_incre)
if __name__ == '__main__':
    # No standalone behavior; presumably this driver module is loaded by the
    # framework that provides VISA_Driver -- nothing to run directly.
    pass
| StarcoderdataPython |
8050952 | <filename>ztp/vnf.onboard-test.py
#!/usr/bin/python
"""
Test script can trigger second phase VNF on-boarding without
actually doing any VPN.
You just need to make sure vCenter has reachability to remote ESXi
host
<NAME>
<EMAIL>
"""
import logging
import pika
import yaml
import argparse
def main(esx_hostname):
    """Publish *esx_hostname* onto the 'vnfonboarding' AMQP queue.

    :param esx_hostname: hostname of the remote ESXi host to on-board.
    :return: None
    """
    # Fix: the original used yaml.load(open(...)), which leaked the file
    # handle and used the unsafe/deprecated loader; use a context manager
    # and safe_load instead.
    with open("config/default.yaml") as config_file:
        default_config = yaml.safe_load(config_file)
    credentials = pika.PlainCredentials(default_config['ampq']['username'], default_config['ampq']['password'])
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=default_config['ampq']['hostname'],
                                  credentials=credentials))
    channel = connection.channel()
    # Declaring the queue is idempotent; ensures it exists before publishing.
    channel.queue_declare(queue='vnfonboarding')
    channel.basic_publish(exchange='',
                          routing_key='vnfonboarding',
                          body=esx_hostname)
    connection.close()
    logging.info("Pushed job to a queue")
if __name__ == "__main__":
    # Main entry for the VNF on-boarding trigger: parse the ESXi hostname
    # from the command line and enqueue the on-boarding job.
    cli_parser = argparse.ArgumentParser()
    cli_parser.add_argument('-hh', '--hostname', help='Hostname of esxi', required=True)
    cli_args = vars(cli_parser.parse_args())
    # Guard against an empty hostname string (argparse only enforces presence).
    if cli_args['hostname']:
        main(cli_args['hostname'])
| StarcoderdataPython |
9744767 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, unicode_literals
from django.conf import settings
# Restricts the attributes that are passed from ModelAdmin2 classes to their
# views. This is a security feature.
# See the docstring on djadmin2.types.ModelAdmin2 for more detail.
# Whitelist of attributes forwarded from ModelAdmin2 classes to their views
# (see the security note above).
MODEL_ADMIN_ATTRS = (
    'actions_selection_counter', "date_hierarchy", 'list_display',
    'list_display_links', 'list_filter', 'admin', 'search_fields',
    'field_renderers', 'index_view', 'detail_view', 'create_view',
    'update_view', 'delete_view', 'get_default_view_kwargs',
    'get_list_actions', 'get_ordering', 'actions_on_bottom', 'actions_on_top',
    'ordering', 'save_on_top', 'save_on_bottom', 'readonly_fields', )
# Theme template directory; projects may override via
# settings.ADMIN2_THEME_DIRECTORY, defaulting to the bootstrap3 theme.
ADMIN2_THEME_DIRECTORY = getattr(settings, "ADMIN2_THEME_DIRECTORY", "djadmin2theme_bootstrap3")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.